# File: GPSKet-master/GPSKet/operator/hamiltonian/hubbard.py
import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
import netket.jax as nkjax
from numba import jit
from typing import List, Tuple, Union, Optional
from netket.utils.types import DType
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.operator.fermion import FermionicDiscreteOperator, apply_hopping
from GPSKet.operator.hamiltonian.ab_initio import get_parity_multiplicator_hop
class FermiHubbard(FermionicDiscreteOperator):
def __init__(self, hilbert: FermionicDiscreteHilbert, edges: List[Tuple[int, int]], U: float=0.0, t: Union[float, List[float]]=1.):
super().__init__(hilbert)
self.U = U
self.edges = np.array(edges, dtype=int).reshape((-1,2))
if isinstance(t, list):
self.t = np.array(t)
else:
self.t = np.ones(self.edges.shape[0]) * t
@property
def is_hermitian(self) -> bool:
return True
@property
def dtype(self) -> DType:
return float
# pad argument is just a dummy atm -> TODO: improve this!
def get_conn_flattened(self, x, sections, pad=True):
x_primes, mels = self._get_conn_flattened_kernel(np.asarray(x, dtype = np.uint8),
sections, self.U, self.edges, self.t)
return x_primes, mels
@staticmethod
@jit(nopython=True)
def _get_conn_flattened_kernel(x, sections, U, edges, t):
n_conn = x.shape[0] * (1 + edges.shape[0]*4)
x_prime = np.empty((n_conn, x.shape[1]), dtype=np.uint8)
mels = np.empty(n_conn, dtype=np.float64)
count = 0
for batch_id in range(x.shape[0]):
# diagonal element
x_prime[count, :] = x[batch_id, :]
mels[count] = U * np.sum(x[batch_id, :] == 3)
count += 1
is_occ_up = (x[batch_id] & 1).astype(np.bool_)
is_occ_down = (x[batch_id] & 2).astype(np.bool_)
up_count = np.cumsum(is_occ_up)
down_count = np.cumsum(is_occ_down)
# hopping
for edge_count in range(edges.shape[0]):
edge = edges[edge_count]
# spin up
x_prime[count, :] = x[batch_id, :]
mels[count] = -t[edge_count] * apply_hopping(edge[0], edge[1], x_prime[count], 1, cummulative_count=up_count)
count += 1
x_prime[count, :] = x[batch_id, :]
mels[count] = -t[edge_count] * apply_hopping(edge[1], edge[0], x_prime[count], 1, cummulative_count=up_count)
count += 1
# spin down
x_prime[count, :] = x[batch_id, :]
mels[count] = -t[edge_count] * apply_hopping(edge[0], edge[1], x_prime[count], 2, cummulative_count=down_count)
count += 1
x_prime[count, :] = x[batch_id, :]
mels[count] = -t[edge_count] * apply_hopping(edge[1], edge[0], x_prime[count], 2, cummulative_count=down_count)
count += 1
sections[batch_id] = count
return x_prime, mels
""" Wrapper class which can be used to apply the on-the-fly updating,
also includes another flag specifying if fast updating should be applied or not.
"""
class FermiHubbardOnTheFly(FermiHubbard):
pass
def local_en_on_the_fly(logpsi, pars, samples, args, use_fast_update=False, chunk_size=None):
edges, U, t = args
def vmap_fun(sample):
sample = jnp.asarray(sample, np.uint8)
is_occ_up = (sample & 1)
is_occ_down = (sample & 2) >> 1
up_count = jnp.cumsum(is_occ_up, dtype=int)
down_count = jnp.cumsum(is_occ_down, dtype=int)
# Compute log_amp of sample
if use_fast_update:
log_amp, intermediates_cache = logpsi(pars, jnp.expand_dims(sample, 0), mutable="intermediates_cache", cache_intermediates=True)
parameters = {**pars, **intermediates_cache}
else:
log_amp = logpsi(pars, jnp.expand_dims(sample, 0))
""" This function returns the log_amp of the connected configuration which is only specified
by the occupancy on the updated sites as well as the indices of the sites updated."""
def get_connected_log_amp(updated_occ_partial, update_sites):
if use_fast_update:
log_amp_connected = logpsi(parameters, jnp.expand_dims(updated_occ_partial, 0), update_sites=jnp.expand_dims(update_sites, 0))
else:
updated_config = sample.at[update_sites].set(updated_occ_partial)
log_amp_connected = logpsi(pars, jnp.expand_dims(updated_config, 0))
return log_amp_connected
local_en = U * jnp.sum(sample == 3)
def get_hopping_term(spin_int, cumulative_count):
def apply_hopping(annihilate_site, create_site):
def hop(operands):
annihilate_site, create_site = operands
# Updated config at update sites
start_occ = sample[annihilate_site]
end_occ = sample[create_site]
new_occ = jnp.array([start_occ-spin_int, end_occ+spin_int], dtype=jnp.uint8)
update_sites = jnp.array([annihilate_site, create_site])
# Get parity
parity_multiplicator = get_parity_multiplicator_hop(update_sites, cumulative_count)
# Evaluate amplitude ratio
log_amp_connected = get_connected_log_amp(new_occ, update_sites)
amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
return parity_multiplicator * amp_ratio.astype(jnp.complex128)
def no_hop(operands):
return jnp.zeros((), dtype=jnp.complex128)
start_occ = sample[annihilate_site]
end_occ = sample[create_site]
multiplicator = jax.lax.cond(
jnp.logical_or(~(start_occ & spin_int).astype(bool), (end_occ & spin_int).astype(bool)),
no_hop,
hop,
(annihilate_site, create_site)
)
return multiplicator
def hopping_loop(index):
edge = edges[index, :]
value = apply_hopping(edge[0], edge[1])
value += apply_hopping(edge[1], edge[0])
value *= -t[index]
return value
return jnp.sum(jax.vmap(hopping_loop)(jnp.arange(edges.shape[0])))
if edges.shape[0] > 0:
local_en += get_hopping_term(1, up_count)
local_en += get_hopping_term(2, down_count)
return local_en
return nkjax.vmap_chunked(vmap_fun, chunk_size=chunk_size)(samples)
@nk.vqs.get_local_kernel_arguments.dispatch
def get_local_kernel_arguments(vstate: nk.vqs.MCState, op: FermiHubbardOnTheFly):
samples = vstate.samples
edges = op.edges
U = op.U
t = op.t
return (samples, (edges, U, t))
@nk.vqs.get_local_kernel.dispatch(precedence=1)
def get_local_kernel(vstate: nk.vqs.MCState, op: FermiHubbardOnTheFly, chunk_size: Optional[int] = None):
try:
use_fast_update = vstate.model.apply_fast_update
except AttributeError:
use_fast_update = False
return nkjax.HashablePartial(local_en_on_the_fly, use_fast_update=use_fast_update, chunk_size=chunk_size)
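# Hedged usage sketch (an illustrative addition, not part of the original
# module; the FermionicDiscreteHilbert constructor signature is an assumption):
# each input configuration yields 1 diagonal entry plus 4 hopping entries per
# edge (two hop directions times two spin species) in get_conn_flattened.
#
# hilbert = FermionicDiscreteHilbert(4, n_elec=(2, 2))
# ham = FermiHubbard(hilbert, edges=[(0, 1), (1, 2), (2, 3)], U=4.0, t=1.0)
# x = np.array([[3, 0, 1, 2]], dtype=np.uint8)  # 0=empty, 1=up, 2=down, 3=doubly occupied
# sections = np.zeros(1, dtype=np.int64)
# x_primes, mels = ham.get_conn_flattened(x, sections)
# assert x_primes.shape[0] == 1 + 4 * 3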
# File: GPSKet-master/GPSKet/operator/hamiltonian/ab_initio.py
import numpy as np
import netket as nk
import jax.numpy as jnp
import jax
from numba import jit
import netket.jax as nkjax
from typing import Optional
from functools import partial
from netket.utils.types import DType
from GPSKet.operator.fermion import FermionicDiscreteOperator, apply_hopping
from GPSKet.models import qGPS
class AbInitioHamiltonian(FermionicDiscreteOperator):
def __init__(self, hilbert, h_mat, eri_mat):
""" Though not entirely necessary it makes our life a little bit easier to restrict
ourselves to fixed electron number/magnetization hilbert spaces. """
assert(hilbert._n_elec is not None)
super().__init__(hilbert)
self.h_mat = h_mat
self.eri_mat = eri_mat
# See [Neuscamman (2013), https://doi.org/10.1063/1.4829835] for the definition of t
self.t_mat = self.h_mat - 0.5 * np.einsum("prrq->pq", eri_mat)
@property
def is_hermitian(self) -> bool:
return True
@property
def dtype(self) -> DType:
return float
# Pad argument is just a dummy at the moment,
# TODO: include padding for unconstrained Hilbert spaces
def get_conn_flattened(self, x, sections, pad=True):
assert(not pad or self.hilbert.constrained)
x_primes, mels = self._get_conn_flattened_kernel(np.asarray(x, dtype = np.uint8),
sections, self.t_mat, self.eri_mat)
return x_primes, mels
# This implementation follows the approach outlined in [Neuscamman (2013), https://doi.org/10.1063/1.4829835].
@staticmethod
@jit(nopython=True)
def _get_conn_flattened_kernel(x, sections, t, eri):
range_indices = np.arange(x.shape[-1])
x_prime = np.empty((0, x.shape[1]), dtype=np.uint8)
mels = np.empty(0, dtype=np.complex128)
c = 0
for batch_id in range(x.shape[0]):
is_occ_up = (x[batch_id] & 1).astype(np.bool_)
is_occ_down = (x[batch_id] & 2).astype(np.bool_)
up_count = np.cumsum(is_occ_up)
down_count = np.cumsum(is_occ_down)
is_empty_up = ~is_occ_up
is_empty_down = ~is_occ_down
up_occ_inds = range_indices[is_occ_up]
down_occ_inds = range_indices[is_occ_down]
up_unocc_inds = range_indices[is_empty_up]
down_unocc_inds = range_indices[is_empty_down]
connected_con = 1
connected_con += len(up_occ_inds) * len(up_unocc_inds)
connected_con += len(down_occ_inds) * len(down_unocc_inds)
connected_con += len(down_occ_inds) * len(down_unocc_inds) * (len(down_occ_inds) - 1) * (len(down_unocc_inds) - 1)
connected_con += len(up_occ_inds) * len(up_unocc_inds) * (len(up_occ_inds) - 1) * (len(up_unocc_inds) - 1)
connected_con += len(up_occ_inds) * len(up_unocc_inds) * len(down_occ_inds) * len(down_unocc_inds)
x_prime = np.append(x_prime, np.empty((connected_con, x.shape[1]), dtype=np.uint8), axis=0)
mels = np.append(mels, np.empty(connected_con, dtype=np.complex128))
diag_element = 0.0
for i in up_occ_inds:
diag_element += t[i, i]
for i in down_occ_inds:
diag_element += t[i, i]
for i in up_occ_inds:
for j in down_occ_inds:
diag_element += eri[i, i, j, j]
for i in up_occ_inds:
for j in up_occ_inds:
diag_element += 0.5 * eri[i, i, j, j]
for i in up_occ_inds:
for a in up_unocc_inds:
diag_element += 0.5 * eri[i, a, a, i]
for i in down_occ_inds:
for j in down_occ_inds:
diag_element += 0.5 * eri[i, i, j, j]
for i in down_occ_inds:
for a in down_unocc_inds:
diag_element += 0.5 * eri[i, a, a, i]
x_prime[c, :] = x[batch_id, :]
mels[c] = diag_element
c += 1
# One-body parts
for i in up_occ_inds:
for a in up_unocc_inds:
x_prime[c, :] = x[batch_id, :]
multiplicator = apply_hopping(i, a, x_prime[c], 1,
cummulative_count=up_count)
value = t[i, a]
for k in up_occ_inds:
value += eri[i, a, k, k]
for k in down_occ_inds:
value += eri[i, a, k, k]
for k in up_unocc_inds:
value += 0.5 * eri[i, k, k, a]
for k in up_occ_inds:
value -= 0.5 * eri[k, a, i, k]
mels[c] = multiplicator * value
c += 1
for i in down_occ_inds:
for a in down_unocc_inds:
x_prime[c, :] = x[batch_id, :]
multiplicator = apply_hopping(i, a, x_prime[c], 2,
cummulative_count=down_count)
value = t[i, a]
for k in down_occ_inds:
value += eri[i, a, k, k]
for k in up_occ_inds:
value += eri[i, a, k, k]
for k in down_unocc_inds:
value += 0.5 * eri[i, k, k, a]
for k in down_occ_inds:
value -= 0.5 * eri[k, a, i, k]
mels[c] = multiplicator * value
c += 1
# Two body parts
for i in up_occ_inds:
for a in up_unocc_inds:
for j in up_occ_inds:
for b in up_unocc_inds:
if i != j and a != b:
x_prime[c, :] = x[batch_id, :]
multiplicator = apply_hopping(i, a, x_prime[c], 1,
cummulative_count=up_count)
multiplicator *= apply_hopping(j, b, x_prime[c], 1,
cummulative_count=up_count)
# take first hop into account
left_limit = min(j, b)
right_limit = max(j, b) - 1
if i <= right_limit and i > left_limit:
multiplicator *= -1
if a <= right_limit and a > left_limit:
multiplicator *= -1
mels[c] = 0.5 * multiplicator * eri[i,a,j,b]
c += 1
for i in down_occ_inds:
for a in down_unocc_inds:
for j in down_occ_inds:
for b in down_unocc_inds:
if i != j and a != b:
x_prime[c, :] = x[batch_id, :]
multiplicator = apply_hopping(i, a, x_prime[c], 2,
cummulative_count=down_count)
multiplicator *= apply_hopping(j, b, x_prime[c], 2,
cummulative_count=down_count)
# Take first hop into account
left_limit = min(j, b)
right_limit = max(j, b) - 1
if i <= right_limit and i > left_limit:
multiplicator *= -1
if a <= right_limit and a > left_limit:
multiplicator *= -1
mels[c] = 0.5 * multiplicator * eri[i,a,j,b]
c += 1
for i in up_occ_inds:
for a in up_unocc_inds:
for j in down_occ_inds:
for b in down_unocc_inds:
x_prime[c, :] = x[batch_id, :]
multiplicator = apply_hopping(i, a, x_prime[c], 1,
cummulative_count=up_count)
multiplicator *= apply_hopping(j, b, x_prime[c], 2,
cummulative_count=down_count)
mels[c] = multiplicator * eri[i,a,j,b]
c += 1
sections[batch_id] = c
return x_prime, mels
""" Wrapper class which can be used to apply the on-the-fly updating"""
class AbInitioHamiltonianOnTheFly(AbInitioHamiltonian):
pass
""" Helper function which returns the parity for an electron hop by counting
how many electrons the hopping electron moved past. Careful!, this
is only valid if it is a valid electron move, this function does NOT do any
check if the move is valid (in contrast to the apply_hopping function of the
general fermion operator file)"""
def get_parity_multiplicator_hop(update_sites, cummulative_el_count):
limits = jnp.sort(update_sites)
parity_count = (cummulative_el_count[limits[1] - 1] - cummulative_el_count[limits[0]])
# Type promotion is important: this gives incorrect results if the unsigned counts are not promoted to a signed integer
return (jnp.int32(1) - 2 * (parity_count & 1))
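# Minimal plain-Python reference for the parity convention above (an
# illustrative addition, not part of the original module): the sign of a hop
# equals (-1)**(number of like-spin electrons strictly between the two sites).
def _parity_reference_hop(update_sites, cumulative_el_count):
    lo, hi = sorted(int(s) for s in update_sites)
    crossed = int(cumulative_el_count[hi - 1]) - int(cumulative_el_count[lo])
    return 1 - 2 * (crossed % 2)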
"""
If the flag return_local_RDMs is set to true, this function also returns objects resembling the 1-RDMS and 2-RDMS for the samples.
In particular, two t_RDM and eri_RDM are evaluated for each sample which describe the linear dependency
of the local energy on the t_mat and eri_mat, in the sense that local_en = np.sum(t_mat * t_RDM) + np.sum(eri_mat * eri_RDM).
Storing these can be useful to interpolate between different calculations with analytic continuation type approaches.
"""
def local_en_on_the_fly(n_elecs, logpsi, pars, samples, args, use_fast_update=False, chunk_size=None, return_local_RDMs=False):
t = args[0]
eri = args[1]
n_sites = samples.shape[-1]
def vmap_fun(sample):
sample = jnp.asarray(sample, jnp.uint8)
is_occ_up = (sample & 1)
is_occ_down = (sample & 2) >> 1
up_count = jnp.cumsum(is_occ_up, dtype=int)
down_count = jnp.cumsum(is_occ_down, dtype=int)
is_empty_up = 1 - is_occ_up
is_empty_down = 1 - is_occ_down
up_occ_inds, = jnp.nonzero(is_occ_up, size=n_elecs[0])
down_occ_inds, = jnp.nonzero(is_occ_down, size=n_elecs[1])
up_unocc_inds, = jnp.nonzero(is_empty_up, size=n_sites-n_elecs[0])
down_unocc_inds, = jnp.nonzero(is_empty_down, size=n_sites-n_elecs[1])
""" The implementation mostly follows the construction of the connected configurations
as applied in the get_conn_flattened method which is maybe more readable. This is based
on the approach as presented in [Neuscamman (2013), https://doi.org/10.1063/1.4829835]."""
# All the diagonal contributions
local_en = jnp.sum(t[up_occ_inds, up_occ_inds])
local_en += jnp.sum(t[down_occ_inds, down_occ_inds])
local_en += jnp.sum(eri[up_occ_inds, up_occ_inds, :, :][:, down_occ_inds, down_occ_inds])
local_en += 0.5 * jnp.sum(eri[up_occ_inds, up_occ_inds, :, :][:, up_occ_inds, up_occ_inds])
local_en += 0.5 * jnp.sum(eri[up_occ_inds, :, :, up_occ_inds][:, up_unocc_inds, up_unocc_inds])
local_en += 0.5 * jnp.sum(eri[down_occ_inds, down_occ_inds, :, :][:, down_occ_inds, down_occ_inds])
local_en += 0.5 * jnp.sum(eri[down_occ_inds, :, :, down_occ_inds][:, down_unocc_inds, down_unocc_inds])
if return_local_RDMs:
t_RDM = jnp.zeros(t.shape, dtype = complex)
eri_RDM = jnp.zeros(eri.shape, dtype = complex)
t_RDM = t_RDM.at[up_occ_inds, up_occ_inds].add(1)
t_RDM = t_RDM.at[down_occ_inds, down_occ_inds].add(1)
def update_eri(count, val):
def inner_update(innercount, innerval):
return innerval.at[up_occ_inds[count], up_occ_inds[count], down_occ_inds[innercount], down_occ_inds[innercount]].add(1)
return jax.lax.fori_loop(0, len(down_occ_inds), inner_update, val)
eri_RDM = jax.lax.fori_loop(0, len(up_occ_inds), update_eri, eri_RDM)
def update_eri(count, val):
def inner_update(innercount, innerval):
return innerval.at[up_occ_inds[count], up_occ_inds[count], up_occ_inds[innercount], up_occ_inds[innercount]].add(0.5)
return jax.lax.fori_loop(0, len(up_occ_inds), inner_update, val)
eri_RDM = jax.lax.fori_loop(0, len(up_occ_inds), update_eri, eri_RDM)
def update_eri(count, val):
def inner_update(innercount, innerval):
return innerval.at[up_occ_inds[count], up_unocc_inds[innercount], up_unocc_inds[innercount], up_occ_inds[count]].add(0.5)
return jax.lax.fori_loop(0, len(up_unocc_inds), inner_update, val)
eri_RDM = jax.lax.fori_loop(0, len(up_occ_inds), update_eri, eri_RDM)
def update_eri(count, val):
def inner_update(innercount, innerval):
return innerval.at[down_occ_inds[count], down_occ_inds[count], down_occ_inds[innercount], down_occ_inds[innercount]].add(0.5)
return jax.lax.fori_loop(0, len(down_occ_inds), inner_update, val)
eri_RDM = jax.lax.fori_loop(0, len(down_occ_inds), update_eri, eri_RDM)
def update_eri(count, val):
def inner_update(innercount, innerval):
return innerval.at[down_occ_inds[count], down_unocc_inds[innercount], down_unocc_inds[innercount], down_occ_inds[count]].add(0.5)
return jax.lax.fori_loop(0, len(down_unocc_inds), inner_update, val)
eri_RDM = jax.lax.fori_loop(0, len(down_occ_inds), update_eri, eri_RDM)
# The following part evaluates the contributions from the connected configurations.
# Compute log_amp of sample
if use_fast_update:
log_amp, intermediates_cache = logpsi(pars, jnp.expand_dims(sample, 0), mutable="intermediates_cache", cache_intermediates=True)
parameters = {**pars, **intermediates_cache}
else:
log_amp = logpsi(pars, jnp.expand_dims(sample, 0))
""" This function returns the log_amp of the connected configuration which is only specified
by the occupancy on the updated sites as well as the indices of the sites updated."""
def get_connected_log_amp(updated_occ_partial, update_sites):
if use_fast_update:
log_amp_connected = logpsi(parameters, jnp.expand_dims(updated_occ_partial, 0), update_sites=jnp.expand_dims(update_sites, 0))
else:
"""
Careful: Go through update_sites in reverse order to ensure the actual updates (which come first in the array)
are applied and not the dummy updates.
Due to the non-determinism of updates with .at, we cannot use this and need to scan explicitly.
"""
def scan_fun(carry, count):
return (carry.at[update_sites[count]].set(updated_occ_partial[count]), None)
updated_config = jax.lax.scan(scan_fun, sample, jnp.arange(len(update_sites)), reverse=True)[0]
log_amp_connected = logpsi(pars, jnp.expand_dims(updated_config, 0))
return log_amp_connected
def get_one_body_term_up(i, a):
# Updated config at update sites
new_occ = jnp.array([sample[i]-1, sample[a]+1], dtype=jnp.uint8)
update_sites = jnp.array([i, a])
# Get parity
parity_multiplicator = get_parity_multiplicator_hop(update_sites, up_count)
# Evaluate amplitude ratio
log_amp_connected = get_connected_log_amp(new_occ, update_sites)
amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
value = t[i, a]
value += jnp.sum(eri[i, a, up_occ_inds, up_occ_inds])
value += jnp.sum(eri[i, a, down_occ_inds, down_occ_inds])
value += 0.5 * jnp.sum(eri[i, up_unocc_inds, up_unocc_inds, a])
value -= 0.5 * jnp.sum(eri[up_occ_inds, a, i, up_occ_inds])
if return_local_RDMs:
t_contribution = amp_ratio * parity_multiplicator
eri_contribution_1 = jnp.zeros((eri.shape[2], eri.shape[3]), dtype=complex)
eri_contribution_1 = eri_contribution_1.at[up_occ_inds, up_occ_inds].add(t_contribution)
eri_contribution_1 = eri_contribution_1.at[down_occ_inds, down_occ_inds].add(t_contribution)
eri_contribution_2 = jnp.zeros((eri.shape[1], eri.shape[2]), dtype=complex)
eri_contribution_2 = eri_contribution_2.at[up_unocc_inds, up_unocc_inds].add(0.5 * t_contribution)
eri_contribution_3 = jnp.zeros((eri.shape[0], eri.shape[3]), dtype=complex)
eri_contribution_3 = eri_contribution_3.at[up_occ_inds, up_occ_inds].add(-0.5 * t_contribution)
return value * t_contribution, t_contribution, eri_contribution_1, eri_contribution_2, eri_contribution_3
else:
return value * amp_ratio * parity_multiplicator
if return_local_RDMs:
val = jax.vmap(jax.vmap(get_one_body_term_up, in_axes=(None, 0)), in_axes=(0, None))(up_occ_inds, up_unocc_inds)
local_en += jnp.sum(val[0])
t_RDM = t_RDM.at[jnp.ix_(up_occ_inds, up_unocc_inds)].add(val[1])
eri_RDM = eri_RDM.at[jnp.ix_(up_occ_inds, up_unocc_inds, jnp.arange(eri_RDM.shape[2]), jnp.arange(eri_RDM.shape[3]))].add(val[2])
ix = jnp.ix_(up_occ_inds, jnp.arange(eri_RDM.shape[1]), jnp.arange(eri_RDM.shape[2]), up_unocc_inds)
eri_RDM = eri_RDM.at[ix].add(jnp.transpose(val[3], axes=(0, 2, 3, 1)))
ix = jnp.ix_(jnp.arange(eri_RDM.shape[1]), up_unocc_inds, up_occ_inds, jnp.arange(eri_RDM.shape[2]))
eri_RDM = eri_RDM.at[ix].add(jnp.transpose(val[4], axes=(2, 1, 0, 3)))
else:
local_en += jnp.sum(jax.vmap(jax.vmap(get_one_body_term_up, in_axes=(None, 0)), in_axes=(0, None))(up_occ_inds, up_unocc_inds))
def get_one_body_term_down(i, a):
# Updated config at update sites
new_occ = jnp.array([sample[i]-2, sample[a]+2], dtype=jnp.uint8)
update_sites = jnp.array([i, a])
# Get parity
parity_multiplicator = get_parity_multiplicator_hop(update_sites, down_count)
# Evaluate amplitude ratio
log_amp_connected = get_connected_log_amp(new_occ, update_sites)
amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
value = t[i, a]
value += jnp.sum(eri[i, a, down_occ_inds, down_occ_inds])
value += jnp.sum(eri[i, a, up_occ_inds, up_occ_inds])
value += 0.5 * jnp.sum(eri[i, down_unocc_inds, down_unocc_inds, a])
value -= 0.5 * jnp.sum(eri[down_occ_inds, a, i, down_occ_inds])
if return_local_RDMs:
t_contribution = amp_ratio * parity_multiplicator
eri_contribution_1 = jnp.zeros((eri.shape[2], eri.shape[3]), dtype=complex)
eri_contribution_1 = eri_contribution_1.at[down_occ_inds, down_occ_inds].add(t_contribution)
eri_contribution_1 = eri_contribution_1.at[up_occ_inds, up_occ_inds].add(t_contribution)
eri_contribution_2 = jnp.zeros((eri.shape[1], eri.shape[2]), dtype=complex)
eri_contribution_2 = eri_contribution_2.at[down_unocc_inds, down_unocc_inds].add(0.5 * t_contribution)
eri_contribution_3 = jnp.zeros((eri.shape[0], eri.shape[3]), dtype=complex)
eri_contribution_3 = eri_contribution_3.at[down_occ_inds, down_occ_inds].add(-0.5 * t_contribution)
return value * t_contribution, t_contribution, eri_contribution_1, eri_contribution_2, eri_contribution_3
else:
return value * amp_ratio * parity_multiplicator
if return_local_RDMs:
val = jax.vmap(jax.vmap(get_one_body_term_down, in_axes=(None, 0)), in_axes=(0, None))(down_occ_inds, down_unocc_inds)
local_en += jnp.sum(val[0])
t_RDM = t_RDM.at[jnp.ix_(down_occ_inds, down_unocc_inds)].add(val[1])
eri_RDM = eri_RDM.at[jnp.ix_(down_occ_inds, down_unocc_inds, jnp.arange(eri_RDM.shape[2]), jnp.arange(eri_RDM.shape[3]))].add(val[2])
ix = jnp.ix_(down_occ_inds, jnp.arange(eri_RDM.shape[1]), jnp.arange(eri_RDM.shape[2]), down_unocc_inds)
eri_RDM = eri_RDM.at[ix].add(jnp.transpose(val[3], axes=(0, 2, 3, 1)))
ix = jnp.ix_(jnp.arange(eri_RDM.shape[1]), down_unocc_inds, down_occ_inds, jnp.arange(eri_RDM.shape[2]))
eri_RDM = eri_RDM.at[ix].add(jnp.transpose(val[4], axes=(2, 1, 0, 3)))
else:
local_en += jnp.sum(jax.vmap(jax.vmap(get_one_body_term_down, in_axes=(None, 0)), in_axes=(0, None))(down_occ_inds, down_unocc_inds))
def two_body_up_up_occ(index_outer, val_outer):
i = up_occ_inds[index_outer]
def two_body_up_up_unocc(index_inner, val_inner):
a = up_unocc_inds[index_inner]
occ_inds_outer_removed = up_occ_inds[jnp.nonzero(up_occ_inds != i, size=len(up_occ_inds)-1)]
unocc_inds_outer_removed = up_unocc_inds[jnp.nonzero(up_unocc_inds != a, size=len(up_unocc_inds)-1)]
new_occ_outer = jnp.array([sample[i]-1, sample[a]+1], dtype=jnp.uint8)
update_sites_outer = jnp.array([i, a])
# Get parity multiplicator for first hop
parity_multiplicator_outer = get_parity_multiplicator_hop(update_sites_outer, up_count)
def inner_loop(j, b):
new_occ_inner = jnp.array([sample[j]-1, sample[b]+1], dtype=jnp.uint8)
update_sites_inner = jnp.array([j, b])
# Get parity multiplicator for second hop (this does not take first hop into account)
parity_multiplicator_inner = get_parity_multiplicator_hop(update_sites_inner, up_count)
parity_multiplicator = parity_multiplicator_outer * parity_multiplicator_inner
# Evaluate the modification required to include the first hop
limits_inner = jnp.sort(update_sites_inner)
left_lim = limits_inner[0]
right_lim = (limits_inner[1]-1)
parity_multiplicator = jnp.where((i <= right_lim) & (i > left_lim), -parity_multiplicator, parity_multiplicator)
parity_multiplicator = jnp.where((a <= right_lim) & (a > left_lim), -parity_multiplicator, parity_multiplicator)
# Combined update to the config
new_occ = jnp.concatenate((new_occ_outer, new_occ_inner))
update_sites = jnp.concatenate((update_sites_outer, update_sites_inner))
# Get amplitude ratio
log_amp_connected = get_connected_log_amp(new_occ, update_sites)
amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
return (parity_multiplicator * amp_ratio)
inner_loops = jax.vmap(jax.vmap(inner_loop, in_axes=(None, 0)), in_axes=(0, None))(occ_inds_outer_removed, unocc_inds_outer_removed)
if return_local_RDMs:
en = val_inner[0] + 0.5 * jnp.sum(eri[jnp.ix_(jnp.array([i]),jnp.array([a]),occ_inds_outer_removed,unocc_inds_outer_removed)] * inner_loops)
eri_RDM_contrib = val_inner[1].at[jnp.ix_(jnp.array([i]),jnp.array([a]),occ_inds_outer_removed,unocc_inds_outer_removed)].add(0.5 * inner_loops)
return en, eri_RDM_contrib
else:
return val_inner + 0.5 * jnp.sum(eri[jnp.ix_(jnp.array([i]),jnp.array([a]),occ_inds_outer_removed,unocc_inds_outer_removed)] * inner_loops)
return jax.lax.fori_loop(0, len(up_unocc_inds), two_body_up_up_unocc, val_outer)
if return_local_RDMs:
local_en, eri_RDM = jax.lax.fori_loop(0, len(up_occ_inds), two_body_up_up_occ, (local_en, eri_RDM))
else:
local_en = jax.lax.fori_loop(0, len(up_occ_inds), two_body_up_up_occ, local_en)
def two_body_down_down_occ(index_outer, val_outer):
i = down_occ_inds[index_outer]
def two_body_down_down_unocc(index_inner, val_inner):
a = down_unocc_inds[index_inner]
occ_inds_outer_removed = down_occ_inds[jnp.nonzero(down_occ_inds != i, size=len(down_occ_inds)-1)]
unocc_inds_outer_removed = down_unocc_inds[jnp.nonzero(down_unocc_inds != a, size=len(down_unocc_inds)-1)]
new_occ_outer = jnp.array([sample[i]-2, sample[a]+2], dtype=jnp.uint8)
update_sites_outer = jnp.array([i, a])
# Get parity multiplicator for first hop
parity_multiplicator_outer = get_parity_multiplicator_hop(update_sites_outer, down_count)
def inner_loop(j, b):
new_occ_inner = jnp.array([sample[j]-2, sample[b]+2], dtype=jnp.uint8)
update_sites_inner = jnp.array([j, b])
# Get parity multiplicator for second hop (this does not take first hop into account)
parity_multiplicator_inner = get_parity_multiplicator_hop(update_sites_inner, down_count)
parity_multiplicator = parity_multiplicator_outer * parity_multiplicator_inner
# Evaluate the modification required to include the first hop
limits_inner = jnp.sort(update_sites_inner)
left_lim = limits_inner[0]
right_lim = (limits_inner[1]-1)
parity_multiplicator = jnp.where(jnp.logical_and((i <= right_lim), (i > left_lim)), -parity_multiplicator, parity_multiplicator)
parity_multiplicator = jnp.where(jnp.logical_and((a <= right_lim), (a > left_lim)), -parity_multiplicator, parity_multiplicator)
# Combined update to the config
new_occ = jnp.concatenate((new_occ_outer, new_occ_inner))
update_sites = jnp.concatenate((update_sites_outer, update_sites_inner))
# Get amplitude ratio
log_amp_connected = get_connected_log_amp(new_occ, update_sites)
amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
return (parity_multiplicator * amp_ratio)
inner_loops = jax.vmap(jax.vmap(inner_loop, in_axes=(None, 0)), in_axes=(0, None))(occ_inds_outer_removed, unocc_inds_outer_removed)
if return_local_RDMs:
en = val_inner[0] + 0.5 * jnp.sum(eri[jnp.ix_(jnp.array([i]),jnp.array([a]),occ_inds_outer_removed,unocc_inds_outer_removed)] * inner_loops)
eri_RDM_contrib = val_inner[1].at[jnp.ix_(jnp.array([i]),jnp.array([a]),occ_inds_outer_removed,unocc_inds_outer_removed)].add(0.5 * inner_loops)
return en, eri_RDM_contrib
else:
return val_inner + 0.5 * jnp.sum(eri[jnp.ix_(jnp.array([i]),jnp.array([a]),occ_inds_outer_removed,unocc_inds_outer_removed)] * inner_loops)
return jax.lax.fori_loop(0, len(down_unocc_inds), two_body_down_down_unocc, val_outer)
if return_local_RDMs:
local_en, eri_RDM = jax.lax.fori_loop(0, len(down_occ_inds), two_body_down_down_occ, (local_en, eri_RDM))
else:
local_en = jax.lax.fori_loop(0, len(down_occ_inds), two_body_down_down_occ, local_en)
# Two body contribution (up, down)
""" Helper functions to create the new_occ and update_sites arrays
based on whether the site is already in the update_sites array or not
(required since we cannot jit if statements). If the site is already in
the update sites, we update this occupancy accordingly but still add the
site index to the list of updated sites and add the original sample occupation
to the lists of new occupancies, so that effectively the amplitude is unaffected
by this additional update in the fast updating. If no fast updating is performed it
is therefore necessary to ensure that the new configuration takes the actual update
which is at the position of the first occurance the update site.
This construction keeps the shapes fixed so that everything stays jittable.
"""
def get_updated_occ_previous_move(first_update_occ, update_sites, site_index, spin_update):
full_update_sites = jnp.append(update_sites, site_index)
updated_occ = jnp.append(first_update_occ, sample[site_index])
first_matching_index = jnp.nonzero(full_update_sites == site_index, size=1)[0][0]
updated_occ = updated_occ.at[first_matching_index].add(spin_update)
return (updated_occ, full_update_sites)
def two_body_up_down_occ(index_outer, val_outer):
i = up_occ_inds[index_outer]
def two_body_up_down_unocc(index_inner, val_inner):
a = up_unocc_inds[index_inner]
new_occ_outer = jnp.array([sample[i]-1, sample[a]+1], dtype=jnp.uint8)
update_sites_outer = jnp.array([i, a])
# Get parity multiplicator for first hop
parity_multiplicator_up = get_parity_multiplicator_hop(update_sites_outer, up_count)
def inner_loop(j, b):
new_occ_inner = jnp.array([sample[j]-2, sample[b]+2], dtype=jnp.uint8)
update_sites_inner = jnp.array([j, b])
new_occ_updated, update_sites_updated = get_updated_occ_previous_move(new_occ_outer, update_sites_outer, j, -2)
new_occ_final, update_sites_final = get_updated_occ_previous_move(new_occ_updated, update_sites_updated, b, 2)
parity_multiplicator_down = get_parity_multiplicator_hop(jnp.array([j, b]), down_count)
parity_multiplicator = parity_multiplicator_up * parity_multiplicator_down
# Get amplitude ratio
log_amp_connected = get_connected_log_amp(new_occ_final, update_sites_final)
amp_ratio = jnp.squeeze(jnp.exp(log_amp_connected - log_amp))
return (parity_multiplicator * amp_ratio)
inner_loops = jax.vmap(jax.vmap(inner_loop, in_axes=(None, 0)), in_axes=(0, None))(down_occ_inds, down_unocc_inds)
if return_local_RDMs:
en = val_inner[0] + jnp.sum(eri[jnp.ix_(jnp.array([i]),jnp.array([a]),down_occ_inds,down_unocc_inds)] * inner_loops)
eri_RDM_contrib = val_inner[1].at[jnp.ix_(jnp.array([i]),jnp.array([a]),down_occ_inds,down_unocc_inds)].add(inner_loops)
return en, eri_RDM_contrib
else:
return val_inner + jnp.sum(eri[jnp.ix_(jnp.array([i]),jnp.array([a]),down_occ_inds,down_unocc_inds)] * inner_loops)
return jax.lax.fori_loop(0, len(up_unocc_inds), two_body_up_down_unocc, val_outer)
if return_local_RDMs:
local_en, eri_RDM = jax.lax.fori_loop(0, len(up_occ_inds), two_body_up_down_occ, (local_en, eri_RDM))
else:
local_en = jax.lax.fori_loop(0, len(up_occ_inds), two_body_up_down_occ, local_en)
if return_local_RDMs:
return local_en, t_RDM, eri_RDM
else:
return local_en
return nkjax.vmap_chunked(vmap_fun, chunk_size=chunk_size)(samples)
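# Illustrative helper (an addition; assumes t_RDM and eri_RDM were obtained
# from local_en_on_the_fly with return_local_RDMs=True): reconstructs the
# local energy from the linear relation described in the docstring preceding
# local_en_on_the_fly, local_en = sum(t * t_RDM) + sum(eri * eri_RDM).
def _reconstruct_local_en(t_mat, t_RDM, eri_mat, eri_RDM):
    return jnp.sum(t_mat * t_RDM) + jnp.sum(eri_mat * eri_RDM)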
@nk.vqs.get_local_kernel_arguments.dispatch
def get_local_kernel_arguments(vstate: nk.vqs.MCState, op: AbInitioHamiltonianOnTheFly):
samples = vstate.samples
t = jnp.array(op.t_mat)
eri = jnp.array(op.eri_mat)
return (samples, (t, eri))
@nk.vqs.get_local_kernel.dispatch(precedence=1)
def get_local_kernel(vstate: nk.vqs.MCState, op: AbInitioHamiltonianOnTheFly, chunk_size: Optional[int] = None):
try:
use_fast_update = vstate.model.apply_fast_update
except AttributeError:
use_fast_update = False
return nkjax.HashablePartial(local_en_on_the_fly, vstate.hilbert._n_elec, use_fast_update=use_fast_update, chunk_size=chunk_size)
# File: GPSKet-master/GPSKet/vqs/mc/mc_state/expect.py
from functools import partial
from typing import Callable, Optional
import jax
import jax.numpy as jnp
import netket as nk
from netket.utils.types import PyTree
from netket.stats import Stats
from netket import jax as nkjax
from netket.vqs.mc.mc_state.state import MCState
from netket.vqs.mc.mc_state.expect import get_local_kernel, get_local_kernel_arguments
"""
This simply overrides the NetKet default implementation in order to be able to pass
additional arguments to the model apply function (e.g. required for fast updates).
Ultimately this should probably be merged into NetKet at some point.
"""
@nk.vqs.expect.dispatch
def expect_chunked(vstate: MCState, op: nk.operator.AbstractOperator, chunk_size: int) -> Stats: # noqa: F811
samples, args = get_local_kernel_arguments(vstate, op)
local_estimator_fun = get_local_kernel(vstate, op, chunk_size)
return _expect(
chunk_size,
local_estimator_fun,
vstate._apply_fun,
vstate.sampler.machine_pow,
vstate.parameters,
vstate.model_state,
samples,
args,
)
@nk.vqs.expect.dispatch
def expect(vstate: MCState, op: nk.operator.AbstractOperator) -> Stats: # noqa: F811
samples, args = get_local_kernel_arguments(vstate, op)
local_estimator_fun = get_local_kernel(vstate, op)
return _expect(
None,
local_estimator_fun,
vstate._apply_fun,
vstate.sampler.machine_pow,
vstate.parameters,
vstate.model_state,
samples,
args,
)
@partial(jax.jit, static_argnums=(0, 1, 2))
def _expect(
chunk_size: Optional[int],
local_value_kernel: Callable,
model_apply_fun: Callable,
machine_pow: int,
parameters: PyTree,
model_state: PyTree,
samples: jnp.ndarray,
local_value_args: PyTree,
) -> Stats:
samples_shape = samples.shape
if jnp.ndim(samples) != 2:
samples = samples.reshape((-1, samples_shape[-1]))
def log_pdf(w, samples):
return machine_pow * model_apply_fun({"params": w, **model_state}, samples).real
if chunk_size is not None:
_, op_stats = nkjax.expect(
log_pdf,
partial(local_value_kernel, model_apply_fun, chunk_size=chunk_size),
{"params": parameters, **model_state},
samples,
local_value_args,
n_chains=samples_shape[0],
)
else:
_, op_stats = nkjax.expect(
log_pdf,
partial(local_value_kernel, model_apply_fun),
{"params": parameters, **model_state},
samples,
local_value_args,
n_chains=samples_shape[0],
)
return op_stats
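# Hedged usage sketch (an addition; `vstate` and `ham` are hypothetical
# placeholders): with the dispatches above registered, expectation values of
# operators flow through the standard NetKet entry point and pick up the
# extended apply signature, e.g.
# stats = vstate.expect(ham)  # resolves to expect/expect_chunked above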
# File: GPSKet-master/GPSKet/vqs/mc/mc_state/state_unique_samples.py
import netket as nk
import netket.jax as nkjax
import jax.numpy as jnp
from typing import Tuple, Optional, Callable, Any
from collections import defaultdict
import numpy as np
from netket.utils.types import PyTree
from netket.utils.dispatch import TrueT
from netket.utils.mpi import (
node_number as _rank,
mpi_sum as _mpi_sum,
n_nodes as _n_nodes,
mpi_sum_jax as _mpi_sum_jax,
mpi_max_jax as _mpi_max_jax,
mpi_allgather_jax as _mpi_allgather_jax
)
from netket.stats import Stats
from netket.stats.mpi_stats import (
sum as _sum
)
from functools import partial
from netket.vqs import get_local_kernel_arguments, get_local_kernel
import jax
""" Very hacky implementation of an MC state which samples until it has accumulated n_samples
unique samples. Expectation values then also include the number of times each of the unique samples
was sampled.
TODO: This needs to be cleaned up, we should think of the best approach to do this, currently only the means
of expectation values can be trusted and sampler properties and variances are probably wrong because the
approach is just hacked into the code with the least possible overhead.
"""
class MCStateUniqueSamples(nk.vqs.MCState):
def __init__(self, *args, max_sampling_steps=None, batch_size=None, **kwargs):
super().__init__(*args, **kwargs)
self.max_sampling_steps = max_sampling_steps
if batch_size is None:
self.batch_size = self.n_samples
else:
self.batch_size = batch_size
def reset(self):
self._samples = None
self._unique_samples = None
self._relative_counts = None
@property
def samples(self) -> jnp.ndarray:
return self.samples_with_counts[0]
@property
def samples_with_counts(self) -> Tuple[jnp.ndarray, jnp.ndarray]:
if self._unique_samples is None:
unique_samps = defaultdict(lambda: 0)
count = 0
continue_sampling = True
if self.max_sampling_steps is not None:
if self.max_sampling_steps <= count:
continue_sampling = False
# Generate a batch of samples
samps = self.sample(n_samples = self.batch_size)
while continue_sampling:
# Merge the samples from all mpi processes
all_samples = _mpi_allgather_jax(samps)[0].reshape((-1, samps.shape[-1]))
# Add to the previously sampled configurations, there is probably a much more efficient way of doing this but it's good enough for now
for samp in np.array(all_samples):
unique_samps[tuple(samp)] += 1
if len(unique_samps) >= self.n_samples:
break
count += 1
if self.max_sampling_steps is not None:
if self.max_sampling_steps <= count:
continue_sampling = False
if len(unique_samps) >= self.n_samples:
continue_sampling = False
if continue_sampling:
samps = self.sample(n_samples = self.batch_size, n_discard_per_chain=0)
unique_samples = np.tile(np.array(all_samples[0]), (self.n_samples, 1))
relative_counts = np.zeros(self.n_samples, dtype=float)
max_id = min(len(unique_samps), self.n_samples)
np.copyto(unique_samples[:max_id, :], np.array(list(unique_samps.keys()), dtype=unique_samples.dtype)[:max_id, :])
np.copyto(relative_counts[:max_id], np.array(list(unique_samps.values()), dtype=relative_counts.dtype)[:max_id])
# Split samples and counts across mpi processes
lower = _rank * self.n_samples_per_rank
upper = lower + self.n_samples_per_rank
self._unique_samples = jnp.array(unique_samples[lower:upper, :])
self._relative_counts = jnp.array(relative_counts[lower:upper])
self._relative_counts /= _sum(self._relative_counts)
return (self._unique_samples, self._relative_counts)
""" The following functions just override the NetKet implementation to inject the sample counts into the expectation value evaluation."""
@nk.vqs.expect_and_grad.dispatch(precedence=10)
def expect_and_grad(vstate: MCStateUniqueSamples, op: nk.operator.AbstractOperator, use_covariance: TrueT, chunk_size: Optional[int], *, mutable:Any):
_, args = get_local_kernel_arguments(vstate, op)
samples_and_counts = vstate.samples_with_counts
if chunk_size is not None:
local_estimator = get_local_kernel(vstate, op, chunk_size)
else:
local_estimator = get_local_kernel(vstate, op)
assert(mutable is False)
exp, grad = grad_expect_hermitian_chunked(chunk_size, local_estimator, vstate._apply_fun, vstate.parameters, vstate.model_state, samples_and_counts, args, compute_grad=True)
return exp, grad
@nk.vqs.expect.dispatch(precedence=10)
def expect(vstate: MCStateUniqueSamples, op: nk.operator.AbstractOperator, chunk_size: Optional[int]):
_, args = get_local_kernel_arguments(vstate, op)
samples_and_counts = vstate.samples_with_counts
if chunk_size is not None:
local_estimator = get_local_kernel(vstate, op, chunk_size)
else:
local_estimator = get_local_kernel(vstate, op)
exp = grad_expect_hermitian_chunked(chunk_size, local_estimator, vstate._apply_fun, vstate.parameters, vstate.model_state, samples_and_counts, args, compute_grad=False)
return exp
@partial(jax.jit, static_argnums=(0,1,2,7))
def grad_expect_hermitian_chunked(chunk_size: Optional[int], estimator_fun: Callable, model_apply_fun: Callable, parameters: PyTree, model_state: PyTree, samples_and_counts: Tuple[jnp.ndarray, jnp.ndarray], estimator_args: PyTree, compute_grad=False):
samples = samples_and_counts[0]
counts = samples_and_counts[1]
if chunk_size is not None:
loc_vals = estimator_fun(model_apply_fun, {"params": parameters, **model_state}, samples, estimator_args, chunk_size=chunk_size)
else:
loc_vals = estimator_fun(model_apply_fun, {"params": parameters, **model_state}, samples, estimator_args)
mean = _sum(counts * loc_vals)
variance = _sum(counts * (jnp.abs(loc_vals - mean)**2))
loc_val_stats = Stats(mean=mean, variance=variance)
if compute_grad:
loc_vals_centered = counts * (loc_vals - loc_val_stats.mean)
if chunk_size is not None:
vjp_fun = nkjax.vjp_chunked(lambda w, samps: model_apply_fun({"params": w, **model_state}, samps), parameters, samples, conjugate=True, chunk_size=chunk_size, chunk_argnums=1, nondiff_argnums=1)
else:
vjp_fun = nkjax.vjp(lambda w, samps: model_apply_fun({"params": w, **model_state}, samps), parameters, samples, conjugate=True)[1]
val_grad = vjp_fun((jnp.conjugate(loc_vals_centered)))[0]
val_grad = jax.tree_util.tree_map(lambda x, target: (x if jnp.iscomplexobj(target) else 2 * x.real).astype(target.dtype), val_grad, parameters)
return loc_val_stats, jax.tree_util.tree_map(lambda x: _mpi_sum_jax(x)[0], val_grad)
else:
return loc_val_stats
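# Small self-contained reference (an illustrative addition) for the
# count-weighted statistics computed above; counts are assumed to be
# normalized such that their (MPI-wide) sum is one.
def _weighted_stats_reference(loc_vals, counts):
    mean = jnp.sum(counts * loc_vals)
    variance = jnp.sum(counts * jnp.abs(loc_vals - mean) ** 2)
    return mean, variance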
# File: GPSKet-master/GPSKet/vqs/mc/mc_state/state_stratified_sampling.py
import numpy as np
from GPSKet.vqs.mc.mc_state.state_unique_samples import MCStateUniqueSamples
import jax
import jax.numpy as jnp
from netket.stats.mpi_stats import (
sum as _sum
)
import netket.jax as nkjax
from netket.utils.mpi import (
node_number as _rank,
mpi_max_jax as _mpi_max_jax,
n_nodes as _n_nodes
)
from typing import Tuple
from dataclasses import replace
""" Implements a state with stratified sampling, splitting the evaluation of expectation values
into a deterministic evaluation over a fixed set, and a sampled estimate over the complement.
At the moment this is only an implementation for quick testing which is very slow.
The number of samples passed is the number of samples taken in addition to the deterministic set."""
class MCStateStratifiedSampling(MCStateUniqueSamples):
def __init__(self, deterministic_samples, N_total, *args, rand_norm=True, number_random_samples=None, renormalize=True, **kwargs):
super().__init__(*args, **kwargs)
self.n_sweeps = self.sampler.n_sweeps
self.sampler = replace(self.sampler, n_sweeps=1)
self.deterministic_samples = jnp.array_split(deterministic_samples, _n_nodes)[_rank]
self.N_complement = N_total - deterministic_samples.shape[0] # Total size of the complement
self.rand_norm = rand_norm
self.lookup_dict = {tuple(conf): i for i, conf in enumerate(np.array(deterministic_samples))}
# Find a valid initial sample (one from the complement), very inelegant
key = jax.random.split(self.sampler_state.rng)[0]
self.current_sample = []
for j in range(self.sampler.n_chains_per_rank):
samp = self.sampler.hilbert.random_state(key, dtype=self.deterministic_samples.dtype).reshape(-1)
while tuple(np.array(samp)) in self.lookup_dict:
key = jax.random.split(key)[0]
samp = self.sampler.hilbert.random_state(key, dtype=self.deterministic_samples.dtype).reshape(-1)
self.current_sample.append(samp)
key = jax.random.split(key)[0]
self.current_sample = jnp.array(np.array(self.current_sample))
self.sampler_state = replace(self.sampler_state, σ = self.current_sample)
if not self.rand_norm:
assert(number_random_samples is None)
if number_random_samples is None:
self.number_random_samples = self.n_samples_per_rank
else:
self.number_random_samples = len(np.array_split(np.arange(number_random_samples), _n_nodes)[_rank])
self.renormalize = renormalize
def sample_step(self):
old_sample = self.current_sample
self.current_sample = self.sample(chain_length=1, n_discard_per_chain=0).reshape(self.current_sample.shape)
# Reject the samples in the deterministic set, this is quite hacky (and slow!)
valid = np.ones(self.current_sample.shape[0], dtype=bool)
for i, samp in enumerate(self.current_sample):
if tuple(np.array(samp)) in self.lookup_dict:
valid[i] = False
self.current_sample = jnp.where(np.expand_dims(valid,-1), self.current_sample, old_sample)
self.sampler_state = replace(self.sampler_state, σ = self.current_sample)
@property
def samples_with_counts(self) -> Tuple[jnp.ndarray, jnp.ndarray]:
if self._unique_samples is None:
# Sampling Warm-up
for i in range(self.n_discard_per_chain):
for j in range(self.n_sweeps):
self.sample_step()
# Sample from the complement
sampled_configs = []
for i in range(self.chain_length):
for j in range(self.n_sweeps):
self.sample_step()
sampled_configs.append(self.current_sample)
samples_from_complement = jnp.array(np.array(sampled_configs)).reshape((-1, self.current_sample.shape[-1]))
all_samples = jnp.concatenate((self.deterministic_samples, samples_from_complement))
def log_prob(samp):
return jnp.squeeze(2 * self.log_value(samp.reshape((1,-1))).real)
log_prob_amps_deterministic = nkjax.vmap_chunked(log_prob, chunk_size=self.chunk_size)(self.deterministic_samples)
log_prob_amps_complement = nkjax.vmap_chunked(log_prob, chunk_size=self.chunk_size)(samples_from_complement)
total_samples = _sum(len(log_prob_amps_complement))
if self.renormalize: # Switch off for normalized models (e.g. autoregressive models)
# Renormalise the probability amplitudes for numerical stability
rescale_shift = _mpi_max_jax(jnp.max(jnp.concatenate((log_prob_amps_deterministic, log_prob_amps_complement))))[0]
log_prob_amps_deterministic -= rescale_shift
log_prob_amps_complement -= rescale_shift
# Contribution of the deterministic set to the norm
norm_deterministic = _sum(jnp.exp(log_prob_amps_deterministic))
if self.rand_norm:
# Approximation to the norm correction from a uniformly sampled set
key = jax.random.split(self.sampler_state.rng)[0]
random_samples = np.empty((self.number_random_samples, self.deterministic_samples.shape[-1]), dtype=self.deterministic_samples.dtype)
found_samples = 0
while found_samples < self.number_random_samples:
key = jax.random.split(key)[0]
proposed_samples = np.array(self.sampler.hilbert.random_state(key, size=self.number_random_samples, dtype=self.deterministic_samples.dtype))
for samp in proposed_samples:
if tuple(samp) not in self.lookup_dict:
np.copyto(random_samples[found_samples], samp)
found_samples += 1
if found_samples == self.number_random_samples:
break
random_samps = jnp.array(random_samples)
log_probs_sampled = nkjax.vmap_chunked(log_prob, chunk_size=self.chunk_size)(random_samps)
norm_sampled = self.N_complement * _sum(jnp.exp(log_probs_sampled - rescale_shift))/_sum(random_samps.shape[0])
else:
# Approximation to the norm correction from the sampled set (evaluated with self-normalizing importance sampling)
norm_sampled = self.N_complement * total_samples /_sum(jnp.exp(-log_prob_amps_complement))
norm_estimate = norm_deterministic + norm_sampled
else:
norm_deterministic = _sum(jnp.exp(log_prob_amps_deterministic))
norm_estimate = 1.
prefactors_det = jnp.exp(log_prob_amps_deterministic)/norm_estimate
prefactors_sampled = jnp.ones(log_prob_amps_complement.shape) * (1 - norm_deterministic/norm_estimate) / total_samples
self._unique_samples = all_samples
self._relative_counts = jnp.concatenate((prefactors_det, prefactors_sampled))
return (self._unique_samples, self._relative_counts)
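# Illustrative reference (an addition) for the stratified estimator assembled
# above: configurations from the deterministic set enter with their exact
# (renormalized) Born probabilities, while the sampled complement enters with
# the uniform weight (1 - P_det) / n_sampled encoded in prefactors_sampled.
def _stratified_mean_reference(loc_vals, prefactors):
    # prefactors is the concatenation of prefactors_det and prefactors_sampled
    return jnp.sum(prefactors * loc_vals)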
# File: GPSKet-master/GPSKet/driver/autoreg_state_fitting.py
import jax
import jax.numpy as jnp
import numpy as np
from functools import partial
from typing import Tuple, Optional
from netket.utils.types import Array, SeedT
from netket.operator import AbstractOperator
from netket.utils import mpi
from netket.driver.vmc_common import info
from .abstract_state_fitting import AbstractStateFittingDriver
class ARStateFitting(AbstractStateFittingDriver):
"""
Fit an autoregressive Ansatz to data from another state by minimizing the distance between two normalized quantum states
"""
def __init__(
self,
dataset: Tuple[Array, Array],
hamiltonian: AbstractOperator,
optimizer,
*args,
variational_state=None,
mini_batch_size: int = 32,
seed: Optional[SeedT]=None,
**kwargs
):
super().__init__(dataset, hamiltonian, optimizer, *args, variational_state=variational_state, mini_batch_size=mini_batch_size, seed=seed, **kwargs)
if not hasattr(self._variational_state.model, 'conditionals'):
raise ValueError(
f"{self._variational_state.model} is not autoregressive."
)
self._probas = np.abs(self._dataset[1])**2
self._ids = np.arange(self._size_dataset)
def _forward_and_backward(self):
# Sample mini-batch
self._key, _ = jax.random.split(self._key)
mini_batch_ids = jax.random.choice(self._key, self._ids, (self._mini_batch_size,), p=self._probas, replace=False)
mini_batch = (self._dataset[0][mini_batch_ids], self._dataset[1][mini_batch_ids])
# Compute loss and gradient
self.loss, self._loss_grad = _loss_and_grad(self.state.parameters, self.state.model_state, self.state._apply_fun, mini_batch)
return self._loss_grad
def __repr__(self):
return (
"ARStateFitting("
+ f"\n step_count = {self.step_count},"
+ f"\n state = {self.state})"
)
def info(self, depth=0):
lines = [
"{}: {}".format(name, info(obj, depth=depth + 1))
for name, obj in [
("Hamiltonian ", self._ham),
("Optimizer ", self._optimizer),
("State ", self.state),
]
]
return "\n{}".format(" " * 3 * (depth + 1)).join([str(self)] + lines)
@partial(jax.jit, static_argnums=2)
def _loss(params, model_state, logpsi, mini_batch):
# TODO: this might need some chunking/vmapping
x, y = mini_batch
model_amplitudes = logpsi({'params': params, **model_state}, x)
loss = jnp.mean(jnp.abs(jnp.exp(model_amplitudes)-y)**2)
return loss
@partial(jax.jit, static_argnums=2)
def _loss_and_grad(params, model_state, logpsi, mini_batch):
loss, grad = jax.value_and_grad(_loss, argnums=0)(params, model_state, logpsi, mini_batch)
loss, _ = mpi.mpi_mean_jax(loss)
grad = jax.tree_util.tree_map(lambda p: mpi.mpi_sum_jax(p)[0], grad)
grad = jax.tree_util.tree_map(jnp.conj, grad)
return loss, grad
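# Summary of the objective minimized above (an illustrative addition): for a
# mini-batch {(x_k, y_k)} drawn with probabilities proportional to |y_k|^2,
# the loss is L = mean_k |exp(log psi_theta(x_k)) - y_k|^2, i.e. an L2
# distance between model and target amplitudes on the sampled configurations.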
# File: GPSKet-master/GPSKet/driver/minSR.py
import numpy as np
import jax
import jax.numpy as jnp
from functools import partial
import netket as nk
from netket import VMC
from netket.stats import Stats
from netket.utils import mpi
from netket.stats._autocorr import integrated_time
from netket.stats.mc_stats import _split_R_hat
from GPSKet.vqs import MCStateUniqueSamples
class minSRVMC(VMC):
"""
VMC driver utilizing the minSR updates as proposed in https://arxiv.org/abs/2302.01941
"""
def __init__(self, *args, mode: str = None, holomorphic: bool = None,
solver=lambda A, b: jnp.linalg.lstsq(A, b, rcond=1.e-12)[0], diag_shift: float = 0., **kwargs):
super().__init__(*args, **kwargs)
assert(not (mode is not None and holomorphic is not None))
assert (diag_shift >= 0.) and (diag_shift <= 1.)
if mode is None:
self.mode = nk.jax.jacobian_default_mode(self.state._apply_fun, self.state.parameters,
self.state.model_state, self.state.samples,
holomorphic=holomorphic)
else:
self.mode = mode
self.solver = solver
self.diag_shift = diag_shift
# Super simple implementation of the minSR driver
def _forward_and_backward(self):
self.state.reset()
if hasattr(self.state, "samples_with_counts"):
samples, counts = self.state.samples_with_counts
else:
samples = self.state.samples
counts = jnp.ones(samples.shape[:-1])/(mpi.mpi_sum_jax(np.prod(samples.shape[:-1]))[0])
samples = samples.reshape((-1, samples.shape[-1]))
counts = counts.reshape((-1,))
# Transpose as local_estimators function flips the axes
loc_ens = self.state.local_estimators(self._ham).T.reshape(-1)
O = nk.jax.jacobian(self.state._apply_fun, self.state.parameters, samples,
self.state.model_state, mode = self.mode, pdf = counts, dense=True, center=True)
self._loss_stats, self._loss_grad, dense_update = compute_update(loc_ens, O, counts, self.solver, self.diag_shift)
# Convert back to pytree
unravel = lambda x: x
reassemble = lambda x: x
x = self.state.parameters
if self.mode != "holomorphic":
x, reassemble = nk.jax.tree_to_real(self.state.parameters)
_, unravel = nk.jax.tree_ravel(x)
self._dp = reassemble(unravel(dense_update))
# Cast to real if necessary
self._dp = jax.tree_util.tree_map(lambda x, target: (x if jnp.iscomplexobj(x) else x.real), self._dp, self.state.parameters)
return self._dp
@partial(jax.jit, static_argnames=("solver"))
def compute_update(loc_ens, O, counts, solver, diag_shift):
loss_stats = _statistics(loc_ens, counts)
loc_ens_centered = (loc_ens - loss_stats.mean) * jnp.sqrt(counts)
loc_ens_centered = (mpi.mpi_allgather_jax(loc_ens_centered)[0]).reshape(-1)
O = (mpi.mpi_allgather_jax(O)[0]).reshape((-1, *O.shape[1:]))
# Complex real split, is this correct? TODO: double check
if len(O.shape) == 3:
O = O[:,0,:] + 1.j * O[:,1,:]
loss_grad = jnp.dot(O.T, loc_ens_centered).real
OO = (1-diag_shift) * O.dot(O.conj().T) + diag_shift * jnp.eye(O.shape[0])
OO_epsilon = solver(OO, loc_ens_centered)
dense_update = O.conj().T.dot(OO_epsilon)
return loss_stats, loss_grad, dense_update
@jax.jit
def _statistics(data, counts):
data = jnp.atleast_1d(data)
if data.ndim == 1:
data = data.reshape((1, -1))
if data.ndim > 2:
raise NotImplementedError("Statistics are implemented only for ndim<=2")
batch_size = mpi.mpi_sum_jax(data.shape[0])[0]
mean = mpi.mpi_sum_jax(jnp.sum(data * counts))[0]
var = mpi.mpi_sum_jax(jnp.sum(abs(data - mean)**2 * counts))[0]
error_of_mean = jnp.sqrt(var / batch_size)
taus = jax.vmap(integrated_time)(data)
tau_avg, _ = mpi.mpi_mean_jax(jnp.mean(taus))
R_hat = _split_R_hat(data, var)
res = Stats(mean, error_of_mean, var, tau_avg, R_hat)
return res
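# Dense reference for the minSR update used in compute_update above (an
# illustrative addition): standard SR solves the n_params x n_params system
# (O^H O) dp = O^H eps, while minSR solves the n_samples x n_samples system
# (O O^H) x = eps and maps back via dp = O^H x, which is cheaper whenever
# n_samples << n_params.
def _minsr_dense_reference(O, eps, rcond=1e-12):
    x = jnp.linalg.lstsq(O @ O.conj().T, eps, rcond=rcond)[0]
    return O.conj().T @ x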
# File: GPSKet-master/GPSKet/driver/abstract_state_fitting.py
import jax
import numpy as np
import netket.jax as nkjax
from textwrap import dedent
from typing import Optional, Tuple
from netket.utils import mpi
from netket.utils.types import SeedT, Array
from netket.vqs import MCState
from netket.operator import AbstractOperator
from netket.driver import AbstractVariationalDriver
class AbstractStateFittingDriver(AbstractVariationalDriver):
"""Abstract base class for State Fitting drivers"""
def __init__(
self,
dataset: Tuple[Array, Array],
hamiltonian: AbstractOperator,
optimizer,
*args,
variational_state=None,
mini_batch_size: int = 32,
seed: Optional[SeedT]=None,
**kwargs):
if variational_state is None:
variational_state = MCState(*args, **kwargs)
if variational_state.hilbert != hamiltonian.hilbert:
raise TypeError(
dedent(
f"""the variational_state has hilbert space {variational_state.hilbert}
(this is normally defined by the hilbert space in the sampler), but
the hamiltonian has hilbert space {hamiltonian.hilbert}.
The two should match.
"""
)
)
super().__init__(variational_state, optimizer, minimized_quantity_name="Loss")
# TODO: maybe shard the dataset over MPI ranks
batches = jax.tree_util.tree_map(lambda arr: np.array_split(arr, self._mpi_nodes), dataset)
batches = jax.tree_util.tree_map(lambda *tup: mpi.mpi_bcast(tup, root=0), *batches)
self._dataset = batches[mpi.rank]
self._ham = hamiltonian.collect()
self._seed = seed
self._mini_batch_size = mini_batch_size
self._size_dataset = self._dataset[0].shape[0]
self._key = nkjax.mpi_split(nkjax.PRNGKey(self._seed))
def reset(self):
self._key = nkjax.mpi_split(nkjax.PRNGKey(self._seed))
super().reset()
@property
def loss(self):
return self._loss_stats
@loss.setter
def loss(self, value):
self._loss_stats = value
# File: GPSKet-master/GPSKet/models/jastrow.py
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.linen.dtypes import promote_dtype
from jax.nn.initializers import normal
from netket.utils.types import Array, DType, NNInitFunc, Callable
from ..hilbert import FermionicDiscreteHilbert
def up_down_occupancies(x):
"""
Returns spin-up and -down occupancies at each site
Args:
x : an array of input configurations in 2nd quantization of shape (B, L)
Returns:
x_up, x_dn : spin-up and -down occupancies (B, L)
"""
x = jnp.asarray(x, dtype=jnp.int32)
x_up = jnp.asarray(x&1, jnp.int32)
x_dn = jnp.asarray((x&2)/2, jnp.int32)
return x_up, x_dn
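# Quick example of the occupancy encoding decoded above (an illustrative
# addition): occupation numbers are 0=empty, 1=up, 2=down, 3=doubly occupied.
# up_down_occupancies(jnp.array([[0, 1, 2, 3]]))
# -> (Array([[0, 1, 0, 1]]), Array([[0, 0, 1, 1]]))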
class Jastrow(nn.Module):
"""
Implements a Jastrow wavefunction
"""
hilbert: FermionicDiscreteHilbert
"""The Hilbert space of the wavefunction model"""
dtype: DType=jnp.complex128
"""Type of the variational parameters"""
init_fun: NNInitFunc=normal()
"""Initializer for the variational parameters"""
apply_symmetries: Callable=lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations"""
@nn.compact
def __call__(self, x) -> Array:
nsites = x.shape[-1]
kernel = self.param("kernel", self.init_fun, (nsites, nsites), self.dtype)
kernel = kernel + kernel.T
x = self.apply_symmetries(x) # (B, T)
x_up, x_dn = up_down_occupancies(x)
kernel, x_up, x_dn = promote_dtype(kernel, x_up, x_dn, dtype=None)
y = jax.vmap(lambda u,d: jnp.einsum("...i,ij,...j", (1-u), kernel, (1-d)), in_axes=(-1,-1), out_axes=-1)(x_up, x_dn)
return jnp.sum(y, axis=-1) # (B,)
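# Dense reference for the hole-hole Jastrow factor computed above (an
# illustrative addition): for a single symmetry copy, the log-amplitude is
# sum_ij (1 - n_up_i) W_ij (1 - n_dn_j) with the symmetrized kernel W.
def _jastrow_reference(kernel, x_up, x_dn):
    W = kernel + kernel.T
    return jnp.einsum("...i,ij,...j", 1 - x_up, W, 1 - x_dn)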
# File: GPSKet-master/GPSKet/models/pixelcnn.py
import jax.numpy as jnp
import netket as nk
import flax.linen as nn
from math import sqrt
from typing import Optional
from jax.nn.initializers import lecun_normal, zeros
from netket.hilbert.homogeneous import HomogeneousHilbert
from netket.utils.types import Array, DType, NNInitFunc, Callable
from netket.models.autoreg import _normalize
from netket.nn import MaskedConv2D
from GPSKet.nn import VerticalStackConv, HorizontalStackConv, CausalConv2d
default_kernel_init = lecun_normal()
class AbstractARNN(nk.models.AbstractARNN):
"""Overrides the abstract class from NetKet in order to allow constrained Hilbert spaces"""
def __post_init__(self):
nn.Module.__post_init__(self)
if not isinstance(self.hilbert, HomogeneousHilbert):
raise ValueError(
f"Only homogeneous Hilbert spaces are supported by ARNN, but hilbert is a {type(self.hilbert)}."
)
class PixelCNN(AbstractARNN):
"""
Autoregressive wave function Ansatz based on the PixelCNN generative model
"""
hilbert: HomogeneousHilbert
"""The Hilbert space. Only homogeneous Hilbert spaces are supported."""
machine_pow: int = 2
"""Exponent required to normalize the output"""
param_dtype: DType = jnp.float32
"""Type of the variational parameters"""
kernel_size: int = 3
"""Size of the 2D convolutional kernel"""
n_channels: int = 32
"""Number of channels in the convolutional filter"""
depth: int = 10
"""Number of layers in the network"""
normalize: bool = True
"""Whether the Ansatz should be normalized"""
kernel_init: NNInitFunc = default_kernel_init
"""Initializer for the convolutional kernel"""
bias_init: NNInitFunc = zeros
"""Initializer for the bias"""
gauge_fn: Optional[Callable] = None
"""Function that computes the value of a gauge symmetry"""
constraint_fn: Optional[Callable] = None
"""Function that check whether a gauge constraint is broken or not"""
# TODO: add support for symmetries
# Dimensions:
# - B = batch size
# - D = local dimension
    # - N = number of sites, i.e. Hilbert space size
# - L = number of sites per linear dimension
# - T = number of symmetries
def setup(self):
# Set system dimensions
self._D = self.hilbert.local_size
self._N = self.hilbert.size
self._L = int(sqrt(self._N))
if self._L**2 != self._N:
raise ValueError(f"Number of sites ({self._N}) is not a square number")
# Setup layers
self._activation = nk.nn.activation.reim_relu
self._v_stack_conv = VerticalStackConv(
self.n_channels,
self.kernel_size,
mask_center=True,
param_dtype=self.param_dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init
)
self._h_stack_conv = HorizontalStackConv(
self.n_channels,
self.kernel_size,
mask_center=True,
param_dtype=self.param_dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init
)
self._causal_conv_layers = [
CausalConv2d(
n_channels=self.n_channels,
kernel_size=self.kernel_size,
activation=self._activation,
param_dtype=self.param_dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init
) for _ in range(self.depth)
]
self._final_conv = MaskedConv2D(
features=self._D,
kernel_size=(1, 1),
kernel_dilation=(1, 1),
exclusive=False,
param_dtype=self.param_dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init
)
def conditionals_log_psi(self, inputs: Array) -> Array:
        # Compute conditional log-amplitudes by propagating inputs through the network
batch_size = inputs.shape[0]
x = jnp.reshape(inputs, (batch_size, self._L, self._L, 1))
v_stack, h_stack = self._v_stack_conv(x), self._h_stack_conv(x)
for layer in self._causal_conv_layers:
v_stack, h_stack = layer(v_stack, h_stack)
out = self._activation(h_stack)
out = self._final_conv(out) # (B, L, L, D)
log_psi = jnp.reshape(out, (batch_size, self._N, self._D)) # (B, N, D)
# Enforce gauge symmetry by setting log probabilities to -inf where gauge is broken
if self.gauge_fn is not None and self.constraint_fn is not None:
gauge = self.gauge_fn(inputs)
is_broken = self.constraint_fn(gauge)
log_psi = jnp.where(is_broken, -jnp.inf, log_psi)
# Normalize log probabilities
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
return log_psi
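# Standalone sketch of the final normalization step used above: `_normalize`
# (imported from netket.models.autoreg) shifts the conditional log-amplitudes
# so that sum_d |psi_d|^machine_pow = 1 along the local-dimension axis.
def _demo_normalize():
    log_psi = jnp.log(jnp.array([[0.2, 0.6], [1.0, 3.0]]))
    p = jnp.exp(2 * _normalize(log_psi, 2))
    return p.sum(axis=-1)  # ~[1., 1.]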
| 4,901 | 36.707692 | 112 | py |
GPSKet | GPSKet-master/GPSKet/models/pfaffian.py | import numpy as np
import jax.numpy as jnp
import jax
from netket.utils.types import Array, DType, Callable
from typing import Tuple
from netket.utils import HashableArray
from jax.nn.initializers import normal
from flax import linen as nn
def get_gauss_leg_elements_Sy(n_grid):
x, w = np.polynomial.legendre.leggauss(n_grid)
return (HashableArray(np.arccos(x)), HashableArray(w))
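# Quick standalone check of the helper above: the Gauss-Legendre weights
# integrate the constant 1 over [-1, 1] (so they sum to 2) and the nodes are
# mapped to rotation angles arccos(x_k) inside (0, pi).
def _check_gauss_leg_elements(n_grid=4):
    angles, weights = get_gauss_leg_elements_Sy(n_grid)
    assert np.isclose(np.sum(np.asarray(weights)), 2.0)
    assert np.all((np.asarray(angles) > 0.0) & (np.asarray(angles) < np.pi))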
# Legacy implementation which is slow
# """
# This implements a Pfaffian of a matrix as exemplified on Wikipedia; there are certainly better ways which we should adopt in the future.
# The derivation of this on Wikipedia does not explain why the trace identity can be carried over to non-positive matrices, but the code seems to work.
# This approach is also not the numerically most stable one.
# See arxiv: 1102.3440 and the corresponding codebase (pfapack) for better implementations of the Pfaffian.
# TODO: Improve!
# """
# @jax.custom_jvp
# def log_pfaffian(mat):
# n = mat.shape[0]//2
# pauli_y = jnp.array([[0, -1.j], [1.j, 0.]])
# vals = jnp.linalg.eigvals(jnp.dot(jnp.kron(pauli_y, jnp.eye(n)).T, mat))
# return (0.5 * jnp.sum(jnp.log(vals)) + jnp.log(1.j) * (n**2))
"""
This implements the Pfaffian based on the Parlett-Reid algorithm as outlined in arxiv:1102.3440,
this implementation also borrows heavily from the corresponding codebase (pfapack, https://github.com/basnijholt/pfapack)
and is essentially just a reimplementation of its pfaffian_LTL method in jax.
The current implementation involves a for loop which will likely lead to sub-optimal compilation times when jitting this
but currently this seems to be the best solution to get around the jax limitations of requiring static loop counts.
"""
@jax.custom_jvp
def log_pfaffian(mat):
# TODO: add some sanity checks here
n = mat.shape[0]//2
matrix = mat.astype(jnp.complex128)
value = 0.
for count in range(n):
index = count * 2
# permute rows/cols for numerical stability
largest_index = jnp.argmax(jnp.abs(matrix[index+1:,index]))
# exchange rows and columns
updated_mat = matrix.at[index + 1, index:].set(matrix[index + largest_index + 1, index:])
updated_mat = updated_mat.at[index + largest_index + 1, index:].set(matrix[index+1, index:])
matrix = updated_mat
updated_mat = matrix.at[index:, index + 1].set(matrix[index:, index + largest_index + 1])
updated_mat = updated_mat.at[index:, index + largest_index + 1].set(matrix[index:, index+1])
matrix = updated_mat
# sign picked up
value += jnp.where(largest_index != 0, jnp.log(-1 + 0.j), 0.)
# value update
        value = jnp.where(matrix[index+1, index] != 0., value + jnp.log(matrix[index, index+1]), -jnp.inf + 0.j)
t = matrix[index, (index + 2):]/matrix[index, index+1]
matrix = matrix.at[index + 2:, index + 2:].add(jnp.outer(t, matrix[index + 2:, index + 1]))
matrix = matrix.at[index + 2:, index + 2:].add(-jnp.outer(matrix[index + 2:, index + 1], t))
return value
@log_pfaffian.defjvp
def log_pfaffian_jvp(primals, tangents):
derivative = 0.5 * jnp.linalg.inv(primals[0]).T
return (log_pfaffian(primals[0]), derivative.flatten().dot(tangents[0].flatten()))
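# Standalone sanity sketch for the implementation above: for a real
# skew-symmetric matrix A one has pf(A)^2 = det(A), so twice the real part of
# log_pfaffian(A) must reproduce the log-determinant.
def _check_log_pfaffian(n=3, seed=0):
    B = jax.random.normal(jax.random.PRNGKey(seed), (2 * n, 2 * n))
    A = B - B.T  # skew-symmetric test matrix
    _, logdet = jnp.linalg.slogdet(A)
    assert jnp.allclose(2.0 * log_pfaffian(A).real, logdet, rtol=1.0e-4)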
"""
This implements a general Pfaffian wavefunction.
The states which are fed into this model are assumed to be in first quantization, i.e. denote the
positions of Ne electrons, where the positions correspond to site and spin (i.e. take values from 1 ... 2 L).
TODO: We should devise (and stick to) a general framework how the states acting on first quantized inputs should be set up.
"""
class PfaffianState(nn.Module):
n_sites : int
init_fun: Callable = normal()
dtype: DType =jnp.complex128
symmetries: Callable = lambda y: jnp.expand_dims(y, axis=-1)
out_transformation: Callable = lambda x: jax.scipy.special.logsumexp(x, axis=(-2, -1))
@nn.compact
def __call__(self, y) -> Array:
F = self.param("F", self.init_fun, (2 * self.n_sites, 2 * self.n_sites), self.dtype)
y = self.symmetries(y)
def evaluate_symmetries(y_sym):
F_occ = jnp.take(F, y_sym, axis=0)
take_fun = lambda x0, x1: jnp.take(x0, x1, axis=1)
F_occ = jax.vmap(take_fun)(F_occ, y_sym)
F_skew = F_occ - jnp.swapaxes(F_occ, 1, 2)
return jax.vmap(log_pfaffian)(F_skew)
out = jax.vmap(evaluate_symmetries, in_axes=-1, out_axes=-1)(y)
"""
        because we don't support the spin rotations in the S^2 projection for this state (yet),
        we expand the dimensions to mimic a zero-angle spin rotation. This way the
self.out_transformation has the same meaning for this state as for the ZeroMagnetizationPfaffian
below.
"""
return self.out_transformation(out)
"""
This implements a symmetrised Pfaffian wavefunction which explicitly builds in Sz conservation
with zero magnetization (see e.g. mVMC paper/doc).
This implementation does not check whether the Hilbert space used actually satisfies this constraint,
so a little bit of care is required to ensure that this state makes sense.
The states which are fed into this model are assumed to be in the same representation as for the general pfaffian state
(i.e. positions take values between 1 and 2 L).
This model can also easily be projected onto an S^2 eigenstate via a Gauss-Legendre quadrature of the integral in the projector.
See e.g. the mVMC paper [https://doi.org/10.1016/j.cpc.2018.08.014] where this is explained in detail
(comment YR: I am pretty convinced that there is a (-) sign missing in Eq. (53) of that manuscript, implementation below should be correct)
TODO: Test the symmetrization more extensively.
TODO: Implement sanity checks for this model.
TODO: Add documentation for symmetrization interface (+maybe find a more general way of defining it)
"""
class ZeroMagnetizationPfaffian(PfaffianState):
# S2_projection is a tuple where the first element gives the rotation angles for the Sy rotation
# and the second element are the corresponding characters which should be used
S2_projection: Tuple[HashableArray, HashableArray] = (HashableArray(np.array([0.])), HashableArray(np.array([1.])))
@nn.compact
def __call__(self, y) -> Array:
n_e_half = y.shape[-1]//2
f = self.param("f", self.init_fun, (self.n_sites, self.n_sites), self.dtype)
y = self.symmetries(y)
def evaluate_pfaff_rotations(angle):
F = jnp.block([[-f * jnp.cos(angle/2) * jnp.sin(angle/2), f * jnp.cos(angle/2) * jnp.cos(angle/2)],
[-f * jnp.sin(angle/2) * jnp.sin(angle/2), f * jnp.cos(angle/2) * jnp.sin(angle/2)]])
def evaluate_symmetries(y_sym):
F_occ = jnp.take(F, y_sym, axis=0)
take_fun = lambda x0, x1: jnp.take(x0, x1, axis=1)
F_occ = jax.vmap(take_fun)(F_occ, y_sym)
F_skew = F_occ - jnp.swapaxes(F_occ, 1, 2)
return jax.vmap(log_pfaffian)(F_skew)
return jax.vmap(evaluate_symmetries, in_axes=-1, out_axes=-1)(y)
vals = jax.vmap(evaluate_pfaff_rotations, out_axes=-2)(jnp.array(self.S2_projection[0]))
vals += jnp.log(jnp.asarray(self.S2_projection[1])).reshape((-1,1))
return self.out_transformation(vals)
| 7,358 | 50.461538 | 150 | py |
GPSKet | GPSKet-master/GPSKet/models/plaquetteqGPS.py | import jax
import jax.numpy as jnp
import numpy as np
import flax.linen as nn
from netket.utils import HashableArray
from netket.utils.types import NNInitFunc, Array, DType, Callable
from typing import Tuple, Union, Optional
from netket.hilbert.homogeneous import HomogeneousHilbert
from GPSKet.nn.initializers import normal
from GPSKet.models.qGPS import no_syms
import warnings
# TODO: Improve symmetrisation framework (same as for qGPS)
class PlaquetteqGPS(nn.Module):
# TODO: add documentation
hilbert: HomogeneousHilbert
M: int
plaquettes: HashableArray
dtype: DType = jnp.complex128
init_fun: Optional[NNInitFunc] = None # Defaults to qGPS-normal with the parameter dtype
"""
syms is a tuple of two function representing the symmetry operations.
the first function creates all symmetrically equivalent copies of the test configuration
(represented by the indices into the epsilon tensor).
The second function is optional but is required if fast updating is performed.
It takes two arguments, a single configuration (i.e. a list of indices into the epsilon tensor),
as well as a list of site indices indicating which sites have changed the occupancy.
It returns a tuple of two arrays.
The first returned array should represent the occupancies of all symmetrically equivalent configurations at the updated positions.
The second array returns the transformed site indices for all symmetry operations.
For all returned arrays of the syms function, the last dimension corresponds to the total number of symmetry operations.
"""
syms: Union[Callable, Tuple[Callable, Callable]] = no_syms()
out_transformation: Callable = lambda argument : jnp.sum(argument, axis=(-3,-2,-1))
apply_fast_update: bool = False # Careful with zero-valued parameters here, TODO: implement fallback...
def setup(self):
if type(self.syms) == tuple:
self.symmetries = self.syms[0]
self.symmetries_inverse = self.syms[1]
else:
self.symmetries = self.syms
if self.apply_fast_update:
warnings.warn("Attention! Fast updating is not applied in qGPS as the inverse symmetry operations are not supplied.")
self.apply_fast_update = False
self.L = self.hilbert.size
self.local_dim = self.hilbert.local_size
plaquettes = np.array(self.plaquettes)
inv_plaquette_ids = -1 * np.ones((plaquettes.shape[0], self.L), dtype=int)
for i in range(inv_plaquette_ids.shape[0]):
for j in range(plaquettes.shape[1]):
inv_plaquette_ids[i, plaquettes[i,j]] = j
self.inv_plaquette_ids = HashableArray(inv_plaquette_ids)
"""
Note: It might be cleaner to use the `intermediates` interface provided by flax
to cache intermediate values. However, that stacks intermediates from multiple
calls by default so these could not be fed back into the model without overwritting
this behaviour. In order to avoid this pitfall in the model definition, we thus
use our own interface to store intermediate values which can be fed back into the
model as variables (as required for fast wavefunction updates).
"""
@nn.compact
def __call__(self, inputs, cache_intermediates=False, update_sites=None):
indices = self.hilbert.states_to_local_indices(inputs)
if self.init_fun is None:
init = normal(dtype=self.dtype)
else:
init = self.init_fun
epsilon = self.param("epsilon", init, (self.local_dim, self.plaquettes.shape[0], self.M, self.plaquettes.shape[1]), self.dtype)
plaquettes = np.asarray(self.plaquettes)
inv_plaquette_ids = np.asarray(self.inv_plaquette_ids)
if cache_intermediates or (update_sites is not None):
site_product_save = self.variable("intermediates_cache", "site_prod", lambda : jnp.zeros(0, dtype=self.dtype))
indices_save = self.variable("intermediates_cache", "samples", lambda : jnp.zeros(0, dtype=indices.dtype))
if update_sites is None:
def evaluate_site_product(sample):
def single_plaquette_eval(plaquette_ids, epsi):
result = jnp.take_along_axis(epsi, sample[:,:,plaquette_ids], axis=0).prod(axis=-1)
return result.reshape(-1)
return jax.vmap(single_plaquette_eval, in_axes=(0, 1), out_axes=0)(plaquettes, epsilon)
def get_site_prod(sample):
return jax.vmap(evaluate_site_product, in_axes=-1, out_axes=-1)(self.symmetries(sample))
transformed_samples = jnp.expand_dims(indices, (1, 2)) # required for the inner take_along_axis
site_product = jax.vmap(get_site_prod)(transformed_samples)
else:
site_product_old = site_product_save.value
new_samples = indices
old_samples = jax.vmap(jnp.take, in_axes=(0, 0), out_axes=0)(indices_save.value, update_sites)
def inner_site_product_update(site_prod_old, new_occs, old_occs, sites):
def single_plaquette_update(sp_old, epsi, inv_plaquette):
transformed_sites = inv_plaquette[sites]
valid_elements = transformed_sites != -1
update = jnp.where(jnp.expand_dims(valid_elements,1), x=epsi[new_occs, :, transformed_sites], y=1.).prod(axis=0)
update /= jnp.where(jnp.expand_dims(valid_elements,1), x=epsi[old_occs, :, transformed_sites], y=1.).prod(axis=0)
return sp_old * update
return jax.vmap(single_plaquette_update, in_axes=(0, 1, 0), out_axes=0)(site_prod_old, epsilon, inv_plaquette_ids)
def outer_site_product_update(site_prod_old, sample_new, sample_old, update_sites):
inv_sym_new, inv_sym_sites = self.symmetries_inverse(sample_new, update_sites)
inv_sym_old, inv_sym_sites = self.symmetries_inverse(sample_old, update_sites)
return jax.vmap(inner_site_product_update, in_axes=(-1, -1, -1, -1), out_axes=-1)(site_prod_old, inv_sym_new, inv_sym_old, inv_sym_sites)
site_product = jax.vmap(outer_site_product_update, in_axes=(0, 0, 0, 0), out_axes=0)(site_product_old, new_samples, old_samples, update_sites)
if cache_intermediates:
site_product_save.value = site_product
if update_sites is not None:
def update_fun(saved_config, update_sites, occs):
return saved_config.at[update_sites].set(occs)
indices_save.value = jax.vmap(update_fun, in_axes=(0, 0, 0), out_axes=0)(indices_save.value, update_sites, indices)
else:
indices_save.value = indices
# site_product has dim N_batch x number of plaquettes x M x Number of syms
return self.out_transformation(site_product) | 6,923 | 52.261538 | 154 | py |
GPSKet | GPSKet-master/GPSKet/models/autoreg_qGPS.py | import abc
from typing import Tuple, Union, Optional
import jax
import jax.numpy as jnp
from jax.scipy.special import logsumexp
from flax import linen as nn
from netket.hilbert.homogeneous import HomogeneousHilbert
from netket.utils.types import NNInitFunc, Array, DType, Callable
from jax.nn.initializers import zeros, ones
from GPSKet.nn.initializers import normal
from GPSKet.models import qGPS
def gpu_cond(pred, true_func, false_func, args):
    return jax.tree_util.tree_map(
        lambda x, y: pred * x + (1 - pred) * y, true_func(args), false_func(args)
    )
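# Minimal sketch of the helper above: unlike jax.lax.cond, gpu_cond evaluates
# *both* branches and blends the outputs with the 0/1 predicate, which keeps
# scans over sites trace-friendly at the cost of computing the unused branch.
def _demo_gpu_cond():
    args = jnp.arange(3.0)
    return gpu_cond(1, lambda a: a + 1.0, lambda a: a - 1.0, args)  # [1., 2., 3.]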
class AbstractARqGPS(nn.Module):
"""
Base class for autoregressive qGPS.
Subclasses must implement the methods `__call__` and `conditionals`.
They can also override `_conditional` to implement the caching for fast autoregressive sampling.
They must also implement the field `machine_pow`,
which specifies the exponent to normalize the outputs of `__call__`.
"""
hilbert: HomogeneousHilbert
"""the Hilbert space. Only homogeneous Hilbert spaces are supported."""
# machine_pow: int = 2 Must be defined on subclasses
def __post_init__(self):
super().__post_init__()
if not isinstance(self.hilbert, HomogeneousHilbert):
raise ValueError(
f"Only homogeneous Hilbert spaces are supported by ARNN, but hilbert is a {type(self.hilbert)}."
)
def _conditional(self, inputs: Array, index: int) -> Array:
"""
Computes the conditional probabilities for a site to take a given value.
It should only be called successively with indices 0, 1, 2, ...,
as in the autoregressive sampling procedure.
Args:
inputs: configurations with dimensions (batch, Hilbert.size).
index: index of the site.
Returns:
The probabilities with dimensions (batch, Hilbert.local_size).
"""
return self.conditionals(inputs)[:, index, :]
@abc.abstractmethod
def conditionals(self, inputs: Array) -> Array:
"""
Computes the conditional probabilities for each site to take each value.
Args:
inputs: configurations with dimensions (batch, Hilbert.size).
Returns:
The probabilities with dimensions (batch, Hilbert.size, Hilbert.local_size).
Examples:
>>> import pytest; pytest.skip("skip automated test of this docstring")
>>>
>>> p = model.apply(variables, σ, method=model.conditionals)
>>> print(p[2, 3, :])
[0.3 0.7]
# For the 3rd spin of the 2nd sample in the batch,
# it takes probability 0.3 to be spin down (local state index 0),
# and probability 0.7 to be spin up (local state index 1).
"""
class ARqGPS(AbstractARqGPS):
"""
Implements the autoregressive formulation of the QGPS Ansatz with weight sharing,
support for symmetries and Hilbert spaces constrained to the zero magnetization sector.
"""
M: int
"""Bond dimension"""
dtype: DType = jnp.complex128
"""Type of the variational parameters"""
machine_pow: int = 2
"""Exponent required to normalize the output"""
init_fun: Optional[NNInitFunc] = None # Defaults to qGPS-normal with the parameter dtype
"""Initializer for the variational parameters"""
normalize: bool=True
"""Whether the Ansatz should be normalized"""
apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations"""
# TODO: extend to cases beyond D=2
count_spins: Callable = lambda spins : jnp.stack([(spins+1)&1, ((spins+1)&2)/2], axis=-1).astype(jnp.int32)
"""Function to count down and up spins"""
# TODO: extend to cases where total_sz != 0
renormalize_log_psi: Callable = lambda n_spins, hilbert, index: jnp.log(jnp.heaviside(hilbert.size//2-n_spins, 0))
"""Function to renormalize conditional log probabilities"""
out_transformation: Callable=lambda argument: jnp.sum(argument, axis=-1)
"""Function of the output layer, by default sums over bond dimension"""
# Dimensions:
# - B = batch size
# - D = local dimension
# - L = number of sites
# - M = bond dimension
# - T = number of symmetries
def _conditional(self, inputs: Array, index: int) -> Array:
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L)
# Compute conditional probability for site at index
log_psi = _conditional(self, inputs, index) # (B, D)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
p = jnp.exp(self.machine_pow*log_psi.real)
return p
def conditionals(self, inputs: Array) -> Array:
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L)
# Compute conditional probabilities for all sites
log_psi = _conditionals(self, inputs) # (B, L, D)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
p = jnp.exp(self.machine_pow*log_psi.real)
return p
def setup(self):
if self.init_fun is None:
init = normal(dtype=self.dtype)
else:
init = self.init_fun
self._epsilon = self.param("epsilon", init, (self.hilbert.local_size, self.M, self.hilbert.size), self.dtype)
self._cache = self.variable("cache", "inputs", ones, None, (1, self.hilbert.local_size, self.M), self.dtype)
if self.hilbert.constrained:
self._n_spins = self.variable("cache", "spins", zeros, None, (1, self.hilbert.local_size))
def __call__(self, inputs: Array) -> Array:
if jnp.ndim(inputs) == 1:
inputs = jnp.expand_dims(inputs, axis=0) # (B, L)
# Transform inputs according to symmetries
inputs = self.apply_symmetries(inputs) # (B, L, T)
n_symm = inputs.shape[-1]
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L, T)
batch_size = inputs.shape[0]
# Compute conditional log-probabilities
log_psi = jax.vmap(_conditionals, in_axes=(None, -1), out_axes=-1)(self, inputs) # (B, L, D, T)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow, axis=-2)
# Take conditionals along sites-axis according to input indices
log_psi = jnp.take_along_axis(log_psi, jnp.expand_dims(inputs, axis=2), axis=2) # (B, L, 1, T)
log_psi = jnp.sum(log_psi, axis=1) # (B, 1, T)
log_psi = jnp.reshape(log_psi, (batch_size, n_symm)) # (B, T)
# Compute symmetrized log-amplitudes
log_psi_symm = (1/self.machine_pow)*logsumexp(self.machine_pow*log_psi.real, axis=-1, b=1/n_symm)
log_psi_symm_im = logsumexp(1j*log_psi.imag, axis=-1).imag
log_psi_symm = log_psi_symm+1j*log_psi_symm_im
return log_psi_symm # (B,)
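# Standalone sketch of the symmetrization above (machine_pow = 2): the modulus
# is averaged over the symmetry copies in |psi|^2 space, while the phase is
# combined through a logsumexp of the unit-modulus phase factors.
def _symmetrize_log_psi(log_psi, machine_pow=2):
    n_symm = log_psi.shape[-1]
    log_mod = (1 / machine_pow) * logsumexp(
        machine_pow * log_psi.real, axis=-1, b=1 / n_symm
    )
    phase = logsumexp(1j * log_psi.imag, axis=-1).imag
    return log_mod + 1j * phase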
class ARqGPSModPhase(ARqGPS):
"""
Implements an Ansatz composed of an autoregressive qGPS for the modulus of the amplitude and a qGPS for the phase.
"""
def setup(self):
assert jnp.issubdtype(self.dtype, jnp.floating)
super().setup()
if self.init_fun is None:
init = normal(dtype=self.dtype)
else:
init = self.init_fun
self._qgps = qGPS(
self.hilbert, self.hilbert.size,
dtype=jnp.float64,
init_fun=init)
def __call__(self, inputs: Array) -> Array:
log_psi_mod = super().__call__(inputs)
log_psi_phase = self._qgps(inputs)
return log_psi_mod + log_psi_phase*1j
def _normalize(log_psi: Array, machine_pow: int, axis: int=-1) -> Array:
return log_psi - (1/machine_pow)*logsumexp(machine_pow*log_psi.real, axis=axis, keepdims=True)
def _compute_conditional(hilbert: HomogeneousHilbert, cache: Array, n_spins: Array, epsilon: Array, inputs: Array, index: int, count_spins: Callable, renormalize_log_psi: Callable, out_transformation: Callable) -> Union[Tuple, Array]:
# Slice inputs at index-1 to get cached products
# (Note: when index=0, it doesn't matter what slice of the cache we take,
# because it is initialized with ones)
inputs_i = inputs[:, index-1] # (B,)
# Compute product of parameters and cache at index along bond dimension
params_i = jnp.asarray(epsilon, epsilon.dtype)[:, :, index] # (D, M)
prods = jax.vmap(lambda c, s: params_i*c[s], in_axes=(0, 0))(cache, inputs_i)
prods = jnp.asarray(prods, epsilon.dtype) # (B, D, M)
    # Update cache if index is non-negative, otherwise leave as is
cache = gpu_cond(
index >= 0,
lambda _: prods,
lambda _: cache,
None
)
# Compute log conditional probabilities
log_psi = out_transformation(prods) # (B, D)
# Update spins count if index is larger than 0, otherwise leave as is
n_spins = gpu_cond(
index > 0,
lambda n_spins: n_spins + count_spins(inputs_i),
lambda n_spins: n_spins,
n_spins
)
# If Hilbert space associated with the model is constrained, i.e.
# model has "n_spins" in "cache" collection, then impose total magnetization.
# This is done by counting number of up/down spins until index, then if
# n_spins is >= L/2 the probability of up/down spin at index should be 0,
# i.e. the log probability becomes -inf
log_psi = gpu_cond(
index >= 0,
lambda log_psi: log_psi+renormalize_log_psi(n_spins, hilbert, index),
lambda log_psi: log_psi,
log_psi
)
return (cache, n_spins), log_psi
def _conditional(model: ARqGPS, inputs: Array, index: int) -> Array:
# Retrieve cache
batch_size = inputs.shape[0]
cache = model._cache.value
cache = jnp.asarray(cache, model.dtype)
cache = jnp.resize(cache, (batch_size, model.hilbert.local_size, model.M)) # (B, D, M)
# Retrieve spins count
if model.has_variable("cache", "spins"):
n_spins = model._n_spins.value
n_spins = jnp.asarray(n_spins, jnp.int32)
n_spins = jnp.resize(n_spins, (batch_size, model.hilbert.local_size)) # (B, D)
else:
n_spins = jnp.zeros((batch_size, model.hilbert.local_size), jnp.int32)
# Compute log conditional probabilities
(cache, n_spins), log_psi = _compute_conditional(model.hilbert, cache, n_spins, model._epsilon, inputs, index, model.count_spins, model.renormalize_log_psi, model.out_transformation)
# Update model cache
if model.has_variable("cache", "inputs"):
model._cache.value = cache
if model.has_variable("cache", "spins"):
model._n_spins.value = n_spins
return log_psi # (B, D)
def _conditionals(model: ARqGPS, inputs: Array) -> Array:
# Loop over sites while computing log conditional probabilities
def _scan_fun(carry, index):
cache, n_spins = carry
(cache, n_spins), log_psi = _compute_conditional(model.hilbert, cache, n_spins, model._epsilon, inputs, index, model.count_spins, model.renormalize_log_psi, model.out_transformation)
n_spins = gpu_cond(
model.hilbert.constrained,
lambda n_spins: n_spins,
lambda n_spins: jnp.zeros_like(n_spins),
n_spins
)
return (cache, n_spins), log_psi
batch_size = inputs.shape[0]
cache = jnp.ones((batch_size, model.hilbert.local_size, model.M), model.dtype)
n_spins = jnp.zeros((batch_size, model.hilbert.local_size), jnp.int32)
indices = jnp.arange(model.hilbert.size)
_, log_psi = jax.lax.scan(
_scan_fun,
(cache, n_spins),
indices
)
log_psi = jnp.transpose(log_psi, [1, 0, 2])
return log_psi # (B, L, D)
| 11,842 | 39.282313 | 234 | py |
GPSKet | GPSKet-master/GPSKet/models/backflow_jastrow.py | import jax
import jax.numpy as jnp
import flax.linen as nn
from typing import Tuple
from netket.utils.types import Array, NNInitFunc, Callable
from netket.utils import HashableArray
from .backflow import Backflow
from .jastrow import Jastrow
from ..hilbert.discrete_fermion import FermionicDiscreteHilbert
class BackflowJastrow(nn.Module):
"""
Implements a linear combination of Slater determinants with Backflow orbitals, multiplied by a Jastrow correlation factor
"""
hilbert: FermionicDiscreteHilbert
"""The Hilbert space of the wavefunction model"""
orbitals: Tuple[HashableArray]
"""Tuple of initial orbitals for the Backflow determinants"""
correction_fun: Tuple[nn.Module]
"""Tuple of modules that compute the correction to the initial Backflow orbitals"""
jastrow_init_fun: NNInitFunc=jax.nn.initializers.normal()
"""Initializer for the variational parameters of the Jastrow coefficient"""
backflow_apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations in the Slater determinant"""
jastrow_apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations in the Jastrow factor"""
apply_fast_update: bool=True
"""Whether fast update is used in the computation of the Slater determinants"""
spin_symmetry_by_structure: bool=True
"""Whether the α and β orbitals are the same or not"""
fixed_magnetization: bool=True
"""Whether magnetization should be conserved or not"""
@nn.compact
def __call__(self, x) -> Array:
assert len(self.orbitals) == len(self.correction_fun)
n_determinants = len(self.orbitals)
batch_size = x.shape[0]
log_det = jnp.zeros((batch_size, n_determinants), jnp.complex128)
for d in range(n_determinants):
y = Backflow(
self.hilbert,
self.orbitals[d],
self.correction_fun[d],
apply_symmetries=self.backflow_apply_symmetries,
spin_symmetry_by_structure=self.spin_symmetry_by_structure,
fixed_magnetization=self.fixed_magnetization,
apply_fast_update=self.apply_fast_update
)(x)
log_det = log_det.at[:,d].set(y)
backflow = jax.scipy.special.logsumexp(log_det, axis=-1)
jastrow = Jastrow(
self.hilbert,
init_fun=self.jastrow_init_fun,
apply_symmetries=self.jastrow_apply_symmetries
)(x)
return jastrow+backflow | 2,622 | 43.457627 | 125 | py |
GPSKet | GPSKet-master/GPSKet/models/qGPS.py | import jax
import jax.numpy as jnp
import numpy as np
import flax.linen as nn
from netket.utils import HashableArray
from netket.utils.types import NNInitFunc, Array, DType, Callable
from typing import Tuple, Union, Optional
from netket.hilbert.homogeneous import HomogeneousHilbert
from GPSKet.nn.initializers import normal
import warnings
# helper function to get the symmetry transformation functions for spin systems
def get_sym_transformation_spin(graph, automorphisms=True, spin_flip=True):
if automorphisms:
syms = graph.automorphisms().to_array().T
inv_syms = np.zeros(syms.shape, dtype=syms.dtype)
for i in range(syms.shape[0]):
for j in range(syms.shape[1]):
inv_syms[syms[i,j], j] = i
syms = jnp.array(syms)
inv_syms = jnp.array(inv_syms)
if spin_flip:
def symmetries(samples):
out = jnp.take(samples, syms, axis=-1)
out = jnp.concatenate((out, 1-out), axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = jnp.concatenate((inv_syms[indices], inv_syms[indices]), axis=-1)
inv_sym_occs = jnp.tile(jnp.expand_dims(sample_at_indices, axis=-1), syms.shape[1])
inv_sym_occs = jnp.concatenate((inv_sym_occs, 1-inv_sym_occs), axis=-1)
return inv_sym_occs, inv_sym_sites
else:
def symmetries(samples):
out = jnp.take(samples, syms, axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = inv_syms[indices]
inv_sym_occs = jnp.tile(jnp.expand_dims(sample_at_indices, axis=-1), syms.shape[1])
return inv_sym_occs, inv_sym_sites
else:
if spin_flip:
def symmetries(samples):
out = jnp.expand_dims(samples, axis=-1)
out = jnp.concatenate((out, 1-out), axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = jnp.expand_dims(indices, axis=-1)
inv_sym_sites = jnp.concatenate((inv_sym_sites, inv_sym_sites), axis=-1)
inv_sym_occs = jnp.expand_dims(sample_at_indices, axis=-1)
inv_sym_occs = jnp.concatenate((inv_sym_occs, 1-inv_sym_occs), axis=-1)
return inv_sym_occs, inv_sym_sites
else:
def symmetries(samples):
out = jnp.expand_dims(samples, axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = jnp.expand_dims(indices, axis=-1)
inv_sym_occs = jnp.expand_dims(sample_at_indices, axis=-1)
return inv_sym_occs, inv_sym_sites
return (symmetries, inv_symmetries)
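# Usage sketch (assumes netket is installed): build the symmetry functions for
# a short periodic chain, combining the lattice automorphisms with the global
# spin flip, and apply them to a batch of binary spin configurations.
def _demo_spin_symmetries():
    import netket as nk
    symmetries, inv_symmetries = get_sym_transformation_spin(nk.graph.Chain(4))
    samples = jnp.array([[0, 1, 1, 0]])
    # the last axis enumerates the symmetric copies: all automorphisms followed
    # by their spin-flipped (0 <-> 1) partners
    return symmetries(samples).shape  # (1, 4, 2 * n_automorphisms)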
# default syms function (no symmetries)
def no_syms():
symmetries = lambda samples : jnp.expand_dims(samples, axis=-1)
inv_sym = lambda sample_at_indices, indices : (jnp.expand_dims(sample_at_indices, axis=-1), jnp.expand_dims(indices, axis=-1))
return (symmetries, inv_sym)
# TODO: the framework for the symmetrisation could (and should) definitely be improved at some point
class qGPS(nn.Module):
# TODO: add documentation
hilbert: HomogeneousHilbert
M: int
dtype: DType = jnp.complex128
init_fun: Optional[NNInitFunc] = None # Defaults to qGPS-normal with the parameter dtype
"""
syms is a tuple of two function representing the symmetry operations.
the first function creates all symmetrically equivalent copies of the test configuration
(represented by the indices into the epsilon tensor).
The second function is optional but is required if fast updating is performed.
It takes two arguments, a single configuration (i.e. a list of indices into the epsilon tensor),
as well as a list of site indices indicating which sites have changed the occupancy.
It returns a tuple of two arrays.
The first returned array should represent the occupancies of all symmetrically equivalent configurations at the updated positions.
The second array returns the transformed site indices for all symmetry operations.
For all returned arrays of the syms function, the last dimension corresponds to the total number of symmetry operations.
"""
syms: Union[Callable, Tuple[Callable, Callable]] = no_syms()
out_transformation: Callable = lambda argument : jnp.sum(argument, axis=(-2,-1))
apply_fast_update: bool = True
def setup(self):
if type(self.syms) == tuple:
self.symmetries = self.syms[0]
self.symmetries_inverse = self.syms[1]
else:
            self.symmetries = self.syms
            # fast updating requires the inverse symmetry operations, which were not supplied here
            assert (not self.apply_fast_update)
self.L = self.hilbert.size
self.local_dim = self.hilbert.local_size
"""
Note: It might be cleaner to use the `intermediates` interface provided by flax
to cache intermediate values. However, that stacks intermediates from multiple
    calls by default so these could not be fed back into the model without overwriting
this behaviour. In order to avoid this pitfall in the model definition, we thus
use our own interface to store intermediate values which can be fed back into the
model as variables (as required for fast wavefunction updates).
"""
@nn.compact
def __call__(self, inputs, cache_intermediates=False, update_sites=None):
indices = self.hilbert.states_to_local_indices(inputs)
if self.init_fun is None:
init = normal(dtype=self.dtype)
else:
init = self.init_fun
epsilon = self.param("epsilon", init, (self.local_dim, self.M, self.L), self.dtype)
# Register the cache variables
if update_sites is not None or cache_intermediates:
saved_configs = self.variable("intermediates_cache", "samples", lambda : None)
saved_site_product = self.variable("intermediates_cache", "site_prod", lambda : None)
if update_sites is not None:
indices_save = saved_configs.value
old_samples = jax.vmap(jnp.take, in_axes=(0, 0), out_axes=0)(indices_save, update_sites)
def inner_site_product_update(site_prod_old, new_occs, old_occs, sites):
site_prod_new = site_prod_old / (epsilon[old_occs,:,sites].prod(axis=0))
site_prod_new = site_prod_new * (epsilon[new_occs,:,sites].prod(axis=0))
return site_prod_new
def outer_site_product_update(site_prod_old, sample_new, sample_old, update_sites):
inv_sym_new, inv_sym_sites = self.symmetries_inverse(sample_new, update_sites)
inv_sym_old, inv_sym_sites = self.symmetries_inverse(sample_old, update_sites)
return jax.vmap(inner_site_product_update, in_axes=(-1, -1, -1, -1), out_axes=-1)(site_prod_old, inv_sym_new, inv_sym_old, inv_sym_sites)
site_product_old = saved_site_product.value
site_product = jax.vmap(outer_site_product_update, in_axes=(0, 0, 0, 0), out_axes=0)(site_product_old, indices, old_samples, update_sites)
else:
def evaluate_site_product(sample):
return jnp.take_along_axis(epsilon, sample, axis=0).prod(axis=-1).reshape(-1)
def get_site_prod(sample):
return jax.vmap(evaluate_site_product, in_axes=-1, out_axes=-1)(self.symmetries(sample))
transformed_samples = jnp.expand_dims(indices, (1, 2)) # required for the inner take_along_axis
site_product = jax.vmap(get_site_prod)(transformed_samples)
if cache_intermediates:
saved_site_product.value = site_product
if update_sites is not None:
def update_fun(saved_config, update_sites, occs):
def scan_fun(carry, count):
return (carry.at[update_sites[count]].set(occs[count]), None)
return jax.lax.scan(scan_fun, saved_config, jnp.arange(update_sites.shape[0]), reverse=True)[0]
full_samples = jax.vmap(update_fun, in_axes=(0, 0, 0), out_axes=0)(saved_configs.value, update_sites, indices)
else:
full_samples = indices
saved_configs.value = full_samples
return self.out_transformation(site_product)
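# Fast-update sketch (assumes netket; the Hilbert space and shapes are
# illustrative): evaluate a batch once with cache_intermediates=True, then
# re-evaluate configurations that differ on a single site via update_sites.
# Both code paths should agree.
def _demo_fast_update():
    import netket as nk
    hilbert = nk.hilbert.Spin(1 / 2, 4)
    model = qGPS(hilbert, 2)
    configs = hilbert.random_state(jax.random.PRNGKey(0), 3)  # (3, 4), entries +/-1
    variables = model.init(jax.random.PRNGKey(1), configs)
    _, cache = model.apply(
        variables, configs, cache_intermediates=True, mutable="intermediates_cache"
    )
    # flip site 0 of every sample and update only that site
    update_sites = jnp.zeros((3, 1), dtype=int)
    new_occs = -configs[:, :1]
    log_psi_fast = model.apply({**variables, **cache}, new_occs, update_sites=update_sites)
    log_psi_full = model.apply(variables, configs.at[:, 0].multiply(-1.0))
    return jnp.allclose(log_psi_fast, log_psi_full)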
| 8,442 | 48.95858 | 153 | py |
GPSKet | GPSKet-master/GPSKet/models/backflow.py | import jax
import jax.numpy as jnp
import numpy as np
import flax.linen as nn
from jax.scipy.special import logsumexp
from netket.utils import HashableArray
from netket.utils.types import Array, Callable
from GPSKet.hilbert import FermionicDiscreteHilbert
from GPSKet.models import occupancies_to_electrons
class Backflow(nn.Module):
"""
    Implements an (un)restricted backflow model which can conserve or break magnetization
"""
hilbert: FermionicDiscreteHilbert
"""The Hilbert space of the wavefunction model"""
orbitals: HashableArray
"""Initial orbitals, e.g. Hartree-Fock"""
correction_fun: nn.Module
"""Module that computes the correction to the initial orbitals"""
apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations"""
apply_fast_update: bool=True
"""Whether to allow fast-updating or not"""
spin_symmetry_by_structure: bool=True
"""Whether the α and β orbitals are the same or not"""
fixed_magnetization: bool=True
"""Whether magnetization should be conserved or not"""
@nn.compact
def __call__(self, x, cache_intermediates=False, update_sites=None) -> Array:
norb = self.hilbert.size
nelec = self.hilbert._n_elec
# TODO: improve performance by scanning over symmetries
orbitals = jnp.array(self.orbitals) # (L, N)
corrections = self.correction_fun(x, cache_intermediates=cache_intermediates, update_sites=update_sites) # (B, L, N, T)
if cache_intermediates or (update_sites is not None):
indices_save = self.variable("intermediates_cache", "samples", lambda : jnp.zeros(0, dtype=x.dtype))
if update_sites is not None:
def update_fun(saved_config, update_sites, occs):
def scan_fun(carry, count):
return (carry.at[update_sites[count]].set(occs[count]), None)
return jax.lax.scan(scan_fun, saved_config, jnp.arange(update_sites.shape[0]), reverse=True)[0]
full_x = jax.vmap(update_fun, in_axes=(0, 0, 0), out_axes=0)(indices_save.value, update_sites, x)
else:
full_x = x
if cache_intermediates:
indices_save.value = full_x
y = occupancies_to_electrons(full_x, nelec)
y = self.apply_symmetries(y) # (B, N, T)
y = jnp.expand_dims(y, axis=-2) # (B, N, 1, T)
if self.fixed_magnetization:
y_up, y_dn = jnp.split(y, np.array([nelec[0]]), axis=1)
if self.spin_symmetry_by_structure:
ɸ_up = jnp.take_along_axis(jnp.expand_dims(orbitals, axis=(0,-1)), y_up, axis=1)
ɸ_dn = jnp.take_along_axis(jnp.expand_dims(orbitals, axis=(0,-1)), y_dn, axis=1)
Δ_up = jnp.take_along_axis(corrections, y_up, axis=1)
Δ_dn = jnp.take_along_axis(corrections, y_dn, axis=1)
else:
ɸ_up = jnp.take_along_axis(jnp.expand_dims(orbitals[:,:nelec[0]], axis=(0,-1)), y_up, axis=1)
ɸ_dn = jnp.take_along_axis(jnp.expand_dims(orbitals[:,nelec[0]:], axis=(0,-1)), y_dn, axis=1)
Δ_up = jnp.take_along_axis(corrections[:,:,:nelec[0],:], y_up, axis=1)
Δ_dn = jnp.take_along_axis(corrections[:,:,nelec[0]:,:], y_dn, axis=1)
ɸ_up = jnp.transpose(ɸ_up, (0,3,1,2)) # (B, T, N, N)
ɸ_dn = jnp.transpose(ɸ_dn, (0,3,1,2)) # (B, T, N, N)
Δ_up = jnp.transpose(Δ_up, (0,3,1,2)) # (B, T, N, N)
Δ_dn = jnp.transpose(Δ_dn, (0,3,1,2)) # (B, T, N, N)
(s_up, log_det_up) = jnp.linalg.slogdet(ɸ_up+Δ_up)
(s_dn, log_det_dn) = jnp.linalg.slogdet(ɸ_dn+Δ_dn)
log_det = log_det_up + log_det_dn + jnp.log(s_up*s_dn+0j) # (B, T)
else:
y = y.at[:,nelec[0]:,:].add(norb)
ɸ = jnp.take_along_axis(jnp.expand_dims(orbitals, axis=(0,-1)), y, axis=1)
Δ = jnp.take_along_axis(corrections, y, axis=1)
ɸ = jnp.transpose(ɸ, (0,3,1,2)) # (B, T, N, N)
Δ = jnp.transpose(Δ, (0,3,1,2)) # (B, T, N, N)
(s, log_det) = jnp.linalg.slogdet(ɸ+Δ)
log_det = log_det + jnp.log(s+0j) # (B, T)
return logsumexp(log_det, axis=-1) | 4,261 | 51.617284 | 127 | py |
GPSKet | GPSKet-master/GPSKet/models/slater.py | import jax
import numpy as np
import jax.numpy as jnp
from flax import linen as nn
from typing import Tuple, Union, Optional
from GPSKet.hilbert import FermionicDiscreteHilbert
from netket.utils.types import Array, Callable, DType, NNInitFunc
from netket.utils import HashableArray
from functools import partial
# Dimensions:
# - B = batch size
# - L = number of sites
# - N = total number of electrons
# - N_up = number of spin-up electrons
# - N_down = number of spin-down electrons
# - M = number of determinants
# - S = number of spin rotations for S^2 projection
# - T = number of symmetries
def occupancies_to_electrons(x : Array, n_elec : Tuple[int, int]) -> Array:
"""
Converts input configs from 2nd quantized representation x to 1st quantized representation y:
x=[x_1, x_2, ..., x_L]
|
v
y=(y_1, y_2, ..., y_{N_up}, y_{N_up+1}, y_{N_up+2}, ..., y_{N_up+N_down})
Args:
x : an array of input configurations in 2nd quantization of shape (B, L)
n_elec : a tuple of ints N_up and N_down specifying the number of spin-up and spin-down electrons
Returns:
y : input configurations transformed into 1st quantization representation (B, N_up+N_down)
"""
batch_size = x.shape[0]
x = jnp.asarray(x, dtype=jnp.int32)
_, y_up = jnp.nonzero(x&1, size=batch_size*n_elec[0])
_, y_down = jnp.nonzero((x&2)/2, size=batch_size*n_elec[1])
y_up = jnp.reshape(y_up, (batch_size, -1))
y_down = jnp.reshape(y_down, (batch_size, -1))
y = jnp.column_stack([y_up, y_down])
return y
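# Worked example for the conversion above: two sites, one spin-up and one
# spin-down electron. Occupancies are encoded as 0 = empty, 1 = up, 2 = down,
# 3 = doubly occupied.
def _demo_occupancies_to_electrons():
    x = jnp.array([[3, 0], [1, 2]])
    return occupancies_to_electrons(x, (1, 1))  # [[0, 0], [0, 1]]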
class Slater(nn.Module):
"""
This defines a set of M Slater determinants.
    By default these are summed together, but other ways of combining them are possible.
"""
hilbert: FermionicDiscreteHilbert
"""Hilbert space"""
n_determinants: int = 1
"""Number of determinants"""
dtype: DType = jnp.complex128
init_fun : Union[NNInitFunc,Tuple[NNInitFunc,NNInitFunc]] = jax.nn.initializers.orthogonal()
symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
out_transformation: Callable = lambda x: jax.scipy.special.logsumexp(x, axis=(1, -1, -2))
"""Final output transformation. Its input has shape (B, M, S, T)."""
spin_symmetry_by_structure: bool = False
""" Flag determines whether the S^2 symmetry (with S=0) should be enforced
by using the same orbitals for up and down spin.
"""
S2_projection: Optional[Tuple[HashableArray, HashableArray]] = None
""" This (optional) tuple specifies the angles and characters for spin
    rotations which should be used for the S^2 projection. Only sensible if the above
    flag is false.
"""
fixed_magnetization: bool = True
"""Whether this is a SD with fixed particle number and magnetization or not."""
apply_fast_update: bool = True
constant_time_updates: bool = True
""" Whether or not the fast updates of the determinants are done in constant time (which requires an O(L * N^2)
setup cost) or if they are done in O(N) (in which case the setup is cheaper).
One would expect that for Hamiltonians with O(L) terms (e.g. Hubbard model), the constant time updating should be
deactivated and for Hamiltonians with more terms (e.g. ab-initio Hamiltonians) it should be activated to get the best
performance. TODO: We need more benchmarking for this!"""
def setup(self):
""" Just do some sanity checks that the chosen parameters actually make sense.
TODO: Clean this up."""
        assert(self.hilbert._n_elec is not None) # SD currently only implemented for a Hilbert space with a fixed magnetization/electron number
if self.spin_symmetry_by_structure:
assert(self.S2_projection is None) # While technically possible this does not make sense.
if self.S2_projection is not None:
assert(self.fixed_magnetization) # Not yet implemented
if self.fixed_magnetization:
if isinstance(self.init_fun, Tuple):
self._init_fun_up = self.init_fun[0]
self._init_fun_dn = self.init_fun[1]
else:
self._init_fun_up = self.init_fun
self._init_fun_dn = self.init_fun
@nn.compact
def __call__(self, x, cache_intermediates=False, update_sites=None) -> Array:
n_sites = self.hilbert.size
# Register the cache variables
if update_sites is not None or cache_intermediates:
inverted_submats_save = self.variable("intermediates_cache", "inverted_submats", lambda : None)
update_matrices_save = self.variable("intermediates_cache", "update_matrices", lambda : None)
full_U_save = self.variable("intermediates_cache", "full_U", lambda : None)
sample_save = self.variable("intermediates_cache", "samples", lambda : None)
sites_to_electron_ids_save = self.variable("intermediates_cache", "sites_to_electron_ids", lambda : None)
cumulative_electron_count_save = self.variable("intermediates_cache", "cumulative_electron_count", lambda : None)
log_dets_save = self.variable("intermediates_cache", "log_dets", lambda : None)
# This code is applied if the value is calculated from scratch
if update_sites is None:
# First set up the full matrices of orbitals (with spin-down orbitals indexed by indices site + L)
if self.fixed_magnetization:
U_up = self.param("U_up", self._init_fun_up, (self.n_determinants, n_sites, self.hilbert._n_elec[0]), self.dtype)
if self.spin_symmetry_by_structure:
U_down = U_up
else:
U_down = self.param("U_down", self._init_fun_dn, (self.n_determinants, n_sites, self.hilbert._n_elec[1]), self.dtype)
def get_full_U(up_part, down_part):
return jnp.block([[up_part, jnp.zeros((n_sites, down_part.shape[1]), dtype=up_part.dtype)],
[jnp.zeros((n_sites, up_part.shape[1]), dtype=up_part.dtype), down_part]])
full_U = jax.vmap(get_full_U)(U_up, U_down) # (M, 2 * L, N)
else:
full_U = self.param("U", self.init_fun, (self.n_determinants, 2*n_sites, self.hilbert._n_elec[0]+self.hilbert._n_elec[1]), self.dtype) # (M, 2 * L, N)
# Now include the rotations, after this full_U will have shape (M, 2*L, N, S)
if self.S2_projection is None:
full_U = jnp.expand_dims(full_U, axis=-1) #(M, 2*L, N, S)
else:
def apply_rotation(angle):
# Apply the rotation to the orbitals
U00 = full_U[:, :n_sites, :self.hilbert._n_elec[0]] * jnp.cos(angle/2) + full_U[:, n_sites:, :self.hilbert._n_elec[0]] * jnp.sin(angle/2)
U10 = full_U[:, :n_sites, :self.hilbert._n_elec[0]] * jnp.sin(angle/2) + full_U[:, n_sites:, :self.hilbert._n_elec[0]] * jnp.cos(angle/2)
U01 = full_U[:, :n_sites, self.hilbert._n_elec[0]:] * jnp.cos(angle/2) - full_U[:, n_sites:, self.hilbert._n_elec[0]:] * jnp.sin(angle/2)
U11 = -full_U[:, :n_sites, self.hilbert._n_elec[0]:] * jnp.sin(angle/2) + full_U[:, n_sites:, self.hilbert._n_elec[0]:] * jnp.cos(angle/2)
U_rotated = jnp.block([[U00, U01],
[U10, U11]])
return U_rotated
full_U = jax.vmap(apply_rotation, in_axes=0, out_axes=-1)(jnp.array(self.S2_projection[0])) #(M, 2*L, N, S)
# Convert second quantized representation to first quantized representation
y = occupancies_to_electrons(x, self.hilbert._n_elec)
            y = self.symmetries(y).at[:, self.hilbert._n_elec[0]:, :].add(n_sites) # From now on a position >= L corresponds to the spin-down orbitals
# Expand the dimension so that we can do the indexing with jnp.take_along_axis
y_expanded = jnp.expand_dims(y, axis=(1, 3, -2))
full_U_expanded = jnp.expand_dims(full_U, axis=(0,-1))
"""Construct the sub-matrices where the rows of unoccupied sites have been removed.
An earlier version did this with vmaps which did however lead to inexplicable issues when calculations were run
with multiple processes on GPUs. TODO: The vmap issue should be investigated further but so far no
progress has been made with this."""
U_submats = jnp.take_along_axis(full_U_expanded, y_expanded, axis=2) # (B, M, N, N, S, T)
# Now evaluate the determinants
def evaluate_SD(U_submat):
if self.S2_projection is None and self.fixed_magnetization:
# Compute Slater determinant as product of the determinants of the
# spin-up and spin-down orbital submatrices:
# SD = det(Ũ_up)det(Ũ_down) which only works if no spin rotation is applied and the magnetization is conserved
(s_up, log_det_up) = jnp.linalg.slogdet(U_submat[:, :, :self.hilbert._n_elec[0], :self.hilbert._n_elec[0]])
(s_down, log_det_down) = jnp.linalg.slogdet(U_submat[:, :, self.hilbert._n_elec[0]:, self.hilbert._n_elec[0]:])
return log_det_up + log_det_down + jnp.log(s_up*s_down+0j)
else:
(s_det, log_det) = jnp.linalg.slogdet(U_submat)
return log_det + jnp.log(s_det+0j)
evaluate_over_rotations = jax.vmap(evaluate_SD, in_axes=-1, out_axes=-1)
log_det_values = jax.vmap(evaluate_over_rotations, in_axes=-1, out_axes=-1)(U_submats) # (B, M, S, T)
# If we store the intermediates for fast updating, we need to invert the sub-matrices
if cache_intermediates:
inverse_over_rotations = jax.vmap(jnp.linalg.inv, in_axes=-1, out_axes=-1) # vmap over rotations
inverted_submats = jax.vmap(inverse_over_rotations, in_axes=-1, out_axes=-1)(U_submats) # vmap over symmetries, output has shape (B, M, N, N, S, T)
# Apply fast updating of the determinants
else:
# Retrieve the full U matrices from the cache
full_U = jnp.take(full_U_save.value, 0, axis=0)
# First we need to determine which electrons move (and where they move)
occupancies_save = sample_save.value
old_occupancies = jax.vmap(jnp.take, in_axes=(0, 0), out_axes=0)(occupancies_save, update_sites)
old_occupancies = jnp.asarray(old_occupancies, jnp.int32)
x = jnp.asarray(x, jnp.int32)
spin_up_updates = (old_occupancies & 1).astype(int) - (x & 1).astype(int)
spin_down_updates = ((old_occupancies & 2).astype(int) - (x & 2).astype(int))//2
updates = jnp.concatenate((spin_up_updates, spin_down_updates), axis=-1) # 0: nothing, 1: remove electron, -1: add electron
# Determines the sites electrons jump to, the electron ids of the jumping electrons and the parity sign obtained from this move
@jax.vmap
def get_add_site_el_id_parity(update_sites_single, updates_single, sites_to_el_single, cum_elec_count):
update_sites_spin_channel_split = jnp.concatenate((update_sites_single, update_sites_single+n_sites))
""" We pad the update arrays with (-1)s to indicate dummy updates, this means that the determinant which is taken below
might be a little bit more expensive than necessary (as we might (and typically) have less updates than indicated
by the size of the update_sites array). We could also apply a little hack and assume that the length of the update
sites array is equal to the number of electron updates but the implementation with the padding is a bit more general
and safer to use. """
add_ids, = jnp.nonzero(updates_single == -1, size=update_sites_single.shape[0], fill_value=-1)
add_sites = jnp.where(add_ids != -1, update_sites_spin_channel_split[add_ids], -1)
remove_ids, = jnp.nonzero(updates_single == 1, size=update_sites_single.shape[0], fill_value=-1)
remove_sites = jnp.where(remove_ids != -1, update_sites_spin_channel_split[remove_ids], -1)
el_ids = jnp.where(remove_ids != -1, sites_to_el_single[remove_sites], -1)
""" Now we need to compute the additional sign we get from pretending these are updates in first quantization but
we want to have the update w.r.t. configs in second quantization (i.e. with a well-defined ordering of the electrons).
Maybe one day we want to code up the Hamiltonians + samplers in first quantization, then this would not be required
but until then we essentially need to revert the computed parity sign we evaluated in the Hamiltonian."""
def loop_fun(index, val):
sign, cum_elec_count_add, cum_elec_count_rm = val
""" Note that the arrays add_ids, add_sites, remove_ids, remove_sites, el_ids are padded with (-1)s.
This can lead to unwanted effects if the code below is modified. """
# Count the number of electrons we move past in this move
no_of_electrons_passed = abs(cum_elec_count_rm[index]-cum_elec_count_add[index]).astype(int)
# Correction if the remove site is beyond the add site
no_of_electrons_passed = jnp.where(remove_sites[index] > add_sites[index], no_of_electrons_passed-1, no_of_electrons_passed)
# Sign modification
new_sign = sign * ((-1)**(no_of_electrons_passed & 1))
                    # Modify the cumulative electron counts for the following updates
cum_elec_count_add_sites_updated = jnp.where(add_sites >= add_sites[index], cum_elec_count_add, cum_elec_count_add+1)
cum_elec_count_add_sites_updated = jnp.where(add_sites >= remove_sites[index], cum_elec_count_add_sites_updated, cum_elec_count_add_sites_updated-1)
cum_elec_count_rm_sites_updated = jnp.where(remove_sites >= add_sites[index], cum_elec_count_rm, cum_elec_count_rm+1)
cum_elec_count_rm_sites_updated = jnp.where(remove_sites >= remove_sites[index], cum_elec_count_rm_sites_updated, cum_elec_count_rm_sites_updated-1)
return (new_sign, cum_elec_count_add_sites_updated, cum_elec_count_rm_sites_updated)
cum_elec_count_add = cum_elec_count[add_sites]
cum_elec_count_rm = cum_elec_count[remove_sites]
sign_update, _, _ = jax.lax.fori_loop(0, add_sites.shape[0], loop_fun, (1, cum_elec_count_add, cum_elec_count_rm))
return add_sites, remove_sites, el_ids, sign_update
add_sites, remove_sites, moving_electron_ids, sign_update = get_add_site_el_id_parity(update_sites, updates, sites_to_electron_ids_save.value, cumulative_electron_count_save.value)
# Apply the symmetries to the add_sites
add_sites_expanded = jnp.expand_dims(add_sites, axis=-1)
add_sites_sym = self.symmetries(add_sites%n_sites)
add_sites_sym += (add_sites_expanded//n_sites) * n_sites # (B, N_updates, T)
add_sites_sym = jnp.where(add_sites_expanded != -1, add_sites_sym, -1)
# Now update the determinant values
""" If we computed the update matrices before (only done if in previous call
cache_intermediates==True and update_sites==None and also the
self.constant_time_updates flag is set), we can evaluate the determinant
updates in O(1) with this."""
if update_matrices_save.value is not None:
def get_determinant_update(add_sites_single, moving_electron_ids_single, update_matrix):
""" The update is just the determinant of a matrix which has those rows from the update matrix corresponding to the sites
where an electron is added, and the columns of the electrons which are moving. """
identity = jnp.eye(add_sites_single.shape[0])
up_mat = update_matrix[jnp.ix_(add_sites_single, moving_electron_ids_single)]
up_mat = jnp.where(jnp.expand_dims(add_sites_single, axis=-1) != -1, up_mat, identity)
up_mat = jnp.where(add_sites_single != -1, up_mat, identity)
(s_det_update, log_det_update) = jnp.linalg.slogdet(up_mat)
return log_det_update + jnp.log(s_det_update+0j)
log_det_update_per_determinant = jax.vmap(get_determinant_update, in_axes=(None, None, 0), out_axes=0) # vmap over determinants
log_det_update_per_sample = jax.vmap(log_det_update_per_determinant, in_axes=(0, 0, 0), out_axes=0) # vmap over batch dimension
log_det_update_per_rotation = jax.vmap(log_det_update_per_sample, in_axes=(None, None, -1), out_axes=-1) # vmap over rotations
log_det_update_per_symmetry = jax.vmap(log_det_update_per_rotation, in_axes=(-1, None, -1), out_axes=-1) # vmap over symmetries
log_det_values = log_det_update_per_symmetry(add_sites_sym, moving_electron_ids, update_matrices_save.value)
else:
# Here we compute the update matrix on-the-fly with U_submat^(-1) and full_U in O(N)
def get_determinant_update(add_sites_single, moving_electron_ids_single, U, U_submat_inv):
""" Here we need to construct the elements of the update matrices on-the-fly.
The update is just the determinant of a matrix which has those rows from the update matrix corresponding to the sites
where an electron is added, and the columns of the electrons which are moving. """
identity = jnp.eye(add_sites_single.shape[0])
up_mat = U[add_sites_single, :].dot(U_submat_inv[:, moving_electron_ids_single])
up_mat = jnp.where(jnp.expand_dims(add_sites_single, axis=-1) != -1, up_mat, identity)
up_mat = jnp.where(add_sites_single != -1, up_mat, identity)
(s_det_update, log_det_update) = jnp.linalg.slogdet(up_mat)
return log_det_update + jnp.log(s_det_update+0j)
log_det_update_per_determinant = jax.vmap(get_determinant_update, in_axes=(None, None, 0, 0), out_axes=0) # vmap over determinants
log_det_update_per_sample = jax.vmap(log_det_update_per_determinant, in_axes=(0, 0, None, 0), out_axes=0) # vmap over batch dimension
log_det_update_per_rotation = jax.vmap(log_det_update_per_sample, in_axes=(None, None, -1, -1), out_axes=-1) # vmap over rotations
log_det_update_per_symmetry = jax.vmap(log_det_update_per_rotation, in_axes=(-1, None, None, -1), out_axes=-1) # vmap over symmetries
log_det_values = log_det_update_per_symmetry(add_sites_sym, moving_electron_ids, full_U, inverted_submats_save.value)
log_det_values += log_dets_save.value
log_det_values += jnp.expand_dims(jnp.log(sign_update +0.j), axis=(1,2,3))
# If we want to save the cache, then we need to update the inverted sub matrices as well
if cache_intermediates:
# Apply the symmetries to the remove_sites
remove_sites_expanded = jnp.expand_dims(remove_sites, axis=-1)
remove_sites_sym = self.symmetries(remove_sites%n_sites)
remove_sites_sym += (remove_sites_expanded//n_sites) * n_sites # (B, N_updates, T)
remove_sites_sym = jnp.where(remove_sites_expanded != -1, remove_sites_sym, -1)
""" Now we again need to transform the update in first quantization to an update in second quantization.
For this we evaluate the permutation we need to apply in order to compute the correct updated inverse from the
move in first quantization so that the updated inverse correponds to the default ordering."""
el_positions_old = occupancies_to_electrons(sample_save.value, self.hilbert._n_elec).at[:, self.hilbert._n_elec[0]:].add(n_sites)
@jax.vmap
def update_el_pos(add_sites_single, electron_ids_single, el_positions_old_single):
def update_fun(i, el_pos):
return el_pos.at[electron_ids_single[i]].set(add_sites_single[i])
return jax.lax.fori_loop(0, jnp.sum(add_sites_single != -1), update_fun, el_positions_old_single)
updated_pos_first = update_el_pos(add_sites, moving_electron_ids, el_positions_old)
permutation = jnp.argsort(updated_pos_first)
def update_inverse(add_sites_single, remove_sites_single, electron_ids_single, orbitals, inv_old, permutation_single):
# Update inverse with Woodbury matrix identity (O(N^2))
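                # Woodbury identity: (A + UV)^{-1} = A^{-1} - A^{-1} U (I + V A^{-1} U)^{-1} V A^{-1},
                # with A^{-1} = inv_old, V the rows of the changed orbitals and U the column
                # selector of the moving electrons (inv_old[:, electron_ids_single] = A^{-1} U).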
V = orbitals[add_sites_single, :] - orbitals[remove_sites_single, :]
V = jnp.where(jnp.expand_dims(add_sites_single != -1, axis=-1), V, 0.)
identity = jnp.eye(add_sites_single.shape[0])
VAInv = V.dot(inv_old)
VAInvU = VAInv[:, electron_ids_single]
VAInvU = jnp.where(add_sites_single != -1, VAInvU, 0.)
inverted_mat = jnp.linalg.inv(jnp.eye(len(add_sites_single)) + VAInvU)
update = inv_old[:, electron_ids_single].dot(inverted_mat.dot(VAInv))
new_inverse_first = (inv_old - update)
return new_inverse_first[:, permutation_single]
inv_update_per_determinant = jax.vmap(update_inverse, in_axes=(None, None, None, 0, 0, None), out_axes=0) # vmap over determinants
inv_update_per_sample = jax.vmap(inv_update_per_determinant, in_axes=(0, 0, 0, None, 0, 0), out_axes=0) # vmap over batch dimension
inv_update_per_rotation = jax.vmap(inv_update_per_sample, in_axes=(None, None, None, -1, -1, None), out_axes=-1) # vmap over rotations
inv_update_per_symmetry = jax.vmap(inv_update_per_rotation, in_axes=(-1, -1, None, None, -1, None), out_axes=-1) # vmap over symmetries
inverted_submats = inv_update_per_symmetry(add_sites_sym, remove_sites_sym, moving_electron_ids, full_U, inverted_submats_save.value, permutation)
# Store everything we need to store for subsequent fast updates
if cache_intermediates:
# Store the inverted occupied submatrices (required for double updates)
inverted_submats_save.value = inverted_submats
""" Store the update matrices (U.dot(inv(U_occ))); this is O(N^2 * L) but allows for O(1) updates to the determinant values:
If we make a single set of O(L) different updates at a time (as e.g. in local energy evaluation for lattice models),
then it would potentially be faster overall not to do evaluate this matrix-matrix product in the setup and instead do O(L) updates
(though there would still be a benefit from the fast update), therefore this matrix-matrix contraction can be deactivated by unsetting
the self.constant_time_updates flag.
The full matrix-matrix contraction is also only evaluated if no subsequent updates are performed
(as typically consecutive updates are only incurred in the sampling in which case the O(N^2 * L) matrix contraction would essentially
make the fast updating useless)."""
if self.constant_time_updates:
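                # Contract the occupied-orbital index of full_U with the inverted occupied
                # submatrices, i.e. evaluate U.dot(inv(U_occ)) for every determinant,
                # rotation and symmetry at once.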
update_matrices_save.value = jnp.einsum("ijkl,miknlp->mijnlp", full_U, inverted_submats) # (B, M, L, N, S, T)
else:
update_matrices_save.value = None
""" Store the full U matrices, we expand the full_U matrices so that we can apply batched selection
of the correct values in the sampler."""
full_U_save.value = jnp.tile(full_U, (inverted_submats.shape[0], *np.ones(len(full_U.shape), dtype=int)))
# Store the occupancies
if update_sites is not None:
def update_fun(saved_config, update_sites, occs):
def scan_fun(carry, count):
return (carry.at[update_sites[count]].set(occs[count]), None)
return jax.lax.scan(scan_fun, saved_config, jnp.arange(update_sites.shape[0]), reverse=True)[0]
full_samples = jax.vmap(update_fun, in_axes=(0, 0, 0), out_axes=0)(sample_save.value, update_sites, x)
else:
full_samples = x
full_samples = jnp.asarray(full_samples, jnp.int32)
sample_save.value = full_samples
            # Store the cumulative electron counts (required for fast evaluation of the parity update)
cumulative_electron_count_save.value = jnp.cumsum(jnp.concatenate((full_samples & 1, (full_samples & 2)//2), axis=1), axis=1)
# Store a mapping from sites to electron indices (-1 values denote unoccupied sites, spin-down sites are indexed by indices site_no + L)
electron_positions = occupancies_to_electrons(full_samples, self.hilbert._n_elec).at[:, self.hilbert._n_elec[0]:].add(n_sites)
def set_occ_per_sample(electron_positions_single):
def set_occ(i, sites_to_els):
return sites_to_els.at[electron_positions_single[i]].set(i)
return jax.lax.fori_loop(0, electron_positions_single.shape[0], set_occ, -jnp.ones(self.hilbert.size*2, dtype=int))
sites_to_electron_ids_save.value = jax.vmap(set_occ_per_sample)(electron_positions)
# Store the calculated determinants
log_dets_save.value = log_det_values
# Multiply with the prefactors from the symmetry projection
if self.S2_projection is not None:
log_det_values += jnp.log(jnp.asarray(self.S2_projection[1])).reshape((-1,1))
return self.out_transformation(log_det_values)
| 26,429 | 65.240602 | 192 | py |
GPSKet | GPSKet-master/GPSKet/models/autoreg_qGPS_full.py | import numpy as np
import jax
from jax.nn.initializers import zeros
import jax.numpy as jnp
from jax.scipy.special import logsumexp
from typing import Union, Optional, Tuple, List
from netket.utils import HashableArray
from netket.utils.types import DType, NNInitFunc, Callable, Array
from flax import linen as nn
from GPSKet.nn.initializers import normal
from GPSKet.models import qGPS
from GPSKet.models.qGPS import no_syms
from .autoreg_qGPS import _normalize, gpu_cond, AbstractARqGPS
class ARqGPSFull(AbstractARqGPS):
"""
Implements the fully variational autoregressive formulation of the QGPS Ansatz,
with support for symmetries and Hilbert spaces constrained to the
zero magnetization sector.
"""
M: Union[int, HashableArray] # If M is a list, it defines a per-site support dimension -> this should be faster to evaluate but gives a significant compilation overhead
"""Bond dimension"""
dtype: DType = jnp.complex128
"""Type of the variational parameters"""
machine_pow: int = 2
"""Exponent required to normalize the output"""
init_fun: Optional[NNInitFunc] = None # Defaults to qGPS-normal with the parameter dtype
"""Initializer for the variational parameters"""
normalize: bool=True
"""Whether the Ansatz should be normalized"""
apply_symmetries: Union[Callable, Tuple[Callable, Callable]] = no_syms()
"""
Function to apply symmetries to configurations (see qGPS model definition
for an explanation of the tuple also specifying the inverse symmetry operation
for fast updating)
"""
# TODO: extend to cases beyond D=2
count_spins: Callable = lambda spins : jnp.stack([(spins+1)&1, ((spins+1)&2)/2], axis=-1).astype(jnp.int32)
"""Function to count down and up spins"""
# TODO: extend to cases where total_sz != 0
renormalize_log_psi: Callable = lambda n_spins, hilbert, index: jnp.log(jnp.heaviside(hilbert.size//2-n_spins, 0))
"""Function to renormalize conditional log probabilities"""
out_transformation: Callable=lambda argument: jnp.sum(argument, axis=-1)
"""Function of the output layer, by default sums over bond dimension"""
apply_fast_update: bool = True
"""Whether or not to apply the fast updating in the model"""
# Dimensions:
# - B = batch size
# - D = local dimension
# - L = number of sites
# - M = bond dimension
# - T = number of symmetries
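    # A minimal usage sketch (illustrative; assumes the constructor mirrors ARqGPS,
    # i.e. ARqGPSFull(hilbert, M, ...), and that netket is imported as nk):
    #   hi = nk.hilbert.Spin(s=1/2, N=10, total_sz=0)
    #   model = ARqGPSFull(hi, M=5, dtype=jnp.complex128)
    #   sampler = ARDirectSampler(hi, n_chains_per_rank=100)
    #   vs = nk.vqs.MCState(sampler, model, n_samples=100)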
def _conditional(self, inputs: Array, index: int) -> Array:
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L)
# Compute conditional probability for site at index
log_psi = _conditional(self, inputs, index) # (B, D)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
p = jnp.exp(self.machine_pow*log_psi.real)
return p
def conditionals(self, inputs: Array) -> Array:
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L)
# Compute conditional probabilities for all sites
log_psi, _ = _conditionals(self, inputs) # (B, L, D)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
p = jnp.exp(self.machine_pow*log_psi.real)
return p
def setup(self):
if self.init_fun is None:
init = normal(dtype=self.dtype)
else:
init = self.init_fun
if isinstance(self.M, HashableArray):
self._epsilon = tuple([self.param("epsilon_{}".format(i), init, (self.hilbert.local_size, np.asarray(self.M)[i], i+1), self.dtype) for i in range(self.M.shape[0])])
else:
self._epsilon = self.param("epsilon", init, (self.hilbert.local_size, self.M, int(self.hilbert.size * (self.hilbert.size + 1)/2)), self.dtype)
if self.apply_fast_update:
self._saved_configs = self.variable("intermediates_cache", "samples", lambda : None)
if isinstance(self.M, HashableArray):
self._saved_context_product = tuple([self.variable("intermediates_cache", "context_prod_{}".format(i), lambda : None) for i in range(self.M.shape[0])])
else:
self._saved_context_product = self.variable("intermediates_cache", "context_prod", lambda : None)
if self.hilbert.constrained:
self._n_spins = self.variable("cache", "spins", zeros, None, (1, self.hilbert.local_size))
if self.apply_fast_update:
            # We can only apply the fast updating if we have the inverse symmetry operation function as well
assert (type(self.apply_symmetries) == tuple)
def __call__(self, inputs: Array, cache_intermediates=False, update_sites=None) -> Array:
if jnp.ndim(inputs) == 1:
inputs = jnp.expand_dims(inputs, axis=0) # (B, L)
# Generate the full configurations from the partial ones and get inverse symmetries if a fast update is performed
if update_sites is not None:
# The old occupancies
saved_input = self._saved_configs.value
# Old occupancies at the updated sites
prev_occupancies = jax.vmap(jnp.take, in_axes=(0, 0), out_axes=0)(saved_input, update_sites)
# Compute a tuple containing the transformed occupancy and the site to index the epsilon tensor for each symmetry operation
# Old occupancy
old_occupancy, site_indices = self.apply_symmetries[1](prev_occupancies, update_sites) # (B, #updates, T), (B, #updates, T)
# Updated occupancy
new_occupancy, site_indices = self.apply_symmetries[1](inputs, update_sites) # (B, #updates, T), (B, #updates, T)
update_args = (self.hilbert.states_to_local_indices(old_occupancy), self.hilbert.states_to_local_indices(new_occupancy), site_indices)
def update_fun(saved_config, update_sites, occs):
def scan_fun(carry, count):
return (carry.at[update_sites[count]].set(occs[count]), None)
return jax.lax.scan(scan_fun, saved_config, jnp.arange(update_sites.shape[0]), reverse=True)[0]
full_samples = jax.vmap(update_fun, in_axes=(0, 0, 0), out_axes=0)(self._saved_configs.value, update_sites, inputs)
else:
update_args = None
full_samples = inputs
# Transform inputs according to symmetries
if type(self.apply_symmetries) == tuple:
inputs = self.apply_symmetries[0](inputs) # (B, L, T)
full_samples_sym = self.apply_symmetries[0](full_samples) # (B, L, T)
else:
inputs = self.apply_symmetries(inputs) # (B, L, T)
full_samples_sym = self.apply_symmetries(full_samples) # (B, L, T)
n_symm = inputs.shape[-1]
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L, T)
full_samples_sym = self.hilbert.states_to_local_indices(full_samples_sym) # (B, L, T)
batch_size = inputs.shape[0]
# Compute conditional log-probabilities
if update_sites is not None:
if isinstance(self._epsilon, tuple):
old_contexts = [cont_prod.value for cont_prod in self._saved_context_product]
else:
old_contexts = self._saved_context_product.value
log_psi, context_products = jax.vmap(_conditionals, in_axes=(None, -1, -1, -1), out_axes=(-1, -1))(self, full_samples_sym, update_args, old_contexts) # (B, L, D, T), (L, B, M, T)
else:
log_psi, context_products = jax.vmap(_conditionals, in_axes=(None, -1, None, None), out_axes=(-1, -1))(self, full_samples_sym, None, None) # (B, L, D, T), (L, B, M, T)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow, axis=-2)
# Take conditionals along sites-axis according to input indices
log_psi = jnp.take_along_axis(log_psi, jnp.expand_dims(full_samples_sym, axis=2), axis=2) # (B, L, 1, T)
log_psi = jnp.sum(log_psi, axis=1) # (B, 1, T)
log_psi = jnp.reshape(log_psi, (batch_size, n_symm)) # (B, T)
# Compute symmetrized log-amplitudes
log_psi_symm_re = (1/self.machine_pow)*logsumexp(self.machine_pow*log_psi.real, axis=-1, b=1/n_symm)
log_psi_symm_im = logsumexp(1j*log_psi.imag, axis=-1).imag
log_psi_symm = log_psi_symm_re+1j*log_psi_symm_im
if cache_intermediates:
if isinstance(self._epsilon, tuple):
for i in range(len(context_products)):
self._saved_context_product[i].value = context_products[i]
else:
self._saved_context_product.value = context_products
self._saved_configs.value = full_samples
return log_psi_symm # (B,)
def _compute_conditional(model: ARqGPSFull, n_spins: Array, inputs: Array, index: int,
update_args: Optional[Tuple[Array, Array, Array]]=None,
saved_context_prod: Optional[Array]=None) -> Union[Array, Array, Array]:
if isinstance(model._epsilon, tuple):
        # Currently, we want this function to be callable with index < 0 for the _init_cache function
if index < 0:
proper_index = 0
else:
proper_index = index
input_param = model._epsilon[proper_index][:,:,-1]
else:
# Get the epsilon sub-tensor for the current index
lower_index = (index * (index+1))//2
# Retrieve input parameters
input_param = model._epsilon[:,:,lower_index+index]
input_param = jnp.expand_dims(input_param, axis=0) # (1, D, M)
if update_args is None:
if isinstance(model._epsilon, tuple):
inputs = inputs[:,:proper_index+1]
local_epsilon = model._epsilon[proper_index]
else:
local_epsilon = jax.lax.dynamic_slice_in_dim(model._epsilon, lower_index, model.hilbert.size, axis=-1)
# Compute product of parameters over j<index
context_param = jnp.expand_dims(local_epsilon, axis=0) # (1, D, M, L)
inputs_expanded = jnp.expand_dims(inputs, axis=(1,2)) # (B, 1, 1, L)
context_val = jnp.take_along_axis(context_param, inputs_expanded, axis=1).reshape((-1, *local_epsilon.shape[-2:])) # (B, M, L)
# Apply masking for sites > index
context_val = jnp.where(jnp.arange(local_epsilon.shape[-1]) >= index, 1., context_val)
context_prod = jnp.prod(context_val, axis=-1) # (B, M)
else:
old_occupancy, new_occupancy, site_indices = update_args
if isinstance(model._epsilon, tuple):
update = (model._epsilon[index][new_occupancy,:,site_indices])
update /= (model._epsilon[index][old_occupancy,:,site_indices])
else:
update = (model._epsilon[new_occupancy,:,site_indices+lower_index])
update /= (model._epsilon[old_occupancy,:,site_indices+lower_index])
# Apply masking for sites > index
update = jnp.where(jnp.expand_dims(site_indices >= index, axis=-1), 1., update)
context_prod = update.prod(axis=1) * saved_context_prod
site_prod = input_param * jnp.expand_dims(context_prod, axis=1) # (B, D, M)
# Compute log conditional probabilities
log_psi = model.out_transformation(site_prod) # (B, D)
# Slice inputs at index-1 to count previous spins
inputs_i = inputs[:, index-1] # (B,)
# Update spins count if index is larger than 0, otherwise leave as is
n_spins = gpu_cond(
index > 0,
lambda n_spins: n_spins + model.count_spins(inputs_i),
lambda n_spins: n_spins,
n_spins
)
# If Hilbert space associated with the model is constrained, i.e.
# model has "n_spins" in "cache" collection, then impose total magnetization.
# This is done by counting number of up/down spins until index, then if
# n_spins is >= L/2 the probability of up/down spin at index should be 0,
# i.e. the log probability becomes -inf
log_psi = gpu_cond(
index >= 0,
lambda log_psi: log_psi+model.renormalize_log_psi(n_spins, model.hilbert, index),
lambda log_psi: log_psi,
log_psi
)
return n_spins, log_psi, context_prod
def _conditional(model: ARqGPSFull, inputs: Array, index: int) -> Array:
# Retrieve spins count
batch_size = inputs.shape[0]
if model.has_variable("cache", "spins"):
n_spins = model._n_spins.value
n_spins = jnp.asarray(n_spins, jnp.int32)
n_spins = jnp.resize(n_spins, (batch_size, model.hilbert.local_size)) # (B, D)
else:
n_spins = jnp.zeros((batch_size, model.hilbert.local_size), jnp.int32)
# Compute log conditional probabilities
n_spins, log_psi, _ = _compute_conditional(model, n_spins, inputs, index)
# Update model cache
if model.has_variable("cache", "spins"):
model._n_spins.value = n_spins
return log_psi # (B, D)
def _conditionals(model: ARqGPSFull, inputs: Array, update_args: Optional[Tuple[Tuple[Array, Array],
Tuple[Array, Array]]]=None, saved_context_product: Optional[Array]=None) -> Array:
# Loop over sites while computing log conditional probabilities
def _scan_fun(n_spins, index):
if saved_context_product is not None:
if isinstance(model._epsilon, tuple):
n_spins, log_psi, context_product = _compute_conditional(model, n_spins, inputs, index, update_args, saved_context_product[index][:, :])
else:
n_spins, log_psi, context_product = _compute_conditional(model, n_spins, inputs, index, update_args, saved_context_product[index, :, :])
else:
n_spins, log_psi, context_product = _compute_conditional(model, n_spins, inputs, index)
n_spins = gpu_cond(
model.hilbert.constrained,
lambda n_spins: n_spins,
lambda n_spins: jnp.zeros_like(n_spins),
n_spins
)
return n_spins, (log_psi, context_product)
batch_size = inputs.shape[0]
n_spins = jnp.zeros((batch_size, model.hilbert.local_size), jnp.int32)
indices = jnp.arange(model.hilbert.size)
if isinstance(model._epsilon, tuple):
log_psi = None
context_product = None
for i in range(len(indices)):
n_spins, value = _scan_fun(n_spins, i)
if log_psi is None:
log_psi = jnp.expand_dims(value[0], axis=0)
else:
log_psi = jnp.append(log_psi, jnp.expand_dims(value[0], axis=0), axis=0)
if context_product is None:
context_product = (value[1],)
else:
context_product = (*context_product, value[1])
else:
_, value = jax.lax.scan(
_scan_fun,
n_spins,
indices
)
log_psi, context_product = value
log_psi = jnp.transpose(log_psi, [1, 0, 2])
return log_psi, context_product # (B, L, D), (L, B, M)
class ARqGPSModPhaseFull(ARqGPSFull):
"""
Implements an Ansatz composed of an autoregressive qGPS for the modulus of the amplitude and a qGPS for the phase.
"""
def setup(self):
assert jnp.issubdtype(self.dtype, jnp.floating)
super().setup()
self._qgps = qGPS(
self.hilbert, self.hilbert.size,
dtype=jnp.float64,
init_fun=self.init_fun)
def __call__(self, inputs: Array) -> Array:
log_psi_mod = super().__call__(inputs)
log_psi_phase = self._qgps(inputs)
return log_psi_mod + log_psi_phase*1j | 15,671 | 46.490909 | 190 | py |
GPSKet | GPSKet-master/GPSKet/models/slater_jastrow.py | import jax
import jax.numpy as jnp
from jax.nn.initializers import normal
from flax import linen as nn
from typing import Union, Tuple
from netket.utils.types import Array, DType, NNInitFunc, Callable
from .slater import Slater
from .jastrow import Jastrow
from ..hilbert.discrete_fermion import FermionicDiscreteHilbert
from ..nn.initializers import orthogonal
class SlaterJastrow(nn.Module):
"""
Implements a Slater-Jastrow wavefunction
"""
hilbert: FermionicDiscreteHilbert
"""The Hilbert space of the wavefunction model"""
dtype: DType=jnp.complex128
"""Type of the variational parameters"""
n_determinants: int=1
"""Number of determinants"""
slater_init_fun: Union[NNInitFunc,Tuple[NNInitFunc,NNInitFunc]] = orthogonal()
"""Initializer for the variational parameters of the Slater determinant"""
jastrow_init_fun: NNInitFunc=normal()
"""Initializer for the variational parameters of the Jastrow coefficient"""
slater_apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations in the Slater determinant"""
jastrow_apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations in the Jastrow factor"""
out_transformation: Callable = lambda x: jax.scipy.special.logsumexp(x, axis=(1, -1, -2))
"""Final output transformation. Its input has shape (B, M, S, T)."""
apply_fast_update: bool=True
"""Whether fast update is used in the computation of the Slater determinants"""
spin_symmetry_by_structure: bool=True
"""Whether the α and β orbitals are the same or not"""
fixed_magnetization: bool=True
"""Whether magnetization should be conserved or not"""
@nn.compact
def __call__(self, x) -> Array:
slater = Slater(
self.hilbert,
n_determinants=self.n_determinants,
dtype=self.dtype,
init_fun=self.slater_init_fun,
symmetries=self.slater_apply_symmetries,
spin_symmetry_by_structure=self.spin_symmetry_by_structure,
fixed_magnetization=self.fixed_magnetization,
out_transformation=self.out_transformation,
apply_fast_update=self.apply_fast_update
)(x)
jastrow = Jastrow(
self.hilbert,
dtype=self.dtype,
init_fun=self.jastrow_init_fun,
apply_symmetries=self.jastrow_apply_symmetries
)(x)
return slater+jastrow | 2,582 | 41.344262 | 93 | py |
GPSKet | GPSKet-master/GPSKet/models/asymm_qGPS.py | import jax.numpy as jnp
import numpy as np
from flax import linen as nn
from typing import Union, Tuple
from netket.utils import HashableArray
from netket.utils.types import Array, Callable, DType, NNInitFunc
from .slater import Slater
from ..hilbert.discrete_fermion import FermionicDiscreteHilbert
from ..nn.initializers import orthogonal
# Dimensions:
# - B = batch size
# - L = number of sites
# - N = total number of electrons
# - N_up = number of spin-up electrons
# - N_down = number of spin-down electrons
# - M = number of determinants
# - T = number of symmetries
class ASymmqGPS(nn.Module):
"""
Implements the antisymmetric qGPS Ansatz with support for multiple determinants,
symmetries and different symmetrization methods
"""
hilbert: FermionicDiscreteHilbert
"""Hilbert space"""
n_determinants: int = 1
"""Number of determinants"""
dtype: DType = jnp.complex128
"""Type of the variational parameters"""
init_fun : Union[NNInitFunc,Tuple[NNInitFunc,NNInitFunc]] = orthogonal()
"""Initializer for the variational parameters"""
coeffs : HashableArray = HashableArray(np.ones(1))
"""Coefficients of the terms in the linear combination of Slater determinants"""
apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations"""
symmetrization: str = 'kernel'
"""Symmetrization method"""
spin_symmetry_by_structure: bool = False
"""Flag determines whether the S^2 symmetry (with S=0) should be enforced
by using the same orbitals for up and down spin.
"""
apply_fast_update: bool = True
"""Whether fast update is used in the computation of the Slater determinants"""
@nn.compact
def __call__(self, x: Array) -> Array:
assert self.n_determinants == self.coeffs.shape[0]
if len(x.shape) == 1:
x = jnp.expand_dims(x, 0)
x = jnp.asarray(x, jnp.int32) # (B, L)
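        # Two symmetrization schemes: 'kernel' sums the determinant amplitudes over all
        # symmetry copies before applying the odd sinh nonlinearity, while 'projective'
        # applies the sinh per symmetry copy and sums afterwards (a projective
        # symmetrization of the antisymmetrized amplitude).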
if self.symmetrization == 'kernel':
def out_transformation(y):
y = jnp.asarray(self.coeffs)*jnp.exp(y) # (B, M, S, T)
y = jnp.sum(y, axis=1) # (B, S, T)
y = jnp.sinh(jnp.sum(y, axis=(-2, -1))) # (B,)
y = jnp.log(y)
return y
elif self.symmetrization == 'projective':
def out_transformation(y):
y = jnp.asarray(self.coeffs)*jnp.exp(y) # (B, M, S, T)
y = jnp.sinh(jnp.sum(y, axis=1)) # (B, S, T)
y = jnp.sum(y, axis=(-2, -1)) # (B,)
y = jnp.log(y)
                return y
        else:
            raise ValueError(f"Unknown symmetrization method: {self.symmetrization}")
log_psi = Slater(
self.hilbert,
self.n_determinants,
dtype=self.dtype,
init_fun=self.init_fun,
symmetries=self.apply_symmetries,
out_transformation=out_transformation,
apply_fast_update=self.apply_fast_update,
spin_symmetry_by_structure=self.spin_symmetry_by_structure
)(x)
return log_psi
class ASymmqGPSProd(nn.Module):
"""
Implements the antisymmetric qGPS Ansatz as an odd product of antisymmetric wavefunctions
"""
hilbert: FermionicDiscreteHilbert
"""Hilbert space"""
n_determinants: int = 1
"""Number of determinants"""
dtype: DType = jnp.complex128
"""Type of the variational parameters"""
init_fun : NNInitFunc = orthogonal()
"""Initializer for the variational parameters"""
apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations"""
spin_symmetry_by_structure: bool = False
"""Flag determines whether the S^2 symmetry (with S=0) should be enforced
by using the same orbitals for up and down spin.
"""
apply_fast_update: bool = True
"""Whether fast update is used in the computation of the Slater determinants"""
def setup(self):
assert self.n_determinants % 2 != 0
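        # sinh is odd, and a product of an odd number of odd (antisymmetric) factors is
        # itself odd, so an odd n_determinants preserves the antisymmetry of the Ansatz;
        # an even product would be symmetric under electron exchange.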
@nn.compact
def __call__(self, x: Array) -> Array:
if len(x.shape) == 1:
x = jnp.expand_dims(x, 0)
x = jnp.asarray(x, jnp.int32) # (B, L)
def out_transformation(y):
y = jnp.exp(y) # (B, M, S, T)
y = jnp.prod(jnp.sinh(y), axis=1) # (B, S, T)
y = jnp.sum(y, axis=(-2, -1)) # (B,)
y = jnp.log(y)
return y
log_psi = Slater(
self.hilbert,
self.n_determinants,
dtype=self.dtype,
init_fun=self.init_fun,
symmetries=self.apply_symmetries,
out_transformation=out_transformation,
apply_fast_update=self.apply_fast_update,
spin_symmetry_by_structure=self.spin_symmetry_by_structure
)(x)
return log_psi
| 4,806 | 35.416667 | 93 | py |
GPSKet | GPSKet-master/GPSKet/models/autoreg_plaquetteqGPS.py | import jax
import jax.numpy as jnp
import numpy as np
from jax.scipy.special import logsumexp
from typing import Tuple, Union, Optional
from netket.utils import HashableArray
from netket.hilbert.homogeneous import HomogeneousHilbert
from netket.utils.types import NNInitFunc, Array, DType, Callable
from jax.nn.initializers import zeros
from GPSKet.nn.initializers import normal
from .autoreg_qGPS import AbstractARqGPS, gpu_cond, _normalize
class ARPlaquetteqGPS(AbstractARqGPS):
"""
Implements the autoregressive formulation of the plaquette qGPS Ansatz with weight sharing,
support for symmetries and Hilbert spaces constrained to the zero magnetization sector.
"""
M: int
"""Bond dimension"""
plaquettes: HashableArray
"""Plaquette for each site"""
masks: HashableArray
"""Autoregressive mask for each site"""
dtype: DType = jnp.complex128
"""Type of the variational parameters"""
machine_pow: int = 2
"""Exponent required to normalize the output"""
init_fun: Optional[NNInitFunc] = None # Defaults to qGPS-normal (sigma = 0.01) with the parameter dtype
"""Initializer for the variational parameters"""
normalize: bool=True
"""Whether the Ansatz should be normalized"""
apply_symmetries: Callable = lambda inputs : jnp.expand_dims(inputs, axis=-1)
"""Function to apply symmetries to configurations"""
# TODO: extend to cases beyond D=2
count_spins: Callable = lambda spins : jnp.stack([(spins+1)&1, ((spins+1)&2)/2], axis=-1).astype(jnp.int32)
"""Function to count down and up spins"""
# TODO: extend to cases where total_sz != 0
renormalize_log_psi: Callable = lambda n_spins, hilbert, index: jnp.log(jnp.heaviside(hilbert.size//2-n_spins, 0))
"""Function to renormalize conditional log probabilities"""
out_transformation: Callable=lambda argument: jnp.sum(argument, axis=-1)
"""Function of the output layer, by default sums over bond dimension"""
# Dimensions:
# - B = batch size
# - D = local dimension
# - L = number of sites
# - M = bond dimension
# - T = number of symmetries
def _conditional(self, inputs: Array, args: Tuple) -> Array:
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L)
# Compute conditional probability for site at index
log_psi = _conditional(self, inputs, args) # (B, D)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
p = jnp.exp(self.machine_pow*log_psi.real)
return p
def conditionals(self, inputs: Array) -> Array:
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L)
# Compute conditional probabilities for all sites
log_psi = _conditionals(self, inputs) # (B, L, D)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow)
p = jnp.exp(self.machine_pow*log_psi.real)
return p
def setup(self):
if self.init_fun is None:
init = normal(sigma = 0.01, dtype=self.dtype)
else:
init = self.init_fun
self._epsilon = self.param("epsilon", init, (self.hilbert.local_size, self.M, self.hilbert.size), self.dtype)
if self.hilbert.constrained:
self._n_spins = self.variable("cache", "spins", zeros, None, (1, self.hilbert.local_size))
def __call__(self, inputs: Array) -> Array:
if jnp.ndim(inputs) == 1:
inputs = jnp.expand_dims(inputs, axis=0) # (B, L)
# Transform inputs according to symmetries
inputs = self.apply_symmetries(inputs) # (B, L, T)
n_symm = inputs.shape[-1]
# Convert input configurations into indices
inputs = self.hilbert.states_to_local_indices(inputs) # (B, L, T)
batch_size = inputs.shape[0]
# Compute conditional log-probabilities
log_psi = jax.vmap(_conditionals, in_axes=(None, -1), out_axes=-1)(self, inputs) # (B, L, D, T)
if self.normalize:
log_psi = _normalize(log_psi, self.machine_pow, axis=-2)
# Take conditionals along sites-axis according to input indices
log_psi = jnp.take_along_axis(log_psi, jnp.expand_dims(inputs, axis=2), axis=2) # (B, L, 1, T)
log_psi = jnp.sum(log_psi, axis=1) # (B, 1, T)
log_psi = jnp.reshape(log_psi, (batch_size, n_symm)) # (B, T)
# Compute symmetrized log-amplitudes
        log_psi_symm_re = (1/self.machine_pow)*logsumexp(self.machine_pow*log_psi.real, axis=-1, b=1/n_symm)
        log_psi_symm_im = logsumexp(1j*log_psi.imag, axis=-1).imag
        log_psi_symm = log_psi_symm_re+1j*log_psi_symm_im
return log_psi_symm # (B,)
def _compute_conditional(hilbert: HomogeneousHilbert, n_spins: Array, epsilon: Array, mask: Array, plaquette: Array, inputs: Array, index: int, count_spins: Callable, renormalize_log_psi: Callable, out_transformation: Callable) -> Union[Array, Array]:
# Slice inputs at index-1 to count previous spins
inputs_i = inputs[:, index-1] # (B,)
# Retrieve input parameters at j=0
input_param = jnp.asarray(epsilon, epsilon.dtype)[:, :, 0] # (D, M)
input_param = jnp.expand_dims(input_param, axis=0) # (1, D, M)
# Compute product of parameters at 0<j<=index
    context_param = jnp.where(mask, epsilon, 1.) # (D, M, L)
context_param = jnp.expand_dims(context_param, axis=0) # (1, D, M, L)
inputs = jnp.expand_dims(inputs[:, plaquette], axis=(1,2)) # (B, 1, 1, L)
context_val = jnp.take_along_axis(context_param, inputs, axis=1) # (B, 1, M, L)
context_prod = jnp.prod(context_val, axis=-1) # (B, 1, M)
site_prod = input_param * context_prod # (B, D, M)
# Compute log conditional probabilities
log_psi = out_transformation(site_prod) # (B, D)
# Update spins count if index is larger than 0, otherwise leave as is
n_spins = gpu_cond(
index > 0,
lambda n_spins: n_spins + count_spins(inputs_i),
lambda n_spins: n_spins,
n_spins
)
# If Hilbert space associated with the model is constrained, i.e.
# model has "n_spins" in "cache" collection, then impose total magnetization.
# This is done by counting number of up/down spins until index, then if
# n_spins is >= L/2 the probability of up/down spin at index should be 0,
# i.e. the log probability becomes -inf
log_psi = gpu_cond(
index >= 0,
lambda log_psi: log_psi+renormalize_log_psi(n_spins, hilbert, index),
lambda log_psi: log_psi,
log_psi
)
return n_spins, log_psi
def _conditional(model: ARPlaquetteqGPS, inputs: Array, args: Tuple) -> Array:
# Retrieve spins count
batch_size = inputs.shape[0]
if model.has_variable("cache", "spins"):
n_spins = model._n_spins.value
n_spins = jnp.asarray(n_spins, jnp.int32)
n_spins = jnp.resize(n_spins, (batch_size, model.hilbert.local_size)) # (B, D)
else:
n_spins = jnp.zeros((batch_size, model.hilbert.local_size), jnp.int32)
# Compute log conditional probabilities
index, mask, plaquette = args
n_spins, log_psi = _compute_conditional(model.hilbert, n_spins, model._epsilon, mask, plaquette, inputs, index, model.count_spins, model.renormalize_log_psi, model.out_transformation)
# Update model cache
if model.has_variable("cache", "spins"):
model._n_spins.value = n_spins
return log_psi # (B, D)
def _conditionals(model: ARPlaquetteqGPS, inputs: Array) -> Array:
# Loop over sites while computing log conditional probabilities
def _scan_fun(n_spins, args):
index, mask, plaquette = args
n_spins, log_psi_cond = _compute_conditional(model.hilbert, n_spins, model._epsilon, mask, plaquette, inputs, index, model.count_spins, model.renormalize_log_psi, model.out_transformation)
n_spins = gpu_cond(
model.hilbert.constrained,
lambda n_spins: n_spins,
lambda n_spins: jnp.zeros_like(n_spins),
n_spins
)
return n_spins, log_psi_cond
batch_size = inputs.shape[0]
n_spins = jnp.zeros((batch_size, model.hilbert.local_size), jnp.int32)
indices = jnp.arange(model.hilbert.size)
masks = np.asarray(model.masks, np.int32)
plaquettes = np.asarray(model.plaquettes, np.int32)
_, log_psi = jax.lax.scan(
_scan_fun,
n_spins,
(indices, masks, plaquettes)
)
log_psi = jnp.transpose(log_psi, [1, 0, 2])
return log_psi # (B, L, D) | 8,576 | 43.21134 | 251 | py |
GPSKet | GPSKet-master/GPSKet/supervised/imag_time_step.py | import netket as nk
import jax
import jax.numpy as jnp
import copy
from netket.vqs.mc import get_local_kernel_arguments, get_local_kernel
from netket.utils import wrap_afun
from flax.core import freeze
from functools import partial
class ImagTimeStep():
def __init__(self, vstate, hamiltonian):
self.vstate = vstate
self.hamiltonian = hamiltonian
def get_local_energies(self, samples):
        # This is a little bit hacky; the interface should probably be improved at some point
old_samples = self.vstate._samples
self.vstate._samples = samples
loc_ens = self.vstate.local_estimators(self.hamiltonian, chunk_size=self.vstate.chunk_size)
self.vstate._samples = old_samples
return loc_ens
def log_imag_time_step(self, tau, samples):
samples_reshaped = samples.reshape((-1, samples.shape[-1]))
self.log_amps = self.vstate.log_value(samples_reshaped)
self.local_energies = self.get_local_energies(samples_reshaped)
return self.log_amps + jnp.log(1 - tau * self.local_energies)
def get_imag_time_step_vstate(tau, hamiltonian, vstate):
"""Returns a variational state with a first order imaginary time evolved model (i.e. (1 - tau H)|Psi>)
based on a given variational state, can therefore be nested arbitrarily often.
Fast updating (if requested) is currently only applied in the innermost local energy
evaluations (which could be slightly improved in the future...).
CAREFUL: This is only correct for custom Hamiltonians evaluating the energy on the fly (without explicitly
generating the connected configurations).
TODO: Add check that evaluation is sensible with the given Hamiltonian (not entirely trivial)
Args:
tau: Propagation time
hamiltonian: The Hamiltonian for the imaginary time evolution
vstate: The original variational state
Returns:
The variational state with updated model (based on a single imaginary time step)
"""
log_model = vstate._apply_fun
_, args = get_local_kernel_arguments(vstate, hamiltonian)
if vstate.chunk_size is None:
local_estimator_fun = get_local_kernel(vstate, hamiltonian)
else:
local_estimator_fun = get_local_kernel(vstate, hamiltonian, vstate.chunk_size)
def imag_time_model_log_amp(model_pars, samples):
pars, tau = freeze(model_pars).pop("tau")
samps = samples.reshape((-1, samples.shape[-1]))
loc_ens = local_estimator_fun(log_model, pars, samps, args)
log_amps = log_model(pars, samps)
        return log_amps + jnp.log(1/tau - loc_ens) + jnp.log(tau)
new_vstate = copy.deepcopy(vstate)
new_vstate._apply_fun = imag_time_model_log_amp
new_vstate._model = wrap_afun(imag_time_model_log_amp)
new_vstate.model_state = {"tau": tau, **new_vstate.model_state}
return new_vstate | 2,952 | 43.074627 | 110 | py |
GPSKet | GPSKet-master/GPSKet/hilbert/discrete_fermion.py | from typing import Optional, Tuple
from numba import jit
import numpy as np
import jax
import jax.numpy as jnp
import netket as nk
from netket.hilbert.custom_hilbert import HomogeneousHilbert
class FermionicDiscreteHilbert(HomogeneousHilbert):
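    """Hilbert space of discrete fermionic occupation numbers. Each site carries one of
    four local states encoded as 0 = empty, 1 = spin-up, 2 = spin-down, 3 = doubly
    occupied (so bit 0 flags an up electron and bit 1 a down electron)."""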
def __init__(
self,
N: int = 1,
n_elec: Optional[Tuple[int, int]] = None
):
local_states = np.arange(4, dtype=np.uint8)
local_states = local_states.tolist()
if n_elec is not None:
def constraints(x):
return self._sum_constraint(x, n_elec)
else:
constraints = None
        self._n_elec = n_elec if n_elec is None else tuple(n_elec)
self._local_size = 4
super().__init__(local_states, N, constraints)
from .random import discrete_fermion
@staticmethod
@jit(nopython=True)
def _sum_constraint(x, n_elec):
result = np.ones(x.shape[0])
for i in range(x.shape[0]):
n_up = 0
n_down = 0
for j in range(x.shape[1]):
if x[i,j] == 1. or x[i,j] == 3.:
n_up += 1
if x[i,j] == 2. or x[i,j] == 3.:
n_down += 1
if n_up == n_elec[0] and n_down == n_elec[1]:
result[i] = 1
else:
result[i] = 0
return result == 1.
    def __pow__(self, n):
        if self._n_elec is None:
            n_elec = None
        else:
            n_elec = (self._n_elec[0] * n, self._n_elec[1] * n)
        return FermionicDiscreteHilbert(self.size * n, n_elec=n_elec)
def __repr__(self):
n_elec = (", n_up={}, n_down={}".format(self._n_elec[0], self._n_elec[1]) if self._n_elec is not None else "")
return "Fermion(N={} {}))".format(self.size, n_elec)
def states_to_local_indices(self, x):
return x.astype(jnp.uint8) | 1,881 | 26.676471 | 118 | py |
GPSKet | GPSKet-master/GPSKet/hilbert/random/discrete_fermion.py | import numpy as np
import netket as nk
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
import jax
import jax.numpy as jnp
@nk.hilbert.random.random_state.dispatch
def random_state(hilb: FermionicDiscreteHilbert, key, batches: int, *, dtype=jnp.uint8):
shape = (batches, hilb.size)
if hilb._n_elec is None:
        out = jax.random.choice(key, jnp.array(hilb.local_states, dtype), shape=shape)
else:
        def scan_fun(key, val):
            # Split off independent keys for the up- and down-spin placements so that
            # the carried key is never reused for sampling
            key, key_up, key_down = jax.random.split(key, 3)
            up_pos = jax.random.choice(key_up, hilb.size, shape=(hilb._n_elec[0],), replace=False)
            down_pos = jax.random.choice(key_down, hilb.size, shape=(hilb._n_elec[1],), replace=False)
out = jnp.zeros(hilb.size, dtype=dtype)
out = out.at[up_pos].add(1)
out = out.at[down_pos].add(2)
return key, out
out = jax.lax.scan(scan_fun, key, None, length=batches)[1]
return out | 948 | 38.541667 | 100 | py |
GPSKet | GPSKet-master/GPSKet/hilbert/random/discrete_asep.py | import netket as nk
from GPSKet.hilbert import ASEPDiscreteHilbert
import jax
import jax.numpy as jnp
@nk.hilbert.random.random_state.dispatch
def random_state(hilb: ASEPDiscreteHilbert, key, batches: int, *, dtype=jnp.uint8):
shape = (batches, hilb.size)
out = jax.random.choice(key, jnp.array(hilb.local_states, dtype), shape=shape)
return out | 359 | 31.727273 | 83 | py |
GPSKet | GPSKet-master/GPSKet/optimizer/solvers.py | import jax.numpy as jnp
from netket.jax import tree_ravel
def pinv(A, b, rcond=1e-12, x0=None):
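    """Dense pseudo-inverse solver following netket's linear-solver interface: ``A`` is a
    LinearOperator (densified via ``to_dense``), ``b`` the gradient PyTree. Returns
    ``(solution, info)``; ``x0`` is accepted for interface compatibility but unused."""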
del x0
A = A.to_dense()
b, unravel = tree_ravel(b)
A_inv = jnp.linalg.pinv(A, rcond=rcond, hermitian=True)
x = jnp.dot(A_inv, b)
return unravel(x), None | 274 | 24 | 59 | py |
GPSKet | GPSKet-master/GPSKet/optimizer/sr_rmsprop.py | import jax
import jax.numpy as jnp
from jax.tree_util import tree_map
from dataclasses import dataclass
from typing import Callable, Optional
from netket.utils.types import PyTree, Scalar
from netket.vqs import VariationalState
from netket.optimizer.preconditioner import AbstractLinearPreconditioner
from .qgt import QGTJacobianDenseRMSProp
@dataclass
class SRRMSProp(AbstractLinearPreconditioner):
def __init__(
self,
params_structure: PyTree,
qgt: Callable = QGTJacobianDenseRMSProp,
solver: Callable = jax.scipy.sparse.linalg.cg,
*,
diag_shift: Scalar = 0.01,
decay: Scalar = 0.9,
eps: Scalar = 1e-8,
initial_scale: Scalar = 0.0,
**kwargs,
):
self.qgt_constructor = qgt
self.qgt_kwargs = kwargs
assert (diag_shift >= 0.0) and (diag_shift <= 1.0)
self.diag_shift = diag_shift
self.decay = decay
self.eps = eps
super().__init__(solver)
self._ema = tree_map(
lambda p: jnp.full(p.shape, initial_scale, p.dtype),
params_structure
)
del params_structure
def lhs_constructor(self, vstate: VariationalState, ema: PyTree, step: Optional[Scalar] = None):
return self.qgt_constructor(
vstate,
ema,
diag_shift=self.diag_shift,
eps=self.eps,
**self.qgt_kwargs
)
def __call__(self, vstate: VariationalState, gradient: PyTree, step: Optional[Scalar] = None) -> PyTree:
# Update exponential moving average
def update_ema(nu, g):
if jnp.iscomplexobj(g):
# This assumes that the parameters are split into complex and real parts later on (done in the QGT implementation)
squared_g = (g.real**2 + 1.j * g.imag**2)
else:
squared_g = (g**2)
return self.decay*nu + (1-self.decay)* squared_g
self._ema = tree_map(
update_ema,
self._ema,
gradient
)
# Compute bias correction
        t = (step if step is not None else 0) + 1
ema_hat = tree_map(
lambda nu: nu / (1-self.decay**t),
self._ema
)
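        # The resulting linear system is
        #   [(1 - diag_shift) * S + diag_shift * diag(sqrt(ema_hat) + eps)] dx = gradient,
        # i.e. the SR metric interpolated with an RMSProp-style adaptive diagonal
        # (see QGTJacobianDenseRMSProp).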
# Compute S matrix
self._lhs = self.lhs_constructor(vstate, ema_hat, step)
# Solve system
self.x0, self.info = self._lhs.solve(self.solver, gradient)
return self.x0 | 2,432 | 29.797468 | 130 | py |
GPSKet | GPSKet-master/GPSKet/optimizer/sr_dense.py | import jax
import jax.numpy as jnp
import netket.jax as nkjax
from dataclasses import dataclass
from typing import Callable, Optional, Any
from netket.utils.types import PyTree, Scalar, ScalarOrSchedule
from netket.vqs import VariationalState
from netket.optimizer.preconditioner import AbstractLinearPreconditioner
from netket.optimizer.qgt import QGTJacobianDense
from .solvers import pinv
@dataclass
class SRDense(AbstractLinearPreconditioner):
def __init__(
self,
qgt: QGTJacobianDense,
solver: Callable = pinv
):
self.qgt_constructor = qgt
super().__init__(solver)
def lhs_constructor(self, vstate: VariationalState, step: Optional[Scalar] = None):
return self.qgt_constructor(vstate)
    def __call__(self, vstate: VariationalState, gradient: PyTree, step: Optional[Scalar] = None) -> PyTree:
        # Compute S matrix
        self._lhs = self.lhs_constructor(vstate, step)
        # Solve the system; the solver (e.g. solvers.pinv) receives the QGT operator
        # and handles densification and gradient raveling internally
        self.x0, self.info = self._lhs.solve(self.solver, gradient)
        return self.x0
GPSKet | GPSKet-master/GPSKet/optimizer/qgt/qgt_jacobian_dense_rmsprop.py | import jax
import jax.numpy as jnp
import netket.jax as nkjax
from flax import struct
from typing import Optional, Union
from netket.utils import mpi
from netket.nn import split_array_mpi
from netket.utils.types import PyTree, Scalar
from netket.optimizer import LinearOperator
from netket.optimizer.linear_operator import Uninitialized
from netket.optimizer.qgt.common import check_valid_vector_type
from GPSKet.vqs import MCStateUniqueSamples
from functools import partial
def QGTJacobianDenseRMSProp(
vstate=None,
ema=None,
mode: str = None,
holomorphic: bool = None,
diag_shift=None,
eps=None,
chunk_size=None,
**kwargs,
) -> "QGTJacobianDenseRMSPropT":
if mode is not None and holomorphic is not None:
raise ValueError("Cannot specify both `mode` and `holomorphic`.")
if vstate is None:
return partial(QGTJacobianDenseRMSProp, mode=mode, holomorphic=holomorphic,
diag_shift=diag_shift, eps=eps, chunk_size=chunk_size, **kwargs)
assert diag_shift >= 0.0 and diag_shift <= 1.0
# TODO: Find a better way to handle this case
from netket.vqs import ExactState
if isinstance(vstate, ExactState):
samples = split_array_mpi(vstate._all_states)
pdf = split_array_mpi(vstate.probability_distribution())
elif isinstance(vstate, MCStateUniqueSamples):
samples, pdf = vstate.samples_with_counts
else:
samples = vstate.samples
pdf = None
if mode is None:
mode = nkjax.jacobian_default_mode(
vstate._apply_fun,
vstate.parameters,
vstate.model_state,
samples,
mode=mode,
holomorphic=holomorphic,
)
if mode == "holomorphic":
raise ValueError("Mode cannot be holomorphic for the QGT with RMSProp diagonal shift")
if chunk_size is None and hasattr(vstate, "chunk_size"):
chunk_size = vstate.chunk_size
jacobians = nkjax.jacobian(
vstate._apply_fun,
vstate.parameters,
samples.reshape(-1, samples.shape[-1]),
vstate.model_state,
mode=mode,
pdf=pdf,
chunk_size=chunk_size,
dense=True,
center=True,
)
pars_struct = jax.tree_map(
lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype), vstate.parameters
)
return QGTJacobianDenseRMSPropT(
O=jacobians,
diag_shift=diag_shift,
eps=eps,
ema=ema,
mode=mode,
_params_structure=pars_struct,
**kwargs,
)
@struct.dataclass
class QGTJacobianDenseRMSPropT(LinearOperator):
O: jnp.ndarray = Uninitialized
diag_shift: float = Uninitialized
eps: float = Uninitialized
ema: PyTree = Uninitialized
mode: str = struct.field(pytree_node=False, default=Uninitialized)
_in_solve: bool = struct.field(pytree_node=False, default=False)
_params_structure: PyTree = struct.field(pytree_node=False, default=Uninitialized)
@jax.jit
def __matmul__(self, vec: Union[PyTree, jnp.ndarray]) -> Union[PyTree, jnp.ndarray]:
if not hasattr(vec, "ndim") and not self._in_solve:
check_valid_vector_type(self._params_structure, vec)
vec, reassemble = convert_tree_to_dense_format(
vec, self.mode, disable=self._in_solve
)
ema, _ = convert_tree_to_dense_format(self.ema, self.mode)
result = mat_vec(vec, self.O, self.diag_shift, ema, self.eps)
return reassemble(result)
@jax.jit
def _solve(self, solve_fun, y: PyTree, *, x0: Optional[PyTree] = None) -> PyTree:
if not hasattr(y, "ndim"):
check_valid_vector_type(self._params_structure, y)
y, reassemble = convert_tree_to_dense_format(y, self.mode)
if x0 is not None:
x0, _ = convert_tree_to_dense_format(x0, self.mode)
insolve_self = self.replace(_in_solve=True)
out, info = solve_fun(insolve_self, y, x0=x0)
return reassemble(out), info
@jax.jit
def to_dense(self) -> jnp.ndarray:
"""
Convert the lazy matrix representation to a dense matrix representation.
Returns:
A dense matrix representation of this S matrix.
"""
# Concatenate samples with real/imaginary dimension
O = self.O
O = O.reshape(-1, O.shape[-1])
# Compute S matrix
S = mpi.mpi_sum_jax(O.conj().T @ O)[0]
# Compute diagonal shift and apply it to S matrix
ema, _ = convert_tree_to_dense_format(self.ema, self.mode)
diag = jnp.diag(jnp.sqrt(ema) + self.eps)
return (1-self.diag_shift)*S + self.diag_shift * diag
def __repr__(self):
return (
f"QGTJacobianDenseRMSProp(diag_shift={self.diag_shift}, mode={self.mode})"
)
########################################################################################
##### QGT Logic #####
########################################################################################
def mat_vec(v: PyTree, O: PyTree, diag_shift: Scalar, ema: PyTree, eps: Scalar) -> PyTree:
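    # Computes [(1 - diag_shift) * OᴴO + diag_shift * diag(sqrt(ema) + eps)] @ v,
    # where O is the centered Jacobian; the OᴴO part is evaluated matrix-free.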
w = O @ v
res = jnp.tensordot(w.conj(), O, axes=w.ndim).conj()
res = mpi.mpi_sum_jax(res)[0]
return (1-diag_shift) * res + diag_shift * (jnp.sqrt(ema) + eps) * v
def convert_tree_to_dense_format(vec, mode, *, disable=False):
"""
Converts an arbitrary PyTree/vector which might be real/complex
to the dense-(maybe-real)-vector used for QGTJacobian.
The format is dictated by the sequence of operations chosen by
`nk.jax.jacobian(..., dense=True)`. As `nk.jax.jacobian` first
converts the pytree of parameters to real and then concatenates
real and imaginary terms with a tree_ravel, we must do the same
in here.
"""
unravel = lambda x: x
reassemble = lambda x: x
if not disable:
if mode != "holomorphic":
vec, reassemble = nkjax.tree_to_real(vec)
if not hasattr(vec, "ndim"):
vec, unravel = nkjax.tree_ravel(vec)
return vec, lambda x: reassemble(unravel(x)) | 6,141 | 32.380435 | 94 | py |
GPSKet | GPSKet-master/GPSKet/optimizer/qgt/qgt_onthefly_rmsprop.py | import jax
import jax.numpy as jnp
import netket.jax as nkjax
from flax import struct
from typing import Optional, Union, Callable
from functools import partial
from jax.tree_util import Partial, tree_map
from netket.utils import mpi
from netket.utils.types import PyTree
from netket.stats import subtract_mean
from netket.jax import tree_conj
from netket.optimizer import LinearOperator
from netket.optimizer.linear_operator import Uninitialized
from netket.optimizer.qgt.common import check_valid_vector_type
def mat_vec(jvp_fn, v, diag_shift, ema, eps):
# Save linearisation work
# TODO move to mat_vec_factory after jax v0.2.19
vjp_fn = jax.linear_transpose(jvp_fn, v)
w = jvp_fn(v)
w = w * (1.0 / (w.size * mpi.n_nodes))
w = subtract_mean(w) # w/ MPI
# Oᴴw = (wᴴO)ᴴ = (w* O)* since 1D arrays are not transposed
# vjp_fn packages output into a length-1 tuple
(res,) = tree_conj(vjp_fn(w.conjugate()))
res = tree_map(lambda x: mpi.mpi_sum_jax(x)[0], res)
# (1-diag_shift) * res + diag_shift * (sqrt(ema)+eps) * v
return tree_map(
lambda r_, e_, v_: (1-diag_shift) * r_ + diag_shift * (jnp.sqrt(e_) + eps) * v_,
res,
ema,
v
)
@partial(jax.jit, static_argnums=0)
def mat_vec_factory(forward_fn, params, model_state, samples):
# "forward function" that maps params to outputs
def fun(W):
return forward_fn({"params": W, **model_state}, samples)
_, jvp_fn = jax.linearize(fun, params)
return Partial(mat_vec, jvp_fn)
def QGTOnTheFlyRMSProp(
vstate,
ema,
diag_shift=None,
eps=None,
chunk_size=None,
**kwargs,
) -> "QGTOnTheFlyRMSPropT":
assert diag_shift >= 0.0 and diag_shift <= 1.0
# TODO: Find a better way to handle this case
from netket.vqs import ExactState
if isinstance(vstate, ExactState):
raise TypeError("ExactState is not supported. Use QGTJacobianDenseRMSProp instead.")
from GPSKet.vqs import MCStateUniqueSamples
if isinstance(vstate, MCStateUniqueSamples):
raise TypeError("Unique samples state with on-the-fly QGT is not supported.")
if jnp.ndim(vstate.samples) == 2:
samples = vstate.samples
else:
samples = vstate.samples.reshape((-1, vstate.samples.shape[-1]))
if chunk_size is not None:
raise ValueError("Chunking is not support yet.")
n_samples = samples.shape[0]
if chunk_size is None or chunk_size >= n_samples:
mv_factory = mat_vec_factory
chunking = False
mat_vec = mv_factory(
forward_fn=vstate._apply_fun,
params=vstate.parameters,
model_state=vstate.model_state,
samples=samples,
)
return QGTOnTheFlyRMSPropT(
diag_shift=diag_shift,
eps=eps,
ema=ema,
_mat_vec=mat_vec,
_params=vstate.parameters,
_chunking=chunking
)
@struct.dataclass
class QGTOnTheFlyRMSPropT(LinearOperator):
diag_shift: float = Uninitialized
eps: float = Uninitialized
ema: PyTree = Uninitialized
_mat_vec: Callable[[PyTree, float], PyTree] = Uninitialized
_params: PyTree = Uninitialized
_chunking: bool = struct.field(pytree_node=False, default=False)
def __matmul__(self, y):
return onthefly_mat_treevec(self, y)
def _solve(self, solve_fun, y: PyTree, *, x0: Optional[PyTree], **kwargs) -> PyTree:
return _solve(self, solve_fun, y, x0=x0)
def to_dense(self) -> jnp.ndarray:
"""
Convert the lazy matrix representation to a dense matrix representation.
Returns:
A dense matrix representation of this S matrix.
"""
return _to_dense(self)
def __repr__(self):
return f"QGTOnTheFlyRMSProp(diag_shift={self.diag_shift})"
########################################################################################
##### QGT Logic #####
########################################################################################
@jax.jit
def onthefly_mat_treevec(
S: QGTOnTheFlyRMSPropT, vec: Union[PyTree, jnp.ndarray]
) -> Union[PyTree, jnp.ndarray]:
"""
Perform the lazy mat-vec product, where vec is either a tree with the same structure as
params or a ravelled vector
"""
# if has a ndim it's an array and not a pytree
if hasattr(vec, "ndim"):
if not vec.ndim == 1:
raise ValueError("Unsupported mat-vec for chunks of vectors")
# If the input is a vector
if not nkjax.tree_size(S._params) == vec.size:
            raise ValueError(
                f"Size mismatch between number of parameters "
                f"({nkjax.tree_size(S._params)}) and vector size ({vec.size})."
            )
_, unravel = nkjax.tree_ravel(S._params)
vec = unravel(vec)
ravel_result = True
else:
ravel_result = False
check_valid_vector_type(S._params, vec)
vec = nkjax.tree_cast(vec, S._params)
res = S._mat_vec(vec, S.diag_shift, S.ema, S.eps)
if ravel_result:
res, _ = nkjax.tree_ravel(res)
return res
@jax.jit
def _solve(
self: QGTOnTheFlyRMSPropT, solve_fun, y: PyTree, *, x0: Optional[PyTree], **kwargs
) -> PyTree:
check_valid_vector_type(self._params, y)
y = nkjax.tree_cast(y, self._params)
# we could cache this...
if x0 is None:
x0 = jax.tree_map(jnp.zeros_like, y)
out, info = solve_fun(self, y, x0=x0)
return out, info
@jax.jit
def _to_dense(self: QGTOnTheFlyRMSPropT) -> jnp.ndarray:
"""
Convert the lazy matrix representation to a dense matrix representation
Returns:
A dense matrix representation of this S matrix.
"""
Npars = nkjax.tree_size(self._params)
I = jax.numpy.eye(Npars)
if self._chunking:
# the linear_call in mat_vec_chunked does currently not have a jax batching rule,
# so it cannot be vmapped but we can use scan
# which is better for reducing the memory consumption anyway
_, out = jax.lax.scan(lambda _, x: (None, self @ x), None, I)
else:
out = jax.vmap(lambda x: self @ x, in_axes=0)(I)
if jnp.iscomplexobj(out):
out = out.T
return out | 6,297 | 29.425121 | 92 | py |
GPSKet | GPSKet-master/GPSKet/optimizer/qgt/qgt_jacobian_dense_unique_samples.py | import netket as nk
from netket.optimizer.qgt.qgt_jacobian_dense import QGTJacobianDenseT
import netket.jax as nkjax
from typing import Tuple, Optional, Callable, Any
from netket.utils.types import PyTree
from netket.stats.mpi_stats import (
sum as _sum
)
import jax
import jax.numpy as jnp
from functools import partial
"""
This is essentially a copy of the constructor of the QGTJacobianDense quantum geometric tensor from netket
but it is adjusted so that it can be used with the unique samples variational state,
this is still very all very hacky. TODO: improve!
"""
def QGTJacobianDenseUniqueSamples(vstate=None, *, mode: str = None, holomorphic: bool = None, diag_shift=None, diag_scale=None, **kwargs) -> "QGTJacobianDenseT":
assert("rescale_shift" not in kwargs)
assert(diag_scale is None) # Not yet implemented -> TODO: implement support!
if vstate is None:
return partial(QGTJacobianDenseUniqueSamples, mode=mode, holomorphic=holomorphic)
if mode is None:
mode = nkjax.jacobian_default_mode(vstate._apply_fun, vstate.parameters, vstate.model_state, vstate.samples[0], holomorphic=holomorphic)
else:
assert(holomorphic is None)
    chunk_size = getattr(vstate, "chunk_size", None)
samples, counts = vstate.samples_with_counts
centered_oks = nkjax.jacobian(vstate._apply_fun, vstate.parameters, samples, vstate.model_state, mode=mode, chunk_size=chunk_size, pdf = counts, dense=True, center=True)
pars_struct = jax.tree_map(lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype), vstate.parameters)
return QGTJacobianDenseT(O=centered_oks, mode=mode, _params_structure=pars_struct, diag_shift=diag_shift, **kwargs)
| 1,839 | 35.078431 | 173 | py |
GPSKet | GPSKet-master/tutorials/asep_example.py | import jax
import jax.numpy as jnp
import netket as nk
from scipy.sparse.linalg import eigs
from netket.hilbert import Qubit
from GPSKet.operator.hamiltonian import AsymmetricSimpleExclusionProcess
from GPSKet.models import qGPS, ARqGPS
from GPSKet.sampler import ARDirectSampler
from GPSKet.nn import normal
# Set up Hilbert space
L = 10
hi = Qubit(L)
# Set up ASEP model
lambd = 0.2
alpha = beta = gamma = delta = 0.5
p = q = 0.5
ha = AsymmetricSimpleExclusionProcess(hi, lambd, alpha, beta, gamma, delta, p, q)
# Use Metropolis-Hastings sampler with hopping rule
# sa = nk.sampler.MetropolisLocal(hi, n_chains_per_rank=1)
sa = ARDirectSampler(hi, n_chains_per_rank=300)
# Define the model and the variational state
M = 10
dtype = jnp.float64
init_fun = normal(sigma=0.01, dtype=dtype)
# model = qGPS(hi, M, dtype=dtype)
model = ARqGPS(hi, M, dtype=dtype)
vs = nk.vqs.MCState(sa, model, n_samples=300)
# Optimizer
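# Note: the learning rate is negative so that gradient descent becomes ascent; the
# target here is the largest eigenvalue of the ASEP generator (consistent with
# eigs(..., which="LR") below).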
op = nk.optimizer.Sgd(learning_rate=-0.01)
qgt = nk.optimizer.qgt.QGTJacobianDense(mode="real")
sr = nk.optimizer.SR(qgt=qgt, diag_shift=0.01)
# Variational Monte Carlo driver
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Compute exact energy
gs_energy = eigs(ha.to_linear_operator(), which="LR", k=1, return_eigenvectors=False)[0]
# Run optimization
for it in gs.iter(300,1):
en = gs.energy.mean
print("Iteration: {}, Energy: {}, Abs. energy_error: {}".format(it, en.real, abs(gs_energy - en)), flush=True)
| 1,456 | 28.14 | 114 | py |
GPSKet | GPSKet-master/tutorials/hubbard_example.py | import jax
import jax.numpy as jnp
import netket as nk
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.sampler.fermionic_hopping import MetropolisHopping
from GPSKet.operator.hamiltonian import FermiHubbard
from GPSKet.models import ASymmqGPS
# Set up Hilbert space
L = 6
n_elec = (3, 3)
hi = FermionicDiscreteHilbert(L, n_elec=n_elec)
# Set up lattice
g = nk.graph.Chain(L, pbc=True)
# Set up Fermi-Hubbard model
U = 8
ha = FermiHubbard(hi, g.edges(), U=U)
# Use Metropolis-Hastings sampler with hopping rule
sa = MetropolisHopping(hi, n_chains_per_rank=1)
# Define the model and the variational state
n_dets = 2
dtype = jnp.float64
symmetries = g.automorphisms().to_array().T
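# Each column of `symmetries` is a site permutation of the chain; applying them all to a
# configuration yields its symmetry-transformed copies.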
def apply_symmetries(y):
return jax.vmap(lambda tau: jnp.take(tau, y), in_axes=-1, out_axes=-1)(symmetries)
model = ASymmqGPS(
hi, n_dets,
dtype=dtype,
apply_symmetries=apply_symmetries)
vs = nk.vqs.MCState(sa, model, n_samples=300)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=0.01)
qgt = nk.optimizer.qgt.QGTJacobianDense()
sr = nk.optimizer.SR(qgt=qgt, diag_shift=0.01)
# Variational Monte Carlo driver
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Compute exact energy
# gs_energy = nk.exact.lanczos_ed(ha)[0]
gs_energy = -2.048130885722
# Run optimization
for it in gs.iter(300,1):
en = gs.energy.mean
print("Iteration: {}, Energy: {}, Rel. energy_error: {}".format(it, en.real, abs((gs_energy - en)/gs_energy)), flush=True)
| 1,496 | 26.722222 | 126 | py |
GPSKet | GPSKet-master/tutorials/autoreg_state_fitting_example.py | import os
import optax
import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
import GPSKet as qk
from functools import partial
from GPSKet.datasets.h2o import BasisType
def count_spins_fermionic(spins):
zeros = jnp.zeros(spins.shape[0])
up_spins = spins&1
down_spins = (spins&2)/2
return jnp.stack([zeros, up_spins, down_spins, zeros], axis=-1).astype(jnp.int32)
@partial(jax.vmap, in_axes=(0, None, None))
def renormalize_log_psi_fermionic(n_spins, hilbert, index):
log_psi = jnp.zeros(hilbert.local_size)
diff = jnp.array(hilbert._n_elec, jnp.int32)-n_spins[1:3]
log_psi = jax.lax.cond(
diff[0] == 0,
lambda log_psi: log_psi.at[1].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
diff[1] == 0,
lambda log_psi: log_psi.at[2].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
(diff == 0).any(),
lambda log_psi: log_psi.at[3].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
(diff >= (hilbert.size-index)).any(),
lambda log_psi: log_psi.at[0].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
(diff[0] >= (hilbert.size-index)).any(),
lambda log_psi: log_psi.at[2].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
(diff[1] >= (hilbert.size-index)).any(),
lambda log_psi: log_psi.at[1].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
return log_psi
# Get dataset
dataset = qk.datasets.get_h2o_dataset(basis_type=BasisType.LOCAL, select_largest=500)
# Setup Hilbert space and Hamiltonian
_DATA = "/tmp/GPSKet_data/"
h1 = np.load(os.path.join(_DATA, f"h1_{BasisType.LOCAL}.npy"))
h2 = np.load(os.path.join(_DATA, f"h2_{BasisType.LOCAL}.npy"))
norb = dataset[0].shape[1]
nelec = 10
hi = qk.hilbert.FermionicDiscreteHilbert(N=norb, n_elec=(nelec//2,nelec//2))
ha = qk.operator.hamiltonian.AbInitioHamiltonianOnTheFly(hi, h1 , h2)
# Setup model, sampler, and variational state
model = qk.models.ARqGPS(hi, 2, dtype=jnp.complex128, count_spins=count_spins_fermionic, renormalize_log_psi=renormalize_log_psi_fermionic)
sa = qk.sampler.ARDirectSampler(hi)
vs = nk.vqs.MCState(sa, model, n_samples=1000)
# Initialize optimizer
op = optax.experimental.split_real_and_imaginary(optax.adam(learning_rate=0.01))
# Setup state fitting driver
driver = qk.driver.ARStateFitting(dataset, ha, op, variational_state=vs, mini_batch_size=32)
# Run fitting
n_iters = 1000
for it in driver.iter(n_iters, 1):
print(it, driver.loss, flush=True) | 2,742 | 30.895349 | 139 | py |
GPSKet | GPSKet-master/tutorials/j1j2_example.py | import jax.numpy as jnp
import netket as nk
from mpi4py import MPI
from GPSKet.operator.hamiltonian import get_J1_J2_Hamiltonian
from GPSKet.models import qGPS, ARqGPS, get_sym_transformation_spin
from GPSKet.sampler import ARDirectSampler
from GPSKet.sampler.metropolis_fast import MetropolisFastExchange
# MPI variables
comm = MPI.COMM_WORLD.Create(MPI.COMM_WORLD.Get_group())
rank = comm.Get_rank()
n_nodes = comm.Get_size()
# Parameters
L = 20
M = 2
ansatz = 'qgps'
dtype = jnp.complex128
sampler = 'metropolis-exchange'
batch_size = 100
n_discard = 100
learning_rate = 0.01
diag_shift = 0.01
n_iters = 100
# Compute samples per rank
if batch_size % n_nodes != 0:
raise ValueError("Define a batch size that is a multiple of the number of MPI ranks")
samples_per_rank = batch_size // n_nodes
# Get Hamiltonian, Hilbert space and graph
# The on-the-fly calculation of the local energy is only faster for the
# qGPS model, where fast updating can be performed.
ha = get_J1_J2_Hamiltonian(L, on_the_fly_en=(ansatz == "qgps"))
hi = ha.hilbert
g = ha.graph
# Ansatz model
if ansatz == 'qgps':
model = qGPS(hi, M, dtype=dtype, syms=get_sym_transformation_spin(g))
elif ansatz == 'arqgps':
apply_symmetries, _ = get_sym_transformation_spin(g, spin_flip=False)
model = ARqGPS(hi, M, dtype=dtype, apply_symmetries=apply_symmetries)
# Sampler
if sampler == 'metropolis-exchange' and ansatz == "qgps":
sa = MetropolisFastExchange(hi, graph=g, n_chains=1)
elif sampler == 'metropolis-exchange':
sa = nk.sampler.MetropolisExchange(hi, graph=g, n_chains=1)
elif sampler == 'ar-direct':
sa = ARDirectSampler(hi, n_chains_per_rank=samples_per_rank)
# Variational quantum state
if sa.is_exact:
vs = nk.vqs.MCState(sa, model, n_samples=batch_size)
else:
vs = nk.vqs.MCState(sa, model, n_samples=batch_size, n_discard_per_chain=n_discard)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=learning_rate)
qgt = nk.optimizer.qgt.QGTJacobianDense(holomorphic=(ansatz == 'qgps'))
sr = nk.optimizer.SR(qgt=qgt, diag_shift=diag_shift)
# Variational Monte Carlo driver
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Run optimization
for it in gs.iter(n_iters,1):
print(it,gs.energy, flush=True)
| 2,235 | 30.942857 | 89 | py |
GPSKet | GPSKet-master/tutorials/abinitio_example.py | import netket as nk
import GPSKet.models as qGPS
import numpy as np
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.sampler.fermionic_hopping import MetropolisHopping, MetropolisFastHopping
from GPSKet.operator.hamiltonian.ab_initio import AbInitioHamiltonian, AbInitioHamiltonianOnTheFly
from GPSKet.models import qGPS
from pyscf import scf, gto, ao2mo, fci, lo
from pyscf.tools import ring
import jax.numpy as jnp
"""
This first bit just sets up the Hamiltonian with PySCF.
In particular it gives us the one- and two-electron integrals (h1 and h2) which are required to set up the ab-initio
Hamiltonian with GPSKet/NetKet.
The Hamiltonian is either represented in a canonical or a "local" orbital basis.
"""
local_basis = False
mol = gto.Mole()
# as an example we set up a ring of 8 Hydrogen atoms with dist 1 a0
mol.build(
atom = [('H', x) for x in ring.make(8, 1)],
basis = 'sto-3g',
symmetry = True,
unit="Bohr"
)
nelec = mol.nelectron
print('Number of electrons: ', nelec)
myhf = scf.RHF(mol)
ehf = myhf.scf()
norb = myhf.mo_coeff.shape[1]
print('Number of molecular orbitals: ', norb)
# Get hamiltonian elements
# 1-electron 'core' hamiltonian terms, transformed into MO basis
h1 = np.linalg.multi_dot((myhf.mo_coeff.T, myhf.get_hcore(), myhf.mo_coeff))
# Get 2-electron electron repulsion integrals, transformed into MO basis
eri = ao2mo.incore.general(myhf._eri, (myhf.mo_coeff,)*4, compact=False)
# Previous representation exploited permutational symmetry in storage. Change this to a 4D array.
# Integrals now stored as h2[p,q,r,s] = (pq|rs) = <pr|qs>. Note 8-fold permutational symmetry.
h2 = ao2mo.restore(1, eri, norb)
# Transform to a local orbital basis if wanted
if local_basis:
loc_coeff = lo.orth_ao(mol, 'meta_lowdin')
ovlp = myhf.get_ovlp()
# Check that we still have an orthonormal basis, i.e. C^T S C should be the identity
assert(np.allclose(np.linalg.multi_dot((loc_coeff.T, ovlp, loc_coeff)),np.eye(norb)))
# Find the hamiltonian in the local basis
hij_local = np.linalg.multi_dot((loc_coeff.T, myhf.get_hcore(), loc_coeff))
hijkl_local = ao2mo.restore(1, ao2mo.kernel(mol, loc_coeff), norb)
h1 = hij_local
h2 = hijkl_local
# For testing purposes we can get the GS energy with PySCF, for larger systems this should
# be more efficient than the solver in NetKet.
energy_mo, fcivec_mo = fci.direct_spin1.FCI().kernel(h1, h2, norb, mol.nelectron)
# Note: energy_mo does NOT include the nuclear repulsion energy, the total GS energy is given by
nuc_en = mol.energy_nuc()
gs_energy = energy_mo + nuc_en
"""
This is the GPSKet/NetKet bit of the calculation.
The key elements different from standard netket calculations are:
1.) Setting up a fermionic discrete Hilbert space:
In this Hilbert space configurations are represented as a list of L 8-bit integers
of which only two bits are used. The first (least significant) bit of each int
encodes whether the alpha/spin-up channel is occupied at the particular site and the
second bit encodes whether the beta/spin-down channel is occupied. We might change
    this representation at some point, but currently this is the best trade-off between
memory efficiency and convenience.
2.) Setting up the ab-initio Hamiltonian:
This requires the defined Hilbert space as well as the one and two electron integrals
as generated above. For a detailed description of the Hamiltonian definition see
[Neuscamman (2013), https://doi.org/10.1063/1.4829835].
3.) A sampler to generate configurations:
Non-autoregressive ansatzes require custom transition rules for the Metropolis-Hastings
algorithm to generate proposals. Currently only a hopping transition is implemented
for which a randomly selected electron hops from one site to another (thus always
conserving the total magnetization and electron number of the initial config).
Two different versions of this hopping sampler are currently available,
the MetropolisHopping class which follows the default NetKet design, as well as the
MetropolisFastHopping class which includes the fast update mechanism which can be
used with the qGPS ansatz.
More samplers should probably be implemented in the future.
For the autoregressive ansatz, the direct sampler can be used for the
Fermionic systems but needs to be amended to take electron number and magnetization
conservation into account (if this is wanted).
TODO: this needs to be implemented and checked.
"""
# Set up Hilbert space
hi = FermionicDiscreteHilbert(norb, n_elec=(nelec//2,nelec//2))
# Set up ab-initio Hamiltonian
ha = AbInitioHamiltonianOnTheFly(hi, h1, h2)
# If we want, we can compare the exact energies given by the PySCF and the NetKet solver
# e_mo_nk = nk.exact.lanczos_ed(ha)[0]
# assert(np.allclose(e_mo_nk, energy_mo))
# Use Metropolis-Hastings sampler with hopping rule (including fast updates for qGPS)
sa = MetropolisFastHopping(hi, n_chains_per_rank=1)
# Define the model and the variational state
model = qGPS(hi, 10, dtype=jnp.complex128)
vs = nk.vqs.MCState(sa, model, n_samples=1000)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=0.02)
qgt = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)
sr = nk.optimizer.SR(qgt=qgt)
# Variational Monte Carlo driver
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Run optimization
for it in gs.iter(1000,1):
en = gs.energy.mean + nuc_en
print("Iteration: {}, Energy: {}, Rel. energy_error: {}".format(it, en, abs((gs_energy - en)/gs_energy)), flush=True)
| 5,602 | 39.309353 | 121 | py |
GPSKet | GPSKet-master/scripts/GPS_for_ab_initio/H_chain_timing_analysis.py | import sys
import numpy as np
import jax
import jax.numpy as jnp
from pyscf import scf, gto, ao2mo, lo
import netket as nk
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.sampler.fermionic_hopping import MetropolisHopping
from GPSKet.operator.hamiltonian.ab_initio import AbInitioHamiltonianOnTheFly
from GPSKet.operator.hamiltonian.ab_initio_sparse import AbInitioHamiltonianSparse
import time
from flax import linen as nn
# Input arguments
L = int(sys.argv[1]) # linear dimension
dist = float(sys.argv[2]) # inter-atomic distance (in units of a_0)
pruning_threshold = float(sys.argv[3]) # set to 0 for non-sparse implementation
repeats = 10
# Construct basis + one- and two-electron integrals with PySCF
mol = gto.Mole()
mol.build(
atom = [('H', (x, 0., 0.)) for x in dist*np.arange(L)],
basis = 'sto-6g',
symmetry = True,
unit="Bohr"
)
nelec = mol.nelectron
print('Number of electrons: ', nelec)
norb = nelec
print('Number of molecular orbitals: ', norb)
loc_coeff = lo.orth_ao(mol, 'meta_lowdin')
localizer = lo.Boys(mol, loc_coeff)
localizer.verbose = 4
localizer.init_guess = None
loc_coeff = localizer.kernel()
h1 = np.linalg.multi_dot((loc_coeff.T, scf.hf.get_hcore(mol), loc_coeff))
h2 = ao2mo.restore(1, ao2mo.kernel(mol, loc_coeff), norb)
# Set up Hilbert space
hi = FermionicDiscreteHilbert(norb, n_elec=(nelec//2,nelec//2))
if pruning_threshold != 0.:
h1[abs(h1) < pruning_threshold] = 0.
h2[abs(h2) < pruning_threshold] = 0.
ha = AbInitioHamiltonianSparse(hi, h1, h2)
else:
ha = AbInitioHamiltonianOnTheFly(hi, h1, h2)
# Use Metropolis-Hastings sampler with hopping rule
sa = MetropolisHopping(hi)
class uniform_state(nn.Module):
apply_fast_update: bool = True
@nn.compact
def __call__(self, x, cache_intermediates=False, update_sites=None):
self.variable("intermediates_cache", "dummy_var", lambda : None)
self.param("dummy_param", lambda x: None)
return jnp.zeros(x.shape[:-1])
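# The constant-amplitude dummy model above assigns log(psi) = 0 to every configuration,
# so timing vs.expect(ha) isolates the cost of the local-energy evaluation (connected
# configurations and matrix elements) rather than the cost of evaluating an ansatz.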
# Model definition
model = uniform_state()
# Variational state
vs = nk.vqs.MCState(sa, model, n_samples=100, n_discard_per_chain=100, chunk_size=1)
key = jax.random.PRNGKey(1)
# Compute exp val once to disregard compilation time
samps = hi.random_state(key=key, size=100, dtype=jnp.uint8)
key = jax.random.split(key)[0]
vs._samples = samps
en = vs.expect(ha)
timings = []
for i in range(repeats):
samps = hi.random_state(key=key, size=100, dtype=jnp.uint8)
key = jax.random.split(key)[0]
vs._samples = samps
time_start = time.time()
en = vs.expect(ha)
time_end = time.time()
timings.append((time_end - time_start)/100)
with open("evaluation_timing.txt", "w") as fl:
fl.write("L Mean time Std time\n")
fl.write("{} {} {}\n".format(L, np.mean(np.array(timings)), np.std(np.array(timings)))) | 2,852 | 26.171429 | 93 | py |
GPSKet | GPSKet-master/scripts/GPS_for_ab_initio/H4x4x4.py | import sys
import pickle
from os.path import exists
import numpy as np
import jax.numpy as jnp
from numba import jit
from flax import linen as nn
from pyscf import scf, gto, ao2mo, lo
import netket as nk
from netket.utils.mpi import (
MPI_py_comm as _MPI_comm,
node_number as _rank,
mpi_sum as _mpi_sum
)
from netket.utils.types import Array
import GPSKet
import GPSKet.models as qGPS
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.sampler.fermionic_hopping import MetropolisHopping
from GPSKet.operator.hamiltonian.ab_initio import AbInitioHamiltonianOnTheFly
from GPSKet.nn.initializers import normal
from GPSKet.models import qGPS
from GPSKet.operator.fermion import apply_hopping
# Input arguments
N = int(sys.argv[1]) # GPS support dimension
dist = float(sys.argv[2]) # inter-atomic distance (in units of Å)
n_samples = 10000 # total number of samples (approximate if run in parallel)
L = 4 # linear dimension of the cubic crystal of H atoms
# Construct basis + one- and two-electron integrals with PySCF
mol = gto.Mole()
atoms = []
for x in range(L):
for y in range(L):
for z in range(L):
atoms.append(('H', (x*dist, y*dist, z*dist)))
mol.build(
atom = atoms,
basis = 'sto-6g',
symmetry = True,
unit="A"
)
nelec = mol.nelectron
print('Number of electrons: ', nelec)
myhf = scf.RHF(mol)
ehf = myhf.scf()
norb = myhf.mo_coeff.shape[1]
print('Number of molecular orbitals: ', norb)
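# max_memory is specified in MB, so this value effectively lifts PySCF's memory cap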
mol.max_memory = 1.e9
h1 = np.zeros((norb, norb))
h2 = np.zeros((norb, norb, norb, norb))
if _rank == 0:
if exists("./basis.npy"):
loc_coeff = np.load("./basis.npy")
else:
        loc_coeff = lo.orth_ao(mol, 'meta_lowdin')
localizer = lo.Boys(mol, loc_coeff)
localizer.verbose = 4
localizer.init_guess = None
loc_coeff = localizer.kernel()
np.save("basis.npy", loc_coeff)
if exists("./h1.npy") and exists("./h2.npy"):
h1 = np.load("./h1.npy")
h2 = np.load("./h2.npy")
else:
ovlp = myhf.get_ovlp()
# Check that we still have an orthonormal basis, i.e. C^T S C should be the identity
assert(np.allclose(np.linalg.multi_dot((loc_coeff.T, ovlp, loc_coeff)),np.eye(norb)))
# Find the hamiltonian in the local basis
h1 = np.linalg.multi_dot((loc_coeff.T, myhf.get_hcore(), loc_coeff))
h2 = ao2mo.restore(1, ao2mo.kernel(mol, loc_coeff), norb)
np.save("h1.npy", h1)
np.save("h2.npy", h2)
_MPI_comm.Bcast(h1)
_MPI_comm.barrier()
h2_slice = np.empty((h2.shape[2],h2.shape[3]))
for i in range(h2.shape[0]):
for j in range(h2.shape[1]):
np.copyto(h2_slice, h2[i,j,:,:])
_MPI_comm.Bcast(h2_slice)
_MPI_comm.barrier()
np.copyto(h2[i,j,:,:], h2_slice)
nuc_en = mol.energy_nuc()
# Set up Hilbert space
hi = FermionicDiscreteHilbert(norb, n_elec=(nelec//2,nelec//2))
# Set up ab-initio Hamiltonian
ha = AbInitioHamiltonianOnTheFly(hi, h1, h2)
# Use Metropolis-Hastings sampler with hopping rule
sa = MetropolisHopping(hi, n_sweeps=200, n_chains_per_rank=50)
# Define qGPS x SD model
class SlaterqGPS(nn.Module):
SD: GPSKet.models.Slater
qGPS: GPSKet.models.qGPS
apply_fast_update: bool = True
@nn.compact
def __call__(self, x, cache_intermediates=False, update_sites=None) -> Array:
return self.SD(x, cache_intermediates=cache_intermediates, update_sites=update_sites) + self.qGPS(x, cache_intermediates=cache_intermediates, update_sites=update_sites)
# Run mean-field calcs for initialization of reference state
eigs, vecs = np.linalg.eigh(h1)
mf = scf.RHF(mol)
mf.get_hcore = lambda *args: h1
mf.get_ovlp = lambda *args: np.eye(hi.size)
mf._eri = ao2mo.restore(8, h2, hi.size)
# Assumes RHF
assert (hi._n_elec[0] == hi._n_elec[1])
init_dens = np.dot(vecs[:, :mol.nelectron//2], vecs[:, :mol.nelectron//2].T)
mf.kernel(dm0=init_dens)
if not mf.converged:
mf = scf.newton(mf)
mf.kernel(mo_coeff=mf.mo_coeff, mo_occ=mf.mo_occ)
assert (mf.converged)
# store the canonical orbitals in phi
phi = mf.mo_coeff[:, :mol.nelectron//2]
_MPI_comm.Bcast(phi, root=0)
def slater_init(key, shape, dtype=jnp.complex128):
return jnp.array(phi).astype(dtype).reshape((1, norb, nelec//2))
inner_SD = GPSKet.models.Slater(hi, init_fun=slater_init, dtype=jnp.complex128)
qGPS_part = qGPS(hi, N, dtype=jnp.complex128, init_fun=normal(sigma=1.e-3, dtype=jnp.complex128), apply_fast_update=True)
model = SlaterqGPS(inner_SD, qGPS_part)
# Variational state
vs = nk.vqs.MCState(sa, model, n_samples=n_samples, n_discard_per_chain=100)
# Set up computation of 1RDM expectation values
@jit(nopython=True)
def get_conn_1RDM(x):
x_prime = np.empty((x.shape[0], 2, norb, norb, x.shape[1]), dtype=np.uint8)
mels = np.empty((x.shape[0], 2, norb, norb), dtype=np.complex128)
for batch_id in range(x.shape[0]):
is_occ_up = (x[batch_id] & 1).astype(np.bool8)
is_occ_down = (x[batch_id] & 2).astype(np.bool8)
up_count = np.cumsum(is_occ_up)
down_count = np.cumsum(is_occ_down)
for i in range(norb):
for j in range(norb):
x_prime[batch_id, 0, i, j, :] = x[batch_id, :]
x_prime[batch_id, 1, i, j, :] = x[batch_id, :]
mels[batch_id, 0, i, j] = apply_hopping(i, j, x_prime[batch_id, 0, i, j, :], 1, cummulative_count=up_count)
mels[batch_id, 1, i, j] = apply_hopping(i, j, x_prime[batch_id, 1, i, j, :], 2, cummulative_count=down_count)
return x_prime, mels
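# Quick shape check of the kernel above (hypothetical closed-shell configuration,
# safe to remove; it also triggers the one-off numba compilation up front):
_x_demo = np.zeros((1, norb), dtype=np.uint8)
_x_demo[0, :nelec//2] = 3  # doubly occupy the first nelec/2 orbitals
_xp_demo, _mels_demo = get_conn_1RDM(_x_demo)
assert _xp_demo.shape == (1, 2, norb, norb, norb)
assert _mels_demo.shape == (1, 2, norb, norb)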
def get_1RDM(state):
x = state.samples.reshape((-1,norb))
log_vals = jnp.expand_dims(state.log_value(x), (1,2,3))
x_primes, mels = get_conn_1RDM(np.asarray(x, dtype = np.uint8))
all_log_vals_conn = []
for i in range(x_primes.shape[0]):
conf = x_primes[i, :, :, :]
log_vals_conn = state.log_value(jnp.array(conf.reshape((-1, norb)))).reshape((2, norb, norb))
all_log_vals_conn.append(log_vals_conn)
all_log_vals_conn = jnp.array(all_log_vals_conn)
total_samples = _mpi_sum(log_vals.shape[0])
return _mpi_sum(np.array(jnp.sum(jnp.exp(all_log_vals_conn-log_vals)*mels, axis=0)))/total_samples
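# Usage sketch (illustrative, requires a sampled state): the spin-summed trace of the
# estimated 1-RDM should match the electron count up to Monte Carlo noise, e.g.
# rdm = get_1RDM(vs); abs(np.trace(rdm[0] + rdm[1]) - nelec) should be small.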
# Optimizer
op = nk.optimizer.Sgd(learning_rate=0.075)
qgt = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)
sr = nk.optimizer.SR(qgt=qgt, diag_shift=0.01)
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
min_global_count = 1000
# Set up checkpointing
if _rank == 0:
if exists("./out.txt"):
vs.parameters = pickle.load(open("parameters.pickle", "rb"))
        out_prev = np.genfromtxt("out.txt", usecols=(0,1,2,3)).reshape((-1, 4))
if out_prev.shape[0] > 0:
best_var_arg = np.argmin(out_prev[:,3])
best_var = out_prev[best_var_arg, 3]
if best_var_arg < min_global_count:
count = out_prev.shape[0] - best_var_arg
else:
best_var = None
count = 0
global_count = out_prev.shape[0]
else:
best_var = None
count = 0
global_count = 0
print("continuing calculation")
else:
best_var = None
pickle.dump(vs.parameters, open("best_pars.pickle", "wb"))
with open("out.txt", "w") as fl:
fl.write("")
count = 0
global_count = 0
print("starting new calculation")
else:
best_var = None
count = 0
global_count = 0
best_var = _MPI_comm.bcast(best_var, root=0)
count = _MPI_comm.bcast(count, root=0)
global_count = _MPI_comm.bcast(global_count, root=0)
vs.parameters = _MPI_comm.bcast(vs.parameters, root=0)
max_count = 250
# Optimization loop
while count < max_count:
if _rank == 0:
pickle.dump(vs.parameters, open("parameters.pickle", "wb"))
dp = gs._forward_and_backward()
oneRDM = np.array(get_1RDM(vs))
en = gs.energy.mean + nuc_en
if best_var is None:
best_var = gs.energy.variance
count = 0
else:
if gs.energy.variance < best_var:
best_var = gs.energy.variance
if _rank == 0:
pickle.dump(vs.parameters, open("best_pars.pickle", "wb"))
count = 0
else:
count += 1
if global_count < min_global_count:
best_var = None
sampler_acceptance = vs.sampler_state.acceptance
if count < max_count:
gs.update_parameters(dp)
if _rank == 0:
print(en, gs.energy.variance, sampler_acceptance, gs.energy.R_hat, gs.energy.tau_corr)
np.save("oneBRDM_{}.npy".format(global_count), oneRDM)
with open("out.txt", "a") as fl:
fl.write("{} {} {} {} {} {} {} {}\n".format(np.real(en), np.imag(en), gs.energy.error_of_mean, gs.energy.variance, sampler_acceptance, gs.energy.R_hat, gs.energy.tau_corr, vs.n_samples))
global_count += 1
| 8,913 | 31.180505 | 205 | py |
GPSKet | GPSKet-master/scripts/GPS_for_ab_initio/H50_1D.py | import sys
import pickle
from os.path import exists
import numpy as np
import jax.numpy as jnp
from pyscf import scf, gto, ao2mo, lo
import netket as nk
from netket.utils.mpi import (
MPI_py_comm as _MPI_comm,
node_number as _rank,
)
import GPSKet.models as qGPS
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.sampler.fermionic_hopping import MetropolisHopping
from GPSKet.operator.hamiltonian.ab_initio import AbInitioHamiltonianOnTheFly
from GPSKet.models import qGPS
from GPSKet.nn.initializers import normal
# Input arguments
M = int(sys.argv[1]) # GPS support dimension
dist = float(sys.argv[2]) # inter-atomic distance (in units of a_0)
n_samples = 10000 # total number of samples (approximate if run in parallel)
L = 50 # number of H atoms
# Construct basis + one- and two-electron integrals with PySCF
mol = gto.Mole()
mol.build(
atom = [('H', (x, 0., 0.)) for x in dist*np.arange(L)],
basis = 'sto-6g',
symmetry = True,
unit="Bohr"
)
nelec = mol.nelectron
print('Number of electrons: ', nelec)
myhf = scf.RHF(mol)
ehf = myhf.scf()
norb = myhf.mo_coeff.shape[1]
print('Number of molecular orbitals: ', norb)
h1 = np.zeros((norb, norb))
h2 = np.zeros((norb, norb, norb, norb))
if _rank == 0:
if exists("./basis.npy"):
loc_coeff = np.load("./basis.npy")
else:
loc_coeff = lo.orth_ao(mol, 'meta_lowdin')
localizer = lo.Boys(mol, loc_coeff)
localizer.verbose = 4
localizer.init_guess = None
loc_coeff = localizer.kernel()
np.save("basis.npy", loc_coeff)
ovlp = myhf.get_ovlp()
# Check that we still have an orthonormal basis, i.e. C^T S C should be the identity
assert(np.allclose(np.linalg.multi_dot((loc_coeff.T, ovlp, loc_coeff)),np.eye(norb)))
# Find the hamiltonian in the local basis
hij_local = np.linalg.multi_dot((loc_coeff.T, myhf.get_hcore(), loc_coeff))
hijkl_local = ao2mo.restore(1, ao2mo.kernel(mol, loc_coeff), norb)
h1 = hij_local
h2 = hijkl_local
_MPI_comm.Bcast(h1)
_MPI_comm.barrier()
h2_slice = np.empty((h2.shape[2],h2.shape[3]))
for i in range(h2.shape[0]):
for j in range(h2.shape[1]):
np.copyto(h2_slice, h2[i,j,:,:])
_MPI_comm.Bcast(h2_slice)
_MPI_comm.barrier()
np.copyto(h2[i,j,:,:], h2_slice)
nuc_en = mol.energy_nuc()
# Set up Hilbert space
hi = FermionicDiscreteHilbert(norb, n_elec=(nelec//2,nelec//2))
# Set up ab-initio Hamiltonian
ha = AbInitioHamiltonianOnTheFly(hi, h1, h2)
# Use Metropolis-Hastings sampler with hopping rule
sa = MetropolisHopping(hi, n_sweeps=200, n_chains_per_rank=1)
# Model definition
model = qGPS(hi, M, dtype=jnp.complex128, init_fun=normal(1.e-1), apply_fast_update=True)
# Variational state
vs = nk.vqs.MCState(sa, model, n_samples=n_samples, n_discard_per_chain=100, chunk_size=1)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=0.05)
qgt = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)
sr = nk.optimizer.SR(qgt=qgt, diag_shift=0.01)
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Set up checkpointing
if _rank == 0:
if exists("./out.txt"):
vs.parameters = pickle.load(open("parameters.pickle", "rb"))
        out_prev = np.genfromtxt("out.txt", usecols=(0,1,2,3)).reshape((-1, 4))
if out_prev.shape[0] > 0:
best_var_arg = np.argmin(out_prev[:,3])
best_var = out_prev[best_var_arg, 3]
count = out_prev.shape[0] - best_var_arg
else:
best_var = None
count = 0
print("continuing calculation")
else:
best_var = None
pickle.dump(vs.parameters, open("best_pars.pickle", "wb"))
with open("out.txt", "w") as fl:
fl.write("")
count = 0
print("starting new calculation")
else:
best_var = None
count = 0
best_var = _MPI_comm.bcast(best_var, root=0)
count = _MPI_comm.bcast(count, root=0)
vs.parameters = _MPI_comm.bcast(vs.parameters, root=0)
max_count = 100
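# Convergence heuristic: keep optimizing until the best (lowest) energy variance seen
# so far has not improved for max_count consecutive iterations.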
# Optimization loop
while count < max_count:
if _rank == 0:
pickle.dump(vs.parameters, open("parameters.pickle", "wb"))
dp = gs._forward_and_backward()
en = gs.energy.mean + nuc_en
if best_var is None:
best_var = gs.energy.variance
count = 0
else:
if gs.energy.variance < best_var:
best_var = gs.energy.variance
if _rank == 0:
pickle.dump(vs.parameters, open("best_pars.pickle", "wb"))
count = 0
else:
count += 1
sampler_acceptance = vs.sampler_state.acceptance
if count < max_count:
gs.update_parameters(dp)
if _rank == 0:
print(en, gs.energy.variance, sampler_acceptance, gs.energy.R_hat, gs.energy.tau_corr)
with open("out.txt", "a") as fl:
fl.write("{} {} {} {} {} {} {} {}\n".format(np.real(en), np.imag(en), gs.energy.error_of_mean, gs.energy.variance, sampler_acceptance, gs.energy.R_hat, gs.energy.tau_corr, vs.n_samples))
| 5,014 | 29.210843 | 205 | py |
GPSKet | GPSKet-master/scripts/GPS_for_ab_initio/H2O.py | from os.path import exists
import numpy as np
import jax
import jax.numpy as jnp
import netket as nk
from netket.utils.types import Array
import GPSKet
import GPSKet.models as qGPS
from GPSKet.hilbert.discrete_fermion import FermionicDiscreteHilbert
from GPSKet.sampler.fermionic_hopping import MetropolisHopping
from GPSKet.operator.hamiltonian.ab_initio import AbInitioHamiltonianOnTheFly
from GPSKet.models import qGPS
from GPSKet.nn.initializers import normal
from GPSKet.models.pfaffian import ZeroMagnetizationPfaffian
from pyscf import scf, gto, ao2mo, lo
from netket.utils.mpi import (
MPI_py_comm as _MPI_comm,
node_number as _rank,
)
from flax import linen as nn
import sys
import pickle
# Input arguments
M = int(sys.argv[1]) # support dimension of the GPS
ref_state = int(sys.argv[2]) # reference state -> 0: no reference state, 1: SD, 2: Spin projected SD, 3: Pfaffian, 4: Spin projected Pfaffian, 5: magnetization breaking SD
basis_type = int(sys.argv[3]) # basis choice -> 0: local, 1: canonical, 2: split
n_samples = int(sys.argv[4]) # total number of samples (approximate if run in parallel)
# Construct basis + one- and two-electron integrals with PySCF
mol = gto.Mole()
mol.build(
atom = [['H', (0., 0.795, -0.454)], ['H', (0., -0.795, -0.454)], ['O', (0., 0., 0.113)]],
basis = '6-31G',
unit="Angstrom"
)
nelec = mol.nelectron
print('Number of electrons: ', nelec)
myhf = scf.RHF(mol)
ehf = myhf.scf()
norb = myhf.mo_coeff.shape[1]
print('Number of molecular orbitals: ', norb)
h1 = np.zeros((norb, norb))
h2 = np.zeros((norb, norb, norb, norb))
if _rank == 0:
if exists("./basis.npy"):
loc_coeff = np.load("./basis.npy")
else:
loc_coeff = myhf.mo_coeff
if basis_type != 1:
loc_coeff = lo.orth_ao(mol, 'meta_lowdin') # Using "lowdin" might improve the starting guess for a subsequent Boys localization
if basis_type == 0:
localizer = lo.Boys(mol, loc_coeff)
localizer.verbose = 4
localizer.init_guess = None
loc_coeff = localizer.kernel()
if basis_type == 2:
localizer = lo.Boys(mol, myhf.mo_coeff[:,:nelec//2])
localizer.verbose = 4
loc_coeff_occ = localizer.kernel()
localizer = lo.Boys(mol, myhf.mo_coeff[:, nelec//2:])
localizer.verbose = 4
loc_coeff_vrt = localizer.kernel()
loc_coeff = np.concatenate((loc_coeff_occ, loc_coeff_vrt), axis=1)
np.save("basis.npy", loc_coeff)
if exists("./h1.npy") and exists("./h2.npy"):
h1 = np.load("./h1.npy")
h2 = np.load("./h2.npy")
else:
ovlp = myhf.get_ovlp()
# Check that we still have an orthonormal basis, i.e. C^T S C should be the identity
assert(np.allclose(np.linalg.multi_dot((loc_coeff.T, ovlp, loc_coeff)),np.eye(norb)))
# Find the hamiltonian in the local basis
h1 = np.linalg.multi_dot((loc_coeff.T, myhf.get_hcore(), loc_coeff))
h2 = ao2mo.restore(1, ao2mo.kernel(mol, loc_coeff), norb)
np.save("h1.npy", h1)
np.save("h2.npy", h2)
_MPI_comm.Bcast(h1, root=0)
h2_slice = np.empty((h2.shape[2],h2.shape[3]))
for i in range(h2.shape[0]):
for j in range(h2.shape[1]):
np.copyto(h2_slice, h2[i,j,:,:])
_MPI_comm.Bcast(h2_slice, root = 0)
np.copyto(h2[i,j,:,:], h2_slice)
nuc_en = mol.energy_nuc()
# Set up Hilbert space
hi = FermionicDiscreteHilbert(norb, n_elec=(nelec//2,nelec//2))
# Set up ab-initio Hamiltonian
ha = AbInitioHamiltonianOnTheFly(hi, h1, h2)
# Use Metropolis-Hastings sampler with hopping rule
sa = MetropolisHopping(hi, n_sweeps=200, n_chains_per_rank=1)
# Model definitions
class PfaffianqGPS(nn.Module):
    Pfaffian: nn.Module
qGPS: GPSKet.models.qGPS
apply_fast_update: bool = True
@nn.compact
def __call__(self, x, cache_intermediates=False, update_sites=None) -> Array:
if cache_intermediates or (update_sites is not None):
indices_save = self.variable("intermediates_cache", "samples", lambda : jnp.zeros(0, dtype=x.dtype))
if update_sites is not None:
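                # Scatter the updated occupancies `occs` back into the cached sample at
                # positions `update_sites` to rebuild the full configuration (the
                # reversed scan matches the ordering convention of the fast updates).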
def update_fun(saved_config, update_sites, occs):
def scan_fun(carry, count):
return (carry.at[update_sites[count]].set(occs[count]), None)
return jax.lax.scan(scan_fun, saved_config, jnp.arange(update_sites.shape[0]), reverse=True)[0]
full_x = jax.vmap(update_fun, in_axes=(0, 0, 0), out_axes=0)(indices_save.value, update_sites, x)
else:
full_x = x
if cache_intermediates:
indices_save.value = full_x
y = GPSKet.models.slater.occupancies_to_electrons(full_x, hi._n_elec).at[:,nelec//2:].add(norb)
if M == 0:
return self.Pfaffian(y)
else:
return self.Pfaffian(y) + self.qGPS(x, cache_intermediates=cache_intermediates, update_sites=update_sites)
class SlaterqGPS(nn.Module):
    SD: nn.Module
qGPS: GPSKet.models.qGPS
apply_fast_update: bool = True
@nn.compact
def __call__(self, x, cache_intermediates=False, update_sites=None) -> Array:
if M == 0:
return self.SD(x, cache_intermediates=cache_intermediates, update_sites=update_sites)
else:
return self.SD(x, cache_intermediates=cache_intermediates, update_sites=update_sites) + self.qGPS(x, cache_intermediates=cache_intermediates, update_sites=update_sites)
# Run mean-field calcs for initialization of reference state
eigs, vecs = np.linalg.eigh(h1)
mf = scf.RHF(mol)
mf.get_hcore = lambda *args: h1
mf.get_ovlp = lambda *args: np.eye(hi.size)
mf._eri = ao2mo.restore(8, h2, hi.size)
# Assumes RHF
assert (hi._n_elec[0] == hi._n_elec[1])
init_dens = np.dot(vecs[:, :mol.nelectron//2], vecs[:, :mol.nelectron//2].T)
mf.kernel(dm0=init_dens)
if not mf.converged:
mf = scf.newton(mf)
mf.kernel(mo_coeff=mf.mo_coeff, mo_occ=mf.mo_occ)
assert (mf.converged)
# store the canonical orbitals in phi
phi = mf.mo_coeff[:, :mol.nelectron//2]
_MPI_comm.Bcast(phi, root=0)
def pfaffian_init(key, shape, dtype=jnp.complex128):
out = jnp.array(np.einsum("in,jn->ij", phi, phi)).astype(dtype)
# out += jax.nn.initializers.normal(dtype=out.dtype)(key, shape=out.shape, dtype=dtype)
return out
def slater_init(key, shape, dtype=jnp.complex128):
out = jnp.array(phi).astype(dtype).reshape((1, norb, nelec//2))
# out += jax.nn.initializers.normal(dtype=out.dtype)(key, shape=out.shape, dtype=dtype)
return out
def full_slater_init(key, shape, dtype=jnp.complex128):
out = jnp.block([[jnp.array(phi).astype(dtype), jnp.zeros(phi.shape, dtype=dtype)],
[jnp.zeros(phi.shape, dtype=dtype), jnp.array(phi).astype(dtype)]])
# out += jax.nn.initializers.normal(dtype=out.dtype)(key, shape=out.shape, dtype=dtype)
return out.reshape((1, 2*norb, nelec))
qGPS_part = qGPS(hi, M, dtype=jnp.complex128, init_fun=normal(sigma=1.e-1, dtype=jnp.complex128), apply_fast_update=True)
# 0: no ref_state, 1: SD, 2: Spin projected SD, 3: Pfaffian, 4: Spin projected Pfaffian, 5: magnetization breaking SD
if ref_state == 1:
inner_SD = GPSKet.models.slater.Slater(hi, init_fun=slater_init, dtype=jnp.complex128, apply_fast_update=True)
model = SlaterqGPS(inner_SD, qGPS_part)
elif ref_state == 2:
inner_SD = GPSKet.models.slater.Slater(hi, init_fun=slater_init, dtype=jnp.complex128, S2_projection = GPSKet.models.pfaffian.get_gauss_leg_elements_Sy(3), apply_fast_update=True)
model = SlaterqGPS(inner_SD, qGPS_part)
elif ref_state == 3:
inner_SD = ZeroMagnetizationPfaffian(norb, init_fun=pfaffian_init, dtype=jnp.complex128)
model = PfaffianqGPS(inner_SD, qGPS_part)
elif ref_state == 4:
inner_SD = ZeroMagnetizationPfaffian(norb, init_fun=pfaffian_init, S2_projection = GPSKet.models.pfaffian.get_gauss_leg_elements_Sy(3), dtype=jnp.complex128)
model = PfaffianqGPS(inner_SD, qGPS_part)
elif ref_state == 5:
inner_SD = GPSKet.models.slater.Slater(hi, init_fun=full_slater_init, dtype=jnp.complex128, fixed_magnetization=False, apply_fast_update=True)
model = SlaterqGPS(inner_SD, qGPS_part)
else:
model = qGPS_part
# Variational state
vs = nk.vqs.MCState(sa, model, n_samples=n_samples, n_discard_per_chain=100, chunk_size=1)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=0.05)
qgt = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)
sr = nk.optimizer.SR(qgt=qgt, diag_shift=0.01)
gs = nk.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Set up checkpointing
if _rank == 0:
if exists("./out.txt"):
vs.parameters = pickle.load(open("parameters.pickle", "rb"))
        out_prev = np.genfromtxt("out.txt", usecols=(0,1,2,3)).reshape((-1, 4))
if out_prev.shape[0] > 0:
best_var_arg = np.argmin(out_prev[:,3])
best_var = out_prev[best_var_arg, 3]
count = out_prev.shape[0] - best_var_arg
else:
best_var = None
count = 0
print("continuing calculation")
else:
best_var = None
pickle.dump(vs.parameters, open("best_pars.pickle", "wb"))
with open("out.txt", "w") as fl:
fl.write("")
count = 0
print("starting new calculation")
else:
best_var = None
count = 0
best_var = _MPI_comm.bcast(best_var, root=0)
count = _MPI_comm.bcast(count, root=0)
vs.parameters = _MPI_comm.bcast(vs.parameters, root=0)
max_count = 200
# Optimization loop
while count < max_count:
if _rank == 0:
pickle.dump(vs.parameters, open("parameters.pickle", "wb"))
dp = gs._forward_and_backward()
en = gs.energy.mean + nuc_en
if best_var is None:
best_var = gs.energy.variance
count = 0
else:
if gs.energy.variance < best_var:
best_var = gs.energy.variance
if _rank == 0:
pickle.dump(vs.parameters, open("best_pars.pickle", "wb"))
count = 0
else:
count += 1
sampler_acceptance = vs.sampler_state.acceptance
if count < max_count:
gs.update_parameters(dp)
if _rank == 0:
print(en, gs.energy.variance, sampler_acceptance, gs.energy.R_hat, gs.energy.tau_corr)
with open("out.txt", "a") as fl:
fl.write("{} {} {} {} {} {} {} {}\n".format(np.real(en), np.imag(en), gs.energy.error_of_mean, gs.energy.variance, sampler_acceptance, gs.energy.R_hat, gs.energy.tau_corr, vs.n_samples))
| 10,546 | 36.667857 | 205 | py |
GPSKet | GPSKet-master/scripts/ARGPS/argps/vmc.py | import os
import time
import jax
import numpy as np
import netket as nk
import GPSKet as qk
from absl import app
from absl import flags
from absl import logging
from netket.utils.mpi import (
node_number as MPI_rank
)
from ml_collections import config_flags, ConfigDict
from argps.configs.common import resolve
from argps.systems import get_system
from argps.models import get_model
from argps.samplers import get_sampler
from argps.utils import save_config, read_config, CSVLogger, Timer
from flax import serialization
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
_CONFIG = config_flags.DEFINE_config_file(
'config',
None,
'File path to a configuration file',
lock_config=True
)
_WORKDIR = flags.DEFINE_string(
'workdir',
None,
'Directory in which to store results of the optimization run',
required=True
)
def serialize_VMC(driver: nk.driver.VMC):
state_dict = {
"variables": serialization.to_state_dict(driver.state.variables),
"optimizer": serialization.to_state_dict(driver._optimizer_state),
"preconditioner": serialization.to_state_dict(driver.preconditioner._ema),
"step": driver._step_count
}
return state_dict
def deserialize_VMC(driver: nk.driver.VMC, state_dict: dict):
import copy
new_driver = copy.copy(driver)
new_driver.state.variables = serialization.from_state_dict(driver.state.variables, state_dict["variables"])
new_driver._optimizer_state = serialization.from_state_dict(driver._optimizer_state, state_dict["optimizer"])
new_driver._step_count = serialization.from_state_dict(driver._step_count, state_dict["step"])
new_driver.preconditioner._ema = serialization.from_state_dict(driver.preconditioner._ema, state_dict["preconditioner"])
return new_driver
serialization.register_serialization_state(
nk.driver.VMC,
serialize_VMC,
deserialize_VMC
)
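# With these handlers registered, flax's generic checkpoint utilities can round-trip
# the full driver: save_checkpoint(dir, vmc, step) below stores variables, optimizer
# state, preconditioner EMA and step count, and restore_checkpoint recovers them.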
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Parse config
workdir = _WORKDIR.value
config = _CONFIG.value
filename = os.path.join(workdir, "config.yaml")
if os.path.isfile(filename) and config is None:
config = ConfigDict(read_config(workdir))
config = resolve(config)
# Print and save config
if MPI_rank == 0:
logging.info(f'\n{config}')
save_config(workdir, config)
# System
ha = get_system(config)
hi = ha.hilbert
g = ha.graph if hasattr(ha, 'graph') else None
# Model
ma = get_model(config, hi, g)
# Sampler
sa = get_sampler(config, hi, g)
# Variational state
vs = nk.vqs.MCState(sa, ma, **config.variational_state)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=config.optimizer.learning_rate)
pars_struct = jax.tree_map(
lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype),
vs.parameters
)
sr = qk.optimizer.SRRMSProp(
pars_struct,
qk.optimizer.qgt.QGTJacobianDenseRMSProp,
solver=jax.scipy.sparse.linalg.cg,
diag_shift=config.optimizer.diag_shift,
decay=config.optimizer.decay,
eps=config.optimizer.eps,
mode=config.optimizer.mode
)
# Driver
vmc = nk.driver.VMC(ha, op, variational_state=vs, preconditioner=sr)
# Restore checkpoint
checkpoints_dir = os.path.join(workdir, "checkpoints")
vmc = restore_checkpoint(checkpoints_dir, vmc)
initial_step = vmc.step_count+1
step = initial_step
if MPI_rank == 0:
logging.info('Will start/continue training at initial_step=%d', initial_step)
# Logger
if MPI_rank == 0:
fieldnames = list(nk.stats.Stats().to_dict().keys())+["Runtime"]
logger = CSVLogger(os.path.join(workdir, "metrics.csv"), fieldnames)
# Run optimization loop
if MPI_rank == 0:
logging.info('Starting training loop; initial compile can take a while...')
timer = Timer(config.max_steps)
t0 = time.time()
while step <= config.max_steps:
# Training step
vmc.advance()
# Report compilation time
if MPI_rank == 0 and step == initial_step:
logging.info(f"First step took {time.time() - t0:.1f} seconds.")
# Update timer
if MPI_rank == 0:
timer.update(step)
# Log data
if MPI_rank == 0:
logger(step, {**vmc.energy.to_dict(), "Runtime": timer.runtime})
# Report training metrics
if MPI_rank == 0 and config.progress_every and step % config.progress_every == 0:
grad, _ = nk.jax.tree_ravel(vmc._loss_grad)
grad_norm = np.linalg.norm(grad)
done = step / config.max_steps
logging.info(f"Step: {step}/{config.max_steps} {100*done:.1f}%, "
f"E: {vmc.energy}, "
f"||∇E||: {grad_norm:.4f}, "
f"{timer}")
# Store checkpoint
if MPI_rank == 0 and ((config.checkpoint_every and step % config.checkpoint_every == 0) or step == config.max_steps):
checkpoint_path = save_checkpoint(checkpoints_dir, vmc, step, keep_every_n_steps=config.checkpoint_every)
logging.info(f"Stored checkpoint at step {step} to {checkpoint_path}")
step += 1
return
if __name__ == '__main__':
# Provide access to --jax_log_compiles, --jax_backend_target and --jax_xla_backend flags.
jax.config.config_with_absl()
app.run(main) | 5,497 | 31.532544 | 125 | py |
GPSKet | GPSKet-master/scripts/ARGPS/argps/models.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
import GPSKet as qk
from scipy.linalg import circulant
from functools import partial
from flax import linen as nn
from netket.hilbert import HomogeneousHilbert
from netket.graph import AbstractGraph
from netket.utils import HashableArray
from netket.utils.types import Array
from ml_collections import ConfigDict
from typing import Union, Tuple, Callable, Optional
_MODELS = {
'GPS': qk.models.qGPS,
'FilterGPS': qk.models.qGPS,
'ARGPS': qk.models.ARqGPSFull,
'MaskedGPS': qk.models.ARqGPSFull,
'ARFilterGPS': qk.models.ARPlaquetteqGPS,
'MaskedFilterGPS': qk.models.ARPlaquetteqGPS,
}
def get_model(config : ConfigDict, hilbert : HomogeneousHilbert, graph : Optional[AbstractGraph]=None) -> nn.Module:
"""
Return the model for a wavefunction Ansatz
Args:
config : experiment configuration file
hilbert : Hilbert space on which the model should act
graph : graph associated with the Hilbert space (optional)
Returns:
the model for the wavefunction Ansatz
"""
name = config.model_name
try:
ma_cls = _MODELS[name]
except KeyError:
raise ValueError(f"Model {name} is not a valid class or is not supported yet.")
if config.model.dtype == 'real':
dtype = jnp.float64
elif config.model.dtype == 'complex':
dtype = jnp.complex128
if isinstance(config.model.M, tuple):
assert len(config.model.M) == hilbert.size
        M = HashableArray(np.array(config.model.M))
else:
M = int(config.model.M)
init_fn = qk.nn.initializers.normal(sigma=config.model.sigma, dtype=dtype)
if graph:
groups = config.model.symmetries.split(',')
translations = 'translations' in groups or groups[0] == 'all' or groups[0] == 'automorphisms'
point_symmetries = 'point-symmetries' in groups or groups[0] == 'all' or groups[0] == 'automorphisms'
spin_flip = 'spin-flip' in groups or groups[0] == 'all' or groups[0] == 'automorphisms'
symmetries_fn, inv_symmetries_fn = get_symmetry_transformation_spin(name, translations, point_symmetries, spin_flip, graph)
else:
symmetries_fn, inv_symmetries_fn = qk.models.no_syms()
out_trafo = get_out_transformation(name, config.model.apply_exp)
if name != 'GPS' and name != 'FilterGPS':
if isinstance(hilbert, nk.hilbert.Spin):
count_spins_fn = count_spins
renormalize_log_psi_fn = renormalize_log_psi
elif isinstance(hilbert, qk.hilbert.FermionicDiscreteHilbert):
count_spins_fn = count_spins_fermionic
renormalize_log_psi_fn = renormalize_log_psi_fermionic
args = [hilbert, M]
if 'Filter' in name:
args.extend(get_plaquettes_and_masks(hilbert, graph))
apply_symmetries = symmetries_fn
else:
apply_symmetries = (symmetries_fn, inv_symmetries_fn)
normalize = 'AR' in name
ma = ma_cls(
*args,
dtype=dtype,
init_fun=init_fn,
normalize=normalize,
apply_symmetries=apply_symmetries,
count_spins=count_spins_fn,
renormalize_log_psi=renormalize_log_psi_fn,
out_transformation=out_trafo)
else:
if 'Filter' in name:
# TODO: add support for projective symmetrization of point-group and spin-flip symmetries
args = [hilbert, M]
if config.system_name == 'Hubbard1d':
graph = nk.graph.Chain(config.system.Lx, pbc=config.system.pbc)
symmetries_fn, inv_symmetries_fn = get_symmetry_transformation_spin(name, True, False, False, graph)
else:
args = [hilbert, hilbert.size*M]
ma = ma_cls(
*args,
dtype=dtype,
init_fun=init_fn,
syms=(symmetries_fn, inv_symmetries_fn),
out_transformation=out_trafo)
return ma
def get_symmetry_transformation_spin(name : str, translations : bool, point_symmetries: bool, spin_flip : bool, graph : AbstractGraph) -> Union[Tuple[Callable, Callable], Callable]:
"""
Return the appropriate spin symmetry transformations
Args:
name : name of the Ansatz
translations : whether to include translations or not
point_symmetries : whether to include point-group symmetries or not
spin_flip : whether to include spin_flip symmetry or not
graph : underlying graph of the system
Returns:
spin symmetry transformations and their inverses
"""
automorphisms = translations or point_symmetries
if automorphisms:
if translations and point_symmetries:
syms = graph.automorphisms().to_array().T
elif translations:
syms = graph.translation_group().to_array().T
elif point_symmetries:
syms = graph.point_group().to_array().T
inv_syms = np.zeros(syms.shape, dtype=syms.dtype)
for i in range(syms.shape[0]):
for j in range(syms.shape[1]):
inv_syms[syms[i,j], j] = i
syms = jnp.array(syms)
inv_syms = jnp.array(inv_syms)
if name == 'GPS' or name == 'FilterGPS':
inv_centre = 1
else:
inv_centre = 0
if automorphisms and spin_flip:
def symmetries(samples : Array) -> Array:
out = jnp.take(samples, syms, axis=-1)
out = jnp.concatenate((out, inv_centre-out), axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = jnp.concatenate((inv_syms[indices], inv_syms[indices]), axis=-1)
inv_sym_occs = jnp.tile(jnp.expand_dims(sample_at_indices, axis=-1), syms.shape[1])
inv_sym_occs = jnp.concatenate((inv_sym_occs, inv_centre-inv_sym_occs), axis=-1)
return inv_sym_occs, inv_sym_sites
elif automorphisms:
def symmetries(samples : Array) -> Array:
out = jnp.take(samples, syms, axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = inv_syms[indices]
inv_sym_occs = jnp.tile(jnp.expand_dims(sample_at_indices, axis=-1), syms.shape[1])
return inv_sym_occs, inv_sym_sites
elif spin_flip:
def symmetries(samples : Array) -> Array:
out = jnp.expand_dims(samples, axis=-1)
out = jnp.concatenate((out, inv_centre-out), axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = jnp.expand_dims(indices, axis=-1)
inv_sym_sites = jnp.concatenate((inv_sym_sites, inv_sym_sites), axis=-1)
inv_sym_occs = jnp.expand_dims(sample_at_indices, axis=-1)
inv_sym_occs = jnp.concatenate((inv_sym_occs, inv_centre-inv_sym_occs), axis=-1)
return inv_sym_occs, inv_sym_sites
else:
def symmetries(samples : Array) -> Array:
out = jnp.expand_dims(samples, axis=-1)
return out
def inv_symmetries(sample_at_indices, indices):
inv_sym_sites = jnp.expand_dims(indices, axis=-1)
inv_sym_occs = jnp.expand_dims(sample_at_indices, axis=-1)
return inv_sym_occs, inv_sym_sites
return symmetries, inv_symmetries
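# Note: `inv_symmetries` is the bookkeeping inverse of `symmetries`: for a site update it
# returns the equivalent (occupancy, site) pairs under every symmetry operation, which the
# masked/autoregressive models can use to fast-update symmetrized amplitudes.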
def count_spins(spins : Array) -> Array:
"""
Count the number of up- and down-spins in a batch of local configurations x_i,
where x_i can be equal to:
- 0 if it is occupied by an up-spin
- 1 if it is occupied by a down-spin
Args:
spins : array of local configurations (batch,)
Returns:
the number of up- and down-spins for each configuration in the batch (batch, 2)
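    Example (illustrative; exact array repr may vary across JAX versions):
        >>> count_spins(jnp.array([0, 1, 1]))
        Array([[1, 0],
               [0, 1],
               [0, 1]], dtype=int32)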
"""
return jnp.stack([(spins+1)&1, ((spins+1)&2)/2], axis=-1).astype(jnp.int32)
def count_spins_fermionic(spins : Array) -> Array:
"""
Count the spin-up and down electrons in a batch of local occupations x_i,
where x_i can be equal to:
- 0 if it is unoccupied
- 1 if it is occupied by a single spin-up electron
- 2 if it is occupied by a single spin-down electron
- 3 if it is doubly-occupied
Args:
spins : array of local configurations (batch,)
Returns:
the number of spin-up and down electrons for each configuration in the batch (batch, 4)
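    Example (illustrative; exact array repr may vary across JAX versions):
        >>> count_spins_fermionic(jnp.array([0, 1, 2, 3]))
        Array([[0, 0, 0, 0],
               [0, 1, 0, 0],
               [0, 0, 1, 0],
               [0, 1, 1, 0]], dtype=int32)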
"""
zeros = jnp.zeros(spins.shape[0])
up_spins = spins&1
down_spins = (spins&2)/2
return jnp.stack([zeros, up_spins, down_spins, zeros], axis=-1).astype(jnp.int32)
def renormalize_log_psi(n_spins : Array, hilbert : HomogeneousHilbert, index : int) -> Array:
"""
Renormalize the log-amplitude to conserve the number of up- and down-spins
Args:
n_spins : number of up- and down-spins up to index (batch, 2)
hilbert : Hilbert space from which configurations are sampled
index : site index
Returns:
        renormalized log-amplitude (batch, 2)
"""
return jnp.log(jnp.heaviside(hilbert.size//2-n_spins, 0))
@partial(jax.vmap, in_axes=(0, None, None))
def renormalize_log_psi_fermionic(n_spins : Array, hilbert : HomogeneousHilbert, index : int) -> Array:
"""
Renormalize the log-amplitude to conserve the number of spin-up and down electrons
Args:
n_spins : number of spin-up and down electrons up to index (batch, 4)
hilbert : Hilbert space from which configurations are sampled
index : site index
Returns:
        renormalized log-amplitude (batch, 4)
"""
# Compute difference between spin-up (spin-down) electrons up to index and
# total number of spin-up (spin-down) electrons
diff = jnp.array(hilbert._n_elec, jnp.int32)-n_spins[1:3]
# 1. if the number of spin-up (spin-down) electrons until index
# is equal to n_elec_up (n_elec_down), then set to 0 the probability
# of sampling a singly occupied orbital with a spin-up (spin-down)
# electron, as well as the probability of sampling a doubly occupied orbital
log_psi = jnp.zeros(hilbert.local_size)
log_psi = jax.lax.cond(
diff[0] == 0,
lambda log_psi: log_psi.at[1].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
diff[1] == 0,
lambda log_psi: log_psi.at[2].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
(diff == 0).any(),
lambda log_psi: log_psi.at[3].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
    # 2. if the number of spin-up (spin-down) electrons that still need to be
    # placed is greater than or equal to the number of remaining sites (including
    # the current one), every remaining site must host such an electron, so set the
    # probability of sampling an empty orbital, or one with only the opposite spin, to 0
log_psi = jax.lax.cond(
(diff[0] >= (hilbert.size-index)).any(),
lambda log_psi: log_psi.at[np.array([0,2])].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
log_psi = jax.lax.cond(
(diff[1] >= (hilbert.size-index)).any(),
lambda log_psi: log_psi.at[np.array([0,1])].set(-jnp.inf),
lambda log_psi: log_psi,
log_psi
)
return log_psi
def get_out_transformation(name: str, apply_exp: bool):
"""
    Return the transformation of the output layer
Args:
name : name of the Ansatz
apply_exp : whether to apply the exponential or not
Returns:
a callable function that is applied in the output layer of a GPS model
"""
if name == 'GPS' or name == 'FilterGPS':
axis = (-2,-1)
else:
axis = -1
if apply_exp:
out_trafo = lambda x : jnp.sum(x, axis=axis)
else:
out_trafo = lambda x : jnp.log(jnp.sum(x, axis=axis)+0.j)
return out_trafo
def get_plaquettes_and_masks(hilbert : HomogeneousHilbert, graph : AbstractGraph):
"""
Return the filter plaquettes and masks for a filter-based GPS Ansatz
Args:
hilbert : Hilbert space on which the model should act
graph : graph associated with the Hilbert space
Returns:
a tuple containing the filter plaquettes and masks for a filter-based GPS Ansatz
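    Example (illustrative, for hilbert.size == 4 without a fully periodic 2D graph):
        >>> circulant(np.arange(4))
        array([[0, 3, 2, 1],
               [1, 0, 3, 2],
               [2, 1, 0, 3],
               [3, 2, 1, 0]])
        Row i lists the sites of plaquette i starting from site i; `masks` then zeroes
        the entries referring to site i or later, i.e. the not-yet-sampled sites in the
        autoregressive ordering.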
"""
L = hilbert.size
if graph and graph.ndim == 2 and graph.pbc.all():
translations = graph.translation_group().to_array()
plaquettes = translations[np.argsort(translations[:,0])]
plaquettes = HashableArray(plaquettes)
else:
plaquettes = HashableArray(circulant(np.arange(L)))
masks = HashableArray(np.where(plaquettes >= np.repeat([np.arange(L)], L, axis=0).T, 0, 1))
return (plaquettes, masks) | 12,731 | 38.7875 | 181 | py |
GPSKet | GPSKet-master/tests/test_arqgps.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
from tqdm import tqdm
from jax.scipy.special import logsumexp
from GPSKet.models import ARqGPS
key_in, key_ma = jax.random.split(jax.random.PRNGKey(np.random.randint(0, 100)))
L = 20
M = 2
dtype = jnp.complex128
batch_size = 16
g = nk.graph.Chain(length=L, pbc=True)
hi = nk.hilbert.Spin(1/2, N=g.n_nodes)
arqgps = ARqGPS(
hi, M,
dtype=dtype
)
# Test #1
# Log-amplitude for a configuration x should be equal to (with each site term
# normalized as for the conditionals below):
# \log(\psi(x)) = \sum_{i=1}^L\sum_{n=1}^M\epsilon_{x_i,n,i}(\prod_{j=1}^{i-1}\epsilon_{x_j,n,j})
inputs = hi.random_state(key_in, batch_size)
variables = arqgps.init(key_ma, inputs)
log_psi_test = arqgps.apply(variables, inputs)
log_psi_true = np.zeros(batch_size, dtype=dtype)
epsilon = np.asarray(variables.unfreeze()['params']['epsilon'], dtype=dtype)
for k in tqdm(range(batch_size), desc="Test #1"):
log_psi = 0+0*1j
x_k = hi.states_to_local_indices(inputs[k])
for i in range(L):
log_psi_cond = np.zeros(hi.local_size, dtype=dtype)
for n in range(M):
var_prod = 1+0*1j
for j in range(i):
var_prod *= epsilon[x_k[j], n, j]
log_psi_cond += epsilon[:, n, i]*var_prod
bond_sum = log_psi_cond[x_k[i]]
normalization = 0.5*logsumexp(2*log_psi_cond.real)
log_psi += bond_sum-normalization
log_psi_true[k] = log_psi
np.testing.assert_allclose(log_psi_test, log_psi_true)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPS(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
# Test #2
# Symmetrized amplitudes should be equal to average of
# amplitudes from non-symmetric model over
# symmetry transformed input configurations
log_psi_symm = arqgps_symm.apply(variables, inputs)
n_symm = symmetries.shape[-1]
log_psi = jnp.zeros((batch_size, n_symm), dtype=jnp.complex128)
for t in tqdm(range(n_symm), desc="Test #2"):
inputs_t = jnp.take_along_axis(inputs, jnp.tile(symmetries[:, t], (batch_size, 1)), 1)
y = arqgps.apply(variables, inputs_t)
log_psi = log_psi.at[:, t].set(y)
log_psi_real = 0.5*logsumexp(2*log_psi.real, axis=-1, b=1/n_symm)
log_psi_imag = logsumexp(1j*log_psi.imag, axis=-1).imag
log_psi = log_psi_real+1j*log_psi_imag
np.testing.assert_allclose(log_psi_symm.real, log_psi.real)
# Test #3
# Probabilities from .conditionals should match those over
# L sites from ._conditional for constrained and unconstrained Hilbert spaces
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPS(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
inputs = hi.random_state(key_in, batch_size)
variables = arqgps_symm.init(key_ma, inputs)
psi_cond_test = jnp.zeros((batch_size, L, 2), dtype=jnp.float64)
for l in tqdm(range(L), desc="Test #3.1, unconstrained"):
p, variables = arqgps_symm.apply(variables, inputs, l, method=ARqGPS._conditional, mutable=True)
psi_cond_test = psi_cond_test.at[:, l, :].set(p)
psi_cond = arqgps_symm.apply(variables, inputs, method=ARqGPS.conditionals)
np.testing.assert_allclose(psi_cond_test, psi_cond)
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes, total_sz=0)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPS(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
inputs = hi.random_state(key_in, batch_size)
variables = arqgps_symm.init(key_ma, inputs)
psi_cond_test = jnp.zeros((batch_size, L, 2), dtype=jnp.float64)
for l in tqdm(range(L), desc="Test #3.2, constrained"):
p, variables = arqgps_symm.apply(variables, inputs, l, method=ARqGPS._conditional, mutable=True)
psi_cond_test = psi_cond_test.at[:, l, :].set(p)
psi_cond = arqgps_symm.apply(variables, inputs, method=ARqGPS.conditionals)
np.testing.assert_allclose(psi_cond_test, psi_cond)
# Test #4
# Output should be real for real parameters and complex for complex parameters
dtypes = [jnp.float64, jnp.complex128]
for dtype in tqdm(dtypes, desc="Test #4"):
arqgps_symm = ARqGPS(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
variables = arqgps_symm.init(key_ma, inputs)
log_psi = arqgps_symm.apply(variables, inputs)
assert log_psi.dtype == dtype
| 4,452 | 34.34127 | 100 | py |
GPSKet | GPSKet-master/tests/test_arplaquetteqgps.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
from tqdm import tqdm
from scipy.linalg import circulant
from jax.scipy.special import logsumexp
from netket.utils import HashableArray
from GPSKet.models import ARPlaquetteqGPS
key_in, key_ma = jax.random.split(jax.random.PRNGKey(2))
L = 20
M = 2
dtype = jnp.complex128
batch_size = 16
g = nk.graph.Chain(length=L, pbc=True)
hi = nk.hilbert.Spin(1/2, N=g.n_nodes)
plaquettes = HashableArray(circulant(np.arange(L)))
masks = HashableArray(np.where(plaquettes >= np.repeat([np.arange(L)], L, axis=0).T, 0, 1))
arqgps = ARPlaquetteqGPS(
hi, M, plaquettes, masks,
dtype=dtype
)
# Test #1
# Log-amplitude for a configuration x should be equal to (with each site term
# normalized as for the conditionals below):
# \log(\psi(x)) = \sum_{i=1}^L\sum_{n=1}^M\epsilon_{x_i,n,0}(\prod_{j=1}^{i}\epsilon_{x_{i-j},n,j})
inputs = hi.random_state(key_in, batch_size)
inputs = np.asarray(inputs, np.int32)
variables = arqgps.init(key_ma, inputs)
epsilon = np.asarray(variables.unfreeze()['params']['epsilon'], dtype=dtype)
log_psi_test = arqgps.apply(variables, inputs)
log_psi_true = np.zeros(batch_size, dtype=dtype)
for k in tqdm(range(batch_size), desc="Test #1"):
log_psi = 0
x_k = hi.states_to_local_indices(inputs[k])
for i in range(L):
log_psi_cond = np.zeros(hi.local_size, dtype=dtype)
for n in range(M):
context_prod = 1
for j in range(1, i+1):
context_prod *= epsilon[x_k[i-j], n, j]
log_psi_cond += epsilon[:, n, 0]*context_prod
normalization = 0.5*logsumexp(2*log_psi_cond.real)
log_psi += log_psi_cond[x_k[i]]-normalization
log_psi_true[k] = log_psi
np.testing.assert_allclose(log_psi_test, log_psi_true)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARPlaquetteqGPS(
hi, M, plaquettes, masks,
dtype=dtype,
apply_symmetries=apply_symmetries
)
# Test #2
# Symmetrized amplitudes should be equal to average of
# amplitudes from non-symmetric model over
# symmetry transformed input configurations
log_psi_symm = arqgps_symm.apply(variables, inputs)
n_symm = symmetries.shape[-1]
log_psi = jnp.zeros((batch_size, n_symm), dtype=jnp.complex128)
for t in tqdm(range(n_symm), desc="Test #2"):
inputs_t = jnp.take_along_axis(inputs, jnp.tile(symmetries[:, t], (batch_size, 1)), 1)
y = arqgps.apply(variables, inputs_t)
log_psi = log_psi.at[:, t].set(y)
log_psi_real = 0.5*logsumexp(2*log_psi.real, axis=-1, b=1/n_symm)
log_psi_imag = logsumexp(1j*log_psi.imag, axis=-1).imag
log_psi = log_psi_real+1j*log_psi_imag
np.testing.assert_allclose(log_psi_symm.real, log_psi.real)
# Test #3
# Probabilities from .conditionals should match those over
# L sites from ._conditional for constrained and unconstrained Hilbert space
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARPlaquetteqGPS(
hi, M, plaquettes, masks,
dtype=dtype,
apply_symmetries=apply_symmetries
)
inputs = hi.random_state(key_in, batch_size)
variables = arqgps_symm.init(key_ma, inputs)
_masks = np.asarray(masks)
_plaquettes = np.asarray(plaquettes)
psi_cond_test = jnp.zeros((batch_size, L, 2), dtype=jnp.float64)
for l in tqdm(range(L), desc="Test #3.1, unconstrained"):
p, variables = arqgps_symm.apply(variables, inputs, (l, _masks[l], _plaquettes[l]), method=ARPlaquetteqGPS._conditional, mutable=True)
psi_cond_test = psi_cond_test.at[:, l, :].set(p)
psi_cond = arqgps_symm.apply(variables, inputs, method=ARPlaquetteqGPS.conditionals)
np.testing.assert_allclose(psi_cond_test, psi_cond)
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes, total_sz=0)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARPlaquetteqGPS(
hi, M, plaquettes, masks,
dtype=dtype,
apply_symmetries=apply_symmetries
)
inputs = hi.random_state(key_in, batch_size)
variables = arqgps_symm.init(key_ma, inputs)
psi_cond_test = jnp.zeros((batch_size, L, 2), dtype=jnp.float64)
for l in tqdm(range(L), desc="Test #3.2, constrained"):
p, variables = arqgps_symm.apply(variables, inputs, (l, _masks[l], _plaquettes[l]), method=ARPlaquetteqGPS._conditional, mutable=True)
psi_cond_test = psi_cond_test.at[:, l, :].set(p)
psi_cond = arqgps_symm.apply(variables, inputs, method=ARPlaquetteqGPS.conditionals)
np.testing.assert_allclose(psi_cond_test, psi_cond)
# Test #4
# Output should be real for real parameters and complex for complex parameters
dtypes = [jnp.float64, jnp.complex128]
for dtype in tqdm(dtypes, desc="Test #4"):
arqgps_symm = ARPlaquetteqGPS(
hi, M, plaquettes, masks,
dtype=dtype,
apply_symmetries=apply_symmetries
)
variables = arqgps_symm.init(key_ma, inputs)
log_psi = arqgps_symm.apply(variables, inputs)
assert log_psi.dtype == dtype
| 4,975 | 36.413534 | 138 | py |
GPSKet | GPSKet-master/tests/test_asymmqgps.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
from GPSKet.models import ASymmqGPS, ASymmqGPSProd, occupancies_to_electrons
from GPSKet.hilbert import FermionicDiscreteHilbert
from tqdm import tqdm
key_in, key_ma = jax.random.split(jax.random.PRNGKey(np.random.randint(0, 100)))
B = 16
L = 10
n_elec = [5, 5]
dtype = jnp.complex128
n_dets = 3
g = nk.graph.Chain(L, pbc=True)
symmetries = g.automorphisms().to_array().T
n_syms = symmetries.shape[-1]
def apply_symmetries(y):
return jax.vmap(lambda tau: jnp.take(tau, y), in_axes=-1, out_axes=-1)(symmetries)
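# apply_symmetries maps the electron positions y through every automorphism
# tau: vmapping over the last axis of `symmetries` returns an array of shape
# (..., n_elec_total, n_syms), one permuted configuration per symmetry.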
hi = FermionicDiscreteHilbert(L, n_elec=n_elec)
x = hi.random_state(key_in, B)
# Test #1: evaluate ASymmqGPS with kernel symmetrization
# Amplitudes should be equal to:
# Ψ(x) = sinh(∑_τ∑_k det(ɸ_k^↑(τx))det(ɸ_k^↓(τx)))
ma = ASymmqGPS(hi, n_dets, dtype=dtype, apply_symmetries=apply_symmetries)
variables = ma.init(key_ma, x)
log_psi_test = ma.apply(variables, x)
log_psi_true = np.zeros(B, dtype)
params = variables.unfreeze()['params']
orbitals_up = params['Slater_0']['U_up']
orbitals_down = params['Slater_0']['U_down']
y = occupancies_to_electrons(x.astype(jnp.int32), n_elec)
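# occupancies_to_electrons converts each occupancy string into the indices of
# the occupied sites: the first n_elec[0] entries are the up-electron
# positions and the remaining n_elec[1] the down-electron positions, which is
# why the loops below slice y_t at n_elec[0].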
y_t = apply_symmetries(y)
for i in tqdm(range(B), desc="Test #1: ASymmqGPS - kernel"):
sum_over_syms = 0.0+0j
for j in range(n_syms):
y_up = y_t[i,:n_elec[0],j]
y_down = y_t[i,n_elec[0]:,j]
sum_over_dets = 0.0+0j
for k in range(n_dets):
phi_up = orbitals_up[k, y_up, :]
phi_down = orbitals_down[k, y_down, :]
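            # slogdet returns (sign, log|det|); recombining them as
            # log(det) = log|det| + log(sign) keeps the product of the two
            # spin determinants numerically stable.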
(s_up, log_det_up) = jnp.linalg.slogdet(phi_up)
(s_down, log_det_down) = jnp.linalg.slogdet(phi_down)
log_sd = log_det_up + log_det_down + jnp.log(s_up*s_down+0j)
sum_over_dets += np.exp(log_sd)
sum_over_syms += sum_over_dets
if np.issubdtype(dtype, np.complexfloating):
log_psi_true[i] = np.log(np.sinh(sum_over_syms))
else:
log_psi_true[i] = np.log(np.sinh(sum_over_syms)).real
np.testing.assert_allclose(log_psi_test, log_psi_true)
# Test #2: evaluate ASymmqGPS with projective symmetrization
# Amplitudes should be equal to:
# Ψ(x) = ∑_τ sinh(∑_k det(ɸ_k^↑(τx))det(ɸ_k^↓(τx)))
ma = ASymmqGPS(hi, n_dets, dtype=dtype, apply_symmetries=apply_symmetries, symmetrization='projective')
variables = ma.init(key_ma, x)
log_psi_test = ma.apply(variables, x)
log_psi_true = np.zeros(B, dtype)
params = variables.unfreeze()['params']
orbitals_up = params['Slater_0']['U_up']
orbitals_down = params['Slater_0']['U_down']
y = occupancies_to_electrons(x.astype(jnp.int32), n_elec)
y_t = apply_symmetries(y)
for i in tqdm(range(B), desc="Test #2: ASymmqGPS - projective"):
sum_over_syms = 0.0+0j
for j in range(n_syms):
y_up = y_t[i,:n_elec[0],j]
y_down = y_t[i,n_elec[0]:,j]
sum_over_dets = 0.0+0j
for k in range(n_dets):
phi_up = orbitals_up[k, y_up, :]
phi_down = orbitals_down[k, y_down, :]
(s_up, log_det_up) = jnp.linalg.slogdet(phi_up)
(s_down, log_det_down) = jnp.linalg.slogdet(phi_down)
log_sd = log_det_up + log_det_down + jnp.log(s_up*s_down+0j)
sum_over_dets += np.exp(log_sd)
sum_over_syms += np.sinh(sum_over_dets)
if np.issubdtype(dtype, np.complexfloating):
log_psi_true[i] = np.log(sum_over_syms)
else:
log_psi_true[i] = np.log(sum_over_syms).real
np.testing.assert_allclose(log_psi_test, log_psi_true)
# Test #3: evaluate ASymmqGPSProd
# Amplitudes should be equal to:
# Ψ(x) = ∑_τ ∏_k sinh(det(ɸ_k^↑(τx))det(ɸ_k^↓(τx)))
ma = ASymmqGPSProd(hi, n_dets, dtype=dtype, apply_symmetries=apply_symmetries)
variables = ma.init(key_ma, x)
log_psi_test = ma.apply(variables, x)
log_psi_true = np.zeros(B, dtype)
params = variables.unfreeze()['params']
orbitals_up = params['Slater_0']['U_up']
orbitals_down = params['Slater_0']['U_down']
y = occupancies_to_electrons(x.astype(jnp.int32), n_elec)
y_t = apply_symmetries(y)
for i in tqdm(range(B), desc="Test #3: ASymmqGPSProd"):
sum_over_syms = 0.0+0j
for j in range(n_syms):
y_up = y_t[i,:n_elec[0],j]
y_down = y_t[i,n_elec[0]:,j]
prod_over_dets = 1.0+0j
for k in range(n_dets):
phi_up = orbitals_up[k, y_up, :]
phi_down = orbitals_down[k, y_down, :]
(s_up, log_det_up) = jnp.linalg.slogdet(phi_up)
(s_down, log_det_down) = jnp.linalg.slogdet(phi_down)
log_sd = log_det_up + log_det_down + jnp.log(s_up*s_down+0j)
prod_over_dets *= np.sinh(np.exp(log_sd))
sum_over_syms += prod_over_dets
if np.issubdtype(dtype, np.complexfloating):
log_psi_true[i] = np.log(sum_over_syms)
else:
log_psi_true[i] = np.log(sum_over_syms).real
np.testing.assert_allclose(log_psi_test, log_psi_true)
# Test #4: ASymmqGPSProd should only work for odd numbers of determinants
n_dets = 2
ma = ASymmqGPSProd(hi, n_dets, apply_symmetries=apply_symmetries)
try:
    variables = ma.init(key_ma, x)
except Exception as error:
    assert isinstance(error, AssertionError)
else:
    raise AssertionError("expected ASymmqGPSProd to reject an even number of determinants") | 5,108 | 39.547619 | 103 | py |
GPSKet | GPSKet-master/tests/test_ardirectsampler.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
from mpi4py import MPI
from GPSKet.models import ARqGPS
from GPSKet.sampler import ARDirectSampler
# MPI variables
comm = MPI.COMM_WORLD.Create(MPI.COMM_WORLD.Get_group())
rank = comm.Get_rank()
n_nodes = comm.Get_size()
# Model variables
key = jax.random.PRNGKey(np.random.randint(0, 100))
L = 8
M = 2
dtype = jnp.complex128
batch_size = 20
# Compute samples per rank
if batch_size % n_nodes != 0:
raise ValueError("Define a number of samples that is a multiple of the number of MPI ranks")
samples_per_rank = batch_size // n_nodes
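# With ARDirectSampler every chain yields exactly one configuration per sweep,
# so vs.sample() below appears to come back as
# (chain_length, n_chains_per_rank, L), with
# chain_length = n_samples / (n_ranks * n_chains_per_rank)
# (cf. the shape asserts in the tests below).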
# Setup
g = nk.graph.Chain(length=L, pbc=True)
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes)
ha = nk.operator.Heisenberg(hilbert=hi, graph=g, sign_rule=False)
arqgps = ARqGPS(
hi, M,
dtype=dtype)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPS(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
# Test #1
# Shape of sample should be (1, samples_per_rank, L)
sa = ARDirectSampler(hi, n_chains_per_rank=samples_per_rank)
vs = nk.vqs.MCState(sa, arqgps, n_samples=batch_size)
samples = vs.sample()
if rank == 0:
print("Without symmetries:")
print(f"- sampler.n_chains_per_rank = {sa.n_chains_per_rank}")
print(f"- vqs.n_samples = {vs.n_samples}")
print(f"- vqs.chain_length = {vs.chain_length}")
print(f"- samples.shape = {samples.shape}")
np.testing.assert_equal(samples.shape, (1, samples_per_rank, L))
sa = ARDirectSampler(hi, n_chains_per_rank=samples_per_rank)
vs = nk.vqs.MCState(sa, arqgps_symm, n_samples=batch_size)
samples = vs.sample()
if rank == 0:
print("With symmetries:")
print(f"- sampler.n_chains_per_rank = {sa.n_chains_per_rank}")
print(f"- vqs.n_samples = {vs.n_samples}")
print(f"- vqs.chain_length = {vs.chain_length}")
print(f"- samples.shape = {samples.shape}")
np.testing.assert_equal(samples.shape, (1, samples_per_rank, L))
# Test #2
# When sampling from a constrained Hilbert space,
# autoregressive models should generate samples with
# same total magnetization
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes, total_sz=0)
arqgps = ARqGPS(
hi, M,
dtype=dtype,
)
arqgps_symm = ARqGPS(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
sa = ARDirectSampler(hi, n_chains_per_rank=samples_per_rank)
vs = nk.vqs.MCState(sa, arqgps, n_samples=batch_size)
samples = vs.sample()
print("Without symmetries:")
print(f"- samples:\n{samples}")
np.testing.assert_equal(np.sum(np.squeeze(samples), axis=-1), np.zeros(samples_per_rank))
sa = ARDirectSampler(hi, n_chains_per_rank=samples_per_rank)
vs = nk.vqs.MCState(sa, arqgps_symm, n_samples=batch_size)
samples = vs.sample()
print("With symmetries:")
print(f"- samples:\n{samples}")
np.testing.assert_equal(np.sum(np.squeeze(samples), axis=-1), np.zeros(samples_per_rank))
# Test #3
# Changing sample batch size shouldn't be a problem
samples = vs.sample(n_samples=2*batch_size)
print(samples.shape)
np.testing.assert_equal(samples.shape, (2, samples_per_rank, L))
| 3,091 | 30.232323 | 96 | py |
GPSKet | GPSKet-master/tests/test_arqgpsfull.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
from tqdm import tqdm
from jax.scipy.special import logsumexp
from GPSKet.models import ARqGPSFull
key_in, key_ma = jax.random.split(jax.random.PRNGKey(np.random.randint(0, 100)))
L = 20
M = 2
dtype = jnp.complex128
batch_size = 16
g = nk.graph.Chain(length=L, pbc=True)
hi = nk.hilbert.Spin(1/2, N=g.n_nodes)
arqgps = ARqGPSFull(
hi, M,
dtype=dtype
)
# Test #1
# Log-amplitude for a configuration x should be equal to:
# \log(\psi(x)) = \sum_{i=1}^L(\sum_{n=1}^M\epsilon_{x_i,n,i,i}(\prod_{j=1}^{i-1}\epsilon_{x_j,n,j,i}))-\log(\sum_x'|\exp(\sum_{n=1}^M\epsilon_{x',n,i,i}(\prod_{j=1}^{i-1}\epsilon_{x_j,n,j,i}))|^2)/2
inputs = hi.random_state(key_in, batch_size)
variables = arqgps.init(key_ma, inputs)
log_psi_test = arqgps.apply(variables, inputs)
log_psi_true = np.zeros(batch_size, dtype=dtype)
epsilon = np.asarray(variables.unfreeze()['params']['epsilon'], dtype=dtype)
for k in tqdm(range(batch_size), desc="Test #1"):
log_psi = 0+0*1j
x_k = hi.states_to_local_indices(inputs[k])
for i in range(L):
log_psi_cond = np.zeros(hi.local_size, dtype=dtype)
for n in range(M):
var_prod = 1+0*1j
for j in range(i):
var_prod *= epsilon[x_k[j], n, j, i]
log_psi_cond += epsilon[:, n, i, i]*var_prod
bond_sum = log_psi_cond[x_k[i]]
normalization = 0.5*logsumexp(2*log_psi_cond.real)
log_psi += bond_sum-normalization
log_psi_true[k] = log_psi
np.testing.assert_allclose(log_psi_test, log_psi_true)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPSFull(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
# Test #2
# Symmetrized amplitudes should be equal to average of
# amplitudes from non-symmetric model over
# symmetry transformed input configurations
log_psi_symm = arqgps_symm.apply(variables, inputs)
n_symm = symmetries.shape[-1]
log_psi = jnp.zeros((batch_size, n_symm), dtype=jnp.complex128)
for t in tqdm(range(n_symm), desc="Test #2"):
inputs_t = jnp.take_along_axis(inputs, jnp.tile(symmetries[:, t], (batch_size, 1)), 1)
y = arqgps.apply(variables, inputs_t)
log_psi = log_psi.at[:, t].set(y)
log_psi_real = 0.5*logsumexp(2*log_psi.real, axis=-1, b=1/n_symm)
log_psi_imag = logsumexp(1j*log_psi.imag, axis=-1).imag
log_psi = log_psi_real+1j*log_psi_imag
np.testing.assert_allclose(log_psi_symm.real, log_psi.real)
# Test #3
# Probabilities from .conditionals should match those over
# L sites from ._conditional for constrained and unconstrained Hilbert space
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPSFull(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
inputs = hi.random_state(key_in, batch_size)
variables = arqgps_symm.init(key_ma, inputs)
psi_cond_test = jnp.zeros((batch_size, L, 2), dtype=jnp.float64)
for l in tqdm(range(L), desc="Test #3.1, unconstrained"):
p, variables = arqgps_symm.apply(variables, inputs, l, method=ARqGPSFull._conditional, mutable=True)
psi_cond_test = psi_cond_test.at[:, l, :].set(p)
psi_cond = arqgps_symm.apply(variables, inputs, method=ARqGPSFull.conditionals)
np.testing.assert_allclose(psi_cond_test, psi_cond)
hi = nk.hilbert.Spin(s=1/2, N=g.n_nodes, total_sz=0)
symmetries = g.automorphisms().to_array().T
apply_symmetries = lambda x: jnp.take(x, symmetries, axis=-1)
arqgps_symm = ARqGPSFull(
hi, M,
dtype=dtype,
apply_symmetries=apply_symmetries
)
inputs = hi.random_state(key_in, batch_size)
variables = arqgps_symm.init(key_ma, inputs)
psi_cond_test = jnp.zeros((batch_size, L, 2), dtype=jnp.float64)
for l in tqdm(range(L), desc="Test #3.2, constrained"):
p, variables = arqgps_symm.apply(variables, inputs, l, method=ARqGPSFull._conditional, mutable=True)
psi_cond_test = psi_cond_test.at[:, l, :].set(p)
psi_cond = arqgps_symm.apply(variables, inputs, method=ARqGPSFull.conditionals)
np.testing.assert_allclose(psi_cond_test, psi_cond) | 4,177 | 36.981818 | 199 | py |
GPSKet | GPSKet-master/tests/test_slater.py | import jax
import jax.numpy as jnp
import numpy as np
import netket as nk
from GPSKet.models import Slater, occupancies_to_electrons
from GPSKet.hilbert import FermionicDiscreteHilbert
from tqdm import tqdm
key_in, key_ma = jax.random.split(jax.random.PRNGKey(np.random.randint(0, 100)))
B = 16
L = 10
n_elec = [5, 5]
dtype = jnp.complex128
n_dets = 3
g = nk.graph.Chain(L, pbc=True)
symmetries = g.automorphisms().to_array().T
n_syms = symmetries.shape[-1]
def apply_symmetries(y):
return jax.vmap(lambda tau: jnp.take(tau, y), in_axes=-1, out_axes=-1)(symmetries)
hi = FermionicDiscreteHilbert(L, n_elec=n_elec)
x = hi.random_state(key_in, B)
# Test #1: evaluate Slater
# Amplitudes should be equal to:
# Ψ(x) = ∑_τ∑_k det(ɸ_k^↑(τx))det(ɸ_k^↓(τx))
ma = Slater(hi, n_dets, dtype=dtype, apply_symmetries=apply_symmetries)
variables = ma.init(key_ma, x)
log_psi_test = ma.apply(variables, x)
log_psi_true = np.zeros(B, dtype)
params = variables.unfreeze()['params']
orbitals_up = params['orbitals_up']
orbitals_down = params['orbitals_down']
y = occupancies_to_electrons(x.astype(jnp.int32), n_elec)
y_t = apply_symmetries(y)
for i in tqdm(range(B), desc="Test #1"):
sum_over_syms = 0.0+0j
for j in range(n_syms):
y_up = y_t[i,:n_elec[0],j]
y_down = y_t[i,n_elec[0]:,j]
sum_over_dets = 0.0+0j
for k in range(n_dets):
phi_up = orbitals_up[k, y_up, :]
phi_down = orbitals_down[k, y_down, :]
(s_up, log_det_up) = jnp.linalg.slogdet(phi_up)
(s_down, log_det_down) = jnp.linalg.slogdet(phi_down)
log_sd = log_det_up + log_det_down + jnp.log(s_up*s_down+0j)
sum_over_dets += np.exp(log_sd)
sum_over_syms += sum_over_dets
if np.issubdtype(dtype, np.complexfloating):
log_psi_true[i] = np.log(sum_over_syms)
else:
log_psi_true[i] = np.log(sum_over_syms).real
np.testing.assert_allclose(log_psi_test, log_psi_true) | 1,955 | 35.222222 | 86 | py |
Tree-Supervised | Tree-Supervised-main/train_tree_w_parallel_autocast.py | import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from models import *
import torch.nn.functional as F
from utils.autoaugment import CIFAR10Policy
from utils.cutout import Cutout
import torch.backends.cudnn as cudnn
import torch
from torch import nn
import os
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast
import time
from configs import *
parser = argparse.ArgumentParser(description='Self-Distillation CIFAR Training')
parser.add_argument('--model', default="tree_resnet20", type=str, help="tree_wide|tree_mobilev3|tree_resnet20")
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
parser.add_argument('--epoch', default=200, type=int, help="training epochs")
parser.add_argument('--loss_coefficient', default=0.3, type=float)
parser.add_argument('--feature_loss_coefficient', default=0.03, type=float)
parser.add_argument('--dataset_path', default="data", type=str)
# parser.add_argument('--autoaugment', default=True, type=bool)
parser.add_argument('--autoaugment', default=False, type=bool)
parser.add_argument('--temperature', default=3.0, type=float)
parser.add_argument('--batchsize', default=128 * 4, type=int)
parser.add_argument('--init_lr', default=0.1, type=float)
args = parser.parse_args()
# print(args)
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]
return loss
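# A quick sanity check for DistillKL (hypothetical values, not executed as
# part of the training run): identical student and teacher logits give
# matching soft targets and hence a loss of ~0, e.g.
# kl = DistillKL(T=3.0); z = torch.randn(4, 10); kl(z, z) -> ~0.0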
# best accuracies (module-level; each spawned process keeps its own copy)
best_acc = 0
best_single = 0
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size, init_method='env://')
def cleanup():
dist.destroy_process_group()
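# setup()/cleanup() follow the standard single-node DDP recipe: each spawned
# process rendezvous via the env:// method using the MASTER_ADDR/MASTER_PORT
# variables exported above, joins the NCCL process group under its rank, and
# tears the group down once training finishes.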
if args.autoaugment:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), CIFAR10Policy(), transforms.ToTensor(),
Cutout(n_holes=1, length=16),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
def train(rank, world_size):
torch.manual_seed(rank+1)
cudnn.benchmark = False
cudnn.deterministic = True
scaler = GradScaler()
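    # Standard torch.cuda.amp pattern used in the loop below: the forward pass
    # and loss run under autocast(), the loss is scaled before backward() to
    # avoid fp16 gradient underflow, and scaler.step()/scaler.update() unscale
    # the gradients and adapt the scale factor every iteration.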
if rank == 0:
print(args)
print(f"Running basic DDP example on rank {rank}.")
setup(rank, world_size)
args.batchsize = int(args.batchsize / world_size)
# ------------------------ load data
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 10
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset,
num_replicas=world_size,
rank=rank)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=args.batchsize,
shuffle=False,
num_workers=12,
pin_memory=True,
sampler=train_sampler
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.batchsize,
shuffle=False,
num_workers=12,
pin_memory=True,
)
# -------------------------------------
if args.model == 'tree_wide':
net = Wide_TreeResNet(28, 10, 0, num_class)
config = config_wide_resnet
elif args.model =='tree_mobilev3':
net = TreeMobileNetV3_Large(num_class)
config = config_tree_mobilev3
elif args.model == 'tree_resnet20':
net = TreeCifarResNet20_v1(num_class)
config = config_tree_resnet
else:
raise NameError
# create model and move it to GPU with id rank
net = net.to(rank)
net = DDP(net, device_ids=[rank])
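    # DDP all-reduces gradients across the world_size processes on every
    # backward pass; together with the per-rank batch split above this
    # reproduces the original global batch size.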
criterion = nn.CrossEntropyLoss()
kl_distill = DistillKL(args.temperature)
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=config.weight_decay, momentum=0.9)
optimizer.zero_grad()
for epoch in range(args.epoch):
train_start = time.time()
######################### train
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
if epoch in config.down_epoch:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
net.train()
sum_loss, total = 0.0, 0.0
for i, data in enumerate(trainloader, 0):
length = len(trainloader)
inputs, labels = data
inputs, labels = inputs.to(rank), labels.to(rank)
with autocast():
outputs = net(inputs)
ensemble = sum(outputs) / len(outputs)
ensemble.detach_()
# compute loss
loss = torch.FloatTensor([0.]).to(rank)
for output in outputs:
loss += criterion(output, labels) * (1 - args.loss_coefficient)
for other in outputs:
if other is not output:
# logits distillation
loss += kl_distill(output, other) * args.loss_coefficient / (len(outputs) - 1)
sum_loss += loss.item()
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
total += float(labels.size(0))
outputs.append(ensemble)
if rank == 0:
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
if i % 80 == 79:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: 4/4: %.2f%% 3/4: %.2f%% 2/4: %.2f%% 1/4: %.2f%%'
' Ensemble: %.2f%%' % (epoch, (i + epoch * length), sum_loss / (i + 1),
100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
if rank == 0:
print('train epoch time:',time.time()-train_start)
print({'train_acc': 100. * correct[4] / total, 'train_acc1': 100. * correct[0] / total,
'train_acc4': 100. * correct[3] / total, 'train_loss': sum_loss})
################################# test
if rank == 3:
with torch.no_grad():
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
total = 0.0
net.eval()
for data in testloader:
images, labels = data
images, labels = images.to(rank), labels.to(rank)
outputs = net(images)
ensemble = sum(outputs) / len(outputs)
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
total += float(labels.size(0))
                print('Test Set Accuracy: 4/4: %.4f%% 3/4: %.4f%% 2/4: %.4f%% 1/4: %.4f%%'
' Ensemble: %.4f%%' % (100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
print({'test_acc': 100. * correct[4] / total, 'test_acc1': 100. * correct[0] / total,
'test_acc4': 100. * correct[3] / total})
global best_single, best_acc
if correct[4] / total > best_acc:
best_acc = correct[4] / total
print("Best Accuracy Updated: ", best_acc * 100)
# torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
for i in range(4):
if correct[i] / total > best_single:
best_single = correct[i] / total
print("Best Single Accuracy Updated: ", best_single * 100)
# torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
print('train and test time:', time.time() - train_start)
print()
    if rank == 3:
        # best_acc / best_single are tracked in rank 3's process (the evaluation rank)
        print("Training Finished, TotalEPOCH=%d, Best Accuracy=%.4f, Best Single=%.4f" % (
            args.epoch, 100 * best_acc, 100 * best_single))
cleanup()
def run_demo(demo_fn, world_size):
mp.spawn(demo_fn,
args=(world_size,),
nprocs=world_size,
join=True)
if __name__ == "__main__":
run_demo(train, 4)
| 10,912 | 38.255396 | 134 | py |
Tree-Supervised | Tree-Supervised-main/train_tree_detach.py | import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from models.resnet_liu import *
import torch.nn.functional as F
from utils.autoaugment import CIFAR10Policy
from utils.cutout import Cutout
import torch.backends.cudnn as cudnn
import wandb
cudnn.benchmark = True
# set seed for reproducibility
torch.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description='Self-Distillation CIFAR Training')
parser.add_argument('--model', default="tree_resnet32", type=str,
                    help="name used for the checkpoint file (this script always builds tree_resnet32)")
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
# default 250 epoch
parser.add_argument('--epoch', default=270, type=int, help="training epochs")
parser.add_argument('--loss_coefficient', default=0.3, type=float)
parser.add_argument('--feature_loss_coefficient', default=0.03, type=float)
parser.add_argument('--dataset_path', default="data", type=str)
# parser.add_argument('--autoaugment', default=True, type=bool)
parser.add_argument('--autoaugment', default=False, type=bool)
parser.add_argument('--temperature', default=3.0, type=float)
parser.add_argument('--batchsize', default=128 * 2, type=int)
parser.add_argument('--init_lr', default=0.1, type=float)
args = parser.parse_args()
print(args)
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]
return loss
if args.autoaugment:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), CIFAR10Policy(), transforms.ToTensor(),
Cutout(n_holes=1, length=16),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 10
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=args.batchsize,
shuffle=True,
num_workers=4
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.batchsize,
shuffle=False,
num_workers=4
)
# if args.model == "tree_resnet":
net = TreeCifarResNet32_v1(num_class)
# if args.model == "resnet32":
# net = CifarResNet32(num_class)
net.to(device)
net = torch.nn.DataParallel(net)
criterion = nn.CrossEntropyLoss()
kl_distill = DistillKL(args.temperature)
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=5e-4, momentum=0.9)
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
def train(epoch):
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
if epoch in epoch_down:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
net.train()
sum_loss, total = 0.0, 0.0
for i, data in enumerate(trainloader, 0):
length = len(trainloader)
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs= net(inputs)
ensemble = sum(outputs) / len(outputs)
ensemble.detach_()
# compute loss
loss = torch.FloatTensor([0.]).to(device)
# teacher: -temp: swap; -temp: out4; -further: random; -further: mutual
# further er : distill by ensemble
# for out4 classifier
for output in outputs:
loss += criterion(output, labels) * (1 - args.loss_coefficient)
for other in outputs:
if other is not output:
                    # one-way logits distillation: the teacher logits are
                    # detached, so gradients flow only through this head's own logits
                    loss += kl_distill(output, other.detach()) * args.loss_coefficient/(len(outputs)-1)
sum_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
total += float(labels.size(0))
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
if i % 80 == 79:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: 4/4: %.2f%% 3/4: %.2f%% 2/4: %.2f%% 1/4: %.2f%%'
' Ensemble: %.2f%%' % (epoch, (i + epoch * length), sum_loss / (i + 1),
100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
wandb.log({'train_acc': 100. * correct[4] / total, 'train_acc1': 100. * correct[0] / total,
'train_acc4': 100. * correct[3] / total, 'train_loss': sum_loss})
def test(epoch):
with torch.no_grad():
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
total = 0.0
for data in testloader:
net.eval()
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs= net(images)
ensemble = sum(outputs) / len(outputs)
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
total += float(labels.size(0))
        print('Test Set Accuracy: 4/4: %.4f%% 3/4: %.4f%% 2/4: %.4f%% 1/4: %.4f%%'
' Ensemble: %.4f%%' % (100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
wandb.log({'test_acc': 100. * correct[4] / total, 'test_acc1': 100. * correct[0] / total,
'test_acc4': 100. * correct[3] / total})
global best_single, best_acc
if correct[4] / total > best_acc:
best_acc = correct[4] / total
print("Best Accuracy Updated: ", best_acc * 100)
torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
for i in range(4):
if correct[i] / total > best_single:
best_single = correct[i] / total
print("Best Single Accuracy Updated: ", best_single * 100)
torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
# scheduler.step()
# print('lr:', scheduler.get_last_lr())
print()
if __name__ == "__main__":
best_acc = 0
best_single = 0
wandb.init(project="distill")
    if not args.autoaugment:
        args.epoch = 200
        epoch_down = [60, 120, 180]
    else:
        epoch_down = [90, 160, 210, 250]
for epoch in range(args.epoch):
train(epoch)
test(epoch)
print("Training Finished, TotalEPOCH=%d, Best Accuracy=%.4f, Best Single=%.4f" % (
args.epoch, 100 * best_acc, 100 * best_single))
| 8,775 | 37.323144 | 120 | py |
Tree-Supervised | Tree-Supervised-main/train_origin_autocast.py | '''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import time
import torchvision
import torchvision.transforms as transforms
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast
import os
import argparse
from models import *
from configs import *
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
parser.add_argument('--model', default="resnet20", type=str, help="resnet20|resnet44|resnet110|mobilev3|mobilev2|wide")
# parser.add_argument('--weight_decay', default=1e-4, type=float, help='5e-4| 1e-4')
parser.add_argument('--gpus', default=4, type=int)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
scaler = GradScaler()
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(root='./data',train=True,download=True,transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data',train=False,download=True,transform=transform_test)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(root='./data',train=True,download=True,transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data',train=False,download=True,transform=transform_test)
num_class = 10
# Model
print('==> Building model..')
if args.model == 'mobilev3':
net = MobileNetV3_Large(num_class)
config = config_mobilev3
elif args.model == 'mobilev2':
net = MobileNetV2(num_class)
config = config_mobilev3
elif args.model == 'wide':
net = Wide_ResNet(28,10,0,num_class)
config = config_wide_resnet
elif args.model == 'resnet44':
net = CifarResNet44(num_class)
config = config_resnet
elif args.model == 'resnet110':
net = CifarResNet110(num_class)
config = config_resnet
elif args.model == 'resnet20':
net = CifarResNet20(num_class)
config = config_resnet
else:
raise NameError
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(0)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=config.weight_decay)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=config.batch_size*args.gpus, shuffle=True, num_workers=5)
testloader = torch.utils.data.DataLoader(
testset, batch_size=config.batch_size*args.gpus, shuffle=False, num_workers=5)
def adjust_lr(epoch):
if epoch in config.down_epoch:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
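# A roughly equivalent formulation with the built-in scheduler (a sketch only,
# not used in this script) would be:
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=list(config.down_epoch), gamma=0.1)
# with scheduler.step() called once per epoch instead of adjust_lr(epoch).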
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
with autocast():
outputs = net(inputs)
loss = criterion(outputs, targets)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
print('train acc:',correct/total*100)
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
print('test_loss:',test_loss)
print('test Acc:', correct/total*100)
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
print('best..')
best_acc = acc
for epoch in range(start_epoch, start_epoch+config.epoch):
start_t = time.time()
adjust_lr(epoch)
train(epoch)
if epoch<5:
print('train time:',time.time()-start_t)
test(epoch)
if epoch<5:
print('train and test time',time.time()-start_t)
print('Finished, best acc',best_acc) | 5,093 | 31.44586 | 110 | py |
Tree-Supervised | Tree-Supervised-main/train_origin.py | '''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import time
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from configs import *
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
parser.add_argument('--model', default="resnet20", type=str, help="resnet20|resnet44|resnet110|mobilev3|mobilev2|wide")
# parser.add_argument('--weight_decay', default=1e-4, type=float, help='5e-4| 1e-4')
parser.add_argument('--gpus', default=4, type=int)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(root='./data',train=True,download=True,transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data',train=False,download=True,transform=transform_test)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(root='./data',train=True,download=True,transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data',train=False,download=True,transform=transform_test)
num_class = 10
# Model
print('==> Building model..')
if args.model == 'mobilev3':
net = MobileNetV3_Large(num_class)
config = config_mobilev3
elif args.model == 'mobilev2':
net = MobileNetV2(num_class)
config = config_mobilev3
elif args.model == 'wide':
net = Wide_ResNet(28,10,0,num_class)
config = config_wide_resnet
elif args.model == 'resnet44':
net = CifarResNet44(num_class)
config = config_resnet
elif args.model == 'resnet110':
net = CifarResNet110(num_class)
config = config_resnet
elif args.model == 'resnet20':
net = CifarResNet20(num_class)
config = config_resnet
else:
raise NameError
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(0)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=config.weight_decay)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=config.batch_size*args.gpus, shuffle=True, num_workers=8)
testloader = torch.utils.data.DataLoader(
testset, batch_size=config.batch_size*args.gpus, shuffle=False, num_workers=8)
def adjust_lr(epoch):
if epoch in config.down_epoch:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
print('train acc:',correct/total*100)
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
print('test_loss:',test_loss)
print('test Acc:', correct/total*100)
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
print('best..')
best_acc = acc
for epoch in range(start_epoch, start_epoch+config.epoch):
start_t = time.time()
adjust_lr(epoch)
train(epoch)
if epoch<5:
print('train time:',time.time()-start_t)
test(epoch)
if epoch<5:
print('train and test time',time.time()-start_t)
print('Finished, best acc',best_acc) | 4,920 | 31.375 | 110 | py |
Tree-Supervised | Tree-Supervised-main/train_tree.py | import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from models import *
import torch.nn.functional as F
from utils.autoaugment import CIFAR10Policy
from utils.cutout import Cutout
import torch.backends.cudnn as cudnn
import wandb
import torch
from torch import nn
import time
from configs import *
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast
# set seed for reproducibility
torch.manual_seed(0)
cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description='Self-Distillation CIFAR Training')
parser.add_argument('--model', default="tree_resnet32", type=str, help="tree_resnet32|tree_resnet110|tree_wide|tree_mobilev3|tree_mobilev2")
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
parser.add_argument('--epoch', default=200, type=int, help="training epochs")
parser.add_argument('--loss_coefficient', default=0.3, type=float)
parser.add_argument('--feature_loss_coefficient', default=0.03, type=float)
parser.add_argument('--dataset_path', default="data", type=str)
# parser.add_argument('--autoaugment', default=True, type=bool)
parser.add_argument('--autoaugment', default=False, type=bool)
parser.add_argument('--temperature', default=3.0, type=float)
parser.add_argument('--gpus', default=1, type=int)
parser.add_argument('--init_lr', default=0.1, type=float)
# schedule
parser.add_argument('--schedule', default='step', type=str, help='step|cos')
args = parser.parse_args()
print(args)
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]
return loss
if args.autoaugment:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), CIFAR10Policy(), transforms.ToTensor(),
Cutout(n_holes=1, length=16),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 10
if args.model == 'tree_wide':
net = Wide_TreeResNet(28, 10, 0, num_class)
config = config_wide_resnet
elif args.model == 'tree_mobilev3':
net = TreeMobileNetV3_Large(num_class)
config = config_tree_mobilev3
elif args.model == 'tree_mobilev2':
net = TreeMobileNetV2(num_class)
config = config_tree_mobilev3
elif args.model == 'tree_resnet32':
net = TreeCifarResNet32_v1(num_class)
config = config_tree_resnet
elif args.model == 'tree_resnet110':
net = TreeCifarResNet110_v1(num_class)
config = config_tree_resnet
else:
raise NameError
net.to(device)
net = torch.nn.DataParallel(net)
criterion = nn.CrossEntropyLoss()
kl_distill = DistillKL(args.temperature)
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=config.weight_decay, momentum=0.9)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=config.batch_size * args.gpus,
shuffle=True,
num_workers=8
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=config.batch_size * args.gpus,
shuffle=False,
num_workers=8
)
def adjust_lr(epoch):
if epoch in config.down_epoch:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
def train(epoch):
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
global init
net.train()
sum_loss, total = 0.0, 0.0
for i, data in enumerate(trainloader, 0):
length = len(trainloader)
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
ensemble = sum(outputs) / len(outputs)
ensemble.detach_()
# if epoch == 0 and i == 0:
# # init the adaptation layers.
# layer_list = []
# teacher_feature_size = outputs_feature[0].size(1)
# for index in range(1, len(outputs_feature)):
# student_feature_size = outputs_feature[index].size(1)
# layer_list.append(nn.Linear(student_feature_size, teacher_feature_size))
# net.adaptation_layers = nn.ModuleList(layer_list)
# net.adaptation_layers.cuda()
# optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=5e-4, momentum=0.9)
# # define the optimizer here again so it will optimize the net.adaptation_layers
# init = True
# compute loss
loss = torch.FloatTensor([0.]).to(device)
for output in outputs:
loss += criterion(output, labels) * (1 - args.loss_coefficient)
for other in outputs:
if other is not output:
# logits distillation
loss += kl_distill(output, other) * args.loss_coefficient / (len(outputs) - 1)
sum_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
total += float(labels.size(0))
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
if i % 80 == 79:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: 4/4: %.2f%% 3/4: %.2f%% 2/4: %.2f%% 1/4: %.2f%%'
' Ensemble: %.2f%%' % (epoch, (i + epoch * length), sum_loss / (i + 1),
100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
# wandb.log({'train_acc': 100. * correct[4] / total, 'train_acc1': 100. * correct[0] / total,
# 'train_acc4': 100. * correct[3] / total, 'train_loss': sum_loss})
def test(epoch):
with torch.no_grad():
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
total = 0.0
for data in testloader:
net.eval()
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
ensemble = sum(outputs) / len(outputs)
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
total += float(labels.size(0))
        print('Test Set Accuracy: 4/4: %.4f%% 3/4: %.4f%% 2/4: %.4f%% 1/4: %.4f%%'
' Ensemble: %.4f%%' % (100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
# wandb.log({'test_acc': 100. * correct[4] / total, 'test_acc1': 100. * correct[0] / total,
# 'test_acc4': 100. * correct[3] / total})
global best_single, best_acc
if correct[4] / total > best_acc:
best_acc = correct[4] / total
print("Best Accuracy Updated: ", best_acc * 100)
# torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
for i in range(4):
if correct[i] / total > best_single:
best_single = correct[i] / total
print("Best Single Accuracy Updated: ", best_single * 100)
torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
print()
if __name__ == "__main__":
best_acc = 0
best_single = 0
# wandb.init(project="distill")
for epoch in range(config.epoch):
start_t = time.time()
adjust_lr(epoch)
train(epoch)
if epoch < 5:
print('train time:', time.time() - start_t)
test(epoch)
if epoch < 5:
print('train and test time:', time.time() - start_t)
print("Training Finished, TotalEPOCH=%d, Best Accuracy=%.4f, Best Single=%.4f" % (
args.epoch, 100 * best_acc, 100 * best_single))
| 9,770 | 36.872093 | 116 | py |
Tree-Supervised | Tree-Supervised-main/train_image.py | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# import torchvision.models as models
from models import *
# model_names = sorted(name for name in models.__dict__
# if name.islower() and not name.startswith("__")
# and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
# parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
# choices=model_names,
# help='model architecture: ' +
# ' | '.join(model_names) +
# ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=150, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
raise NotImplementedError
# model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
# model = models.__dict__[args.arch]()
model = TreeMobileNetV2_image()
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
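        # e.g. num_batches=391 -> '[{:3d}/391]', so display(12) renders '[ 12/391]'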
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
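    # step schedule: epochs 0-29 use args.lr, 30-59 args.lr/10, 60-89 args.lr/100, ...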
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
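        # correct[i][j] is True iff the rank-(i+1) prediction for sample j equals its
        # target, so summing the first k rows counts top-k hits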
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main() | 16,684 | 37.802326 | 91 | py |
Tree-Supervised | Tree-Supervised-main/train.py | import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from models.resnet_liu import *
import torch.nn.functional as F
from utils.autoaugment import CIFAR10Policy
from utils.cutout import Cutout
import torch.backends.cudnn as cudnn
cudnn.benchmark=True
GPU_double=2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description='Self-Distillation CIFAR Training')
parser.add_argument('--model', default="resnet18", type=str, help="resnet18|resnet34|resnet50|resnet101|resnet152|"
"wideresnet50|wideresnet101|resnext50|resnext101")
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
parser.add_argument('--epoch', default=250, type=int, help="training epochs")
parser.add_argument('--loss_coefficient', default=0.3, type=float)
parser.add_argument('--feature_loss_coefficient', default=0.03, type=float)
parser.add_argument('--dataset_path', default="data", type=str)
parser.add_argument('--autoaugment', default=True, type=bool)
# parser.add_argument('--autoaugment', default=False, type=bool)
parser.add_argument('--temperature', default=3.0, type=float)
parser.add_argument('--batchsize', default=128*GPU_double, type=int)
parser.add_argument('--init_lr', default=0.1, type=float)
args = parser.parse_args()
print(args)
def CrossEntropy(outputs, targets):
log_softmax_outputs = F.log_softmax(outputs/args.temperature, dim=1)
softmax_targets = F.softmax(targets/args.temperature, dim=1)
return -(log_softmax_outputs * softmax_targets).sum(dim=1).mean()
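# Soft cross-entropy between temperature-scaled distributions. Up to the constant
# teacher entropy this equals KL(teacher || student), so its gradients w.r.t.
# `outputs` match a KL distillation loss; note it does not rescale by T**2 the
# way the DistillKL module in the sibling scripts does.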
if args.autoaugment:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), CIFAR10Policy(), transforms.ToTensor(),
Cutout(n_holes=1, length=16),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=args.batchsize,
shuffle=True,
num_workers=4
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.batchsize,
shuffle=False,
num_workers=4
)
if args.model == "resnet18":
net = ResNet18(100)
if args.model == "resnet34":
net = resnet34()
if args.model == "resnet50":
net = resnet50()
if args.model == "resnet101":
net = resnet101()
if args.model == "resnet152":
net = resnet152()
if args.model == "wideresnet50":
net = wide_resnet50_2()
if args.model == "wideresnet101":
net = wide_resnet101_2()
if args.model == "resnext50_32x4d":
    net = resnext50_32x4d()  # was resnet18() - copy-paste slip
if args.model == "resnext101_32x8d":
net = resnext101_32x8d()
net.to(device)
net = torch.nn.DataParallel(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=5e-4, momentum=0.9)
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
init = False
if __name__ == "__main__":
best_acc = 0
for epoch in range(args.epoch):
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
if epoch in [args.epoch // 3, args.epoch * 2 // 3, args.epoch - 10]:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
net.train()
sum_loss, total = 0.0, 0.0
for i, data in enumerate(trainloader, 0):
length = len(trainloader)
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs, outputs_feature = net(inputs)
            # the ensemble averages all but the last head; the commented line
            # divided the 3 summed terms by len(outputs) == 4:
            # ensemble = sum(outputs[:-1])/len(outputs)
            ensemble = sum(outputs[:-1])/(len(outputs)-1)
            ensemble.detach_()
if init is False:
# init the adaptation layers.
# we add feature adaptation layers here to soften the influence from feature distillation loss
# the feature distillation in our conference version : | f1-f2 | ^ 2
# the feature distillation in the final version : |Fully Connected Layer(f1) - f2 | ^ 2
layer_list = []
teacher_feature_size = outputs_feature[0].size(1)
for index in range(1, len(outputs_feature)):
student_feature_size = outputs_feature[index].size(1)
layer_list.append(nn.Linear(student_feature_size, teacher_feature_size))
net.adaptation_layers = nn.ModuleList(layer_list)
net.adaptation_layers.cuda()
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=5e-4, momentum=0.9)
# define the optimizer here again so it will optimize the net.adaptation_layers
init = True
# compute loss
loss = torch.FloatTensor([0.]).to(device)
# for deepest classifier
loss += criterion(outputs[0], labels)
teacher_output = outputs[0].detach()
teacher_feature = outputs_feature[0].detach()
# for shallow classifiers
for index in range(1, len(outputs)):
# logits distillation
loss += CrossEntropy(outputs[index], teacher_output) * args.loss_coefficient
loss += criterion(outputs[index], labels) * (1 - args.loss_coefficient)
# feature distillation
# if index != 1:
# loss += torch.dist(net.adaptation_layers[index-1](outputs_feature[index]), teacher_feature) * \
# args.feature_loss_coefficient
# the feature distillation loss will not be applied to the shallowest classifier
sum_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
total += float(labels.size(0))
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
if i%40==39:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: 4/4: %.2f%% 3/4: %.2f%% 2/4: %.2f%% 1/4: %.2f%%'
' Ensemble: %.2f%%' % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1),
100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
print("Waiting Test!")
with torch.no_grad():
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
total = 0.0
for data in testloader:
net.eval()
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs, outputs_feature = net(images)
ensemble = sum(outputs[:-1]) / (len(outputs)-1)
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
total += float(labels.size(0))
            print('Test Set Accuracy: 4/4: %.4f%% 3/4: %.4f%% 2/4: %.4f%% 1/4: %.4f%%'
' Ensemble: %.4f%%' % (100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
if correct[4] / total > best_acc:
best_acc = correct[4]/total
print("Best Accuracy Updated: ", best_acc * 100)
torch.save(net.state_dict(), "./checkpoints/"+str(args.model)+".pth")
# scheduler.step()
# print('lr:', scheduler.get_last_lr())
print()
print("Training Finished, TotalEPOCH=%d, Best Accuracy=%.3f" % (args.epoch, best_acc))
| 9,677 | 42.013333 | 117 | py |
Tree-Supervised | Tree-Supervised-main/train_tree_w_parallel.py | import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from models import *
import torch.nn.functional as F
from utils.autoaugment import CIFAR10Policy
from utils.cutout import Cutout
import torch.backends.cudnn as cudnn
import torch
from torch import nn
import os
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import time
from configs import *
parser = argparse.ArgumentParser(description='Self-Distillation CIFAR Training')
parser.add_argument('--model', default="tree_resnet32", type=str, help="resnet18|tree_resnet32|tree_wide|tree_mobilev3|mobilev3|wide")
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
parser.add_argument('--epoch', default=200, type=int, help="training epochs")
parser.add_argument('--loss_coefficient', default=0.3, type=float)
parser.add_argument('--feature_loss_coefficient', default=0.03, type=float)
parser.add_argument('--dataset_path', default="data", type=str)
# parser.add_argument('--autoaugment', default=True, type=bool)
parser.add_argument('--autoaugment', default=False, type=bool)
parser.add_argument('--temperature', default=3.0, type=float)
parser.add_argument('--batchsize', default=128 * 4, type=int)
parser.add_argument('--init_lr', default=0.1, type=float)
args = parser.parse_args()
# print(args)
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]  # 'sum' replaces the deprecated size_average=False
return loss
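# Minimal usage sketch (hypothetical tensors): both logit sets are softened with
# temperature T, and the KL is rescaled by T**2 so its gradient magnitude stays
# comparable to the hard-label loss (Hinton et al., 2015):
#   kd = DistillKL(T=3.0)
#   loss = kd(student_logits, teacher_logits)  # scalar, averaged over the batch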
# best accuracies tracked across epochs; the per-process seed is set inside train()
best_acc = 0
best_single = 0
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size, init_method='env://')
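    # every spawned worker joins the same NCCL group through the env:// rendezvous
    # at localhost:12355; (rank, world_size) identify it within that group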
def cleanup():
dist.destroy_process_group()
if args.autoaugment:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), CIFAR10Policy(), transforms.ToTensor(),
Cutout(n_holes=1, length=16),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
def train(rank, world_size):
torch.manual_seed(rank+1)
cudnn.benchmark = False
cudnn.deterministic = True
if rank == 0:
print(args)
print(f"Running basic DDP example on rank {rank}.")
setup(rank, world_size)
args.batchsize = int(args.batchsize / world_size)
# ------------------------ load data
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 10
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset,
num_replicas=world_size,
rank=rank)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=args.batchsize,
shuffle=False,
num_workers=12,
pin_memory=True,
sampler=train_sampler
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.batchsize,
shuffle=False,
num_workers=12,
pin_memory=True,
)
# -------------------------------------
if args.model == 'tree_wide':
net = Wide_TreeResNet(28, 10, 0, num_class)
config = config_wide_resnet
elif args.model =='tree_mobilev3':
net = TreeMobileNetV3_Large(num_class)
config = config_tree_mobilev3
elif args.model == 'tree_resnet20':
net = TreeCifarResNet20_v1(num_class)
config = config_tree_resnet
else:
raise NameError
# create model and move it to GPU with id rank
net = net.to(rank)
net = DDP(net, device_ids=[rank])
criterion = nn.CrossEntropyLoss()
kl_distill = DistillKL(args.temperature)
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=config.weight_decay, momentum=0.9)
optimizer.zero_grad()
for epoch in range(args.epoch):
train_start = time.time()
######################### train
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
if epoch in config.down_epoch:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
net.train()
sum_loss, total = 0.0, 0.0
for i, data in enumerate(trainloader, 0):
length = len(trainloader)
inputs, labels = data
inputs, labels = inputs.to(rank), labels.to(rank)
outputs = net(inputs)
ensemble = sum(outputs) / len(outputs)
ensemble.detach_()
# compute loss
loss = torch.FloatTensor([0.]).to(rank)
for output in outputs:
loss += criterion(output, labels) * (1 - args.loss_coefficient)
for other in outputs:
if other is not output:
# logits distillation
loss += kl_distill(output, other) * args.loss_coefficient / (len(outputs) - 1)
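            # per head: (1 - loss_coefficient) * CE(labels) plus loss_coefficient *
            # mean KL from the other heads, i.e. deep mutual learning across branches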
sum_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
total += float(labels.size(0))
outputs.append(ensemble)
if rank == 0:
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
if i % 80 == 79:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: 4/4: %.2f%% 3/4: %.2f%% 2/4: %.2f%% 1/4: %.2f%%'
' Ensemble: %.2f%%' % (epoch, (i + epoch * length), sum_loss / (i + 1),
100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
if rank == 0:
print('train epoch time:',time.time()-train_start)
print({'train_acc': 100. * correct[4] / total, 'train_acc1': 100. * correct[0] / total,
'train_acc4': 100. * correct[3] / total, 'train_loss': sum_loss})
################################# test
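        # run the full test pass on a single worker (rank 3 here) so it is not
        # duplicated across the four DDP processes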
if rank == 3:
with torch.no_grad():
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
total = 0.0
net.eval()
for data in testloader:
images, labels = data
images, labels = images.to(rank), labels.to(rank)
outputs = net(images)
ensemble = sum(outputs) / len(outputs)
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
total += float(labels.size(0))
                print('Test Set Accuracy: 4/4: %.4f%% 3/4: %.4f%% 2/4: %.4f%% 1/4: %.4f%%'
' Ensemble: %.4f%%' % (100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
print({'test_acc': 100. * correct[4] / total, 'test_acc1': 100. * correct[0] / total,
'test_acc4': 100. * correct[3] / total})
global best_single, best_acc
if correct[4] / total > best_acc:
best_acc = correct[4] / total
print("Best Accuracy Updated: ", best_acc * 100)
# torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
for i in range(4):
if correct[i] / total > best_single:
best_single = correct[i] / total
print("Best Single Accuracy Updated: ", best_single * 100)
# torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
print('train and test time:', time.time() - train_start)
print()
if rank == 0:
print("Training Finished, TotalEPOCH=%d, Best Accuracy=%.4f, Best Single=%.4f" % (
args.epoch, 100 * best_acc, 100 * best_single))
cleanup()
def run_demo(demo_fn, world_size):
mp.spawn(demo_fn,
args=(world_size,),
nprocs=world_size,
join=True)
if __name__ == "__main__":
run_demo(train, 4)
| 10,690 | 38.305147 | 134 | py |
Tree-Supervised | Tree-Supervised-main/train_bi_mutual.py | import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from models.resnet_liu import *
import torch.nn.functional as F
from utils.autoaugment import CIFAR10Policy
from utils.cutout import Cutout
import torch.backends.cudnn as cudnn
import wandb
cudnn.benchmark = True
# set seed for reproducibility
torch.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description='Self-Distillation CIFAR Training')
parser.add_argument('--model', default="resnet18", type=str, help="resnet18|resnet34|resnet50|resnet101|resnet152|"
"wideresnet50|wideresnet101|resnext50|resnext101")
parser.add_argument('--dataset', default="cifar100", type=str, help="cifar100|cifar10")
# default 250 epoch
parser.add_argument('--epoch', default=350, type=int, help="training epochs")
parser.add_argument('--loss_coefficient', default=0.3, type=float)
parser.add_argument('--feature_loss_coefficient', default=0.03, type=float)
parser.add_argument('--dataset_path', default="data", type=str)
parser.add_argument('--autoaugment', default=True, type=bool)
# parser.add_argument('--autoaugment', default=False, type=bool)
parser.add_argument('--temperature', default=3.0, type=float)
parser.add_argument('--batchsize', default=128 * 2, type=int)
parser.add_argument('--init_lr', default=0.1, type=float)
args = parser.parse_args()
print(args)
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]  # 'sum' replaces the deprecated size_average=False
return loss
if args.autoaugment:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), CIFAR10Policy(), transforms.ToTensor(),
Cutout(n_holes=1, length=16),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, fill=128),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 100
elif args.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=True,
download=True,
transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=args.dataset_path,
train=False,
download=True,
transform=transform_test
)
num_class = 10
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=args.batchsize,
shuffle=True,
num_workers=4
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.batchsize,
shuffle=False,
num_workers=4
)
if args.model == "resnet18":
net = BiResNet18(num_class)
if args.model == "resnet34":
net = resnet34()
if args.model == "resnet50":
net = resnet50()
if args.model == "resnet101":
net = resnet101()
if args.model == "resnet152":
net = resnet152()
if args.model == "wideresnet50":
net = wide_resnet50_2()
if args.model == "wideresnet101":
net = wide_resnet101_2()
if args.model == "resnext50_32x4d":
    net = resnext50_32x4d()  # was resnet18() - copy-paste slip
if args.model == "resnext101_32x8d":
net = resnext101_32x8d()
net.to(device)
net = torch.nn.DataParallel(net)
criterion = nn.CrossEntropyLoss()
kl_distill = DistillKL(args.temperature)
optimizer = optim.SGD(net.parameters(), lr=args.init_lr, weight_decay=5e-4, momentum=0.9)
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
def train(epoch):
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
    if epoch in [90, 160, 210, 250]:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10
net.train()
sum_loss, total = 0.0, 0.0
for i, data in enumerate(trainloader, 0):
length = len(trainloader)
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs, outputs_feature = net(inputs)
ensemble = sum(outputs) / len(outputs)
ensemble.detach_()
# compute loss
loss = torch.FloatTensor([0.]).to(device)
        # teacher variants explored: swapped pairs at temperature T, out4 as the
        # teacher, a random teacher, and full mutual distillation; a further option
        # is distilling every head from the ensemble. Here: full mutual across heads.
for output in outputs:
loss += criterion(output, labels) * (1 - args.loss_coefficient)
for other in outputs:
if other is not output:
# logits distillation
loss += kl_distill(output, other) * args.loss_coefficient/(len(outputs)-1)
sum_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
total += float(labels.size(0))
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
if i % 80 == 79:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: 4/4: %.2f%% 3/4: %.2f%% 2/4: %.2f%% 1/4: %.2f%%'
' Ensemble: %.2f%%' % (epoch, (i + epoch * length), sum_loss / (i + 1),
100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
wandb.log({'train_acc': 100. * correct[4] / total, 'train_acc1': 100. * correct[0] / total,
'train_acc4': 100. * correct[3] / total, 'train_loss': sum_loss})
def test(epoch):
with torch.no_grad():
correct = [0 for _ in range(5)]
predicted = [0 for _ in range(5)]
total = 0.0
for data in testloader:
net.eval()
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs, outputs_feature = net(images)
ensemble = sum(outputs) / len(outputs)
outputs.append(ensemble)
for classifier_index in range(len(outputs)):
_, predicted[classifier_index] = torch.max(outputs[classifier_index].data, 1)
correct[classifier_index] += float(predicted[classifier_index].eq(labels.data).cpu().sum())
total += float(labels.size(0))
        print('Test Set Accuracy: 4/4: %.4f%% 3/4: %.4f%% 2/4: %.4f%% 1/4: %.4f%%'
' Ensemble: %.4f%%' % (100 * correct[0] / total, 100 * correct[1] / total,
100 * correct[2] / total, 100 * correct[3] / total,
100 * correct[4] / total))
wandb.log({'test_acc': 100. * correct[4] / total, 'test_acc1': 100. * correct[0] / total,
'test_acc4': 100. * correct[3] / total})
global best_single, best_acc
if correct[4] / total > best_acc:
best_acc = correct[4] / total
print("Best Accuracy Updated: ", best_acc * 100)
torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
for i in range(4):
if correct[i] / total > best_single:
best_single = correct[i] / total
print("Best Single Accuracy Updated: ", best_single * 100)
torch.save(net.state_dict(), "./checkpoints/" + str(args.model) + ".pth")
# scheduler.step()
# print('lr:', scheduler.get_last_lr())
print()
if __name__ == "__main__":
best_acc = 0
best_single = 0
wandb.init(project="distill")
for epoch in range(args.epoch):
train(epoch)
test(epoch)
print("Training Finished, TotalEPOCH=%d, Best Accuracy=%.4f, Best Single=%.4f" % (
args.epoch, 100 * best_acc, 100 * best_single))
| 9,038 | 36.978992 | 116 | py |
Tree-Supervised | Tree-Supervised-main/models/resnet_liu.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion *
planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class TreeCifarResNet_v1(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, image_channels=3, batchnorm=True):
"""layer 1 as root version"""
super(TreeCifarResNet_v1, self).__init__()
if batchnorm:
self.conv1 = nn.Conv2d(image_channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
else:
self.conv1 = nn.Conv2d(image_channels, 16, kernel_size=3, stride=1, padding=1, bias=True)
self.bn1 = nn.Sequential()
self.layer1 = nn.ModuleList([self._make_blocks(block, 16, 16, num_blocks[0], stride=1)])
self.layer2 = nn.ModuleList(
[self._make_blocks(block, 16 * block.expansion, 32, num_blocks[1], stride=2) for _ in range(2)])
self.layer3 = nn.ModuleList(
[self._make_blocks(block, 32 * block.expansion, 64, num_blocks[2], stride=2) for _ in range(4)])
self.linears = nn.ModuleList([nn.Linear(64 * block.expansion, num_classes) for _ in range(4)])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_blocks(self, block, in_planes, out_planes, num_blocks, stride):
layers = []
layers.append(block(in_planes, out_planes, stride))
for i in range(num_blocks - 1):
layers.append(block(out_planes * block.expansion, out_planes, 1))
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
# out = self.layer3(out)
res = [out1, out2, out3, out4]
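        # tree topology: the shared root feeds layer2[0] and layer2[1]; each of those
        # feeds two layer3 leaves, so out1/out2 share layer2[0] and out3/out4 share
        # layer2[1]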
for i in range(len(res)):
res[i] = F.avg_pool2d(res[i], 8)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.linears[i](res[i])
return res
class TreeResNet18_(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, image_channels=3, batchnorm=True):
"""layer 1 as root version"""
super(TreeResNet18_, self).__init__()
if batchnorm:
self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
else:
self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=3, stride=1, padding=1, bias=True)
self.bn1 = nn.Sequential()
self.layer1 = nn.ModuleList(
[nn.Sequential(block(64, 64, 1))])
self.layer2 = nn.ModuleList(
[nn.Sequential(block(64, 64, 1),
block(64, 128, 2),
block(128, 128, 1)) for _ in range(2)])
self.layer3 = nn.ModuleList(
[nn.Sequential(block(128, 256, 2),
block(256, 256, 1),
block(256, 512, 2),
block(512, 512, 1)) for _ in range(4)])
self.linears = nn.ModuleList([nn.Linear(512, num_classes) for _ in range(4)])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_blocks(self, block, in_planes, out_planes, num_blocks, stride):
layers = []
layers.append(block(in_planes, out_planes, stride))
for i in range(num_blocks - 1):
layers.append(block(out_planes * block.expansion, out_planes, 1))
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
# out = self.layer3(out)
res = [out1, out2, out3, out4]
for i in range(len(res)):
res[i] = F.avg_pool2d(res[i], 4)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.linears[i](res[i])
return res
class TreeResNet50_(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, image_channels=3, batchnorm=True):
"""layer 1 as root version"""
super(TreeResNet50_, self).__init__()
if batchnorm:
self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
else:
self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=3, stride=1, padding=1, bias=True)
self.bn1 = nn.Sequential()
self.layer1 = nn.ModuleList(
[nn.Sequential(block(64, 64, 1), block(64*block.expansion, 64, 1), block(64*block.expansion, 64, 1))])
self.layer2 = nn.ModuleList(
[nn.Sequential(block(64*block.expansion, 128, 2), block(128*block.expansion, 128, 1), block(128*block.expansion, 128, 1),
block(128*block.expansion, 128, 1), block(128*block.expansion, 256, 2),
block(256*block.expansion, 256, 1)) for _ in range(2)])
self.layer3 = nn.ModuleList(
[nn.Sequential(
block(256*block.expansion, 256, 1), block(256*block.expansion, 256, 1), block(256*block.expansion, 256, 1), block(256*block.expansion, 256, 1),
block(256*block.expansion, 512, 2),
block(512*block.expansion, 512, 1), block(512*block.expansion, 512, 1)) for _ in range(4)])
self.linears = nn.ModuleList([nn.Linear(512*block.expansion, num_classes) for _ in range(4)])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_blocks(self, block, in_planes, out_planes, num_blocks, stride):
layers = []
layers.append(block(in_planes, out_planes, stride))
for i in range(num_blocks - 1):
layers.append(block(out_planes * block.expansion, out_planes, 1))
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
# out = self.layer3(out)
res = [out1, out2, out3, out4]
for i in range(len(res)):
res[i] = F.avg_pool2d(res[i], 4)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.linears[i](res[i])
return res
class CifarResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, image_channels=3, batchnorm=True):
super(CifarResNet, self).__init__()
self.in_planes = 16
if batchnorm:
self.conv1 = nn.Conv2d(image_channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
else:
self.conv1 = nn.Conv2d(image_channels, 16, kernel_size=3, stride=1, padding=1, bias=True)
self.bn1 = nn.Sequential()
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def CifarResNet20(num_classes):
return CifarResNet(BasicBlock, [3, 3, 3], num_classes)
def CifarResNet32(num_classes):
return CifarResNet(BasicBlock, [5, 5, 5], num_classes)
def CifarResNet44(num_classes):
return CifarResNet(BasicBlock, [7, 7, 7], num_classes)
def CifarResNet56(num_classes):
return CifarResNet(BasicBlock, [9, 9, 9], num_classes)
def CifarResNet110(num_classes):
return CifarResNet(BasicBlock, [18, 18, 18], num_classes)
def TreeCifarResNet32_v1(num_classes):
return TreeCifarResNet_v1(BasicBlock, [5, 5, 5], num_classes)
def TreeCifarResNet20_v1(num_classes):
return TreeCifarResNet_v1(BasicBlock, [3, 3, 3], num_classes)
def TreeCifarResNet44_v1(num_classes):
return TreeCifarResNet_v1(BasicBlock, [7, 7, 7], num_classes)
def TreeCifarResNet56_v1(num_classes):
    # NOTE: unlike the 20/32/44 variants above, these two use Bottleneck blocks,
    # so the depth is ~9n+2 convolutions rather than the 6n+2 the names imply.
    return TreeCifarResNet_v1(Bottleneck, [9, 9, 9], num_classes)
def TreeCifarResNet110_v1(num_classes):
    return TreeCifarResNet_v1(Bottleneck, [18, 18, 18], num_classes)
def TreeResNet18(num_classes):
return TreeResNet18_(BasicBlock, [2, 2, 2, 2], num_classes)
def TreeResNet50(num_classes):
return TreeResNet50_(Bottleneck, [3, 4, 6, 3], num_classes)
def ResNet18(num_classes):
return ResNet(BasicBlock, [2, 2, 2, 2], num_classes)
def ResNet34(num_classes):
return ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
    net = TreeResNet50(100)
    y = net(torch.randn(1, 3, 32, 32))
    print(sum(p.numel() for p in net.parameters()))
    print(y[0].size())
if __name__ == "__main__":
    # guarded so that importing models.resnet_liu (as the training scripts do)
    # no longer runs a forward pass as an import side effect
    test()
| 14,660 | 37.379581 | 159 | py |
Tree-Supervised | Tree-Supervised-main/models/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
def ScalaNet(channel_in, channel_out, size):
return nn.Sequential(
nn.Conv2d(channel_in, 128, kernel_size=1, stride=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=size, stride=size),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, channel_out, kernel_size=1, stride=1),
nn.BatchNorm2d(channel_out),
nn.ReLU(),
nn.AvgPool2d(4, 4)
)
class SepConv(nn.Module):
def __init__(self, channel_in, channel_out, kernel_size=3, stride=2, padding=1, affine=True):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=stride, padding=padding,
groups=channel_in, bias=False),
nn.Conv2d(channel_in, channel_in, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(channel_in, affine=affine),
nn.ReLU(inplace=False),
nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=1, padding=padding, groups=channel_in,
bias=False),
nn.Conv2d(channel_in, channel_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(channel_out, affine=affine),
nn.ReLU(inplace=False),
)
def forward(self, x):
return self.op(x)
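# SepConv stacks two depthwise-separable units (depthwise kxk + pointwise 1x1 +
# BN + ReLU), the first one strided, giving the early-exit heads a cheap way to
# downsample and widen intermediate features.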
def dowmsampleBottleneck(channel_in, channel_out, stride=2):
return nn.Sequential(
nn.Conv2d(channel_in, 128, kernel_size=1, stride=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, channel_out, kernel_size=1, stride=1),
nn.BatchNorm2d(channel_out),
nn.ReLU(),
)
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=100, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.scala1 = nn.Sequential(
SepConv(
channel_in=64 * block.expansion,
channel_out=128 * block.expansion
),
SepConv(
channel_in=128 * block.expansion,
channel_out=256 * block.expansion
),
SepConv(
channel_in=256 * block.expansion,
channel_out=512 * block.expansion
),
nn.AvgPool2d(4, 4)
)
self.scala2 = nn.Sequential(
SepConv(
channel_in=128 * block.expansion,
channel_out=256 * block.expansion,
),
SepConv(
channel_in=256 * block.expansion,
channel_out=512 * block.expansion,
),
nn.AvgPool2d(4, 4)
)
self.scala3 = nn.Sequential(
SepConv(
channel_in=256 * block.expansion,
channel_out=512 * block.expansion,
),
nn.AvgPool2d(4, 4)
)
self.scala4 = nn.AvgPool2d(4, 4)
self.attention1 = nn.Sequential(
SepConv(
channel_in=64 * block.expansion,
channel_out=64 * block.expansion
),
nn.BatchNorm2d(64 * block.expansion),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Sigmoid()
)
self.attention2 = nn.Sequential(
SepConv(
channel_in=128 * block.expansion,
channel_out=128 * block.expansion
),
nn.BatchNorm2d(128 * block.expansion),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Sigmoid()
)
self.attention3 = nn.Sequential(
SepConv(
channel_in=256 * block.expansion,
channel_out=256 * block.expansion
),
nn.BatchNorm2d(256 * block.expansion),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Sigmoid()
)
self.fc1 = nn.Linear(512 * block.expansion, num_classes)
self.fc2 = nn.Linear(512 * block.expansion, num_classes)
self.fc3 = nn.Linear(512 * block.expansion, num_classes)
self.fc4 = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
feature_list = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
fea1 = self.attention1(x)
fea1 = fea1 * x
feature_list.append(fea1)
x = self.layer2(x)
fea2 = self.attention2(x)
fea2 = fea2 * x
feature_list.append(fea2)
x = self.layer3(x)
fea3 = self.attention3(x)
fea3 = fea3 * x
feature_list.append(fea3)
x = self.layer4(x)
feature_list.append(x)
out1_feature = self.scala1(feature_list[0]).view(x.size(0), -1)
out2_feature = self.scala2(feature_list[1]).view(x.size(0), -1)
out3_feature = self.scala3(feature_list[2]).view(x.size(0), -1)
out4_feature = self.scala4(feature_list[3]).view(x.size(0), -1)
out1 = self.fc1(out1_feature)
out2 = self.fc2(out2_feature)
out3 = self.fc3(out3_feature)
out4 = self.fc4(out4_feature)
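        # out1..out3 are early exits: attention-gated intermediate features reduced by
        # the SepConv "scala" stacks to the common 512*expansion width; out4 is the
        # ordinary deepest exit. Returned deepest-first as [out4, out3, out2, out1].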
return [out4, out3, out2, out1], [out4_feature, out3_feature, out2_feature, out1_feature]
class BiResNet(nn.Module):
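    # NOTE: currently an exact twin of ResNet above; kept as a separate class so the
    # bi-branch (BiResNet*) variants can diverge without touching the base model.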
def __init__(self, block, layers, num_classes=100, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(BiResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.scala1 = nn.Sequential(
SepConv(
channel_in=64 * block.expansion,
channel_out=128 * block.expansion
),
SepConv(
channel_in=128 * block.expansion,
channel_out=256 * block.expansion
),
SepConv(
channel_in=256 * block.expansion,
channel_out=512 * block.expansion
),
nn.AvgPool2d(4, 4)
)
self.scala2 = nn.Sequential(
SepConv(
channel_in=128 * block.expansion,
channel_out=256 * block.expansion,
),
SepConv(
channel_in=256 * block.expansion,
channel_out=512 * block.expansion,
),
nn.AvgPool2d(4, 4)
)
self.scala3 = nn.Sequential(
SepConv(
channel_in=256 * block.expansion,
channel_out=512 * block.expansion,
),
nn.AvgPool2d(4, 4)
)
self.scala4 = nn.AvgPool2d(4, 4)
self.attention1 = nn.Sequential(
SepConv(
channel_in=64 * block.expansion,
channel_out=64 * block.expansion
),
nn.BatchNorm2d(64 * block.expansion),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Sigmoid()
)
self.attention2 = nn.Sequential(
SepConv(
channel_in=128 * block.expansion,
channel_out=128 * block.expansion
),
nn.BatchNorm2d(128 * block.expansion),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Sigmoid()
)
self.attention3 = nn.Sequential(
SepConv(
channel_in=256 * block.expansion,
channel_out=256 * block.expansion
),
nn.BatchNorm2d(256 * block.expansion),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear'),
nn.Sigmoid()
)
self.fc1 = nn.Linear(512 * block.expansion, num_classes)
self.fc2 = nn.Linear(512 * block.expansion, num_classes)
self.fc3 = nn.Linear(512 * block.expansion, num_classes)
self.fc4 = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
feature_list = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
fea1 = self.attention1(x)
fea1 = fea1 * x
feature_list.append(fea1)
x = self.layer2(x)
fea2 = self.attention2(x)
fea2 = fea2 * x
feature_list.append(fea2)
x = self.layer3(x)
fea3 = self.attention3(x)
fea3 = fea3 * x
feature_list.append(fea3)
x = self.layer4(x)
feature_list.append(x)
out1_feature = self.scala1(feature_list[0]).view(x.size(0), -1)
out2_feature = self.scala2(feature_list[1]).view(x.size(0), -1)
out3_feature = self.scala3(feature_list[2]).view(x.size(0), -1)
out4_feature = self.scala4(feature_list[3]).view(x.size(0), -1)
out1 = self.fc1(out1_feature)
out2 = self.fc2(out2_feature)
out3 = self.fc3(out3_feature)
out4 = self.fc4(out4_feature)
return [out4, out3, out2, out1], [out4_feature, out3_feature, out2_feature, out1_feature]
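# --- illustrative usage sketch (not from the original repo) ---
# A minimal check of the multi-exit forward above, assuming CIFAR-style 32x32
# inputs (the AvgPool2d(4, 4) heads imply 4x4 final feature maps), a
# torchvision-style `import torch` at the top of this file, and the resnet18
# factory defined below. The model returns four logits deepest-exit-first,
# together with the pooled features used for feature-level self-distillation.
def _demo_multi_exit():
    net = resnet18(num_classes=10)
    x = torch.randn(2, 3, 32, 32)
    logits, features = net(x)
    assert len(logits) == len(features) == 4
    print([tuple(o.shape) for o in logits])  # four (2, 10) tensors
# _demo_multi_exit()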
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except that the number of bottleneck channels
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions is unchanged, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except that the number of bottleneck channels
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions is unchanged, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 24,617 | 36.47032 | 116 | py |
Tree-Supervised | Tree-Supervised-main/models/mobilenetv2.py | '''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride == 1 else out
return out
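# Worked example of the inverted-residual arithmetic above (illustrative):
# with in_planes=32, out_planes=16, expansion=6, stride=1 the block maps
# (N, 32, H, W) -1x1-> (N, 192, H, W) -3x3 depthwise, groups=192-> (N, 192, H, W)
# -1x1-> (N, 16, H, W), and adds a 1x1-conv shortcut because stride == 1
# while in_planes != out_planes.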
class MobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 2), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.linear = nn.Linear(1280, num_classes)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1] * (num_blocks - 1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
out = F.avg_pool2d(out, 7)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class TreeMobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [
[(1, 16, 1, 1),
(6, 24, 2, 1)], # NOTE: change stride 2 -> 1 for CIFAR10
[(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1)],
[(6, 160, 3, 2),
(6, 320, 1, 1)]]
def __init__(self, num_classes=10):
super(TreeMobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layer1 = nn.ModuleList([self._make_layers(32, self.cfg[0])])
self.layer2 = nn.ModuleList([self._make_layers(24, self.cfg[1]) for _ in range(2)])
self.layer3 = nn.ModuleList([self._make_layers(96, self.cfg[2]) for _ in range(4)])
self.conv2s = nn.ModuleList(
[nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False) for _ in range(4)])
self.bn2s = nn.ModuleList([nn.BatchNorm2d(1280) for _ in range(4)])
self.linears = nn.ModuleList([nn.Linear(1280, num_classes) for _ in range(4)])
def _make_layers(self, in_planes, cfg):
layers = []
for expansion, out_planes, num_blocks, stride in cfg:
strides = [stride] + [1] * (num_blocks - 1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
res = [out1, out2, out3, out4]
for i in range(4):
res[i] = F.relu(self.bn2s[i](self.conv2s[i](res[i])))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
res[i] = F.avg_pool2d(res[i], 4)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.linears[i](res[i])
return res
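# Branch routing in TreeMobileNetV2.forward above (illustrative sketch):
#
#   conv1 -> layer1[0] -+-> layer2[0] -+-> layer3[0] -> head 1
#                       |              +-> layer3[1] -> head 2
#                       +-> layer2[1] -+-> layer3[2] -> head 3
#                                      +-> layer3[3] -> head 4
#
# i.e. one shared trunk splits twice into four leaf classifiers.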
class TreeMobileNetV2_image(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [
[(1, 16, 1, 1),
(6, 24, 2, 2)], # NOTE: change stride 2 -> 1 for CIFAR10
[(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1)],
[(6, 160, 3, 2),
(6, 320, 1, 1)]]
def __init__(self, num_classes=1000):
super(TreeMobileNetV2_image, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layer1 = nn.ModuleList([self._make_layers(32, self.cfg[0])])
self.layer2 = nn.ModuleList([self._make_layers(24, self.cfg[1]) for _ in range(2)])
self.layer3 = nn.ModuleList([self._make_layers(96, self.cfg[2]) for _ in range(4)])
self.conv2s = nn.ModuleList(
[nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False) for _ in range(4)])
self.bn2s = nn.ModuleList([nn.BatchNorm2d(1280) for _ in range(4)])
self.linears = nn.ModuleList([nn.Linear(1280, num_classes) for _ in range(4)])
def _make_layers(self, in_planes, cfg):
layers = []
for expansion, out_planes, num_blocks, stride in cfg:
strides = [stride] + [1] * (num_blocks - 1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
res = [out1, out2, out3, out4]
for i in range(4):
res[i] = F.relu(self.bn2s[i](self.conv2s[i](res[i])))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
res[i] = F.avg_pool2d(res[i], 7)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.linears[i](res[i])
return res
def test():
net = TreeMobileNetV2_image()
x = torch.randn(2, 3, 224, 224)
y = net(x)
print(sum(p.numel() for p in net.parameters()))
print(net)
print(y[0].size())
# test()
| 7,588 | 37.522843 | 114 | py |
Tree-Supervised | Tree-Supervised-main/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
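# How a cfg entry unrolls (illustrative): 'VGG11' above expands to
# Conv(3->64)+BN+ReLU, MaxPool, Conv(64->128)+BN+ReLU, MaxPool,
# 2x Conv(256)+BN+ReLU, MaxPool, 2x Conv(512)+BN+ReLU, MaxPool,
# 2x Conv(512)+BN+ReLU, MaxPool -- five 2x2 max-pools in total, so a 32x32
# input reaches the classifier as a 512x1x1 map (hence nn.Linear(512, num_class)).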
class VGG(nn.Module):
    def __init__(self, vgg_name, num_class):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(512, num_class)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
class TreeVGG(nn.Module):
    def __init__(self, vgg_name, num_class):
super(TreeVGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(512, num_class)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
def test():
net = VGG('VGG11',100)
x = torch.randn(2, 3, 32, 32)
y = net(x)
print(y.size())
# test() | 2,351 | 30.783784 | 117 | py |
Tree-Supervised | Tree-Supervised-main/models/mobilenetv3.py | '''MobileNetV3 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class hswish(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
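# Worked values for the activations above (illustrative): at x = 1.0,
# hswish(x) = 1.0 * relu6(4.0) / 6 = 4/6 ~ 0.667 and hsigmoid(x) = 4/6 ~ 0.667;
# for x <= -3 both return 0, and for x >= 3 hsigmoid saturates at 1 while
# hswish(x) approaches x.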
class SeModule(nn.Module):
def __init__(self, in_size, reduction=4):
super(SeModule, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
)
def forward(self, x):
return x * self.se(x)
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
super(Block, self).__init__()
self.stride = stride
self.se = semodule
self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(expand_size)
self.nolinear1 = nolinear
self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride,
padding=kernel_size // 2, groups=expand_size, bias=False)
self.bn2 = nn.BatchNorm2d(expand_size)
self.nolinear2 = nolinear
self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_size)
self.shortcut = nn.Sequential()
if stride == 1 and in_size != out_size:
self.shortcut = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_size),
)
def forward(self, x):
out = self.nolinear1(self.bn1(self.conv1(x)))
out = self.nolinear2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
        if self.se is not None:
out = self.se(out)
out = out + self.shortcut(x) if self.stride == 1 else out
return out
class MobileNetV3_Large(nn.Module):
def __init__(self, num_classes=1000):
super(MobileNetV3_Large, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 1),
Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(3, 40, 240, 80, hswish(), None, 2),
Block(3, 80, 200, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
Block(5, 160, 960, 160, hswish(), SeModule(160), 1),
)
self.conv2 = nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(960)
self.hs2 = hswish()
self.linear3 = nn.Linear(960, 1280)
self.bn3 = nn.BatchNorm1d(1280)
self.hs3 = hswish()
self.linear4 = nn.Linear(1280, num_classes)
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.hs3(self.bn3(self.linear3(out)))
out = self.linear4(out)
return out
class TreeMobileNetV3_Large(nn.Module):
def __init__(self, num_classes=100):
super(TreeMobileNetV3_Large, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.layer1 = nn.ModuleList([nn.Sequential(Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 1),
Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1) )])
self.layer2 = nn.ModuleList([nn.Sequential(Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(3, 40, 240, 80, hswish(), None, 2),
Block(3, 80, 200, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1), ) for _ in range(2)])
self.layer3 = nn.ModuleList([nn.Sequential(Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
Block(5, 160, 960, 160, hswish(), SeModule(160), 1) ) for _ in range(4)])
self.conv2s = nn.ModuleList([nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False) for _ in range(4)])
self.bn2s = nn.ModuleList([nn.BatchNorm2d(960) for _ in range(4)])
self.hs2s = nn.ModuleList([hswish() for _ in range(4)])
self.linear3s = nn.ModuleList([nn.Linear(960, 1280) for _ in range(4)])
self.bn3s = nn.ModuleList([nn.BatchNorm1d(1280) for _ in range(4)])
self.hs3s = nn.ModuleList([hswish() for _ in range(4)])
self.linear4s = nn.ModuleList([nn.Linear(1280, num_classes) for _ in range(4)])
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
res = [out1, out2, out3, out4]
for i in range(4):
res[i] = self.hs2s[i](self.bn2s[i](self.conv2s[i](res[i])))
res[i] = F.avg_pool2d(res[i], 4)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.hs3s[i](self.bn3s[i](self.linear3s[i](res[i])))
res[i] = self.linear4s[i](res[i])
return res
class TreeMobileNetV3_Large_Image(nn.Module):
def __init__(self, num_classes=1000):
super(TreeMobileNetV3_Large_Image, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.layer1 = nn.ModuleList([nn.Sequential(Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1))])
self.layer2 = nn.ModuleList([nn.Sequential(Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(3, 40, 240, 80, hswish(), None, 2),
Block(3, 80, 200, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1), ) for _ in range(2)])
self.layer3 = nn.ModuleList([nn.Sequential(Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
Block(5, 160, 960, 160, hswish(), SeModule(160), 1)) for _ in
range(4)])
self.conv2s = nn.ModuleList(
[nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False) for _ in range(4)])
self.bn2s = nn.ModuleList([nn.BatchNorm2d(960) for _ in range(4)])
self.hs2s = nn.ModuleList([hswish() for _ in range(4)])
self.linear3s = nn.ModuleList([nn.Linear(960, 1280) for _ in range(4)])
self.bn3s = nn.ModuleList([nn.BatchNorm1d(1280) for _ in range(4)])
self.hs3s = nn.ModuleList([hswish() for _ in range(4)])
self.linear4s = nn.ModuleList([nn.Linear(1280, num_classes) for _ in range(4)])
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
res = [out1, out2, out3, out4]
for i in range(4):
res[i] = self.hs2s[i](self.bn2s[i](self.conv2s[i](res[i])))
res[i] = F.avg_pool2d(res[i], 7)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.hs3s[i](self.bn3s[i](self.linear3s[i](res[i])))
res[i] = self.linear4s[i](res[i])
return res
class MobileNetV3_Small(nn.Module):
def __init__(self, num_classes=1000):
super(MobileNetV3_Small, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
)
self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(576)
self.hs2 = hswish()
self.linear3 = nn.Linear(576, 1280)
self.bn3 = nn.BatchNorm1d(1280)
self.hs3 = hswish()
self.linear4 = nn.Linear(1280, num_classes)
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = F.avg_pool2d(out, 7)
out = out.view(out.size(0), -1)
out = self.hs3(self.bn3(self.linear3(out)))
out = self.linear4(out)
return out
def test():
import time
start = time.time()
    net = TreeMobileNetV3_Large_Image()
    print('init,', time.time() - start)
print(sum(p.numel() for p in net.parameters()))
x = torch.randn(2, 3, 224, 224)
y = net(x)
print(net)
print(y[0].size())
# test()
| 15,223 | 44.04142 | 124 | py |
Tree-Supervised | Tree-Supervised-main/models/wide_resnet.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
k = widen_factor
print('| Wide-Resnet %dx%d' %(depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3,nStages[0])
self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(int(num_blocks)-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
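# Depth arithmetic for the assertion above (illustrative): a WRN-28-10 has
# depth = 28, so n = (28 - 4) // 6 = 4 wide_basic blocks per stage, and with
# widen_factor k = 10 the stage widths nStages become [16, 160, 320, 640].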
class Wide_TreeResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(Wide_TreeResNet, self).__init__()
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
k = widen_factor
print('| Wide-Resnet %dx%d' %(depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3,nStages[0])
self.layer1 = nn.ModuleList([self._wide_layer(wide_basic,16, nStages[1], n, dropout_rate, stride=1)])
self.layer2 = nn.ModuleList([self._wide_layer(wide_basic,nStages[1], nStages[2], n, dropout_rate, stride=2) for _ in range(2)])
self.layer3 = nn.ModuleList([self._wide_layer(wide_basic, nStages[2],nStages[3], n, dropout_rate, stride=2) for _ in range(4)])
self.bns = nn.ModuleList([nn.BatchNorm2d(nStages[3], momentum=0.9) for _ in range(4)])
self.linears = nn.ModuleList([nn.Linear(nStages[3], num_classes) for _ in range(4)])
def _wide_layer(self, block,in_planes, planes, num_blocks, dropout_rate, stride):
layers = []
layers.append(block(in_planes, planes, dropout_rate, stride))
for _ in range(num_blocks-1):
layers.append(block(planes, planes, dropout_rate, 1))
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1[0](out)
out1 = self.layer2[0](out)
out3 = self.layer2[1](out)
out2 = self.layer3[1](out1)
out1 = self.layer3[0](out1)
out4 = self.layer3[3](out3)
out3 = self.layer3[2](out3)
res = [out1, out2, out3, out4]
for i in range(len(res)):
res[i] = F.relu(self.bns[i](res[i]))
res[i] = F.avg_pool2d(res[i], 8)
res[i] = res[i].view(res[i].size(0), -1)
res[i] = self.linears[i](res[i])
return res
if __name__ == '__main__':
net=Wide_TreeResNet(28, 10, 0, 10)
y = net(Variable(torch.randn(1,3,32,32)))
print(y[0].size()) | 4,976 | 35.065217 | 135 | py |
Tree-Supervised | Tree-Supervised-main/models/common.py | import torch.nn
###
#%% activation functions
###
class Swish(torch.nn.Module):
def forward(self, x):
        return x * torch.sigmoid(x)  # F.sigmoid is deprecated and takes no inplace arg
class HSwish(torch.nn.Module):
def forward(self, x):
return x * torch.nn.functional.relu6(x + 3.0, inplace=True) / 6.0
class HSigmoid(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.relu6(x + 3.0, inplace=True) / 6.0
def get_activation(activation):
if activation == "relu":
return torch.nn.ReLU(inplace=True)
elif activation == "relu6":
return torch.nn.ReLU6(inplace=True)
elif activation == "swish":
return Swish()
elif activation == "hswish":
return HSwish()
elif activation == "sigmoid":
        return torch.nn.Sigmoid()  # nn.Sigmoid takes no inplace argument
elif activation == "hsigmoid":
return HSigmoid()
else:
raise NotImplementedError("Activation {} not implemented".format(activation))
###
#%% misc modules
###
class Flatten(torch.nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class SEUnit(torch.nn.Module):
def __init__(self,
channels,
squeeze_factor=16,
squeeze_activation="relu",
excite_activation="sigmoid"):
super().__init__()
squeeze_channels = channels // squeeze_factor
self.pool = torch.nn.AdaptiveAvgPool2d(output_size=1)
self.conv1 = conv1x1(in_channels=channels, out_channels=squeeze_channels, bias=True)
self.activation1 = get_activation(squeeze_activation)
self.conv2 = conv1x1(in_channels=squeeze_channels, out_channels=channels, bias=True)
self.activation2 = get_activation(excite_activation)
def forward(self, x):
s = self.pool(x)
s = self.conv1(s)
s = self.activation1(s)
s = self.conv2(s)
s = self.activation2(s)
return x * s
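# Minimal smoke test for SEUnit (illustrative, not part of the original file):
# the squeeze path pools (N, C, H, W) to (N, C, 1, 1) and bottlenecks C by
# squeeze_factor, and the excite path rescales the input channel-wise, so the
# output shape matches the input shape.
def _demo_se_unit():
    se = SEUnit(channels=64, squeeze_factor=16)
    x = torch.randn(2, 64, 8, 8)
    assert se(x).shape == x.shape
# _demo_se_unit()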
class Classifier(torch.nn.Module):
def __init__(self, in_channels, num_classes):
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels=in_channels,
out_channels=num_classes,
kernel_size=1,
bias=True)
def forward(self, x):
x = self.conv(x)
x = x.view(x.size(0), -1)
return x
def init_params(self):
torch.nn.init.xavier_normal_(self.conv.weight, gain=1.0)
###
#%% conv layer wrapper
###
def conv1x1(in_channels, out_channels, stride=1, bias=False):
return torch.nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=0,
bias=bias)
def conv3x3(in_channels, out_channels, stride=1, bias=False):
return torch.nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias)
def conv3x3_dw(channels, stride=1):
return torch.nn.Conv2d(
in_channels=channels,
out_channels=channels,
kernel_size=3,
stride=stride,
padding=1,
groups=channels,
bias=False)
def conv5x5_dw(channels, stride=1):
return torch.nn.Conv2d(
in_channels=channels,
out_channels=channels,
kernel_size=5,
stride=stride,
padding=2,
groups=channels,
bias=False)
###
#%% conv block wrapper
###
class ConvBlock(torch.nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
bias=False,
use_bn=True,
activation="relu"):
super().__init__()
self.use_bn = use_bn
self.use_activation = (activation is not None)
self.conv = torch.nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = torch.nn.BatchNorm2d(num_features=out_channels)
if self.use_activation:
self.activation = get_activation(activation)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.use_activation:
x = self.activation(x)
return x
def conv1x1_block(in_channels,
out_channels,
stride=1,
bias=False,
use_bn=True,
activation="relu"):
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=0,
bias=bias,
use_bn=use_bn,
activation=activation)
def conv3x3_block(in_channels,
out_channels,
stride=1,
bias=False,
use_bn=True,
activation="relu"):
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
use_bn=use_bn,
activation=activation)
def conv7x7_block(in_channels,
out_channels,
stride=1,
bias=False,
use_bn=True,
activation="relu"):
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
stride=stride,
padding=3,
bias=bias,
use_bn=use_bn,
activation=activation)
def conv3x3_dw_block(channels,
stride=1,
use_bn=True,
activation="relu"):
return ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=3,
stride=stride,
padding=1,
groups=channels,
use_bn=use_bn,
activation=activation)
def conv5x5_dw_block(channels,
stride=1,
use_bn=True,
activation="relu"):
return ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=5,
stride=stride,
padding=2,
groups=channels,
use_bn=use_bn,
activation=activation) | 6,825 | 27.441667 | 92 | py |
Tree-Supervised | Tree-Supervised-main/utils/cutout.py | import torch
import numpy as np
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img = img * mask
return img
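# Illustrative usage (assumes torchvision is installed; not part of the
# original file). Cutout must come after ToTensor, since __call__ expects
# a (C, H, W) tensor:
#
# from torchvision import transforms
# train_transform = transforms.Compose([
#     transforms.RandomCrop(32, padding=4),
#     transforms.RandomHorizontalFlip(),
#     transforms.ToTensor(),
#     Cutout(n_holes=1, length=16),
# ])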
| 1,172 | 25.659091 | 82 | py |
catsetmat | catsetmat-master/src/main.py | import argparse
import os
import torch
import sys
import multiprocessing
from concurrent.futures import as_completed, ProcessPoolExecutor
from src.our_utils import get_home_path, mkdir_p, get_data_path
from src.results_analyzer import plot_results_by_max
sys.path.append(get_home_path())
from lib.hypersagnn.main import parse_args as parse_embedding_args
from src.experimenter import perform_experiment
from src.our_modules import device
from multiprocessing import set_start_method
def set_torch_environment():
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
def parse_args():
parser = argparse.ArgumentParser(description="CATSETMAT: Main module")
parser.add_argument('--data_name', type=str, default='sample_mag_acm')
parser.add_argument('--num_splits', type=int, default=15,
help='Number of train-test-splits / negative-samplings. Default is 15.')
parser.add_argument('--start_split', type=int, default=0,
help='Start id of splits; splits go from start_split to start_split+num_splits. Default is 0.')
parser.add_argument('--dim', type=int, default=64,
help='Embedding dimension for node2vec. Default is 64.')
parser.add_argument('--model_name', type=str, default='catsetmat')
parser.add_argument('--num_epochs', type=int, default=200,
help='Number of epochs. Default is 200.')
parser.add_argument('--batch_size', type=int, default=300,
                        help='Batch size. Default is 300.')
parser.add_argument('--model_save_split_id', type=int, default=0,
help='Split id for which model is to be saved. Default is 0.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate. Default is 0.001.')
args = parser.parse_args()
return args
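# Hypothetical invocation (the exact entry point may vary; defaults are the
# ones defined in parse_args above):
#   python -m src.main --data_name sample_mag_acm --num_splits 15 \
#       --num_epochs 200 --batch_size 300 --dim 64 --lr 0.001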
def process_args(args):
data_name = args.data_name
num_splits = args.num_splits
start_split = args.start_split
splits = range(start_split, start_split + num_splits)
num_epochs = args.num_epochs
batch_size = args.batch_size
lr=args.lr
return data_name, splits, num_epochs, batch_size, args.model_save_split_id, args.dim, args.model_name,lr
def main():
parallel_version = False
set_torch_environment()
data_name, splits, num_epochs, batch_size, model_save_split_id, dim, model_name, lr = process_args(parse_args())
emb_args = parse_embedding_args()
emb_args.dimensions = dim
home_path = get_home_path()
data_path = get_data_path()
result_path = os.path.join(home_path, 'results', data_name, '_tuning_dim'+str(dim)+'_learning_rate'+str(lr))
# result_path = os.path.join(home_path, 'results', data_name, 'res')
mkdir_p(result_path)
if parallel_version:
num_splits = len(splits)
max_workers = min(num_splits, multiprocessing.cpu_count())
pool = ProcessPoolExecutor(max_workers=max_workers)
process_list = []
for split_id in splits:
process_list.append(pool.submit(perform_experiment, emb_args, home_path, data_path, data_name,
split_id, result_path, num_epochs, batch_size,
model_save_split_id, model_name, lr))
print('{} of {} processes scheduled'.format(len(process_list), num_splits))
results_list = []
for p in as_completed(process_list):
results_list.append(p.result())
print('{} of {} processes completed'.format(len(results_list), len(process_list)))
pool.shutdown(wait=True)
else:
results_list = []
for i, split_id in enumerate(splits):
print('------- SPLIT#{} ({} of {}) -------'.format(split_id, i, len(splits)))
results = perform_experiment(emb_args, home_path, data_path, data_name,
split_id, result_path, num_epochs, batch_size,
model_save_split_id, model_name, lr)
results_list.append(results)
# plot_results(splits, result_path, model_name)
    plot_results_by_max(splits, result_path, model_name, dim, lr)
if __name__ == '__main__':
main()
| 4,233 | 44.042553 | 119 | py |
catsetmat | catsetmat-master/src/data_reader.py | import numpy as np
import os
import pandas as pd
import pickle
import random
import torch
def pad_zeros(points, cardinality, _type='torch'):
if _type == 'np':
if points.shape[2] < cardinality:
# pad to fixed size
padding = np.zeros((points.shape[0], points.shape[1], cardinality - points.shape[2]), dtype=float)
points = np.concatenate([points, padding], axis=2)
else:
if points.size(2) < cardinality:
# pad to fixed size
padding = torch.zeros(points.size(0), points.size(1), cardinality - points.size(2)).to(points.device)
points = torch.cat([points, padding], dim=2)
return points
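# Shape sketch for pad_zeros (illustrative): a batch of point sets shaped
# (B, D, n) with n < cardinality is right-padded with zero columns to
# (B, D, cardinality), e.g. (4, 64, 3) with cardinality=5 -> (4, 64, 5);
# inputs already at full cardinality pass through unchanged.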
def load_bipartite_hypergraph(data_params):
id_p_map = pd.read_csv(os.path.join(data_params['raw_data_path'], data_params['r_label_file']), sep='\t', header=None)
id_a_map = pd.read_csv(os.path.join(data_params['raw_data_path'], data_params['u_label_file']), sep='\t', header=None)
id_a_map = dict(zip(id_a_map[0], id_a_map[1]))
id_k_map = pd.read_csv(os.path.join(data_params['raw_data_path'], data_params['v_label_file']), sep='\t', header=None)
id_k_map = dict(zip(id_k_map[0], id_k_map[1]))
p_a_list_map = pd.read_csv(os.path.join(data_params['raw_data_path'], data_params['r_u_list_file']), sep=':',
header=None)
p_k_list_map = pd.read_csv(os.path.join(data_params['raw_data_path'], data_params['r_v_list_file']), sep=':',
header=None)
n_p, na, nk = len(id_p_map), len(id_a_map), len(id_k_map)
U = list(map(lambda x: list(map(int, x.split(','))), p_a_list_map[1]))
V = list(map(lambda x: list(map(int, x.split(','))), p_k_list_map[1]))
return U, V
def get_neg_samp(U, V, num_neg):
edges = set(zip(map(frozenset, U), map(frozenset, V)))
setU = set(map(frozenset, U))
setV = set(map(frozenset, V))
non_edges = set()
num_pos = len(U)
num_total = len(setU) * len(setV)
max_num_neg = num_total - num_pos
if num_neg > max_num_neg:
print('WARNING: Too many ({}) negative samples demanded. Capping to max possible ({}).'.format(num_neg,
max_num_neg))
num_neg = max_num_neg
while len(non_edges) < num_neg:
        # random.sample on a set was deprecated in Python 3.9 and removed in 3.11
        u = random.choice(tuple(setU))
        v = random.choice(tuple(setV))
pair = (u, v)
if pair in edges or pair in non_edges:
continue
non_edges.add(pair)
neg_U, neg_V = zip(*map(lambda x: list(map(list, x)), non_edges))
    # sanity checks: no positive pair may appear among the negatives, and vice versa
    assert not any(x in non_edges for x in zip(map(frozenset, U), map(frozenset, V)))
    assert not any(x in edges for x in zip(map(frozenset, neg_U), map(frozenset, neg_V)))
return neg_U, neg_V
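# Illustrative toy usage of get_neg_samp (hypothetical data, not from the
# original file): the sampled (u, v) set pairs never coincide with a positive
# hyperedge pair.
def _demo_neg_samp():
    U = [[0, 1], [2], [0, 3]]
    V = [[0], [1, 2], [2]]
    neg_U, neg_V = get_neg_samp(U, V, num_neg=2)
    assert len(neg_U) == len(neg_V) == 2
# _demo_neg_samp()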
def load_bipartite_hypergraph_with_vector(data_params, mask_flag=True, neg_pos_ratio=1):
embeddings = pickle.load(open(os.path.join(data_params['home_path'], data_params['emb_pkl_file']), 'rb'))
id_p_map = pd.read_csv(os.path.join(data_params['home_path'], data_params['r_label_file']), sep='\t', header=None)
id_a_map = pd.read_csv(os.path.join(data_params['home_path'], data_params['u_label_file']), sep='\t', header=None)
id_a_map = dict(zip(id_a_map[0], id_a_map[1]))
id_k_map = pd.read_csv(os.path.join(data_params['home_path'], data_params['v_label_file']), sep='\t', header=None)
id_k_map = dict(zip(id_k_map[0], id_k_map[1]))
p_a_list_map = pd.read_csv(os.path.join(data_params['home_path'], data_params['r_u_list_file']), sep=':',
header=None)
p_k_list_map = pd.read_csv(os.path.join(data_params['home_path'], data_params['r_v_list_file']), sep=':',
header=None)
n_p, na, nk = len(id_p_map), len(id_a_map), len(id_k_map)
U = list(map(lambda x: list(map(int, x.split(','))), p_a_list_map[1]))
V = list(map(lambda x: list(map(int, x.split(','))), p_k_list_map[1]))
neg_U, neg_V = get_neg_samp(U, V, neg_pos_ratio)
labels = np.array([1] * len(U) + [0] * len(neg_U))
U = U + list(neg_U)
V = V + list(neg_V)
ax_map = {a: embeddings[str(n_p + nk + a)] for a in id_a_map}
kx_map = {k: embeddings[str(n_p + k)] for k in id_k_map}
U = list(map(lambda x: np.array(list(map(ax_map.get, x))).T, U))
V = list(map(lambda x: np.array(list(map(kx_map.get, x))).T, V))
n_points_U = np.array([x.shape[1] for x in U])
n_points_V = np.array([x.shape[1] for x in V])
cardinality_U = max(n_points_U)
cardinality_V = max(n_points_V)
U = np.concatenate([pad_zeros(np.array([x]), cardinality_U, 'np') for x in U])
V = np.concatenate([pad_zeros(np.array([x]), cardinality_V, 'np') for x in V])
if mask_flag:
mask = np.array([[1] * n_points_U[i] + [0] * (U.shape[-1] - n_points_U[i]) for i in range(U.shape[0])])
U = np.concatenate((U, np.broadcast_to(mask[:, None, :], (U.shape[0], 1, cardinality_U))), axis=-2)
mask = np.array([[1] * n_points_V[i] + [0] * (V.shape[-1] - n_points_V[i]) for i in range(V.shape[0])])
V = np.concatenate((V, np.broadcast_to(mask[:, None, :], (V.shape[0], 1, cardinality_V))), axis=-2)
return U, V, n_points_U, n_points_V, cardinality_U, cardinality_V, labels
def main():
pass
if __name__ == '__main__':
main()
| 5,370 | 49.669811 | 122 | py |
catsetmat | catsetmat-master/src/results_analyzer.py | import pickle
import os
import numpy as np
import torch
from matplotlib import pyplot as plt
import pandas as pd
from src.our_modules import device
def plot_results(splits, result_path, model_name):
dfs = []
for split_id in splits:
pkl_file = os.path.join(result_path, '{}_{}.pkl'.format(model_name, split_id))
try:
results = pickle.load(open(pkl_file, 'rb'))
except EOFError:
continue
df = pd.DataFrame(results)
if model_name in ['n2v', 'lp']:
print(df)
return
df['train_auc'] = df['AUC'].apply(lambda x: x[0])
df['test_auc'] = df['AUC'].apply(lambda x: x[1])
df['train_loss'] = df['loss'].apply(lambda x: x[0])
df['test_loss'] = df['loss'].apply(lambda x: x[1])
df['split_id'] = split_id
dfs.append(df[['train_auc', 'test_auc', 'train_loss', 'test_loss']])
means = pd.concat([df.reset_index() for df in dfs]).groupby('index').agg(lambda x: (round(np.mean(x), 4)))
stds = pd.concat([df.reset_index() for df in dfs]).groupby('index').agg(lambda x: (round(np.std(x), 4)))
means.to_csv(os.path.join(result_path, 'means.csv'))
stds.to_csv(os.path.join(result_path, 'stds.csv'))
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
means[['train_auc', 'test_auc']].plot(yerr=stds, ax=axs[0], capsize=4)
axs[0].grid()
axs[0].set_ylim(0, 1)
axs[0].set_xlabel('Epoch')
axs[0].set_ylabel('AUC')
axs[0].set_title('Learning curve for AUC')
means[['train_loss', 'test_loss']].plot(yerr=stds, ax=axs[1], capsize=4)
axs[1].grid()
axs[1].set_ylim(bottom=0)
axs[1].set_xlabel('Epoch')
axs[1].set_ylabel('Loss')
axs[1].set_title('Learning curve for Loss')
fig.suptitle('Learning curves for "{}": "{}"'.format(result_path.split(os.path.sep)[-2], model_name), fontsize=15)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
fig_path = os.path.join(result_path, '{}_learning_curve.png'.format(model_name))
plt.savefig(fig_path)
def plot_results_by_max(splits, result_path, model_name, dim, lr):
    list_auc_test = []
    list_auc_train = []
for split_id in splits:
pkl_file = os.path.join(result_path, '{}_{}.pkl'.format(model_name, split_id))
try:
Results = pickle.load(open(pkl_file, 'rb'))
except EOFError:
continue
df = pd.DataFrame(Results)
if model_name in ['n2v', 'lp']:
print(df)
continue
df['train_auc'] = df['AUC'].apply(lambda x: x[0])
df['test_auc'] = df['AUC'].apply(lambda x: x[1])
list_auc_test.append(max(list(df['test_auc'].values)))
list_auc_train.append(max(list(df['train_auc'].values)))
result_list={"modelname":model_name,"dim":dim,"learning_rate":lr,\
"test_result_mean":np.mean(list_auc_test),"test_result_var":(np.std(list_auc_test))**2,\
"train_result_mean":np.mean(list_auc_train),"train_result_var":(np.std(list_auc_train))**2}
pickle.dump(result_list, open(os.path.join(result_path, '{}_main.pkl'.format(model_name)), 'wb'))
def visualize_attn(model, data_point, node_tokens=None):
"""
:param model:
:param data_point: In original format (U, V, Labels)
:param node_tokens:
:return:
"""
size = int((data_point[0][0] > 0).sum())
size_ = int((data_point[0][1] > 0).sum())
u_, v_, l_ = zip(*data_point)
xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
yy = torch.cat(v_, dim=0).view(len(v_), v_[0].shape[0]).to(device)
output, weights = model(xx, yy)
self1 = weights['attn_value']['self'][0][:, :size, :size]
self1_ = weights['attn_value']['self'][1][:, :size_, :size_]
self2 = weights['attn_value']['self'][2][:, :size, :size]
self2_ = weights['attn_value']['self'][3][:, :size_, :size_]
cross = weights['attn_value']['cross'][0][:, :size, :size_]
cross_ = weights['attn_value']['cross'][1][:, :size_, :size]
self1_final = torch.cat([torch.cat([self1, torch.zeros_like(cross)], dim=2),
torch.cat([torch.zeros_like(cross_), self1_], dim=2)], dim=1)
self2_final = torch.cat([torch.cat([self2, torch.zeros_like(cross)], dim=2),
torch.cat([torch.zeros_like(cross_), self2_], dim=2)], dim=1)
cross_final = torch.cat([torch.cat([torch.zeros_like(self1), cross], dim=2),
torch.cat([cross_, torch.zeros_like(self1_)], dim=2)], dim=1)
# call_html()
if not node_tokens:
node_tokens = list(map(lambda x: "U_{}".format(x), map(int, u_[0])))[:size] + \
list(map(lambda x: "V_{}".format(x), map(int, v_[0])))[:size_]
attention = [self1_final.unsqueeze(0), cross_final.unsqueeze(0), self2_final.unsqueeze(0)]
# head_view(attention, node_tokens)
return attention, node_tokens
if __name__ == '__main__':
pass
# attention, tokens = visualize_attn(model, test_data1)
# call_html()
# head_view(attention, tokens)
| 5,113 | 39.267717 | 118 | py |
catsetmat | catsetmat-master/src/our_modules.py | import numpy as np
import torch
import torch.nn as nn
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device('cpu')
# A custom position-wise MLP.
# `dims` is a list; one layer is created per consecutive pair of entries,
# with torch.tanh between layers. No residual connection or layer norm by
# default, because this is only used as the final classifier.
def get_non_pad_mask(seq):
assert seq.dim() == 2
return seq.ne(0).type(torch.float).unsqueeze(-1)
def get_attn_key_pad_mask(seq_k, seq_q):
""" For masking out the padding part of key sequence."""
# Expand to fit the shape of key query attention matrix.
pm_q = seq_q.eq(0)
pm_k = seq_k.eq(0)
pm_q_ = pm_q.unsqueeze(1).expand(-1, seq_k.shape[1], -1)
pm_k_ = pm_k.unsqueeze(1).expand(-1, seq_q.shape[1], -1)
padding_mask = pm_q_.transpose(1, 2) | pm_k_
return padding_mask
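# Mask shape sketch (illustrative): for seq_q of shape (B, len_q) and seq_k of
# shape (B, len_k), the returned mask has shape (B, len_q, len_k) and is True
# wherever the query row or the key column corresponds to a padding token (0).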
class PositionwiseFeedForward(nn.Module):
def __init__(
self,
dims,
dropout=None,
reshape=False,
use_bias=True,
residual=False,
layer_norm=False):
super(PositionwiseFeedForward, self).__init__()
self.w_stack = []
self.dims = dims
for i in range(len(dims) - 1):
self.w_stack.append(nn.Conv1d(dims[i], dims[i + 1], 1, use_bias))
self.add_module("PWF_Conv%d" % (i), self.w_stack[-1])
self.reshape = reshape
self.layer_norm = nn.LayerNorm(dims[-1])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.residual = residual
self.layer_norm_flag = layer_norm
def forward(self, x):
output = x.transpose(1, 2)
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = torch.tanh(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
output = output.transpose(1, 2)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
if self.dims[0] == self.dims[-1]:
# residual
if self.residual:
output += x
if self.layer_norm_flag:
output = self.layer_norm(output)
return output
class FeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, dims, dropout=True, reshape=False, use_bias=True):
super(FeedForward, self).__init__()
self.w_stack = []
for i in range(len(dims) - 1):
self.w_stack.append(nn.Linear(dims[i], dims[i + 1], use_bias))
self.add_module("FF_Linear%d" % (i), self.w_stack[-1])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reshape = reshape
def forward(self, x):
output = x
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = torch.tanh(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
return output
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
def masked_softmax(self, vector: torch.Tensor,
mask: torch.Tensor,
dim: int = -1,
memory_efficient: bool = False,
mask_fill_value: float = -1e32) -> torch.Tensor:
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
# pdb.set_trace()
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside
# the mask, we zero these out.
masked_vector = vector.masked_fill(mask.bool(), -float('inf'))
result = torch.nn.functional.softmax(masked_vector, dim=dim)
result = result * (1 - mask).bool()
# result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((mask).bool(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
result = result * (1 - mask).bool()
return result
def forward(self, q, k, v, diag_mask, mask=None):
# pdb.set_trace()
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
# if mask is not None:
# attn = attn.masked_fill(mask, -float('inf'))
attn = self.masked_softmax(attn, mask, dim=-1, memory_efficient=True)
# attn = torch.nn.functional.softmax(attn, dim=-1)
output = torch.bmm(attn, v)
return output, attn
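# Worked example for masked_softmax above (illustrative, with
# memory_efficient=True as used in forward): scores [2.0, 1.0, 3.0] with
# mask [0, 0, 1] get the masked slot filled with -1e32 before the softmax
# and zeroed afterwards, yielding roughly [0.73, 0.27, 0.0] -- i.e.
# softmax([2.0, 1.0]) spread over the unmasked slots.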
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout, diag_mask, input_dim, static_flag=False):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.static_flag = static_flag
self.w_qs = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_ks = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_vs = nn.Linear(input_dim, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.fc1 = FeedForward([n_head * d_v, d_model], use_bias=False)
if self.static_flag:
self.fc2 = FeedForward([n_head * d_v, d_model], use_bias=False)
self.layer_norm1 = nn.LayerNorm(input_dim)
self.layer_norm2 = nn.LayerNorm(input_dim)
self.layer_norm3 = nn.LayerNorm(input_dim)
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = dropout
self.diag_mask_flag = diag_mask
self.diag_mask = None
def pass_(self, inputs):
return inputs
def forward(self, q, k, v, diag_mask, mask=None):
# pdb.set_trace()
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual_dynamic = q
residual_static = v
q = self.layer_norm1(q)
k = self.layer_norm2(k)
v = self.layer_norm3(v)
sz_b, len_q, _ = q.shape
sz_b, len_k, _ = k.shape
sz_b, len_v, _ = v.shape
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
n = sz_b * n_head
"""change masking matrix from len_v to len_q for cross attentions"""
self.diag_mask = (torch.ones((len_q, len_v), device=device))
if self.diag_mask_flag == 'True':
self.diag_mask -= torch.eye(len_q, len_v, device=device)
self.diag_mask = self.diag_mask.repeat(n, 1, 1)
diag_mask = self.diag_mask
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
dynamic, attn = self.attention(q, k, v, diag_mask, mask=mask)
dynamic = dynamic.view(n_head, sz_b, len_q, d_v)
dynamic = dynamic.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
dynamic = self.dropout(self.fc1(dynamic)) if self.dropout is not None else self.fc1(dynamic)
if self.static_flag:
static = v.view(n_head, sz_b, len_k, d_v)
static = static.permute(1, 2, 0, 3).contiguous().view(sz_b, len_k, -1) # b x lq x (n*dv)
static = self.dropout(self.fc2(static)) if self.dropout is not None else self.fc2(static)
return dynamic, static, attn
else:
return dynamic, attn
class OnlyCrossAttention(nn.Module):
"""A self-attention layer + 2 layered pff"""
def __init__(self, n_head, d_model, d_k, d_v, dropout_mul, dropout_pff, diag_mask, bottle_neck):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.cross_attn_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.cross_attn_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.pff_U1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_U2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
self.pff_V1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_V2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
def forward(self, dynamic_1, dynamic_2, static_1, static_2, crs_attn_mask1, crs_attn_mask2, slf_attn_mask1,
slf_attn_mask2, non_pad_mask1, non_pad_mask2):
"""here the static_1 refer to the input embeddings of U_side while static_2 relates the embeddings of V sides
and dynamic_1 refer to query embedding of u side(input) and dynamic_2 refers to embeddings of V sides """
"""only change is now self attention mask and non pad_mask""" ########
# pdb.set_trace()
dynamic2u, static2, cr_attn_u = self.cross_attn_u(dynamic_1, static_2, static_2, diag_mask=None,
mask=crs_attn_mask1)
dynamic2v, static1, cr_attn_v = self.cross_attn_v(dynamic_2, static_1, static_1, diag_mask=None,
mask=crs_attn_mask2)
output_attn = [cr_attn_u, cr_attn_v]
dynamic1 = self.pff_U1(dynamic2u * non_pad_mask1) * non_pad_mask1
static1 = self.pff_U2(static1 * non_pad_mask1) * non_pad_mask1
dynamic2 = self.pff_V1(dynamic2v * non_pad_mask2) * non_pad_mask2
static2 = self.pff_V2(static2 * non_pad_mask2) * non_pad_mask2
return dynamic1, static1, dynamic2, static2, output_attn
class CrossAttention(nn.Module):
"""A self-attention layer + 2 layered pff"""
def __init__(self, n_head, d_model, d_k, d_v, dropout_mul, dropout_pff, diag_mask, bottle_neck):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.slf_attn_lv1_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.slf_attn_lv1_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.cross_attn_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
self.cross_attn_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
self.slf_attn_lv2_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
self.slf_attn_lv2_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
self.pff_U1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_U2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
self.pff_V1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_V2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
# self.dropout = nn.Dropout(0.2)
def forward(self, dynamic_1, dynamic_2, static_1, static_2, crs_attn_mask1, crs_attn_mask2, slf_attn_mask1,
slf_attn_mask2, non_pad_mask1, non_pad_mask2):
"""here the static_1 refer to the input embeddings of U_side while static_2 relates the embeddings of V sides
and dynamic_1 refer to query embedding of u side(input) and dynamic_2 refers to embeddings of V sides """
"""only change is now self attention mask and non pad_mask""" ########
# pdb.set_trace()
dynamic1u, static1, attn_lv1u = self.slf_attn_lv1_u(dynamic_1, static_1, static_1, diag_mask=None,
mask=slf_attn_mask1)
dynamic1v, static2, attn_lv1v = self.slf_attn_lv1_v(dynamic_2, static_2, static_2, diag_mask=None,
mask=slf_attn_mask2)
dynamic2u, cr_attn_u = self.cross_attn_u(dynamic1u, dynamic1v, dynamic1v, diag_mask=None, mask=crs_attn_mask1)
dynamic2v, cr_attn_v = self.cross_attn_v(dynamic1v, dynamic1u, dynamic1u, diag_mask=None, mask=crs_attn_mask2)
dynamic3u, attn_lv2u = self.slf_attn_lv2_u(dynamic2u, dynamic2u, dynamic2u, diag_mask=None, mask=slf_attn_mask1)
dynamic3v, attn_lv2v = self.slf_attn_lv2_v(dynamic2v, dynamic2v, dynamic2v, diag_mask=None, mask=slf_attn_mask2)
output_attn = [attn_lv1u, attn_lv1v, attn_lv2u, attn_lv2v, cr_attn_u, cr_attn_v]
# dynamic1, cr_attn1 = self.mul_head_attn_forward(dynamic_1, static_2, static_2, diag_mask=None,
# mask=crs_attn_mask1)
# dynamic2, cr_attn2 = self.mul_head_attn_backward(dynamic_2, static_1, static_1, diag_mask=None,
# mask=crs_attn_mask2)
# static1, slf_attn1 = self.mul_head_attn_selfU(dynamic_1, static_1, static_1, diag_mask=None,
# mask=slf_attn_mask1)
# static2, slf_attn2 = self.mul_head_attn_selfV(dynamic_2, static_2, static_2, diag_mask=None,
# mask=slf_attn_mask2)
dynamic1 = self.pff_U1(dynamic3u * non_pad_mask1) * non_pad_mask1
static1 = self.pff_U2(static1 * non_pad_mask1) * non_pad_mask1
dynamic2 = self.pff_V1(dynamic3v * non_pad_mask2) * non_pad_mask2
static2 = self.pff_V2(static2 * non_pad_mask2) * non_pad_mask2
return dynamic1, static1, dynamic2, static2, output_attn
class CrossAttentionSimple(nn.Module):
"""A self-attention layer + 2 layered pff"""
def __init__(self, n_head, d_model, d_k, d_v, dropout_mul, dropout_pff, diag_mask, bottle_neck):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.slf_attn_lv1_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.slf_attn_lv1_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.cross_attn_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
self.cross_attn_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
# self.slf_attn_lv2_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
# diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
#
# self.slf_attn_lv2_v = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
# diag_mask=diag_mask, input_dim=bottle_neck, static_flag=False)
self.pff_U1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_U2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
self.pff_V1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_V2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
# self.dropout = nn.Dropout(0.2)
def forward(self, dynamic_1, dynamic_2, static_1, static_2, crs_attn_mask1, crs_attn_mask2, slf_attn_mask1,
slf_attn_mask2, non_pad_mask1, non_pad_mask2):
"""here the static_1 refer to the input embeddings of U_side while static_2 relates the embeddings of V sides
and dynamic_1 refer to query embedding of u side(input) and dynamic_2 refers to embeddings of V sides """
"""only change is now self attention mask and non pad_mask""" ########
# pdb.set_trace()
dynamic1u, static1, attn_lv1u = self.slf_attn_lv1_u(dynamic_1, static_1, static_1, diag_mask=None,
mask=slf_attn_mask1)
dynamic1v, static2, attn_lv1v = self.slf_attn_lv1_v(dynamic_2, static_2, static_2, diag_mask=None,
mask=slf_attn_mask2)
dynamic2u, cr_attn_u = self.cross_attn_u(dynamic1u, dynamic1v, dynamic1v, diag_mask=None, mask=crs_attn_mask1)
dynamic2v, cr_attn_v = self.cross_attn_v(dynamic1v, dynamic1u, dynamic1u, diag_mask=None, mask=crs_attn_mask2)
# dynamic3u, attn_lv2u = self.slf_attn_lv2_u(dynamic2u, dynamic2u, dynamic2u, diag_mask=None,
# mask=slf_attn_mask1)
# dynamic3v, attn_lv2v = self.slf_attn_lv2_v(dynamic2v, dynamic2v, dynamic2v, diag_mask=None,
# mask=slf_attn_mask2)
output_attn = [attn_lv1u, attn_lv1v, cr_attn_u, cr_attn_v]
# dynamic1, cr_attn1 = self.mul_head_attn_forward(dynamic_1, static_2, static_2, diag_mask=None,
# mask=crs_attn_mask1)
# dynamic2, cr_attn2 = self.mul_head_attn_backward(dynamic_2, static_1, static_1, diag_mask=None,
# mask=crs_attn_mask2)
# static1, slf_attn1 = self.mul_head_attn_selfU(dynamic_1, static_1, static_1, diag_mask=None,
# mask=slf_attn_mask1)
# static2, slf_attn2 = self.mul_head_attn_selfV(dynamic_2, static_2, static_2, diag_mask=None,
# mask=slf_attn_mask2)
dynamic1 = self.pff_U1(dynamic2u * non_pad_mask1) * non_pad_mask1
static1 = self.pff_U2(static1 * non_pad_mask1) * non_pad_mask1
dynamic2 = self.pff_V1(dynamic2v * non_pad_mask2) * non_pad_mask2
static2 = self.pff_V2(static2 * non_pad_mask2) * non_pad_mask2
return dynamic1, static1, dynamic2, static2, output_attn
class Classifier(nn.Module):
"""a classifier is the main model for embeddings"""
def __init__(self, n_head, d_model, d_k, d_v, node_embedding1, node_embedding2, diag_mask, bottle_neck, **args):
super().__init__()
self.pff_classifier1 = PositionwiseFeedForward([d_model, 1], reshape=True, use_bias=True)
self.pff_classifier2 = PositionwiseFeedForward([d_model, 1], reshape=True, use_bias=True)
self.pff_classifier3 = PositionwiseFeedForward([1, 1], reshape=True, use_bias=True)
# self.pff_classifier3 = PositionwiseFeedForward([d_model, 1], reshape=True, use_bias=True)
"""remove positional embedding""" ###########
self.node_embedding1 = node_embedding1
self.node_embedding2 = node_embedding2
if args['cross_attn_type'] == 'x':
model_init = OnlyCrossAttention
elif args['cross_attn_type'] == 'sx':
model_init = CrossAttentionSimple
elif args['cross_attn_type'] == 'sxs':
model_init = CrossAttention
else:
raise Exception('No Cross Attention Type specified.')
self.encode1 = model_init(n_head, d_model, d_k, d_v, dropout_mul=0.4, dropout_pff=0.4,
diag_mask=diag_mask, bottle_neck=bottle_neck)
self.diag_mask_flag = diag_mask
self.layer_norm1 = nn.LayerNorm(d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.layer_norm3 = nn.LayerNorm(d_model)
self.layer_norm4 = nn.LayerNorm(d_model)
def get_node_embeddings(self, x, mode, return_recon=False):
# shape of x: (b, tuple)
sz_b, len_seq = x.shape
# print(torch.max(x), torch.min(x))
if mode == 1:
x, recon_loss = self.node_embedding1(x.view(-1))
else:
x, recon_loss = self.node_embedding2(x.view(-1))
if return_recon:
return x.view(sz_b, len_seq, -1), recon_loss
else:
return x.view(sz_b, len_seq, -1)
def get_embedding(self, x, y, crs_attn_mask1, crs_attn_mask2, slf_attn_mask1, slf_attn_mask2, non_pad_mask1,
non_pad_mask2, return_recon=False):
if return_recon:
x, recon_loss1 = self.get_node_embeddings(x, 1, return_recon)
y, recon_loss2 = self.get_node_embeddings(y, 2, return_recon)
else:
x = self.get_node_embeddings(x, 1, return_recon)
y = self.get_node_embeddings(y, 2, return_recon)
recon_loss1, recon_loss2 = None, None
dynamic1, static1, dynamic2, static2, output_attn = self.encode1(x, y, x, y, crs_attn_mask1, crs_attn_mask2,
slf_attn_mask1, slf_attn_mask2, non_pad_mask1,
non_pad_mask2)
if return_recon:
return dynamic1, static1, dynamic2, static2, output_attn, recon_loss1, recon_loss2
else:
return dynamic1, static1, dynamic2, static2, output_attn
def forward(self, x, y, mask=None, get_outlier=None, return_recon=False):
x = x.long()
# pdb.set_trace()
cr_attn_mask1 = get_attn_key_pad_mask(seq_k=y, seq_q=x)
slf_attn_mask1 = get_attn_key_pad_mask(seq_k=x, seq_q=x)
non_pad_mask1 = get_non_pad_mask(x)
cr_attn_mask2 = get_attn_key_pad_mask(seq_k=x, seq_q=y)
slf_attn_mask2 = get_attn_key_pad_mask(seq_k=y, seq_q=y)
non_pad_mask2 = get_non_pad_mask(y)
if return_recon:
            dynamic1, static1, dynamic2, static2, output_attn, \
            recon_loss1, recon_loss2 = self.get_embedding(x, y, cr_attn_mask1, cr_attn_mask2,
slf_attn_mask1, slf_attn_mask2,
non_pad_mask1, non_pad_mask2, return_recon)
else:
dynamic1, static1, dynamic2, static2, \
output_attn = self.get_embedding(x, y, cr_attn_mask1, cr_attn_mask2, slf_attn_mask1, slf_attn_mask2,
non_pad_mask1, non_pad_mask2, return_recon)
dynamic1 = self.layer_norm1(dynamic1)
static1 = self.layer_norm2(static1)
dynamic2 = self.layer_norm3(dynamic2)
static2 = self.layer_norm4(static2)
sz_b, len_seq, dim = dynamic1.shape
# pdb.set_trace()
# output=torch.cat([((dynamic1-static1)**2),((dynamic2-static2)**2)],dim=1)
output1 = self.pff_classifier1((dynamic1 - static1) ** 2)
output2 = self.pff_classifier2((dynamic2 - static2) ** 2)
# output = dynamic1**2+dynamic2**2
# output1 = self.pff_classifier(dynamic1**2)
# output2 = self.pff_classifier(dynamic2**2)
        output = torch.cat([output1, output2], dim=1)
output = self.pff_classifier3(output)
# pdb.set_trace()
# output = torch.sigmoid(torch.cat([output1,output2],axis=1))
output = torch.sigmoid(output)
# embedding_after_attn = [[static1,dynamic1],[static2,dynamic2], output_attn]
embedding_after_attn = None
        non_pad_mask = torch.cat([non_pad_mask1, non_pad_mask2], dim=1)
if get_outlier is not None:
k = get_outlier
outlier = ((1 - output) * non_pad_mask).topk(k, dim=1, largest=True, sorted=True)[1]
return outlier.view(-1, k)
        # Aggregate per-position scores into a single score per sample.
        mode = 'first'
        if mode == 'min':
            # minimum over non-padded positions, computed as 1 - max(1 - output)
            output, _ = torch.max(
                (1 - output) * non_pad_mask, dim=-2, keepdim=False)
            output = 1 - output
elif mode == 'sum':
output = torch.sum(output * non_pad_mask, dim=-2, keepdim=False)
mask_sum = torch.sum(non_pad_mask, dim=-2, keepdim=False)
output /= mask_sum
elif mode == 'first':
output = output[:, 0, :]
if return_recon:
return output, None, embedding_after_attn
else:
return output, embedding_after_attn
def return_embeddings(self, x, mode):
# x must be tensor of elements (index)
if mode == 1:
return self.node_embedding1[x]
else:
return self.node_embedding2[x]
def save_trained_embeddings(self, file_path):
file = {"first_set_graph": self.node_embedding1, "second_set_graph": self.node_embedding2}
torch.save(file, file_path)
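# Hedged sketch (toy tensors, not used by the model) of the three aggregation modes
# in Classifier.forward: 'min' takes the minimum per-set score via max over
# (1 - output), 'sum' is a mask-normalized mean, and 'first' keeps position 0.
def _demo_score_aggregation(mode='sum'):
    output = torch.tensor([[[0.9], [0.2], [0.5]]])     # (batch=1, len_seq=3, 1)
    non_pad_mask = torch.tensor([[[1.], [1.], [0.]]])  # last position is padding
    if mode == 'min':
        agg, _ = torch.max((1 - output) * non_pad_mask, dim=-2)
        agg = 1 - agg
    elif mode == 'sum':
        agg = torch.sum(output * non_pad_mask, dim=-2) / torch.sum(non_pad_mask, dim=-2)
    else:  # 'first'
        agg = output[:, 0, :]
    return agg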
| 27,636 | 46.486254 | 120 | py |
catsetmat | catsetmat-master/src/hypersagnn_modules.py | import numpy as np
import torch
import torch.nn as nn
from src.our_modules import device
# A custom position-wise MLP.
# dims is a list; one layer is created per consecutive pair of entries, with
# torch.tanh between layers. Residual connections and layer norm are off by
# default because the plain variant is also used as the final classifier.
def get_non_pad_mask(seq):
assert seq.dim() == 2
return seq.ne(0).type(torch.float).unsqueeze(-1)
def get_attn_key_pad_mask(seq_k, seq_q):
""" For masking out the padding part of key sequence."""
# Expand to fit the shape of key query attention matrix.
pm_q = seq_q.eq(0)
pm_k = seq_k.eq(0)
pm_q_ = pm_q.unsqueeze(1).expand(-1, seq_k.shape[1], -1)
pm_k_ = pm_k.unsqueeze(1).expand(-1, seq_q.shape[1], -1)
padding_mask = pm_q_.transpose(1, 2) | pm_k_
return padding_mask
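# Hedged usage sketch with toy ids (0 is the padding index): get_non_pad_mask marks
# real tokens, while get_attn_key_pad_mask marks (query, key) pairs where either
# position is padding. The function name and inputs are ours.
def _demo_padding_masks():
    seq_q = torch.tensor([[5, 3, 0]])   # (batch=1, len_q=3), last slot padded
    seq_k = torch.tensor([[7, 0]])      # (batch=1, len_k=2), last slot padded
    non_pad = get_non_pad_mask(seq_q)               # (1, 3, 1) float mask
    attn_pad = get_attn_key_pad_mask(seq_k, seq_q)  # (1, 3, 2) bool mask
    return non_pad, attn_pad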
class PositionwiseFeedForward(nn.Module):
def __init__(
self,
dims,
dropout=None,
reshape=False,
use_bias=True,
residual=False,
layer_norm=False):
super(PositionwiseFeedForward, self).__init__()
self.w_stack = []
self.dims = dims
for i in range(len(dims) - 1):
            # bias must be passed by keyword: Conv1d's 4th positional argument is stride
            self.w_stack.append(nn.Conv1d(dims[i], dims[i + 1], 1, bias=use_bias))
self.add_module("PWF_Conv%d" % (i), self.w_stack[-1])
self.reshape = reshape
self.layer_norm = nn.LayerNorm(dims[-1])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.residual = residual
self.layer_norm_flag = layer_norm
def forward(self, x):
output = x.transpose(1, 2)
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = torch.tanh(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
output = output.transpose(1, 2)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
if self.dims[0] == self.dims[-1]:
# residual
if self.residual:
output += x
if self.layer_norm_flag:
output = self.layer_norm(output)
return output
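# Hedged sketch of the idea behind PositionwiseFeedForward: a Conv1d with kernel
# size 1 applies the same linear map independently at every sequence position,
# which is why inputs are transposed to (batch, channels, length) around the conv
# stack. The toy shapes below are ours.
def _demo_positionwise_conv():
    pff = PositionwiseFeedForward([4, 8, 4])
    x = torch.randn(2, 5, 4)  # (batch, length, features)
    return pff(x).shape       # -> torch.Size([2, 5, 4])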
class FeedForward(nn.Module):
""" A two-feed-forward-layer module """
    def __init__(self, dims, dropout=None, reshape=False, use_bias=True):
super(FeedForward, self).__init__()
self.w_stack = []
for i in range(len(dims) - 1):
self.w_stack.append(nn.Linear(dims[i], dims[i + 1], use_bias))
self.add_module("FF_Linear%d" % (i), self.w_stack[-1])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reshape = reshape
def forward(self, x):
output = x
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = torch.tanh(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
return output
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
def masked_softmax(self, vector: torch.Tensor,
mask: torch.Tensor,
dim: int = -1,
memory_efficient: bool = False,
mask_fill_value: float = -1e32) -> torch.Tensor:
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
# pdb.set_trace()
            mask = 1 - mask
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside
# the mask, we zero these out.
result = torch.nn.functional.softmax(vector * (1 - mask).byte(), dim=dim)
result = result * (1 - mask).byte()
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((mask).bool(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
result = result * ((1-mask).bool())
return result
def forward(self, q, k, v, diag_mask, mask=None):
# pdb.set_trace()
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
# if mask is not None:
# attn = attn.masked_fill(mask, -float('inf'))
        # combine the padding mask with the diagonal mask; 1 marks positions to keep
        mask = (1 - mask.float()) * diag_mask
attn = self.masked_softmax(attn, mask, dim=-1, memory_efficient=True)
# attn = torch.nn.functional.softmax(attn, dim=-1)
output = torch.bmm(attn, v)
return output, attn
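# Hedged sketch of the masking convention (toy values, ours): masked_softmax expects
# mask == 1 for positions that may be attended to; masked-out positions are filled
# with a large negative value before the softmax, so they get probability ~0.
def _demo_masked_softmax():
    attn = ScaledDotProductAttention(temperature=1.0)
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    keep = torch.tensor([[1.0, 1.0, 0.0]])  # last position masked out
    return attn.masked_softmax(scores, keep, dim=-1, memory_efficient=True)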
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout, diag_mask, input_dim, static_flag=False):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.static_flag = static_flag
self.w_qs = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_ks = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_vs = nn.Linear(input_dim, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.fc1 = FeedForward([n_head * d_v, d_model], use_bias=False)
if self.static_flag:
self.fc2 = FeedForward([n_head * d_v, d_model], use_bias=False)
self.layer_norm1 = nn.LayerNorm(input_dim)
self.layer_norm2 = nn.LayerNorm(input_dim)
self.layer_norm3 = nn.LayerNorm(input_dim)
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = dropout
self.diag_mask_flag = True
self.diag_mask = diag_mask
def pass_(self, inputs):
return inputs
def forward(self, q, k, v, diag_mask, mask=None):
# pdb.set_trace()
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual_dynamic = q
residual_static = v
q = self.layer_norm1(q)
k = self.layer_norm2(k)
v = self.layer_norm3(v)
sz_b, len_q, _ = q.shape
sz_b, len_k, _ = k.shape
sz_b, len_v, _ = v.shape
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
n = sz_b * n_head
"""change masking matrix from len_v to len_q for cross attentions"""
self.diag_mask = (torch.ones((len_q, len_v), device=device))
if self.diag_mask_flag == True:
self.diag_mask -= torch.eye(len_q, len_v, device=device)
self.diag_mask = self.diag_mask.repeat(n, 1, 1)
diag_mask = self.diag_mask
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
dynamic, attn = self.attention(q, k, v, diag_mask, mask=mask)
dynamic = dynamic.view(n_head, sz_b, len_q, d_v)
dynamic = dynamic.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
dynamic = self.dropout(self.fc1(dynamic)) if self.dropout is not None else self.fc1(dynamic)
if self.static_flag:
static = v.view(n_head, sz_b, len_k, d_v)
static = static.permute(1, 2, 0, 3).contiguous().view(sz_b, len_k, -1) # b x lq x (n*dv)
static = self.dropout(self.fc2(static)) if self.dropout is not None else self.fc2(static)
return dynamic, static, attn
else:
return dynamic, attn
class SelfAttentionSimple(nn.Module):
    """A self-attention layer followed by 2-layer position-wise feed-forward networks."""
def __init__(self, n_head, d_model, d_k, d_v, dropout_mul, dropout_pff, diag_mask, bottle_neck):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.slf_attn_lv1_u = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout_mul,
diag_mask=diag_mask, input_dim=bottle_neck, static_flag=True)
self.pff_U1 = PositionwiseFeedForward([d_model, d_model, d_model],
dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_U2 = PositionwiseFeedForward([bottle_neck, d_model, d_model],
dropout=dropout_pff, residual=False, layer_norm=True)
# self.dropout = nn.Dropout(0.2)
def forward(self, dynamic_1, dynamic_2, static_1, static_2, crs_attn_mask1, crs_attn_mask2, slf_attn_mask1,
slf_attn_mask2, non_pad_mask1, non_pad_mask2):
"""here the static_1 refer to the input embeddings of U_side while static_2 relates the embeddings of V sides
and dynamic_1 refer to query embedding of u side(input) and dynamic_2 refers to embeddings of V sides """
"""only change is now self attention mask and non pad_mask""" ########
# pdb.set_trace()
dynamic1u, static1, attn_lv1u = self.slf_attn_lv1_u(dynamic_1, static_1, static_1, diag_mask=None,
mask=slf_attn_mask1)
output_attn = [attn_lv1u]
dynamic1 = self.pff_U1(dynamic1u * non_pad_mask1) * non_pad_mask1
static1 = self.pff_U2(static1 * non_pad_mask1) * non_pad_mask1
# dynamic2 = self.pff_V1(dynamic2v * non_pad_mask2) * non_pad_mask2
# static2 = self.pff_V2(static2 * non_pad_mask2) * non_pad_mask2
return dynamic1, static1, output_attn
class Classifier(nn.Module):
"""a classifier is the main model for embeddings"""
def __init__(self, n_head, d_model, d_k, d_v, node_embedding1, diag_mask, bottle_neck, hypersagnn_mode='sum', **args):
super().__init__()
self.pff_classifier1 = PositionwiseFeedForward([d_model, 1], reshape=True, use_bias=True)
self.pff_classifier2 = PositionwiseFeedForward([1, 1], reshape=True, use_bias=True)
"""remove positional embedding""" ###########
self.node_embedding1 = node_embedding1
# self.node_embedding2 = node_embedding2
        self.encode1 = SelfAttentionSimple(n_head, d_model, d_k, d_v, dropout_mul=0.4, dropout_pff=0.4,
                                           diag_mask=diag_mask, bottle_neck=bottle_neck)
self.diag_mask_flag = diag_mask
self.layer_norm1 = nn.LayerNorm(d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.mode = hypersagnn_mode
# self.layer_norm3 = nn.LayerNorm(d_model)
# self.layer_norm4 = nn.LayerNorm(d_model)
def get_node_embeddings(self, x, mode, return_recon=False):
# shape of x: (b, tuple)
sz_b, len_seq = x.shape
# print(torch.max(x), torch.min(x))
        # A single embedding table is shared by both sides in the Hyper-SAGNN
        # baseline, so `mode` is kept only for interface compatibility.
        x, recon_loss = self.node_embedding1(x.view(-1))
if return_recon:
return x.view(sz_b, len_seq, -1), recon_loss
else:
return x.view(sz_b, len_seq, -1)
def get_embedding(self, x, y, crs_attn_mask1, crs_attn_mask2, slf_attn_mask1, slf_attn_mask2, non_pad_mask1,
non_pad_mask2, return_recon=False):
if return_recon:
x, recon_loss1 = self.get_node_embeddings(x, 1, return_recon)
y, recon_loss2 = self.get_node_embeddings(y, 2, return_recon)
else:
x = self.get_node_embeddings(x, 1, return_recon)
y = self.get_node_embeddings(y, 2, return_recon)
dynamic1, static1, output_attn = self.encode1(x, y, x, y, crs_attn_mask1, crs_attn_mask2,
slf_attn_mask1, slf_attn_mask2, non_pad_mask1,
non_pad_mask2)
if return_recon:
return dynamic1, static1, output_attn, recon_loss1, recon_loss2
else:
return dynamic1, static1, output_attn
def forward(self, x, y, mask=None, get_outlier=None, return_recon=False):
x = x.long()
# pdb.set_trace()
cr_attn_mask1 = get_attn_key_pad_mask(seq_k=y, seq_q=x)
slf_attn_mask1 = get_attn_key_pad_mask(seq_k=x, seq_q=x)
non_pad_mask1 = get_non_pad_mask(x)
cr_attn_mask2 = get_attn_key_pad_mask(seq_k=x, seq_q=y)
slf_attn_mask2 = get_attn_key_pad_mask(seq_k=y, seq_q=y)
non_pad_mask2 = get_non_pad_mask(y)
if return_recon:
dynamic1, static1,output_attn, recon_loss1, recon_loss2 = self.get_embedding(x, y,
cr_attn_mask1,
cr_attn_mask2,
slf_attn_mask1,
slf_attn_mask2,
non_pad_mask1,
non_pad_mask2,
return_recon)
else:
dynamic1, static1, output_attn = self.get_embedding(x, y, cr_attn_mask1, cr_attn_mask2,
slf_attn_mask1, slf_attn_mask2,
                                                                non_pad_mask1, non_pad_mask2, return_recon)
dynamic1 = self.layer_norm1(dynamic1)
static1 = self.layer_norm2(static1)
sz_b, len_seq, dim = dynamic1.shape
output = self.pff_classifier1((dynamic1 - static1) ** 2)
# output=self.pff_classifier2(output)
output = torch.sigmoid(output)
        embedding_after_attn = output_attn
if get_outlier is not None:
k = get_outlier
outlier = ((1 - output) * non_pad_mask).topk(k, dim=1, largest=True, sorted=True)[1]
return outlier.view(-1, k)
mode = self.mode
non_pad_mask = non_pad_mask1
if mode == 'min':
output, _ = torch.max(
(1 - output) * non_pad_mask, dim=-2, keepdim=False)
output = 1 - output
elif mode == 'sum':
output = torch.sum(output * non_pad_mask, dim=-2, keepdim=False)
mask_sum = torch.sum(non_pad_mask, dim=-2, keepdim=False)
output /= mask_sum
elif mode == 'first':
output = output[:, 0, :]
if return_recon:
return output, None, embedding_after_attn
else:
return output, embedding_after_attn
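# Hedged sketch of the Hyper-SAGNN scoring rule used in Classifier.forward: each
# position is scored from the squared difference between its dynamic (attention)
# and static embeddings. Dimensions and names here are ours.
def _demo_hypersagnn_score():
    dynamic = torch.randn(1, 3, 8)
    static = torch.randn(1, 3, 8)
    pff = PositionwiseFeedForward([8, 1], reshape=True, use_bias=True)
    return torch.sigmoid(pff((dynamic - static) ** 2)).shape  # -> (1, 3, 1)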
| 16,055 | 39.341709 | 125 | py |
catsetmat | catsetmat-master/src/our_utils.py | import errno
import multiprocessing
import numpy as np
import os
import pickle
import time
import torch.nn as nn
import torch
import sys
from concurrent.futures import as_completed, ProcessPoolExecutor
from gensim.models import Word2Vec
from sklearn.preprocessing import StandardScaler
def get_home_path():
# return '/content/drive/My Drive/projects/textual_analysis_email/catsetmat'
# return "/content/drive/My Drive/textual_analysis_email/catsetmat"
# return '/home/govinds/repos/catsetmat'
# return "/content/drive/My Drive/repos/govind_swyam/catsetmat"
# return "/home2/e1-313-15477/govind/repos/catsetmat"
# return "/home/swyamsingh/repos/catsetmat"
return "C:\\cygwin64\\home\\Nidhi\\repos\\catsetmat"
sys.path.append(get_home_path())
from lib.hypersagnn.Modules import Wrap_Embedding
from lib.hypersagnn.random_walk_hyper import random_walk_hyper
from lib.hypersagnn.utils import walkpath2str
from src.our_modules import device
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python ≥ 2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def pad_zeros_np(x, max_size):
# Adding 1 as well to differentiate between padded zeros and others.
return np.concatenate((x + 1, np.zeros((max_size - x.shape[0],), dtype=int)))
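# Hedged example of the shift-by-one convention above: ids are incremented so 0 can
# be reserved for padding, e.g. nodes [0, 2] padded to length 4 become [1, 3, 0, 0].
def _demo_pad_zeros_np():
    return pad_zeros_np(np.array([0, 2]), 4)  # -> array([1, 3, 0, 0])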
def load_and_process_data(pickled_path):
data = pickle.load(open(pickled_path, 'rb'))
train_data, test_data, max_he_U, max_he_V, node_list_U, node_list_V = \
[data[x] for x in ["train_data", "test_data", "max_length_u",
"max_length_v", 'node_list_U', 'node_list_V']]
U_train_hes, V_train_hes, label_train = zip(*train_data)
U_test_hes, V_test_hes, label_test = zip(*test_data)
U_train_hes_tensor = []
V_train_hes_tensor = []
for i in range(len(U_train_hes)):
U_train_hes_tensor.append(torch.from_numpy(pad_zeros_np(U_train_hes[i], max_he_U)).long())
V_train_hes_tensor.append(torch.from_numpy(pad_zeros_np(V_train_hes[i], max_he_V)).long())
U_test_hes_tensor = []
V_test_hes_tensor = []
for i in range(len(U_test_hes)):
U_test_hes_tensor.append(torch.from_numpy(pad_zeros_np(U_test_hes[i], max_he_U)).long())
V_test_hes_tensor.append(torch.from_numpy(pad_zeros_np(V_test_hes[i], max_he_V)).long())
train_data = list(zip(U_train_hes_tensor, V_train_hes_tensor, label_train))
test_data = list(zip(U_test_hes_tensor, V_test_hes_tensor, label_test))
# Converting list of hyperedges to a set to remove redundancy
U_train_hes = np.array(list(map(lambda x: np.array(list(x)), set(map(frozenset, U_train_hes)))))
V_train_hes = np.array(list(map(lambda x: np.array(list(x)), set(map(frozenset, V_train_hes)))))
return train_data, test_data, U_train_hes, V_train_hes, node_list_U, node_list_V
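# Hedged sketch of the de-duplication idiom used above: mapping hyperedges to
# frozensets removes repeats regardless of node order, so [1, 2] and [2, 1]
# collapse into a single edge.
def _demo_dedup_hyperedges():
    hyperedges = [np.array([1, 2]), np.array([2, 1]), np.array([3])]
    return len(set(map(frozenset, hyperedges)))  # -> 2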
def process_node_emb(A, node_list, args):
A = StandardScaler().fit_transform(A)
A = np.concatenate((np.zeros((1, A.shape[-1]), dtype='float32'), A), axis=0)
A = A.astype('float32')
A = torch.tensor(A).to(device)
# print(A.shape)
node_embedding = Wrap_Embedding(int(len(node_list) + 1), args.dimensions, scale_grad_by_freq=False, padding_idx=0,
sparse=False)
node_embedding.weight = nn.Parameter(A)
return node_embedding
def w2v_model(walks, emb_args, node_list, silent=False):
start = time.time()
if os.name != 'nt':
split_num = 20
pool = ProcessPoolExecutor(max_workers=split_num)
walks = np.array_split(walks, split_num)
if not silent:
print("Start turning path to strs")
process_list = []
for walk in walks:
process_list.append(pool.submit(walkpath2str, walk, silent))
results = []
for p in as_completed(process_list):
results += p.result()
pool.shutdown(wait=True)
walks = results
else:
walks = walkpath2str(walks, silent)
if not silent:
print(
"Finishing Loading and processing %.2f s" %
(time.time() - start))
print("Start Word2vec")
print("num cpu cores", multiprocessing.cpu_count())
w2v = Word2Vec(
walks,
size=emb_args.dimensions,
window=emb_args.window_size,
min_count=0,
sg=1,
iter=1,
workers=multiprocessing.cpu_count())
wv = w2v.wv
A = [wv[str(i)] for i in node_list]
A = np.array(A)
return A
def obtain_node_embeddings(args, node_list, hyperedges, data_name, set_name, split_id, base_path, silent=False):
walk_path = random_walk_hyper(args, node_list, hyperedges, data_name, set_name, split_id, base_path, silent)
walks = np.loadtxt(walk_path, delimiter=" ").astype('int')
A = w2v_model(walks, args, node_list, silent)
emb_base = os.path.join(base_path, 'walks/embeddings/{}'.format(
data_name))
mkdir_p(emb_base)
emb_path = os.path.join(emb_base, '{}_wv_{}__{}{}{}.npy'.format(
data_name, args.dimensions, data_name, split_id, set_name))
if not silent:
print('Saving embeddings to {}'.format(emb_path))
np.save(emb_path, A)
return A
def get_default_data_params(data_path=None):
if not data_path:
data_path = get_data_path()
data_params = {'raw_data_path': os.path.join(data_path, 'raw'),
'r_label_file': 'id_p_map.txt',
'u_label_file': 'id_a_map.txt',
'v_label_file': 'id_k_map.txt',
'r_u_list_file': 'p_a_list_train.txt',
'r_v_list_file': 'p_k_list_train.txt',
'processed_data_path': os.path.join(data_path, 'processed')}
return data_params
def get_data_path():
# return '/content/drive/My Drive/projects/textual_analysis_email/catsetmat/data'
# return '/home/govinds/repos/catsetmat/data'
# return '/content/drive/My Drive/textual_analysis_email/catsetmat/data'
# return "/content/drive/My Drive/repos/govind_swyam/catsetmat/data"
# return "/home2/e1-313-15477/govind/repos/catsetmat/data"
# return "/home/swyamsingh/repos/catsetmat/data"
return "C:\\cygwin64\\home\\Nidhi\\repos\\catsetmat\\data"
def main():
pass
if __name__ == '__main__':
main()
| 6,327 | 36.443787 | 118 | py |
catsetmat | catsetmat-master/src/experimenter.py | import os
import pickle
import torch
import sys
import torch.nn as nn
from sklearn.metrics import roc_auc_score, pairwise
from sklearn.utils import shuffle
from tqdm.autonotebook import tqdm
from src.link_predictor import predict_links, get_auc_scores
from src.hypersagnn_modules import Classifier as Classifier_hypersagnn
from src.our_modules import device, Classifier
from src.our_utils import obtain_node_embeddings, process_node_emb, \
get_home_path, load_and_process_data, w2v_model
from src.node2vec import *
sys.path.append(get_home_path())
from lib.fspool.main import EMB_LAYER
from src.graphconstructor import read_graph, read_graph_cross
def data_modify(data):
u_, v_, l_ = zip(*data)
npoints_u = [len(x[x > 0].tolist()) for x in u_]
npoints_v = [len(x[x > 0].tolist()) for x in v_]
# pdb.set_trace()
mask_u = torch.cat([(x > 0).float().view(1, x.shape[0], 1) for x in u_], dim=0)
mask_v = torch.cat([(x > 0).float().view(1, x.shape[0], 1) for x in v_], dim=0)
return u_, npoints_u, v_, npoints_v, mask_u, mask_v, l_
def hyp(data, max_node_u):
    # Shift V-side node ids past the U-side range so both sides share one id space,
    # and undo the +1 padding offset applied during preprocessing.
    u_, v_, l_ = zip(*data)
v_new = []
u_new = []
for x in v_:
x = (x[x > 0] + max_node_u + 1)
x = (x - 1).tolist()
v_new.append(x)
for i in range(len(v_new)):
x = (u_[i][u_[i] > 0])
x = (x - 1).tolist()
u_new.append(x)
return u_new, v_new, l_
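# Hedged toy example (values ours) of the id-offsetting scheme in hyp(): V-side ids
# are shifted past the largest U-side id so both sides share one id space, and the
# +1 padding shift from preprocessing is undone with -1.
def _demo_offset_ids():
    max_node_u = 9
    v = torch.tensor([0, 0, 3, 5])  # zeros are padding
    return ((v[v > 0] + max_node_u + 1) - 1).tolist()  # -> [12, 14]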
def hyp_hypersagnn(data, max_node_u):
    # Same id-offsetting as hyp(), but U and V nodes are merged into a single
    # hyperedge per sample for the Hyper-SAGNN baseline.
    u_, v_, l_ = zip(*data)
    # print(max_node_u)
v_new = []
u_new = []
for x in v_:
x = (x[x > 0] + max_node_u + 1)
x = (x - 1).tolist()
v_new.append(x)
for i in range(len(v_new)):
x = (u_[i][u_[i] > 0])
x = (x - 1).tolist()
x = x + v_new[i]
u_new.append(x)
return u_new, l_
def train(model, data, globaliter=0, model_name='catsetmat'):
globaliter += 1
model.train()
# FSPOOL:
if model_name == 'fspool':
U, n_points_U, V, n_points_V, mask_U, mask_V, l_ = data_modify(data)
U = torch.cat(U, dim=0).view(len(U), U[0].shape[0]).to(device)
V = torch.cat(V, dim=0).view(len(V), V[0].shape[0]).to(device)
gold = torch.Tensor(l_).view(-1, 1).to(device)
inputs = (U, V, torch.from_numpy(np.array((list(map(int, n_points_U))))).to(device),
torch.from_numpy(np.array(list(map(int, n_points_V)))).to(device), mask_U.to(device),
mask_V.to(device))
label = model(inputs)
loss = nn.BCEWithLogitsLoss()(label, gold).to(device)
# CATSETMAT:
if model_name.startswith('catsetmat'):
u_, v_, l_ = zip(*data)
xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
yy = torch.cat(v_, dim=0).view(len(v_), v_[0].shape[0]).to(device)
output, weights = model(xx, yy)
loss = criterion(output, torch.from_numpy(np.array(l_)).float().to(device))
label = output.squeeze(-1)
# del xx,yy,weights
# torch.cuda.empty_cache()
# HYPERSAGNN:
if model_name.startswith('hypersagnn'):
u_, l_ = zip(*data)
xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
output, weights = model(xx, xx)
loss = criterion(output, torch.from_numpy(np.array(l_)).float().to(device))
label = output.squeeze(-1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
auc = roc_auc_score(l_, label.cpu().detach().numpy())
return loss.item(), auc, None
def test(model, data, model_name='catsetmat'):
model.eval()
# FSPOOL:
if model_name == 'fspool':
U, n_points_U, V, n_points_V, mask_U, mask_V, l_ = data_modify(data)
U = torch.cat(U, dim=0).view(len(U), U[0].shape[0]).to(device)
V = torch.cat(V, dim=0).view(len(V), V[0].shape[0]).to(device)
inputs = (U, V, torch.from_numpy(np.array((list(map(int, n_points_U))))).to(device),
torch.from_numpy(np.array(list(map(int, n_points_V)))).to(device), mask_U.to(device),
mask_V.to(device))
gold = torch.Tensor(l_).view(-1, 1).to(device)
label = model(inputs)
loss = nn.BCEWithLogitsLoss()(label, gold).to(device)
# CATSETMAT:
if model_name.startswith('catsetmat'):
u_, v_, l_ = zip(*data)
xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
yy = torch.cat(v_, dim=0).view(len(v_), v_[0].shape[0]).to(device)
output, weights = model(xx, yy)
loss = criterion(output, torch.from_numpy(np.array(l_)).float().to(device))
label = output.squeeze(-1)
# HYPERSAGNN:
if model_name.startswith('hypersagnn'):
u_, l_ = zip(*data)
xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
output, weights = model(xx, xx)
loss = criterion(output, torch.from_numpy(np.array(l_)).float().to(device))
label = output.squeeze(-1)
auc = roc_auc_score(l_, label.cpu().detach().numpy())
return loss.item(), auc, None
def read_cache_node_embeddings(args, node_list_set, train_set, data_name, set_name, split_id, base_path, silent=True):
file_name = os.path.join(base_path, 'walks/embeddings/{}/{}_wv_{}__{}{}{}.npy'.format(
data_name, data_name, args.dimensions, data_name, split_id, set_name))
try:
if not silent:
print('Reading embeddings from cache ({})...'.format(file_name))
A = np.load(file_name, allow_pickle=True)
except FileNotFoundError:
print('Cache not found. Generating...')
A = obtain_node_embeddings(args, node_list_set, train_set, data_name, set_name, split_id, base_path,
silent=silent)
node_embedding = process_node_emb(A, node_list_set, args)
return node_embedding
def build_hyper_edges(train_data, test_data, max_node_u):
u_train, l_train = hyp_hypersagnn(train_data, max_node_u)
u_test, l_test = hyp_hypersagnn(test_data, max_node_u)
len_tr = max([len(x) for x in u_train])
len_t = max([len(x) for x in u_test])
max_ = max([len_tr, len_t])
U_ = []
U_T = []
for i in range(len(u_train)):
y = [x + 1 for x in u_train[i]]
y = [0] * (max_ - len(y)) + y
U_.append(torch.from_numpy(np.array(y)).long())
for i in range(len(u_test)):
y = [x + 1 for x in u_test[i]]
y = [0] * (max_ - len(y)) + y
U_T.append(torch.from_numpy(np.array(y)).long())
train_data = list(zip(U_, l_train))
test_data = list(zip(U_T, l_test))
return train_data, test_data, u_train, l_train
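# Hedged one-liner illustrating the left-padding used in build_hyper_edges: real
# ids are shifted by +1 (0 stays padding) and zeros are prepended up to max_len.
def _demo_left_pad(edge=(4, 7), max_len=5):
    y = [x + 1 for x in edge]
    return [0] * (max_len - len(y)) + y  # -> [0, 0, 0, 5, 8]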
def perform_experiment(emb_args, home_path, data_path, data_name, split_id, result_path,
num_epochs, batch_size, model_save_split_id, model_name, lr):
global criterion, optimizer
pickled_path = os.path.join(data_path, 'processed', data_name, '{}.pkl'.format(split_id))
train_data, test_data, U_t, V_t, node_list_U, node_list_V = load_and_process_data(pickled_path)
base_path = home_path
# print(pickled_path)
# pdb.set_trace()
node_embedding_U = read_cache_node_embeddings(emb_args, node_list_U, U_t, data_name, 'U', split_id, base_path)
node_embedding_V = read_cache_node_embeddings(emb_args, node_list_V, V_t, data_name, 'V', split_id, base_path)
if model_name == 'n2v':
max_node_u = max(node_list_U)
u_train, v_train, l_train = hyp(train_data, max_node_u)
u_test, v_test, l_test = hyp(test_data, max_node_u)
node_list_v = [(x + max_node_u + 1) for x in node_list_V]
node_list = node_list_U + node_list_v
index = [idx for idx, val in enumerate(l_train) if val != 0]
U_train = [u_train[x] for x in index]
V_train = [v_train[x] for x in index]
# for only cross graph from bipartite hypergraph
g_train_c = read_graph_cross(node_list, U_train, V_train)
# for full graph form bipartite hypergraph
for i in range(len(U_train)):
U_train[i] += V_train[i]
g_train_f = read_graph(node_list, U_train)
g_n2v_c = Graph(g_train_c, 0, emb_args.p, emb_args.q)
g_n2v_f = Graph(g_train_f, 0, emb_args.p, emb_args.q)
g_n2v_c.preprocess_transition_probs()
g_n2v_f.preprocess_transition_probs()
walks_c = g_n2v_c.simulate_walks(emb_args.num_walks, emb_args.walk_length)
walks_f = g_n2v_f.simulate_walks(emb_args.num_walks, emb_args.walk_length)
A_C = np.array(w2v_model(walks_c, emb_args, node_list, True))
A_F = np.array(w2v_model(walks_f, emb_args, node_list, True))
result_c = pairwise.cosine_similarity(A_C[node_list_U], A_C[node_list_v], dense_output=True)
result_f = pairwise.cosine_similarity(A_F[node_list_U], A_F[node_list_v], dense_output=True)
auc_result_minc = []
auc_result_minf = []
auc_result_meanc = []
auc_result_meanf = []
for i in range(len(u_test)):
probf = []
probc = []
for j in u_test[i]:
for k in v_test[i]:
probc.append(result_c[j, k - max_node_u - 1])
probf.append(result_f[j, k - max_node_u - 1])
auc_result_minc.append(min(probc))
auc_result_meanc.append(sum(probc) / len(probc))
auc_result_minf.append(min(probf))
auc_result_meanf.append(sum(probf) / len(probf))
# t.set_description("AUC test:min_cross {} , mean_cross {} ,min_full {} ,mean_full {}".\
# format(round(roc_auc_score(l_test,auc_result_minc), 4),
# round(roc_auc_score(l_test,auc_result_meanc), 4),
# round(roc_auc_score(l_test,auc_result_minf), 4),
# round(roc_auc_score(l_test,auc_result_meanf), 4)))
auc = {"min_cross": round(roc_auc_score(l_test, auc_result_minc), 4),
"mean_cross": round(roc_auc_score(l_test, auc_result_meanc), 4),
"min_full": round(roc_auc_score(l_test, auc_result_minf), 4),
"mean_full": round(roc_auc_score(l_test, auc_result_meanf), 4)}
print('AUC', auc)
loss = None
model = [A_C, A_F]
elif model_name == 'lp':
train_scores_df, test_scores_df = predict_links(train_data, test_data, U_t, V_t, node_list_U, node_list_V)
train_auc_scores = get_auc_scores(train_scores_df)
test_auc_scores = get_auc_scores(test_scores_df)
auc = test_auc_scores
print('AUC', auc)
loss = None
model = [train_auc_scores, test_auc_scores]
else:
# FSPOOL:
if model_name == 'fspool':
U, n_points_U, V, n_points_V, mask_U, mask_V, l_ = data_modify(train_data)
hidden_dim = 128
latent_dim = emb_args.dimensions
model = EMB_LAYER(node_embedding_U, node_embedding_V, 0, latent_dim + 1,
latent_dim, hidden_dim,
set_size_U=max(n_points_U),
set_size_V=max(n_points_V),
skip=False, relaxed=False).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=1E-6)
# CATSETMAT:
if model_name.startswith('catsetmat'):
if '-' not in model_name:
model_name = 'catsetmat-x'
latent_dim = emb_args.dimensions
# print("catset",latent_dim,lr)
model_type = model_name.split('-')[-1]
model = Classifier(n_head=8,
d_model=latent_dim,
d_k=int(latent_dim / 4) if latent_dim >= 4 else 1,
d_v=int(latent_dim / 4) if latent_dim >= 4 else 1,
node_embedding1=node_embedding_U,
node_embedding2=node_embedding_V,
diag_mask=False,
bottle_neck=latent_dim,
cross_attn_type=model_type).to(device).to(device)
criterion = nn.BCELoss().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr, weight_decay=1e-6)
# HYPER-SAGNN:
if model_name.startswith('hypersagnn'):
if '-' not in model_name:
model_name = 'hypersagnn-sum'
hypersagnn_mode = model_name.split('-')[-1]
max_node_u = max(node_list_U)
node_list_v = [(x + max_node_u + 1) for x in node_list_V]
            train_data, test_data, u_train, l_train = build_hyper_edges(train_data, test_data, max_node_u)
node_list = node_list_U + node_list_v
index = [idx for idx, val in enumerate(l_train) if val != 0]
U_train = [np.array(u_train[x], dtype=int) for x in index]
node_embedding = read_cache_node_embeddings(emb_args, node_list, np.array(U_train), data_name, 'hyp_U',
split_id,
base_path)
latent_dim = emb_args.dimensions
model = Classifier_hypersagnn(n_head=8,
d_model=latent_dim,
d_k=int(latent_dim / 4) if latent_dim >= 4 else 1,
d_v=int(latent_dim / 4) if latent_dim >= 4 else 1,
node_embedding1=node_embedding,
diag_mask=False,
bottle_neck=latent_dim,
hypersagnn_mode=hypersagnn_mode).to(device).to(device)
criterion = nn.BCELoss().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr, weight_decay=1e-6)
# pytorch_total_params = sum(p.numel() for p in model.parameters())
# print(pytorch_total_params)
loss = []
auc = []
t = tqdm(range(num_epochs), 'Split id {} '.format(split_id))
for i in t:
train_data1 = shuffle(train_data)
break_condition = False
batch_losses = []
batch_aucs = []
j = 0
while not break_condition:
if j + batch_size < len(train_data1):
batch_data = train_data1[j:j + batch_size]
j += batch_size
else:
batch_data = train_data1[j:]
break_condition = True
batch_loss, batch_auc, weights = train(model, batch_data, model_name=model_name)
batch_losses.append(batch_loss)
batch_aucs.append(batch_auc)
train_loss = np.mean(batch_losses)
train_auc = np.mean(batch_aucs)
test_loss, test_auc, test_weights = test(model, test_data, model_name=model_name)
loss.append((train_loss, test_loss))
auc.append((train_auc, test_auc))
t.set_description("Split id {}; AUC train: {}, test: {}".format(split_id,
round(train_auc, 4),
round(test_auc, 4)))
t.refresh()
# print('({}/{})'.format(round(train_auc, 4), round(test_auc, 4)), end=' ')
# print("epoch {} :train loss {} and auc {}: test loss {} and auc {} : ".format(i, train_loss_, train_auc,
# test_loss_, test_auc))
Results = {"AUC": auc, "loss": loss}
pickle.dump(Results, open(os.path.join(result_path, '{}_{}.pkl'.format(model_name, split_id)), 'wb'))
# pickle.dump([], open(os.path.join(result_path,
# '{}_{}{}.pkl'.format(model_name,
# split_id,str(pytorch_total_params))), 'wb'))
if split_id == model_save_split_id:
torch.save(model, os.path.join(result_path, 'model_{}_{}.mdl'.format(model_name, split_id)))
return Results
| 16,242 | 45.144886 | 118 | py |
catsetmat | catsetmat-master/lib/fspool/main.py | import os, sys
import argparse
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.multiprocessing as mp
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
import scipy.optimize
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
# print(os.path.dirname(os.path.abspath(__file__)))
# sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from .autoencoder.model import *
from .autoencoder import data, track
class FSEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
)
self.pool = FSPool(dim, 20, relaxed=kwargs.get('relaxed', True))
self.relu = nn.ReLU(inplace=True)
def forward(self, x, n_points, *args):
x = self.conv(x)
x, perm = self.pool(x, n_points)
#x=nn.Dropout(p=0.2)(x)
x = self.lin(x)
return x, perm
class FSEncoder_set(nn.Module):
    def __init__(self, *, input_channels, output_channels, set_size, dim, **kwargs):
        super().__init__()
        self.enc = nn.Sequential(
            nn.Linear(input_channels * set_size, dim)
        )
        self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
        )
        self.pool = FSPool(dim, 20, relaxed=kwargs.get('relaxed', True))
def forward(self, x, n_points, *args):
x = x.view(x.size(0), -1)
x = self.enc(x)
x, perm = self.pool(x, n_points)
x = self.lin(x)
return x
class SumEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.sum(2)
x = self.lin(x)
        return x, x.clone()
class MaxEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.max(2)[0]
x = self.lin(x)
        return x, x.clone()
class MeanEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.sum(2) / n_points.unsqueeze(1).float()
x = self.lin(x)
        return x, x.clone()
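# Hedged shape sketch (toy tensors, ours): the encoders in this file take
# channel-first sets of shape (batch, input_channels, n_points), with n_points
# holding the true cardinality of each set.
def _demo_encoder_shapes():
    enc = MeanEncoder(input_channels=3, output_channels=8, dim=16)
    x = torch.randn(4, 3, 10)  # 4 sets of up to 10 three-dimensional points
    n_points = torch.full((4,), 10)
    out, _ = enc(x, n_points)
    return out.shape           # -> torch.Size([4, 8])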
class BLP(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
FSE=MeanEncoder
self.enc_U = FSE(input_channels = input_channels,
output_channels = output_channels,
set_size = kwargs['set_size_U'],
dim = dim,
**kwargs)
self.enc_V = FSE(input_channels = input_channels,
output_channels = output_channels,
set_size = kwargs['set_size_V'],
dim = dim,
**kwargs)
self.classifier = nn.Linear(2*output_channels, 1)
def forward(self, sample, *args):
U, V, n_points_U, n_points_V = sample
x_U, _ = self.enc_U(U, n_points_U)
x_V, _ = self.enc_V(V, n_points_V)
x = self.classifier(torch.cat([x_U, x_V], dim=1))
return x
class BLP_(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
FSE=FSEncoder
self.enc_U = FSE(input_channels = input_channels,
output_channels = output_channels,
set_size = kwargs['set_size_U'],
dim = dim,
**kwargs)
# self.enc_V = FSE(input_channels = input_channels,
# output_channels = output_channels,
# set_size = kwargs['set_size_V'],
# dim = dim,
# **kwargs)
self.classifier = nn.Linear(output_channels, 1)
def forward(self, sample, *args):
U, V, n_points_U, n_points_V = sample
# pdb.set_trace()
        # merge the two sets along the point axis and encode them with one encoder
        U_ = torch.cat([U, V], dim=2)
        n_points = n_points_U + n_points_V
x_U, _ = self.enc_U(U_, n_points)
# x_V, _ = self.enc_V(V, n_points_V)
x = self.classifier(x_U)
return x
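# Hedged sketch of the merge performed by BLP_ above: U- and V-side sets are
# concatenated along the point axis (dim=2 in channel-first layout) and their
# cardinalities added, so a single set encoder scores the combined hyperedge.
def _demo_merge_sets():
    U = torch.randn(2, 5, 3)  # (batch, channels, |U| points)
    V = torch.randn(2, 5, 4)
    return torch.cat([U, V], dim=2).shape  # -> torch.Size([2, 5, 7])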
class EMB_LAYER(nn.Module):
def __init__(self,word_map1,word_map2,padd,input_channels, output_channels, dim, set_size_U,set_size_V,**kwargs):
super().__init__()
kwargs['set_size_U'] = set_size_U
kwargs['set_size_V'] = set_size_V
# self.embedding1 = nn.Embedding.from_pretrained(embeddings=word_map1,freeze=False,padding_idx=padd)
# self.embedding2 = nn.Embedding.from_pretrained(embeddings=word_map2,freeze=False,padding_idx=padd)
        self.embedding1 = word_map1
        self.embedding2 = word_map2
self.out_ = BLP_(input_channels = input_channels,
output_channels = output_channels,
dim = dim,
**kwargs)
        self.c_U = set_size_U
        self.c_V = set_size_V
        self.padd = padd
def forward(self, sample, *args):
# pdb.set_trace()
        U, V, n_points_U, n_points_V, mask_U, mask_V = sample
        # the embedding modules return tuples; index 0 holds the embedding tensor
        U_ = self.embedding1(U)
        V_ = self.embedding2(V)
        # append the padding mask as an extra feature channel
        U_ = torch.cat([U_[0], mask_U], dim=2)
        V_ = torch.cat([V_[0], mask_V], dim=2)
        sample = (torch.transpose(U_, 1, 2), torch.transpose(V_, 1, 2), n_points_U, n_points_V)
        x = self.out_(sample)
        return x
 | 7,346 | 31.082969 | 117 | py |
catsetmat | catsetmat-master/lib/fspool/temp.py | '''
# home_path = '/content/drive/My Drive/projects/textual_analysis_email/'
home_path = '/home/jupyter/project/textual_analysis_email'
# sample_path = os.path.join(home_path, 'sample_data')
data_params = {'home_path': home_path,
'r_label_file': 'id_p_map.txt',
'u_label_file': 'id_a_map.txt',
'v_label_file': 'id_k_map.txt',
'r_u_list_file': 'p_a_list_train.txt',
'r_v_list_file': 'p_k_list_train.txt',
'emb_pkl_file': 'nodevectors.pkl'}
# methods = [commonneigh, admic_adar, jaccard]
# method_name_map = dict(zip(methods, ['CN', 'AA', 'JC']))
num_iter = 2
pos_A, pos_B = load_bipartite_hypergraph(data_params)
G, obs_pos, unobs_data, V_offset = data_process(pos_A, pos_B, neg_pos_ratio = 1, unobs_ratio=0.5)
max_id=max(list(G.nodes))
max_id
embedding_map=model(G,P=0.25,Q=0.25,WALK_LENGTH=100,WINDOW_SIZE=10)
emb_map=[]
for i in range(0,max_id):
emb_map.append(embedding_map.get_vector(str(i)))
emb_map.append(np.zeros(128,dtype='float'))
def mapping(data,max_id):
pairs, labels = zip(*data)
U, V = zip(*pairs)
n_points_U = np.array([len(x) for x in U])
n_points_V = np.array([len(x) for x in V])
cardinality_U = max(n_points_U)
cardinality_V = max(n_points_V)
U = [x + [max_id]*(cardinality_U - len(x)) for x in U]
V = [x + [max_id]*(cardinality_V - len(x)) for x in V]
return U, V, n_points_U, n_points_V, cardinality_U, cardinality_V, labels
weight=torch.from_numpy(np.matrix(emb_map)).type(torch.FloatTensor)
# weight=torch.randn((weight.size(0),weight.size(1))).type(torch.FloatTensor)
# weight[weight.size(0)-1][:]=0
# weight[weight.size(0)-1]
hidden_dim = 256
latent_dim = 32
U, V, n_points_U, n_points_V, cardinality_U, cardinality_V, labels = mapping(unobs_data, max_id)
unobs_data=list(zip(U, V, n_points_U, n_points_V, labels))
train_data,test_data=train_test_split(unobs_data,test_size=0.2)
U, V, n_points_U, n_points_V, labels=zip(*train_data)
U=torch.from_numpy(np.array(U)).cuda()
V=torch.from_numpy(np.array(V)).cuda()
tU, tV, tn_points_U, tn_points_V, tlabels=zip(*test_data)
tU=torch.from_numpy(np.array(tU)).cuda()
tV=torch.from_numpy(np.array(tV)).cuda()
# tpoints_U, tpoints_V, tn_U, tn_V, tc_U, tc_V, t_labels = mapping(embedding_map,test_data)
# points_U = pad_zeros(torch.Tensor(points_U), max([c_U,tc_U]))
# points_V = pad_zeros(torch.Tensor(points_V), max([c_V,tc_V]))
# tpoints_U = pad_zeros(torch.Tensor(tpoints_U), max([c_U,tc_U]))
# tpoints_V = pad_zeros(torch.Tensor(tpoints_V), max([c_V,tc_V]))
# net = BLP(input_channels = 129,
# output_channels = latent_dim,
# set_size_U = cardinality_U,
# set_size_V = cardinality_V,
# dim = hidden_dim,
# skip = False,
# relaxed = False)
net=EMB_LAYER(weight,max_id,129,
latent_dim,hidden_dim,
set_size_U=int(cardinality_U),
set_size_V = int(cardinality_V),
skip=False,relaxed=False).cuda()
# # optimizer = torch.optim.AdamW(net.parameters(), lr=0.0001, weight_decay=1E-6)
# optimizer = torch.optim.Adamax(net.parameters(), lr=0.001, weight_decay=1E-6)
# optimizer = torch.optim.Adagrad(net.parameters(), lr=0.0001, weight_decay=1E-6)
optimizer = torch.optim.Adam(net.parameters(), lr=0.0001, weight_decay=1E-6)
mask_U = torch.from_numpy(np.array([[1]*n_points_U[i] + [0]*(cardinality_U-n_points_U[i]) for i in range(len(U))])).type(torch.FloatTensor).view(U.shape[0],cardinality_U,1).cuda()
mask_V= torch.from_numpy(np.array([[1]*n_points_V[i] + [0]*(cardinality_V-n_points_V[i]) for i in range(len(V))])).type(torch.FloatTensor).view(V.shape[0],cardinality_V,1).cuda()
tmask_U = torch.from_numpy(np.array([[1]*tn_points_U[i] + [0]*(cardinality_U-tn_points_U[i]) for i in range(len(tU))])).type(torch.FloatTensor).view(tU.shape[0],cardinality_U,1).cuda()
tmask_V= torch.from_numpy(np.array([[1]*tn_points_V[i] + [0]*(cardinality_V-tn_points_V[i]) for i in range(len(tV))])).type(torch.FloatTensor).view(tV.shape[0],cardinality_V,1).cuda()
from tqdm import tqdm_notebook
import pickle
losses = []
test_losses = []
gold = torch.Tensor(labels).view(-1, 1).cuda()
tgold = torch.Tensor(tlabels).view(-1, 1).cuda()
n_epoch =300
inputs = (U,V, torch.from_numpy(np.array((list(map(int,n_points_U))))).cuda(), torch.from_numpy(np.array(list(map(int,n_points_V)))).cuda(),mask_U,mask_V)
t_input =(tU, tV, torch.from_numpy(np.array(list(map(int,tn_points_U)))).cuda(), torch.from_numpy(np.array(list(map(int, tn_points_V)))).cuda(),tmask_U,tmask_V)
aucs=[]
for _ in tqdm_notebook(range(n_epoch)):
net.train()
out = net(inputs)
torch.save(net.state_dict, 'fs_pool_authors_keywords.model')
loss = nn.BCEWithLogitsLoss()(out, gold)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print("after",_)
losses.append(loss.detach().item())
net.eval()
test_out = net(t_input)
#aucs.append((roc_auc_score(gold,out),roc_auc_score(tgold,test_out)))
test_loss = nn.BCEWithLogitsLoss()(test_out, tgold)
# print("before",_)
test_losses.append(test_loss.detach().item())
aucs.append((_,roc_auc_score(gold.clone().cpu().detach().numpy(),nn.Sigmoid()(out).cpu().detach().numpy()),
roc_auc_score(tgold.clone().cpu().detach().numpy(),nn.Sigmoid()(test_out).cpu().detach().numpy())))
pickle.dump({'train_losses': losses, 'test_losses': test_losses}, open('fs_pool_authors_keywords.losses.pkl', 'wb'))
import pickle
loss_dict = pickle.load(open('fs_pool_authors_keywords.losses.pkl', 'rb'))
model_dict = pickle.load(open('fs_pool_authors_keywords.model', 'rb'))
losses = loss_dict['train_losses']
test_losses = loss_dict['test_losses']
from matplotlib import pyplot as plt
n_epoch1 = len(losses)
plt.plot(range(n_epoch1), losses, label='train')
plt.plot(range(n_epoch1), test_losses, label='test')
plt.grid()
plt.legend()
plt.show()
# mse, cha, acc = torch.FloatTensor([-1, -1, -1])
# if not args.classify:
# mse = (pred - points).pow(2).mean()
# cha = chamfer_loss(pred, points)
# if args.loss == 'direct':
# loss = mse
# elif args.loss == 'chamfer':
# loss = cha
# elif args.loss == 'hungarian':
# loss = hungarian_loss(pred, points)
# else:
# raise NotImplementedError
# else:
# loss = F.cross_entropy(pred, labels)
# acc = (pred.max(dim=1)[1] == labels).float().mean()
# if train:
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# tracked_mse = tracker.update('{}_mse'.format(prefix), mse.item())
# tracked_cha = tracker.update('{}_cha'.format(prefix), cha.item())
# tracked_loss = tracker.update('{}_loss'.format(prefix), loss.item())
# tracked_acc = tracker.update('{}_acc'.format(prefix), acc.item())
# fmt = '{:.5f}'.format
# loader.set_postfix(
# mse=fmt(tracked_mse),
# cha=fmt(tracked_cha),
# loss=fmt(tracked_loss),
# acc=fmt(tracked_acc),
# )
# if args.show and not train:
# #scatter(input_points, n_points, marker='o', transpose=args.mnist)
# scatter(pred, n_points, marker='x', transpose=args.mnist)
# plt.axes().set_aspect('equal', 'datalim')
# plt.show()
'''
import argparse
from time import sleep
import pdb
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import torch
import sys
import torch.nn as nn
from sklearn.metrics import roc_auc_score
from sklearn.utils import shuffle
from tqdm.autonotebook import tqdm
# from src.our_modules import device, Classifier
from src.our_utils import obtain_node_embeddings, process_node_emb, get_home_path, mkdir_p, load_and_process_data, \
get_data_path
from src.results_analyzer import plot_results
from src import train_test_sampler
from src import embedding_storer
sys.path.append(get_home_path())
from lib.hypersagnn.main import parse_args as parse_embedding_args
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def set_torch_environment():
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
def parse_args():
parser = argparse.ArgumentParser(description="CATSETMAT: Main module")
parser.add_argument('--data_name', type=str, default='sample_mag_acm')
parser.add_argument('--num_splits', type=int, default=5,
                        help='Number of train-test-splits / negative-samplings. Default is 5.')
parser.add_argument('--start_split', type=int, default=0,
help='Start id of splits; splits go from start_split to start_split+num_splits. Default is 0.')
parser.add_argument('--num_epochs', type=int, default=200,
help='Number of epochs. Default is 200.')
parser.add_argument('--batch_size', type=int, default=300,
                        help='Batch size. Default is 300.')
parser.add_argument('--model_save_split_id', type=int, default=0,
help='Split id for which model is to be saved. Default is 0.')
args = parser.parse_args('')
return args
def process_args(args):
data_name = args.data_name
num_splits = args.num_splits
start_split = args.start_split
splits = range(start_split, start_split + num_splits)
num_epochs = args.num_epochs
batch_size = args.batch_size
return data_name, splits, num_epochs, batch_size, args.model_save_split_id
def data_modify(data):
u_, v_, l_ = zip(*data)
npoints_u=[len(x[x>0].tolist())for x in u_]
npoints_v=[len(x[x>0].tolist())for x in v_]
# pdb.set_trace()
mask_u=torch.cat([(x>0).float().view(1,x.shape[0],1) for x in u_ ],dim=0)
mask_v=torch.cat([(x>0).float().view(1,x.shape[0],1) for x in v_ ],dim=0)
return u_,npoints_u,v_,npoints_v,mask_u,mask_v,l_
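# Illustrative sketch (added for clarity; not part of the original script):
# data_modify derives per-set sizes and 0/1 masks from padded index tensors,
# where index 0 is the padding value.
def _demo_data_modify():
    u = torch.LongTensor([3, 5, 0])  # U-side set with 2 real nodes, 1 pad
    v = torch.LongTensor([2, 0, 0])  # V-side set with 1 real node, 2 pads
    u_, n_u, v_, n_v, mask_u, mask_v, l_ = data_modify([(u, v, 1.0)])
    # n_u == [2], n_v == [1]; mask_u has shape (1, 3, 1) with a 0 at row 2
    return n_u, n_v, mask_u, mask_v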
def train(model, data, globaliter=0):
globaliter += 1
model.train()
U,n_points_U,V,n_points_V,mask_U,mask_V,l_=data_modify(data)
U=torch.cat(U,dim=0).view(len(U),U[0].shape[0]).to(device)
V=torch.cat(V,dim=0).view(len(V),V[0].shape[0]).to(device)
gold = torch.Tensor(l_).view(-1, 1).to(device)
    inputs = (U, V,
              torch.from_numpy(np.array(list(map(int, n_points_U)))).to(device),
              torch.from_numpy(np.array(list(map(int, n_points_V)))).to(device),
              mask_U.to(device), mask_V.to(device))
# u_, v_, l_ = zip(*data)
# xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
# yy = torch.cat(v_, dim=0).view(len(v_), v_[0].shape[0]).to(device)
# pdb.set_trace()
out = model(inputs)
# torch.save(net.state_dict, 'fs_pool_authors_keywords.model')
loss = nn.BCEWithLogitsLoss()(out, gold).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# output, weights = model(xx, yy)
# loss = criterion(output, torch.from_numpy(np.array(l_)).float().to(device))
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# label = output.squeeze(-1)
# # pdb.set_trace()
# del xx,yy,weights
# torch.cuda.empty_cache()
auc = roc_auc_score(l_ , out.cpu().detach().numpy())
return loss.item(), auc,None
def test(model, data):
model.eval()
# u_, v_, l_ = zip(*data)
U,n_points_U,V,n_points_V,mask_U,mask_V,l_=data_modify(data)
U=torch.cat(U,dim=0).view(len(U),U[0].shape[0]).to(device)
V=torch.cat(V,dim=0).view(len(V),V[0].shape[0]).to(device)
    inputs = (U, V,
              torch.from_numpy(np.array(list(map(int, n_points_U)))).to(device),
              torch.from_numpy(np.array(list(map(int, n_points_V)))).to(device),
              mask_U.to(device), mask_V.to(device))
gold = torch.Tensor(l_).view(-1, 1).to(device)
# inputs = (U,V, torch.from_numpy(np.array((list(map(int,n_points_U))))), torch.from_numpy(np.array(list(map(int,n_points_V)))).cuda(),mask_U.cuda(),mask_V.cuda())
# u_, v_, l_ = zip(*data)
# xx = torch.cat(u_, dim=0).view(len(u_), u_[0].shape[0]).to(device)
# yy = torch.cat(v_, dim=0).view(len(v_), v_[0].shape[0]).to(device)
out = model(inputs)
# torch.save(net.state_dict, 'fs_pool_authors_keywords.model')
loss = nn.BCEWithLogitsLoss()(out, gold).to(device)
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# output, weights = model(xx, yy)
# loss = criterion(output, torch.from_numpy(np.array(l_)).float().to(device))
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# label = output.squeeze(-1)
# # pdb.set_trace()
# del xx,yy,weights
# torch.cuda.empty_cache()
auc = roc_auc_score(l_ , out.cpu().detach().numpy())
return loss.item(), auc, None
def read_cache_node_embeddings(args, node_list_set, train_set, data_name, set_name, split_id, base_path, silent=True):
file_name = os.path.join(base_path, 'walks/embeddings/{}/{}_wv_{}__{}{}{}.npy'.format(
data_name, data_name, args.dimensions, data_name, split_id, set_name))
try:
if not silent:
print('Reading embeddings from cache ({})...'.format(file_name))
A = np.load(file_name, allow_pickle=True)
except FileNotFoundError:
print('Cache not found. Generating...')
A = obtain_node_embeddings(args, node_list_set, train_set, data_name, set_name, split_id, base_path, silent=silent)
node_embedding = process_node_emb(A, node_list_set, args)
return node_embedding
def perform_experiment(emb_args, home_path, data_path, data_name, split_id, result_path, num_epochs, batch_size, model_save_split_id):
global criterion, optimizer
pickled_path = os.path.join(data_path, 'processed', data_name, '{}.pkl'.format(split_id))
train_data, test_data, U_t, V_t, node_list_U, node_list_V = load_and_process_data(pickled_path)
base_path = home_path
U,n_points_U,V,n_points_V,mask_U,mask_V,l_=data_modify(train_data)
node_embedding_U = read_cache_node_embeddings(emb_args, node_list_U, U_t, data_name, 'U', split_id, base_path)
node_embedding_V = read_cache_node_embeddings(emb_args, node_list_V, V_t, data_name, 'V', split_id, base_path)
# pdb.set_trace()
hidden_dim = 128
# latent_dim = 64
latent_dim = emb_args.dimensions
    # NOTE: EMB_LAYER is not defined in this file; it is expected to come
    # from the project's model modules (cf. the commented-out import of
    # src.our_modules above).
    model = EMB_LAYER(node_embedding_U,node_embedding_V,0,latent_dim+1,
latent_dim,hidden_dim,
set_size_U=max(n_points_U),
set_size_V = max(n_points_V),
skip=False,relaxed=False).to(device)
# print(model)
pytorch_total_params = sum(p.numel() for p in model.parameters())
print(pytorch_total_params)
# criterion = nn.BCELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1E-6)
# pytorch_total_params = sum(p.numel() for p in model.parameters())
# print(pytorch_total_params)
loss = []
auc = []
t = tqdm(range(num_epochs), 'AUC')
for i in t:
train_data1 = shuffle(train_data)
break_condition = False
batch_losses = []
batch_aucs = []
j = 0
while not break_condition:
if j + batch_size < len(train_data1):
batch_data = train_data1[j:j + batch_size]
j += batch_size
else:
batch_data = train_data1[j:]
break_condition = True
batch_loss, batch_auc, weights = train(model, batch_data)
batch_losses.append(batch_loss)
batch_aucs.append(batch_auc)
train_loss = np.mean(batch_losses)
train_auc = np.mean(batch_aucs)
test_loss, test_auc, test_weights = test(model, test_data)
loss.append((train_loss, test_loss))
auc.append((train_auc, test_auc))
t.set_description("AUC train: {}, test: {}".format(round(train_auc, 4), round(test_auc, 4)))
t.refresh()
# print('({}/{})'.format(round(train_auc, 4), round(test_auc, 4)), end=' ')
# print("epoch {} :train loss {} and auc {}: test loss {} and auc {} : ".format(i, train_loss_, train_auc,
# test_loss_, test_auc))
Results = {"AUC": auc, "loss": loss}
pickle.dump(Results, open(os.path.join(result_path, '{}.pkl'.format(split_id)), 'wb'))
if split_id == model_save_split_id:
torch.save(model, os.path.join(result_path, 'model_{}.mdl'.format(split_id)))
return model
| 16,357 | 35.759551 | 189 | py |
catsetmat | catsetmat-master/lib/fspool/autoencoder/fspool.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class FSPool(nn.Module):
"""
Featurewise sort pooling. From:
FSPool: Learning Set Representations with Featurewise Sort Pooling.
"""
def __init__(self, in_channels, n_pieces, relaxed=False):
"""
in_channels: Number of channels in input
n_pieces: Number of pieces in piecewise linear
relaxed: Use sorting networks relaxation instead of traditional sorting
"""
super().__init__()
self.n_pieces = n_pieces
self.weight = nn.Parameter(torch.zeros(in_channels, n_pieces + 1))
self.relaxed = relaxed
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight)
def forward(self, x, n=None):
""" FSPool
x: FloatTensor of shape (batch_size, in_channels, set size).
This should contain the features of the elements in the set.
Variable set sizes should be padded to the maximum set size in the batch with 0s.
n: LongTensor of shape (batch_size).
This tensor contains the sizes of each set in the batch.
            If not specified, assumes that every set has the same size x.size(2).
Note that n.max() should never be greater than x.size(2), i.e. the specified set size in the
n tensor must not be greater than the number of elements stored in the x tensor.
Returns: pooled input x, used permutation matrix perm
"""
assert x.size(1) == self.weight.size(0), 'incorrect number of input channels in weight'
        # can be called without a length tensor; uses the same length for all sets in the batch
if n is None:
n = x.new(x.size(0)).fill_(x.size(2)).long()
# create tensor of ratios $r$
sizes, mask = fill_sizes(n, x)
mask = mask.expand_as(x)
# turn continuous into concrete weights
weight = self.determine_weight(sizes)
# make sure that fill value isn't affecting sort result
# sort is descending, so put unreasonably low value in places to be masked away
x = x + (1 - mask).float() * -99999
if self.relaxed:
x, perm = cont_sort(x, temp=self.relaxed)
else:
x, perm = x.sort(dim=2, descending=True)
x = (x * weight * mask.float()).sum(dim=2)
return x, perm
def forward_transpose(self, x, perm, n=None):
""" FSUnpool
x: FloatTensor of shape (batch_size, in_channels)
perm: Permutation matrix returned by forward function.
        n: LongTensor of shape (batch_size)
"""
if n is None:
n = x.new(x.size(0)).fill_(perm.size(2)).long()
sizes, mask = fill_sizes(n)
mask = mask.expand(mask.size(0), x.size(1), mask.size(2))
weight = self.determine_weight(sizes)
x = x.unsqueeze(2) * weight * mask.float()
if self.relaxed:
x, _ = cont_sort(x, perm)
else:
x = x.scatter(2, perm, x)
return x, mask
def determine_weight(self, sizes):
"""
Piecewise linear function. Evaluates f at the ratios in sizes.
This should be a faster implementation than doing the sum over max terms, since we know that most terms in it are 0.
"""
        # all samples share the same sequence length, so copy weight across the batch dim
weight = self.weight.unsqueeze(0)
weight = weight.expand(sizes.size(0), weight.size(1), weight.size(2))
# linspace [0, 1] -> linspace [0, n_pieces]
index = self.n_pieces * sizes
index = index.unsqueeze(1)
index = index.expand(index.size(0), weight.size(1), index.size(2))
# points in the weight vector to the left and right
idx = index.long()
frac = index.frac()
left = weight.gather(2, idx)
right = weight.gather(2, (idx + 1).clamp(max=self.n_pieces))
# interpolate between left and right point
return (1 - frac) * left + frac * right
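# Illustrative sketch (added for clarity; not part of the original module):
# with n_pieces = 2 and a single channel whose weight row is [0, 1, 4], the
# piecewise-linear function is sampled at ratios r in [0, 1]; r = 0.25 lands
# halfway between pieces 0 and 1 and therefore interpolates to 0.5.
def _demo_determine_weight():
    pool = FSPool(in_channels=1, n_pieces=2)
    with torch.no_grad():
        pool.weight.copy_(torch.tensor([[0.0, 1.0, 4.0]]))
    ratios = torch.tensor([[0.0, 0.25, 0.5, 1.0]])  # one sample, four ratios
    w = pool.determine_weight(ratios)
    # w[0, 0] == tensor([0.0, 0.5, 1.0, 4.0])
    return w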
def fill_sizes(sizes, x=None):
"""
sizes is a LongTensor of size [batch_size], containing the set sizes.
Each set size n is turned into [0/(n-1), 1/(n-1), ..., (n-2)/(n-1), 1, 0, 0, ..., 0, 0].
    These are the ratios r at which f is evaluated.
The 0s at the end are there for padding to the largest n in the batch.
    If the input set x is passed in, it guarantees that the mask is the correct size even when sizes.max()
    is less than x.size(2), which can be the case if there is at least one padding element in each set in the batch.
"""
if x is not None:
max_size = x.size(2)
else:
max_size = sizes.max()
size_tensor = torch.arange(end=max_size, device=sizes.device, dtype=torch.float32)
size_tensor = size_tensor.unsqueeze(0) / (sizes.float() - 1).clamp(min=1).unsqueeze(1)
mask = size_tensor <= 1
mask = mask.unsqueeze(1)
return size_tensor.clamp(max=1), mask.float()
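# Illustrative sketch (added for clarity; not part of the original module):
# for set sizes [3, 5] (max size 5), fill_sizes returns the ratio grid
#   [[0.00, 0.50, 1.00, 1.00, 1.00],
#    [0.00, 0.25, 0.50, 0.75, 1.00]]
# together with a mask marking the first 3 (resp. 5) positions as valid.
def _demo_fill_sizes():
    sizes = torch.LongTensor([3, 5])
    ratios, mask = fill_sizes(sizes)
    return ratios, mask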
def deterministic_sort(s, tau):
"""
"Stochastic Optimization of Sorting Networks via Continuous Relaxations" https://openreview.net/forum?id=H1eSS3CcKX
Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon
s: input elements to be sorted. Shape: batch_size x n x 1
tau: temperature for relaxation. Scalar.
"""
n = s.size()[1]
one = torch.ones((n, 1), dtype = torch.float32, device=s.device)
A_s = torch.abs(s - s.permute(0, 2, 1))
B = torch.matmul(A_s, torch.matmul(one, one.transpose(0, 1)))
scaling = (n + 1 - 2 * (torch.arange(n, device=s.device) + 1)).type(torch.float32)
C = torch.matmul(s, scaling.unsqueeze(0))
P_max = (C - B).permute(0, 2, 1)
sm = torch.nn.Softmax(-1)
P_hat = sm(P_max / tau)
return P_hat
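# Illustrative sketch (added for clarity; not part of the original module):
# as tau -> 0 the relaxed permutation P_hat approaches the hard permutation
# that sorts in descending order, so P_hat @ s is (approximately) sorted s.
def _demo_deterministic_sort():
    s = torch.tensor([[[0.3], [0.9], [0.1]]])  # batch_size 1, n = 3
    p_hat = deterministic_sort(s, tau=1e-4)
    # p_hat.matmul(s) is close to [[[0.9], [0.3], [0.1]]]
    return p_hat.matmul(s)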
def cont_sort(x, perm=None, temp=1):
""" Helper function that calls deterministic_sort with the right shape.
Since it assumes a shape of (batch_size, n, 1) while the input x is of shape (batch_size, channels, n),
we can get this to the right shape by merging the first two dimensions.
If an existing perm is passed in, we compute the "inverse" (transpose of perm) and just use that to unsort x.
"""
original_size = x.size()
x = x.view(-1, x.size(2), 1)
if perm is None:
perm = deterministic_sort(x, temp)
else:
perm = perm.transpose(1, 2)
x = perm.matmul(x)
x = x.view(original_size)
return x, perm
if __name__ == '__main__':
    pool = FSPool(2, 1)
x = torch.arange(0, 2*3*4).view(3, 2, 4).float()
print('x', x)
y, perm = pool(x, torch.LongTensor([2,3,4]))
print('perm')
print(perm)
print('result')
print(y)
| 6,790 | 36.313187 | 128 | py |
catsetmat | catsetmat-master/lib/fspool/autoencoder/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torchvision
from .fspool import FSPool, cont_sort
class SAE(nn.Module):
def __init__(self, encoder, decoder, latent_dim, latent_dim_encoder=None, encoder_args={}, decoder_args={}, classify=False, input_channels=2):
super().__init__()
channels = input_channels
latent_dim_encoder = latent_dim_encoder or latent_dim
self.encoder = encoder(input_channels=channels, output_channels=latent_dim_encoder, **encoder_args)
self.decoder = decoder(input_channels=latent_dim, output_channels=channels, **decoder_args)
if classify:
self.classifier = nn.Sequential(
nn.Linear(latent_dim, latent_dim),
nn.ReLU(),
nn.Linear(latent_dim, 10),
)
else:
self.classifier = None
for m in self.modules():
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, n_points):
# x :: (n, c, set_size)
x_size = x.size()
latent = self.encoder(x, n_points)
if not isinstance(latent, tuple):
latent = (latent,)
self.x = latent
if self.classifier is None:
reconstruction = self.decoder(*latent, n_points)
return reconstruction.view(x_size)
else:
return self.classifier(latent[0])
############
# Encoders #
############
class LinearEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, set_size, **kwargs):
super().__init__()
self.lin = nn.Linear(input_channels * set_size, output_channels)
def forward(self, x, *args):
x = x.view(x.size(0), -1)
return self.lin(x)
class MLPEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, set_size, **kwargs):
super().__init__()
self.model = nn.Sequential(
nn.Linear(input_channels * set_size, dim),
nn.ReLU(inplace=True),
nn.Linear(dim, dim),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels),
)
def forward(self, x, *args):
x = x.view(x.size(0), -1)
return self.model(x)
class FSEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
self.pool = FSPool(dim, 20, relaxed=kwargs.get('relaxed', True))
self.relu = nn.ReLU(inplace=True)
def forward(self, x, n_points, *args):
x = self.conv(x)
x, perm = self.pool(x, n_points)
x = self.lin(x)
return x, perm
class FSEncoder_set(nn.Module):
    # NOTE: this variant appears unfinished in the original source: __init__
    # referenced names (input_channels, set_size, output_channels, kwargs)
    # that were not parameters. The signature below is a minimal fix so the
    # class at least constructs; note that forward still flattens its input
    # before pooling, while FSPool expects a (batch, channels, set_size) tensor.
    def __init__(self, *, input_channels, output_channels, dim, set_size, **kwargs):
        super().__init__()
        self.enc = nn.Sequential(
            nn.Linear(input_channels * set_size, dim)
        )
        self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
        )
        self.pool = FSPool(dim, 20, relaxed=kwargs.get('relaxed', True))
def forward(self, x, n_points, *args):
x = x.view(x.size(0), -1)
x = self.enc(x)
x, perm = self.pool(x, n_points)
x = self.lin(x)
return x
class SumEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.sum(2)
x = self.lin(x)
return x
class MaxEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.max(2)[0]
x = self.lin(x)
return x
class MeanEncoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.sum(2) / n_points.unsqueeze(1).float()
x = self.lin(x)
return x
############
# Decoders #
############
class LinearDecoder(nn.Module):
def __init__(self, *, input_channels, output_channels, set_size, **kwargs):
super().__init__()
self.lin = nn.Linear(input_channels, output_channels * set_size)
def forward(self, x, *args):
x = x.view(x.size(0), -1)
return self.lin(x)
class MLPDecoder(nn.Module):
def __init__(self, *, input_channels, output_channels, set_size, dim, **kwargs):
super().__init__()
self.model = nn.Sequential(
nn.Linear(input_channels, dim),
nn.ReLU(inplace=True),
nn.Linear(dim, dim),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels * set_size),
)
def forward(self, x, *args):
x = x.view(x.size(0), -1)
return self.model(x)
class FSDecoder(nn.Module):
def __init__(self, *, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.lin = nn.Sequential(
nn.Linear(input_channels, dim),
nn.ReLU(inplace=True),
nn.Linear(dim, dim),
)
self.unpool = FSPool(dim, 20, relaxed=True)
self.conv = nn.Sequential(
nn.Conv1d(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, output_channels, 1),
)
def forward(self, x, perm, n_points, *args):
x = self.lin(x)
x, mask = self.unpool.forward_transpose(x, perm, n=n_points)
x = self.conv(x) * mask[:, :1, :]
return x
class RNNDecoder(nn.Module):
def __init__(self, *, input_channels, output_channels, set_size, dim, **kwargs):
super().__init__()
self.output_channels = output_channels
self.set_size = set_size
self.dim = dim
self.lin = nn.Linear(input_channels, dim)
self.model = nn.LSTM(1, dim, 1)
self.out = nn.Conv1d(dim, output_channels, 1)
def forward(self, x, *args):
# use input feature vector as initial cell state for the LSTM
cell = x.view(x.size(0), -1)
cell = self.lin(cell)
# zero input of size set_size to get set_size number of outputs
dummy_input = torch.zeros(self.set_size, cell.size(0), 1, device=cell.device)
# initial hidden state of zeros
dummy_hidden = torch.zeros(1, cell.size(0), self.dim, device=cell.device)
# run the LSTM
cell = cell.unsqueeze(0)
output, _ = self.model(dummy_input, (dummy_hidden, cell))
# project into correct number of output dims
output = output.permute(1, 2, 0)
output = self.out(output)
return output
| 8,264 | 30.188679 | 146 | py |
catsetmat | catsetmat-master/lib/fspool/autoencoder/data.py | import os
import math
import random
import torch
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torchvision.transforms.functional as T
def collate(batch):
points, labels, n_points = zip(*batch)
point_tensor = torch.zeros(len(points), points[0].size(0), max(n_points))
for i, (point, length) in enumerate(zip(points, n_points)):
point_tensor[i, :, :length] = point
labels = torch.LongTensor(labels)
n_points = torch.LongTensor(n_points)
return point_tensor, labels, n_points
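# Illustrative sketch (added for clarity; not part of the original module):
# collate zero-pads variable-sized point sets to the largest set in the
# batch, so sets with 2 and 3 points (2 channels each) become a single
# (2, 2, 3) tensor whose first item has a zero column at position 2.
def _demo_collate():
    batch = [
        (torch.ones(2, 2), 0, 2),  # (points, label, n_points)
        (torch.ones(2, 3), 1, 3),
    ]
    points, labels, n_points = collate(batch)
    # points.shape == (2, 2, 3); points[0, :, 2] is all zeros
    return points, labels, n_points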
def get_loader(dataset, batch_size, num_workers=8, shuffle=True):
return torch.utils.data.DataLoader(
dataset,
shuffle=shuffle,
batch_size=batch_size,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate,
)
class Polygons(torch.utils.data.Dataset):
def __init__(self, size=2**10, cardinality=10, shift=False, rotate=False, scale=False, variable=False):
self.size = size
self.shift = shift
self.rotate = rotate
self.scale = scale
self.cardinality = cardinality
self.variable = variable
def __getitem__(self, item):
if self.variable:
cardinality = random.choice(range(3, self.cardinality))
else:
cardinality = self.cardinality
rad = torch.linspace(0, 2 * math.pi, cardinality + 1)[:-1]
centre = torch.zeros(2)
if self.shift:
centre += torch.rand(2) - 0.5
radius = 1.0
if self.scale:
radius += torch.rand(1) - 0.5
if self.rotate:
rad += torch.rand(1) * 2 * math.pi
x, y = torch.sin(rad), torch.cos(rad)
points = radius * torch.stack([x, y]) + centre.unsqueeze(1)
points = points[:, torch.randperm(cardinality)]
return points, 0, cardinality
def __len__(self):
return self.size
class MNISTSet(torch.utils.data.Dataset):
def __init__(self, threshold=0.0, train=True, root='mnist'):
self.train = train
self.root = root
self.threshold = threshold
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
mnist = torchvision.datasets.MNIST(train=train, transform=transform, download=True, root=root)
self.data = self.cache(mnist)
def cache(self, dataset):
cache_path = os.path.join(self.root, f'mnist_{self.train}_{self.threshold}.pth')
if os.path.exists(cache_path):
return torch.load(cache_path)
print('Processing dataset...')
data = []
for datapoint in dataset:
img, label = datapoint
point_set, cardinality = self.image_to_set(img)
data.append((point_set, label, cardinality))
torch.save(data, cache_path)
print('Done!')
return data
def image_to_set(self, img):
idx = (img.squeeze(0) > self.threshold).nonzero().transpose(0, 1)
cardinality = idx.size(1)
return idx, cardinality
def __getitem__(self, item):
s, l, c = self.data[item]
s = s[:, torch.randperm(c)]
s = s.float() / 27 # put in range [0, 1]
return s, l, c
def __len__(self):
return len(self.data)
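# Illustrative sketch (added for clarity; not part of the original module):
# image_to_set turns an image into the set of (row, col) coordinates of the
# pixels above the threshold. __init__ is skipped here to avoid the MNIST
# download; only the threshold attribute is needed for this method.
def _demo_image_to_set():
    ds = MNISTSet.__new__(MNISTSet)
    ds.threshold = 0.0
    img = torch.zeros(1, 4, 4)
    img[0, 1, 2] = 1.0
    img[0, 3, 0] = 1.0
    idx, cardinality = ds.image_to_set(img)
    # idx == tensor([[1, 3], [2, 0]]) (rows on top, cols below); cardinality == 2
    return idx, cardinality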
class MNISTSetMasked(MNISTSet):
def __getitem__(self, item):
s, l, c = super().__getitem__(item)
ones = torch.ones(1, s.size(1), device=s.device)
s = torch.cat([s, ones], dim=0)
return s, l, c
if __name__ == '__main__':
import matplotlib.pyplot as plt
    # NOTE: the original demo referenced an undefined Circles dataset;
    # Polygons is the closest dataset defined in this file.
    dataset = Polygons()
    for i in range(2):
        points, label, n_points = dataset[i]
        x, y = points[0], points[1]
        plt.scatter(x.numpy(), y.numpy())
        plt.axes().set_aspect('equal', 'datalim')
        plt.show()
| 3,909 | 28.847328 | 107 | py |
catsetmat | catsetmat-master/lib/hypersagnn/main.py | from torch.nn.utils.rnn import pad_sequence
from torchsummary import summary
from gensim.models import Word2Vec
import tensorflow as tf
from scipy.sparse import csr_matrix
from scipy.sparse import vstack as s_vstack
import os
import time
import argparse
import warnings
import torch
from .random_walk import random_walk
from .random_walk_hyper import random_walk_hyper
from .Modules import *
from .utils import *
import matplotlib as mpl
mpl.use("Agg")
import multiprocessing
import pdb
cpu_num = multiprocessing.cpu_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
device_ids = [0, 1]
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings("ignore")
def parse_args():
# Parses the node2vec arguments.
parser = argparse.ArgumentParser(description="Run node2vec.")
parser.add_argument('--data', type=str, default='drug')
parser.add_argument('--TRY', action='store_true')
parser.add_argument('--FILTER', action='store_true')
parser.add_argument('--grid', type=str, default='')
parser.add_argument('--remark', type=str, default='')
parser.add_argument('--random-walk', action='store_true')
parser.add_argument('--dimensions', type=int, default=64,
help='Number of dimensions. Default is 64.')
parser.add_argument('-l', '--walk-length', type=int, default=40,
help='Length of walk per source. Default is 40.')
parser.add_argument('-r', '--num-walks', type=int, default=10,
help='Number of walks per source. Default is 10.')
parser.add_argument('-k', '--window-size', type=int, default=10,
help='Context size for optimization. Default is 10.')
parser.add_argument('-i', '--iter', default=1, type=int,
help='Number of epochs in SGD')
parser.add_argument('--workers', type=int, default=8,
help='Number of parallel workers. Default is 8.')
    parser.add_argument('--p', type=float, default=2,
                        help='Return hyperparameter. Default is 2.')
    parser.add_argument('--q', type=float, default=0.25,
                        help='In-out hyperparameter. Default is 0.25.')
    parser.add_argument('-a', '--alpha', type=float, default=0.0,
                        help='The weight of the random-walk skip-gram loss. Default is 0.0.')
    parser.add_argument('--rw', type=float, default=0.01,
                        help='The weight of the adjacency-matrix reconstruction loss. Default is 0.01.')
parser.add_argument('-w', '--walk', type=str, default='',
help='The walk type, empty stands for normal rw')
parser.add_argument('-d', '--diag', type=str, default='True',
help='Use the diag mask or not')
parser.add_argument(
'-f',
'--feature',
type=str,
default='walk',
help='Features used in the first step')
args = parser.parse_args('')
if not args.random_walk:
args.model_name = 'model_no_randomwalk'
args.epoch = 25
else:
args.model_name = 'model_{}_'.format(args.data)
args.epoch = 25
if args.TRY:
args.model_name = 'try' + args.model_name
if not args.random_walk:
args.epoch = 5
else:
args.epoch = 1
# args.epoch = 1
args.model_name += args.remark
# print(args.model_name)
args.save_path = os.path.join(
'../checkpoints/', args.data, args.model_name)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
return args
def train_batch_hyperedge(model, loss_func, batch_data, batch_weight, type, y=""):
x = batch_data
w = batch_weight
# When label is not generated, prepare the data
if len(y) == 0:
x, y, w = generate_negative(x, "train_dict", type, w)
print("XB", x.shape)
index = torch.randperm(len(x))
x, y, w = x[index], y[index], w[index]
# forward
pred, recon_loss = model(x, return_recon=True)
loss = loss_func(pred, y, weight=w)
return pred, y, loss, recon_loss
def train_batch_skipgram(model, loss_func, alpha, batch_data):
if alpha == 0:
return torch.zeros(1).to(device)
examples, labels, neg_samples = batch_data
# Embeddings for examples: [batch_size, emb_dim]
example_emb = model.forward_u(examples)
true_w, true_b = model.forward_w_b(labels)
sampled_w, sampled_b = model.forward_w_b(neg_samples)
# True logits: [batch_size, 1]
true_logits = torch.sum(torch.mul(example_emb, true_w), dim=1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = sampled_b.view(1, -1)
sampled_logits = torch.matmul(example_emb,
sampled_w.transpose(1, 0))
sampled_logits += sampled_b_vec
true_xent = loss_func(true_logits, torch.ones_like(true_logits).to(device))
sampled_xent = loss_func(sampled_logits,
torch.zeros_like(sampled_logits).to(device))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
loss = (true_xent + sampled_xent) / len(examples) / len(labels)
return loss
def train_epoch(args, model, loss_func, training_data, optimizer, batch_size, only_rw, type):
# Epoch operation in training phase
# Simultaneously train on 2 models: hyperedge-prediction (1) & random-walk with skipgram (2)
model_1, model_2 = model
(loss_1, beta), (loss_2, alpha) = loss_func
edges, edge_weight, sentences = training_data
y = torch.tensor([])
# Permutate all the data
index = torch.randperm(len(edges))
edges, edge_weight = edges[index], edge_weight[index]
if len(y) > 0:
y = y[index]
model_1.train()
model_2.train()
bce_total_loss = 0
skipgram_total_loss = 0
recon_total_loss = 0
acc_list, y_list, pred_list = [], [], []
batch_num = int(math.floor(len(edges) / batch_size))
bar = trange(batch_num, mininterval=0.1, desc=' - (Training) ', leave=False, )
for i in bar:
if only_rw or alpha > 0:
examples, labels, neg_samples, epoch_finished, words = sentences.next_batch()
examples = torch.tensor(examples, dtype=torch.long, device=device)
labels = torch.tensor(labels, dtype=torch.long, device=device)
neg_samples = torch.tensor(neg_samples, dtype=torch.long, device=device)
loss_skipgram = train_batch_skipgram(
model_2, loss_2, alpha, [
examples, labels, neg_samples])
loss = loss_skipgram
acc_list.append(0)
auc1, auc2 = 0.0, 0.0
else:
batch_edge = edges[i * batch_size:(i + 1) * batch_size]
batch_edge_weight = edge_weight[i * batch_size:(i + 1) * batch_size]
batch_y = ""
if len(y) > 0:
batch_y = y[i * batch_size:(i + 1) * batch_size]
if len(batch_y) == 0:
continue
pred, batch_y, loss_bce, loss_recon = train_batch_hyperedge(model_1, loss_1, batch_edge, batch_edge_weight,
type, y=batch_y)
loss_skipgram = torch.Tensor([0.0]).to(device)
loss = beta * loss_bce + alpha * loss_skipgram + loss_recon * args.rw
acc_list.append(accuracy(pred, batch_y))
y_list.append(batch_y)
pred_list.append(pred)
for opt in optimizer:
opt.zero_grad()
# backward
loss.backward()
# update parameters
for opt in optimizer:
opt.step()
bar.set_description(" - (Training) BCE: %.4f skipgram: %.4f recon: %.4f" %
(bce_total_loss / (i + 1), skipgram_total_loss / (i + 1), recon_total_loss / (i + 1)))
bce_total_loss += loss_bce.item()
skipgram_total_loss += loss_skipgram.item()
recon_total_loss += loss_recon.item()
y = torch.cat(y_list)
pred = torch.cat(pred_list)
auc1, auc2 = roc_auc_cuda(y, pred)
return bce_total_loss / batch_num, skipgram_total_loss / batch_num, recon_total_loss / batch_num, np.mean(
acc_list), auc1, auc2
def eval_epoch(args, model, loss_func, validation_data, batch_size, type):
''' Epoch operation in evaluation phase '''
bce_total_loss = 0
recon_total_loss = 0
(loss_1, beta), (loss_2, alpha) = loss_func
loss_func = loss_1
model.eval()
with torch.no_grad():
validation_data, validation_weight = validation_data
y = ""
index = torch.randperm(len(validation_data))
validation_data, validation_weight = validation_data[index], validation_weight[index]
if len(y) > 0:
y = y[index]
pred, label = [], []
for i in tqdm(range(int(math.floor(len(validation_data) / batch_size))),
mininterval=0.1, desc=' - (Validation) ', leave=False):
# prepare data
batch_x = validation_data[i * batch_size:(i + 1) * batch_size]
batch_w = validation_weight[i * batch_size:(i + 1) * batch_size]
if len(y) == 0:
batch_x, batch_y, batch_w = generate_negative(
batch_x, "test_dict", type, weight=batch_w)
else:
batch_y = y[i * batch_size:(i + 1) * batch_size]
index = torch.randperm(len(batch_x))
batch_x, batch_y, batch_w = batch_x[index], batch_y[index], batch_w[index]
pred_batch, recon_loss = model(batch_x, return_recon=True)
pred.append(pred_batch)
label.append(batch_y)
loss = loss_func(pred_batch, batch_y, weight=batch_w)
recon_total_loss += recon_loss.item()
bce_total_loss += loss.item()
pred = torch.cat(pred, dim=0)
label = torch.cat(label, dim=0)
acc = accuracy(pred, label)
auc1, auc2 = roc_auc_cuda(label, pred)
return bce_total_loss / (i + 1), recon_total_loss / (i + 1), acc, auc1, auc2
def train(args, model, loss, training_data, validation_data, optimizer, epochs, batch_size, only_rw):
valid_accus = [0]
# outlier_data = generate_outlier()
for epoch_i in range(epochs):
if only_rw:
save_embeddings(model[0], True)
print('[ Epoch', epoch_i, 'of', epochs, ']')
start = time.time()
bce_loss, skipgram_loss, recon_loss, train_accu, auc1, auc2 = train_epoch(
args, model, loss, training_data, optimizer, batch_size, only_rw, train_type)
print(' - (Training) bce: {bce_loss: 7.4f}, skipgram: {skipgram_loss: 7.4f}, '
'recon: {recon_loss: 7.4f}'
' acc: {accu:3.3f} %, auc: {auc1:3.3f}, aupr: {auc2:3.3f}, '
'elapse: {elapse:3.3f} s'.format(
bce_loss=bce_loss,
skipgram_loss=skipgram_loss,
recon_loss=recon_loss,
accu=100 *
train_accu,
auc1=auc1,
auc2=auc2,
elapse=(time.time() - start)))
start = time.time()
valid_bce_loss, recon_loss, valid_accu, valid_auc1, valid_auc2 = eval_epoch(args, model[0], loss,
validation_data, batch_size,
'hyper')
print(' - (Validation-hyper) bce: {bce_loss: 7.4f}, recon: {recon_loss: 7.4f},'
' acc: {accu:3.3f} %,'
' auc: {auc1:3.3f}, aupr: {auc2:3.3f},'
'elapse: {elapse:3.3f} s'.format(
bce_loss=valid_bce_loss,
recon_loss=recon_loss,
accu=100 *
valid_accu,
auc1=valid_auc1,
auc2=valid_auc2,
elapse=(time.time() - start)))
valid_accus += [valid_auc1]
# check_outlier(model[0], outlier_data)
checkpoint = {
'model_link': model[0].state_dict(),
'model_node2vec': model[1].state_dict(),
'epoch': epoch_i}
model_name = 'model.chkpt'
if valid_auc1 >= max(valid_accus):
torch.save(checkpoint, os.path.join(args.save_path, model_name))
torch.cuda.empty_cache()
if not only_rw:
checkpoint = torch.load(os.path.join(args.save_path, model_name))
model[0].load_state_dict(checkpoint['model_link'])
model[1].load_state_dict(checkpoint['model_node2vec'])
def generate_negative(x, dict1, get_type='all', weight="", forward=True):
if dict1 == 'train_dict':
dict1 = train_dict
elif dict1 == 'test_dict':
dict1 = test_dict
if len(weight) == 0:
weight = torch.ones(len(x), dtype=torch.float)
neg_list = []
zero_num_list = [0] + list(num_list)
new_index = []
max_id = int(num[-1])
if forward:
func1 = pass_
else:
func1 = tqdm
if len(x.shape) > 1:
change_list_all = np.random.randint(
0, x.shape[-1], len(x) * neg_num).reshape((len(x), neg_num))
for j, sample in enumerate(func1(x)):
if len(x.shape) > 1:
change_list = change_list_all[j, :]
else:
change_list = np.random.randint(0, sample.shape[-1], neg_num)
for i in range(neg_num):
temp = np.copy(sample)
a = set()
a.add(tuple(temp))
trial = 0
simple_or_hard = np.random.rand()
if simple_or_hard <= pair_ratio:
change = change_list[i]
while not a.isdisjoint(dict1):
temp = np.copy(sample)
trial += 1
if trial >= 1000:
temp = ""
break
# Only change one node
if simple_or_hard <= pair_ratio:
if len(num_list) == 1:
# Only one node type
temp[change] = np.random.randint(0, max_id, 1) + 1
else:
# Multiple node types
start = zero_num_list[node_type_mapping[change]]
end = zero_num_list[node_type_mapping[change] + 1]
temp[change] = np.random.randint(
int(start), int(end), 1) + 1
else:
if len(num_list) == 1:
# Only one node type
temp = np.random.randint(
0, max_id, sample.shape[-1]) + 1
else:
for k in range(temp.shape[-1]):
start = zero_num_list[node_type_mapping[k]]
end = zero_num_list[node_type_mapping[k] + 1]
temp[k] = np.random.randint(
int(start), int(end), 1) + 1
temp.sort()
a = set([tuple(temp)])
if len(temp) > 0:
neg_list.append(temp)
if i == 0:
new_index.append(j)
if get_type == 'all' or get_type == 'edge':
x_e, neg_e = generate_negative_edge(x, int(len(x)))
if get_type == 'all':
x = list(x) + x_e
neg_list = neg_list + neg_e
else:
x = x_e
neg_list = neg_e
new_index = np.array(new_index)
new_x = x[new_index]
if not forward:
device = 'cpu'
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
new_weight = torch.tensor(weight[new_index]).to(device)
x = np2tensor_hyper(new_x, dtype=torch.long)
neg = np2tensor_hyper(neg_list, dtype=torch.long)
x = pad_sequence(x, batch_first=True, padding_value=0).to(device)
neg = pad_sequence(neg, batch_first=True, padding_value=0).to(device)
# print("x", x, "neg", neg)
return torch.cat([x, neg]), torch.cat(
[torch.ones((len(x), 1), device=device), torch.zeros((len(neg), 1), device=device)], dim=0), torch.cat(
((torch.ones((len(x), 1), device=device) * new_weight.view(-1, 1), (torch.ones((len(neg), 1), device=device)))))
def save_embeddings(model, origin=False):
model.eval()
with torch.no_grad():
ids = np.arange(num_list[-1]) + 1
ids = torch.Tensor(ids).long().to(device).view(-1, 1)
embeddings = []
for j in range(math.ceil(len(ids) / batch_size)):
x = ids[j * batch_size:min((j + 1) * batch_size, len(ids))]
if origin:
embed = model.get_node_embeddings(x)
else:
embed = model.get_embedding_static(x)
embed = embed.detach().cpu().numpy()
embeddings.append(embed)
embeddings = np.concatenate(embeddings, axis=0)[:, 0, :]
for i in range(len(num_list)):
start = 0 if i == 0 else num_list[i - 1]
static = embeddings[int(start):int(num_list[i])]
np.save("../mymodel_%d.npy" % (i), static)
if origin:
np.save("../mymodel_%d_origin.npy" % (i), static)
torch.cuda.empty_cache()
return embeddings
def generate_H(edge, nums_type, weight):
nums_examples = len(edge)
H = [0 for i in range(len(nums_type))]
for i in range(edge.shape[-1]):
# np.sqrt(weight) because the dot product later would recovers it
H[node_type_mapping[i]] += csr_matrix((np.sqrt(weight), (edge[:, i], range(
nums_examples))), shape=(nums_type[node_type_mapping[i]], nums_examples))
return H
def generate_embeddings(edge, nums_type, H=None, weight=1):
if len(num) == 1:
return [get_adjacency(edge, True)]
if H is None:
H = generate_H(edge, nums_type, weight)
embeddings = [H[i].dot(s_vstack([H[j] for j in range(len(num))]).T).astype('float32') for i in
range(len(nums_type))]
new_embeddings = []
zero_num_list = [0] + list(num_list)
for i, e in enumerate(embeddings):
# This is to remove diag entrance
for j, k in enumerate(range(zero_num_list[i], zero_num_list[i + 1])):
e[j, k] = 0
# Automatically removes all zero entries
col_sum = np.array(e.sum(0)).reshape((-1))
new_e = e[:, col_sum > 0]
new_e.eliminate_zeros()
new_embeddings.append(new_e)
# 0-1 scaling
for i in range(len(nums_type)):
col_max = np.array(new_embeddings[i].max(0).todense()).flatten()
_, col_index = new_embeddings[i].nonzero()
new_embeddings[i].data /= col_max[col_index]
return [new_embeddings[i] for i in range(len(nums_type))]
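# Illustrative sketch (added for clarity; not part of the original module):
# generate_H builds one sparse incidence matrix per node type so that
# H @ H.T recovers weighted node co-occurrence counts, which is what
# generate_embeddings post-processes (diagonal removal, 0-1 scaling).
def _demo_incidence_cooccurrence():
    # incidence of two hyperedges (0, 1) and (1, 2) over 3 nodes of one type
    H = csr_matrix(np.array([[1., 0.],
                             [1., 1.],
                             [0., 1.]]))
    cooc = H.dot(H.T).toarray()
    # cooc[0, 1] == 1 and cooc[1, 2] == 1: nodes sharing a hyperedge
    # co-occur once; the nonzero diagonal is the part that gets zeroed out.
    return cooc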
def get_adjacency(data, norm=True):
A = np.zeros((num_list[-1], num_list[-1]))
for datum in tqdm(data):
for i in range(datum.shape[-1]):
for j in range(datum.shape[-1]):
if i != j:
A[datum[i], datum[j]] += 1.0
if norm:
temp = np.concatenate((np.zeros((1), dtype='int'), num), axis=0)
temp = np.cumsum(temp)
for i in range(len(temp) - 1):
A[temp[i]:temp[i + 1],
:] /= (np.max(A[temp[i]:temp[i + 1],
:],
axis=0,
keepdims=True) + 1e-10)
return csr_matrix(A).astype('float32')
def main():
    # Several helpers (generate_negative, the train/eval loops and
    # save_embeddings) read these names as module-level globals, so
    # declare them global here.
    global neg_num, batch_size, pair_ratio, train_type, num, num_list
    global node_type_mapping, train_dict, test_dict
    args = parse_args()
neg_num = 5
batch_size = 96
neg_num_w2v = 5
bottle_neck = args.dimensions
pair_ratio = 0.9
train_type = 'hyper'
train_zip = np.load("../data/%s/train_data.npz" % (args.data), allow_pickle=True)
test_zip = np.load("../data/%s/test_data.npz" % (args.data), allow_pickle=True)
train_data, test_data = train_zip['train_data'], test_zip['test_data']
try:
train_weight, test_weight = train_zip["train_weight"].astype('float32'), test_zip["test_weight"].astype('float32')
except BaseException:
print("no specific train weight")
test_weight = np.ones(len(test_data), dtype='float32')
train_weight = np.ones(len(train_data), dtype='float32') * neg_num
# pdb.set_trace()
num = train_zip['nums_type']
num_list = np.cumsum(num)
print("Node type num", num)
if len(num) > 1:
node_type_mapping = [0, 1, 2]
if args.feature == 'adj':
embeddings_initial = generate_embeddings(train_data, num, H=None, weight=train_weight)
print(train_weight)
print(train_weight, np.min(train_weight), np.max(train_weight))
train_weight_mean = np.mean(train_weight)
train_weight = train_weight / train_weight_mean * neg_num
test_weight = test_weight / train_weight_mean * neg_num
# Now for multiple node types, the first column id starts at 0, the second
# starts at num_list[0]...
if len(num) > 1:
for i in range(len(node_type_mapping) - 1):
train_data[:, i + 1] += num_list[node_type_mapping[i + 1] - 1]
test_data[:, i + 1] += num_list[node_type_mapping[i + 1] - 1]
num = torch.as_tensor(num)
num_list = torch.as_tensor(num_list)
print("walk type", args.walk)
# At this stage, the index still starts from zero
node_list = np.arange(num_list[-1]).astype('int')
if args.walk == 'hyper':
walk_path = random_walk_hyper(args, node_list, train_data)
else:
walk_path = random_walk(args, num, train_data)
del node_list
# Add 1 for the padding index
print("adding pad idx")
train_data = add_padding_idx(train_data)
test_data = add_padding_idx(test_data)
# Note that, no matter how many node types are here, make sure the
# hyperedge (N1,N2,N3,...) has id, N1 < N2 < N3...
train_dict = parallel_build_hash(train_data, "build_hash", args, num, initial=set())
test_dict = parallel_build_hash(test_data, "build_hash", args, num, initial=train_dict)
print("dict_size", len(train_dict), len(test_dict))
# dict2 = build_hash2(train_data)
# pos_edges = list(dict2)
# pos_edges = np.array(pos_edges)
# np.random.shuffle(pos_edges)
print("train data amount", len(train_data))
# potential_outliers = build_hash3(np.concatenate((train_data, test), axis=0))
# potential_outliers = np.array(list(potential_outliers))
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
if args.feature == 'walk':
# Note that for this part, the word2vec still takes sentences with
# words starts at "0"
if not args.TRY and os.path.exists(
"../%s_wv_%d_%s.npy" %
(args.data, args.dimensions, args.walk)):
A = np.load(
"../%s_wv_%d_%s.npy" %
(args.data,
args.dimensions,
args.walk),
allow_pickle=True)
else:
print("start loading")
walks = np.loadtxt(walk_path, delimiter=" ").astype('int')
start = time.time()
split_num = 20
pool = ProcessPoolExecutor(max_workers=split_num)
process_list = []
walks = np.array_split(walks, split_num)
result = []
print("Start turning path to strs")
for walk in walks:
process_list.append(pool.submit(walkpath2str, walk))
for p in as_completed(process_list):
result += p.result()
pool.shutdown(wait=True)
walks = result
print(
"Finishing Loading and processing %.2f s" %
(time.time() - start))
print("Start Word2vec")
import multiprocessing
print("num cpu cores", multiprocessing.cpu_count())
w2v = Word2Vec(
walks,
size=args.dimensions,
window=args.window_size,
min_count=0,
sg=1,
iter=1,
workers=multiprocessing.cpu_count())
wv = w2v.wv
A = [wv[str(i)] for i in range(num_list[-1])]
np.save("../%s_wv_%d_%s.npy" %
(args.data, args.dimensions, args.walk), A)
from sklearn.preprocessing import StandardScaler
A = StandardScaler().fit_transform(A)
A = np.concatenate(
(np.zeros((1, A.shape[-1]), dtype='float32'), A), axis=0)
A = A.astype('float32')
A = torch.tensor(A).to(device)
print(A.shape)
node_embedding = Wrap_Embedding(int(
num_list[-1] + 1), args.dimensions, scale_grad_by_freq=False, padding_idx=0, sparse=False)
node_embedding.weight = nn.Parameter(A)
elif args.feature == 'adj':
flag = False
node_embedding = MultipleEmbedding(
embeddings_initial,
bottle_neck,
flag,
num_list,
node_type_mapping).to(device)
classifier_model = Classifier(
n_head=8,
d_model=args.dimensions,
d_k=16,
d_v=16,
node_embedding=node_embedding,
diag_mask=args.diag,
bottle_neck=bottle_neck).to(device)
save_embeddings(classifier_model, True)
Randomwalk_Word2vec = Word2vec_Skipgram(dict_size=int(num_list[-1] + 1), embedding_dim=args.dimensions,
window_size=args.window_size, u_embedding=node_embedding,
sparse=False).to(device)
loss = F.binary_cross_entropy
loss2 = torch.nn.BCEWithLogitsLoss(reduction='sum')
summary(classifier_model, (3,))
try:
from datapipe import Word2Vec_Skipgram_Data
sentences = Word2Vec_Skipgram_Data(train_data=walk_path,
num_samples=neg_num_w2v,
batch_size=128,
window_size=args.window_size,
min_count=0,
subsample=1e-3,
session=session)
    except Exception:
sentences = Word2Vec_Skipgram_Data_Empty()
params_list = list(set(list(classifier_model.parameters()) + list(Randomwalk_Word2vec.parameters())))
if args.feature == 'adj':
optimizer = torch.optim.Adam(params_list, lr=1e-3)
else:
optimizer = torch.optim.RMSprop(params_list, lr=1e-3)
model_parameters = filter(lambda p: p.requires_grad, params_list)
params = sum([np.prod(p.size()) for p in model_parameters])
print("params to be trained", params)
train(args, (classifier_model, Randomwalk_Word2vec),
loss=((loss, 1.0), (loss2, 0.0)),
training_data=(train_data, train_weight, sentences),
validation_data=(test_data, test_weight),
optimizer=[optimizer], epochs=300, batch_size=batch_size, only_rw=False)
if __name__ == '__main__':
main()
| 28,119 | 35.95138 | 122 | py |
catsetmat | catsetmat-master/lib/hypersagnn/utils.py | import numpy as np
import torch
from tqdm import tqdm, trange
from sklearn.metrics import average_precision_score, precision_score, recall_score, f1_score
from sklearn.metrics import roc_auc_score, accuracy_score, matthews_corrcoef
from concurrent.futures import as_completed, ProcessPoolExecutor
import errno
import os
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python ≥ 2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def add_padding_idx(vec):
if len(vec.shape) == 1:
return np.asarray([np.sort(np.asarray(v) + 1).astype('int')
for v in tqdm(vec)])
else:
vec = np.asarray(vec) + 1
vec = np.sort(vec, axis=-1)
return vec.astype('int')
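# Illustrative sketch (added for clarity; not part of the original module):
# add_padding_idx shifts node ids up by one so that 0 becomes a free padding
# index, and sorts each hyperedge along the way.
def _demo_add_padding_idx():
    vec = np.array([[2, 0, 1], [1, 2, 0]])
    out = add_padding_idx(vec)
    # out == [[1, 2, 3], [1, 2, 3]]
    return out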
def np2tensor_hyper(vec, dtype):
vec = np.asarray(vec)
if len(vec.shape) == 1:
return [torch.as_tensor(v, dtype=dtype) for v in vec]
else:
return torch.as_tensor(vec, dtype=dtype)
def walkpath2str(walk, silent=False):
return [list(map(str, w)) for w in (tqdm(walk) if not silent else walk)]
def roc_auc_cuda(y_true, y_pred):
try:
y_true = y_true.cpu().detach().numpy().reshape((-1, 1))
y_pred = y_pred.cpu().detach().numpy().reshape((-1, 1))
return roc_auc_score(
y_true, y_pred), average_precision_score(
y_true, y_pred)
except BaseException:
return 0.0, 0.0
def accuracy(output, target):
pred = output >= 0.5
truth = target >= 0.5
acc = torch.sum(pred.eq(truth))
acc = float(acc) * 1.0 / (truth.shape[0] * 1.0)
return acc
def build_hash(data):
dict1 = set()
for datum in data:
# We need sort here to make sure the order is right
datum.sort()
dict1.add(tuple(datum))
del data
return dict1
def build_hash2(data):
dict2 = set()
for datum in tqdm(data):
for x in datum:
for y in datum:
if x != y:
dict2.add((x, y))
return dict2
def build_hash3(data):
dict2 = set()
for datum in tqdm(data):
for i in range(3):
temp = np.copy(datum).astype('int')
temp[i] = 0
dict2.add(tuple(temp))
return dict2
def parallel_build_hash(data, func, args, num, initial=None):
import multiprocessing
cpu_num = multiprocessing.cpu_count()
data = np.array_split(data, cpu_num * 3)
dict1 = initial.copy()
pool = ProcessPoolExecutor(max_workers=cpu_num)
process_list = []
if func == 'build_hash':
func = build_hash
if func == 'build_hash2':
func = build_hash2
if func == 'build_hash3':
func = build_hash3
for datum in data:
process_list.append(pool.submit(func, datum))
for p in as_completed(process_list):
a = p.result()
dict1.update(a)
pool.shutdown(wait=True)
# if args.data in ['schic','ramani']:
# print (num[0])
# new_list_of_set = [set() for i in range(int(num[0]+1))]
# for s in dict1:
# try:
# new_list_of_set[s[0]].add(s)
# except:
# print (s)
# raise EOFError
# dict1 = new_list_of_set
return dict1
def generate_negative_edge(x, length):
    # NOTE: relies on module-level globals (pos_edges, dict2, num_list,
    # neg_num) that callers are expected to set up; see the commented-out
    # pos_edges construction in main.py.
pos = np.random.choice(len(pos_edges), length)
pos = pos_edges[pos]
negative = []
temp_num_list = np.array([0] + list(num_list))
id_choices = np.array([[0, 1], [1, 2], [0, 2]])
id = np.random.choice([0, 1, 2], length * neg_num, replace=True)
id = id_choices[id]
start_1 = temp_num_list[id[:, 0]]
end_1 = temp_num_list[id[:, 0] + 1]
start_2 = temp_num_list[id[:, 1]]
end_2 = temp_num_list[id[:, 1] + 1]
if len(num_list) == 3:
for i in range(neg_num * length):
temp = [
np.random.randint(
start_1[i],
end_1[i]) + 1,
np.random.randint(
start_2[i],
end_2[i]) + 1]
while tuple(temp) in dict2:
temp = [
np.random.randint(
start_1[i],
end_1[i]) + 1,
np.random.randint(
start_2[i],
end_2[i]) + 1]
negative.append(temp)
return list(pos), negative
def generate_outlier(k=20):
inputs = []
negs = []
split_num = 4
pool = ProcessPoolExecutor(max_workers=split_num)
data = np.array_split(potential_outliers, split_num)
dict_pair = build_hash2(np.concatenate([train_data, test]))
process_list = []
for datum in data:
process_list.append(
pool.submit(
generate_outlier_part,
datum,
dict_pair,
k))
for p in as_completed(process_list):
in_, ne = p.result()
inputs.append(in_)
negs.append(ne)
inputs = np.concatenate(inputs, axis=0)
negs = np.concatenate(negs, axis=0)
index = np.arange(len(inputs))
np.random.shuffle(index)
inputs, negs = inputs[index], negs[index]
pool.shutdown(wait=True)
x = np2tensor_hyper(inputs, dtype=torch.long)
x = pad_sequence(x, batch_first=True, padding_value=0).to(device)
return (torch.tensor(x).to(device), torch.tensor(negs).to(device))
def pass_(x):
return x
def generate_outlier_part(data, dict_pair, k=20):
inputs = []
negs = []
for e in tqdm(data):
point = int(np.where(e == 0)[0])
start = 0 if point == 0 else int(num_list[point - 1])
end = int(num_list[point])
count = 0
trial = 0
while count < k:
trial += 1
if trial >= 100:
break
j = np.random.randint(start, end) + 1
condition = [(j, n) in dict_pair for n in e]
if np.sum(condition) > 0:
continue
else:
temp = np.copy(e)
temp[point] = j
inputs.append(temp)
negs.append(point)
count += 1
inputs, index = np.unique(inputs, axis=0, return_index=True)
negs = np.array(negs)[index]
return np.array(inputs), np.array(negs)
def check_outlier(model, data_):
data, negs = data_
bs = 1024
num_of_batches = int(np.floor(data.shape[0] / bs)) + 1
k = 3
outlier_prec = torch.zeros(k).to(device)
model.eval()
with torch.no_grad():
for i in tqdm(range(num_of_batches)):
inputs = data[i * bs:(i + 1) * bs]
neg = negs[i * bs:(i + 1) * bs]
outlier = model(inputs, get_outlier=k)
outlier_prec += (outlier.transpose(1, 0) == neg).sum(dim=1).float()
# for kk in range(k):
# outlier_prec[kk] += (outlier[:,kk].view(-1)==neg).sum().float()
outlier_prec = outlier_prec.cumsum(dim=0)
outlier_prec /= data.shape[0]
for kk in range(k):
print("outlier top %d hitting: %.5f" % (kk + 1, outlier_prec[kk]))
class Word2Vec_Skipgram_Data_Empty(object):
"""Word2Vec model (Skipgram)."""
def __init__(self):
return
def next_batch(self):
"""Train the model."""
return 0, 0, 0, 0, 0
| 7,319 | 25.813187 | 92 | py |
catsetmat | catsetmat-master/lib/hypersagnn/random_walk.py | import os
import time
import numpy as np
import networkx as nx
import random
from tqdm import tqdm
from pathlib import Path
import torch
from concurrent.futures import as_completed, ProcessPoolExecutor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_ids = [0, 1]
class Graph():
def __init__(self, nx_G, p, q, is_directed=False):
self.G = nx_G
self.is_directed = is_directed
self.p = p
self.q = q
self.neighbors = []
print("initialization")
for i in range(len(nx_G.nodes())
                       ):  # actually, nx_G.nodes() is already in increasing order
self.neighbors.append(sorted(nx_G.neighbors(i)))
self.degree = np.zeros((len(nx_G.nodes())))
for i in range(len(nx_G.nodes())):
self.degree[i] = np.sum([nx_G[i][nbr]['weight']
for nbr in self.neighbors[i]])
print(self.degree)
def get_alias_edge(src, dst):
'''
Get the alias edge setup lists for a given edge.
'''
global sG
G = sG.G
p = sG.p
q = sG.q
unnormalized_probs = []
for dst_nbr in sG.neighbors[dst]:
if dst_nbr == src:
unnormalized_probs.append(
(G[dst][dst_nbr]['weight'] / p) / np.sqrt(sG.degree[dst_nbr]))
# unnormalized_probs.append((G[dst][dst_nbr]['weight'] / p))
elif G.has_edge(dst_nbr, src):
unnormalized_probs.append(
(G[dst][dst_nbr]['weight']) /
np.sqrt(
sG.degree[dst_nbr]))
# unnormalized_probs.append((G[dst][dst_nbr]['weight']))
else:
unnormalized_probs.append(
(G[dst][dst_nbr]['weight'] / q) / np.sqrt(sG.degree[dst_nbr]))
# unnormalized_probs.append((G[dst][dst_nbr]['weight'] / q))
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob) /
norm_const for u_prob in unnormalized_probs]
return alias_setup(normalized_probs)
def alias_some_edges(edges):
alias_edges = {}
for edge in tqdm(edges):
alias_edges[(edge[0], edge[1])] = get_alias_edge(edge[0], edge[1])
alias_edges[(edge[1], edge[0])] = get_alias_edge(edge[1], edge[0])
return alias_edges
def preprocess_transition_probs(sg):
'''
Preprocessing of transition probabilities for guiding the random walks.
'''
global sG
sG = sg
G = sG.G
is_directed = sG.is_directed
print("transition probs: ")
alias_nodes = {}
for node in tqdm(G.nodes()):
unnormalized_probs = [
G[node][nbr]['weight'] /
np.sqrt(
sG.degree[nbr]) for nbr in sG.neighbors[node]]
# unnormalized_probs = [G[node][nbr]['weight'] for nbr in sG.neighbors[node]]
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob) /
norm_const for u_prob in unnormalized_probs]
alias_nodes[node] = alias_setup(normalized_probs)
triads = {}
# Parallel alias edges
print("alias edges: ")
edges = G.edges()
threads_num = 100
pool = ProcessPoolExecutor(max_workers=threads_num)
process_list = []
edges = np.array_split(edges, threads_num * 2)
for e in edges:
process_list.append(pool.submit(alias_some_edges, e))
alias_edges = {}
for p in as_completed(process_list):
alias_t = p.result()
alias_edges.update(alias_t)
pool.shutdown(wait=True)
sG.alias_nodes = alias_nodes
sG.alias_edges = alias_edges
def alias_setup(probs):
'''
Compute utility lists for non-uniform sampling from discrete distributions.
Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
for details
'''
K = len(probs)
q = np.zeros(K)
    J = np.zeros(K, dtype=int)
smaller = []
larger = []
for kk, prob in enumerate(probs):
q[kk] = K * prob
if q[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
J[small] = large
q[large] = q[large] + q[small] - 1.0
if q[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return J, q
def alias_draw(J, q):
'''
Draw sample from a non-uniform discrete distribution using alias sampling.
'''
K = len(J)
kk = int(np.floor(np.random.rand() * K))
if np.random.rand() < q[kk]:
return kk
else:
return J[kk]
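# Illustrative sketch (added for clarity; not part of the original module):
# after O(K) setup, alias sampling draws from a discrete distribution in
# O(1) per sample.
def _demo_alias_sampling(n_draws=10000):
    J, q = alias_setup([0.5, 0.3, 0.2])
    counts = np.zeros(3)
    for _ in range(n_draws):
        counts[alias_draw(J, q)] += 1
    # counts / n_draws is approximately [0.5, 0.3, 0.2]
    return counts / n_draws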
def add_weight(G, u, v):
if 'weight' not in G[u][v]:
G[u][v]['weight'] = 1
else:
G[u][v]['weight'] += 1
def node2vec_walk(sG, walk_length, start_node):
'''
Simulate a random walk starting from start node.
'''
alias_nodes = sG.alias_nodes
alias_edges = sG.alias_edges
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = sG.neighbors[cur]
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(cur_nbrs[alias_draw(
alias_nodes[cur][0], alias_nodes[cur][1])])
else:
prev = walk[-2]
next_n = cur_nbrs[alias_draw(alias_edges[(prev, cur)][0],
alias_edges[(prev, cur)][1])]
walk.append(next_n)
else:
walk.append(cur)
continue
return walk
def simulate_walks(sG, num_walks, walk_length):
'''
Repeatedly simulate random walks from each node.
'''
print("sample walks:")
walks = []
nodes = sG.G.nodes()
for node in tqdm(nodes):
for walk_iter in range(num_walks):
temp = node2vec_walk(sG, walk_length, node)
if len(temp) == walk_length:
walks.append(temp)
random.shuffle(walks)
return walks
def read_graph(num, hyperedge_list):
'''
    Expand each hyperedge into pairwise edges & read the resulting network into networkx.
'''
G = nx.Graph()
tot = sum(num)
G.add_nodes_from(range(tot))
for ee in tqdm(hyperedge_list):
e = ee
edges_to_add = []
for i in range(len(e)):
for j in range(i + 1, len(e)):
edges_to_add.append((e[i], e[j]))
G.add_edges_from(edges_to_add)
for i in range(len(e)):
for j in range(i + 1, len(e)):
add_weight(G, e[i], e[j])
G = G.to_undirected()
return G
def toint(hyperedge_list):
return np.array([h.astype('int') for h in hyperedge_list])
def random_walk(args, num, hyperedge_list):
'''
Learn embeddings by optimizing the Skipgram objective using SGD.
'''
# p, q = 1, 1
# num_walks, walk_length, window_size = 10, 80, 10
hyperedge_list = toint(hyperedge_list)
p, q = args.p, args.q
num_walks, walk_length, window_size = args.num_walks, args.walk_length, args.window_size
# emb_save_path = '../embs/{}/p{}_q{}_r{}_l{}_k{}_i{}.embs'.format(args.data, p, q, num_walks, walk_length, window_size, iteration)
path_ = (os.path.dirname(__file__))
print(path_)
path2 = "walks/{}/".format(args.data)
path__ = os.path.join(path_, path2)
print(path__)
    if not os.path.exists(path__):
        os.makedirs(path__)
path3 = 'walks/{}/p{}_q{}_r{}_l{}_walks.txt'.format(args.data, p, q, num_walks, walk_length)
walks_save_path = os.path.join(path_, path3)
start = time.time()
if not args.TRY and os.path.exists(walks_save_path):
return walks_save_path
else:
nx_G = read_graph(num.numpy(), hyperedge_list)
G = Graph(nx_G, p, q)
preprocess_transition_probs(G)
walks = simulate_walks(G, num_walks, walk_length)
walks = np.array(walks)
print(walks.shape, walks_save_path)
np.savetxt(walks_save_path, walks, fmt="%d", delimiter=" ")
# np.save(walks_save_path, walks)
print("RandomWalk running time: %.2lf" % (time.time() - start))
return walks_save_path
| 8,218 | 27.940141 | 135 | py |
catsetmat | catsetmat-master/lib/hypersagnn/Modules.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
from tqdm import tqdm, trange
import copy
import math
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_ids = [0, 1]
def get_non_pad_mask(seq):
assert seq.dim() == 2
return seq.ne(0).type(torch.float).unsqueeze(-1)
def get_attn_key_pad_mask(seq_k, seq_q):
''' For masking out the padding part of key sequence. '''
# Expand to fit the shape of key query attention matrix.
len_q = seq_q.size(1)
padding_mask = seq_k.eq(0)
padding_mask = padding_mask.unsqueeze(
1).expand(-1, len_q, -1) # b x lq x lk
return padding_mask
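# Shape sketch for the two helpers above, assuming padded index sequences seq
# of shape (b, l) with 0 as the padding index:
#   get_non_pad_mask(seq) -> (b, l, 1) float mask, 1 for real tokens, 0 for pad
#   get_attn_key_pad_mask(seq, seq) -> (b, l, l) bool mask, True where the key
#   position is padding and attention should be suppressed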
class Wrap_Embedding(torch.nn.Embedding):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, *input):
return super().forward(*input), torch.Tensor([0]).to(device)
# Used only for really big adjacency matrix
class SparseEmbedding(nn.Module):
def __init__(self, embedding_weight, sparse=True):
super().__init__()
print(embedding_weight.shape)
self.sparse = sparse
if self.sparse:
self.embedding = embedding_weight
else:
try:
try:
self.embedding = torch.from_numpy(
np.asarray(embedding_weight.todense())).to(device)
except BaseException:
self.embedding = torch.from_numpy(
np.asarray(embedding_weight)).to(device)
except Exception as e:
print("Sparse Embedding Error", e)
self.sparse = True
self.embedding = embedding_weight
def forward(self, x):
if self.sparse:
x = x.cpu().numpy()
x = x.reshape((-1))
temp = np.asarray((self.embedding[x, :]).todense())
return torch.from_numpy(temp).to(device)
else:
return self.embedding[x, :]
class TiedAutoEncoder(nn.Module):
def __init__(self, inp, out):
super().__init__()
self.weight = nn.parameter.Parameter(torch.Tensor(out, inp))
self.bias1 = nn.parameter.Parameter(torch.Tensor(out))
self.bias2 = nn.parameter.Parameter(torch.Tensor(inp))
self.register_parameter('tied weight', self.weight)
self.register_parameter('tied bias1', self.bias1)
self.register_parameter('tied bias2', self.bias2)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias1 is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias1, -bound, bound)
if self.bias2 is not None:
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.bias2, -bound, bound)
def forward(self, input):
encoded_feats = F.linear(input, self.weight, self.bias1)
        encoded_feats = torch.tanh(encoded_feats)  # F.tanh is deprecated in favour of torch.tanh
reconstructed_output = F.linear(encoded_feats, self.weight.t(), self.bias2)
return encoded_feats, reconstructed_output
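# TiedAutoEncoder shares a single weight matrix between encoder and decoder:
# the encoder computes tanh(W x + b1), the decoder W^T h + b2, roughly halving
# the parameter count. A minimal usage sketch with hypothetical sizes:
#   ae = TiedAutoEncoder(inp=128, out=64)
#   h, recon = ae(torch.randn(32, 128))  # h: (32, 64), recon: (32, 128)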
class MultipleEmbedding(nn.Module):
def __init__(
self,
embedding_weights,
dim,
sparse=True,
num_list=None,
node_type_mapping=None):
super().__init__()
print(dim)
self.num_list = torch.tensor([0] + list(num_list)).to(device)
print(self.num_list)
self.node_type_mapping = node_type_mapping
self.dim = dim
self.embeddings = []
for i, w in enumerate(embedding_weights):
try:
self.embeddings.append(SparseEmbedding(w, sparse))
except BaseException as e:
print("Conv Embedding Mode")
self.add_module("ConvEmbedding1", w)
self.embeddings.append(w)
test = torch.zeros(1, device=device).long()
self.input_size = []
for w in self.embeddings:
self.input_size.append(w(test).shape[-1])
self.wstack = [TiedAutoEncoder(self.input_size[i], self.dim).to(device) for i, w in enumerate(self.embeddings)]
self.norm_stack = [nn.LayerNorm(self.dim).to(device) for w in self.embeddings]
for i, w in enumerate(self.wstack):
self.add_module("Embedding_Linear%d" % (i), w)
self.add_module("Embedding_norm%d" % (i), self.norm_stack[i])
self.dropout = nn.Dropout(0.25)
def forward(self, x):
final = torch.zeros((len(x), self.dim)).to(device)
recon_loss = torch.Tensor([0.0]).to(device)
for i in range(len(self.num_list) - 1):
select = (x >= (self.num_list[i] + 1)) & (x < (self.num_list[i + 1] + 1))
if torch.sum(select) == 0:
continue
adj = self.embeddings[i](x[select] - self.num_list[i] - 1)
output = self.dropout(adj)
output, recon = self.wstack[i](output)
output = self.norm_stack[i](output)
final[select] = output
recon_loss += sparse_autoencoder_error(recon, adj)
return final, recon_loss
def sparse_autoencoder_error(y_pred, y_true):
return torch.mean(torch.sum((y_true.ne(0).type(torch.float) * (y_true - y_pred)) ** 2, dim=-1) / torch.sum(
y_true.ne(0).type(torch.float), dim=-1))
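# sparse_autoencoder_error is a masked reconstruction MSE: only the nonzero
# entries of y_true contribute, and each row's squared error is normalised by
# its number of nonzero targets before averaging over the batch, so sparse and
# dense rows are weighted alike.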
class Word2vec_Skipgram(nn.Module):
def __init__(
self,
dict_size,
embedding_dim,
window_size,
u_embedding=None,
sparse=False):
super(Word2vec_Skipgram, self).__init__()
'''
use context (u) to predict center (v)
'''
self.dict_size = dict_size
self.embedding_dim = embedding_dim
self.window_size = window_size
self.u_embedding = u_embedding
self.sm_w_t = nn.Embedding(
dict_size,
embedding_dim,
sparse=sparse,
padding_idx=0,
)
self.sm_b = nn.Embedding(dict_size, 1, sparse=sparse, padding_idx=0, )
def forward_u(self, u):
return self.u_embedding(u)
def forward_w_b(self, id):
return self.sm_w_t(id), self.sm_b(id)
class Classifier(nn.Module):
def __init__(
self,
n_head,
d_model,
d_k,
d_v,
node_embedding,
diag_mask,
bottle_neck,
**args):
super().__init__()
self.pff_classifier = PositionwiseFeedForward(
[d_model, 1], reshape=True, use_bias=True)
self.node_embedding = node_embedding
self.encode1 = EncoderLayer(
n_head,
d_model,
d_k,
d_v,
dropout_mul=0.3,
dropout_pff=0.4,
diag_mask=diag_mask,
bottle_neck=bottle_neck)
# self.encode2 = EncoderLayer(n_head, d_model, d_k, d_v, dropout_mul=0.0, dropout_pff=0.0, diag_mask = diag_mask, bottle_neck=bottle_neck)
self.diag_mask_flag = diag_mask
self.layer_norm1 = nn.LayerNorm(d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
def get_node_embeddings(self, x, return_recon=False):
# shape of x: (b, tuple)
sz_b, len_seq = x.shape
# print(torch.max(x), torch.min(x))
x, recon_loss = self.node_embedding(x.view(-1))
if return_recon:
return x.view(sz_b, len_seq, -1), recon_loss
else:
return x.view(sz_b, len_seq, -1)
def get_embedding(self, x, slf_attn_mask, non_pad_mask, return_recon=False):
if return_recon:
x, recon_loss = self.get_node_embeddings(x, return_recon)
else:
x = self.get_node_embeddings(x, return_recon)
dynamic, static, attn = self.encode1(x, x, slf_attn_mask, non_pad_mask)
# dynamic, static1, attn = self.encode2(dynamic, static,slf_attn_mask, non_pad_mask)
if return_recon:
return dynamic, static, attn, recon_loss
else:
return dynamic, static, attn
def get_embedding_static(self, x):
if len(x.shape) == 1:
x = x.view(-1, 1)
flag = True
else:
flag = False
slf_attn_mask = get_attn_key_pad_mask(seq_k=x, seq_q=x)
non_pad_mask = get_non_pad_mask(x)
x = self.get_node_embeddings(x)
dynamic, static, attn = self.encode1(x, x, slf_attn_mask, non_pad_mask)
# dynamic, static, attn = self.encode2(dynamic, static,slf_attn_mask, non_pad_mask)
if flag:
return static[:, 0, :]
return static
def forward(self, x, mask=None, get_outlier=None, return_recon=False):
x = x.long()
print("Xama", x.shape)
slf_attn_mask = get_attn_key_pad_mask(seq_k=x, seq_q=x)
print(slf_attn_mask)
non_pad_mask = get_non_pad_mask(x)
print(non_pad_mask)
if return_recon:
dynamic, static, attn, recon_loss = self.get_embedding(x, slf_attn_mask, non_pad_mask, return_recon)
else:
dynamic, static, attn = self.get_embedding(x, slf_attn_mask, non_pad_mask, return_recon)
dynamic = self.layer_norm1(dynamic)
static = self.layer_norm2(static)
sz_b, len_seq, dim = dynamic.shape
if self.diag_mask_flag == 'True':
output = (dynamic - static) ** 2
else:
output = dynamic
output = self.pff_classifier(output)
output = torch.sigmoid(output)
if get_outlier is not None:
k = get_outlier
            outlier = ((1 - output) * non_pad_mask).topk(k, dim=1, largest=True, sorted=True)[1]
            return outlier.view(-1, k)
mode = 'sum'
if mode == 'min':
output, _ = torch.max(
(1 - output) * non_pad_mask, dim=-2, keepdim=False)
output = 1 - output
elif mode == 'sum':
output = torch.sum(output * non_pad_mask, dim=-2, keepdim=False)
mask_sum = torch.sum(non_pad_mask, dim=-2, keepdim=False)
output /= mask_sum
elif mode == 'first':
output = output[:, 0, :]
if return_recon:
return output, recon_loss
else:
return output
# A custom position-wise MLP.
# dims is a list; one conv layer is created per consecutive pair of sizes,
# with tanh between layers. If dropout is given, it is applied at the end,
# before the residual connection and the layer norm.
class PositionwiseFeedForward(nn.Module):
def __init__(
self,
dims,
dropout=None,
reshape=False,
use_bias=True,
residual=False,
layer_norm=False):
super(PositionwiseFeedForward, self).__init__()
self.w_stack = []
self.dims = dims
for i in range(len(dims) - 1):
            # pass bias by keyword: the 4th positional argument of nn.Conv1d is stride, not bias
            self.w_stack.append(nn.Conv1d(dims[i], dims[i + 1], 1, bias=use_bias))
            self.add_module("PWF_Conv%d" % (i), self.w_stack[-1])
self.reshape = reshape
self.layer_norm = nn.LayerNorm(dims[-1])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.residual = residual
self.layer_norm_flag = layer_norm
def forward(self, x):
output = x.transpose(1, 2)
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = torch.tanh(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
output = output.transpose(1, 2)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
if self.dims[0] == self.dims[-1]:
# residual
if self.residual:
output += x
if self.layer_norm_flag:
output = self.layer_norm(output)
return output
# A custom position-wise MLP.
# dims is a list; one linear layer is created per consecutive pair of sizes,
# with torch.tanh between layers. No residual connection or layer norm here,
# because this is only used as the final classifier.
class FeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, dims, dropout=None, reshape=False, use_bias=True):
super(FeedForward, self).__init__()
self.w_stack = []
for i in range(len(dims) - 1):
            self.w_stack.append(nn.Linear(dims[i], dims[i + 1], bias=use_bias))
self.add_module("FF_Linear%d" % (i), self.w_stack[-1])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reshape = reshape
def forward(self, x):
output = x
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = torch.tanh(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
return output
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
def masked_softmax(self, vector: torch.Tensor,
mask: torch.Tensor,
dim: int = -1,
memory_efficient: bool = False,
mask_fill_value: float = -1e32) -> torch.Tensor:
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside
# the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill(
(1 - mask).byte(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result
def forward(self, q, k, v, diag_mask, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -float('inf'))
attn = self.masked_softmax(
attn, diag_mask, dim=-1, memory_efficient=True)
output = torch.bmm(attn, v)
return output, attn
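# Shape sketch for ScaledDotProductAttention.forward, assuming q, k, v of
# shape (n*b, l, d): attn = softmax(q k^T / temperature) has shape
# (n*b, l, l), is masked by diag_mask (and optionally the padding mask), and
# output = attn @ v is (n*b, l, d) again.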
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(
self,
n_head,
d_model,
d_k,
d_v,
dropout,
diag_mask,
input_dim):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_ks = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_vs = nn.Linear(input_dim, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(
temperature=np.power(d_k, 0.5))
self.fc1 = FeedForward([n_head * d_v, d_model], use_bias=False)
self.fc2 = FeedForward([n_head * d_v, d_model], use_bias=False)
self.layer_norm1 = nn.LayerNorm(input_dim)
self.layer_norm2 = nn.LayerNorm(input_dim)
self.layer_norm3 = nn.LayerNorm(input_dim)
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = dropout
self.diag_mask_flag = diag_mask
self.diag_mask = None
def pass_(self, inputs):
return inputs
def forward(self, q, k, v, diag_mask, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual_dynamic = q
residual_static = v
q = self.layer_norm1(q)
print(residual_dynamic.shape, q.shape)
k = self.layer_norm2(k)
v = self.layer_norm3(v)
sz_b, len_q, _ = q.shape
sz_b, len_k, _ = k.shape
sz_b, len_v, _ = v.shape
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous(
).view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous(
).view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous(
).view(-1, len_v, d_v) # (n*b) x lv x dv
n = sz_b * n_head
if self.diag_mask is not None:
if (len(self.diag_mask) <= n) or (
self.diag_mask.shape[1] != len_v):
self.diag_mask = torch.ones((len_v, len_v), device=device)
if self.diag_mask_flag == 'True':
self.diag_mask -= torch.eye(len_v, len_v, device=device)
self.diag_mask = self.diag_mask.repeat(n, 1, 1)
diag_mask = self.diag_mask
else:
diag_mask = self.diag_mask[:n]
else:
self.diag_mask = (torch.ones((len_v, len_v), device=device))
if self.diag_mask_flag == 'True':
self.diag_mask -= torch.eye(len_v, len_v, device=device)
self.diag_mask = self.diag_mask.repeat(n, 1, 1)
diag_mask = self.diag_mask
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
dynamic, attn = self.attention(q, k, v, diag_mask, mask=mask)
dynamic = dynamic.view(n_head, sz_b, len_q, d_v)
dynamic = dynamic.permute(
1, 2, 0, 3).contiguous().view(
sz_b, len_q, -1) # b x lq x (n*dv)
static = v.view(n_head, sz_b, len_q, d_v)
static = static.permute(
1, 2, 0, 3).contiguous().view(
sz_b, len_q, -1) # b x lq x (n*dv)
dynamic = self.dropout(self.fc1(dynamic)) if self.dropout is not None else self.fc1(dynamic)
static = self.dropout(self.fc2(static)) if self.dropout is not None else self.fc2(static)
return dynamic, static, attn
class EncoderLayer(nn.Module):
'''A self-attention layer + 2 layered pff'''
def __init__(
self,
n_head,
d_model,
d_k,
d_v,
dropout_mul,
dropout_pff,
diag_mask,
bottle_neck):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.mul_head_attn = MultiHeadAttention(
n_head,
d_model,
d_k,
d_v,
dropout=dropout_mul,
diag_mask=diag_mask,
input_dim=bottle_neck)
self.pff_n1 = PositionwiseFeedForward(
[d_model, d_model, d_model], dropout=dropout_pff, residual=True, layer_norm=True)
self.pff_n2 = PositionwiseFeedForward(
[bottle_neck, d_model, d_model], dropout=dropout_pff, residual=False, layer_norm=True)
# self.dropout = nn.Dropout(0.2)
def forward(self, dynamic, static, slf_attn_mask, non_pad_mask):
dynamic, static1, attn = self.mul_head_attn(
dynamic, dynamic, static, slf_attn_mask)
dynamic = self.pff_n1(dynamic * non_pad_mask) * non_pad_mask
static1 = self.pff_n2(static * non_pad_mask) * non_pad_mask
return dynamic, static1, attn
| 20,477 | 32.029032 | 146 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/dataloader.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : dataloader.py
# modified from:
# Author: David Harwath
# with some functions borrowed from https://github.com/SeanNaren/deepspeech.pytorch
import csv
import json
import torchaudio
import numpy as np
import torch
import torch.nn.functional
from torch.utils.data import Dataset
import random
def make_index_dict(label_csv):
index_lookup = {}
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
index_lookup[row['mid']] = row['index']
line_count += 1
return index_lookup
def make_name_dict(label_csv):
name_lookup = {}
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
name_lookup[row['index']] = row['display_name']
line_count += 1
return name_lookup
def lookup_list(index_list, label_csv):
label_list = []
table = make_name_dict(label_csv)
for item in index_list:
label_list.append(table[item])
return label_list
def preemphasis(signal,coeff=0.97):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is none, default 0.97.
:returns: the filtered signal.
"""
return np.append(signal[0],signal[1:]-coeff*signal[:-1])
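# Worked example with hypothetical values: preemphasis([1.0, 2.0, 3.0], 0.97)
# keeps the first sample and high-pass filters the rest, returning
# [1.0, 2.0 - 0.97 * 1.0, 3.0 - 0.97 * 2.0] = [1.0, 1.03, 1.06].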
class AudiosetDataset(Dataset):
def __init__(self, dataset_json_file, audio_conf, label_csv=None):
"""
Dataset that manages audio recordings
:param audio_conf: Dictionary containing the audio loading and preprocessing settings
:param dataset_json_file
"""
self.datapath = dataset_json_file
with open(dataset_json_file, 'r') as fp:
data_json = json.load(fp)
self.data = data_json['data']
self.audio_conf = audio_conf
print('---------------the {:s} dataloader---------------'.format(self.audio_conf.get('mode')))
self.melbins = self.audio_conf.get('num_mel_bins')
self.freqm = self.audio_conf.get('freqm')
self.timem = self.audio_conf.get('timem')
print('now using following mask: {:d} freq, {:d} time'.format(self.audio_conf.get('freqm'), self.audio_conf.get('timem')))
self.mixup = self.audio_conf.get('mixup')
print('now using mix-up with rate {:f}'.format(self.mixup))
self.dataset = self.audio_conf.get('dataset')
print('now process ' + self.dataset)
# dataset spectrogram mean and std, used to normalize the input
self.norm_mean = self.audio_conf.get('mean')
self.norm_std = self.audio_conf.get('std')
        # skip_norm is a flag for skipping input normalization; if True, normalization is
        # skipped so that src/get_norm_stats.py can compute the normalization stats correctly.
        # Set it to True ONLY when you are collecting the normalization stats.
self.skip_norm = self.audio_conf.get('skip_norm') if self.audio_conf.get('skip_norm') else False
if self.skip_norm:
print('now skip normalization (use it ONLY when you are computing the normalization stats).')
else:
print('use dataset mean {:.3f} and std {:.3f} to normalize the input.'.format(self.norm_mean, self.norm_std))
# if add noise for data augmentation
self.noise = self.audio_conf.get('noise')
if self.noise == True:
print('now use noise augmentation')
self.index_dict = make_index_dict(label_csv)
self.label_num = len(self.index_dict)
print('number of classes is {:d}'.format(self.label_num))
def _wav2fbank(self, filename, filename2=None):
# no mixup
if filename2 == None:
waveform, sr = torchaudio.load(filename)
waveform = waveform - waveform.mean()
# mixup
else:
waveform1, sr = torchaudio.load(filename)
waveform2, _ = torchaudio.load(filename2)
waveform1 = waveform1 - waveform1.mean()
waveform2 = waveform2 - waveform2.mean()
if waveform1.shape[1] != waveform2.shape[1]:
if waveform1.shape[1] > waveform2.shape[1]:
# padding
temp_wav = torch.zeros(1, waveform1.shape[1])
temp_wav[0, 0:waveform2.shape[1]] = waveform2
waveform2 = temp_wav
else:
# cutting
waveform2 = waveform2[0, 0:waveform1.shape[1]]
# sample lambda from uniform distribution
#mix_lambda = random.random()
# sample lambda from beta distribtion
mix_lambda = np.random.beta(10, 10)
mix_waveform = mix_lambda * waveform1 + (1 - mix_lambda) * waveform2
waveform = mix_waveform - mix_waveform.mean()
if self.melbins > 100:
fbank = torchaudio.compliance.kaldi.fbank(waveform, htk_compat=True, sample_frequency=sr, use_energy=False,
window_type='hanning', num_mel_bins=self.melbins, dither=0.0, frame_shift=10)
else:
melspec = torchaudio.transforms.MelSpectrogram(sample_rate=sr, n_fft=4096, win_length=1024, hop_length=400, center=True, pad_mode="constant", power=2, norm='slaney', onesided=True, n_mels=self.melbins, mel_scale="slaney")
spec = melspec(waveform).squeeze()
            # AmplitudeToDB takes stype/top_db at construction time (not the
            # spectrogram itself); 'power' matches the power=2 mel spectrogram above
            a2db = torchaudio.transforms.AmplitudeToDB(stype='power')
            fbank = a2db(spec)
fbank = fbank.transpose(0,1)
target_length = self.audio_conf.get('target_length')
n_frames = fbank.shape[0]
p = target_length - n_frames
# cut and pad
if p > 0:
m = torch.nn.ZeroPad2d((0, 0, 0, p))
fbank = m(fbank)
elif p < 0:
fbank = fbank[0:target_length, :]
if filename2 == None:
return fbank, 0
else:
return fbank, mix_lambda
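    # _wav2fbank returns a (target_length, num_mel_bins) filterbank plus the
    # mixup coefficient (0 when no second file is given). With mixup, the two
    # waveforms are combined with lambda ~ Beta(10, 10), which concentrates
    # the mixing ratio around 0.5.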
def __getitem__(self, index):
"""
        returns: fbank, label_indices
        where fbank is a FloatTensor of size (target_length, num_mel_bins),
        the (optionally mixup-augmented and SpecAug-masked) normalized log-mel spectrogram,
        and label_indices is a FloatTensor of size (label_num,) with (soft) multi-hot labels
"""
# do mix-up for this sample (controlled by the given mixup rate)
if random.random() < self.mixup:
datum = self.data[index]
# find another sample to mix, also do balance sampling
            # sampling the other clip from the multinomial (class-weighted) distribution
            # tends to make the performance worse
if 'samples_weight' in self.audio_conf.keys():
samples_weight = self.audio_conf.get('samples_weight')
samples_weight = samples_weight / samples_weight.sum()
mix_sample_idx = np.random.choice(len(self.data), p=samples_weight)
else:
# sample the other sample from the uniform distribution
mix_sample_idx = random.randint(0, len(self.data)-1)
mix_datum = self.data[mix_sample_idx]
# get the mixed fbank
fbank, mix_lambda = self._wav2fbank(datum['wav'], mix_datum['wav'])
# initialize the label
label_indices = np.zeros(self.label_num)
# add sample 1 labels
for label_str in datum['labels'].split(','):
label_indices[int(self.index_dict[label_str])] += mix_lambda
# add sample 2 labels
for label_str in mix_datum['labels'].split(','):
label_indices[int(self.index_dict[label_str])] += 1.0-mix_lambda
label_indices = torch.FloatTensor(label_indices)
# if not do mixup
else:
datum = self.data[index]
label_indices = np.zeros(self.label_num)
fbank, mix_lambda = self._wav2fbank(datum['wav'])
for label_str in datum['labels'].split(','):
label_indices[int(self.index_dict[label_str])] = 1.0
label_indices = torch.FloatTensor(label_indices)
# SpecAug, not do for eval set
freqm = torchaudio.transforms.FrequencyMasking(self.freqm)
timem = torchaudio.transforms.TimeMasking(self.timem)
fbank = torch.transpose(fbank, 0, 1)
fbank = fbank.unsqueeze(0)
if self.freqm != 0:
fbank = freqm(fbank)
if self.timem != 0:
fbank = timem(fbank)
# squeeze it back, it is just a trick to satisfy new torchaudio version
fbank = fbank.squeeze(0)
fbank = torch.transpose(fbank, 0, 1)
# normalize the input for both training and test
if not self.skip_norm:
fbank = (fbank - self.norm_mean) / (self.norm_std * 2)
# skip normalization the input if you are trying to get the normalization stats.
else:
pass
if self.noise == True:
fbank = fbank + torch.rand(fbank.shape[0], fbank.shape[1]) * np.random.rand() / 10
fbank = torch.roll(fbank, np.random.randint(-10, 10), 0)
        mix_ratio = min(mix_lambda, 1 - mix_lambda) / max(mix_lambda, 1 - mix_lambda)  # note: computed but currently unused
# the output fbank shape is [time_frame_num, frequency_bins], e.g., [1024, 128]
return fbank, label_indices
def __len__(self):
return len(self.data) | 9,531 | 40.991189 | 233 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/run.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : run.py
import argparse
import os
import ast
import pickle
import sys
import time
import torch
from torch.utils.data import WeightedRandomSampler
basepath = os.path.dirname(os.path.dirname(sys.path[0]))
sys.path.append(basepath)
import dataloader
import models
import numpy as np
from traintest import train, validate
print("I am process %s, running on %s: starting (%s)" % (os.getpid(), os.uname()[1], time.asctime()))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--data-train", type=str, default='', help="training data json")
parser.add_argument("--data-val", type=str, default='', help="validation data json")
parser.add_argument("--data-eval", type=str, default='', help="evaluation data json")
parser.add_argument("--label-csv", type=str, default='', help="csv with class labels")
parser.add_argument("--n_class", type=int, default=527, help="number of classes")
parser.add_argument("--model", type=str, default='ast', help="the model used")
parser.add_argument("--dataset", type=str, default="audioset", help="the dataset used", choices=["audioset","audioset_s", "esc50", "speechcommands"])
parser.add_argument("--n_mels", type=int, default=128, help="number of mel bins")
parser.add_argument('--mean', default=-12.7508, type=float, help='normalizing mean')
parser.add_argument('--std', default=11.7584, type=float, help='normalizing std')
parser.add_argument("--exp-dir", type=str, default="", help="directory to dump experiments")
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--beta1', type = float, default = 0.9)
parser.add_argument('--beta2', type = float, default = 0.999)
parser.add_argument("--optim", type=str, default="adam", help="training optimizer", choices=["sgd", "adam"])
parser.add_argument('-b', '--batch-size', default=12, type=int, metavar='N', help='mini-batch size')
parser.add_argument('-w', '--num-workers', default=32, type=int, metavar='NW', help='# of workers for dataloading (default: 32)')
parser.add_argument("--n-epochs", type=int, default=1, help="number of maximum training epochs")
# not used in the formal experiments
parser.add_argument("--lr_patience", type=int, default=2, help="how many epoch to wait to reduce lr if mAP doesn't improve")
parser.add_argument("--n-print-steps", type=int, default=100, help="number of steps to print statistics")
parser.add_argument('--save_model', help='save the model or not', type=ast.literal_eval)
parser.add_argument('--freqm', help='frequency mask max length', type=int, default=0)
parser.add_argument('--timem', help='time mask max length', type=int, default=0)
parser.add_argument("--mixup", type=float, default=0, help="how many (0-1) samples need to be mixup during training")
parser.add_argument("--bal", type=str, default=None, help="use balanced sampling or not")
# the stride used in patch spliting, e.g., for patch size 16*16, a stride of 16 means no overlapping, a stride of 10 means overlap of 6.
parser.add_argument("--fstride", type=int, default=10, help="soft split freq stride, overlap=patch_size-stride")
parser.add_argument("--tstride", type=int, default=10, help="soft split time stride, overlap=patch_size-stride")
parser.add_argument('--imagenet_pretrain', help='if use ImageNet pretrained audio spectrogram transformer model', type=ast.literal_eval, default='True')
parser.add_argument('--audioset_pretrain', help='if use ImageNet and audioset pretrained audio spectrogram transformer model', type=ast.literal_eval, default='False')
# arguments for TAL-trans Models:
parser.add_argument('--embedding_size', type = int, default = 1024) # this is the embedding size after a pooling layer
# after a non-pooling layer, the embedding size will be twice this much
parser.add_argument('--n_conv_layers', type = int, default = 10)
parser.add_argument('--n_trans_layers', type = int, default = 2)
parser.add_argument('--kernel_size', type = str, default = '3') # 'n' or 'nxm'
parser.add_argument('--n_pool_layers', type = int, default = 5) # the pooling layers will be inserted uniformly into the conv layers
# there should be at least 2 and at most 6 pooling layers
# the first two pooling layers will have stride (2,2); later ones will have stride (1,2)
parser.add_argument('--batch_norm', type = bool, default = True)
parser.add_argument('--dropout', type = float, default = 0.0)
parser.add_argument('--pooling', type = str, default = 'lin', choices = ['max', 'ave', 'lin', 'exp', 'att', 'h-att', 'all'])
parser.add_argument('--continue_from_ckpt', type = str, default = None)
parser.add_argument('--addpos', type = bool, default = False)
parser.add_argument('--transformer_dropout', type = float, default = 0.5)
#psla model
parser.add_argument("--eff_b", type=int, default=0, help="which efficientnet to use, the larger number, the more complex")
parser.add_argument("--att_head", type=int, default=4, help="number of attention heads")
parser.add_argument("--att_activation", type = str, default = 'sigmoid', choices = ['sigmoid','softmax','relu','linear'])
args = parser.parse_args()
print('now train an audio tagging model')
# dataset spectrogram mean and std, used to normalize the input
# (audioset_s feature: -12.75089158, 11.75840071; original audioset: -4.2677393, 4.5689974)
norm_stats = {'audioset':[args.mean, args.std], 'audioset_s':[args.mean, args.std], 'esc50':[-6.6268077, 5.358466], 'speechcommands':[-6.845978, 5.5654526]}
target_length = {'audioset':1024, 'audioset_s':400, 'esc50':512, 'speechcommands':128}
# if add noise for data augmentation, only use for speech commands
noise = {'audioset': False, 'audioset_s': False, 'esc50': False, 'speechcommands':True}
audio_conf = {'num_mel_bins': args.n_mels, 'target_length': target_length[args.dataset], 'freqm': args.freqm, 'timem': args.timem, 'mixup': args.mixup, 'dataset': args.dataset, 'mode':'train', 'mean':norm_stats[args.dataset][0], 'std':norm_stats[args.dataset][1],
'noise':noise[args.dataset]}
val_audio_conf = {'num_mel_bins': args.n_mels, 'target_length': target_length[args.dataset], 'freqm': 0, 'timem': 0, 'mixup': 0, 'dataset': args.dataset, 'mode':'evaluation', 'mean':norm_stats[args.dataset][0], 'std':norm_stats[args.dataset][1], 'noise':False}
if args.bal == 'bal':
print('balanced sampler is being used')
samples_weight = np.loadtxt(args.data_train[:-5]+'_weight.csv', delimiter=',')
sampler = WeightedRandomSampler(samples_weight, len(samples_weight), replacement=True)
train_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf),
batch_size=args.batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True)
elif args.bal == 'over':
print('over sampler is being used')
samples_weight = np.loadtxt(args.data_train[:-5]+'_oversample_weight.csv', delimiter=',')
sampler = WeightedRandomSampler(samples_weight, len(samples_weight), replacement=True)
train_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf),
batch_size=args.batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True)
elif args.bal == 'multinomial':
print('over sampler and multinomial mixup is being used')
samples_weight = np.loadtxt(args.data_train[:-5]+'_oversample_weight.csv', delimiter=',')
sampler = WeightedRandomSampler(samples_weight, len(samples_weight), replacement=True)
audio_conf['samples_weight'] = samples_weight
train_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf),
batch_size=args.batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True)
else:
print('balanced sampler is not used')
train_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf),
batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_val, label_csv=args.label_csv, audio_conf=val_audio_conf),
batch_size=args.batch_size*2, shuffle=False, num_workers=args.num_workers, pin_memory=True)
if 'x' not in args.kernel_size:
args.kernel_size = args.kernel_size + 'x' + args.kernel_size
args.kernel_size = tuple(int(x) for x in args.kernel_size.split('x'))
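# Example of the parsing above: '--kernel_size 3' first becomes '3x3' and then
# the tuple (3, 3); '--kernel_size 3x5' becomes (3, 5).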
# transformer based model
if args.model == 'ast':
audio_model = models.ASTModel(label_dim=args.n_class, fstride=args.fstride, tstride=args.tstride, input_fdim=args.n_mels,
input_tdim=target_length[args.dataset], imagenet_pretrain=args.imagenet_pretrain,
audioset_pretrain=args.audioset_pretrain, model_size='base384')
elif args.model == 'fnet':
audio_model = models.get_fnet()
elif args.model == 'TALtrans':
args.target_length = target_length[args.dataset]
audio_model = models.TransformerEncoder(args)
elif args.model == 'TAL':
audio_model = models.TALNet(args)
elif args.model == 'efficientnet':
audio_model = models.EffNetAttention(att_act=args.att_activation, label_dim=args.n_class, b=args.eff_b, pretrain=args.imagenet_pretrain, head_num=args.att_head)
elif args.model == 'resnet':
args.target_length = target_length[args.dataset]
audio_model = models.ResNetAttention(args)
elif args.model == 'mbnet':
audio_model = models.MBNet(label_dim=args.n_class, pretrain=args.imagenet_pretrain)
elif args.model == 'linear':
audio_model = models.LinearModel(n_layers=3, input_dim=args.n_mels, hidden_dim=128, label_dim=args.n_class)
print("\nCreating experiment directory: %s" % args.exp_dir)
os.makedirs("%s/models" % args.exp_dir)
with open("%s/args.pkl" % args.exp_dir, "wb") as f:
pickle.dump(args, f)
print('Now starting training for {:d} epochs'.format(args.n_epochs))
train(audio_model, train_loader, val_loader, args)
# for speechcommands dataset, evaluate the best model on validation set on the test set
if args.dataset == 'speechcommands':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sd = torch.load(args.exp_dir + '/models/best_audio_model.pth', map_location=device)
audio_model = torch.nn.DataParallel(audio_model)
audio_model.load_state_dict(sd)
# best model on the validation set
stats, _ = validate(audio_model, val_loader, args, 'valid_set')
# note it is NOT mean of class-wise accuracy
val_acc = stats[0]['acc']
val_mAUC = np.mean([stat['auc'] for stat in stats])
print('---------------evaluate on the validation set---------------')
print("Accuracy: {:.6f}".format(val_acc))
print("AUC: {:.6f}".format(val_mAUC))
# test the model on the evaluation set
eval_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_eval, label_csv=args.label_csv, audio_conf=val_audio_conf),
batch_size=args.batch_size*2, shuffle=False, num_workers=args.num_workers, pin_memory=True)
stats, _ = validate(audio_model, eval_loader, args, 'eval_set')
eval_acc = stats[0]['acc']
eval_mAUC = np.mean([stat['auc'] for stat in stats])
print('---------------evaluate on the test set---------------')
print("Accuracy: {:.6f}".format(eval_acc))
print("AUC: {:.6f}".format(eval_mAUC))
np.savetxt(args.exp_dir + '/eval_result.csv', [val_acc, val_mAUC, eval_acc, eval_mAUC])
| 11,956 | 63.983696 | 263 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/traintest.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : traintest.py
import sys
import os
import datetime
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
from utilities import *
import time
import torch
from torch import nn
import numpy as np
import pickle
from torch.cuda.amp import autocast,GradScaler
def train(audio_model, train_loader, test_loader, args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('running on ' + str(device))
torch.set_grad_enabled(True)
# Initialize all of the statistics we want to keep track of
batch_time = AverageMeter()
per_sample_time = AverageMeter()
data_time = AverageMeter()
per_sample_data_time = AverageMeter()
loss_meter = AverageMeter()
per_sample_dnn_time = AverageMeter()
progress = []
# best_cum_mAP is checkpoint ensemble from the first epoch to the best epoch
best_epoch, best_cum_epoch, best_mAP, best_acc, best_cum_mAP = 0, 0, -np.inf, -np.inf, -np.inf
global_step, epoch = 0, 0
start_time = time.time()
exp_dir = args.exp_dir
def _save_progress():
progress.append([epoch, global_step, best_epoch, best_mAP,
time.time() - start_time])
with open("%s/progress.pkl" % exp_dir, "wb") as f:
pickle.dump(progress, f)
if not isinstance(audio_model, nn.DataParallel):
audio_model = nn.DataParallel(audio_model)
audio_model = audio_model.to(device)
# Set up the optimizer
trainables = [p for p in audio_model.parameters() if p.requires_grad]
print('Total parameter number is : {:.3f} million'.format(sum(p.numel() for p in audio_model.parameters()) / 1e6))
print('Total trainable parameter number is : {:.3f} million'.format(sum(p.numel() for p in trainables) / 1e6))
optimizer = torch.optim.Adam(trainables, args.lr, weight_decay=5e-7, betas=(0.95, 0.999))
# dataset specific settings
#scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=args.lr_patience, verbose=True)
if args.dataset == 'audioset' or args.dataset == 'audioset_s':
if len(train_loader.dataset) > 2e5:
print('scheduler for full audioset is used')
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2,3,4,5,6,7,8,9,10], gamma=0.5, last_epoch=-1)
else:
print('scheduler for balanced audioset is used')
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 15, 20, 25], gamma=0.5, last_epoch=-1)
main_metrics = 'mAP'
loss_fn = nn.BCEWithLogitsLoss()
if args.model != 'ast':
loss_fn = nn.BCELoss()
warmup = True
elif args.dataset == 'esc50':
print('scheduler for esc-50 is used')
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, list(range(5,26)), gamma=0.85)
main_metrics = 'acc'
loss_fn = nn.CrossEntropyLoss()
warmup = False
elif args.dataset == 'speechcommands':
print('scheduler for speech commands is used')
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, list(range(5,26)), gamma=0.85)
main_metrics = 'acc'
loss_fn = nn.BCEWithLogitsLoss()
warmup = False
else:
raise ValueError('unknown dataset, dataset should be in [audioset, speechcommands, esc50]')
print('now training with {:s}, main metrics: {:s}, loss function: {:s}, learning rate scheduler: {:s}'.format(str(args.dataset), str(main_metrics), str(loss_fn), str(scheduler)))
args.loss_fn = loss_fn
epoch += 1
# for amp
scaler = GradScaler()
print("current #steps=%s, #epochs=%s" % (global_step, epoch))
print("start training...")
result = np.zeros([args.n_epochs, 10])
audio_model.train()
while epoch < args.n_epochs + 1:
begin_time = time.time()
end_time = time.time()
audio_model.train()
print('---------------')
print(datetime.datetime.now())
print("current #epochs=%s, #steps=%s" % (epoch, global_step))
for i, (audio_input, labels) in enumerate(train_loader):
B = audio_input.size(0)
audio_input = audio_input.to(device, non_blocking=True)
labels = labels.to(device, non_blocking=True)
data_time.update(time.time() - end_time)
per_sample_data_time.update((time.time() - end_time) / audio_input.shape[0])
dnn_start_time = time.time()
# first several steps for warm-up
if global_step <= 1000 and global_step % 50 == 0 and warmup == True:
warm_lr = (global_step / 1000) * args.lr
for param_group in optimizer.param_groups:
param_group['lr'] = warm_lr
print('warm-up learning rate is {:f}'.format(optimizer.param_groups[0]['lr']))
# with autocast():
audio_output = audio_model(audio_input)
if isinstance(loss_fn, torch.nn.CrossEntropyLoss):
loss = loss_fn(audio_output, torch.argmax(labels.long(), axis=1))
elif isinstance(loss_fn, torch.nn.BCEWithLogitsLoss):
loss = loss_fn(audio_output, labels)
else:
epsilon = 1e-7
audio_output = torch.clamp(audio_output, epsilon, 1. - epsilon)
loss = loss_fn(audio_output, labels)
# optimization if amp is not used
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
            # optimization if amp is used
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
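            # standard amp recipe: scale the loss before backward, let
            # scaler.step() unscale the gradients and skip the update on
            # inf/nan, then adjust the scale factor; autocast itself is
            # commented out above, so scaling here mainly guards underflow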
# record loss
loss_meter.update(loss.item(), B)
batch_time.update(time.time() - end_time)
per_sample_time.update((time.time() - end_time)/audio_input.shape[0])
per_sample_dnn_time.update((time.time() - dnn_start_time)/audio_input.shape[0])
print_step = global_step % args.n_print_steps == 0
early_print_step = epoch == 0 and global_step % (args.n_print_steps/10) == 0
print_step = print_step or early_print_step
if print_step and global_step != 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Per Sample Total Time {per_sample_time.avg:.5f}\t'
'Per Sample Data Time {per_sample_data_time.avg:.5f}\t'
'Per Sample DNN Time {per_sample_dnn_time.avg:.5f}\t'
'Train Loss {loss_meter.avg:.4f}\t'.format(
epoch, i, len(train_loader), per_sample_time=per_sample_time, per_sample_data_time=per_sample_data_time,
per_sample_dnn_time=per_sample_dnn_time, loss_meter=loss_meter), flush=True)
if np.isnan(loss_meter.avg):
print("training diverged...")
return
end_time = time.time()
global_step += 1
print('start validation')
stats, valid_loss = validate(audio_model, test_loader, args, epoch)
# ensemble results
cum_stats = validate_ensemble(args, epoch)
cum_mAP = np.mean([stat['AP'] for stat in cum_stats])
cum_mAUC = np.mean([stat['auc'] for stat in cum_stats])
cum_acc = cum_stats[0]['acc']
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
middle_ps = [stat['precisions'][int(len(stat['precisions'])/2)] for stat in stats]
middle_rs = [stat['recalls'][int(len(stat['recalls'])/2)] for stat in stats]
average_precision = np.mean(middle_ps)
average_recall = np.mean(middle_rs)
if main_metrics == 'mAP':
print("mAP: {:.6f}".format(mAP))
else:
print("acc: {:.6f}".format(acc))
print("AUC: {:.6f}".format(mAUC))
print("Avg Precision: {:.6f}".format(average_precision))
print("Avg Recall: {:.6f}".format(average_recall))
print("d_prime: {:.6f}".format(d_prime(mAUC)))
print("train_loss: {:.6f}".format(loss_meter.avg))
print("valid_loss: {:.6f}".format(valid_loss))
if main_metrics == 'mAP':
result[epoch-1, :] = [mAP, mAUC, average_precision, average_recall, d_prime(mAUC), loss_meter.avg, valid_loss, cum_mAP, cum_mAUC, optimizer.param_groups[0]['lr']]
else:
result[epoch-1, :] = [acc, mAUC, average_precision, average_recall, d_prime(mAUC), loss_meter.avg, valid_loss, cum_acc, cum_mAUC, optimizer.param_groups[0]['lr']]
np.savetxt(exp_dir + '/result.csv', result, delimiter=',')
print('validation finished')
if mAP > best_mAP:
best_mAP = mAP
if main_metrics == 'mAP':
best_epoch = epoch
if acc > best_acc:
best_acc = acc
if main_metrics == 'acc':
best_epoch = epoch
if cum_mAP > best_cum_mAP:
best_cum_epoch = epoch
best_cum_mAP = cum_mAP
if best_epoch == epoch:
torch.save(audio_model.state_dict(), "%s/models/best_audio_model.pth" % (exp_dir))
torch.save(optimizer.state_dict(), "%s/models/best_optim_state.pth" % (exp_dir))
torch.save(audio_model.state_dict(), "%s/models/audio_model.%d.pth" % (exp_dir, epoch))
if len(train_loader.dataset) > 2e5:
torch.save(optimizer.state_dict(), "%s/models/optim_state.%d.pth" % (exp_dir, epoch))
scheduler.step()
print('Epoch-{0} lr: {1}'.format(epoch, optimizer.param_groups[0]['lr']))
with open(exp_dir + '/stats_' + str(epoch) +'.pickle', 'wb') as handle:
pickle.dump(stats, handle, protocol=pickle.HIGHEST_PROTOCOL)
_save_progress()
finish_time = time.time()
print('epoch {:d} training time: {:.3f}'.format(epoch, finish_time-begin_time))
epoch += 1
batch_time.reset()
per_sample_time.reset()
data_time.reset()
per_sample_data_time.reset()
loss_meter.reset()
per_sample_dnn_time.reset()
if args.dataset == 'audioset' or args.dataset == 'audioset_s':
if len(train_loader.dataset) > 2e5:
stats=validate_wa(audio_model, test_loader, args, 1, 5)
else:
stats=validate_wa(audio_model, test_loader, args, 6, 25)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
middle_ps = [stat['precisions'][int(len(stat['precisions'])/2)] for stat in stats]
middle_rs = [stat['recalls'][int(len(stat['recalls'])/2)] for stat in stats]
average_precision = np.mean(middle_ps)
average_recall = np.mean(middle_rs)
wa_result = [mAP, mAUC, average_precision, average_recall, d_prime(mAUC)]
print('---------------Training Finished---------------')
print('weighted averaged model results')
print("mAP: {:.6f}".format(mAP))
print("AUC: {:.6f}".format(mAUC))
print("Avg Precision: {:.6f}".format(average_precision))
print("Avg Recall: {:.6f}".format(average_recall))
print("d_prime: {:.6f}".format(d_prime(mAUC)))
print("train_loss: {:.6f}".format(loss_meter.avg))
print("valid_loss: {:.6f}".format(valid_loss))
np.savetxt(exp_dir + '/wa_result.csv', wa_result)
def validate(audio_model, val_loader, args, epoch):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_time = AverageMeter()
if not isinstance(audio_model, nn.DataParallel):
audio_model = nn.DataParallel(audio_model)
audio_model = audio_model.to(device)
# switch to evaluate mode
audio_model.eval()
end = time.time()
A_predictions = []
A_targets = []
A_loss = []
with torch.no_grad():
for i, (audio_input, labels) in enumerate(val_loader):
audio_input = audio_input.to(device)
# compute output
audio_output = audio_model(audio_input)
if args.model == 'ast':
audio_output = torch.sigmoid(audio_output)
predictions = audio_output.to('cpu').detach()
A_predictions.append(predictions)
A_targets.append(labels)
# compute the loss
labels = labels.to(device)
epsilon = 1e-7
audio_output = torch.clamp(audio_output, epsilon, 1. - epsilon)
if isinstance(args.loss_fn, torch.nn.CrossEntropyLoss):
loss = args.loss_fn(audio_output, torch.argmax(labels.long(), axis=1))
else:
loss = args.loss_fn(audio_output, labels)
A_loss.append(loss.to('cpu').detach())
batch_time.update(time.time() - end)
end = time.time()
audio_output = torch.cat(A_predictions)
target = torch.cat(A_targets)
loss = np.mean(A_loss)
stats = calculate_stats(audio_output, target)
# save the prediction here
exp_dir = args.exp_dir
if os.path.exists(exp_dir+'/predictions') == False:
os.mkdir(exp_dir+'/predictions')
np.savetxt(exp_dir+'/predictions/target.csv', target, delimiter=',')
np.savetxt(exp_dir+'/predictions/predictions_' + str(epoch) + '.csv', audio_output, delimiter=',')
return stats, loss
def validate_ensemble(args, epoch):
exp_dir = args.exp_dir
target = np.loadtxt(exp_dir+'/predictions/target.csv', delimiter=',')
    if epoch == 1:
        cum_predictions = np.loadtxt(exp_dir + '/predictions/predictions_1.csv', delimiter=',')
    else:
        cum_predictions = np.loadtxt(exp_dir + '/predictions/cum_predictions.csv', delimiter=',') * (epoch - 1)
        predictions = np.loadtxt(exp_dir + '/predictions/predictions_' + str(epoch) + '.csv', delimiter=',')
        cum_predictions = cum_predictions + predictions
        # remove the previous epoch's prediction file to save storage space
        # (only from epoch 2 on, so a nonexistent predictions_0.csv is never touched)
        os.remove(exp_dir + '/predictions/predictions_' + str(epoch - 1) + '.csv')
    cum_predictions = cum_predictions / epoch
np.savetxt(exp_dir+'/predictions/cum_predictions.csv', cum_predictions, delimiter=',')
stats = calculate_stats(cum_predictions, target)
return stats
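# validate_ensemble keeps a running mean of the per-epoch predictions: the
# stored cum_predictions after epoch e equal (p_1 + ... + p_e) / e, so loading
# them and multiplying by (e - 1) recovers the running sum exactly.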
def validate_wa(audio_model, val_loader, args, start_epoch, end_epoch):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
exp_dir = args.exp_dir
sdA = torch.load(exp_dir + '/models/audio_model.' + str(start_epoch) + '.pth', map_location=device)
model_cnt = 1
for epoch in range(start_epoch+1, end_epoch+1):
sdB = torch.load(exp_dir + '/models/audio_model.' + str(epoch) + '.pth', map_location=device)
for key in sdA:
sdA[key] = sdA[key] + sdB[key]
model_cnt += 1
        # if chosen not to save per-epoch models, remove them to save space
if args.save_model == False:
os.remove(exp_dir + '/models/audio_model.' + str(epoch) + '.pth')
# averaging
for key in sdA:
sdA[key] = sdA[key] / float(model_cnt)
audio_model.load_state_dict(sdA)
torch.save(audio_model.state_dict(), exp_dir + '/models/audio_model_wa.pth')
stats, loss = validate(audio_model, val_loader, args, 'wa')
return stats | 15,426 | 41.498623 | 182 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/demo.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : demo.py
import os
import torch
from models import ASTModel
# download pretrained model in this directory
os.environ['TORCH_HOME'] = '../pretrained_models'
# assume each input spectrogram has 100 time frames
input_tdim = 100
# assume the task has 527 classes
label_dim = 527
# create a pseudo input: a batch of 10 spectrogram, each with 100 time frames and 128 frequency bins
test_input = torch.rand([10, input_tdim, 128])
# create an AST model
ast_mdl = ASTModel(label_dim=label_dim, input_tdim=input_tdim, imagenet_pretrain=True, audioset_pretrain=True)
test_output = ast_mdl(test_input)
# output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
print(test_output.shape) | 829 | 36.727273 | 110 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/get_norm_stats.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : get_norm_stats.py
# this is a sample code of how to get normalization stats for input spectrogram
import torch
import numpy as np
from src import dataloader
# set skip_norm as True only when you are computing the normalization stats
audio_conf = {'num_mel_bins': 128, 'target_length': 1024, 'freqm': 24, 'timem': 192, 'mixup': 0.5, 'skip_norm': True, 'mode': 'train', 'dataset': 'audioset'}
train_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset('/data/sls/scratch/yuangong/audioset/datafiles/balanced_train_data.json', label_csv='/data/sls/scratch/yuangong/audioset/utilities/class_labels_indices.csv',
audio_conf=audio_conf), batch_size=1000, shuffle=False, num_workers=8, pin_memory=True)
mean=[]
std=[]
for i, (audio_input, labels) in enumerate(train_loader):
cur_mean = torch.mean(audio_input)
cur_std = torch.std(audio_input)
mean.append(cur_mean)
std.append(cur_std)
print(cur_mean, cur_std)
print(np.mean(mean), np.mean(std)) | 1,131 | 39.428571 | 188 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/AudioFnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy
import math
import numpy as np
import re
from scipy import linalg
class FNetInput(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config['input_size'], eps=config['layer_norm_eps'])
self.hidden_mapping = nn.Linear(config['input_size'], config['hidden_size'])
self.dropout = nn.Dropout(config['dropout_rate'])
def forward(self, x):
x = self.layer_norm(x)
x = self.hidden_mapping(x)
x = self.dropout(x)
return x
class FourierMMLayer(nn.Module):
'''
    Fourier mixing implemented with explicit DFT matrix multiplications
'''
def __init__(self, config):
super().__init__()
self.dft_mat_seq = torch.tensor(linalg.dft(config['max_position_embeddings']))
self.dft_mat_hidden = torch.tensor(linalg.dft(config['hidden_size']))
def forward(self, hidden_states):
hidden_states_complex = hidden_states.type(torch.complex128)
return torch.einsum(
"...ij,...jk,...ni->...nk",
hidden_states_complex,
self.dft_mat_hidden,
self.dft_mat_seq
).real.type(torch.float32)
class FourierFFTLayer(nn.Module):
'''
    Fourier mixing implemented with torch.fft (the default)
'''
def __init__(self):
super().__init__()
@torch.cuda.amp.autocast(enabled=False)
def forward(self, hidden_states):
return torch.fft.fft(torch.fft.fft(hidden_states.float(), dim=-1), dim=-2).real
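# Both Fourier layers implement FNet-style token mixing: a DFT along the
# hidden dimension followed by one along the sequence dimension, keeping only
# the real part. The FFT version costs O(n log n) per dimension, while the
# matmul version materialises explicit DFT matrices; autocast is disabled for
# the FFT path because torch.fft does not support half precision inputs.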
class FNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.fft = FourierMMLayer(config) if config['fourier'] == 'matmul' else FourierFFTLayer()
self.mixing_layer_norm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.feed_forward = nn.Linear(config['hidden_size'], config['intermediate_size'])
self.output_dense = nn.Linear(config['intermediate_size'], config['hidden_size'])
self.output_layer_norm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.dropout = nn.Dropout(config['dropout_rate'])
self.activation = nn.GELU()
def forward(self, hidden_states):
fft_output = self.fft(hidden_states)
fft_output = self.mixing_layer_norm(fft_output + hidden_states)
intermediate_output = self.feed_forward(fft_output)
intermediate_output = self.activation(intermediate_output)
output = self.output_dense(intermediate_output)
output = self.dropout(output)
output = self.output_layer_norm(output + fft_output)
return output
class FNetEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([FNetLayer(config) for _ in range(config['num_hidden_layers'])])
def forward(self, hidden_states):
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
return hidden_states
class FNetPooler(nn.Module):
def __init__(self, config):
super().__init__()
        self.att = nn.Linear(config['hidden_size'], config['num_classes'])
        self.fc = nn.Linear(config['hidden_size'], config['num_classes'])
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
# first_token_tensor = hidden_states[:, 0]
# pooled_output = self.dense(first_token_tensor)
# pooled_output = self.activation(pooled_output)
x = hidden_states
frame_prob = self.fc(x)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
frame_att = F.softmax(self.att(x),dim=1)
frame_att = torch.clamp(frame_att, 1e-7, 1 - 1e-7)
frame_att = frame_att / frame_att.sum(dim=1).unsqueeze(1)
global_prob = (frame_prob * frame_att).sum(dim=1)
#global_prob = torch.clamp(global_prob, 1e-7, 1 - 1e-7)
        return global_prob, None, None
class FNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.input = FNetInput(config)
self.encoder = FNetEncoder(config)
self.pooler = FNetPooler(config)
def forward(self, x):
input_output = self.input(x)
sequence_output = self.encoder(input_output)
pooled_output = self.pooler(sequence_output)
return pooled_output[0]
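# Note: FNetForPreTraining below appears to be carried over from the original
# text-domain FNet (MLM/NSP pretraining). Its forward expects an encoder
# taking (input_ids, type_ids) and returning (sequence_output, pooled_output),
# which the audio FNet above no longer provides, so it is presumably unused in
# this pipeline.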
class FNetForPreTraining(nn.Module):
def __init__(self, config):
super(FNetForPreTraining, self).__init__()
self.encoder = FNet(config)
self.input_size = config['input_size']
self.vocab_size = config['vocab_size']
self.hidden_size = config['hidden_size']
self.num_layers = config['num_hidden_layers']
self.mlm_intermediate = nn.Linear(self.hidden_size, self.input_size)
self.activation = nn.GELU()
self.mlm_layer_norm = nn.LayerNorm(self.input_size)
self.mlm_output = nn.Linear(self.input_size, self.vocab_size)
self.nsp_output = nn.Linear(self.hidden_size, 2)
def _mlm(self, x):
x = self.mlm_intermediate(x)
x = self.activation(x)
x = self.mlm_layer_norm(x)
x = self.mlm_output(x)
return x
def forward(self, input_ids, type_ids, mlm_positions=None):
sequence_output, pooled_output = self.encoder(input_ids, type_ids)
if mlm_positions is not None:
mlm_input = sequence_output.take_along_dim(mlm_positions.unsqueeze(-1), dim=1)
else:
mlm_input = sequence_output
mlm_logits = self._mlm(mlm_input)
nsp_logits = self.nsp_output(pooled_output)
return {"mlm_logits": mlm_logits, "nsp_logits": nsp_logits}
def get_default_config( fourier_type="fft",
layer_norm_eps=1e-12,
dropout_rate=0.1):
return {
"num_hidden_layers": 50,
"input_size": 128,
"hidden_size": 256,
"intermediate_size": 512,
"fourier": 'fft',
"layer_norm_eps": layer_norm_eps,
"dropout_rate": dropout_rate,
"num_classes":527
}
def get_config_from_statedict(state_dict,
fourier_type="fft",
pad_token_id=0,
layer_norm_eps=1e-12,
dropout_rate=0.1):
is_pretraining_checkpoint = 'mlm_output.weight' in state_dict.keys()
def prepare(key):
if is_pretraining_checkpoint:
return f"encoder.{key}"
return key
regex = re.compile(prepare(r'encoder.layer.\d+.feed_forward.weight'))
num_layers = len([key for key in state_dict.keys() if regex.search(key)])
return {
"num_hidden_layers": num_layers,
"vocab_size": state_dict[prepare('embeddings.word_embeddings.weight')].shape[0],
"embedding_size": state_dict[prepare('embeddings.word_embeddings.weight')].shape[1],
"hidden_size": state_dict[prepare('encoder.layer.0.output_dense.weight')].shape[0],
"intermediate_size": state_dict[prepare('encoder.layer.0.feed_forward.weight')].shape[0],
"max_position_embeddings": state_dict[prepare('embeddings.position_embeddings.weight')].shape[0],
"type_vocab_size": state_dict[prepare('embeddings.token_type_embeddings.weight')].shape[0],
# the following parameters can not be inferred from the state dict and must be given manually
"fourier": fourier_type,
"pad_token_id": pad_token_id,
"layer_norm_eps": layer_norm_eps,
"dropout_rate": dropout_rate,
}
def get_fnet():
config = get_default_config()
return FNet(config) | 7,877 | 33.704846 | 105 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/HigherModels.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def init_layer(layer):
if layer.weight.ndimension() == 4:
(n_out, n_in, height, width) = layer.weight.size()
n = n_in * height * width
elif layer.weight.ndimension() == 2:
(n_out, n) = layer.weight.size()
std = math.sqrt(2. / n)
scale = std * math.sqrt(3.)
layer.weight.data.uniform_(-scale, scale)
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
bn.weight.data.fill_(1.)
class Attention(nn.Module):
def __init__(self, n_in, n_out, att_activation, cla_activation):
super(Attention, self).__init__()
self.att_activation = att_activation
self.cla_activation = cla_activation
        self.att = nn.Conv2d(in_channels=n_in, out_channels=n_out,
                             kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        self.cla = nn.Conv2d(in_channels=n_in, out_channels=n_out,
                             kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
    def activate(self, x, activation):
        if activation == 'linear':
            return x
        elif activation == 'relu':
            return F.relu(x)
        elif activation == 'sigmoid':
            return torch.sigmoid(x)
        elif activation == 'softmax':
            return F.softmax(x, dim=1)
        else:
            # fail loudly instead of silently returning None
            raise ValueError('unknown activation: {}'.format(activation))
def forward(self, x):
"""input: (samples_num, freq_bins, time_steps, 1)
"""
att = self.att(x)
att = self.activate(att, self.att_activation)
cla = self.cla(x)
cla = self.activate(cla, self.cla_activation)
att = att[:, :, :, 0] # (samples_num, classes_num, time_steps)
cla = cla[:, :, :, 0] # (samples_num, classes_num, time_steps)
epsilon = 1e-7
att = torch.clamp(att, epsilon, 1. - epsilon)
norm_att = att / torch.sum(att, dim=2)[:, :, None]
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att
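# Minimal shape check for the attention pooling above (values illustrative;
# input layout follows the forward() docstring: (batch, feature_maps, time, 1)):
# pool = Attention(1024, 527, att_activation='sigmoid', cla_activation='sigmoid')
# clip_prob, norm_att = pool(torch.rand(2, 1024, 100, 1))
# clip_prob.shape == (2, 527); norm_att.sum(dim=2) is all ones.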
class MeanPooling(nn.Module):
def __init__(self, n_in, n_out, att_activation, cla_activation):
super(MeanPooling, self).__init__()
self.cla_activation = cla_activation
self.cla = nn.Conv2d(
in_channels=n_in, out_channels=n_out, kernel_size=(
1, 1), stride=(
1, 1), padding=(
0, 0), bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.cla)
def activate(self, x, activation):
return torch.sigmoid(x)
def forward(self, x):
"""input: (samples_num, freq_bins, time_steps, 1)
"""
cla = self.cla(x)
cla = self.activate(cla, self.cla_activation)
cla = cla[:, :, :, 0] # (samples_num, classes_num, time_steps)
x = torch.mean(cla, dim=2)
return x, []
class MHeadAttention(nn.Module):
def __init__(self, n_in, n_out, att_activation, cla_activation, head_num=4):
super(MHeadAttention, self).__init__()
self.head_num = head_num
self.att_activation = att_activation
self.cla_activation = cla_activation
self.att = nn.ModuleList([])
self.cla = nn.ModuleList([])
for i in range(self.head_num):
self.att.append(nn.Conv2d(in_channels=n_in, out_channels=n_out, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True))
self.cla.append(nn.Conv2d(in_channels=n_in, out_channels=n_out, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True))
self.head_weight = nn.Parameter(torch.tensor([1.0/self.head_num] * self.head_num))
    def activate(self, x, activation):
        if activation == 'linear':
            return x
        elif activation == 'relu':
            return F.relu(x)
        elif activation == 'sigmoid':
            return torch.sigmoid(x)
        elif activation == 'softmax':
            return F.softmax(x, dim=1)
        else:
            # fail loudly instead of silently returning None
            raise ValueError('unknown activation: {}'.format(activation))
def forward(self, x):
"""input: (samples_num, freq_bins, time_steps, 1)
"""
x_out = []
for i in range(self.head_num):
att = self.att[i](x)
att = self.activate(att, self.att_activation)
cla = self.cla[i](x)
cla = self.activate(cla, self.cla_activation)
att = att[:, :, :, 0] # (samples_num, classes_num, time_steps)
cla = cla[:, :, :, 0] # (samples_num, classes_num, time_steps)
epsilon = 1e-7
att = torch.clamp(att, epsilon, 1. - epsilon)
norm_att = att / torch.sum(att, dim=2)[:, :, None]
x_out.append(torch.sum(norm_att * cla, dim=2) * self.head_weight[i])
x = (torch.stack(x_out, dim=0)).sum(dim=0)
return x, [] | 4,936 | 28.562874 | 138 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/Net_mModal_mgpu.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy
import math
import numpy as np
# from model import * # Poyao's model.py
class ConvBlock(nn.Module):
def __init__(self, n_input_feature_maps, n_output_feature_maps, kernel_size, batch_norm = False, pool_stride = None):
super(ConvBlock, self).__init__()
assert all(x % 2 == 1 for x in kernel_size)
self.n_input = n_input_feature_maps
self.n_output = n_output_feature_maps
self.kernel_size = kernel_size
self.batch_norm = batch_norm
self.pool_stride = pool_stride
        # `bias = ~batch_norm` was a bug: `~False` is -1 and `~True` is -2, both
        # truthy, so the bias was always enabled; `not` gives the intended switch
        self.conv = nn.Conv2d(self.n_input, self.n_output, self.kernel_size, padding = tuple(int(x/2) for x in self.kernel_size), bias = not batch_norm)
        if batch_norm: self.bn = nn.BatchNorm2d(self.n_output)
        # std = math.sqrt((4 * (1.0 - dropout)) / kernel_size[0] * n_input_feature_maps)
        # self.conv.weight.data.normal_(mean=0, std=std)
        # self.conv.bias.data.zero_()
        nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x):
x = self.conv(x)
if self.batch_norm: x = self.bn(x)
x = F.relu(x)
if self.pool_stride is not None: x = F.max_pool2d(x, self.pool_stride)
return x
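# Shape-contract sketch for the block above (kernel sizes must be odd so the
# 'same' padding holds; only the optional pooling changes resolution):
# blk = ConvBlock(1, 32, (3, 3), batch_norm=True, pool_stride=(2, 2))
# blk(torch.rand(4, 1, 400, 64)).shape == (4, 32, 200, 32)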
class TALNet(nn.Module):
def __init__(self, args):
super(TALNet, self).__init__()
self.__dict__.update(args.__dict__) # Instill all args into self
# print(self.n_conv_layers)
assert self.n_conv_layers % self.n_pool_layers == 0
self.input_n_freq_bins = n_freq_bins = args.n_mels
self.output_size = 527
self.conv = nn.ModuleList()
# self.conv = []
        pool_interval = int(self.n_conv_layers / self.n_pool_layers)
n_input = 1
for i in range(self.n_conv_layers):
if (i + 1) % pool_interval == 0: # this layer has pooling
n_freq_bins = int(n_freq_bins / 2)
n_output = int(self.embedding_size / n_freq_bins)
pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
else:
n_output = int(self.embedding_size * 2 / n_freq_bins)
pool_stride = None
layer = ConvBlock(n_input, n_output, self.kernel_size, batch_norm = self.batch_norm, pool_stride = pool_stride)
self.conv.append(layer)
self.__setattr__('conv' + str(i + 1), layer)
n_input = n_output
half_embedding_size = int(self.embedding_size/2)
self.gru = nn.GRU(self.embedding_size, half_embedding_size, 1, batch_first = True, bidirectional = True)
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
# Better initialization
nn.init.orthogonal_(self.gru.weight_ih_l0); nn.init.constant_(self.gru.bias_ih_l0, 0)
nn.init.orthogonal_(self.gru.weight_hh_l0); nn.init.constant_(self.gru.bias_hh_l0, 0)
nn.init.orthogonal_(self.gru.weight_ih_l0_reverse); nn.init.constant_(self.gru.bias_ih_l0_reverse, 0)
nn.init.orthogonal_(self.gru.weight_hh_l0_reverse); nn.init.constant_(self.gru.bias_hh_l0_reverse, 0)
nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
if self.pooling == 'att':
nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
def forward(self, x):
#print('x shape:', x.shape)
x = x.view((-1, 1, x.size(1), x.size(2))) # x becomes (batch, channel, time, freq)
for i in range(len(self.conv)):
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = self.conv[i](x) # x becomes (batch, channel, time, freq)
#print('x shape:', x.shape)
x = x.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
#print('x shape:', x.shape)
x = x.view((-1, x.size(1), x.size(2) * x.size(3))) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
#print('x shape:', x.shape)
x, _ = self.gru(x) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
frame_prob = torch.sigmoid(self.fc_prob(x)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
# return global_prob, frame_prob, frame_att
return global_prob
    def predict(self, x, verbose = True, batch_size = 100):
        # Predict in batches. Both input and output are numpy arrays.
        # If verbose == True, return every quantity produced by forward()
        # If verbose == False, only return global_prob
        # forward() currently returns a single tensor, so it is wrapped in a
        # tuple before collecting; iterating a bare tensor would loop over the
        # batch dimension instead of the returned quantities.
        result = []
        for i in range(0, len(x), batch_size):
            with torch.no_grad():
                input = Variable(torch.from_numpy(x[i : i + batch_size])).cuda()
                output = self.forward(input)
                if not isinstance(output, tuple): output = (output,)
                result.append([var.data.cpu().numpy() for var in output])
        result = tuple(numpy.concatenate(items) for items in zip(*result))
        return result if verbose else result[0]
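# Hypothetical instantiation (TALNet reads its hyper-parameters off an
# argparse-style namespace; the field names below mirror the attributes used
# above and the values are only an example):
# import argparse
# args = argparse.Namespace(n_conv_layers=10, n_pool_layers=5, n_mels=64,
#                           embedding_size=1024, kernel_size=(3, 3),
#                           batch_norm=True, dropout=0.0, pooling='att')
# model = TALNet(args)
# global_prob = model(torch.rand(2, 400, 64))  # (batch, time, freq) -> (2, 527)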
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
# nn.init.xavier_uniform_(self.w_qs.weight)
# nn.init.xavier_uniform_(self.w_ks.weight)
# nn.init.xavier_uniform_(self.w_vs.weight)
# nn.init.constant_(self.w_qs.bias, 0.)
# nn.init.constant_(self.w_ks.bias, 0.)
# nn.init.constant_(self.w_vs.bias, 0.)
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5),
attn_dropout=dropout)
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
#nn.init.constant_(self.fc.bias, 0.)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
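# Example shapes for self-attention (d_model is split across n_head heads and
# projected back by fc, so n_head * d_v should equal d_model here):
# mha = MultiHeadAttention(n_head=8, d_model=1024, d_k=128, d_v=128)
# out, attn = mha(q, k, v)  # with q = k = v = torch.rand(2, 100, 1024)
# out.shape == (2, 100, 1024); attn.shape == (16, 100, 100)  # (n_head*batch, lq, lk)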
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class PositionalEncoding(nn.Module):
"""Implement the positional encoding (PE) function.
PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))
PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
"""
def __init__(self, d_model, max_len=5000):
super(PositionalEncoding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model, requires_grad=False)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, input):
"""
Args:
input: N x T x D
"""
length = input.size(1)
return self.pe[:, :length]
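# Sanity check of the sinusoid table (matches the formulas in the docstring;
# forward() only reads the input's length, not its values):
# pe = PositionalEncoding(d_model=64)
# table = pe(torch.zeros(1, 10, 64))  # -> (1, 10, 64), positions 0..9
# table[0, 3, 0] == math.sin(3 / 10000 ** (0 / 64))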
class PositionwiseFeedForward(nn.Module):
"""Implements position-wise feedforward sublayer.
FFN(x) = max(0, xW1 + b1)W2 + b2
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, x):
residual = x
output = self.w_2(F.relu(self.w_1(x)))
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
class TransformerLayer(nn.Module):
def __init__(self, hidden_size, dropout=0.1):
super(TransformerLayer, self).__init__()
# parameters
#self.hidden_size = hidden_size
#self.pe_maxlen = pe_maxlen
#self.linear_in = nn.Linear(hidden_size, hidden_size)
#self.layer_norm_in = nn.LayerNorm(hidden_size)
#self.positional_encoding = PositionalEncoding(hidden_size, max_len=pe_maxlen)
#self.dropout = nn.Dropout(dropout)
        self.slf_attn = MultiHeadAttention(
            8, hidden_size, hidden_size // 8, hidden_size // 8, dropout=dropout)  # integer head dims; `/` yields floats, which nn.Linear rejects
self.pos_ffn = PositionwiseFeedForward(
hidden_size, hidden_size, dropout=dropout)
def forward(self, hidden_states):
#enc_output = self.dropout(
# self.layer_norm_in(self.linear_in(hidden_states)) +
# self.positional_encoding(hidden_states))
#enc_output = hidden_states + self.positional_encoding(hidden_states)
enc_output, enc_slf_attn = self.slf_attn(
hidden_states, hidden_states, hidden_states)
enc_output = self.pos_ffn(enc_output)
return enc_output
class NewNet(nn.Module):
def __init__(self, args):
super(NewNet, self).__init__()
self.__dict__.update(args.__dict__) # Instill all args into self
assert self.n_conv_layers % self.n_pool_layers == 0
self.input_n_freq_bins = n_freq_bins = args.n_mels
self.output_size = 527
# self.conv = []
self.conv = nn.ModuleList()
        pool_interval = int(self.n_conv_layers / self.n_pool_layers)
n_input = 1
for i in range(self.n_conv_layers):
if (i + 1) % pool_interval == 0: # this layer has pooling
n_freq_bins = int(n_freq_bins / 2)
n_output = int(self.embedding_size / n_freq_bins)
pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
else:
n_output = int(self.embedding_size * 2 / n_freq_bins)
pool_stride = None
layer = ConvBlock(n_input, n_output, self.kernel_size, batch_norm = self.batch_norm, pool_stride = pool_stride)
self.conv.append(layer)
self.__setattr__('conv' + str(i + 1), layer)
n_input = n_output
half_embedding_size = int(self.embedding_size / 2)
self.gru = nn.GRU(self.embedding_size, half_embedding_size, 1, batch_first = True, bidirectional = True)
#self.position_embeddings = nn.Embedding(400, 64)
#self.positional_encoding = PositionalEncoding(64, max_len=400)
#self.self_att = BERTSelfAttention(self.embedding_size)
#self.transformer = TransformerLayer(self.embedding_size, dropout=self.dropout)
#self.layer_stack = nn.ModuleList([
# EncoderLayer(self.embedding_size, self.embedding_size*2, dropout=self.dropout)
# for _ in range(self.n_trans_layers)])
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
#self.proj_back = nn.Linear(self.embedding_size*2, self.embedding_size)
if self.pooling == 'att' or self.pooling == 'all':
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
# Better initialization
        nn.init.orthogonal_(self.gru.weight_ih_l0); nn.init.constant_(self.gru.bias_ih_l0, 0)
        nn.init.orthogonal_(self.gru.weight_hh_l0); nn.init.constant_(self.gru.bias_hh_l0, 0)
        nn.init.orthogonal_(self.gru.weight_ih_l0_reverse); nn.init.constant_(self.gru.bias_ih_l0_reverse, 0)
        nn.init.orthogonal_(self.gru.weight_hh_l0_reverse); nn.init.constant_(self.gru.bias_hh_l0_reverse, 0)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        #nn.init.xavier_uniform_(self.proj_back.weight); nn.init.constant_(self.proj_back.bias, 0)
        if self.pooling == 'att' or self.pooling == 'all':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
        if self.pooling == 'h-att':
            self.stride_pool = nn.AvgPool1d(5)
        if self.pooling == 'all':
            self.ens = nn.Linear(5, 5)
            nn.init.xavier_uniform_(self.ens.weight); nn.init.constant_(self.ens.bias, 0)
def forward(self, x):
#batch_size, seq_length, _ = x.shape
#position_ids = torch.arange(seq_length, dtype=torch.long, device=x.device)
#position_ids = position_ids.unsqueeze(0).repeat(batch_size, 1)
#position_embeddings = self.position_embeddings(position_ids)
#x = x + position_embeddings
#x = x + self.positional_encoding(x)
# print('x shape:', x.shape)
x = x.view((-1, 1, x.size(1), x.size(2))) # x becomes (batch, channel, time, freq)
for i in range(len(self.conv)):
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = self.conv[i](x) # x becomes (batch, channel, time, freq)
# print('x shape:', x.shape)
x = x.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
# print('x shape:', x.shape)
x = x.view((-1, x.size(1), x.size(2) * x.size(3))) # x becomes (batch, time, embedding_size)
# print('x shape:', x.shape)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
# print('x shape:', x.shape)
#x = self.transformer(x)
x, _ = self.gru(x) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
frame_prob = torch.sigmoid(self.fc_prob(x)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
return global_prob, frame_prob, frame_att
elif self.pooling == 'h-att':
segment_prob = (frame_prob * frame_prob)
frame_prob = frame_prob.permute(0, 2, 1)
segment_prob = segment_prob.permute(0, 2, 1)
xj = self.stride_pool(segment_prob)/self.stride_pool(frame_prob)
wj = self.stride_pool(frame_prob)
xj = xj.permute(0, 2, 1)
wj = wj.permute(0, 2, 1)
global_prob = (xj*wj).sum(dim=1)/wj.sum(dim=1)
return global_prob, frame_prob.permute(0, 2, 1)
elif self.pooling == 'all':
max_prob, _ = frame_prob.max(dim = 1)
max_prob = max_prob.unsqueeze(-1)
ave_prob = frame_prob.mean(dim=1).unsqueeze(-1)
lin_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
lin_prob = lin_prob.unsqueeze(-1)
exp_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
exp_prob = exp_prob.unsqueeze(-1)
frame_att = F.softmax(self.fc_att(x), dim = 1)
att_prob = (frame_prob * frame_att).sum(dim = 1)
att_prob = att_prob.unsqueeze(-1)
all_prob = torch.cat([max_prob, ave_prob, lin_prob, exp_prob, att_prob], dim=2)
global_weights = F.softmax(self.ens(all_prob), dim=2)
global_prob = (all_prob * global_weights).sum(dim=2)
global_weights = global_weights.permute(0, 2, 1)
return global_prob, frame_prob, frame_att, global_weights
def predict(self, x, verbose = True, batch_size = 100):
# Predict in batches. Both input and output are numpy arrays.
# If verbose == True, return all of global_prob, frame_prob and att
# If verbose == False, only return global_prob
result = []
for i in range(0, len(x), batch_size):
with torch.no_grad():
input = Variable(torch.from_numpy(x[i : i + batch_size])).cuda()
output = self.forward(input)
if not verbose: output = output[:1]
result.append([var.data.cpu().numpy() for var in output])
result = tuple(numpy.concatenate(items) for items in zip(*result))
return result if verbose else result[0]
class EncoderLayer(nn.Module):
"""Compose with two sub-layers.
1. A multi-head self-attention mechanism
2. A simple, position-wise fully connected feed-forward network.
"""
def __init__(self, d_model, d_inner, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
8, d_model, int(d_model/8), int(d_model/8), dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, dropout=dropout)
def forward(self, enc_input):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input)
enc_output = self.pos_ffn(enc_output)
return enc_output
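# One encoder block in isolation (d_model must be divisible by 8, the head
# count hard-coded above):
# enc = EncoderLayer(d_model=1024, d_inner=2048)
# y = enc(torch.rand(2, 100, 1024))  # -> (2, 100, 1024)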
class TransformerEncoder(nn.Module):
"""Encoder of Transformer including self-attention and feed forward.
"""
def __init__(self, args):
super(TransformerEncoder, self).__init__()
self.__dict__.update(args.__dict__)
assert self.n_conv_layers % self.n_pool_layers == 0
self.input_n_freq_bins = n_freq_bins = args.n_mels
self.output_size = 527
# self.conv = []
self.conv = nn.ModuleList()
pool_interval = int(self.n_conv_layers / self.n_pool_layers)
n_input = 1
for i in range(self.n_conv_layers):
if (i + 1) % pool_interval == 0: # this layer has pooling
n_freq_bins = int(n_freq_bins/2)
n_output = int(self.embedding_size / n_freq_bins)
pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
else:
n_output = int(self.embedding_size * 2 / n_freq_bins)
pool_stride = None
layer = ConvBlock(n_input, n_output, self.kernel_size, batch_norm = self.batch_norm, pool_stride = pool_stride)
self.conv.append(layer)
self.__setattr__('conv' + str(i + 1), layer)
n_input = n_output
# use linear transformation with layer norm to replace input embedding
#self.linear_in = nn.Linear(self.input_n_freq_bins, self.embedding_size)
#self.layer_norm_in = nn.LayerNorm(self.embedding_size)
if self.addpos:
self.positional_encoding = PositionalEncoding(args.n_mels, max_len=args.target_length)
self.enc_layer = EncoderLayer(self.embedding_size, self.embedding_size, dropout=self.transformer_dropout)
# self.layer_stack = nn.ModuleList([
# EncoderLayer(self.embedding_size, self.embedding_size, dropout=self.transformer_dropout)
# for _ in range(self.n_trans_layers)])
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        if self.pooling == 'att':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
def forward(self, x):
"""
Args:
padded_input: N x T x D
input_lengths: N
Returns:
enc_output: N x T x H
"""
# Forward
if self.addpos:
x = x*8 + self.positional_encoding(x)
x = x.view((-1, 1, x.size(1), x.size(2))) # x becomes (batch, channel, time, freq)
for i in range(len(self.conv)):
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = self.conv[i](x) # x becomes (batch, channel, time, freq)
x = x.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
x = x.view((-1, x.size(1), x.size(2) * x.size(3))) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
for _ in range(self.n_trans_layers):
x = self.enc_layer(x)
#for enc_layer in self.layer_stack:
# x = enc_layer(x)
if self.dropout > 0:
x_hat = F.dropout(x, p = self.dropout, training = self.training)
else:
x_hat = x
# print(x_hat.shape)
frame_prob = torch.sigmoid(self.fc_prob(x_hat)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
# return global_prob, frame_prob
return global_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x_hat), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
# return global_prob, x, frame_prob, frame_att
return global_prob
    def predict(self, x, verbose = True, batch_size = 100):
        # Predict in batches. Both input and output are numpy arrays.
        # If verbose == True, return every quantity produced by forward()
        # If verbose == False, only return global_prob
        # forward() currently returns a single tensor, so it is wrapped in a
        # tuple before collecting; iterating a bare tensor would loop over the
        # batch dimension instead of the returned quantities.
        result = []
        for i in range(0, len(x), batch_size):
            with torch.no_grad():
                input = Variable(torch.from_numpy(x[i : i + batch_size])).cuda()
                output = self.forward(input)
                if not isinstance(output, tuple): output = (output,)
                result.append([var.data.cpu().numpy() for var in output])
        result = tuple(numpy.concatenate(items) for items in zip(*result))
        return result if verbose else result[0]
class Transformer(nn.Module):
"""Encoder of Transformer including self-attention and feed forward.
"""
def __init__(self, args):
super(Transformer, self).__init__()
self.__dict__.update(args.__dict__)
self.input_n_freq_bins = n_freq_bins = args.n_mels
self.output_size = 527
# use linear transformation with layer norm to replace input embedding
self.linear_in = nn.Linear(self.input_n_freq_bins, self.embedding_size)
self.layer_norm_in = nn.LayerNorm(self.embedding_size)
#self.positional_encoding = PositionalEncoding(64, max_len=400)
self.layer_stack = nn.ModuleList([
EncoderLayer(self.embedding_size, self.embedding_size*2, dropout=self.dropout)
for _ in range(self.n_trans_layers)])
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        if self.pooling == 'att':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
def forward(self, x):
"""
Args:
padded_input: N x T x D
input_lengths: N
Returns:
enc_output: N x T x H
"""
# Forward
# x becomes (batch, time, channel, freq)
x = self.layer_norm_in(self.linear_in(x)) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
for enc_layer in self.layer_stack:
x = enc_layer(x)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = x.permute(0, 2, 1)
x = F.max_pool1d(x, 4)
x = x.permute(0, 2, 1)
frame_prob = torch.sigmoid(self.fc_prob(x)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
return global_prob, frame_prob, frame_att
def predict(self, x, verbose = True, batch_size = 100):
# Predict in batches. Both input and output are numpy arrays.
# If verbose == True, return all of global_prob, frame_prob and att
# If verbose == False, only return global_prob
result = []
for i in range(0, len(x), batch_size):
with torch.no_grad():
input = Variable(torch.from_numpy(x[i : i + batch_size])).cuda()
output = self.forward(input)
if not verbose: output = output[:1]
result.append([var.data.cpu().numpy() for var in output])
result = tuple(numpy.concatenate(items) for items in zip(*result))
return result if verbose else result[0]
class MMTEncoder(nn.Module):
"""Encoder of Transformer including self-attention and feed forward.
"""
def __init__(self, args):
super(MMTEncoder, self).__init__()
self.__dict__.update(args.__dict__)
assert self.n_conv_layers % self.n_pool_layers == 0
#self.fusion_module = fusion_module
self.input_n_freq_bins = n_freq_bins = args.n_mels
self.output_size = 527
self.conv = nn.ModuleList()
pool_interval = int(self.n_conv_layers / self.n_pool_layers)
n_input = 1
for i in range(self.n_conv_layers):
if (i + 1) % pool_interval == 0: # this layer has pooling
n_freq_bins = int(n_freq_bins/2)
n_output = int(self.embedding_size / n_freq_bins)
pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
else:
n_output = int(self.embedding_size * 2 / n_freq_bins)
pool_stride = None
layer = ConvBlock(n_input, n_output, self.kernel_size, batch_norm = self.batch_norm, pool_stride = pool_stride)
self.conv.append(layer)
self.__setattr__('conv' + str(i + 1), layer)
n_input = n_output
# use linear transformation with layer norm to replace input embedding
#self.linear_in = nn.Linear(self.input_n_freq_bins, self.embedding_size)
#self.layer_norm_in = nn.LayerNorm(self.embedding_size)
if self.addpos:
self.positional_encoding = PositionalEncoding(args.n_mels, max_len=args.target_length)
# self.enc_layer = EncoderLayer(self.embedding_size, self.embedding_size, dropout=self.transformer_dropout)
if self.fusion_module == 0:
self.proj0 = nn.Linear(9216, self.embedding_size)
if self.fusion_module == 1:
self.proj1 = nn.Linear(12288, self.embedding_size)
self.enc_layer = EncoderLayer(self.embedding_size, self.embedding_size, dropout=self.transformer_dropout)
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
# self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        if self.pooling == 'att':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
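    # Fusion points selected by self.fusion_module in forward():
    #   0 - audio and video features are concatenated before the conv stack
    #       and projected down (proj0) after flattening
    #   1 - features are concatenated after the conv stack and projected
    #       (proj1) before the transformer layers
    #   2 - features are concatenated after the transformer layers, right
    #       before the classification head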
def forward(self, x1, x2):
"""
Args:
padded_input: N x T x D
input_lengths: N
x1: N T1 D1 400 64
x2: N T2 D2 10 2048
Returns:
enc_output: N x T x H
"""
# Forward
if self.addpos:
x1 = x1*8 + self.positional_encoding(x1)
if self.fusion_module == 0:# direct fusion
N, T1, D1 = x1.shape
N, T2, D2 = x2.shape
try:
# x1 = x1.reshape((N, 1, T2, -1)) # x becomes (batch, channel, time, freq)
# x2 = x2.reshape((N, 1, T2, -1))
x1 = x1.reshape((N, 1, 80, -1)) # x becomes (batch, channel, time, freq)
x2 = x2.reshape((N, 1, 80, -1))
except:
print('x1:', x1.shape)
print('x2:', x2.shape)
#print('x1:', x1.shape)
#print('x2:', x2.shape)
x = torch.cat((x1, x2), dim = 3)
#x = F.relu(self.proj0(x))
else:
N, T1, D1 = x1.shape
x = x1.view((N, 1, T1, D1))
for i in range(len(self.conv)):
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = self.conv[i](x) # x becomes (batch, channel, time, freq)
#print (x.shape)
if self.fusion_module == 1: # fuse after conv before transform
N, C, T1, D1 = x.shape
N, T2, D2 = x2.shape
            if T1 % T2 != 0:  # pad the time axis up to a multiple of T2
                # the original padded by T1 % T2, which does not always reach a
                # multiple of T2, and called torch.floor on a Python int
                pad_amt = T2 - T1 % T2
                x = F.pad(input=x, pad=(0, 0, pad_amt // 2, pad_amt - pad_amt // 2), mode='constant', value=0)
# x = x.permute
# x = x.view
# x = x.permute
x = x.permute(0, 2, 1, 3).contiguous()
x1 = x.view((N, 1, T2, -1)) # need to check if view work as thought # x becomes (batch, channel, time, freq)
x2 = x2.view((N, 1, T2, D2))
x = torch.cat((x1, x2), dim = 3) # x becomes (batch, channel, time, freq)
x = F.relu(self.proj1(x))
x = x.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
x = x.view((-1, x.size(1), x.size(2) * x.size(3))) # x becomes (batch, time, embedding_size)
if self.fusion_module == 0:
x = F.relu(self.proj0(x))
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
for _ in range(self.n_trans_layers):
x = self.enc_layer(x)
#for enc_layer in self.layer_stack:
# x = enc_layer(x)
if self.fusion_module == 2: # fuse after transform before fully connected
N, T1, D1 = x.shape
N, T2, D2 = x2.shape
            if T1 % T2 != 0:  # pad the time axis up to a multiple of T2
                # same fix as above: integer ops instead of torch.floor, and a
                # pad amount that actually reaches the next multiple of T2
                pad_amt = T2 - T1 % T2
                x = F.pad(input=x, pad=(0, 0, pad_amt // 2, pad_amt - pad_amt // 2), mode='constant', value=0)
x1 = x.view((N, T2, -1)) # need to check if view work as thought
x2 = x2.view((N, T2, D2))
x = torch.cat((x1, x2), dim = 2)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
frame_prob = torch.sigmoid(self.fc_prob(x)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
return global_prob, frame_prob, frame_att
def predict(self, x1, x2, verbose = True, batch_size = 100):
# Predict in batches. Both input and output are numpy arrays.
# If verbose == True, return all of global_prob, frame_prob and att
# If verbose == False, only return global_prob
result = []
for i in range(0, len(x1), batch_size):
with torch.no_grad():
input1 = Variable(torch.from_numpy(x1[i : i + batch_size])).cuda()
input2 = Variable(torch.from_numpy(x2[i : i + batch_size])).cuda()
output = self.forward(input1, input2)
# att = output[2].cpu().numpy()
# np.save('TALtransatt_515.npy', att)
# frame = output[1].cpu().numpy()
# np.save('TALtransframe_515.npy', frame)
# exit(0)
if not verbose: output = output[:2]
result.append([var.data.cpu().numpy() for var in output])
result = tuple(numpy.concatenate(items) for items in zip(*result))
if verbose:
return result
return result[0], result[1]
#TODO
class LateFusion(nn.Module):
"""late fusion model.
"""
def __init__(self, args):
super(LateFusion, self).__init__()
self.__dict__.update(args.__dict__)
# I think that we should copy the whole branch model instead of using extracted feature
self.output_size = 527
self.embedding_size=1024
self.n_feature=2048
self.alpha = nn.Linear(self.embedding_size, self.embedding_size)
self.beta = nn.Linear(self.embedding_size, self.embedding_size)
# self.beta = nn.Linear(self.n_feature, self.embedding_size)
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
if self.pooling == 'att':
# self.fc_att = nn.Linear(self.embedding_size, self.output_size)
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
# for p in self.parameters():
# if p.dim() > 1:
# nn.init.xavier_uniform_(p)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        if self.pooling == 'att':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
self.branch1 = TransformerEncoder(args)
self.branch2 = videoModel(args)
# if not args.from_scratch:
# self.branch1.load_state_dict(torch.load('/home/kaixinm/kaixinm/workspace/audioset/TAL-trans-embed1024-10C5P-kernel3x3-bn-drop0.0-att-batch100-ckpt2500-adam-lr4e-04-pat3-fac0.8-seed15213-Trans2-weight-decay0.00000000-betas0.900-0.999repro/model/checkpoint19.pt')['model'])
# # self.branch2.load_state_dict(torch.load('/home/billyli/data_folder/workspace/audioset/VM-embed1024-10C5P-kernel3x3-bn-drop0.0-att-batch100-ckpt2500-adam-lr4e-04-pat3-fac0.8-seed15213-Trans2-weight-decay0.00000000-betas0.900-0.999late_fusion_branch2/model/checkpoint29.pt')['model'])
# self.branch2.load_state_dict(torch.load('/home/kaixinm/kaixinm/workspace/audioset/VM-embed1024-10C5P-kernel3x3-bn-drop0.0-att-batch100-ckpt2500-adam-lr4e-04-pat3-fac0.8-seed15213-Trans2-weight-decay0.00000000-betas0.900-0.999baseline/model/checkpoint30.pt')['model'])
# #self.branch1.train(False)
# #self.branch2.train(False)
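    # Note: forward() below indexes the branch outputs with [1] to get frame
    # embeddings, which assumes the 'att' path of TransformerEncoder returns
    # the commented-out 4-tuple (global_prob, x, frame_prob, frame_att);
    # videoModel already returns that 4-tuple under 'att' pooling.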
def forward(self, x1, x2):
# B*E and B*E, E should be 1024 for both
x1 = self.branch1(x1)[1]
x2 = self.branch2(x2)[1]
x1 = x1.permute(0, 2, 1)
x1 = F.avg_pool1d(x1, 10)
x1 = x1.permute(0, 2, 1)
# print(x1.shape, x2.shape)
x = self.alpha(x1) + self.beta(x2)
if self.dropout > 0:
x_hat = F.dropout(x, p = self.dropout, training = self.training)
else:
x_hat = x
frame_prob = torch.sigmoid(self.fc_prob(x_hat)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x_hat), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
return global_prob, x, frame_prob, frame_att
def predict(self, x1, x2, verbose = True, batch_size = 100):
# Predict in batches. Both input and output are numpy arrays.
# If verbose == True, return all of global_prob, frame_prob and att
# If verbose == False, only return global_prob
result = []
for i in range(0, len(x1), batch_size):
with torch.no_grad():
input1 = Variable(torch.from_numpy(x1[i : i + batch_size])).cuda()
input2 = Variable(torch.from_numpy(x2[i : i + batch_size])).cuda()
output = self.forward(input1, input2)
# att = output[2].cpu().numpy()
# np.save('TALtransatt_515.npy', att)
# frame = output[1].cpu().numpy()
# np.save('TALtransframe_515.npy', frame)
# exit(0)
if not verbose: output = output[:2]
result.append([var.data.cpu().numpy() for var in output])
result = tuple(numpy.concatenate(items) for items in zip(*result))
if verbose:
return result
return result[0], result[1]
class SuperLateFusion(nn.Module):
"""late fusion model.
"""
def __init__(self, args):
super(SuperLateFusion, self).__init__()
self.__dict__.update(args.__dict__)
# I think that we should copy the whole branch model instead of using extracted feature
self.output_size = 527
self.embedding_size=1024
self.n_feature=2048
#self.alpha = nn.Linear(self.embedding_size, self.embedding_size)
#self.beta = nn.Linear(self.embedding_size, self.embedding_size)
# self.beta = nn.Linear(self.n_feature, self.embedding_size)
#self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
#if self.pooling == 'att':
# self.fc_att = nn.Linear(self.embedding_size, self.output_size)
# self.fc_att = nn.Linear(self.embedding_size, self.output_size)
# for p in self.parameters():
# if p.dim() > 1:
# nn.init.xavier_uniform_(p)
#nn.init.xavier_uniform(self.fc_prob.weight); nn.init.constant(self.fc_prob.bias, 0)
#if self.pooling == 'att':
# nn.init.xavier_uniform(self.fc_att.weight); nn.init.constant(self.fc_att.bias, 0)
self.branch1 = TransformerEncoder(args)
self.branch2 = videoModel(args)
if not args.from_scratch:
print('here!!!!!!')
self.branch1.load_state_dict(torch.load('/home/kaixinm/kaixinm/workspace/audioset/TAL-trans-embed1024-10C5P-kernel3x3-bn-drop0.0-att-batch100-ckpt2500-adam-lr4e-04-pat3-fac0.8-seed15213-Trans2-weight-decay0.00000000-betas0.900-0.999shorter/model/checkpoint20.pt')['model'])
# self.branch2.load_state_dict(torch.load('/home/billyli/data_folder/workspace/audioset/VM-embed1024-10C5P-kernel3x3-bn-drop0.0-att-batch100-ckpt2500-adam-lr4e-04-pat3-fac0.8-seed15213-Trans2-weight-decay0.00000000-betas0.900-0.999late_fusion_branch2/model/checkpoint29.pt')['model'])
self.branch2.load_state_dict(torch.load('/home/kaixinm/kaixinm/workspace/audioset/VM-embed1024-10C5P-kernel3x3-bn-drop0.0-att-batch100-ckpt2500-adam-lr4e-04-pat3-fac0.8-seed15213-Trans2-weight-decay0.00000000-betas0.900-0.999dropout0.5/model/checkpoint18.pt')['model'])
#self.branch1.train(False)
#self.branch2.train(False)
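    # Note: forward() below unpacks four values from each branch, so it makes
    # the same assumption as LateFusion: the 'att' path of TransformerEncoder
    # must return (global_prob, x, frame_prob, frame_att), not global_prob alone.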
def forward(self, x1, x2):
# B*E and B*E, E should be 1024 for both
pred1, x1, _, _ = self.branch1(x1)
pred2, x2, _, _ = self.branch2(x2)
final_pred = pred1*0.6 + pred2*0.4
return (final_pred, )
def predict(self, x1, x2, verbose = True, batch_size = 100):
# Predict in batches. Both input and output are numpy arrays.
# If verbose == True, return all of global_prob, frame_prob and att
# If verbose == False, only return global_prob
result = []
for i in range(0, len(x1), batch_size):
with torch.no_grad():
input1 = Variable(torch.from_numpy(x1[i : i + batch_size])).cuda()
input2 = Variable(torch.from_numpy(x2[i : i + batch_size])).cuda()
output = self.forward(input1, input2)
# att = output[2].cpu().numpy()
# np.save('TALtransatt_515.npy', att)
# frame = output[1].cpu().numpy()
# np.save('TALtransframe_515.npy', frame)
# exit(0)
#if not verbose: output = output[:2]
result.append([var.data.cpu().numpy() for var in output])
result = tuple(numpy.concatenate(items) for items in zip(*result))
#if verbose:
# return result
return result[0], None
class videoModel(nn.Module):
"""Encoder of Transformer including self-attention and feed forward.
"""
def __init__(self, args):
super(videoModel, self).__init__()
self.__dict__.update(args.__dict__)
self.output_size = 527
self.embedding_size=1024
self.n_feature=2048
self.enc_layer = EncoderLayer(self.n_feature, self.embedding_size, dropout=self.transformer_dropout)
self.hidden1 = nn.Linear(self.n_feature, self.embedding_size)
self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.embedding_size, self.output_size)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        if self.pooling == 'att':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
def forward(self, x):
# x: BxTxE
b,t,e = x.shape
for _ in range(self.n_trans_layers):
x = self.enc_layer(x)
x = F.relu(self.hidden1(x))
if self.dropout > 0:
x_hat = F.dropout(x, p = self.dropout, training = self.training)
else:
x_hat = x
frame_prob = torch.sigmoid(self.fc_prob(x_hat)) # shape of frame_prob: (batch, time, output_size)
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x_hat), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
return global_prob, x, frame_prob, frame_att
def predict(self, x1, verbose = True, batch_size = 100):
# Predict in batches. Both input and output are numpy arrays.
# If verbose == True, return all of global_prob, frame_prob and att
# If verbose == False, only return global_prob
result = []
for i in range(0, len(x1), batch_size):
with torch.no_grad():
input = Variable(torch.from_numpy(x1[i : i + batch_size])).cuda()
output = self.forward(input)
# att = output[2].cpu().numpy()
# np.save('TALtransatt_515.npy', att)
# frame = output[1].cpu().numpy()
# np.save('TALtransframe_515.npy', frame)
# exit(0)
if not verbose: output = output[:2]
result.append([var.data.cpu().numpy() for var in output])
result = tuple(numpy.concatenate(items) for items in zip(*result))
if verbose:
return result
return result[0], result[1]
| 52,075 | 48.501901 | 298 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/AudioFFnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy
import math
import numpy as np
import re
from scipy import linalg
class FFNetInput(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config['input_size'], eps=config['layer_norm_eps'])
self.hidden_mapping = nn.Linear(config['input_size'], config['hidden_size'])
self.dropout = nn.Dropout(config['dropout_rate'])
def forward(self, x):
x = self.layer_norm(x)
x = self.hidden_mapping(x)
x = self.dropout(x)
return x
class FourierFFTLayer(nn.Module):
'''
Default fft
'''
def __init__(self):
super().__init__()
@torch.cuda.amp.autocast(enabled=False)
def forward(self, hidden_states, dim=-1):
return torch.fft.fft(hidden_states.float(), dim=dim).real
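# The mixing layer is parameter-free: it keeps only the real part of a 1-D FFT
# along the requested axis (the hidden dimension by default), so output shape
# equals input shape:
# layer = FourierFFTLayer()
# y = layer(torch.rand(2, 400, 64))  # -> (2, 400, 64), real-valued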
class FFNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.fft = FourierFFTLayer()
self.mixing_layer_norm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.feed_forward = nn.Linear(config['hidden_size'], config['intermediate_size'])
self.output_dense = nn.Linear(config['intermediate_size'], config['hidden_size'])
self.activation = nn.GELU()
self.mixing_layer_norm2 = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.feed_forward2 = nn.Linear(config['hidden_size'], config['intermediate_size'])
self.output_dense2 = nn.Linear(config['intermediate_size'], config['hidden_size'])
self.activation2 = nn.GELU()
self.output_layer_norm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.dropout = nn.Dropout(config['dropout_rate'])
def forward(self, hidden_states):
fft_output = self.fft(hidden_states, dim=-1)
fft_output = self.mixing_layer_norm(fft_output + hidden_states)
intermediate_output = self.feed_forward(fft_output)
intermediate_output = self.activation(intermediate_output)
output = self.output_dense(intermediate_output)
fft_output = self.fft(output, dim=-2)
fft_output = self.mixing_layer_norm2(fft_output + hidden_states)
intermediate_output = self.feed_forward2(fft_output)
intermediate_output = self.activation2(intermediate_output)
output = self.output_dense2(intermediate_output)
output = self.dropout(output)
output = self.output_layer_norm(output + fft_output)
return output
class FFNetEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([FFNetLayer(config) for _ in range(config['num_hidden_layers'])])
def forward(self, hidden_states):
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
return hidden_states
class FFNetPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        # `num_classes` was referenced here without being defined anywhere;
        # fall back to 527 (AudioSet) when the config does not carry it
        num_classes = config.get('num_classes', 527)
        self.att = nn.Linear(config['hidden_size'], num_classes)
        self.fc = nn.Linear(config['hidden_size'], num_classes)
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
# first_token_tensor = hidden_states[:, 0]
# pooled_output = self.dense(first_token_tensor)
# pooled_output = self.activation(pooled_output)
x = hidden_states
frame_prob = torch.sigmoid(self.fc(x))
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
frame_att = torch.sigmoid(self.att(x))
frame_att = torch.clamp(frame_att, 1e-7, 1 - 1e-7)
frame_att = frame_att / frame_att.sum(dim=1).unsqueeze(1)
global_prob = (frame_prob * frame_att).sum(dim=1)
#global_prob = torch.clamp(global_prob, 1e-7, 1 - 1e-7)
        return global_prob, None, None
class FFNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.input = FFNetInput(config)
self.encoder = FFNetEncoder(config)
self.pooler = FFNetPooler(config)
def forward(self, x):
input_output = self.input(x)
sequence_output = self.encoder(input_output)
pooled_output = self.pooler(sequence_output)
return sequence_output, pooled_output
def get_default_config(fourier_type="fft",
                       layer_norm_eps=1e-12,
                       dropout_rate=0.1):
    return {
        "num_hidden_layers": 10,
        "input_size": 64,
        "hidden_size": 128,
        "intermediate_size": 256,
        "fourier": fourier_type,  # was hard-coded to 'fft', silently ignoring the argument
        "layer_norm_eps": layer_norm_eps,
        "dropout_rate": dropout_rate,
    }
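# End-to-end smoke test under the default config; the (2, 527) output relies
# on the 527-class fallback assumed in FFNetPooler, since the default config
# carries no num_classes entry:
# model = get_ffnet()
# seq_out, (global_prob, _, _) = model(torch.rand(2, 400, 64))
# global_prob.shape == (2, 527)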
def get_ffnet():
config = get_default_config()
return FFNet(config) | 5,076 | 31.33758 | 100 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/pslaModels.py | import torch.nn as nn
import torch
from .HigherModels import *
from efficientnet_pytorch import EfficientNet
import torchvision
class ResNetAttention(nn.Module):
def __init__(self, args):
super(ResNetAttention, self).__init__()
self.__dict__.update(args.__dict__) # Instill all args into self
self.model = torchvision.models.resnet50(pretrained=args.imagenet_pretrain)
self.target_length = args.target_length
self.n_mels = args.n_mels
if args.imagenet_pretrain == False:
print('ResNet50 Model Trained from Scratch (ImageNet Pretraining NOT Used).')
else:
print('Now Use ImageNet Pretrained ResNet50 Model.')
self.model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# remove the original ImageNet classification layers to save space.
self.model.fc = torch.nn.Identity()
self.model.avgpool = torch.nn.Identity()
        # attention pooling module; the channel count must match the reshape
        # in forward(): 2048 channels for 128 mel bins, 832 for 64 mel bins
        att_channels = 2048 if args.n_mels == 128 else 832
        self.attention = Attention(
            att_channels,
            args.n_class,
            att_activation=args.att_activation,
            cla_activation=args.att_activation)
self.avgpool = nn.AvgPool2d((4, 1))
def forward(self, x):
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
batch_size = x.shape[0]
x = self.model(x)
if self.n_mels == 128:
x = x.reshape([batch_size, 2048, 4, self.n_mels//4 ]) #batch, 2048, 4, 32
elif self.n_mels == 64:
x = x.reshape([batch_size, 832, 4, self.n_mels//4 ])#batch, 832, 4, 16
x = self.avgpool(x)
x = x.transpose(2,3)
out, norm_att = self.attention(x)
return out
class MBNet(nn.Module):
def __init__(self, label_dim=527, pretrain=True):
super(MBNet, self).__init__()
self.model = torchvision.models.mobilenet_v2(pretrained=pretrain)
self.model.features[0][0] = torch.nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
self.model.classifier = torch.nn.Linear(in_features=1280, out_features=label_dim, bias=True)
def forward(self, x, nframes):
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
out = torch.sigmoid(self.model(x))
return out
class EffNetAttention(nn.Module):
def __init__(self, att_act='sigmoid', label_dim=527, b=0, pretrain=True, head_num=4):
super(EffNetAttention, self).__init__()
self.middim = [1280, 1280, 1408, 1536, 1792, 2048, 2304, 2560]
if pretrain == False:
print('EfficientNet Model Trained from Scratch (ImageNet Pretraining NOT Used).')
self.effnet = EfficientNet.from_name('efficientnet-b'+str(b), in_channels=1)
else:
print('Now Use ImageNet Pretrained EfficientNet-B{:d} Model.'.format(b))
self.effnet = EfficientNet.from_pretrained('efficientnet-b'+str(b), in_channels=1)
# multi-head attention pooling
if head_num > 1:
print('Model with {:d} attention heads'.format(head_num))
self.attention = MHeadAttention(
self.middim[b],
label_dim,
att_activation = att_act,
cla_activation= att_act)
# single-head attention pooling
elif head_num == 1:
print('Model with single attention heads')
self.attention = Attention(
self.middim[b],
label_dim,
att_activation = att_act,
cla_activation = att_act)
# mean pooling (no attention)
elif head_num == 0:
print('Model with mean pooling (NO Attention Heads)')
self.attention = MeanPooling(
self.middim[b],
label_dim,
att_activation = att_act,
cla_activation = att_act)
else:
raise ValueError('Attention head must be integer >= 0, 0=mean pooling, 1=single-head attention, >1=multi-head attention.')
self.avgpool = nn.AvgPool2d((4, 1))
#remove the original ImageNet classification layers to save space.
self.effnet._fc = nn.Identity()
def forward(self, x, nframes=1056):
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
x = self.effnet.extract_features(x)
x = self.avgpool(x)
x = x.transpose(2,3)
out, norm_att = self.attention(x)
return out
if __name__ == '__main__':
    input_tdim = 1056
    # ResNetAttention needs an argparse-style args namespace, so this smoke
    # test uses the EfficientNet variant with mean pooling and no ImageNet
    # weights (the line below was previously commented out while the broken
    # `ResNetAttention(pretrain=False)` call remained):
    psla_mdl = EffNetAttention(pretrain=False, b=0, head_num=0)
    # input a batch of 10 spectrograms, each with input_tdim time frames and 128 frequency bins
    test_input = torch.rand([10, input_tdim, 128])
    test_output = psla_mdl(test_input)
    # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
    print(test_output.shape) | 5,238 | 40.579365 | 134 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/linearModels.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import torchvision
import numpy
from torch.autograd import Variable  # used (as a deprecated no-op wrapper) in predict()
class LinearModel(nn.Module):
def __init__(self, n_layers=3, input_dim=64, hidden_dim=128, label_dim=527):
super(LinearModel, self).__init__()
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.label_dim = label_dim
self.pooling = 'att'
self.dropout = 0.1
self.linear = nn.ModuleList()
self.linear.append(nn.Linear(in_features=input_dim, out_features=hidden_dim, bias=True))
for i in range(self.n_layers):
self.linear.append(nn.LayerNorm(self.hidden_dim))
self.linear.append(nn.Linear(self.hidden_dim, self.hidden_dim))
self.fc_prob = nn.Linear(self.hidden_dim, self.label_dim)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.hidden_dim, self.label_dim)
def forward(self, x):
for i in range(len(self.linear)):
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = self.linear[i](x)
frame_prob = torch.sigmoid(self.fc_prob(x))
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
frame_att = F.softmax(self.fc_att(x), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
# return global_prob, frame_prob, frame_att
return global_prob
    def predict(self, x, verbose = True, batch_size = 100):
        # Predict in batches. Both input and output are numpy arrays.
        # If verbose == True, return every quantity produced by forward()
        # If verbose == False, only return global_prob
        # forward() currently returns a single tensor, so it is wrapped in a
        # tuple before collecting; iterating a bare tensor would loop over the
        # batch dimension instead of the returned quantities.
        result = []
        for i in range(0, len(x), batch_size):
            with torch.no_grad():
                input = Variable(torch.from_numpy(x[i : i + batch_size])).cuda()
                output = self.forward(input)
                if not isinstance(output, tuple): output = (output,)
                result.append([var.data.cpu().numpy() for var in output])
        result = tuple(numpy.concatenate(items) for items in zip(*result))
        return result if verbose else result[0]
# class ResNetAttention(nn.Module):
# def __init__(self, label_dim=527, pretrain=True):
# super(ResNetAttention, self).__init__()
# self.model = torchvision.models.resnet50(pretrained=False)
# if pretrain == False:
# print('ResNet50 Model Trained from Scratch (ImageNet Pretraining NOT Used).')
# else:
# print('Now Use ImageNet Pretrained ResNet50 Model.')
# self.model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# # remove the original ImageNet classification layers to save space.
# self.model.fc = torch.nn.Identity()
# self.model.avgpool = torch.nn.Identity()
# # attention pooling module
# self.attention = Attention(
# 2048,
# label_dim,
# att_activation='sigmoid',
# cla_activation='sigmoid')
# self.avgpool = nn.AvgPool2d((4, 1))
# def forward(self, x):
# # expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
# x = x.unsqueeze(1)
# x = x.transpose(2, 3)
# batch_size = x.shape[0]
# x = self.model(x)
# x = x.reshape([batch_size, 2048, 4, 32])
# x = self.avgpool(x)
# x = x.transpose(2,3)
# out, norm_att = self.attention(x)
# return out
# class MBNet(nn.Module):
# def __init__(self, label_dim=527, pretrain=True):
# super(MBNet, self).__init__()
# self.model = torchvision.models.mobilenet_v2(pretrained=pretrain)
# self.model.features[0][0] = torch.nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# self.model.classifier = torch.nn.Linear(in_features=1280, out_features=label_dim, bias=True)
# def forward(self, x, nframes):
# # expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
# x = x.unsqueeze(1)
# x = x.transpose(2, 3)
# out = torch.sigmoid(self.model(x))
# return out
# class EffNetAttention(nn.Module):
# def __init__(self, label_dim=527, b=0, pretrain=True, head_num=4):
# super(EffNetAttention, self).__init__()
# self.middim = [1280, 1280, 1408, 1536, 1792, 2048, 2304, 2560]
# if pretrain == False:
# print('EfficientNet Model Trained from Scratch (ImageNet Pretraining NOT Used).')
# self.effnet = EfficientNet.from_name('efficientnet-b'+str(b), in_channels=1)
# else:
# print('Now Use ImageNet Pretrained EfficientNet-B{:d} Model.'.format(b))
# self.effnet = EfficientNet.from_pretrained('efficientnet-b'+str(b), in_channels=1)
# # multi-head attention pooling
# if head_num > 1:
# print('Model with {:d} attention heads'.format(head_num))
# self.attention = MHeadAttention(
# self.middim[b],
# label_dim,
# att_activation='sigmoid',
# cla_activation='sigmoid')
# # single-head attention pooling
# elif head_num == 1:
# print('Model with single attention heads')
# self.attention = Attention(
# self.middim[b],
# label_dim,
# att_activation='sigmoid',
# cla_activation='sigmoid')
# # mean pooling (no attention)
# elif head_num == 0:
# print('Model with mean pooling (NO Attention Heads)')
# self.attention = MeanPooling(
# self.middim[b],
# label_dim,
# att_activation='sigmoid',
# cla_activation='sigmoid')
# else:
# raise ValueError('Attention head must be integer >= 0, 0=mean pooling, 1=single-head attention, >1=multi-head attention.')
# self.avgpool = nn.AvgPool2d((4, 1))
# #remove the original ImageNet classification layers to save space.
# self.effnet._fc = nn.Identity()
# def forward(self, x, nframes=1056):
# # expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
# x = x.unsqueeze(1)
# x = x.transpose(2, 3)
# x = self.effnet.extract_features(x)
# x = self.avgpool(x)
# x = x.transpose(2,3)
# out, norm_att = self.attention(x)
# return out
if __name__ == '__main__':
input_tdim = 1024
#ast_mdl = ResNetNewFullAttention(pretrain=False)
psla_mdl = LinearModel(input_dim=128)
    # input a batch of 10 spectrograms, each with 100 time frames and 128 frequency bins
test_input = torch.rand([10, input_tdim, 128])
test_output = psla_mdl(test_input)
    # output should be in shape [10, 527], i.e., 10 samples, each with predictions for 527 classes.
print(test_output.shape) | 7,213 | 40.94186 | 136 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/models/ast_models.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : ast_models.py
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
import os
import wget
os.environ['TORCH_HOME'] = '../../pretrained_models'
import timm
from timm.models.layers import to_2tuple,trunc_normal_
# override the timm package to relax the input shape constraint.
class PatchEmbed(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class ASTModel(nn.Module):
"""
The AST model.
:param label_dim: the label dimension, i.e., the number of total classes, it is 527 for AudioSet, 50 for ESC-50, and 35 for speechcommands v2-35
    :param fstride: the stride of patch splitting on the frequency dimension, for 16*16 patches, fstride=16 means no overlap, fstride=10 means overlap of 6
    :param tstride: the stride of patch splitting on the time dimension, for 16*16 patches, tstride=16 means no overlap, tstride=10 means overlap of 6
:param input_fdim: the number of frequency bins of the input spectrogram
:param input_tdim: the number of time frames of the input spectrogram
:param imagenet_pretrain: if use ImageNet pretrained model
:param audioset_pretrain: if use full AudioSet and ImageNet pretrained model
    :param model_size: the model size of AST, should be in [tiny224, small224, base224, base384]; base224 and base384 are the same architecture but are trained differently during ImageNet pretraining.
"""
def __init__(self, label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=True, audioset_pretrain=False, model_size='base384', verbose=True):
super(ASTModel, self).__init__()
assert timm.__version__ == '0.4.5', 'Please use timm == 0.4.5, the code might not be compatible with newer versions.'
if verbose == True:
print('---------------AST Model Summary---------------')
print('ImageNet pretraining: {:s}, AudioSet pretraining: {:s}'.format(str(imagenet_pretrain),str(audioset_pretrain)))
# override timm input shape restriction
timm.models.vision_transformer.PatchEmbed = PatchEmbed
# if AudioSet pretraining is not used (but ImageNet pretraining may still apply)
if audioset_pretrain == False:
if model_size == 'tiny224':
self.v = timm.create_model('vit_deit_tiny_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'small224':
self.v = timm.create_model('vit_deit_small_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base224':
self.v = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base384':
self.v = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=imagenet_pretrain)
else:
raise Exception('Model size must be one of tiny224, small224, base224, base384.')
self.original_num_patches = self.v.patch_embed.num_patches
            self.original_hw = int(self.original_num_patches ** 0.5)
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim), nn.Linear(self.original_embedding_dim, label_dim))
            # automatically get the intermediate shape
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
print(f'f_dim: {f_dim},t_dim: {t_dim}, num_patches: {num_patches}')
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
# the linear projection layer
new_proj = torch.nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
if imagenet_pretrain == True:
new_proj.weight = torch.nn.Parameter(torch.sum(self.v.patch_embed.proj.weight, dim=1).unsqueeze(1))
new_proj.bias = self.v.patch_embed.proj.bias
self.v.patch_embed.proj = new_proj
# the positional embedding
if imagenet_pretrain == True:
# get the positional embedding from deit model, skip the first two tokens (cls token and distillation token), reshape it to original 2D shape (24*24).
                new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, self.original_num_patches, self.original_embedding_dim).transpose(1, 2).reshape(1, self.original_embedding_dim, self.original_hw, self.original_hw)
# cut (from middle) or interpolate the second dimension of the positional embedding
                if t_dim <= self.original_hw:
                    new_pos_embed = new_pos_embed[:, :, :, int(self.original_hw / 2) - int(t_dim / 2): int(self.original_hw / 2) - int(t_dim / 2) + t_dim]
else:
                    new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(self.original_hw, t_dim), mode='bilinear')
# cut (from middle) or interpolate the first dimension of the positional embedding
                if f_dim <= self.original_hw:
                    new_pos_embed = new_pos_embed[:, :, int(self.original_hw / 2) - int(f_dim / 2): int(self.original_hw / 2) - int(f_dim / 2) + f_dim, :]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(f_dim, t_dim), mode='bilinear')
# flatten the positional embedding
new_pos_embed = new_pos_embed.reshape(1, self.original_embedding_dim, num_patches).transpose(1,2)
# concatenate the above positional embedding with the cls token and distillation token of the deit model.
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
print(f'pos_embedding reshaped shape: {self.v.pos_embed.shape}')
else:
# if not use imagenet pretrained model, just randomly initialize a learnable positional embedding
# TODO can use sinusoidal positional embedding instead
new_pos_embed = nn.Parameter(torch.zeros(1, self.v.patch_embed.num_patches + 2, self.original_embedding_dim))
self.v.pos_embed = new_pos_embed
trunc_normal_(self.v.pos_embed, std=.02)
# now load a model that is pretrained on both ImageNet and AudioSet
elif audioset_pretrain == True:
if audioset_pretrain == True and imagenet_pretrain == False:
raise ValueError('currently model pretrained on only audioset is not supported, please set imagenet_pretrain = True to use audioset pretrained model.')
if model_size != 'base384':
raise ValueError('currently only has base384 AudioSet pretrained model.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# if os.path.exists('/ocean/projects/iri120008p/billyli/ast/pretrained_models/audioset_10_10_0.4593.pth') == False:
# # this model performs 0.4593 mAP on the audioset eval set
# audioset_mdl_url = 'https://www.dropbox.com/s/cv4knew8mvbrnvq/audioset_0.4593.pth?dl=1'
# wget.download(audioset_mdl_url, out='../../pretrained_models/audioset_10_10_0.4593.pth')
            print('Loading the AudioSet pretrained checkpoint...')
sd = torch.load('../pretrained_models/audioset_10_10_0.4593.pth', map_location=device)
audio_model = ASTModel(label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=False, audioset_pretrain=False, model_size='base384', verbose=False)
audio_model = torch.nn.DataParallel(audio_model)
audio_model.load_state_dict(sd, strict=False)
self.v = audio_model.module.v
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim), nn.Linear(self.original_embedding_dim, label_dim))
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
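            # 1212 = 12 * 101 patches of the 10s AudioSet-pretrained model (fstride = tstride = 10)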
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, 1212, 768).transpose(1, 2).reshape(1, 768, 12, 101)
# if the input sequence length is larger than the original audioset (10s), then cut the positional embedding
if t_dim < 101:
new_pos_embed = new_pos_embed[:, :, :, 50 - int(t_dim/2): 50 - int(t_dim/2) + t_dim]
# otherwise interpolate
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(12, t_dim), mode='bilinear')
new_pos_embed = new_pos_embed.reshape(1, 768, num_patches).transpose(1, 2)
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
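    # infer the (frequency, time) patch-grid size by pushing a dummy spectrogram
    # through a conv with the requested strides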
def get_shape(self, fstride, tstride, input_fdim=128, input_tdim=1024):
test_input = torch.randn(1, 1, input_fdim, input_tdim)
print(f'input_fdim: {input_fdim},input_tdim: {input_tdim}')
test_proj = nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
test_out = test_proj(test_input)
print(f'out_shape: {test_out.shape}')
f_dim = test_out.shape[2]
t_dim = test_out.shape[3]
return f_dim, t_dim
@autocast()
def forward(self, x):
"""
:param x: the input spectrogram, expected shape: (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
:return: prediction
"""
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
# print("input shape:", x.shape)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
B = x.shape[0]
x = self.v.patch_embed(x)
cls_tokens = self.v.cls_token.expand(B, -1, -1)
dist_token = self.v.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
# print("x_inter shape: ", x.shape)
# print("pos_embedding shape: ", self.v.pos_embed.shape)
x = x + self.v.pos_embed
x = self.v.pos_drop(x)
for blk in self.v.blocks:
x = blk(x)
x = self.v.norm(x)
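        # average the CLS and distillation tokens (DeiT provides both) into the clip embedding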
x = (x[:, 0] + x[:, 1]) / 2
x = self.mlp_head(x)
return x
if __name__ == '__main__':
input_tdim = 100
ast_mdl = ASTModel(input_tdim=input_tdim)
    # input a batch of 10 spectrograms, each with 100 time frames and 128 frequency bins
    test_input = torch.rand([10, input_tdim, 128])
    test_output = ast_mdl(test_input)
    # output should be in shape [10, 527], i.e., 10 samples, each with predictions for 527 classes.
print(test_output.shape)
input_tdim = 256
ast_mdl = ASTModel(input_tdim=input_tdim,label_dim=50, audioset_pretrain=True)
    # input a batch of 10 spectrograms, each with 256 time frames and 128 frequency bins
    test_input = torch.rand([10, input_tdim, 128])
    test_output = ast_mdl(test_input)
    # output should be in shape [10, 50], i.e., 10 samples, each with predictions for 50 classes.
print(test_output.shape) | 12,236 | 57.831731 | 224 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/utilities/stats.py | import numpy as np
from scipy import stats
from sklearn import metrics
import torch
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
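# worked example: an AUC of 0.95 gives d' = ppf(0.95) * sqrt(2) ~ 1.645 * 1.414 ~ 2.33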
def calculate_stats(output, target):
"""Calculate statistics including mAP, AUC, etc.
Args:
output: 2d array, (samples_num, classes_num)
target: 2d array, (samples_num, classes_num)
Returns:
stats: list of statistic of each class.
"""
classes_num = target.shape[-1]
stats = []
    # Accuracy, only meaningful for single-label classification such as ESC-50, not for multi-label datasets such as AudioSet
acc = metrics.accuracy_score(np.argmax(target, 1), np.argmax(output, 1))
# Class-wise statistics
for k in range(classes_num):
# Average precision
avg_precision = metrics.average_precision_score(
target[:, k], output[:, k], average=None)
# AUC
auc = metrics.roc_auc_score(target[:, k], output[:, k], average=None)
# Precisions, recalls
(precisions, recalls, thresholds) = metrics.precision_recall_curve(
target[:, k], output[:, k])
# FPR, TPR
(fpr, tpr, thresholds) = metrics.roc_curve(target[:, k], output[:, k])
save_every_steps = 1000 # Sample statistics to reduce size
        stat_dict = {'precisions': precisions[0::save_every_steps],
'recalls': recalls[0::save_every_steps],
'AP': avg_precision,
'fpr': fpr[0::save_every_steps],
'fnr': 1. - tpr[0::save_every_steps],
'auc': auc,
# note acc is not class-wise, this is just to keep consistent with other metrics
'acc': acc
}
        stats.append(stat_dict)
return stats
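# minimal usage sketch (hypothetical arrays): for N clips and 527 classes,
#   stats = calculate_stats(output, target)   # output, target: (N, 527) numpy arrays
#   mAP = np.mean([s['AP'] for s in stats])
#   mAUC = np.mean([s['auc'] for s in stats])
# which is how the ensemble script below summarizes a model's performance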
| 1,819 | 29.847458 | 117 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/src/utilities/util.py | import math
import pickle
import numpy as np
import torch
import torch.nn as nn
import random
from collections import namedtuple
def calc_recalls(S):
"""
Computes recall at 1, 5, and 10 given a similarity matrix S.
By convention, rows of S are assumed to correspond to images and columns are captions.
"""
assert(S.dim() == 2)
assert(S.size(0) == S.size(1))
if isinstance(S, torch.autograd.Variable):
S = S.data
n = S.size(0)
A2I_scores, A2I_ind = S.topk(10, 0)
I2A_scores, I2A_ind = S.topk(10, 1)
A_r1 = AverageMeter()
A_r5 = AverageMeter()
A_r10 = AverageMeter()
I_r1 = AverageMeter()
I_r5 = AverageMeter()
I_r10 = AverageMeter()
for i in range(n):
A_foundind = -1
I_foundind = -1
for ind in range(10):
if A2I_ind[ind, i] == i:
I_foundind = ind
if I2A_ind[i, ind] == i:
A_foundind = ind
# do r1s
if A_foundind == 0:
A_r1.update(1)
else:
A_r1.update(0)
if I_foundind == 0:
I_r1.update(1)
else:
I_r1.update(0)
# do r5s
if A_foundind >= 0 and A_foundind < 5:
A_r5.update(1)
else:
A_r5.update(0)
if I_foundind >= 0 and I_foundind < 5:
I_r5.update(1)
else:
I_r5.update(0)
# do r10s
if A_foundind >= 0 and A_foundind < 10:
A_r10.update(1)
else:
A_r10.update(0)
if I_foundind >= 0 and I_foundind < 10:
I_r10.update(1)
else:
I_r10.update(0)
recalls = {'A_r1':A_r1.avg, 'A_r5':A_r5.avg, 'A_r10':A_r10.avg,
'I_r1':I_r1.avg, 'I_r5':I_r5.avg, 'I_r10':I_r10.avg}
#'A_meanR':A_meanR.avg, 'I_meanR':I_meanR.avg}
return recalls
def computeMatchmap(I, A):
assert(I.dim() == 3)
assert(A.dim() == 2)
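    # I: (D, H, W) image feature map, A: (D, T) audio features;
    # matchmap[h, w, t] is the dot product between image location (h, w) and audio frame t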
D = I.size(0)
H = I.size(1)
W = I.size(2)
T = A.size(1)
Ir = I.view(D, -1).t()
matchmap = torch.mm(Ir, A)
matchmap = matchmap.view(H, W, T)
return matchmap
def matchmapSim(M, simtype):
assert(M.dim() == 3)
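    # SISA / MISA / SIMA reduce the (H, W, T) matchmap to a scalar similarity:
    # SISA averages everything, MISA max-pools over the image grid before
    # averaging over time, SIMA max-pools over time before averaging over space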
if simtype == 'SISA':
return M.mean()
elif simtype == 'MISA':
M_maxH, _ = M.max(0)
M_maxHW, _ = M_maxH.max(0)
return M_maxHW.mean()
elif simtype == 'SIMA':
M_maxT, _ = M.max(2)
return M_maxT.mean()
else:
raise ValueError
def sampled_margin_rank_loss(image_outputs, audio_outputs, nframes, margin=1., simtype='MISA'):
"""
Computes the triplet margin ranking loss for each anchor image/caption pair
The impostor image/caption is randomly sampled from the minibatch
"""
assert(image_outputs.dim() == 4)
assert(audio_outputs.dim() == 3)
n = image_outputs.size(0)
loss = torch.zeros(1, device=image_outputs.device, requires_grad=True)
for i in range(n):
I_imp_ind = i
A_imp_ind = i
while I_imp_ind == i:
I_imp_ind = np.random.randint(0, n)
while A_imp_ind == i:
A_imp_ind = np.random.randint(0, n)
nF = nframes[i]
nFimp = nframes[A_imp_ind]
anchorsim = matchmapSim(computeMatchmap(image_outputs[i], audio_outputs[i][:, 0:nF]), simtype)
Iimpsim = matchmapSim(computeMatchmap(image_outputs[I_imp_ind], audio_outputs[i][:, 0:nF]), simtype)
Aimpsim = matchmapSim(computeMatchmap(image_outputs[i], audio_outputs[A_imp_ind][:, 0:nFimp]), simtype)
A2I_simdif = margin + Iimpsim - anchorsim
if (A2I_simdif.data > 0).all():
loss = loss + A2I_simdif
I2A_simdif = margin + Aimpsim - anchorsim
if (I2A_simdif.data > 0).all():
loss = loss + I2A_simdif
loss = loss / n
return loss
def compute_matchmap_similarity_matrix(image_outputs, audio_outputs, nframes, simtype='MISA'):
"""
Assumes image_outputs is a (batchsize, embedding_dim, rows, height) tensor
Assumes audio_outputs is a (batchsize, embedding_dim, 1, time) tensor
Returns similarity matrix S where images are rows and audios are along the columns
"""
assert(image_outputs.dim() == 4)
assert(audio_outputs.dim() == 3)
n = image_outputs.size(0)
S = torch.zeros(n, n, device=image_outputs.device)
for image_idx in range(n):
for audio_idx in range(n):
nF = max(1, nframes[audio_idx])
S[image_idx, audio_idx] = matchmapSim(computeMatchmap(image_outputs[image_idx], audio_outputs[audio_idx][:, 0:nF]), simtype)
return S
def compute_pooldot_similarity_matrix(image_outputs, audio_outputs, nframes):
"""
Assumes image_outputs is a (batchsize, embedding_dim, rows, height) tensor
Assumes audio_outputs is a (batchsize, embedding_dim, 1, time) tensor
Returns similarity matrix S where images are rows and audios are along the columns
S[i][j] is computed as the dot product between the meanpooled embeddings of
the ith image output and jth audio output
"""
assert(image_outputs.dim() == 4)
assert(audio_outputs.dim() == 4)
n = image_outputs.size(0)
imagePoolfunc = nn.AdaptiveAvgPool2d((1, 1))
pooled_image_outputs = imagePoolfunc(image_outputs).squeeze(3).squeeze(2)
audioPoolfunc = nn.AdaptiveAvgPool2d((1, 1))
pooled_audio_outputs_list = []
for idx in range(n):
nF = max(1, nframes[idx])
pooled_audio_outputs_list.append(audioPoolfunc(audio_outputs[idx][:, :, 0:nF]).unsqueeze(0))
pooled_audio_outputs = torch.cat(pooled_audio_outputs_list).squeeze(3).squeeze(2)
S = torch.mm(pooled_image_outputs, pooled_audio_outputs.t())
return S
def one_imposter_index(i, N):
imp_ind = random.randint(0, N - 2)
if imp_ind == i:
imp_ind = N - 1
return imp_ind
def basic_get_imposter_indices(N):
imposter_idc = []
for i in range(N):
# Select an imposter index for example i:
imp_ind = one_imposter_index(i, N)
imposter_idc.append(imp_ind)
return imposter_idc
def semihardneg_triplet_loss_from_S(S, margin):
"""
Input: Similarity matrix S as an autograd.Variable
Output: The one-way triplet loss from rows of S to columns of S. Impostors are taken
to be the most similar point to the anchor that is still less similar to the anchor
than the positive example.
You would need to run this function twice, once with S and once with S.t(),
in order to compute the triplet loss in both directions.
"""
assert(S.dim() == 2)
assert(S.size(0) == S.size(1))
N = S.size(0)
loss = torch.autograd.Variable(torch.zeros(1).type(S.data.type()), requires_grad=True)
# Imposter - ground truth
Sdiff = S - torch.diag(S).view(-1, 1)
eps = 1e-12
# All examples less similar than ground truth
mask = (Sdiff < -eps).type(torch.LongTensor)
maskf = mask.type_as(S)
# Mask out all examples >= gt with minimum similarity
Sp = maskf * Sdiff + (1 - maskf) * torch.min(Sdiff).detach()
# Find the index maximum similar of the remaining
_, idc = Sp.max(dim=1)
idc = idc.data.cpu()
# Vector mask: 1 iff there exists an example < gt
has_neg = (mask.sum(dim=1) > 0).data.type(torch.LongTensor)
# Random imposter indices
random_imp_ind = torch.LongTensor(basic_get_imposter_indices(N))
# Use hardneg if there exists an example < gt, otherwise use random imposter
imp_idc = has_neg * idc + (1 - has_neg) * random_imp_ind
# This could probably be vectorized too, but I haven't.
for i, imp in enumerate(imp_idc):
local_loss = Sdiff[i, imp] + margin
if (local_loss.data > 0).all():
loss = loss + local_loss
loss = loss / N
return loss
def sampled_triplet_loss_from_S(S, margin):
"""
Input: Similarity matrix S as an autograd.Variable
Output: The one-way triplet loss from rows of S to columns of S. Imposters are
randomly sampled from the columns of S.
You would need to run this function twice, once with S and once with S.t(),
in order to compute the triplet loss in both directions.
"""
assert(S.dim() == 2)
assert(S.size(0) == S.size(1))
N = S.size(0)
loss = torch.autograd.Variable(torch.zeros(1).type(S.data.type()), requires_grad=True)
# Imposter - ground truth
Sdiff = S - torch.diag(S).view(-1, 1)
imp_ind = torch.LongTensor(basic_get_imposter_indices(N))
# This could probably be vectorized too, but I haven't.
for i, imp in enumerate(imp_ind):
local_loss = Sdiff[i, imp] + margin
if (local_loss.data > 0).all():
loss = loss + local_loss
loss = loss / N
return loss
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(base_lr, lr_decay, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every lr_decay epochs"""
lr = base_lr * (0.1 ** (epoch // lr_decay))
print('now learning rate changed to {:f}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_learning_rate2(base_lr, lr_decay, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every lr_decay epochs"""
for param_group in optimizer.param_groups:
cur_lr = param_group['lr']
        print('current learning rate is {:f}'.format(cur_lr))
    lr = cur_lr * 0.1
print('now learning rate changed to {:f}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def load_progress(prog_pkl, quiet=False):
"""
load progress pkl file
Args:
prog_pkl(str): path to progress pkl file
Return:
progress(list):
epoch(int):
global_step(int):
best_epoch(int):
best_avg_r10(float):
"""
def _print(msg):
if not quiet:
print(msg)
with open(prog_pkl, "rb") as f:
prog = pickle.load(f)
epoch, global_step, best_epoch, best_avg_r10, _ = prog[-1]
_print("\nPrevious Progress:")
msg = "[%5s %7s %5s %7s %6s]" % ("epoch", "step", "best_epoch", "best_avg_r10", "time")
_print(msg)
return prog, epoch, global_step, best_epoch, best_avg_r10
def count_parameters(model):
return sum([p.numel() for p in model.parameters() if p.requires_grad])
PrenetConfig = namedtuple(
'PrenetConfig', ['input_size', 'hidden_size', 'num_layers', 'dropout'])
RNNConfig = namedtuple(
'RNNConfig',
['input_size', 'hidden_size', 'num_layers', 'dropout', 'residual'])
| 10,901 | 34.511401 | 140 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/egs/audioset/inference.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : jeffcheng
# @Reference: an inference script for a single audio clip, heavily based on demo.py and traintest.py
import os
import sys
import csv
import argparse
import numpy as np
import torch
import torchaudio
torchaudio.set_audio_backend("soundfile") # switch backend
basepath = os.path.dirname(os.path.dirname(sys.path[0]))
sys.path.append(basepath)
from src.models import ASTModel
# download pretrained model in this directory
os.environ['TORCH_HOME'] = '../pretrained_models'
def make_features(wav_name, mel_bins, target_length=1024):
waveform, sr = torchaudio.load(wav_name)
fbank = torchaudio.compliance.kaldi.fbank(
waveform, htk_compat=True, sample_frequency=sr, use_energy=False,
window_type='hanning', num_mel_bins=mel_bins, dither=0.0,
frame_shift=10)
n_frames = fbank.shape[0]
p = target_length - n_frames
if p > 0:
m = torch.nn.ZeroPad2d((0, 0, 0, p))
fbank = m(fbank)
elif p < 0:
fbank = fbank[0:target_length, :]
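    # normalize with the AudioSet fbank statistics (mean -4.2677393, std 4.5689974),
    # matching norm_stats used by the training and ensemble scripts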
fbank = (fbank - (-4.2677393)) / (4.5689974 * 2)
return fbank
def load_label(label_csv):
with open(label_csv, 'r') as f:
reader = csv.reader(f, delimiter=',')
lines = list(reader)
labels = []
ids = [] # Each label has a unique id such as "/m/068hy"
for i1 in range(1, len(lines)):
id = lines[i1][1]
label = lines[i1][2]
ids.append(id)
labels.append(label)
return labels
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser:'
'python inference --audio_path ./0OxlgIitVig.wav '
'--model_path ./pretrained_models/audioset_10_10_0.4593.pth')
parser.add_argument("--model_path", type=str, required=True,
help="the trained model you want to test")
parser.add_argument('--audio_path',
help='the audio you want to predict, sample rate 16k.',
type=str, required=True)
args = parser.parse_args()
label_csv = './data/class_labels_indices.csv' # label and indices for audioset data
# 1. make feature for predict
audio_path = args.audio_path
feats = make_features(audio_path, mel_bins=128) # shape(1024, 128)
    # make_features pads/truncates the spectrogram to 1024 time frames
input_tdim = feats.shape[0]
# 2. load the best model and the weights
checkpoint_path = args.model_path
ast_mdl = ASTModel(label_dim=527, input_tdim=input_tdim, imagenet_pretrain=False, audioset_pretrain=False)
print(f'[*INFO] load checkpoint: {checkpoint_path}')
checkpoint = torch.load(checkpoint_path, map_location='cuda')
audio_model = torch.nn.DataParallel(ast_mdl, device_ids=[0])
audio_model.load_state_dict(checkpoint)
audio_model = audio_model.to(torch.device("cuda:0"))
# 3. feed the data feature to model
    feats_data = feats.expand(1, input_tdim, 128)           # add a batch dimension: (1, input_tdim, 128)
audio_model.eval() # set the eval model
with torch.no_grad():
output = audio_model.forward(feats_data)
output = torch.sigmoid(output)
result_output = output.data.cpu().numpy()[0]
# 4. map the post-prob to label
labels = load_label(label_csv)
sorted_indexes = np.argsort(result_output)[::-1]
# Print audio tagging top probabilities
    print('[*INFO] prediction results:')
for k in range(10):
print('{}: {:.4f}'.format(np.array(labels)[sorted_indexes[k]],
result_output[sorted_indexes[k]]))
| 3,757 | 32.553571 | 110 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/egs/audioset/ensemble.py | # -*- coding: utf-8 -*-
# @Time : 3/8/22
# @Modified by : Juncheng B Li
# @Original Author : Yuan Gong
# @File : ensemble.py
# get the ensemble result
import os, sys, argparse
parentdir = str(os.path.abspath(os.path.join(__file__ ,"../../..")))+'/src'
sys.path.append(parentdir)
import dataloader
import models
from utilities import *
from traintest import train, validate
import numpy as np
from scipy import stats
import torch
eval_data_path = '/data/sls/scratch/yuangong/audioset/datafiles/eval_data.json'
def get_ensemble_res(mdl_list, base_path):
    # the first len(mdl_list) rows record the results of the single models, the last row records the result of the ensemble.
ensemble_res = np.zeros([len(mdl_list)+1, 3])
if os.path.exists(base_path) == False:
os.mkdir(base_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for model_idx, mdl in enumerate(mdl_list):
print('-----------------------')
print('now loading model {:d}: {:s}'.format(model_idx, mdl))
# sd = torch.load('/Users/yuan/Documents/ast/pretrained_models/audio_model_wa.pth', map_location=device)
sd = torch.load(mdl, map_location=device)
# get the time and freq stride of the pretrained model
fstride, tstride = int(mdl.split('/')[-1].split('_')[1]), int(mdl.split('/')[-1].split('_')[2].split('.')[0])
audio_model = models.ASTModel(fstride=fstride, tstride=tstride)
audio_model = torch.nn.DataParallel(audio_model)
audio_model.load_state_dict(sd, strict=False)
args.exp_dir = base_path
stats, _ = validate(audio_model, eval_loader, args, model_idx)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
dprime = d_prime(mAUC)
ensemble_res[model_idx, :] = [mAP, mAUC, dprime]
print("Model {:d} {:s} mAP: {:.6f}, AUC: {:.6f}, d-prime: {:.6f}".format(model_idx, mdl, mAP, mAUC, dprime))
# calculate the ensemble result
# get the ground truth label
target = np.loadtxt(base_path + '/predictions/target.csv', delimiter=',')
    # load one model's predictions to infer the output shape
prediction_sample = np.loadtxt(base_path + '/predictions/predictions_0.csv', delimiter=',')
# allocate memory space for the ensemble prediction
predictions_table = np.zeros([len(mdl_list) , prediction_sample.shape[0], prediction_sample.shape[1]])
    for model_idx in range(0, len(mdl_list)):
        predictions_table[model_idx, :, :] = np.loadtxt(base_path + '/predictions/predictions_' + str(model_idx) + '.csv', delimiter=',')
ensemble_predictions = np.mean(predictions_table, axis=0)
stats = calculate_stats(ensemble_predictions, target)
ensemble_mAP = np.mean([stat['AP'] for stat in stats])
ensemble_mAUC = np.mean([stat['auc'] for stat in stats])
ensemble_dprime = d_prime(ensemble_mAUC)
ensemble_res[-1, :] = [ensemble_mAP, ensemble_mAUC, ensemble_dprime]
print('---------------Ensemble Result Summary---------------')
for model_idx in range(len(mdl_list)):
print("Model {:d} {:s} mAP: {:.6f}, AUC: {:.6f}, d-prime: {:.6f}".format(model_idx, mdl_list[model_idx], ensemble_res[model_idx, 0], ensemble_res[model_idx, 1], ensemble_res[model_idx, 2]))
print("Ensemble {:d} Models mAP: {:.6f}, AUC: {:.6f}, d-prime: {:.6f}".format(len(mdl_list), ensemble_mAP, ensemble_mAUC, ensemble_dprime))
np.savetxt(base_path + '/ensemble_result.csv', ensemble_res, delimiter=',')
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
# dataloader settings
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
args.dataset='audioset'
args.data_eval= eval_data_path
args.label_csv='/data/sls/scratch/yuangong/ast/egs/audioset/class_labels_indices.csv'
args.loss_fn = torch.nn.BCEWithLogitsLoss()
norm_stats = {'audioset': [-4.2677393, 4.5689974], 'esc50': [-6.6268077, 5.358466],
'speechcommands': [-6.845978, 5.5654526]}
target_length = {'audioset': 1024, 'esc50': 512, 'speechcommands': 128}
noise = {'audioset': False, 'esc50': False, 'speechcommands': True}
val_audio_conf = {'num_mel_bins': 128, 'target_length': target_length[args.dataset], 'freqm': 0, 'timem': 0, 'mixup': 0, 'dataset': args.dataset, 'mode':'evaluation', 'mean':norm_stats[args.dataset][0], 'std':norm_stats[args.dataset][1], 'noise':False}
eval_loader = torch.utils.data.DataLoader(
dataloader.AudiosetDataset(args.data_eval, label_csv=args.label_csv, audio_conf=val_audio_conf),
batch_size=100, shuffle=False, num_workers=16, pin_memory=True)
# formal full ensemble, ensemble-S
mdl_list_s = ['/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_10_10_0.4495.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_10_10_0.4483.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_10_10_0.4475.pth']
# formal full ensemble, ensemble-M
mdl_list_m = ['/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_10_10_0.4495.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_10_10_0.4483.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_10_10_0.4475.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_12_12_0.4467.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_14_14_0.4431.pth',
'/data/sls/scratch/yuangong/ast/pretrained_models/ensemble/audioset_16_16_0.4422.pth']
# ensemble 3 models that are trained with the same settings but different random seeds
get_ensemble_res(mdl_list_s, './exp/ensemble_s')
# ensemble 6 models that are trained with different settings (3 with a stride of 10, the others with strides of 12, 14, and 16)
get_ensemble_res(mdl_list_m, './exp/ensemble_m') | 5,912 | 51.327434 | 252 | py |
AudioTaggingDoneRight | AudioTaggingDoneRight-main/egs/speechcommands/prep_sc.py | # -*- coding: utf-8 -*-
# @Time : 6/23/21 3:19 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : yuangong@mit.edu
# @File : prep_sc.py
import numpy as np
import json
import os
import wget
from torchaudio.datasets import SPEECHCOMMANDS
# prepare the data of the speechcommands dataset.
print('Now download and process speechcommands dataset, it will take a few moments...')
# download the speechcommands dataset
if os.path.exists('./data/speech_commands_v0.02') == False:
# we use the 35 class v2 dataset, which is used in torchaudio https://pytorch.org/audio/stable/_modules/torchaudio/datasets/speechcommands.html
sc_url = 'https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz'
wget.download(sc_url, out='./data/')
os.mkdir('./data/speech_commands_v0.02')
os.system('tar -xzvf ./data/speech_commands_v0.02.tar.gz -C ./data/speech_commands_v0.02')
os.remove('./data/speech_commands_v0.02.tar.gz')
# generate training list = all samples - validation_list - testing_list
if os.path.exists('./data/speech_commands_v0.02/train_list.txt')==False:
with open('./data/speech_commands_v0.02/validation_list.txt', 'r') as f:
val_list = f.readlines()
with open('./data/speech_commands_v0.02/testing_list.txt', 'r') as f:
test_list = f.readlines()
    val_test_list = set(test_list + val_list)  # a set makes the membership filtering below O(1) per sample
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]
def get_immediate_files(a_dir):
return [name for name in os.listdir(a_dir) if os.path.isfile(os.path.join(a_dir, name))]
base_path = './data/speech_commands_v0.02/'
all_cmds = get_immediate_subdirectories(base_path)
all_list = []
for cmd in all_cmds:
if cmd != '_background_noise_':
cmd_samples = get_immediate_files(base_path+'/'+cmd)
for sample in cmd_samples:
all_list.append(cmd + '/' + sample+'\n')
training_list = [x for x in all_list if x not in val_test_list]
with open('./data/speech_commands_v0.02/train_list.txt', 'w') as f:
f.writelines(training_list)
# The implementation of torchaudio has some bugs, use my own implementation, but the split results are exactly the same
# print('Now download and process speechcommands dataset, it will take a few moments...')
# class SubsetSC(SPEECHCOMMANDS):
# def __init__(self, subset: str = None):
# super().__init__("./data/", download=True)
#
# def load_list(filename):
# filepath = os.path.join(self._path, filename)
# with open(filepath) as fileobj:
# return [os.path.join(self._path, line.strip()) for line in fileobj]
#
# if subset == "validation":
# self._walker = load_list("validation_list.txt")
# elif subset == "testing":
# self._walker = load_list("testing_list.txt")
# elif subset == "training":
# excludes = load_list("validation_list.txt") + load_list("testing_list.txt")
# excludes = set(excludes)
# self._walker = [w for w in self._walker if w not in excludes]
# train_full_path = [w for w in self._walker if w not in excludes]
# gen_train_list(train_full_path)
#
# def gen_train_list(train_full_path):
# train_list = []
# for fullpath in train_full_path:
# fullpath = fullpath.split('/')[3:]
# fullpath = '/'.join(fullpath)+'\n'
# train_list.append(fullpath)
# with open('./data/SpeechCommands/speech_commands_v0.02/train_list.txt', 'w') as f:
# f.writelines(train_list)
# Create training and testing split of the data. We do not use validation in this tutorial. Function borrowed from torchaudio implementation.
#train_set = SubsetSC("training")
label_set = np.loadtxt('./data/speechcommands_class_labels_indices.csv', delimiter=',', dtype='str')
label_map = {}
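# the label names in the csv are stored as quoted strings (e.g. '"backward"');
# eval() below simply strips the quotes (ast.literal_eval would be a safer choice)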
for i in range(1, len(label_set)):
label_map[eval(label_set[i][2])] = label_set[i][0]
print(label_map)
# generate json files
if os.path.exists('./data/datafiles') == False:
os.mkdir('./data/datafiles')
base_path = './data/speech_commands_v0.02/'
for split in ['testing', 'validation', 'train']:
wav_list = []
with open(base_path+split+'_list.txt', 'r') as f:
filelist = f.readlines()
for file in filelist:
cur_label = label_map[file.split('/')[0]]
cur_path = os.path.abspath(os.getcwd()) + '/data/speech_commands_v0.02/' + file.strip()
cur_dict = {"wav": cur_path, "labels": '/m/spcmd'+cur_label.zfill(2)}
wav_list.append(cur_dict)
if split == 'train':
with open('./data/datafiles/speechcommand_train_data.json', 'w') as f:
json.dump({'data': wav_list}, f, indent=1)
if split == 'testing':
with open('./data/datafiles/speechcommand_eval_data.json', 'w') as f:
json.dump({'data': wav_list}, f, indent=1)
if split == 'validation':
with open('./data/datafiles/speechcommand_valid_data.json', 'w') as f:
json.dump({'data': wav_list}, f, indent=1)
print(split + ' data processing finished, total {:d} samples'.format(len(wav_list)))
print('Speechcommands dataset processing finished.')
| 5,419 | 43.065041 | 147 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/pruning_cifar10.py | from __future__ import division
import os, sys, shutil, time, random
import argparse
import torch
import torch.backends.cudnn as cudnn
import torchvision.datasets as dset
import torchvision.transforms as transforms
from utils import AverageMeter, RecorderMeter, time_string, convert_secs2time, timing
import models
import numpy as np
import pickle
from scipy.spatial import distance
import pdb
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Prunes and fine-tunes ResNet on CIFAR with norm- and geometric-median-based filter pruning',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('data_path', type=str, help='Path to dataset')
parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'imagenet', 'svhn', 'stl10'],
help='Choose between Cifar10/100 and ImageNet.')
parser.add_argument('--arch', metavar='ARCH', default='resnet18', choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)')
# Optimization options
parser.add_argument('--epochs', type=int, default=300, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size.')
parser.add_argument('--learning_rate', type=float, default=0.1, help='The Learning Rate.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1],
help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')
# Checkpoints
parser.add_argument('--print_freq', default=200, type=int, metavar='N', help='print frequency (default: 200)')
parser.add_argument('--save_path', type=str, default='./', help='Folder to save checkpoints and log.')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')
# random seed
parser.add_argument('--manualSeed', type=int, help='manual seed')
# compress rate
parser.add_argument('--rate_norm', type=float, default=0.9, help='the remaining ratio of pruning based on Norm')
parser.add_argument('--rate_dist', type=float, default=0.1, help='the reducing ratio of pruning based on Distance')
parser.add_argument('--layer_begin', type=int, default=1, help='index of the first layer to compress')
parser.add_argument('--layer_end', type=int, default=1, help='index of the last layer to compress')
parser.add_argument('--layer_inter', type=int, default=1, help='interval between indices of compressed layers')
parser.add_argument('--epoch_prune', type=int, default=1, help='prune the model every this many epochs')
parser.add_argument('--use_state_dict', dest='use_state_dict', action='store_true', help='use state dict or not')
parser.add_argument('--use_pretrain', dest='use_pretrain', action='store_true', help='use pre-trained model or not')
parser.add_argument('--pretrain_path', default='', type=str, help='..path of pre-trained model')
parser.add_argument('--dist_type', default='l2', type=str, choices=['l2', 'l1', 'cos'], help='distance type of GM')
args = parser.parse_args()
args.use_cuda = args.ngpu > 0 and torch.cuda.is_available()
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if args.use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
cudnn.benchmark = True
def main():
# Init logger
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
log = open(os.path.join(args.save_path, 'log_seed_{}.txt'.format(args.manualSeed)), 'w')
print_log('save path : {}'.format(args.save_path), log)
state = {k: v for k, v in args._get_kwargs()}
print_log(state, log)
print_log("Random Seed: {}".format(args.manualSeed), log)
print_log("python version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("torch version : {}".format(torch.__version__), log)
print_log("cudnn version : {}".format(torch.backends.cudnn.version()), log)
print_log("Norm Pruning Rate: {}".format(args.rate_norm), log)
print_log("Distance Pruning Rate: {}".format(args.rate_dist), log)
print_log("Layer Begin: {}".format(args.layer_begin), log)
print_log("Layer End: {}".format(args.layer_end), log)
print_log("Layer Inter: {}".format(args.layer_inter), log)
print_log("Epoch prune: {}".format(args.epoch_prune), log)
print_log("use pretrain: {}".format(args.use_pretrain), log)
print_log("Pretrain path: {}".format(args.pretrain_path), log)
print_log("Dist type: {}".format(args.dist_type), log)
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
if args.dataset == 'cifar10':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
elif args.dataset == 'cifar100':
mean = [x / 255 for x in [129.3, 124.1, 112.4]]
std = [x / 255 for x in [68.2, 65.4, 70.4]]
else:
assert False, "Unknow dataset : {}".format(args.dataset)
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data = dset.CIFAR10(args.data_path, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data = dset.CIFAR100(args.data_path, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
elif args.dataset == 'svhn':
train_data = dset.SVHN(args.data_path, split='train', transform=train_transform, download=True)
test_data = dset.SVHN(args.data_path, split='test', transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'stl10':
train_data = dset.STL10(args.data_path, split='train', transform=train_transform, download=True)
test_data = dset.STL10(args.data_path, split='test', transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'imagenet':
assert False, 'Do not finish imagenet code'
else:
assert False, 'Do not support dataset : {}'.format(args.dataset)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
print_log("=> creating model '{}'".format(args.arch), log)
# Init model, criterion, and optimizer
net = models.__dict__[args.arch](num_classes)
print_log("=> network :\n {}".format(net), log)
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
# define loss function (criterion) and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
if args.use_cuda:
net.cuda()
criterion.cuda()
if args.use_pretrain:
if os.path.isfile(args.pretrain_path):
print_log("=> loading pretrain model '{}'".format(args.pretrain_path), log)
else:
dir = '/data/yahe/cifar10_base/'
# dir = '/data/uts521/yang/progress/cifar10_base/'
whole_path = dir + 'cifar10_' + args.arch + '_base'
args.pretrain_path = whole_path + '/checkpoint.pth.tar'
print_log("Pretrain path: {}".format(args.pretrain_path), log)
pretrain = torch.load(args.pretrain_path)
if args.use_state_dict:
net.load_state_dict(pretrain['state_dict'])
else:
net = pretrain['state_dict']
recorder = RecorderMeter(args.epochs)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
recorder = checkpoint['recorder']
args.start_epoch = checkpoint['epoch']
if args.use_state_dict:
net.load_state_dict(checkpoint['state_dict'])
else:
net = checkpoint['state_dict']
optimizer.load_state_dict(checkpoint['optimizer'])
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
print_log("=> no checkpoint found at '{}'".format(args.resume), log)
else:
print_log("=> do not use any checkpoint for {} model".format(args.arch), log)
if args.evaluate:
time1 = time.time()
validate(test_loader, net, criterion, log)
time2 = time.time()
print('function took %0.3f ms' % ((time2 - time1) * 1000.0))
return
m = Mask(net)
m.init_length()
print("-" * 10 + "one epoch begin" + "-" * 10)
print("remaining ratio of pruning : Norm is %f" % args.rate_norm)
print("reducing ratio of pruning : Distance is %f" % args.rate_dist)
print("total remaining ratio is %f" % (args.rate_norm - args.rate_dist))
val_acc_1, val_los_1 = validate(test_loader, net, criterion, log)
print(" accu before is: %.3f %%" % val_acc_1)
m.model = net
m.init_mask(args.rate_norm, args.rate_dist, args.dist_type)
# m.if_zero()
m.do_mask()
m.do_similar_mask()
net = m.model
# m.if_zero()
if args.use_cuda:
net = net.cuda()
val_acc_2, val_los_2 = validate(test_loader, net, criterion, log)
print(" accu after is: %s %%" % val_acc_2)
# Main loop
start_time = time.time()
epoch_time = AverageMeter()
small_filter_index = []
large_filter_index = []
for epoch in range(args.start_epoch, args.epochs):
current_learning_rate = adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule)
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs - epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print_log(
'\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'.format(time_string(), epoch, args.epochs,
need_time, current_learning_rate) \
+ ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False),
100 - recorder.max_accuracy(False)), log)
# train for one epoch
train_acc, train_los = train(train_loader, net, criterion, optimizer, epoch, log, m)
# evaluate on validation set
val_acc_1, val_los_1 = validate(test_loader, net, criterion, log)
if epoch % args.epoch_prune == 0 or epoch == args.epochs - 1:
m.model = net
m.if_zero()
m.init_mask(args.rate_norm, args.rate_dist, args.dist_type)
m.do_mask()
m.do_similar_mask()
m.if_zero()
net = m.model
if args.use_cuda:
net = net.cuda()
val_acc_2, val_los_2 = validate(test_loader, net, criterion, log)
is_best = recorder.update(epoch, train_los, train_acc, val_los_2, val_acc_2)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': net,
'recorder': recorder,
'optimizer': optimizer.state_dict(),
}, is_best, args.save_path, 'checkpoint.pth.tar')
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
recorder.plot_curve(os.path.join(args.save_path, 'curve.png'))
log.close()
# train function (forward, backward, update)
def train(train_loader, model, criterion, optimizer, epoch, log, m):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.use_cuda:
            # `async` is a reserved word since Python 3.7; use the PyTorch >= 0.4 keyword instead
            target = target.cuda(non_blocking=True)
input = input.cuda()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
# Mask grad for iteration
m.do_grad_mask()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log(' Epoch: [{:03d}][{:03d}/{:03d}] '
'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Loss {loss.val:.4f} ({loss.avg:.4f}) '
'Prec@1 {top1.val:.3f} ({top1.avg:.3f}) '
'Prec@5 {top5.val:.3f} ({top5.avg:.3f}) '.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5) + time_string(), log)
print_log(
' **Train** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg),
log)
return top1.avg, losses.avg
def validate(val_loader, model, criterion, log):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (input, target) in enumerate(val_loader):
if args.use_cuda:
            target = target.cuda(non_blocking=True)
input = input.cuda()
        # `volatile` was removed in PyTorch 0.4; disable autograd for the forward pass instead
        with torch.no_grad():
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
print_log(' **Test** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg),
log)
return top1.avg, losses.avg
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
def save_checkpoint(state, is_best, save_path, filename):
filename = os.path.join(save_path, filename)
torch.save(state, filename)
if is_best:
bestname = os.path.join(save_path, 'model_best.pth.tar')
shutil.copyfile(filename, bestname)
def adjust_learning_rate(optimizer, epoch, gammas, schedule):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.learning_rate
assert len(gammas) == len(schedule), "length of gammas and schedule should be equal"
for (gamma, step) in zip(gammas, schedule):
if (epoch >= step):
lr = lr * gamma
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_obj(obj, name):
with open('obj/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
class Mask:
def __init__(self, model):
self.model_size = {}
self.model_length = {}
self.compress_rate = {}
self.distance_rate = {}
self.mat = {}
self.model = model
self.mask_index = []
self.filter_small_index = {}
self.filter_large_index = {}
self.similar_matrix = {}
self.norm_matrix = {}
def get_codebook(self, weight_torch, compress_rate, length):
weight_vec = weight_torch.view(length)
weight_np = weight_vec.cpu().numpy()
weight_abs = np.abs(weight_np)
weight_sort = np.sort(weight_abs)
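        # the threshold sits at the (1 - compress_rate) quantile of |w|, so the
        # mask below keeps the largest compress_rate fraction of weights by magnitude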
threshold = weight_sort[int(length * (1 - compress_rate))]
weight_np[weight_np <= -threshold] = 1
weight_np[weight_np >= threshold] = 1
weight_np[weight_np != 1] = 0
print("codebook done")
return weight_np
def get_filter_codebook(self, weight_torch, compress_rate, length):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_index = norm2_np.argsort()[:filter_pruned_num]
# norm1_sort = np.sort(norm1_np)
# threshold = norm1_sort[int (weight_torch.size()[0] * (1-compress_rate) )]
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(filter_index)):
codebook[filter_index[x] * kernel_length: (filter_index[x] + 1) * kernel_length] = 0
print("filter codebook done")
else:
pass
return codebook
def get_filter_index(self, weight_torch, compress_rate, length):
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
# norm1 = torch.norm(weight_vec, 1, 1)
# norm1_np = norm1.cpu().numpy()
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm2_np.argsort()[filter_pruned_num:]
filter_small_index = norm2_np.argsort()[:filter_pruned_num]
# norm1_sort = np.sort(norm1_np)
# threshold = norm1_sort[int (weight_torch.size()[0] * (1-compress_rate) )]
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
# print("filter index done")
        else:
            # non-conv weights have no filters to rank
            filter_small_index, filter_large_index = [], []
        return filter_small_index, filter_large_index
    # optimized for fast calculation
def get_filter_similar(self, weight_torch, compress_rate, distance_rate, length, dist_type="l2"):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
similar_pruned_num = int(weight_torch.size()[0] * distance_rate)
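            # two-stage selection: the smallest-norm filters are screened out first
            # (they are handled by the norm criterion); among the survivors, the
            # filters with the smallest summed pairwise distance -- i.e. closest to
            # the geometric median -- are marked as redundant below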
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
if dist_type == "l2" or "cos":
norm = torch.norm(weight_vec, 2, 1)
norm_np = norm.cpu().numpy()
elif dist_type == "l1":
norm = torch.norm(weight_vec, 1, 1)
norm_np = norm.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm_np.argsort()[filter_pruned_num:]
filter_small_index = norm_np.argsort()[:filter_pruned_num]
# # distance using pytorch function
# similar_matrix = torch.zeros((len(filter_large_index), len(filter_large_index)))
# for x1, x2 in enumerate(filter_large_index):
# for y1, y2 in enumerate(filter_large_index):
# # cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
# # similar_matrix[x1, y1] = cos(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0]
# pdist = torch.nn.PairwiseDistance(p=2)
# similar_matrix[x1, y1] = pdist(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0][0]
# # more similar with other filter indicates large in the sum of row
# similar_sum = torch.sum(torch.abs(similar_matrix), 0).numpy()
# distance using numpy function
indices = torch.LongTensor(filter_large_index).cuda()
weight_vec_after_norm = torch.index_select(weight_vec, 0, indices).cpu().numpy()
# for euclidean distance
if dist_type == "l2" or "l1":
similar_matrix = distance.cdist(weight_vec_after_norm, weight_vec_after_norm, 'euclidean')
elif dist_type == "cos": # for cos similarity
similar_matrix = 1 - distance.cdist(weight_vec_after_norm, weight_vec_after_norm, 'cosine')
similar_sum = np.sum(np.abs(similar_matrix), axis=0)
            # smallest summed distance == most similar to the remaining filters
            # (closest to the geometric median), so those are selected for pruning
similar_large_index = similar_sum.argsort()[similar_pruned_num:]
similar_small_index = similar_sum.argsort()[: similar_pruned_num]
similar_index_for_filter = [filter_large_index[i] for i in similar_small_index]
print('filter_large_index', filter_large_index)
print('filter_small_index', filter_small_index)
print('similar_sum', similar_sum)
print('similar_large_index', similar_large_index)
print('similar_small_index', similar_small_index)
print('similar_index_for_filter', similar_index_for_filter)
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(similar_index_for_filter)):
codebook[
similar_index_for_filter[x] * kernel_length: (similar_index_for_filter[x] + 1) * kernel_length] = 0
print("similar index done")
else:
pass
return codebook
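    # The block above is the distance-based (geometric-median) criterion:
    # after the smallest-norm filters are set aside, pairwise euclidean (or
    # cosine) distances between the remaining filters are summed per row, and
    # the filters with the smallest total distance -- the ones closest to the
    # geometric median of the layer -- are treated as redundant and masked out.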
def convert2tensor(self, x):
x = torch.FloatTensor(x)
return x
def init_length(self):
for index, item in enumerate(self.model.parameters()):
self.model_size[index] = item.size()
for index1 in self.model_size:
for index2 in range(0, len(self.model_size[index1])):
if index2 == 0:
self.model_length[index1] = self.model_size[index1][0]
else:
self.model_length[index1] *= self.model_size[index1][index2]
def init_rate(self, rate_norm_per_layer, rate_dist_per_layer):
for index, item in enumerate(self.model.parameters()):
self.compress_rate[index] = 1
self.distance_rate[index] = 1
for key in range(args.layer_begin, args.layer_end + 1, args.layer_inter):
self.compress_rate[key] = rate_norm_per_layer
self.distance_rate[key] = rate_dist_per_layer
# different setting for different architecture
        if args.arch == 'resnet20':
            last_index = 57
        elif args.arch == 'resnet32':
            last_index = 93
        elif args.arch == 'resnet56':
            last_index = 165
        elif args.arch == 'resnet110':
            last_index = 327
        else:
            raise ValueError("unsupported arch for layer indexing: %s" % args.arch)
        # stop before the last fc layer
self.mask_index = [x for x in range(0, last_index, 3)]
# self.mask_index = [x for x in range (0,330,3)]
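        # The stride of 3 assumes the model's parameters iterate as repeating
        # [conv.weight, bn.weight, bn.bias] triples, so every third parameter
        # is a conv weight; last_index stops the walk before the final fc layer.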
def init_mask(self, rate_norm_per_layer, rate_dist_per_layer, dist_type):
self.init_rate(rate_norm_per_layer, rate_dist_per_layer)
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
# mask for norm criterion
self.mat[index] = self.get_filter_codebook(item.data, self.compress_rate[index],
self.model_length[index])
self.mat[index] = self.convert2tensor(self.mat[index])
if args.use_cuda:
self.mat[index] = self.mat[index].cuda()
# # get result about filter index
# self.filter_small_index[index], self.filter_large_index[index] = \
# self.get_filter_index(item.data, self.compress_rate[index], self.model_length[index])
# mask for distance criterion
self.similar_matrix[index] = self.get_filter_similar(item.data, self.compress_rate[index],
self.distance_rate[index],
self.model_length[index], dist_type=dist_type)
self.similar_matrix[index] = self.convert2tensor(self.similar_matrix[index])
if args.use_cuda:
self.similar_matrix[index] = self.similar_matrix[index].cuda()
print("mask Ready")
def do_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.data.view(self.model_length[index])
b = a * self.mat[index]
item.data = b.view(self.model_size[index])
print("mask Done")
def do_similar_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.data.view(self.model_length[index])
b = a * self.similar_matrix[index]
item.data = b.view(self.model_size[index])
print("mask similar Done")
def do_grad_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.grad.data.view(self.model_length[index])
# reverse the mask of model
# b = a * (1 - self.mat[index])
b = a * self.mat[index]
b = b * self.similar_matrix[index]
item.grad.data = b.view(self.model_size[index])
# print("grad zero Done")
def if_zero(self):
for index, item in enumerate(self.model.parameters()):
if (index in self.mask_index):
# if index == 0:
a = item.data.view(self.model_length[index])
b = a.cpu().numpy()
                print(
                    "number of nonzero weights is %d, number of zeros is %d" % (np.count_nonzero(b), len(b) - np.count_nonzero(b)))
if __name__ == '__main__':
main()
| 28,893 | 43.452308 | 122 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/pruning_imagenet.py | # https://github.com/pytorch/vision/blob/master/torchvision/models/__init__.py
import argparse
import os, sys
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models
from utils import convert_secs2time, time_string, time_file_str, timing
# from models import print_log
import models
import random
import numpy as np
from scipy.spatial import distance
from collections import OrderedDict
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--save_dir', type=str, default='./', help='Folder to save checkpoints and log.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=200, type=int, metavar='N', help='print frequency (default: 200)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--use_pretrain', dest='use_pretrain', action='store_true', help='use pre-trained model or not')
# compress rate
parser.add_argument('--rate_norm', type=float, default=0.9, help='the remaining ratio of pruning based on Norm')
parser.add_argument('--rate_dist', type=float, default=0.1, help='the reducing ratio of pruning based on Distance')
parser.add_argument('--layer_begin', type=int, default=3, help='compress layer of model')
parser.add_argument('--layer_end', type=int, default=3, help='compress layer of model')
parser.add_argument('--layer_inter', type=int, default=1, help='compress layer of model')
parser.add_argument('--epoch_prune', type=int, default=1, help='epoch interval of pruning')
parser.add_argument('--skip_downsample', type=int, default=1, help='compress layer of model')
parser.add_argument('--use_sparse', dest='use_sparse', action='store_true', help='use sparse model as initial or not')
parser.add_argument('--sparse',
default='/data/yahe/imagenet/resnet50-rate-0.7/checkpoint.resnet50.2018-01-07-9744.pth.tar',
type=str, metavar='PATH', help='path of sparse model')
parser.add_argument('--lr_adjust', type=int, default=30, help='number of epochs that change learning rate')
parser.add_argument('--VGG_pruned_style', choices=["CP_5x", "Thinet_conv"],
                    help='pruned channel configuration to match for VGG models')
args = parser.parse_args()
args.use_cuda = torch.cuda.is_available()
args.prefix = time_file_str()
def main():
best_prec1 = 0
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
log = open(os.path.join(args.save_dir, '{}.{}.log'.format(args.arch, args.prefix)), 'w')
# version information
print_log("PyThon version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("PyTorch version : {}".format(torch.__version__), log)
print_log("cuDNN version : {}".format(torch.backends.cudnn.version()), log)
print_log("Vision version : {}".format(torchvision.__version__), log)
# create model
print_log("=> creating model '{}'".format(args.arch), log)
model = models.__dict__[args.arch](pretrained=args.use_pretrain)
if args.use_sparse:
model = import_sparse(model)
print_log("=> Model : {}".format(model), log)
print_log("=> parameter : {}".format(args), log)
print_log("Norm Pruning Rate: {}".format(args.rate_norm), log)
print_log("Distance Pruning Rate: {}".format(args.rate_dist), log)
print_log("Layer Begin: {}".format(args.layer_begin), log)
print_log("Layer End: {}".format(args.layer_end), log)
print_log("Layer Inter: {}".format(args.layer_inter), log)
print_log("Epoch prune: {}".format(args.epoch_prune), log)
print_log("Skip downsample : {}".format(args.skip_downsample), log)
print_log("Workers : {}".format(args.workers), log)
print_log("Learning-Rate : {}".format(args.lr), log)
print_log("Use Pre-Trained : {}".format(args.use_pretrain), log)
print_log("lr adjust : {}".format(args.lr_adjust), log)
print_log("VGG pruned style : {}".format(args.VGG_pruned_style), log)
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
print_log("=> no checkpoint found at '{}'".format(args.resume), log)
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, sampler=None)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, log)
return
filename = os.path.join(args.save_dir, 'checkpoint.{:}.{:}.pth.tar'.format(args.arch, args.prefix))
bestname = os.path.join(args.save_dir, 'best.{:}.{:}.pth.tar'.format(args.arch, args.prefix))
m = Mask(model)
m.init_length()
print("-" * 10 + "one epoch begin" + "-" * 10)
print("remaining ratio of pruning : Norm is %f" % args.rate_norm)
print("reducing ratio of pruning : Distance is %f" % args.rate_dist)
print("total remaining ratio is %f" % (args.rate_norm - args.rate_dist))
m.model = model
m.init_mask(args.rate_norm, args.rate_dist)
# m.if_zero()
m.do_mask()
m.do_similar_mask()
model = m.model
m.if_zero()
if args.use_cuda:
model = model.cuda()
val_acc_2 = validate(val_loader, model, criterion, log)
print(">>>>> accu after is: {:}".format(val_acc_2))
start_time = time.time()
epoch_time = AverageMeter()
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.val * (args.epochs - epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print_log(
' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s}'.format(args.arch, epoch, args.epochs, time_string(), need_time),
log)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, log, m)
# evaluate on validation set
val_acc_1 = validate(val_loader, model, criterion, log)
if epoch % args.epoch_prune == 0 or epoch == args.epochs - 1:
m.model = model
m.if_zero()
m.init_mask(args.rate_norm, args.rate_dist)
m.do_mask()
m.do_similar_mask()
m.if_zero()
model = m.model
if args.use_cuda:
model = model.cuda()
val_acc_2 = validate(val_loader, model, criterion, log)
# remember best prec@1 and save checkpoint
is_best = val_acc_2 > best_prec1
best_prec1 = max(val_acc_2, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best, filename, bestname)
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
log.close()
def import_sparse(model):
checkpoint = torch.load(args.sparse)
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
print("sparse_model_loaded")
return model
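# import_sparse above assumes every checkpoint key carries the DataParallel
# `module.` prefix and unconditionally drops the first 7 characters. A more
# defensive variant (a hypothetical helper, shown only as a sketch) strips
# the prefix only where it is actually present:
def strip_dataparallel_prefix(state_dict):
    cleaned = OrderedDict()
    for k, v in state_dict.items():
        name = k[len('module.'):] if k.startswith('module.') else k
        cleaned[name] = v
    return cleaned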
def train(train_loader, model, criterion, optimizer, epoch, log, m):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
# Mask grad for iteration
m.do_grad_mask()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5), log)
def validate(val_loader, model, criterion, log):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
        # volatile Variables are gone in modern PyTorch; disable autograd for
        # the forward pass with no_grad instead
        with torch.no_grad():
            output = model(input)
            loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5), log)
print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg), log)
return top1.avg
def save_checkpoint(state, is_best, filename, bestname):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, bestname)
def print_log(print_string, log):
print("{:}".format(print_string))
log.write('{:}\n'.format(print_string))
log.flush()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
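# Typical usage of the meter above, with illustrative numbers: two batches
# of 32 samples with losses 0.5 and 0.3 give avg == 0.4.
#   meter = AverageMeter()
#   meter.update(0.5, n=32)
#   meter.update(0.3, n=32)
#   assert abs(meter.avg - 0.4) < 1e-9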
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // args.lr_adjust))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
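# With the defaults (lr=0.1, lr_adjust=30) this reproduces the standard
# ImageNet step schedule: 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for
# 60-89, and so on.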
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
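# Illustrative check of the precision@k computation (hypothetical values):
# with output = torch.tensor([[0.1, 0.9], [0.8, 0.2]]) and
# target = torch.tensor([1, 1]), sample 0 is classified correctly and
# sample 1 is not, so accuracy(output, target, topk=(1,)) yields a single
# value of 50.0.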
class Mask:
def __init__(self, model):
self.model_size = {}
self.model_length = {}
self.compress_rate = {}
self.distance_rate = {}
self.mat = {}
self.model = model
self.mask_index = []
self.filter_small_index = {}
self.filter_large_index = {}
self.similar_matrix = {}
def get_codebook(self, weight_torch, compress_rate, length):
weight_vec = weight_torch.view(length)
weight_np = weight_vec.cpu().numpy()
weight_abs = np.abs(weight_np)
weight_sort = np.sort(weight_abs)
        threshold = weight_sort[int(length * (1 - compress_rate))]
        # build the mask from the magnitudes instead of mutating the weight
        # values in place: entries at or above the threshold are kept
        codebook = (weight_abs >= threshold).astype(np.float32)
        print("codebook done")
        return codebook
def get_filter_codebook(self, weight_torch, compress_rate, length):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
# norm1 = torch.norm(weight_vec, 1, 1)
# norm1_np = norm1.cpu().numpy()
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_index = norm2_np.argsort()[:filter_pruned_num]
# norm1_sort = np.sort(norm1_np)
# threshold = norm1_sort[int (weight_torch.size()[0] * (1-compress_rate) )]
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(filter_index)):
codebook[filter_index[x] * kernel_length: (filter_index[x] + 1) * kernel_length] = 0
print("filter codebook done")
elif len(weight_torch.size()) == 2:
weight_torch = weight_torch.view(weight_torch.size()[0], weight_torch.size()[1], 1, 1)
codebook = self.get_filter_codebook(weight_torch, compress_rate, length)
print("filter codebook for fc done")
else:
pass
return codebook
@timing
def get_filter_similar_old(self, weight_torch, compress_rate, distance_rate, length):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
similar_pruned_num = int(weight_torch.size()[0] * distance_rate)
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
# norm1 = torch.norm(weight_vec, 1, 1)
# norm1_np = norm1.cpu().numpy()
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm2_np.argsort()[filter_pruned_num:]
filter_small_index = norm2_np.argsort()[:filter_pruned_num]
print('weight_vec.size', weight_vec.size())
# distance using pytorch function
similar_matrix = torch.zeros((len(filter_large_index), len(filter_large_index)))
for x1, x2 in enumerate(filter_large_index):
for y1, y2 in enumerate(filter_large_index):
# cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
# similar_matrix[x1, y1] = cos(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0]
pdist = torch.nn.PairwiseDistance(p=2)
# print('weight_vec[x2].size', weight_vec[x2].size())
similar_matrix[x1, y1] = pdist(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0][0]
# print('weight_vec[x2].size after', weight_vec[x2].size())
# more similar with other filter indicates large in the sum of row
similar_sum = torch.sum(torch.abs(similar_matrix), 0).numpy()
# for distance similar: get the filter index with largest similarity == small distance
similar_large_index = similar_sum.argsort()[similar_pruned_num:]
similar_small_index = similar_sum.argsort()[: similar_pruned_num]
similar_index_for_filter = [filter_large_index[i] for i in similar_small_index]
print('filter_large_index', filter_large_index)
print('filter_small_index', filter_small_index)
print('similar_sum', similar_sum)
print('similar_large_index', similar_large_index)
print('similar_small_index', similar_small_index)
print('similar_index_for_filter', similar_index_for_filter)
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(similar_index_for_filter)):
codebook[
similar_index_for_filter[x] * kernel_length: (similar_index_for_filter[x] + 1) * kernel_length] = 0
print("similar index done")
else:
pass
return codebook
    # optimized for fast calculation
def get_filter_similar(self, weight_torch, compress_rate, distance_rate, length):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
similar_pruned_num = int(weight_torch.size()[0] * distance_rate)
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
# norm1 = torch.norm(weight_vec, 1, 1)
# norm1_np = norm1.cpu().numpy()
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm2_np.argsort()[filter_pruned_num:]
filter_small_index = norm2_np.argsort()[:filter_pruned_num]
# # distance using pytorch function
# similar_matrix = torch.zeros((len(filter_large_index), len(filter_large_index)))
# for x1, x2 in enumerate(filter_large_index):
# for y1, y2 in enumerate(filter_large_index):
# # cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
# # similar_matrix[x1, y1] = cos(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0]
# pdist = torch.nn.PairwiseDistance(p=2)
# similar_matrix[x1, y1] = pdist(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0][0]
# # more similar with other filter indicates large in the sum of row
# similar_sum = torch.sum(torch.abs(similar_matrix), 0).numpy()
# distance using numpy function
            indices = torch.LongTensor(filter_large_index)
            if args.use_cuda:
                indices = indices.cuda()
            weight_vec_after_norm = torch.index_select(weight_vec, 0, indices).cpu().numpy()
# for euclidean distance
similar_matrix = distance.cdist(weight_vec_after_norm, weight_vec_after_norm, 'euclidean')
# for cos similarity
# similar_matrix = 1 - distance.cdist(weight_vec_after_norm, weight_vec_after_norm, 'cosine')
similar_sum = np.sum(np.abs(similar_matrix), axis=0)
            # smallest summed distance == most similar to the remaining filters
            # (closest to the geometric median), so those are selected for pruning
similar_large_index = similar_sum.argsort()[similar_pruned_num:]
similar_small_index = similar_sum.argsort()[: similar_pruned_num]
similar_index_for_filter = [filter_large_index[i] for i in similar_small_index]
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(similar_index_for_filter)):
codebook[
similar_index_for_filter[x] * kernel_length: (similar_index_for_filter[x] + 1) * kernel_length] = 0
print("similar index done")
else:
pass
return codebook
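    # A minimal self-contained sketch of the same selection rule on a toy
    # conv weight (the name and shapes below are illustrative only): the two
    # filters with the smallest summed distance to all others -- the ones
    # nearest the geometric median -- are the ones this criterion prunes.
    @staticmethod
    def _geometric_median_sketch():
        toy = torch.randn(8, 3, 3, 3).view(8, -1).numpy()    # 8 flattened filters
        dist_matrix = distance.cdist(toy, toy, 'euclidean')  # pairwise distances
        total_dist = np.abs(dist_matrix).sum(axis=0)         # per-filter row sums
        return total_dist.argsort()[:2]                      # 2 most redundant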
def convert2tensor(self, x):
x = torch.FloatTensor(x)
return x
def init_length(self):
for index, item in enumerate(self.model.parameters()):
self.model_size[index] = item.size()
for index1 in self.model_size:
for index2 in range(0, len(self.model_size[index1])):
if index2 == 0:
self.model_length[index1] = self.model_size[index1][0]
else:
self.model_length[index1] *= self.model_size[index1][index2]
def init_rate(self, rate_norm_per_layer, rate_dist_per_layer):
if 'vgg' in args.arch:
cfg_official = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
cfg_CP_5x = [24, 22, 41, 51, 108, 89, 111, 184, 276, 228, 512, 512, 512]
# cfg = [32, 64, 128, 128, 256, 256, 256, 256, 256, 256, 256, 256, 256]
cfg_Thinet_conv = [32, 32, 64, 64, 128, 128, 128, 256, 256, 256, 512, 512, 512]
if args.VGG_pruned_style == "CP_5x":
cfg_now = cfg_CP_5x
elif args.VGG_pruned_style == "Thinet_conv":
cfg_now = cfg_Thinet_conv
cfg_index = 0
previous_cfg = True
for index, item in enumerate(self.model.named_parameters()):
self.compress_rate[index] = 1
if len(item[1].size()) == 4:
if not previous_cfg:
self.compress_rate[index] = rate_norm_per_layer
self.distance_rate[index] = rate_dist_per_layer
self.mask_index.append(index)
print(item[0], "self.mask_index", self.mask_index)
else:
self.compress_rate[index] = 1
self.distance_rate[index] = 1 - cfg_now[cfg_index] / item[1].size()[0]
self.mask_index.append(index)
print(item[0], "self.mask_index", self.mask_index, cfg_index, cfg_now[cfg_index])
cfg_index += 1
elif "resnet" in args.arch:
for index, item in enumerate(self.model.parameters()):
self.compress_rate[index] = 1
self.distance_rate[index] = 1
for key in range(args.layer_begin, args.layer_end + 1, args.layer_inter):
self.compress_rate[key] = rate_norm_per_layer
self.distance_rate[key] = rate_dist_per_layer
# different setting for different architecture
if args.arch == 'resnet18':
# last index include last fc layer
last_index = 60
skip_list = [21, 36, 51]
elif args.arch == 'resnet34':
last_index = 108
skip_list = [27, 54, 93]
elif args.arch == 'resnet50':
last_index = 159
skip_list = [12, 42, 81, 138]
elif args.arch == 'resnet101':
last_index = 312
skip_list = [12, 42, 81, 291]
elif args.arch == 'resnet152':
last_index = 465
skip_list = [12, 42, 117, 444]
self.mask_index = [x for x in range(0, last_index, 3)]
# skip downsample layer
if args.skip_downsample == 1:
for x in skip_list:
self.compress_rate[x] = 1
self.mask_index.remove(x)
print(self.mask_index)
else:
pass
def init_mask(self, rate_norm_per_layer, rate_dist_per_layer):
self.init_rate(rate_norm_per_layer, rate_dist_per_layer)
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
# mask for norm criterion
self.mat[index] = self.get_filter_codebook(item.data, self.compress_rate[index],
self.model_length[index])
self.mat[index] = self.convert2tensor(self.mat[index])
if args.use_cuda:
self.mat[index] = self.mat[index].cuda()
# mask for distance criterion
self.similar_matrix[index] = self.get_filter_similar(item.data, self.compress_rate[index],
self.distance_rate[index],
self.model_length[index])
self.similar_matrix[index] = self.convert2tensor(self.similar_matrix[index])
if args.use_cuda:
self.similar_matrix[index] = self.similar_matrix[index].cuda()
print("mask Ready")
def do_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.data.view(self.model_length[index])
b = a * self.mat[index]
item.data = b.view(self.model_size[index])
print("mask Done")
def do_similar_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.data.view(self.model_length[index])
b = a * self.similar_matrix[index]
item.data = b.view(self.model_size[index])
print("mask similar Done")
def do_grad_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.grad.data.view(self.model_length[index])
# reverse the mask of model
# b = a * (1 - self.mat[index])
b = a * self.mat[index]
b = b * self.similar_matrix[index]
item.grad.data = b.view(self.model_size[index])
# print("grad zero Done")
def if_zero(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
# if index in [x for x in range(args.layer_begin, args.layer_end + 1, args.layer_inter)]:
a = item.data.view(self.model_length[index])
b = a.cpu().numpy()
print("layer: %d, number of nonzero weight is %d, zero is %d" % (
index, np.count_nonzero(b), len(b) - np.count_nonzero(b)))
if __name__ == '__main__':
main()
| 30,109 | 43.020468 | 119 | py |