repo | file | code | file_length | avg_line_length | max_line_length | extension_type
|---|---|---|---|---|---|---|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/cwt_utils.py
|
# -*- coding: utf-8 -*-
import numpy as np
from scipy import integrate
from .common import WARN, assert_is_one_of, p2up
from .backend import torch, asnumpy
from ..configs import gdefaults
pi = np.pi
__all__ = [
'adm_ssq',
'adm_cwt',
'cwt_scalebounds',
'process_scales',
'infer_scaletype',
'make_scales',
'logscale_transition_idx',
'nv_from_scales',
'find_min_scale',
'find_max_scale',
'find_downsampling_scale',
'integrate_analytic',
'find_max_scale_alt',
'_process_fs_and_t',
]
def adm_ssq(wavelet):
"""Calculates the synchrosqueezing admissibility constant, the term
R_psi in Eq 15 of [1] (also see Eq 2.5 of [2]). Uses numeric integration.
integral(conj(wavelet(w)) / w, w=0..inf)
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications. G. Thakur,
E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet).fn
Css = integrate_analytic(lambda w: np.conj(asnumpy(wavelet(w))) / w)
Css = Css.real if abs(Css.imag) < 1e-15 else Css
return Css
def adm_cwt(wavelet):
"""Calculates the cwt admissibility constant as per Eq. (4.67) of [1].
Uses numeric integration.
integral(wavelet(w) * conj(wavelet(w)) / w, w=0..inf)
1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet).fn
Cpsi = integrate_analytic(lambda w: np.conj(asnumpy(wavelet(w))
) * asnumpy(wavelet(w)) / w)
Cpsi = Cpsi.real if abs(Cpsi.imag) < 1e-15 else Cpsi
return Cpsi
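# Example (illustration only, not part of the library): a minimal sketch of
# computing both admissibility constants for a named wavelet. Assumes
# `ssqueezepy` is installed and that 'morlet' is an available wavelet name.
def _demo_admissibility():
    from ssqueezepy import Wavelet
    wavelet = Wavelet('morlet')
    print("C_psi (CWT admissibility):", adm_cwt(wavelet))
    print("R_psi (SSQ admissibility):", adm_ssq(wavelet))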
def cwt_scalebounds(wavelet, N, preset=None, min_cutoff=None, max_cutoff=None,
cutoff=None, bin_loc=None, bin_amp=None, use_padded_N=True,
viz=False):
"""Finds range of scales for which `wavelet` is "well-behaved", as
determined by `preset`. Assumes `wavelet` is uni-modal (one peak in freq
domain); may be inaccurate otherwise.
`min_scale`: found such that freq-domain wavelet takes on `cutoff` of its max
value on the greatest bin.
- Lesser `cutoff` -> lesser `min_scale`, always
`max_scale`: search determined by `preset`:
- 'maximal': found such that freq-domain takes `bin_amp` of its max value
on the `bin_loc`-th (non-dc) bin
- Greater `bin_loc` or lesser `bin_amp` -> lesser `max_scale`, always
- 'minimal': found via a more involved procedure, independent of precise
bin location, but likely to omit the first several bins entirely; see
`help(utils.find_max_scale_alt)`.
- Greater `min_cutoff` -> lesser `max_scale`, generally
`viz==2` for more visuals, `viz==3` for even more.
# Arguments:
wavelet: `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain. See `help(cwt)`.
N: int
Length of wavelet to use.
min_cutoff, max_cutoff: float > 0 / None
Used to find max scale with `preset='minimal'`.
See `help(utils.find_max_scale_alt)`
cutoff: float / None
Used to find min scale. See `help(utils.find_min_scale)`
preset: str['maximal', 'minimal', 'naive'] / None
- 'maximal': yields a larger max and smaller min.
- 'minimal': strives to keep wavelet in "well-behaved" range of std_t
and std_w, but very high or very low frequencies' energies will be
under-represented. Is closer to MATLAB's default `cwtfreqbounds`.
- 'naive': returns (1, N), which is per original MATLAB Toolbox,
but a poor choice for most wavelet options.
- None: uses the passed `min_cutoff, max_cutoff, cutoff` values; any left
as None default to those of `preset='minimal'` for `min_cutoff, max_cutoff`
and of `preset='maximal'` for `cutoff`:
(min_cutoff, max_cutoff, cutoff) = (0.6, 0.8, -.5)
use_padded_N: bool (default True)
Whether to use `N=p2up(N)[0]` in computations. Typically `N == len(x)`,
but CWT pads to the next power of 2, which is the wavelet length actually
used and which often behaves significantly differently at scale extrema;
hence the recommended default True. Differs from passing `N=p2up(N)[0]`
with False only in the first visual if `viz`; see code.
# Returns:
min_scale, max_scale: float, float
Minimum & maximum scales.
"""
def _process_args(preset, min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp):
defaults = dict(min_cutoff=.6, max_cutoff=.8, cutoff=-.5)
if preset is not None:
if any((min_cutoff, max_cutoff, cutoff)):
WARN("`preset` will override `min_cutoff, max_cutoff, cutoff`")
elif preset == 'minimal' and any((bin_amp, bin_loc)):
WARN("`preset='minimal'` ignores `bin_amp` & `bin_loc`")
assert_is_one_of(preset, 'preset',
('maximal', 'minimal', 'naive'))
if preset in ('naive', 'maximal'):
min_cutoff, max_cutoff = None, None
if preset == 'maximal':
cutoff = -.5
else:
min_cutoff, max_cutoff, cutoff = defaults.values()
else:
if min_cutoff is None:
min_cutoff = defaults['min_cutoff']
elif min_cutoff <= 0:
raise ValueError("`min_cutoff` must be >0 (got %s)" % min_cutoff)
if max_cutoff is None:
max_cutoff = defaults['max_cutoff']
elif max_cutoff < min_cutoff:
raise ValueError("must have `max_cutoff > min_cutoff` "
"(got %s, %s)" % (max_cutoff, min_cutoff))
bin_loc = bin_loc or (2 if preset == 'maximal' else None)
bin_amp = bin_amp or (1 if preset == 'maximal' else None)
cutoff = cutoff if (cutoff is not None) else defaults['cutoff']
return min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp
def _viz():
_viz_cwt_scalebounds(wavelet, N=M, Nt=M, min_scale=min_scale,
max_scale=max_scale, cutoff=cutoff)
if viz >= 2:
wavelet_waveforms(wavelet, M, min_scale)
wavelet_waveforms(wavelet, M, max_scale)
if viz == 3:
scales = make_scales(M, min_scale, max_scale)
sweep_harea(wavelet, M, scales)
min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp = _process_args(
preset, min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp)
if preset == 'naive': # still _process_args for the NOTE
return 1, N
M = p2up(N)[0] if use_padded_N else N
min_scale = find_min_scale(wavelet, cutoff=cutoff)
if preset in ('minimal', None):
max_scale = find_max_scale_alt(wavelet, M, min_cutoff=min_cutoff,
max_cutoff=max_cutoff)
elif preset == 'maximal':
max_scale = find_max_scale(wavelet, M, bin_loc=bin_loc, bin_amp=bin_amp)
if viz:
_viz()
return min_scale, max_scale
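# Example (illustration only): comparing scale bounds across presets. A sketch,
# assuming `ssqueezepy` is installed and 'gmw' is a valid wavelet name.
def _demo_cwt_scalebounds():
    from ssqueezepy import Wavelet
    wavelet = Wavelet('gmw')
    for preset in ('minimal', 'maximal', 'naive'):
        mn, mx = cwt_scalebounds(wavelet, N=2048, preset=preset)
        print("%-8s -> min_scale=%.4g, max_scale=%.4g" % (preset, mn, mx))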
def _assert_positive_integer(g, name=''):
if not (g > 0 and float(g).is_integer()):
raise ValueError(f"'{name}' must be a positive integer (got {g})")
def process_scales(scales, N, wavelet=None, nv=None, get_params=False,
use_padded_N=True):
"""Makes scales if `scales` is a string, else validates the array,
and returns relevant parameters if requested.
- Ensures, if array, `scales` is 1D, or 2D with last dim == 1
- Ensures, if string, `scales` is one of ('log', 'linear')
- If `get_params`, also returns (`scaletype`, `nv`, `na`)
- `scaletype`: inferred from `scales` ('linear' or 'log') if array
- `nv`, `na`: computed newly only if not already passed
"""
def _process_args(scales, nv, wavelet):
preset = None
if isinstance(scales, str):
if ':' in scales:
scales, preset = scales.split(':')
elif scales == 'log-piecewise':
preset = 'maximal'
assert_is_one_of(scales, 'scales',
('log', 'log-piecewise', 'linear'))
if nv is None:
nv = 32
if wavelet is None:
raise ValueError("must set `wavelet` if `scales` isn't array")
scaletype = scales
elif isinstance(scales, (np.ndarray, torch.Tensor)):
scales = asnumpy(scales)
if scales.squeeze().ndim != 1:
raise ValueError("`scales`, if array, must be 1D "
"(got shape %s)" % str(scales.shape))
scaletype, _nv = infer_scaletype(scales)
if scaletype == 'log':
if nv is not None and _nv != nv:
raise Exception("`nv` used in `scales` differs from "
"`nv` passed (%s != %s)" % (_nv, nv))
nv = _nv
elif scaletype == 'log-piecewise':
nv = _nv # will be array
scales = scales.reshape(-1, 1) # ensure 2D for broadcast ops later
else:
raise TypeError("`scales` must be a string or Numpy array "
"(got %s)" % type(scales))
if nv is not None and not isinstance(nv, np.ndarray):
_assert_positive_integer(nv, 'nv')
nv = int(nv)
return scaletype, nv, preset
scaletype, nv, preset = _process_args(scales, nv, wavelet)
if isinstance(scales, (np.ndarray, torch.Tensor)):
scales = scales.reshape(-1, 1)
return (scales if not get_params else
(scales, scaletype, len(scales), nv))
#### Compute scales & params #############################################
min_scale, max_scale = cwt_scalebounds(wavelet, N=N, preset=preset,
use_padded_N=use_padded_N)
scales = make_scales(N, min_scale, max_scale, nv=nv, scaletype=scaletype,
wavelet=wavelet)
na = len(scales)
return (scales if not get_params else
(scales, scaletype, na, nv))
def infer_scaletype(scales):
"""Infer whether `scales` is linearly or exponentially distributed (if latter,
also infers `nv`). Used internally on `scales` and `ssq_freqs`.
Returns `scaletype` (one of: 'linear', 'log', 'log-piecewise') and `nv`.
"""
scales = asnumpy(scales).reshape(-1, 1)
if not isinstance(scales, np.ndarray):
raise TypeError("`scales` must be a numpy array (got %s)" % type(scales))
elif scales.dtype not in (np.float32, np.float64):
raise TypeError("`scales.dtype` must be np.float32 or np.float64 "
"(got %s)" % scales.dtype)
th_log = 4e-15 if scales.dtype == np.float64 else 8e-7
th_lin = th_log * 1e3 # less accurate for some reason
if np.mean(np.abs(np.diff(np.log(scales), 2, axis=0))) < th_log:
scaletype = 'log'
# round to avoid faulty float-int roundoffs
nv = int(np.round(1 / np.diff(np.log2(scales), axis=0)[0]))
elif np.mean(np.abs(np.diff(scales, 2, axis=0))) < th_lin:
scaletype = 'linear'
nv = None
elif logscale_transition_idx(scales) is None:
raise ValueError("could not infer `scaletype` from `scales`; "
"`scales` array must be linear or exponential. "
"(got diff(scales)=%s..." % np.diff(scales, axis=0)[:4])
else:
scaletype = 'log-piecewise'
nv = nv_from_scales(scales)
return scaletype, nv
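# Example (illustration only): `infer_scaletype` recovering `scaletype` and `nv`
# from arrays built the way `make_scales` builds them. A minimal sketch.
def _demo_infer_scaletype():
    nv = 16
    log_scales = 2 ** (np.arange(0, 6 * nv) / nv)   # 6 octaves, 16 voices/octave
    lin_scales = np.linspace(1., 64., 128)
    print(infer_scaletype(log_scales))   # -> ('log', 16)
    print(infer_scaletype(lin_scales))   # -> ('linear', None)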
def make_scales(N, min_scale=None, max_scale=None, nv=32, scaletype='log',
wavelet=None, downsample=None):
"""Recommended to first work out `min_scale` & `max_scale` with
`cwt_scalebounds`.
# Arguments:
N: int
`len(x)` or `len(x_padded)`.
min_scale, max_scale: float, float
Set scale range. Obtained e.g. from `utils.cwt_scalebounds`.
nv: int
Number of voices (wavelets) per octave.
scaletype: str['log', 'log-piecewise', 'linear']
Scaling kind to make.
`'log-piecewise'` uses `utils.find_downsampling_scale`.
wavelet: wavelets.Wavelet
Used only for `scaletype='log-piecewise'`.
downsample: int
Downsampling factor. Used only for `scaletype='log-piecewise'`.
# Returns:
scales: np.ndarray
"""
if scaletype == 'log-piecewise' and wavelet is None:
raise ValueError("must pass `wavelet` for `scaletype == 'log-piecewise'`")
if min_scale is None and max_scale is None and wavelet is not None:
min_scale, max_scale = cwt_scalebounds(wavelet, N, use_padded_N=True)
else:
min_scale = min_scale or 1
max_scale = max_scale or N
downsample = int(gdefaults('utils.cwt_utils.make_scales',
downsample=downsample))
# number of 2^-distributed scales spanning min to max
na = int(np.ceil(nv * np.log2(max_scale / min_scale)))
# floor to keep freq-domain peak at or to right of Nyquist
# min must be more precise, if need integer rounding do on max
mn_pow = int(np.floor(nv * np.log2(min_scale)))
mx_pow = mn_pow + na
if scaletype == 'log':
# TODO discretize per `logspace` instead
scales = 2 ** (np.arange(mn_pow, mx_pow) / nv)
elif scaletype == 'log-piecewise':
scales = 2 ** (np.arange(mn_pow, mx_pow) / nv)
idx = find_downsampling_scale(wavelet, scales)
if idx is not None:
# `+downsample - 1` starts `scales2` as continuing from `scales1`
# at `scales2`'s sampling rate; rest of ops are based on this design,
# such as `/nv` in ssq, which divides `scales2[0]` by `nv`, but if
# `scales2[0]` is one sample away from `scales1[-1]`, seems incorrect
scales1 = scales[:idx]
scales2 = scales[idx + downsample - 1::downsample]
scales = np.hstack([scales1, scales2])
elif scaletype == 'linear':
# TODO poor scheme (but there may not be any good one)
min_scale, max_scale = 2**(mn_pow/nv), 2**(mx_pow/nv)
na = int(np.ceil(max_scale / min_scale))
scales = np.linspace(min_scale, max_scale, na)
else:
raise ValueError("`scaletype` must be 'log' or 'linear'; "
"got: %s" % scaletype)
scales = scales.reshape(-1, 1) # ensure 2D for broadcast ops later
return scales
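# Example (illustration only): log-distributed scales from precomputed bounds.
# A sketch; the 'log-piecewise' path additionally requires `wavelet`
# (see `find_downsampling_scale`).
def _demo_make_scales():
    scales = make_scales(N=2048, min_scale=1., max_scale=256., nv=32,
                         scaletype='log')
    # shape (na, 1), with na = ceil(nv * log2(max_scale / min_scale)) = 256
    print(scales.shape, float(scales[0]), float(scales[-1]))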
def logscale_transition_idx(scales):
"""Returns `idx` that splits `scales` as `[scales[:idx], scales[idx:]]`.
"""
scales = asnumpy(scales)
scales_diff2 = np.abs(np.diff(np.log(scales), 2, axis=0))
idx = np.argmax(scales_diff2) + 2
diff2_max = scales_diff2.max()
# every other value must be zero, assert it is so
scales_diff2[idx - 2] = 0
th = 1e-14 if scales.dtype == np.float64 else 1e-6
if not np.any(diff2_max > 100*np.abs(scales_diff2).mean()):
# everything's zero, i.e. no transition detected
return None
elif not np.all(np.abs(scales_diff2) < th):
# other nonzero diffs found, more than one transition point
return None
else:
return idx
def nv_from_scales(scales):
"""Infers `nv` from `scales` assuming `2**` scales; returns array
of length `len(scales)` if `scaletype = 'log-piecewise'`.
"""
scales = asnumpy(scales)
logdiffs = 1 / np.diff(np.log2(scales), axis=0)
nv = np.vstack([logdiffs[:1], logdiffs])
idx = logscale_transition_idx(scales)
if idx is not None:
nv_transition_idx = np.argmax(np.abs(np.diff(nv, axis=0))) + 1
assert nv_transition_idx == idx, "%s != %s" % (nv_transition_idx, idx)
return nv
def find_min_scale(wavelet, cutoff=1):
"""Design the wavelet in frequency domain. `scale` is found to yield
`scale * xi(scale=1)` such that its last (largest) positive value evaluates
`wavelet` to `cutoff * max(psih)`. If cutoff > 0, it lands to right of peak,
else to left (i.e. peak excluded).
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet)
w_peak, peak = find_maximum(wavelet.fn)
if cutoff > 0:
# search to right of peak
step_start, step_limit = w_peak, 10*w_peak
else:
# search to left of peak
step_start, step_limit = 0, w_peak
w_cutoff, _ = find_first_occurrence(wavelet.fn, value=abs(cutoff) * peak,
step_start=step_start,
step_limit=step_limit)
min_scale = w_cutoff / pi
return min_scale
def find_max_scale(wavelet, N, bin_loc=1, bin_amp=1):
"""Finds `scale` such that freq-domain wavelet's amplitude is `bin_amp`
of maximum at `bin_loc` bin. Set `bin_loc=1` to ensure no lower frequencies
are lost, but likewise mind redundancy (see `make_scales`).
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# get scale at which full freq-domain wavelet is likely to fit
wc_ct = center_frequency(wavelet, kind='peak-ct', N=N)
scalec_ct = (4/pi) * wc_ct
# get freq_domain wavelet, positive half (asm. analytic)
psih = asnumpy(wavelet(scale=scalec_ct, N=N)[:N//2 + 1])
# get (radian) frequencies at which it was sampled
xi = asnumpy(wavelet.xifn(scalec_ct, N))
# get index of psih's peak
midx = np.argmax(psih)
# get index where `psih` attains `bin_amp` of its max value, to left of peak
w_bin = xi[np.where(psih[:midx] < psih.max()*bin_amp)[0][-1]]
# find scale such that wavelet amplitude is `bin_amp` of max at `bin_loc` bin
max_scale = scalec_ct * (w_bin / xi[bin_loc])
return max_scale
def find_downsampling_scale(wavelet, scales, span=5, tol=3, method='sum',
nonzero_th=.02, nonzero_tol=4., N=None, viz=False,
viz_last=False):
"""Find `scale` past which freq-domain wavelets are "excessively redundant",
redundancy determined by `span, tol, method, nonzero_th, nonzero_tol`.
# Arguments
wavelet: np.ndarray / wavelets.Wavelet
CWT wavelet.
scales: np.ndarray
CWT scales.
span: int
Number of wavelets to cross-correlate at each comparison.
tol: int
Tolerance value, works with `method`.
method: str['any', 'all', 'sum']
Condition relating `span` and `tol` to determine whether wavelets
are packed "too densely" at a given cross-correlation, relative
to "joint peak".
'any': at least one of the wavelet peaks lies `tol` or more bins away
'all': all wavelet peaks lie `tol` or more bins away
'sum': sum(distances between wavelet peaks and joint peak) > `tol`
nonzero_th: float
Wavelet points as a fraction of respective maxima to consider
nonzero (i.e. `np.where(psih > psih.max()*nonzero_th)`).
nonzero_tol: float
Average number of nonzero points in a `span` group of wavelets above
which testing is exempted. (e.g. if 5 wavelets have 25 nonzero points,
average is 5, so if `nonzero_tol=4`, the `scale` is skipped/passed).
N: int / None
Length of wavelet to use. Defaults to 2048, which generalizes well
along other defaults, since those params (`span`, `tol`, etc) would
need to be scaled alongside `N`.
viz: bool (default False)
Visualize every test for debug purposes.
viz_last: bool (default False)
Visualize the failing scale (recommended if trying by hand);
ignored if `viz=True`.
"""
def check_group(psihs_peaks, joint_peak, method, tol):
too_dense = False
distances = np.abs(psihs_peaks[1] - joint_peak)
if method == 'any':
dist_max = distances.max()
if dist_max < tol:
too_dense = True
elif method == 'all':
dist_satisfied = (distances > tol)
if not np.all(dist_satisfied):
too_dense = True
elif method == 'sum':
dist_sum = distances.sum()
if dist_sum < tol:
too_dense = True
return too_dense
def _viz(psihs, psihs_peaks, joint_peak, psihs_nonzeros, i):
max_nonzero_idx = np.where(psihs_nonzeros)[1].max()
plot(psihs.T[:max_nonzero_idx + 3], color='tab:blue',
vlines=(joint_peak, {'color': 'tab:red'}))
scat(psihs_peaks[1], psihs[psihs_peaks].T, color='tab:red', show=1)
distances = np.abs(psihs_peaks[1] - joint_peak)
print("(idx, peak distances from joint peak, joint peak) = "
"({}, {}, {})".format(i, distances, joint_peak), flush=True)
assert_is_one_of(method, 'method', ('any', 'all', 'sum'))
if not isinstance(wavelet, np.ndarray):
wavelet = Wavelet._init_if_not_isinstance(wavelet)
N = N or 2048
Psih = (wavelet if isinstance(wavelet, (np.ndarray, torch.Tensor)) else
wavelet(scale=scales, N=N))
Psih = asnumpy(Psih)
if len(Psih) != len(scales):
raise ValueError("len(Psih) != len(scales) "
"(%s != %s)" % (len(Psih), len(scales)))
# analytic, drop right half (all zeros)
Psih = Psih[:, :Psih.shape[1]//2]
n_scales = len(Psih)
n_groups = n_scales - span - 1
psihs_peaks = None
for i in range(n_groups):
psihs = Psih[i:i + span]
psihs_nonzeros = (psihs > nonzero_th*psihs.max(axis=1)[:, None])
avg_nonzeros = psihs_nonzeros.sum() / span
if avg_nonzeros > nonzero_tol:
continue
psihs_peaks = np.where(psihs == psihs.max(axis=1)[:, None])
joint_peak = np.argmax(np.prod(psihs, 0)) # mutually cross-correlate
too_dense = check_group(psihs_peaks, joint_peak, method, tol)
if too_dense:
break
if viz:
_viz(psihs, psihs_peaks, joint_peak, psihs_nonzeros, i)
if (viz or viz_last) and psihs_peaks is not None:
print(("Failing scale: (idx, scale) = ({}, {:.2f})\n"
"out of max: (idx, scale) = ({}, {:.2f})"
).format(i, float(scales[i]), len(scales) - 1, float(scales[-1])))
_viz(psihs, psihs_peaks, joint_peak, psihs_nonzeros, i)
return i if (i < n_groups - 1) else None
def integrate_analytic(int_fn, nowarn=False):
"""Assumes function that's zero for negative inputs (e.g. analytic wavelet),
decays toward right, and is unimodal: int_fn(t<0)=0, int_fn(t->inf)->0.
Integrates using the trapezoidal rule, effectively from 0 to inf.
Integrates near zero separately in log space (useful for e.g. 1/x).
"""
def _est_arr(mxlim, N):
t = np.linspace(mxlim, .1, N, endpoint=False)[::-1]
arr = int_fn(t)
max_idx = np.argmax(arr)
min_neglect_idx = _min_neglect_idx(np.abs(arr[max_idx:]),
th=1e-15) + max_idx
return arr, t, min_neglect_idx
def _find_convergent_array():
mxlims = [1, 20, 80, 160]
for m, mxlim in zip([1, 1, 4, 8], mxlims):
arr, t, min_neglect_idx = _est_arr(mxlim, N=10000*m)
# ensure sufficient decay between peak and right endpoint, and
# that `arr` isn't a flatline (contains wavelet peak)
if ((len(t) - min_neglect_idx > 1000 * m) and
np.sum(np.abs(arr)) > 1e-5):
break
else:
if int_nz < 1e-5:
raise Exception("Could not find converging or non-negligibly"
"-valued bounds of integration for `int_fn`")
elif not nowarn:
WARN("Integrated only from 1e-15 to 0.1 in logspace")
return arr[:min_neglect_idx], t[:min_neglect_idx]
def _integrate_near_zero():
# sample `intfn` more finely as it might be extremely narrow near zero.
# this still doesn't work well as float64 zeros the numerator before /w,
# but the true integral will be negligibly small most of the time anyway
# (.001 to .1 may not be negligible, however; better captured by logspace)
t = np.logspace(-15, -1, 1000)
arr = int_fn(t)
return integrate.trapz(arr, t)
int_nz = _integrate_near_zero()
arr, t = _find_convergent_array()
return integrate.trapz(arr, t) + int_nz
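# Example (illustration only): sanity-checking `integrate_analytic` on a decaying
# function with a known integral; exp(-w) over (0, inf) integrates to 1, so the
# printed value is expected to be close to 1 (a sketch, not a guarantee of the
# convergence heuristics above).
def _demo_integrate_analytic():
    print(integrate_analytic(lambda w: np.exp(-w)))  # expected ~1.0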
def find_max_scale_alt(wavelet, N, min_cutoff=.1, max_cutoff=.8):
"""
Design the wavelet in frequency domain. `scale` is found to yield
`scale * xi(scale=1)` such that two of its consecutive values land
symmetrically about the peak of `psih` (i.e. none *at* peak), while
still yielding `wavelet(w)` to fall between `min_cutoff`* and `max_cutoff`*
`max(psih)`. `scale` is selected such that the symmetry is attained
using smallest possible bins (closest to dc). Steps:
1. Find `w` (input value to `wavelet`) for which `wavelet` is maximized
(i.e. peak of `psih`).
2. Find two `w` such that `wavelet` attains `min_cutoff` and `max_cutoff`
times its maximum value, using `w` in previous step as upper bound.
3. Find `div_size` such that `xi` lands at both points of symmetry;
`div_size` == increment between successive values of
`xi = scale * xi(scale=1)`.
- `xi` begins at zero; together with the cutoff bounds and selecting
the smallest number of divisions/increments needed to reach the points
of symmetry, this guarantees a unique `scale`.
This yields a max `scale` that'll generally lie in 'nicely-behaved' region
of std_t; value can be used to fine-tune further.
See `visuals.sweep_std_t`.
"""
if max_cutoff <= 0 or min_cutoff <= 0:
raise ValueError("`max_cutoff` and `min_cutoff` must be positive "
"(got %s, %s)" % (max_cutoff, min_cutoff))
elif max_cutoff <= min_cutoff:
raise ValueError("must have `max_cutoff > min_cutoff` "
"(got %s, %s)" % (max_cutoff, min_cutoff))
wavelet = Wavelet._init_if_not_isinstance(wavelet)
w_peak, peak = find_maximum(wavelet.fn)
# we solve the inverse problem; instead of looking for spacing of xi
# that'd land symmetrically about psih's peak, we pick such points
# above a set ratio of peak's value and ensure they divide the line
# from left symmetry point to zero an integer number of times
# define all points of wavelet from cutoff to peak, left half
w_cutoff, _ = find_first_occurrence(wavelet.fn, value=min_cutoff * peak,
step_start=0, step_limit=w_peak)
w_ltp = np.arange(w_cutoff, w_peak, step=1/N) # left-to-peak
# consider every point on wavelet(w_ltp) (except peak) as candidate cutoff
# point, and pick earliest one that yields integer number of increments
# from left point of symmetry to origin
div_size = (w_peak - w_ltp[:-1]) * 2 # doubled so peak is skipped
n_divs = w_ltp[:-1] / div_size
# diff of modulus; first drop in n_divs is like [.98, .99, 0, .01], so at 0
# we've hit an integer, and n_divs grows ~linearly so behavior guaranteed
# -.8 arbitrary to be ~1 but <1
try:
idx = np.where(np.diff(n_divs % 1) < -.8)[0][0]
except IndexError:
raise Exception("Failed to find sufficiently-integer xi divisions; try "
"widening (min_cutoff, max_cutoff)")
# the div to base the scale on (angular bin spacing of scale*xi)
div_scale = div_size[idx + 1]
# div size of scale=1 (spacing between angular bins at scale=1)
w_1div = pi / (N / 2)
max_scale = div_scale / w_1div
return max_scale
def _process_fs_and_t(fs, t, N):
"""Ensures `t` is uniformly-spaced and of same length as `x` (==N)
and returns `fs` and `dt` based on it, or from defaults if `t` is None.
"""
if fs is not None and t is not None:
WARN("`t` will override `fs` (both were passed)")
if t is not None:
if len(t) != N:
# not explicitly used anywhere but ensures wrong `t` wasn't supplied
raise Exception("`t` must be of same length as `x` "
"(%s != %s)" % (len(t), N))
elif not np.mean(np.abs(np.diff(t, 2, axis=0))) < 1e-7: # float32 thr.
raise Exception("Time vector `t` must be uniformly sampled.")
fs = 1 / (t[1] - t[0])
else:
if fs is None:
fs = 1
elif fs <= 0:
raise ValueError("`fs` must be > 0")
dt = 1 / fs
return dt, fs, t
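# Example (illustration only): the `fs` / `t` precedence handled above, with a
# uniformly sampled time vector. A minimal sketch.
def _demo_process_fs_and_t():
    N = 128
    t = np.linspace(0., 1., N, endpoint=False)   # uniform, len(t) == N
    dt, fs, t = _process_fs_and_t(fs=None, t=t, N=N)
    print(dt, fs)   # -> 0.0078125 128.0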
#############################################################################
from ..algos import _min_neglect_idx, find_maximum, find_first_occurrence
from ..wavelets import Wavelet, center_frequency
from ..visuals import plot, scat, _viz_cwt_scalebounds, wavelet_waveforms
from ..visuals import sweep_harea
| 29,511
| 39.650138
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/common.py
|
# -*- coding: utf-8 -*-
import numpy as np
import logging
from textwrap import wrap
from .fft_utils import fft, ifft
logging.basicConfig(format='')
WARN = lambda msg: logging.warning("WARNING: %s" % msg)
NOTE = lambda msg: logging.warning("NOTE: %s" % msg) # else it's mostly ignored
pi = np.pi
EPS32 = np.finfo(np.float32).eps # machine epsilon
EPS64 = np.finfo(np.float64).eps
__all__ = [
"WARN",
"NOTE",
"pi",
"EPS32",
"EPS64",
"p2up",
"padsignal",
"trigdiff",
"mad",
"est_riskshrink_thresh",
"find_closest_parallel_is_faster",
"assert_is_one_of",
"_textwrap",
]
def p2up(n):
"""Calculates next power of 2, and left/right padding to center
the original `n` locations.
# Arguments:
n: int
Length of original (unpadded) signal.
# Returns:
n_up: int
Next power of 2.
n1: int
Left pad length.
n2: int
Right pad length.
"""
up = int(2**(1 + np.round(np.log2(n))))
n2 = int((up - n) // 2)
n1 = int(up - n - n2)
return up, n1, n2
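# Example (illustration only): padding lengths returned by `p2up`. Since
# `up = 2**(1 + round(log2(n)))`, an `n` that is already a power of 2 is doubled.
def _demo_p2up():
    print(p2up(128))   # -> (256, 64, 64)
    print(p2up(200))   # -> (512, 156, 156)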
def padsignal(x, padtype='reflect', padlength=None, get_params=False):
"""Pads signal and returns trim indices to recover original.
# Arguments:
x: np.ndarray / torch.Tensor
Input vector, 1D or 2D. 2D has time in dim1, e.g. `(n_inputs, time)`.
padtype: str
Pad scheme to apply on input. One of:
('reflect', 'symmetric', 'replicate', 'wrap', 'zero').
'zero' is most naive, while 'reflect' (default) partly mitigates
boundary effects. See [1] & [2].
Torch doesn't support all padding schemes, but `cwt` will still
pad it via NumPy.
padlength: int / None
Number of samples to pad input to (i.e. len(x_padded) == padlength).
Even: left = right, Odd: left = right + 1.
Defaults to next highest power of 2 w.r.t. `len(x)`.
# Returns:
xp: np.ndarray
Padded signal.
n_up: int
Next power of 2, or `padlength` if provided.
n1: int
Left pad length.
n2: int
Right pad length.
# References:
1. Signal extension modes. PyWavelets contributors
https://pywavelets.readthedocs.io/en/latest/ref/
signal-extension-modes.html
2. Wavelet Bases and Lifting Wavelets. H. Xiong.
http://min.sjtu.edu.cn/files/wavelet/
6-lifting%20wavelet%20and%20filterbank.pdf
"""
def _process_args(x, padtype):
is_numpy = bool(isinstance(x, np.ndarray))
supported = (('zero', 'reflect', 'symmetric', 'replicate', 'wrap')
if is_numpy else
('zero', 'reflect'))
assert_is_one_of(padtype, 'padtype', supported)
if not hasattr(x, 'ndim'):
raise TypeError("`x` must be a numpy array or torch Tensor "
"(got %s)" % type(x))
elif x.ndim not in (1, 2):
raise ValueError("`x` must be 1D or 2D (got x.ndim == %s)" % x.ndim)
return is_numpy
is_numpy = _process_args(x, padtype)
N = x.shape[-1]
if padlength is None:
# pad up to the nearest power of 2
n_up, n1, n2 = p2up(N)
else:
n_up = padlength
if abs(padlength - N) % 2 == 0:
n1 = n2 = (n_up - N) // 2
else:
n2 = (n_up - N) // 2
n1 = n2 + 1
n_up, n1, n2 = int(n_up), int(n1), int(n2)
# set functional spec
if x.ndim == 1:
pad_width = (n1, n2)
elif x.ndim == 2:
pad_width = ([(0, 0), (n1, n2)] if is_numpy else
(n1, n2))
# comments use (n=4, n1=4, n2=3) as example, but this combination can't occur
if is_numpy:
if padtype == 'zero':
# [1,2,3,4] -> [0,0,0,0, 1,2,3,4, 0,0,0]
xp = np.pad(x, pad_width)
elif padtype == 'reflect':
# [1,2,3,4] -> [3,4,3,2, 1,2,3,4, 3,2,1]
xp = np.pad(x, pad_width, mode='reflect')
elif padtype == 'replicate':
# [1,2,3,4] -> [1,1,1,1, 1,2,3,4, 4,4,4]
xp = np.pad(x, pad_width, mode='edge')
elif padtype == 'wrap':
# [1,2,3,4] -> [1,2,3,4, 1,2,3,4, 1,2,3]
xp = np.pad(x, pad_width, mode='wrap')
elif padtype == 'symmetric':
# [1,2,3,4] -> [4,3,2,1, 1,2,3,4, 4,3,2]
if x.ndim == 1:
xp = np.hstack([x[::-1][-n1:], x, x[::-1][:n2]])
elif x.ndim == 2:
xp = np.hstack([x[:, ::-1][:, -n1:], x, x[:, ::-1][:, :n2]])
else:
import torch
mode = 'constant' if padtype == 'zero' else 'reflect'
if x.ndim == 1:
xp = torch.nn.functional.pad(x[None], pad_width, mode)[0]
else:
xp = torch.nn.functional.pad(x, pad_width, mode)
return (xp, n_up, n1, n2) if get_params else xp
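# Example (illustration only): reflect-padding a short signal and recovering the
# original via the returned trim indices. A minimal sketch.
def _demo_padsignal():
    x = np.arange(6, dtype=np.float64)
    xp, n_up, n1, n2 = padsignal(x, padtype='reflect', get_params=True)
    print(xp.shape, (n_up, n1, n2))
    assert np.allclose(xp[n1:n1 + len(x)], x)   # original is recoverable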
def trigdiff(A, fs=1., padtype=None, rpadded=None, N=None, n1=None, window=None,
transform='cwt'):
"""Trigonometric / frequency-domain differentiation; see `difftype` in
`help(ssq_cwt)`. Used internally by `ssq_cwt` with `order > 0`.
For `'cwt'`: transforms `A` to the frequency domain, multiplies by
`1j * xi * fs`, then transforms back.
# Arguments:
A: np.ndarray
2D array to differentiate (or 3D, batched).
fs: float
Sampling frequency, used to scale derivative to physical units.
padtype: str / None
Whether to pad `A` (along dim1) before differentiating.
rpadded: bool (default None)
Whether `A` is already padded. Defaults to False.
Must pass `N` if True.
N: int
Length of unpadded signal (i.e. `A.shape[1]`).
n1: int
Will trim differentiated array as `A_diff[:, n1:n1+N]` (un-padding).
transform: str['cwt', 'stft']
Whether `A` stems from CWT or STFT, which changes how differentiation
is done. `'stft'` currently not supported.
"""
from ..wavelets import _xifn
from . import backend as S
def _process_args(A, rpadded, padtype, N, transform, window):
if transform == 'stft':
raise NotImplementedError("`transform='stft'` is currently not "
"supported.")
assert isinstance(A, np.ndarray) or S.is_tensor(A), type(A)
assert A.ndim in (2, 3)
if rpadded and N is None:
raise ValueError("must pass `N` if `rpadded`")
if transform == 'stft' and window is None:
raise ValueError("`transform='stft'` requires `window`")
rpadded = rpadded or False
padtype = padtype or ('reflect' if not rpadded else None)
return rpadded, padtype
rpadded, padtype = _process_args(A, rpadded, padtype, N, transform, window)
if padtype is not None:
A, _, n1, *_ = padsignal(A, padtype, get_params=True)
if transform == 'cwt':
xi = S.asarray(_xifn(1, A.shape[-1]), A.dtype)
A_freqdom = fft(A, axis=-1, astensor=True)
A_diff = ifft(A_freqdom * 1j * xi * fs, axis=-1, astensor=True)
else:
# this requires us to first fully invert STFT(x), then `buffer(x)`,
# then compute `diff_window`, which isn't hard to implement;
# last of these is done
# wf = fft(S.asarray(window, A.dtype))
# xi = S.asarray(_xifn(1, len(window))[None], A.dtype)
# if len(window) % 2 == 0:
# xi[len(window) // 2] = 0
# reshape = (-1, 1) if A.ndim == 2 else (1, -1, 1)
# diff_window = ifft(wf * 1j * xi).real.reshape(*reshape)
pass
if rpadded or padtype is not None:
if N is None:
N = A.shape[-1]
if n1 is None:
_, n1, _ = p2up(N)
idx = ((slice(None), slice(n1, n1 + N)) if A.ndim == 2 else
(slice(None), slice(None), slice(n1, n1 + N)))
A_diff = A_diff[idx]
if S.is_tensor(A_diff):
A_diff = A_diff.contiguous()
return A_diff
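# Example (illustration only): the core idea behind `trigdiff` on a plain 1D
# signal, written with NumPy directly. A sketch: multiplying the spectrum by
# `1j * xi * fs` recovers the derivative; for cos(2*pi*f0*t) that derivative is
# -2*pi*f0*sin(2*pi*f0*t). Assumes `_xifn` gives angular frequencies per sample,
# as used above.
def _demo_spectral_diff():
    from ssqueezepy.wavelets import _xifn
    N, fs = 256, 256.
    t = np.arange(N) / fs
    x = np.cos(2*np.pi * 8 * t)
    xi = _xifn(1, N)
    dx = np.fft.ifft(np.fft.fft(x) * 1j * xi * fs).real
    print(np.max(np.abs(dx - (-2*np.pi*8) * np.sin(2*np.pi*8 * t))))  # ~0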
def est_riskshrink_thresh(Wx, nv):
"""Estimate the RiskShrink hard thresholding level, based on [1].
This has a denoising effect, but risks losing much of the signal; it's larger
the more high-frequency content there is, even if not noise.
# Arguments:
Wx: np.ndarray
CWT of a signal (see `cwt`).
nv: int
Number of voices used in CWT (see `cwt`).
# Returns:
gamma: float
The RiskShrink hard thresholding estimate.
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
est_riskshrink_thresh.m
"""
N = Wx.shape[1]
Wx_fine = np.abs(Wx[:nv])
gamma = 1.4826 * np.sqrt(2 * np.log(N)) * mad(Wx_fine)
return gamma
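# Example (illustration only): hard-thresholding a CWT with the RiskShrink
# estimate. A sketch, assuming `ssqueezepy` is installed and that `cwt` returns
# `(Wx, scales)`.
def _demo_riskshrink():
    from ssqueezepy import cwt
    x = np.random.randn(2048)
    Wx, scales = cwt(x, 'gmw', nv=32)
    gamma = est_riskshrink_thresh(Wx, nv=32)
    Wx_thresh = Wx * (np.abs(Wx) >= gamma)
    print(gamma, np.mean(np.abs(Wx_thresh) > 0))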
def find_closest_parallel_is_faster(shape, dtype='float32', trials=7, verbose=1):
"""Returns True if `find_closest(, parallel=True)` is faster, as averaged
over `trials` trials on dummy data.
"""
from timeit import timeit
from ..algos import find_closest
a = np.abs(np.random.randn(*shape).astype(dtype))
v = np.random.uniform(0, len(a), len(a)).astype(dtype)
t0 = timeit(lambda: find_closest(a, v, parallel=False), number=trials)
t1 = timeit(lambda: find_closest(a, v, parallel=True), number=trials)
if verbose:
print("Parallel avg.: {} sec\nNon-parallel avg.: {} sec".format(
t1 / trials, t0 / trials))
return t1 > t0
def mad(data, axis=None):
"""Mean absolute deviation"""
return np.mean(np.abs(data - np.mean(data, axis)), axis)
def assert_is_one_of(x, name, supported, e=ValueError):
if x not in supported:
raise e("`{}` must be one of: {} (got {})".format(
name, ', '.join(supported), x))
def _textwrap(txt, wrap_len=50):
"""Preserves line breaks and includes `'\n'.join()` step."""
return '\n'.join(['\n'.join(
wrap(line, wrap_len, break_long_words=False, replace_whitespace=False))
for line in txt.splitlines() if line.strip() != ''])
| 10,463
| 32.43131
| 81
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/gpu_utils.py
|
# -*- coding: utf-8 -*-
import numpy as np
from collections import namedtuple
from string import Template
from .backend import torch, cp
Stream = namedtuple('Stream', ['ptr'])
def _run_on_gpu(kernel, grid, block, *args, **kwargs):
kernel_name = kernel.split('void ')[1].split('(')[0]
fn = load_kernel(kernel_name, kernel, **kwargs)
fn(grid=grid, block=block, args=args,
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
@cp._util.memoize(for_each_device=True)
def load_kernel(kernel_name, code, **kwargs):
code = Template(code).substitute(**kwargs)
kernel_code = cp.cuda.compile_with_cache(code)
return kernel_code.get_function(kernel_name)
def _get_kernel_params(x, dim=1, threadsperblock=None):
M, N = x.shape[:2]
if dim == 1:
threadsperblock = threadsperblock or (1024,)
blockspergrid = (int(np.ceil(M * N / threadsperblock[0])),)
elif dim == 2:
threadsperblock = threadsperblock or (32, 32)
blockspergrid_x = int(np.ceil(M / threadsperblock[0]))
blockspergrid_y = int(np.ceil(N / threadsperblock[1]))
blockspergrid = (blockspergrid_x, blockspergrid_y)
dtype = ('double' if x.dtype in (torch.float64, torch.complex128) else
'float')
kernel_kw = dict(dtype=dtype, M=M, N=N)
str_dtype = 'float32' if dtype == 'float' else 'float64'
return blockspergrid, threadsperblock, kernel_kw, str_dtype
| 1,432
| 33.95122
| 74
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/backend.py
|
# -*- coding: utf-8 -*-
import numpy as np
# torch & cupy imported at bottom
def allclose(a, b, device='cuda'):
"""`numpy.allclose` or `torch.allclose`, latter if input(s) are Tensor."""
if is_tensor(a, b, mode='any'):
a, b = asarray(a, device=device), asarray(b, device=device)
return torch.allclose(a, b)
return np.allclose(a, b)
def astype(x, dtype, device='cuda'):
if is_tensor(x):
return x.to(dtype=_torch_dtype(dtype))
return x.astype(dtype)
def array(x, dtype=None, device='cuda'):
if USE_GPU():
return torch.tensor(x, dtype=_torch_dtype(dtype), device=device)
return np.array(x)
def asarray(x, dtype=None, device='cuda'):
if USE_GPU():
return torch.as_tensor(x, dtype=_torch_dtype(dtype), device=device)
return np.asarray(x, dtype=dtype)
def zeros(shape, dtype=None, device='cuda'):
if USE_GPU():
return torch.zeros(shape, dtype=_torch_dtype(dtype), device=device)
return np.zeros(shape, dtype=dtype)
def ones(shape, dtype=None, device='cuda'):
if USE_GPU():
return torch.ones(shape, dtype=_torch_dtype(dtype), device=device)
return np.ones(shape, dtype=dtype)
def is_tensor(*args, mode='all'):
cond = all if mode == 'all' else any
return cond(isinstance(x, torch.Tensor) for x in args)
def is_dtype(x, str_dtype):
return (str_dtype in str(x.dtype) if isinstance(str_dtype, str) else
any(sd in str(x.dtype) for sd in str_dtype))
def atleast_1d(x, dtype=None, device='cuda'):
return Q.atleast_1d(asarray(x, dtype=dtype, device=device))
def asnumpy(x):
if is_tensor(x):
return x.cpu().numpy()
return x
def arange(a, b=None, dtype=None, device='cuda'):
if b is None:
a, b = 0, a
if USE_GPU():
if isinstance(dtype, str):
dtype = getattr(torch, dtype)
return torch.arange(a, b, dtype=dtype, device=device)
return np.arange(a, b, dtype=dtype)
def vstack(x):
if is_tensor(x) or (isinstance(x, list) and is_tensor(x[0])):
if isinstance(x, list):
# stack arrays as elements in extended dim0
return torch.vstack([_x[None] for _x in x])
return torch.vstack(x)
return np.vstack([x])
#### misc + dummies ##########################################################
def warn_if_tensor_and_par(x, parallel):
if parallel and is_tensor(x):
from .common import WARN
WARN("`parallel` ignored with tensor input.")
def _torch_dtype(dtype):
if isinstance(dtype, str):
return getattr(torch, dtype)
elif isinstance(dtype, np.dtype):
return getattr(torch, str(dtype).split('.')[-1])
return dtype # assume torch.dtype
class _TensorDummy():
pass
class TorchDummy():
"""Dummy class with dummy attributes."""
def __init__(self):
self.Tensor = _TensorDummy
self.dtype = _TensorDummy
class _Util():
"""For wrapper: `@cp._util.memoize`."""
def memoize(self, *args, **kwargs):
def wrap(fn):
return fn
return wrap
class CupyDummy():
"""Dummy class with dummy attributes."""
def __init__(self):
self._util = _Util()
class _Q():
"""Class for accessing `numpy` or `torch` attributes according to `USE_GPU()`.
"""
def __getattr__(self, name):
if USE_GPU():
return getattr(torch, name)
return getattr(np, name)
##############################################################################
Q = _Q()
try:
import torch
import cupy as cp
except:
torch = TorchDummy()
cp = CupyDummy()
from ..configs import USE_GPU
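# Example (illustration only): the same code path running on NumPy or Torch,
# depending on the global flag. A sketch, assuming `ssqueezepy` is installed;
# the flag is toggled via `os.environ['SSQ_GPU'] = '1'` before import.
def _demo_backend_dispatch():
    x = asarray([1., 2., 3.], dtype='float32')
    y = ones(3, dtype='float32')
    print(type(x), type(y))   # np.ndarray, or torch.Tensor if GPU is enabled
    print(asnumpy(Q.sqrt(x + y)))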
| 3,656
| 24.573427
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/__init__.py
|
# -*- coding: utf-8 -*-
from . import common
from . import cwt_utils
from . import stft_utils
from . import fft_utils
from . import backend
from .common import *
from .cwt_utils import *
from .stft_utils import *
from .fft_utils import *
from .backend import *
| 262
| 19.230769
| 25
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/fft_utils.py
|
# -*- coding: utf-8 -*-
import numpy as np
import multiprocessing
from scipy.fft import fftshift as sfftshift, ifftshift as sifftshift
from scipy.fft import fft as sfft, rfft as srfft, ifft as sifft, irfft as sirfft
from pathlib import Path
from . import backend as S
from ..configs import IS_PARALLEL
try:
from torch.fft import (fft as tfft, rfft as trfft,
ifft as tifft, irfft as tirfft,
fftshift as tfftshift, ifftshift as tifftshift)
except ImportError:
pass
try:
import pyfftw
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(600)
except ImportError:
pyfftw = None
UTILS_DIR = Path(__file__).parent
__all__ = [
'fft',
'rfft',
'ifft',
'irfft',
'fftshift',
'ifftshift',
'FFT',
'FFT_GLOBAL',
]
#############################################################################
class FFT():
"""Global class for ssqueezepy FFT methods.
Will use GPU via PyTorch if environment flag `'SSQ_GPU'` is set to `'1'`.
Will use `scipy.fft` or `pyfftw` depending on `patience` argument (and
whether `pyfftw` is installed).
Both will use `threads` CPUs to accelerate computing.
In a nutshell, if you plan on re-running FFT on input of same shape and dtype,
prefer `patience=1`, which introduces a lengthy first-time overhead but may
compute significantly faster afterwards.
# Arguments (`fft`, `rfft`, `ifft`, `irfft`):
x: np.ndarray
1D or 2D.
axis: int
FFT axis. One of `0, 1, -1`.
patience: int / tuple[int, int]
If int:
0: will use `scipy.fft`
1: `pyfftw` with flag `'FFTW_PATIENT'`
2: `pyfftw` with flag `'FFTW_EXHAUSTIVE'`
Else, if tuple, second element specifies `planning_timelimit`
passed to `pyfftw.FFTW` (so tuple requires `patience[0] != 0`).
Set `planning_timelimit = None` to allow planning to finish,
but beware; `patience = 1` can take hours for large inputs, and `2`
even longer.
astensor: bool (default False)
If computing on GPU, whether to return as `torch.Tensor` (if False,
will move to CPU and convert to `numpy.ndarray`).
n: int / None
Only for `irfft`; length of original input. If None, will default to
`2*(x.shape[axis] - 1)`.
__________________________________________________________________________
# Arguments (`__init__`):
planning_timelimit: int
Default planning time limit in seconds, passed to `pyfftw.FFTW` when
`patience` is an int (a tuple `patience` overrides it).
wisdom_dir: str
Where to save wisdom to or load from. Empty string means
`ssqueezepy/utils/`.
threads: int
Number of CPU threads to use. -1 = maximum.
patience: int
Default `patience`.
cache_fft_objects: bool (default False)
If True, `pyfftw` objects generated throughout session are stored in
`FFT._input_history`, and retrieved if all of below match:
`(x.shape, x.dtype, real, patience, n)`
where `patience` includes `planning_timelimit` as a tuple.
Default False since loading from wisdom is very fast anyway.
verbose: bool (default True)
Controls whether a message is printed upon `patience >= 1`.
__________________________________________________________________________
**Wisdom**
`pyfftw` uses "wisdom", basically storing and reusing generated FFT plans
if input attributes match:
(`x.shape`, `x.dtype`, `axis`, `flags`, `planning_timelimit`)
`flags` and `planning_timelimit` are set via `patience`.
With each `pyfftw` use, `save_wisdom()` is called, writing to `wisdom32` and
`wisdom64` bytes files in `ssqueezepy/utils`. Each time ssqueezepy runs in a
new session, `load_wisdom()` is called to load these values, so wisdom
only accumulates across sessions.
"""
def __init__(self, planning_timelimit=120, wisdom_dir=UTILS_DIR, threads=None,
patience=0, cache_fft_objects=False, verbose=1):
self.planning_timelimit = planning_timelimit
self.wisdom_dir = wisdom_dir
self._user_threads = threads
self._patience = patience # default patience
self._process_patience(patience) # error if !=0 and pyfftw not installed
self.cache_fft_objects = cache_fft_objects
self.verbose = verbose
if pyfftw is not None:
pyfftw.config.NUM_THREADS = self.threads
self._wisdom32_path = str(Path(self.wisdom_dir, 'wisdom32'))
self._wisdom64_path = str(Path(self.wisdom_dir, 'wisdom64'))
self._wisdom32, self._wisdom64 = b'', b''
self._input_history = {}
self.load_wisdom()
@property
def threads(self):
"""Set dynamically if `threads` wasn't passed in __init__."""
if self._user_threads is None:
return (multiprocessing.cpu_count() if IS_PARALLEL() else 1)
return self._user_threads
@property
def patience(self):
"""Setter will also set `planning_timelimit` if setting to tuple."""
return self._patience
@patience.setter
def patience(self, value):
self._validate_patience(value)
if isinstance(value, tuple):
self._patience, self.planning_timelimit = value
else:
self._patience = value
#### Main methods #########################################################
def fft(self, x, axis=-1, patience=None, astensor=False):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('fft', x, dim=axis, astensor=astensor)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return sfft(x, axis=axis, workers=self.threads)
fft_object = self._get_save_fill(x, axis, patience, real=False)
return fft_object()
def rfft(self, x, axis=-1, patience=None, astensor=False):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('rfft', x, dim=axis, astensor=astensor)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return srfft(x, axis=axis, workers=self.threads)
fft_object = self._get_save_fill(x, axis, patience, real=True)
return fft_object()
def ifft(self, x, axis=-1, patience=None, astensor=False):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('ifft', x, dim=axis, astensor=astensor)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return sifft(x, axis=axis, workers=self.threads)
fft_object = self._get_save_fill(x, axis, patience, real=False,
inverse=True)
return fft_object()
def irfft(self, x, axis=-1, patience=None, astensor=False, n=None):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('irfft', x, dim=axis, astensor=astensor, n=n)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return sirfft(x, axis=axis, workers=self.threads, n=n)
fft_object = self._get_save_fill(x, axis, patience, real=True,
inverse=True, n=n)
return fft_object()
def fftshift(self, x, axes=-1, astensor=False):
out = self._maybe_gpu('fftshift', x, dim=axes, astensor=astensor)
if out is not None:
return out
return sfftshift(x, axes=axes)
def ifftshift(self, x, axes=-1, astensor=False):
out = self._maybe_gpu('ifftshift', x, dim=axes, astensor=astensor)
if out is not None:
return out
return sifftshift(x, axes=axes)
def _maybe_gpu(self, name, x, astensor=False, **kw):
if S.is_tensor(x):
fn = {'fft': tfft, 'ifft': tifft,
'rfft': trfft, 'irfft': tirfft,
'fftshift': tfftshift, 'ifftshift': tifftshift}[name]
out = fn(S.asarray(x), **kw)
return out if astensor else out.cpu().numpy()
return None
#### FFT makers ###########################################################
def _get_save_fill(self, x, axis, patience, real, inverse=False, n=None):
fft_object = self.get_fft_object(x, axis, patience, real, inverse, n)
self.save_wisdom()
fft_object.input_array[:] = x
return fft_object
def get_fft_object(self, x, axis, patience=1, real=False, inverse=False,
n=None):
combo = (x.shape, x.dtype, axis, real, n)
if self.cache_fft_objects and combo in self._input_history:
fft_object = self._input_history[combo]
else:
fft_object = self._get_fft_object(x, axis, patience, real, inverse, n)
if self.cache_fft_objects:
self._input_history[combo] = fft_object
return fft_object
def _get_fft_object(self, x, axis, patience, real, inverse, n):
(shapes, dtypes, flags, planning_timelimit, direction
) = self._process_input(x, axis, patience, real, inverse, n)
shape_in, shape_out = shapes
dtype_in, dtype_out = dtypes
a = pyfftw.empty_aligned(shape_in, dtype=dtype_in)
b = pyfftw.empty_aligned(shape_out, dtype=dtype_out)
fft_object = pyfftw.FFTW(a, b, axes=(axis,), flags=flags,
planning_timelimit=planning_timelimit,
direction=direction, threads=self.threads)
return fft_object
def _process_input(self, x, axis, patience, real, inverse, n):
self._validate_input(x, axis, real, patience, inverse)
# patience, planning time, forward / inverse
if isinstance(patience, tuple):
patience, planning_timelimit = patience
else:
planning_timelimit = self.planning_timelimit
flags = ['FFTW_PATIENT'] if patience == 1 else ['FFTW_EXHAUSTIVE']
direction = 'FFTW_BACKWARD' if inverse else 'FFTW_FORWARD'
# shapes
shape_in = x.shape
shape_out = self._get_output_shape(x, axis, real, inverse, n)
# dtypes
double = x.dtype in (np.float64, np.cfloat)
cdtype = 'complex128' if double else 'complex64'
rdtype = 'float64' if double else 'float32'
dtype_in = rdtype if (real and not inverse) else cdtype
dtype_out = rdtype if (real and inverse) else cdtype
# notify user of procedure
if self.verbose:
if planning_timelimit is None:
adjective = "very long" if patience == 2 else "long"
print("Planning optimal FFT algorithm; this may "
"take %s..." % adjective)
else:
print("Planning optimal FFT algorithm; this will take up to "
"%s secs" % planning_timelimit)
return ((shape_in, shape_out), (dtype_in, dtype_out), flags,
planning_timelimit, direction)
def _get_output_shape(self, x, axis, real=False, inverse=False, n=None):
if not inverse:
n_fft = x.shape[axis]
fft_out_len = (n_fft//2 + 1) if real else n_fft
else:
if real:
n_fft = n if (n is not None) else 2*(x.shape[axis] - 1)
else:
n_fft = x.shape[axis]
fft_out_len = n_fft
if x.ndim != 1:
shape = list(x.shape)
shape[axis] = fft_out_len
shape = tuple(shape)
else:
shape = (fft_out_len,)
return shape
#### Misc #################################################################
def load_wisdom(self):
for name in ('wisdom32', 'wisdom64'):
path = getattr(self, f"_{name}_path")
if Path(path).is_file():
with open(path, 'rb') as f:
setattr(self, f"_{name}", f.read())
pyfftw.import_wisdom((self._wisdom64, self._wisdom32, b''))
def save_wisdom(self):
"""Will overwrite."""
self._wisdom64, self._wisdom32, _ = pyfftw.export_wisdom()
for name in ('wisdom32', 'wisdom64'):
path = getattr(self, f"_{name}_path")
with open(path, 'wb') as f:
f.write(getattr(self, f"_{name}"))
def _validate_input(self, x, axis, real, patience, inverse):
"""Assert is single/double precision and is 1D/2D."""
supported = ('float32', 'float64', 'complex64', 'complex128')
dtype = str(x.dtype)
if dtype not in supported:
raise TypeError("unsupported `x.dtype`: %s " % dtype
+ "(must be one of: %s)" % ', '.join(supported))
if (real and not inverse) and dtype.startswith('complex'):
raise TypeError("`x` cannot be complex for `rfft`")
if axis not in (0, 1, -1):
raise ValueError("unsupported `axis`: %s " % axis
+ "; must be 0, 1, or -1")
self._validate_patience(patience)
def _validate_patience(self, patience):
if not isinstance(patience, (int, tuple)):
raise TypeError("`patience` must be int or tuple "
"(got %s)" % type(patience))
elif isinstance(patience, int):
from .common import assert_is_one_of
assert_is_one_of(patience, 'patience', (0, 1, 2))
def _process_patience(self, patience):
patience = patience if (patience is not None) else self.patience
if pyfftw is None and patience != 0:
raise ValueError("`patience != 0` requires `pyfftw` installed.")
return patience
FFT_GLOBAL = FFT()
fft = FFT_GLOBAL.fft
rfft = FFT_GLOBAL.rfft
ifft = FFT_GLOBAL.ifft
irfft = FFT_GLOBAL.irfft
fftshift = FFT_GLOBAL.fftshift
ifftshift = FFT_GLOBAL.ifftshift
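# Example (illustration only): the module-level wrappers in use. A sketch;
# `patience=0` (the default) uses `scipy.fft`, while `patience >= 1` requires
# `pyfftw` and incurs a one-time planning cost (see `help(FFT)`).
def _demo_fft_utils():
    x = np.random.randn(4, 1024).astype('float32')
    X = rfft(x, axis=-1)
    xr = irfft(X, axis=-1, n=x.shape[-1])
    print(np.allclose(x, xr, atol=1e-6))   # True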
| 14,188
| 37.142473
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/utils/stft_utils.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.fft import fft, fftshift
from numba import jit, prange
from scipy import integrate
from .gpu_utils import _run_on_gpu, _get_kernel_params
from ..configs import IS_PARALLEL
from .backend import torch
from . import backend as S
__all__ = [
"buffer",
"unbuffer",
"window_norm",
"window_resolution",
"window_area",
]
def buffer(x, seg_len, n_overlap, modulated=False, parallel=None):
"""Build 2D array where each column is a successive slice of `x` of length
`seg_len` and overlapping by `n_overlap` (or equivalently incrementing
starting index of each slice by `hop_len = seg_len - n_overlap`).
Mimics MATLAB's `buffer`, with less functionality.
Supports batched input with samples along dim 0, i.e. `(n_inputs, input_len)`.
See `help(stft)` on `modulated`.
Ex:
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
xb = buffer(x, seg_len=5, n_overlap=3)
xb == [[0, 1, 2, 3, 4],
[2, 3, 4, 5, 6],
[4, 5, 6, 7, 8]].T
"""
S.warn_if_tensor_and_par(x, parallel)
assert x.ndim in (1, 2)
hop_len = seg_len - n_overlap
n_segs = (x.shape[-1] - seg_len) // hop_len + 1
s20 = int(np.ceil(seg_len / 2))
s21 = s20 - 1 if (seg_len % 2 == 1) else s20
args = (seg_len, n_segs, hop_len, s20, s21, modulated)
if S.is_tensor(x):
if x.ndim == 1:
out = _buffer_gpu(x, seg_len, n_segs, hop_len, s20, s21, modulated)
elif x.ndim == 2:
out = x.new_zeros((len(x), seg_len, n_segs))
for _x, _out in zip(x, out):
_buffer_gpu(_x, *args, out=_out)
else:
parallel = parallel or IS_PARALLEL()
fn = _buffer_par if parallel else _buffer
if x.ndim == 1:
out = np.zeros((seg_len, n_segs), dtype=x.dtype, order='F')
fn(x, out, *args)
elif x.ndim == 2:
out = np.zeros((len(x), seg_len, n_segs), dtype=x.dtype, order='F')
for _x, _out in zip(x, out):
fn(_x, _out, *args)
return out
@jit(nopython=True, cache=True)
def _buffer(x, out, seg_len, n_segs, hop_len, s20, s21, modulated=False):
for i in range(n_segs):
if not modulated:
start = hop_len * i
end = start + seg_len
out[:, i] = x[start:end]
else:
start0 = hop_len * i
end0 = start0 + s21
start1 = end0
end1 = start1 + s20
out[:s20, i] = x[start1:end1]
out[s20:, i] = x[start0:end0]
@jit(nopython=True, cache=True, parallel=True)
def _buffer_par(x, out, seg_len, n_segs, hop_len, s20, s21, modulated=False):
for i in prange(n_segs):
if not modulated:
start = hop_len * i
end = start + seg_len
out[:, i] = x[start:end]
else:
start0 = hop_len * i
end0 = start0 + s21
start1 = end0
end1 = start1 + s20
out[:s20, i] = x[start1:end1]
out[s20:, i] = x[start0:end0]
def _buffer_gpu(x, seg_len, n_segs, hop_len, s20, s21, modulated=False, out=None):
kernel = '''
extern "C" __global__
void buffer(${dtype} x[${N}],
${dtype} out[${L}][${W}],
bool modulated,
int hop_len, int seg_len,
int s20, int s21)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= ${W})
return;
int start = hop_len * i;
for (int j=start; j < start + seg_len; ++j){
if (!modulated){
out[j - start][i] = x[j];
} else {
if (j < start + s20){
out[j - start][i] = x[j + s21];
} else{
out[j - start][i] = x[j - s20];
}
}
}
}
'''
if not isinstance(x, torch.Tensor):
x = torch.as_tensor(x, device='cuda')
if out is None:
out = x.new_zeros((seg_len, n_segs))
blockspergrid, threadsperblock, kernel_kw, _ = _get_kernel_params(out, dim=1)
kernel_kw.update(dict(N=len(x), L=len(out), W=out.shape[1]))
kernel_args = [x.data_ptr(), out.data_ptr(), bool(modulated), hop_len,
seg_len, s20, s21]
_run_on_gpu(kernel, blockspergrid, threadsperblock, *kernel_args, **kernel_kw)
return out
def unbuffer(xbuf, window, hop_len, n_fft, N, win_exp=1):
"""Undoes `buffer` (minus unpadding), per padding logic used in `stft`:
(N, n_fft) : logic
even, even: left = right + 1
(N, n_fft, len(xp), pl, pr) -> (128, 120, 247, 60, 59)
odd, odd: left = right
(N, n_fft, len(xp), pl, pr) -> (129, 121, 249, 60, 60)
even, odd: left = right
(N, n_fft, len(xp), pl, pr) -> (128, 121, 248, 60, 60)
odd, even: left = right + 1
(N, n_fft, len(xp), pl, pr) -> (129, 120, 248, 60, 59)
"""
if N is None:
# assume greatest possible len(x) (unpadded)
N = xbuf.shape[1] * hop_len + len(window) - 1
if len(window) != n_fft:
raise ValueError("Must have `len(window) == n_fft` "
"(got %s != %s)" % (len(window), n_fft))
if win_exp == 0:
window = 1
elif win_exp != 1:
window = window ** win_exp
x = np.zeros(N + n_fft - 1, dtype=xbuf.dtype)
_overlap_add(x, xbuf, window, hop_len, n_fft)
return x
def window_norm(window, hop_len, n_fft, N, win_exp=1):
"""Computes window modulation array for use in `stft` and `istft`."""
wn = np.zeros(N + n_fft - 1)
_window_norm(wn, window, hop_len, n_fft, win_exp)
return wn
@jit(nopython=True, cache=True)
def _overlap_add(x, xbuf, window, hop_len, n_fft):
for i in range(xbuf.shape[1]):
n = i * hop_len
x[n:n + n_fft] += xbuf[:, i] * window
@jit(nopython=True, cache=True)
def _window_norm(wn, window, hop_len, n_fft, win_exp=1):
max_hops = (len(wn) - n_fft) // hop_len + 1
wpow = window ** (win_exp + 1)
for i in range(max_hops):
n = i * hop_len
wn[n:n + n_fft] += wpow
def window_resolution(window):
"""Minimal function to compute a window's time & frequency widths, assuming
Fourier spectrum centered about dc (else use `ssqueezepy.wavelets` methods).
Returns std_w, std_t, harea. `window` must be np.ndarray and >=0.
"""
from ..wavelets import _xifn
assert window.min() >= 0, "`window` must be >= 0 (got min=%s)" % window.min()
N = len(window)
t = np.arange(-N/2, N/2, step=1)
ws = fftshift(_xifn(1, N))
psihs = fftshift(fft(window))
apsi2 = np.abs(window)**2
apsih2s = np.abs(psihs)**2
var_w = integrate.trapz(ws**2 * apsih2s, ws) / integrate.trapz(apsih2s, ws)
var_t = integrate.trapz(t**2 * apsi2, t) / integrate.trapz(apsi2, t)
std_w, std_t = np.sqrt(var_w), np.sqrt(var_t)
harea = std_w * std_t
return std_w, std_t, harea
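# Example (illustration only): time-frequency resolution of a Hann window; with
# these definitions the Heisenberg area `harea` cannot fall below ~0.5.
# A minimal sketch, assuming scipy is installed.
def _demo_window_resolution():
    from scipy.signal.windows import hann
    window = hann(256, sym=False)
    std_w, std_t, harea = window_resolution(window)
    print("std_w=%.4f, std_t=%.2f, harea=%.3f" % (std_w, std_t, harea))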
def window_area(window, time=True, frequency=False):
"""Minimal function to compute a window's time or frequency 'area' as area
under curve of `abs(window)**2`. `window` must be np.ndarray.
"""
from ..wavelets import _xifn
if not time and not frequency:
raise ValueError("must compute something")
if time:
t = np.arange(-len(window)/2, len(window)/2, step=1)
at = integrate.trapz(np.abs(window)**2, t)
if frequency:
ws = fftshift(_xifn(1, len(window)))
apsih2s = np.abs(fftshift(fft(window)))**2
aw = integrate.trapz(apsih2s, ws)
if time and frequency:
return at, aw
elif time:
return at
return aw
| 7,677
| 30.991667
| 82
|
py
|
rnlps
|
rnlps-master/rnlps/__init__.py
| 0
| 0
| 0
|
py
|
|
rnlps
|
rnlps-master/rnlps/policies/base.py
|
"""
Defines the interaction between policy and bandit.
Also includes the base policies for the different types of problems -
non-contextual, contextual and linear bandits.
"""
import numpy as np
import tensorflow as tf
import contextlib
from termcolor import cprint
@contextlib.contextmanager
def _printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
class Trial:
def __init__(self, n_arms):
self.n_arms = n_arms
        self.returns = np.zeros(n_arms, dtype=float)
        self.pulls = np.zeros(n_arms, dtype=int)
self.arms = []
self.rewards = []
self.contexts = []
self.regrets = []
self.length = 0
def update_contexts(self, context):
self.contexts.append(context)
def append(self, arm, reward, context, regret):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
self.arms.append(arm)
self.rewards.append(reward)
self.update_contexts(context)
self.regrets.append(regret)
self.length += 1
self.returns[arm] += reward
self.pulls[arm] += 1
def average_rewards(self):
out = np.full(self.n_arms, float('inf'))
where = (self.pulls > 0)
return np.divide(self.returns, self.pulls, out=out, where=where)
def cumulative_rewards(self):
return np.cumsum(self.rewards)
def cumulative_regret(self):
return np.cumsum(self.regrets)
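# Hedged illustration (not part of the original module): basic Trial
# bookkeeping. Arms that were never pulled report an average reward of inf
# rather than raising a division error.
def _trial_bookkeeping_example():
    trial = Trial(n_arms=3)
    trial.append(arm=0, reward=1.0, context=None, regret=0.0)
    trial.append(arm=0, reward=0.0, context=None, regret=1.0)
    avg = trial.average_rewards() # -> array([0.5, inf, inf])
    return avg, trial.pulls, trial.length # -> ..., array([2, 0, 0]), 2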
class Policy:
def __init__(self, bandit):
self.bandit = bandit
self.contextual_bandit = 0
if hasattr(bandit, 'reset'):
self.contextual_bandit = 1
def select(self, trial):
raise NotImplementedError()
def interact(self, trial_length):
trial = Trial(self.bandit.n_arms)
if self.contextual_bandit:
reset_context = self.bandit.reset()
trial.update_contexts(reset_context)
for i in range(trial_length):
arm = self.select(trial)
reward, context, regret = self.bandit.pull(arm)
trial.append(arm, reward, context, regret)
return trial
class BaseOracle(Policy):
"""Selects the best available action at every time step. """
def __init__(self, bandit):
Policy.__init__(self, bandit)
def select(self, trial):
return int(self.bandit.best_arms()[0])
def __repr__(self):
return 'Oracle()'
class BaseFixed(Policy):
"""Selects a fixed chosen action at every time step. """
def __init__(self, bandit, arm):
Policy.__init__(self, bandit)
self.arm = arm
def select(self, trial):
return self.arm
def __repr__(self):
return 'Fixed(arm={0})'.format(self.arm)
class BaseRandom(Policy):
"""Selects a random action at every time step. """
def __init__(self, bandit, seed):
Policy.__init__(self, bandit)
self.random_state = np.random.RandomState(seed)
def select(self, trial):
return self.random_state.randint(trial.n_arms)
def __repr__(self):
return 'Random()'
class BaseThompsonRecurrentNetwork(Policy):
""" Recurrent neural-linear: Thompson sampling based policy by using
Bayesian linear regression on the representation(context) generated by
the penultimate layer of the recurrent architecture. """
def __init__(self, bandit, n_units, learning_rate, regularise_lambda, epochs,
train_every, std_targets, std_weights, verbose, seed):
Policy.__init__(self, bandit)
self.n_units = n_units
if len(self.n_units) < 2:
raise Exception('Invalid number of layers.')
self.learning_rate = learning_rate
self.regularise_lambda = regularise_lambda
self.epochs = epochs
self.train_every = train_every
self.verbose = verbose
self.random_state = np.random.RandomState(seed=seed)
self.var_targets = std_targets**2.
self.var_weights = std_weights**2.
self.one_over_lambda = self.var_weights/self.var_targets
self._setup(seed)
def _setup(self, seed):
""" Creates the recurrent representation architecture. """
tf.reset_default_graph()
tf.set_random_seed(seed)
self._rewards = tf.placeholder(tf.float32, shape=None, name='rewards')
inputs, isize = self._setup_input()
W = tf.Variable(tf.truncated_normal(shape=(isize, self.n_units[0])),
name='W0')
b = tf.Variable(tf.zeros(shape=(self.n_units[0])), name='b0')
rnn_inputs = tf.matmul(inputs, W) + b
rnn_inputs = tf.expand_dims(rnn_inputs, axis=0)
cell = tf.contrib.rnn.LSTMCell(num_units=self.n_units[1])
self._istate = cell.zero_state(1, dtype=tf.float32)
rnn_outputs, \
self._final_state = tf.nn.dynamic_rnn(cell, rnn_inputs,
initial_state=self._istate)
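        # `rnn_outputs` holds the hidden state for every step of the (single)
        # sequence; `_final_state` is reused at action-selection time so a
        # one-step candidate input can continue from the observed history.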
self._h_output = tf.reshape(rnn_outputs, (-1, self.n_units[1]))
for i in range(2, len(self.n_units)):
W = tf.Variable(tf.truncated_normal(shape=(self.n_units[i - 1],
self.n_units[i])),
name='W{0}'.format(i))
b = tf.Variable(tf.zeros(shape=(self.n_units[i])),
name='b{0}'.format(i))
self._h_output = tf.tanh(tf.matmul(self._h_output, W) + b)
W = tf.Variable(tf.truncated_normal(shape=(self.n_units[-1], 1)),
name='Wout')
# Note: No bias in the output layer
self._pred = tf.matmul(self._h_output, W)
self._pred = tf.reshape(self._pred, (-1,))
self._reg_loss = sum(tf.nn.l2_loss(tf_var)
for tf_var in tf.trainable_variables()
if not ("b" in tf_var.name))
        self._loss = tf.reduce_mean((self._pred - self._rewards[1:])**2) + \
                     self.regularise_lambda * self._reg_loss
# Predictions compared with reward from 1 step ahead
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self._train = optimizer.minimize(self._loss)
config = tf.ConfigProto(device_count={'GPU': 0})
self.session = tf.Session(config=config)
self.session.run(tf.global_variables_initializer())
def select(self, trial):
""" Selects which arm to play in the current round. """
if trial.length < trial.n_arms:
            # not every arm has been pulled yet; play the least-pulled arm.
return np.argmin(trial.pulls)
else:
return self._select_from_policy(trial)
def _select_from_policy(self, trial):
""" Selects which arm to play in the current round - according to the
policy. """
feed = self._get_feed_for_rnn(trial)
if trial.length % self.train_every == 0:
# Train the RNN weights when this condition is true
for _ in range(self.epochs):
self.session.run(self._train, feed)
loss, state, observations = self.session.run([self._loss,
self._final_state,
self._h_output], feed)
targets = np.array(trial.rewards)
# Update the posterior for Bayesian linear regression
mean, cov = self._update_bayesian_lr_posterior(observations, targets)
# Sample from posterior for Bayesian linear regression
w = self.random_state.multivariate_normal(mean, cov)
# Exploration with posterior sampling
# Pull the arm with the highest prediction under the sampled model
pred = self._get_pred_from_sampled_model(trial, w, state)
if self.verbose:
msg = 'Pull: {0:4d}. Loss: {1:.4f}. Prediction: {2}. Policy: {3}.'
p = np.zeros(trial.n_arms)
p[np.argmax(pred)] = 1.0
with _printoptions(precision=4, suppress=True):
if np.argmax(p) in self.bandit.best_arms():
cprint(msg.format(trial.length + 1, loss, pred, p), 'green')
else:
print(msg.format(trial.length + 1, loss, pred, p))
return np.argmax(pred)
def _update_bayesian_lr_posterior(self, observations, targets):
""" Updates the posterior for Bayesian linear regression. """
M = self.one_over_lambda * observations.T.dot(observations)
M = np.linalg.inv(M + np.eye(M.shape[0]))
mean = (self.one_over_lambda * M).dot(observations.T.dot(targets))
cov = self.var_weights*M
return mean, cov
def _setup_input(self):
raise NotImplementedError()
def _get_feed_for_rnn(self, trial):
raise NotImplementedError()
def _get_pred_from_sampled_model(self, trial, w, state):
raise NotImplementedError()
def __repr__(self):
r = 'ThompsonRecurrentNetwork(n_units={0}, learning_rate={1}, '
r += 'regL2={2}, epochs={3}, train_every={4}, std_targets={5}, std_weights={6})'
return r.format(self.n_units, self.learning_rate,
self.regularise_lambda, self.epochs,
self.train_every, np.sqrt(self.var_targets),
np.sqrt(self.var_weights))
class BaseThompsonSinFeedforwardNetwork(Policy):
""" Neural-linear: Posterior sampling with Bayesian linear regression on
the representation generated by a FFNN fed with handcrafted context and
sinusoidal input units."""
def __init__(self, bandit, order, periods, periods_dim, n_units,
learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed):
Policy.__init__(self, bandit)
self.order = order
self.periods = np.array(periods) # Can be used to provide hard-coded periods
        self.periods_dim = periods_dim # Number of sinusoidal units
self.n_units = n_units
self.learning_rate = learning_rate
self.regularise_lambda = regularise_lambda
self.epochs = epochs
self.train_every = train_every
self.var_targets = std_targets**2.
self.var_weights = std_weights**2.
self.one_over_lambda = self.var_weights/self.var_targets
self.verbose = verbose
self.random_state = np.random.RandomState(seed=seed)
self._setup(seed)
def _setup(self, seed):
""" Creates the NN architecture used in neural-linear. """
tf.reset_default_graph()
tf.set_random_seed(seed)
isize_pre, isize_post = self._setup_input_size()
self._observations = tf.placeholder(tf.float32, shape=[None, isize_pre],
name='observations')
self._targets = tf.placeholder(tf.float32, shape=None, name='targets')
# Re-create _observations to include the periodic transformations
self._observations_orig = self._observations[:, :-1]
self._raw_t = self._observations[:, -1]
W = tf.Variable(tf.truncated_normal(shape=(1, self.periods_dim)),
name='W_periodic')
b = tf.Variable(tf.zeros(shape=(self.periods_dim)),
name='b_periodic')
# Sinusoidal activation
self._raw_t = tf.reshape(self._raw_t, [-1, 1])
t_op = tf.sin(tf.matmul(self._raw_t, W) + b)
# Observations now include the periodic functions of the time step.
self._observations_new = tf.concat([self._observations_orig, t_op], axis = -1)
n_units = [isize_post] + self.n_units
self._h_output = self._observations_new
for i in range(1, len(n_units)):
W = tf.Variable(tf.truncated_normal(shape=(n_units[i - 1],
n_units[i])),
name='W{0}'.format(i))
b = tf.Variable(tf.zeros(shape=(n_units[i])),
name='b{0}'.format(i))
if i == 1:
# No activation in the first layer
self._h_output = tf.matmul(self._h_output, W) + b
else:
self._h_output = tf.tanh(tf.matmul(self._h_output, W) + b)
# Note: No bias in the output layer
W = tf.Variable(tf.truncated_normal(shape=(n_units[-1], 1)),
name='Wout')
self._pred = tf.matmul(self._h_output, W)
self._pred = tf.reshape(self._pred, (-1,))
# L2 regularization on weights
self._reg_loss = sum(tf.nn.l2_loss(tf_var)
for tf_var in tf.trainable_variables()
if not ("b" in tf_var.name))
self._loss = tf.reduce_mean((self._pred - self._targets)**2) + \
self.regularise_lambda * self._reg_loss
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self._train = optimizer.minimize(self._loss)
config = tf.ConfigProto(device_count={'GPU': 0})
self.session = tf.Session(config=config)
self.session.run(tf.global_variables_initializer())
def select(self, trial):
""" Selects which arm to play in the current round. """
if trial.length < max(self.order + 1, trial.n_arms):
            # not enough history yet (or not every arm pulled); play the
            # least-pulled arm.
return np.argmin(trial.pulls)
else:
return self._select_from_policy(trial)
def _select_from_policy(self, trial):
""" Selects which arm to play in the current round - according to the
policy. """
observations, targets = self._get_observation_target_pair(trial)
feed = {self._observations: observations, self._targets: targets}
if trial.length % self.train_every == 0:
# Train the NN weights when this condition is true
for _ in range(self.epochs):
self.session.run(self._train, feed)
loss, observations = self.session.run([self._loss, self._h_output],
feed)
# Observations now contain the output activation of the penultimate layer of the NN,
# on which we perform Bayesian Linear Regression
# Update the posterior for Bayesian linear regression
mean, cov = self._update_bayesian_lr_posterior(observations, targets)
# Sample from posterior for Bayesian linear regression
w = self.random_state.multivariate_normal(mean, cov)
# Exploration with posterior sampling
# Pull the arm with the highest prediction under the sampled model
pred = self._get_pred_from_sampled_model(trial, w)
if self.verbose:
msg = 'Pull: {0:4d}. Loss: {1:.4f}. Prediction: {2}. Policy: {3}.'
p = np.zeros(trial.n_arms)
p[np.argmax(pred)] = 1.0
with _printoptions(precision=4, suppress=True):
if np.argmax(p) in self.bandit.best_arms():
cprint(msg.format(trial.length + 1, loss, pred, p), 'green')
else:
print(msg.format(trial.length + 1, loss, pred, p))
return np.argmax(pred)
def _get_observation_target_pair(self, trial):
""" Creates the dataset for training the NN representation model. """
observations = []
targets = []
for t in range(self.order, trial.length):
observation, target = self._example(trial.arms, trial.rewards, trial.contexts[:-1], t)
# remove the last context for which you don't have a pull.
observations.append(observation)
targets.append(target)
return observations, targets
def _get_pred_from_sampled_model(self, trial, w):
""" Obtains the predicted reward for every action under the sampled
model(w). """
pred = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
observation, _ = self._example(trial.arms + [arm],
trial.rewards + [0.],
trial.contexts,
trial.length)
feed = {self._observations: [observation]}
arm_features = self.session.run(self._h_output, feed)[0]
pred[arm] = arm_features.dot(w)
return pred
def _update_bayesian_lr_posterior(self, observations, targets):
""" Updates the posterior for Bayesian linear regression weights. """
M = self.one_over_lambda * observations.T.dot(observations)
M = np.linalg.inv(M + np.eye(M.shape[0]))
mean = (self.one_over_lambda * M).dot(observations.T.dot(targets))
cov = self.var_weights*M
return mean, cov
def _example(self, arms, rewards, contexts, t):
raise NotImplementedError()
def _setup_input_size(self):
raise NotImplementedError()
def __repr__(self):
r = 'ThompsonSinFeedforwardNetwork(order={0}, periods={1}, periods_dim={2},'
r += ' n_units={3}, learning_rate={4}, regL2={5}, epochs={6}, train_every={7},'
r += ' std_targets={8}, std_weights={9})'
return r.format(self.order, self.periods, self.periods_dim, self.n_units,
self.learning_rate, self.regularise_lambda,
self.epochs, self.train_every,
np.sqrt(self.var_targets), np.sqrt(self.var_weights))
| 17,699
| 33.038462
| 114
|
py
|
rnlps
|
rnlps-master/rnlps/policies/contextual_policies.py
|
"""
Policies for contextual bandit problems.
"""
import numpy as np
import tensorflow as tf
import contextlib
from termcolor import cprint
from rnlps.policies.base import Trial, Policy
from rnlps.policies.base import BaseOracle, BaseFixed, BaseRandom
from rnlps.policies.base import BaseThompsonRecurrentNetwork, BaseThompsonSinFeedforwardNetwork
@contextlib.contextmanager
def _printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
class Oracle(BaseOracle):
pass
class Fixed(BaseFixed):
pass
class Random(BaseRandom):
pass
class ThompsonRecurrentNetwork(BaseThompsonRecurrentNetwork):
""" Recurrent neural-linear: Thompson sampling based policy by using
Bayesian linear regression on the representation(context) generated by
the penultimate layer of the recurrent architecture. """
def __init__(self, bandit, n_units, learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed):
self.context_dims = bandit.context_dims
BaseThompsonRecurrentNetwork.__init__(self, bandit, n_units,
learning_rate, regularise_lambda, epochs, train_every, std_targets,
std_weights, verbose, seed)
def _setup_input(self):
""" Returns the input and input size for reward prediction by the RNN. """
self._arms = tf.placeholder(tf.int32, shape=None, name='arms')
self._contexts = tf.placeholder(tf.float32, shape=[None, self.context_dims], name='contexts')
rewards = tf.reshape(self._rewards[:-1], (-1, 1))
        # past rewards are features for prediction; the first value is 0 and
        # the last one cannot be used as a feature for the next step
arms_oh = tf.one_hot(self._arms, depth=self.bandit.n_arms,
dtype=tf.float32, name='arms_oh')
contexts = self._contexts
# Don't have a reward for the last context, ignore it while passing feed dict
inputs = tf.concat([rewards, arms_oh, contexts], axis=1, name='inputs')
isize = self.bandit.n_arms + 1 + self.context_dims
return inputs, isize
def _get_pred_from_sampled_model(self, trial, w, state):
""" Obtains the predicted reward for every action under the sampled
model(w). """
pred = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
feed = {self._istate: state, self._arms: [arm],
self._rewards: [trial.rewards[-1], 0], self._contexts: [trial.contexts[-1]]}
arm_features = self.session.run(self._h_output, feed)[0]
pred[arm] = arm_features.dot(w)
return pred
def _get_feed_for_rnn(self, trial):
""" Returns the input feed for the action selection by the policy. """
feed = {self._arms: trial.arms, self._rewards: [0.] + trial.rewards, self._contexts: trial.contexts[:-1]}
# Include contexts while dealing with contextual bandits
return feed
class ThompsonSinFeedforwardNetwork(BaseThompsonSinFeedforwardNetwork):
""" Neural-linear: Thompson sampling with Bayesian linear regression on
the representation generated by a FFNN fed with handcrafted context and
sinusoidal input units."""
def __init__(self, bandit, order, periods, periods_dim, n_units,
learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed):
self.context_dims = bandit.context_dims
BaseThompsonSinFeedforwardNetwork.__init__(self, bandit, order, periods, periods_dim, n_units,
learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed)
self._setup(seed)
def _setup_input_size(self):
""" Returns the number of units in the input layer, before and after
including the sinusoidal units. """
isize = len(self.periods)*2
isize += self.bandit.n_arms + self.context_dims + (self.bandit.n_arms + self.context_dims+ 1) * self.order
        # add more units for the periodic transformation
isize_pre = isize + 1
isize_post = isize + self.periods_dim
return isize_pre, isize_post
def _example(self, arms, rewards, contexts, t):
""" Creates one sample of the handcrafted context-target pair. Used to
generate the training data for training the NN representation model. """
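        # Observation layout: [cos/sin features for each hard-coded period |
        # one-hot of the candidate arm | current context | for each of the
        # `order` previous steps: one-hot arm, reward, context | raw time step].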
if t < self.order:
raise Exception('Incomplete observation.')
arms_oh = np.eye(self.bandit.n_arms)
observation = []
# Can be used for hard-coded periods
for p in self.periods:
angle = (2*np.pi*t)/p
observation += [np.cos(angle), np.sin(angle)]
observation.extend(arms_oh[arms[t]])
observation.extend(contexts[t]) #Add the current context
for i in range(self.order):
observation.extend(arms_oh[arms[t - i - 1]])
observation.append(rewards[t - i - 1])
#Add the previous contexts
observation.extend(contexts[t - i - 1])
observation.append(t) #add the raw time-step as the new input
target = rewards[t]
return observation, target
contextual_policies = {'Oracle': Oracle,
'Fixed': Fixed,
'Random': Random,
'ThompsonRecurrentNetwork': ThompsonRecurrentNetwork,
'ThompsonSinFeedforwardNetwork': ThompsonSinFeedforwardNetwork}
| 5,628
| 33.115152
| 127
|
py
|
rnlps
|
rnlps-master/rnlps/policies/non_contextual_policies.py
|
"""
Policies for non-contextual bandit problems.
"""
import numpy as np
import tensorflow as tf
import contextlib
from termcolor import cprint
from rnlps.policies.base import Trial, Policy
from rnlps.policies.base import BaseOracle, BaseFixed, BaseRandom
from rnlps.policies.base import BaseThompsonRecurrentNetwork, BaseThompsonSinFeedforwardNetwork
@contextlib.contextmanager
def _printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
class Oracle(BaseOracle):
pass
class Fixed(BaseFixed):
pass
class Random(BaseRandom):
pass
class UCB(Policy):
def __init__(self, bandit):
Policy.__init__(self, bandit)
def select(self, trial):
for arm in range(trial.n_arms):
if trial.pulls[arm] < 1:
return arm
ft = 1 + (trial.length + 1)*(np.log(trial.length + 1)**2)
bonus = np.sqrt((2.*np.log(ft))/trial.pulls)
indices = trial.average_rewards() + bonus
return np.argmax(indices)
def __repr__(self):
return 'UCB()'
class SW_UCB(Policy):
""" Sliding-window UCB implementation based on the paper,
'On Upper-Confidence Bound Policies for Non-Stationary Bandit Problems' -
https://arxiv.org/pdf/0805.3415.pdf """
def __init__(self, bandit, tau, seed, ksi = 0.6):
Policy.__init__(self, bandit)
self.tau = tau
self.ksi = ksi
self.random_state = np.random.RandomState(seed)
def select(self, trial):
for arm in self.random_state.permutation(trial.n_arms):
if trial.pulls[arm] < 1:
return arm
# Consider only the last tau steps
arms_tau = np.array(trial.arms[-self.tau:])
rewards_tau = np.array(trial.rewards[-self.tau:])
returns_tau = np.zeros(trial.n_arms)
N_tau = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
N_tau[arm] = np.sum(arms_tau == arm)
returns_tau[arm] = np.sum((arms_tau == arm) * rewards_tau)
out = np.full(trial.n_arms, float('inf'))
where = (N_tau > 0)
avg_rewards_tau = np.divide(returns_tau, N_tau, out=out, where=where)
out2 = np.full(trial.n_arms, float('inf'))
where2 = (N_tau > 0)
ins_sqrt_term = np.divide(self.ksi * np.log(min(self.tau, trial.length)), N_tau, out=out2, where=where2)
bonus = np.sqrt(ins_sqrt_term)
indices = avg_rewards_tau + bonus
# break ties in a random way
max_arms = np.argwhere(indices == np.amax(indices))
chosen_arm = self.random_state.choice(max_arms.flatten())
return chosen_arm
def __repr__(self):
return 'SW_UCB(tau={0}, ksi={1})'.format(self.tau, self.ksi)
class D_UCB(Policy):
""" Discounted UCB implementation based on the paper,
'On Upper-Confidence Bound Policies for Non-Stationary Bandit Problems' -
https://arxiv.org/pdf/0805.3415.pdf """
def __init__(self, bandit, gamma, seed, ksi = 0.6):
Policy.__init__(self, bandit)
self.gamma = gamma
self.ksi = ksi
self.random_state = np.random.RandomState(seed)
def select(self, trial):
for arm in self.random_state.permutation(trial.n_arms):
if trial.pulls[arm] < 1:
return arm
arms_all = np.array(trial.arms)
rewards_all = np.array(trial.rewards)
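        # Discount factors gamma^(t-1-s) for s = 0..t-1: the oldest reward is
        # weighted by gamma^(t-1), the most recent one by 1.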
discount_arr = np.flip(np.cumprod(self.gamma * np.ones(trial.length - 1)))
discount_arr = np.append(discount_arr, 1.0)
discounted_rewards = rewards_all * discount_arr
returns_gamma = np.zeros(trial.n_arms)
N_gamma = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
N_gamma[arm] = np.sum((arms_all == arm) * discount_arr)
returns_gamma[arm] = np.sum((arms_all == arm) * discounted_rewards)
out = np.full(trial.n_arms, float('inf'))
where = (N_gamma > 0)
avg_rewards_gamma = np.divide(returns_gamma, N_gamma, out=out, where=where)
n_tot_gamma = np.sum(N_gamma)
out2 = np.full(trial.n_arms, float('inf'))
where2 = (N_gamma > 0)
ins_sqrt_term = np.divide(self.ksi * np.log(n_tot_gamma), N_gamma, out=out2, where=where2)
bonus = 2 * np.sqrt(ins_sqrt_term)
indices = avg_rewards_gamma + bonus
# break ties in a random way
max_arms = np.argwhere(indices == np.amax(indices))
chosen_arm = self.random_state.choice(max_arms.flatten())
return chosen_arm
def __repr__(self):
return 'D_UCB(gamma={0}, ksi={1})'.format(self.gamma, self.ksi)
class ThompsonSamplingBernoulli(Policy):
""" Thompson sampling for the k-armed bernoulli bandit. """
def __init__(self, bandit, a, b, seed):
Policy.__init__(self, bandit)
self.a = a
self.b = b
self.random_state = np.random.RandomState(seed=seed)
def select(self, trial):
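        # Beta-Bernoulli conjugacy: with prior Beta(self.a, self.b), arm i's
        # posterior is Beta(self.a + successes_i, self.b + failures_i), where
        # successes_i = trial.returns[i] and failures_i = pulls_i - returns_i.
        # One sample is drawn per arm and the arm with the largest sample is
        # played (Thompson sampling).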
a = trial.returns
b = trial.pulls - trial.returns
means = np.zeros(trial.n_arms)
for i in range(trial.n_arms):
means[i] = self.random_state.beta(a[i] + self.a, b[i] + self.b)
return np.argmax(means)
def __repr__(self):
return 'ThompsonSamplingBernoulli(a={0}, b={1})'.format(self.a, self.b)
class ThompsonRecurrentNetwork(BaseThompsonRecurrentNetwork):
""" Recurrent neural-linear: Thompson sampling based policy by using
Bayesian linear regression on the representation(context) generated by
the penultimate layer of the recurrent architecture. """
def __init__(self, bandit, n_units, learning_rate, regularise_lambda,
epochs, train_every, std_targets, std_weights, verbose, seed):
BaseThompsonRecurrentNetwork.__init__(self, bandit, n_units,
learning_rate, regularise_lambda, epochs, train_every, std_targets,
std_weights, verbose, seed)
def _setup_input(self):
""" Returns the input and input size for reward prediction by the RNN. """
self._arms = tf.placeholder(tf.int32, shape=None, name='arms')
rewards = tf.reshape(self._rewards[:-1], (-1, 1))
arms_oh = tf.one_hot(self._arms, depth=self.bandit.n_arms,
dtype=tf.float32, name='arms_oh')
inputs = tf.concat([rewards, arms_oh], axis=1, name='inputs')
isize = self.bandit.n_arms + 1
return inputs, isize
def _get_pred_from_sampled_model(self, trial, w, state):
""" Obtains the predicted reward for every action under the sampled
model(w). """
pred = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
feed = {self._istate: state, self._arms: [arm],
self._rewards: [trial.rewards[-1], 0]}
arm_features = self.session.run(self._h_output, feed)[0]
pred[arm] = arm_features.dot(w)
return pred
def _get_feed_for_rnn(self, trial):
""" Returns the input feed for the action selection by the policy. """
feed = {self._arms: trial.arms, self._rewards: [0.] + trial.rewards}
return feed
class ThompsonSinFeedforwardNetwork(BaseThompsonSinFeedforwardNetwork):
""" Neural-linear: Thompson sampling with Bayesian linear regression on
the representation generated by a FFNN fed with handcrafted context and
sinusoidal input units."""
def __init__(self, bandit, order, periods, periods_dim, n_units,
learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed):
BaseThompsonSinFeedforwardNetwork.__init__(self, bandit, order, periods, periods_dim, n_units,
learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed)
def _setup_input_size(self):
""" Returns the number of units in the input layer, before and after
including the sinusoidal units. """
isize = len(self.periods)*2
isize += self.bandit.n_arms + (self.bandit.n_arms + 1) * self.order
        # add more units for the periodic transformation
isize_pre = isize + 1
isize_post = isize + self.periods_dim
return isize_pre, isize_post
def _example(self, arms, rewards, contexts, t):
""" Creates one sample of the handcrafted context-target pair. Used to
generate the training data for training the NN representation model. """
# Dataset to predict the reward based on handcrafted context and current action.
if t < self.order:
raise Exception('Incomplete observation.')
arms_oh = np.eye(self.bandit.n_arms)
observation = []
# Can be used for hard-coded periods
for p in self.periods:
angle = (2*np.pi*t)/p
observation += [np.cos(angle), np.sin(angle)]
observation.extend(arms_oh[arms[t]])
for i in range(self.order):
observation.extend(arms_oh[arms[t - i - 1]])
observation.append(rewards[t - i - 1])
observation.append(t) #add the raw time-step as the new input
target = rewards[t]
return observation, target
non_contextual_policies = {'Oracle': Oracle,
'Fixed': Fixed,
'Random': Random,
'UCB': UCB,
'SW_UCB' : SW_UCB,
'D_UCB' : D_UCB,
'ThompsonSamplingBernoulli': ThompsonSamplingBernoulli,
'ThompsonRecurrentNetwork': ThompsonRecurrentNetwork,
'ThompsonSinFeedforwardNetwork': ThompsonSinFeedforwardNetwork}
| 9,757
| 31.098684
| 112
|
py
|
rnlps
|
rnlps-master/rnlps/policies/contextual_linear_policies.py
|
"""
Policies for linear bandit problems.
"""
import numpy as np
import tensorflow as tf
import contextlib
from termcolor import cprint
from math import log
from rnlps.policies.base import Trial, Policy
from rnlps.policies.base import BaseOracle, BaseFixed, BaseRandom
from rnlps.policies.base import BaseThompsonRecurrentNetwork, BaseThompsonSinFeedforwardNetwork
@contextlib.contextmanager
def _printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
class ContextualLinearTrial(Trial):
def __init__(self, n_arms):
self.arm_embeddings = [] # actions are no longer one hot
Trial.__init__(self, n_arms)
def update_contexts(self, context):
# Don't append here. No need to store the entire set of previously available arms
self.contexts = context
def append(self, arm, reward, context, regret):
self.arm_embeddings.append(self.contexts[arm])
Trial.append(self, arm, reward, context, regret)
class ContextualLinearPolicy(Policy):
def __init__(self, bandit):
Policy.__init__(self, bandit)
self.contextual_bandit = 0
# we use the context to only collect the available arms
# prediction depends on the arm chosen
def interact(self, trial_length):
trial = ContextualLinearTrial(self.bandit.n_arms)
reset_context = self.bandit.reset()
trial.update_contexts(reset_context)
for i in range(trial_length):
arm = self.select(trial)
reward, context, regret = self.bandit.pull(arm)
trial.append(arm, reward, context, regret)
return trial
class Oracle(ContextualLinearPolicy, BaseOracle):
"""Selects the best available action at every time step. """
def __init__(self, bandit):
ContextualLinearPolicy.__init__(self, bandit)
class Fixed(ContextualLinearPolicy, BaseFixed):
"""Selects a fixed chosen action at every time step. """
def __init__(self, bandit, arm):
ContextualLinearPolicy.__init__(self, bandit)
self.arm = arm
class Random(ContextualLinearPolicy, BaseRandom):
"""Selects a random action at every time step. """
def __init__(self, bandit, seed):
ContextualLinearPolicy.__init__(self, bandit)
self.random_state = np.random.RandomState(seed)
class LinUCB(ContextualLinearPolicy):
def __init__(self, bandit, delta, alpha, lambda_reg, sigma_noise, seed):
ContextualLinearPolicy.__init__(self, bandit)
self.random_state = np.random.RandomState(seed)
self.arm_dims = bandit.dimension
self.delta = delta
self.alpha = alpha # multiplicative constant to tune exploration bonus
self.sigma_noise = sigma_noise
self.ucbs = np.zeros(bandit.n_arms)
self.theta_hat = np.zeros(self.arm_dims)
self.lambda_reg = lambda_reg
self.V = self.lambda_reg * np.eye(self.arm_dims)
self.inv_V = (1/self.lambda_reg) * np.eye(self.arm_dims)
self.b = np.zeros(self.arm_dims)
# assumes that norm of theta_star and actions are bounded by 1.
def select(self, trial):
# Update variables based on previous arm and reward
if trial.length >= 1:
self.V = self.V + np.outer(trial.arm_embeddings[-1], trial.arm_embeddings[-1])
self.b = self.b + trial.arm_embeddings[-1] * trial.rewards[-1]
self.inv_V = np.linalg.pinv(self.V)
self.theta_hat = np.dot(self.inv_V, self.b)
sqrt_beta_t = np.sqrt(self.lambda_reg)
sqrt_beta_t += self.sigma_noise * np.sqrt(2*log(1/self.delta) + self.arm_dims * log(1 + trial.length/(self.lambda_reg * self.arm_dims)))
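        # UCB index per arm: predicted reward theta_hat . a plus an exploration
        # bonus proportional to the arm's norm under the inverse design matrix.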
for arm in range(trial.n_arms):
invV_arm = np.dot(self.inv_V, trial.contexts[arm])
norm_a_with_invV = np.sqrt(np.dot(trial.contexts[arm], invV_arm))
self.ucbs[arm] = np.dot(self.theta_hat, trial.contexts[arm]) + self.alpha * sqrt_beta_t * norm_a_with_invV
max_arms = np.argwhere(self.ucbs == np.amax(self.ucbs))
chosen_arm = self.random_state.choice(max_arms.flatten())
return chosen_arm
def __repr__(self):
r = 'LinUCB(delta={0}, alpha={1}, lambda_reg={2}, sigma_noise={3})'
return r.format(self.delta, self.alpha, self.lambda_reg, self.sigma_noise)
class SW_LinUCB(ContextualLinearPolicy):
""" Implementation of SW-LinUCB, by by Cheung et. al -
'Learning to optimize under Non-Stationarity', AISTATS19.
Based on the code available at
https://github.com/YRussac/WeightedLinearBandits/blob/master/D_LinUCB_class.py """
def __init__(self, bandit, delta, alpha, tau, lambda_reg, sigma_noise, seed):
ContextualLinearPolicy.__init__(self, bandit)
self.random_state = np.random.RandomState(seed)
self.arm_dims = bandit.dimension
self.delta = delta
self.alpha = alpha # multiplicative constant to tune exploration bonus
self.tau = tau # window size
self.sigma_noise = sigma_noise
self.ucbs = np.zeros(bandit.n_arms)
self.theta_hat = np.zeros(self.arm_dims)
self.lambda_reg = lambda_reg
self.V = self.lambda_reg * np.eye(self.arm_dims)
self.inv_V = (1/self.lambda_reg) * np.eye(self.arm_dims)
self.b = np.zeros(self.arm_dims)
self.beta = np.sqrt(self.lambda_reg) + self.sigma_noise * np.sqrt(self.arm_dims * np.log((1 + self.tau/self.lambda_reg)/self.delta))
# beta_t is fixed in this algorithm
def select(self, trial):
# Update variables based on previous arm and reward
if trial.length >= 1:
if trial.length <= self.tau:
self.V = self.V + np.outer(trial.arm_embeddings[-1], trial.arm_embeddings[-1])
self.b = self.b + trial.arm_embeddings[-1] * trial.rewards[-1]
self.inv_V = np.linalg.pinv(self.V)
self.theta_hat = np.dot(self.inv_V, self.b)
else:
a_removal = trial.arm_embeddings[-(self.tau + 1)]
r_removal = trial.rewards[-(self.tau + 1)]
self.V = self.V + np.outer(trial.arm_embeddings[-1], trial.arm_embeddings[-1]) - np.outer(a_removal, a_removal)
self.b = self.b + trial.arm_embeddings[-1] * trial.rewards[-1] - a_removal * r_removal
self.inv_V = np.linalg.pinv(self.V)
self.theta_hat = np.dot(self.inv_V, self.b)
for arm in range(trial.n_arms):
invV_arm = np.dot(self.inv_V, trial.contexts[arm])
norm_a_with_invV = np.sqrt(np.dot(trial.contexts[arm], invV_arm))
self.ucbs[arm] = np.dot(self.theta_hat, trial.contexts[arm]) + self.alpha * self.beta * norm_a_with_invV
max_arms = np.argwhere(self.ucbs == np.amax(self.ucbs))
chosen_arm = self.random_state.choice(max_arms.flatten())
return chosen_arm
def __repr__(self):
r = 'SW_LinUCB(delta={0}, alpha={1}, tau={2}, lambda_reg={3}, sigma_noise={4})'
return r.format(self.delta, self.alpha, self.tau, self.lambda_reg, self.sigma_noise)
class D_LinUCB(ContextualLinearPolicy):
""" Implementation of D-LinUCB, by Russac et. al -
'Weighted Linear Bandits in Non-Stationary Environments', NeurIPS19.
Based on the code available at
https://github.com/YRussac/WeightedLinearBandits/blob/master/D_LinUCB_class.py """
def __init__(self, bandit, delta, alpha, gamma, lambda_reg, sigma_noise, seed):
ContextualLinearPolicy.__init__(self, bandit)
self.random_state = np.random.RandomState(seed)
self.arm_dims = bandit.dimension
self.delta = delta
self.alpha = alpha # multiplicative constant to tune exploration bonus
self.gamma = gamma # discount factor
self.sigma_noise = sigma_noise
self.ucbs = np.zeros(bandit.n_arms)
self.theta_hat = np.zeros(self.arm_dims)
self.lambda_reg = lambda_reg
self.V = self.lambda_reg * np.eye(self.arm_dims)
self.V_tilde = self.lambda_reg * np.eye(self.arm_dims)
self.inv_V = (1/self.lambda_reg) * np.eye(self.arm_dims)
self.b = np.zeros(self.arm_dims)
self.gamma2_t = 1
def select(self, trial):
# Update variables based on previous arm and reward
if trial.length >= 1:
self.gamma2_t *= self.gamma ** 2
            self.V = (self.gamma * self.V
                      + np.outer(trial.arm_embeddings[-1], trial.arm_embeddings[-1])
                      + (1 - self.gamma) * self.lambda_reg * np.eye(self.arm_dims))
            # Their update - according to the code
            self.V_tilde = ((self.gamma**2) * self.V
                            + np.outer(trial.arm_embeddings[-1], trial.arm_embeddings[-1])
                            + (1 - self.gamma**2) * self.lambda_reg * np.eye(self.arm_dims))
            # The update according to the paper?
            # self.V_tilde = ((self.gamma**2) * self.V_tilde
            #                 + np.outer(trial.arm_embeddings[-1], trial.arm_embeddings[-1])
            #                 + (1 - self.gamma**2) * self.lambda_reg * np.eye(self.arm_dims))
self.b = self.gamma * self.b + trial.arm_embeddings[-1] * trial.rewards[-1]
self.inv_V = np.linalg.pinv(self.V)
self.theta_hat = np.dot(self.inv_V, self.b)
beta_t = np.sqrt(self.lambda_reg)
beta_t += self.sigma_noise * np.sqrt(2*log(1/self.delta) + self.arm_dims * np.log(1 + (1-self.gamma2_t)/(self.arm_dims * self.lambda_reg * (1 - self.gamma**2))))
for arm in range(trial.n_arms):
invV_arm = np.dot(np.matmul(self.inv_V,np.matmul(self.V_tilde,self.inv_V)), trial.contexts[arm])
norm_a_with_invV = np.sqrt(np.dot(trial.contexts[arm], invV_arm))
self.ucbs[arm] = np.dot(self.theta_hat, trial.contexts[arm]) + self.alpha * beta_t * norm_a_with_invV
max_arms = np.argwhere(self.ucbs == np.amax(self.ucbs))
chosen_arm = self.random_state.choice(max_arms.flatten())
return chosen_arm
def __repr__(self):
r = 'D_LinUCB(delta={0}, alpha={1}, gamma={2}, lambda_reg={3}, sigma_noise={4})'
return r.format(self.delta, self.alpha, self.gamma, self.lambda_reg, self.sigma_noise)
class ThompsonRecurrentNetwork(ContextualLinearPolicy, BaseThompsonRecurrentNetwork):
""" Recurrent neural-linear: Thompson sampling based policy by using
Bayesian linear regression on the representation(context) generated by
the penultimate layer of the recurrent architecture. """
def __init__(self, bandit, n_units, learning_rate, regularise_lambda, epochs,
train_every, std_targets, std_weights, verbose, seed):
# Overrides the __init__ method from BaseThompsonRecurrentNetwork
ContextualLinearPolicy.__init__(self, bandit)
self.n_units = n_units
if len(self.n_units) < 2:
raise Exception('Invalid number of layers.')
self.learning_rate = learning_rate
self.regularise_lambda = regularise_lambda
self.epochs = epochs
self.train_every = train_every
self.var_targets = std_targets**2.
self.var_weights = std_weights**2.
self.one_over_lambda = self.var_weights/self.var_targets
self.verbose = verbose
self.random_state = np.random.RandomState(seed=seed)
self.arm_dims = bandit.dimension
self._setup(seed)
def select(self, trial):
""" Selects which arm to play in the current round. """
# Overrides the select method in BaseThompsonRecurrentNetwork
# we don't need to pull every arm once here
# The first pull is just arm = 0, after that we use the network
if trial.length < 1:
return 0
else:
return self._select_from_policy(trial)
def _setup_input(self):
""" Returns the input and input size for reward prediction by the RNN. """
self._arms = tf.placeholder(tf.float32, shape=[None, self.arm_dims], name='arms')
rewards = tf.reshape(self._rewards[:-1], (-1, 1))
        # past rewards are features for prediction; the first value is 0 and
        # the last one cannot be used as a feature for the next step
arms_emb = self._arms
inputs = tf.concat([rewards, arms_emb], axis=1, name='inputs')
isize = self.arm_dims + 1
return inputs, isize
def _get_pred_from_sampled_model(self, trial, w, state):
""" Obtains the predicted reward for every action under the sampled
model(w). """
pred = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
feed = {self._istate: state, self._arms: np.reshape(trial.contexts[arm], (1, -1)),
self._rewards: [trial.rewards[-1], 0]}#, self._contexts: [trial.contexts[-1]]}
arm_features = self.session.run(self._h_output, feed)[0]
pred[arm] = arm_features.dot(w)
return pred
def _get_feed_for_rnn(self, trial):
""" Returns the input feed for the action selection by the policy. """
feed = {self._arms: trial.arm_embeddings, self._rewards: [0.] + trial.rewards}
return feed
class ThompsonSinFeedforwardNetwork(ContextualLinearPolicy, BaseThompsonSinFeedforwardNetwork):
""" Neural-linear: Thompson sampling with Bayesian linear regression on
the representation generated by a FFNN fed with handcrafted context and
sinusoidal input units."""
def __init__(self, bandit, order, periods, periods_dim, n_units,
learning_rate, regularise_lambda, epochs, train_every,
std_targets, std_weights, verbose, seed):
# Overrides the __init__ method from BaseThompsonSinFeedforwardNetwork
ContextualLinearPolicy.__init__(self, bandit)
self.order = order
self.periods = np.array(periods)
self.periods_dim = periods_dim
self.n_units = n_units
self.learning_rate = learning_rate
self.regularise_lambda = regularise_lambda
self.epochs = epochs
self.train_every = train_every
self.var_targets = std_targets**2.
self.var_weights = std_weights**2.
self.one_over_lambda = self.var_weights/self.var_targets
self.verbose = verbose
self.arm_dims = bandit.dimension
self.random_state = np.random.RandomState(seed=seed)
self._setup(seed)
def select(self, trial):
""" Selects which arm to play in the current round. """
# Overrides the select method in BaseThompsonSinFeedforwardNetwork
# we don't need to pull every arm once here
if trial.length < (self.order + 1):
return np.argmin(trial.pulls)
else:
return self._select_from_policy(trial)
def _get_observation_target_pair(self, trial):
""" Creates the dataset for training the NN representation model. """
# Overrides the _get_observation_target_pair method in BaseThompsonSinFeedforwardNetwork
# uses the arm embeddings for prediction
observations = []
targets = []
for t in range(self.order, trial.length):
observation, target = self._example(trial.arm_embeddings, trial.rewards, t)
observations.append(observation)
targets.append(target)
return observations, targets
def _get_pred_from_sampled_model(self, trial, w):
""" Obtains the predicted reward for every action under the sampled
model(w). """
# Overrides the _get_pred_from_sampled_model method in BaseThompsonSinFeedforwardNetwork
# uses the arm embeddings for prediction
pred = np.zeros(trial.n_arms)
for arm in range(trial.n_arms):
observation, _ = self._example(trial.arm_embeddings + [trial.contexts[arm]],
trial.rewards + [0.],
trial.length)
feed = {self._observations: [observation]}
arm_features = self.session.run(self._h_output, feed)[0]
pred[arm] = arm_features.dot(w)
return pred
def _setup_input_size(self):
""" Returns the number of units in the input layer, before and after
including the sinusoidal units. """
isize = len(self.periods)*2
isize += self.arm_dims + (self.arm_dims + 1) * self.order
        # add more units for the periodic transformation
isize_pre = isize + 1
isize_post = isize + self.periods_dim
return isize_pre, isize_post
def _example(self, arm_embeddings, rewards, t):
""" Creates one sample of the handcrafted context-target pair. Used to
generate the training data for training the NN representation model. """
if t < self.order:
raise Exception('Incomplete observation.')
observation = []
for p in self.periods:
angle = (2*np.pi*t)/p
observation += [np.cos(angle), np.sin(angle)]
observation.extend(arm_embeddings[t])
for i in range(self.order):
observation.extend(arm_embeddings[t - i - 1])
observation.append(rewards[t - i - 1])
observation.append(t) #add the raw time-step as the new input
target = rewards[t]
return observation, target
contextual_linear_policies = {'Oracle': Oracle,
'Fixed': Fixed,
'Random': Random,
'LinUCB': LinUCB,
'SW_LinUCB': SW_LinUCB,
'D_LinUCB': D_LinUCB,
'ThompsonRecurrentNetwork': ThompsonRecurrentNetwork,
'ThompsonSinFeedforwardNetwork': ThompsonSinFeedforwardNetwork}
| 17,762
| 36.161088
| 185
|
py
|
rnlps
|
rnlps-master/rnlps/policies/__init__.py
| 0
| 0
| 0
|
py
|
|
rnlps
|
rnlps-master/rnlps/examples/example_hgrids/hgrid_linear.py
|
"""
Creates config files for experiments and hyperparameter grid search.
"""
import os
import argparse
import json
import numpy as np
from itertools import product
from collections import namedtuple
# Configuration templates with different arguments
CfgOracle = namedtuple('Oracle', [])
CfgRandom = namedtuple('Random', ['seed'])
CfgSWLinUCB = namedtuple('SW_LinUCB', ['delta', 'alpha', 'tau', 'lambda_reg', 'sigma_noise', 'seed'])
CfgDLinUCB = namedtuple('D_LinUCB', ['delta', 'alpha', 'gamma', 'lambda_reg', 'sigma_noise', 'seed'])
# Recurrent neural-linear
CfgThompsonRNN = namedtuple('ThompsonRecurrentNetwork',
['n_units', 'learning_rate', 'regularise_lambda', 'epochs',
'train_every', 'std_targets', 'std_weights',
'verbose', 'seed'])
# Neural-linear with sinusoidal units
CfgThompsonSinNN = namedtuple('ThompsonSinFeedforwardNetwork',
['order', 'periods', 'periods_dim', 'n_units',
'learning_rate', 'regularise_lambda','epochs', 'train_every',
'std_targets', 'std_weights', 'verbose', 'seed'])
def main():
# Bandit settings - problem type and specific instance.
bandit = "RotatingLinearBandit2d"
bandit_parameters = {"n_arms": 25, "time_period": 32, "seed": 0}
trial_length = 4096
# Policy settings: Defines the hyperparameter grid.
seeds = list(range(5))
configs = []
# Policy settings (Oracle)
configs.append(CfgOracle())
# Policy settings (Random)
configs.append(CfgRandom(seed=seeds))
# Policy settings grid (Recurrent neural-linear)
configs.append(CfgThompsonRNN(n_units=[[32, 32, 32]],
learning_rate=[0.001, 0.01, 0.1],
regularise_lambda=[0.001],
epochs=[16, 64],
train_every=[32, 128],
std_targets=[0.1, 0.3],
std_weights=[0.5, 1],
verbose=[False],
seed=seeds))
# Policy settings grid (Neural-linear)
configs.append(CfgThompsonSinNN(order=[1, 4],
periods=[[]],
periods_dim =[2,4,8],
n_units= [[32, 32, 32], [64, 64, 64]],
learning_rate=[0.001, 0.01, 0.1],
regularise_lambda=[0.001],
epochs=[16, 64],
train_every=[32, 128],
std_targets=[0.1, 0.3],
std_weights=[0.5, 1],
verbose=[False],
seed=seeds))
configs.append(CfgSWLinUCB(delta = [0.1],
alpha = [1],
tau = [128, 256, 512, 1024],
lambda_reg = [0.1],
sigma_noise = [0.05],
seed=seeds))
configs.append(CfgDLinUCB(delta = [0.1],
alpha = [1],
gamma = [0.9, 0.95, 0.98, 0.99],
lambda_reg = [0.1],
sigma_noise = [0.05],
seed=seeds))
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiments directory.')
args = parser.parse_args()
if not os.path.isdir(args.directory):
os.makedirs(args.directory)
# Creates folders with config files for all combinations of hyperparameters.
i = 1
for policy_config in configs:
PolicyConfig = type(policy_config)
combos = map(lambda c: PolicyConfig(*c), product(*policy_config))
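        # `product(*policy_config)` is the Cartesian product over each field's
        # list of values; every combination becomes one config.json folder.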
for combo in combos:
print(combo)
cfg = {'bandit': bandit,
'bandit_parameters': bandit_parameters,
'trial_length': trial_length,
'policy': PolicyConfig.__name__,
'policy_parameters': combo._asdict()}
path = os.path.join(args.directory, str(i))
if not os.path.isdir(path):
os.makedirs(path)
f = open(os.path.join(path, 'config.json'), 'w')
json.dump(cfg, f)
f.close()
i += 1
if __name__ == "__main__":
main()
| 4,560
| 34.632813
| 101
|
py
|
rnlps
|
rnlps-master/rnlps/examples/example_hgrids/hgrid_contextual.py
|
"""
Creates config files for experiments and hyperparameter grid search.
"""
import os
import argparse
import json
import numpy as np
from itertools import product
from collections import namedtuple
# Configuration templates with different arguments
CfgOracle = namedtuple('Oracle', [])
CfgRandom = namedtuple('Random', ['seed'])
# Recurrent neural-linear
CfgThompsonRNN = namedtuple('ThompsonRecurrentNetwork',
['n_units', 'learning_rate', 'regularise_lambda', 'epochs',
'train_every', 'std_targets', 'std_weights',
'verbose', 'seed'])
# Neural-linear with sinusoidal units
CfgThompsonSinNN = namedtuple('ThompsonSinFeedforwardNetwork',
['order', 'periods', 'periods_dim', 'n_units',
'learning_rate', 'regularise_lambda','epochs', 'train_every',
'std_targets', 'std_weights', 'verbose', 'seed'])
def main():
bandit = "StationaryContextualBandit"
bandit_parameters = {"dataset": "wall_following_24", "seed": 0}
    # Number of interactions (must be less than the size of the dataset)
trial_length = 5455
# Policy settings: Defines the hyperparameter grid.
seeds = list(range(5))
configs = []
# Policy settings (Oracle)
configs.append(CfgOracle())
# Policy settings (Random)
configs.append(CfgRandom(seed=seeds))
# Policy settings grid (Recurrent neural-linear)
configs.append(CfgThompsonRNN(n_units=[[32, 32, 32]],
learning_rate=[0.001, 0.01, 0.1],
regularise_lambda=[0.001],
epochs=[16, 64],
train_every=[32, 128],
std_targets=[0.3],
std_weights=[0.5, 1],
verbose=[False],
seed=seeds))
# Policy settings grid (Neural-linear)
configs.append(CfgThompsonSinNN(order=[1,4],
periods=[[]],
periods_dim =[2,4,8],
n_units= [[64, 64, 64]],
learning_rate=[0.001, 0.01, 0.1],
regularise_lambda=[0.001],
epochs=[16, 64],
train_every=[32, 128],
std_targets=[0.1, 0.3],
std_weights=[0.5, 1],
verbose=[False],
seed=seeds))
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiments directory.')
args = parser.parse_args()
if not os.path.isdir(args.directory):
os.makedirs(args.directory)
# Creates folders with config files for all combinations of hyperparameters.
i = 1
for policy_config in configs:
PolicyConfig = type(policy_config)
combos = map(lambda c: PolicyConfig(*c), product(*policy_config))
for combo in combos:
print(combo)
cfg = {'bandit': bandit,
'bandit_parameters': bandit_parameters,
'trial_length': trial_length,
'policy': PolicyConfig.__name__,
'policy_parameters': combo._asdict()}
path = os.path.join(args.directory, str(i))
if not os.path.isdir(path):
os.makedirs(path)
f = open(os.path.join(path, 'config.json'), 'w')
json.dump(cfg, f)
f.close()
i += 1
if __name__ == "__main__":
main()
| 3,770
| 32.972973
| 90
|
py
|
rnlps
|
rnlps-master/rnlps/scripts/hgrid.py
|
"""
Creates config files for experiments and hyperparameter grid search.
"""
import os
import argparse
import json
import numpy as np
from itertools import product
from collections import namedtuple
# Configuration templates with different arguments
CfgOracle = namedtuple('Oracle', [])
CfgRandom = namedtuple('Random', ['seed'])
CfgSW_UCB = namedtuple('SW_UCB', ['tau','ksi','seed'])
CfgD_UCB = namedtuple('D_UCB', ['gamma','ksi','seed'])
# Recurrent neural-linear
CfgThompsonRNN = namedtuple('ThompsonRecurrentNetwork',
['n_units', 'learning_rate', 'regularise_lambda', 'epochs',
'train_every', 'std_targets', 'std_weights',
'verbose', 'seed'])
# Neural-linear with sinusoidal units
CfgThompsonSinNN = namedtuple('ThompsonSinFeedforwardNetwork',
['order', 'periods', 'periods_dim', 'n_units',
'learning_rate', 'regularise_lambda','epochs', 'train_every',
'std_targets', 'std_weights', 'verbose', 'seed'])
def main():
# Bandit settings - problem type and specific instance.
bandit = "FlippingGaussianBandit"
bandit_parameters = {"means": [0.1, 0.9, 0.8, 0.2, 0.3, 0.7, 0.6, 0.4] , "half_period": 10, "std": 0.1, "seed": 0}
# Another problem setting -
# bandit = "SinusoidalBernoulliBandit"
# bandit_parameters = {"n_arms": 5, "step_size": (2*np.pi/32), "seed": 0}
# Number of interactions
trial_length = 4096
# Policy settings: Defines the hyperparameter grid.
seeds = list(range(3))
configs = []
# Policy settings (Oracle)
configs.append(CfgOracle())
# Policy settings (Random)
configs.append(CfgRandom(seed=seeds))
# Policy settings grid (Recurrent neural-linear)
configs.append(CfgThompsonRNN(n_units=[[32, 32, 32]],
learning_rate=[0.01],
regularise_lambda=[0.001],
epochs=[16],
train_every=[32],
std_targets=[0.1],
std_weights=[0.5],
verbose=[False],
seed=seeds))
# Policy settings grid (Neural-linear)
configs.append(CfgThompsonSinNN(order=[1],
periods=[[]],
periods_dim = [1],
n_units= [[32, 32, 32]],
learning_rate=[0.1],
regularise_lambda=[0.001],
epochs=[16],
train_every=[32],
std_targets=[0.1],
std_weights=[1.0],
verbose=[False],
seed=seeds))
# Add SW_UCB & D_UCB
configs.append(CfgSW_UCB(tau=[25, 50, 75],
ksi=[0.5],
seed=seeds))
configs.append(CfgD_UCB(gamma = [0.95, 0.98, 0.99],
ksi=[0.5],
seed=seeds))
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiments directory.')
args = parser.parse_args()
if not os.path.isdir(args.directory):
os.makedirs(args.directory)
# Creates folders with config files for all combinations of hyperparameters.
i = 1
for policy_config in configs:
PolicyConfig = type(policy_config)
combos = map(lambda c: PolicyConfig(*c), product(*policy_config))
for combo in combos:
print(combo)
cfg = {'bandit': bandit,
'bandit_parameters': bandit_parameters,
'trial_length': trial_length,
'policy': PolicyConfig.__name__,
'policy_parameters': combo._asdict()}
path = os.path.join(args.directory, str(i))
if not os.path.isdir(path):
os.makedirs(path)
f = open(os.path.join(path, 'config.json'), 'w')
json.dump(cfg, f)
f.close()
i += 1
if __name__ == "__main__":
main()
| 4,355
| 32.507692
| 118
|
py
|
rnlps
|
rnlps-master/rnlps/scripts/hp_sensitivity_plot.py
|
"""
Generates the hyperparameter sensitivity plot. Takes as an argument the
directory that contains the summary file (policy_mean_perf.csv) which is
generated by create_summary.py
"""
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import os
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
def main():
sns.set(context='paper', style='darkgrid', font_scale=3, rc={'legend.frameon':False,
'lines.linewidth':6.0})
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiments directory.')
args = parser.parse_args()
currd = os.getcwd()
os.chdir(args.directory)
exists = os.path.isfile("./policy_mean_perf.csv")
if not exists:
raise Exception("policy_mean_perf.csv does not exist in this folder.\
Check the path. Or you may have forgotten to run create_summary.py")
df = pd.read_csv("policy_mean_perf.csv")
# Considering only the neural policies here-
df = df[df.Policy.str.contains('Network')]
df['Policy_Type'] = 'NN'
df.loc[df.Policy.str.contains('RecurrentNetwork'), 'Policy_Type'] = 'RNN'
# Number of configs scales the line plot to be equal on the x-axis.
nn_configs = len(df[df.Policy_Type == 'NN'])
rnn_configs = len(df[df.Policy_Type == 'RNN'])
df["rank"] = df.groupby("Policy_Type")["Mean_Return"].rank(ascending=False)
df.loc[df.Policy_Type == 'RNN', 'rank'] = np.linspace(1,nn_configs, num=rnn_configs)
plt.figure(figsize=(16,9))
c_list = sns.color_palette()
c_palette = {'RNN' : c_list[0],
'NN': c_list[1]}
ax = sns.lineplot(x='rank', y='Mean_Return', hue='Policy_Type', palette=c_palette,
data=df, markers=True, marker ='.', markersize=12)
handles, labels = plt.gca().get_legend_handles_labels()
order = [1,2]
plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order])
# Turn off x-ticks
plt.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False)
plt.xlabel('hyperparameter setting (best to worst)')
plt.ylabel('cumulative reward')
plt.savefig('hparam_sensitivity.pdf', bbox_inches='tight',pad_inches = 0)
if __name__ == "__main__":
main()
| 2,301
| 28.139241
| 88
|
py
|
rnlps
|
rnlps-master/rnlps/scripts/run.py
|
"""
Runs a single experiment for a particular configuration of bandit and
policy settings. Saves results in trial.csv.
"""
import os
import argparse
import json
import numpy as np
import pandas as pd
from rnlps.environments.non_contextual_bandits import non_contextual_bandits
from rnlps.environments.contextual_bandits import contextual_bandits
from rnlps.environments.linear_bandits import linear_bandits
from rnlps.policies.non_contextual_policies import non_contextual_policies
from rnlps.policies.contextual_policies import contextual_policies
from rnlps.policies.contextual_linear_policies import contextual_linear_policies
def main():
""" Example configuration file (also see the output of hgrid.py):
{
"bandit": "StationaryBernoulliBandit",
"bandit_parameters": {"means": [0.25, 0.5, 0.75], "seed": 0},
"policy": "ThompsonSamplingBernoulli",
"policy_parameters": {"a": 1.0, "b": 1.0, "seed": 1},
"trial_length": 100
}
"""
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiment directory.')
args = parser.parse_args()
f = open(os.path.join(args.directory, 'config.json'), 'r')
config = json.load(f)
f.close()
if config['bandit'] in non_contextual_bandits.keys():
bandit = non_contextual_bandits[config['bandit']](**config['bandit_parameters'])
policy = non_contextual_policies[config['policy']](bandit, **config['policy_parameters'])
elif config['bandit'] in contextual_bandits.keys():
bandit = contextual_bandits[config['bandit']](**config['bandit_parameters'])
policy = contextual_policies[config['policy']](bandit, **config['policy_parameters'])
else:
bandit = linear_bandits[config['bandit']](**config['bandit_parameters'])
policy = contextual_linear_policies[config['policy']](bandit, **config['policy_parameters'])
trial_length = config['trial_length']
trial = policy.interact(trial_length)
df = pd.DataFrame({'Pull': np.arange(trial_length) + 1,
'Return': trial.cumulative_rewards(),
'Arm_Pulled': trial.arms,
'Regret': trial.cumulative_regret(),
'Policy': repr(policy)})
df.to_csv(os.path.join(args.directory, 'trial.csv'), index=False)
if __name__ == '__main__':
main()
| 2,371
| 34.939394
| 100
|
py
|
rnlps
|
rnlps-master/rnlps/scripts/create_summary.py
|
"""
Creates 2 csv files summarising the performance of all the policies
on an experiment.
analysis.csv - return (cumulative reward) of every run from each individual
experiment.
policy_mean_perf.csv - aggregates performance across random seeds to
provide the mean and standard deviation of the return.
"""
import os
import argparse
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiments directory.')
args = parser.parse_args()
currd = os.getcwd()
os.chdir(args.directory)
folders = os.listdir()
folders = [f for f in folders if f.isdigit()]
d2 = pd.DataFrame(columns=['Policy','Pull','Return'])
for f in folders:
exists = os.path.isfile(f + "/trial.csv")
if exists:
try:
df = pd.read_csv(f + "/trial.csv")
df = df[['Policy','Pull','Return']]
r = df.iloc[-1,:]
r['folder'] = f
d2 = d2.append(r)
except Exception as e:
print(e)
print("Folder does not have trial.csv: \n")
print(f)
d2 = d2.sort_values(by='Return', ascending = False)
d2 = d2[['Policy', 'Pull', 'Return', 'folder']]
aggregations = {'Return':{'Mean_Return': 'mean', 'Std_Return': 'std',
'Num_experiments': 'count'}}
d3 = d2.groupby('Policy').agg(aggregations)
d3.columns = d3.columns.droplevel()
d3 = d3.sort_values(by='Mean_Return', ascending=False)
# Save csv file in the 'Experiments directory' which was given as an argument.
d2.to_csv("analysis.csv", index=None)
d3.to_csv("policy_mean_perf.csv")
os.chdir(currd)
if __name__ == '__main__':
main()
| 1,784
| 27.790323
| 82
|
py
|
rnlps
|
rnlps-master/rnlps/scripts/regret_analysis.py
|
"""
Generates the regret plot for an experiment. Includes the regret curves for
the random policy, conventional algorithms like SW-UCB, and the default and
best neural bandits.
Usage:
$ python3 regret_analysis.py experiment_folder/
With no additional flag this takes the 'best' rnn and ffnn policies from the
given_best_rnn_policy and given_best_ffnn_policy variables in this script.
$ python3 regret_analysis.py experiment_folder/ --computebest
Chooses the (R)NN policy with the highest return in this folder as 'best'
for the plot.
$ python3 regret_analysis.py experiment_folder/ --nondefaultasbest
Assumes that the other non-default configuration in this folder is the
'best'.
"""
import matplotlib
matplotlib.use('Agg')
import os
import argparse
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from rnlps.environments.non_contextual_bandits import non_contextual_bandits
from rnlps.environments.contextual_bandits import contextual_bandits
from rnlps.environments.linear_bandits import linear_bandits
sns.set(context='paper', style='darkgrid', font_scale=3, rc={'legend.frameon':False, 'lines.linewidth':6.0})
# Modify these if using different defaults
contextual_default_ffnn = "ThompsonSinFeedforwardNetwork(order=1,periods=[],periods_dim=2,n_units=[32, 32, 32],learning_rate=0.01,regL2=0.001,epochs=64,train_every=32,std_targets=0.1,std_weights=1.0)"
contextual_default_rnn = "ThompsonRecurrentNetwork(n_units=[32,32,32],learning_rate=0.001,epochs=64,train_every=32,std_targets=0.3,std_weights=0.5,regL2=0.001)"
non_contextual_default_ffnn = "ThompsonFeedforwardNetwork(order=1,periods=[],periods_dim=1,n_units=[32,32,32],learning_rate=0.1,regL2=0.001,epochs=16,train_every=32,std_targets=0.1,std_weights=1.0)"
non_contextual_default_rnn = "ThompsonRecurrentNetwork(n_units=[32,32,32],learning_rate=0.01,regL2=0.001,epochs=16,train_every=32,std_targets=0.1,std_weights=0.5)"
given_best_ffnn_policy = ''
given_best_rnn_policy = ''
def get_empirical_regret(frame, bandit, trial_length):
ecr = bandit.expected_cumulative_rewards(trial_length)
return ecr - frame['Return']
def main():
# Policies to be considered in the plot, more will be appended later.
list_policies = ['Random()']
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Experiments directory.')
    parser.add_argument('--computebest', help='Find the best (R)NN configuration',
action='store_true')
parser.add_argument('--nondefaultasbest', help='Assumes the config other than default is the best',
action='store_true')
args = parser.parse_args()
dirs = [os.path.join(args.directory, d)
for d in os.listdir(args.directory) if not d.startswith('_')]
dirs = [d for d in dirs if os.path.isdir(d)]
configs = {}
for d in dirs:
f = open(os.path.join(d, 'config.json'), 'r')
configs[d] = json.load(f)
f.close()
reference = configs[dirs[0]]
# Consistency check
bandit_settings = ['bandit', 'bandit_parameters','trial_length']
for d, config in configs.items():
for p in bandit_settings:
if config[p] != reference[p]:
if p == "trial_length":
print("\nThe trial length is different\n")
else:
print("\nThe difference is in ", p)
print("Current config: \n")
print(config[p])
print("Reference config: \n")
print(reference[p])
if reference['bandit'] in non_contextual_bandits.keys():
default_ffnn_policy = non_contextual_default_ffnn
default_rnn_policy = non_contextual_default_rnn
elif reference['bandit'] in contextual_bandits.keys():
default_ffnn_policy = contextual_default_ffnn
default_rnn_policy = contextual_default_rnn
else:
default_ffnn_policy = contextual_default_ffnn
default_rnn_policy = contextual_default_rnn
default_rnn_policy = default_rnn_policy.replace(' ', '')
default_ffnn_policy = default_ffnn_policy.replace(' ','')
list_policies.append(default_ffnn_policy)
list_policies.append(default_rnn_policy)
df = pd.DataFrame()
for d in dirs:
if os.path.exists(os.path.join(d, 'trial.csv')):
frame = pd.read_csv(os.path.join(d, 'trial.csv'))
frame.Policy = frame.Policy.str.replace(' ', '')
frame = frame.sort_values(by='Pull')
# Currently, we report the empirical regret for the non-contextual and contextual bandit problems.
# This will be made consistent in the next version of the paper.
if reference['bandit'] in linear_bandits.keys():
frame['Regret'] = frame['Regret']
elif reference['bandit'] in non_contextual_bandits.keys():
bandit = non_contextual_bandits[reference['bandit']](**reference['bandit_parameters'])
frame['Regret'] = get_empirical_regret(frame, bandit, reference['trial_length'])
elif reference['bandit'] in contextual_bandits.keys():
bandit = contextual_bandits[reference['bandit']](**reference['bandit_parameters'])
frame['Regret'] = get_empirical_regret(frame, bandit, reference['trial_length'])
df = df.append(frame)
else:
msg = 'Warning: missing trial {0} for {1}({2}).'
print(msg.format(d, configs[d]['policy'],
configs[d]['policy_parameters']))
last = df[df['Pull'] == reference['trial_length']]
last = last.loc[:, ['Return', 'Policy']]
p_group = last.groupby('Policy').mean().sort_values(by='Return',
ascending=False)
with pd.option_context('display.max_colwidth', -1):
print(p_group)
# Add the best conventional bandit algorithms to the list of policies if they
# are present.
if reference['bandit'] in linear_bandits.keys():
if p_group.index.str.contains('D_LinUCB').any():
D_LinUCB_results = p_group[p_group.index.str.contains('D_LinUCB')]
best_D_LinUCB_policy = D_LinUCB_results.Return.idxmax()
list_policies.append(best_D_LinUCB_policy)
if p_group.index.str.contains('SW_LinUCB').any():
SW_LinUCB_results = p_group[p_group.index.str.contains('SW_LinUCB')]
best_SW_LinUCB_policy = SW_LinUCB_results.Return.idxmax()
list_policies.append(best_SW_LinUCB_policy)
elif reference['bandit'] in non_contextual_bandits.keys():
if p_group.index.str.contains('D_UCB').any():
D_UCB_results = p_group[p_group.index.str.contains('D_UCB')]
best_D_UCB_policy = D_UCB_results.Return.idxmax()
list_policies.append(best_D_UCB_policy)
if p_group.index.str.contains('SW_UCB').any():
SW_UCB_results = p_group[p_group.index.str.contains('SW_UCB')]
best_SW_UCB_policy = SW_UCB_results.Return.idxmax()
list_policies.append(best_SW_UCB_policy)
# Add the best neural policies
ffnn_policies = p_group[p_group.index.str.contains('FeedforwardNetwork')]
rnn_policies = p_group[p_group.index.str.contains('RecurrentNetwork')]
if args.computebest:
best_ffnn_policy = ffnn_policies.Return.idxmax()
best_rnn_policy = rnn_policies.Return.idxmax()
elif args.nondefaultasbest:
# Works when we have run the experiment only with default and optionally
# another policy that was the best during the hyperparameter search.
# Eg. if there is only one RNN default policy, then we assume there is
# no separate best RNN policy
if ((len(ffnn_policies) > 2) or (len(rnn_policies) > 2)):
            raise Exception('More than 2 (R)NN policies. Ambiguous which non-default policy is best.')
best_ffnn_policy = ffnn_policies.index[ffnn_policies.index != default_ffnn_policy]
best_rnn_policy = rnn_policies.index[rnn_policies.index != default_rnn_policy]
if len(best_ffnn_policy) > 0:
best_ffnn_policy = best_ffnn_policy.values[0]
else:
best_ffnn_policy = default_ffnn_policy
if len(best_rnn_policy) > 0:
best_rnn_policy = best_rnn_policy.values[0]
else:
best_rnn_policy = default_rnn_policy
else:
        # Assumes best_ffnn_policy and best_rnn_policy are provided along with
        # the default configurations
best_ffnn_policy = given_best_ffnn_policy
best_rnn_policy = given_best_rnn_policy
list_policies.append(best_ffnn_policy)
list_policies.append(best_rnn_policy)
# Set the colour palette - keep consistent colours
c_list = sns.color_palette()
c_palette = {'Random' : c_list[7],
'Best RNN': c_list[0],
'Best NN': c_list[1],
'Default RNN': c_list[2],
'Default NN': c_list[3],
'SW-UCB':c_list[4],
'D-UCB': c_list[5]}
if reference['bandit'] in linear_bandits.keys():
c_palette = {'Random' : c_list[7],
'Best RNN': c_list[0],
'Best NN': c_list[1],
'Default RNN': c_list[2],
'Default NN': c_list[3],
'SW-LinUCB':c_list[4],
'D-LinUCB': c_list[5]}
    plot_df = df[df['Policy'].isin(list_policies)].copy()
# Will store the name to be used in the legend
plot_df['Policy_newnames'] = ''
plot_df.loc[plot_df.Policy == best_rnn_policy, 'Policy_newnames'] = 'Best RNN'
plot_df.loc[plot_df.Policy == best_ffnn_policy, 'Policy_newnames'] = 'Best NN'
plot_df.loc[plot_df.Policy.str.contains('Random'), 'Policy_newnames'] = 'Random'
plot_df.loc[plot_df.Policy.str.contains('D_UCB'), 'Policy_newnames'] = 'D-UCB'
plot_df.loc[plot_df.Policy.str.contains('SW_UCB'), 'Policy_newnames'] = 'SW-UCB'
plot_df.loc[plot_df.Policy.str.contains('D_LinUCB'), 'Policy_newnames'] = 'D-LinUCB'
plot_df.loc[plot_df.Policy.str.contains('SW_LinUCB'), 'Policy_newnames'] = 'SW-LinUCB'
plot_df.loc[plot_df.Policy == default_rnn_policy, 'Policy_newnames'] = 'Default RNN'
plot_df.loc[plot_df.Policy == default_ffnn_policy, 'Policy_newnames'] = 'Default NN'
plot_df = plot_df.sort_values(by='Policy_newnames')
del plot_df['Policy']
plot_df.rename(columns = {'Policy_newnames':'Policy'}, inplace=True)
# Plot the regret
plt.figure(figsize=(16,9))
plot_df = plot_df[plot_df.Pull < int(reference['trial_length'])]
# add ci = "sd" for faster standard deviation confidence bounds, imstead of
# a bootstrapped estimate
ax = sns.lineplot(x='Pull', y='Regret', hue='Policy', palette = c_palette,
data=plot_df, linewidth=3.0)
plt.xlim(1, int(reference['trial_length']))
plt.xticks(range(0, int(reference['trial_length']) + 2, 1024))
plt.xlabel('time step')
plt.ylabel('regret')
# Display legend in this particular order
handles, labels = plt.gca().get_legend_handles_labels()
order = list(range(1, len(handles)))
plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order])
plt.savefig('regret.pdf', bbox_inches='tight',pad_inches = 0)
if __name__ == "__main__":
main()
| 11,381
| 38.520833
| 199
|
py
|
rnlps
|
rnlps-master/rnlps/scripts/__init__.py
| 0
| 0
| 0
|
py
|
|
rnlps
|
rnlps-master/rnlps/scripts/multirun.py
|
"""
Runs multiple jobs in parallel in a tmux session.
"""
import os
import time
import datetime
import argparse
def main():
if 'TMUX' not in os.environ:
raise Exception('This script should be called from a tmux session.')
parser = argparse.ArgumentParser()
parser.add_argument('directory')
parser.add_argument('jobs', type=int)
args = parser.parse_args()
dirs = [os.path.join(args.directory, d)
for d in os.listdir(args.directory) if not d.startswith('_')]
dirs = [d for d in dirs if os.path.isdir(d)]
cmds = ['python3 rnlps/scripts/run.py {0}'.format(d) for d in dirs]
tokens = []
for j in range(args.jobs):
tokens.append(os.path.join(args.directory, '.token_{0}'.format(j)))
os.system('touch {0}'.format(tokens[-1]))
start_time = time.time()
for j, cmd in enumerate(cmds, 1):
free_tokens = []
while len(free_tokens) < 1:
free_tokens = list(filter(os.path.exists, tokens))
time.sleep(1)
os.remove(free_tokens[0])
os.system('tmux new-window -d -t {0}'.format(j))
os.system('tmux send-keys -t {0} \"{1}\" C-m'.format(j, cmd))
touch = 'touch {0}'.format(free_tokens[0])
os.system('tmux send-keys -t {0} \"{1}\" C-m'.format(j, touch))
os.system('tmux send-keys -t {0} \"exit\" C-m'.format(j))
print('Running command {0}/{1}.'.format(j, len(cmds)))
free_tokens = []
while len(free_tokens) < args.jobs:
free_tokens = list(filter(os.path.exists, tokens))
time.sleep(1)
for token in tokens:
os.remove(token)
elapsed = datetime.timedelta(seconds=time.time() - start_time)
print('Success. Time elapsed: {0}.'.format(elapsed))
if __name__ == "__main__":
main()
| 1,803
| 26.333333
| 76
|
py
|
rnlps
|
rnlps-master/rnlps/environments/contextual_bandits.py
|
"""
Contextual bandit environments to evaluate performance.
"""
import numpy as np
import os
class StationaryContextualBandit:
def __init__(self, dataset, seed, err_sigma = 0.05):
# Can also be used for real-world non-stationary problems
# as it doesn't shuffle the data.
self.random_state = np.random.RandomState(seed)
if os.path.isdir("rnlps/datasets/" + dataset):
self.X = np.load("rnlps/datasets/" + dataset + "/X.npy")
self.targets = np.load("rnlps/datasets/" + dataset + "/y.npy")
        else:
raise Exception("Dataset does not exist. Check the path.")
self.n_arms = len(np.unique(self.targets))
self.step = 0
self.context_dims = np.shape(self.X)[-1]
self.err_sigma = err_sigma
def reset(self):
self.step = 0
return self.X[self.step]
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
reward = 0.0
regret = 1.0
if arm == self.targets[self.step]:
reward = 1.0
regret = 0.0
assert (reward + regret) == 1
self.step += 1
context = self.X[self.step]
reward = reward + self.random_state.normal(0, self.err_sigma)
return reward, context, regret
def best_arms(self):
return [self.targets[self.step]]
def expected_cumulative_rewards(self, trial_length):
return np.cumsum(np.ones(trial_length))
def __repr__(self):
r = 'StationaryContextualBandit(n_arms={0}, X_dims={1})'
return r.format(self.n_arms, np.shape(self.X))
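# Minimal usage sketch (the dataset name is hypothetical; assumes a directory
# rnlps/datasets/<name>/ containing X.npy and y.npy):
#   bandit = StationaryContextualBandit(dataset='mushroom', seed=0)
#   context = bandit.reset()
#   reward, context, regret = bandit.pull(0)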
class FlippingContextualBandit:
def __init__(self, dataset, half_period, seed, err_sigma = 0.05):
self.random_state = np.random.RandomState(seed)
if os.path.isdir("rnlps/datasets/" + dataset):
self.X = np.load("rnlps/datasets/" + dataset + "/X.npy")
self.targets = np.load("rnlps/datasets/" + dataset + "/y.npy")
        else:
raise Exception("Dataset does not exist. Check the path.")
self.n_arms = len(np.unique(self.targets))
self.step = 0
self.half_period = half_period
self.context_dims = np.shape(self.X)[-1]
self.flipped = 0
self.err_sigma = err_sigma
def reset(self):
self.step = 0
return self.X[self.step]
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
if (arm == self.targets[self.step]) & (self.flipped == 0):
reward = 1.0
regret = 0.0
        elif (arm == ((self.n_arms - 1 - self.targets[self.step]) % self.n_arms)) & (self.flipped == 1):
reward = 1.0
regret = 0.0
else:
reward = 0.0
regret = 1.0
assert (reward + regret) == 1
self.step += 1
context = self.X[self.step]
if self.step % self.half_period == 0:
self.flipped = (self.flipped + 1) % 2
reward = reward + self.random_state.normal(0, self.err_sigma)
return reward, context, regret
def best_arms(self):
best = self.targets[self.step]
if self.flipped:
best = (self.n_arms - 1 - self.targets[self.step]) % self.n_arms
return [best]
def expected_cumulative_rewards(self, trial_length):
return np.cumsum(np.ones(trial_length))
def __repr__(self):
r = 'FlippingContextualBandit(n_arms={0}, X_dims={1}, half_period={2})'
return r.format(self.n_arms, np.shape(self.X), self.half_period)
contextual_bandits = {'StationaryContextualBandit': StationaryContextualBandit,
'FlippingContextualBandit': FlippingContextualBandit}
| 3,790
| 27.503759
| 102
|
py
|
rnlps
|
rnlps-master/rnlps/environments/non_contextual_bandits.py
|
"""
Non-contextual bandit environments to evaluate performance.
"""
import numpy as np
class StationaryBernoulliBandit:
def __init__(self, means, seed):
self.means = np.array(means)
self.random_state = np.random.RandomState(seed)
if (max(self.means) > 1.) or (min(self.means) < 0.):
raise Exception('Invalid parameters.')
self.n_arms = len(self.means)
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
regret = np.max(self.means) - self.means[arm]
return float(self.random_state.binomial(1, self.means[arm])), None, regret
def expected_cumulative_rewards(self, trial_length):
return np.cumsum(np.full(trial_length, max(self.means)))
def best_arms(self):
m = max(self.means)
return [a for a in range(self.n_arms) if np.allclose(self.means[a], m)]
def __repr__(self):
return 'StationaryBernoulliBandit(means={0})'.format(repr(self.means))
class FlippingBernoulliBandit:
def __init__(self, means, half_period, seed):
self.means = self._means = np.array(means)
self.half_period = half_period
self.random_state = np.random.RandomState(seed)
if (max(self.means) > 1.) or (min(self.means) < 0.):
raise Exception('Invalid parameters.')
self.n_arms = len(self.means)
self.step = 0
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
reward = float(self.random_state.binomial(1, self.means[arm]))
regret = np.max(self.means) - self.means[arm]
self.step += 1
# flip means if step is a multiple of half_period
if (self.step % self.half_period) == 0:
self.means = 1 - self.means
return reward, None, regret
def expected_cumulative_rewards(self, trial_length):
a, b = np.max(self._means), np.max(1. - self._means)
er = np.array([a]*self.half_period + [b]*self.half_period)
er = np.tile(er, trial_length//len(er) + 1)
return np.cumsum(er[0: trial_length])
def best_arms(self):
m = max(self.means)
return [a for a in range(self.n_arms) if np.allclose(self.means[a], m)]
def __repr__(self):
r = 'FlippingBernoulliBandit(means={0}, half_period={1})'
return r.format(repr(self._means), self.half_period)
class SinusoidalBernoulliBandit:
def __init__(self, n_arms, step_size, seed):
self.n_arms = n_arms
self.step_size = step_size
self.random_state = np.random.RandomState(seed)
self.pos = 0
self.offsets = np.array([(2*np.pi*i)/self.n_arms for i in range(self.n_arms)])
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
p = (1. + np.sin(self.pos + self.offsets))/2.
reward = float(self.random_state.binomial(1, p[arm]))
regret = np.max(p) - p[arm]
self.pos += self.step_size
return reward, None, regret
def expected_cumulative_rewards(self, trial_length):
best_rewards = []
pos = 0
offsets = np.array([(2*np.pi*i)/self.n_arms for i in range(self.n_arms)])
for i in range(trial_length):
p = (1. + np.sin(pos + offsets))/2.
best_rewards.append(np.max(p))
pos += self.step_size
return np.cumsum(best_rewards)
def best_arms(self):
p = np.zeros(self.n_arms)
for arm in range(self.n_arms):
p[arm] = (1. + np.sin(self.pos + self.offsets[arm]))/2.
m = max(p)
return [a for a in range(self.n_arms) if np.allclose(p[a], m)]
def __repr__(self):
r = 'SinusoidalBernoulliBandit(n_arms={0}, step_size={1})'
return r.format(self.n_arms, self.step_size)
class StationaryGaussianBandit:
def __init__(self, means, std, seed):
self.means = np.array(means)
self.std = float(std)
self.random_state = np.random.RandomState(seed)
self.n_arms = len(self.means)
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
reward = self.random_state.normal(self.means[arm], self.std)
regret = np.max(self.means) - self.means[arm]
return reward, None, regret
def expected_cumulative_rewards(self, trial_length):
return np.cumsum(np.full(trial_length, max(self.means)))
def best_arms(self):
m = max(self.means)
return [a for a in range(self.n_arms) if np.allclose(self.means[a], m)]
def __repr__(self):
r = 'StationaryGaussianBandit(means={0}, std={1})'
return r.format(repr(self.means), self.std)
class FlippingGaussianBandit:
def __init__(self, means, std, half_period, seed):
self.means = self._means = np.array(means)
self.std = float(std)
self.half_period = half_period
self.random_state = np.random.RandomState(seed)
self.n_arms = len(self.means)
self.step = 0
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
reward = self.random_state.normal(self.means[arm], self.std)
regret = np.max(self.means) - self.means[arm]
self.step += 1
# flip means if step is a multiple of half_period
if (self.step % self.half_period) == 0:
self.means = -self.means
return reward, None, regret
def expected_cumulative_rewards(self, trial_length):
a, b = np.max(self._means), np.max(-self._means)
er = np.array([a]*self.half_period + [b]*self.half_period)
er = np.tile(er, trial_length//len(er) + 1)
return np.cumsum(er[0: trial_length])
def best_arms(self):
m = max(self.means)
return [a for a in range(self.n_arms) if np.allclose(self.means[a], m)]
def __repr__(self):
r = 'FlippingGaussianBandit(means={0}, std={1}, half_period={2})'
return r.format(repr(self._means), self.std, self.half_period)
class SinusoidalGaussianBandit:
def __init__(self, n_arms, std, step_size, seed):
self.n_arms = n_arms
self.std = float(std)
self.step_size = step_size
self.random_state = np.random.RandomState(seed)
self.pos = 0
self.offsets = np.array([(2*np.pi*i)/self.n_arms for i in range(self.n_arms)])
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
v = np.sin(self.pos + self.offsets)
reward = self.random_state.normal(v[arm], self.std)
regret = max(v) - v[arm]
self.pos += self.step_size
return reward, None, regret
def expected_cumulative_rewards(self, trial_length):
raise NotImplementedError()
def best_arms(self):
v = np.zeros(self.n_arms)
for arm in range(self.n_arms):
v[arm] = np.sin(self.pos + self.offsets[arm])
m = max(v)
return [a for a in range(self.n_arms) if np.allclose(v[a], m)]
def __repr__(self):
r = 'SinusoidalGaussianBandit(n_arms={0}, std={1}, step_size={2})'
return r.format(self.n_arms, self.std, self.step_size)
class GaussianMarkovChainBandit:
""" Most rewarding arm with expected reward = best_mean. All other arms
return an expected reward = other_mean. When the most rewarding arm is
pulled, it transitions according to the transition matrix (tmatrix)
probabilities. """
def __init__(self, tmatrix, best_mean, other_mean, std, seed):
self.tmatrix = np.array(tmatrix)
self.random_state = np.random.RandomState(seed)
if self.tmatrix.shape[0] != self.tmatrix.shape[1]:
raise Exception('Invalid matrix dimensions.')
if not np.allclose(self.tmatrix.sum(axis=1), 1.):
raise Exception('Invalid transition probabilities.')
self.n_arms = self.tmatrix.shape[0]
self.state = self.random_state.choice(self.n_arms)
self.best_mean = best_mean
self.other_mean = other_mean
self.std = std
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
if (arm == self.state):
p = self.tmatrix[self.state]
self.state = self.random_state.choice(self.n_arms, p=p)
reward_mu = self.best_mean
else:
reward_mu = self.other_mean
reward = self.random_state.normal(reward_mu, self.std)
regret = self.best_mean - reward_mu
return reward, None, regret
def expected_cumulative_rewards(self, trial_length):
return np.cumsum(self.best_mean * np.ones(trial_length))
def best_arms(self):
return [self.state]
def __repr__(self):
r = 'GaussianMarkovChainBandit(tmatrix={0}, best_mean={1}, other_mean={2}, std={3})'
return r.format(repr(self.tmatrix), self.best_mean, self.other_mean, self.std)
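# Minimal usage sketch for GaussianMarkovChainBandit (hypothetical values):
#   bandit = GaussianMarkovChainBandit(tmatrix=[[0.9, 0.1], [0.1, 0.9]],
#                                      best_mean=1.0, other_mean=0.0, std=0.1, seed=0)
#   reward, _, regret = bandit.pull(0)   # regret = best_mean - other_mean if arm != state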
class GaussianCircularChainBandit(GaussianMarkovChainBandit):
def __init__(self, n_arms, best_mean, other_mean, std, p_left, seed):
if n_arms < 3:
raise Exception('Invalid number of states.')
tmatrix = np.zeros((n_arms, n_arms))
for j in range(n_arms):
tmatrix[j, j - 1] = p_left
tmatrix[j, (j + 1) % n_arms] = (1 - p_left)
GaussianMarkovChainBandit.__init__(self, tmatrix, best_mean, other_mean, std, seed)
non_contextual_bandits = {'StationaryBernoulliBandit': StationaryBernoulliBandit,
'FlippingBernoulliBandit': FlippingBernoulliBandit,
'SinusoidalBernoulliBandit': SinusoidalBernoulliBandit,
'StationaryGaussianBandit': StationaryGaussianBandit,
'FlippingGaussianBandit': FlippingGaussianBandit,
'SinusoidalGaussianBandit': SinusoidalGaussianBandit,
'GaussianMarkovChainBandit': GaussianMarkovChainBandit,
'GaussianCircularChainBandit': GaussianCircularChainBandit}
| 10,137
| 32.458746
| 92
|
py
|
rnlps
|
rnlps-master/rnlps/environments/__init__.py
| 0
| 0
| 0
|
py
|
|
rnlps
|
rnlps-master/rnlps/environments/linear_bandits.py
|
"""
Linear bandit environments to evaluate performance.
"""
import numpy as np
import os
class StationaryLinearBandit:
def __init__(self, n_arms, dimension, seed, arm_pool_size = 2000, err_sigma = 0.05):
self.n_arms = n_arms
self.dimension = dimension
self.arm_pool_size = arm_pool_size
self.err_sigma = err_sigma
self.random_state = np.random.RandomState(seed)
self.theta_star = self.generate_theta_star()
self.arm_pool = self.generate_arm_pool()
self.current_arms = []
self.step = 0
def generate_theta_star(self):
theta_star_unnormalized = self.random_state.uniform(low = -1, high = 1, size = (self.dimension,))
return theta_star_unnormalized/np.linalg.norm(theta_star_unnormalized)
def generate_arm_pool(self):
arm_pool_unnormalized = self.random_state.uniform(low = -1, high = 1, size = (self.arm_pool_size, self.dimension))
return arm_pool_unnormalized/np.linalg.norm(arm_pool_unnormalized, keepdims = True, axis = 1)
def sample_arms(self):
indices = self.random_state.choice(self.arm_pool_size, size = self.n_arms, replace = False)
return self.arm_pool[indices]
def reset(self):
self.step = 0
arms_context = self.sample_arms()
self.current_arms = arms_context
return arms_context
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
expected_reward = np.dot(self.current_arms[arm], self.theta_star)
best_arm = self.best_arms()
regret = np.dot(self.current_arms[best_arm[0]] , self.theta_star) - expected_reward
reward = expected_reward + self.random_state.normal(0, self.err_sigma)
self.step += 1
context = self.sample_arms()
self.current_arms = context
return reward, context, regret
def best_arms(self):
means = np.dot(self.current_arms, self.theta_star)
return [np.argmax(means)]
def expected_cumulative_rewards(self, trial_length):
raise NotImplementedError
def __repr__(self):
r = 'StationaryLinearBandit(n_arms={0}, dimension={1}, arm_pool_size={2})'
return r.format(self.n_arms, self.dimension, self.arm_pool_size)
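# Note: theta_star and every arm in the pool are normalised to unit L2 norm,
# so the expected reward np.dot(arm, theta_star) always lies in [-1, 1].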
class FlippingLinearBandit:
def __init__(self, n_arms, dimension, half_period, seed, arm_pool_size = 2000, err_sigma = 0.05):
self.n_arms = n_arms
self.dimension = dimension
self.arm_pool_size = arm_pool_size
self.err_sigma = err_sigma
self.half_period = half_period
self.random_state = np.random.RandomState(seed)
self.theta_star = self.generate_theta_star()
self.arm_pool = self.generate_arm_pool()
self.current_arms = []
self.step = 0
def generate_theta_star(self):
theta_star_unnormalized = self.random_state.uniform(low = -1, high = 1, size = (self.dimension,))
return theta_star_unnormalized/np.linalg.norm(theta_star_unnormalized)
def generate_arm_pool(self):
arm_pool_unnormalized = self.random_state.uniform(low = -1, high = 1, size = (self.arm_pool_size, self.dimension))
return arm_pool_unnormalized/np.linalg.norm(arm_pool_unnormalized, keepdims = True, axis = 1)
def sample_arms(self):
indices = self.random_state.choice(self.arm_pool_size, size = self.n_arms, replace = False)
return self.arm_pool[indices]
def reset(self):
self.step = 0
arms_context = self.sample_arms()
self.current_arms = arms_context
return arms_context
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
expected_reward = np.dot(self.current_arms[arm], self.theta_star)
best_arm = self.best_arms()
regret = np.dot(self.current_arms[best_arm[0]] , self.theta_star) - expected_reward
reward = expected_reward + self.random_state.normal(0, self.err_sigma)
self.step += 1
context = self.sample_arms()
# Update theta_star
if self.step % self.half_period == 0:
self.theta_star = -1 * self.theta_star
self.current_arms = context
return reward, context, regret
def best_arms(self):
means = np.dot(self.current_arms, self.theta_star)
return [np.argmax(means)]
def expected_cumulative_rewards(self, trial_length):
raise NotImplementedError
def __repr__(self):
r = 'FlippingLinearBandit(n_arms={0}, dimension={1}, half_period={2}, arm_pool_size={3})'
return r.format(self.n_arms, self.dimension, self.half_period, self.arm_pool_size)
class RotatingLinearBandit2d:
def __init__(self, n_arms, time_period, seed, arm_pool_size = 2000, err_sigma = 0.05):
self.n_arms = n_arms
self.dimension = 2
self.arm_pool_size = arm_pool_size
self.err_sigma = err_sigma
self.time_period = time_period
self.random_state = np.random.RandomState(seed)
self.arm_pool = self.generate_arm_pool()
self.current_arms = []
self.step = 0
self.theta_star = np.array([np.cos(2 * np.pi * self.step/self.time_period), np.sin(2 * np.pi * self.step/self.time_period)])
def generate_arm_pool(self):
arm_pool_unnormalized = self.random_state.uniform(low = -1, high = 1, size = (self.arm_pool_size, self.dimension))
return arm_pool_unnormalized/np.linalg.norm(arm_pool_unnormalized, keepdims = True, axis = 1)
def sample_arms(self):
indices = self.random_state.choice(self.arm_pool_size, size = self.n_arms, replace = False)
return self.arm_pool[indices]
def reset(self):
self.step = 0
arms_context = self.sample_arms()
self.current_arms = arms_context
return arms_context
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
expected_reward = np.dot(self.current_arms[arm], self.theta_star)
best_arm = self.best_arms()
regret = np.dot(self.current_arms[best_arm[0]] , self.theta_star) - expected_reward
reward = expected_reward + self.random_state.normal(0, self.err_sigma)
self.step += 1
context = self.sample_arms()
# Update theta_star
self.theta_star = np.array([np.cos(2 * np.pi * self.step/self.time_period), np.sin(2 * np.pi * self.step/self.time_period)])
self.current_arms = context
return reward, context, regret
def best_arms(self):
means = np.dot(self.current_arms, self.theta_star)
return [np.argmax(means)]
def expected_cumulative_rewards(self, trial_length):
raise NotImplementedError
def __repr__(self):
r = 'RotatingLinearBandit2d(n_arms={0}, dimension={1}, time_period={2}, arm_pool_size={3})'
        return r.format(self.n_arms, self.dimension, self.time_period, self.arm_pool_size)
linear_bandits = {'StationaryLinearBandit': StationaryLinearBandit,
'FlippingLinearBandit': FlippingLinearBandit,
'RotatingLinearBandit2d' : RotatingLinearBandit2d}
| 7,215
| 32.719626
| 132
|
py
|
DAS
|
DAS-master/code/my_layers.py
|
import keras.backend as K
from keras.engine.topology import Layer
from keras.layers.convolutional import Conv1D
from keras import initializers
from keras import regularizers
from keras import constraints
import tensorflow as tf
import numpy as np
################################################################################
# Quadratic-time MMD with Gaussian RBF
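# For reference, _mmd2 below implements the standard quadratic-time estimator
#   MMD^2(X, Y) = (1/m^2) sum_ij k(x_i, x_j) + (1/n^2) sum_ij k(y_i, y_j)
#                 - (2/(m*n)) sum_ij k(x_i, y_j)          (biased form)
# where k is a mixture of Gaussian RBF kernels,
#   k(x, y) = sum_s w_s * exp(-||x - y||^2 / (2 * sigma_s^2)).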
def _mix_rbf_kernel(X, Y, sigmas=[1.], wts=None):
if wts is None:
wts = [1] * len(sigmas)
XX = tf.matmul(X, X, transpose_b=True)
XY = tf.matmul(X, Y, transpose_b=True)
YY = tf.matmul(Y, Y, transpose_b=True)
X_sqnorms = tf.diag_part(XX)
Y_sqnorms = tf.diag_part(YY)
r = lambda x: tf.expand_dims(x, 0)
c = lambda x: tf.expand_dims(x, 1)
K_XX, K_XY, K_YY = 0, 0, 0
for sigma, wt in zip(sigmas, wts):
gamma = 1 / (2 * sigma**2)
K_XX += wt * tf.exp(-gamma * (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
K_XY += wt * tf.exp(-gamma * (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
K_YY += wt * tf.exp(-gamma * (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))
return K_XX, K_XY, K_YY, tf.reduce_sum(wts)
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = tf.cast(tf.shape(K_XX)[0], tf.float32)
n = tf.cast(tf.shape(K_YY)[0], tf.float32)
if biased:
mmd2 = (tf.reduce_sum(K_XX, keep_dims=True) / (m * m)
+ tf.reduce_sum(K_YY, keep_dims=True) / (n * n)
- 2 * tf.reduce_sum(K_XY, keep_dims=True) / (m * n))
else:
if const_diagonal is not False:
trace_X = m * const_diagonal
trace_Y = n * const_diagonal
else:
trace_X = tf.trace(K_XX)
trace_Y = tf.trace(K_YY)
mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
+ (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
- 2 * tf.reduce_sum(K_XY) / (m * n))
return mmd2
def mix_rbf_mmd2(X, Y, sigmas=[1.], wts=None, biased=True):
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigmas, wts)
return _mmd2(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
def rbf_mmd2(X, Y, sigma=1., biased=True):
return mix_rbf_mmd2(X, Y, sigmas=[sigma], biased=biased)
################################################################################
################################################################################
# Customized layers
class Max_over_time(Layer):
def __init__(self, **kwargs):
self.supports_masking = True
super(Max_over_time, self).__init__(**kwargs)
def call(self, x, mask=None):
if mask is not None:
mask = K.cast(mask, K.floatx())
mask = K.expand_dims(mask)
x = x * mask
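            # zeroing masked timesteps is safe here because the CNN activation
            # defaults to ReLU (non-negative); negative-valued feature maps
            # could otherwise have their true max masked to 0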
return K.max(x, axis=1)
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[2])
def compute_mask(self, x, mask):
return None
class KL_loss(Layer):
def __init__(self, batch_size, **kwargs):
super(KL_loss, self).__init__(**kwargs)
self.batch_size = batch_size
def call(self, x, mask=None):
a = x[0]
b = x[1]
a = K.mean(a, axis=0, keepdims=True)
b = K.mean(b, axis=0, keepdims=True)
a /= K.sum(a, keepdims=True)
b /= K.sum(b, keepdims=True)
a = K.clip(a, K.epsilon(), 1)
b = K.clip(b, K.epsilon(), 1)
loss = K.sum(a*K.log(a/b), axis=-1, keepdims=True) \
+ K.sum(b*K.log(b/a), axis=-1, keepdims=True)
loss = K.repeat_elements(loss, self.batch_size, axis=0)
return loss
def compute_output_shape(self, input_shape):
return (input_shape[0][0], 1)
def compute_mask(self, x, mask):
return None
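# KL_loss above computes a symmetric KL divergence, KL(a||b) + KL(b||a), between
# the batch-mean activation vectors of its two inputs (each renormalised to sum
# to 1), and repeats the scalar batch_size times so Keras sees a per-sample loss.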
class mmd_loss(Layer):
def __init__(self, batch_size, **kwargs):
super(mmd_loss, self).__init__(**kwargs)
self.batch_size = batch_size
def call(self, x, mask=None):
a = x[0]
b = x[1]
mmd = rbf_mmd2(a, b)
mmd = K.repeat_elements(mmd, self.batch_size, axis=0)
return mmd
def compute_output_shape(self, input_shape):
return (input_shape[0][0], 1)
def compute_mask(self, x, mask):
return None
class Ensemble_pred_loss(Layer):
def __init__(self, **kwargs):
super(Ensemble_pred_loss, self).__init__(**kwargs)
def call(self, x, mask=None):
pred = x[0]
target = x[1]
weight = x[2]
error = K.categorical_crossentropy(target, pred)
loss = error * weight
return loss
def compute_output_shape(self, input_shape):
return (input_shape[0][0], 1)
def compute_mask(self, x, mask):
return None
class Conv1DWithMasking(Conv1D):
def __init__(self, **kwargs):
self.supports_masking = True
super(Conv1DWithMasking, self).__init__(**kwargs)
def compute_mask(self, x, mask):
return mask
| 4,951
| 26.359116
| 80
|
py
|
DAS
|
DAS-master/code/optimizers.py
|
import keras.optimizers as opt
def get_optimizer(args):
clipvalue = 0
clipnorm = 10
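    # in the (older) Keras versions this code targets, clipvalue=0 disables
    # value clipping while clipnorm=10 caps the gradient norm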
if args.algorithm == 'rmsprop':
optimizer = opt.RMSprop(lr=0.0005, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'sgd':
optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adagrad':
optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adadelta':
optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adam':
optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adamax':
optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
return optimizer
| 943
| 41.909091
| 115
|
py
|
DAS
|
DAS-master/code/train_batch.py
|
import argparse
import logging
import numpy as np
from time import time
import utils as U
logging.basicConfig(
# filename='out.log',
level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
##############################################################################################################################
# Parse arguments
parser = argparse.ArgumentParser()
# arguments related to datasets and data preprocessing
parser.add_argument("--dataset", dest="dataset", type=str, metavar='<str>', required=True, help="The name of the dataset (small_1|small_2|large|amazon)")
parser.add_argument("--source", dest="source", type=str, metavar='<str>', required=True, help="The name of the source domain")
parser.add_argument("--target", dest="target", type=str, metavar='<str>', required=True, help="The name of the source target")
parser.add_argument("-v", "--vocab-size", dest="vocab_size", type=int, metavar='<int>', default=10000, help="Vocab size. '0' means no limit (default=0)")
parser.add_argument("--n-class", dest="n_class", type=int, metavar='<int>', default=3, help="The number of ouput classes")
parser.add_argument("-t", "--type", dest="model_type", type=str, metavar='<str>', default='DAS', help="Model type (default=DAS)")
parser.add_argument("--emb", dest="emb_path", type=str, metavar='<str>', help="The path to the word embeddings file")
# hyper-parameters related to network training
parser.add_argument("-a", "--algorithm", dest="algorithm", type=str, metavar='<str>', default='rmsprop', help="Optimization algorithm (rmsprop|sgd|adagrad|adadelta|adam|adamax) (default=rmsprop)")
parser.add_argument("--epochs", dest="epochs", type=int, metavar='<int>', default=15, help="Number of epochs (default=15)")
parser.add_argument("-b", "--batch-size", dest="batch_size", type=int, metavar='<int>', default=50, help="Batch size (default=50)")
# hyper-parameters related to network structure
parser.add_argument("-e", "--embdim", dest="emb_dim", type=int, metavar='<int>', default=300, help="Embeddings dimension (default=300)")
parser.add_argument("-c", "--cnndim", dest="cnn_dim", type=int, metavar='<int>', default=300, help="CNN output dimension.(default=300)")
parser.add_argument("-w", "--cnnwin", dest="cnn_window_size", type=int, metavar='<int>', default=3, help="CNN window size. (default=3)")
parser.add_argument("--cnn-activation", dest="cnn_activation", type=str, metavar='<str>', default='relu', help="The activation of CNN")
parser.add_argument("--dropout", dest="dropout_prob", type=float, metavar='<float>', default=0.5, help="The dropout probability. To disable, input 0 (default=0.5)")
parser.add_argument("--discrepancy-obj", dest="minimize_discrepancy_obj", type=str, metavar='<str>', default='kl_loss', help="The loss for minimizing domain discrepancy (default=kl_loss)")
# hyper-parameters related to DAS objectives
# You can play with these hyper-parameters to see the different variants of our model
# (example invocations are given after the argument list below).
# e.g. setting weight_uns to 0 gives DAS-EM; setting weight_entropy to 0 gives DAS-SE;
# setting weight_discrepancy, weight_entropy and weight_uns all to 0 gives NaiveNN.
parser.add_argument("--weight-discrepancy", dest="weight_discrepancy", type=float, metavar='<float>', default=200, help="The weight of the domain discrepancy minimization objective (lamda_1 in the paper)")
parser.add_argument("--weight-entropy", dest="weight_entropy", type=float, metavar='<float>', default=1.0, help="The weight of the target entropy objective (lamda_2 in the paper)")
parser.add_argument("--weight-uns", dest="weight_uns", type=float, metavar='<float>', default=3.0, help="The max value of the ensemble prediction objective weight (lamda_3 in the paper)")
parser.add_argument("--ensemble-prob", dest="ensemble_prob", type=float, metavar='<float>', default=0.5, help="The ensemble momentum (alpha in the paper)")
# random seed that affects data splits and parameter intializations
parser.add_argument("--seed", dest="seed", type=int, metavar='<int>', default=1234, help="Random seed (default=1234)")
args = parser.parse_args()
U.print_args(args)
# small_1 and small_2 denote experimental setting 1 and setting 2 on the small-scale dataset respectively.
# large denotes the large-scale dataset (Table 1(b) in the paper).
# amazon denotes the amazon benchmark dataset (Blitzer et al., 2007). See appendix A in the paper.
assert args.dataset in {'small_1', 'small_2', 'large', 'amazon'}
assert args.model_type == 'DAS'
# The domains contained in each dataset
if args.dataset in {'small_1', 'small_2'}:
assert args.source in {'book', 'electronics', 'beauty', 'music'}
assert args.target in {'book', 'electronics', 'beauty', 'music'}
elif args.dataset == 'large':
assert args.source in {'imdb', 'yelp2014', 'cell_phone', 'baby'}
assert args.target in {'imdb', 'yelp2014', 'cell_phone', 'baby'}
else:
# note that the book and electronics domains of amazon benchmark are different from those in small_1 and small_2
assert args.source in {'book', 'dvd', 'electronics', 'kitchen'}
assert args.target in {'book', 'dvd', 'electronics', 'kitchen'}
assert args.algorithm in {'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'}
# In DAS, we use kl_loss for minimizing domain discrepancy. (See section 3.2 in paper)
assert args.minimize_discrepancy_obj in {'kl_loss', 'mmd'}
if args.seed > 0:
np.random.seed(args.seed)
##############################################################################################################################
# Prepare data
if args.dataset == 'amazon':
from read_amazon import get_data
else:
from read import get_data
vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un = get_data(
args.dataset, args.source, args.target, args.n_class, args.vocab_size)
print '------------ Training Sets ------------'
print 'Number of labeled source examples: ', len(source_x)
print 'Number of total source examples (labeled+unlabeled): ', len(source_un)
print 'Number of unlabeled target examples: ', len(target_un)
print '------------ Development Set ------------'
print 'Size of development set: ', len(dev_x)
print '------------ Test Set -------------'
print 'Size of test set: ', len(test_x)
def batch_generator(data_list, batch_size):
num = len(data_list[0])
while True:
excerpt = np.random.choice(num, batch_size)
        yield [data[excerpt] for data in data_list]
def batch_generator_large(data_list, batch_size):
#######################################
# Generate balanced labeled source examples.
# Only used on large dataset as
# the training set is quite unbalanced.
#######################################
label_list = np.argmax(data_list[1], axis=-1)
pos_inds = np.where(label_list==0)[0]
neg_inds = np.where(label_list==1)[0]
neu_inds = np.where(label_list==2)[0]
while True:
pos_sample = np.random.choice(pos_inds, batch_size/3)
neg_sample = np.random.choice(neg_inds, batch_size/3)
neu_sample = np.random.choice(neu_inds, batch_size/3+batch_size%3)
excerpt = np.concatenate((pos_sample, neg_sample))
excerpt = np.concatenate((excerpt, neu_sample))
np.random.shuffle(excerpt)
        yield [data[excerpt] for data in data_list]
##############################################################################################################################
# Optimizer algorithm
from optimizers import get_optimizer
optimizer = get_optimizer(args)
###############################################################################################################################
# Building model
from models import create_model
import keras.backend as K
logger.info(' Building model')
def entropy(y_true, y_pred):
return K.mean(K.categorical_crossentropy(y_pred, y_pred), axis=-1)
def return_ypred(y_true, y_pred):
return y_pred
model = create_model(args, overall_maxlen, vocab)
model.compile(optimizer=optimizer,
loss={'source_probs': 'categorical_crossentropy', 'target_probs': entropy, 'discrepancy_loss': return_ypred, 'uns_loss': return_ypred},
loss_weights={'source_probs': 1, 'target_probs': args.weight_entropy, 'discrepancy_loss': args.weight_discrepancy, 'uns_loss': 1},
metrics={'source_probs': 'categorical_accuracy'})
###############################################################################################################################
# Training
from keras.utils.np_utils import to_categorical
# weight ramp-up function on the ensemble prediction objective
# w(t) in the paper.
def rampup(epoch):
max_rampup_epochs = 30.0
if epoch == 0:
return 0
elif epoch < args.epochs:
p = min(max_rampup_epochs, float(epoch)) / max_rampup_epochs
p = 1.0 - p
return np.exp(-p*p*5.0)*args.weight_uns
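# i.e. rampup implements w(t) = lamda_3 * exp(-5 * (1 - min(t, 30)/30)^2) with
# w(0) = 0 as a special case; it reaches the full weight lamda_3 once t >= 30
# (the training loop below only ever calls it with t < args.epochs, so no
# branch falls through).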
from tqdm import tqdm
logger.info('----------------------------------------- Training Model ---------------------------------------------------------')
if args.dataset == 'large':
source_gen = batch_generator_large([source_x, source_y], batch_size=args.batch_size)
else:
source_gen = batch_generator([source_x, source_y], batch_size=args.batch_size)
source_un_gen = batch_generator([source_un], batch_size=args.batch_size)
target_un_gen = batch_generator([target_un], batch_size=args.batch_size)
overall_x = np.concatenate((source_un, target_un))
samples_per_epoch = len(overall_x)
batches_per_epoch = samples_per_epoch / args.batch_size
# Set the limit of batches_per_epoch to 500
batches_per_epoch = min(batches_per_epoch, 500)
#Initialize targets for unlabeled data. (See algorithm 1 in paper)
ensemble_prediction = np.zeros((len(overall_x), args.n_class))
targets = np.zeros((len(overall_x), args.n_class))
epoch_predictions = np.zeros((len(overall_x), args.n_class))
get_predictions = K.function([model.get_layer('uns_input').input, K.learning_phase()], [model.get_layer('uns_predictions').output])
best_valid_acc = 0
pred_probs = None
for ii in xrange(args.epochs):
t0 = time()
train_loss, source_loss, target_loss, dis_loss, uns_loss, train_metric = 0., 0., 0., 0., 0., 0.
uns_gen = batch_generator([overall_x, targets], batch_size=args.batch_size)
for b in tqdm(xrange(batches_per_epoch)):
batch_source_x, batch_source_y = source_gen.next()
batch_source_un = source_un_gen.next()[0]
batch_target_un = target_un_gen.next()[0]
batch_uns, batch_targets = uns_gen.next()
train_loss_, source_loss_, target_loss_, dis_loss_, uns_loss_, train_metric_ = model.train_on_batch(
[batch_source_x, batch_source_un, batch_target_un, batch_uns, batch_targets, np.full((args.batch_size, 1), rampup(ii))],
{'source_probs': batch_source_y, 'target_probs': batch_source_y, 'discrepancy_loss': np.ones((args.batch_size, 1)) ,
'uns_loss': np.ones((args.batch_size, 1))})
train_loss += train_loss_ / batches_per_epoch
source_loss += source_loss_ / batches_per_epoch
target_loss += target_loss_ / batches_per_epoch
uns_loss += uns_loss_ / batches_per_epoch
dis_loss += dis_loss_ / batches_per_epoch
train_metric += train_metric_ / batches_per_epoch
# after the training of each epoch, compute predictions on unlabeled data
for ind in xrange(0, len(overall_x), args.batch_size):
if ind+args.batch_size > len(overall_x):
batch_inds = range(ind, len(overall_x))
else:
batch_inds = range(ind, ind+args.batch_size)
batch_ = overall_x[batch_inds]
batch_predictions = get_predictions([batch_, 0])[0]
for i, j in enumerate(batch_inds):
epoch_predictions[j] = batch_predictions[i]
# compute ensemble predictions on unlabeled data
ensemble_prediction = args.ensemble_prob*ensemble_prediction + (1-args.ensemble_prob)*epoch_predictions
targets = ensemble_prediction / (1.0-args.ensemble_prob**(ii+1))
targets = to_categorical(np.argmax(ensemble_prediction, axis=-1), args.n_class)
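    # The two lines above follow temporal ensembling: Z <- alpha*Z + (1 - alpha)*z
    # with bias correction Z / (1 - alpha^t); the soft targets are then
    # discretized into hard one-hot targets (argmax is unchanged by the correction).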
tr_time = time() - t0
valid_loss, valid_source_loss, valid_target_loss, valid_dis_loss, valid_uns_loss, valid_metric = model.evaluate([dev_x, dev_x, dev_x, dev_x, dev_y, np.ones((len(dev_y), 1))],\
{'source_probs': dev_y, 'target_probs': dev_y, 'discrepancy_loss': np.ones((len(dev_x),1)), 'uns_loss': np.ones((len(dev_x),1))}, batch_size=args.batch_size, verbose=1)
logger.info('Epoch %d, train: %is' % (ii, tr_time))
logger.info('[Train] loss: %.4f, [Source Classification] loss: %.4f, [Target Entropy] loss, %.4f, [Ensemble Prediction] loss: %.4f, [Discrepancy] loss: %.4f, metric: %.4f' \
% (train_loss, source_loss, target_loss, uns_loss, dis_loss, train_metric))
logger.info('[Validation] loss: %.4f, [Classification] loss: %.4f, [Entropy] loss, %.4f, [Ensemble Prediction] loss: %.4f, [Discrepancy] loss: %.4f, metric: %.4f' \
% (valid_loss, valid_source_loss, valid_target_loss, valid_uns_loss, valid_dis_loss, valid_metric))
if valid_metric > best_valid_acc:
best_valid_acc = valid_metric
print("------------- Best performance on dev set so far ==> evaluating on test set -------------")
logger.info("------------- Best performance on dev set so far ==> evaluating on test set -------------\n")
if args.dataset == 'large':
            # pad test set so that its size is divisible by batch_size
append = args.batch_size-(len(test_y)%args.batch_size)
test_x_ = np.concatenate((test_x, np.zeros((append, test_x.shape[1]))))
test_y_ = np.concatenate((test_y, np.zeros((append, test_y.shape[1]))))
pred_probs = model.predict([test_x_, test_x_, test_x_, test_x_,
test_y_, np.ones((len(test_y_), 1))], batch_size=args.batch_size, verbose=1)[0]
pred_probs = pred_probs[:len(test_y)]
else:
pred_probs = model.predict([test_x, test_x, test_x, test_x,
test_y, np.ones((len(test_y), 1))], batch_size=args.batch_size, verbose=1)[0]
from sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support
preds = np.argmax(pred_probs, axis=-1)
true = np.argmax(test_y, axis=-1)
# Compute accuracy on test set
logger.info("accuracy: "+ str(accuracy_score(true, preds)) + "\n")
# Compute macro-f1 on test set
p_macro, r_macro, f_macro, support_macro \
= precision_recall_fscore_support(true, preds, average='macro')
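        # recompute macro-F1 as the harmonic mean of macro-precision and
        # macro-recall (this overrides the per-class-averaged F1 from sklearn)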
f_macro = 2*p_macro*r_macro/(p_macro+r_macro)
logger.info("macro-f1: "+str(f_macro) + "\n\n")
| 14,860
| 49.037037
| 205
|
py
|
DAS
|
DAS-master/code/utils.py
|
import sys
import os, errno
import logging
#-----------------------------------------------------------------------------------------------------------#
def set_logger(out_dir=None):
console_format = BColors.OKBLUE + '[%(levelname)s]' + BColors.ENDC + ' (%(name)s) %(message)s'
#datefmt='%Y-%m-%d %Hh-%Mm-%Ss'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(console_format))
logger.addHandler(console)
if out_dir:
file_format = '[%(levelname)s] (%(name)s) %(message)s'
log_file = logging.FileHandler(out_dir + '/log.txt', mode='w')
log_file.setLevel(logging.DEBUG)
log_file.setFormatter(logging.Formatter(file_format))
logger.addHandler(log_file)
#-----------------------------------------------------------------------------------------------------------#
def mkdir_p(path):
if path == '':
return
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def get_root_dir():
return os.path.dirname(sys.argv[0])
def bincounts(array):
num_rows = array.shape[0]
if array.ndim > 1:
num_cols = array.shape[1]
else:
num_cols = 1
array = array[:, None]
counters = []
mfe_list = []
for col in range(num_cols):
counter = {}
for row in range(num_rows):
element = array[row,col]
if element in counter:
counter[element] += 1
else:
counter[element] = 1
max_count = 0
for element in counter:
if counter[element] > max_count:
max_count = counter[element]
mfe = element
counters.append(counter)
mfe_list.append(mfe)
return counters, mfe_list
# Convert all arguments to strings
def ltos(*args):
outputs = []
for arg in args:
if type(arg) == list:
out = ' '.join(['%.3f' % e for e in arg])
if len(arg) == 1:
outputs.append(out)
else:
outputs.append('[' + out + ']')
else:
outputs.append(str(arg))
return tuple(outputs)
#-----------------------------------------------------------------------------------------------------------#
import re
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
WHITE = '\033[37m'
YELLOW = '\033[33m'
GREEN = '\033[32m'
BLUE = '\033[34m'
CYAN = '\033[36m'
RED = '\033[31m'
MAGENTA = '\033[35m'
BLACK = '\033[30m'
BHEADER = BOLD + '\033[95m'
BOKBLUE = BOLD + '\033[94m'
BOKGREEN = BOLD + '\033[92m'
BWARNING = BOLD + '\033[93m'
BFAIL = BOLD + '\033[91m'
BUNDERLINE = BOLD + '\033[4m'
BWHITE = BOLD + '\033[37m'
BYELLOW = BOLD + '\033[33m'
BGREEN = BOLD + '\033[32m'
BBLUE = BOLD + '\033[34m'
BCYAN = BOLD + '\033[36m'
BRED = BOLD + '\033[31m'
BMAGENTA = BOLD + '\033[35m'
BBLACK = BOLD + '\033[30m'
@staticmethod
def cleared(s):
return re.sub("\033\[[0-9][0-9]?m", "", s)
def red(message):
return BColors.RED + str(message) + BColors.ENDC
def b_red(message):
return BColors.BRED + str(message) + BColors.ENDC
def blue(message):
return BColors.BLUE + str(message) + BColors.ENDC
def b_yellow(message):
return BColors.BYELLOW + str(message) + BColors.ENDC
def green(message):
return BColors.GREEN + str(message) + BColors.ENDC
def b_green(message):
return BColors.BGREEN + str(message) + BColors.ENDC
#-----------------------------------------------------------------------------------------------------------#
def print_args(args, path=None):
if path:
output_file = open(path, 'w')
logger = logging.getLogger(__name__)
logger.info("Arguments:")
args.command = ' '.join(sys.argv)
items = vars(args)
for key in sorted(items.keys(), key=lambda s: s.lower()):
value = items[key]
if not value:
value = "None"
logger.info(" " + key + ": " + str(items[key]))
if path is not None:
output_file.write(" " + key + ": " + str(items[key]) + "\n")
if path:
output_file.close()
del args.command
def get_args(args):
items = vars(args)
output_string = ''
for key in sorted(items.keys(), key=lambda s: s.lower()):
value = items[key]
if not value:
value = "None"
        output_string += " " + key + ": " + str(items[key]) + "\n"
return output_string
| 4,263
| 24.686747
| 109
|
py
|
DAS
|
DAS-master/code/models.py
|
import numpy as np
import logging
import codecs
from keras.layers import Dense, Dropout, Activation, Embedding, Input
from keras.models import Model
import keras.backend as K
from my_layers import Conv1DWithMasking, Max_over_time, KL_loss, Ensemble_pred_loss, mmd_loss
from keras.constraints import maxnorm
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
def create_model(args, overal_maxlen, vocab):
##############################################################################################################################
# Custom CNN kernel initializer
# Use the initialization from Kim et al. (2014) for CNN kernel initialization.
def my_init(shape, dtype=K.floatx()):
return 0.01 * np.random.standard_normal(size=shape)
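    # i.e. kernel weights are drawn from 0.01 * N(0, 1), following the Kim et
    # al. (2014) initialization mentioned above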
##############################################################################################################################
    # Function that loads word embeddings from Glove vectors
def init_emb(emb_matrix, vocab, emb_file):
print 'Loading word embeddings ...'
counter = 0.
pretrained_emb = open(emb_file)
for line in pretrained_emb:
tokens = line.split()
if len(tokens) != 301:
continue
word = tokens[0]
vec = tokens[1:]
try:
emb_matrix[0][vocab[word]] = vec
counter += 1
except KeyError:
pass
pretrained_emb.close()
logger.info('%i/%i word vectors initialized (hit rate: %.2f%%)' % (counter, len(vocab), 100*counter/len(vocab)))
return emb_matrix
##############################################################################################################################
# Create Model
cnn_padding='same'
vocab_size = len(vocab)
if args.model_type == 'DAS':
print '\n'
logger.info('Building model for DAS')
# labeled source examples
source_input = Input(shape=(overal_maxlen,), dtype='int32', name='source_input')
# unlabeled source examples (this includes all source examples with and without labels)
source_un_input = Input(shape=(overal_maxlen,), dtype='int32', name='source_un_input')
# unlabeled target examples
target_un_input = Input(shape=(overal_maxlen,), dtype='int32', name='target_un_input')
# all examples from both source and target domains
uns_input = Input(shape=(overal_maxlen,), dtype='int32', name='uns_input')
# estimated sentiment labels for all examples
uns_target = Input(shape=(args.n_class,), dtype=K.floatx(), name='uns_target')
# ramp-up weight
uns_weight = Input(shape=(1, ), dtype=K.floatx(), name='uns_weight')
word_emb = Embedding(vocab_size, args.emb_dim, mask_zero=True, name='word_emb')
source_output = word_emb(source_input)
source_un_output = word_emb(source_un_input)
target_un_output = word_emb(target_un_input)
uns_output = word_emb(uns_input)
print 'use a cnn layer'
conv = Conv1DWithMasking(filters=args.cnn_dim, kernel_size=args.cnn_window_size, \
activation=args.cnn_activation, padding=cnn_padding, kernel_initializer=my_init, name='cnn')
source_output = conv(source_output)
source_un_output = conv(source_un_output)
target_un_output = conv(target_un_output)
uns_output = conv(uns_output)
print 'use max_over_time as aggregation function'
source_output = Max_over_time(name='mot')(source_output)
source_un_output = Max_over_time()(source_un_output)
target_un_output = Max_over_time()(target_un_output)
uns_output = Max_over_time()(uns_output)
if args.minimize_discrepancy_obj == 'kl_loss':
dis_loss = KL_loss(args.batch_size, name='discrepancy_loss')([source_un_output, target_un_output])
elif args.minimize_discrepancy_obj == 'mmd':
dis_loss = mmd_loss(args.batch_size, name='discrepancy_loss')([source_un_output, target_un_output])
else:
raise NotImplementedError
if args.weight_discrepancy > 0:
print 'Minimize domain discrepancy between source and target via %s'%(args.minimize_discrepancy_obj)
if args.dropout_prob > 0:
print 'use dropout layer'
source_output = Dropout(args.dropout_prob)(source_output)
target_un_output = Dropout(args.dropout_prob)(target_un_output)
uns_output = Dropout(args.dropout_prob)(uns_output)
clf = Dense(args.n_class, kernel_constraint=maxnorm(3), name='dense')
source_output = clf(source_output)
target_output = clf(target_un_output)
uns_output = clf(uns_output)
source_probs = Activation('softmax', name='source_probs')(source_output)
target_probs = Activation('softmax', name='target_probs')(target_output)
uns_probs = Activation('softmax', name='uns_predictions')(uns_output)
uns_pred_loss = Ensemble_pred_loss(name='uns_loss')([uns_probs, uns_target, uns_weight])
if args.weight_uns > 0:
print 'Use ensemble prediction on unlabeled data for semi-supervised training'
model = Model(inputs=[source_input, source_un_input, target_un_input, uns_input, uns_target, uns_weight],
outputs=[source_probs, target_probs, dis_loss, uns_pred_loss])
else:
raise NotImplementedError
logger.info(' Done')
print '\n'
##############################################################################################################################
# Initialize embeddings if embedding path is given
if args.emb_path:
        # It takes around 3 minutes to load pre-trained word embeddings.
model.get_layer('word_emb').set_weights(init_emb(model.get_layer('word_emb').get_weights(), vocab, args.emb_path))
return model
| 6,134
| 39.629139
| 130
|
py
|
DAS
|
DAS-master/code/read.py
|
import codecs
import operator
import numpy as np
import re
from keras.preprocessing import sequence
from keras.utils.np_utils import to_categorical
num_regex = re.compile('^[+-]?[0-9]+\.?[0-9]*$')
def create_vocab(file_list, vocab_size, skip_len):
print 'Creating vocab ...'
total_words, unique_words = 0, 0
word_freqs = {}
for f in file_list:
fin = codecs.open(f, 'r', 'utf-8')
for line in fin:
words = line.split()
if skip_len > 0 and len(words) > skip_len:
continue
for w in words:
if not bool(num_regex.match(w)):
try:
word_freqs[w] += 1
except KeyError:
unique_words += 1
word_freqs[w] = 1
total_words += 1
fin.close()
print (' %i total words, %i unique words' % (total_words, unique_words))
sorted_word_freqs = sorted(word_freqs.items(), key=operator.itemgetter(1), reverse=True)
vocab = {'<pad>':0, '<unk>':1, '<num>':2}
index = len(vocab)
for word, _ in sorted_word_freqs:
vocab[word] = index
index += 1
if vocab_size > 0 and index > vocab_size + 2:
break
print (' keep the top %i words' % vocab_size)
return vocab
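# Illustrative example (not part of the original code): with vocab_size=2 and a corpus in which
# 'good' and 'movie' are the two most frequent non-numeric tokens, create_vocab returns
#   {'<pad>': 0, '<unk>': 1, '<num>': 2, 'good': 3, 'movie': 4}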
def create_data(vocab, text_path, label_path, domain, n_class, skip_top, skip_len, replace_non_vocab):
data = []
label = [] # {pos: 0, neg: 1, neu: 2}
f = codecs.open(text_path, 'r', 'utf-8')
f_l = codecs.open(label_path, 'r', 'utf-8')
num_hit, unk_hit, skip_top_hit, total = 0., 0., 0., 0.
pos_count, neg_count, neu_count = 0, 0, 0
max_len = 0
for line, score in zip(f, f_l):
word_indices = []
words = line.split()
if skip_len > 0 and len(words) > skip_len:
continue
score = float(score.strip())
if domain == 'imdb':
if score < 5:
neg_count += 1
label.append(1)
elif score > 6:
pos_count += 1
label.append(0)
else:
if n_class == 3:
neu_count += 1
label.append(2)
else:
continue
elif domain in {'yelp2014', 'book', 'electronics', 'beauty', 'music', 'cell_phone', 'baby'}:
if score < 3:
neg_count += 1
label.append(1)
elif score > 3:
pos_count += 1
label.append(0)
else:
if n_class == 3:
neu_count += 1
label.append(2)
else:
continue
else:
print 'No such domain!'
break
for word in words:
if bool(num_regex.match(word)):
word_indices.append(vocab['<num>'])
num_hit += 1
elif word in vocab:
word_ind = vocab[word]
if skip_top > 0 and word_ind < skip_top + 3:
skip_top_hit += 1
else:
word_indices.append(word_ind)
else:
if replace_non_vocab:
word_indices.append(vocab['<unk>'])
unk_hit += 1
total += 1
if len(word_indices) > max_len:
max_len = len(word_indices)
data.append(word_indices)
f.close()
f_l.close()
print(' <num> hit rate: %.2f%%, <unk> hit rate: %.2f%%, <skip_top> hit rate: %.2f%%' \
% (100*num_hit/total, 100*unk_hit/total, 100*skip_top_hit/total))
print domain
print 'pos count: ', pos_count
print 'neg count: ', neg_count
print 'neu count: ', neu_count
return np.array(data), np.array(label), max_len
def prepare_data(dataset, source_domain, target_domain, n_class, vocab_size=0, skip_len=0, skip_top=0, replace_non_vocab=1):
if dataset == 'small_1':
text_list = ['../data/small/%s/set1_text.txt'%source_domain,
'../data/small/%s/set1_text.txt'%target_domain]
score_list = ['../data/small/%s/set1_label.txt'%source_domain,
'../data/small/%s/set1_label.txt'%target_domain]
domain_list = [source_domain, target_domain]
elif dataset == 'small_2':
text_list = ['../data/small/%s/set1_text.txt'%source_domain,
'../data/small/%s/set1_text.txt'%target_domain,
'../data/small/%s/set2_text.txt'%source_domain,
'../data/small/%s/set2_text.txt'%target_domain]
score_list = ['../data/small/%s/set1_label.txt'%source_domain,
'../data/small/%s/set1_label.txt'%target_domain,
'../data/small/%s/set2_label.txt'%source_domain,
'../data/small/%s/set2_label.txt'%target_domain]
domain_list = [source_domain, target_domain, source_domain, target_domain]
else:
text_list = ['../data/large/%s/text.txt'%source_domain,
'../data/large/%s/text.txt'%target_domain]
score_list = ['../data/large/%s/label.txt'%source_domain,
'../data/large/%s/label.txt'%target_domain]
domain_list = [source_domain, target_domain]
vocab = create_vocab(text_list, vocab_size, skip_len)
data_list = []
label_list = []
overall_max_len = 0
for f, f_l, domain in zip(text_list, score_list, domain_list):
data, label, max_len = create_data(vocab, f, f_l, domain, n_class, skip_top, skip_len, replace_non_vocab)
data_list.append(data)
label_list.append(label)
if max_len > overall_max_len:
overall_max_len = max_len
return vocab, data_list, label_list, overall_max_len
def get_data(dataset, source_domain, target_domain, n_class, vocab_size=0):
assert dataset in ['small_1', 'small_2', 'large']
vocab, data_list, label_list, overall_maxlen = prepare_data(dataset, source_domain, target_domain, n_class, vocab_size)
data_list = [sequence.pad_sequences(d, maxlen=overall_maxlen) for d in data_list]
label_list = [to_categorical(l, n_class) for l in label_list]
if dataset == 'large':
# when using the large-scale dataset, we need to sample 1k balanced dev set from labeled source data
labels = np.argmax(label_list[0], axis=-1)
pos_inds = np.where(labels==0)[0]
neg_inds = np.where(labels==1)[0]
neu_inds = np.where(labels==2)[0]
np.random.shuffle(pos_inds)
np.random.shuffle(neg_inds)
np.random.shuffle(neu_inds)
dev_inds = np.concatenate((pos_inds[:333], neg_inds[:333]))
dev_inds = np.concatenate((dev_inds, neu_inds[:334]))
train_inds = np.concatenate((pos_inds[333:], neg_inds[333:]))
train_inds = np.concatenate((train_inds, neu_inds[334:]))
source_x, source_y = data_list[0][train_inds], label_list[0][train_inds]
dev_x, dev_y = data_list[0][dev_inds], label_list[0][dev_inds]
else:
#On small-scale dataset, randomly select 1k examples from set1 of source domain as dev set
inds = np.random.permutation(data_list[0].shape[0])
dev_inds, train_inds = inds[:1000], inds[1000:]
source_x, source_y = data_list[0][train_inds], label_list[0][train_inds]
dev_x, dev_y = data_list[0][dev_inds], label_list[0][dev_inds]
test_x, test_y = data_list[1], label_list[1]
if dataset in ['small_1', 'large']:
source_un = data_list[0]
target_un = data_list[1]
else:
source_un = np.concatenate((data_list[0], data_list[2]))
target_un = data_list[3]
return vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un
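# Illustrative usage (domains and sizes below are examples, not part of this file):
#   vocab, maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un = \
#       get_data('small_1', 'book', 'electronics', n_class=2, vocab_size=10000)
# For 'small_1' and 'large', 1000 labeled source examples are held out as the dev set and the
# target-domain data serves as the test set.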
| 7,918
| 32.273109
| 124
|
py
|
DAS
|
DAS-master/code/read_amazon.py
|
import codecs
import operator
import numpy as np
import re
from keras.preprocessing import sequence
from keras.utils.np_utils import to_categorical
from read import create_vocab
num_regex = re.compile('^[+-]?[0-9]+\.?[0-9]*$')
def create_data(vocab, file_path, skip_top, skip_len, replace_non_vocab):
data = []
f = codecs.open(file_path, 'r', 'utf-8')
num_hit, unk_hit, skip_top_hit, total = 0., 0., 0., 0.
max_len = 0
for line in f:
word_indices = []
words = line.split()
if skip_len > 0 and len(words) > skip_len:
continue
for word in words:
if bool(num_regex.match(word)):
word_indices.append(vocab['<num>'])
num_hit += 1
elif word in vocab:
word_ind = vocab[word]
if skip_top > 0 and word_ind < skip_top + 3:
skip_top_hit += 1
else:
word_indices.append(word_ind)
else:
if replace_non_vocab:
word_indices.append(vocab['<unk>'])
unk_hit += 1
total += 1
if len(word_indices) > max_len:
max_len = len(word_indices)
data.append(word_indices)
print(' <num> hit rate: %.2f%%, <unk> hit rate: %.2f%%, <skip_top> hit rate: %.2f%%' \
% (100*num_hit/total, 100*unk_hit/total, 100*skip_top_hit/total))
return np.array(data), max_len
def prepare_data(source_domain, target_domain, n_class, vocab_size=0, skip_len=0, skip_top=0, replace_non_vocab=1):
file_list = ['../data/amazon/%s/pos.txt'%source_domain,
'../data/amazon/%s/neg.txt'%source_domain,
'../data/amazon/%s/un_pos.txt'%source_domain,
'../data/amazon/%s/un_neg.txt'%source_domain,
'../data/amazon/%s/pos.txt'%target_domain,
'../data/amazon/%s/neg.txt'%target_domain,
'../data/amazon/%s/un_pos.txt'%target_domain,
'../data/amazon/%s/un_neg.txt'%target_domain]
vocab = create_vocab(file_list, vocab_size, skip_len)
data_list = []
overall_max_len = 0
for f in file_list:
data, max_len = create_data(vocab, f, skip_top, skip_len, replace_non_vocab)
data_list.append(data)
if max_len > overall_max_len:
overall_max_len = max_len
return vocab, data_list, overall_max_len
def get_data(dataset, source_domain, target_domain, n_class, vocab_size=0):
vocab, data_list, overall_maxlen = prepare_data(source_domain, target_domain, n_class, vocab_size)
data_list = [sequence.pad_sequences(d, maxlen=overall_maxlen) for d in data_list]
for d in data_list:
np.random.shuffle(d)
source_pos, source_neg, source_un_pos, source_un_neg, target_pos, target_neg, target_un_pos, target_un_neg = data_list
# Each domain has a train set of size 1600, and a test set of size 400 with exactly balanced positive and negative examples
# Only consider binary classification {pos: 1, neg: 0}
source_train_y = np.concatenate((np.ones(800), np.zeros(800))).reshape(1600,1)
source_test_y = np.concatenate((np.ones(200), np.zeros(200))).reshape(400, 1)
target_train_y = np.concatenate((np.ones(800), np.zeros(800))).reshape(1600, 1)
target_test_y = np.concatenate((np.ones(200), np.zeros(200))).reshape(400, 1)
source_train_y = to_categorical(source_train_y, n_class)
source_test_y = to_categorical(source_test_y, n_class)
target_train_y = to_categorical(target_train_y, n_class)
target_test_y = to_categorical(target_test_y, n_class)
source_train_x = np.concatenate((source_pos[0:800], source_neg[0:800]))
source_test_x = np.concatenate((source_pos[800:], source_neg[800:]))
target_train_x = np.concatenate((target_pos[0:800], target_neg[0:800]))
target_test_x = np.concatenate((target_pos[800:], target_neg[800:]))
# Each domain has an additional unlabeled set of size 4000.
source_un = np.concatenate((source_un_pos, source_un_neg))
target_un = np.concatenate((target_un_pos, target_un_neg))
# For each pair of source-target domain, the classifier is trained on the training set of the source domain and
# is evaluated on the test set of the target domain. The test set from source domain is used as development set.
source_x, source_y = source_train_x, source_train_y
dev_x, dev_y = source_test_x, source_test_y
test_x, test_y = target_test_x, target_test_y
source_un = np.concatenate((source_x, source_un))
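    # Resulting sizes given the fixed splits above: source_x (1600), dev_x (400, source test set),
    # test_x (400, target test set), source_un (1600 labeled + 4000 unlabeled), target_un (4000).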
return vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un
| 4,704
| 40.27193
| 127
|
py
|
QuantFace
|
QuantFace-master/train_quantization_synthetic.py
|
import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
from backbones.mobilefacenet import MobileFaceNet
from config.config_Quantization_Synthetic import config as cfg
from utils.dataset import DataLoaderX, FaceDatasetFolder
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from backbones.iresnet import iresnet100, iresnet50, freeze_model, unfreeze_model, iresnet18
torch.backends.cudnn.benchmark = True
def main(args):
dist.init_process_group(backend='nccl', init_method='env://')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
rank = dist.get_rank()
world_size = dist.get_world_size()
if not os.path.exists(cfg.output) and rank == 0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
trainset = FaceDatasetFolder(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
logging.info("load backbone!" + cfg.network)
elif cfg.network == "iresnet50":
backbone = iresnet50(dropout=0.4,num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet18":
backbone = iresnet18(dropout=0.4, num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network =="mobilefacenet":
backbone=MobileFaceNet().to(local_rank)
else:
backbone = None
logging.info("load backbone failed!")
exit()
if args.resume:
try:
backbone_pth = os.path.join(cfg.output32, str(cfg.global_step) + "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("backbone resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("load backbone resume init, failed!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
if cfg.network == "mobilefacenet":
from backbones.mobilefacenet import quantize_model
backbone_quant = quantize_model(backbone, cfg.wq, cfg.aq).to(local_rank)
else:
from backbones.iresnet import quantize_model
backbone_quant = quantize_model(backbone, cfg.wq, cfg.aq).to(local_rank)
backbone = DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.eval()
backbone_quant = DistributedDataParallel(
module=backbone_quant, broadcast_buffers=True, device_ids=[local_rank])
backbone_quant.train()
opt_backbone = torch.optim.SGD(
params=[{'params': backbone_quant.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay,nesterov=True,)
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
criterion = torch.nn.MSELoss() #CrossEntropyLoss()
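    # The quantized network is trained to match the L2-normalized embeddings of the frozen
    # full-precision backbone (feature-level distillation); the labels from the loader are
    # therefore not used in this loop.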
start_epoch = 0
total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
if rank == 0: logging.info("Total Step is: %d" % total_step)
callback_verification = CallBackVerification(cfg.eval_step, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, writer=None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
backbone_quant=unfreeze_model(backbone_quant)
loss = AverageMeter()
global_step = 0
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
backbone_quant=freeze_model(backbone_quant)
for _, (img, label) in enumerate(train_loader):
global_step += 1
if (global_step<300):
backbone_quant = unfreeze_model(backbone_quant)
img = img.cuda(local_rank, non_blocking=True)
features = F.normalize(backbone_quant(img))
with torch.no_grad():
features_1 = F.normalize(backbone(img))
loss_v=criterion(features,features_1)
loss_v.backward()
clip_grad_norm_(backbone_quant.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_backbone.zero_grad()
loss.update(loss_v.item(), 1)
if (global_step %5000==0):
logging.info(backbone_quant)
callback_logging(global_step, loss, epoch)
callback_verification(global_step, backbone_quant)
backbone_quant = freeze_model(backbone_quant)
scheduler_backbone.step()
callback_checkpoint(global_step, backbone_quant, None, quantiza=True)
callback_verification(cfg.eval_step, backbone_quant)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch margin penalty loss training')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=1, help="resume training")
args_ = parser.parse_args()
main(args_)
| 5,889
| 37.496732
| 104
|
py
|
QuantFace
|
QuantFace-master/train_quantization.py
|
import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
from backbones.mobilefacenet import MobileFaceNet
from config.config_Quantization import config as cfg
from utils.dataset import MXFaceDataset, DataLoaderX
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from backbones.iresnet import iresnet100, iresnet50, freeze_model, unfreeze_model, iresnet18
torch.backends.cudnn.benchmark = True
def main(args):
dist.init_process_group(backend='nccl', init_method='env://')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
rank = dist.get_rank()
world_size = dist.get_world_size()
if not os.path.exists(cfg.output) and rank == 0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
logging.info("load backbone!" + cfg.network)
elif cfg.network == "iresnet50":
backbone = iresnet50(dropout=0.4,num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet18":
backbone = iresnet18(dropout=0.4, num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network =="mobilefacenet":
backbone=MobileFaceNet().to(local_rank)
else:
backbone = None
logging.info("load backbone failed!")
exit()
if args.resume:
try:
backbone_pth = os.path.join(cfg.output32, str(cfg.global_step) + "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("backbone resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("load backbone resume init, failed!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
if cfg.network =="mobilefacenet":
from backbones.mobilefacenet import quantize_model
backbone_quant = quantize_model(backbone, cfg.wq, cfg.aq).to(local_rank)
else:
from backbones.iresnet import quantize_model
backbone_quant=quantize_model(backbone,cfg.wq,cfg.aq).to(local_rank)
backbone = DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.eval()
backbone_quant = DistributedDataParallel(
module=backbone_quant, broadcast_buffers=True, device_ids=[local_rank])
backbone_quant.train()
opt_backbone = torch.optim.SGD(
params=[{'params': backbone_quant.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay,nesterov=True,)
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
criterion =torch.nn.MSELoss()
start_epoch = 0
total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
if rank == 0: logging.info("Total Step is: %d" % total_step)
callback_verification = CallBackVerification(cfg.eval_step, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, writer=None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
backbone_quant=unfreeze_model(backbone_quant)
loss = AverageMeter()
global_step = 0
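    # freeze_model/unfreeze_model appear to toggle the quantization statistics: the quantized
    # model is unfrozen during the first 300 optimization steps (presumably to let activation
    # ranges calibrate) and is frozen again at the start and end of each epoch.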
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
backbone_quant=freeze_model(backbone_quant)
for _, (img, label) in enumerate(train_loader):
global_step += 1
if (global_step < 300):
backbone_quant = unfreeze_model(backbone_quant)
img = img.cuda(local_rank, non_blocking=True)
features = F.normalize(backbone_quant(img))
with torch.no_grad():
features_1 = F.normalize(backbone(img))
loss_v=criterion(features,features_1)
loss_v.backward()
clip_grad_norm_(backbone_quant.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_backbone.zero_grad()
loss.update(loss_v.item(), 1)
if (global_step %5000==0):
logging.info(backbone_quant)
callback_logging(global_step, loss, epoch)
callback_verification(global_step, backbone_quant)
backbone_quant = freeze_model(backbone_quant)
scheduler_backbone.step()
callback_checkpoint(global_step, backbone_quant, None,quantiza=True)
callback_verification(5686, backbone_quant)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch margin penalty loss training')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=1, help="resume training")
args_ = parser.parse_args()
main(args_)
| 5,795
| 37.64
| 104
|
py
|
QuantFace
|
QuantFace-master/train_fp32.py
|
import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
from torch.nn import CrossEntropyLoss
from backbones.mobilefacenet import MobileFaceNet
from utils import losses
from config.config_FP32 import config as cfg
from utils.dataset import MXFaceDataset, DataLoaderX
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from backbones.iresnet import iresnet100, iresnet50, iresnet18
torch.backends.cudnn.benchmark = True
def main(args):
dist.init_process_group(backend='nccl', init_method='env://')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
rank = dist.get_rank()
world_size = dist.get_world_size()
if not os.path.exists(cfg.output) and rank == 0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet50":
backbone = iresnet50(dropout=0.4,num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet18":
backbone = iresnet18(dropout=0.4, num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "mobilefacenet":
backbone = MobileFaceNet().to(local_rank)
else:
backbone = None
logging.info("load backbone failed!")
exit()
if args.resume:
try:
backbone_pth = os.path.join(cfg.output, str(cfg.global_step) + "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("backbone resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("load backbone resume init, failed!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
backbone = DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.train()
# get header
if cfg.loss == "ElasticArcFace":
header = losses.ElasticArcFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,std=cfg.std).to(local_rank)
elif cfg.loss == "ElasticArcFacePlus":
header = losses.ElasticArcFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,std=cfg.std, plus=True).to(local_rank)
elif cfg.loss == "ElasticCosFace":
header = losses.ElasticCosFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,std=cfg.std).to(local_rank)
elif cfg.loss == "ElasticCosFacePlus":
header = losses.ElasticCosFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,
std=cfg.std, plus=True).to(local_rank)
elif cfg.loss == "ArcFace":
header = losses.ArcFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m).to(local_rank)
elif cfg.loss == "CosFace":
header = losses.CosFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m).to(
local_rank)
else:
print("Header not implemented")
if args.resume:
try:
header_pth = os.path.join(cfg.output, str(cfg.global_step) + "header.pth")
header.load_state_dict(torch.load(header_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("header resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("header resume init, failed!")
header = DistributedDataParallel(
module=header, broadcast_buffers=False, device_ids=[local_rank])
header.train()
opt_backbone = torch.optim.SGD(
params=[{'params': backbone.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay)
opt_header = torch.optim.SGD(
params=[{'params': header.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay)
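    # The base learning rate of both optimizers is scaled linearly with the global batch size
    # (batch_size * world_size) relative to a reference batch size of 512.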
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
scheduler_header = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_header, lr_lambda=cfg.lr_func)
criterion = CrossEntropyLoss()
start_epoch = 0
total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
if rank == 0: logging.info("Total Step is: %d" % total_step)
if args.resume:
rem_steps = (total_step - cfg.global_step)
cur_epoch = cfg.num_epoch - int(cfg.num_epoch / total_step * rem_steps)
logging.info("resume from estimated epoch {}".format(cur_epoch))
logging.info("remaining steps {}".format(rem_steps))
start_epoch = cur_epoch
scheduler_backbone.last_epoch = cur_epoch
scheduler_header.last_epoch = cur_epoch
        # --------- this could be solved more elegantly ----------------
opt_backbone.param_groups[0]['lr'] = scheduler_backbone.get_lr()[0]
opt_header.param_groups[0]['lr'] = scheduler_header.get_lr()[0]
print("last learning rate: {}".format(scheduler_header.get_lr()))
# ------------------------------------------------------------
callback_verification = CallBackVerification(cfg.eval_step, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, writer=None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
loss = AverageMeter()
global_step = cfg.global_step
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
for _, (img, label) in enumerate(train_loader):
global_step += 1
img = img.cuda(local_rank, non_blocking=True)
label = label.cuda(local_rank, non_blocking=True)
features = F.normalize(backbone(img))
thetas = header(features, label)
loss_v = criterion(thetas, label)
loss_v.backward()
clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_header.step()
opt_backbone.zero_grad()
opt_header.zero_grad()
loss.update(loss_v.item(), 1)
callback_logging(global_step, loss, epoch)
callback_verification(global_step, backbone)
scheduler_backbone.step()
scheduler_header.step()
callback_checkpoint(global_step, backbone, header)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch margin penalty loss training')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=0, help="resume training")
args_ = parser.parse_args()
main(args_)
| 7,876
| 39.394872
| 156
|
py
|
QuantFace
|
QuantFace-master/config/config_FP32.py
|
from easydict import EasyDict as edict
config = edict()
config.dataset = "emoreIresNet" # training dataset
config.embedding_size = 512 # embedding size of model
config.momentum = 0.9
config.weight_decay = 5e-4
config.batch_size = 128 # batch size per GPU
config.lr = 0.1
config.output = "output/MobileFaceNet_fp32" # train model output folder
config.global_step=0 # step to resume
config.s=64.0
config.m=0.5
config.std=0.05
# type of network to train [iresnet100 | iresnet50 | iresnet18 | mobilefacenet]
config.network = "mobilefacenet"
config.SE=False # SEModule
config.loss ="ArcFace"
if (config.network == "mobilefacenet"):
config.embedding_size = 128
if config.dataset == "emoreIresNet":
config.rec = "data/faces_emore"
config.num_classes = 85742
config.num_image = 5822653
config.num_epoch = 16
config.warmup_epoch = -1
config.val_targets = ["lfw", "cfp_fp", "cfp_ff", "agedb_30", "calfw", "cplfw"]
config.eval_step=5686
def lr_step_func(epoch):
return ((epoch + 1) / (4 + 1)) ** 2 if epoch < -1 else 0.1 ** len(
[m for m in [8, 14,20,25] if m - 1 <= epoch]) # [m for m in [8, 14,20,25] if m - 1 <= epoch])
config.lr_func = lr_step_func
elif config.dataset == "webface":
config.rec = "data/faces_webface_112x112"
config.num_classes = 10572
config.num_image = 501195
config.num_epoch = 40 # [22, 30, 35]
config.warmup_epoch = -1
config.val_targets = ["lfw", "cfp_fp", "cfp_ff", "agedb_30", "calfw", "cplfw"]
config.eval_step= 958 #33350
def lr_step_func(epoch):
return ((epoch + 1) / (4 + 1)) ** 2 if epoch < config.warmup_epoch else 0.1 ** len(
[m for m in [22, 30, 40] if m - 1 <= epoch])
config.lr_func = lr_step_func
| 1,734
| 35.145833
| 106
|
py
|
QuantFace
|
QuantFace-master/config/config_Quantization.py
|
from easydict import EasyDict as edict
config = edict()
config.dataset = "emoreIresNetTunning" # training dataset
config.embedding_size = 512 # embedding size of model
config.momentum = 0.9
config.weight_decay =5e-4
config.batch_size = 128
# batch size per GPU
config.lr = 0.1
config.output = "output/output_r50_w8_a8" # train model output folder
config.goutput="output"
config.output32 = "/r50_fp32"  # options: "/r50_fp32" | "/r100_fp32"
config.global_step= 181952#181952 # step to resume
config.s=64.0
config.m=0.5
config.std=0.05
config.wq=6
config.aq=6
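# wq / aq are presumably the weight and activation bit-widths passed to quantize_model
# (6-bit quantization in this configuration).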
config.loss="ArcFace" # Option : ElasticArcFace, ArcFace, ElasticCosFace, CosFace, MLLoss
# type of network to train [iresnet100 | iresnet50| iresnet18 | mobilefacenet]
config.network = "iresnet50"
config.SE=False # SEModule
config.loss ="ArcFace"
if (config.network == "mobilefacenet"):
config.embedding_size = 128
if config.dataset == "emoreIresNetTunning":
config.rec = "/data/fboutros/faces_emore"
config.num_classes = 85742
config.num_image = 5811200
config.num_epoch = 1
config.warmup_epoch = -1
config.val_targets =["lfw", "cfp_fp", "cfp_ff", "agedb_30", "calfw", "cplfw"]
config.eval_step= 5686
def lr_step_func(epoch):
return ((epoch + 1) / (4 + 1)) ** 2 if epoch < -1 else 0.1 ** len(
[m for m in [3, 5,7] if m - 1 <= epoch]) # [m for m in [8, 14,20,25] if m - 1 <= epoch])
config.lr_func = lr_step_func
| 1,435
| 30.217391
| 101
|
py
|
QuantFace
|
QuantFace-master/config/config_Quantization_Synthetic.py
|
from easydict import EasyDict as edict
config = edict()
config.dataset = "emoreIresNetTunningSynthetic" # training dataset (must match the branch below)
config.embedding_size = 512 # embedding size of model
config.momentum = 0.9
config.weight_decay =5e-4
config.batch_size = 128
# batch size per GPU
config.lr = 0.1
config.output = "output/output_r50_FP32_Synthetic" # train model output folder
config.goutput="output"
config.output32 = "/r50_fp32"  # options: "/r50_fp32" | "/r100_fp32"
config.global_step= 181952#181952 # step to resume
config.s=64.0
config.m=0.5
config.std=0.05
config.wq=6
config.aq=6
config.loss="ArcFace" # Option : ElasticArcFace, ArcFace, ElasticCosFace, CosFace, MLLoss
# type of network to train [iresnet100 | iresnet50| iresnet18 | mobilefacenet]
config.network = "iresnet50"
config.SE=False # SEModule
config.loss ="ArcFace"
if (config.network == "mobilefacenet"):
config.embedding_size = 128
if config.dataset == "emoreIresNetTunningSynthetic":
config.rec = "./data/synthetic/training"
config.num_classes = 85742
config.num_image = 528227
config.num_epoch = 11
config.warmup_epoch = -1
config.val_targets =["lfw", "cfp_fp", "cfp_ff", "agedb_30", "calfw", "cplfw"]
config.eval_step= 5686
def lr_step_func(epoch):
return ((epoch + 1) / (4 + 1)) ** 2 if epoch < -1 else 0.1 ** len(
[m for m in [11] if m - 1 <= epoch]) # [m for m in [8, 14,20,25] if m - 1 <= epoch])
config.lr_func = lr_step_func
| 1,459
| 32.181818
| 97
|
py
|
QuantFace
|
QuantFace-master/eval/verification.py
|
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import pickle
import mxnet as mx
import numpy as np
import sklearn
import torch
from mxnet import ndarray as nd
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
class LFold:
def __init__(self, n_splits=2, shuffle=False):
self.n_splits = n_splits
if self.n_splits > 1:
self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)
def split(self, indices):
if self.n_splits > 1:
return self.k_fold.split(indices)
else:
return [(indices, indices)]
def calculate_roc(thresholds,
embeddings1,
embeddings2,
actual_issame,
nrof_folds=10,
pca=0):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
if pca == 0:
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if pca > 0:
print('doing pca on', fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
threshold, dist[test_set],
actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame),
np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
def calculate_val(thresholds,
embeddings1,
embeddings2,
actual_issame,
far_target,
nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(
threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(
threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(
np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
# print(true_accept, false_accept)
# print(n_same, n_diff)
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy = calculate_roc(thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
pca=pca)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
1e-3,
nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
@torch.no_grad()
def load_bin(path, image_size):
try:
with open(path, 'rb') as f:
bins, issame_list = pickle.load(f) # py2
except UnicodeDecodeError as e:
with open(path, 'rb') as f:
bins, issame_list = pickle.load(f, encoding='bytes') # py3
data_list = []
for flip in [0, 1]:
data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
data_list.append(data)
for idx in range(len(issame_list) * 2):
_bin = bins[idx]
img = mx.image.imdecode(_bin)
if img.shape[1] != image_size[0]:
img = mx.image.resize_short(img, image_size[0])
img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0, 1]:
if flip == 1:
img = mx.ndarray.flip(data=img, axis=2)
data_list[flip][idx][:] = torch.from_numpy(img.asnumpy())
if idx % 1000 == 0:
print('loading bin', idx)
print(data_list[0].shape)
return data_list, issame_list
@torch.no_grad()
def test(data_set, backbone, batch_size, nfolds=10):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
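            # For the final (possibly smaller) chunk, a full batch_size window ending at bb is
            # fed through the network; only the last `count` embeddings are new and are written
            # into rows ba:bb below.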
_data = data[bb - batch_size: bb]
time0 = datetime.datetime.now()
img = ((_data / 255) - 0.5) / 0.5
net_out: torch.Tensor = backbone(img)
_embeddings = net_out.detach().cpu().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
print('infer time', time_consumed)
_, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
return acc1, std1, acc2, std2, _xnorm, embeddings_list
def dumpR(data_set,
backbone,
batch_size,
name='',
data_extra=None,
label_shape=None):
print('dump verification embedding..')
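    # NOTE: this helper still mirrors the original MXNet-based evaluation code; `_label`,
    # `_data_extra` and `model` below are not defined in this file, so it would need to be
    # adapted before use (the PyTorch evaluation path is test() above).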
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
_data = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb)
time0 = datetime.datetime.now()
if data_extra is None:
db = mx.io.DataBatch(data=(_data,), label=(_label,))
else:
db = mx.io.DataBatch(data=(_data, _data_extra),
label=(_label,))
model.forward(db, is_train=False)
net_out = model.get_outputs()
_embeddings = net_out[0].asnumpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
ba = bb
embeddings_list.append(embeddings)
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
actual_issame = np.asarray(issame_list)
outname = os.path.join('temp.bin')
with open(outname, 'wb') as f:
pickle.dump((embeddings, issame_list),
f,
protocol=pickle.HIGHEST_PROTOCOL)
# if __name__ == '__main__':
#
# parser = argparse.ArgumentParser(description='do verification')
# # general
# parser.add_argument('--data-dir', default='', help='')
# parser.add_argument('--model',
# default='../model/softmax,50',
# help='path to load model.')
# parser.add_argument('--target',
# default='lfw,cfp_ff,cfp_fp,agedb_30',
# help='test targets.')
# parser.add_argument('--gpu', default=0, type=int, help='gpu id')
# parser.add_argument('--batch-size', default=32, type=int, help='')
# parser.add_argument('--max', default='', type=str, help='')
# parser.add_argument('--mode', default=0, type=int, help='')
# parser.add_argument('--nfolds', default=10, type=int, help='')
# args = parser.parse_args()
# image_size = [112, 112]
# print('image_size', image_size)
# ctx = mx.gpu(args.gpu)
# nets = []
# vec = args.model.split(',')
# prefix = args.model.split(',')[0]
# epochs = []
# if len(vec) == 1:
# pdir = os.path.dirname(prefix)
# for fname in os.listdir(pdir):
# if not fname.endswith('.params'):
# continue
# _file = os.path.join(pdir, fname)
# if _file.startswith(prefix):
# epoch = int(fname.split('.')[0].split('-')[1])
# epochs.append(epoch)
# epochs = sorted(epochs, reverse=True)
# if len(args.max) > 0:
# _max = [int(x) for x in args.max.split(',')]
# assert len(_max) == 2
# if len(epochs) > _max[1]:
# epochs = epochs[_max[0]:_max[1]]
#
# else:
# epochs = [int(x) for x in vec[1].split('|')]
# print('model number', len(epochs))
# time0 = datetime.datetime.now()
# for epoch in epochs:
# print('loading', prefix, epoch)
# sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# # arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
# all_layers = sym.get_internals()
# sym = all_layers['fc1_output']
# model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
# # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
# model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0],
# image_size[1]))])
# model.set_params(arg_params, aux_params)
# nets.append(model)
# time_now = datetime.datetime.now()
# diff = time_now - time0
# print('model loading time', diff.total_seconds())
#
# ver_list = []
# ver_name_list = []
# for name in args.target.split(','):
# path = os.path.join(args.data_dir, name + ".bin")
# if os.path.exists(path):
# print('loading.. ', name)
# data_set = load_bin(path, image_size)
# ver_list.append(data_set)
# ver_name_list.append(name)
#
# if args.mode == 0:
# for i in range(len(ver_list)):
# results = []
# for model in nets:
# acc1, std1, acc2, std2, xnorm, embeddings_list = test(
# ver_list[i], model, args.batch_size, args.nfolds)
# print('[%s]XNorm: %f' % (ver_name_list[i], xnorm))
# print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1))
# print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2))
# results.append(acc2)
# print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results)))
# elif args.mode == 1:
# raise ValueError
# else:
# model = nets[0]
# dumpR(ver_list[0], model, args.batch_size, args.target)
| 16,187
| 38.579462
| 152
|
py
|
QuantFace
|
QuantFace-master/eval/__init__.py
| 0
| 0
| 0
|
py
|
|
QuantFace
|
QuantFace-master/plots/plot_param.py
|
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
fontP = FontProperties()
dbs=["agedb", "lfw", "calfw", "cplfw", "cfp","IJB-B", "IJB-C"]
for db in dbs:
if (db=="agedb"):
accuracies=[98.33, 97.95, 96.43,
98.08, 97.97, 97.43,
97.13, 97.07, 96.62,
95.62, 94.37, 91.77]
params = [261.22, 65.31,
49.01,
174.68, 43.67, 32.77,
96.22, 24.1, 18.1,
4.21, 1.1, 0.79]
nets = ["ResNet100",
"ResNet100(w8a8)",
"ResNet100(w6a6)",
"ResNet50",
"ResNet50(w8a8)",
"ResNet50(w6a6)",
"ResNet18",
"ResNet18(w8a8)",
"ResNet18(w6a6)",
"MobileFaceNet",
"MobileFaceNet(w8a8)",
"MobileFaceNet(w6a6)"]
marker = [ '+', '.', '*',
'+', '.', '*',
'+', '.', '*',
'+', '.', '*',
'+', '.', '*',
'+', '.', '*'
]
color = ['blue','blue','blue',
'green','green','green',
'red','red','red',
'darkgoldenrod','darkgoldenrod','darkgoldenrod']
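        # params, nets, marker and color are defined only in this first branch and are reused
        # for all subsequent databases, since the loop iterates over dbs in order starting
        # with "agedb".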
save_path = "./agedb.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("Accuracy (%) ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([90, 98.50])
plt.xlim([-2, 267])
elif(db=="lfw"):
accuracies = [99.83, 99.8, 99.45,
99.80, 99.78, 99.68,
99.67,
99.55, 99.55,
99.47,
99.35, 99.08]
save_path = "./lfw.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("Accuracy (%) ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([98, 99.89])
plt.xlim([-2, 267])
elif(db=="calfw"):
accuracies = [96.13, 96.02, 95.58,
96.1, 95.87, 95.7,
95.70,95.58, 95.32,
95.15, 94.78, 93.48]
save_path = "./calfw.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("Accuracy (%) ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([90, 97])
plt.xlim([-2, 267])
elif(db=="cplfw"):
accuracies = [93.22, 92.9, 86.6,
92.43, 92.08, 90.38,
89.73, 89.53, 89.05,
87.98, 87.73, 84.85]
save_path = "./cplfw.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("Accuracy (%) ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([82, 94.2])
plt.xlim([-2, 267])
elif(db=="cfp"):
accuracies = [98.4, 98.14, 91.0,
98.01, 97.43, 95.17,
94.47, 94.04, 93.34,
91.59, 90.84, 87.64]
save_path = "./cfp.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("Accuracy (%) ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([84, 99])
plt.xlim([-2, 267])
elif(db=="IJB-B"):
accuracies = [95.25, 94.74, 85.06,
94.19, 93.67, 89.44,
91.64, 91.01, 90.38,
88.54, 86.98, 80.58]
save_path = "./ijbb.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("TAR at FAR1e–4 ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([78, 97])
plt.xlim([-2, 267])
elif(db=="IJB-C"):
accuracies = [96.5, 96.09, 87.0,
95.74, 95.18, 90.72,
93.56, 92.87, 92.36,
90.88,
89.21, 82.94]
save_path = "./ijbc.pdf"
plt.figure()
fig, ax = plt.subplots()
plt.ylabel("TAR at FAR1e–4 ",fontsize=26)
plt.xlabel("Size (MB)",fontsize=26)
plt.ylim([80, 97])
plt.xlim([-2, 267])
p=[]
for i in range(len(accuracies)):
#if "ours" in nets[i]:
# plt.plot(params[i], accuracies[i], marker[i],markersize=16,markeredgecolor='red',label=nets[i])
#else:
plt.plot(params[i], accuracies[i], marker[i],color=color[i], markersize=16,label=nets[i])
plt.grid()
plt.tight_layout()
plt.legend(numpoints=1, loc='lower right',fontsize=12,ncol=2)
plt.savefig(save_path, format='pdf', dpi=600)
plt.close()
| 4,907
| 31.078431
| 105
|
py
|
QuantFace
|
QuantFace-master/quantization_utils/quant_modules.py
|
# *
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
# https://github.com/amirgholami/ZeroQ
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository. If not, see <http://www.gnu.org/licenses/>.
# *
import torch
import time
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module, Parameter
from .quant_utils import *
import sys
class QuantAct(Module):
"""
Class to quantize given activations
"""
def __init__(self,
activation_bit,
full_precision_flag=False,
running_stat=True,
beta=0.9):
"""
activation_bit: bit-setting for activation
full_precision_flag: full precision or not
running_stat: determines whether the activation range is updated or froze
"""
super(QuantAct, self).__init__()
self.activation_bit = activation_bit
self.full_precision_flag = full_precision_flag
self.running_stat = running_stat
self.register_buffer('x_min', torch.zeros(1))
self.register_buffer('x_max', torch.zeros(1))
self.register_buffer('beta', torch.Tensor([beta]))
self.register_buffer('beta_t', torch.ones(1))
self.act_function = AsymmetricQuantFunction.apply
def __repr__(self):
return "{0}(activation_bit={1}, full_precision_flag={2}, running_stat={3}, Act_min: {4:.2f}, Act_max: {5:.2f})".format(
self.__class__.__name__, self.activation_bit,
self.full_precision_flag, self.running_stat, self.x_min.item(),
self.x_max.item())
def fix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = False
def unfix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = True
def forward(self, x):
"""
quantize given activation x
"""
if self.running_stat:
x_min = x.data.min()
x_max = x.data.max()
# in-place operation used on multi-gpus
self.x_min += -self.x_min + min(self.x_min, x_min)
self.x_max += -self.x_max + max(self.x_max, x_max)
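            # The two in-place updates above are equivalent to
            #   self.x_min = min(self.x_min, x_min); self.x_max = max(self.x_max, x_max)
            # but keep the registered buffers in place so they remain consistent across GPUs.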
#self.beta_t = self.beta_t * self.beta
#self.x_min = (self.x_min * self.beta + x_min * (1 - self.beta))/(1 - self.beta_t)
#self.x_max = (self.x_max * self.beta + x_max * (1 - self.beta)) / (1 - self.beta_t)
#self.x_min += -self.x_min + min(self.x_min, x_min)
#self.x_max += -self.x_max + max(self.x_max, x_max)
if not self.full_precision_flag:
quant_act = self.act_function(x, self.activation_bit, self.x_min,
self.x_max)
return quant_act
else:
return x
class QuantActPreLu(Module):
"""
Class to quantize given activations
"""
def __init__(self,
act_bit,
full_precision_flag=False,
running_stat=True):
"""
activation_bit: bit-setting for activation
full_precision_flag: full precision or not
running_stat: determines whether the activation range is updated or froze
"""
super(QuantActPreLu, self).__init__()
self.activation_bit = act_bit
self.full_precision_flag = full_precision_flag
self.running_stat = running_stat
self.act_function = AsymmetricQuantFunction.apply
self.quantAct=QuantAct(activation_bit=act_bit,running_stat=True)
def __repr__(self):
s = super(QuantActPreLu, self).__repr__()
s = "(" + s + " activation_bit={}, full_precision_flag={})".format(
self.activation_bit, self.full_precision_flag)
return s
def set_param(self, prelu):
self.weight = Parameter(prelu.weight.data.clone())
def fix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = False
def unfix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = True
def forward(self, x):
w = self.weight
x_transform = w.data.detach()
a_min = x_transform.min(dim=0).values
a_max = x_transform.max(dim=0).values
if not self.full_precision_flag:
w = self.act_function(self.weight, self.activation_bit, a_min,
a_max)
else:
w = self.weight
#inputs = max(0, inputs) + alpha * min(0, inputs)
#w_min = torch.mul( F.relu(-x),-w)
#x= F.relu(x) + w_min
#inputs = self.quantized_op.add(torch.relu(x), weight_min_res)
x= F.prelu(x,weight=w)
x=self.quantAct(x)
return x
class Quant_Linear(Module):
"""
Class to quantize given linear layer weights
"""
def __init__(self, weight_bit, full_precision_flag=False):
"""
weight: bit-setting for weight
full_precision_flag: full precision or not
running_stat: determines whether the activation range is updated or froze
"""
super(Quant_Linear, self).__init__()
self.full_precision_flag = full_precision_flag
self.weight_bit = weight_bit
self.weight_function = AsymmetricQuantFunction.apply
def __repr__(self):
s = super(Quant_Linear, self).__repr__()
s = "(" + s + " weight_bit={}, full_precision_flag={})".format(
self.weight_bit, self.full_precision_flag)
return s
def set_param(self, linear):
self.in_features = linear.in_features
self.out_features = linear.out_features
self.weight = Parameter(linear.weight.data.clone())
try:
self.bias = Parameter(linear.bias.data.clone())
except AttributeError:
self.bias = None
def forward(self, x):
"""
using quantized weights to forward activation x
"""
w = self.weight
x_transform = w.data.detach()
w_min = x_transform.min(dim=1).values
w_max = x_transform.max(dim=1).values
if not self.full_precision_flag:
w = self.weight_function(self.weight, self.weight_bit, w_min,w_max)
else:
w = self.weight
return F.linear(x, weight=w, bias=self.bias)
class Quant_Conv2d(Module):
"""
Class to quantize given convolutional layer weights
"""
def __init__(self, weight_bit, full_precision_flag=False):
super(Quant_Conv2d, self).__init__()
self.full_precision_flag = full_precision_flag
self.weight_bit = weight_bit
self.weight_function = AsymmetricQuantFunction.apply
def __repr__(self):
s = super(Quant_Conv2d, self).__repr__()
s = "(" + s + " weight_bit={}, full_precision_flag={})".format(
self.weight_bit, self.full_precision_flag)
return s
def set_param(self, conv):
self.in_channels = conv.in_channels
self.out_channels = conv.out_channels
self.kernel_size = conv.kernel_size
self.stride = conv.stride
self.padding = conv.padding
self.dilation = conv.dilation
self.groups = conv.groups
self.weight = Parameter(conv.weight.data.clone())
try:
self.bias = Parameter(conv.bias.data.clone())
except AttributeError:
self.bias = None
def forward(self, x):
"""
using quantized weights to forward activation x
"""
w = self.weight
x_transform = w.data.contiguous().view(self.out_channels, -1)
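        # Per-output-channel quantization: each filter is flattened so that w_min/w_max (and
        # hence the scale and zero-point) are computed separately for every output channel.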
w_min = x_transform.min(dim=1).values
w_max = x_transform.max(dim=1).values
if not self.full_precision_flag:
w = self.weight_function(self.weight, self.weight_bit, w_min,
w_max)
else:
w = self.weight
return F.conv2d(x, w, self.bias, self.stride, self.padding,
self.dilation, self.groups)
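# --- Usage sketch (editor's addition, not part of the original file): wrap
# pretrained float layers with the quantized modules defined above and run a
# forward pass on fake data. Shapes and bit widths are illustrative only.
if __name__ == "__main__":
    import torch
    import torch.nn as nn
    q_conv = Quant_Conv2d(weight_bit=8)
    q_conv.set_param(nn.Conv2d(3, 8, kernel_size=3, padding=1))
    q_fc = Quant_Linear(weight_bit=8)
    q_fc.set_param(nn.Linear(8 * 4 * 4, 10))
    x = torch.randn(2, 3, 4, 4)
    out = q_fc(q_conv(x).reshape(2, -1))   # forward through 8-bit weights
    print(out.shape)                       # torch.Size([2, 10])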
| 7,593
| 28.320463
| 121
|
py
|
QuantFace
|
QuantFace-master/quantization_utils/quant_utils.py
|
#*
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
# https://github.com/amirgholami/ZeroQ
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository. If not, see <http://www.gnu.org/licenses/>.
#*
import math
import numpy as np
from torch.autograd import Function, Variable
import torch
def clamp(input, min, max, inplace=False):
"""
Clamp tensor input to (min, max).
input: input tensor to be clamped
"""
if inplace:
input.clamp_(min, max)
return input
return torch.clamp(input, min, max)
def linear_quantize(input, scale, zero_point, inplace=False):
"""
Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.
input: single-precision input tensor to be quantized
scale: scaling factor for quantization
    zero_point: shift for quantization
"""
# reshape scale and zeropoint for convolutional weights and activation
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
# mapping single-precision input to integer values with the given scale and zeropoint
if inplace:
input.mul_(scale).sub_(zero_point).round_()
return input
return torch.round(scale * input - zero_point)
def linear_dequantize(input, scale, zero_point, inplace=False):
"""
Map integer input tensor to fixed point float point with given scaling factor and zeropoint.
input: integer input tensor to be mapped
scale: scaling factor for quantization
    zero_point: shift for quantization
"""
# reshape scale and zeropoint for convolutional weights and activation
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
# mapping integer input to fixed point float point value with given scaling factor and zeropoint
if inplace:
input.add_(zero_point).div_(scale)
return input
return (input + zero_point) / scale
def asymmetric_linear_quantization_params(num_bits,
saturation_min,
saturation_max,
integral_zero_point=True,
signed=True):
"""
Compute the scaling factor and zeropoint with the given quantization range.
saturation_min: lower bound for quantization range
saturation_max: upper bound for quantization range
"""
n = 2**num_bits - 1
scale = n / torch.clamp((saturation_max - saturation_min), min=1e-8)
zero_point = scale * saturation_min
if integral_zero_point:
if isinstance(zero_point, torch.Tensor):
zero_point = zero_point.round()
else:
zero_point = float(round(zero_point))
if signed:
zero_point += 2**(num_bits - 1)
return scale, zero_point
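# --- Worked example (editor's addition): an 8-bit asymmetric round trip over a
# made-up tensor, showing how scale/zero_point feed the two helpers above.
def _demo_quant_round_trip():
    x = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0])
    scale, zero_point = asymmetric_linear_quantization_params(8, x.min(), x.max())
    q = linear_quantize(x, scale, zero_point)        # integers in [-128, 127]
    x_hat = linear_dequantize(q, scale, zero_point)  # ~x, up to rounding error
    return q, x_hat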
class AsymmetricQuantFunction(Function):
"""
Class to quantize the given floating-point values with given range and bit-setting.
Currently only support inference, but not support back-propagation.
"""
@staticmethod
def forward(ctx, x, k, x_min=None, x_max=None):
"""
x: single-precision value to be quantized
k: bit-setting for x
x_min: lower bound for quantization range
        x_max: upper bound for quantization range
"""
# if x_min is None or x_max is None or (sum(x_min == x_max) == 1
# and x_min.numel() == 1):
# x_min, x_max = x.min(), x.max()
scale, zero_point = asymmetric_linear_quantization_params(
k, x_min, x_max)
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
n = 2**(k - 1)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
quant_x = linear_dequantize(new_quant_x,
scale,
zero_point,
inplace=False)
        return quant_x  # Variable wrapping is a no-op on modern PyTorch
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None, None
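# --- Usage sketch (editor's addition): the Function above behaves as a
# straight-through estimator -- quantized values forward, identity gradient back.
def _demo_ste():
    w = torch.randn(6, requires_grad=True)
    w_q = AsymmetricQuantFunction.apply(w, 8, w.detach().min(), w.detach().max())
    w_q.sum().backward()
    return w.grad        # all ones: the gradient passes through unchanged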
| 5,114
| 35.535714
| 100
|
py
|
QuantFace
|
QuantFace-master/utils/losses.py
|
import torch
from torch import nn
import math
import numpy as np
import torch.nn.functional as F
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class MLLoss(nn.Module):
def __init__(self, s=64.0):
super(MLLoss, self).__init__()
self.s = s
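        # NOTE (editor): self.kernel is used in forward() but never created here;
        # callers must attach an (in_features, out_features) nn.Parameter first.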
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
cos_theta.mul_(self.s)
return cos_theta
'''
# from https://github.com/HuangYG123/CurricularFace/blob/master/head/metrics.py
class ElasticArcFace(nn.Module):
r"""Implement of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):
Args:
in_features: size of each input sample
out_features: size of each output sample
s: norm of input feature
m: margin
cos(theta+m)
"""
def __init__(self, in_features, out_features, s=64.0, m=0.50, easy_margin=False,std=0.0125):
super(ElasticArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.std=std
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
# nn.init.xavier_uniform_(self.kernel)
nn.init.normal_(self.kernel, std=0.01)
self.easy_margin = easy_margin
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
        cos_theta = cos_theta.clamp(-1 + 1e-5, 1 - 1e-5)  # for numerical stability
with torch.no_grad():
origin_cos = cos_theta.clone()
target_logit = cos_theta[torch.arange(0, embbedings.size(0)), label].view(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
index = torch.where(label != -1)[0]
margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device).clamp(self.m-self.std, self.m+self.std) # Fast converge .clamp(self.m-self.std, self.m+self.std)
with torch.no_grad():
#distmat = cos_theta[index, label.view(-1)].detach().clone()
_, idicate_cosie = torch.sort(target_logit, dim=0, descending=True)
margin, _ = torch.sort(margin, dim=0)
cos_m=torch.cos(margin)
sin_m=torch.sin(margin)
th=torch.cos(math.pi-margin)
mm=torch.sin(math.pi-margin)*margin
cos_theta_m = target_logit * cos_m - sin_theta * sin_m # cos(target+margin)
if self.easy_margin:
final_target_logit = torch.where(target_logit > 0, cos_theta_m, target_logit)
else:
final_target_logit = torch.where(target_logit > th, cos_theta_m, target_logit - mm)
cos_theta.scatter_(1, label.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.s
return output # , origin_cos * self.s
'''
class ElasticArcFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.50,std=0.0125, random=True):
super(ElasticArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
self.std=std
self.random=random
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device)#.clamp(self.m-self.std, self.m+self.std) # Fast converge .clamp(self.m-self.std, self.m+self.std)
if not self.random:
with torch.no_grad():
distmat = cos_theta[index, label.view(-1)].detach().clone()
_, idicate_cosie = torch.sort(distmat, dim=0, descending=True)
margin, _ = torch.sort(margin, dim=0)
m_hot.scatter_(1, label[index, None], margin[idicate_cosie])
else:
m_hot.scatter_(1, label[index, None], margin)
cos_theta.acos_()
cos_theta[index] += m_hot
cos_theta.cos_().mul_(self.s)
return cos_theta
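# --- Usage sketch (editor's addition): feature/class sizes below are made up;
# the head adds an elastic angular margin at each sample's true class.
def _demo_elastic_arcface():
    head = ElasticArcFace(in_features=512, out_features=10)
    emb = torch.randn(4, 512)
    labels = torch.randint(0, 10, (4,))
    logits = head(emb, labels)                    # (4, 10) scaled cosines
    return F.cross_entropy(logits, labels)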
class ElasticCosFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.35,std=0.0125, random=False):
super(ElasticCosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
self.std=std
self.random=random
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device) # Fast converge .clamp(self.m-self.std, self.m+self.std)
if not self.random:
with torch.no_grad():
distmat = cos_theta[index, label.view(-1)].detach().clone()
_, idicate_cosie = torch.sort(distmat, dim=0, descending=True)
margin, _ = torch.sort(margin, dim=0)
m_hot.scatter_(1, label[index, None], margin[idicate_cosie])
else:
m_hot.scatter_(1, label[index, None], margin)
cos_theta[index] -= m_hot
ret = cos_theta * self.s
return ret
class CosFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.35):
super(CosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
m_hot.scatter_(1, label[index, None], self.m)
cos_theta[index] -= m_hot
ret = cos_theta * self.s
return ret
def loss_func(feat1, feat2):
    return 1 - F.cosine_similarity(feat1, feat2).abs().mean()
class ArcFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.50):
super(ArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
m_hot.scatter_(1, label[index, None], self.m)
cos_theta.acos_()
cos_theta[index] += m_hot
cos_theta.cos_().mul_(self.s)
return cos_theta
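# --- Minimal forward sketch (editor's addition): ArcFace, CosFace and the
# Elastic variants are interchangeable from the caller's point of view.
if __name__ == "__main__":
    head = ArcFace(in_features=512, out_features=100, s=64.0, m=0.5)
    emb = torch.randn(8, 512)
    labels = torch.randint(0, 100, (8,))
    logits = head(emb, labels)                    # margin only on target class
    print(logits.shape, F.cross_entropy(logits, labels).item())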
| 8,386
| 40.315271
| 211
|
py
|
QuantFace
|
QuantFace-master/utils/countFLOPS.py
|
from torch.autograd import Variable
import numpy as np
import torch
def count_model_flops(model, input_res=[112, 112], multiply_adds=True):
list_conv = []
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = (kernel_ops * (
2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
list_conv.append(flops)
list_linear = []
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
if self.bias is not None:
bias_ops = self.bias.nelement() if self.bias.nelement() else 0
flops = batch_size * (weight_ops + bias_ops)
else:
flops = batch_size * weight_ops
list_linear.append(flops)
list_bn = []
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu = []
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling = []
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = 0
flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
list_pooling.append(flops)
def pooling_hook_ad(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
input = input[0]
flops = int(np.prod(input.shape))
list_pooling.append(flops)
handles = []
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d) or isinstance(net, torch.nn.ConvTranspose2d):
handles.append(net.register_forward_hook(conv_hook))
elif isinstance(net, torch.nn.Linear):
handles.append(net.register_forward_hook(linear_hook))
elif isinstance(net, torch.nn.BatchNorm2d) or isinstance(net, torch.nn.BatchNorm1d):
handles.append(net.register_forward_hook(bn_hook))
elif isinstance(net, torch.nn.ReLU) or isinstance(net, torch.nn.PReLU):
handles.append(net.register_forward_hook(relu_hook))
elif isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
handles.append(net.register_forward_hook(pooling_hook))
else:
print("warning" + str(net))
return
for c in childrens:
foo(c)
model.eval()
foo(model)
input = Variable(torch.rand(3, input_res[1], input_res[0]).unsqueeze(0), requires_grad=True)
out = model(input)
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))
for h in handles:
h.remove()
model.train()
return flops_to_string(total_flops)
def flops_to_string(flops, units='MFLOPS', precision=4):
if units == 'GFLOPS':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MFLOPS':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KFLOPS':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' FLOPS'
def _calc_width(net):
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
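# --- Usage sketch (editor's addition): FLOP count of a tiny throwaway model;
# any model whose leaves are conv/linear/bn/relu/pooling layers is supported.
if __name__ == "__main__":
    tiny = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3, padding=1),
        torch.nn.BatchNorm2d(8),
        torch.nn.ReLU(),
    )
    print(count_model_flops(tiny, input_res=[112, 112]))
    print(_calc_width(tiny), "trainable parameters")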
| 4,062
| 36.275229
| 112
|
py
|
QuantFace
|
QuantFace-master/utils/modelFLOPS.py
|
import logging
from pytorch_model_summary import summary
import torch
from utils.countFLOPS import count_model_flops
from backbones.iresnet import iresnet100
from config.config_FP32 import config as cfg
if __name__ == "__main__":
# load model
    if cfg.network == "iresnet100":
        backbone = iresnet100(num_features=cfg.embedding_size)
else:
backbone = None
logging.info("load backbone failed!")
print(summary(backbone, torch.zeros((1, 3, 112, 112)), show_input=False))
flops = count_model_flops(backbone)
print(flops)
#model.eval()
#tic = time.time()
#model.forward(torch.zeros((1, 3, 112, 112)))
#end = time.time()
#print(end-tic)
| 813
| 21.611111
| 77
|
py
|
QuantFace
|
QuantFace-master/utils/dataset.py
|
import numbers
import os
import queue as Queue
import random
import threading
import mxnet as mx
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import cv2
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, local_rank, max_prefetch=6):
super(BackgroundGenerator, self).__init__()
self.queue = Queue.Queue(max_prefetch)
self.generator = generator
self.local_rank = local_rank
self.daemon = True
self.start()
def run(self):
torch.cuda.set_device(self.local_rank)
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __next__(self):
return self.next()
def __iter__(self):
return self
class DataLoaderX(DataLoader):
def __init__(self, local_rank, **kwargs):
super(DataLoaderX, self).__init__(**kwargs)
self.stream = torch.cuda.Stream(local_rank)
self.local_rank = local_rank
def __iter__(self):
self.iter = super(DataLoaderX, self).__iter__()
self.iter = BackgroundGenerator(self.iter, self.local_rank)
self.preload()
return self
def preload(self):
self.batch = next(self.iter, None)
if self.batch is None:
return None
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
self.batch[k] = self.batch[k].to(device=self.local_rank,
non_blocking=True)
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is None:
raise StopIteration
self.preload()
return batch
class MXFaceDataset(Dataset):
def __init__(self, root_dir, local_rank):
super(MXFaceDataset, self).__init__()
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
self.root_dir = root_dir
self.local_rank = local_rank
path_imgrec = os.path.join(root_dir, 'train.rec')
path_imgidx = os.path.join(root_dir, 'train.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
s = self.imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
if header.flag > 0:
self.header0 = (int(header.label[0]), int(header.label[1]))
self.imgidx = np.array(range(1, int(header.label[0])))
else:
self.imgidx = np.array(list(self.imgrec.keys))
def __getitem__(self, index):
idx = self.imgidx[index]
s = self.imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
label = torch.tensor(label, dtype=torch.long)
sample = mx.image.imdecode(img).asnumpy()
if self.transform is not None:
sample = self.transform(sample)
return sample, label
def __len__(self):
return len(self.imgidx)
class FaceDatasetFolder(Dataset):
def __init__(self, root_dir, local_rank):
super(FaceDatasetFolder, self).__init__()
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
self.root_dir = os.path.join( root_dir)
self.local_rank = local_rank
self.imgidx, self.labels=self.scan(self.root_dir)
def scan(self,root):
imgidex=[]
labels=[]
lb=0
list_dir=os.listdir(root)
#list_dir.sort()
for img in list_dir:
imgidex.append(os.path.join(root,img))
labels.append(lb)
lb = lb+1
return imgidex,labels
def readImage(self,path):
return cv2.imread(os.path.join(self.root_dir,path))
def __getitem__(self, index):
path = self.imgidx[index]
img=self.readImage(path)
label = self.labels[index]
label = torch.tensor(label, dtype=torch.long)
        # cv2.imread returns BGR; convert to RGB before the torchvision transforms
        sample = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.transform is not None:
sample = self.transform(sample)
return sample, label
def __len__(self):
return len(self.imgidx)
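# --- Usage sketch (editor's addition). The path below is hypothetical;
# FaceDatasetFolder treats each image directly under root_dir as its own class.
def _demo_folder_dataset(root_dir="/path/to/aligned_faces"):
    dataset = FaceDatasetFolder(root_dir, local_rank=0)
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    sample, label = next(iter(loader))
    return sample.shape, label       # (4, 3, H, W) tensors normalized to [-1, 1]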
| 4,790
| 30.728477
| 82
|
py
|
QuantFace
|
QuantFace-master/utils/utils_amp.py
|
from typing import Dict, List
import torch
try:
    from torch._six import container_abcs  # removed in PyTorch >= 1.9
except ImportError:
    import collections.abc as container_abcs
from torch.cuda.amp import GradScaler
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor: torch.Tensor) -> None:
assert master_tensor.is_cuda
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
def get(self, device) -> torch.Tensor:
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
class MaxClipGradScaler(GradScaler):
def __init__(self, init_scale, max_scale: float, growth_interval=100):
GradScaler.__init__(self, init_scale=init_scale, growth_interval=growth_interval)
self.max_scale = max_scale
def scale_clip(self):
if self.get_scale() == self.max_scale:
self.set_growth_factor(1)
elif self.get_scale() < self.max_scale:
self.set_growth_factor(2)
elif self.get_scale() > self.max_scale:
self._scale.fill_(self.max_scale)
self.set_growth_factor(1)
def scale(self, outputs):
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Arguments:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
self.scale_clip()
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
assert self._scale is not None
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
def apply_scale(val):
if isinstance(val, torch.Tensor):
assert val.is_cuda
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
assert self._scale is not None
stash.append(_MultiDeviceReplicator(self._scale))
return val * stash[0].get(val.device)
elif isinstance(val, container_abcs.Iterable):
iterable = map(apply_scale, val)
if isinstance(val, list) or isinstance(val, tuple):
return type(val)(iterable)
else:
return iterable
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
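# --- Training-step sketch (editor's addition): how this scaler is typically
# driven. Requires CUDA; model, opt, loss_fn, x and y are placeholders, and the
# scaler should be created once outside the loop, e.g.
#   scaler = MaxClipGradScaler(init_scale=128.0, max_scale=128 * 1024)
def _amp_step(scaler, model, opt, loss_fn, x, y):
    opt.zero_grad()
    with torch.cuda.amp.autocast():
        loss = loss_fn(model(x), y)
    scaler.scale(loss).backward()    # scale() also applies the max-scale clip
    scaler.step(opt)
    scaler.update()
    return loss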
| 3,187
| 37.878049
| 109
|
py
|
QuantFace
|
QuantFace-master/utils/utils_callbacks.py
|
import logging
import os
import time
from typing import List
import torch
from eval import verification
from utils.utils_logging import AverageMeter
class CallBackVerification(object):
def __init__(self, frequent, rank, val_targets, rec_prefix, image_size=(112, 112)):
self.frequent: int = frequent
self.rank: int = rank
self.highest_acc: float = 0.0
self.highest_acc_list: List[float] = [0.0] * len(val_targets)
self.ver_list: List[object] = []
self.ver_name_list: List[str] = []
if self.rank == 0:
self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)
def ver_test(self, backbone: torch.nn.Module, global_step: int):
results = []
for i in range(len(self.ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
self.ver_list[i], backbone, 10, 10)
logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm))
logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2))
if acc2 > self.highest_acc_list[i]:
self.highest_acc_list[i] = acc2
logging.info(
'[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i]))
results.append(acc2)
def init_dataset(self, val_targets, data_dir, image_size):
for name in val_targets:
path = os.path.join(data_dir, name + ".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
self.ver_list.append(data_set)
self.ver_name_list.append(name)
def __call__(self, num_update, backbone: torch.nn.Module):
if self.rank == 0 and num_update > 0 and num_update % self.frequent == 0:
backbone.eval()
self.ver_test(backbone, num_update)
backbone.train()
class CallBackLogging(object):
def __init__(self, frequent, rank, total_step, batch_size, world_size, writer=None, resume=0, rem_total_steps=None):
self.frequent: int = frequent
self.rank: int = rank
self.time_start = time.time()
self.total_step: int = total_step
self.batch_size: int = batch_size
self.world_size: int = world_size
self.writer = writer
self.resume = resume
self.rem_total_steps = rem_total_steps
self.init = False
self.tic = 0
def __call__(self, global_step, loss: AverageMeter, epoch: int):
if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
if self.init:
try:
speed: float = self.frequent * self.batch_size / (time.time() - self.tic)
speed_total = speed * self.world_size
except ZeroDivisionError:
speed_total = float('inf')
time_now = (time.time() - self.time_start) / 3600
# TODO: resume time_total is not working
if self.resume:
time_total = time_now / ((global_step + 1) / self.rem_total_steps)
else:
time_total = time_now / ((global_step + 1) / self.total_step)
time_for_end = time_total - time_now
if self.writer is not None:
self.writer.add_scalar('time_for_end', time_for_end, global_step)
self.writer.add_scalar('loss', loss.avg, global_step)
msg = "Speed %.2f samples/sec Loss %.4f Epoch: %d Global Step: %d Required: %1.f hours" % (
speed_total, loss.avg, epoch, global_step, time_for_end
)
logging.info(msg)
loss.reset()
self.tic = time.time()
else:
self.init = True
self.tic = time.time()
class CallBackModelCheckpoint(object):
def __init__(self, rank, output="./"):
self.rank: int = rank
self.output: str = output
    def __call__(self, global_step, backbone: torch.nn.Module, header: torch.nn.Module = None, quantiza: bool = False):
if quantiza:
if global_step > 100 and self.rank == 0:
torch.save(backbone.module, os.path.join(self.output, str(global_step) + "backbone.pt"))
else:
if global_step > 100 and self.rank == 0:
torch.save(backbone.module.state_dict(), os.path.join(self.output, str(global_step)+ "backbone.pth"))
if global_step > 100 and self.rank == 0 and header is not None:
torch.save(header.module.state_dict(), os.path.join(self.output, str(global_step)+ "header.pth"))
| 4,819
| 43.220183
| 120
|
py
|
QuantFace
|
QuantFace-master/utils/utils_logging.py
|
import logging
import os
import sys
class AverageMeter(object):
"""Computes and stores the average and current value
"""
def __init__(self):
self.val = None
self.avg = None
self.sum = None
self.count = None
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def init_logging(log_root, rank, models_root, logfile=None):
    if rank == 0:
log_root.setLevel(logging.INFO)
formatter = logging.Formatter("Training: %(asctime)s-%(message)s")
file_name = "training.log" if logfile is None else logfile
handler_file = logging.FileHandler(os.path.join(models_root, file_name))
handler_stream = logging.StreamHandler(sys.stdout)
handler_file.setFormatter(formatter)
handler_stream.setFormatter(formatter)
log_root.addHandler(handler_file)
log_root.addHandler(handler_stream)
log_root.info('rank_id: %d' % rank)
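# --- Usage sketch (editor's addition): AverageMeter keeps a running mean, here
# over three fake per-batch losses weighted by batch size.
if __name__ == "__main__":
    meter = AverageMeter()
    for loss, n in [(2.0, 32), (1.0, 32), (0.5, 64)]:
        meter.update(loss, n=n)
    print(meter.avg)                 # (2*32 + 1*32 + 0.5*64) / 128 = 1.0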
| 1,157
| 26.571429
| 80
|
py
|
QuantFace
|
QuantFace-master/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
QuantFace
|
QuantFace-master/backbones/vggface.py
|
import torch
from torchvision import datasets, transforms, models
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset, DataLoader
from skimage import io, transform
from PIL import Image
import torchvision.transforms.functional as TF
import itertools
import torch.utils.data as data_utils
from backbones.countFLOPS import _calc_width, count_model_flops
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
class VGG_16(nn.Module):
"""
Main Class
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
self.block_size = [2, 2, 3, 3, 3]
self.conv_1_1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)
self.conv_1_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv_2_1 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.conv_2_2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.conv_3_1 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.conv_3_2 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
self.conv_3_3 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
self.conv_4_1 = nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.conv_4_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_4_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_5_1 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_5_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_5_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.fc6 = nn.Linear(512 * 7 * 7, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, 2622)
def forward(self, x):
""" Pytorch forward
Args:
x: input image (224x224)
Returns: class logits
"""
x = F.relu(self.conv_1_1(x))
x = F.relu(self.conv_1_2(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_2_1(x))
x = F.relu(self.conv_2_2(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_3_1(x))
x = F.relu(self.conv_3_2(x))
x = F.relu(self.conv_3_3(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_4_1(x))
x = F.relu(self.conv_4_2(x))
x = F.relu(self.conv_4_3(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_5_1(x))
x = F.relu(self.conv_5_2(x))
x = F.relu(self.conv_5_3(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc6(x))
x = F.dropout(x, 0.5, self.training)
x = F.relu(self.fc7(x))
x = F.dropout(x, 0.5, self.training)
return self.fc8(x)
def _test():
import torch
pretrained = False
models = [
VGG_16
]
for model in models:
net = model()
print(net)
# net.train()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
flops=count_model_flops(net, input_res=[224,224])
print("m={}, {}".format(model.__name__, flops))
net.eval()
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
        assert (tuple(y.size()) == (1, 2622))  # fc8 emits 2622 class logits
if __name__ == "__main__":
_test()
| 3,350
| 29.189189
| 67
|
py
|
QuantFace
|
QuantFace-master/backbones/activation.py
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from inspect import isfunction
class Identity(nn.Module):
"""
Identity block.
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def __repr__(self):
return '{name}()'.format(name=self.__class__.__name__)
class HSigmoid(nn.Module):
"""
Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
https://arxiv.org/abs/1905.02244.
"""
def forward(self, x):
return F.relu6(x + 3.0, inplace=True) / 6.0
class Swish(nn.Module):
"""
Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
"""
def forward(self, x):
return x * torch.sigmoid(x)
class HSwish(nn.Module):
"""
H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
Parameters:
----------
inplace : bool
Whether to use inplace version of the module.
"""
def __init__(self, inplace=False):
super(HSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_activation_layer(activation,param):
"""
Create activation layer from string/function.
Parameters:
----------
activation : function, or str, or nn.Module
Activation function or name of activation function.
Returns:
-------
nn.Module
Activation layer.
"""
assert (activation is not None)
if isfunction(activation):
return activation()
elif isinstance(activation, str):
if activation == "relu":
return nn.ReLU(inplace=True)
elif activation =="prelu":
return nn.PReLU(param)
elif activation == "relu6":
return nn.ReLU6(inplace=True)
elif activation == "swish":
return Swish()
elif activation == "hswish":
return HSwish(inplace=True)
elif activation == "sigmoid":
return nn.Sigmoid()
elif activation == "hsigmoid":
return HSigmoid()
elif activation == "identity":
return Identity()
else:
raise NotImplementedError()
else:
assert (isinstance(activation, nn.Module))
return activation
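# --- Usage sketch (editor's addition): strings, factory callables and modules
# are all accepted; `param` is only consumed by the "prelu" branch.
if __name__ == "__main__":
    print(get_activation_layer("prelu", 64))       # nn.PReLU with 64 slopes
    print(get_activation_layer("hswish", None))    # HSwish(inplace=True)
    print(get_activation_layer(lambda: Swish(), None))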
| 2,439
| 27.045977
| 106
|
py
|
QuantFace
|
QuantFace-master/backbones/countFLOPS.py
|
from torch.autograd import Variable
import numpy as np
import torch
def count_model_flops(model, input_res=[112, 112], multiply_adds=True):
list_conv = []
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = (kernel_ops * (
2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
list_conv.append(flops)
list_linear = []
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
if self.bias is not None:
bias_ops = self.bias.nelement() if self.bias.nelement() else 0
flops = batch_size * (weight_ops + bias_ops)
else:
flops = batch_size * weight_ops
list_linear.append(flops)
list_bn = []
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu = []
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling = []
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = 0
flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
list_pooling.append(flops)
def pooling_hook_ad(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
input = input[0]
flops = int(np.prod(input.shape))
list_pooling.append(flops)
handles = []
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d) or isinstance(net, torch.nn.ConvTranspose2d):
handles.append(net.register_forward_hook(conv_hook))
elif isinstance(net, torch.nn.Linear):
handles.append(net.register_forward_hook(linear_hook))
elif isinstance(net, torch.nn.BatchNorm2d) or isinstance(net, torch.nn.BatchNorm1d):
handles.append(net.register_forward_hook(bn_hook))
elif isinstance(net, torch.nn.ReLU) or isinstance(net, torch.nn.PReLU):
handles.append(net.register_forward_hook(relu_hook))
elif isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
handles.append(net.register_forward_hook(pooling_hook))
else:
print("warning" + str(net))
return
for c in childrens:
foo(c)
model.eval()
foo(model)
input = Variable(torch.rand(3, input_res[0], input_res[1]).unsqueeze(0), requires_grad=True)
out = model(input)
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))
for h in handles:
h.remove()
model.train()
return flops_to_string(total_flops)
def flops_to_string(flops, units='MFLOPS', precision=4):
if units == 'GFLOPS':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MFLOPS':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KFLOPS':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' FLOPS'
def _calc_width(net):
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
| 4,062
| 36.275229
| 112
|
py
|
QuantFace
|
QuantFace-master/backbones/mobilefacenet.py
|
import copy
from torch.nn import (
Linear,
Conv2d,
BatchNorm1d,
BatchNorm2d,
PReLU,
ReLU,
Sigmoid,
Dropout2d,
Dropout,
AvgPool2d,
MaxPool2d,
AdaptiveAvgPool2d,
Sequential,
Module,
Parameter,
)
import torch.nn.functional as F
import torch
import torch.nn as nn
from collections import namedtuple, OrderedDict
import math
#from .common import ECA_Layer, SEBlock, CbamBlock, Identity, GCT
################################## Original Arcface Model #############################################################
from quantization_utils.quant_modules import Quant_Conv2d, Quant_Linear, QuantAct, QuantActPreLu
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Conv_block(Module):
def __init__(
self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1
):
super(Conv_block, self).__init__()
self.conv = Conv2d(
in_c,
out_channels=out_c,
kernel_size=kernel,
groups=groups,
stride=stride,
padding=padding,
bias=False,
)
self.bn = BatchNorm2d(out_c)
self.prelu = PReLU(out_c)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.prelu(x)
return x
class Linear_block(Module):
def __init__(
self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1
):
super(Linear_block, self).__init__()
self.conv = Conv2d(
in_c,
out_channels=out_c,
kernel_size=kernel,
groups=groups,
stride=stride,
padding=padding,
bias=False,
)
self.bn = BatchNorm2d(out_c)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Depth_Wise(Module):
def __init__(
self,
in_c,
out_c,
attention,
residual=False,
kernel=(3, 3),
stride=(2, 2),
padding=(1, 1),
groups=1,
):
super(Depth_Wise, self).__init__()
self.conv = Conv_block(
in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)
)
self.conv_dw = Conv_block(
groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride
)
self.project = Linear_block(
groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1)
)
        self.residual = residual
        self.attention = attention  # one of: "none", "se", "eca", "cbam"
def forward(self, x):
if self.residual:
short_cut = x
x = self.conv(x)
x = self.conv_dw(x)
x = self.project(x)
if self.attention != "none":
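            # NOTE (editor): self.attention_layer is never defined in this file,
            # so any attention other than "none" raises AttributeError here.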
x = self.attention_layer(x)
if self.residual:
output = short_cut + x
else:
output = x
return output
class Residual(Module):
def __init__(
self,
c,
attention,
num_block,
groups,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
):
super(Residual, self).__init__()
modules = []
for _ in range(num_block):
modules.append(
Depth_Wise(
c,
c,
attention,
residual=True,
kernel=kernel,
padding=padding,
stride=stride,
groups=groups,
)
)
self.model = Sequential(*modules)
def forward(self, x):
return self.model(x)
class GNAP(Module):
def __init__(self, embedding_size):
super(GNAP, self).__init__()
assert embedding_size == 512
self.bn1 = BatchNorm2d(512, affine=False)
self.pool = nn.AdaptiveAvgPool2d((1, 1))
self.bn2 = BatchNorm1d(512, affine=False)
def forward(self, x):
x = self.bn1(x)
x_norm = torch.norm(x, 2, 1, True)
x_norm_mean = torch.mean(x_norm)
weight = x_norm_mean / x_norm
x = x * weight
x = self.pool(x)
x = x.view(x.shape[0], -1)
feature = self.bn2(x)
return feature
class GDC(Module):
def __init__(self, embedding_size):
super(GDC, self).__init__()
self.conv_6_dw = Linear_block(
512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0)
)
self.conv_6_flatten = Flatten()
self.linear = Linear(512, embedding_size, bias=False)
# self.bn = BatchNorm1d(embedding_size, affine=False)
self.bn = BatchNorm1d(embedding_size)
def forward(self, x):
x = self.conv_6_dw(x)
x = self.conv_6_flatten(x)
x = self.linear(x)
x = self.bn(x)
return x
class MobileFaceNet(Module):
def __init__(
self, input_size=(112,112), embedding_size=128, output_name="GDC", attention="none"
):
super(MobileFaceNet, self).__init__()
assert output_name in ["GNAP", "GDC"]
assert input_size[0] in [112]
self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
self.conv2_dw = Conv_block(
64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64
)
self.conv_23 = Depth_Wise(
64, 64, attention, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128
)
self.conv_3 = Residual(
64,
attention,
num_block=4,
groups=128,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.conv_34 = Depth_Wise(
64, 128, attention, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256
)
self.conv_4 = Residual(
128,
attention,
num_block=6,
groups=256,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.conv_45 = Depth_Wise(
128,
128,
attention,
kernel=(3, 3),
stride=(2, 2),
padding=(1, 1),
groups=512,
)
self.conv_5 = Residual(
128,
attention,
num_block=2,
groups=256,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.conv_6_sep = Conv_block(
128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0)
)
if output_name == "GNAP":
self.output_layer = GNAP(512)
else:
self.output_layer = GDC(embedding_size)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.conv2_dw(out)
out = self.conv_23(out)
out = self.conv_3(out)
out = self.conv_34(out)
out = self.conv_4(out)
out = self.conv_45(out)
out = self.conv_5(out)
conv_features = self.conv_6_sep(out)
out = self.output_layer(conv_features)
return out
def quantize_model(model, weight_bit=None, act_bit=None):
"""
    Recursively replace the layers of a pretrained single-precision model with
    quantized counterparts at the given weight/activation bit widths
    model: pretrained single-precision model
"""
# if not (weight_bit) and not (act_bit ):
# weight_bit = self.settings.qw
# act_bit = self.settings.qa
# quantize convolutional and linear layers
if type(model) == nn.Conv2d:
quant_mod = Quant_Conv2d(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.Linear:
quant_mod = Quant_Linear(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.PReLU:
quant_mod = QuantActPreLu(act_bit=act_bit)
quant_mod.set_param(model)
return quant_mod
    # quantize all the activations (PReLU is handled separately above)
    elif type(model) == nn.ReLU or type(model) == nn.ReLU6:
return nn.Sequential(*[model, QuantAct(activation_bit=act_bit)])
# recursively use the quantized module to replace the single-precision module
elif type(model) == nn.Sequential or isinstance(model, nn.Sequential):
mods = OrderedDict()
for n, m in model.named_children():
if isinstance(m, Depth_Wise) and m.residual:
mods[n] = nn.Sequential(
*[quantize_model(m, weight_bit=weight_bit, act_bit=act_bit), QuantAct(activation_bit=act_bit)])
else:
mods[n] = quantize_model(m, weight_bit=weight_bit, act_bit=act_bit)
# mods.append(self.quantize_model(m))
return nn.Sequential(mods)
else:
q_model = copy.deepcopy(model)
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
setattr(q_model, attr, quantize_model(mod, weight_bit=weight_bit, act_bit=act_bit))
return q_model
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
if __name__ == "__main__":
net = MobileFaceNet()
quant=quantize_model(net,8,8)
print(quant)
| 10,285
| 28.13881
| 120
|
py
|
QuantFace
|
QuantFace-master/backbones/utils.py
|
import torch
from torch import nn
import torch.nn.functional as F
from backbones.activation import get_activation_layer
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, H, W)`
- Output: `(N, C, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock2D, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()
# place mask on input device
mask = mask.to(x.device)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool2d(input=mask[:, None, :, :],
kernel_size=(self.block_size, self.block_size),
stride=(1,1),
padding=self.block_size//2)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1]
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size**2)
def round_channels(channels,
divisor=8):
"""
Round weighted channel number (make divisible operation).
Parameters:
----------
channels : int or float
Original number of channels.
divisor : int, default 8
Alignment value.
Returns:
-------
int
Weighted number of channels.
"""
rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor)
if float(rounded_channels) < 0.9 * channels:
rounded_channels += divisor
return rounded_channels
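def _round_channels_demo():
    # Worked example (editor's addition): results stay divisible by `divisor`
    # and are bumped up when plain rounding would lose more than ~10%.
    assert round_channels(37) == 40
    assert round_channels(32 * 1.5) == 48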
def conv1x1(in_channels,
out_channels,
stride=1,
groups=1, dilation=1,
bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
groups=groups, dilation=dilation,
bias=bias)
def conv3x3(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
class Flatten(nn.Module):
"""
Simple flatten module.
"""
def forward(self, x):
return x.view(x.size(0), -1)
def depthwise_conv3x3(channels,
stride=1,
padding=1,
dilation=1,
bias=False):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=channels,
out_channels=channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=channels,
bias=bias)
class ConvBlock(nn.Module):
"""
Standard convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(ConvBlock, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.use_pad = (isinstance(padding, (list, tuple)) and (len(padding) == 4))
if self.use_pad:
self.pad = nn.ZeroPad2d(padding=padding)
padding = 0
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm2d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation,out_channels)
def forward(self, x):
if self.use_pad:
x = self.pad(x)
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1_block(in_channels,
out_channels,
stride=1,
padding=0,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
1x1 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 0
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
3x3 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
class DwsConvBlock(nn.Module):
"""
Depthwise separable convolution block with BatchNorms and activations at each convolution layers.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
pw_use_bn : bool, default True
Whether to use BatchNorm layer (pointwise convolution block).
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
dw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the depthwise convolution block.
pw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the pointwise convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
bias=False,
dw_use_bn=True,
pw_use_bn=True,
bn_eps=1e-5,
dw_activation=(lambda: nn.ReLU(inplace=True)),
pw_activation=(lambda: nn.ReLU(inplace=True))):
super(DwsConvBlock, self).__init__()
self.dw_conv = dwconv_block(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
use_bn=dw_use_bn,
bn_eps=bn_eps,
activation=dw_activation)
self.pw_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bias=bias,
use_bn=pw_use_bn,
bn_eps=bn_eps,
activation=pw_activation)
def forward(self, x):
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
def dwconv_block(in_channels,
out_channels,
kernel_size,
stride=1,
padding=1,
dilation=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
Depthwise convolution block.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def channel_shuffle2(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083. The alternative version.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns:
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
assert (channels % groups == 0)
channels_per_group = channels // groups
x = x.view(batch, channels_per_group, groups, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch, channels, height, width)
return x
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
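# --- Usage sketch (editor's addition): composing the blocks above into one
# depthwise-separable layer; shapes are illustrative only.
if __name__ == "__main__":
    block = DwsConvBlock(in_channels=3, out_channels=16, kernel_size=3,
                         stride=2, padding=1, dw_activation="relu",
                         pw_activation="prelu")
    x = torch.randn(1, 3, 112, 112)
    print(block(x).shape)            # torch.Size([1, 16, 56, 56])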
| 15,256
| 29.211881
| 120
|
py
|
QuantFace
|
QuantFace-master/backbones/senet.py
|
import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['SENet', 'senet50']
from backbones.countFLOPS import count_model_flops
from backbones.utils import _calc_width
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
# This SEModule is not used.
class SEModule(nn.Module):
def __init__(self, planes, compress_rate):
super(SEModule, self).__init__()
self.conv1 = nn.Conv2d(planes, planes // compress_rate, kernel_size=1, stride=1, bias=True)
self.conv2 = nn.Conv2d(planes // compress_rate, planes, kernel_size=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = F.avg_pool2d(module_input, kernel_size=module_input.size(2))
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return module_input * x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
# SENet
compress_rate = 16
# self.se_block = SEModule(planes * 4, compress_rate) # this is not used.
self.conv4 = nn.Conv2d(planes * 4, planes * 4 // compress_rate, kernel_size=1, stride=1, bias=True)
self.conv5 = nn.Conv2d(planes * 4 // compress_rate, planes * 4, kernel_size=1, stride=1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
        # SE branch: squeeze (global average pool), then excite (two 1x1 convs + sigmoid)
out2 = F.avg_pool2d(out, kernel_size=out.size(2))
out2 = self.conv4(out2)
out2 = self.relu(out2)
out2 = self.conv5(out2)
out2 = self.sigmoid(out2)
# out2 = self.se_block.forward(out) # not used
if self.downsample is not None:
residual = self.downsample(x)
out = out2 * out + residual
# out = out2 + residual # not used
out = self.relu(out)
return out
class SENet(nn.Module):
def __init__(self, block, layers, num_classes=1000, include_top=True):
self.inplanes = 64
super(SENet, self).__init__()
self.include_top = include_top
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if not self.include_top:
return x
#x = x.view(x.size(0), -1)
#x = self.fc(x)
return x
class sphere64(nn.Module):
def __init__(self,classnum=10574,feature=False):
super(sphere64, self).__init__()
self.classnum = classnum
self.feature = feature
#input = B*3*112*96
self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48
self.relu1_1 = nn.PReLU(64)
self.conv1_2 = nn.Conv2d(64,64,3,1,1)
self.relu1_2 = nn.PReLU(64)
self.conv1_3 = nn.Conv2d(64,64,3,1,1)
self.relu1_3 = nn.PReLU(64)
self.conv1_4 = nn.Conv2d(64,64,3,1,1)
self.relu1_4 = nn.PReLU(64)
self.conv1_5 = nn.Conv2d(64,64,3,1,1)
self.relu1_5 = nn.PReLU(64)
self.conv1_6 = nn.Conv2d(64,64,3,1,1)
self.relu1_6 = nn.PReLU(64)
self.conv1_7 = nn.Conv2d(64,64,3,1,1)
self.relu1_7 = nn.PReLU(64)
        self.conv1_8 = nn.Conv2d(64, 64, 3, 1, 1)  # conv1_8/conv1_9 are defined but never used in forward()
self.relu1_8 = nn.PReLU(64)
self.conv1_9 = nn.Conv2d(64, 64, 3, 1, 1)
self.relu1_9 = nn.PReLU(64)
self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24
self.relu2_1 = nn.PReLU(128)
self.conv2_2 = nn.Conv2d(128,128,3,1,1)
self.relu2_2 = nn.PReLU(128)
self.conv2_3 = nn.Conv2d(128,128,3,1,1)
self.relu2_3 = nn.PReLU(128)
self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_4 = nn.PReLU(128)
self.conv2_5 = nn.Conv2d(128,128,3,1,1)
self.relu2_5 = nn.PReLU(128)
self.conv2_6 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_6 = nn.PReLU(128)
self.conv2_7 = nn.Conv2d(128,128,3,1,1)
self.relu2_7 = nn.PReLU(128)
self.conv2_8 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_8 = nn.PReLU(128)
self.conv2_9 = nn.Conv2d(128,128,3,1,1)
self.relu2_9 = nn.PReLU(128)
self.conv2_10 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_10 = nn.PReLU(128)
self.conv2_11 = nn.Conv2d(128,128,3,1,1)
self.relu2_11 = nn.PReLU(128)
self.conv2_12 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_12 = nn.PReLU(128)
self.conv2_13 = nn.Conv2d(128,128,3,1,1)
self.relu2_13 = nn.PReLU(128)
self.conv2_14 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_14 = nn.PReLU(128)
self.conv2_15 = nn.Conv2d(128,128,3,1,1)
self.relu2_15 = nn.PReLU(128)
self.conv2_16 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_16 = nn.PReLU(128)
self.conv2_17 = nn.Conv2d(128,128,3,1,1)
self.relu2_17 = nn.PReLU(128)
self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12
self.relu3_1 = nn.PReLU(256)
self.conv3_2 = nn.Conv2d(256,256,3,1,1)
self.relu3_2 = nn.PReLU(256)
self.conv3_3 = nn.Conv2d(256,256,3,1,1)
self.relu3_3 = nn.PReLU(256)
self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_4 = nn.PReLU(256)
self.conv3_5 = nn.Conv2d(256,256,3,1,1)
self.relu3_5 = nn.PReLU(256)
self.conv3_6 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_6 = nn.PReLU(256)
self.conv3_7 = nn.Conv2d(256,256,3,1,1)
self.relu3_7 = nn.PReLU(256)
self.conv3_8 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_8 = nn.PReLU(256)
self.conv3_9 = nn.Conv2d(256,256,3,1,1)
self.relu3_9 = nn.PReLU(256)
self.conv3_10 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_10 = nn.PReLU(256)
self.conv3_11 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_11 = nn.PReLU(256)
self.conv3_12 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_12 = nn.PReLU(256)
self.conv3_13 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_13 = nn.PReLU(256)
self.conv3_14 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_14 = nn.PReLU(256)
self.conv3_15 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_15 = nn.PReLU(256)
self.conv3_16 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_16 = nn.PReLU(256)
self.conv3_17 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_17 = nn.PReLU(256)
self.conv3_18 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_18 = nn.PReLU(256)
self.conv3_19 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_19 = nn.PReLU(256)
self.conv3_20 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_20 = nn.PReLU(256)
self.conv3_21 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_21 = nn.PReLU(256)
self.conv3_22 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_22 = nn.PReLU(256)
self.conv3_23 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_23 = nn.PReLU(256)
self.conv3_24 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_24 = nn.PReLU(256)
self.conv3_25 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_25 = nn.PReLU(256)
self.conv3_26 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_26 = nn.PReLU(256)
self.conv3_27 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_27 = nn.PReLU(256)
self.conv3_28 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_28 = nn.PReLU(256)
self.conv3_29 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_29 = nn.PReLU(256)
self.conv3_30 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_30 = nn.PReLU(256)
self.conv3_31 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_31 = nn.PReLU(256)
self.conv3_32 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_32 = nn.PReLU(256)
self.conv3_33 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_33 = nn.PReLU(256)
self.conv4_1 = nn.Conv2d(256,512,3,2,1) #=>B*512*7*6
self.relu4_1 = nn.PReLU(512)
self.conv4_2 = nn.Conv2d(512,512,3,1,1)
self.relu4_2 = nn.PReLU(512)
self.conv4_3 = nn.Conv2d(512,512,3,1,1)
self.relu4_3 = nn.PReLU(512)
self.conv4_4 = nn.Conv2d(512,512,3,1,1)
self.relu4_4 = nn.PReLU(512)
self.conv4_5 = nn.Conv2d(512,512,3,1,1)
self.relu4_5 = nn.PReLU(512)
self.conv4_6 = nn.Conv2d(512,512,3,1,1)
self.relu4_6 = nn.PReLU(512)
self.conv4_7 = nn.Conv2d(512,512,3,1,1)
self.relu4_7 = nn.PReLU(512)
self.fc5 = nn.Linear(512*7*6,512)
def forward(self, x):
x = self.relu1_1(self.conv1_1(x))
x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
x = x + self.relu1_5(self.conv1_5(self.relu1_4(self.conv1_4(x))))
x = x + self.relu1_7(self.conv1_7(self.relu1_6(self.conv1_6(x))))
x = self.relu2_1(self.conv2_1(x))
x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
x = x + self.relu2_7(self.conv2_7(self.relu2_6(self.conv2_6(x))))
x = x + self.relu2_9(self.conv2_9(self.relu2_8(self.conv2_8(x))))
x = x + self.relu2_11(self.conv2_11(self.relu2_10(self.conv2_10(x))))
x = x + self.relu2_13(self.conv2_13(self.relu2_12(self.conv2_12(x))))
x = x + self.relu2_15(self.conv2_15(self.relu2_14(self.conv2_14(x))))
x = x + self.relu2_17(self.conv2_17(self.relu2_16(self.conv2_16(x))))
x = self.relu3_1(self.conv3_1(x))
x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
x = x + self.relu3_11(self.conv3_11(self.relu3_10(self.conv3_10(x))))
x = x + self.relu3_13(self.conv3_13(self.relu3_12(self.conv3_12(x))))
x = x + self.relu3_15(self.conv3_15(self.relu3_14(self.conv3_14(x))))
x = x + self.relu3_17(self.conv3_17(self.relu3_16(self.conv3_16(x))))
x = x + self.relu3_19(self.conv3_19(self.relu3_18(self.conv3_18(x))))
x = x + self.relu3_21(self.conv3_21(self.relu3_20(self.conv3_20(x))))
x = x + self.relu3_23(self.conv3_23(self.relu3_22(self.conv3_22(x))))
x = x + self.relu3_25(self.conv3_25(self.relu3_24(self.conv3_24(x))))
x = x + self.relu3_27(self.conv3_27(self.relu3_26(self.conv3_26(x))))
x = x + self.relu3_29(self.conv3_29(self.relu3_28(self.conv3_28(x))))
        x = x + self.relu3_31(self.conv3_31(self.relu3_30(self.conv3_30(x))))
x = x + self.relu3_33(self.conv3_33(self.relu3_32(self.conv3_32(x))))
x = self.relu4_1(self.conv4_1(x))
x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
x = x + self.relu4_5(self.conv4_5(self.relu4_4(self.conv4_4(x))))
        x = x + self.relu4_7(self.conv4_7(self.relu4_6(self.conv4_6(x))))
x = x.view(x.size(0),-1)
x = self.fc5(x)
        return x  # the 512-d embedding is returned whether or not self.feature is set
def senet50(**kwargs):
"""Constructs a SENet-50 model.
"""
model = SENet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def _test():
import torch
pretrained = False
models = [
senet50
]
for model in models:
net = model()
#print(net)
# net.train()
net.eval()
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
print(y.size())
        assert tuple(y.size()) == (1, 2048, 1, 1)
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
flops = count_model_flops(net,input_res=[224,224])
print("m={}, {}".format(model.__name__, flops))
if __name__ == "__main__":
_test()
| 15,709
| 33.679912
| 107
|
py
|
QuantFace
|
QuantFace-master/backbones/iresnet.py
|
import copy
from collections import OrderedDict
import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100']
from backbones.countFLOPS import _calc_width, count_model_flops
from quantization_utils.quant_modules import QuantAct, Quant_Linear, Quant_Conv2d, QuantActPreLu
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return input * x
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1,use_se=False):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
self.use_se=use_se
if (use_se):
self.se_block=SEModule(planes,16)
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if(self.use_se):
out=self.se_block(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class IResNet(nn.Module):
fc_scale = 7 * 7
def __init__(self,
block, layers, dropout=0, num_features=512, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None, use_se=False):
super(IResNet, self).__init__()
self.inplanes = 64
self.dilation = 1
self.use_se=use_se
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.PReLU(self.inplanes)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2 ,use_se=self.use_se)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0],use_se=self.use_se)
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1] ,use_se=self.use_se)
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2] ,use_se=self.use_se)
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout =nn.Dropout(p=dropout, inplace=True) # 7x7x 512
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False,use_se=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation,use_se=use_se))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,use_se=use_se))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs)
def quantize_model( model,weight_bit=None, act_bit=None ):
"""
Recursively quantize a pretrained single-precision model to int8 quantized model
model: pretrained single-precision model
"""
#if not (weight_bit) and not (act_bit ):
# weight_bit = self.settings.qw
# act_bit = self.settings.qa
# quantize convolutional and linear layers
if type(model) == nn.Conv2d:
quant_mod = Quant_Conv2d(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.Linear :
quant_mod = Quant_Linear(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.PReLU :
quant_mod = QuantActPreLu(act_bit=act_bit)
quant_mod.set_param(model)
return quant_mod
    # quantize all the activations (PReLU is already handled by the branch above)
    elif type(model) == nn.ReLU or type(model) == nn.ReLU6:
return nn.Sequential(*[model, QuantAct(activation_bit=act_bit)])
# recursively use the quantized module to replace the single-precision module
elif type(model) == nn.Sequential or isinstance(model,nn.Sequential):
mods = OrderedDict()
for n, m in model.named_children():
if isinstance(m,IBasicBlock):
mods[n] = nn.Sequential(*[quantize_model(m,weight_bit=weight_bit, act_bit=act_bit), QuantAct(activation_bit=act_bit)])
else:
mods[n] = quantize_model(m, weight_bit=weight_bit, act_bit=act_bit)
return nn.Sequential(mods)
else:
q_model = copy.deepcopy(model)
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
setattr(q_model, attr, quantize_model(mod,weight_bit=weight_bit, act_bit=act_bit))
return q_model
def freeze_model( model):
"""
freeze the activation range
"""
if type(model) == QuantAct:
model.fix()
elif type(model) == nn.Sequential:
for n, m in model.named_children():
freeze_model(m)
else:
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
freeze_model(mod)
return model
def unfreeze_model( model):
"""
unfreeze the activation range
"""
if type(model) == QuantAct:
model.unfix()
elif type(model) == nn.Sequential:
for n, m in model.named_children():
unfreeze_model(m)
else:
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
unfreeze_model(mod)
return model
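# A minimal sketch (hypothetical, assuming the quantized modules behave as
# drop-in replacements) of the calibrate-then-freeze workflow on a toy model;
# the 8/8 bit-widths are arbitrary.
def _demo_quantize_toy():
    toy = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
    q_toy = quantize_model(toy, weight_bit=8, act_bit=8)
    q_toy(torch.randn(1, 3, 8, 8))  # forward passes collect activation ranges
    freeze_model(q_toy)             # fix the collected ranges for evaluation
    unfreeze_model(q_toy)           # re-enable range updates for calibration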
def _test():
models = [
iresnet100
]
for model in models:
net=model()
        quant = quantize_model(net, 8, 8)
print(quant)
weight_count = _calc_width(net)
flops = count_model_flops(net)
print("m={}, {}".format(model.__name__, weight_count))
print("m={}, {}".format(model.__name__, flops))
net.eval()
x = torch.randn(1, 3, 112, 112)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 512))
if __name__ == "__main__":
_test()
| 11,550
| 36.141479
| 142
|
py
|
QuantFace
|
QuantFace-master/backbones/__init__.py
| 1
| 0
| 0
|
py
|
|
Progressive-Pruning
|
Progressive-Pruning-main/main_anytime_train.py
|
import argparse
import os
import pdb
import pickle
import random
import shutil
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data.sampler import SubsetRandomSampler
import utils
torch.multiprocessing.set_sharing_strategy("file_system")
from dataset import (Setup_RestrictedImageNet,
generate_anytime_cifar10_dataloader,
generate_anytime_cifar100_dataloader,
generate_anytime_res_img_dataloader,
generate_anytime_res_img_dataloader_few,
setup__cifar10_dataset, setup__cifar100_dataset)
from generate_mask import generate_mask_
from pruner import *
from utils import evaluate_cer, setup_model
from wb import WandBLogger
parser = argparse.ArgumentParser(description="PyTorch Anytime Training")
##################################### Dataset #################################################
parser.add_argument(
"--data", type=str, default="../data", help="location of the data corpus"
)
parser.add_argument("--dataset", type=str, default="cifar10", help="dataset")
parser.add_argument(
"--meta_batch_size",
type=int,
default=5000,
help="data number in each meta batch_size",
)
parser.add_argument("--meta_batch_number", type=int, default=10)
##################################### Architecture ############################################
parser.add_argument("--arch", type=str, default="resnet20s", help="model architecture")
parser.add_argument(
"--imagenet_arch",
action="store_true",
help="architecture for imagenet size samples",
)
parser.add_argument(
"--imagenet_path",
type=str,
default="../imagenet",
help="location of the imagenet folder",
)
##################################### General setting ############################################
parser.add_argument("--seed", default=None, type=int, help="random seed")
parser.add_argument("--gpu", type=int, default=0, help="gpu device id")
parser.add_argument(
"--workers", type=int, default=2, help="number of workers in dataloader"
)
parser.add_argument("--resume", action="store_true", help="resume from checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="checkpoint file")
parser.add_argument(
"--save_dir",
help="The directory used to save the trained models",
default=None,
type=str,
)
parser.add_argument("-no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-one_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-buffer_replay", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--buffer_size_train",
default=182,
type=int,
help="number of Random Train examples to add in buffer",
)
parser.add_argument(
"--buffer_size_valid",
default=182,
type=int,
help="number of Random Valid examples to add in buffer",
)
parser.add_argument("-snip_no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-few_shot", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--n_shots",
default=100,
type=int,
help="number of Random Valid examples to add in buffer",
)
##################################### Training setting #################################################
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--weight_decay", default=1e-4, type=float, help="weight decay")
parser.add_argument(
"--epochs", default=182, type=int, help="number of total epochs to run"
)
parser.add_argument("--warmup", default=0, type=int, help="warm up epochs")
parser.add_argument("--print_freq", default=50, type=int, help="print frequency")
parser.add_argument("--decreasing_lr", default="91,136", help="decreasing strategy")
##################################### Pruning setting #################################################
parser.add_argument(
"--tickets_mask", default=None, type=str, help="mask for subnetworks"
)
parser.add_argument(
"--tickets_init", default=None, type=str, help="initilization for subnetworks"
)
parser.add_argument(
"--snip_size", default=0.20, type=float, help="the size for the snip"
)
parser.add_argument("--sparsity_level", default=0, type=float, help="sparsity level")
parser.add_argument(
"--pruner", default="snip", type=str, help="Pruner Type[mag,snip,GraSP,SynFlow]"
)
parser.add_argument(
"--scope", default="global", type=str, help="Scope of Pruner[local,global]"
)
##################################### W&B Logging setting #################################################
parser.add_argument("-wb", action="store_true", help="Flag for using W&B logging")
parser.add_argument(
"--project_name", default="APP", type=str, help="Name of the W&B project"
)
parser.add_argument(
"--run", default="Anytime_fixed", type=str, help="Name for the W&B run"
)
best_sa = 0
args = parser.parse_args()
print(args)
os.makedirs(args.save_dir, exist_ok=True)
if args.scope == "l":
args.scope = "local"
def main():
global args, best_sa
args = parser.parse_args()
print(args)
torch.cuda.set_device(int(args.gpu))
os.makedirs(args.save_dir, exist_ok=True)
if args.seed:
setup_seed(args.seed)
model = setup_model(args)
if args.dataset == "cifar10":
whole_trainset = setup__cifar10_dataset(args)
elif args.dataset == "cifar100":
whole_trainset = setup__cifar100_dataset(args)
elif args.dataset == "restricted_imagenet":
whole_trainset, test_set = Setup_RestrictedImageNet(args, args.imagenet_path)
if args.tickets_init:
print("loading init from {}".format(args.tickets_init))
init_file = torch.load(args.tickets_init, map_location="cpu")
if "init_weight" in init_file:
init_file = init_file["init_weight"]
model.load_state_dict(init_file)
else:
torch.save(model.state_dict(), os.path.join(args.save_dir, "randinit.pth.tar"))
# setup initialization and mask
if args.tickets_mask:
print("loading mask from {}".format(args.tickets_mask))
mask_file = torch.load(args.tickets_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
criterion = nn.CrossEntropyLoss()
decreasing_lr = list(map(int, args.decreasing_lr.split(",")))
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
if args.wb:
wandb_logger = WandBLogger(
project_name=args.project_name,
run_name=args.run,
dir=args.save_dir,
config=vars(args),
model=model,
params={"resume": args.resume},
)
else:
wandb_logger = None
if args.resume:
print("resume from checkpoint {}".format(args.checkpoint))
checkpoint = torch.load(
args.checkpoint, map_location=torch.device("cuda:" + str(args.gpu))
)
best_sa = checkpoint["best_sa"]
start_epoch = checkpoint["epoch"]
all_result = checkpoint["result"]
start_state = checkpoint["state"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
print(
"loading from state: {} epoch: {}, best_sa = {}".format(
start_state, start_epoch, best_sa
)
)
else:
all_result = {}
all_result["gen_gap"] = []
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["lr"] = []
all_result["val_loss"] = []
start_epoch = 0
start_state = 1
    # Alternative schedule: sparsity = [1, 1.5, 1.75, 2, 2.5, 3, 3.5, 4, 4.5, 5];
    # remaining_weights = 0.8 ** sparsity, i.e. ~32.768% of weights kept at level 5
if args.scope == "local":
sparsity = [args.sparsity_level for x in range(args.meta_batch_number)]
else:
sparsity = np.linspace(1, args.sparsity_level, args.meta_batch_number)
time_list = []
CER = []
CER_diff = []
for current_state in range(start_state, args.meta_batch_number + 1):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
print("Current state = {}".format(current_state))
start_time = time.time()
if args.dataset == "cifar10":
print("Loading cifar10 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar10_dataloader(args, whole_trainset, current_state)
elif args.dataset == "cifar100":
print("Loading cifar100 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar100_dataloader(
args, whole_trainset, current_state
)
elif args.dataset == "restricted_imagenet":
print("Loading Restricted Imagenet dataset in anytime setting")
if args.meta_batch_number == 3:
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, 80565, current_state
)
elif args.meta_batch_number == 10:
# Few Shot Dataloader Example
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, 6800, current_state
)
        # Generate a mask with the selected pruner (args.pruner)
sparsity_level = sparsity[current_state - 1]
save_mask = (
args.save_dir
+ f"/{current_state}mask_{args.pruner}_{sparsity_level}.pth.tar"
)
if current_state == 1:
model_load_dir = (
args.save_dir + "/randinit.pth.tar"
) # 1st Meta Batch Randomly initialized model
else:
model_load_dir = args.save_dir + f"/{current_state-1}model_SA_best.pth.tar"
generate_mask_(
args,
train_snip_set,
args.pruner,
model_load_dir,
save=save_mask,
state=sparsity_level,
)
model.cpu()
# Load the Model by applying above mask
print("loading mask from {}".format(save_mask))
mask_file = torch.load(save_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
for epoch in range(start_epoch, args.epochs):
print(optimizer.state_dict()["param_groups"][0]["lr"])
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
tacc, vloss = validate(val_loader, model, criterion)
# evaluate on test set
# test_tacc = validate(test_loader, model, criterion)
scheduler.step()
# remember best prec@1 and save checkpoint
is_best_sa = tacc > best_sa
best_sa = max(tacc, best_sa)
gen_gap = acc - tacc
all_result["gen_gap"].append(gen_gap)
all_result["train_ta"].append(acc)
all_result["val_ta"].append(tacc)
all_result["best_sa"].append(best_sa)
all_result["train_loss"].append(loss)
all_result["val_loss"].append(vloss)
all_result["lr"].append(optimizer.state_dict()["param_groups"][0]["lr"])
save_checkpoint(
{
"state": current_state,
"result": all_result,
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_sa": best_sa,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_SA_best=is_best_sa,
data_state=current_state,
save_path=args.save_dir,
)
if wandb_logger:
wandb_logger.log_metrics(all_result)
# report result
val_pick_best_epoch = np.argmax(np.array(all_result["val_ta"]))
print(
"* State = {} best SA = {} Epoch = {}".format(
current_state,
all_result["val_ta"][val_pick_best_epoch],
val_pick_best_epoch + 1,
)
)
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
best_sa = 0
start_epoch = 0
best_checkpoint = torch.load(
os.path.join(args.save_dir, "{}model_SA_best.pth.tar".format(current_state))
)
print("Loading Best Weight")
model.load_state_dict(best_checkpoint["state_dict"])
end_time = time.time() - start_time
print("Total time elapsed: {:.4f}s".format(end_time))
time_list.append(end_time)
if args.dataset == "restricted_imagenet":
CER.append(evaluate_cer(model, args, test_loader))
else:
CER.append(evaluate_cer(model, args))
if current_state != 1:
diff = (CER[current_state - 1] - CER[current_state - 2]) / 10000
CER_diff.append(diff)
print("CER diff: {}".format(diff))
# Reset LR to 0.1 after each state
for g in optimizer.param_groups:
g["lr"] = 0.1
print("LR reset to 0.1")
print(optimizer.state_dict()["param_groups"][0]["lr"])
test_tacc, _ = validate(test_loader, model, criterion)
print("Test Acc = {}".format(test_tacc))
print("CER = {}".format(sum(CER)))
wandb_logger.log_metrics({"Test/test_acc": test_tacc})
wandb_logger.log_metrics({"Test/CER": sum(CER)})
print("Final Test Accuracy: ")
print(test_tacc)
print("CER")
print(CER)
print("Anytime Relative Error")
print(CER_diff)
print("Total time")
print(time_list)
def train(train_loader, model, criterion, optimizer, epoch):
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (image, target) in enumerate(train_loader):
if epoch < args.warmup:
warmup_lr(epoch, i + 1, optimizer, one_epoch_step=len(train_loader))
image = image.cuda()
target = target.cuda()
# compute output
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print(
"Epoch: [{0}][{1}/{2}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})\t"
"Time {3:.2f}".format(
epoch, i, len(train_loader), end - start, loss=losses, top1=top1
)
)
start = time.time()
print("train_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (image, target) in enumerate(val_loader):
image = image.cuda()
target = target.cuda()
# compute output
with torch.no_grad():
output = model(image)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
print(
"Test: [{0}/{1}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})".format(
i, len(val_loader), loss=losses, top1=top1
)
)
print("valid_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(
state, is_SA_best, data_state, save_path, filename="checkpoint.pth.tar"
):
filepath = os.path.join(save_path, str(data_state) + filename)
torch.save(state, filepath)
if is_SA_best:
shutil.copyfile(
filepath,
os.path.join(save_path, "{}model_SA_best.pth.tar".format(data_state)),
)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
overall_steps = args.warmup * one_epoch_step
current_steps = epoch * one_epoch_step + step
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p["lr"] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
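# A minimal sketch (hypothetical, for illustration) of `accuracy` on toy
# logits: the first sample is classified correctly, the second is not.
def _demo_accuracy():
    logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    target = torch.tensor([1, 1])
    print(accuracy(logits, target, topk=(1,)))  # -> [tensor(50.)]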
def setup_seed(seed):
print("setup random seed = {}".format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
main()
| 19,993
| 31.777049
| 107
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/main_anytime_baseline.py
|
import argparse
import os
import pdb
import pickle
import random
import shutil
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data.sampler import SubsetRandomSampler
import utils
torch.multiprocessing.set_sharing_strategy("file_system")
from dataset import (Setup_RestrictedImageNet,
generate_anytime_cifar10_dataloader,
generate_anytime_cifar100_dataloader,
generate_anytime_res_img_dataloader,
generate_anytime_res_img_dataloader_few,
setup__cifar10_dataset, setup__cifar100_dataset)
from generate_mask import generate_mask_
from pruner import *
from utils import evaluate_cer, setup_model
from wb import WandBLogger
parser = argparse.ArgumentParser(description="PyTorch Anytime Training")
##################################### Dataset #################################################
parser.add_argument(
"--data", type=str, default="../data", help="location of the data corpus"
)
parser.add_argument("--dataset", type=str, default="cifar10", help="dataset")
parser.add_argument(
"--meta_batch_size",
type=int,
default=5000,
help="data number in each meta batch_size",
)
parser.add_argument("--meta_batch_number", type=int, default=10)
##################################### Architecture ############################################
parser.add_argument("--arch", type=str, default="resnet20s", help="model architecture")
parser.add_argument(
"--imagenet_arch",
action="store_true",
help="architecture for imagenet size samples",
)
parser.add_argument(
"--imagenet_path",
type=str,
default="../imagenet",
help="location of the imagenet folder",
)
##################################### General setting ############################################
parser.add_argument("--seed", default=None, type=int, help="random seed")
parser.add_argument("--gpu", type=int, default=0, help="gpu device id")
parser.add_argument(
"--workers", type=int, default=2, help="number of workers in dataloader"
)
parser.add_argument("--resume", action="store_true", help="resume from checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="checkpoint file")
parser.add_argument(
"--save_dir",
help="The directory used to save the trained models",
default=None,
type=str,
)
parser.add_argument("-no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-one_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-buffer_replay", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--buffer_size_train",
default=182,
type=int,
help="number of Random Train examples to add in buffer",
)
parser.add_argument(
"--buffer_size_valid",
default=182,
type=int,
help="number of Random Valid examples to add in buffer",
)
parser.add_argument("-snip_no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-few_shot", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--n_shots",
default=100,
type=int,
help="number of Random Valid examples to add in buffer",
)
##################################### Training setting #################################################
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--weight_decay", default=1e-4, type=float, help="weight decay")
parser.add_argument(
"--epochs", default=182, type=int, help="number of total epochs to run"
)
parser.add_argument("--warmup", default=0, type=int, help="warm up epochs")
parser.add_argument("--print_freq", default=50, type=int, help="print frequency")
parser.add_argument("--decreasing_lr", default="91,136", help="decreasing strategy")
##################################### Pruning setting #################################################
parser.add_argument(
"--tickets_mask", default=None, type=str, help="mask for subnetworks"
)
parser.add_argument(
"--tickets_init", default=None, type=str, help="initilization for subnetworks"
)
parser.add_argument(
"--snip_size", default=0.20, type=float, help="the size for the snip"
)
parser.add_argument("--sparsity_level", default=0, type=float, help="sparsity level")
parser.add_argument(
"--pruner", default="snip", type=str, help="Pruner Type[mag,snip,GraSP,SynFlow]"
)
parser.add_argument(
"--scope", default="global", type=str, help="Scope of Pruner[local,global]"
)
##################################### W&B Logging setting #################################################
parser.add_argument("-wb", action="store_true", help="Flag for using W&B logging")
parser.add_argument(
"--project_name", default="APP", type=str, help="Name of the W&B project"
)
parser.add_argument(
"--run", default="Anytime_fixed", type=str, help="Name for the W&B run"
)
best_sa = 0
args = parser.parse_args()
print(args)
os.makedirs(args.save_dir, exist_ok=True)
def main():
global args, best_sa
args = parser.parse_args()
print(args)
torch.cuda.set_device(int(args.gpu))
os.makedirs(args.save_dir, exist_ok=True)
if args.seed:
setup_seed(args.seed)
model = setup_model(args)
if args.dataset == "cifar10":
whole_trainset = setup__cifar10_dataset(args)
elif args.dataset == "cifar100":
whole_trainset = setup__cifar100_dataset(args)
elif args.dataset == "restricted_imagenet":
        whole_trainset, test_set = Setup_RestrictedImageNet(args, args.imagenet_path)
model.cuda()
criterion = nn.CrossEntropyLoss()
decreasing_lr = list(map(int, args.decreasing_lr.split(",")))
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
if args.wb:
wandb_logger = WandBLogger(
project_name=args.project_name,
run_name=args.run,
dir=args.save_dir,
config=vars(args),
model=model,
params={"resume": args.resume},
)
else:
wandb_logger = None
all_result = {}
all_result["gen_gap"] = []
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
start_epoch = 0
start_state = 1
time_list = []
CER = []
CER_diff = []
for current_state in range(start_state, args.meta_batch_number + 1):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
print("Current state = {}".format(current_state))
start_time = time.time()
if args.dataset == "cifar10":
print("Loading cifar10 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_cifar10_dataloader(args, whole_trainset, current_state)
elif args.dataset == "cifar100":
print("Loading cifar100 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_cifar100_dataloader(
args, whole_trainset, current_state
)
elif args.dataset == "restricted_imagenet":
print("Loading Restricted Imagenet dataset in anytime setting")
if args.meta_batch_number == 3:
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, 80565, current_state
)
elif args.meta_batch_number == 10 and args.few_shot:
# Few Shot Dataloader Example
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, 6800, current_state
)
for epoch in range(start_epoch, args.epochs):
print(optimizer.state_dict()["param_groups"][0]["lr"])
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
tacc, vloss = validate(val_loader, model, criterion)
scheduler.step()
# remember best prec@1 and save checkpoint
is_best_sa = tacc > best_sa
best_sa = max(tacc, best_sa)
gen_gap = acc - tacc
all_result["gen_gap"].append(gen_gap)
all_result["train_ta"].append(acc)
all_result["val_ta"].append(tacc)
all_result["best_sa"].append(best_sa)
all_result["train_loss"].append(loss)
all_result["val_loss"].append(vloss)
all_result["lr"].append(optimizer.state_dict()["param_groups"][0]["lr"])
save_checkpoint(
{
"state": current_state,
"result": all_result,
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_sa": best_sa,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_SA_best=is_best_sa,
data_state=current_state,
save_path=args.save_dir,
)
if wandb_logger:
wandb_logger.log_metrics(all_result)
val_pick_best_epoch = np.argmax(np.array(all_result["val_ta"]))
print(
"* State = {} best SA = {} Epoch = {}".format(
current_state,
all_result["val_ta"][val_pick_best_epoch],
val_pick_best_epoch + 1,
)
)
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
best_sa = 0
start_epoch = 0
best_checkpoint = torch.load(
os.path.join(args.save_dir, "{}model_SA_best.pth.tar".format(current_state))
)
print("Loading Best Weight")
model.load_state_dict(best_checkpoint["state_dict"])
end_time = time.time() - start_time
print("Total time elapsed: {:.4f}s".format(end_time))
time_list.append(end_time)
if args.dataset == "restricted_imagenet":
CER.append(evaluate_cer(model, args, test_loader))
else:
CER.append(evaluate_cer(model, args))
if current_state != 1:
diff = (CER[current_state - 1] - CER[current_state - 2]) / 10000
CER_diff.append(diff)
print("CER diff = {}".format(diff))
# Reset LR to 0.1 after each state
for g in optimizer.param_groups:
g["lr"] = 0.1
print("LR reset to 0.1")
print(optimizer.state_dict()["param_groups"][0]["lr"])
test_tacc, _ = validate(test_loader, model, criterion)
wandb_logger.log_metrics({"Test/test_acc": test_tacc})
wandb_logger.log_metrics({"Test/CER": sum(CER)})
print("Test Acc = {}".format(test_tacc))
print("CER = {}".format(sum(CER)))
print("CER")
print(CER)
print("Final Test Accuracy: ")
print(test_tacc)
print("Anytime Relative Error")
print(CER_diff)
print("Total time")
print(time_list)
def train(train_loader, model, criterion, optimizer, epoch):
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (image, target) in enumerate(train_loader):
if epoch < args.warmup:
warmup_lr(epoch, i + 1, optimizer, one_epoch_step=len(train_loader))
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print(
"Epoch: [{0}][{1}/{2}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})\t"
"Time {3:.2f}".format(
epoch, i, len(train_loader), end - start, loss=losses, top1=top1
)
)
start = time.time()
print("train_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (image, target) in enumerate(val_loader):
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
with torch.no_grad():
output = model(image)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
print(
"Test: [{0}/{1}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})".format(
i, len(val_loader), loss=losses, top1=top1
)
)
print("valid_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(
state, is_SA_best, data_state, save_path, filename="checkpoint.pth.tar"
):
filepath = os.path.join(save_path, str(data_state) + filename)
torch.save(state, filepath)
if is_SA_best:
shutil.copyfile(
filepath,
os.path.join(save_path, "{}model_SA_best.pth.tar".format(data_state)),
)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
overall_steps = args.warmup * one_epoch_step
current_steps = epoch * one_epoch_step + step
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p["lr"] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def setup_seed(seed):
print("setup random seed = {}".format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
main()
| 16,789
| 31.041985
| 107
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/utils.py
|
"""
setup model and datasets
"""
import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torch.autograd.variable import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100
from dataset import *
from models import *
__all__ = ["setup_model_dataset", "setup_model"]
def evaluate_cer(net, args, loader_=None):
criterion = nn.CrossEntropyLoss()
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
if args.dataset == "cifar10":
test_set = CIFAR10(
"../data", train=False, transform=test_transform, download=True
)
test_loader = DataLoader(
test_set,
batch_size=128,
shuffle=False,
num_workers=2,
pin_memory=True,
)
elif args.dataset == "cifar100":
test_set = CIFAR100(
"../data", train=False, transform=test_transform, download=True
)
test_loader = DataLoader(
test_set,
batch_size=128,
shuffle=False,
num_workers=2,
pin_memory=True,
)
elif args.dataset == "restricted_imagenet":
test_loader = loader_
correct = 0
total_loss = 0
total = 0 # number of samples
num_batch = len(test_loader)
use_cuda = True
net.cuda()
net.eval()
with torch.no_grad():
if isinstance(criterion, nn.CrossEntropyLoss):
for batch_idx, (inputs, targets) in enumerate(test_loader):
# print(inputs.size(0))
batch_size = inputs.size(0)
total += batch_size
inputs = Variable(inputs)
targets = Variable(targets)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = net(inputs)
loss = criterion(outputs, targets)
total_loss += loss.item() * batch_size
_, predicted = torch.max(outputs.data, 1)
correct += predicted.eq(targets).sum().item()
print("Correct %")
print(100 * correct / total)
misclassified = total - correct
print("Total Loss")
print(total_loss * 100 / total)
print(f"misclassified samples from {total}")
print(misclassified)
return misclassified
def setup_model(args):
if args.dataset == "cifar10":
classes = 10
normalization = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
elif args.dataset == "cifar100":
classes = 100
normalization = NormalizeByChannelMeanStd(
mean=[0.5071, 0.4866, 0.4409], std=[0.2673, 0.2564, 0.2762]
)
elif args.dataset == "restricted_imagenet":
classes = 14
if args.imagenet_arch:
if args.dataset == "restricted_imagenet":
classes = 14
model = model_dict[args.arch](num_classes=classes, imagenet=True)
else:
model = model_dict[args.arch](num_classes=classes)
if args.dataset != "restricted_imagenet":
model.normalize = normalization
return model
| 3,323
| 27.904348
| 75
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/dataset.py
|
"""
function for loading datasets
contains:
CIFAR-10
CIFAR-100
"""
import os
import random
import numpy as np
import torch
import torchvision
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100
__all__ = [
"cifar10_dataloaders",
"cifar100_dataloaders",
"generate_anytime_cifar10_dataloader",
]
from robustness.datasets import RestrictedImageNetBalanced
# Module-level replay buffers used by the buffer_replay option below;
# they persist across states within a single run.
buffer_train_set = []
buffer_val_set = []
def to_few_shot(dataset, n_shots=10):
"""
Transforms torchvision dataset to a few-shot dataset.
:param dataset: torchvision dataset
:param n_shots: number of samples per class
:return: few-shot torchvision dataset
"""
    try:
        targets = dataset.targets  # torchvision datasets expose .targets or .labels
        is_targets = True
    except AttributeError:
        targets = dataset.labels
        is_targets = False
assert min(targets) == 0, "labels should start from 0, not from {}".format(
min(targets)
)
# Find n_shots samples for each class
labels_dict = {}
imgs = dataset.imgs
for i, lbl in enumerate(imgs):
if lbl[1] not in labels_dict:
labels_dict[lbl[1]] = []
if len(labels_dict[lbl[1]]) < n_shots:
labels_dict[lbl[1]].append(i)
idx = sorted(
torch.cat([torch.tensor(v) for k, v in labels_dict.items()])
) # sort according to the original order in the full dataset
dataset.imgs = (
[dataset.imgs[i] for i in idx]
if isinstance(dataset.imgs, list)
else dataset.imgs[idx]
)
targets = [imgs[i][1] for i in idx]
if is_targets:
dataset.targets = targets
else:
dataset.labels = targets
return dataset
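# A minimal sketch (hypothetical) of `to_few_shot` on a tiny stand-in object;
# `_FakeDataset` exists only for illustration and mimics the `.imgs`/`.targets`
# attributes the function relies on.
def _demo_to_few_shot():
    class _FakeDataset:
        imgs = [("a", 0), ("b", 0), ("c", 1), ("d", 1), ("e", 1)]
        targets = [0, 0, 1, 1, 1]
    ds = to_few_shot(_FakeDataset(), n_shots=1)
    print(ds.imgs, ds.targets)  # one sample kept per class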
def Setup_RestrictedImageNet(args, path):
ds = RestrictedImageNetBalanced(path)
train_set, test_set = ds.make_loaders(batch_size=128, workers=8)
if args.few_shot:
print("Few Shot Regime Train Data Loading ")
train_set = to_few_shot(train_set, n_shots=args.n_shots)
return train_set, test_set
def generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, sample_len, state=1
):
    meta_train_size = int(args.meta_batch_size * 0.9)  # 90% of each meta batch for training
    meta_val_size = args.meta_batch_size - meta_train_size  # remaining 10% for validation
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.one_replay:
if state == 1:
train_list = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
else:
train_list = list(
range((state - 2) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 2) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.buffer_replay:
        k = args.buffer_size_train
        l = args.buffer_size_valid
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
train_list.extend(buffer_train_set)
val_list.extend(buffer_val_set)
# Populating Buffer
train_sampled_set = random.sample(train_list, k)
valid_sampled_set = random.sample(val_list, l)
buffer_train_set.extend(train_sampled_set)
buffer_val_set.extend(valid_sampled_set)
    else:
        train_list = list(range(0, state * meta_train_size))  # all training data seen so far
        val_list = list(
            range(sample_len, sample_len + state * meta_val_size)
        )  # validation indices start after the training pool
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
def generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, sample_len, state=1
):
    meta_train_size = int(args.meta_batch_size * 0.9)  # 90% of the meta batch for training
    meta_val_size = args.meta_batch_size - meta_train_size  # remaining 10% for validation
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.one_replay:
if state == 1:
train_list = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
        else:  # replay the previous window together with the current one
train_list = list(
range((state - 2) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 2) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.buffer_replay:
k = args.buffer_size_train
l = args.buffer_size_val
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
train_list.extend(buffer_train_set)
val_list.extend(buffer_val_set)
# Populating Buffer
train_sampled_set = random.sample(train_list, k)
valid_sampled_set = random.sample(val_list, l)
buffer_train_set.extend(train_sampled_set)
buffer_val_set.extend(valid_sampled_set)
    else:  # full replay: accumulate every window seen so far
        train_list = list(range(0, state * meta_train_size))
        val_list = list(
            range(sample_len, sample_len + state * meta_val_size)
        )
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
def cifar10_dataloaders(batch_size=128, data_dir="datasets/cifar10", num_workers=2):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
    print(
        "Dataset information: CIFAR-10\t 45000 images for training \t 5000 images for validation\t"
    )
print("10000 images for testing\t no normalize applied in data_transform")
print("Data augmentation = randomcrop(32,4) + randomhorizontalflip")
train_set = Subset(
CIFAR10(data_dir, train=True, transform=train_transform, download=True),
list(range(45000)),
)
val_set = Subset(
CIFAR10(data_dir, train=True, transform=test_transform, download=True),
list(range(45000, 50000)),
)
test_set = CIFAR10(data_dir, train=False, transform=test_transform, download=True)
train_loader = DataLoader(
train_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader
def cifar100_dataloaders(batch_size=128, data_dir="datasets/cifar100", num_workers=2):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
    print(
        "Dataset information: CIFAR-100\t 45000 images for training \t 5000 images for validation\t"
    )
print("10000 images for testing\t no normalize applied in data_transform")
print("Data augmentation = randomcrop(32,4) + randomhorizontalflip")
train_set = Subset(
CIFAR100(data_dir, train=True, transform=train_transform, download=True),
list(range(45000)),
)
val_set = Subset(
CIFAR100(data_dir, train=True, transform=test_transform, download=True),
list(range(45000, 50000)),
)
test_set = CIFAR100(data_dir, train=False, transform=test_transform, download=True)
train_loader = DataLoader(
train_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader
def setup__cifar10_dataset(args):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
whole_trainset = CIFAR10(
args.data, train=True, transform=train_transform, download=True
)
return whole_trainset
def setup__cifar10_dataset_end(args):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
whole_trainset = CIFAR10(
args.data, train=True, transform=train_transform, download=True
)
    # hold out the last 200 images: 50,000 - 200 = 49,800
end_list = list(range(49800, 50000))
sub_whole_trainset = Subset(whole_trainset, list(range(49800)))
end_trainset = Subset(whole_trainset, end_list)
return sub_whole_trainset, end_trainset
def generate_anytime_cifar10_dataloader_end(args, whole_trainset, state=1):
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
    # the 49,800 remaining images are split 90/10 per meta batch
    meta_train_size = int(args.meta_batch_size * 0.9)
    meta_val_size = args.meta_batch_size - meta_train_size
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(44816 + (state - 1) * meta_val_size, 44816 + state * meta_val_size)
)
    else:  # full replay: accumulate every window seen so far
        train_list = list(range(0, state * meta_train_size))
        val_list = list(range(44816, 44816 + state * meta_val_size))
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
test_set = CIFAR10(args.data, train=False, transform=test_transform, download=True)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
buffer_train_set = []
buffer_val_set = []
def generate_anytime_cifar10_dataloader(args, whole_trainset, state=1):
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
    meta_train_size = int(args.meta_batch_size * 0.9)  # e.g. 4500 for meta_batch_size=5000
    meta_val_size = args.meta_batch_size - meta_train_size  # e.g. 500
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size)
)
elif args.one_replay:
if state == 1:
train_list = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size
)
)
        else:  # replay the previous window together with the current one
train_list = list(
range((state - 2) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
45000 + (state - 2) * meta_val_size, 45000 + state * meta_val_size
)
)
elif args.buffer_replay:
k = args.buffer_size_train
l = args.buffer_size_val
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size)
)
train_list.extend(buffer_train_set)
val_list.extend(buffer_val_set)
# Populating Buffer
train_sampled_set = random.sample(train_list, k)
valid_sampled_set = random.sample(val_list, l)
buffer_train_set.extend(train_sampled_set)
buffer_val_set.extend(valid_sampled_set)
    else:  # full replay: accumulate every window seen so far
        train_list = list(range(0, state * meta_train_size))
        val_list = list(range(45000, 45000 + state * meta_val_size))
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
test_set = CIFAR10(args.data, train=False, transform=test_transform, download=True)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
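# Worked example for CIFAR-10 with meta_batch_size=5000 and full replay (the
# default branch): meta_train_size=4500 and meta_val_size=500, so at state=3
# the train indices are range(0, 13500) and the val indices range(45000, 46500).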
def setup__cifar100_dataset(args):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
    print(
        "Dataset information: CIFAR-100\t 45000 images for training \t 5000 images for validation\t"
    )
print("10000 images for testing\t no normalize applied in data_transform")
print("Data augmentation = randomcrop(32,4) + randomhorizontalflip")
whole_trainset = CIFAR100(
args.data, train=True, transform=train_transform, download=True
)
return whole_trainset
def generate_anytime_cifar100_dataloader(args, whole_trainset, state=1):
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
    meta_train_size = int(args.meta_batch_size * 0.9)  # e.g. 4500 for meta_batch_size=5000
    meta_val_size = args.meta_batch_size - meta_train_size  # e.g. 500
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size)
)
    else:  # full replay: accumulate every window seen so far
        train_list = list(range(0, state * meta_train_size))
        val_list = list(range(45000, 45000 + state * meta_val_size))
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
test_set = CIFAR100(args.data, train=False, transform=test_transform, download=True)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
| 21,039
| 28.928876
| 99
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/main_anytime_one.py
|
import argparse
import os
import pdb
import pickle
import random
import shutil
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data.sampler import SubsetRandomSampler
import utils
torch.multiprocessing.set_sharing_strategy("file_system")
from dataset import (Setup_RestrictedImageNet,
generate_anytime_cifar10_dataloader,
generate_anytime_cifar100_dataloader,
generate_anytime_res_img_dataloader,
generate_anytime_res_img_dataloader_few,
setup__cifar10_dataset, setup__cifar100_dataset)
from generate_mask import generate_mask_
from pruner import *
from utils import evaluate_cer, setup_model
from wb import WandBLogger
parser = argparse.ArgumentParser(description="PyTorch Anytime Training")
##################################### Dataset #################################################
parser.add_argument(
"--data", type=str, default="../data", help="location of the data corpus"
)
parser.add_argument("--dataset", type=str, default="cifar10", help="dataset")
parser.add_argument(
"--meta_batch_size",
type=int,
default=5000,
help="data number in each meta batch_size",
)
parser.add_argument("--meta_batch_number", type=int, default=10)
##################################### Architecture ############################################
parser.add_argument("--arch", type=str, default="resnet20s", help="model architecture")
parser.add_argument(
"--imagenet_arch",
action="store_true",
help="architecture for imagenet size samples",
)
parser.add_argument(
"--imagenet_path",
type=str,
default="/home/mila/i/irina.rish/scratch/imagenet",
help="location of the data corpus",
)
##################################### General setting ############################################
parser.add_argument("--seed", default=None, type=int, help="random seed")
parser.add_argument("--gpu", type=int, default=0, help="gpu device id")
parser.add_argument(
"--workers", type=int, default=2, help="number of workers in dataloader"
)
parser.add_argument("--resume", action="store_true", help="resume from checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="checkpoint file")
parser.add_argument(
"--save_dir",
help="The directory used to save the trained models",
default=None,
type=str,
)
parser.add_argument("-no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-one_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-buffer_replay", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--buffer_size_train",
default=182,
type=int,
help="number of Random Train examples to add in buffer",
)
parser.add_argument(
"--buffer_size_valid",
default=182,
type=int,
help="number of Random Valid examples to add in buffer",
)
parser.add_argument("-snip_no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-few_shot", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--n_shots",
default=100,
type=int,
help="number of Random Valid examples to add in buffer",
)
##################################### Training setting #################################################
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--weight_decay", default=1e-4, type=float, help="weight decay")
parser.add_argument(
"--epochs", default=182, type=int, help="number of total epochs to run"
)
parser.add_argument("--warmup", default=0, type=int, help="warm up epochs")
parser.add_argument("--print_freq", default=50, type=int, help="print frequency")
parser.add_argument("--decreasing_lr", default="91,136", help="decreasing strategy")
##################################### Pruning setting #################################################
parser.add_argument(
"--tickets_mask", default=None, type=str, help="mask for subnetworks"
)
parser.add_argument(
"--tickets_init", default=None, type=str, help="initilization for subnetworks"
)
parser.add_argument(
"--snip_size", default=0.20, type=float, help="the size for the snip"
)
parser.add_argument("--sparsity_level", default=0, type=float, help="sparsity level")
parser.add_argument(
"--pruner", default="snip", type=str, help="Pruner Type[mag,snip,GraSP,SynFlow]"
)
parser.add_argument(
"--scope", default="global", type=str, help="Scope of Pruner[local,global]"
)
##################################### W&B Logging setting #################################################
parser.add_argument("-wb", action="store_true", help="Flag for using W&B logging")
parser.add_argument(
"--project_name", default="APP", type=str, help="Name of the W&B project"
)
parser.add_argument(
"--run", default="Anytime_fixed", type=str, help="Name for the W&B run"
)
best_sa = 0
args = parser.parse_args()
print(args)
os.makedirs(args.save_dir, exist_ok=True)
if args.scope == "l":
args.scope = "local"
def main():
global args, best_sa
args = parser.parse_args()
print(args)
torch.cuda.set_device(int(args.gpu))
os.makedirs(args.save_dir, exist_ok=True)
    if args.seed is not None:  # a bare truthiness check would skip seed 0
setup_seed(args.seed)
model = setup_model(args)
if args.dataset == "cifar10":
whole_trainset = setup__cifar10_dataset(args)
elif args.dataset == "cifar100":
whole_trainset = setup__cifar100_dataset(args)
elif args.dataset == "restricted_imagenet":
        whole_trainset, test_set = Setup_RestrictedImageNet(args, args.imagenet_path)
model.cpu()
# print(model)
if args.tickets_init:
print("loading init from {}".format(args.tickets_init))
init_file = torch.load(args.tickets_init, map_location="cpu")
if "init_weight" in init_file:
init_file = init_file["init_weight"]
model.load_state_dict(init_file)
else:
torch.save(model.state_dict(), os.path.join(args.save_dir, "randinit.pth.tar"))
# setup initialization and mask
if args.tickets_mask:
print("loading mask from {}".format(args.tickets_mask))
mask_file = torch.load(args.tickets_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
criterion = nn.CrossEntropyLoss()
decreasing_lr = list(map(int, args.decreasing_lr.split(",")))
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
if args.wb:
wandb_logger = WandBLogger(
project_name=args.project_name,
run_name=args.run,
dir=args.save_dir,
config=vars(args),
model=model,
params={"resume": args.resume},
)
else:
wandb_logger = None
if args.resume:
print("resume from checkpoint {}".format(args.checkpoint))
checkpoint = torch.load(
args.checkpoint, map_location=torch.device("cuda:" + str(args.gpu))
)
best_sa = checkpoint["best_sa"]
start_epoch = checkpoint["epoch"]
all_result = checkpoint["result"]
start_state = checkpoint["state"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
print(
"loading from state: {} epoch: {}, best_sa = {}".format(
start_state, start_epoch, best_sa
)
)
else:
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
start_epoch = 0
start_state = 1
time_list = []
CER = []
CER_diff = []
for current_state in range(start_state, args.meta_batch_number + 1):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
start_time = time.time()
print("Current state = {}".format(current_state))
if args.dataset == "cifar10":
print("Loading cifar10 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar10_dataloader(args, whole_trainset, current_state)
elif args.dataset == "cifar100":
print("Loading cifar100 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar100_dataloader(
args, whole_trainset, current_state
)
elif args.dataset == "restricted_imagenet":
print("Loading Restricted Imagenet dataset in anytime setting")
if args.meta_batch_number == 3:
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, 80565, current_state
)
elif args.meta_batch_number == 10 and args.few_shot:
# Few Shot Dataloader Example
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, 6800, current_state
)
# Generate Mask using SNIP
if current_state == 1:
        # remaining-weight ratio is 0.8 ** sparsity_level (80% kept per level)
        sparsity_level = args.sparsity_level
save_mask = (
args.save_dir
+ f"/{current_state}mask_{args.pruner}_{sparsity_level}.pth.tar"
)
model_load_dir = (
args.save_dir + "/randinit.pth.tar"
) # 1st Meta Batch Randomly initialized model
generate_mask_(
args,
train_snip_set,
args.pruner,
model_load_dir,
save=save_mask,
state=sparsity_level,
)
model.cpu()
# Load the Model by applying above mask
print("loading mask from {}".format(save_mask))
mask_file = torch.load(save_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
for epoch in range(start_epoch, args.epochs):
print(optimizer.state_dict()["param_groups"][0]["lr"])
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
tacc, vloss = validate(val_loader, model, criterion)
# evaluate on test set
scheduler.step()
# remember best prec@1 and save checkpoint
is_best_sa = tacc > best_sa
best_sa = max(tacc, best_sa)
gen_gap = acc - tacc
all_result["gen_gap"].append(gen_gap)
all_result["train_ta"].append(acc)
all_result["val_ta"].append(tacc)
all_result["best_sa"].append(best_sa)
all_result["train_loss"].append(loss)
all_result["val_loss"].append(vloss)
all_result["lr"].append(optimizer.state_dict()["param_groups"][0]["lr"])
save_checkpoint(
{
"state": current_state,
"result": all_result,
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_sa": best_sa,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_SA_best=is_best_sa,
data_state=current_state,
save_path=args.save_dir,
)
if wandb_logger:
wandb_logger.log_metrics(all_result)
# report result
val_pick_best_epoch = np.argmax(np.array(all_result["val_ta"]))
print(
"* State = {} best SA = {} Epoch = {}".format(
current_state,
all_result["val_ta"][val_pick_best_epoch],
val_pick_best_epoch + 1,
)
)
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
best_sa = 0
start_epoch = 0
best_checkpoint = torch.load(
os.path.join(args.save_dir, "{}model_SA_best.pth.tar".format(current_state))
)
print("Loading Best Weight")
model.load_state_dict(best_checkpoint["state_dict"])
end_time = time.time() - start_time
print("Total time elapsed: {:.4f}s".format(end_time))
time_list.append(end_time)
if args.dataset == "restricted_imagenet":
CER.append(evaluate_cer(model, args, test_loader))
else:
CER.append(evaluate_cer(model, args))
if current_state != 1:
diff = (CER[current_state - 1] - CER[current_state - 2]) / 10000
CER_diff.append(diff)
print("CER diff = {}".format(diff))
        # Reset LR to its initial value after each state
        for g in optimizer.param_groups:
            g["lr"] = args.lr
        print("LR reset to {}".format(args.lr))
print(optimizer.state_dict()["param_groups"][0]["lr"])
test_tacc, _ = validate(test_loader, model, criterion)
print("Test Acc = {}".format(test_tacc))
wandb_logger.log_metrics({"Test/test_acc": test_tacc})
wandb_logger.log_metrics({"Test/CER": sum(CER)})
print("CER = {}".format(sum(CER)))
print("CER")
print(CER)
print("Final Test Accuracy: ")
print(test_tacc)
print("Anytime Relative Error")
print(CER_diff)
print("Time Elapsed")
print(time_list)
def train(train_loader, model, criterion, optimizer, epoch):
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (image, target) in enumerate(train_loader):
if epoch < args.warmup:
warmup_lr(epoch, i + 1, optimizer, one_epoch_step=len(train_loader))
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print(
"Epoch: [{0}][{1}/{2}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})\t"
"Time {3:.2f}".format(
epoch, i, len(train_loader), end - start, loss=losses, top1=top1
)
)
start = time.time()
print("train_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (image, target) in enumerate(val_loader):
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
with torch.no_grad():
output = model(image)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
print(
"Test: [{0}/{1}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})".format(
i, len(val_loader), loss=losses, top1=top1
)
)
print("valid_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(
state, is_SA_best, data_state, save_path, filename="checkpoint.pth.tar"
):
filepath = os.path.join(save_path, str(data_state) + filename)
torch.save(state, filepath)
if is_SA_best:
shutil.copyfile(
filepath,
os.path.join(save_path, "{}model_SA_best.pth.tar".format(data_state)),
)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
overall_steps = args.warmup * one_epoch_step
current_steps = epoch * one_epoch_step + step
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p["lr"] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)  # reshape: the slice may be non-contiguous
res.append(correct_k.mul_(100.0 / batch_size))
return res
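# A small sanity check (hypothetical tensors):
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
#   target = torch.tensor([1, 1])
#   accuracy(output, target)  # -> [tensor(50.)]: one of two top-1 predictions is correct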
def setup_seed(seed):
print("setup random seed = {}".format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
main()
| 19,758
| 31.767828
| 107
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/generate_mask.py
|
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100
from models.ResNets import resnet20s
from tools.pruning_utils import *
from utils import setup_model
def generate_mask_(args, data, pruner, model_dir, save, state, gpu=0):
def prune_loop(
model,
loss,
pruner,
dataloader,
device,
sparsity,
scope,
epochs,
train_mode=False,
):
        # Set model to train or eval mode
        model.train(train_mode)
# Prune model
for epoch in range(epochs):
pruner.score(model, loss, dataloader, device)
sparse = sparsity ** ((epoch + 1) / epochs)
pruner.mask(sparse, scope)
torch.cuda.set_device(int(gpu))
model = setup_model(args)
prune_conv(model)
print("loading model from {}".format(model_dir))
checkpoint = torch.load(model_dir, map_location="cpu")
if "state_dict" in checkpoint.keys():
checkpoint = checkpoint["state_dict"]
model.load_state_dict(checkpoint, strict=False)
model.cuda()
    remain_weight = 0.8 ** state  # here `state` is the sparsity level: keep 80% per level
if pruner == "mag":
print("Pruning with Magnitude")
pruner = Mag(masked_parameters(model))
prune_loop(
model,
None,
pruner,
None,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope=args.scope,
epochs=10,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
elif pruner == "snip":
print("Pruning with SNIP")
criterion = nn.CrossEntropyLoss()
data_loader = DataLoader(
data, batch_size=100, shuffle=False, num_workers=2, pin_memory=True
)
pruner = SNIP(masked_parameters(model))
prune_loop(
model,
criterion,
pruner,
data_loader,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope=args.scope,
epochs=1,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
elif pruner == "random":
print("Pruning with Magnitude")
pruner = Rand(masked_parameters(model))
prune_loop(
model,
None,
pruner,
None,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope=args.scope,
epochs=1,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
elif pruner == "GraSP":
print("Pruning with GraSP")
criterion = nn.CrossEntropyLoss()
trainset = torchvision.datasets.CIFAR10(
args.data, train=True, download=True, transform=transforms.ToTensor()
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2
)
pruner = GraSP(masked_parameters(model))
prune_loop(
model,
criterion,
pruner,
trainloader,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope="global",
epochs=1,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
| 3,926
| 26.270833
| 81
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/tools/layers.py
|
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
class Linear(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(Linear, self).__init__(in_features, out_features, bias)
self.register_buffer("weight_mask", torch.ones(self.weight.shape))
def forward(self, input):
W = self.weight_mask * self.weight
b = self.bias
return F.linear(input, W, b)
class Conv2d(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
):
super(Conv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
)
self.register_buffer("weight_mask", torch.ones(self.weight.shape))
def _conv_forward(self, input, weight, bias):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
def forward(self, input):
W = self.weight_mask * self.weight
b = self.bias
return self._conv_forward(input, W, b)
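# Usage sketch: these layers behave exactly like their nn counterparts until
# entries of `weight_mask` are zeroed, e.g. (hypothetical shapes):
#   conv = Conv2d(3, 16, kernel_size=3, padding=1)
#   conv.weight_mask[0].zero_()          # prune the first output filter
#   y = conv(torch.randn(1, 3, 32, 32))  # y[:, 0] is now bias-only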
| 1,785
| 25.656716
| 86
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/tools/pruning_utils.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
from tools.layers import Conv2d, Linear
__all__ = [
"masked_parameters",
"SynFlow",
"Mag",
"Taylor1ScorerAbs",
"Rand",
"SNIP",
"GraSP",
"check_sparsity_dict",
"extract_mask",
"prune_conv",
]
def masks(module):
r"""Returns an iterator over modules masks, yielding the mask."""
for name, buf in module.named_buffers():
if "mask" in name:
yield buf
def masked_parameters(model):
r"""Returns an iterator over models prunable parameters, yielding both the
mask and parameter tensors.
"""
for module in model.modules():
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
for mask, param in zip(masks(module), module.parameters(recurse=False)):
if param is not module.bias:
yield mask, param
class Pruner:
def __init__(self, masked_parameters):
self.masked_parameters = list(masked_parameters)
self.scores = {}
def score(self, model, loss, dataloader, device):
raise NotImplementedError
def _global_mask(self, sparsity):
r"""Updates masks of model with scores by sparsity level globally."""
# # Set score for masked parameters to -inf
# for mask, param in self.masked_parameters:
# score = self.scores[id(param)]
# score[mask == 0.0] = -np.inf
# Threshold scores
global_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
        k = int((1.0 - sparsity) * global_scores.numel())
        if k >= 1:
threshold, _ = torch.kthvalue(global_scores, k)
for mask, param in self.masked_parameters:
score = self.scores[id(param)]
zero = torch.tensor([0.0]).to(mask.device)
one = torch.tensor([1.0]).to(mask.device)
mask.copy_(torch.where(score <= threshold, zero, one))
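    # Thresholding example (applies to both masking methods): with sparsity=0.2
    # (keep 20%) and 1000 scores, k = int(0.8 * 1000) = 800, so torch.kthvalue
    # returns the 800th-smallest score and every score <= it is masked to zero.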
def _local_mask(self, sparsity):
r"""Updates masks of model with scores by sparsity level parameter-wise."""
for mask, param in self.masked_parameters:
score = self.scores[id(param)]
            k = int((1.0 - sparsity) * score.numel())
            if k >= 1:
threshold, _ = torch.kthvalue(torch.flatten(score), k)
zero = torch.tensor([0.0]).to(mask.device)
one = torch.tensor([1.0]).to(mask.device)
mask.copy_(torch.where(score <= threshold, zero, one))
def mask(self, sparsity, scope):
r"""Updates masks of model with scores by sparsity according to scope."""
if scope == "global":
self._global_mask(sparsity)
if scope == "local":
self._local_mask(sparsity)
@torch.no_grad()
def apply_mask(self):
r"""Applies mask to prunable parameters."""
for mask, param in self.masked_parameters:
param.mul_(mask)
def alpha_mask(self, alpha):
r"""Set all masks to alpha in model."""
for mask, _ in self.masked_parameters:
mask.fill_(alpha)
# Based on https://github.com/facebookresearch/open_lth/blob/master/utils/tensor_utils.py#L43
    def shuffle(self):
        for mask, param in self.masked_parameters:
            shape = mask.shape
            perm = torch.randperm(mask.nelement())
            # copy_ writes the permutation back into the registered mask buffer;
            # rebinding the local name would silently have no effect
            mask.copy_(mask.reshape(-1)[perm].reshape(shape))
    def invert(self):
        r"""Inverts all scores: v -> 1/v (v / v**2)."""
        for v in self.scores.values():
            v.div_(v ** 2)
def stats(self):
r"""Returns remaining and total number of prunable parameters."""
remaining_params, total_params = 0, 0
for mask, _ in self.masked_parameters:
remaining_params += mask.detach().cpu().numpy().sum()
total_params += mask.numel()
return remaining_params, total_params
class SynFlow(Pruner):
def __init__(self, masked_parameters):
super(SynFlow, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
@torch.no_grad()
def linearize(model):
# model.double()
signs = {}
for name, param in model.state_dict().items():
signs[name] = torch.sign(param)
param.abs_()
return signs
@torch.no_grad()
def nonlinearize(model, signs):
# model.float()
for name, param in model.state_dict().items():
param.mul_(signs[name])
signs = linearize(model)
(data, _) = next(iter(dataloader))
input_dim = list(data[0, :].shape)
        input = torch.ones([1] + input_dim).to(device)
        # (double-precision variant: torch.ones([1] + input_dim, dtype=torch.float64))
output = model(input)
torch.sum(output).backward()
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p).detach().abs_()
p.grad.data.zero_()
nonlinearize(model, signs)
class Mag(Pruner):
def __init__(self, masked_parameters):
super(Mag, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.data).detach().abs_()
class Rand(Pruner):
def __init__(self, masked_parameters):
super(Rand, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.randn_like(p)
# Based on https://github.com/mi-lad/snip/blob/master/snip.py#L18
class SNIP(Pruner):
def __init__(self, masked_parameters):
super(SNIP, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
# allow masks to have gradient
for m, _ in self.masked_parameters:
m.requires_grad = True
# compute gradient
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
loss(output, target).backward()
# calculate score |g * theta|
for m, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(m.grad).detach().abs_()
p.grad.data.zero_()
m.grad.data.zero_()
m.requires_grad = False
# normalize score
all_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
norm = torch.sum(all_scores)
for _, p in self.masked_parameters:
self.scores[id(p)].div_(norm)
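# SNIP saliency above in brief: with the masks m requiring grad and W = m * theta
# inside the masked layers, dL/dm = g * theta, so |m.grad| is the connection
# sensitivity |g * theta| of the SNIP paper, normalized to sum to one.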
def GraSP_fetch_data(dataloader, num_classes, samples_per_class):
datas = [[] for _ in range(num_classes)]
labels = [[] for _ in range(num_classes)]
mark = dict()
    dataloader_iter = iter(dataloader)
    # draw batches until `samples_per_class` examples of every class are
    # collected (assumes the loader yields enough samples of each class)
    while True:
        inputs, targets = next(dataloader_iter)
for idx in range(inputs.shape[0]):
x, y = inputs[idx : idx + 1], targets[idx : idx + 1]
category = y.item()
if len(datas[category]) == samples_per_class:
mark[category] = True
continue
datas[category].append(x)
labels[category].append(y)
if len(mark) == num_classes:
break
X, y = torch.cat([torch.cat(_, 0) for _ in datas]), torch.cat(
[torch.cat(_) for _ in labels]
).view(-1)
return X, y
# Based on https://github.com/alecwangcq/GraSP/blob/master/pruner/GraSP.py#L49
class GraSP(Pruner):
def __init__(self, masked_parameters):
super(GraSP, self).__init__(masked_parameters)
self.temp = 200
self.eps = 1e-10
def score(self, model, loss, dataloader, device):
# first gradient vector without computational graph
stopped_grads = 0
data, target = GraSP_fetch_data(dataloader, 10, 10)
data, target = data.to(device), target.to(device)
output = model(data) / self.temp
L = loss(output, target)
grads = torch.autograd.grad(
L, [p for (_, p) in self.masked_parameters], create_graph=False
)
flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
stopped_grads += flatten_grads
# second gradient vector with computational graph
data, target = GraSP_fetch_data(dataloader, 10, 10)
data, target = data.to(device), target.to(device)
output = model(data) / self.temp
L = loss(output, target)
grads = torch.autograd.grad(
L, [p for (_, p) in self.masked_parameters], create_graph=True
)
flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
gnorm = (stopped_grads * flatten_grads).sum()
gnorm.backward()
# calculate score Hg * theta (negate to remove top percent)
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p.data).detach()
p.grad.data.zero_()
# normalize score
all_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
norm = torch.abs(torch.sum(all_scores)) + self.eps
for _, p in self.masked_parameters:
self.scores[id(p)].div_(norm)
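# GraSP score above in brief: gnorm dots a stop-gradient copy of the loss
# gradient with a second, differentiable gradient, so after gnorm.backward()
# each p.grad approximates Hg and the stored score is theta * Hg, normalized
# by the absolute value of its sum.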
class Taylor1ScorerAbs(Pruner):
def __init__(self, masked_parameters):
super(Taylor1ScorerAbs, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
loss(output, target).backward()
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p).detach().abs_()
p.grad.data.zero_()
def check_sparsity_dict(model_dict):
sum_list = 0
zero_sum = 0
for key in model_dict.keys():
if "mask" in key:
sum_list = sum_list + float(model_dict[key].nelement())
zero_sum = zero_sum + float(torch.sum(model_dict[key] == 0))
print("* remain weight = ", 100 * (1 - zero_sum / sum_list), "%")
return 100 * (1 - zero_sum / sum_list)
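# Quick example for check_sparsity_dict above: a mask dict whose buffers hold
# 1000 entries with 900 zeros prints "remain weight = 10.0 %" and returns 10.0.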
def extract_mask(model_dict):
new_dict = {}
for key in model_dict.keys():
if "mask" in key:
new_dict[key] = copy.deepcopy(model_dict[key])
return new_dict
def prune_conv(model):
for name, module in reversed(model._modules.items()):
if len(list(module.children())) > 0:
model._modules[name] = prune_conv(model=module)
if isinstance(module, nn.Conv2d):
            bias = module.bias is not None
layer_new = Conv2d(
module.in_channels,
module.out_channels,
module.kernel_size,
module.stride,
padding=module.padding,
dilation=module.dilation,
groups=module.groups,
bias=bias,
)
model._modules[name] = layer_new
return model
| 11,187
| 31.618076
| 97
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/pruner/pruner.py
|
import copy
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
__all__ = [
"pruning_model",
"pruning_model_random",
"prune_model_custom",
"remove_prune",
"extract_mask",
"reverse_mask",
"check_sparsity",
"check_sparsity_dict",
]
# Pruning operation
def pruning_model(model, px):
print("Apply Unstructured L1 Pruning Globally (all conv layers)")
parameters_to_prune = []
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
parameters_to_prune.append((m, "weight"))
parameters_to_prune = tuple(parameters_to_prune)
prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.L1Unstructured,
amount=px,
)
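# Usage sketch (hypothetical model): pruning_model(model, 0.2) attaches
# torch.nn.utils.prune hooks that zero the 20% smallest-magnitude conv weights
# globally; the masks can be read back via extract_mask(model.state_dict()).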
def pruning_model_random(model, px):
print("Apply Unstructured Random Pruning Globally (all conv layers)")
parameters_to_prune = []
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
parameters_to_prune.append((m, "weight"))
parameters_to_prune = tuple(parameters_to_prune)
prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.RandomUnstructured,
amount=px,
)
def prune_model_custom(model, mask_dict):
print("Pruning with custom mask (all conv layers)")
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
mask_name = name + ".weight_mask"
if mask_name in mask_dict.keys():
prune.CustomFromMask.apply(
m, "weight", mask=mask_dict[name + ".weight_mask"]
)
else:
print("Can not fing [{}] in mask_dict".format(mask_name))
def remove_prune(model):
print("Remove hooks for multiplying masks (all conv layers)")
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.remove(m, "weight")
# Mask operation function
def extract_mask(model_dict):
new_dict = {}
for key in model_dict.keys():
if "mask" in key:
new_dict[key] = copy.deepcopy(model_dict[key])
return new_dict
def reverse_mask(mask_dict):
new_dict = {}
for key in mask_dict.keys():
new_dict[key] = 1 - mask_dict[key]
return new_dict
# Mask statistic function
def check_sparsity(model):
sum_list = 0
zero_sum = 0
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
sum_list = sum_list + float(m.weight.nelement())
zero_sum = zero_sum + float(torch.sum(m.weight == 0))
    if zero_sum:
        remain_weight_ratio = 100 * (1 - zero_sum / sum_list)
        print("* remain weight ratio = ", remain_weight_ratio, "%")
    else:
        print("no weight for calculating sparsity")
        remain_weight_ratio = None
    return remain_weight_ratio
def check_sparsity_dict(state_dict):
sum_list = 0
zero_sum = 0
for key in state_dict.keys():
if "mask" in key:
sum_list += float(state_dict[key].nelement())
zero_sum += float(torch.sum(state_dict[key] == 0))
    if zero_sum:
        remain_weight_ratio = 100 * (1 - zero_sum / sum_list)
        print("* remain weight ratio = ", remain_weight_ratio, "%")
    else:
        print("no weight for calculating sparsity")
        remain_weight_ratio = None
    return remain_weight_ratio
| 3,421
| 24.537313
| 79
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/pruner/__init__.py
|
from pruner.pruner import *
| 28
| 13.5
| 27
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/models/ResNet.py
|
import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = [
"ResNet",
"resnet18",
"resnet34",
"resnet50",
"resnet101",
"resnet152",
"resnext50_32x4d",
"resnext101_32x8d",
"wide_resnet50_2",
"wide_resnet101_2",
]
model_urls = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
"resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
"resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
"wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
"wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
imagenet=False,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
print("The normalize layer is contained in the network")
self.normalize = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
if not imagenet:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.Identity()
else:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.normalize(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs
)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet(
"resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet(
"resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet(
"wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet(
"wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
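# Minimal sanity-check sketch, assuming the factory functions above build valid
# torch modules: compare parameter counts to see the effect of the doubled
# bottleneck width described in the wide_resnet docstrings.
if __name__ == "__main__":
    plain = sum(p.numel() for p in resnet50().parameters())
    wide = sum(p.numel() for p in wide_resnet50_2().parameters())
    print(f"resnet50:        {plain / 1e6:.1f}M params")
    print(f"wide_resnet50_2: {wide / 1e6:.1f}M params")  # noticeably larger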
| 14,716
| 32.754587
| 107
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/models/VGG.py
|
import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = [
"VGG",
"vgg11",
"vgg11_bn",
"vgg13",
"vgg13_bn",
"vgg16",
"vgg16_bn",
"vgg19_bn",
"vgg19",
]
model_urls = {
"vgg11": "https://download.pytorch.org/models/vgg11-bbd30ac9.pth",
"vgg13": "https://download.pytorch.org/models/vgg13-c768596a.pth",
"vgg16": "https://download.pytorch.org/models/vgg16-397923af.pth",
"vgg19": "https://download.pytorch.org/models/vgg19-dcbb9e9d.pth",
"vgg11_bn": "https://download.pytorch.org/models/vgg11_bn-6002323d.pth",
"vgg13_bn": "https://download.pytorch.org/models/vgg13_bn-abd245e5.pth",
"vgg16_bn": "https://download.pytorch.org/models/vgg16_bn-6c64b313.pth",
"vgg19_bn": "https://download.pytorch.org/models/vgg19_bn-c79401a0.pth",
}
class VGG(nn.Module):
def __init__(self, features, num_classes=10, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(512, num_classes)
print("The normalize layer is contained in the network")
self.normalize = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.normalize(x)
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
"A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512],
"B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512],
"D": [
64,
64,
"M",
128,
128,
"M",
256,
256,
256,
"M",
512,
512,
512,
"M",
512,
512,
512,
],
"E": [
64,
64,
"M",
128,
128,
"M",
256,
256,
256,
256,
"M",
512,
512,
512,
512,
"M",
512,
512,
512,
512,
],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs["init_weights"] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg11", "A", False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg11_bn", "A", True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg13", "B", False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg13_bn", "B", True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg16", "D", False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg16_bn", "D", True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg19", "E", False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg19_bn", "E", True, pretrained, progress, **kwargs)
| 7,591
| 32.59292
| 113
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/models/__init__.py
|
from models.ResNet import *
from models.ResNets import *
from models.VGG import *
model_dict = {
"resnet18": resnet18,
"resnet34": resnet34,
"wide_resnet50_2": wide_resnet50_2,
"wide_resnet101_2": wide_resnet101_2,
"resnet101": resnet101,
"resnet50": resnet50,
"resnet20s": resnet20s,
"resnet44s": resnet44s,
"resnet56s": resnet56s,
"vgg16_bn": vgg16_bn,
}
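# Minimal usage sketch: model_dict maps architecture names (as typically passed
# on the command line) to their factory functions, e.g.
# >>> model = model_dict["resnet20s"](num_classes=10)
# >>> model = model_dict["resnet50"](pretrained=False)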
| 398
| 22.470588
| 41
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/models/ResNets.py
|
"""
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file are hugely influenced by [2],
which is implemented for ImageNet and doesn't have option A for the identity
shortcut. Moreover, most of the implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
Borrowed from: https://github.com/akamaster/pytorch_resnet_cifar10.git
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from advertorch.utils import NormalizeByChannelMeanStd
from torch.autograd import Variable
__all__ = [
"ResNets",
"resnet20s",
"resnet32s",
"resnet44s",
"resnet56s",
"resnet110s",
"resnet1202s",
]
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option="A"):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == "A":
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(
x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant",
0,
)
)
elif option == "B":
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNets(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNets, self).__init__()
self.in_planes = 16
print("The normalize layer is contained in the network")
self.normalize = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.fc = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = self.normalize(x)
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
def resnet20s(num_classes=10):
return ResNets(BasicBlock, [3, 3, 3], num_classes=num_classes)
def resnet32s(num_classes=10):
return ResNets(BasicBlock, [5, 5, 5], num_classes=num_classes)
def resnet44s(num_classes=10):
return ResNets(BasicBlock, [7, 7, 7], num_classes=num_classes)
def resnet56s(num_classes=10):
return ResNets(BasicBlock, [9, 9, 9], num_classes=num_classes)
def resnet110s(num_classes=10):
return ResNets(BasicBlock, [18, 18, 18], num_classes=num_classes)
def resnet1202s(num_classes=10):
return ResNets(BasicBlock, [200, 200, 200], num_classes=num_classes)
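# Minimal sanity-check sketch, assuming a CIFAR-10-sized input: instantiate
# resnet20s, confirm its size is in line with the ~0.27M parameters quoted in
# the module docstring, and run a dummy batch through it.
if __name__ == "__main__":
    net = resnet20s(num_classes=10)
    n_params = sum(p.numel() for p in net.parameters())
    print(f"resnet20s parameters: {n_params / 1e6:.2f}M")  # about 0.27M
    out = net(torch.rand(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 10])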
| 5,404
| 30.794118
| 85
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/wb/wandb_logger.py
|
"""
Utilities for Weights & Biases logging.
"""
from pathlib import Path
from typing import Union
import PIL
from matplotlib.pyplot import Figure
from PIL.Image import Image
from torch import Tensor
__all__ = ["WandBLogger"]
class WandBLogger:
"""
The `WandBLogger` provides an easy integration with
Weights & Biases logging. Each monitored metric is automatically
logged to a dedicated Weights & Biases project dashboard.
.. note::
The wandb log files are placed by default in "./wandb/" unless specified.
"""
def __init__(
self,
project_name: str = "APP",
run_name: str = "Prune1",
save_code: bool = True,
config: object = None,
dir: Union[str, Path] = None,
model: object = None,
params: dict = None,
) -> None:
"""
Creates an instance of the `WandBLogger`.
:param project_name: Name of the W&B project.
:param run_name: Name of the W&B run.
:param save_code: Saves the main training script to W&B.
:param dir: Path to the local log directory for W&B logs to be saved at.
:param config: Syncs hyper-parameters and config values used to W&B.
:param params: All arguments for wandb.init() function call.
Visit https://docs.wandb.ai/ref/python/init to learn about all
            wandb.init() parameters.
"""
self.project_name = project_name
self.run_name = run_name
self.save_code = save_code
self.dir = dir
self.config = config
self.model = model
self.params = params
self._import_wandb()
self._args_parse()
self._before_job()
def _import_wandb(self):
try:
import wandb
assert hasattr(wandb, "__version__")
except (ImportError, AssertionError):
raise ImportError('Please run "pip install wandb" to install wandb')
self.wandb = wandb
def _args_parse(self):
self.init_kwargs = {
"project": self.project_name,
"name": self.run_name,
"save_code": self.save_code,
"dir": self.dir,
"config": self.config,
}
if self.params:
self.init_kwargs.update(self.params)
def _before_job(self):
if self.wandb is None:
            self._import_wandb()
if self.init_kwargs:
self.wandb.init(**self.init_kwargs)
else:
self.wandb.init()
if self.model is not None:
self.wandb.watch(self.model)
def log_metrics(
self,
log_dict: dict = None,
img: Union[Image, Figure, str, Path] = None,
curve: object = None,
) -> None:
for key, value in log_dict.items():
if isinstance(value, (int, float, Tensor)):
self.wandb.log({key: value})
else:
if "ARE" in key:
curr_val = value
else:
curr_val = value[-1]
                if isinstance(curr_val, (int, float, Tensor)):
                    # prefix the key by data split, then log it exactly once
                    if "train" in key.lower():
                        key = "Train/" + key
                    elif "val" in key.lower():
                        key = "Val/" + key
                    elif "test" in key.lower():
                        key = "Test/" + key
                    self.wandb.log({key: curr_val})
else:
return
if img is not None:
if isinstance(img, (Image, Figure)):
self.wandb.log({"Media/Training Curve": self.wandb.Image(img)})
if isinstance(img, (str, Path)):
img_pil = PIL.Image.open(img)
self.wandb.log({"Media/Training Curve": self.wandb.Image(img_pil)})
if curve is not None:
if isinstance(curve, (object)):
self.wandb.log({"Training Curves": curve})
| 4,146
| 30.416667
| 87
|
py
|
Progressive-Pruning
|
Progressive-Pruning-main/wb/__init__.py
|
from .wandb_logger import *
| 28
| 13.5
| 27
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/baseline_convex_fair_regression.py
|
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import fairness_metrics
import data_loader
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the method from the paper at
https://arxiv.org/pdf/1706.02409.pdf
An example usage:
python .\baseline_convex_fair_regression.py --seed {} --fairness {} --dataset {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrime':
ds = data_loader.CommunitiesCrime()
if args.dataset == 'BarPass':
ds = data_loader.BarPass()
if args.dataset == 'StudentsMath':
ds = data_loader.StudentPerformance(subject='Math')
if args.dataset == 'StudentsPortugese':
ds = data_loader.StudentPerformance(subject='Portugese')
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'R2' : fairness_metrics.R2
}
# storage of results
results_train = []
results_test = []
# get data
X0, Y0 = ds.get_data_for_A(0)
X0 = X0.numpy()
Y0 = Y0.numpy()
X1, Y1 = ds.get_data_for_A(1)
X1 = X1.numpy()
Y1 = Y1.numpy()
X, Y, A = ds.get_data()
X_test, Y_test, A_test = ds.get_test_data()
# run the test for various lambdas
for lambda_ in lambda_candidates:
start_time = time.time()
if args.fairness == 'group':
D = np.exp(-(Y1-Y0.T)**2)
n1n0 = D.shape[0]*D.shape[1]
theta = cp.Variable((X1.shape[1],1))
theta0 = cp.Variable()
objective = cp.Minimize(cp.sum((Y0-theta0-X0@theta)**2)/Y0.shape[0]+\
cp.sum((Y1-theta0-X1@theta)**2)/Y1.shape[0]+\
lambda_*(cp.sum(cp.multiply(D, (X1@theta - (X0@theta).T)))/n1n0)**2 +\
args.gamma*(theta0**2 + cp.sum_squares(theta)))
problem = cp.Problem(objective, [])
problem.solve(solver = cp.GUROBI, verbose=False)
else:
D = np.exp(-(Y1-Y0.T)**2)
n1n0 = D.shape[0]*D.shape[1]
theta = cp.Variable((X1.shape[1],1))
theta0 = cp.Variable()
objective = cp.Minimize(cp.sum((Y0-theta0-X0@theta)**2)/Y0.shape[0]+\
cp.sum((Y1-theta0-X1@theta)**2)/Y1.shape[0]+\
lambda_*(cp.sum(cp.multiply(D, (X1@theta - (X0@theta).T)**2))/n1n0) +\
args.gamma*(theta0**2 + cp.sum_squares(theta)))
problem = cp.Problem(objective, [])
problem.solve(solver = cp.GUROBI, verbose=False)
duration = time.time()-start_time
theta = torch.tensor(theta.value).float()
theta0 = torch.tensor(theta0.value).float()
predict = lambda X: theta0 + X@theta
# metrics on train set
y_hat = predict(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
train_results['time'] = duration
test_results['time'] = duration
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results_regression/cvx_regression_baseline/{}_cvx-bl-{}_{}_train_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
df_test.to_csv('results_regression/cvx_regression_baseline/{}_cvx-bl-{}_{}_test_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
def run_sgd(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrime':
ds = data_loader.CommunitiesCrime()
if args.dataset == 'BarPass':
ds = data_loader.BarPass()
if args.dataset == 'StudentsMath':
ds = data_loader.StudentPerformance(subject='Math')
if args.dataset == 'StudentsPortugese':
ds = data_loader.StudentPerformance(subject='Portugese')
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'R2' : fairness_metrics.R2
}
# storage of results
results_train = []
results_test = []
# get data
X0, Y0 = ds.get_data_for_A(0)
X1, Y1 = ds.get_data_for_A(1)
X, Y, A = ds.get_data()
k = X.shape[1]
X_test, Y_test, A_test = ds.get_test_data()
# run the test for various lambdas
for lambda_ in lambda_candidates:
D = torch.exp(-(Y1-Y0.T)**2)
objective_group = lambda theta0, theta: (torch.sum((Y0-theta0-X0@theta)**2)+torch.sum((Y1-theta0-X1@theta)**2))/(D.shape[0]+D.shape[1])+\
lambda_*(torch.mean(D*(X1@theta - (X0@theta).T)))**2
objective_individual = lambda theta0, theta: (torch.sum((Y0-theta0-X0@theta)**2)+\
torch.sum((Y1-theta0-X1@theta)**2))/(D.shape[0]+D.shape[1])+\
lambda_*(torch.mean(D*(X1@theta - (X0@theta).T)**2))
objective = objective_group if args.fairness=='group' else objective_individual
theta_0 = torch.rand(1)
theta = torch.rand([k, 1])
theta_0.requires_grad = True
theta.requires_grad = True
optimizer = torch.optim.Adam([theta_0, theta])
losses = []
for epoch in tqdm(range(5000)):
optimizer.zero_grad()
loss = objective(theta_0, theta)
losses.append(loss.item())
loss.backward()
optimizer.step()
predict = lambda X: theta_0 + X@theta
# metrics on train set
y_hat = predict(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/cvx_regression_baseline/{}_cvx-bl-{}_{}_train_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
df_test.to_csv('results/cvx_regression_baseline/{}_cvx-bl-{}_{}_test_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-2, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=5, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrime', 'BarPass', 'StudentsMath', 'StudentsPortugese'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--gamma', help='Weight of L2 regularizer', type=float, default=0)
parser.add_argument('--fairness', help='Fairness Type to use', choices=['group', 'individual'])
args = parser.parse_args()
run(args)
| 10,008
| 39.522267
| 145
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/fair_training.py
|
# fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides an implementation of stochastic gradient descent.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Classification |
# +--------------------------------------------------+
def fair_learning(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
    Train a model using Algorithm 2, which uses mini-batch SGD to minimize accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
        Training loss over training if logdata is provided; note that params are modified in place.
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_after_sig = torch.sigmoid(y_hat)
y_after_sig = y_after_sig[:, None]
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
        # gradient computation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Regression |
# +--------------------------------------------------+
def fair_learning_regression(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
    Train a model using mini-batch SGD to minimize accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
        Training loss over training if logdata is provided; note that params are modified in place.
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.MSELoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
        # gradient computation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
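# Minimal synthetic sketch of fair_learning_regression with a hand-rolled linear
# model and a simple mean-gap fairness loss; all shapes and the loss choice here
# are illustrative only.
if __name__ == "__main__":
    torch.manual_seed(0)
    X = torch.randn(200, 5)
    A = (torch.rand(200, 1) > 0.5).float()
    Y = X @ torch.randn(5, 1) + 0.1 * torch.randn(200, 1)
    w = torch.zeros(5, 1, requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    predict = lambda X_: b + X_ @ w
    fair_loss = lambda y1, y0: (y1.mean() - y0.mean()).abs()
    generator = ((X, Y, A) for _ in range(200))  # reuse the full batch each step
    fair_learning_regression(generator, predict, fair_loss, [w, b], lambda_=1.0)
    print(float(((predict(X) - Y) ** 2).mean()))  # training MSE after 200 steps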
| 5,881
| 45.314961
| 189
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/run_regression.py
|
import os
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides results for Figure~3 and Table~5.
Example usage: python run.py
The results are saved under the ./results folder.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
if __name__=='__main__':
print('Running for Student Datasets')
nlambda = 25
for seed in range(10):
print('Seed {}...'.format(seed))
os.system('python run_benchmark_regression.py --dataset StudentsMath --seed {} --lambda_max 3 --n_epochs 2000 --lr 1e-3'.format(seed))
os.system('python run_benchmark_regression.py --dataset StudentsPortugese --seed {} --lambda_max 3 --n_epochs 2000 --lr 1e-3'.format(seed))
os.system('python run_benchmark_regression.py --dataset StudentsMath --seed {} --lambda_max 3 --n_epochs 2000 --model linearregression --lr 1e-3'.format(seed))
os.system('python run_benchmark_regression.py --dataset StudentsPortugese --seed {} --lambda_max 3 --n_epochs 2000 --model linearregression --lr 1e-3'.format(seed))
os.system('python baseline_convex_fair_regression.py --seed {} --fairness individual --dataset StudentsPortugese'.format(seed))
os.system('python baseline_convex_fair_regression.py --seed {} --fairness individual --dataset StudentsMath'.format(seed))
os.system('python baseline_convex_fair_regression.py --seed {} --fairness group --dataset StudentsPortugese'.format(seed))
os.system('python baseline_convex_fair_regression.py --seed {} --fairness group --dataset StudentsMath'.format(seed))
print('Running for CommunitiesCrime')
nlambda = 25
for seed in range(10):
print('Seed {}...'.format(seed))
os.system('python run_benchmark_regression.py --dataset CommunitiesCrime --seed {} --n_epochs 1000 --lambda_max 2'.format(seed))
os.system('python run_benchmark_regression.py --dataset CommunitiesCrime --seed {} --n_epochs 1000 --model linearregression --lambda_max 2'.format(seed))
os.system('python baseline_convex_fair_regression.py --seed {} --fairness group --dataset CommunitiesCrime'.format(seed))
os.system('python baseline_convex_fair_regression.py --seed {} --fairness individual --dataset CommunitiesCrime'.format(seed))
| 2,369
| 70.818182
| 172
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/fairness_metrics.py
|
import torch
import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
    Compute the energy distance between the empirical distributions y1 and y2, each one-dimensional.
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
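# Quick illustrative check: for two samples from the same distribution the
# energy distance should be near zero, and it grows as the distributions drift
# apart, e.g.
# >>> torch.manual_seed(0)
# >>> a, b = torch.randn(500), torch.randn(500)
# >>> float(energy_distance(a, b))        # close to 0
# >>> float(energy_distance(a, b + 3.0))  # clearly positive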
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
    Compute the Sinkhorn divergence between the empirical distributions y1 and y2, each one-dimensional.
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
        dist (torch.Tensor): The computed Sinkhorn divergence
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
    def rbf(diff, diagzero=True):
        # Gaussian (RBF) kernel; the exponent must be negative so the kernel
        # decays with distance (sigma is the bandwidth).
        sigma = 0.1
        K = torch.exp(-(diff**2)/(2*sigma**2))
        if diagzero:
            # zero out self-comparisons so the within-sample sums below match
            # their unbiased n*(n-1) and m*(m-1) normalizations
            K = K * (1 - torch.eye(diff.shape[0]))
        return K
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
    Compute the maximum statistical disparity, i.e. the maximum difference
    between the two empirical CDFs of the predictions.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): maximum statistical disparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
    Compute the ratio of the group losses between the predictions for the two
    protected classes (returned as a value in [0, 1]).
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): The ratio of the group losses (smaller over larger)
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
    Compute group fairness in expectation, i.e. the absolute difference between
    the mean predictions of the two protected classes.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
    Compute the statistical parity difference for binary predictions, i.e. the
    absolute difference in positive-prediction rates between the two groups.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        epsilon (torch.Tensor): statistical parity difference
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
    Compute the Lp distance between the empirical CDFs of the two groups' predictions.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
    Compute the mean squared error over both groups combined.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
    Compute the mean absolute error over both groups combined.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
        R2 (torch.Tensor): coefficient of determination
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
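# Minimal sketch evaluating a few of the metrics above on synthetic predictions
# for two groups, following the (y1_hat, y2_hat, y1, y2) convention used in this
# module; all numbers are made up.
if __name__ == "__main__":
    torch.manual_seed(0)
    y1, y2 = torch.randn(300), torch.randn(300)
    y1_hat, y2_hat = y1 + 0.1 * torch.randn(300), y2 + 0.5  # group 2 is biased
    print("statistical parity:", float(statistical_parity(y1_hat, y2_hat, y1, y2)))
    print("L1 CDF distance:   ", float(lp_dist(y1_hat, y2_hat, y1, y2, p=1)))
    print("MSE:               ", float(MSE(y1_hat, y2_hat, y1, y2)))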
| 9,848
| 34.428058
| 133
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/benchmark.py
|
# benchmark.py
# file with functions for running experiment
import fair_training
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
import time
def convergence_plotter(regloss, fairloss, lambda_):
plt.figure(figsize=(16,5))
plt.subplot(131)
plt.plot(regloss)
plt.title('Regression Loss')
plt.xlabel('Iteration')
plt.ylabel('Regression Loss')
plt.subplot(132)
plt.plot(fairloss)
plt.title('Fairness Loss')
plt.xlabel('Iteration')
plt.ylabel('Fairness Loss')
plt.subplot(133)
plt.plot(lambda_*np.array(fairloss)+np.array(regloss))
plt.title('Overall Loss')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides the train and test functions for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def train_test_fair_learning(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
        train_test_split_fin (bool, adult data): Train-test split already performed on the adult data
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
y_hat = torch.round(torch.sigmoid(model(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
def train_test_fair_learning_regression(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
        train_test_split_fin (bool, adult data): Train-test split already performed on the adult data
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning_regression(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
y_hat = model(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = model(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
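# Minimal usage sketch; the dataset/model constructors and their signatures are
# assumed here (see the data_loader and models modules for the actual definitions):
# >>> ds = data_loader.CommunitiesCrime(); ds.split_test()
# >>> model = models.LinearRegression(ds.get_k())  # hypothetical constructor signature
# >>> train_res, test_res = train_test_fair_learning_regression(
# ...     ds, model, fairness_metrics.energy_distance, lr=1e-3, batch_size=64,
# ...     N_epochs=100, lambda_=1.0, metrics={"MSE": fairness_metrics.MSE})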
| 7,129
| 40.213873
| 206
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/setup.py
|
from setuptools import setup
setup(
name="dccp",
version="1.0.3",
author="Xinyue Shen, Steven Diamond, Stephen Boyd",
author_email="xinyues@stanford.edu, diamond@cs.stanford.edu, boyd@stanford.edu",
packages=["dccp"],
license="GPLv3",
zip_safe=False,
install_requires=["cvxpy >= 0.3.5"],
use_2to3=True,
url="http://github.com/cvxgrp/dccp/",
description="A CVXPY extension for difference of convex programs.",
)
| 456
| 27.5625
| 84
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/data_loader.py
|
# data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
import copy
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides data loading functionality for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, X_test=None, Y_test=None, A_test=None, use_tensor=True, device='cpu', info='No Info Available', min_max_scaler=None):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
if X_test is not None:
self.X_test = to_tensor(X_test, device) if use_tensor else X_test
self.A_test = to_tensor(A_test, device) if use_tensor else A_test
self.Y_test = to_tensor(Y_test, device) if use_tensor else Y_test
self.info = info
self.min_max_scaler = None
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_adult_data(self):
return (self.X, self.Y, self.A, self.X_test, self.Y_test, self.A_test)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
        # get proportions of the protected attribute
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
total_samples = self.A.shape[0]
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
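    # Minimal usage sketch: each yielded batch keeps roughly the dataset's
    # A==0 / A==1 proportions, with samples drawn without replacement within
    # an epoch, e.g.
    # >>> ds = Synthetic1(N=200, k=5)
    # >>> for X_b, Y_b, A_b in ds.stratified_batch_generator_worep(batch_size=40, n_epochs=1):
    # ...     print(X_b.shape, float(A_b.mean()))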
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
if self.min_max_scaler is not None:
x_vals = self.X_test.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
self.X_test = pd.DataFrame(x_scaled)
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class LawSchool(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_sas('./data/classification/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
A = rawdata['White']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict admission,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Drug(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x):
X, Y, A = load_drug_data('data/classification/drug/drug_consumption.data.txt')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Credit(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_excel('./data/classification/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
A = 2 - preproc_data['SEX']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Credit data'''
self.min_max_scaler = preprocessing.MinMaxScaler()
x_vals = X.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
X = pd.DataFrame(x_scaled)
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Adult(DataLoader):
def __init__(self, a_inside_x, **kwargs):
X_train_, Y_train_, X_test_, Y_test_ = adult_data_read('./data/classification/adult/')
A = X_train_['Sex']
A_test = X_test_['Sex']
le = LabelEncoder()
Y = le.fit_transform(Y_train_)
Y = pd.Series(Y, name='>50k')
Y_test = le.fit_transform(Y_test_)
Y_test = pd.Series(Y_test, name='>50k')
if not a_inside_x:
X = X_train_.drop(labels=['Sex'], axis=1)
X = pd.get_dummies(X)
X_test = X_test_.drop(labels=['Sex'], axis=1)
X_test = pd.get_dummies(X_test)
else:
X = pd.get_dummies(X_train_)
X_test = pd.get_dummies(X_test_)
info = """Adult dataset for classification. Train Test split is already provided"""
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], X_test, np.array(Y_test)[:, None], np.array(A_test)[:, None], info=info, **kwargs)
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, a_inside_x, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
if a_inside_x:
X = np.concatenate((np.array(X), np.array(A)), axis=1)
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(500, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
def get_log_data(self):
return self.get_data()
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
        df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self, a_inside_x):
X, Y, A = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
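# A minimal sketch (hypothetical helper, not used elsewhere) contrasting the two
# synthetic biases above: Synthetic1 shifts group A=0 by a constant intercept,
# while Synthetic2 tilts its slope instead.
def _demo_synthetic_bias(N=6, k=2, delta=0.5):
    X = torch.ones(N, k)
    theta = torch.ones(k, 1)
    y_intercept_biased = delta + X @ theta  # Synthetic1: Y_0 = delta + X @ theta
    y_slope_biased = X @ (theta + delta)    # Synthetic2: Y_0 = X @ (theta + delta)
    return y_intercept_biased, y_slope_biased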
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
def adult_data_read(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
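# Hedged usage sketch for adult_data_read (assumes `data_root` contains the UCI
# files adult.data and adult.test; not executed on import):
def _demo_adult_data_read(data_root='data/'):
    X_train, y_train, X_test, y_test = adult_data_read(data_root)
    return X_train.shape, y_train.mean(), X_test.shape, y_test.mean()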
| 16,841 | 39.681159 | 159 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/run_benchmark_MMD_simple.py |
import models
import fairness_metrics
import benchmark
import data_loader
import pickle
import argparse
import pandas as pd
import numpy as np
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of MFL
An example usage:
python run_benchmark_MMD_simple.py --dataset {} --seed {} --a_inside_x True --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.LinearRegression if args.model=='linearregression' else models.NeuralNetwork
fair_loss = fairness_metrics.MMD_RBF
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
    lr = args.lr
    batch_size = args.batch_size
    n_epochs = args.n_epochs
    # NOTE: the hard-coded defaults below override the CLI values read above
    lr = 5e-4
    n_epochs = 500
    lr_decay = 0.99
    batch_size = 2048
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
batch_size = 128
lr_decay = 1.0
n_epochs = 300
lr = 2e-4
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=args.a_inside_x)
lr_decay = 1.0
batch_size = 128
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=args.a_inside_x)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=args.a_inside_x)
train_test_split_fin = 1
n_epochs = 300
lr = 1e-2
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=args.a_inside_x)
batch_size = 128
lr_decay = 1.0
n_epochs = 200
lr = 1e-3
# if args.dataset == 'Synthetic1':
# ds = data_loader.Synthetic1(1000, 4)
# if args.dataset == 'Synthetic2':
# ds = data_loader.Synthetic2(1000, 4)
# if args.dataset == 'CommunitiesCrime':
# ds = data_loader.CommunitiesCrime()
# if args.dataset == 'CommunitiesCrimeClassification':
# ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
# if args.dataset == 'BarPass':
# ds = data_loader.BarPass()
# if args.dataset == 'StudentsMath':
# ds = data_loader.StudentPerformance(subject='Math')
# if args.dataset == 'StudentsPortugese':
# ds = data_loader.StudentPerformance(subject='Portugese')
# if args.dataset == 'Adult':
# ds = data_loader.Adult(a_inside_x=args.a_inside_x)
# train_test_split_fin = 1
# if args.dataset == 'Compas':
# ds = data_loader.Compas(a_inside_x=args.a_inside_x)
# if args.dataset == 'LawSchool':
# ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
# if args.dataset == 'Drug':
# ds = data_loader.Drug(a_inside_x=args.a_inside_x)
# if args.dataset == 'Credit':
# ds = data_loader.Credit(a_inside_x=args.a_inside_x)
logfairloss = fair_loss
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
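    # Every handle above shares the signature (y1_hat, y0_hat, y1, y0):
    # predictions and labels pre-split by the sensitive attribute, e.g.
    #   y_hat_1, y_hat_0 = y_hat[A == 1], y_hat[A == 0]
    #   y_1, y_0 = Y[A == 1], Y[A == 0]
    #   gap = metrics['statistical_parity'](y_hat_1, y_hat_0, y_1, y_0)
    # (an illustrative sketch of the calling convention, not an extra code path)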
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
for lambda_ in lambda_candidates:
print('Training MMD, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
# if args.algorithm==1:
# train_metrics, test_metrics = benchmark.test_algorithm_1(ds,
# model,
# reg_loss,
# fair_loss,
# args.lr,
# args.n_iterates,
# lambda_,
# metrics,
# psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay)
# elif args.algorithm==2:
# train_metrics, test_metrics = benchmark.test_algorithm_2(ds,
# model,
# reg_loss,
# fair_loss,
# args.lr,
# args.batch_size,
# args.n_epochs,
# lambda_,
# metrics,
# psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay)
train_metrics, test_metrics = benchmark.train_test_fair_learning(ds=ds,
model=model,
fair_loss=fair_loss,
lr=lr,
batch_size=batch_size,
N_epochs=n_epochs,
lambda_=lambda_,
metrics=metrics,
lr_decay=lr_decay,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay, train_test_split_fin=train_test_split_fin)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
if args.a_inside_x:
df_train.to_csv('results/NN_MMD/{}_{}_AinX_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_MMD/{}_{}_AinX_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
else:
print('here')
df_train.to_csv('results/NN_MMD/{}_{}_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_MMD/{}_{}_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
              'model_details':model.state_dict(),
              'L':'BCE_cross_entropy',
              'fair_loss':'MMD_RBF',
'lr_decay':lr_decay,
'a_inside_x':args.a_inside_x
}
with open('results/NN_MMD/{}_{}_{}.pkl'.format(args.dataset, args.model, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
#
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linearregression', 'NN'], help='Regression Model')
# parser.add_argument('--regloss', default='L2', choices=['L1', 'L2'], help='Regression Loss')
# parser.add_argument('--fairloss', required=True, choices=['Energy', 'Wasserstein'], help='Fairness loss')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=1e-4, type=float, help='Learning Rate of (S)GD: Currently has no effect since Adam is used')
parser.add_argument('--batch_size', default=128, type=int, help='Batch Size for algorithm 2')
parser.add_argument('--n_epochs', default=500, type=int, help='Number of Epochs of (S)GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['Synthetic1', 'Synthetic2', 'CommunitiesCrime', 'CommunitiesCrimeClassification',
'BarPass', 'StudentsMath', 'StudentsPortugese', 'Compas', 'LawSchool', 'Adult',
'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--weight_decay', help='SGD weight decay', type=float, default=0.0)
parser.add_argument('--a_inside_x', default=False, type=str2bool, help='The sensitive feature is in X')
args = parser.parse_args()
run(args)
| 10,851 | 49.240741 | 214 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/MMD_fair_run.py |
import models
import fairness_metrics
import data_loader
import MMD_fair
import argparse
import pandas as pd
import numpy as np
import time
import pickle
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://papers.nips.cc/paper/2020/file/af9c0e0c1dee63e5acad8b7ed1a5be96-Paper.pdf
An example usage:
python .\MMD_fair_run.py --dataset {} --nlambda {} --seed {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.NeuralNetwork_MMD
reg_loss = models.MSE
fair_loss = fairness_metrics.sinkhorn_diver
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
n_iterates = args.n_iterates
lr = 1e-1
lr_decay = 0.99
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=args.a_inside_x)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=args.a_inside_x)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=args.a_inside_x)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=args.a_inside_x)
logfairloss = fair_loss
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
# extract the data
X, Y, A = ds.get_data()
for lambda_ in lambda_candidates:
print('Training MMD-Sinkhorn method, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
train_metrics, test_metrics = MMD_fair.mmd_fair_traintest(ds,
model,
reg_loss,
fair_loss,
lr,
n_iterates,
lambda_,
metrics,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, lr_decay=lr_decay)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
if args.a_inside_x:
df_train.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_AinX_train_{}.csv'.format(args.dataset, args.model, args.seed))
df_test.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_AinX_test_{}.csv'.format(args.dataset, args.model, args.seed))
else:
print('here')
df_train.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'lr':lr, 'iterates':n_iterates,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'Gradient-Descent',
              'model_details':model.state_dict(),
'L':'MSE',
'fair_loss':'Sinkhorn',
'a_inside_x':args.a_inside_x
}
with open('results/NN_MMD_sinkhorn/{}_{}_Sinkhorn.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
#
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linearregression', 'NN'], help='Regression Model')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=1e-3, type=float, help='Gradient descent')
parser.add_argument('--n_iterates', default=500, type=int, help='Number of Iterates of GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification','Compas', 'LawSchool', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--a_inside_x', default=False, type=str2bool, help='The sensitive feature is in X')
args = parser.parse_args()
run(args)
| 6,679 | 44.753425 | 158 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/zafar_classification.py |
# Baseline 1: https://arxiv.org/pdf/1706.02409.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
from zafar_method import funcs_disp_mist
from zafar_method.utils import *
import fairness_metrics
import data_loader
from zafar_method import utils
import numpy as np
from tqdm import tqdm
import cvxpy as cp
from collections import namedtuple
from sklearn.metrics import log_loss
from zafar_method import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import pickle
from copy import deepcopy
import os, sys
# from generate_synthetic_data import *
from zafar_method import utils as ut
from zafar_method import funcs_disp_mist as fdm
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of http://proceedings.mlr.press/v54/zafar17a/zafar17a.pdf.
The gamma parameter controls the accuracy-fairness trade-off of the model.
An example usage: python zafar_classification.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
gamma_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=0)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
X, Y, A = ds.get_data()
X_test, Y_test, A_test = ds.get_test_data()
x_train = X.cpu().detach().numpy()
Y_train = Y.cpu().detach().numpy().flatten()
a_train = A.cpu().detach().numpy().flatten()
x_test = X_test.cpu().detach().numpy()
y_test = Y_test.cpu().detach().numpy().flatten()
a_test = A_test.cpu().detach().numpy().flatten()
loss_function = "logreg" # perform the experiments with logistic regression
Y_test_ = y_test.copy()
Y_train_ = Y_train.copy()
Y_test_[y_test == 0] = -1
Y_train_[Y_train_ == 0] = -1
# run the test for various lambdas
y_train = Y_train_
y_test = Y_test_
x_control_train = {"s1": a_train}
x_control_test = {"s1": a_test}
cons_params = None # constraint parameters, will use them later
EPS = 1e-6
for gamma in gamma_candidates:
print('Training Zafar method, for gamma: {}/{}, seed:{}'.format(gamma, args.nlambda, args.seed))
start_time = time.time()
# mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
        apply_fairness_constraints = 0  # do not impose the fairness (covariance) constraint directly
        apply_accuracy_constraint = 1  # instead optimize fairness subject to an accuracy constraint; gamma sets the allowed accuracy loss
        sep_constraint = 0
# for m in mult_range:
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
# for s_attr in sensitive_attrs_to_cov_thresh.keys():
# for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
# for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
# sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
sensitive_attrs_to_cov_thresh = {"s1":0}
w = train_model(x_train, y_train, x_control_train, lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], sensitive_attrs_to_cov_thresh, gamma)
# y_test_predicted = np.sign(np.dot(x_test, w))
# correct_answers = (y_test_predicted == y_test).astype(int) # will have 1 when the prediction and the actual label match
# accuracy = float(sum(correct_answers)) / float(len(correct_answers))
# y_test_predict[y_test_predict == -1] = 0
# w = torch.tensor(w).float()
# theta0 = torch.tensor(w).float()
stop_time = time.time()
        predict = lambda X: torch.tensor(np.maximum(np.sign(np.dot(X.cpu().detach().numpy(), w)), 0)).float()  # map {-1,+1} sign predictions back to {0,1} labels
# metrics on train set
y_hat = predict(X).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = gamma
train_results['time'] = stop_time - start_time
test_results['lambda_'] = gamma
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/zafar/{}_zafar_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/zafar/{}_zafar_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'method':'zafar',
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
'a_inside_x': False
}
with open('results/zafar/{}_zafar_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=1, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'Compas', 'LawSchool', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=25)
args = parser.parse_args()
run(args)
| 8,066 | 42.139037 | 195 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/run.py |
import os
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides results for Figure~4 and Table~6.
Example usage python run.py
The results are saved under ./results folder.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
if __name__ == '__main__':
nlambda = 25
for dataset in ['CommunitiesCrimeClassification', 'Compas', 'Adult', 'Drug']:
print(dataset)
for seed in range(10):
print('Running for seed {}'.format(seed))
print('Running Logistic Regression for our Model')
os.system('python run_benchmark.py --dataset {} --seed {} --a_inside_x True --nlambda {} --model linear'.format(dataset, seed, nlambda))
print('Running NN for our Model')
os.system('python run_benchmark.py --dataset {} --seed {} --a_inside_x True --nlambda {}'.format(dataset, seed, nlambda))
print('Running FKDE...')
os.system('python fair_KDE.py --dataset {} --seed {} --nlambda {}'.format(dataset, seed, nlambda))
print('Running MMD...')
            if dataset != 'Adult':
os.system('python .\MMD_fair_run.py --dataset {} --nlambda {} --a_inside_x True --seed {}'.format(dataset, nlambda, seed))
os.system('python zafar_classification.py --dataset {} --seed {}'.format(dataset, seed))
| 1,453 | 54.923077 | 148 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/MMD_fair.py |
# fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
import time
from tqdm import tqdm
# +---------------------------------+
# | Algorithm 1: Gradient Descent |
# +---------------------------------+
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://papers.nips.cc/paper/2020/file/af9c0e0c1dee63e5acad8b7ed1a5be96-Paper.pdf
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def mmd_gradient_descent(X, Y, A, model, predict, reg_loss, fair_loss, params, lr, N_iterates, lambda_, verbose=False, log=False, logfairloss=None, lr_decay=1, **kwargs):
'''
Train model using Algorithm 1, which uses simple gradient descent.
Args:
X (torch.Tensor): X data
Y (torch.Tensor): Y data
A (torch.Tensor): A data
predict (fct handle): Prediction function handle, maps X-->Y_hat
reg_loss (fct handle): Regression Loss function handle, maps Y_hat, Y-->L_reg
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lr (float): SGD Learning Rate
N_iterates (int): Number of iterates for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
psi (fct handle, optional):Transformation function maps from Y_hat, Y --> score, fair_loss is computed on score
verbose (bool, optional): Verbosity
log (bool, optional): Return training path
        logfairloss (optional): Fairness loss handle used for logging (e.g. a Sinkhorn divergence)
    Returns:
        Training losses (regression and fairness) per iterate if log=True; params are updated in place
'''
optimizer = optim.SGD(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
# optimizer = optim.Adam(params)
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
epoch_reg_loss = []
epoch_fair_loss = []
for iterate in tqdm(range(N_iterates)):
# zero grad accumulator
optimizer.zero_grad()
# predict
y_hat = predict(X)
y_hat_first_layer = model.first_layer(X)
L_reg = criterion(y_hat, Y)
# y_hat = torch.sigmoid(y_hat)
# compute regression and fairness loss
y_hat_1 = y_hat_first_layer[A.squeeze()==1]
y_hat_0 = y_hat_first_layer[A.squeeze()==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# all_linear1_params = torch.cat([x.view(-1) for x in model.linear1.parameters()])
# all_linear2_params = torch.cat([x.view(-1) for x in model.linear2.parameters()])
# W_froben = torch.norm(all_linear1_params, 2) ** 2
# V_froben = torch.norm(all_linear2_params, 2) ** 2
# overall loss
# reg_weight = 0.1
loss = L_reg + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L_reg.data.item(), L_fair.data.item()))
if log:
epoch_fair_loss.append(L_fair.data.item())
epoch_reg_loss.append(L_reg.data.item())
        # gradient computation and optimizer step
loss.backward()
optimizer.step()
#scheduler.step()
return epoch_reg_loss, epoch_fair_loss
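# A minimal call sketch (tensors X (n,k), Y (n,1), A (n,1) with binary A;
# `model` is e.g. models.NeuralNetwork_MMD(k); hyperparameters illustrative):
def _demo_mmd_gradient_descent(X, Y, A, model, fair_loss):
    reg_hist, fair_hist = mmd_gradient_descent(
        X, Y, A, model, model.forward,
        reg_loss=None,  # unused: BCEWithLogitsLoss is hard-coded above
        fair_loss=fair_loss,
        params=model.parameters(),
        lr=1e-1, N_iterates=200, lambda_=0.1, log=True)
    return reg_hist, fair_hist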
def mmd_fair_traintest(ds, model, reg_loss, fair_loss, lr, n_iterates, lambda_, metrics, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, lr_decay=1, **kwargs):
    '''
    Train a model with full-batch gradient descent (Algorithm 1) and test it on metrics
    Args:
        ds (data_loader.DataLoader): Data loader to use
        model (torch.nn.Module): Pytorch module
        reg_loss (fct handle): Regression loss handle (unused: BCEWithLogitsLoss is hard-coded)
        fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
        lr (float): GD Learning Rate
        n_iterates (int): Number of gradient-descent iterates
        lambda_ (numeric): Hyperparameter controlling influence of L_fair
        metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
        plot_convergence (bool, optional): If convergence plot of training should be shown
        logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
        train_test_split_fin (bool, adult data) : Train-Test split already performed on adult data
    Returns:
        train_results, test_results: dicts of results
    '''
# train the model
start_time = time.time()
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
regloss, fairloss = mmd_gradient_descent(X, Y, A, model, model.forward,
reg_loss,
fair_loss,
model.parameters(), lr, n_iterates,
lambda_,
logdata = ds.get_log_data() if plot_convergence else None, logfairloss=logfairloss, lr_decay=lr_decay, **kwargs)
# plot convergence if desired
if plot_convergence:
        # NOTE: convergence_plotter is not imported in this module and must be
        # supplied by the surrounding namespace before enabling plot_convergence
        convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
    # metrics on training set
stop_time = time.time()
y_hat = torch.round(torch.sigmoid(model.forward(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = torch.round(torch.sigmoid(model.forward(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return train_results, test_results
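# Hedged call sketch (commented out because `models` and `fairness_metrics`
# are not imported in this module):
#   model = models.NeuralNetwork_MMD(ds.get_k())
#   train_res, test_res = mmd_fair_traintest(ds, model, models.MSE,
#       fairness_metrics.sinkhorn_diver, lr=1e-1, n_iterates=500,
#       lambda_=0.1, metrics=metrics)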
| 6,629 | 44.102041 | 187 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/models.py |
# models.py
# models for regression
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides models for MFL and Oneto et al.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
class LinearRegression(nn.Module):
def __init__(self, k):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(k, 1, bias=True)
def forward(self, x):
return self.linear(x)
class NeuralNetwork(nn.Module):
def __init__(self, k):
super(NeuralNetwork, self).__init__()
self.linear1 = torch.nn.Linear(k, 20, bias=True)
self.linear2 = torch.nn.Linear(20, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of MFL"""
class NeuralNetworkClassification(nn.Module):
def __init__(self, k):
super(NeuralNetworkClassification, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of Oneta et al."""
class NeuralNetwork_MMD(nn.Module):
def __init__(self, k):
super(NeuralNetwork_MMD, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
        self.sigmoid_ = torch.nn.ReLU()  # note: despite the name, the activation is ReLU
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def first_layer(self, x):
return self.sigmoid_((self.linear1(x)))
def forward(self, x):
self.output = self.linear2(self.sigmoid_((self.linear1(x))))
return self.output
# loss_functions: MAE and MSE
def MSE(y_pred, y):
return ((y_pred - y) ** 2).mean()
def MAE(y_pred, y):
return (y_pred - y).abs().mean()
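# A minimal sketch (shapes illustrative) of the two heads of NeuralNetwork_MMD:
# first_layer yields the 16-dim hidden representation on which the fairness
# penalty is computed, while forward yields the scalar logit.
def _demo_mmd_model(k=5, n=4):
    model = NeuralNetwork_MMD(k)
    x = torch.randn(n, k)
    return model.first_layer(x).shape, model(x).shape  # (n, 16), (n, 1)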
| 2,012 | 29.5 | 97 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/load_data.py |
import numpy as np
import pandas as pd
import sklearn.preprocessing as preprocessing
from collections import namedtuple
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # for plotting stuff
import os
import collections
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of dataset preprocessing.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def load_compas_data(COMPAS_INPUT_FILE):
FEATURES_CLASSIFICATION = ["age_cat", "race", "sex", "priors_count",
"c_charge_degree"] # features to be used for classification
CONT_VARIABLES = [
"priors_count"] # continuous features, will need to be handled separately from categorical features, categorical features will be encoded using one-hot
CLASS_FEATURE = "two_year_recid" # the decision variable
SENSITIVE_ATTRS = ["race"]
# COMPAS_INPUT_FILE = DIR_DATA + "compas/compas-scores-two-years.csv"
print('Loading COMPAS dataset...')
# load the data and get some stats
df = pd.read_csv(COMPAS_INPUT_FILE)
df = df.dropna(subset=["days_b_screening_arrest"]) # dropping missing vals
# convert to np array
data = df.to_dict('list')
for k in data.keys():
data[k] = np.array(data[k])
""" Filtering the data """
# These filters are the same as propublica (refer to https://github.com/propublica/compas-analysis)
# If the charge date of a defendants Compas scored crime was not within 30 days from when the person was arrested, we assume that because of data quality reasons, that we do not have the right offense.
idx = np.logical_and(data["days_b_screening_arrest"] <= 30, data["days_b_screening_arrest"] >= -30)
# We coded the recidivist flag -- is_recid -- to be -1 if we could not find a compas case at all.
idx = np.logical_and(idx, data["is_recid"] != -1)
# In a similar vein, ordinary traffic offenses -- those with a c_charge_degree of 'O' -- will not result in Jail time are removed (only two of them).
idx = np.logical_and(idx, data["c_charge_degree"] != "O") # F: felony, M: misconduct
# We filtered the underlying data from Broward county to include only those rows representing people who had either recidivated in two years, or had at least two years outside of a correctional facility.
idx = np.logical_and(idx, data["score_text"] != "NA")
# we will only consider blacks and whites for this analysis
idx = np.logical_and(idx, np.logical_or(data["race"] == "African-American", data["race"] == "Caucasian"))
# select the examples that satisfy this criteria
for k in data.keys():
data[k] = data[k][idx]
""" Feature normalization and one hot encoding """
# convert class label 0 to -1
y = data[CLASS_FEATURE]
# y[y == 0] = -1
print("\nNumber of people recidivating within two years")
print(pd.Series(y).value_counts())
print("\n")
X = np.array([]).reshape(len(y),
0) # empty array with num rows same as num examples, will hstack the features to it
x_control = collections.defaultdict(list)
feature_names = []
for attr in FEATURES_CLASSIFICATION:
vals = data[attr]
if attr in SENSITIVE_ATTRS:
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
x_control[attr] = vals
pass
else:
if attr in CONT_VARIABLES:
vals = [float(v) for v in vals]
vals = preprocessing.scale(vals) # 0 mean and 1 variance
vals = np.reshape(vals, (len(y), -1)) # convert from 1-d arr to a 2-d arr with one col
else: # for binary categorical variables, the label binarizer uses just one var instead of two
lb = preprocessing.LabelBinarizer()
lb.fit(vals)
vals = lb.transform(vals)
# add to sensitive features dict
# add to learnable features
X = np.hstack((X, vals))
if attr in CONT_VARIABLES: # continuous feature, just append the name
feature_names.append(attr)
else: # categorical features
            if vals.shape[1] == 1:  # binary features that passed through the label binarizer
feature_names.append(attr)
else:
for k in lb.classes_: # non-binary categorical features, need to add the names for each cat
feature_names.append(attr + "_" + str(k))
# convert the sensitive feature to 1-d array
x_control = dict(x_control)
for k in x_control.keys():
assert (x_control[k].shape[1] == 1) # make sure that the sensitive feature is binary after one hot encoding
x_control[k] = np.array(x_control[k]).flatten()
# sys.exit(1)
# """permute the date randomly"""
# perm = range(0, X.shape[0])
# shuffle(perm)
# X = X[perm]
# y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][:]
# intercept = np.ones(X.shape[0]).reshape(X.shape[0], 1)
# X = np.concatenate((intercept, X), axis=1)
assert (len(feature_names) == X.shape[1])
print("Features we will be using for classification are:", feature_names, "\n")
x_control = x_control['race']
return X, y, x_control
def load_drug_data(DIR_DATA):
g = pd.read_csv(DIR_DATA, header=None, sep=',')
# g = pd.read_csv("drug_consumption.data.txt", header=None, sep=',')
g = np.array(g)
data = np.array(g[:, 1:13]) # Remove the ID and labels
labels = g[:, 13:]
    yfalse_value = 'CL0'  # 'CL0' (never used) is treated as the positive class below
y = np.array([1.0 if yy == yfalse_value else 0.0 for yy in labels[:, 5]])
dataset = namedtuple('_', 'data, target')(data, y)
print('Loading Drug (black vs others) dataset...')
# dataset_train = load_drug()
sensible_feature = 4 # ethnicity
a = np.array([1.0 if el == -0.31685 else 0 for el in data[:, sensible_feature]])
X = np.delete(data, sensible_feature, axis=1).astype(float)
return X, y, a
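# Hedged usage sketch for the two loaders above (the COMPAS path matches the
# one used in data_loader.py; the drug path is a placeholder for a local copy
# of drug_consumption.data):
def _demo_classification_loaders():
    X_c, y_c, a_c = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
    X_d, y_d, a_d = load_drug_data('data/drug_consumption.data')
    return X_c.shape, X_d.shape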
def load_adult(DIR_DATA, smaller=False, scaler=True):
'''
:param smaller: selecting this flag it is possible to generate a smaller version of the training and test sets.
:param scaler: if True it applies a StandardScaler() (from sklearn.preprocessing) to the data.
:return: train and test data.
Features of the Adult dataset:
0. age: continuous.
1. workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
2. fnlwgt: continuous.
3. education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th,
Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
4. education-num: continuous.
5. marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
Married-spouse-absent, Married-AF-spouse.
6. occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv,
Protective-serv, Armed-Forces.
7. relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
8. race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
9. sex: Female, Male.
10. capital-gain: continuous.
11. capital-loss: continuous.
12. hours-per-week: continuous.
13. native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc),
India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico,
Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala,
Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
(14. label: <=50K, >50K)
'''
data = pd.read_csv(
DIR_DATA,
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
len_train = len(data.values[:, -1])
    # NOTE: the train frame is read from DIR_DATA itself, while the test frame
    # is read from DIR_DATA + "adult/adult.test"; the caller must pass a path
    # for which both resolve
    data_test = pd.read_csv(
DIR_DATA + "adult/adult.test",
names=[
"Age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "gender", "capital gain", "capital loss",
"hours per week", "native-country", "income"]
)
data = pd.concat([data, data_test])
# Considering the relative low portion of missing data, we discard rows with missing data
    domanda = data["workclass"][4].values[1]  # extract the ' ?' missing-value marker from a row known to contain it
data = data[data["workclass"] != domanda]
data = data[data["occupation"] != domanda]
data = data[data["native-country"] != domanda]
# Here we apply discretisation on column marital_status
data.replace(['Divorced', 'Married-AF-spouse',
'Married-civ-spouse', 'Married-spouse-absent',
'Never-married', 'Separated', 'Widowed'],
['not married', 'married', 'married', 'married',
'not married', 'not married', 'not married'], inplace=True)
# categorical fields
category_col = ['workclass', 'race', 'education', 'marital-status', 'occupation',
'relationship', 'gender', 'native-country', 'income']
for col in category_col:
b, c = np.unique(data[col], return_inverse=True)
data[col] = c
datamat = data.values
target = np.array([-1.0 if val == 0 else 1.0 for val in np.array(datamat)[:, -1]])
    datamat = datamat[:, :-1]  # drop the label column (note: the slices below drop one more trailing column)
if scaler:
scaler = StandardScaler()
scaler.fit(datamat)
datamat = scaler.transform(datamat)
if smaller:
print('A smaller version of the dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train // 20, :-1], target[:len_train // 20])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
else:
print('The dataset is loaded...')
data = namedtuple('_', 'data, target')(datamat[:len_train, :-1], target[:len_train])
data_test = namedtuple('_', 'data, target')(datamat[len_train:, :-1], target[len_train:])
return data, data_test
# def load_toy_test():
# # Load toy test
# n_samples = 100 * 2
# n_samples_low = 20 * 2
# n_dimensions = 10
# X, y, sensible_feature_id, _, _ = generate_toy_data(n_samples=n_samples,
# n_samples_low=n_samples_low,
# n_dimensions=n_dimensions)
# data = namedtuple('_', 'data, target')(X, y)
# return data, data
| 11,005 | 46.034188 | 207 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/fair_KDE.py |
# Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import pickle
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from Fair_KDE.models import Classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from Fair_KDE.dataloader import CustomDataset
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides an implementation of https://proceedings.neurips.cc/paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
An example usage: python fair_KDE.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
tau = 0.5
# Approximation of the Gaussian Q-function given by López-Benítez & Casadevall (2011),
# based on a second-order exponential fit; negative arguments use the identity Q(x) = 1 - Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
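# Sanity-check sketch (not executed on import): compare the exponential fit of
# Q_function above against the exact Gaussian tail Q(x) = erfc(x/sqrt(2))/2.
# The fit is tightest for moderate-to-large x and loose near x = 0.
def _demo_q_approx():
    x = torch.linspace(0.5, 4.0, 8)
    exact = 0.5 * torch.erfc(x / 2 ** 0.5)
    return torch.stack((x, Q_function(x), exact))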
# act on experiment parameters:
def run(args):
# act on experiment parameters:
seed = args.seed
data_loader.set_seed(args.seed)
    ##### Other training hyperparameters #####
    lr = 2e-4
    lr_decay = 1.0
    batch_size = 2048
    n_epochs = 500
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
batch_size = 128
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
lr = 5e-4
batch_size = 2048
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
lr = 2e-4
batch_size = 2048
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
lr = 5e-4
        batch_size = 2048
if args.dataset == 'Adult':
ds = data_loader.Adult(0)
train_test_split_fin = 1
batch_size = 2048
lr = 1e-1
        lr_decay = 0.98
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
batch_size = 128
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 16 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
    device = torch.device('cpu')  # or torch.device('cuda')
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
    input_dim = k + 1  # +1 for the sensitive attribute Z concatenated into XZ
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
if args.dataset == 'Adult':
X_train, Y_train, Z_train, X_test, Y_test, Z_test = ds.get_adult_data()
else:
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
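    # phi is the standard-normal pdf; below it acts as the derivative of the
    # Q-function-smoothed CDF estimate w.r.t. Yhat, yielding a differentiable
    # surrogate gradient for the demographic-parity gap.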
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
time_track = []
for lambda_ in lambda_candidates:
print('Training FKDE method, for lambda: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
start_time = time.time()
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
                    for z in range(1):  # only z = 0 is iterated here
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
                            if Delta_zy.abs() >= delta:
                                if Delta_zy > 0:
                                    Delta_zy_grad *= lambda_*delta
                                    cost += Delta_zy_grad
                                else:
                                    Delta_zy_grad *= -lambda_*delta
                                    cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
# if (i + 1) % 10 == 0 or (i + 1) == len(generator):
# print('Lambda:{}, Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(lambda_, epoch+1, n_epochs,
# i+1, len(generator),
# cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
stop_time = time.time()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
print(train_results)
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0))
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/FKDE/{}_FKDE_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/FKDE/{}_FKDE_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'method':'FKDE',
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
'L':'BCE_cross_entropy',
'lr_decay':lr_decay,
'a_inside_x': True
}
with open('results/FKDE/{}_FKDE_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'LawSchool', 'Compas', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
args = parser.parse_args()
run(args)
| 14,239 | 40.037464 | 153 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/run_benchmark.py |
import models
import fairness_metrics
import benchmark
import data_loader
import pickle
import argparse
import pandas as pd
import numpy as np
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of MFL
An example usage: python run_benchmark.py --dataset {} --seed {} --a_inside_x True --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.LinearRegression if args.model=='linear' else models.NeuralNetworkClassification
fair_loss = fairness_metrics.energy_distance
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
lr = args.lr
n_epochs = args.n_epochs
#lr = 5e-4
#n_epochs = 500
lr_decay = 0.99
batch_size = 2048
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=args.a_inside_x)
batch_size = 128
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=args.a_inside_x)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=args.a_inside_x)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=args.a_inside_x)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=args.a_inside_x)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=args.a_inside_x)
batch_size = 128
logfairloss = fair_loss
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
for lambda_ in lambda_candidates:
print('Training Our method, for lambda_: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
print(Model)
train_metrics, test_metrics = benchmark.train_test_fair_learning(ds=ds,
model=model,
fair_loss=fair_loss,
lr=lr,
batch_size=batch_size,
N_epochs=n_epochs,
lambda_=lambda_,
metrics=metrics,
lr_decay=lr_decay,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay, train_test_split_fin=train_test_split_fin)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
if args.a_inside_x:
df_train.to_csv('results/NN_energy/{}_{}_AinX_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_energy/{}_{}_AinX_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
else:
print('here')
df_train.to_csv('results/NN_energy/{}_{}_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_energy/{}_{}_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
              'model_details':model.state_dict(),
'L':'BCE_cross_entropy',
'fair_loss':'Energy',
'lr_decay':lr_decay,
'a_inside_x':args.a_inside_x
}
with open('results/NN_energy/{}_{}_{}.pkl'.format(args.dataset, args.model, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
#
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linear', 'NN'], help='Model')
# parser.add_argument('--regloss', default='L2', choices=['L1', 'L2'], help='Regression Loss')
# parser.add_argument('--fairloss', required=True, choices=['Energy', 'Wasserstein'], help='Fairness loss')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=5e-4, type=float, help='Learning Rate of (S)GD: Currently has no effect since Adam is used')
parser.add_argument('--n_epochs', default=500, type=int, help='Number of Epochs of (S)GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['Synthetic1', 'Synthetic2', 'CommunitiesCrime', 'CommunitiesCrimeClassification',
'BarPass', 'StudentsMath', 'StudentsPortugese', 'Compas', 'LawSchool', 'Adult',
'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--weight_decay', help='SGD weight decay', type=float, default=0.0)
parser.add_argument('--a_inside_x', default=False, type=str2bool, help='The sensitive feature is in X')
args = parser.parse_args()
run(args)
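# Example invocation (editor's sketch; the script name is a placeholder for this
# file, and the flags match the argparse definitions above):
#   python run_benchmark_classification.py --dataset Compas --model NN --seed 0 --nlambda 50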
| 7,511
| 46.544304
| 214
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/run_benchmark_regression.py
|
import models
import fairness_metrics
import benchmark
import data_loader
import pickle
import argparse
import pandas as pd
import numpy as np
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of MFL
% Example usage:
python run_benchmark_regression.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
Model = models.LinearRegression if args.model=='linearregression' else models.NeuralNetwork
fair_loss = fairness_metrics.energy_distance
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
train_test_split_fin = 0
lr = args.lr
lr_decay = 1
batch_size = args.batch_size
n_epochs = args.n_epochs
if args.dataset == 'CommunitiesCrime':
ds = data_loader.CommunitiesCrime()
if args.dataset == 'BarPass':
ds = data_loader.BarPass()
if args.dataset == 'StudentsMath':
ds = data_loader.StudentPerformance(subject='Math')
if args.dataset == 'StudentsPortugese':
ds = data_loader.StudentPerformance(subject='Portugese')
logfairloss = fair_loss
ds.split_test()
k = ds.get_k() # Dimension
# metrics to evaluate
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'R2' : fairness_metrics.R2
}
# storage of results
results_train = []
results_test = []
# run the test for various lambdas
for lambda_ in lambda_candidates:
        print('Training our method for lambda_={} ({} candidates), seed={}'.format(lambda_, args.nlambda, args.seed))
model = Model(k)
train_metrics, test_metrics = benchmark.train_test_fair_learning_regression(ds=ds,
model=model,
fair_loss=fair_loss,
lr=lr,
batch_size=batch_size,
N_epochs=n_epochs,
lambda_=lambda_,
metrics=metrics,
lr_decay=lr_decay,
psi=None, plot_convergence=args.plot_convergence, logfairloss=logfairloss, weight_decay=args.weight_decay, train_test_split_fin=train_test_split_fin)
train_metrics['lambda_'] = lambda_
test_metrics['lambda_'] = lambda_
results_train.append(train_metrics)
results_test.append(test_metrics)
# save the results
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/NN_energy_regression/{}_{}_train_{}.csv'.format(args.dataset, \
args.model, args.seed))
df_test.to_csv('results/NN_energy_regression/{}_{}_test_{}.csv'.format(args.dataset, \
args.model, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
              'algorithm':'adam',
              'model_details':model.state_dict(),
'L':'MSE',
'fair_loss':'Energy',
'lr_decay':lr_decay
}
with open('results/NN_energy_regression/{}_{}_{}.pkl'.format(args.dataset, args.model, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--model', default='NN', choices=['linearregression', 'NN'], help='Regression Model')
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=1, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--lr', default=1e-4, type=float, help='Learning Rate of (S)GD: Currently has no effect since Adam is used')
parser.add_argument('--batch_size', default=128, type=int, help='Batch Size for algorithm 2')
parser.add_argument('--n_epochs', default=500, type=int, help='Number of Epochs of (S)GD')
parser.add_argument('--plot_convergence', default=False, action='store_true', help='If Convergence plot should be done')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrime', 'BarPass', 'StudentsMath', 'StudentsPortugese'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--weight_decay', help='SGD weight decay', type=float, default=0.0)
args = parser.parse_args()
run(args)
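# Example invocation (editor's sketch; the dataset must be one of the parser choices above):
#   python run_benchmark_regression.py --dataset CommunitiesCrime --model NN --seed 0 --nlambda 50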
| 6,191
| 45.208955
| 214
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/dccp/objective.py
|
__author__ = "Xinyue"
from dccp.linearize import linearize, linearize_para
import cvxpy as cvx
# from linearize import linearize_para
def convexify_para_obj(obj):
"""
input:
obj: an objective of a problem
return:
if the objective is dcp,
return the cost function (an expression);
if the objective has a wrong curvature,
return the linearized expression of the cost function,
        the zero-order parameter,
the dictionary of parameters indexed by variables,
the domain
"""
    if not obj.is_dcp():
return linearize_para(obj.expr)
else:
return obj.expr
def is_dccp(objective):
"""
input:
objective: an objective of a problem
return:
if the objective is dccp
the objective must be convex, concave, affine, or constant
"""
if objective.expr.curvature == "UNKNOWN":
return False
else:
return True
def convexify_obj(obj):
"""
:param obj: objective of a problem
    :return: convexified objective or None
"""
# not dcp
    if not obj.is_dcp():
lin = linearize(obj.expr)
# non-sub/super-diff
if lin is None:
return None
else:
if obj.NAME == "minimize":
result = cvx.Minimize(lin)
else:
result = cvx.Maximize(lin)
else:
result = obj
return result
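# Usage sketch (editor's addition, mirroring test_convexify_obj in
# dccp/test/test_example.py; assumes cvxpy is importable as cvx):
#   x = cvx.Variable(2)
#   obj = cvx.Maximize(cvx.sum(cvx.square(x)))  # maximizing a convex function: not dcp
#   x.value = [1, 1]
#   conv = convexify_obj(obj)                   # Maximize of the tangent at x = (1, 1)
#   cvx.Problem(conv, [x <= -1]).solve()        # linearized subproblem; optimal value -6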
| 1,445
| 23.931034
| 66
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/dccp/problem.py
|
__author__ = "Xinyue"
import numpy as np
import cvxpy as cvx
import logging
from dccp.objective import convexify_obj
from dccp.objective import convexify_para_obj
from dccp.constraint import convexify_para_constr
from dccp.constraint import convexify_constr
logger = logging.getLogger("dccp")
logger.addHandler(logging.FileHandler(filename="dccp.log", mode="w", delay=True))
logger.setLevel(logging.INFO)
logger.propagate = False
def dccp(
self,
max_iter=100,
tau=0.005,
mu=1.2,
tau_max=1e8,
solver=None,
ccp_times=1,
max_slack=1e-3,
ep=1e-5,
**kwargs
):
"""
main algorithm ccp
:param max_iter: maximum number of iterations in ccp
:param tau: initial weight on slack variables
:param mu: increment of weight on slack variables
:param tau_max: maximum weight on slack variables
:param solver: specify the solver for the transformed problem
:param ccp_times: times of running ccp to solve a problem with random initial values on variables
:return
if the transformed problem is infeasible, return None;
"""
if not is_dccp(self):
raise Exception("Problem is not DCCP.")
result = None
if self.objective.NAME == "minimize":
        cost_value = float("inf")  # record of the best cost value
else:
cost_value = -float("inf")
for t in range(ccp_times): # for each time of running ccp
dccp_ini(
self, random=(ccp_times > 1), solver=solver, **kwargs
) # initialization; random initial value is mandatory if ccp_times>1
# iterations
result_temp = iter_dccp(
self, max_iter, tau, mu, tau_max, solver, ep, max_slack, **kwargs
)
# first iteration
if t == 0:
self._status = result_temp[-1]
result = result_temp
cost_value = result_temp[0]
result_record = {}
for var in self.variables():
result_record[var] = var.value
else:
if result_temp[-1] == "Converged":
self._status = result_temp[-1]
if result_temp[0] is not None:
if (
(cost_value is None)
or (
self.objective.NAME == "minimize"
and result_temp[0] < cost_value
)
or (
self.objective.NAME == "maximize"
and result_temp[0] > cost_value
)
): # find a better cost value
# no slack; slack small enough
if len(result_temp) < 4 or result_temp[1] < max_slack:
result = result_temp # update the result
cost_value = result_temp[
0
] # update the record on the best cost value
for var in self.variables():
result_record[var] = var.value
else:
for var in self.variables():
var.value = result_record[var]
return result
def dccp_ini(self, times=1, random=0, solver=None, **kwargs):
"""
set initial values
:param times: number of random projections for each variable
:param random: mandatory random initial values
"""
dom_constr = self.objective.args[0].domain # domain of the objective function
for arg in self.constraints:
for l in range(2):
for dom in arg.args[l].domain:
dom_constr.append(dom) # domain on each side of constraints
var_store = [] # store initial values for each variable
init_flag = [] # indicate if any variable is initialized by the user
var_user_ini = []
for var in self.variables():
var_store.append(np.zeros(var.shape)) # to be averaged
init_flag.append(var.value is None)
if var.value is None:
var_user_ini.append(np.zeros(var.shape))
else:
var_user_ini.append(var.value)
# setup the problem
ini_cost = 0
var_ind = 0
value_para = []
for var in self.variables():
if (
init_flag[var_ind] or random
): # if the variable is not initialized by the user, or random initialization is mandatory
value_para.append(cvx.Parameter(var.shape))
ini_cost += cvx.pnorm(var - value_para[-1], 2)
var_ind += 1
ini_obj = cvx.Minimize(ini_cost)
ini_prob = cvx.Problem(ini_obj, dom_constr)
# solve it several times with random points
for t in range(times): # for each time of random projection
count_para = 0
var_ind = 0
for var in self.variables():
# if the variable is not initialized by the user, or random
# initialization is mandatory
if init_flag[var_ind] or random:
# set a random point
if len(var.shape) > 1:
value_para[count_para].value = (
np.random.randn(var.shape[0], var.shape[1]) * 10
)
else:
value_para[count_para].value = np.random.randn(var.size) * 10
count_para += 1
var_ind += 1
if solver is None:
ini_prob.solve(**kwargs)
else:
ini_prob.solve(solver=solver, **kwargs)
var_ind = 0
for var in self.variables():
var_store[var_ind] = var_store[var_ind] + var.value / float(
times
) # average
var_ind += 1
# set initial values
var_ind = 0
for var in self.variables():
if init_flag[var_ind] or random:
var.value = var_store[var_ind]
else:
var.value = var_user_ini[var_ind]
var_ind += 1
def is_dccp(problem):
"""
:param
a problem
:return
a boolean indicating if the problem is dccp
"""
if problem.objective.expr.curvature == "UNKNOWN":
return False
for constr in problem.constraints:
for arg in constr.args:
if arg.curvature == "UNKNOWN":
return False
return True
def iter_dccp(self, max_iter, tau, mu, tau_max, solver, ep, max_slack_tol, **kwargs):
"""
ccp iterations
:param max_iter: maximum number of iterations in ccp
:param tau: initial weight on slack variables
:param mu: increment of weight on slack variables
:param tau_max: maximum weight on slack variables
:param solver: specify the solver for the transformed problem
    :param ep: convergence tolerance on the cost value
    :param max_slack_tol: maximum slack value tolerated at convergence
    :return:
        value of the objective function, maximum value of the slack variables
        (only present if there are non-dcp constraints), values of the variables, and the status
"""
# split non-affine equality constraints
constr = []
for constraint in self.constraints:
if (
str(type(constraint)) == "<class 'cvxpy.constraints.zero.Equality'>"
and not constraint.is_dcp()
):
constr.append(constraint.args[0] <= constraint.args[1])
constr.append(constraint.args[0] >= constraint.args[1])
else:
constr.append(constraint)
obj = self.objective
self = cvx.Problem(obj, constr)
it = 1
converge = False
# keep the values from the previous iteration or initialization
previous_cost = float("inf")
previous_org_cost = self.objective.value
variable_pres_value = []
for var in self.variables():
variable_pres_value.append(var.value)
# each non-dcp constraint needs a slack variable
var_slack = []
for constr in self.constraints:
if not constr.is_dcp():
var_slack.append(cvx.Variable(constr.shape))
while it <= max_iter and all(var.value is not None for var in self.variables()):
constr_new = []
# objective
convexified_obj = convexify_obj(self.objective)
if not self.objective.is_dcp():
# non-sub/super-diff
while convexified_obj is None:
# damping
var_index = 0
for var in self.variables():
var.value = 0.8 * var.value + 0.2 * variable_pres_value[var_index]
var_index += 1
convexified_obj = convexify_obj(self.objective)
# domain constraints
for dom in self.objective.expr.domain:
constr_new.append(dom)
# new cost function
cost_new = convexified_obj.expr
# constraints
count_slack = 0
for arg in self.constraints:
temp = convexify_constr(arg)
if not arg.is_dcp():
while temp is None:
# damping
var_index = 0
for var in self.variables():
var.value = (
0.8 * var.value + 0.2 * variable_pres_value[var_index]
)
var_index += 1
temp = convexify_constr(arg)
newcon = temp[0] # new constraint without slack variable
for dom in temp[1]: # domain
constr_new.append(dom)
constr_new.append(newcon.expr <= var_slack[count_slack])
constr_new.append(var_slack[count_slack] >= 0)
count_slack = count_slack + 1
else:
constr_new.append(arg)
# objective
if self.objective.NAME == "minimize":
for var in var_slack:
cost_new += tau * cvx.sum(var)
obj_new = cvx.Minimize(cost_new)
else:
for var in var_slack:
cost_new -= tau * cvx.sum(var)
obj_new = cvx.Maximize(cost_new)
# new problem
prob_new = cvx.Problem(obj_new, constr_new)
# keep previous value of variables
variable_pres_value = []
for var in self.variables():
variable_pres_value.append(var.value)
# solve
if solver is None:
prob_new_cost_value = prob_new.solve(**kwargs)
else:
prob_new_cost_value = prob_new.solve(solver=solver, **kwargs)
if prob_new_cost_value is not None:
logger.info(
"iteration=%d, cost value=%.5f, tau=%.5f, solver status=%s",
it,
prob_new_cost_value,
tau,
prob_new.status,
)
else:
logger.info(
"iteration=%d, cost value=%.5f, tau=%.5f, solver status=%s",
it,
np.nan,
tau,
prob_new.status,
)
max_slack = None
# print slack
if (
prob_new._status == "optimal" or prob_new._status == "optimal_inaccurate"
) and not var_slack == []:
slack_values = [v.value for v in var_slack if v.value is not None]
max_slack = max([np.max(v) for v in slack_values] + [-np.inf])
logger.info("max slack = %.5f", max_slack)
# terminate
if (
prob_new.value is not None
and np.abs(previous_cost - prob_new.value) <= ep
and np.abs(self.objective.value - previous_org_cost) <= ep
and (max_slack is None or max_slack <= max_slack_tol)
):
it = max_iter + 1
converge = True
else:
previous_cost = prob_new.value
previous_org_cost = self.objective.value
tau = min([tau * mu, tau_max])
it += 1
# return
if converge:
self._status = "Converged"
else:
self._status = "Not_converged"
var_value = []
for var in self.variables():
var_value.append(var.value)
if not var_slack == []:
return (self.objective.value, max_slack, var_value, self._status)
else:
return (self.objective.value, var_value, self._status)
cvx.Problem.register_solve("dccp", dccp)
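# Usage sketch (editor's addition, mirroring the readme example in
# dccp/test/test_example.py); the register_solve call above exposes the
# algorithm as a cvxpy solve method:
#   y, z = cvx.Variable(2), cvx.Variable(2)
#   prob = cvx.Problem(cvx.Maximize(cvx.norm(y - z, 2)),
#                      [0 <= y, y <= 1, 0 <= z, z <= 1])
#   result = prob.solve(method="dccp")  # result[0] is the best cost found, sqrt(2) here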
| 12,115
| 35.059524
| 101
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/dccp/constraint.py
|
__author__ = "Xinyue"
from dccp.linearize import linearize, linearize_para
import cvxpy as cvx
# from dccp.linearize import linearize_para
def convexify_para_constr(self):
"""
input:
self: a constraint of a problem
return:
if the constraint is dcp, return itself;
otherwise, return
a convexified constraint
para: [left side, right side]
            if the left/right-hand side of the constraint is linearized,
left/right side = [zero order parameter, {variable: [value parameter, [gradient parameter]]}]
else,
left/right side = []
dom: domain
"""
if not self.is_dcp():
dom = [] # domain
para = [] # a list for parameters
if self.expr.args[0].curvature == "CONCAVE": # left-hand concave
lin = linearize_para(self.expr.args[0]) # linearize the expression
left = lin[0]
para.append(
[lin[1], lin[2]]
) # [zero order parameter, {variable: [value parameter, [gradient parameter]]}]
for con in lin[3]:
dom.append(con)
else:
left = self.expr.args[0]
para.append(
[]
) # appending an empty list indicates the expression has the right curvature
if (
self.expr.args[1].curvature == "CONCAVE"
): # negative right-hand must be concave (right-hand is convex)
lin = linearize_para(self.expr.args[1]) # linearize the expression
neg_right = lin[0]
para.append([lin[1], lin[2]])
for con in lin[3]:
dom.append(con)
else:
neg_right = self.expr.args[1]
para.append([])
return left + neg_right <= 0, para, dom
else:
return self
def convexify_constr(constr):
"""
:param constr: a constraint of a problem
:return:
for a dcp constraint, return itself;
for a non-dcp constraint, return a convexified constraint and domain constraints;
return None if non-sub/super-diff
"""
if not constr.is_dcp():
dom = []
# left hand concave
if constr.args[0].curvature == "CONCAVE":
left = linearize(constr.args[0])
if left is None:
return None
else:
for con in constr.args[0].domain:
dom.append(con)
else:
left = constr.args[0]
# right hand convex
if constr.args[1].curvature == "CONVEX":
right = linearize(constr.args[1])
if right is None:
return None
else:
for con in constr.args[1].domain:
dom.append(con)
else:
right = constr.args[1]
return left - right <= 0, dom
else:
return constr
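# Usage sketch (editor's addition, mirroring test_convexify_constr in
# dccp/test/test_example.py; assumes cvxpy is importable as cvx):
#   x = cvx.Variable(2)
#   x.value = [1, 1]
#   constr = cvx.norm(x) >= 1               # non-dcp: convex >= constant
#   conv, dom = convexify_constr(constr)    # tangent-based convex restriction + domain
#   cvx.Problem(cvx.Minimize(cvx.norm(x)), [conv]).solve()  # optimal value 1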
| 2,925
| 32.632184
| 109
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/dccp/linearize.py
|
__author__ = "Xinyue"
import numpy as np
import cvxpy as cvx
def linearize_para(expr):
"""
input:
expr: an expression
return:
linear_expr: linearized expression
zero_order: zero order parameter
linear_dictionary: {variable: [value parameter, [gradient parameter]]}
dom: domain
"""
zero_order = cvx.Parameter(expr.shape) # zero order
linear_expr = zero_order
linear_dictionary = {}
for var in expr.variables():
value_para = cvx.Parameter(var.shape)
if var.ndim > 1: # matrix to vector
gr = []
for d in range(var.shape[1]):
g = cvx.Parameter((var.shape[0], expr.shape[0]))
# g = g.T
linear_expr += g.T @ (var[:, d] - value_para[:, d]) # first order
gr.append(g)
linear_dictionary[var] = [value_para, gr]
        else:  # vector case
            g = cvx.Parameter((var.shape[0], expr.shape[0]))  # gradient parameter
            linear_expr += g.T @ (var - value_para)  # first order
            linear_dictionary[var] = [value_para, [g]]
dom = expr.domain
return linear_expr, zero_order, linear_dictionary, dom
def linearize(expr):
"""Returns the tangent approximation to the expression.
Gives an elementwise lower (upper) bound for convex (concave)
expressions. No guarantees for non-DCP expressions.
Args:
expr: An expression.
Returns:
An affine expression.
"""
if expr.is_affine():
return expr
else:
        if expr.value is None:
            raise ValueError(
                "Cannot linearize non-affine expression with missing variable values."
            )
        if np.any(np.iscomplex(expr.value)):
            tangent = np.real(expr.value) + np.imag(expr.value)
        else:
            tangent = expr.value
grad_map = expr.grad
for var in expr.variables():
if grad_map[var] is None:
return None
complex_flag = False
if var.is_complex() or np.any(np.iscomplex(grad_map[var])):
complex_flag = True
if var.ndim > 1:
temp = cvx.reshape(
cvx.vec(var - var.value), (var.shape[0] * var.shape[1], 1)
)
if complex_flag:
flattened = np.transpose(np.real(grad_map[var])) @ cvx.real(temp) + \
np.transpose(np.imag(grad_map[var])) @ cvx.imag(temp)
else:
flattened = np.transpose(grad_map[var]) @ temp
tangent = tangent + cvx.reshape(flattened, expr.shape)
elif var.size > 1:
if complex_flag:
tangent = tangent + np.transpose(np.real(grad_map[var])) @ (cvx.real(var) - np.real(var.value)) \
+ np.transpose(np.imag(grad_map[var])) @ (cvx.imag(var) - np.imag(var.value))
else:
tangent = tangent + np.transpose(grad_map[var]) @ (var - var.value)
else:
if complex_flag:
tangent = tangent + np.real(grad_map[var]) * (cvx.real(var) - np.real(var.value)) \
+ np.imag(grad_map[var]) * (cvx.imag(var) - np.imag(var.value))
else:
tangent = tangent + grad_map[var] * (var - var.value)
return tangent
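# Usage sketch (editor's addition, mirroring test_linearize in
# dccp/test/test_example.py):
#   z = cvx.Variable((1, 5))
#   z.value = np.reshape(np.array([1, 2, 3, 4, 5]), (1, 5))
#   lin = linearize(cvx.square(z))  # elementwise tangent of z**2 at z.value
#   print(lin.value)                # [[ 1.  4.  9. 16. 25.]] at the linearization point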
| 3,432
| 36.725275
| 117
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/dccp/test/test_example.py
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from dccp.test.base_test import BaseTest
import cvxpy as cvx
from dccp.objective import convexify_obj
from dccp.constraint import convexify_constr
from dccp.linearize import linearize
import dccp.problem
import dccp
import numpy as np
class TestExample(BaseTest):
""" Unit tests example. """
def setUp(self):
# Initialize things.
self.a = cvx.Variable(1)
self.x = cvx.Variable(2)
self.y = cvx.Variable(2)
self.z = cvx.Variable(2)
def test_readme_example(self):
"""
Test the example in the readme.
self.sol - All known possible solutions to the problem in the readme.
"""
self.sol = [[0, 0], [0, 1], [1, 0], [1, 1]]
myprob = cvx.Problem(
cvx.Maximize(cvx.norm(self.y - self.z, 2)),
[0 <= self.y, self.y <= 1, 0 <= self.z, self.z <= 1],
)
assert not myprob.is_dcp() # false
assert dccp.is_dccp(myprob) # true
result = myprob.solve(method="dccp")
# print(self.y.value, self.z.value)
self.assertIsAlmostIn(self.y.value, self.sol)
self.assertIsAlmostIn(self.z.value, self.sol)
self.assertAlmostEqual(result[0], np.sqrt(2))
def test_linearize(self):
"""
Test the linearize function.
"""
z = cvx.Variable((1, 5))
expr = cvx.square(z)
z.value = np.reshape(np.array([1, 2, 3, 4, 5]), (1, 5))
lin = linearize(expr)
self.assertEqual(lin.shape, (1, 5))
self.assertItemsAlmostEqual(lin.value, [1, 4, 9, 16, 25])
def test_convexify_obj(self):
"""
Test convexify objective
"""
obj = cvx.Maximize(cvx.sum(cvx.square(self.x)))
self.x.value = [1, 1]
obj_conv = convexify_obj(obj)
prob_conv = cvx.Problem(obj_conv, [self.x <= -1])
prob_conv.solve()
self.assertAlmostEqual(prob_conv.value, -6)
obj = cvx.Minimize(cvx.sqrt(self.a))
self.a.value = [1]
obj_conv = convexify_obj(obj)
prob_conv = cvx.Problem(obj_conv, cvx.sqrt(self.a).domain)
prob_conv.solve()
self.assertAlmostEqual(prob_conv.value, 0.5)
def test_convexify_constr(self):
"""
Test convexify constraint
"""
constr = cvx.norm(self.x) >= 1
self.x.value = [1, 1]
constr_conv = convexify_constr(constr)
prob_conv = cvx.Problem(cvx.Minimize(cvx.norm(self.x)), [constr_conv[0]])
prob_conv.solve()
self.assertAlmostEqual(prob_conv.value, 1)
constr = cvx.sqrt(self.a) <= 1
self.a.value = [1]
constr_conv = convexify_constr(constr)
prob_conv = cvx.Problem(
cvx.Minimize(self.a), [constr_conv[0], constr_conv[1][0]]
)
prob_conv.solve()
self.assertAlmostEqual(self.a.value[0], 0)
def test_vector_constr(self):
"""
        Test DCCP with vector constraints.
"""
prob = cvx.Problem(cvx.Minimize(self.x[0]), [self.x >= 0])
# doesn't crash with solver params.
result = prob.solve(method="dccp", verbose=True)
self.assertAlmostEqual(result[0], 0)
| 3,862
| 32.301724
| 81
|
py
|
Metrizing-Fairness
|
Metrizing-Fairness-main/offline_experiments/src/dccp-master/dccp/test/base_test.py
|
# Base class for unit tests.
import unittest
import numpy as np
class BaseTest(unittest.TestCase):
# AssertAlmostEqual for lists.
def assertItemsAlmostEqual(self, a, b, places=5):
a = self.mat_to_list(a)
b = self.mat_to_list(b)
for i in range(len(a)):
self.assertAlmostEqual(a[i], b[i], places)
    # Overridden method to assume lower accuracy.
def assertAlmostEqual(self, a, b, places=5):
super(BaseTest, self).assertAlmostEqual(a, b, places=places)
def mat_to_list(self, mat):
"""Convert a numpy matrix to a list.
"""
if isinstance(mat, (np.matrix, np.ndarray)):
return np.asarray(mat).flatten("F").tolist()
else:
return mat
# Test function to check if computed solution, comp_sol, is approximately equal to any of the possible solutions, sols.
def assertIsAlmostIn(self, comp_sol, sols, tolerance=0.000001):
"""
Input: comp_sol - the computed solution in the optimization problem
sols - list of all possible solutions
tolerance - tolerance that they are almost equal
"""
comp_sol = self.mat_to_list(comp_sol)
sols = self.mat_to_list(sols)
truth = [
np.linalg.norm(np.asarray(comp_sol) - np.asarray(sol_ex)) < tolerance
for sol_ex in sols
]
self.assertTrue(any(truth))
| 1,417
| 34.45
| 123
|
py
|