| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
SOLikeT
|
SOLikeT-master/soliket/gaussian_data.py
|
import numpy as np
try:
import holoviews as hv
except ImportError:
pass
from scipy.linalg import cholesky, LinAlgError
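# Gaussian log-likelihood: for data vector d, theory vector t and covariance C,
#   ln L = -0.5 * (d - t)^T C^{-1} (d - t) - 0.5 * ln|C| - (n / 2) * ln(2 * pi),
# evaluated below with a precomputed inverse covariance and log-determinant.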
def multivariate_normal_logpdf(theory, data, cov, inv_cov, log_det):
const = np.log(2 * np.pi) * (-len(data) / 2) + log_det * (-1 / 2)
delta = data - theory
#print(const,delta,np.dot(delta, inv_cov.dot(delta)))
return -0.5 * np.dot(delta, inv_cov.dot(delta)) + const
class GaussianData:
"""Named multivariate gaussian data
"""
def __init__(self, name, x, y, cov, ncovsims=None):
self.name = str(name)
self.ncovsims = ncovsims
if not (len(x) == len(y) and cov.shape == (len(x), len(x))):
raise ValueError(f"Incompatible shapes! x={x.shape}, y={y.shape}, \
cov={cov.shape}")
self.x = x
self.y = y
self.cov = cov
try:
self.cholesky = cholesky(cov)
except LinAlgError:
raise ValueError("Covariance is not SPD!")
if ncovsims is None:
self.inv_cov = np.linalg.inv(self.cov)
else:
hartlap_factor = (self.ncovsims - len(x) - 2) / (self.ncovsims - 1)
self.inv_cov = hartlap_factor * np.linalg.inv(self.cov)
self.log_det = np.linalg.slogdet(self.cov)[1]
def __len__(self):
return len(self.x)
def loglike(self, theory):
return multivariate_normal_logpdf(theory, self.y, self.cov, self.inv_cov,
self.log_det)
class MultiGaussianData(GaussianData):
"""
Parameters
----------
data_list : list
List of Data objects
cross_covs : dictionary
Cross-covariances, keyed by (name1, name2) tuples.
"""
def __init__(self, data_list, cross_covs=None):
if cross_covs is None:
cross_covs = {}
# Ensure all cross-covs are proper shape, and fill with zeros if not present
for d1 in data_list:
for d2 in data_list:
key = (d1.name, d2.name)
if d1 == d2:
cross_covs[key] = d1.cov
rev_key = (d2.name, d1.name)
if key in cross_covs:
cov = cross_covs[key]
if not cov.shape == (len(d1), len(d2)):
raise ValueError(
f"Cross-covariance (for {d1.name} x {d2.name}) \
has wrong shape: {cov.shape}!"
)
elif rev_key in cross_covs:
cross_covs[key] = cross_covs[rev_key].T
else:
cross_covs[key] = np.zeros((len(d1), len(d2)))
self.data_list = data_list
self.lengths = [len(d) for d in data_list]
self.names = [d.name for d in data_list]
self.cross_covs = cross_covs
self._data = None
@property
def data(self):
if self._data is None:
self._assemble_data()
return self._data
def loglike(self, theory):
return self.data.loglike(theory)
@property
def name(self):
return self.data.name
@property
def cov(self):
return self.data.cov
@property
def inv_cov(self):
return self.data.inv_cov
@property
def log_det(self):
return self.data.log_det
@property
def labels(self):
return [x for y in [[name] * len(d) for
name, d in zip(self.names, self.data_list)] for x in y]
def _index_range(self, name):
if name not in self.names:
raise ValueError(f"{name} not in {self.names}!")
i0 = 0
for n, length in zip(self.names, self.lengths):
if n == name:
i1 = i0 + length
break
i0 += length
return i0, i1
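# _slice(name1, name2) builds the 2D slice selecting the (name1, name2) block of
# the stacked covariance assembled in _assemble_data below.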
def _slice(self, *names):
if isinstance(names, str):
names = [names]
return np.s_[tuple(slice(*self._index_range(n)) for n in names)]
def _assemble_data(self):
x = np.concatenate([d.x for d in self.data_list])
y = np.concatenate([d.y for d in self.data_list])
N = sum([len(d) for d in self.data_list])
cov = np.zeros((N, N))
for n1 in self.names:
for n2 in self.names:
cov[self._slice(n1, n2)] = self.cross_covs[(n1, n2)]
self._data = GaussianData(" + ".join(self.names), x, y, cov)
def plot_cov(self, **kwargs):
data = [
(f"{li}: {self.data.x[i]}", f"{lj}: {self.data.x[j]}", self.cov[i, j])
for i, li in zip(range(len(self.data)), self.labels)
for j, lj in zip(range(len(self.data)), self.labels)
]
return hv.HeatMap(data).opts(tools=["hover"], width=800, height=800,
invert_yaxis=True, xrotation=90)
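# Illustrative usage (a minimal sketch; names and values here are examples only):
#     d = GaussianData("demo", np.arange(3), np.zeros(3), np.eye(3))
#     d.loglike(np.zeros(3))   # -> -1.5 * np.log(2 * np.pi), since delta = 0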
| 4,889
| 28.281437
| 84
|
py
|
SOLikeT
|
SOLikeT-master/soliket/poisson_data.py
|
import numpy as np
import pandas as pd
class PoissonData:
"""Poisson-process-generated data.
Parameters
----------
catalog : pd.DataFrame
Catalog of observed data.
columns : list
Columns of catalog relevant for computing poisson rate.
samples : dict, optional
Each entry is an N_cat x N_samples array of posterior samples;
in addition, it must have a 'prior' entry of the same shape giving the value of the
interim prior for each sample.
"""
def __init__(self, name, catalog, columns, samples=None):
self.name = str(name)
self.catalog = pd.DataFrame(catalog)[columns]
self.columns = columns
if samples is not None:
for c in columns:
if c not in samples:
raise ValueError("If providing samples, must have samples \
for all columns: {}".format(columns))
if "prior" not in samples:
raise ValueError('Must provide value of interim prior \
for all samples, under "prior" key!')
assert all(
[samples[k].shape == samples["prior"].shape for k in samples]
), "Samples all need same shape!"
self.N_k = samples["prior"].shape[1]
self._len = samples["prior"].shape[0]
else:
self._len = len(self.catalog)
self.samples = samples
def __len__(self):
return self._len
def loglike(self, rate_fn, n_expected, broadcastable=False):
"""Computes log-likelihood of data under poisson process model
rate_fn returns the *observed rate* as a function of self.columns
(must be able to take all of self.columns as keywords, and be broadcastable
(though could make this an option))
n_expected is predicted total number
"""
# Simple case; no uncertainties
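# ln L = -n_expected + sum_i ln rate(x_i) for an inhomogeneous Poisson process
# (the rate is evaluated at each catalog entry below).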
if self.samples is None:
if broadcastable:
rate_densities = rate_fn(**{c: self.catalog[c].values for
c in self.columns})
else:
rate_densities = np.array(
[
rate_fn(**{c: self.catalog[c].values[i] for c in self.columns})
for i in range(len(self))
]
)
return -n_expected + sum(np.log(rate_densities))
else:
# Eqn (11) of DFM, Hogg & Morton (https://arxiv.org/pdf/1406.3020.pdf)
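# l_k importance-samples the rate over each object's posterior samples, reweighting
# by the interim prior: l_k = (1 / N_k) * sum_j rate(theta_kj) / p_interim(theta_kj)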
summand = rate_fn(**{c: self.samples[c] for
c in self.columns}) / self.samples["prior"]
l_k = 1 / self.N_k * summand.sum(axis=1)
assert l_k.shape == (self._len,)
return -n_expected + sum(np.log(l_k))
| 2,851
| 34.209877
| 87
|
py
|
SOLikeT
|
SOLikeT-master/soliket/utils.py
|
from importlib import import_module
from scipy.stats import binned_statistic as binnedstat
import numpy as np
from cobaya.likelihood import Likelihood
from cobaya.likelihoods.one import one
def binner(ls, cls, bin_edges):
x = ls.copy()
y = cls.copy()
cents = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_edges_min = bin_edges.min()
bin_edges_max = bin_edges.max()
y[x < bin_edges_min] = 0
y[x > bin_edges_max] = 0
bin_means = binnedstat(x, y, bins=bin_edges, statistic=np.nanmean)[0]
return cents, bin_means
def get_likelihood(name, options=None):
parts = name.split(".")
module = import_module(".".join(parts[:-1]))
t = getattr(module, parts[-1])
if not issubclass(t, Likelihood):
raise ValueError(f"{name} is not a Likelihood!")
if options is None:
options = {}
return t(options)
class OneWithCls(one):
lmax = 10000
def get_requirements(self):
return {"Cl": {"pp": self.lmax,
"tt": self.lmax,
"te": self.lmax,
"ee": self.lmax,
"bb": self.lmax, }}
| 1,140
| 26.166667
| 73
|
py
|
SOLikeT
|
SOLikeT-master/soliket/bandpass.py
|
r"""
.. module:: bandpass
This module computes the bandpass transmission based on the inputs from
the parameter file ``BandPass.yaml``. There are three possibilities:
* reading the passband :math:`\tau(\nu)` stored in a sacc file
(now the default, as it is the mflike default)
* building the passbands :math:`\tau(\nu)`, either as Dirac delta or as top-hat
* reading the passbands :math:`\tau(\nu)` from an external file.
For the first option, the ``read_from_sacc`` option in ``BandPass.yaml``
has to be set to ``True``:
.. code-block:: yaml
read_from_sacc: True
Otherwise, it has to be left empty. The frequencies and passbands are passed in a
``bands`` dictionary, which is passed from ``Foregrounds`` through ``TheoryForge``.
For the second option, the ``top_hat_band`` dictionary in ``BandPass.yaml``
has to be filled with two keys:
* ``nsteps``: setting the number of frequencies used in the band integration
(either 1 for a Dirac delta or > 1)
* ``bandwidth``: setting the relative width :math:`\delta` of the band with respect to
the central frequency, such that the frequency extremes are
:math:`\nu_{\rm{low/high}} = \nu_{\rm{center}}(1 \mp \delta/2) +
\Delta^{\nu}_{\rm band}` (with :math:`\Delta^{\nu}_{\rm band}`
being the possible bandpass shift). ``bandwidth`` has to be 0
if ``nsteps`` = 1, > 0 otherwise. ``bandwidth`` can be a list
if you want a different width for each band
e.g. ``bandwidth: [0.3,0.2,0.3]`` for 3 bands.
The effective frequencies are read from the ``bands`` dictionary as before. If we are not
using passbands from a sacc file, ``bands`` is filled in ``Foreground`` using the default
``eff_freqs`` in ``Foreground.yaml``. In this case it is filled assuming a Dirac delta.
.. code-block:: yaml
top_hat_band:
nsteps: 1
bandwidth: 0
For the third option, the ``external_bandpass`` dictionary in ``BandPass.yaml``
has to have the ``path`` key, representing the path to the folder with all the
passbands.
.. code-block:: yaml
external_bandpass:
path: "path_of_passband_folder"
The path has to be relative to the ``data_folder`` in ``BandPass.yaml``.
This folder has to contain text files named after the experiment or array and the
nominal frequency of the channel, e.g. ``LAT_93`` or ``dr6_pa4_f150``; no other
extension should be added to the file name. These files must contain the frequencies
in the first column and the passband in the second column.
To avoid the options you don't want to select, the corresponding dictionary has to be
``null``.
If all dictionaries are ``null``, an error message will be raised inviting you to
choose one of the three options.
The bandpass transmission is built as
.. math::
\frac{\frac{\partial B_{\nu+\Delta \nu}}
{\partial T} \tau(\nu+\Delta \nu)}{\int d\nu\frac{\partial
B_{\nu+\Delta \nu}}{\partial T} \tau(\nu+\Delta \nu)}
where
.. math::
&\frac{\partial B_{\nu}}{\partial T} \propto \frac{x^2 e^x \nu^2}{(e^x-1)^2} \\
& x \equiv \frac{h \nu}{k_B T_{CMB}}
which converts from CMB thermodynamic temperature to differential source intensity
(see eq.8 of https://arxiv.org/abs/1303.5070).
The passband :math:`\tau(\nu)` has to be divided by :math:`\nu^2` if it has been
measured with respect to a Rayleigh-Jeans (RJ) source and :math:`\Delta \nu` is the
possible bandpass shift for that channel.
"""
import numpy as np
import os
from typing import Optional
from cobaya.theory import Theory
from cobaya.tools import are_different_params_lists
from cobaya.log import LoggedError
from .constants import T_CMB, h_Planck, k_Boltzmann
# Converts from CMB units to brightness.
# Numerical factors are not included; proper normalization is needed when it is used.
def _cmb2bb(nu):
r"""
Computes the conversion factor :math:`\frac{\partial B_{\nu}}{\partial T}`
from CMB thermodynamic units to differential source intensity.
Passbands measured with respect to a RJ source have to be divided by a
:math:`\nu^2` factor.
Numerical constants are not included, which is not a problem when using this
conversion both at numerator and denominator.
:param nu: frequency array
:return: the array :math:`\frac{\partial B_{\nu}}{\partial T}`. See note above.
"""
# NB: numerical factors not included
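# nu is in GHz, hence the 1e9 factor converting to Hz; the expression below equals
# nu^2 * x^2 * e^x / (e^x - 1)^2 up to constant factors, as stated in the docstring.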
x = nu * h_Planck * 1e9 / k_Boltzmann / T_CMB
return np.exp(x) * (nu * x / np.expm1(x))**2
class BandPass(Theory):
# attributes set from .yaml
data_folder: Optional[str]
read_from_sacc: dict
top_hat_band: dict
external_bandpass: dict
def initialize(self):
self.expected_params_bp = ["bandint_shift_LAT_93",
"bandint_shift_LAT_145",
"bandint_shift_LAT_225"]
self.exp_ch = None
#self.eff_freqs = None
self.bands = None
# To read passbands stored in the sacc files
# default for mflike
self.read_from_sacc = bool(self.read_from_sacc)
# Parameters for band integration
self.use_top_hat_band = bool(self.top_hat_band)
if self.use_top_hat_band:
self.bandint_nsteps = self.top_hat_band["nsteps"]
self.bandint_width = self.top_hat_band["bandwidth"]
self.bandint_external_bandpass = bool(self.external_bandpass)
if self.bandint_external_bandpass:
path = os.path.normpath(os.path.join(self.data_folder,
self.external_bandpass["path"]))
arrays = os.listdir(path)
self._init_external_bandpass_construction(path, arrays)
if (not self.read_from_sacc and not self.use_top_hat_band
and not self.bandint_external_bandpass):
raise LoggedError(
self.log, "fill the dictionaries in the yaml file for "
"either reading the passband from the sacc file (mflike default), "
"reading an external passband, or building a top-hat one!"
)
def initialize_with_params(self):
# Check that the parameters are the right ones
differences = are_different_params_lists(
self.input_params, self.expected_params_bp,
name_A="given", name_B="expected")
if differences:
raise LoggedError(
self.log, "Configuration error in parameters: %r.",
differences)
def must_provide(self, **requirements):
# bandint_freqs is required by Foreground
# and requires some params to be computed
# Assign those from Foreground
if "bandint_freqs" in requirements:
self.bands = requirements["bandint_freqs"]["bands"]
self.exp_ch = [k.replace("_s0", "") for k in self.bands.keys()
if "_s0" in k]
def calculate(self, state, want_derived=False, **params_values_dict):
r"""
Adds the bandpass transmission to the ``state`` dictionary of the
BandPass Theory class.
:param *params_values_dict: dictionary of nuisance parameters
"""
nuis_params = {k: params_values_dict[k] for k in self.expected_params_bp}
# Bandpass construction
if self.bandint_external_bandpass:
self.bandint_freqs = self._external_bandpass_construction(**nuis_params)
if self.read_from_sacc or self.use_top_hat_band:
self.bandint_freqs = self._bandpass_construction(**nuis_params)
state["bandint_freqs"] = self.bandint_freqs
def get_bandint_freqs(self):
"""
Returns the ``state`` dictionary of bandpass transmissions
"""
return self.current_state["bandint_freqs"]
# Takes care of the bandpass construction. It returns a list of nu-transmittance for
# each frequency or an array with the effective freqs.
def _bandpass_construction(self, **params):
r"""
Builds the bandpass transmission
:math:`\frac{\frac{\partial B_{\nu+\Delta \nu}}{\partial T}
\tau(\nu+\Delta \nu)}{\int d\nu
\frac{\partial B_{\nu+\Delta \nu}}{\partial T} \tau(\nu+\Delta \nu)}`
using passbands :math:`\tau(\nu)` (divided by :math:`\nu^2` if
measured with respect to a RJ source, not read from a txt
file) and bandpass shift :math:`\Delta \nu`. If ``read_from_sacc = True``
(the default), :math:`\tau(\nu)` has been read from the sacc file
and passed through ``Foreground`` from ``TheoryForge``.
If ``use_top_hat_band``, :math:`\tau(\nu)` is built as a top-hat
with width ``bandint_width`` and number of samples ``nsteps``,
read from the ``BandPass.yaml``.
If ``nsteps = 1`` and ``bandint_width = 0``, the passband is a Dirac delta
centered at :math:`\nu+\Delta \nu`.
:param *params: dictionary of nuisance parameters
:return: the list of [nu, transmission] in the multifrequency case
or just an array of frequencies in the single frequency one
"""
data_are_monofreq = False
bandint_freqs = []
for ifr, fr in enumerate(self.exp_ch):
bandpar = 'bandint_shift_' + str(fr)
bands = self.bands[f"{fr}_s0"]
nu_ghz, bp = np.asarray(bands["nu"]), np.asarray(bands["bandpass"])
if self.use_top_hat_band:
# checks on the bandpass input params
if not hasattr(self.bandint_width, "__len__"):
self.bandint_width = np.full_like(
self.exp_ch, self.bandint_width, dtype=float
)
if self.bandint_nsteps > 1 and np.any(np.array(self.bandint_width) == 0):
raise LoggedError(
self.log, "One band has width = 0, set a positive width and run again"
)
# Compute central frequency given bandpass
fr = nu_ghz @ bp / bp.sum()
if self.bandint_nsteps > 1:
bandlow = fr * (1 - self.bandint_width[ifr] * .5)
bandhigh = fr * (1 + self.bandint_width[ifr] * .5)
nub = np.linspace(bandlow + params[bandpar],
bandhigh + params[bandpar],
self.bandint_nsteps, dtype=float)
tranb = _cmb2bb(nub)
tranb_norm = np.trapz(_cmb2bb(nub), nub)
bandint_freqs.append([nub, tranb / tranb_norm])
if self.bandint_nsteps == 1:
nub = fr + params[bandpar]
data_are_monofreq = True
bandint_freqs.append(nub)
if self.read_from_sacc:
nub = nu_ghz + params[bandpar]
if len(bp) == 1:
# Monofrequency channel
data_are_monofreq = True
bandint_freqs.append(nub[0])
else:
trans_norm = np.trapz(bp * _cmb2bb(nub), nub)
trans = bp / trans_norm * _cmb2bb(nub)
bandint_freqs.append([nub, trans])
if data_are_monofreq:
bandint_freqs = np.asarray(bandint_freqs)
self.log.info("bandpass is delta function, no band integration performed")
return bandint_freqs
def _init_external_bandpass_construction(self, path, exp_ch):
"""
Initializes the passband reading for ``_external_bandpass_construction``.
:param exp_ch: list of the frequency channels
:param path: path of the passband txt file
"""
self.external_bandpass = []
for expc in exp_ch:
print(path, expc)
nu_ghz, bp = np.loadtxt(path + "/" + expc, usecols=(0, 1), unpack=True)
self.external_bandpass.append([expc, nu_ghz, bp])
def _external_bandpass_construction(self, **params):
r"""
Builds bandpass transmission
:math:`\frac{\frac{\partial B_{\nu+\Delta \nu}}{\partial T}
\tau(\nu+\Delta \nu)}{\int d\nu
\frac{\partial B_{\nu+\Delta \nu}}{\partial T} \tau(\nu+\Delta \nu)}`
using passbands :math:`\tau(\nu)` (divided by :math:`\nu^2` if measured
with respect to a RJ source) read from
an external txt file and
possible bandpass shift parameters :math:`\Delta \nu`.
:param *params: dictionary of nuisance parameters
:return: the list of [nu, transmission] or array of effective freqs
if the passbands read are monofrequency.
"""
bandint_freqs = []
for expc, nu_ghz, bp in self.external_bandpass:
bandpar = "bandint_shift_" + expc
nub = nu_ghz + params[bandpar]
if not hasattr(bp, "__len__"):
bandint_freqs.append(nub)
bandint_freqs = np.asarray(bandint_freqs)
self.log.info("bandpass is delta function, no band integration performed")
else:
trans_norm = np.trapz(bp * _cmb2bb(nub), nub)
trans = bp / trans_norm * _cmb2bb(nub)
bandint_freqs.append([nub, trans])
return bandint_freqs
| 13,271
| 39.711656
| 91
|
py
|
SOLikeT
|
SOLikeT-master/soliket/gaussian.py
|
import numpy as np
from typing import Optional, Sequence
from cobaya.likelihood import Likelihood
from cobaya.input import merge_info
from cobaya.tools import recursive_update
from cobaya.typing import empty_dict
from .gaussian_data import GaussianData, MultiGaussianData
from .utils import get_likelihood
class GaussianLikelihood(Likelihood):
name: str = "Gaussian"
datapath: Optional[str] = None
covpath: Optional[str] = None
ncovsims: Optional[int] = None
def initialize(self):
x, y = self._get_data()
cov = self._get_cov()
self.data = GaussianData(self.name, x, y, cov, self.ncovsims)
def _get_data(self):
x, y = np.loadtxt(self.datapath, unpack=True)
return x, y
def _get_cov(self):
cov = np.loadtxt(self.covpath)
return cov
def _get_theory(self, **kwargs):
raise NotImplementedError
def logp(self, **params_values):
theory = self._get_theory(**params_values)
return self.data.loglike(theory)
class CrossCov(dict):
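# Cross-covariance blocks keyed by (name1, name2) tuples; saved to .npz with the
# tuple keys stringified, and eval'd back into tuples on load.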
def save(self, path):
np.savez(path, **{str(k): v for k, v in self.items()})
@classmethod
def load(cls, path):
if path is None:
return None
return cls({eval(k): v for k, v in np.load(path).items()})
class MultiGaussianLikelihood(GaussianLikelihood):
components: Optional[Sequence] = None
options: Optional[Sequence] = None
cross_cov_path: Optional[str] = None
def __init__(self, info=empty_dict, **kwargs):
if 'components' in info:
self.likelihoods = [get_likelihood(*kv) for kv in zip(info['components'],
info['options'])]
default_info = merge_info(*[like.get_defaults() for like in self.likelihoods])
default_info.update(info)
super().__init__(info=default_info, **kwargs)
def initialize(self):
self.cross_cov = CrossCov.load(self.cross_cov_path)
data_list = [like.data for like in self.likelihoods]
self.data = MultiGaussianData(data_list, self.cross_cov)
self.log.info('Initialized.')
def initialize_with_provider(self, provider):
for like in self.likelihoods:
like.initialize_with_provider(provider)
# super().initialize_with_provider(provider)
def get_helper_theories(self):
helpers = {}
for like in self.likelihoods:
helpers.update(like.get_helper_theories())
return helpers
def _get_theory(self, **kwargs):
return np.concatenate([like._get_theory(**kwargs) for like in self.likelihoods])
def get_requirements(self):
# Reqs with arguments like 'lmax', etc. may have to be carefully treated here to
# merge
reqs = {}
for like in self.likelihoods:
new_reqs = like.get_requirements()
# Deal with special cases requiring careful merging
# Make sure the max of the lmax/union of Cls is taken.
# (should make a unit test for this)
if "Cl" in new_reqs and "Cl" in reqs:
new_cl_spec = new_reqs["Cl"]
old_cl_spec = reqs["Cl"]
merged_cl_spec = {}
all_keys = set(new_cl_spec.keys()).union(set(old_cl_spec.keys()))
for k in all_keys:
new_lmax = new_cl_spec.get(k, 0)
old_lmax = old_cl_spec.get(k, 0)
merged_cl_spec[k] = max(new_lmax, old_lmax)
new_reqs["Cl"] = merged_cl_spec
reqs = recursive_update(reqs, new_reqs)
return reqs
| 3,654
| 31.061404
| 88
|
py
|
SOLikeT
|
SOLikeT-master/soliket/poisson.py
|
import pandas as pd
from cobaya.likelihood import Likelihood
from .poisson_data import PoissonData
class PoissonLikelihood(Likelihood):
name = "Poisson"
data_path = None
columns = None
def initialize(self):
catalog = self._get_catalog()
if self.columns is None:
self.columns = catalog.columns
self.data = PoissonData(self.name, catalog, self.columns)
def get_requirements(self):
return {}
def _get_catalog(self):
catalog = pd.read_csv(self.data_path)
return catalog
def _get_rate_fn(self, **kwargs):
"""Returns a callable rate function that takes each of 'columns' as kwargs.
"""
raise NotImplementedError
def _get_n_expected(self, **kwargs):
"""Computes and returns the integral of the rate function
"""
raise NotImplementedError
def logp(self, **params_values):
rate_fn = self._get_rate_fn(**params_values)
n_expected = self._get_n_expected(**params_values)
return self.data.loglike(rate_fn, n_expected)
| 1,084
| 26.125
| 83
|
py
|
SOLikeT
|
SOLikeT-master/soliket/__init__.py
|
from .lensing import LensingLiteLikelihood, LensingLikelihood # noqa: F401
from .gaussian import GaussianLikelihood, MultiGaussianLikelihood # noqa: F401
# from .studentst import StudentstLikelihood # noqa: F401
from .ps import PSLikelihood, BinnedPSLikelihood # noqa: F401
from .mflike import MFLike # noqa: F401
from .mflike import TheoryForge_MFLike
from .xcorr import XcorrLikelihood # noqa: F401
from .foreground import Foreground
from .bandpass import BandPass
from .cosmopower import CosmoPower, CosmoPowerDerived
try:
from .clusters import ClusterLikelihood # noqa: F401
except ImportError:
print('Skipping cluster likelihood (is pyCCL installed?)')
pass
try:
import pyccl as ccl # noqa: F401
from .ccl import CCL # noqa: F401
from .cross_correlation import GalaxyKappaLikelihood, ShearKappaLikelihood # noqa: F401, E501
except ImportError:
print('Skipping CCL module as pyCCL is not installed')
pass
| 953
| 37.16
| 98
|
py
|
SOLikeT
|
SOLikeT-master/soliket/cosmopower.py
|
"""
.. module:: soliket.cosmopower
:Synopsis: Simple CosmoPower theory wrapper for Cobaya.
:Author: Hidde T. Jense
.. |br| raw:: html
<br />
.. note::
**If you use this cosmological code, please cite:**
|br|
A. Spurio Mancini et al.
*CosmoPower: emulating cosmological power spectra for accelerated Bayesian
inference from next-generation surveys*
(`arXiv:210603846 <https://arxiv.org/abs/2106.03846>`_)
And remember to cite any sources for trained networks you use.
Usage
-----
After installing SOLikeT and cosmopower, you can use the ``CosmoPower`` theory
codes by adding the ``soliket.CosmoPower`` code as a block in your parameter
files.
Example: CMB emulators
----------------------
You can get the example CMB emulators from the
`cosmopower release repository
<https://github.com/alessiospuriomancini/cosmopower/tree/main/cosmopower/trained_models/CP_paper>`_.
After downloading these, you should have a directory structure like:
.. code-block:: bash
/path/to/cosmopower/data
├── cmb_TT_NN.pkl
├── cmb_TE_PCAplusNN.pkl
└── cmb_EE_NN.pkl
With these and with ``soliket.CosmoPower`` installed and visible to cobaya, you
can add it as a theory block to your run yaml as:
.. code-block:: yaml
theory:
soliket.CosmoPower:
network_path: /path/to/cosmopower/data
network_settings:
tt:
type: NN
filename: cmb_TT_NN
te:
type: PCAplusNN
filename: cmb_TE_PCAplusNN
log: False
ee:
type: NN
filename: cmb_EE_NN
Running this with cobaya will use ``soliket.CosmoPower`` as a theory to
calculate the CMB Cl's from the emulators.
If you want to add the example PP networks as well, you can do that simply with
a block as:
.. code-block:: yaml
theory:
soliket.CosmoPower:
network_path: /path/to/cosmopower/data
network_settings:
[...]
pp:
type: PCAplusNN
filename: cmb_PP_PCAplusNN
In this example, the TT, EE and PP networks output :math:`\log(C_\ell)`, hence
the default (``log: True``) is left, while the TE network outputs the
:math:`C_\ell` values directly. The TT and EE networks use the CosmoPower
Neural Network (NN) type emulators, while TE and PP use the Principal Component
Analysis + NN (PCAplusNN) types.
SOLikeT will automatically use the correct conversion prefactors
:math:`\ell (\ell + 1) / 2 \pi` terms and similar, as well as the CMB
temperature. See the :func:`~soliket.cosmopower.CosmoPower.ell_factor` and
:func:`~soliket.cosmopower.CosmoPower.cmb_unit_factor` functions for more
information on how SOLikeT infers these values.
"""
import os
try:
import cosmopower as cp # noqa F401
except ImportError:
HAS_COSMOPOWER = False
else:
HAS_COSMOPOWER = True
import numpy as np
from typing import Dict, Iterable, Tuple
from cobaya.log import LoggedError
from cobaya.theory import Theory
from cobaya.theories.cosmo import BoltzmannBase
from cobaya.typing import InfoDict
class CosmoPower(BoltzmannBase):
"""A CosmoPower Network wrapper for Cobaya."""
def initialize(self) -> None:
super().initialize()
if self.network_settings is None:
raise LoggedError("No network settings were provided.")
self.networks = {}
self.all_parameters = set([])
for spectype in self.network_settings:
netdata = {}
nettype = self.network_settings[spectype]
netpath = os.path.join(self.network_path, nettype["filename"])
if nettype["type"] == "NN":
network = cp.cosmopower_NN(
restore=True, restore_filename=netpath)
elif nettype["type"] == "PCAplusNN":
network = cp.cosmopower_PCAplusNN(
restore=True, restore_filename=netpath)
elif self.stop_at_error:
raise ValueError(
f"Unknown network type {nettype['type']} for network {spectype}.")
else:
self.log.warn(
f"Unknown network type {nettype['type']} "
f"for network {spectype}: skipped!")
continue
netdata["type"] = nettype["type"]
netdata["log"] = nettype.get("log", True)
netdata["network"] = network
netdata["parameters"] = list(network.parameters)
netdata["lmax"] = network.modes.max()
netdata["has_ell_factor"] = nettype.get("has_ell_factor", False)
self.all_parameters = self.all_parameters | set(network.parameters)
if network is not None:
self.networks[spectype.lower()] = netdata
if "lmax" not in self.extra_args:
self.extra_args["lmax"] = None
self.log.info(f"Loaded CosmoPower from directory {self.network_path}")
self.log.info(
f"CosmoPower will expect the parameters {self.all_parameters}")
def calculate(self, state: dict, want_derived: bool = True, **params) -> bool:
## sadly, this syntax is not valid until Python 3.9
# cmb_params = {
# p: [params[p]] for p in params
# } | {
# self.translate_param(p): [params[p]] for p in params
# }
cmb_params = {**{
p: [params[p]] for p in params
}, **{
self.translate_param(p): [params[p]] for p in params
}}
ells = None
for spectype in self.networks:
network = self.networks[spectype]
used_params = {par: (cmb_params[par] if par in cmb_params else [
params[par]]) for par in network["parameters"]}
if network["log"]:
data = network["network"].ten_to_predictions_np(used_params)[
0, :]
else:
data = network["network"].predictions_np(used_params)[0, :]
state[spectype] = data
if ells is None:
ells = network["network"].modes
state["ell"] = ells.astype(int)
return True
def get_Cl(self, ell_factor: bool = False, units: str = "FIRASmuK2") -> dict:
cls_old = self.current_state.copy()
lmax = self.extra_args["lmax"] or cls_old["ell"].max()
cls = {"ell": np.arange(lmax + 1).astype(int)}
ls = cls_old["ell"]
for k in self.networks:
cls[k] = np.tile(np.nan, cls["ell"].shape)
for k in self.networks:
prefac = np.ones_like(ls).astype(float)
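# If the network was trained on ell-factor-scaled spectra (e.g. D_ell), divide that
# factor out to recover C_ell; re-apply it only if the caller requested ell_factor.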
if self.networks[k]["has_ell_factor"]:
prefac /= self.ell_factor(ls, k)
if ell_factor:
prefac *= self.ell_factor(ls, k)
cls[k][ls] = cls_old[k] * prefac * \
self.cmb_unit_factor(k, units, 2.7255)
cls[k][:2] = 0.0
if np.any(np.isnan(cls[k])):
self.log.warning("CosmoPower used outside of trained "
"{} ell range. Filled in with NaNs.".format(k))
return cls
def ell_factor(self, ls: np.ndarray, spectra: str) -> np.ndarray:
"""
Calculate the ell factor for a specific spectrum.
These prefactors are used to convert from Cell to Dell and vice-versa.
See also:
cobaya.BoltzmannBase.get_Cl
`camb.CAMBresults.get_cmb_power_spectra
<https://camb.readthedocs.io/en/latest/results.html#camb.results.CAMBdata.get_cmb_power_spectra>`_
Examples:
ell_factor(l, "tt") -> :math:`\ell ( \ell + 1 )/(2 \pi)`
ell_factor(l, "pp") -> :math:`\ell^2 ( \ell + 1 )^2/(2 \pi)`.
:param ls: the range of ells.
:param spectra: a two-character string with each character being one of [tebp].
:return: an array filled with ell factors for the given spectrum.
"""
ellfac = np.ones_like(ls).astype(float)
if spectra in ["tt", "te", "tb", "ee", "et", "eb", "bb", "bt", "be"]:
ellfac = ls * (ls + 1.0) / (2.0 * np.pi)
elif spectra in ["pt", "pe", "pb", "tp", "ep", "bp"]:
ellfac = (ls * (ls + 1.0)) ** (3. / 2.) / (2.0 * np.pi)
elif spectra in ["pp"]:
ellfac = (ls * (ls + 1.0)) ** 2.0 / (2.0 * np.pi)
return ellfac
def cmb_unit_factor(self, spectra: str,
units: str = "FIRASmuK2",
Tcmb: float = 2.7255) -> float:
"""
Calculate the CMB prefactor for going from dimensionless power spectra to
CMB units.
:param spectra: a length 2 string specifying the spectrum for which to
calculate the units.
:param units: a string specifying which units to use.
:param Tcmb: the used CMB temperature [units of K].
:return: The CMB unit conversion factor.
"""
res = 1.0
x, y = spectra.lower()
if x == "t" or x == "e" or x == "b":
res *= self._cmb_unit_factor(units, Tcmb)
elif x == "p":
res *= 1. / np.sqrt(2.0 * np.pi)
if y == "t" or y == "e" or y == "b":
res *= self._cmb_unit_factor(units, Tcmb)
elif y == "p":
res *= 1. / np.sqrt(2.0 * np.pi)
return res
def get_can_support_parameters(self) -> Iterable[str]:
return self.all_parameters
def get_requirements(self) -> Iterable[Tuple[str, str]]:
requirements = []
for k in self.all_parameters:
if k in self.renames.values():
for v in self.renames:
if self.renames[v] == k:
requirements.append((v, None))
break
else:
requirements.append((k, None))
return requirements
class CosmoPowerDerived(Theory):
"""A theory class that can calculate derived parameters from CosmoPower networks."""
def initialize(self) -> None:
super().initialize()
if self.network_settings is None:
raise LoggedError("No network settings were provided.")
netpath = os.path.join(self.network_path, self.network_settings["filename"])
if self.network_settings["type"] == "NN":
self.network = cp.cosmopower_NN(
restore=True, restore_filename=netpath)
elif self.network_settings["type"] == "PCAplusNN":
self.network = cp.cosmopower_PCAplusNN(
restore=True, restore_filename=netpath)
else:
raise LoggedError(
f"Unknown network type {self.network_settings['type']}.")
self.input_parameters = set(self.network.parameters)
self.log_data = self.network_settings.get("log", False)
self.log.info(
f"Loaded CosmoPowerDerived from directory {self.network_path}")
self.log.info(
f"CosmoPowerDerived will expect the parameters {self.input_parameters}")
self.log.info(
f"CosmoPowerDerived can provide the following parameters: \
{self.get_can_provide()}.")
def translate_param(self, p):
return self.renames.get(p, p)
def calculate(self, state: dict, want_derived: bool = True, **params) -> bool:
## sadly, this syntax is not valid until Python 3.9
# input_params = {
# p: [params[p]] for p in params
# } | {
# self.translate_param(p): [params[p]] for p in params
# }
input_params = {**{
p: [params[p]] for p in params
}, **{
self.translate_param(p): [params[p]] for p in params
}}
if self.log_data:
data = self.network.ten_to_predictions_np(input_params)[0, :]
else:
data = self.network.predictions_np(input_params)[0, :]
for k, v in zip(self.derived_parameters, data):
if len(k) == 0 or k == "_":
continue
state["derived"][k] = v
return True
def get_param(self, p) -> float:
return self.current_state["derived"][self.translate_param(p)]
def get_can_support_parameters(self) -> Iterable[str]:
return self.input_parameters
def get_requirements(self) -> Iterable[Tuple[str, str]]:
requirements = []
for k in self.input_parameters:
if k in self.renames.values():
for v in self.renames:
if self.renames[v] == k:
requirements.append((v, None))
break
else:
requirements.append((k, None))
return requirements
def get_can_provide(self) -> Iterable[str]:
return set([par for par in self.derived_parameters
if (len(par) > 0 and not par == "_")])
| 12,879
| 32.541667
| 106
|
py
|
SOLikeT
|
SOLikeT-master/soliket/bias.py
|
"""
.. module:: soliket.bias
:Synopsis: Class to calculate bias models for haloes and galaxies as cobaya
Theory classes.
:author: Ian Harrison
Usage
-----
To use the Linear Bias model, simply add it as a theory code alongside camb in
your run settings, e.g.:
.. code-block:: yaml
theory:
camb:
soliket.bias.linear_bias:
Implementing your own bias model
--------------------------------
If you want to add your own bias model, you can do so by inheriting from the
``soliket.Bias`` theory class and implementing your own custom ``calculate()``
function (have a look at the linear bias model for ideas).
"""
import numpy as np
from typing import Optional
from cobaya.theory import Theory
from cobaya.typing import InfoDict
class Bias(Theory):
"""Parent class for bias models."""
_logz = np.linspace(-3, np.log10(1100), 150)
_default_z_sampling = 10**_logz
_default_z_sampling[0] = 0
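# Default redshift grid: 150 log-spaced points from z ~ 1e-3 to z = 1100,
# with the first entry pinned to z = 0.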
def initialize(self):
self._var_pairs = set()
def get_requirements(self):
return {}
def must_provide(self, **requirements):
options = requirements.get("linear_bias") or {}
self.kmax = max(self.kmax, options.get("kmax", self.kmax))
self.z = np.unique(np.concatenate(
(np.atleast_1d(options.get("z", self._default_z_sampling)),
np.atleast_1d(self.z))))
# Dictionary of the things needed from CAMB/CLASS
needs = {}
self.nonlinear = self.nonlinear or options.get("nonlinear", False)
self._var_pairs.update(
set((x, y) for x, y in
options.get("vars_pairs", [("delta_tot", "delta_tot")])))
needs["Pk_grid"] = {
"vars_pairs": self._var_pairs or [("delta_tot", "delta_tot")],
"nonlinear": (True, False) if self.nonlinear else False,
"z": self.z,
"k_max": self.kmax
}
assert len(self._var_pairs) < 2, "Bias doesn't support other Pk yet"
return needs
def _get_Pk_mm(self):
for pair in self._var_pairs:
self.k, self.z, Pk_mm = \
self.provider.get_Pk_grid(var_pair=pair, nonlinear=self.nonlinear)
return Pk_mm
def get_Pk_gg_grid(self) -> dict:
return self._current_state["Pk_gg_grid"]
def get_Pk_gm_grid(self) -> dict:
return self._current_state["Pk_gm_grid"]
class Linear_bias(Bias):
"""
:Synopsis: Linear bias model.
Has one free parameter, :math:`b_\mathrm{lin}` (``b_lin``).
"""
def calculate(self, state: dict, want_derived: bool = True,
**params_values_dict) -> Optional[bool]:
Pk_mm = self._get_Pk_mm()
state["Pk_gg_grid"] = params_values_dict["b_lin"]**2. * Pk_mm
state["Pk_gm_grid"] = params_values_dict["b_lin"] * Pk_mm
| 2,862
| 26.528846
| 87
|
py
|
SOLikeT
|
SOLikeT-master/soliket/lensing/lensing.py
|
import os
from pkg_resources import resource_filename
import numpy as np
import sacc
from cobaya.likelihoods.base_classes import InstallableLikelihood
from cobaya.model import get_model
from cobaya.log import LoggedError
# from cobaya.install import NotInstalledError
from ..ps import BinnedPSLikelihood
class LensingLikelihood(BinnedPSLikelihood, InstallableLikelihood):
_url = "https://portal.nersc.gov/project/act/jia_qu/lensing_like/likelihood.tar.gz"
install_options = {"download_url": _url}
data_folder = "LensingLikelihood/"
data_filename = "clkk_reconstruction_sim.fits"
kind = "pp"
sim_number = 0
lmax = 3000
theory_lmax = 10000
fiducial_params = {
"ombh2": 0.02219218,
"omch2": 0.1203058,
"H0": 67.02393,
"tau": 0.6574325e-01,
"nnu": 3.046,
"As": 2.15086031154146e-9,
"ns": 0.9625356e00,
}
def initialize(self):
self.log.info("Initialising.")
# Set path to data
if ((not getattr(self, "path", None)) and
(not getattr(self, "packages_path", None))):
raise LoggedError(
self.log,
"No path given to LensingLikelihood data. "
"Set the likelihood property "
"'path' or 'packages_path'"
)
# If no path specified, use the modules path
data_file_path = os.path.normpath(
getattr(self, "path", None) or os.path.join(self.packages_path, "data")
)
self.data_folder = os.path.join(data_file_path, self.data_folder)
if not os.path.exists(self.data_folder):
if not getattr(self, "path", None):
self.install(path=self.packages_path)
else:
raise LoggedError(
self.log,
"The 'data_folder' directory does not exist. "\
"Check the given path [%s].",
self.data_folder,
)
# Set files where data/covariance are loaded from
self.datapath = os.path.join(self.data_folder, self.data_filename)
self.sacc = sacc.Sacc.load_fits(self.datapath)
x, y = self._get_data()
self.cov = self._get_cov()
self.binning_matrix = self._get_binning_matrix()
# Initialize fiducial PS
Cls = self._get_fiducial_Cls()
# Set the fiducial spectra
self.ls = np.arange(0, self.lmax)
self.fcltt = Cls["tt"][0: self.lmax]
self.fclpp = Cls["pp"][0: self.lmax]
self.fclee = Cls["ee"][0: self.lmax]
self.fclte = Cls["te"][0: self.lmax]
self.fclbb = Cls["bb"][0: self.lmax]
self.thetaclkk = self.fclpp * (self.ls * (self.ls + 1)) ** 2 * 0.25
# load the correction terms generated from the script n1so.py
self.N0cltt = np.loadtxt(os.path.join(self.data_folder, "n0mvdcltt1.txt")).T
self.N0clte = np.loadtxt(os.path.join(self.data_folder, "n0mvdclte1.txt")).T
self.N0clee = np.loadtxt(os.path.join(self.data_folder, "n0mvdclee1.txt")).T
self.N0clbb = np.loadtxt(os.path.join(self.data_folder, "n0mvdclbb1.txt")).T
self.N1clpp = np.loadtxt(os.path.join(self.data_folder, "n1mvdclkk1.txt")).T
self.N1cltt = np.loadtxt(os.path.join(self.data_folder, "n1mvdcltte1.txt")).T
self.N1clte = np.loadtxt(os.path.join(self.data_folder, "n1mvdcltee1.txt")).T
self.N1clee = np.loadtxt(os.path.join(self.data_folder, "n1mvdcleee1.txt")).T
self.N1clbb = np.loadtxt(os.path.join(self.data_folder, "n1mvdclbbe1.txt")).T
self.n0 = np.loadtxt(os.path.join(self.data_folder, "n0mv.txt"))
super().initialize()
def _get_fiducial_Cls(self):
info_fiducial = {
"params": self.fiducial_params,
"likelihood": {"soliket.utils.OneWithCls": {"lmax": self.theory_lmax}},
"theory": {"camb": {"extra_args": {"kmax": 0.9}}},
# "modules": modules_path,
}
model_fiducial = get_model(info_fiducial)
model_fiducial.logposterior({})
Cls = model_fiducial.provider.get_Cl(ell_factor=False)
return Cls
def get_requirements(self):
return {
"Cl": {
"pp": self.theory_lmax,
"tt": self.theory_lmax,
"te": self.theory_lmax,
"ee": self.theory_lmax,
"bb": self.theory_lmax,
}
}
def _get_data(self):
bin_centers, bandpowers, cov = \
self.sacc.get_ell_cl(None, 'ck', 'ck', return_cov=True)
self.x = bin_centers
self.y = bandpowers
return bin_centers, self.y
def _get_cov(self):
bin_centers, bandpowers, cov = \
self.sacc.get_ell_cl(None, 'ck', 'ck', return_cov=True)
self.cov = cov
return cov
def _get_binning_matrix(self):
bin_centers, bandpowers, cov, ind = \
self.sacc.get_ell_cl(None, 'ck', 'ck', return_cov=True, return_ind=True)
bpw = self.sacc.get_bandpower_windows(ind)
binning_matrix = bpw.weight.T
self.binning_matrix = binning_matrix
return binning_matrix
def _get_theory(self, **params_values):
cl = self.provider.get_Cl(ell_factor=False)
Cl_theo = cl["pp"][0: self.lmax]
Cl_tt = cl["tt"][0: self.lmax]
Cl_ee = cl["ee"][0: self.lmax]
Cl_te = cl["te"][0: self.lmax]
Cl_bb = cl["bb"][0: self.lmax]
ls = self.ls
Clkk_theo = (ls * (ls + 1)) ** 2 * Cl_theo * 0.25
Clkk_binned = self.binning_matrix.dot(Clkk_theo)
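# Linearised correction: propagate differences between the sampled and fiducial
# CMB and lensing spectra through the estimator normalisation (N0) and N1 bias terms.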
correction = (
2
* (self.thetaclkk / self.n0)
* (
np.dot(self.N0cltt, Cl_tt - self.fcltt)
+ np.dot(self.N0clee, Cl_ee - self.fclee)
+ np.dot(self.N0clbb, Cl_bb - self.fclbb)
+ np.dot(self.N0clte, Cl_te - self.fclte)
)
+ np.dot(self.N1clpp, Clkk_theo - self.thetaclkk)
+ np.dot(self.N1cltt, Cl_tt - self.fcltt)
+ np.dot(self.N1clee, Cl_ee - self.fclee)
+ np.dot(self.N1clbb, Cl_bb - self.fclbb)
+ np.dot(self.N1clte, Cl_te - self.fclte)
)
# put the correction term into bandpowers
correction = self.binning_matrix.dot(correction)
return Clkk_binned + correction
class LensingLiteLikelihood(BinnedPSLikelihood):
kind: str = "pp"
lmax: int = 3000
datapath: str = resource_filename("soliket", "lensing/data/binnedauto.txt")
covpath: str = resource_filename("soliket", "lensing/data/binnedcov.txt")
binning_matrix_path: str = resource_filename("soliket",
"lensing/data/binningmatrix.txt")
| 6,836
| 34.42487
| 87
|
py
|
SOLikeT
|
SOLikeT-master/soliket/lensing/__init__.py
|
from .lensing import LensingLiteLikelihood, LensingLikelihood # noqa: F401
| 76
| 37.5
| 75
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_xcorr.py
|
# pytest -k xcorr -v --pdb .
import pytest
import numpy as np
from cobaya.yaml import yaml_load
from cobaya.model import get_model
import os
import pdb
def get_demo_xcorr_model(theory):
if theory == "camb":
info_yaml = r"""
likelihood:
soliket.XcorrLikelihood:
stop_at_error: True
datapath: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
k_tracer_name: ck_so
gc_tracer_name: gc_unwise
theory:
camb:
extra_args:
lens_potential_accuracy: 1
params:
tau: 0.05
mnu: 0.0
nnu: 3.046
b1:
prior:
min: 0.
max: 10.
ref:
min: 1.
max: 4.
proposal: 0.1
s1:
prior:
min: 0.1
max: 1.0
proposal: 0.1
"""
elif theory == "classy":
info_yaml = r"""
likelihood:
soliket.XcorrLikelihood:
stop_at_error: True
datapath: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
k_tracer_name: ck_so
gc_tracer_name: gc_unwise
theory:
classy:
extra_args:
output: lCl, tCl
path: global
params:
b1:
prior:
min: 0.
max: 10.
ref:
min: 1.
max: 4.
proposal: 0.1
s1:
prior:
min: 0.1
max: 1.0
proposal: 0.1
"""
info = yaml_load(info_yaml)
model = get_model(info)
return model
@pytest.mark.skip(reason="Under development")
@pytest.mark.parametrize("theory", ["camb"])#, "classy"])
def test_xcorr(theory):
params = {'b1': 1.0, 's1': 0.4}
model = get_demo_xcorr_model(theory)
lnl = model.loglike(params)[0]
assert np.isfinite(lnl)
xcorr_lhood = model.likelihood['soliket.XcorrLikelihood']
setup_chi_out = xcorr_lhood._setup_chi()
Pk_interpolator = xcorr_lhood.theory.get_Pk_interpolator(("delta_nonu", "delta_nonu"),
extrap_kmax=1.e8,
nonlinear=False).P
from soliket.xcorr.limber import do_limber
cl_gg, cl_kappag = do_limber(xcorr_lhood.ell_range,
xcorr_lhood.provider,
xcorr_lhood.dndz,
xcorr_lhood.dndz,
params['s1'],
params['s1'],
Pk_interpolator,
params['b1'],
params['b1'],
xcorr_lhood.alpha_auto,
xcorr_lhood.alpha_cross,
setup_chi_out,
Nchi=xcorr_lhood.Nchi,
dndz1_mag=xcorr_lhood.dndz,
dndz2_mag=xcorr_lhood.dndz)
ell_load = xcorr_lhood.data.x
cl_load = xcorr_lhood.data.y
# cov_load = xcorr_lhood.data.cov
# cl_err_load = np.sqrt(np.diag(cov_load))
n_ell = len(ell_load) // 2
ell_obs_gg = ell_load[n_ell:]
ell_obs_kappag = ell_load[:n_ell]
cl_obs_gg = cl_load[:n_ell]
cl_obs_kappag = cl_load[n_ell:]
# Nell_unwise_g = np.ones_like(cl_gg) \
# / (xcorr_lhood.ngal * (60 * 180 / np.pi)**2)
Nell_obs_unwise_g = np.ones_like(cl_obs_gg) \
/ (xcorr_lhood.ngal * (60 * 180 / np.pi)**2)
import pyccl as ccl
h2 = (xcorr_lhood.provider.get_param('H0') / 100)**2
cosmo = ccl.Cosmology(Omega_c=xcorr_lhood.provider.get_param('omch2') / h2,
Omega_b=xcorr_lhood.provider.get_param('ombh2') / h2,
h=xcorr_lhood.provider.get_param('H0') / 100,
n_s=xcorr_lhood.provider.get_param('ns'),
A_s=xcorr_lhood.provider.get_param('As'),
Omega_k=xcorr_lhood.provider.get_param('omk'),
Neff=xcorr_lhood.provider.get_param('nnu'),
matter_power_spectrum='linear')
g_bias_zbz = (xcorr_lhood.dndz[:, 0],
params['b1'] * np.ones(len(xcorr_lhood.dndz[:, 0])))
mag_bias_zbz = (xcorr_lhood.dndz[:, 0],
params['s1'] * np.ones(len(xcorr_lhood.dndz[:, 0])))
tracer_g = ccl.NumberCountsTracer(cosmo,
has_rsd=False,
dndz=xcorr_lhood.dndz.T,
bias=g_bias_zbz,
mag_bias=mag_bias_zbz)
tracer_k = ccl.CMBLensingTracer(cosmo, z_source=1100)
cl_gg_ccl = ccl.cls.angular_cl(cosmo, tracer_g, tracer_g, xcorr_lhood.ell_range)
cl_kappag_ccl = ccl.cls.angular_cl(cosmo, tracer_k, tracer_g, xcorr_lhood.ell_range)
assert np.allclose(cl_gg_ccl, cl_gg)
assert np.allclose(cl_kappag_ccl, cl_kappag)
cl_obs_gg_ccl = ccl.cls.angular_cl(cosmo, tracer_g, tracer_g, ell_obs_gg)
cl_obs_kappag_ccl = ccl.cls.angular_cl(cosmo, tracer_k, tracer_g, ell_obs_kappag)
assert np.allclose(cl_obs_gg_ccl + Nell_obs_unwise_g, cl_obs_gg)
assert np.allclose(cl_obs_kappag_ccl, cl_obs_kappag)
| 5,744
| 32.208092
| 90
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_bias.py
|
# pytest -k bias -v .
import pytest
import numpy as np
from cobaya.model import get_model
from cobaya.run import run
info = {"params": {
"b_lin": 1.1,
"H0": 70.,
"ombh2": 0.0245,
"omch2": 0.1225,
"ns": 0.96,
"As": 2.2e-9,
"tau": 0.05
},
"likelihood": {"one": None},
"sampler": {"evaluate": None},
"debug": True
}
def test_bias_import():
from soliket.bias import Bias
def test_linear_bias_import():
from soliket.bias import Linear_bias
def test_linear_bias_model():
from soliket.bias import Linear_bias
info["theory"] = {
"camb": None,
"linear_bias": {"external": Linear_bias}
}
model = get_model(info) # noqa F841
def test_linear_bias_compute_grid():
from soliket.bias import Linear_bias
info["theory"] = {
"camb": None,
"linear_bias": {"external": Linear_bias}
}
model = get_model(info) # noqa F841
model.add_requirements({"Pk_grid": {"z": 0., "k_max": 10.,
"nonlinear": False,
"vars_pairs": ('delta_tot', 'delta_tot')
},
"Pk_gg_grid": None,
"Pk_gm_grid": None
})
model.logposterior(info['params']) # force computation of model
lhood = model.likelihood['one']
k, z, Pk_mm_lin = lhood.provider.get_Pk_grid(var_pair=('delta_tot', 'delta_tot'),
nonlinear=False)
Pk_gg = lhood.provider.get_Pk_gg_grid()
Pk_gm = lhood.provider.get_Pk_gm_grid()
assert np.allclose(Pk_mm_lin * info["params"]["b_lin"]**2., Pk_gg)
assert np.allclose(Pk_mm_lin * info["params"]["b_lin"], Pk_gm)
| 1,999
| 26.027027
| 85
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_gaussian.py
|
# import unittest
import numpy as np
from sklearn.datasets import make_spd_matrix
from soliket.gaussian import GaussianData, MultiGaussianData, CrossCov
def toy_data():
name1 = "A"
n1 = 10
x1 = np.arange(n1)
y1 = np.random.random(n1)
name2 = "B"
n2 = 20
x2 = np.arange(n2)
y2 = np.random.random(n2)
name3 = "C"
n3 = 30
x3 = np.arange(n3)
y3 = np.random.random(n3)
# Generate arbitrary covariance matrix, partition into parts
full_cov = make_spd_matrix(n1 + n2 + n3, random_state=1234)
cov1 = full_cov[:n1, :n1]
cov2 = full_cov[n1: n1 + n2, n1: n1 + n2]
cov3 = full_cov[n1 + n2:, n1 + n2:]
data1 = GaussianData(name1, x1, y1, cov1)
data2 = GaussianData(name2, x2, y2, cov2)
data3 = GaussianData(name3, x3, y3, cov3)
cross_cov = CrossCov(
{
(name1, name2): full_cov[:n1, n1: n1 + n2],
(name1, name3): full_cov[:n1, n1 + n2:],
(name2, name3): full_cov[n1: n1 + n2, n1 + n2:],
}
)
return [data1, data2, data3], cross_cov
def test_gaussian():
datalist, cross_cov = toy_data()
multi = MultiGaussianData(datalist, cross_cov)
name1, name2, name3 = [d.name for d in datalist]
data1, data2, data3 = datalist
assert (multi.cross_covs[(name1, name2)] == multi.cross_covs[(name2, name1)].T).all()
assert (multi.cross_covs[(name1, name3)] == multi.cross_covs[(name3, name1)].T).all()
assert (multi.cross_covs[(name2, name3)] == multi.cross_covs[(name3, name2)].T).all()
assert (multi.cross_covs[(name1, name1)] == data1.cov).all()
assert (multi.cross_covs[(name2, name2)] == data2.cov).all()
assert (multi.cross_covs[(name3, name3)] == data3.cov).all()
def test_gaussian_hartlap():
np.random.seed(1234)
name1 = "A"
n1 = 10
x1 = np.arange(n1)
y1th = x1**2.
y1 = np.random.random(n1)
nsims1 = 50
cov1 = make_spd_matrix(n1, random_state=1234)
data1 = GaussianData(name1, x1, y1, cov1)
data1_simcov = GaussianData(name1 + 'simcov', x1, y1, cov1,
ncovsims=nsims1)
data1_manysimcov = GaussianData(name1 + 'simcov', x1, y1, cov1,
ncovsims=(100 * nsims1))
hartlap_factor = (nsims1 - n1 - 2) / (nsims1 - 1)
hartlap_manyfactor = (100 * nsims1 - n1 - 2) / (100 * nsims1 - 1)
assert np.isclose(data1.loglike(y1th),
data1_simcov.loglike(y1th) / hartlap_factor,
rtol=1.e-3)
assert np.isclose(data1.loglike(y1th),
data1_manysimcov.loglike(y1th) / hartlap_manyfactor,
rtol=1.e-5)
| 2,674
| 28.395604
| 89
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_cash.py
|
import numpy as np
from soliket.cash import CashCData
def toy_data():
x = np.arange(20)
y = np.arange(20)
xx, yy = np.meshgrid(x, y)
return x, y, xx, yy
def test_cash():
data1d, theory1d, data2d, theory2d = toy_data()
cashdata1d = CashCData("toy 1d", data1d)
cashdata2d = CashCData("toy 2d", data2d)
assert np.allclose(cashdata1d.loglike(theory1d), -37.3710640070228)
assert np.allclose(cashdata2d.loglike(theory2d), -2349.5353718742294)
| 484
| 19.208333
| 73
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_lensing.py
|
import os
import tempfile
import pytest
import numpy as np
from cobaya.yaml import yaml_load
from cobaya.model import get_model
packages_path = os.environ.get("COBAYA_PACKAGES_PATH") or os.path.join(
tempfile.gettempdir(), "lensing_packages"
)
# Cosmological parameters for the test data from SO sims
# See https://github.com/simonsobs/SOLikeT/pull/101 for validation plots
fiducial_params = {
'omch2': 0.1203058,
'ombh2': 0.02219218,
'H0': 67.02393,
'ns': 0.9625356,
'As': 2.15086031154146e-9,
'mnu': 0.06,
'tau': 0.06574325,
'nnu': 3.04}
info = {"theory": {"camb": {"extra_args": {"kmax": 0.9}}}}
info['params'] = fiducial_params
def test_lensing_import(request):
from soliket.lensing import LensingLikelihood
def test_lensing_like(request):
from cobaya.install import install
install({"likelihood": {"soliket.lensing.LensingLikelihood": None}},
path=packages_path, skip_global=False, force=True, debug=True)
from soliket.lensing import LensingLikelihood
info["likelihood"] = {"LensingLikelihood": {"external": LensingLikelihood}}
model = get_model(info)
loglikes, derived = model.loglikes()
assert np.isclose(loglikes[0], 335.8560097798468, atol=0.2, rtol=0.0)
| 1,256
| 26.326087
| 79
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_ccl.py
|
"""
Check that CCL works correctly.
"""
import pytest
import numpy as np
from cobaya.model import get_model
from cobaya.likelihood import Likelihood
class CheckLike(Likelihood):
"""
This is a mock likelihood that simply forces soliket.CCL to calculate
a CCL object.
"""
def logp(self, **params_values):
ccl = self.theory.get_CCL() # noqa F841
return -1.0
def get_requirements(self):
return {"CCL": None}
fiducial_params = {
"ombh2": 0.0224,
"omch2": 0.122,
"cosmomc_theta": 104e-4,
"tau": 0.065,
"ns": 0.9645,
"logA": 3.07,
"As": {"value": "lambda logA: 1e-10*np.exp(logA)"}
}
info_dict = {
"params": fiducial_params,
"likelihood": {
"checkLike": {"external": CheckLike}
},
"theory": {
"camb": {
},
"soliket.CCL": {
"kmax": 10.0,
"nonlinear": True
}
}
}
def test_ccl_import(request):
"""
Test whether we can import pyCCL.
"""
import pyccl
def test_ccl_cobaya(request):
"""
Test whether we can call CCL from cobaya.
"""
model = get_model(info_dict)
model.loglikes()
def test_ccl_distances(request):
"""
Test whether the calculated angular diameter distance & luminosity distances
in CCL have the correct relation.
"""
model = get_model(info_dict)
model.loglikes({})
cosmo = model.provider.get_CCL()["cosmo"]
z = np.linspace(0.0, 10.0, 100)
a = 1.0 / (z + 1.0)
da = cosmo.angular_diameter_distance(a)
dl = cosmo.luminosity_distance(a)
assert np.allclose(da * (1.0 + z) ** 2.0, dl)
def test_ccl_pk(request):
"""
Test whether non-linear Pk > linear Pk in expected regimes.
"""
model = get_model(info_dict)
model.loglikes({})
cosmo = model.provider.get_CCL()["cosmo"]
k = np.logspace(np.log10(3e-1), 1, 1000)
pk_lin = cosmo.linear_matter_power(k, a=0.5)
pk_nonlin = cosmo.nonlin_matter_power(k, a=0.5)
assert np.all(pk_nonlin > pk_lin)
| 2,033
| 20.638298
| 80
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_cosmopower.py
|
"""
Check that CosmoPower gives the correct Planck CMB power spectrum.
"""
import os
import pytest
import numpy as np
import matplotlib.pyplot as plt
from cobaya.model import get_model
from soliket.cosmopower import HAS_COSMOPOWER
fiducial_params = {
"ombh2": 0.0224,
"omch2": 0.122,
"h": 0.67,
"tau": 0.065,
"ns": 0.9645,
"logA": 3.07,
"A_planck": 1.0,
# derived params
"As": {"value": "lambda logA: 1e-10 * np.exp(logA)"},
"H0": {"value": "lambda h: h * 100.0"},
}
info_dict = {
"params": fiducial_params,
"likelihood": {
# This should be installed, otherwise one should install it via cobaya.
"planck_2018_highl_plik.TTTEEE_lite_native": {"stop_at_error": True}
},
"theory": {
"soliket.CosmoPower": {
"stop_at_error": True,
"network_settings": {
"tt": {
"type": "NN",
"log": True,
"filename": "cmb_TT_NN",
# If your network has been trained on (l (l+1) / 2 pi) C_l,
# this flag needs to be set.
"has_ell_factor": False,
},
"ee": {
"type": "NN",
"log": True,
"filename": "cmb_EE_NN",
"has_ell_factor": False,
},
"te": {
"type": "PCAplusNN",
# Trained on Cl, not log(Cl)
"log": False,
"filename": "cmb_TE_PCAplusNN",
"has_ell_factor": False,
},
},
"renames": {
"ombh2": "omega_b",
"omch2": "omega_cdm",
"ns": "n_s",
"logA": "ln10^{10}A_s",
"tau": "tau_reio"
}
}
},
}
@pytest.mark.skipif(not HAS_COSMOPOWER, reason='test requires cosmopower')
def test_cosmopower_theory(request):
info_dict['theory']['soliket.CosmoPower']['network_path'] = \
os.path.join(request.config.rootdir, 'soliket/data/CosmoPower/CP_paper/CMB')
model_fiducial = get_model(info_dict) # noqa F841
@pytest.mark.skipif(not HAS_COSMOPOWER, reason='test requires cosmopower')
def test_cosmopower_loglike(request):
info_dict['theory']['soliket.CosmoPower']['network_path'] = \
os.path.join(request.config.rootdir, 'soliket/data/CosmoPower/CP_paper/CMB')
model_cp = get_model(info_dict)
logL_cp = float(model_cp.loglikes({})[0])
assert np.isclose(logL_cp, -295.139)
@pytest.mark.skipif(not HAS_COSMOPOWER, reason='test requires cosmopower')
def test_cosmopower_against_camb(request):
info_dict['theory'] = {'camb': {'stop_at_error': True}}
model_camb = get_model(info_dict)
logL_camb = float(model_camb.loglikes({})[0])
camb_cls = model_camb.theory['camb'].get_Cl()
info_dict['theory'] = {
"soliket.CosmoPower": {
"stop_at_error": True,
"extra_args": {'lmax': camb_cls['ell'].max()},
'network_path': os.path.join(request.config.rootdir,
'soliket/data/CosmoPower/CP_paper/CMB'),
"network_settings": {
"tt": {
"type": "NN",
"log": True,
"filename": "cmb_TT_NN"
},
"ee": {
"type": "NN",
"log": True,
"filename": "cmb_EE_NN"
},
"te": {
"type": "PCAplusNN",
"log": False,
"filename": "cmb_TE_PCAplusNN"
},
},
"renames": {
"ombh2": "omega_b",
"omch2": "omega_cdm",
"ns": "n_s",
"logA": "ln10^{10}A_s",
"tau": "tau_reio"
}
}
}
model_cp = get_model(info_dict)
logL_cp = float(model_cp.loglikes({})[0])
cp_cls = model_cp.theory['soliket.CosmoPower'].get_Cl()
nanmask = ~np.isnan(cp_cls['tt'])
assert np.allclose(cp_cls['tt'][nanmask], camb_cls['tt'][nanmask], rtol=1.e-2)
assert np.isclose(logL_camb, logL_cp, rtol=1.e-1)
| 4,311
| 30.474453
| 84
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_mflike.py
|
"""
Make sure that this returns the same result as the original mflike.MFLike from the LAT_MFlike repo.
"""
import os
import tempfile
import unittest
import pytest
from packaging.version import Version
import camb
import soliket # noqa
from soliket.mflike import TestMFLike
import numpy as np
packages_path = os.environ.get("COBAYA_PACKAGES_PATH") or os.path.join(
tempfile.gettempdir(), "LAT_packages"
)
cosmo_params = {
"cosmomc_theta": 0.0104085,
"As": 2.0989031673191437e-09,
"ombh2": 0.02237,
"omch2": 0.1200,
"ns": 0.9649,
"Alens": 1.0,
"tau": 0.0544,
}
nuisance_params = {
"a_tSZ": 3.3044404448917724,
"a_kSZ": 1.6646620740058649,
"a_p": 6.912474322461401,
"beta_p": 2.077474196171309,
"a_c": 4.88617700670901,
"beta_c": 2.2030316332596014,
"a_s": 3.099214100532393,
"T_d": 9.60,
"a_gtt": 0,
"a_gte": 0,
"a_gee": 0,
"a_psee": 0,
"a_pste": 0,
"xi": 0,
"bandint_shift_LAT_93": 0,
"bandint_shift_LAT_145": 0,
"bandint_shift_LAT_225": 0,
"cal_LAT_93": 1,
"cal_LAT_145": 1,
"cal_LAT_225": 1,
"calT_LAT_93": 1,
"calE_LAT_93": 1,
"calT_LAT_145": 1,
"calE_LAT_145": 1,
"calT_LAT_225": 1,
"calE_LAT_225": 1,
"calG_all": 1,
"alpha_LAT_93": 0,
"alpha_LAT_145": 0,
"alpha_LAT_225": 0,
}
if Version(camb.__version__) >= Version('1.4'):
chi2s = {"tt": 545.1257,
"te": 137.4146,
"ee": 167.9850,
"tt-te-et-ee": 790.5121}
else:
chi2s = {"tt": 544.9745,
"te-et": 152.6807,
"ee": 168.0953,
"tt-te-et-ee": 790.4124}
pre = "test_data_sacc_"
class MFLikeTest(unittest.TestCase):
def setUp(self):
from cobaya.install import install
install({"likelihood": {"soliket.mflike.TestMFLike": None}},
path=packages_path, skip_global=False, force=True, debug=True)
def test_mflike(self):
        # As of now, soliket provides no mechanism to guarantee that a
        # .loglike method exists which can be called like this without cobaya
camb_cosmo = cosmo_params.copy()
lmax = 9000
camb_cosmo.update({"lmax": lmax, "lens_potential_accuracy": 1})
pars = camb.set_params(**camb_cosmo)
results = camb.get_results(pars)
powers = results.get_cmb_power_spectra(pars, CMB_unit="muK")
cl_dict = {k: powers["total"][:, v] for
k, v in {"tt": 0, "ee": 1, "te": 3}.items()}
BP = soliket.BandPass()
FG = soliket.Foreground()
TF = soliket.TheoryForge_MFLike()
ell = np.arange(lmax + 1)
bands = TF.bands
exp_ch = TF.exp_ch
print(exp_ch, bands)
requested_cls = TF.requested_cls
BP.bands = bands
BP.exp_ch = [k.replace("_s0", "") for k in bands.keys()
if "_s0" in k]
bandpass = BP._bandpass_construction(**nuisance_params)
fg_dict = FG._get_foreground_model(requested_cls=requested_cls,
ell=ell,
exp_ch=exp_ch,
bandint_freqs=bandpass,
**nuisance_params)
dlobs_dict = TF.get_modified_theory(cl_dict, fg_dict, **nuisance_params)
for select, chi2 in chi2s.items():
my_mflike = TestMFLike(
{
"external": TestMFLike,
"packages_path": packages_path,
"data_folder": "TestMFLike",
"input_file": pre + "00000.fits",
"defaults": {
"polarizations": select.upper().split("-"),
"scales": {
"TT": [2, 179],
"TE": [2, 179],
"ET": [2, 179],
"EE": [2, 179],
},
"symmetrize": False,
},
}
)
loglike = my_mflike.loglike(dlobs_dict)
self.assertAlmostEqual(-2 * (loglike - my_mflike.logp_const), chi2, 2)
def test_cobaya(self):
info = {
"likelihood": {
"soliket.mflike.TestMFLike": {
"datapath": os.path.join(packages_path, "data/TestMFLike"),
"data_folder": "TestMFLike",
"input_file": pre + "00000.fits",
"defaults": {
"polarizations": ["TT", "TE", "ET", "EE"],
"scales": {
"TT": [2, 179],
"TE": [2, 179],
"ET": [2, 179],
"EE": [2, 179],
},
"symmetrize": False,
},
},
},
"theory": {"camb": {"extra_args": {"lens_potential_accuracy": 1},
"stop_at_error": True}},
"params": cosmo_params,
"modules": packages_path,
"debug": True,
}
info["theory"]["soliket.TheoryForge_MFLike"] = {'stop_at_error': True}
info["theory"]["soliket.Foreground"] = {'stop_at_error': True}
info["theory"]["soliket.BandPass"] = {'stop_at_error': True}
from cobaya.model import get_model
model = get_model(info)
my_mflike = model.likelihood["soliket.mflike.TestMFLike"]
chi2 = -2 * (model.loglikes(nuisance_params)[0] - my_mflike.logp_const)
self.assertAlmostEqual(chi2[0], chi2s["tt-te-et-ee"], 2)
| 5,764
| 29.664894
| 90
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_ps.py
|
import os
from tempfile import gettempdir
import numpy as np
from sklearn.datasets import make_spd_matrix
# from scipy.stats import multivariate_normal
from soliket.gaussian import GaussianData, CrossCov
from soliket import MultiGaussianLikelihood
from soliket import PSLikelihood
from soliket.utils import get_likelihood
class ToyLikelihood(PSLikelihood):
name = "toy"
n = 10
sigma = 1
off_diag_amp = 1e-3
cov = None
seed = 1234
def initialize(self):
np.random.seed(self.seed)
x = np.arange(self.n)
if self.cov is None:
cov = make_spd_matrix(self.n) * self.off_diag_amp
cov += np.diag(np.ones(self.n) * self.sigma ** 2)
else:
cov = self.cov
y = np.random.multivariate_normal(np.zeros(self.n), cov)
self.data = GaussianData(self.name, x, y, cov)
def _get_theory(self):
return np.zeros(self.n)
def test_toy():
n1, n2, n3 = [10, 20, 30]
full_cov = make_spd_matrix(n1 + n2 + n3, random_state=1234) * 1e-1
full_cov += np.diag(np.ones((n1 + n2 + n3)))
cov1 = full_cov[:n1, :n1]
cov2 = full_cov[n1: n1 + n2, n1: n1 + n2]
cov3 = full_cov[n1 + n2:, n1 + n2:]
name1, name2, name3 = ["A", "B", "C"]
cross_cov = CrossCov(
{
(name1, name2): full_cov[:n1, n1: n1 + n2],
(name1, name3): full_cov[:n1, n1 + n2:],
(name2, name3): full_cov[n1: n1 + n2, n1 + n2:],
}
)
tempdir = gettempdir()
cross_cov_path = os.path.join(tempdir, "toy_cross_cov.npz")
cross_cov.save(cross_cov_path)
info1 = {"name": name1, "n": n1, "cov": cov1, "seed": 123}
info2 = {"name": name2, "n": n2, "cov": cov2, "seed": 234}
info3 = {"name": name3, "n": n3, "cov": cov3, "seed": 345}
lhood = "soliket.tests.test_ps.ToyLikelihood"
components = [lhood] * 3
options = [info1, info2, info3]
multilike1 = MultiGaussianLikelihood({"components": components, "options": options})
multilike2 = MultiGaussianLikelihood(
{"components": components, "options": options, "cross_cov_path": cross_cov_path}
)
like1 = get_likelihood(lhood, info1)
like2 = get_likelihood(lhood, info2)
like3 = get_likelihood(lhood, info3)
assert np.isclose(multilike1.logp(), sum([likex.logp() for
likex in [like1, like2, like3]]))
assert not np.isclose(multilike2.logp(), sum([likex.logp() for
likex in [like1, like2, like3]]))
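# Illustrative check (exposition only, not collected by pytest): the two
# asserts above rest on a standard Gaussian identity. With zero
# cross-covariance the joint covariance is block-diagonal and the joint
# log-pdf equals the sum of the per-block log-pdfs; a nonzero cross block
# breaks that equality.
def _blockdiag_logpdf_identity():
    from scipy.stats import multivariate_normal
    rng = np.random.default_rng(0)
    cov_a = np.diag([1.0, 2.0])
    cov_b = np.diag([0.5, 1.5])
    full = np.block([[cov_a, np.zeros((2, 2))],
                     [np.zeros((2, 2)), cov_b]])
    x = rng.normal(size=4)
    joint = multivariate_normal(np.zeros(4), full).logpdf(x)
    parts = (multivariate_normal(np.zeros(2), cov_a).logpdf(x[:2])
             + multivariate_normal(np.zeros(2), cov_b).logpdf(x[2:]))
    assert np.isclose(joint, parts)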
| 2,554
| 30.158537
| 88
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_poisson.py
|
import numpy as np
import pandas as pd
from functools import partial
from soliket.poisson_data import PoissonData
x_min = 0
x_max = 10
def rate_density(x, a):
"""simple linear rate density
"""
return a * x
def n_expected(a):
return 0.5 * a * (x_max ** 2 - x_min ** 2) # integral(rate_density, x_min, x_max)
def generate_data(a, with_samples=False, unc=0.3, Nk=64):
# Generate total number
n = np.random.poisson(n_expected(a))
    # Generate x values according to rate density (normalized as PDF)
    u = np.random.random(n)
    # Invert the CDF of the normalized density p(x) = 2x / (x_max^2 - x_min^2):
    # u = (x^2 - x_min^2) / (x_max^2 - x_min^2)
    # => x = sqrt(u (x_max^2 - x_min^2) + x_min^2)
    x = np.sqrt(u * (x_max ** 2 - x_min ** 2) + x_min ** 2)
if not with_samples:
return x
else:
return x[:, None] * (1 + np.random.normal(0, unc, size=(n, Nk)))
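# Quick sanity check (illustrative, not collected by pytest): for the linear
# rate density, the normalized PDF is p(x) = 2x / (x_max^2 - x_min^2) with
# mean 2 (x_max^3 - x_min^3) / (3 (x_max^2 - x_min^2)); for x_min = 0 and
# x_max = 10 this is 20/3. The sampler above should reproduce it.
def _check_inverse_cdf_sampler(n=100000, seed=0):
    rng = np.random.default_rng(seed)
    u = rng.random(n)
    x = np.sqrt(u * (x_max ** 2 - x_min ** 2) + x_min ** 2)
    analytic_mean = 2 * (x_max ** 3 - x_min ** 3) / (3 * (x_max ** 2 - x_min ** 2))
    assert abs(x.mean() - analytic_mean) < 0.05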
def test_poisson_experiment(a_true=3, N=100, with_samples=False, Nk=64):
a_maxlikes = []
for i in range(N):
observations = generate_data(a_true, with_samples=with_samples, Nk=Nk)
if not with_samples:
catalog = pd.DataFrame({"x": observations})
data = PoissonData("toy", catalog, ["x"])
else:
catalog = pd.DataFrame({"x": observations.mean(axis=1)})
samples = {"x": observations, "prior": np.ones(observations.shape)}
data = PoissonData("toy_samples", catalog, ["x"], samples=samples)
a_grid = np.arange(0.1, 10, 0.1)
lnl = [data.loglike(partial(rate_density, a=a), n_expected(a)) for a in a_grid]
a_maxlike = a_grid[np.argmax(lnl)]
a_maxlikes.append(a_maxlike)
assert abs(np.mean(a_maxlikes) - a_true) < 0.1
| 1,639
| 28.285714
| 87
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_cross_correlation.py
|
import numpy as np
import os
import pytest
from soliket.ccl import CCL
from cobaya.model import get_model
auto_file = 'soliket/data/xcorr_simulated/clgg_noiseless.txt'
cross_file = 'soliket/data/xcorr_simulated/clkg_noiseless.txt'
dndz_file = 'soliket/data/xcorr_simulated/dndz.txt'
sacc_file = 'soliket/tests/data/des_s-act_kappa.toy-sim.sacc.fits'
cosmo_params = {"Omega_c": 0.25, "Omega_b": 0.05, "h": 0.67, "n_s": 0.96}
info = {
"params": {
"omch2": cosmo_params["Omega_c"] * cosmo_params["h"] ** 2.0,
"ombh2": cosmo_params["Omega_b"] * cosmo_params["h"] ** 2.0,
"H0": cosmo_params["h"] * 100,
"ns": cosmo_params["n_s"],
"As": 2.2e-9,
"tau": 0,
"b1": 1,
"s1": 0.4,
},
"theory": {"camb": None, "ccl": {"external": CCL, "nonlinear": False}},
"debug": False,
"stop_at_error": True,
}
def test_galaxykappa_import(request):
from soliket.cross_correlation import GalaxyKappaLikelihood
def test_shearkappa_import(request):
from soliket.cross_correlation import ShearKappaLikelihood
def test_galaxykappa_model(request):
from soliket.cross_correlation import GalaxyKappaLikelihood
info["likelihood"] = {
"GalaxyKappaLikelihood": {"external": GalaxyKappaLikelihood,
"datapath": None,
'cross_file': os.path.join(request.config.rootdir,
cross_file),
'auto_file': os.path.join(request.config.rootdir,
auto_file),
'dndz_file': os.path.join(request.config.rootdir,
dndz_file)}
}
model = get_model(info) # noqa F841
# @pytest.mark.xfail(reason="data file not in repo")
def test_shearkappa_model(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file)}}
model = get_model(info) # noqa F841
def test_galaxykappa_like(request):
from soliket.cross_correlation import GalaxyKappaLikelihood
info["likelihood"] = {
"GalaxyKappaLikelihood": {"external": GalaxyKappaLikelihood,
"datapath": None,
'cross_file': os.path.join(request.config.rootdir,
cross_file),
'auto_file': os.path.join(request.config.rootdir,
auto_file),
'dndz_file': os.path.join(request.config.rootdir,
dndz_file)}
}
model = get_model(info)
loglikes, derived = model.loglikes()
assert np.isclose(loglikes[0], 88.2, atol=0.2, rtol=0.0)
# @pytest.mark.xfail(reason="data file not in repo")
def test_shearkappa_like(request):
from soliket.cross_correlation import ShearKappaLikelihood
rootdir = request.config.rootdir
cs82_file = "soliket/tests/data/cs82_gs-planck_kappa_binned.sim.sacc.fits"
test_datapath = os.path.join(rootdir, cs82_file)
info["likelihood"] = {
"ShearKappaLikelihood": {"external": ShearKappaLikelihood,
"datapath": test_datapath}
}
# Cosmological parameters for the test data, digitized from
# Fig. 3 and Eq. 8 of Hall & Taylor (2014).
# See https://github.com/simonsobs/SOLikeT/pull/58 for validation plots
info['params'] = {"omch2": 0.118, # Planck + lensing + WP + highL
"ombh2": 0.0222,
"H0": 68.0,
"ns": 0.962,
"As": 2.1e-9,
"tau": 0.094,
"mnu": 0.0,
"nnu": 3.046,
"s1": 0.4,
"b1": 1.0}
model = get_model(info)
loglikes, derived = model.loglikes()
assert np.isclose(loglikes, 637.64473666)
def test_shearkappa_hartlap(request):
from soliket.cross_correlation import ShearKappaLikelihood
rootdir = request.config.rootdir
cs82_file = "soliket/tests/data/cs82_gs-planck_kappa_binned.sim.sacc.fits"
test_datapath = os.path.join(rootdir, cs82_file)
info["likelihood"] = {
"ShearKappaLikelihood": {"external": ShearKappaLikelihood,
"datapath": test_datapath}
}
# Cosmological parameters for the test data, digitized from
# Fig. 3 and Eq. 8 of Hall & Taylor (2014).
# See https://github.com/simonsobs/SOLikeT/pull/58 for validation plots
info['params'] = {"omch2": 0.118, # Planck + lensing + WP + highL
"ombh2": 0.0222,
"H0": 68.0,
"ns": 0.962,
# "As": 2.1e-9,
"As": 2.5e-9, # offset the theory to upweight inv_cov in loglike
"tau": 0.094,
"mnu": 0.0,
"nnu": 3.046,
"s1": 0.4,
"b1": 1.0}
model = get_model(info)
loglikes, derived = model.loglikes()
info["likelihood"]["ShearKappaLikelihood"]["ncovsims"] = 5
model = get_model(info)
loglikes_hartlap, derived = model.loglikes()
assert np.isclose(np.abs(loglikes - loglikes_hartlap), 0.0010403,
rtol=1.e-5, atol=1.e-5)
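# Illustrative sketch (exposition only): the small logp shift tested above is
# the Hartlap (2007) debiasing of an inverse covariance estimated from
# n_sims simulations. The estimated inverse is rescaled by
# alpha = (n_sims - n_data - 2) / (n_sims - 1) < 1, which down-weights the
# chi-square.
def _hartlap_inverse_cov(cov, n_sims):
    n_data = cov.shape[0]
    alpha = (n_sims - n_data - 2) / (n_sims - 1)
    return alpha * np.linalg.inv(cov)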
def test_shearkappa_deltaz(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file),
"z_nuisance_mode": "deltaz"}}
model = get_model(info) # noqa F841
loglikes, derived = model.loglikes()
assert np.isfinite(loglikes)
def test_shearkappa_m(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file),
"m_nuisance_mode": True}}
model = get_model(info) # noqa F841
loglikes, derived = model.loglikes()
assert np.isfinite(loglikes)
def test_shearkappa_ia_nla_noevo(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file),
"ia_mode": 'nla-noevo'}}
model = get_model(info) # noqa F841
loglikes, derived = model.loglikes()
assert np.isfinite(loglikes)
def test_shearkappa_ia_nla(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file),
"ia_mode": 'nla'}}
info["params"]["eta_IA"] = 1.7
model = get_model(info) # noqa F841
loglikes, derived = model.loglikes()
assert np.isfinite(loglikes)
def test_shearkappa_ia_perbin(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file),
"ia_mode": 'nla-perbin'}}
model = get_model(info) # noqa F841
loglikes, derived = model.loglikes()
assert np.isfinite(loglikes)
def test_shearkappa_hmcode(request):
from soliket.cross_correlation import ShearKappaLikelihood
info["likelihood"] = {"ShearKappaLikelihood":
{"external": ShearKappaLikelihood,
"datapath": os.path.join(request.config.rootdir, sacc_file)}}
info["theory"] = {"camb": {'extra_args': {'halofit_version': 'mead2020_feedback',
'HMCode_logT_AGN': 7.8}},
"ccl": {"external": CCL, "nonlinear": False}}
model = get_model(info) # noqa F841
loglikes, derived = model.loglikes()
assert np.isfinite(loglikes)
| 8,759
| 32.822394
| 88
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_foreground.py
|
# pytest -k foreground -v .
import pytest
import numpy as np
import os
from cobaya.model import get_model
from cobaya.run import run
info = {"params": {
"a_tSZ": 3.3044404448917724,
"a_kSZ": 1.6646620740058649,
"a_p": 6.912474322461401,
"beta_p": 2.077474196171309,
"a_c": 4.88617700670901,
"beta_c": 2.2030316332596014,
"a_s": 3.099214100532393,
"T_d": 9.60,
"a_gtt": 0,
"a_gte": 0,
"a_gee": 0,
"a_psee": 0,
"a_pste": 0,
"xi": 0,
},
"likelihood": {"one": None},
"sampler": {"evaluate": None},
"debug": True
}
def test_foreground_import():
from soliket.foreground import Foreground
def test_foreground_model():
from soliket.foreground import Foreground
info["theory"] = {"foreground": {"external": Foreground},
}
model = get_model(info) # noqa F841
def test_foreground_compute():
from soliket.foreground import Foreground
from soliket.bandpass import BandPass
info["theory"] = {
"foreground": {"external": Foreground},
"bandpass": {"external": BandPass},
}
info["foregrounds"] = {
"normalisation": {"nu_0": 150.0,
"ell_0": 3000,
"T_CMB": 2.725
},
"components": {"tt": ["kSZ", "tSZ_and_CIB",
"cibp", "dust", "radio"],
"te": ["radio", "dust"],
"ee": ["radio", "dust"]
},
}
info["spectra"] = {
"polarizations": ["tt", "te", "ee"],
"lmin": 2,
"lmax": 9000,
"exp_ch": ["LAT_93", "LAT_145", "LAT_225"],
"eff_freqs": [93, 145, 225]
}
nu_0 = info["foregrounds"]["normalisation"]["nu_0"]
ell_0 = info["foregrounds"]["normalisation"]["ell_0"]
ell = np.arange(info["spectra"]["lmin"], info["spectra"]["lmax"] + 1)
requested_cls = info["spectra"]["polarizations"]
components = info["foregrounds"]["components"]
exp_ch = info["spectra"]["exp_ch"]
eff_freqs = np.asarray(info["spectra"]["eff_freqs"])
bands = {f"{expc}_s0": {'nu': [eff_freqs[iexpc]], 'bandpass': [1.]}
for iexpc, expc in enumerate(exp_ch)}
model = get_model(info) # noqa F841
model.add_requirements({"fg_dict": {
"requested_cls": requested_cls,
"ell": ell,
"exp_ch": exp_ch,
"bands": bands},
})
model.logposterior(info['params']) # force computation of model
lhood = model.likelihood['one']
fg_model = lhood.provider.get_fg_dict()
fg_model_test = get_fg(exp_ch, eff_freqs, ell, ell_0, nu_0, requested_cls, components)
for k in fg_model_test.keys():
assert np.allclose(fg_model[k], fg_model_test[k])
def get_fg(freqs, bandint_freqs, ell, ell_0, nu_0, requested_cls, components):
from fgspectra import cross as fgc
from fgspectra import frequency as fgf
from fgspectra import power as fgp
template_path = os.path.join(os.path.dirname(os.path.abspath(fgp.__file__)),
'data')
cibc_file = os.path.join(template_path, 'cl_cib_Choi2020.dat')
ksz = fgc.FactorizedCrossSpectrum(fgf.ConstantSED(), fgp.kSZ_bat())
cibp = fgc.FactorizedCrossSpectrum(fgf.ModifiedBlackBody(), fgp.PowerLaw())
radio = fgc.FactorizedCrossSpectrum(fgf.PowerLaw(), fgp.PowerLaw())
tsz = fgc.FactorizedCrossSpectrum(fgf.ThermalSZ(), fgp.tSZ_150_bat())
cibc = fgc.FactorizedCrossSpectrum(fgf.CIB(),
fgp.PowerSpectrumFromFile(cibc_file))
dust = fgc.FactorizedCrossSpectrum(fgf.ModifiedBlackBody(), fgp.PowerLaw())
tSZ_and_CIB = fgc.SZxCIB_Choi2020()
ell_clp = ell * (ell + 1.)
ell_0clp = ell_0 * (ell_0 + 1.)
fg_component_list = {s: components[s] for s in requested_cls}
model = {}
model["tt", "kSZ"] = info["params"]["a_kSZ"] * ksz({"nu": bandint_freqs},
{"ell": ell,
"ell_0": ell_0})
model["tt", "cibp"] = info["params"]["a_p"] * cibp({"nu": bandint_freqs,
"nu_0": nu_0,
"temp": info["params"]["T_d"],
"beta": info["params"]["beta_p"]},
{"ell": ell_clp,
"ell_0": ell_0clp,
"alpha": 1})
model["tt", "radio"] = info["params"]["a_s"] * radio({"nu": bandint_freqs,
"nu_0": nu_0,
"beta": -0.5 - 2.},
{"ell": ell_clp,
"ell_0": ell_0clp,
"alpha": 1})
model["tt", "tSZ"] = info["params"]["a_tSZ"] * tsz({"nu": bandint_freqs,
"nu_0": nu_0},
{"ell": ell,
"ell_0": ell_0})
model["tt", "cibc"] = info["params"]["a_c"] * cibc({"nu": bandint_freqs,
"nu_0": nu_0,
"temp": info["params"]["T_d"],
"beta": info["params"]["beta_c"]},
{"ell": ell,
"ell_0": ell_0})
model["tt", "dust"] = info["params"]["a_gtt"] * dust({"nu": bandint_freqs,
"nu_0": nu_0,
"temp": 19.6,
"beta": 1.5},
{"ell": ell,
"ell_0": 500.,
"alpha": -0.6})
model["tt", "tSZ_and_CIB"] = \
tSZ_and_CIB({'kwseq': ({'nu': bandint_freqs, 'nu_0': nu_0},
{'nu': bandint_freqs, 'nu_0': nu_0,
'temp': info["params"]['T_d'],
'beta': info["params"]["beta_c"]})},
{'kwseq': ({'ell': ell, 'ell_0': ell_0,
'amp': info["params"]['a_tSZ']},
{'ell': ell, 'ell_0': ell_0,
'amp': info["params"]['a_c']},
{'ell': ell, 'ell_0': ell_0,
'amp': - info["params"]['xi'] \
* np.sqrt(info["params"]['a_tSZ'] *
info["params"]['a_c'])})})
model["ee", "radio"] = info["params"]["a_psee"] * radio({"nu": bandint_freqs,
"nu_0": nu_0,
"beta": -0.5 - 2.},
{"ell": ell_clp,
"ell_0": ell_0clp,
"alpha": 1})
model["ee", "dust"] = info["params"]["a_gee"] * dust({"nu": bandint_freqs,
"nu_0": nu_0,
"temp": 19.6,
"beta": 1.5},
{"ell": ell,
"ell_0": 500.,
"alpha": -0.4})
model["te", "radio"] = info["params"]["a_pste"] * radio({"nu": bandint_freqs,
"nu_0": nu_0,
"beta": -0.5 - 2.},
{"ell": ell_clp,
"ell_0": ell_0clp,
"alpha": 1})
model["te", "dust"] = info["params"]["a_gte"] * dust({"nu": bandint_freqs,
"nu_0": nu_0,
"temp": 19.6,
"beta": 1.5},
{"ell": ell,
"ell_0": 500.,
"alpha": -0.4})
fg_dict = {}
for c1, f1 in enumerate(freqs):
for c2, f2 in enumerate(freqs):
for s in requested_cls:
fg_dict[s, "all", f1, f2] = np.zeros(len(ell))
for comp in fg_component_list[s]:
if comp == "tSZ_and_CIB":
fg_dict[s, "tSZ", f1, f2] = model[s, "tSZ"][c1, c2]
fg_dict[s, "cibc", f1, f2] = model[s, "cibc"][c1, c2]
fg_dict[s, "tSZxCIB", f1, f2] = (
model[s, comp][c1, c2]
- model[s, "tSZ"][c1, c2]
- model[s, "cibc"][c1, c2]
)
fg_dict[s, "all", f1, f2] += model[s, comp][c1, c2]
else:
fg_dict[s, comp, f1, f2] = model[s, comp][c1, c2]
fg_dict[s, "all", f1, f2] += fg_dict[s, comp, f1, f2]
return fg_dict
| 11,069
| 46.922078
| 90
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_lensing_lite.py
|
import pytest
import numpy as np
from cobaya.yaml import yaml_load
from cobaya.model import get_model
try:
import classy # noqa F401
except ImportError:
boltzmann_codes = ["camb"]
else:
boltzmann_codes = ["camb", "classy"]
def get_demo_lensing_model(theory):
if theory == "camb":
info_yaml = r"""
likelihood:
soliket.LensingLiteLikelihood:
stop_at_error: True
theory:
camb:
extra_args:
lens_potential_accuracy: 1
params:
ns:
prior:
min: 0.8
max: 1.2
H0:
prior:
min: 40
max: 100
"""
elif theory == "classy":
info_yaml = r"""
likelihood:
soliket.LensingLiteLikelihood:
stop_at_error: True
theory:
classy:
extra_args:
output: lCl, tCl
path: global
params:
n_s:
prior:
min: 0.8
max: 1.2
H0:
prior:
min: 40
max: 100
"""
info = yaml_load(info_yaml)
model = get_model(info)
return model
@pytest.mark.parametrize("theory", boltzmann_codes)
def test_lensing(theory):
model = get_demo_lensing_model(theory)
ns_param = "ns" if theory == "camb" else "n_s"
lnl = model.loglike({ns_param: 0.965, "H0": 70})[0]
assert np.isfinite(lnl)
| 1,586
| 20.739726
| 55
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_multi.py
|
import numpy as np
import pytest
from soliket.tests.test_mflike import cosmo_params, nuisance_params
@pytest.mark.xfail(reason="lensing lhood install failure")
def test_multi():
lensing_options = {"theory_lmax": 5000}
pre = "data_sacc_"
mflike_options = {
"input_file": pre + "00000.fits",
"cov_Bbl_file": pre + "w_covar_and_Bbl.fits",
"stop_at_error": True,
}
camb_options = {"extra_args": {"lens_potential_accuracy": 1}}
fg_params = {"a_tSZ": {"prior": {"min": 3.0, "max": 3.6}},
"a_kSZ": {"prior": {"min": 1.4, "max": 1.8}}}
mflike_params = {**cosmo_params, **nuisance_params}
mflike_params.update(fg_params)
lensing_params = {**cosmo_params}
info = {
"likelihood": {
"soliket.gaussian.MultiGaussianLikelihood": {
"components": ["soliket.mflike.MFLike", "soliket.LensingLikelihood"],
"options": [mflike_options, lensing_options],
"stop_at_error": True,
}
},
"theory": {"camb": camb_options,
"soliket.TheoryForge_MFLike": {'stop_at_error': True},
"soliket.Foreground": {"stop_at_error": True},
"soliket.BandPass": {"stop_at_error": True}},
"params": {**mflike_params},
}
info1 = {
"likelihood": {"soliket.mflike.MFLike": mflike_options},
"theory": {"camb": camb_options,
"soliket.TheoryForge_MFLike": {'stop_at_error': True},
"soliket.Foreground": {"stop_at_error": True},
"soliket.BandPass": {"stop_at_error": True}},
"params": {**mflike_params},
}
info2 = {
"likelihood": {"soliket.LensingLikelihood": lensing_options},
"theory": {"camb": camb_options},
"params": {**lensing_params},
}
from cobaya.model import get_model
model = get_model(info)
model1 = get_model(info1)
model2 = get_model(info2)
# To test here, the absolute values of the logps are not identical
# to the sum of components when combined (probably due to numerical issues of
# computing inv_cov); so here we test to make sure
# that the change in logp between two different sets of params is identical
fg_values_a = {"a_tSZ": nuisance_params["a_tSZ"], "a_kSZ": nuisance_params["a_kSZ"]}
fg_values_b = {k: v * 1.1 for k, v in fg_values_a.items()}
logp_a = model.loglikes(fg_values_a, cached=False)[0].sum()
logp_b = model.loglikes(fg_values_b, cached=False)[0].sum()
d_logp = logp_b - logp_a
assert np.isfinite(d_logp)
model1_logp_a = model1.loglikes(fg_values_a, cached=False)[0].sum()
model2_logp_a = model2.loglikes({}, cached=False)[0].sum()
model1_logp_b = model1.loglikes(fg_values_b, cached=False)[0].sum()
model2_logp_b = model2.loglikes({}, cached=False)[0].sum()
d_logp1 = model1_logp_b - model1_logp_a
d_logp2 = model2_logp_b - model2_logp_a
d_logp_sum = d_logp1 + d_logp2
assert np.isclose(d_logp, d_logp_sum)
| 3,066
| 34.252874
| 88
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_clusters.py
|
import numpy as np
import pytest
from cobaya.model import get_model
fiducial_params = {
"ombh2": 0.02225,
"omch2": 0.1198,
"H0": 67.3,
"tau": 0.06,
"As": 2.2e-9,
"ns": 0.96,
"mnu": 0.06,
"nnu": 3.046,
}
info_fiducial = {
"params": fiducial_params,
"likelihood": {"soliket.ClusterLikelihood": {"stop_at_error": True}},
"theory": {
"camb": {
"extra_args": {
"accurate_massive_neutrino_transfers": True,
"num_massive_neutrinos": 1,
"redshifts": np.linspace(0, 2, 41),
"nonlinear": False,
"kmax": 10.0,
"dark_energy_model": "ppf",
"bbn_predictor": "PArthENoPE_880.2_standard.dat"
}
},
},
}
def test_clusters_model():
model_fiducial = get_model(info_fiducial) # noqa F841
def test_clusters_loglike():
model_fiducial = get_model(info_fiducial)
lnl = model_fiducial.loglikes({})[0]
assert np.isclose(lnl, -854.89406321, rtol=1.e-3, atol=1.e-5)
def test_clusters_n_expected():
model_fiducial = get_model(info_fiducial)
lnl = model_fiducial.loglikes({})[0]
like = model_fiducial.likelihood["soliket.ClusterLikelihood"]
assert np.isfinite(lnl)
assert like._get_n_expected() > 40
| 1,322
| 21.05
| 73
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_bandpass.py
|
# pytest -k bandpass -v .
import pytest
import numpy as np
from cobaya.model import get_model
from cobaya.run import run
from ..constants import T_CMB, h_Planck, k_Boltzmann
info = {"params": {
"bandint_shift_LAT_93": 0.0,
"bandint_shift_LAT_145": 0.0,
"bandint_shift_LAT_225": 0.0
},
"likelihood": {"one": None},
"sampler": {"evaluate": None},
"debug": True
}
bands = {"LAT_93_s0": {"nu": [93], "bandpass": [1.]},
"LAT_145_s0": {"nu": [145], "bandpass": [1.]},
"LAT_225_s0": {"nu": [225], "bandpass": [1.]}}
exp_ch = [k.replace("_s0", "") for k in bands.keys()]
def _cmb2bb(nu):
# NB: numerical factors not included
x = nu * h_Planck * 1e9 / k_Boltzmann / T_CMB
return np.exp(x) * (nu * x / np.expm1(x))**2
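# Note (illustrative): up to the constant factors dropped above, _cmb2bb is
# the Planck-law derivative dB_nu/dT evaluated at T_CMB, which is
# proportional to nu^4 exp(x) / (exp(x) - 1)^2 with x = h nu / (k_B T_CMB);
# rewriting the denominator with expm1 gives the numerically stable form used.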
def test_bandpass_import():
from soliket.bandpass import BandPass
def test_bandpass_model():
from soliket.bandpass import BandPass
info["theory"] = {"bandpass": {
"external": BandPass,
},
}
model = get_model(info) # noqa F841
def test_bandpass_read_from_sacc():
from soliket.bandpass import BandPass
# testing the default read_from_sacc
info["theory"] = {
"bandpass": {"external": BandPass},
}
model = get_model(info) # noqa F841
model.add_requirements({"bandint_freqs": {"bands": bands}
})
model.logposterior(info['params']) # force computation of model
lhood = model.likelihood['one']
bandpass = lhood.provider.get_bandint_freqs()
bandint_freqs = np.empty_like(exp_ch, dtype=float)
for ifr, fr in enumerate(exp_ch):
bandpar = 'bandint_shift_' + fr
bandint_freqs[ifr] = np.asarray(bands[fr + "_s0"]["nu"]) + info["params"][bandpar]
assert np.allclose(bandint_freqs, bandpass)
def test_bandpass_top_hat():
from soliket.bandpass import BandPass
# now testing top-hat construction
info["theory"].update({
"bandpass": {"external": BandPass,
"top_hat_band": {
"nsteps": 3,
"bandwidth": 0.5},
"external_bandpass": {},
"read_from_sacc": {},
},
})
model = get_model(info)
model.add_requirements({"bandint_freqs": {"bands": bands}
})
model.logposterior(info['params']) # force computation of model
lhood = model.likelihood['one']
bandpass = lhood.provider.get_bandint_freqs()
bandint_freqs = []
nsteps = info["theory"]["bandpass"]["top_hat_band"]["nsteps"]
bandwidth = info["theory"]["bandpass"]["top_hat_band"]["bandwidth"]
for ifr, fr in enumerate(exp_ch):
bandpar = 'bandint_shift_' + fr
bd = bands[f"{fr}_s0"]
nu_ghz, bp = np.asarray(bd["nu"]), np.asarray(bd["bandpass"])
fr = nu_ghz @ bp / bp.sum()
bandlow = fr * (1 - bandwidth * .5)
bandhigh = fr * (1 + bandwidth * .5)
nub = np.linspace(bandlow + info["params"][bandpar],
bandhigh + info["params"][bandpar],
nsteps, dtype=float)
tranb = _cmb2bb(nub)
tranb_norm = np.trapz(_cmb2bb(nub), nub)
bandint_freqs.append([nub, tranb / tranb_norm])
assert np.allclose(bandint_freqs, bandpass)
def test_bandpass_external_file(request):
from soliket.bandpass import BandPass
import os
filepath = os.path.join(request.config.rootdir,
"soliket/tests/data/")
# now testing reading from external file
info["theory"].update({
"bandpass": {"external": BandPass,
"data_folder": f"{filepath}",
"top_hat_band": {},
"external_bandpass": {
"path": "test_bandpass"},
"read_from_sacc": {},
},
})
model = get_model(info)
model.add_requirements({"bandint_freqs": {"bands": bands}
})
model.logposterior(info['params']) # force computation of model
lhood = model.likelihood['one']
bandpass = lhood.provider.get_bandint_freqs()
path = os.path.normpath(os.path.join(
info["theory"]["bandpass"]["data_folder"],
info["theory"]["bandpass"]["external_bandpass"]["path"]))
arrays = os.listdir(path)
external_bandpass = []
for a in arrays:
nu_ghz, bp = np.loadtxt(path + "/" + a, usecols=(0, 1), unpack=True)
external_bandpass.append([a, nu_ghz, bp])
bandint_freqs = []
for expc, nu_ghz, bp in external_bandpass:
bandpar = "bandint_shift_" + expc
nub = nu_ghz + info["params"][bandpar]
if not hasattr(bp, "__len__"):
bandint_freqs.append(nub)
bandint_freqs = np.asarray(bandint_freqs)
else:
trans_norm = np.trapz(bp * _cmb2bb(nub), nub)
trans = bp / trans_norm * _cmb2bb(nub)
bandint_freqs.append([nub, trans])
assert np.allclose(bandint_freqs, bandpass)
| 5,237
| 30.178571
| 90
|
py
|
SOLikeT
|
SOLikeT-master/soliket/tests/test_runs.py
|
import pkgutil
import pytest
import tempfile
from cobaya.yaml import yaml_load
from cobaya.run import run
import os
packages_path = os.environ.get("COBAYA_PACKAGES_PATH") or os.path.join(
tempfile.gettempdir(), "lensing_packages"
)
@pytest.mark.parametrize("lhood",
["mflike",
"lensing",
"lensing_lite",
"multi",
"cross_correlation",
# "xcorr"
])
def test_evaluate(lhood):
if lhood == "multi":
pytest.xfail(reason="multi lhood install failure")
if lhood == "mflike":
pytest.skip(reason="don't want to install 300Mb of data!")
if lhood == "cross_correlation":
pytest.skip(reason="cannot locate data files")
info = yaml_load(pkgutil.get_data("soliket", f"tests/test_{lhood}.yaml"))
info["force"] = True
info['sampler'] = {'evaluate': {}}
from cobaya.install import install
install(info, path=packages_path, skip_global=True)
updated_info, sampler = run(info)
@pytest.mark.parametrize("lhood",
["mflike",
"lensing",
"lensing_lite",
"multi",
"cross_correlation",
# "xcorr"
])
def test_mcmc(lhood):
if lhood == "multi":
pytest.xfail(reason="multi lhood install failure")
if lhood == "mflike":
pytest.skip(reason="don't want to install 300Mb of data!")
if lhood == "cross_correlation":
pytest.skip(reason="cannot locate data files")
info = yaml_load(pkgutil.get_data("soliket", f"tests/test_{lhood}.yaml"))
info["force"] = True
info['sampler'] = {'mcmc': {'max_samples': 10, 'max_tries': 1000}}
from cobaya.install import install
install(info, path=packages_path, skip_global=True)
updated_info, sampler = run(info)
| 2,146
| 27.25
| 77
|
py
|
SOLikeT
|
SOLikeT-master/soliket/xcorr/limber.py
|
import numpy as np
from ..constants import C_HMPC
oneover_chmpc = 1. / C_HMPC
def mag_bias_kernel(provider, dndz, s1, zatchi, chi_arr, chiprime_arr, zprime_arr):
'''Calculates magnification bias kernel.
'''
dndzprime = np.interp(zprime_arr, dndz[:, 0], dndz[:, 1], left=0, right=0)
norm = np.trapz(dndz[:, 1], x=dndz[:, 0])
dndzprime = dndzprime / norm #TODO check this norm is right
g_integrand = (chiprime_arr - chi_arr[np.newaxis, :]) / chiprime_arr \
* (oneover_chmpc * provider.get_param('H0') / 100) \
* np.sqrt(provider.get_param('omegam') * (1 + zprime_arr)**3.
+ 1 - provider.get_param('omegam')) \
* dndzprime
g = chi_arr * np.trapz(g_integrand, x=chiprime_arr, axis=0)
W_mu = (5. * s1 - 2.) * 1.5 * provider.get_param('omegam') \
* (provider.get_param('H0') / 100)**2 * (oneover_chmpc)**2 \
* (1. + zatchi(chi_arr)) * g
return W_mu
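# For reference (illustrative comment): the kernel assembled above is,
# schematically,
#   W_mu(chi) = (5 s1 - 2) * (3/2) * Omega_m * (H0/c)^2 * (1 + z(chi)) * g(chi),
#   g(chi) = chi * Integral_chi^chi* dchi' (chi' - chi) / chi'
#            * (H(z') / c) * dN/dz(z'),
# i.e. the standard magnification-bias kernel weighted by the number-count
# slope s1.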
def do_limber(ell_arr, provider, dndz1, dndz2, s1, s2, pk, b1_HF, b2_HF,
alpha_auto, alpha_cross,
chi_grids,
#use_zeff=True,
Nchi=50, dndz1_mag=None, dndz2_mag=None, normed=False):
zatchi = chi_grids['zatchi']
# chiatz = chi_grids['chiatz']
chi_arr = chi_grids['chival']
# z_arr = chi_grids['zval']
chiprime_arr = chi_grids['chivalp']
zprime_arr = chi_grids['zvalp']
chistar = provider.get_comoving_radial_distance(provider.get_param('zstar'))
# Galaxy kernels, assumed to be b(z) * dN/dz
W_g1 = np.interp(zatchi(chi_arr), dndz1[:, 0], dndz1[:, 1] \
* provider.get_Hubble(dndz1[:, 0], units='1/Mpc'), left=0, right=0)
if not normed:
W_g1 /= np.trapz(W_g1, x=chi_arr)
W_g2 = np.interp(zatchi(chi_arr), dndz2[:, 0], dndz2[:, 1] \
* provider.get_Hubble(dndz2[:, 0], units='1/Mpc'), left=0, right=0)
if not normed:
W_g2 /= np.trapz(W_g2, x=chi_arr)
W_kappa = (oneover_chmpc)**2. * 1.5 * provider.get_param('omegam') \
* (provider.get_param('H0') / 100)**2. * (1. + zatchi(chi_arr)) \
* chi_arr * (chistar - chi_arr) / chistar
# Get effective redshift
# if use_zeff:
# kern = W_g1 * W_g2 / chi_arr**2
# zeff = np.trapz(kern * z_arr,x=chi_arr) / np.trapz(kern, x=chi_arr)
# else:
# zeff = -1.0
# set up magnification bias kernels
W_mu1 = mag_bias_kernel(provider, dndz1, s1,
zatchi, chi_arr, chiprime_arr, zprime_arr)
c_ell_g1g1 = np.zeros([ell_arr.shape[0], 1, chi_arr.shape[0]])
c_ell_g1kappa = np.zeros([ell_arr.shape[0], 1, chi_arr.shape[0]])
c_ell_kappakappa = np.zeros([ell_arr.shape[0], 1, chi_arr.shape[0]])
c_ell_g1mu1 = np.zeros([ell_arr.shape[0], 1, chi_arr.shape[0]])
c_ell_mu1mu1 = np.zeros([ell_arr.shape[0], 1, chi_arr.shape[0]])
c_ell_mu1kappa = np.zeros([ell_arr.shape[0], 1, chi_arr.shape[0]])
for i_chi, chi in enumerate(chi_arr):
k_arr = (ell_arr + 0.5) / chi
p_mm_hf = pk(zatchi(chi), k_arr)
p_mm = p_mm_hf
        p_gg = b1_HF * b1_HF * p_mm_hf  # let's keep constant linear bias for now
p_gm = b1_HF * p_mm_hf
W_g1g1 = W_g1[i_chi] * W_g1[i_chi] / (chi**2) * p_gg
c_ell_g1g1[:, :, i_chi] = W_g1g1.T
W_g1kappa = W_g1[i_chi] * W_kappa[i_chi] / (chi**2) * p_gm
c_ell_g1kappa[:, :, i_chi] = W_g1kappa.T
# W_kappakappa = W_kappa[i_chi] * W_kappa[i_chi] / (chi**2) * p_mm
# c_ell_kappakappa[:,:,i_chi] = W_kappakappa.T
W_g1mu1 = W_g1[i_chi] * W_mu1[i_chi] / (chi**2) * p_gm
c_ell_g1mu1[:, :, i_chi] = W_g1mu1.T
W_mu1mu1 = W_mu1[i_chi] * W_mu1[i_chi] / (chi**2) * p_mm
c_ell_mu1mu1[:, :, i_chi] = W_mu1mu1.T
W_mu1kappa = W_kappa[i_chi] * W_mu1[i_chi] / (chi**2) * p_mm
c_ell_mu1kappa[:, :, i_chi] = W_mu1kappa.T
c_ell_g1g1 = np.trapz(c_ell_g1g1, x=chi_arr, axis=-1)
c_ell_g1kappa = np.trapz(c_ell_g1kappa, x=chi_arr, axis=-1)
c_ell_kappakappa = np.trapz(c_ell_kappakappa, x=chi_arr, axis=-1)
c_ell_g1mu1 = np.trapz(c_ell_g1mu1, x=chi_arr, axis=-1)
c_ell_mu1mu1 = np.trapz(c_ell_mu1mu1, x=chi_arr, axis=-1)
c_ell_mu1kappa = np.trapz(c_ell_mu1kappa, x=chi_arr, axis=-1)
clobs_gg = c_ell_g1g1 + 2. * c_ell_g1mu1 + c_ell_mu1mu1
clobs_kappag = c_ell_g1kappa + c_ell_mu1kappa
# clobs_kappakappa = c_ell_kappakappa
return clobs_gg.flatten(), clobs_kappag.flatten()#, clobs_kappakappa.flatten()
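# Schematic summary (illustrative comment): each spectrum above is a Limber
# integral C_l^{AB} = Integral dchi W_A(chi) W_B(chi) / chi^2
#                     * P(k = (l + 1/2) / chi, z(chi)),
# and the observables include magnification bias:
#   C_l^{gg,obs} = C_l^{gg} + 2 C_l^{g mu} + C_l^{mu mu}
#   C_l^{kg,obs} = C_l^{kg} + C_l^{k mu}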
| 4,606
| 36.762295
| 87
|
py
|
SOLikeT
|
SOLikeT-master/soliket/xcorr/xcorr.py
|
r""" Likelihood for cross-correlation of CMB lensing and galaxy clustering probes.
"""
import numpy as np
import sacc
from typing import Optional
from scipy.interpolate import InterpolatedUnivariateSpline as Spline
from ..gaussian import GaussianData, GaussianLikelihood
from .. import utils
from .limber import do_limber
class XcorrLikelihood(GaussianLikelihood):
'''Cross-correlation Likelihood for CMB lensing and galaxy clustering probes.
Based on the original xcorr code [1]_ used in [2]_.
Accepts data files containing the two spectra from either text files or a sacc file.
Parameters
----------
datapath : str, optional
        sacc file containing the redshift distribution, galaxy-galaxy and galaxy-kappa
observed spectra. Default: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
k_tracer_name : str, optional
sacc file tracer name for kappa. Default: ck_so
gc_tracer_name : str, optional
sacc file tracer name for galaxy clustering. Default: gc_unwise
dndz_file : str, optional
Text file containing the redshift distribution.
auto_file : str, optional
Text file containing the galaxy-galaxy observed spectra.
cross_file : str, optional
Text file containing the galaxy-kappa observed spectra.
high_ell : int
Maximum multipole to be computed for all spectra. Default: 600
nz : int
Resolution of redshift grid used for Limber computations. Default: 149
Nchi : int
Resolution of Chi grid used for lensing kernel computations. Default: 149
Nchi_mag : int
Resolution of Chi grid used for magnification kernel computations. Default: 149
Pk_interp_kmax : float
Maximum k value for the Pk interpolator, units Mpc^-1. Default: 10.0
b1 : float
Linear galaxy bias value for the galaxy sample.
s1 : float
Magnification bias slope for the galaxy sample.
References
----------
.. [1] https://github.com/simonsobs/xcorr
.. [2] Krolewski, Ferraro and White, 2021, arXiv:2105.03421
'''
def initialize(self):
name: str = "Xcorr" # noqa F841
self.log.info('Initialising.')
if self.datapath is None:
dndz_file: Optional[str] # noqa F821
auto_file: Optional[str] # noqa F821
cross_file: Optional[str] # noqa F821
self.dndz = np.loadtxt(self.dndz_file)
self.x, self.y, self.dy = self._get_data()
if self.covpath is None:
self.log.info('No xcorr covariance specified. Using diag(dy^2).')
self.cov = np.diag(self.dy**2)
else:
self.cov = self._get_cov()
else:
self.k_tracer_name: Optional[str] # noqa F821
self.gc_tracer_name: Optional[str] # noqa F821
# tracer_combinations: Optional[str] # TODO: implement with keep_selection
self.sacc_data = self._get_sacc_data()
self.x = self.sacc_data['x']
self.y = self.sacc_data['y']
self.cov = self.sacc_data['cov']
self.dndz = self.sacc_data['dndz']
self.ngal = self.sacc_data['ngal']
# TODO is this resolution limit on zarray a CAMB problem?
self.nz: Optional[int] # noqa F821
        assert self.nz <= 149, "CAMB limitations require nz <= 149"
self.zarray = np.linspace(self.dndz[:, 0].min(), self.dndz[:, 0].max(), self.nz)
self.zbgdarray = np.concatenate([self.zarray, [1100]]) # TODO: unfix zstar
self.Nchi: Optional[int] # noqa F821
self.Nchi_mag: Optional[int] # noqa F821
#self.use_zeff: Optional[bool] # noqa F821
self.Pk_interp_kmax: Optional[float] # noqa F821
self.high_ell: Optional[float] # noqa F821
self.ell_range = np.linspace(1, self.high_ell, int(self.high_ell + 1))
# TODO expose these defaults
self.alpha_auto = 0.9981
self.alpha_cross = 0.9977
self.data = GaussianData(self.name, self.x, self.y, self.cov)
def get_requirements(self):
return {
'Cl': {'lmax': self.high_ell,
'pp': self.high_ell},
"Pk_interpolator": {
"z": self.zarray[:-1],
"k_max": self.Pk_interp_kmax,
#"extrap_kmax": 20.0,
"nonlinear": False,
"hubble_units": False, # cobaya told me to
"k_hunit": False, # cobaya told me to
"vars_pairs": [["delta_nonu", "delta_nonu"]],
},
"Hubble": {"z": self.zarray},
"angular_diameter_distance": {"z": self.zbgdarray},
"comoving_radial_distance": {"z": self.zbgdarray},
'H0': None,
'ombh2': None,
'omch2': None,
'omk': None,
'omegam': None,
'zstar': None,
'As': None,
'ns': None
}
def _bin(self, theory_cl, lmin, lmax):
binned_theory_cl = np.zeros_like(lmin)
for i in range(len(lmin)):
binned_theory_cl[i] = np.mean(theory_cl[(self.ell_range >= lmin[i])
& (self.ell_range < lmax[i])])
return binned_theory_cl
def _get_sacc_data(self, **params_values):
data_sacc = sacc.Sacc.load_fits(self.datapath)
# TODO: would be better to use keep_selection
data_sacc.remove_selection(tracers=(self.k_tracer_name, self.k_tracer_name))
ell_auto, cl_auto = data_sacc.get_ell_cl('cl_00',
self.gc_tracer_name,
self.gc_tracer_name)
ell_cross, cl_cross = data_sacc.get_ell_cl('cl_00',
self.gc_tracer_name,
self.k_tracer_name) #TODO: check order
cov = data_sacc.covariance.covmat
x = np.concatenate([ell_auto, ell_cross])
y = np.concatenate([cl_auto, cl_cross])
dndz = np.column_stack([data_sacc.tracers[self.gc_tracer_name].z,
data_sacc.tracers[self.gc_tracer_name].nz])
ngal = data_sacc.tracers[self.gc_tracer_name].metadata['ngal']
data = {'x': x,
'y': y,
'cov': cov,
'dndz': dndz,
'ngal': ngal}
return data
def _get_data(self, **params_values):
data_auto = np.loadtxt(self.auto_file)
data_cross = np.loadtxt(self.cross_file)
# Get data
self.ell_auto = data_auto[0]
cl_auto = data_auto[1]
cl_auto_err = data_auto[2]
self.ell_cross = data_cross[0]
cl_cross = data_cross[1]
cl_cross_err = data_cross[2]
x = np.concatenate([self.ell_auto, self.ell_cross])
y = np.concatenate([cl_auto, cl_cross])
dy = np.concatenate([cl_auto_err, cl_cross_err])
return x, y, dy
def _setup_chi(self):
chival = self.provider.get_comoving_radial_distance(self.zarray)
zatchi = Spline(chival, self.zarray)
chiatz = Spline(self.zarray, chival)
chimin = np.min(chival) + 1.e-5
chimax = np.max(chival)
chival = np.linspace(chimin, chimax, self.Nchi)
zval = zatchi(chival)
chistar = \
self.provider.get_comoving_radial_distance(self.provider.get_param('zstar'))
chivalp = \
np.array(list(map(lambda x: np.linspace(x, chistar, self.Nchi_mag), chival)))
chivalp = chivalp.transpose()[0]
zvalp = zatchi(chivalp)
chi_result = {'zatchi': zatchi,
'chiatz': chiatz,
'chival': chival,
'zval': zval,
'chivalp': chivalp,
'zvalp': zvalp}
return chi_result
def _get_theory(self, **params_values):
setup_chi_out = self._setup_chi()
Pk_interpolator = self.provider.get_Pk_interpolator(("delta_nonu", "delta_nonu"),
extrap_kmax=1.e8,
nonlinear=False).P
cl_gg, cl_kappag = do_limber(self.ell_range,
self.provider,
self.dndz,
self.dndz,
params_values['s1'],
params_values['s1'],
Pk_interpolator,
params_values['b1'],
params_values['b1'],
self.alpha_auto,
self.alpha_cross,
setup_chi_out,
Nchi=self.Nchi,
#use_zeff=self.use_zeff,
dndz1_mag=self.dndz,
dndz2_mag=self.dndz)
# TODO: this is not the correct binning,
# but there needs to be a consistent way to specify it
bin_edges = np.linspace(20, self.high_ell, self.data.x.shape[0] // 2 + 1)
ell_gg, clobs_gg = utils.binner(self.ell_range, cl_gg, bin_edges)
ell_kappag, clobs_kappag = utils.binner(self.ell_range, cl_kappag, bin_edges)
#ell_kappakappa, clobs_kappakappa = utils.binner(self.ell_range, cl_kappakappa, bin_edges) # noqa E501
return np.concatenate([clobs_gg, clobs_kappag])
| 9,916
| 36.422642
| 110
|
py
|
SOLikeT
|
SOLikeT-master/soliket/xcorr/__init__.py
|
from .xcorr import XcorrLikelihood
| 35
| 17
| 34
|
py
|
SOLikeT
|
SOLikeT-master/soliket/mflike/mflike.py
|
r"""
.. module:: mflike
:Synopsis: Multi frequency likelihood for TTTEEE CMB power spectra for Simons Observatory
:Authors: Thibaut Louis, Xavier Garrido, Max Abitbol,
Erminia Calabrese, Antony Lewis, David Alonso.
MFLike is a multi frequency likelihood code interfaced with the Cobaya
sampler and a theory Boltzmann code such as CAMB, CLASS or Cosmopower.
The ``MFLike`` likelihood class reads the data file (in ``sacc`` format)
and all the settings
for the MCMC run (such as file paths, :math:`\ell` ranges, experiments
and frequencies to be used, parameters priors...)
from the ``MFLike.yaml`` file.
The theory :math:`C_{\ell}` are then summed to the (possibly frequency
integrated) foreground power spectra and modified by systematic effects
in the ``TheoryForge_MFLike`` class. The foreground power spectra are
computed by the ``soliket.Foreground`` class, while the bandpasses from
the ``soliket.BandPass`` one; the ``Foreground`` class is required by
``TheoryForge_MFLike``, while ``BandPass`` is required by ``Foreground``.
This is a scheme of how ``MFLike`` and ``TheoryForge_MFLike`` are interfaced:
.. image:: images/mflike_scheme.png
:width: 400
"""
import os
from typing import Optional
import numpy as np
from cobaya.conventions import data_path, packages_path_input
from cobaya.likelihoods.base_classes import InstallableLikelihood
from cobaya.log import LoggedError
from cobaya.tools import are_different_params_lists
from ..gaussian import GaussianData, GaussianLikelihood
class MFLike(GaussianLikelihood, InstallableLikelihood):
_url = "https://portal.nersc.gov/cfs/sobs/users/MFLike_data"
_release = "v0.8"
install_options = {"download_url": "{}/{}.tar.gz".format(_url, _release)}
# attributes set from .yaml
input_file: Optional[str]
cov_Bbl_file: Optional[str]
data: dict
defaults: dict
def initialize(self):
# Set default values to data member not initialized via yaml file
self.l_bpws = None
self.spec_meta = []
# Set path to data
if ((not getattr(self, "path", None)) and
(not getattr(self, "packages_path", None))):
raise LoggedError(self.log,
"No path given to MFLike data. "
"Set the likelihood property "
"'path' or 'packages_path'"
)
# If no path specified, use the modules path
data_file_path = os.path.normpath(getattr(self, "path", None) or
os.path.join(self.packages_path,
"data"))
self.data_folder = os.path.join(data_file_path, self.data_folder)
if not os.path.exists(self.data_folder):
if not getattr(self, "path", None):
self.install(path=self.packages_path)
else:
raise LoggedError(
self.log,
"The 'data_folder' directory does not exist. "\
"Check the given path [%s].",
self.data_folder,
)
self.requested_cls = [p.lower() for p in self.defaults["polarizations"]]
for x in ["et", "eb", "bt"]:
if x in self.requested_cls:
self.requested_cls.remove(x)
# Read data
self.prepare_data()
self.lmax_theory = self.lmax_theory or 9000
self.log.debug(f"Maximum multipole value: {self.lmax_theory}")
self.log.info("Initialized!")
def get_requirements(self):
r"""
Passes the fields ``ell``, ``requested_cls``, ``lcuts``,
``exp_ch`` (list of array names) and ``bands``
(dictionary of ``exp_ch`` and the corresponding frequency
and passbands) inside the dictionary ``requirements["cmbfg_dict"]``.
:return: the dictionary ``requirements["cmbfg_dict"]``
"""
# mflike requires cmbfg_dict from theoryforge
# cmbfg_dict requires some params to be computed
reqs = dict()
reqs["cmbfg_dict"] = {"ell": self.l_bpws,
"requested_cls": self.requested_cls,
"lcuts": self.lcuts,
"exp_ch": self.experiments,
"bands": self.bands}
return reqs
def _get_theory(self, **params_values):
cmbfg_dict = self.provider.get_cmbfg_dict()
return self._get_power_spectra(cmbfg_dict)
def logp(self, **params_values):
cmbfg_dict = self.theory.get_cmbfg_dict()
return self.loglike(cmbfg_dict)
def loglike(self, cmbfg_dict):
"""
Computes the gaussian log-likelihood
:param cmbfg_dict: the dictionary of theory + foregrounds
:math:`D_{\ell}`
:return: the exact loglikelihood :math:`\ln \mathcal{L}`
"""
ps_vec = self._get_power_spectra(cmbfg_dict)
delta = self.data_vec - ps_vec
logp = -0.5 * (delta @ self.inv_cov @ delta)
logp += self.logp_const
self.log.debug(
"Log-likelihood value computed "
"= {} (Χ² = {})".format(logp, -2 * (logp - self.logp_const)))
return logp
def prepare_data(self, verbose=False):
"""
Reads the sacc data, extracts the data tracers,
trims the spectra and covariance according to the ell scales
        set in the input file. It stores the ell vector, the data vector
and the covariance in a GaussianData object.
If ``verbose=True``, it plots the tracer names, the spectrum name,
the shape of the indices array, lmin, lmax.
"""
import sacc
data = self.data
# Read data
input_fname = os.path.join(self.data_folder, self.input_file)
s = sacc.Sacc.load_fits(input_fname)
# Read extra file containing covariance and windows if needed.
cbbl_extra = False
s_b = s
if self.cov_Bbl_file:
if self.cov_Bbl_file != self.input_file:
cov_Bbl_fname = os.path.join(self.data_folder,
self.cov_Bbl_file)
s_b = sacc.Sacc.load_fits(cov_Bbl_fname)
cbbl_extra = True
try:
default_cuts = self.defaults
except AttributeError:
raise KeyError("You must provide a list of default cuts")
        # Translation between TEB and sacc C_ell types
pol_dict = {"T": "0",
"E": "e",
"B": "b"}
ppol_dict = {"TT": "tt",
"EE": "ee",
"TE": "te",
"ET": "te",
"BB": "bb",
"EB": "eb",
"BE": "eb",
"TB": "tb",
"BT": "tb",
"BB": "bb"}
def get_cl_meta(spec):
"""
Lower-level function of `prepare_data`.
For each of the entries of the `spectra` section of the
yaml file, extracts the relevant information: channel,
polarization combinations, scale cuts and
whether TE should be symmetrized.
:param spec: the dictionary ``data["spectra"]``
"""
# Experiments/frequencies
exp_1, exp_2 = spec["experiments"]
# Read off polarization channel combinations
pols = spec.get("polarizations",
default_cuts["polarizations"]).copy()
# Read off scale cuts
scls = spec.get("scales",
default_cuts["scales"]).copy()
# For the same two channels, do not include ET and TE, only TE
if (exp_1 == exp_2):
if "ET" in pols:
pols.remove("ET")
if "TE" not in pols:
pols.append("TE")
scls["TE"] = scls["ET"]
symm = False
else:
# Symmetrization
if ("TE" in pols) and ("ET" in pols):
symm = spec.get("symmetrize",
default_cuts["symmetrize"])
else:
symm = False
return exp_1, exp_2, pols, scls, symm
def get_sacc_names(pol, exp_1, exp_2):
"""
Lower-level function of `prepare_data`.
Translates the polarization combination and channel
name of a given entry in the `spectra`
part of the input yaml file into the names expected
in the SACC files.
:param pol: temperature or polarization fields, i.e. 'TT', 'TE'
:param exp_1: experiment of map 1
:param exp_2: experiment of map 2
:return: tracer name 1, tracer name 2, string for :math:`C_{\ell}`
type
"""
tname_1 = exp_1
tname_2 = exp_2
p1, p2 = pol
if p1 in ["E", "B"]:
tname_1 += "_s2"
else:
tname_1 += "_s0"
if p2 in ["E", "B"]:
tname_2 += "_s2"
else:
tname_2 += "_s0"
if p2 == "T":
dtype = "cl_" + pol_dict[p2] + pol_dict[p1]
else:
dtype = "cl_" + pol_dict[p1] + pol_dict[p2]
return tname_1, tname_2, dtype
# First we trim the SACC file so it only contains
# the parts of the data we care about.
# Indices to be kept
indices = []
indices_b = []
# Length of the final data vector
len_compressed = 0
for spectrum in data["spectra"]:
(exp_1, exp_2, pols, scls, symm) = get_cl_meta(spectrum)
for pol in pols:
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2)
lmin, lmax = scls[pol]
ind = s.indices(dtype, # Power spectrum type
(tname_1, tname_2), # Channel combinations
ell__gt=lmin, ell__lt=lmax) # Scale cuts
indices += list(ind)
# Note that data in the cov_Bbl file may be in different order.
if cbbl_extra:
ind_b = s_b.indices(dtype,
(tname_1, tname_2),
ell__gt=lmin, ell__lt=lmax)
indices_b += list(ind_b)
if symm and pol == "ET":
pass
else:
len_compressed += ind.size
self.log.debug(f"{tname_1} {tname_2} {dtype} {ind.shape} {lmin} {lmax}")
# Get rid of all the unselected power spectra.
# Sacc takes care of performing the same cuts in the
# covariance matrix, window functions etc.
s.keep_indices(np.array(indices))
if cbbl_extra:
s_b.keep_indices(np.array(indices_b))
# Now create metadata for each spectrum
len_full = s.mean.size
# These are the matrices we'll use to compress the data if
# `symmetrize` is true.
# Note that a lot of the complication in this function is caused by the
# symmetrization option, for which SACC doesn't have native support.
mat_compress = np.zeros([len_compressed, len_full])
mat_compress_b = np.zeros([len_compressed, len_full])
self.lcuts = {k: c[1] for k, c in default_cuts["scales"].items()}
index_sofar = 0
for spectrum in data["spectra"]:
(exp_1, exp_2, pols, scls, symm) = get_cl_meta(spectrum)
for k in scls.keys():
self.lcuts[k] = max(self.lcuts[k], scls[k][1])
for pol in pols:
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2)
# The only reason why we need indices is the symmetrization.
# Otherwise all of this could have been done in the previous
# loop over data["spectra"].
ls, cls, ind = s.get_ell_cl(dtype, tname_1, tname_2, return_ind=True)
if cbbl_extra:
ind_b = s_b.indices(dtype,
(tname_1, tname_2))
ws = s_b.get_bandpower_windows(ind_b)
else:
ws = s.get_bandpower_windows(ind)
if self.l_bpws is None:
# The assumption here is that bandpower windows
# will all be sampled at the same ells.
self.l_bpws = ws.values
# Symmetrize if needed.
if (pol in ["TE", "ET"]) and symm:
pol2 = pol[::-1]
pols.remove(pol2)
tname_1, tname_2, dtype = get_sacc_names(pol2,
exp_1, exp_2)
ind2 = s.indices(dtype,
(tname_1, tname_2))
cls2 = s.get_ell_cl(dtype, tname_1, tname_2)[1]
cls = 0.5 * (cls + cls2)
for i, (j1, j2) in enumerate(zip(ind, ind2)):
mat_compress[index_sofar + i, j1] = 0.5
mat_compress[index_sofar + i, j2] = 0.5
if cbbl_extra:
ind2_b = s_b.indices(dtype,
(tname_1, tname_2))
for i, (j1, j2) in enumerate(zip(ind_b, ind2_b)):
mat_compress_b[index_sofar + i, j1] = 0.5
mat_compress_b[index_sofar + i, j2] = 0.5
else:
for i, j1 in enumerate(ind):
mat_compress[index_sofar + i, j1] = 1
if cbbl_extra:
for i, j1 in enumerate(ind_b):
mat_compress_b[index_sofar + i, j1] = 1
# The fields marked with # below aren't really used, but
# we store them just in case.
self.spec_meta.append({"ids": (index_sofar +
np.arange(cls.size,
dtype=int)),
"pol": ppol_dict[pol],
"hasYX_xsp": pol in ["ET", "BE", "BT"], # For symm
"t1": exp_1,
"t2": exp_2,
"leff": ls,
"cl_data": cls,
"bpw": ws})
index_sofar += cls.size
if not cbbl_extra:
mat_compress_b = mat_compress
# Put data and covariance in the right order.
self.data_vec = np.dot(mat_compress, s.mean)
self.cov = np.dot(mat_compress_b,
s_b.covariance.covmat.dot(mat_compress_b.T))
self.inv_cov = np.linalg.inv(self.cov)
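        # Constant part of the Gaussian log-likelihood,
        # -(n/2) ln(2 pi) - (1/2) ln det(cov); added back in loglike().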
self.logp_const = np.log(2 * np.pi) * (-len(self.data_vec) / 2)
self.logp_const -= 0.5 * np.linalg.slogdet(self.cov)[1]
self.experiments = data["experiments"]
self.bands = {
name: {"nu": tracer.nu, "bandpass": tracer.bandpass}
for name, tracer in s.tracers.items()
}
# Put lcuts in a format that is recognisable by CAMB.
self.lcuts = {k.lower(): c for k, c in self.lcuts.items()}
if "et" in self.lcuts:
del self.lcuts["et"]
ell_vec = np.zeros_like(self.data_vec)
for m in self.spec_meta:
i = m["ids"]
ell_vec[i] = m["leff"]
self.ell_vec = ell_vec
self.data = GaussianData("mflike", self.ell_vec, self.data_vec, self.cov)
def _get_power_spectra(self, cmbfg):
"""
Get :math:`D_{\ell}` from the theory component
already modified by ``theoryforge_MFLike``
:param cmbfg: the dictionary of theory+foreground :math:`D_{\ell}`
:return: the binned data vector
"""
ps_vec = np.zeros_like(self.data_vec)
DlsObs = dict()
# Note we rescale l_bpws because cmbfg spectra start from l=2
ell = self.l_bpws - 2
for m in self.spec_meta:
p = m["pol"]
i = m["ids"]
w = m["bpw"].weight.T
if p in ['tt', 'ee', 'bb']:
DlsObs[p, m['t1'], m['t2']] = cmbfg[p, m['t1'], m['t2']][ell]
else: # ['te','tb','eb']
if m['hasYX_xsp']: # not symmetrizing
DlsObs[p, m['t1'], m['t2']] = cmbfg[p, m['t2'], m['t1']][ell]
else:
DlsObs[p, m['t1'], m['t2']] = cmbfg[p, m['t1'], m['t2']][ell]
#
if self.defaults['symmetrize']: # we average TE and ET (as for data)
DlsObs[p, m['t1'], m['t2']] += cmbfg[p, m['t2'], m['t1']][ell]
DlsObs[p, m['t1'], m['t2']] *= 0.5
clt = w @ DlsObs[p, m["t1"], m["t2"]]
ps_vec[i] = clt
return ps_vec
class TestMFLike(MFLike):
_url = "https://portal.nersc.gov/cfs/sobs/users/MFLike_data"
filename = "v0.1_test"
install_options = {"download_url": f"{_url}/{filename}.tar.gz"}
| 17,618
| 39.226027
| 90
|
py
|
SOLikeT
|
SOLikeT-master/soliket/mflike/theoryforge_MFLike.py
|
"""
.. module:: theoryforge
The ``TheoryForge_MFLike`` class applies the foreground spectra and systematic
effects to the theory spectra provided by ``MFLike``. To do that, ``MFLike``
provides ``TheoryForge_MFLike`` with the appropriate list of channels, the
requested temperature/polarization fields, the
:math:`\ell` ranges and a dictionary of the passbands read from the ``sacc`` file:
.. code-block:: python
bands = {"experiment_channel": {{"nu": [freqs...],
"bandpass": [...]}}, ...}
This dictionary is then passed to ``Bandpass`` (through ``Foreground``)
to compute the bandpass
transmissions, which are then used for the actual foreground spectra computation.
If one wants to use this class as standalone, the ``bands`` dictionary is
filled when initializing ``TheoryForge_MFLike``. The name of the channels to use
are read from the ``exp_ch`` list in ``TheoryForge_MFLike.yaml``, the effective
frequencies are in the ``eff_freqs`` list. Of course the effective frequencies
have to match the information from ``exp_ch``, i.e.:
.. code-block:: yaml
exp_ch: ["LAT_93", "LAT_145", "ACT_145"]
eff_freqs: [93, 145, 145]
This class applies three kinds of systematic effects to the CMB + foreground
power spectrum:
* calibrations (global ``calG_all``, per channel ``cal_exp_nu``, per field
``calT_exp_nu``, ``calE_exp_nu``)
* polarization angles effect (``alpha_exp_nu``)
* systematic templates (e.g. T --> P leakage). In this case the dictionary
``systematics_template`` has to be filled with the correct path
``rootname``:
.. code-block:: yaml
systematics_template:
rootname: "test_template"
If left ``null``, no systematic template is applied.
The bandpass shifts are applied within the ``Bandpass`` class.
The values of the systematic parameters are set in ``TheoryForge_MFLike.yaml``.
They have to be named as ``cal/calT/calE/alpha`` + ``_`` + experiment_channel string
(e.g. ``LAT_93/dr6_pa4_f150``).
"""
import numpy as np
import os
from typing import Optional
from cobaya.theory import Theory
from cobaya.tools import are_different_params_lists
from cobaya.log import LoggedError
class TheoryForge_MFLike(Theory):
# attributes set from .yaml
data_folder: Optional[str]
exp_ch: list
eff_freqs: list
spectra: dict
systematics_template: dict
def initialize(self):
self.lmin = self.spectra["lmin"]
self.lmax = self.spectra["lmax"]
self.ell = np.arange(self.lmin, self.lmax + 1)
# State requisites to the theory code
# Which lmax for theory CMB
# Note this must be greater than lmax above to avoid approx errors
self.lmax_boltzmann = 9000
# Which lmax for theory FG
# This can be larger than lmax boltzmann
self.lmax_fg = 9000
# Which spectra to consider
self.requested_cls = self.spectra["polarizations"]
# Set lmax for theory CMB requirements
self.lcuts = {k: self.lmax_boltzmann for k in self.requested_cls}
if hasattr(self.eff_freqs, "__len__"):
if not len(self.exp_ch) == len(self.eff_freqs):
raise LoggedError(
                    self.log, "list of effective frequencies has to have "
                              "the same length as the list of channels!"
)
# self.bands to be filled with passbands read from sacc file
# if mflike is used
self.bands = {f"{expc}_s0": {'nu': [self.eff_freqs[iexpc]], 'bandpass': [1.]}
for iexpc, expc in enumerate(self.exp_ch)}
self.expected_params_nuis = ["cal_LAT_93", "cal_LAT_145", "cal_LAT_225",
"calT_LAT_93", "calE_LAT_93",
"calT_LAT_145", "calE_LAT_145",
"calT_LAT_225", "calE_LAT_225",
"calG_all",
"alpha_LAT_93", "alpha_LAT_145",
"alpha_LAT_225",
]
self.use_systematics_template = bool(self.systematics_template)
if self.use_systematics_template:
# Initialize template for marginalization, if needed
self._init_template_from_file()
def initialize_with_params(self):
# Check that the parameters are the right ones
differences = are_different_params_lists(
self.input_params, self.expected_params_nuis,
name_A="given", name_B="expected")
if differences:
raise LoggedError(
self.log, "Configuration error in parameters: %r.",
differences)
def must_provide(self, **requirements):
# cmbfg_dict is required by mflike
# and requires some params to be computed
# Assign required params from mflike
# otherwise use default values
if "cmbfg_dict" in requirements:
req = requirements["cmbfg_dict"]
self.ell = req.get("ell", self.ell)
self.requested_cls = req.get("requested_cls", self.requested_cls)
self.lcuts = req.get("lcuts", self.lcuts)
self.exp_ch = req.get("exp_ch", self.exp_ch)
self.bands = req.get("bands", self.bands)
# theoryforge requires Cl from boltzmann solver
# and fg_dict from Foreground theory component
# Both requirements require some params to be computed
# Passing those from theoryforge
reqs = dict()
# Be sure that CMB is computed at lmax > lmax_data (lcuts from mflike here)
reqs["Cl"] = {k: max(c, self.lmax_boltzmann + 1) for k, c in self.lcuts.items()}
reqs["fg_dict"] = {"requested_cls": self.requested_cls,
"ell": np.arange(max(self.ell[-1], self.lmax_fg + 1)),
"exp_ch": self.exp_ch, "bands": self.bands}
return reqs
def get_cmb_theory(self, **params):
return self.provider.get_Cl(ell_factor=True)
def get_foreground_theory(self, **params):
return self.provider.get_fg_dict()
def calculate(self, state, want_derived=False, **params_values_dict):
Dls = self.get_cmb_theory(**params_values_dict)
params_values_nocosmo = {k: params_values_dict[k] for k in (
self.expected_params_nuis)}
fg_dict = self.get_foreground_theory(**params_values_nocosmo)
state["cmbfg_dict"] = self.get_modified_theory(Dls,
fg_dict, **params_values_nocosmo)
def get_cmbfg_dict(self):
return self.current_state["cmbfg_dict"]
def get_modified_theory(self, Dls, fg_dict, **params):
"""
Takes the theory :math:`D_{\ell}`, sums it to the total
foreground power spectrum (possibly computed with bandpass
shift and bandpass integration) and applies calibration,
polarization angles rotation and systematic templates.
:param Dls: CMB theory spectra
:param fg_dict: total foreground spectra, provided by
``soliket.Foreground``
:param *params: dictionary of nuisance and foregrounds parameters
:return: the CMB+foregrounds :math:`D_{\ell}` dictionary,
modulated by systematics
"""
self.Dls = Dls
nuis_params = {k: params[k] for k in self.expected_params_nuis}
cmbfg_dict = {}
# Sum CMB and FGs
for f1 in self.exp_ch:
for f2 in self.exp_ch:
for s in self.requested_cls:
cmbfg_dict[s, f1, f2] = (self.Dls[s][self.ell] +
fg_dict[s, 'all', f1, f2][self.ell])
# Apply alm based calibration factors
cmbfg_dict = self._get_calibrated_spectra(cmbfg_dict, **nuis_params)
# Introduce spectra rotations
cmbfg_dict = self._get_rotated_spectra(cmbfg_dict, **nuis_params)
# Introduce templates of systematics from file, if needed
if self.use_systematics_template:
cmbfg_dict = self._get_template_from_file(cmbfg_dict, **nuis_params)
return cmbfg_dict
def _get_calibrated_spectra(self, dls_dict, **nuis_params):
r"""
Calibrates the spectra through calibration factors at
the map level:
.. math::
D^{{\rm cal}, TT, \nu_1 \nu_2}_{\ell} &= \frac{1}{
{\rm cal}_{G}\, {\rm cal}^{\nu_1} \, {\rm cal}^{\nu_2}\,
{\rm cal}^{\nu_1}_{\rm T}\,
{\rm cal}^{\nu_2}_{\rm T}}\, D^{TT, \nu_1 \nu_2}_{\ell}
D^{{\rm cal}, TE, \nu_1 \nu_2}_{\ell} &= \frac{1}{
{\rm cal}_{G}\,{\rm cal}^{\nu_1} \, {\rm cal}^{\nu_2}\,
{\rm cal}^{\nu_1}_{\rm T}\,
            {\rm cal}^{\nu_2}_{\rm E}}\, D^{TE, \nu_1 \nu_2}_{\ell}
D^{{\rm cal}, EE, \nu_1 \nu_2}_{\ell} &= \frac{1}{
{\rm cal}_{G}\,{\rm cal}^{\nu_1} \, {\rm cal}^{\nu_2}\,
{\rm cal}^{\nu_1}_{\rm E}\,
{\rm cal}^{\nu_2}_{\rm E}}\, D^{EE, \nu_1 \nu_2}_{\ell}
It uses the ``syslibrary.syslib_mflike.Calibration_alm`` function.
:param dls_dict: the CMB+foregrounds :math:`D_{\ell}` dictionary
:param *nuis_params: dictionary of nuisance parameters
:return: dictionary of calibrated CMB+foregrounds :math:`D_{\ell}`
"""
from syslibrary import syslib_mflike as syl
cal_pars = {}
if "tt" in self.requested_cls or "te" in self.requested_cls:
cal = (nuis_params["calG_all"] *
np.array([nuis_params[f"cal_{exp}"] * nuis_params[f"calT_{exp}"]
for exp in self.exp_ch]))
cal_pars["tt"] = 1 / cal
if "ee" in self.requested_cls or "te" in self.requested_cls:
cal = (nuis_params["calG_all"] *
np.array([nuis_params[f"cal_{exp}"] * nuis_params[f"calE_{exp}"]
for exp in self.exp_ch]))
cal_pars["ee"] = 1 / cal
calib = syl.Calibration_alm(ell=self.ell, spectra=dls_dict)
return calib(cal1=cal_pars, cal2=cal_pars, nu=self.exp_ch)
###########################################################################
# This part deals with rotation of spectra
# Each freq {freq1,freq2,...,freqn} gets a rotation angle alpha_LAT_93,
# alpha_LAT_145, etc..
###########################################################################
def _get_rotated_spectra(self, dls_dict, **nuis_params):
r"""
Rotates the polarization spectra through polarization angles:
.. math::
D^{{\rm rot}, TE, \nu_1 \nu_2}_{\ell} &= \cos(\alpha^{\nu_2})
D^{TE, \nu_1 \nu_2}_{\ell}
D^{{\rm rot}, EE, \nu_1 \nu_2}_{\ell} &= \cos(\alpha^{\nu_1})
\cos(\alpha^{\nu_2}) D^{EE, \nu_1 \nu_2}_{\ell}
It uses the ``syslibrary.syslib_mflike.Rotation_alm`` function.
:param dls_dict: the CMB+foregrounds :math:`D_{\ell}` dictionary
:param *nuis_params: dictionary of nuisance parameters
:return: dictionary of rotated CMB+foregrounds :math:`D_{\ell}`
"""
from syslibrary import syslib_mflike as syl
rot_pars = [nuis_params[f"alpha_{exp}"] for exp in self.exp_ch]
rot = syl.Rotation_alm(ell=self.ell, spectra=dls_dict)
return rot(rot_pars, nu=self.exp_ch, cls=self.requested_cls)
###########################################################################
# This part deals with template marginalization
    # A dictionary of template dls is read from yaml (likely not efficient)
# then rescaled and added to theory dls
###########################################################################
# Initializes the systematics templates
# This is slow, but should be done only once
def _init_template_from_file(self):
"""
Reads the systematics template from file, using
the ``syslibrary.syslib_mflike.ReadTemplateFromFile``
function.
"""
if not self.systematics_template.get("rootname"):
raise LoggedError(self.log, "Missing 'rootname' for systematics template!")
from syslibrary import syslib_mflike as syl
# decide where to store systematics template.
# Currently stored inside syslibrary package
templ_from_file = \
syl.ReadTemplateFromFile(rootname=self.systematics_template["rootname"])
self.dltempl_from_file = templ_from_file(ell=self.ell)
def _get_template_from_file(self, dls_dict, **nuis_params):
"""
Adds the systematics template, modulated by ``nuis_params['templ_freq']``
parameters, to the :math:`D_{\ell}`.
:param dls_dict: the CMB+foregrounds :math:`D_{\ell}` dictionary
:param *nuis_params: dictionary of nuisance parameters
:return: dictionary of CMB+foregrounds :math:`D_{\ell}`
with systematics templates
"""
# templ_pars=[nuis_params['templ_'+str(fr)] for fr in self.exp_ch]
# templ_pars currently hard-coded
# but ideally should be passed as input nuisance
templ_pars = {cls: np.zeros((len(self.exp_ch), len(self.exp_ch)))
for cls in self.requested_cls}
for cls in self.requested_cls:
for i1, f1 in enumerate(self.exp_ch):
for i2, f2 in enumerate(self.exp_ch):
dls_dict[cls, f1, f2] += (templ_pars[cls][i1][i2] *
self.dltempl_from_file[cls, f1, f2])
return dls_dict
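if __name__ == "__main__":
    # Simplified standalone sketch (illustrative numbers only) of the TT calibration
    # formula documented in ``_get_calibrated_spectra``; the class itself delegates
    # the actual operation to ``syslibrary.syslib_mflike.Calibration_alm``.
    ell_demo = np.arange(2, 500)
    dl_tt = 1.0e3 * (ell_demo / 80.0) ** -1.0  # toy TT spectrum
    calG_all, cal_1, cal_2, calT_1, calT_2 = 1.0, 1.01, 0.99, 1.0, 1.02
    dl_tt_cal = dl_tt / (calG_all * cal_1 * cal_2 * calT_1 * calT_2)
    print(dl_tt_cal[:3])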
| 13,738
| 38.366762
| 88
|
py
|
SOLikeT
|
SOLikeT-master/soliket/mflike/__init__.py
|
from .mflike import MFLike, TestMFLike
from .theoryforge_MFLike import TheoryForge_MFLike
| 90
| 29.333333
| 50
|
py
|
SOLikeT
|
SOLikeT-master/soliket/clusters/massfunc.py
|
import numpy as np
from scipy.interpolate import RegularGridInterpolator
from .tinker import dn_dlogM
from ..constants import MSUN_CGS, G_CGS, MPC2CM
np.seterr(divide='ignore', invalid='ignore')
class HMF:
def __init__(self, om, Ez, pk=None, kh=None, zarr=None):
# Initialize redshift and mass ranges
if zarr is None:
self.zarr = np.arange(0.05, 1.95, 0.1)
else:
self.zarr = zarr
# self.M = 10**np.arange(np.log10(5e13), 15.7, 0.02)
# self.M = 10**np.arange(13.5, 15.7, 0.02)
M_edges = 10 ** np.arange(13.5, 15.72, 0.02)
self.M = (M_edges[1:] + M_edges[:-1]) / 2. # 10**np.arange(13.5, 15.7, 0.02)
assert len(Ez) == len(zarr), "Ez and z arrays do not match"
self.E_z = Ez
# Initialize rho critical values for usage
self.om = om
self.rho_crit0H100 = (3. / (8. * np.pi) * (100 * 1.e5) ** 2.) \
/ G_CGS * MPC2CM / MSUN_CGS
self.rhoc0om = self.rho_crit0H100 * self.om
        if pk is None:
            raise ValueError("HMF requires a precomputed matter power spectrum: "
                             "pass both pk and kh.")
        self.pk = pk
        self.kh = kh
        # self.kh, self.pk = self._pk(self.zarr)
def rhoc(self):
# critical density as a function of z
ans = self.rho_crit0H100 * self.E_z ** 2.
return ans
def rhom(self):
# mean matter density as a function of z
ans = self.rhoc0om * (1.0 + self.zarr) ** 3
return ans
def critdensThreshold(self, deltac):
rho_treshold = deltac * self.rhoc() / self.rhom()
return rho_treshold
def dn_dM(self, M, delta):
"""
dN/dmdV Mass Function
M here is in MDeltam but we can convert
"""
delts = self.critdensThreshold(delta)
dn_dlnm = dn_dlogM(M, self.zarr, self.rhoc0om, delts, self.kh, self.pk,
'comoving')
dn_dm = dn_dlnm / M[:, None]
return dn_dm
def inter_dndmLogm(self, delta, M=None):
"""
interpolating over M and z for faster calculations
"""
if M is None:
M = self.M
dndM = self.dn_dM(M, delta)
ans = RegularGridInterpolator((np.log10(M), self.zarr),
np.log10(dndM), method='cubic', fill_value=0)
return ans
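if __name__ == "__main__":
    # Minimal illustrative usage with a toy power-law P(k) and a flat-LCDM E(z);
    # a real run would pass the linear matter power spectrum from a Boltzmann code.
    # Run with ``python -m soliket.clusters.massfunc`` so the relative imports resolve.
    zarr = np.arange(0.05, 1.95, 0.1)
    Ez = np.sqrt(0.31 * (1.0 + zarr) ** 3 + 0.69)
    kh = np.logspace(-4, np.log10(5.0), 200)
    pk = np.tile(1.0e4 * kh / (1.0 + (kh / 0.02) ** 3), (len(zarr), 1))  # toy (nz, nk)
    hmf = HMF(om=0.31, Ez=Ez, pk=pk, kh=kh, zarr=zarr)
    dndm = hmf.dn_dM(hmf.M, delta=500.0)  # differential number density, shape (nM, nz)
    print(dndm.shape)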
| 2,360
| 30.065789
| 85
|
py
|
SOLikeT
|
SOLikeT-master/soliket/clusters/tinker.py
|
from builtins import zip
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as iuSpline
from scipy.integrate import simps
# Tinker stuff
tinker_data = np.transpose([[float(x) for x in line.split()]
for line in
"""200 0.186 1.47 2.57 1.19
300 0.200 1.52 2.25 1.27
400 0.212 1.56 2.05 1.34
600 0.218 1.61 1.87 1.45
800 0.248 1.87 1.59 1.58
1200 0.255 2.13 1.51 1.80
1600 0.260 2.30 1.46 1.97
2400 0.260 2.53 1.44 2.24
3200 0.260 2.66 1.41 2.44""".split('\n')])
tinker_splines = None
def tinker_params_spline(delta, z=None):
global tinker_splines
if tinker_splines is None:
tinker_splines = []
D, data = np.log(tinker_data[0]), tinker_data[1:]
for y in data:
# Extend to large Delta
p = np.polyfit(D[-2:], y[-2:], 1)
x = np.hstack((D, D[-1] + 3.))
y = np.hstack((y, np.polyval(p, x[-1])))
tinker_splines.append(iuSpline(x, y, k=2))
A0, a0, b0, c0 = [ts(np.log(delta)) for ts in tinker_splines]
if z is None:
return A0, a0, b0, c0
z = np.asarray(z)
A = A0 * (1 + z) ** -.14
a = a0 * (1 + z) ** -.06
alpha = 10. ** (-(((.75 / np.log10(delta / 75.))) ** 1.2))
b = b0 * (1 + z) ** -alpha
c = np.zeros(np.shape(z)) + c0
return A, a, b, c
def tinker_params_analytic(delta, z=None):
alpha = None
if np.asarray(delta).ndim == 0: # scalar delta.
A0, a0, b0, c0 = [p[0] for p in
tinker_params(np.array([delta]), z=None)]
if z is not None:
if delta < 75.:
alpha = 1.
else:
alpha = 10. ** (-(((.75 / np.log10(delta / 75.))) ** 1.2))
else:
log_delta = np.log10(delta)
A0 = 0.1 * log_delta - 0.05
a0 = 1.43 + (log_delta - 2.3) ** (1.5)
b0 = 1.0 + (log_delta - 1.6) ** (-1.5)
c0 = log_delta - 2.35
A0[delta > 1600] = .26
a0[log_delta < 2.3] = 1.43
b0[log_delta < 1.6] = 1.0
c0[c0 < 0] = 0.
c0 = 1.2 + c0 ** 1.6
if z is None:
return A0, a0, b0, c0
A = A0 * (1 + z) ** -.14
a = a0 * (1 + z) ** -.06
if alpha is None:
alpha = 10. ** (-(((.75 / np.log10(delta / 75.))) ** 1.2))
alpha[delta < 75.] = 1.
b = b0 * (1 + z) ** -alpha
c = np.zeros(np.shape(z)) + c0
return A, a, b, c
tinker_params = tinker_params_spline
def tinker_f(sigma, params):
A, a, b, c = params
return A * ((sigma / b) ** -a + 1) * np.exp(-c / sigma ** 2)
# Sigma-evaluation, and top-hat functions.
def radius_from_mass(M, rho):
"""
Convert mass M to radius R assuming density rho.
"""
return (3. * M / (4. * np.pi * rho)) ** (1 / 3.)
def top_hatf(kR):
"""
Returns the Fourier transform of the spherical top-hat function
evaluated at a given k*R.
Notes:
-------
* This is called many times and costs a lot of runtime.
* For small values, use Taylor series.
"""
out = np.nan_to_num(3 * (np.sin(kR) - (kR) * np.cos(kR))) / ((kR) ** 3)
return out
def sigma_sq_integral(R_grid, power_spt, k_val):
"""
Determines the sigma^2 parameter over the m-z grid by integrating over k.
Notes:
-------
* Fastest python solution I have found for this. There is probably a
smarter way using numpy arrays.
"""
to_integ = np.array(
[top_hatf(R_grid * k) ** 2 * np.tile(
power_spt[:, i],
(R_grid.shape[0], 1),
) * k ** 2 for k, i in zip(k_val, np.arange(len(k_val)))]
)
return simps(to_integ / (2 * np.pi ** 2), x=k_val, axis=0)
def dn_dlogM(M, z, rho, delta, k, P, comoving=False):
"""
M is (nM) or (nM, nz)
z is (nz)
rho is (nz)
delta is (nz) or scalar
k is (nk)
P is (nz,nk)
Somewhat awkwardly, k and P are comoving. rho really isn't.
return is (nM,nz)
"""
if M.ndim == 1:
M = M[:, None]
# Radius associated to mass, co-moving
R = radius_from_mass(M, rho)
if not comoving: # if you do this make sure rho still has shape of z.
R = R * np.transpose(1 + z)
# Fluctuations on those scales (P and k are comoving)
sigma = sigma_sq_integral(R, P, k) ** .5
# d log(sigma^-1)
# gradient is broken.
if R.shape[-1] == 1:
dlogs = -np.gradient(np.log(sigma[..., 0]))[:, None]
else:
dlogs = -np.gradient(np.log(sigma))[0]
# Evaluate Tinker mass function.
tp = tinker_params(delta, z)
tf = tinker_f(sigma, tp)
# dM; compute as M * dlogM since it is likely log-spaced.
if M.shape[-1] == 1:
dM = np.gradient(np.log(M[:, 0]))[:, None] * M
else:
dM = np.gradient(np.log(M))[0] * M
# Return dn / dlogM
return tf * rho * dlogs / dM
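if __name__ == "__main__":
    # Illustrative check (not part of the library): evaluate the Tinker parameters
    # and the multiplicity function f(sigma) at Delta = 200, z = 0.5.
    params = tinker_params(200.0, z=0.5)
    sigmas = np.linspace(0.5, 3.0, 6)
    print(tinker_f(sigmas, params))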
| 5,114
| 29.813253
| 77
|
py
|
SOLikeT
|
SOLikeT-master/soliket/clusters/survey.py
|
import os
import numpy as np
from scipy import interpolate
import astropy.io.fits as pyfits
# from astLib import astWCS
from astropy.wcs import WCS
from astropy.io import fits
import astropy.table as atpy
def read_clust_cat(fitsfile, qmin):
    hdulist = fits.open(fitsfile)
    data = hdulist[1].data
SNR = data.field("SNR2p4")
z = data.field("z")
zerr = data.field("zErr")
Y0 = data.field("y0tilde")
Y0err = data.field("y0tilde_err")
ind = np.where(SNR >= qmin)[0]
print("num clust ", np.shape(ind), qmin)
return z[ind], zerr[ind], Y0[ind], Y0err[ind]
def read_mock_cat(fitsfile, qmin):
    hdulist = fits.open(fitsfile)
    data = hdulist[1].data
SNR = data.field("fixed_SNR")
z = data.field("redshift")
zerr = data.field("redshiftErr")
Y0 = data.field("fixed_y_c")
Y0err = data.field("err_fixed_y_c")
ind = np.where(SNR >= qmin)[0]
return z[ind], zerr[ind], Y0[ind], Y0err[ind]
def read_matt_mock_cat(fitsfile, qmin):
    hdulist = fits.open(fitsfile)
    data = hdulist[1].data
# ra = data.field("RADeg")
# dec = data.field("decDeg")
z = data.field("redshift")
zerr = data.field("redshiftErr")
Y0 = data.field("fixed_y_c")
Y0err = data.field("fixed_err_y_c")
SNR = data.field("fixed_SNR")
# M = data.field("true_M500")
ind = np.where(SNR >= qmin)[0]
return z[ind], zerr[ind], Y0[ind], Y0err[ind]
def read_matt_cat(fitsfile, qmin):
    hdulist = fits.open(fitsfile)
    data = hdulist[1].data
z = data.field("redshift")
zerr = data.field("redshiftErr")
Y0 = data.field("fixed_y_c")
Y0err = data.field("fixed_err_y_c")
SNR = data.field("fixed_SNR")
ind = np.where(SNR >= qmin)[0]
return z[ind], zerr[ind], Y0[ind], Y0err[ind]
def loadAreaMask(extName, DIR):
"""Loads the survey area mask (i.e., after edge-trimming and point source masking,
produced by nemo).
Returns map array, wcs
"""
areaImg = pyfits.open(os.path.join(DIR, "areaMask%s.fits.gz" % (extName)))
areaMap = areaImg[0].data
wcs = WCS(areaImg[0].header) # , mode="pyfits")
areaImg.close()
return areaMap, wcs
def loadRMSmap(extName, DIR):
"""Loads the survey RMS map (produced by nemo).
Returns map array, wcs
"""
areaImg = pyfits.open(
os.path.join(DIR, "RMSMap_Arnaud_M2e14_z0p4%s.fits.gz" % (extName))
)
areaMap = areaImg[0].data
wcs = WCS(areaImg[0].header) # , mode="pyfits")
areaImg.close()
return areaMap, wcs
def loadQ(source, tileNames=None):
"""Load the filter mismatch function Q as a dictionary of spline fits.
Args:
source (NemoConfig or str): Either the path to a .fits table (containing Q fits
for all tiles - this is normally selFn/QFit.fits), or a NemoConfig object
(from which the path and tiles to use will be inferred).
tileNames (optional, list): A list of tiles for which the Q function will be
extracted. If source is a NemoConfig object, this should be set to None.
Returns:
A dictionary (with tile names as keys), containing spline knots for the Q
function for each tile.
"""
if type(source) == str:
combinedQTabFileName = source
else:
# We should add a check to confirm this is actually a NemoConfig object
combinedQTabFileName = os.path.join(source.selFnDir, "QFit.fits")
tileNames = source.tileNames
tckDict = {}
if os.path.exists(combinedQTabFileName):
combinedQTab = atpy.Table().read(combinedQTabFileName)
for key in combinedQTab.keys():
if key != "theta500Arcmin":
tckDict[key] = interpolate.splrep(
combinedQTab["theta500Arcmin"], combinedQTab[key]
)
else:
if tileNames is None:
raise Exception(
"If source does not point to a complete QFit.fits file,\
you need to supply tileNames."
)
for tileName in tileNames:
tab = atpy.Table().read(
combinedQTabFileName.replace(".fits", "#%s.fits" % (tileName))
)
tckDict[tileName] = interpolate.splrep(tab["theta500Arcmin"], tab["Q"])
return tckDict
class SurveyData:
def __init__(
self,
nemoOutputDir,
ClusterCat,
qmin=5.6,
szarMock=False,
MattMock=False,
tiles=False,
num_noise_bins=20,
):
self.nemodir = nemoOutputDir
self.tckQFit = loadQ(self.nemodir + "/QFit.fits")
self.qmin = qmin
self.tiles = tiles
self.num_noise_bins = num_noise_bins
if szarMock:
print("mock catalog")
self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = read_mock_cat(
ClusterCat, self.qmin
)
elif MattMock:
print("Matt mock catalog")
self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = read_matt_cat(
ClusterCat, self.qmin
)
else:
print("real catalog")
self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = read_clust_cat(
ClusterCat, self.qmin
)
if tiles:
self.filetile = self.nemodir + "/tileAreas.txt"
            self.tilenames = np.loadtxt(
                self.filetile, dtype=str, usecols=0, unpack=True
            )
            self.tilearea = np.loadtxt(
                self.filetile, dtype=float, usecols=1, unpack=True
            )
self.fsky = []
self.mask = []
self.mwcs = []
self.rms = []
self.rwcs = []
self.rmstotal = np.array([])
for i in range(len(self.tilearea)):
self.fsky.append(self.tilearea[i] / 41252.9612)
tempmask, tempmwcs = loadAreaMask("#" + self.tilenames[i], self.nemodir)
self.mask.append(tempmask)
self.mwcs.append(tempmwcs)
temprms, temprwcs = loadRMSmap("#" + self.tilenames[i], self.nemodir)
self.rms.append(temprms)
self.rwcs.append(temprwcs)
self.rmstotal = np.append(self.rmstotal, temprms[temprms > 0])
self.fskytotal = np.sum(self.fsky)
else:
self.rms, self.rwcs = loadRMSmap("", self.nemodir)
self.mask, self.mwcs = loadAreaMask("", self.nemodir)
self.rmstotal = self.rms[self.rms > 0]
self.fskytotal = 987.5 / 41252.9612
count_temp, bin_edge = np.histogram(
np.log10(self.rmstotal), bins=self.num_noise_bins
)
self.frac_of_survey = count_temp * 1.0 / np.sum(count_temp)
self.Ythresh = 10 ** ((bin_edge[:-1] + bin_edge[1:]) / 2.0)
@property
def Q(self):
if self.tiles:
return self.tckQFit["Q"]
else:
return self.tckQFit["PRIMARY"]
| 6,963
| 31.849057
| 88
|
py
|
SOLikeT
|
SOLikeT-master/soliket/clusters/sz_utils.py
|
import numpy as np
from scipy import interpolate
# from astropy.cosmology import FlatLambdaCDM
# from nemo import signals
from ..constants import MPC2CM, MSUN_CGS, G_CGS, C_M_S, T_CMB
from ..constants import h_Planck, k_Boltzmann, electron_mass_kg, elementary_charge
# from .clusters import C_KM_S as C_in_kms
rho_crit0H100 = (3. / (8. * np.pi) * (100. * 1.e5) ** 2.) \
/ G_CGS * MPC2CM / MSUN_CGS
def gaussian(xx, mu, sig, noNorm=False):
if noNorm:
return np.exp(-1.0 * (xx - mu) ** 2 / (2.0 * sig ** 2.0))
else:
return 1.0 / (sig * np.sqrt(2 * np.pi)) \
* np.exp(-1.0 * (xx - mu) ** 2 / (2.0 * sig ** 2.0))
class szutils:
def __init__(self, Survey):
self.LgY = np.arange(-6, -2.5, 0.01)
self.Survey = Survey
# self.rho_crit0H100 = (3. / (8. * np.pi) * \
# (100. * 1.e5)**2.) / G_in_cgs * Mpc_in_cm / MSun_in_g
def P_Yo(self, LgY, M, z, param_vals, Ez_fn, Da_fn):
H0 = param_vals["H0"]
Ma = np.outer(M, np.ones(len(LgY[0, :])))
Ytilde, theta0, Qfilt = y0FromLogM500(
np.log10(param_vals["massbias"] * Ma / (H0 / 100.0)),
z,
self.Survey.Q,
sigma_int=param_vals["scat"],
B0=param_vals["B0"],
H0=param_vals["H0"],
Ez_fn=Ez_fn,
Da_fn=Da_fn
)
Y = 10 ** LgY
# Ytilde = np.repeat(Ytilde[:, :, np.newaxis], LgY.shape[2], axis=2)
# ind = 20
# print ("M,z,y~",M[ind],z,Ytilde[ind,0])
numer = -1.0 * (np.log(Y / Ytilde)) ** 2
ans = (
1.0 / (param_vals["scat"] * np.sqrt(2 * np.pi)) *
np.exp(numer / (2.0 * param_vals["scat"] ** 2))
)
return ans
def P_Yo_vec(self, LgY, M, z, param_vals, Ez_fn, Da_fn):
H0 = param_vals["H0"]
# Ma = np.outer(M, np.ones(len(LgY[0, :])))
Ytilde, theta0, Qfilt = y0FromLogM500(
np.log10(param_vals["massbias"] * M / (H0 / 100.0)),
z,
self.Survey.Q,
sigma_int=param_vals["scat"],
B0=param_vals["B0"],
H0=param_vals["H0"],
Ez_fn=Ez_fn,
Da_fn=Da_fn,
)
Y = 10 ** LgY
Ytilde = np.repeat(Ytilde[:, :, np.newaxis], LgY.shape[2], axis=2)
numer = -1.0 * (np.log(Y / Ytilde)) ** 2
ans = (
1.0 / (param_vals["scat"] * np.sqrt(2 * np.pi)) *
np.exp(numer / (2.0 * param_vals["scat"] ** 2))
)
return ans
def Y_erf(self, Y, Ynoise):
qmin = self.Survey.qmin
ans = Y * 0.0
ans[Y - qmin * Ynoise > 0] = 1.0
return ans
def P_of_gt_SN(self, LgY, MM, zz, Ynoise, param_vals, Ez_fn, Da_fn):
Y = 10 ** LgY
sig_tr = np.outer(np.ones([MM.shape[0], MM.shape[1]]), self.Y_erf(Y, Ynoise))
sig_thresh = np.reshape(sig_tr,
(MM.shape[0], MM.shape[1], len(self.Y_erf(Y, Ynoise))))
LgYa = np.outer(np.ones([MM.shape[0], MM.shape[1]]), LgY)
LgYa2 = np.reshape(LgYa, (MM.shape[0], MM.shape[1], len(LgY)))
P_Y = np.nan_to_num(self.P_Yo_vec(LgYa2, MM, zz, param_vals, Ez_fn, Da_fn))
ans = np.trapz(P_Y * sig_thresh, x=LgY, axis=2) * np.log(10)
return ans
def PfuncY(self, YNoise, M, z_arr, param_vals, Ez_fn, Da_fn):
LgY = self.LgY
        M_arr = np.outer(M, np.ones([len(z_arr)]))
        P_func = self.P_of_gt_SN(LgY, M_arr, z_arr, YNoise, param_vals, Ez_fn, Da_fn)
return P_func
def P_of_Y_per(self, LgY, MM, zz, Y_c, Y_err, param_vals):
P_Y_sig = np.outer(np.ones(len(MM)), self.Y_prob(Y_c, LgY, Y_err))
LgYa = np.outer(np.ones([MM.shape[0], MM.shape[1]]), LgY)
LgYa2 = np.reshape(LgYa, (MM.shape[0], MM.shape[1], len(LgY)))
P_Y = np.nan_to_num(self.P_Yo(LgYa2, MM, zz, param_vals))
ans = np.trapz(P_Y * P_Y_sig, LgY, np.diff(LgY), axis=1) * np.log(10)
return ans
def Y_prob(self, Y_c, LgY, YNoise):
Y = 10 ** (LgY)
ans = gaussian(Y, Y_c, YNoise)
return ans
def Pfunc_per(self, MM, zz, Y_c, Y_err, param_vals, Ez_fn, Da_fn):
LgY = self.LgY
LgYa = np.outer(np.ones(len(MM)), LgY)
P_Y_sig = self.Y_prob(Y_c, LgY, Y_err)
P_Y = np.nan_to_num(self.P_Yo(LgYa, MM, zz, param_vals, Ez_fn, Da_fn))
ans = np.trapz(P_Y * P_Y_sig, LgY, np.diff(LgY), axis=1)
return ans
def Pfunc_per_parallel(self, Marr, zarr, Y_c, Y_err, param_vals, Ez_fn, Da_fn):
# LgY = self.LgY
# LgYa = np.outer(np.ones(Marr.shape[0]), LgY)
# LgYa = np.outer(np.ones([Marr.shape[0], Marr.shape[1]]), LgY)
# LgYa2 = np.reshape(LgYa, (Marr.shape[0], Marr.shape[1], len(LgY)))
# Yc_arr = np.outer(np.ones(Marr.shape[0]), Y_c)
# Yerr_arr = np.outer(np.ones(Marr.shape[0]), Y_err)
# Yc_arr = np.repeat(Yc_arr[:, :, np.newaxis], len(LgY), axis=2)
# Yerr_arr = np.repeat(Yerr_arr[:, :, np.newaxis], len(LgY), axis=2)
# P_Y_sig = self.Y_prob(Yc_arr, LgYa2, Yerr_arr)
# P_Y = np.nan_to_num(self.P_Yo(LgYa2, Marr, zarr, param_vals, Ez_fn))
P_Y_sig = self.Y_prob(Y_c, self.LgY, Y_err)
P_Y = np.nan_to_num(self.P_Yo(self.LgY, Marr, zarr, param_vals, Ez_fn, Da_fn))
ans = np.trapz(P_Y * P_Y_sig, x=self.LgY, axis=2)
return ans
def Pfunc_per_zarr(self, MM, z_c, Y_c, Y_err, int_HMF, param_vals):
LgY = self.LgY
# old was z_arr
# P_func = np.outer(MM, np.zeros([len(z_arr)]))
# M_arr = np.outer(MM, np.ones([len(z_arr)]))
# M200 = np.outer(MM, np.zeros([len(z_arr)]))
# zarr = np.outer(np.ones([len(M)]), z_arr)
P_func = self.P_of_Y_per(LgY, MM, z_c, Y_c, Y_err, param_vals)
return P_func
###
"""Routines from nemo (author: Matt Hilton ) to limit dependencies"""
# ----------------------------------------------------------------------------------------
def calcR500Mpc(z, M500, Ez_fn, H0):
"""Given z, M500 (in MSun), returns R500 in Mpc, with respect to critical density.
"""
if type(M500) == str:
raise Exception(
"M500 is a string - check M500MSun in your .yml config file:\
use, e.g., 1.0e+14 (not 1e14 or 1e+14)"
)
Ez = Ez_fn(z)
criticalDensity = rho_crit0H100 * (H0 / 100.) ** 2 * Ez ** 2
R500Mpc = np.power((3 * M500) / (4 * np.pi * 500 * criticalDensity), 1.0 / 3.0)
return R500Mpc
# ----------------------------------------------------------------------------------------
def calcTheta500Arcmin(z, M500, Ez_fn, Da_fn, H0):
"""Given z, M500 (in MSun), returns angular size equivalent to R500, with respect to
critical density.
"""
R500Mpc = calcR500Mpc(z, M500, Ez_fn, H0)
DAz = Da_fn(z)
theta500Arcmin = np.degrees(np.arctan(R500Mpc / DAz)) * 60.0
return theta500Arcmin
# ----------------------------------------------------------------------------------------
def calcQ(theta500Arcmin, tck):
"""Returns Q, given theta500Arcmin, and a set of spline fit knots for (theta, Q).
"""
# Q=np.poly1d(coeffs)(theta500Arcmin)
Q = interpolate.splev(theta500Arcmin, tck)
return Q
# ----------------------------------------------------------------------------------------
def calcFRel(z, M500, obsFreqGHz=148.0, Ez_fn=None):
"""Calculates relativistic correction to SZ effect at specified frequency, given z,
M500 in MSun.
This assumes the Arnaud et al. (2005) M-T relation, and applies formulae of
Itoh et al. (1998)
As for H13, we return fRel = 1 + delta_SZE (see also Marriage et al. 2011)
"""
# Using Arnaud et al. (2005) M-T to get temperature
A = 3.84e14
B = 1.71
# TkeV=5.*np.power(((cosmoModel.efunc(z)*M500)/A), 1/B) # HMF/Astropy
Ez = Ez_fn(z)
TkeV = 5.0 * np.power(((Ez * M500) / A), 1 / B) # Colossus
TKelvin = TkeV * ((1000 * elementary_charge) / k_Boltzmann)
# Itoh et al. (1998) eqns. 2.25 - 2.30
thetae = (k_Boltzmann * TKelvin) / (electron_mass_kg * C_M_S ** 2)
X = (h_Planck * obsFreqGHz * 1e9) / (k_Boltzmann * T_CMB)
Xtw = X * (np.cosh(X / 2.0) / np.sinh(X / 2.0))
Stw = X / np.sinh(X / 2.0)
Y0 = -4 + Xtw
Y1 = (
-10.0
+ (47 / 2.0) * Xtw
- (42 / 5.0) * Xtw ** 2
+ (7 / 10.0) * Xtw ** 3
+ np.power(Stw, 2) * (-(21 / 5.0) + (7 / 5.0) * Xtw)
)
Y2 = (
-(15 / 2.0)
+ (1023 / 8.0) * Xtw
- (868 / 5.0) * Xtw ** 2
+ (329 / 5.0) * Xtw ** 3
- (44 / 5.0) * Xtw ** 4
+ (11 / 30.0) * Xtw ** 5
+ np.power(Stw, 2)
* (-(434 / 5.0) + (658 / 5.0) * Xtw
- (242 / 5.0) * Xtw ** 2
+ (143 / 30.0) * Xtw ** 3)
+ np.power(Stw, 4) * (-(44 / 5.0) + (187 / 60.0) * Xtw)
)
Y3 = (
(15 / 2.0)
+ (2505 / 8.0) * Xtw
- (7098 / 5.0) * Xtw ** 2
+ (14253 / 10.0) * Xtw ** 3
- (18594 / 35.0) * Xtw ** 4
+ (12059 / 140.0) * Xtw ** 5
- (128 / 21.0) * Xtw ** 6
+ (16 / 105.0) * Xtw ** 7
+ np.power(Stw, 2)
* (
-(7098 / 10.0)
+ (14253 / 5.0) * Xtw
- (102267 / 35.0) * Xtw ** 2
+ (156767 / 140.0) * Xtw ** 3
- (1216 / 7.0) * Xtw ** 4
+ (64 / 7.0) * Xtw ** 5
)
+ np.power(Stw, 4)
* (-(18594 / 35.0) + (205003 / 280.0) * Xtw
- (1920 / 7.0) * Xtw ** 2 + (1024 / 35.0) * Xtw ** 3)
+ np.power(Stw, 6) * (-(544 / 21.0) + (992 / 105.0) * Xtw)
)
Y4 = (
-(135 / 32.0)
+ (30375 / 128.0) * Xtw
- (62391 / 10.0) * Xtw ** 2
+ (614727 / 40.0) * Xtw ** 3
- (124389 / 10.0) * Xtw ** 4
+ (355703 / 80.0) * Xtw ** 5
- (16568 / 21.0) * Xtw ** 6
+ (7516 / 105.0) * Xtw ** 7
- (22 / 7.0) * Xtw ** 8
+ (11 / 210.0) * Xtw ** 9
+ np.power(Stw, 2)
* (
-(62391 / 20.0)
+ (614727 / 20.0) * Xtw
- (1368279 / 20.0) * Xtw ** 2
+ (4624139 / 80.0) * Xtw ** 3
- (157396 / 7.0) * Xtw ** 4
+ (30064 / 7.0) * Xtw ** 5
- (2717 / 7.0) * Xtw ** 6
+ (2761 / 210.0) * Xtw ** 7
)
+ np.power(Stw, 4)
* (
-(124389 / 10.0)
+ (6046951 / 160.0) * Xtw
- (248520 / 7.0) * Xtw ** 2
+ (481024 / 35.0) * Xtw ** 3
- (15972 / 7.0) * Xtw ** 4
+ (18689 / 140.0) * Xtw ** 5
)
+ np.power(Stw, 6)
* (-(70414 / 21.0) + (465992 / 105.0) * Xtw
- (11792 / 7.0) * Xtw ** 2 + (19778 / 105.0) * Xtw ** 3)
+ np.power(Stw, 8) * (-(682 / 7.0) + (7601 / 210.0) * Xtw)
)
deltaSZE = (
((X ** 3) / (np.exp(X) - 1))
* ((thetae * X * np.exp(X)) / (np.exp(X) - 1))
* (Y0 + Y1 * thetae + Y2 * thetae ** 2 + Y3 * thetae ** 3 + Y4 * thetae ** 4)
)
fRel = 1 + deltaSZE
return fRel
# ----------------------------------------------------------------------------------------
def y0FromLogM500(
log10M500,
z,
tckQFit,
tenToA0=4.95e-5,
B0=0.08,
Mpivot=3e14,
sigma_int=0.2,
fRelWeightsDict={148.0: 1.0},
H0=70.,
Ez_fn=None,
Da_fn=None
):
"""Predict y0~ given logM500 (in MSun) and redshift. Default scaling relation
parameters are A10 (as in H13).
Use cosmoModel (astropy.cosmology object) to change/specify cosmological parameters.
fRelWeightsDict is used to account for the relativistic correction when y0~ has been
constructed from multi-frequency maps. Weights should sum to 1.0; keys are observed
frequency in GHz.
Returns y0~, theta500Arcmin, Q
"""
if type(Mpivot) == str:
raise Exception(
"Mpivot is a string - check Mpivot in your .yml config file:\
use, e.g., 3.0e+14 (not 3e14 or 3e+14)"
)
# Filtering/detection was performed with a fixed fiducial cosmology... so we don't
# need to recalculate Q.
# We just need to recalculate theta500Arcmin and E(z) only
M500 = np.power(10, log10M500)
theta500Arcmin = calcTheta500Arcmin(z, M500, Ez_fn, Da_fn, H0)
Q = calcQ(theta500Arcmin, tckQFit)
Ez = Ez_fn(z)
# Relativistic correction: now a little more complicated, to account for fact y0~ maps
# are weighted sum of individual frequency maps, and relativistic correction size
# varies with frequency
fRels = []
freqWeights = []
for obsFreqGHz in fRelWeightsDict.keys():
fRels.append(calcFRel(z, M500, obsFreqGHz=obsFreqGHz, Ez_fn=Ez_fn))
freqWeights.append(fRelWeightsDict[obsFreqGHz])
fRel = np.average(np.array(fRels), axis=0, weights=freqWeights)
# UPP relation according to H13
# NOTE: m in H13 is M/Mpivot
# NOTE: this goes negative for crazy masses where the Q polynomial fit goes -ve, so
# ignore those
y0pred = tenToA0 * np.power(Ez, 2) * np.power(M500 / Mpivot, 1 + B0) * Q * fRel
return y0pred, theta500Arcmin, Q
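if __name__ == "__main__":
    # Illustrative check with a toy flat-LCDM E(z) interpolator (Om = 0.31 assumed):
    # the relativistic correction fRel at z = 0.5 for M500 = 3e14 MSun at 148 GHz.
    # Run with ``python -m soliket.clusters.sz_utils`` so the relative imports resolve.
    zgrid = np.linspace(0.0, 2.0, 41)
    Ez_demo = interpolate.interp1d(zgrid, np.sqrt(0.31 * (1.0 + zgrid) ** 3 + 0.69))
    print(calcFRel(0.5, 3.0e14, obsFreqGHz=148.0, Ez_fn=Ez_demo))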
| 13,757
| 32.474453
| 90
|
py
|
SOLikeT
|
SOLikeT-master/soliket/clusters/clusters.py
|
"""
requires extra: astlib
"""
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from pkg_resources import resource_filename
import pyccl as ccl
from ..poisson import PoissonLikelihood
from . import massfunc as mf
from .survey import SurveyData
from .sz_utils import szutils
C_KM_S = 2.99792e5
class SZModel:
pass
class ClusterLikelihood(PoissonLikelihood):
name = "Clusters"
columns = ["tsz_signal", "z", "tsz_signal_err"]
data_path = resource_filename("soliket", "clusters/data/selFn_equD56")
# data_path = resource_filename("soliket", "clusters/data/selFn_SO")
data_name = resource_filename("soliket", "clusters/data/E-D56Clusters.fits")
# data_name = resource_filename("soliket",
# "clusters/data/MFMF_WebSkyHalos_A10tSZ_3freq_tiles_mass.fits")
def initialize(self):
self.zarr = np.arange(0, 2, 0.05)
self.k = np.logspace(-4, np.log10(5), 200)
# self.mdef = ccl.halos.MassDef(500, 'critical')
super().initialize()
def get_requirements(self):
return {
"Pk_interpolator": {
"z": self.zarr,
"k_max": 5.0,
"nonlinear": False,
"hubble_units": False, # cobaya told me to
"k_hunit": False, # cobaya told me to
"vars_pairs": [["delta_nonu", "delta_nonu"]],
},
"Hubble": {"z": self.zarr},
"angular_diameter_distance": {"z": self.zarr},
"comoving_radial_distance": {"z": self.zarr}
# "CCL": {"methods": {"sz_model": self._get_sz_model}, "kmax": 10},
}
def _get_sz_model(self, cosmo):
model = SZModel()
model.hmf = ccl.halos.MassFuncTinker08(cosmo, mass_def=self.mdef)
model.hmb = ccl.halos.HaloBiasTinker10(
cosmo, mass_def=self.mdef, mass_def_strict=False
)
model.hmc = ccl.halos.HMCalculator(cosmo, model.hmf, model.hmb, self.mdef)
# model.szk = SZTracer(cosmo)
return model
def _get_catalog(self):
self.survey = SurveyData(
self.data_path, self.data_name
) # , MattMock=False,tiles=False)
self.szutils = szutils(self.survey)
df = pd.DataFrame(
{
"z": self.survey.clst_z.byteswap().newbyteorder(),
"tsz_signal": self.survey.clst_y0.byteswap().newbyteorder(),
"tsz_signal_err": self.survey.clst_y0err.byteswap().newbyteorder(),
}
)
return df
def _get_om(self):
return (self.theory.get_param("omch2") + self.theory.get_param("ombh2")) / (
(self.theory.get_param("H0") / 100.0) ** 2
)
def _get_ob(self):
return (self.theory.get_param("ombh2")) / (
(self.theory.get_param("H0") / 100.0) ** 2
)
def _get_Ez(self):
return self.theory.get_Hubble(self.zarr) / self.theory.get_param("H0")
def _get_Ez_interpolator(self):
return interp1d(self.zarr, self._get_Ez())
def _get_DAz(self):
return self.theory.get_angular_diameter_distance(self.zarr)
def _get_DAz_interpolator(self):
return interp1d(self.zarr, self._get_DAz())
def _get_HMF(self):
h = self.theory.get_param("H0") / 100.0
Pk_interpolator = self.theory.get_Pk_interpolator(
("delta_nonu", "delta_nonu"), nonlinear=False
).P
pks = Pk_interpolator(self.zarr, self.k)
# pkstest = Pk_interpolator(0.125, self.k )
# print (pkstest * h**3 )
Ez = (
self._get_Ez()
) # self.theory.get_Hubble(self.zarr) / self.theory.get_param("H0")
om = self._get_om()
hmf = mf.HMF(om, Ez, pk=pks * h**3, kh=self.k / h, zarr=self.zarr)
return hmf
def _get_param_vals(self, **kwargs):
# Read in scaling relation parameters
# scat = kwargs['scat']
# massbias = kwargs['massbias']
# B0 = kwargs['B']
B0 = 0.08
scat = 0.2
massbias = 1.0
H0 = self.theory.get_param("H0")
ob = self._get_ob()
om = self._get_om()
param_vals = {
"om": om,
"ob": ob,
"H0": H0,
"B0": B0,
"scat": scat,
"massbias": massbias,
}
return param_vals
def _get_rate_fn(self, **kwargs):
HMF = self._get_HMF()
param_vals = self._get_param_vals(**kwargs)
Ez_fn = self._get_Ez_interpolator()
DA_fn = self._get_DAz_interpolator()
dn_dzdm_interp = HMF.inter_dndmLogm(delta=500)
h = self.theory.get_param("H0") / 100.0
def Prob_per_cluster(z, tsz_signal, tsz_signal_err):
c_y = tsz_signal
c_yerr = tsz_signal_err
c_z = z
Pfunc_ind = self.szutils.Pfunc_per(
HMF.M, c_z, c_y * 1e-4, c_yerr * 1e-4, param_vals, Ez_fn, DA_fn
)
dn_dzdm = 10 ** np.squeeze(dn_dzdm_interp((np.log10(HMF.M), c_z))) * h**4.0
ans = np.trapz(dn_dzdm * Pfunc_ind, dx=np.diff(HMF.M, axis=0), axis=0)
return ans
return Prob_per_cluster
# Implement a function that returns a rate function (function of (tsz_signal, z))
def _get_dVdz(self):
"""dV/dzdOmega"""
DA_z = self.theory.get_angular_diameter_distance(self.zarr)
dV_dz = (
DA_z**2
* (1.0 + self.zarr) ** 2
/ (self.theory.get_Hubble(self.zarr) / C_KM_S)
)
# dV_dz *= (self.theory.get_param("H0") / 100.0) ** 3.0 # was h0
return dV_dz
def _get_n_expected(self, **kwargs):
# def Ntot_survey(self,int_HMF,fsky,Ythresh,param_vals):
HMF = self._get_HMF()
param_vals = self._get_param_vals(**kwargs)
Ez_fn = self._get_Ez_interpolator()
DA_fn = self._get_DAz_interpolator()
z_arr = self.zarr
h = self.theory.get_param("H0") / 100.0
Ntot = 0
dVdz = self._get_dVdz()
dn_dzdm = HMF.dn_dM(HMF.M, 500.0) * h**4.0 # getting rid of hs
for Yt, frac in zip(self.survey.Ythresh, self.survey.frac_of_survey):
Pfunc = self.szutils.PfuncY(Yt, HMF.M, z_arr, param_vals, Ez_fn, DA_fn)
N_z = np.trapz(
dn_dzdm * Pfunc, dx=np.diff(HMF.M[:, None] / h, axis=0), axis=0
)
Ntot += (
np.trapz(N_z * dVdz, x=z_arr)
* 4.0
* np.pi
* self.survey.fskytotal
* frac
)
return Ntot
def _test_n_tot(self, **kwargs):
HMF = self._get_HMF()
# param_vals = self._get_param_vals(**kwargs)
# Ez_fn = self._get_Ez_interpolator()
# DA_fn = self._get_DAz_interpolator()
z_arr = self.zarr
h = self.theory.get_param("H0") / 100.0
Ntot = 0
dVdz = self._get_dVdz()
dn_dzdm = HMF.dn_dM(HMF.M, 500.0) * h**4.0 # getting rid of hs
# Test Mass function against Nemo.
Pfunc = 1.0
N_z = np.trapz(dn_dzdm * Pfunc, dx=np.diff(HMF.M[:, None] / h, axis=0), axis=0)
Ntot = (
np.trapz(N_z * dVdz, x=z_arr)
* 4.0
* np.pi
* (600.0 / (4 * np.pi * (180 / np.pi) ** 2))
)
return Ntot
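# Hypothetical usage sketch (parameter values are illustrative, not defaults): the
# likelihood is meant to be evaluated through cobaya, along the lines of
#     info = {"likelihood": {"soliket.clusters.ClusterLikelihood": None},
#             "theory": {"camb": None},
#             "params": {"H0": 68.0, "ombh2": 0.022, "omch2": 0.12, ...}}
#     from cobaya.model import get_model
#     model = get_model(info)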
| 7,397
| 29.697095
| 89
|
py
|
SOLikeT
|
SOLikeT-master/soliket/clusters/__init__.py
|
from .clusters import ClusterLikelihood # noqa: F401
| 54
| 26.5
| 53
|
py
|
SOLikeT
|
SOLikeT-master/docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# Import SOLikeT (for autodoc)
import sys
sys.path.insert(0, "..")
# Create some mock imports
import mock
MOCK_MODULES = ["cosmopower", "tensorflow", "pyccl", "camb"]
for module in MOCK_MODULES:
sys.modules[module] = mock.Mock()
import soliket
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'SOLikeT'
copyright = '2023, The SO Collaboration'
author = 'The SO Collaboration'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"sphinx.ext.autodoc", # Generate doc pages from source docstrings
"sphinx.ext.viewcode", # Generate links to source code
"sphinx.ext.mathjax", # Mathematical symbols
"sphinx_rtd_theme", # readthedocs theme
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
| 1,440
| 31.022222
| 87
|
py
|
studd
|
studd-main/utils.py
|
import numpy as np
import pandas as pd
import copy
def num_cols(df):
types = [type(df[col].values[1]) for col in df.columns]
is_num = [not int(x is str) for x in types]
is_num_idx = np.flatnonzero(is_num)
return is_num_idx
def any_str_col(df):
types = [type(df[col].values[0]) for col in df.columns]
is_str = [int(x is str) for x in types]
is_str_tot = sum(is_str)
any_str = is_str_tot > 0
return any_str
def decode_bytes(ds):
for col in ds.columns:
col_str = ds[col].values
if type(col_str[0]) == bytes:
col_str = [x.decode('utf-8') for x in col_str]
ds[col] = col_str
else:
pass
return ds
def col_as_int(x):
ux = np.unique(x)
mydict = dict()
for i, elem in enumerate(ux):
mydict[elem] = i
num_col = [mydict[elem] for elem in x]
return num_col
def col_obj2num(x):
cols = x.columns[x.dtypes.eq('object')]
x[cols] = x[cols].apply(pd.to_numeric, errors='coerce')
return x
def as_df(X, y):
Xdf = pd.DataFrame(X, columns=["X" + str(i)
for i in range(X.shape[1])])
Xdf["target"] = y
return Xdf
def comb_df(df1, df2):
df1_ = copy.deepcopy(df1)
df2_ = copy.deepcopy(df2)
df3 = df1_.append(df2_, ignore_index=True)
df3.reset_index(drop=True, inplace=True)
return df3
| 1,397
| 18.150685
| 63
|
py
|
studd
|
studd-main/__init__.py
|
#
| 1
| 1
| 1
|
py
|
studd
|
studd-main/workflows.py
|
import pandas as pd
import numpy as np
from studd.studd_batch import STUDD
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier as RF
from skmultiflow.drift_detection.page_hinkley import PageHinkley as PHT
def Workflow(X, y, delta, window_size):
ucdd = STUDD(X=X, y=y, n_train=window_size)
ucdd.initial_fit(model=RF(), std_model=RF())
print("Detecting change by tracking features")
UFD = ucdd.drift_detection_uspv_x(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
X=X,
window_size=window_size,
pvalue=delta,
n_samples=window_size,
upd_model=True)
print("Detecting change with STUDD")
RES_STUDD = ucdd.drift_detection_std(datastream_=ucdd.datastream,
model_=ucdd.base_model,
std_model_=ucdd.student_model,
n_train_=ucdd.n_train,
n_samples=window_size,
delta=delta / 2,
upd_model=True,
upd_std_model=True,
detector=PHT)
print("Detecting change with bl1")
res_bl1 = ucdd.BL1_never_adapt(datastream_=ucdd.datastream,
model_=ucdd.base_model)
print("Detecting change with bl2")
res_bl2 = ucdd.BL2_retrain_after_w(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
n_samples=window_size)
print("Detecting change with SS")
SS = ucdd.drift_detection_spv(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
n_samples=window_size,
delay_time=0,
observation_ratio=1,
upd_model=True,
delta=delta,
detector=PHT)
print("Detecting change with UTH")
UHT = ucdd.drift_detection_uspv(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
use_prob=False,
n_samples=window_size,
method="ks",
window_size=window_size,
upd_model=True,
pvalue=delta)
print("Detecting change with UTHF")
UHTF = ucdd.drift_detection_uspv_f(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
use_prob=False,
n_samples=window_size,
method="ks",
window_size=window_size,
upd_model=True,
pvalue=delta)
DSS = ucdd.drift_detection_spv(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
delay_time=int(window_size / 2),
n_samples=window_size,
observation_ratio=1,
upd_model=True,
delta=delta,
detector=PHT)
WS = ucdd.drift_detection_spv(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
n_samples=window_size,
delay_time=0,
observation_ratio=.5,
upd_model=True,
delta=delta,
detector=PHT)
DWS = ucdd.drift_detection_spv(datastream_=ucdd.datastream,
model_=ucdd.base_model,
n_train_=ucdd.n_train,
n_samples=window_size,
delay_time=int(window_size / 2),
observation_ratio=0.5,
upd_model=True,
delta=delta,
detector=PHT)
training_info = ucdd.init_training_data
results = {
"STUDD": RES_STUDD,
"BL1": res_bl1,
"BL2": res_bl2,
"SS": SS,
"DSS": DSS,
"WS": WS,
"DWS": DWS,
"UHT": UHT,
"UHTF": UHTF,
"UFD": UFD}
perf_kpp = dict()
perf_acc = dict()
nupdates = dict()
pointsbought = dict()
for m in results:
x = results[m]
perf_acc_i = metrics.accuracy_score(y_true=x["preds"]["y"],
y_pred=x["preds"]["y_hat"])
perf_m = metrics.cohen_kappa_score(y1=x["preds"]["y"],
y2=x["preds"]["y_hat"])
pointsbought[m] = x["samples_used"]
nupdates[m] = x["n_updates"]
perf_kpp[m] = perf_m
perf_acc[m] = perf_acc_i
perf_kpp = pd.DataFrame(perf_kpp.items())
perf_acc = pd.DataFrame(perf_acc.items())
perf = pd.concat([perf_kpp.reset_index(drop=True), perf_acc], axis=1)
perf.columns = ['Method', 'Kappa', 'rm', 'Acc']
perf = perf.drop("rm", axis=1)
return perf, pointsbought, nupdates, training_info, results
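if __name__ == "__main__":
    # Illustrative run on a synthetic stream (small sizes so it finishes quickly);
    # a real experiment would use one of the drift benchmark datasets instead.
    from sklearn.datasets import make_classification
    X_syn, y_syn = make_classification(n_samples=3000, n_features=10, random_state=0)
    X_syn = pd.DataFrame(X_syn, columns=[f"X{i}" for i in range(X_syn.shape[1])])
    perf, pointsbought, nupdates, training_info, results = \
        Workflow(X=X_syn, y=y_syn, delta=0.002, window_size=500)
    print(perf)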
| 6,327
| 40.090909
| 73
|
py
|
studd
|
studd-main/studd/studd_batch.py
|
from skmultiflow.data.data_stream import DataStream
from skmultiflow.drift_detection.page_hinkley import PageHinkley as PHT
from ht_detectors.tracker_output import HypothesisTestDetector
import copy
import numpy as np
class STUDD:
def __init__(self, X, y, n_train):
"""
:param X:
:param y:
:param n_train:
"""
D = DataStream(X, y)
D.prepare_for_use()
self.datastream = D
self.n_train = n_train
self.W = n_train
self.base_model = None
self.student_model = None
self.init_training_data = None
def initial_fit(self, model, std_model):
"""
:return:
"""
X_tr, y_tr = self.datastream.next_sample(self.n_train)
model.fit(X_tr, y_tr)
yhat_tr = model.predict(X_tr)
std_model.fit(X_tr, yhat_tr)
self.base_model = model
self.student_model = std_model
self.init_training_data = dict({"X": X_tr, "y": y_tr, "y_hat": yhat_tr})
DETECTOR = PHT
@staticmethod
def drift_detection_std(datastream_, model_,
std_model_, n_train_,
delta, n_samples,
upd_model=False,
upd_std_model=True,
detector=DETECTOR):
datastream = copy.deepcopy(datastream_)
base_model = copy.deepcopy(model_)
student_model = copy.deepcopy(std_model_)
n_train = copy.deepcopy(n_train_)
std_detector = detector(delta=delta)
std_alarms = []
iter = n_train
n_updates = 0
samples_used = 0
y_hat_hist = []
y_buffer, y_hist = [], []
X_buffer, X_hist = [], []
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_hist.append(yi[0])
y_buffer.append(yi[0])
X_hist.append(Xi[0])
X_buffer.append(Xi[0])
model_yhat = base_model.predict(Xi)
y_hat_hist.append(model_yhat[0])
std_model_yhat = student_model.predict(Xi)
std_err = int(model_yhat != std_model_yhat)
std_detector.add_element(std_err)
if std_detector.detected_change():
print("Found change std in iter: " + str(iter))
std_alarms.append(iter)
if upd_model:
X_buffer = np.array(X_buffer)
y_buffer = np.array(y_buffer)
samples_used_iter = len(y_buffer[-n_samples:])
print("Updating model with " + str(samples_used_iter), " Observations")
base_model.fit(X_buffer[-n_samples:],
y_buffer[-n_samples:])
yhat_buffer = base_model.predict(X_buffer)
if upd_std_model:
student_model.fit(X_buffer, yhat_buffer)
else:
student_model.fit(X_buffer[-n_samples:],
yhat_buffer[-n_samples:])
# y_buffer = []
# X_buffer = []
y_buffer = list(y_buffer)
X_buffer = list(X_buffer)
n_updates += 1
samples_used += samples_used_iter
print("Moving on")
iter += 1
preds = dict({"y": y_hist, "y_hat": y_hat_hist})
output = dict({"alarms": std_alarms,
"preds": preds,
"n_updates": n_updates,
"samples_used": samples_used})
return output
@staticmethod
def drift_detection_spv(datastream_, model_, n_train_,
delay_time, observation_ratio,
delta, n_samples,
upd_model=False,
detector=DETECTOR):
import copy
import numpy as np
datastream = copy.deepcopy(datastream_)
model = copy.deepcopy(model_)
n_train = copy.deepcopy(n_train_)
driftmodel = detector(delta=delta)
alarms = []
iter = n_train
j, n_updates, samples_used = 0, 0, 0
yhat_hist = []
y_buffer, y_hist = [], []
X_buffer, X_hist = [], []
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_hist.append(yi[0])
y_buffer.append(yi[0])
X_hist.append(Xi[0])
X_buffer.append(Xi[0])
model_yhat = model.predict(Xi)
yhat_hist.append(model_yhat[0])
put_i_available = np.random.binomial(1, observation_ratio)
if put_i_available > 0:
if j >= delay_time:
err = int(y_hist[j - delay_time] != yhat_hist[j - delay_time])
driftmodel.add_element(err)
if driftmodel.detected_change():
print("Found change in iter: " + str(iter))
alarms.append(iter)
if upd_model:
X_buffer = np.array(X_buffer)
y_buffer = np.array(y_buffer)
samples_used_iter = len(y_buffer[-n_samples:])
print("Updating model with " + str(samples_used_iter), " Observations")
model.fit(X_buffer[-n_samples:],
y_buffer[-n_samples:])
y_buffer = list(y_buffer)
X_buffer = list(X_buffer)
n_updates += 1
samples_used += samples_used_iter
print("Moving on")
iter += 1
j += 1
preds = dict({"y": y_hist, "y_hat": yhat_hist})
output = dict({"alarms": alarms,
"preds": preds,
"n_updates": n_updates,
"samples_used": samples_used})
return output
@staticmethod
def BL2_retrain_after_w(datastream_, model_, n_train_, n_samples):
import copy
import numpy as np
datastream = copy.deepcopy(datastream_)
model = copy.deepcopy(model_)
n_train = copy.deepcopy(n_train_)
iter = copy.deepcopy(n_train_)
j, n_updates, samples_used = 0, 0, 0
yhat_hist = []
y_buffer, y_hist = [], []
X_buffer, X_hist = [], []
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_hist.append(yi[0])
y_buffer.append(yi[0])
X_hist.append(Xi[0])
X_buffer.append(Xi[0])
model_yhat = model.predict(Xi)
yhat_hist.append(model_yhat[0])
if iter % n_train == 0 and iter > n_train + 1:
X_buffer = np.array(X_buffer)
y_buffer = np.array(y_buffer)
samples_used_iter = len(y_buffer[-n_samples:])
print("Updating model with " + str(samples_used_iter), " Observations")
model.fit(X_buffer[-n_samples:],
y_buffer[-n_samples:])
y_buffer = list(y_buffer)
X_buffer = list(X_buffer)
n_updates += 1
samples_used += samples_used_iter
print("Moving on")
iter += 1
j += 1
preds = dict({"y": y_hist, "y_hat": yhat_hist})
output = dict({"alarms": [],
"preds": preds,
"n_updates": n_updates,
"samples_used": samples_used})
return output
@staticmethod
def BL1_never_adapt(datastream_, model_):
import copy
datastream = copy.deepcopy(datastream_)
model = copy.deepcopy(model_)
yhat_hist, y_hist = [], []
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_hist.append(yi[0])
model_yhat = model.predict(Xi)
yhat_hist.append(model_yhat[0])
preds = dict({"y": y_hist, "y_hat": yhat_hist})
output = dict({"alarms": [],
"preds": preds,
"n_updates": 0,
"samples_used": 0})
return output
@staticmethod
def drift_detection_uspv(datastream_, model_, n_train_,
use_prob,
method,
pvalue,
window_size,
n_samples,
upd_model=False):
import copy
import numpy as np
assert method in ["wrs", "tt", "ks"]
datastream = copy.deepcopy(datastream_)
model = copy.deepcopy(model_)
n_train = copy.deepcopy(n_train_)
driftmodel = HypothesisTestDetector(method=method,
window=window_size,
thr=pvalue)
alarms = []
y_buffer = []
y_hist = []
X_buffer = []
y_hat_hist = []
n_updates = 0
samples_used = 0
iter = n_train
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_buffer.append(yi[0])
y_hist.append(yi[0])
X_buffer.append(Xi[0])
y_hat_hist.append(model.predict(Xi)[0])
if use_prob:
yprob_all = model.predict_proba(Xi)
if len(yprob_all) < 2:
yhat = yprob_all[0]
elif len(yprob_all) == 2:
yhat = yprob_all[1]
else:
yhat = np.max(yprob_all)
else:
yhat = model.predict(Xi)[0]
driftmodel.add_element(yhat)
if driftmodel.detected_change():
print("Found change in iter: " + str(iter))
alarms.append(iter)
if upd_model:
X_buffer = np.array(X_buffer)
y_buffer = np.array(y_buffer)
samples_used_iter = len(y_buffer[-n_samples:])
print("Updating model with " + str(samples_used_iter), " Observations")
model.fit(X_buffer[-n_samples:],
y_buffer[-n_samples:])
# y_buffer = []
# X_buffer = []
y_buffer = list(y_buffer)
X_buffer = list(X_buffer)
n_updates += 1
samples_used += samples_used_iter
print("Moving on")
iter += 1
preds = dict({"y": y_hist, "y_hat": y_hat_hist})
output = dict({"alarms": alarms,
"preds": preds,
"n_updates": n_updates,
"samples_used": samples_used})
return output
@staticmethod
def drift_detection_uspv_f(datastream_, model_, n_train_,
use_prob,
method,
pvalue,
window_size,
n_samples,
upd_model=False):
import copy
import numpy as np
from ht_detectors.tracker_output import FixedWindowDetector
assert method in ["wrs", "tt", "ks"]
datastream = copy.deepcopy(datastream_)
model = copy.deepcopy(model_)
n_train = copy.deepcopy(n_train_)
driftmodel = FixedWindowDetector(ref_window=[], thr=pvalue, window_size=window_size)
alarms = []
y_buffer = []
y_hist = []
X_buffer = []
y_hat_hist = []
n_updates = 0
samples_used = 0
iter = n_train
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_buffer.append(yi[0])
y_hist.append(yi[0])
X_buffer.append(Xi[0])
y_hat_hist.append(model.predict(Xi)[0])
if use_prob:
yprob_all = model.predict_proba(Xi)
if len(yprob_all) < 2:
yhat = yprob_all[0]
elif len(yprob_all) == 2:
yhat = yprob_all[1]
else:
yhat = np.max(yprob_all)
else:
yhat = model.predict(Xi)[0]
driftmodel.add_element(yhat)
if driftmodel.detected_change():
print("Found change in iter: " + str(iter))
alarms.append(iter)
if upd_model:
X_buffer = np.array(X_buffer)
y_buffer = np.array(y_buffer)
samples_used_iter = len(y_buffer[-n_samples:])
print("Updating model with " + str(samples_used_iter), " Observations")
model.fit(X_buffer[-n_samples:],
y_buffer[-n_samples:])
# y_buffer = []
# X_buffer = []
y_buffer = list(y_buffer)
X_buffer = list(X_buffer)
n_updates += 1
samples_used += samples_used_iter
print("Moving on")
iter += 1
preds = dict({"y": y_hist, "y_hat": y_hat_hist})
output = dict({"alarms": alarms,
"preds": preds,
"n_updates": n_updates,
"samples_used": samples_used})
return output
@staticmethod
def drift_detection_uspv_x(datastream_, model_, n_train_,
X,
pvalue,
window_size,
n_samples,
upd_model=False):
import copy
import numpy as np
from ht_detectors.tracker_covariates import XCTracker
datastream = copy.deepcopy(datastream_)
model = copy.deepcopy(model_)
n_train = copy.deepcopy(n_train_)
driftmodel = XCTracker(X=X, thr=pvalue, W=window_size)
driftmodel.create_trackers()
alarms = []
y_buffer = []
y_hist = []
X_buffer = []
y_hat_hist = []
n_updates = 0
samples_used = 0
iter = n_train
while datastream.has_more_samples():
# print("Iteration: " + str(iter))
Xi, yi = datastream.next_sample()
y_buffer.append(yi[0])
y_hist.append(yi[0])
X_buffer.append(Xi[0])
y_hat_hist.append(model.predict(Xi)[0])
# yhat = model.predict(Xi)[0]
driftmodel.add_element(Xi)
if driftmodel.detected_change():
print("Found change in iter: " + str(iter))
alarms.append(iter)
if upd_model:
X_buffer = np.array(X_buffer)
y_buffer = np.array(y_buffer)
samples_used_iter = len(y_buffer[-n_samples:])
print("Updating model with " + str(samples_used_iter), " Observations")
model.fit(X_buffer[-n_samples:],
y_buffer[-n_samples:])
# y_buffer = []
# X_buffer = []
y_buffer = list(y_buffer)
X_buffer = list(X_buffer)
n_updates += 1
samples_used += samples_used_iter
print("Moving on")
iter += 1
preds = dict({"y": y_hist, "y_hat": y_hat_hist})
output = dict({"alarms": alarms,
"preds": preds,
"n_updates": n_updates,
"samples_used": samples_used})
return output
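if __name__ == "__main__":
    # Illustrative sketch of the student-teacher setup on synthetic data
    # (assumes scikit-learn and pandas are available alongside skmultiflow).
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier as RF
    X_syn, y_syn = make_classification(n_samples=2000, n_features=8, random_state=0)
    studd = STUDD(X=pd.DataFrame(X_syn), y=y_syn, n_train=500)
    studd.initial_fit(model=RF(), std_model=RF())
    out = STUDD.drift_detection_std(studd.datastream, studd.base_model,
                                    studd.student_model, studd.n_train,
                                    delta=0.002, n_samples=500)
    print(out["n_updates"], len(out["alarms"]))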
| 16,337
| 28.813869
| 92
|
py
|
studd
|
studd-main/studd/__init__.py
|
#
| 2
| 0.5
| 1
|
py
|
studd
|
studd-main/ht_detectors/tracker_output.py
|
import numpy as np
from scipy import stats
class HypothesisTestDetector(object):
METHOD = "ks"
def __init__(self, method, window, thr):
assert method in ["ks", "wrs", "tt"]
if method == "ks":
# Two-sample Kolmogorov-Smirnov test
m = stats.ks_2samp
elif method == "wrs":
# Wilcoxon rank-sum test
m = stats.ranksums
else:
# Two-sample t-test
m = stats.ttest_ind
self.method = m
self.alarm_list = []
self.data = []
self.window = window
self.thr = thr
self.index = 0
def add_element(self, elem):
self.data.append(elem)
def detected_change(self):
x = np.array(self.data)
w = self.window
if len(x) < 2 * w:
self.index += 1
return False
testw = x[-w:]
refw = x[-(w * 2):-w]
ht = self.method(testw, refw)
pval = ht[1]
has_change = pval < self.thr
if has_change:
print('Change detected at index: ' + str(self.index))
self.alarm_list.append(self.index)
self.index += 1
# self.get_change = True
self.data = [] # list(x[-w:])
# self.data = list(x[-w:])
return True
else:
self.index += 1
return False
class FixedWindowDetector(object):
def __init__(self, ref_window, thr, window_size):
self.method = stats.ks_2samp
self.alarm_list = []
self.data = []
self.ref_window = ref_window
self.window_size = window_size
self.thr = thr
self.index = 0
self.p_value = 1
def add_element(self, elem):
self.data.append(elem)
self.ref_window.append(elem)
self.ref_window = self.ref_window[:self.window_size]
def detected_change(self):
if len(self.data) < self.window_size:
self.index += 1
return False
x = np.array(self.data)
x = x[-self.window_size:]
w = np.array(self.ref_window)
ht = self.method(x, w)
p_value = ht[1]
has_change = p_value < self.thr
self.p_value = p_value
if has_change:
# print('Change detected at index: ' + str(self.index))
self.alarm_list.append(self.index)
self.ref_window = []
self.data = []
self.index += 1
return True
else:
self.index += 1
return False
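if __name__ == "__main__":
    # Illustrative sketch: a stream whose mean shifts halfway should raise an alarm.
    rng = np.random.default_rng(0)
    stream = np.concatenate([rng.normal(0.0, 1.0, 500), rng.normal(2.0, 1.0, 500)])
    det = HypothesisTestDetector(method="ks", window=100, thr=1e-3)
    for i, xi in enumerate(stream):
        det.add_element(xi)
        if det.detected_change():
            print("alarm at index", i)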
| 2,561
| 24.366337
| 67
|
py
|
studd
|
studd-main/ht_detectors/__init__.py
|
#
| 1
| 1
| 1
|
py
|
studd
|
studd-main/ht_detectors/tracker_covariates.py
|
import numpy as np
import pandas as pd
from scipy.stats import ks_2samp
class FeatTracker(object):
def __init__(self, ref_window, thr, window_size):
self.method = ks_2samp
self.alarm_list = []
self.data = []
self.ref_window = np.array(ref_window)
self.window_size = window_size
self.thr = thr
self.index = 0
self.p_value = 1
def add_element(self, elem):
self.data.append(elem)
def detected_change(self):
if len(self.data) < self.window_size:
self.index += 1
return False
x = np.array(self.data)
x = x[-self.window_size:]
w = self.ref_window
ht = self.method(x, w)
p_value = ht[1]
has_change = p_value < self.thr
self.p_value = p_value
if has_change:
# print('Change detected at index: ' + str(self.index))
self.alarm_list.append(self.index)
self.index += 1
return True
else:
self.index += 1
return False
class XCTracker(object):
def __init__(self, X, thr, W=None):
"""
X change tracker
:param X: pd df
"""
self.X = X
self.col_names = list(self.X.columns)
self.trackers = dict.fromkeys(self.col_names)
self.thr = thr
self.index = 0
self.p_values = None
if W is None:
self.W = self.X.shape[0]
else:
self.W = W
self.X = self.X.tail(self.W)
def create_trackers(self):
for col in self.trackers:
x = np.array(self.X.loc[:, col])
self.trackers[col] = \
FeatTracker(ref_window=x,
thr=self.thr,
window_size=self.W)
def reset_trackers(self):
self.trackers = dict.fromkeys(self.col_names)
self.X = self.X.tail(self.W)
self.create_trackers()
def get_p_values(self):
self.p_values = \
[self.trackers[x].p_value
for x in self.trackers]
def add_element(self, Xi):
Xi_df = pd.DataFrame(Xi)
Xi_df.columns = self.X.columns
        # DataFrame.append returns a new frame (and was removed in pandas 2.0), so
        # the result must be kept; pd.concat is the drop-in replacement.
        self.X = pd.concat([self.X, Xi_df], ignore_index=True)
x = Xi.flatten()
for i, col in enumerate(self.col_names):
self.trackers[col].add_element(x[i])
def detected_change(self):
self.index += 1
changes = []
for col in self.col_names:
has_change = \
self.trackers[col].detected_change()
changes.append(has_change)
changes = np.array(changes)
any_change = np.any(changes)
if any_change:
print('Change detected at index: ' + str(self.index))
self.reset_trackers()
return any_change
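# Minimal usage sketch (added for illustration, not part of the original module):
# build an XCTracker from a reference DataFrame and feed it new observations one
# row at a time. The toy data and the distribution shift are demo assumptions.
if __name__ == "__main__":
    rng = np.random.RandomState(1)
    X_ref = pd.DataFrame(rng.normal(size=(200, 3)), columns=["f1", "f2", "f3"])
    tracker = XCTracker(X=X_ref, thr=0.001, W=100)
    tracker.create_trackers()
    for _ in range(300):
        xi = rng.normal(loc=2.0, size=(1, 3))  # shifted covariate distribution
        tracker.add_element(xi)
        tracker.detected_change()
    tracker.get_p_values()
    print("latest per-feature p-values:", tracker.p_values)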
| 2,842
| 23.508621
| 67
|
py
|
studd
|
studd-main/experiments/main.py
|
import pickle
from workflows import Workflow
with open('data/real_datasets.pkl', 'rb') as fp:
datasets = pickle.load(fp)
delta = 0.002
results = dict()
for df in datasets:
    # 'datasets' maps each stream name to a DataFrame with a 'target' column
    data = datasets[df]
    y = data.target.values
    X = data.drop(['target'], axis=1)
small_data_streams = ['AbruptInsects',
'Insects',
'Keystroke',
'ozone',
'outdoor',
'luxembourg']
if str(df) in small_data_streams:
n_train_obs = 500
W = n_train_obs
else:
n_train_obs = 1000
W = n_train_obs
predictions, detections, train_size, training_info, results_comp = \
Workflow(X=X, y=y,delta=delta,window_size=W)
ds_results = \
dict(predictions=predictions,
detections=detections,
n_updates=train_size,
data_size=len(y),
training_info=training_info,
results_comp=results_comp)
results[df] = ds_results
with open('data/studd_experiments.pkl', 'wb') as fp:
pickle.dump(results, fp)
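# Note (assumption for illustration): 'data/real_datasets.pkl' is expected to map
# each stream name to a pandas DataFrame that contains a 'target' column, which is
# what the loop above relies on via datasets[df].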
| 1,142
| 24.4
| 72
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/resample.py
|
import numpy as np
import SimpleITK as sitk
import os
import argparse
parser = argparse.ArgumentParser(description='Resampling CT img or seg to 256 or 512')
parser.add_argument('-p1', '--path1_filename', default=None, type=str,
metavar='path1_filename',
help='Raw file folder ')
parser.add_argument('-p2', '--path2_filename', default=None, type=str,
metavar='path2_filename',
help='New file folder ')
# parser.add_argument('-n', '--name', default=None, type=str,
# metavar='name',
# help='Name of folder that contains all file')
parser.add_argument('-s1', '--size1', default=256, type=int,
metavar='size1',
help='row size after resampling')
parser.add_argument('-s2', '--size2', default=256, type=int,
metavar='size2',
help='col size after resampling')
def ResampleBySize_view(path1,path2,name,rowSize,colSize):
img = sitk.ReadImage(path1+name)
# myshow(img,'1')
# pix_resampled = (sitk.GetArrayFromImage(img).astype(dtype=float))
# plot_3d(pix_resampled,0)
original_spacing = img.GetSpacing()
print('original_spacing:',original_spacing)
original_size = img.GetSize()
print('original_size:',original_size)
#VolSize = original_size
rowSize,colSize=rowSize,colSize
factor3=1
factor1=rowSize/img.GetSize()[0]
factor2=colSize/img.GetSize()[1]
factor=[factor1,factor2,factor3]
#we rotate the image according to its transformation using the direction and according to the final spacing we want
newSize = np.asarray(img.GetSize()) * factor + 0.00001
dstRes =np.asarray(img.GetSpacing())/factor
print(dstRes)
print(newSize)
# ret = np.zeros([newSize[0],newSize[1], newSize[2]], dtype=np.float32)
newSize = newSize.astype(dtype=int).tolist()
print(newSize)
T = sitk.AffineTransform(3)
#T.SetMatrix(img.GetDirection())
# T.Scale(factor)
# T.scale(factor)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(img)
resampler.SetOutputSpacing([dstRes[0], dstRes[1], dstRes[2]])
resampler.SetSize(newSize)
method=sitk.sitkLinear
resampler.SetInterpolator(method)
resampler.SetTransform(T)
imgResampled = resampler.Execute(img)
new_spacing = imgResampled.GetSpacing()
print('new_spacing:', new_spacing)
print('new_size:', imgResampled.GetSize())
path2=path2
sitk.WriteImage(imgResampled,path2+name)
if __name__ == "__main__":
global args
args = parser.parse_args()
p1 = args.path1_filename
p2 = args.path2_filename
s1 = args.size1
s2 = args.size2
for file in os.listdir(p1):
ResampleBySize_view(p1, p2, file, s1, s2)
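# Example invocation (illustrative; folder names are placeholders and must end
# with a path separator because the script concatenates path + file name):
#   python resample.py -p1 ./raw_ct/ -p2 ./resampled_ct/ -s1 256 -s2 256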
| 2,903
| 28.333333
| 119
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/train_sf_partial.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 16:00:33 2017
@author: yan
"""
# %% train the network
import argparse
import datetime
import math
import numpy as np
import os
from os import path
import shutil
import time
import torch
from torch import cuda
from torch import optim
#from torch.optim import lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#from collections import OrderedDict
from torch.nn import init
import dataset.dataset_liverCT_2D as dl1
import dataset.dataset_muor_2D as dl2
# from model.denseu_net import DenseUNet
# from model.unet import UNet
from model.concave_dps_w import ResUNet
# from model.concave_res_w3 import ResUNet
# from model.resu_net import ResUNet
# from model.concave_dcc import ResUNet
#from model.concave_sh import ResUNet
# from scipy.misc import imsave  # required only by the (currently unused) visualize_* helpers below
# %%
parser = argparse.ArgumentParser(description='PyTorch ResUNet Training')
parser.add_argument('--epochs', default=4000, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batchsize', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--blocksize', default=224, type=int,
metavar='N', help='H/W of each image block (default: 224)')
parser.add_argument('-s', '--slices', default=3, type=int,
metavar='N', help='number of slices (default: 3)')
parser.add_argument('--lr', '--learning-rate', default=0.0002, type=float,
                    metavar='LR', help='initial learning rate (default: 0.0002)')
parser.add_argument('--momentum', default=0.9, type=float,
metavar='N', help='momentum for optimizer (default: 0.9)')
parser.add_argument('--view', default='axial', type=str,
metavar='View', help='view for segmentation (default: axial)')
parser.add_argument('--cv_n', default='1', type=str,
help='Cross validation Dataset num')
# %%
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# %%
# def dice_similarity(output, target):
# """Computes the Dice similarity"""
# #batch_size = target.size(0)
# smooth = 0.00001
# # max returns values and positions
# seg_channel = output.max(dim=1)[1]
# seg_channel = seg_channel.float()
# target = target.float()
# #print('Shapes: {}, {}'.format(seg_channel.shape, target.shape))
# intersection = (seg_channel * target).sum(dim=2).sum(dim=1)
# union = (seg_channel + target).sum(dim=2).sum(dim=1)
# dice = 2. * intersection / (union + smooth)
# #print(intersection, union, dice)
# return torch.mean(dice)
def dice_similarity(output, target):
"""Computes the Dice similarity"""
#batch_size = target.size(0)
smooth = 0.00001
# max returns values and positions
# output = output>0.5
output = output.float()
target = target.float()
seg_channel = output.view(output.size(0), -1)
target_channel = target.view(target.size(0), -1)
#print('Shapes: {}, {}'.format(seg_channel.shape, target.shape))
intersection = (seg_channel * target_channel).sum()
union = (seg_channel + target_channel).sum()
dice = (2. * intersection) / (union + smooth)
#print(intersection, union, dice)
return torch.mean(dice)
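# Quick sanity check for dice_similarity (illustrative, not part of the original
# training code): a prediction identical to the target gives a Dice of ~1, and a
# disjoint prediction gives ~0.
#   >>> t = torch.tensor([[[1., 1.], [0., 0.]]])
#   >>> dice_similarity(t, t)      # ~1.0
#   >>> dice_similarity(1 - t, t)  # ~0.0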
def dice_similarity_u(output, target):
"""Computes the Dice similarity"""
#batch_size = target.size(0)
total_dice = 0
output = output.clone()
target = target.clone()
# print('target:',target.sum())
for i in range(1, output.shape[1]):
target_i = torch.zeros(target.shape)
target_i = target_i.cuda().clone()
target_i[target == i] = 1
output_i = output[:, i:i+1].clone()
dice_i = dice_similarity(output_i, target_i)
# print('dice_: ',i,dice_i.data)
# print('target_i: ',target_i.sum())
# print('output_i: ',output_i.sum())
total_dice += dice_i
total_dice = total_dice / (output.shape[1] - 1)
#print(intersection, union, dice)
return total_dice
def visualize_train(d,name):
name = name
da = d.cpu().data.numpy()
db = np.transpose(da[0], (1,2,0))
# print('db.shape',db.shape)
if db.shape[2] == 3:
imsave(path.join('/home/fangx2/mu_or/train_u', name+'.png'), db, format='png')
else:
imsave(path.join('/home/fangx2/mu_or/train_u', name+'.png'), db[:,:,0], format='png')
def visualize_train1(d,name):
name = name
da = d.cpu().data.numpy()
db = da[0,:,:]
imsave(path.join('/home/fangx2/mu_or/train_u', name+'.png'), db, format='png')
def visualize_val(d,name):
name = name
da = d.cpu().data.numpy()
db = np.transpose(da[0], (1,2,0))
# print('db.shape',db.shape)
if db.shape[2] == 3:
imsave(path.join('/home/fangx2/mu_or/val_u', name+'.png'), db, format='png')
else:
imsave(path.join('/home/fangx2/mu_or/val_u', name+'.png'), db[:,:,0], format='png')
def visualize_val1(d,name):
name = name
da = d.cpu().data.numpy()
db = da[0,:,:]
imsave(path.join('/home/fangx2/mu_or/val_u', name+'.png'), db, format='png')
# %%
def train(train_loader, data_type, model, criterion, optimizer, epoch, verbose=True):
"""Function for training"""
batch_time = AverageMeter()
#data_time = AverageMeter()
losses = AverageMeter()
dice = AverageMeter()
# switch to train mode
model.train()
end_time = time.time()
for i, sample_batched in enumerate(train_loader):
# measure data loading time
#data_time.update(time.time() - end_time)
image_batch = sample_batched['image']
# label should be only the middle slice
label_batch = sample_batched['label'][:,0,:,:]
# mask = sample_batched['mask'][:,0:1,:,:]
# print('mask shape:', mask.shape)
#print('label batch size: {}'.format(label_batch.shape))
#image_batch = image_batch.cuda()
#label_batch = label_batch.cuda(async=True)
input_var = Variable(image_batch).float()
input_var = input_var.cuda()
target_var = Variable(label_batch).long()
target_var = target_var.cuda()
# mask_var = Variable(mask).float()
# mask_var = mask_var.cuda()
# compute output
output = model(input_var)
output = torch.clamp(output, min=1e-10, max=1)
if data_type == '1':
output_p2 = output[:,1:2,:,:].clone()
output_p1 = output[:,0:1,:,:].clone() + output[:,2:3,:,:].clone() + output[:,3:4,:,:].clone()
output_p = torch.cat((output_p1, output_p2), 1)
if data_type == '2':
output_p2 = output[:,2:3,:,:].clone()
output_p1 = output[:,0:1,:,:].clone() + output[:,1:2,:,:].clone() + output[:,3:4,:,:].clone()
output_p = torch.cat((output_p1, output_p2), 1)
if data_type == '3':
output_p2 = output[:,3:4,:,:].clone()
output_p1 = output[:,0:1,:,:].clone() + output[:,1:2,:,:].clone() + output[:,2:3,:,:].clone()
output_p = torch.cat((output_p1, output_p2), 1)
if data_type == '4':
output_p = output.clone()
# print('output p:',output_p.sum())
# output = output * mask_var
# print('Output size: {}, type: {}'.format(output.size(), type(output)))
# print('Target size: {}, type: {}'.format(target_var.size(), type(target_var)))
loss = criterion(output_p, target_var)
# if epoch % 5 == 0:
# visualize_train(output_p[:,1:4,:,:], str(epoch) + 'output')
# visualize_train1(target_var[:,:,:], str(epoch) + 'target')
# measure accuracy and record loss
losses.update(loss.data, image_batch.size(0))
ds = dice_similarity_u(output_p, target_var)
#print(ds.data)
dice.update(ds.data, image_batch.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
current_time = time.time()
batch_time.update(current_time - end_time)
end_time = current_time
if ((i+1) % 10 == 0) and verbose:
print('Train ep {0} [batch {1}/{2}]: '
#'Time {batch_time.val:.1f}s, '
'Loss avg: {loss.avg:.4f}, '
'Dice avg: {dice.avg:.4f}'.format(
epoch+1, i+1, len(train_loader),
#batch_time=batch_time,
loss=losses,
dice=dice))
print('Training -> loss: {loss.avg:.4f}, '
'Dice {dice.avg:.3f}'.format(
loss=losses, dice=dice))
#return {'train_loss': loss.avg, 'train_acc': dice.avg}
return losses.avg, dice.avg
# %%
def validate(loader, data_type, model, criterion, epoch, verbose=True):
batch_time = AverageMeter()
losses = AverageMeter()
dice = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, sample_batched in enumerate(loader):
image_batch = sample_batched['image']
# label should be only the middle slice
label_batch = sample_batched['label'][:,0,:,:]
# mask = sample_batched['mask'][:,0:1,:,:]
input_var = Variable(image_batch, volatile=True).float()
input_var = input_var.cuda()
target_var = Variable(label_batch, volatile=True).long()
target_var = target_var.cuda()
# mask_var = Variable(mask).float()
# mask_var = mask_var.cuda()
# compute output
output = model(input_var)
# output = output * mask_var
if data_type == '1':
output_p = output[:,0:2,:,:].clone()
output_p[:,0,:,:] = output[:,0,:,:].clone() + output[:,2,:,:].clone() + output[:,3,:,:].clone()
if data_type == '2':
output_p = output[:,1:3,:,:].clone()
output_p[:,0,:,:] = output[:,0,:,:].clone() + output[:,1,:,:].clone() + output[:,3,:,:].clone()
if data_type == '3':
output_p = output[:,2:4,:,:].clone()
output_p[:,0,:,:] = output[:,0,:,:].clone() + output[:,1,:,:].clone() + output[:,2,:,:].clone()
if data_type == '4':
output_p = output.clone()
# if epoch % 5 == 0:
# visualize_val(output_p[:,1:4,:,:], str(epoch) + 'output')
# visualize_val1(target_var[:,:,:], str(epoch) + 'target')
loss = criterion(output_p, target_var)
#torch.save(input_var, '/home/yanp2/tmp/resu-net/logs/input_{}.pth'.format(i))
#torch.save(target_var, '/home/yanp2/tmp/resu-net/logs/target_{}.pth'.format(i))
#torch.save(output, '/home/yanp2/tmp/resu-net/logs/output_{}.pth'.format(i))
# measure accuracy and record loss
losses.update(loss.data, image_batch.size(0))
ds = dice_similarity_u(output_p, target_var)
dice.update(ds.data, image_batch.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if ((i+1) % 10 == 0) and verbose:
print('Validation ep {0} [batch {1}/{2}]: '
#'Time {batch_time.val:.1f}s, '
'Loss avg: {loss.avg:.4f}, '
'Dice avg: {dice.avg:.4f}'.format(
epoch+1, i+1, len(loader),
#batch_time=batch_time,
loss=losses,
dice=dice))
print('Validation ep {} -> loss: {loss.avg:.4f}, '
'Dice {dice.avg:.3f}'.format(
epoch+1, loss=losses, dice=dice))
return losses.avg, dice.avg
#def adjust_learning_rate(optimizer, epoch):
def adjust_learning_rate(optimizer, gamma=0.9):
for param_group in optimizer.param_groups:
param_group['lr'] *= gamma
# %%
def save_checkpoint(state, is_best, log_folder, view='axial',
filename='checkpoint.pth.tar'):
"""Save checkpoints
"""
filename = path.join(log_folder, filename)
torch.save(state, filename)
if is_best:
filename_best = path.join(log_folder, 'resu_best_{}.pth.tar'.format(view))
shutil.copyfile(filename, filename_best)
# def compute_length(inputs, edge_op):
# """Compute the length of segmentation boundary"""
# # Get segmentation
# seg_channel = inputs.max(dim=1)[1]
# seg_channel = seg_channel.unsqueeze(1)
# seg_channel = seg_channel.float()
# #print(seg_channel.shape)
# g2 = F.conv2d(seg_channel, edge_op, padding=1)
# gx = g2 ** 2
# gx = torch.sum(torch.squeeze(gx), dim=0)
# # Adding small number to increase the numerical stability
# #gx = torch.sqrt(gx + 1e-16)
# gm = torch.mean(gx.view(-1))
# return gm
# class HybridLoss2d(nn.Module):
# def __init__(self, edge_op, weight=None, size_average=True):
# super(HybridLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average)
# self.op = edge_op
# def forward(self, inputs, targets):
# #return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
# ce = self.nll_loss(F.log_softmax(inputs, dim=1), targets)
# # dice
# dice = dice_similarity(inputs, targets)
# # boundary length
# length = compute_length(inputs, self.op)
# return ce - 0.1 * dice + length
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
return self.nll_loss(torch.log(inputs), targets)
# class FocalLoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True):
# super(FocalLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average)
# def forward(self, inputs, targets):
# focal_frequency = F.nll_loss(F.softmax(inputs, dim=1), targets, reduction = 'none')
# # print('shape1:',focal_frequency.shape)
# focal_frequency += 1.0
# focal_frequency = torch.pow(focal_frequency, 2)
# focal_frequency = focal_frequency.repeat(2, 1, 1, 1)
# focal_frequency = focal_frequency.transpose(1,0)
# # print('shape:',focal_frequency.shape)
# return self.nll_loss(focal_frequency * F.log_softmax(inputs, dim=1), targets)
# %%
if __name__ == "__main__":
global args
args = parser.parse_args()
cv = args.cv_n
use_cuda = cuda.is_available()
checkpoing_dir = path.expanduser('/home/fangx2/mu_or/tmp/sf_134')
if not path.isdir(checkpoing_dir):
os.makedirs(checkpoing_dir)
log_dir = path.expanduser('/home/fangx2/mu_or/tmp/sf_134')
if not path.isdir(log_dir):
os.makedirs(log_dir)
"""
training
"""
num_classes = 4
num_in_channels = args.slices
# model = DenseUNet(num_channels = num_in_channels, num_classes = num_classes)
model = ResUNet(num_in_channels, num_classes)
# model = UNet(num_in_channels, num_classes)
resunet_checkpoint = torch.load('/home/fangx2/mu_or/tmp/sf_pr0_1216_dps/resunet_checkpoint_final.pth.tar')
resunet_dict = resunet_checkpoint['state_dict']
model.resnet.load_state_dict(resunet_dict)
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum)
folder_training_1 = '/home/fangx2/data/LIver_submit1/data3/training_a/'
folder_validation_1 = '/home/fangx2/data/LIver_submit1/data3/training_a/'
folder_training_2 = '/home/fangx2/kits19/training_256_ras_a/'
folder_validation_2 = '/home/fangx2/kits19/training_256_ras_a/'
folder_training_3 = '/home/fangx2/data/code/data/spleen/training_a/'
folder_validation_3 = '/home/fangx2/data/code/data/spleen/training_a/'
folder_training_4 = '/home/fangx2/BTCV/training_256/'
folder_validation_4 = '/home/fangx2/BTCV/validation_256/'
# folder_training = r'/home/fangx2/data/LIver_submit1/dataset_256'
# folder_validation = r'/home/fangx2/data/LIver_submit1/dataset_256'
# Set L2 penalty using weight_decay
#optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
# optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum)
# Initialize Sobel edge detection filter
sobel_x = np.asarray([1.0, 0, -1.0, 2.0, 0, -2.0, 1.0, 0, -1.0], dtype=np.float32)
sobel_x /= 4.0
sobel_x = np.reshape(sobel_x, (1, 1, 3, 3))
sobel_y = np.asarray([1.0, 2.0, 1.0, 0, 0, 0, -1.0, -2.0, -1.0], dtype=np.float32)
sobel_y /= 4.0
sobel_y = np.reshape(sobel_y, (1, 1, 3, 3))
sobel = np.concatenate((sobel_x, sobel_y), axis=0)
sobel = Variable(torch.from_numpy(sobel), requires_grad=False)
if use_cuda:
sobel = sobel.cuda()
# weights = torch.Tensor([0.2, 1.2])
#Cross entropy Loss
criterion = CrossEntropyLoss2d()
# criterion = FocalLoss2d(weights)
#criterion = HybridLoss2d(sobel, weights)
if use_cuda:
print('\n***** Training ResU-Net with GPU *****\n')
model.cuda()
criterion.cuda()
blocksize = args.blocksize
view = args.view
if view == 'axial' or view == 'sagittal' or view == 'coronal':
composed = dl1.get_composed_transform(blocksize, num_in_channels, view)
composed4 = dl2.get_composed_transform(blocksize, num_in_channels, view)
else:
print('The given view of <{}> is not supported!'.format(view))
batchsize = args.batchsize
#Dataset 1,2,3
dataset_train1 = dl1.LiverCTDataset(folder_training_1,
transform=composed)
train_loader1 = dl1.DataLoader(dataset_train1,
batch_size=args.batchsize,
shuffle=True,
num_workers=4,
drop_last=False
)
dataset_validation1 = dl1.LiverCTDataset(folder_validation_1,
transform=composed)
val_loader1 = dl1.DataLoader(dataset_validation1,
batch_size=args.batchsize,
shuffle=False,
num_workers=2,
drop_last=False
)
# dataset_train2 = dl1.LiverCTDataset(folder_training_2,
# transform=composed)
# train_loader2 = dl1.DataLoader(dataset_train2,
# batch_size=args.batchsize,
# shuffle=True,
# num_workers=4,
# drop_last=False
# )
# dataset_validation2 = dl1.LiverCTDataset(folder_validation_2,
# transform=composed)
# val_loader2 = dl1.DataLoader(dataset_validation2,
# batch_size=args.batchsize,
# shuffle=False,
# num_workers=2,
# drop_last=False
# )
dataset_train3 = dl1.LiverCTDataset(folder_training_3,
transform=composed)
train_loader3 = dl1.DataLoader(dataset_train3,
batch_size=args.batchsize,
shuffle=True,
num_workers=4,
drop_last=False
)
dataset_validation3 = dl1.LiverCTDataset(folder_validation_3,
transform=composed)
val_loader3 = dl1.DataLoader(dataset_validation3,
batch_size=args.batchsize,
shuffle=False,
num_workers=2,
drop_last=False
)
#Dataset4
dataset_train4 = dl2.LiverCTDataset(folder_training_4,
transform=composed4)
train_loader4 = dl2.DataLoader(dataset_train4,
batch_size=args.batchsize,
shuffle=True,
num_workers=4,
drop_last=False
)
dataset_validation4 = dl2.LiverCTDataset(folder_validation_4,
transform=composed4)
val_loader4 = dl2.DataLoader(dataset_validation4,
batch_size=args.batchsize,
shuffle=False,
num_workers=2,
drop_last=False
)
best_dice = -1.0
#for epoch in range(args.start_epoch, args.epochs):
num_epochs = args.epochs
train_history = []
val_history = []
for epoch in range(num_epochs):
print('Training epoch {} of {}...'.format(epoch + 1, num_epochs))
# start timing
t_start = time.time()
# train for one epoch
if epoch % 3 == 0:
train_loss = train(train_loader1, '1', model, criterion,
optimizer, epoch, verbose=True)
elif epoch % 3 == 1:
# train_loss = train(train_loader2, '2', model, criterion,
# optimizer, epoch, verbose=True)
# # elif epoch % 4 == 2:
# else:
train_loss = train(train_loader3, '3', model, criterion,
optimizer, epoch, verbose=True)
else:
train_loss = train(train_loader4, '4', model, criterion,
optimizer, epoch, verbose=True)
# train_loss = train(train_loader4, '4', model, criterion,
# optimizer, epoch, verbose=True)
train_history.append(train_loss)
# Gradually reducing learning rate
if epoch % 40 == 0:
adjust_learning_rate(optimizer, gamma=0.99)
# evaluate on validation set
val_loss = validate(val_loader4, '4', model, criterion, epoch, verbose=True)
val_history.append(val_loss)
dice = val_loss[1]
# remember best prec@1 and save checkpoint
is_best = dice > best_dice
best_dice = max(dice, best_dice)
if is_best:
fn_checkpoint = 'resu_checkpoint_ep{:04d}.pth.tar'.format(epoch + 1)
save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_dice': best_dice,
'optimizer' : optimizer.state_dict(),},
is_best,
checkpoing_dir,
view,
filename=fn_checkpoint)
if epoch == num_epochs - 1:
filename = path.join(checkpoing_dir, 'resunet_checkpoint_final.pth.tar')
torch.save({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_dice': best_dice,
'optimizer' : optimizer.state_dict(),},filename)
elapsed_time = time.time() - t_start
print('Epoch {} completed in {:.2f}s\n'.format(epoch+1, elapsed_time))
# save the training history
time_now = datetime.datetime.now()
time_str = time_now.strftime('%y%m%d-%H%M%S')
fn_train_history = path.join(log_dir, 'train_hist_{}.npy'.format(time_str))
fn_val_history = path.join(log_dir, 'val_hist_{}.npy'.format(time_str))
np.save(fn_train_history, np.asarray(train_history))
np.save(fn_val_history, np.asarray(val_history))
time_disp_str = time_now.strftime('%H:%M:%S on %Y-%m-%d')
print('Training completed at {}'.format(time_disp_str))
print('Training history saved into:\n<{}>'.format(fn_train_history))
print('<{}>'.format(fn_val_history))
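# Example invocation (illustrative; the dataset and checkpoint paths used by the
# script are hard-coded above and would need to be adapted):
#   python train_sf_partial.py --epochs 4000 -b 8 --blocksize 224 -s 3 \
#       --lr 0.0002 --view axial --cv_n 1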
| 24,733
| 35.480826
| 110
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/train_concave0.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 16:00:33 2017
@author: yan
"""
# %% train the network
import argparse
import datetime
import math
import numpy as np
import os
from os import path
import shutil
import time
import torch
from torch import cuda
from torch import optim
#from torch.optim import lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#from collections import OrderedDict
from torch.nn import init
# from lovasz_losses import lovasz_softmax
import dataset.dataset_liverCT_2D as dl
# import dataset.dataset_all as dl
#from u_net import UNet
# from model.concave_sh import ResUNet
# from model.MIMO_att import ResUNet
# from model.concave_res2 import ResUNet
from model.concave_dps import ResUNet
# from model.concave_dps_dc import ResUNet
# from model.concave_dps3 import ResUNet
#from resu_scalecov import ResUNet
#from coordu_net import UNet
# %%
parser = argparse.ArgumentParser(description='PyTorch ResUNet Training')
parser.add_argument('--epochs', default=2000, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batchsize', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--blocksize', default=224, type=int,
                    metavar='N', help='H/W of each image block (default: 224)')
parser.add_argument('-s', '--slices', default=3, type=int,
                    metavar='N', help='number of slices (default: 3)')
parser.add_argument('-n', '--num_classes', default=2, type=int,
                    metavar='N', help='number of classes (default: 2)')
parser.add_argument('--lr', '--learning-rate', default=0.002, type=float,
metavar='LR', help='initial learning rate (default: 0.002)')
parser.add_argument('--momentum', default=0.9, type=float,
metavar='N', help='momentum for optimizer (default: 0.9)')
parser.add_argument('--view', default='axial', type=str,
metavar='View', help='view for segmentation (default: axial)')
parser.add_argument('--cv_n', default='1', type=str,
help='Cross validation Dataset num')
# %%
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# %%
def dice_similarity(output, target):
"""Computes the Dice similarity"""
#batch_size = target.size(0)
smooth = 0.00001
# max returns values and positions
seg_channel = output.max(dim=1)[1]
seg_channel = seg_channel.float()
target = target.float()
#print('Shapes: {}, {}'.format(seg_channel.shape, target.shape))
intersection = (seg_channel * target).sum(dim=2).sum(dim=1)
union = (seg_channel + target).sum(dim=2).sum(dim=1)
dice = 2. * intersection / (union + smooth)
#print(intersection, union, dice)
return torch.mean(dice)
# def dice_similarity(output, target):
# """Computes the Dice similarity"""
# #batch_size = target.size(0)
# smooth = 0.00001
# # max returns values and positions
# output = output>0.5
# output = output.float()
# target = target.float()
# seg_channel = output.view(output.size(0), -1)
# target_channel = target.view(target.size(0), -1)
# #print('Shapes: {}, {}'.format(seg_channel.shape, target.shape))
# intersection = (seg_channel * target_channel).sum()
# union = (seg_channel + target_channel).sum()
# dice = (2. * intersection) / (union + smooth)
# #print(intersection, union, dice)
# return torch.mean(dice)
# %%
def train(train_loader, model, criterion, optimizer, epoch, verbose=True):
"""Function for training"""
batch_time = AverageMeter()
#data_time = AverageMeter()
losses = AverageMeter()
dice = AverageMeter()
losses_1 = AverageMeter()
dice_1 = AverageMeter()
losses_2 = AverageMeter()
dice_2 = AverageMeter()
losses_3 = AverageMeter()
dice_3 = AverageMeter()
losses_4 = AverageMeter()
dice_4 = AverageMeter()
losses_5 = AverageMeter()
dice_5 = AverageMeter()
# losses_6 = AverageMeter()
# dice_6 = AverageMeter()
# switch to train mode
model.train()
end_time = time.time()
for i, sample_batched in enumerate(train_loader):
# measure data loading time
#data_time.update(time.time() - end_time)
image_batch = sample_batched['image']
# label should be only the middle slice
label_batch1 = sample_batched['label'][:,0,:,:]
        # The inputs must not be volatile during training, otherwise gradient
        # tracking is disabled and loss.backward() cannot update the model.
        input_var = Variable(image_batch).float()
        input_var = input_var.cuda()
        target_var = Variable(label_batch1).long().cuda()
# target_var = nn.Upsample(size = [256,256], mode='bilinear', align_corners=True)(target_var)
# compute output
output1, output2, output3, output4, output5 = model(input_var)
# print('output:',output1.shape,output2.shape,output3.shape)
loss1 = criterion(output1, target_var)
loss2 = criterion(output2, target_var)
loss3 = criterion(output3, target_var)
loss4 = criterion(output4, target_var)
loss5 = criterion(output5, target_var)
# loss6 = criterion(output6, target_var)
# a = (output1 - output2 + 1) / 2
# a_tar = (target_var1 - target_var2 + 1) / 2
# loss4 = criterion(a, a_tar)
# b = (output3 - output2 +1) / 2
# b_tar = (target_var3 - target_var2 + 1) / 2
# loss5 = criterion(b, b_tar)
# loss = loss1 + loss2 + loss3 + 0.5 * loss4 + 0.5 * loss5
loss = loss1 + loss2 + loss3 + loss4 + loss5
# measure accuracy and record loss
losses.update(loss.data, image_batch.size(0))
losses_1.update(loss1.data, image_batch.size(0))
losses_2.update(loss2.data, image_batch.size(0))
losses_3.update(loss3.data, image_batch.size(0))
losses_4.update(loss4.data, image_batch.size(0))
losses_5.update(loss5.data, image_batch.size(0))
# losses_6.update(loss6.data, image_batch.size(0))
ds_1 = dice_similarity(output1, target_var)
ds_2 = dice_similarity(output2, target_var)
ds_3 = dice_similarity(output3, target_var)
ds_4 = dice_similarity(output4, target_var)
ds_5 = dice_similarity(output5, target_var)
# ds_6 = dice_similarity(output6, target_var)
#print(ds.data)
dice_1.update(ds_1.data, image_batch.size(0))
dice_2.update(ds_2.data, image_batch.size(0))
dice_3.update(ds_3.data, image_batch.size(0))
dice_4.update(ds_4.data, image_batch.size(0))
dice_5.update(ds_5.data, image_batch.size(0))
# dice_6.update(ds_6.data, image_batch.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
current_time = time.time()
batch_time.update(current_time - end_time)
end_time = current_time
if ((i+1) % 10 == 0) and verbose:
print('Train ep {0} [batch {1}/{2}]: '
#'Time {batch_time.val:.1f}s, '
'Loss avg: {loss.avg:.4f}, '
'Dice avg: {dice.avg:.4f}'.format(
epoch+1, i+1, len(train_loader),
#batch_time=batch_time,
loss=losses,
dice=dice))
print('Training -> loss: {loss.avg:.4f}'.format(
loss=losses))
print('Training -> loss_1: {loss.avg:.4f}, '
'Dice_1 {dice_1.avg:.3f}'.format(
loss=losses_1, dice_1=dice_1))
print('Training -> loss_2: {loss.avg:.4f}, '
'Dice_2 {dice_2.avg:.3f}'.format(
loss=losses_2, dice_2=dice_2))
print('Training -> loss_3: {loss.avg:.4f}, '
'Dice_3 {dice_3.avg:.3f}'.format(
loss=losses_3, dice_3=dice_3))
print('Training -> loss_4: {loss.avg:.4f}, '
'Dice_4 {dice_4.avg:.3f}'.format(
loss=losses_4, dice_4=dice_4))
print('Training -> loss_5: {loss.avg:.4f}, '
'Dice_5 {dice_5.avg:.3f}'.format(
loss=losses_5, dice_5=dice_5))
# print('Training -> loss_6: {loss.avg:.4f}, '
# 'Dice_6 {dice_6.avg:.3f}'.format(
# loss=losses_5, dice_6=dice_6))
#return {'train_loss': loss.avg, 'train_acc': dice.avg}
return losses.avg, dice_5.avg
# %%
def validate(loader, model, criterion, epoch, verbose=True):
batch_time = AverageMeter()
losses = AverageMeter()
dice = AverageMeter()
losses_1 = AverageMeter()
dice_1 = AverageMeter()
losses_2 = AverageMeter()
dice_2 = AverageMeter()
losses_3 = AverageMeter()
dice_3 = AverageMeter()
losses_4 = AverageMeter()
dice_4 = AverageMeter()
losses_5 = AverageMeter()
dice_5 = AverageMeter()
# losses_6 = AverageMeter()
# dice_6 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, sample_batched in enumerate(loader):
image_batch = sample_batched['image']
# label should be only the middle slice
label_batch1 = sample_batched['label'][:,0,:,:]
input_var = Variable(image_batch, volatile=True).float()
input_var = input_var.cuda()
target_var = Variable(label_batch1, volatile=True).long().cuda()
# compute output
output1, output2, output3, output4, output5 = model(input_var)
loss1 = criterion(output1, target_var)
loss2 = criterion(output2, target_var)
loss3 = criterion(output3, target_var)
loss4 = criterion(output4, target_var)
loss5 = criterion(output5, target_var)
# loss6 = criterion(output6, target_var)
# a = (output1 - output2 + 1) / 2
# a_tar = (target_var1 - target_var2 + 1) / 2
# loss4 = criterion(a, a_tar)
# b = (output3 - output2 +1) / 2
# b_tar = (target_var3 - target_var2 + 1) / 2
# loss5 = criterion(b, b_tar)
# loss = loss1 + loss2 + loss3 + 0.5 * loss4 + 0.5 * loss5
loss = loss1 + loss2 + loss3 + loss4 + loss5
#torch.save(input_var, '/home/yanp2/tmp/resu-net/logs/input_{}.pth'.format(i))
#torch.save(target_var, '/home/yanp2/tmp/resu-net/logs/target_{}.pth'.format(i))
#torch.save(output, '/home/yanp2/tmp/resu-net/logs/output_{}.pth'.format(i))
# measure accuracy and record loss
# measure accuracy and record loss
losses.update(loss.data, image_batch.size(0))
losses_1.update(loss1.data, image_batch.size(0))
losses_2.update(loss2.data, image_batch.size(0))
losses_3.update(loss3.data, image_batch.size(0))
losses_4.update(loss4.data, image_batch.size(0))
losses_5.update(loss5.data, image_batch.size(0))
# losses_6.update(loss6.data, image_batch.size(0))
ds_1 = dice_similarity(output1, target_var)
ds_2 = dice_similarity(output2, target_var)
ds_3 = dice_similarity(output3, target_var)
ds_4 = dice_similarity(output4, target_var)
ds_5 = dice_similarity(output5, target_var)
# ds_6 = dice_similarity(output6, target_var)
dice_1.update(ds_1.data, image_batch.size(0))
dice_2.update(ds_2.data, image_batch.size(0))
dice_3.update(ds_3.data, image_batch.size(0))
dice_4.update(ds_4.data, image_batch.size(0))
dice_5.update(ds_5.data, image_batch.size(0))
# dice_6.update(ds_6.data, image_batch.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if ((i+1) % 10 == 0) and verbose:
print('Validation ep {0} [batch {1}/{2}]: '
#'Time {batch_time.val:.1f}s, '
'Loss avg: {loss.avg:.4f}, '
'Dice avg: {dice.avg:.4f}'.format(
epoch+1, i+1, len(loader),
#batch_time=batch_time,
loss=losses,
dice=dice))
print('Validation ep {} -> loss: {loss.avg:.4f}, '
'Dice {dice.avg:.3f}'.format(
epoch+1, loss=losses, dice=dice))
print('Validation -> loss_1: {loss.avg:.4f}, '
'Dice_1 {dice_1.avg:.3f}'.format(
loss=losses_1, dice_1=dice_1))
print('Validation -> loss_2: {loss.avg:.4f}, '
'Dice_2 {dice_2.avg:.3f}'.format(
loss=losses_2, dice_2=dice_2))
print('Validation -> loss_3: {loss.avg:.4f}, '
'Dice_3 {dice_3.avg:.3f}'.format(
loss=losses_3, dice_3=dice_3))
print('Validation -> loss_4: {loss.avg:.4f}, '
'Dice_4 {dice_4.avg:.3f}'.format(
loss=losses_4, dice_4=dice_4))
print('Validation -> loss_5: {loss.avg:.4f}, '
'Dice_5 {dice_5.avg:.3f}'.format(
loss=losses_5, dice_5=dice_5))
# print('Validation -> loss_6: {loss.avg:.4f}, '
# 'Dice_6 {dice_6.avg:.3f}'.format(
# loss=losses_6, dice_6=dice_6))
return losses.avg, dice_5.avg
#def adjust_learning_rate(optimizer, epoch):
def adjust_learning_rate(optimizer, gamma=0.9):
for param_group in optimizer.param_groups:
param_group['lr'] *= gamma
# %%
def save_checkpoint(state, is_best, log_folder, view='axial',
filename='checkpoint.pth.tar'):
"""Save checkpoints
"""
filename = path.join(log_folder, filename)
torch.save(state, filename)
if is_best:
filename_best = path.join(log_folder, 'resu_best_{}.pth.tar'.format(view))
shutil.copyfile(filename, filename_best)
def compute_length(inputs, edge_op):
"""Compute the length of segmentation boundary"""
# Get segmentation
seg_channel = inputs.max(dim=1)[1]
seg_channel = seg_channel.unsqueeze(1)
seg_channel = seg_channel.float()
#print(seg_channel.shape)
g2 = F.conv2d(seg_channel, edge_op, padding=1)
gx = g2 ** 2
gx = torch.sum(torch.squeeze(gx), dim=0)
# Adding small number to increase the numerical stability
#gx = torch.sqrt(gx + 1e-16)
gm = torch.mean(gx.view(-1))
return gm
class HybridLoss2d(nn.Module):
def __init__(self, edge_op, weight=None, size_average=True):
super(HybridLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
self.op = edge_op
def forward(self, inputs, targets):
#return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
ce = self.nll_loss(F.log_softmax(inputs, dim=1), targets)
# dice
dice = dice_similarity(inputs, targets)
# boundary length
length = compute_length(inputs, self.op)
return ce - 0.1 * dice + length
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
class LovaszLoss2d(nn.Module):
def forward(self, inputs, targets):
return lovasz_softmax(F.softmax(inputs), targets)
class LoCeLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(LoCeLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
return lovasz_softmax(F.softmax(inputs), targets) + self.nll_loss(F.log_softmax(inputs, dim=1), targets)
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target):
smooth = 0.00001
input = input.float()
target = target.float()
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return 1 - ((2. * intersection + smooth) /
(iflat.sum() + tflat.sum() + smooth))
class FocalLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(FocalLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
focal_frequency = F.nll_loss(F.softmax(inputs, dim=1), targets, reduction = 'none')
# print('shape1:',focal_frequency.shape)
focal_frequency += 1.0
focal_frequency = torch.pow(focal_frequency, 2)
focal_frequency = focal_frequency.repeat(2, 1, 1, 1)
focal_frequency = focal_frequency.transpose(1,0)
# print('shape:',focal_frequency.shape)
return self.nll_loss(focal_frequency * F.log_softmax(inputs, dim=1), targets)
# %%
if __name__ == "__main__":
global args
args = parser.parse_args()
cv = args.cv_n
view = args.view
use_cuda = cuda.is_available()
# checkpoing_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data' + cv + '/tmp/spleen_dps_1105')
# checkpoing_dir = path.expanduser('/home/fangx2/data/code/data/spleen/spleen_dps_1105')
# checkpoing_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data3/tmp/liver_ras_1106')
# if not path.isdir(checkpoing_dir):
# os.makedirs(checkpoing_dir)
# # log_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data' + cv + '/tmp/spleen_dps_1105')
# # log_dir = path.expanduser('/home/fangx2/data/code/data/spleen/spleen_dps_1105')
# log_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data3/tmp/liver_ras_1106')
# if not path.isdir(log_dir):
# os.makedirs(log_dir)
checkpoing_dir = path.expanduser('/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/tmp/concave')
if not path.isdir(checkpoing_dir):
os.makedirs(checkpoing_dir)
log_dir = path.expanduser('/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/tmp/concave')
if not path.isdir(log_dir):
os.makedirs(log_dir)
"""
training
"""
num_classes = args.num_classes
num_in_channels = args.slices
#model = UNet(5, 2)
model = ResUNet(num_in_channels,num_classes)
# resunet_checkpoint = torch.load('/home/fangx2/data/LIver_submit1/data' + cv + '/tmp/concave_dps_pre/resu_best_axial.pth.tar')
# resunet_dict = resunet_checkpoint['state_dict']
# model.load_state_dict(resunet_dict)
# folder_training = r'/home/fangx2/data/LIver_submit1/data3/training_ras'
# folder_validation = r'/home/fangx2/data/LIver_submit1/data3/validation_ras'
folder_training = '/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/training/'
folder_validation = '/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/validation/'
# folder_training = r'/home/fangx2/data/code/data/spleen/training'
# folder_validation = r'/home/fangx2/data/code/data/spleen/validation'
# folder_training = r'/home/fangx2/data/LIver_submit1/data' + cv + '/training/'
# folder_validation = r'/home/fangx2/data/LIver_submit1/data' + cv + '/validation/'
# folder_training = r'/home/fangx2/data/a_submit2/dataset_256/'
# folder_validation = r'/home/fangx2/data/a_submit2/dataset_256/'
# Set L2 penalty using weight_decay
#optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum)
#optimizer = optim.Adam(model.parameters(), lr=args.lr)
# resunet_checkpoint = torch.load('/home/fangx2/data/LIver_submit1/data1/tmp/concave_lr/resu_best_axial.pth.tar')
# resunet_dict = resunet_checkpoint['state_dict']
# model.load_state_dict(resunet_dict)
# Initialize Sobel edge detection filter
sobel_x = np.asarray([1.0, 0, -1.0, 2.0, 0, -2.0, 1.0, 0, -1.0], dtype=np.float32)
sobel_x /= 4.0
sobel_x = np.reshape(sobel_x, (1, 1, 3, 3))
sobel_y = np.asarray([1.0, 2.0, 1.0, 0, 0, 0, -1.0, -2.0, -1.0], dtype=np.float32)
sobel_y /= 4.0
sobel_y = np.reshape(sobel_y, (1, 1, 3, 3))
sobel = np.concatenate((sobel_x, sobel_y), axis=0)
sobel = Variable(torch.from_numpy(sobel), requires_grad=False)
if use_cuda:
sobel = sobel.cuda()
weights = torch.Tensor([0.2, 1.2])
criterion = CrossEntropyLoss2d(weights)
# criterion = FocalLoss2d(weights)
# criterion = DiceLoss()
#criterion = HybridLoss2d(sobel, weights)
#criterion = LoCeLoss2d(weights)
if use_cuda:
print('\n***** Training ResU-Net with GPU *****\n')
model.cuda()
criterion.cuda()
blocksize = args.blocksize
if view == 'axial' or view == 'sagittal' or view == 'coronal':
composed = dl.get_composed_transform(blocksize, num_in_channels, view)
else:
print('The given view of <{}> is not supported!'.format(view))
batchsize = args.batchsize
dataset_train = dl.LiverCTDataset(folder_training,
transform=composed)
train_loader = dl.DataLoader(dataset_train,
batch_size=args.batchsize,
shuffle=True,
num_workers=4,
drop_last=False
)
dataset_validation = dl.LiverCTDataset(folder_validation,
transform=composed)
val_loader = dl.DataLoader(dataset_validation,
batch_size=args.batchsize,
shuffle=False,
num_workers=2,
drop_last=False
)
best_dice = -1.0
#for epoch in range(args.start_epoch, args.epochs):
num_epochs = args.epochs
train_history = []
val_history = []
for epoch in range(num_epochs):
print('Training epoch {} of {}...'.format(epoch + 1, num_epochs))
# start timing
t_start = time.time()
# train for one epoch
train_loss = train(train_loader, model, criterion,
optimizer, epoch, verbose=True)
train_history.append(train_loss)
# Gradually reducing learning rate
if epoch % 40 == 0:
adjust_learning_rate(optimizer, gamma=0.99)
# evaluate on validation set
val_loss = validate(val_loader, model, criterion, epoch, verbose=True)
val_history.append(val_loss)
dice = val_loss[1]
# remember best prec@1 and save checkpoint
is_best = dice > best_dice
best_dice = max(dice, best_dice)
if is_best:
fn_checkpoint = 'resu_checkpoint_ep{:04d}.pth.tar'.format(epoch + 1)
save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_dice': best_dice,
'optimizer' : optimizer.state_dict(),},
is_best,
checkpoing_dir,
view,
filename=fn_checkpoint)
if epoch == num_epochs - 1:
filename = path.join(checkpoing_dir, 'resunet_checkpoint_final.pth.tar')
torch.save({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_dice': best_dice,
'optimizer' : optimizer.state_dict(),},filename)
elapsed_time = time.time() - t_start
print('Epoch {} completed in {:.2f}s\n'.format(epoch+1, elapsed_time))
# save the training history
time_now = datetime.datetime.now()
time_str = time_now.strftime('%y%m%d-%H%M%S')
fn_train_history = path.join(log_dir, 'train_hist_{}.npy'.format(time_str))
fn_val_history = path.join(log_dir, 'val_hist_{}.npy'.format(time_str))
np.save(fn_train_history, np.asarray(train_history))
np.save(fn_val_history, np.asarray(val_history))
time_disp_str = time_now.strftime('%H:%M:%S on %Y-%m-%d')
print('Training completed at {}'.format(time_disp_str))
print('Training history saved into:\n<{}>'.format(fn_train_history))
print('<{}>'.format(fn_val_history))
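# Example invocation (illustrative; the dataset and checkpoint paths used by the
# script are hard-coded above and would need to be adapted):
#   python train_concave0.py --epochs 2000 -b 8 --blocksize 224 -s 3 -n 2 \
#       --lr 0.002 --view axial --cv_n 1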
| 24,809
| 35.485294
| 131
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/segment_sf_partial.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 16:24:59 2017
@author: yan
Load pre-trained network to segment a new image
Code v0.01
"""
# %% Resnet blocks in U-net
import argparse
import datetime
import nibabel as nib
import numpy as np
import os
from os import path
from scipy import ndimage
import SimpleITK as sitk
import time
import torch
from torch import cuda
from torch import optim
from torch.autograd import Variable
import torch.nn as nn
# from unet_context import UNet_ctx
#from u_net import UNet
# from model.concave_dps import ResUNet
from model.concave_dps_w import ResUNet
# from model.concave_res2 import ResUNet
# from model.concave_res_w3 import ResUNet
#from fcoordresu_net import ResUNet
#from resu_ctx import ResUNet
# %%
parser = argparse.ArgumentParser(description='ResUNet CT segmentation')
parser.add_argument('input_filename', type=str, metavar='input_filename',
help='File of image to be segmented')
parser.add_argument('output_filename', type=str, metavar='output_filename',
help='File to save the segmentation result')
parser.add_argument('-s', '--slices', default=3, type=int,
                    help='number of slices (default: 3)')
parser.add_argument('--begin', default=0, type=int,
help='Beginning slice for segmentation')
parser.add_argument('--end', default=9999, type=int,
help='Ending slice for segmentation')
parser.add_argument('-c', '--cuda', default=True, type=bool, metavar='Use GPU CUDA',
help='Use GPU for computation')
parser.add_argument('-e', '--evaluating', default=False, type=bool,
metavar='evaluation after segmentation', help='Use GT label for evaluation after completing segmentation')
parser.add_argument('-l', '--label_filename', default=None, type=str,
metavar='label_filename',
help='File containing the ground truth segmentation label for evaluation')
parser.add_argument('--network_path', default='./', type=str,
metavar='path of network file',
help='File containing the pre-trained network')
parser.add_argument('--view', default='axial', type=str,
metavar='View', help='view for segmentation (default: axial)')
# %%
def load_image(image_filename, evaluating=False, label_filename=None):
"""
"""
image = nib.load(image_filename)
if evaluating and path.isfile(label_filename):
label = nib.load(label_filename)
else:
label = None
return {'image':image, 'label':label}
# %%
def load_network(fn_network, gpu=True):
""" Load pre-trained network
"""
if path.isfile(fn_network):
print("=> loading checkpoint '{}'".format(fn_network))
if gpu:
checkpoint = torch.load(fn_network)
else:
checkpoint = torch.load(fn_network, map_location=lambda storage, loc: storage)
# Currently only support binary segmentation
# num_classes = 2
#model = UNet(5,2)
#model = UNet_ctx(3,5,2)
model = ResUNet(3,4)
model.load_state_dict(checkpoint['state_dict'])
if gpu:
model.cuda()
else:
model.cpu()
# optimizer = optim.SGD(model.parameters(), lr=0.02)
# if gpu:
# optimizer.load_state_dict(checkpoint['optimizer'])
# else:
optimizer = None
print("=> loaded checkpoint at epoch {}"
.format(checkpoint['epoch']))
return model, optimizer
else:
print("=> no checkpoint found at '{}'".format(fn_network))
return None, None
# %%
def compute_dice(la, lb):
intersection = np.sum(la * lb)
union = np.sum(la + lb)
return 2 * intersection / (union + 0.00001)
# %%
class SimpleITKAsNibabel(nib.Nifti1Image):
"""
Minimal interface to use a SimpleITK image as if it were
a nibabel object. Currently only supports the subset of the
interface used by NiftyNet and is read only
"""
def __init__(self, itk_image):
#try:
self._SimpleITKImage = itk_image
#except RuntimeError as err:
# if 'Unable to determine ImageIO reader' in str(err):
# raise nibabel.filebasedimages.ImageFileError(str(err))
# else:
# raise
# self._header = SimpleITKAsNibabelHeader(self._SimpleITKImage)
affine = make_affine(self._SimpleITKImage)
# super(SimpleITKAsNibabel, self).__init__(
# sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
nib.Nifti1Image.__init__(
self,
sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
class SimpleITKAsNibabelHeader(nib.spatialimages.SpatialHeader):
def __init__(self, image_reference):
super(SimpleITKAsNibabelHeader, self).__init__(
data_dtype=sitk.GetArrayViewFromImage(image_reference).dtype,
shape=sitk.GetArrayViewFromImage(image_reference).shape,
zooms=image_reference.GetSpacing())
def make_affine(simpleITKImage):
# get affine transform in LPS
c = [simpleITKImage.TransformContinuousIndexToPhysicalPoint(p)
for p in ((1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(0, 0, 0))]
c = np.array(c)
affine = np.concatenate([
np.concatenate([c[0:3] - c[3:], c[3:]], axis=0),
[[0.], [0.], [0.], [1.]]], axis=1)
affine = np.transpose(affine)
# convert to RAS to match nibabel
affine = np.matmul(np.diag([-1., -1., 1., 1.]), affine)
return affine
# %%
class Nifti_from_numpy(nib.Nifti1Image):
"""
Minimal interface to use a SimpleITK image as if it were
a nibabel object. Currently only supports the subset of the
interface used by NiftyNet and is read only
"""
def __init__(self, array, itk_image):
#try:
self._SimpleITKImage = itk_image
#except RuntimeError as err:
# if 'Unable to determine ImageIO reader' in str(err):
# raise nibabel.filebasedimages.ImageFileError(str(err))
# else:
# raise
# self._header = SimpleITKAsNibabelHeader(self._SimpleITKImage)
affine = make_affine(self._SimpleITKImage)
# super(SimpleITKAsNibabel, self).__init__(
# sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
nib.Nifti1Image.__init__(
self, array.transpose(), affine)
def extract_volume(volume):
volumes = []
x_coord = []
y_coord = []
for x in range(0,volume.shape[1],112):
for y in range(0,volume.shape[2],112):
end_x = x + 224
end_y = y + 224
if end_x > volume.shape[1]:
x = volume.shape[1] - 224
end_x = volume.shape[1]
if end_y > volume.shape[2]:
y = volume.shape[2] - 224
end_y = volume.shape[2]
cur_img = volume[:, x:end_x, y:end_y]
volumes.append(cur_img)
x_coord.append(x)
y_coord.append(y)
if y == volume.shape[2] - 224:
break
if x == volume.shape[1] - 224:
break
return volumes, x_coord, y_coord
def construct_volume(volumes,x_coord, y_coord):
x_len = max(x_coord) + 224
y_len = max(y_coord) + 224
seg_matrix = []
mul_matrix = []
for i in range(len(volumes)):
output = torch.zeros([volumes[i].shape[0],volumes[i].shape[1],x_len,y_len],dtype=torch.float32)
time_matrix = torch.zeros([volumes[i].shape[0],volumes[i].shape[1], x_len,y_len])
x_start = x_coord[i]
y_start = y_coord[i]
x_end = x_start + 224
y_end = y_start + 224
output[:,:,x_start:x_end, y_start:y_end] = volumes[i]
time_matrix[:,:, x_start:x_end, y_start:y_end] = torch.ones(volumes[i].shape)
seg_matrix.append(output)
mul_matrix.append(time_matrix)
seg_matrix = torch.cat(seg_matrix,0)
mul_matrix = torch.cat(mul_matrix,0)
seg_matrix = torch.sum(seg_matrix, 0)
mul_matrix = torch.sum(mul_matrix, 0)
seg_final = torch.div(seg_matrix, mul_matrix)
seg_final = seg_final.cuda()
return seg_final
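# Illustrative note (not part of the original script): extract_volume slides a
# 224x224 window with stride 112 over the in-plane dimensions of a (C, H, W)
# volume and records each crop's top-left corner; construct_volume pastes the
# per-crop network outputs back at those corners and divides by the per-pixel
# overlap count, so overlapping predictions are averaged. A rough sketch of the
# round trip ('model' is a placeholder for the loaded network):
#   crops, xs, ys = extract_volume(subvolume)
#   outs = [model(torch.from_numpy(c[None]).float().cuda()) for c in crops]
#   fused = construct_volume(outs, xs, ys)   # class scores on the full plane (GPU)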
# %%
if __name__ == "__main__":
args = parser.parse_args()
evaluating = args.evaluating
use_cuda = args.cuda
slice_begin = args.begin
slice_end = args.end
view = args.view
if not cuda.is_available():
print('No available GPU can be used for computation!')
use_cuda = False
num_channels = args.slices
# num_channels = 3
#fn_network = path.expanduser('~/tmp/resu-net3D/checkpoints/resu3d_checkpoint_ep0578.pth.tar')
#fn_network = path.join(args.network_path, 'resu_best.pth.tar')
#load the trained best 2D model
# fn_network = path.join(args.network_path,'resunet_checkpoint_final.pth.tar')
fn_network = path.join(args.network_path,'resu_best_' + view + '.pth.tar')
print('Loading network from <{}>'.format(fn_network))
if not path.isfile(fn_network):
raise Exception('Missing network <{}>! File Not Found!'.format(fn_network))
model_axial, optimizer = load_network(fn_network, gpu=use_cuda)
# Set model to evaluation mode
model_axial.eval()
#img_filename = path.expanduser(args.input_filename)
#file in computer/home/data/ct_nih
img_filename = args.input_filename
print('Input image for segmentation:\t{}'.format(img_filename))
dicom_input = False
# Check if it is DICOM folder
if path.isdir(img_filename):
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames( img_filename )
reader.SetFileNames(dicom_names)
image = reader.Execute()
dicom_input = True
w, h, d = image.GetSize()
img_data = sitk.GetArrayFromImage(image)
else:
volume = load_image(img_filename, evaluating, args.label_filename)
image, label = volume['image'], volume['label']
w, h, d = image.shape[:3]
img_data = np.squeeze(image.get_data())
print('Size of the input image: {}x{}x{}'.format(w, h, d))
img_data = img_data.astype(np.float32)
if view == 'axial':
img_data = img_data
elif view == 'coronal':
img_data = img_data.transpose((2,0,1))
else:
img_data = img_data.transpose(2,1,0)
img_data[img_data > 200] = 200.0
img_data[img_data < -200] = -200.0
img_data /= 200.0
print('Segmenting image...')
start_time = time.time()
results = []
num_half_channels = num_channels >> 1
# Define the range of segmentation
first = max(num_half_channels, slice_begin)
last = min(d - num_half_channels - 1, slice_end)
#last = min(d - num_channels + 1, slice_end)
num_segmented_slices = last - first + 1
print('Segmenting {} slices between [{}, {}]'.format(
num_segmented_slices, first, last))
for i in range(first):
#results.append(np.zeros((1,1,w,h)))
results.append(np.zeros((1,h,w)))
#for depth in range(d - num_channels + 1):
for depth in range(first - num_half_channels,
last - num_half_channels):
if dicom_input:
subvolume = img_data[depth:depth+num_channels,:,:]
else:
subvolume = img_data[:,:,depth:depth+num_channels]
subvolume = subvolume.transpose((2, 1, 0))
subvolumes, x_coor, y_coor = extract_volume(subvolume)
outputs = []
for volume in subvolumes:
volume = volume[np.newaxis,:,:,:]
volume = Variable(torch.from_numpy(volume), volatile=True).float()
if use_cuda:
volume = volume.cuda()
#subs.append(subvolume)
# output1, output2, output3, output4, output5 = model_axial(volume)
output5 = model_axial(volume)
# output_s = nn.Softmax2d()(output5)
outputs.append(output5)
output = construct_volume(outputs, x_coor, y_coor)
output = output.max(dim=0)[1].cpu().data.numpy()
output = output[np.newaxis,:,:]
results.append(output)
#results.append(output.cpu().data.numpy())
print('It took {:.1f}s to segment {} slices'.format(
time.time() - start_time, num_segmented_slices))
#for i in range(num_half_channels):
for i in range(d - last):
#results.append(np.zeros((1,1,w,h)))
results.append(np.zeros((1,h,w)))
results = np.squeeze(np.asarray(results))
#dsize = list(results.shape)
c, h, w = results.shape
#print('Segmentation result in CxHxW: {}x{}x{}'.format(c, h, w))
if not dicom_input:
if view == 'axial':
results = np.transpose(results, (2, 1, 0))
elif view == 'coronal':
results = np.transpose(results,(1, 0, 2))
else:
results = results
print('Segmentation result in HxWxC: {}x{}x{}'.format(h, w, c))
# results[results > 0.49] = 1
# results[results < 0.5] = 0
results = results.astype(np.uint8)
if evaluating:
label_data = label.get_data()
# remove tumor label
label_data[label_data > 1] = 1
dice = compute_dice(results, label_data)
print('Dice score of ResU-Net: {:.3f}'.format(dice))
# print('Starting morphological post-processing...')
# #print('no postprocess...')
# # perform morphological operation
# #remove small noisy segmentation
# results = ndimage.binary_opening(results, iterations=5)
# #Generate smooth segmentation
# results = ndimage.binary_dilation(results, iterations=3)
# results = ndimage.binary_fill_holes(results)
# results = ndimage.binary_erosion(results, iterations=3)
# perform largest connected component analysis
# labeled_array, num_features = ndimage.label(results)
# size_features = np.zeros((num_features))
# for i in range(num_features):
# size_features[i] = np.sum(labeled_array == i+1)
# results = np.zeros_like(labeled_array)
# results[labeled_array == np.argmax(size_features) + 1] = 1
results_post = np.zeros_like(results)
min_co = 0
for i in range(1, 4):
#liver
        if i == 1:
results_i = np.zeros(results.shape)
# results_i = results_i.cuda().clone()
results_i[results == i] = 1
labeled_array_i, num_features_i = ndimage.label(results_i)
size_features_i = np.zeros((num_features_i))
for j in range(num_features_i):
size_features_i[j] = np.sum(labeled_array_i == j+1)
results_i = np.zeros_like(labeled_array_i)
results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
results_i = results_i.astype(np.uint8)
summed_1 = np.sum(results_i.sum(axis=0), axis=0)
non0_list = np.asarray([i for i in range(summed_1.size)])
non0_list = non0_list[summed_1 > 1]
min_co = 0.8 * np.min(non0_list)
min_co = int(min_co)
print('min_co', min_co)
        #kidney
        # elif (rather than a second independent if) so that the final else
        # below only handles the spleen label (i == 3)
        elif i == 2:
results_i = np.zeros(results.shape)
# results_i = results_i.cuda().clone()
results_i[results == i] = 1
results_i[:,:,:min_co] = 0
labeled_array_i, num_features_i = ndimage.label(results_i)
size_features_i = np.zeros((num_features_i))
for j in range(num_features_i):
size_features_i[j] = np.sum(labeled_array_i == j+1)
results_i = np.zeros_like(labeled_array_i)
# print('idx1:',np.argmax(size_features_i))
results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
results1_i = np.zeros_like(labeled_array_i)
idx2 = np.argsort(-size_features_i)[1]
# print('idx2:',idx2)
results1_i[labeled_array_i == idx2 + 1] = i
results_i = results_i + results1_i
results_i = results_i.astype(np.uint8)
#spleen
else:
results_i = np.zeros(results.shape)
# results_i = results_i.cuda().clone()
results_i[results == i] = 1
results_i[:,:,:min_co] = 0
labeled_array_i, num_features_i = ndimage.label(results_i)
size_features_i = np.zeros((num_features_i))
for j in range(num_features_i):
size_features_i[j] = np.sum(labeled_array_i == j+1)
results_i = np.zeros_like(labeled_array_i)
results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
results_i = results_i.astype(np.uint8)
results_post += results_i
results = results_post
# results = results.astype(np.uint8)
# Create the segmentation image for saving
if dicom_input:
new_image = Nifti_from_numpy(results, image)
else:
header = image.header
header.set_data_dtype(np.uint8)
# if nifty1
if header['sizeof_hdr'] == 348:
new_image = nib.Nifti1Image(results, image.affine, header=header)
# if nifty2
elif header['sizeof_hdr'] == 540:
new_image = nib.Nifti2Image(results, image.affine, header=header)
else:
raise IOError('Input image header problem')
#seg_dir = path.expanduser('~/tmp/resu-net/segmentation')
#fn_seg = path.join(seg_dir, 'segmentation.nii')
fn_seg = path.expanduser(args.output_filename)
print('Writing segmentation result into <{}>...'.format(fn_seg))
#mu.write_mhd_file(fn_seg, results, meta_dict=header)
nib.save(new_image, fn_seg)
print('Segmentation result has been saved.')
# Compute Dice for evaluating
if evaluating:
dice = compute_dice(results, label_data)
print('Final Dice score: {:.3f}'.format(dice))
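# --- Hedged reference sketch (not part of the original script) ---------------
# compute_dice is imported from a helper module elsewhere in this repository.
# The function below is only an assumed, minimal illustration of the standard
# foreground-vs-background Dice overlap such a helper typically computes; the
# name _binary_dice and the eps smoothing term are illustrative, not the
# authors' implementation.
import numpy as np  # (already imported at the top of the original script)
def _binary_dice(pred, target, eps=1e-6):
    pred_fg = np.asarray(pred) > 0
    target_fg = np.asarray(target) > 0
    intersection = np.logical_and(pred_fg, target_fg).sum()
    return 2.0 * intersection / (pred_fg.sum() + target_fg.sum() + eps)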
| 17,990
| 34.001946
| 126
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/dataset/dataset_liverCT_2D.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 14:10:33 2017
@author: yanrpi
"""
# %%
import glob
import numpy as np
import nibabel as nib
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from os import path
# scipy.misc.imsave was removed from recent SciPy releases; imageio's imwrite
# is assumed here as a drop-in replacement for saving the sample images in the
# tester below (the .png extension selects the output format).
from imageio import imwrite as imsave
from scipy import ndimage
# %%
class LiverCTDataset(Dataset):
"""Liver CT image dataset."""
def __init__(self, root_dir, transform=None, verbose=False):
"""
Args:
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied on a sample.
"""
if not path.isdir(root_dir):
raise ValueError("\"{}\" is not a valid directory path!".format(root_dir))
self.root_dir = root_dir
self.transform = transform
self.verbose = verbose
res = glob.glob(path.join(root_dir, 'volume-*.nii'))
#print(res)
self.num_images = len(res)
self.ct_filenames = res
def __len__(self):
return self.num_images
def __getitem__(self, idx):
img_name = self.ct_filenames[idx]
seg_name = img_name.replace('volume', 'segmentation')
image = nib.load(img_name)
segmentation = nib.load(seg_name)
# image = nib.as_closest_canonical(image)
# segmentation = nib.as_closest_canonical(segmentation)
if self.verbose:
print('{} -> {}'.format(idx, img_name))
print('Image shape: {}'.format(image.shape))
print('Segmentation shape: {}'.format(segmentation.shape))
sample = {'image': image, 'label': segmentation}
#sample = {'image': img_name, 'segmentation': seg_name}
if self.transform:
sample = self.transform(sample)
return sample
# %%
class RandomCrop(object):
"""Crop randomly the image in a sample.
For segmentation training, only crop sections with non-zero label
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size, view):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size, output_size)
else:
assert len(output_size) == 3
self.output_size = output_size
self.view = view
def __call__(self, sample):
image, segmentation = sample['image'], sample['label']
h, w, d = image.shape
new_h, new_w, new_d = self.output_size
view = self.view
new_d_half = new_d >> 1
# Find slices containing segmentation object
seg_data = segmentation.get_data()
img_data = image.get_data()
if view == 'axial':
img_data = img_data
seg_data = seg_data
elif view == 'coronal':
img_data = img_data.transpose((2, 0, 1))
seg_data = seg_data.transpose((2, 0, 1))
else:
img_data = img_data.transpose((2, 1, 0))
seg_data = seg_data.transpose((2, 1, 0))
summed = np.sum(seg_data.sum(axis=0), axis=0)
non0_list = np.asarray([i for i in range(summed.size)])
non0_list = non0_list[summed > 10]
seg_start = max(np.min(non0_list) - new_d_half, 0)
seg_end = min(np.max(non0_list) + new_d_half, d)
if new_h == h:
top = 0
left = 0
else:
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
#ant = np.random.randint(0, d - new_d)
ant = np.random.randint(seg_start, seg_end - new_d)
img_data = img_data[top: top + new_h,
left: left + new_w,
ant: ant + new_d]
img_data = img_data.astype(np.float32)
ant_seg = ant + new_d_half
seg_data = seg_data[top: top + new_h,
left: left + new_w,
ant_seg: ant_seg + 1]
# seg_data = seg_data[top: top + new_h,
# left: left + new_w,
# ant: ant + new_d]
seg_data = seg_data.astype(np.float32)
# Merge labels
seg_data[seg_data > 1] = 1
# flip up side down to correct
# image = np.flip(img_data, axis=1).copy()
# label = np.flip(seg_data, axis=1).copy()
return {'image': img_data, 'label': seg_data}
class RandomHorizontalFlip(object):
"""Randomly flip the image in the horizontal direction.
"""
def __call__(self, sample):
if random.uniform(0,1) < 0.5:
return sample
# else return flipped sample
image, label = sample['image'], sample['label']
image = np.flip(image, axis=0).copy()
label = np.flip(label, axis=0).copy()
return {'image': image, 'label': label}
class RandomVerticalFlip(object):
"""Randomly flip the image in the horizontal direction.
"""
def __call__(self, sample):
if random.uniform(0,1) < 0.5:
return sample
# else return flipped sample
image, label = sample['image'], sample['label']
image = np.flip(image, axis=1).copy()
label = np.flip(label, axis=1).copy()
return {'image': image, 'label': label}
# def pixel_mask(image, p):
# p_map = np.random.random(size = image.shape)
# mask = p_map <= p
# return mask
# def boundary_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=3] = 1
# d_map[d_map>3] = 0
# # d_map = d_map<=5
# # print('d_map:',d_map.sum())
# p_map = d_map
# p_map[p_map == 1] = p1
# p_map[p_map == 0] = p2
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# return mask
# def bkg_mask(label, p1, p2):
# p_map = label.copy()
# p_map[p_map>=1] = 1
# p_map[p_map<1] = 0
# # print('P_map.sum0',(p_map==0).sum())
# # print('P_map.sum1',(p_map==1).sum())
# p_map[p_map == 0] = p2
# # print('p_mapsum1',p_map.sum())
# p_map[p_map == 1] = p1
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# # print('mask.sum:',mask.sum())
# return mask
# def bdy2blk(bdy, nrows, ncols, p1, p2):
# # print(bdy.shape)
# bdy1 = np.squeeze(bdy,-1)
# # 224 x 224
# h, w = bdy1.shape
# # print(h,nrows,h/nrows)
# # 16 x 16 x 14 x 14
# bdy1 = bdy1.reshape(h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols)
# bdy1 = bdy1.reshape(nrows, ncols, int(h/nrows), int(w/nrows))
# # print('bdy1.shape:',bdy1.shape)
# for i in range(bdy1.shape[0]):
# for j in range(bdy1.shape[1]):
# if bdy1[i][j].sum() >= 1:
# if np.random.random_sample() <= p1:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j],shape)
# else:
# if np.random.random_sample() <= p2:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j].shape)
# return bdy1
# def blk_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=5] = 1
# d_map[d_map>5] = 0
# p_map = d_map
# # print('p_map_shape:', p_map.shape)
# mask = bdy2blk(p_map,16,16, p1, p2)
# # p_map size 16 x 16 x 14 x 14
# # p_map[p_map == 1] = p1
# # p_map[p_map == 0] = p2
# # r_map = np.random.random(size = label.shape)
# # mask = r_map <= p_map
# # 16x16 --> 224 x 224
# # print('mask_shape1', mask.shape)
# mask = np.hstack(mask)
# mask = np.hstack(mask)
# # print('mask_shape', mask.shape)
# mask = np.expand_dims(mask, -1)
# return mask
# class BdyblkOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # mask = boundary_mask(label, p1, p2)
# mask = bdyblk_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BoundaryOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # p1 = self.pa
# # p2 = self.pb
# mask = boundary_mask(label, p1, p2)
# # mask = bdyblk_mask(label, p1, p2)
# # print('mask_:',mask.sum())
# # noise = np.random.normal(0,0.33,image.shape)
# # noise[noise>1] = 1
# # noise[noise<-1] = -1
# # noise = noise*(1-mask)
# image = image * mask
# # image = image
# # image = image + noise
# # label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BkgOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = bkg_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class MaskOut(object):
# def __init__(self, probability):
# self.pb = probability
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = pixel_mask(image, p)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
class Clip(object):
"""Clip the intensity values.
Args:
Lower and upper bounds.
"""
def __init__(self, lower_bound, upper_bound):
'''
'''
# Make sure upper bound is larger than the lower bound
self.LB = min(lower_bound, upper_bound)
self.UB = max(lower_bound, upper_bound)
def __call__(self, sample):
image, label = sample['image'], sample['label']
image[image>self.UB] = self.UB
image[image<self.LB] = self.LB
return {'image': image, 'label': label}
class Normalize(object):
"""Normalize the input data to 0 mean 1 std per channel"""
def __init__(self, lower_bound, upper_bound):
self.LB = min(lower_bound, upper_bound)
self.UB = max(lower_bound, upper_bound)
def __call__(self, sample):
image, label = sample['image'], sample['label']
#img_mean = np.mean(image, axis=(0,1))
#img_std = np.std(image, axis=(0,1))
#nc = image.shape[2]
#for c in range(nc):
# image[:,:,c] = (image[:,:,c] - img_mean[c]) / img_std[c]
mid_point = (self.LB + self.UB) / 2.0
image -= mid_point
half_range = (self.UB - self.LB) / 2.0
image /= (half_range + 0.000001)
return {'image': image, 'label': label}
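# Worked example (illustrative, not part of the original file): with bounds
# (-200, 200) the transform above maps the clipped CT intensities into roughly
# [-1, 1], e.g. -200 -> (-200 - 0) / 200 ~ -1.0, 0 -> 0.0, 200 -> ~ +1.0.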
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, label = sample['image'], sample['label']
# image, label, mask = sample['image'], sample['label'], sample['mask']
# swap color axis because
# numpy image: W x H x C
# torch image: C X H X W
image = image.transpose((2, 1, 0))
#print(image.shape, type(image), image.dtype)
label = label.transpose((2, 1, 0))
# mask = mask.transpose(2, 1, 0)
#print(label.shape, type(label), label.dtype)
return {'image': torch.from_numpy(image),
'label': torch.from_numpy(label)}
# return {'image': torch.from_numpy(image),
# 'label': torch.from_numpy(label),
# 'mask': torch.from_numpy(mask)}
def get_composed_transform(hw, slices, view):
composed = transforms.Compose([RandomCrop((hw, hw, slices),view),
Clip(-200, 200),
Normalize(-200, 200),
RandomHorizontalFlip(),
RandomVerticalFlip(),
# MaskOut(0.5),
# BoundaryOut(0.5, 1),
# BdyblkOut(1, 0.5),
# BkgOut(1,0.5),
ToTensor()])
return composed
# %% Tester
if __name__ == '__main__':
img_folder = '/home/data/LITS/training'
#img_folder = '/Users/yan/Documents/data/LITS_training'
log_dir = path.expanduser('~/tmp/u-net/logs')
composed = get_composed_transform(224, 3, 'axial')
dataset = LiverCTDataset(img_folder,
transform=composed,
verbose = True)
'''
for i in range(5):
sample = dataset[i]
img = sample['image']
print(i, img.size(), type(img))
label = sample['label']
print(i, label.size(), type(label))
'''
# num_workers = 4 to use more processes
dataloader = DataLoader(dataset, batch_size=1, shuffle=True,
num_workers=0)
#for i_batch, sample_batched in enumerate(dataloader):
batch_it = iter(dataloader)
sample_batched = next(batch_it)
image_batch = sample_batched['image']
label_batch = sample_batched['label']
    # The mask-producing transforms (MaskOut, BoundaryOut, ...) are commented
    # out in get_composed_transform, so the batch normally has no 'mask' entry.
    print('Batch size: {}, image size: {}, label size: {}'.format(
        len(image_batch), image_batch.size(2), label_batch.size(2)))
    img_data = image_batch[0,0,:,:].numpy()
    v_min = img_data.min()
    v_max = img_data.max()
    print('Img -> max: {}, min: {}'.format(v_max, v_min))
    img_data = (img_data - v_min) / (v_max - v_min) * 255
    img_data = img_data.astype(np.uint8)
    label_data = label_batch[0,0,:,:].numpy()
    v_min = label_data.min()
    v_max = label_data.max()
    print('Label -> max: {}, min: {}'.format(v_max, v_min))
    label_data *= 255
    label_data = label_data.astype(np.uint8)
    # Save images
    imsave(path.join(log_dir, 'image_sample.png'), img_data)
    imsave(path.join(log_dir, 'label_sample.png'), label_data)
    # Only save a mask sample when a mask-producing transform is actually enabled
    if 'mask' in sample_batched:
        mask_batch = sample_batched['mask']
        mask_data = mask_batch[0,0,:,:].numpy()
        print('mask:', mask_data.shape, mask_data.min(), mask_data.max())
        mask_data = (mask_data * 255).astype(np.uint8)
        imsave(path.join(log_dir, 'mask_sample.png'), mask_data)
| 16,145
| 31.552419
| 99
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/dataset/dataset_muor_2D.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 14:10:33 2017
@author: yanrpi
"""
# %%
import glob
import numpy as np
import nibabel as nib
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from os import path
# from scipy.misc import imsave
from scipy import ndimage
# scipy.misc.imsave was removed from recent SciPy releases; imageio's imwrite
# is assumed here as a drop-in replacement (the .png extension selects the
# output format).
from imageio import imwrite as imsave
# %%
class LiverCTDataset(Dataset):
"""Liver CT image dataset."""
def __init__(self, root_dir, transform=None, verbose=False):
"""
Args:
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied on a sample.
"""
if not path.isdir(root_dir):
raise ValueError("\"{}\" is not a valid directory path!".format(root_dir))
self.root_dir = root_dir
self.transform = transform
self.verbose = verbose
res = glob.glob(path.join(root_dir, 'volume-*.nii'))
#print(res)
self.num_images = len(res)
self.ct_filenames = res
def __len__(self):
return self.num_images
def __getitem__(self, idx):
img_name = self.ct_filenames[idx]
seg_name = img_name.replace('volume', 'segmentation')
image = nib.load(img_name)
segmentation = nib.load(seg_name)
# image = nib.as_closest_canonical(image)
# segmentation = nib.as_closest_canonical(segmentation)
if self.verbose:
print('{} -> {}'.format(idx, img_name))
print('Image shape: {}'.format(image.shape))
print('Segmentation shape: {}'.format(segmentation.shape))
sample = {'image': image, 'label': segmentation}
#sample = {'image': img_name, 'segmentation': seg_name}
if self.transform:
sample = self.transform(sample)
return sample
# %%
class RandomCrop(object):
"""Crop randomly the image in a sample.
For segmentation training, only crop sections with non-zero label
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size, view):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size, output_size)
else:
assert len(output_size) == 3
self.output_size = output_size
self.view = view
def __call__(self, sample):
image, segmentation = sample['image'], sample['label']
h, w, d = image.shape
new_h, new_w, new_d = self.output_size
view = self.view
new_d_half = new_d >> 1
# Find slices containing segmentation object
seg_data = segmentation.get_data()
img_data = image.get_data()
if view == 'axial':
img_data = img_data
seg_data = seg_data
elif view == 'coronal':
img_data = img_data.transpose((2, 0, 1))
seg_data = seg_data.transpose((2, 0, 1))
else:
img_data = img_data.transpose((2, 1, 0))
seg_data = seg_data.transpose((2, 1, 0))
summed = np.sum(seg_data.sum(axis=0), axis=0)
non0_list = np.asarray([i for i in range(summed.size)])
non0_list = non0_list[summed > 10]
seg_start = max(np.min(non0_list) - new_d_half, 0)
seg_end = min(np.max(non0_list) + new_d_half, d)
if new_h == h:
top = 0
left = 0
else:
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
#ant = np.random.randint(0, d - new_d)
ant = np.random.randint(seg_start, seg_end - new_d)
img_data = img_data[top: top + new_h,
left: left + new_w,
ant: ant + new_d]
img_data = img_data.astype(np.float32)
ant_seg = ant + new_d_half
seg_data = seg_data[top: top + new_h,
left: left + new_w,
ant_seg: ant_seg + 1]
# seg_data = seg_data[top: top + new_h,
# left: left + new_w,
# ant: ant + new_d]
seg_data = seg_data.astype(np.float32)
# Merge labels
# seg_data[seg_data > 1] = 1
# flip up side down to correct
# image = np.flip(img_data, axis=1).copy()
# label = np.flip(seg_data, axis=1).copy()
return {'image': img_data, 'label': seg_data}
class RandomHorizontalFlip(object):
"""Randomly flip the image in the horizontal direction.
"""
def __call__(self, sample):
if random.uniform(0,1) < 0.5:
return sample
# else return flipped sample
image, label = sample['image'], sample['label']
image = np.flip(image, axis=0).copy()
label = np.flip(label, axis=0).copy()
return {'image': image, 'label': label}
class RandomVerticalFlip(object):
"""Randomly flip the image in the horizontal direction.
"""
def __call__(self, sample):
if random.uniform(0,1) < 0.5:
return sample
# else return flipped sample
image, label = sample['image'], sample['label']
image = np.flip(image, axis=1).copy()
label = np.flip(label, axis=1).copy()
return {'image': image, 'label': label}
# def pixel_mask(image, p):
# p_map = np.random.random(size = image.shape)
# mask = p_map <= p
# return mask
# def boundary_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=3] = 1
# d_map[d_map>3] = 0
# # d_map = d_map<=5
# # print('d_map:',d_map.sum())
# p_map = d_map
# p_map[p_map == 1] = p1
# p_map[p_map == 0] = p2
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# return mask
# def bkg_mask(label, p1, p2):
# p_map = label.copy()
# p_map[p_map>=1] = 1
# p_map[p_map<1] = 0
# # print('P_map.sum0',(p_map==0).sum())
# # print('P_map.sum1',(p_map==1).sum())
# p_map[p_map == 0] = p2
# # print('p_mapsum1',p_map.sum())
# p_map[p_map == 1] = p1
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# # print('mask.sum:',mask.sum())
# return mask
# def bdy2blk(bdy, nrows, ncols, p1, p2):
# # print(bdy.shape)
# bdy1 = np.squeeze(bdy,-1)
# # 224 x 224
# h, w = bdy1.shape
# # print(h,nrows,h/nrows)
# # 16 x 16 x 14 x 14
# bdy1 = bdy1.reshape(h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols)
# bdy1 = bdy1.reshape(nrows, ncols, int(h/nrows), int(w/nrows))
# # print('bdy1.shape:',bdy1.shape)
# for i in range(bdy1.shape[0]):
# for j in range(bdy1.shape[1]):
# if bdy1[i][j].sum() >= 1:
# if np.random.random_sample() <= p1:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j],shape)
# else:
# if np.random.random_sample() <= p2:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j].shape)
# return bdy1
# def blk_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=5] = 1
# d_map[d_map>5] = 0
# p_map = d_map
# # print('p_map_shape:', p_map.shape)
# mask = bdy2blk(p_map,16,16, p1, p2)
# # p_map size 16 x 16 x 14 x 14
# # p_map[p_map == 1] = p1
# # p_map[p_map == 0] = p2
# # r_map = np.random.random(size = label.shape)
# # mask = r_map <= p_map
# # 16x16 --> 224 x 224
# # print('mask_shape1', mask.shape)
# mask = np.hstack(mask)
# mask = np.hstack(mask)
# # print('mask_shape', mask.shape)
# mask = np.expand_dims(mask, -1)
# return mask
# class BdyblkOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # mask = boundary_mask(label, p1, p2)
# mask = bdyblk_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BoundaryOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # p1 = self.pa
# # p2 = self.pb
# mask = boundary_mask(label, p1, p2)
# # mask = bdyblk_mask(label, p1, p2)
# # print('mask_:',mask.sum())
# # noise = np.random.normal(0,0.33,image.shape)
# # noise[noise>1] = 1
# # noise[noise<-1] = -1
# # noise = noise*(1-mask)
# image = image * mask
# # image = image
# # image = image + noise
# # label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BkgOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = bkg_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class MaskOut(object):
# def __init__(self, probability):
# self.pb = probability
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = pixel_mask(image, p)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
class Clip(object):
"""Clip the intensity values.
Args:
Lower and upper bounds.
"""
def __init__(self, lower_bound, upper_bound):
'''
'''
# Make sure upper bound is larger than the lower bound
self.LB = min(lower_bound, upper_bound)
self.UB = max(lower_bound, upper_bound)
def __call__(self, sample):
image, label = sample['image'], sample['label']
image[image>self.UB] = self.UB
image[image<self.LB] = self.LB
return {'image': image, 'label': label}
class Normalize(object):
"""Normalize the input data to 0 mean 1 std per channel"""
def __init__(self, lower_bound, upper_bound):
self.LB = min(lower_bound, upper_bound)
self.UB = max(lower_bound, upper_bound)
def __call__(self, sample):
image, label = sample['image'], sample['label']
#img_mean = np.mean(image, axis=(0,1))
#img_std = np.std(image, axis=(0,1))
#nc = image.shape[2]
#for c in range(nc):
# image[:,:,c] = (image[:,:,c] - img_mean[c]) / img_std[c]
mid_point = (self.LB + self.UB) / 2.0
image -= mid_point
half_range = (self.UB - self.LB) / 2.0
image /= (half_range + 0.000001)
return {'image': image, 'label': label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, label = sample['image'], sample['label']
# image, label, mask = sample['image'], sample['label'], sample['mask']
# swap color axis because
# numpy image: W x H x C
# torch image: C X H X W
image = image.transpose((2, 1, 0))
#print(image.shape, type(image), image.dtype)
label = label.transpose((2, 1, 0))
# mask = mask.transpose(2, 1, 0)
#print(label.shape, type(label), label.dtype)
return {'image': torch.from_numpy(image),
'label': torch.from_numpy(label)}
# return {'image': torch.from_numpy(image),
# 'label': torch.from_numpy(label),
# 'mask': torch.from_numpy(mask)}
def get_composed_transform(hw, slices, view):
composed = transforms.Compose([RandomCrop((hw, hw, slices),view),
Clip(-200, 200),
Normalize(-200, 200),
RandomHorizontalFlip(),
RandomVerticalFlip(),
# MaskOut(0.5),
# BoundaryOut(0.5, 1),
# BdyblkOut(1, 0.5),
# BkgOut(1,0.5),
ToTensor()])
return composed
# %% Tester
if __name__ == '__main__':
img_folder = '/zion/fangx2/BTCV/training_256'
#img_folder = '/Users/yan/Documents/data/LITS_training'
log_dir = path.expanduser('/zion/fangx2/mu_or/train/logs/')
composed = get_composed_transform(224, 3, 'axial')
dataset = LiverCTDataset(img_folder,
transform=composed,
verbose = True)
'''
for i in range(5):
sample = dataset[i]
img = sample['image']
print(i, img.size(), type(img))
label = sample['label']
print(i, label.size(), type(label))
'''
# num_workers = 4 to use more processes
dataloader = DataLoader(dataset, batch_size=1, shuffle=True,
num_workers=0)
#for i_batch, sample_batched in enumerate(dataloader):
batch_it = iter(dataloader)
sample_batched = next(batch_it)
image_batch = sample_batched['image']
label_batch = sample_batched['label']
print('Batch size: {}, image size: {}, label size: {}'.format(len(image_batch),
image_batch.size(2),
label_batch.size(2)))
img_data = image_batch[0,0,:,:].numpy()
v_min = img_data.min()
v_max = img_data.max()
print('Img -> max: {}, min: {}'.format(v_max, v_min))
img_data = (img_data - v_min) / (v_max - v_min) * 255
img_data = img_data.astype(np.uint8)
label_data = label_batch[0,0,:,:].numpy()
v_min = label_data.min()
v_max = label_data.max()
print('Label -> max: {}, min: {}'.format(v_max, v_min))
label_data *= 255
    label_data = label_data.astype(np.uint8)
    # Save images
    imsave(path.join(log_dir, 'image_sample.png'), img_data)
    imsave(path.join(log_dir, 'label_sample.png'), label_data)
| 15,824
| 31.428279
| 93
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/model/denseu_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
self.add_module('denselayer%d' % (i + 1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class up_in(nn.Sequential):
def __init__(self, num_input_features1, num_input_features2, num_output_features):
super(up_in, self).__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.add_module('conv1_1', nn.Conv2d(num_input_features1, num_input_features2,
kernel_size=1, stride=1, bias=False))
self.add_module('conv3_3', nn.Conv2d(num_input_features2, num_output_features,
kernel_size=3, stride=1, padding=1, bias=False))
self.add_module('norm', nn.BatchNorm2d(num_output_features))
self.add_module('relu', nn.ReLU(inplace=True))
def forward(self, x,y):
y = self.up(y)
x = self.conv1_1(x)
z = self.conv3_3(x+y)
z = self.norm(z)
z = self.relu(z)
return z
class upblock(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(upblock, self).__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.add_module('conv3_3', nn.Conv2d(num_input_features, num_output_features,
kernel_size=3, stride=1, padding=1, bias=False))
self.add_module('norm', nn.BatchNorm2d(num_output_features))
self.add_module('relu', nn.ReLU(inplace=True))
def forward(self, x,y):
y = self.up(y)
z = self.conv3_3(x+y)
z = self.norm(z)
z = self.relu(z)
return z
class up_out(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(up_out, self).__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.add_module('conv3_3', nn.Conv2d(num_input_features, num_output_features,
kernel_size=3, stride=1, padding=1, bias=False))
self.dropout = nn.Dropout2d(p=0.3)
self.add_module('norm', nn.BatchNorm2d(num_output_features))
self.add_module('relu', nn.ReLU(inplace=True))
def forward(self, y):
y = self.up(y)
y = self.conv3_3(y)
y = self.dropout(y)
y = self.norm(y)
y = self.relu(y)
return y
class DenseUNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
"""
def __init__(self, growth_rate=48, block_config=(6, 12, 36, 24),
num_init_features=96, bn_size=4, drop_rate=0, num_channels = 3, num_classes = 2):
super(DenseUNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(num_channels, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
self.up1 = up_in(48*44, 48*46, 48*16)
self.up2 = upblock(48*16, 48*8)
self.up3 = upblock(48*8, 96)
self.up4 = upblock(96,96)
self.up5 = up_out(96,64)
self.outconv = outconv(64,num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features.conv0(x)
x0 = self.features.norm0(features)
x0 = self.features.relu0(x0)
x1 = self.features.pool0(x0)
x1 = self.features.denseblock1(x1)
x2 = self.features.transition1(x1)
x2 = self.features.denseblock2(x2)
x3 = self.features.transition2(x2)
x3 = self.features.denseblock3(x3)
x4 = self.features.transition3(x3)
x4 = self.features.denseblock4(x4)
y4 = self.up1(x3, x4)
y3 = self.up2(x2, y4)
y2 = self.up3(x1, y3)
y1 = self.up4(x0, y2)
y0 = self.up5(y1)
out = self.outconv(y0)
# out = F.softmax(out, dim=1)
return out
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
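# --- Hedged usage sketch (not part of the original file) ---------------------
# Minimal smoke test showing how DenseUNet is assumed to be driven: a 3-channel
# 224x224 input (matching the 2D CT datasets above) yields per-pixel class
# scores at the same resolution.  The input size, eval()/no_grad() usage and
# printed shape are illustrative assumptions, not the authors' training setup.
if __name__ == '__main__':
    model = DenseUNet(num_channels=3, num_classes=2)
    model.eval()
    with torch.no_grad():
        scores = model(torch.randn(1, 3, 224, 224))
    print(scores.shape)  # expected: torch.Size([1, 2, 224, 224])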
| 7,830
| 42.505556
| 114
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/model/unet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch)
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
# would be a nice idea if the upsampling could be learned too,
# but my machine do not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
diffX = x1.size()[2] - x2.size()[2]
diffY = x1.size()[3] - x2.size()[3]
x2 = F.pad(x2, (diffX // 2, int(diffX / 2),
diffY // 2, int(diffY / 2)))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
class UNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4_1 = up(128, 64)
self.outc1 = outconv(64, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
a = self.up4_1(x, x1)
a = self.outc1(a)
a = F.softmax(a, dim=1)
return a
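# Note (illustrative, not part of the original file): this UNet applies a
# softmax inside forward(), so it returns per-pixel class probabilities rather
# than raw logits.  Losses that consume probabilities directly (e.g. a soft
# Dice loss) fit this output; nn.CrossEntropyLoss, by contrast, expects the
# un-normalised logits that the ResUNet variant below returns.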
| 2,971
| 26.266055
| 86
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/model/resu_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1)
)
def forward(self, x):
x = self.conv(x)
return x
class one_conv(nn.Module):
def __init__(self, in_ch, out_ch):
super(one_conv, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 1)
)
def forward(self,x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1)
)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.pool = nn.MaxPool2d(2)
self.mpconv = double_conv(in_ch, out_ch)
self.bridge = one_conv(in_ch, out_ch)
def forward(self, x):
x = self.pool(x)
#print(x.shape)
x_1 = self.mpconv(x)
#print(x_1.shape)
x_2 = self.bridge(x)
#print(x_2.shape)
x = x_1 + x_2
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
# would be a nice idea if the upsampling could be learned too,
# but my machine do not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
self.bridge = one_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
diffX = x1.size()[2] - x2.size()[2]
diffY = x1.size()[3] - x2.size()[3]
x2 = F.pad(x2, (diffX // 2, int(diffX / 2),
diffY // 2, int(diffY / 2)))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x) + self.bridge(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 1)
)
def forward(self, x):
x = self.conv(x)
return x
class ResUNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(ResUNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4 = up(128, 64)
self.outc = outconv(64, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
# x = F.softmax(x, dim=1)
return x
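# --- Hedged usage sketch (not part of the original file) ---------------------
# ResUNet returns raw per-class logits (the final softmax is commented out in
# forward), so a cross-entropy loss over integer label maps is the natural
# pairing.  Batch size, image size and class count below are illustrative
# assumptions, not the authors' training configuration.
if __name__ == '__main__':
    model = ResUNet(n_channels=3, n_classes=2)
    images = torch.randn(2, 3, 224, 224)
    labels = torch.randint(0, 2, (2, 224, 224))
    logits = model(images)                       # shape (2, 2, 224, 224)
    loss = nn.CrossEntropyLoss()(logits, labels)
    print(logits.shape, loss.item())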
| 3,831
| 27.176471
| 86
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/model/concave_dps.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1)
)
def forward(self, x):
x = self.conv(x)
return x
class one_conv(nn.Module):
def __init__(self, in_ch, out_ch):
super(one_conv, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 1)
)
def forward(self,x):
x = self.conv(x)
return x
class res_conv(nn.Module):
def __init__(self, in_ch, out_ch):
super(res_conv, self).__init__()
self.conv1 = double_conv(in_ch, out_ch)
self.bridge = one_conv(in_ch, out_ch)
def forward(self, x):
x1 = self.conv1(x)
if x.shape == x1.shape:
r = x + x1
else:
r = self.bridge(x) + x1
return r
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1)
)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.pool = nn.MaxPool2d(2)
self.mpconv = double_conv(in_ch, out_ch)
self.bridge = one_conv(in_ch, out_ch)
def forward(self, x, y):
x = self.pool(x)
# Concatenation
x_1 = torch.cat((x,y),1)
# Summation
# x_1 = x + y
x_2 = self.mpconv(x_1)
if x_1.shape == x_2.shape:
x = x_1 + x_2
else:
x = self.bridge(x_1) + x_2
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
# would be a nice idea if the upsampling could be learned too,
# but my machine do not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
self.bridge = one_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
diffX = x1.size()[2] - x2.size()[2]
diffY = x1.size()[3] - x2.size()[3]
x2 = F.pad(x2, (diffX // 2, int(diffX / 2),
diffY // 2, int(diffY / 2)))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x) + self.bridge(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 1)
)
def forward(self, x):
x = self.conv(x)
return x
class ResUNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(ResUNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.dbconv1 = res_conv(64,128)
self.down1 = down(128, 128)
self.dbconv2 = res_conv(64,128)
self.dbconv3 = res_conv(128,256)
self.down2 = down(256, 256)
self.dbconv4 = res_conv(64,128)
self.dbconv5 = res_conv(128,256)
self.dbconv6 = res_conv(256,512)
self.down3 = down(512, 512)
self.down4 = down(1024, 512)
self.dbup1 = res_conv(512,256)
self.dbup2 = res_conv(256,128)
self.dbup3 = res_conv(128,64)
self.dbup4 = res_conv(64,64)
self.up1 = up(1024, 256)
self.dbup5 = res_conv(256,128)
self.dbup6 = res_conv(128,64)
self.dbup7 = res_conv(64,64)
self.up2 = up(512, 128)
self.dbup8 = res_conv(128,64)
self.dbup9 = res_conv(64,64)
self.up3 = up(256, 64)
self.dbup10 = res_conv(64,64)
self.up4 = up(128, 64)
self.outc1 = outconv(64, n_classes)
self.outc2 = outconv(64, n_classes)
self.outc3 = outconv(64, n_classes)
self.outc4 = outconv(64, n_classes)
self.outc = outconv(64, n_classes)
self.pool = nn.AvgPool2d(2)
self.unpool = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
# self.unpool = nn.Upsample(scale_factor=2, mode='nearest')
# self.att = res_conv(64,1)
# self.gapool = nn.AvgPool2d(kernel_size=224)
def forward(self, x):
x1 = self.inc(x)
y1 = self.pool(x)
z1 = self.inc(y1)
x2 = self.down1(x1, z1)
y2 = self.pool(y1)
z2 = self.inc(y2)
a1 = self.dbconv1(z2)
x3 = self.down2(x2, a1)
y3 = self.pool(y2)
z3 = self.inc(y3)
a2 = self.dbconv2(z3)
a3 = self.dbconv3(a2)
x4 = self.down3(x3, a3)
y4 = self.pool(y3)
z4 = self.inc(y4)
a4 = self.dbconv4(z4)
a5 = self.dbconv5(a4)
a6 = self.dbconv6(a5)
x5 = self.down4(x4, a6)
o1 = self.dbup1(x5)
o1 = self.dbup2(o1)
o1 = self.dbup3(o1)
o1 = self.dbup4(o1)
out1 = self.outc1(o1)
x6 = self.up1(x5, x4)
o2 = self.dbup5(x6)
o2 = self.dbup6(o2)
o2 = self.dbup7(o2)
out2 = self.outc2(o2)
x7 = self.up2(x6, x3)
o3 = self.dbup8(x7)
o3 = self.dbup9(o3)
out3 = self.outc3(o3)
x8 = self.up3(x7, x2)
o4 = self.dbup10(x8)
out4 = self.outc4(o4)
o5 = self.up4(x8, x1)
out5 = self.outc(o5)
o1 = self.unpool(self.unpool(self.unpool(self.unpool(o1))))
o2 = self.unpool(self.unpool(self.unpool(o2)))
o3 = self.unpool(self.unpool(o3))
o4 = self.unpool(o4)
# w1 = self.att(o1)
# w2 = self.att(o2)
# w3 = self.att(o3)
# w4 = self.att(o4)
# w5 = self.att(o5)
# w1 = self.gapool(w1)
# w2 = self.gapool(w2)
# w3 = self.gapool(w3)
# w4 = self.gapool(w4)
# w5 = self.gapool(w5)
# w = torch.cat((w3, w4, w5),1)
# w = torch.nn.Softmax2d()(w)
# w3 = w[:,0:1,:,:]
# w4 = w[:,1:2,:,:]
# w5 = w[:,2:3,:,:]
# w4 = w[:,3:4,:,:]
# w5 = w[:,4:5,:,:]
out1 = self.unpool(self.unpool(self.unpool(self.unpool(out1))))
out2 = self.unpool(self.unpool(self.unpool(out2)))
out3 = self.unpool(self.unpool(out3))
out4 = self.unpool(out4)
# out = w3*out3 + w4*out4 + w5*out5
return out1, out2, out3, out4, out5
# class ResUNet(nn.Module):
# def __init__(self, n_channels, n_classes):
# super(ResUNet, self).__init__()
# self.resnet = ResUNet_0(n_channels, n_classes)
# # self.catconv = cat_conv(10,n_classes)
# self.att = nn.Sequential(
# nn.BatchNorm2d(2),
# nn.ReLU(inplace=True),
# nn.Conv2d(2, 1, 1),
# nn.BatchNorm2d(1),
# nn.ReLU(inplace=True),
# nn.Conv2d(1, 1, 3, padding=1)
# )
# self.gapool1 = nn.AvgPool2d(kernel_size=224)
# self.gapool2 = nn.MaxPool2d(kernel_size=224)
# def forward(self,x):
# a,b,c,d,e = self.resnet(x)
# w1 = self.att(a)
# w2 = self.att(b)
# w3 = self.att(c)
# w4 = self.att(d)
# w5 = self.att(e)
# w1 = self.gapool1(w1) + self.gapool2(w1)
# w2 = self.gapool1(w2) + self.gapool2(w2)
# w3 = self.gapool1(w3) + self.gapool2(w3)
# w4 = self.gapool1(w4) + self.gapool2(w4)
# w5 = self.gapool1(w5) + self.gapool2(w5)
# w = torch.cat((w1, w2, w3, w4, w5),1)
# w = torch.nn.Softmax2d()(w)
# w1 = w[:,0:1,:,:]
# w2 = w[:,1:2,:,:]
# w3 = w[:,2:3,:,:]
# w4 = w[:,3:4,:,:]
# w5 = w[:,4:5,:,:]
# fi_out = w1*a + w2*b + w3*c + w4*d + w5*e
# return fi_out
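# --- Hedged usage sketch (not part of the original file) ---------------------
# The forward pass above returns five full-resolution predictions: four deeply
# supervised side outputs plus the final decoder output.  Averaging a
# per-output cross-entropy loss is one common way to train such a network; the
# equal weighting and sizes below are illustrative assumptions, not the
# authors' training recipe.
if __name__ == '__main__':
    model = ResUNet(n_channels=3, n_classes=4)
    images = torch.randn(2, 3, 224, 224)
    labels = torch.randint(0, 4, (2, 224, 224))
    outputs = model(images)                      # tuple of five (2, 4, 224, 224) logit maps
    criterion = nn.CrossEntropyLoss()
    loss = sum(criterion(out, labels) for out in outputs) / len(outputs)
    print(loss.item())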
| 8,568
| 29.386525
| 86
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 16/model/concave_dps_w.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .concave_dps import ResUNet as ResUNet_0
class attention(nn.Module):
def __init__(self, in_ch, out_ch):
super(attention, self).__init__()
self.conv = nn.Sequential(
nn.BatchNorm2d(in_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_ch, out_ch, 1),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1)
)
def forward(self,x):
x = self.conv(x)
return x
class ResUNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(ResUNet, self).__init__()
self.resnet = ResUNet_0(n_channels, n_classes)
# self.catconv = cat_conv(10,n_classes)
self.att = attention(n_classes, 1)
self.gapool1 = nn.AvgPool2d(kernel_size=224)
self.gapool2 = nn.MaxPool2d(kernel_size=224)
def forward(self,x):
a,b,c,d,e = self.resnet(x)
w1 = self.att(a)
w2 = self.att(b)
w3 = self.att(c)
w4 = self.att(d)
w5 = self.att(e)
w1 = self.gapool1(w1) + self.gapool2(w1)
w2 = self.gapool1(w2) + self.gapool2(w2)
w3 = self.gapool1(w3) + self.gapool2(w3)
w4 = self.gapool1(w4) + self.gapool2(w4)
w5 = self.gapool1(w5) + self.gapool2(w5)
w = torch.cat((w1, w2, w3, w4, w5),1)
w = torch.nn.Softmax2d()(w)
w1 = w[:,0:1,:,:]
w2 = w[:,1:2,:,:]
w3 = w[:,2:3,:,:]
w4 = w[:,3:4,:,:]
w5 = w[:,4:5,:,:]
fi_out = w1*a + w2*b + w3*c + w4*d + w5*e
#softmax for uniseg
fi_out = F.softmax(fi_out, dim=1)
return fi_out
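# --- Hedged usage sketch (not part of the original file) ---------------------
# This wrapper fuses the five deeply supervised outputs with learned
# image-level attention weights and applies a final softmax, so it returns
# class probabilities directly.  The fixed AvgPool2d/MaxPool2d kernel_size=224
# ties it to 224x224 inputs.  Sizes below are illustrative; run it as a module
# (so the relative import above resolves), e.g. python -m <package>.concave_dps_w.
if __name__ == '__main__':
    model = ResUNet(n_channels=3, n_classes=4)
    model.eval()
    with torch.no_grad():
        probs = model(torch.randn(1, 3, 224, 224))  # (1, 4, 224, 224), sums to 1 over classes
        pred = probs.argmax(dim=1)                  # (1, 224, 224) predicted label map
    print(probs.shape, pred.shape)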
| 1,754
| 26.421875
| 54
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 5/main.py
|
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import fetch_california_housing  # public import path, works across scikit-learn versions
import math
from itertools import combinations
import shap
## Use House Dataset
cal_housing = fetch_california_housing()
X, y = cal_housing.data, cal_housing.target
names = cal_housing.feature_names
# Center target to avoid gradient boosting init bias: gradient boosting
# with the 'recursion' method does not account for the initial estimator
# (here the average target, by default)
y -= y.mean()
## Train a MLP regressor
print("Training MLPRegressor...")
est = MLPRegressor(activation='logistic')
est.fit(X, y)
y_ = est.predict(X)
## derive Shapley values based on its definition
Index = [0,1,2,3,4,5,6,7]
ShapleyValue = np.zeros(X.shape)
for i in np.arange(X.shape[0]):
Instance_i =X[i:i+1,:]
print(i)
for j in np.arange(X.shape[1]):
Index_delete_j = np.delete(Index,[j])
for S in np.arange(len(Index_delete_j)):
List = list(combinations(Index_delete_j,S+1))
for NumberOfCombination in np.arange(len(List)):
TypeOfCombination = np.array(List[NumberOfCombination])
Instance_i_with_j = np.zeros((1,8))
Instance_i_with_j[:,:] = Instance_i
Instance_i_with_j[:,TypeOfCombination] = 0
Instance_i_without_j = np.zeros((1,8))
Instance_i_without_j[:,:] = Instance_i_with_j
Instance_i_without_j[:,j] = 0
cmod = S+1
Factorials_concerned = float(math.factorial(cmod) * math.factorial(8 - cmod - 1)) / float(math.factorial(8))
ShapleyValue[i,j] = ShapleyValue[i,j]+ Factorials_concerned*(est.predict(Instance_i_with_j)-est.predict(Instance_i_without_j))
## Use Library Shap to plot
import shap
import pandas as pd
X_data, y_data = cal_housing.data, cal_housing.target
e_dataframe = pd.DataFrame(X_data)
new_data = e_dataframe.rename(index=str, columns={0:'MedInc'})
new_data = new_data.rename(index=str, columns={1:'HouseAge'})
new_data = new_data.rename(index=str, columns={2:'AveRooms'})
new_data = new_data.rename(index=str, columns={3:'AveBedrms'})
new_data = new_data.rename(index=str, columns={4:'Population'})
new_data = new_data.rename(index=str, columns={5:'AveOccup'})
new_data = new_data.rename(index=str, columns={6:'Latitude'})
new_data = new_data.rename(index=str, columns={7:'Longitude'})
shap.summary_plot(ShapleyValue, new_data)
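# Reference note (illustrative, not part of the original script): the nested
# loops above compute Shapley values with feature "removal" approximated by
# setting features to zero.  For n = 8 features, the attribution of feature j
# for an instance x is
#     phi_j(x) = sum over S subset of N \ {j} of
#                |S|! * (n - |S| - 1)! / n! * ( f(x with S zeroed)
#                                             - f(x with S and j zeroed) )
# which costs O(2^(n-1)) model evaluations per feature and instance, hence the
# slow per-instance loop.  Note the loop enumerates only non-empty masked sets
# S, so the single term in which no other feature is zeroed is not included.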
| 2,784
| 28.62766
| 142
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 6/Gradient_DeepTaylorLRP.py
|
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import lrp
import pandas as pd
from pylab import rcParams
### Import Data From TensorFlow
rcParams['figure.figsize'] = 8, 10
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
batch_size = 50
total_batch = int(mnist.train.num_examples/batch_size)
num_epochs = 5
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10], name="truth")
### Construct a CNN
#Set the weights for the network
xavier = tf.contrib.layers.xavier_initializer_conv2d()
conv1_weights = tf.get_variable(name="c1", initializer=xavier, shape=[5, 5, 1, 10])
conv1_biases = tf.Variable(tf.zeros([10]))
conv2_weights = tf.get_variable(name="c2", initializer=xavier, shape=[5, 5, 10, 25])
conv2_biases = tf.Variable(tf.zeros([25]))
conv3_weights = tf.get_variable(name="c3", initializer=xavier, shape=[4, 4, 25, 100])
conv3_biases = tf.Variable(tf.zeros([100]))
fc1_weights = tf.Variable(tf.truncated_normal([4 * 4 * 100, 10], stddev=0.1))
fc1_biases = tf.Variable(tf.zeros([10]))
#Stack the Layers
reshaped_input = tf.reshape(x, [-1, 28, 28, 1], name="absolute_input")
#layer 1
conv1 = tf.nn.conv2d(reshaped_input, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 2
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 3
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 4
pool_shape = pool3.get_shape().as_list()
reshaped = tf.reshape(pool3, [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]])
y = tf.add(tf.matmul(reshaped, fc1_weights), fc1_biases, name="absolute_output")
# Define loss and optimizer
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
### Import Trained Model
saver = tf.train.Saver()
sess = tf.InteractiveSession()
saver.restore(sess,"save_model/MNIST.ckpt")
### Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_acc = []
train_acc = []
for i in tqdm(range(total_batch)):
batch_x, batch_y = mnist.test.next_batch(batch_size)
test_acc.append(sess.run(accuracy, feed_dict={x: batch_x, y_: batch_y}))
print (np.mean(test_acc))
### Apply Deep Taylor LRP
F_list = lrp.lrp(y*y_, 0, 1, return_flist=True)
batch_x = Test_input[0:10]
batch_y = Test_label[0:10]
im_list_DeepTaylor = lrp.get_lrp_im(sess, F_list[-1], reshaped_input, y_, np.reshape(batch_x, (10, 28,28, 1)), batch_y)
### Visualize the produced heatmaps
for b, im in zip(batch_x, im_list_DeepTaylor ):
plt.figure()
plt.subplot(1,2,1)
plt.imshow(np.reshape(b, (28,28)))
plt.subplot(1,2,2)
plt.imshow(np.reshape(im, (28,28)), cmap="gray")
plt.show()
| 3,595
| 32.607477
| 119
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 6/Gradient_Sundararajan.py
|
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import lrp
import pandas as pd
from pylab import rcParams
### Import Data From TensorFlow
rcParams['figure.figsize'] = 8, 10
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
batch_size = 50
total_batch = int(mnist.train.num_examples/batch_size)
num_epochs = 5
### Construct a CNN
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10], name="truth")
#Set the weights for the network
xavier = tf.contrib.layers.xavier_initializer_conv2d()
conv1_weights = tf.get_variable(name="c1", initializer=xavier, shape=[5, 5, 1, 10])
conv1_biases = tf.Variable(tf.zeros([10]))
conv2_weights = tf.get_variable(name="c2", initializer=xavier, shape=[5, 5, 10, 25])
conv2_biases = tf.Variable(tf.zeros([25]))
conv3_weights = tf.get_variable(name="c3", initializer=xavier, shape=[4, 4, 25, 100])
conv3_biases = tf.Variable(tf.zeros([100]))
fc1_weights = tf.Variable(tf.truncated_normal([4 * 4 * 100, 10], stddev=0.1))
fc1_biases = tf.Variable(tf.zeros([10]))
#Stack the Layers
reshaped_input = tf.reshape(x, [-1, 28, 28, 1], name="absolute_input")
#layer 1
conv1 = tf.nn.conv2d(reshaped_input, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 2
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 3
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 4
pool_shape = pool3.get_shape().as_list()
reshaped = tf.reshape(pool3, [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]])
y = tf.add(tf.matmul(reshaped, fc1_weights), fc1_biases, name="absolute_output")
# Define loss and optimizer
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
grads = tf.gradients(cross_entropy,x)
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
### Import Trained Model
saver = tf.train.Saver()
sess = tf.InteractiveSession()
saver.restore(sess,"save_model/MNIST.ckpt")
### Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_acc = []
train_acc = []
for i in tqdm(range(total_batch)):
batch_x, batch_y = mnist.test.next_batch(batch_size)
test_acc.append(sess.run(accuracy, feed_dict={x: batch_x, y_: batch_y}))
print (np.mean(test_acc))
### Apply Sundararajan Gradient
BaseLine = np.mean(Train_input,axis=0)
plt.figure()
plt.imshow(np.reshape(BaseLine,(28,28)))
BaseLine = np.repeat(np.array([BaseLine]),10,axis=0)
N = 10
Gradients = np.zeros((10,784))
batch_x = Test_input[0:10]
batch_y = Test_label[0:10]
for i in np.arange(N):
print(i)
tmp = sess.run(grads, feed_dict={x: BaseLine + (float(i) / N) * (batch_x - BaseLine), y_: batch_y})
Gradients = Gradients + batch_x * tmp[0] * (float(i) / N) * (1.0 / N)
im_list_Sundararaju = Gradients
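# For reference, the Riemann-sum form of Integrated Gradients in Sundararajan et al. is
# (x - baseline) * mean_k grad F(baseline + (k/N) * (x - baseline)). The sketch below is an illustrative
# alternative, not part of the original script: the helper name integrated_gradients_reference is
# hypothetical, and it reuses the `grads`, `x`, `y_` tensors and `sess` defined above (so, as in the rest
# of this script, the gradient is taken with respect to the loss rather than the class score).
def integrated_gradients_reference(sess, grads, x, y_, inputs, labels, baseline, steps=50):
    # Average the input gradients along the straight-line path from the baseline to the inputs.
    avg_grad = np.zeros_like(inputs)
    for k in range(1, steps + 1):
        interp = baseline + (float(k) / steps) * (inputs - baseline)
        avg_grad += sess.run(grads, feed_dict={x: interp, y_: labels})[0]
    avg_grad /= steps
    # Scale by the input difference, as in the original formulation.
    return (inputs - baseline) * avg_grad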
### Visualize the produced heatmaps
for b, im in zip(batch_x, im_list_Sundararaju):
plt.figure()
plt.subplot(1,2,1)
plt.imshow(np.reshape(b, (28,28)))
plt.subplot(1,2,2)
plt.imshow(np.reshape(im, (28,28)), cmap="gray")
plt.show()
| 3,861
| 30.655738
| 92
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 6/Gradient_Simonyan.py
|
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import pandas as pd
from pylab import rcParams
### Import Data From TensorFlow
rcParams['figure.figsize'] = 8, 10
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
batch_size = 50
total_batch = int(mnist.train.num_examples/batch_size)
num_epochs = 5
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10], name="truth")
### Construct a CNN
#Set the weights for the network
xavier = tf.contrib.layers.xavier_initializer_conv2d()
conv1_weights = tf.get_variable(name="c1", initializer=xavier, shape=[5, 5, 1, 10])
conv1_biases = tf.Variable(tf.zeros([10]))
conv2_weights = tf.get_variable(name="c2", initializer=xavier, shape=[5, 5, 10, 25])
conv2_biases = tf.Variable(tf.zeros([25]))
conv3_weights = tf.get_variable(name="c3", initializer=xavier, shape=[4, 4, 25, 100])
conv3_biases = tf.Variable(tf.zeros([100]))
fc1_weights = tf.Variable(tf.truncated_normal([4 * 4 * 100, 10], stddev=0.1))
fc1_biases = tf.Variable(tf.zeros([10]))
#Stack the Layers
reshaped_input = tf.reshape(x, [-1, 28, 28, 1], name="absolute_input")
#layer 1
conv1 = tf.nn.conv2d(reshaped_input, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 2
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 3
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 4
pool_shape = pool3.get_shape().as_list()
reshaped = tf.reshape(pool3, [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]])
y = tf.add(tf.matmul(reshaped, fc1_weights), fc1_biases, name="absolute_output")
# Define loss and optimizer
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
grads = tf.gradients(cross_entropy,x)
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
### Import Trained Model
saver = tf.train.Saver()
sess = tf.InteractiveSession()
saver.restore(sess,"save_model/MNIST.ckpt")
### Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_acc = []
train_acc = []
for i in tqdm(range(total_batch)):
batch_x, batch_y = mnist.test.next_batch(batch_size)
test_acc.append(sess.run(accuracy, feed_dict={x: batch_x, y_: batch_y}))
print (np.mean(test_acc))
### Apply Simonyan Raw Gradients
batch_x = Test_input[0:10]
batch_y = Test_label[0:10]
Gradients = sess.run(grads, feed_dict={x: batch_x, y_: batch_y})
im_list_simonyan = Gradients[0]
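# Note: Simonyan et al. visualize saliency as the absolute value of the gradient (max over channels for
# colour images); the plots below use the raw signed gradient of the loss instead. A minimal sketch of
# the absolute-value variant, assuming the `Gradients` just computed (im_list_simonyan_abs is a new,
# purely illustrative name):
im_list_simonyan_abs = np.abs(Gradients[0])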
### Visualize the produced heatmaps
for b, im in zip(batch_x, im_list_simonyan):
plt.figure()
plt.subplot(1,2,1)
plt.imshow(np.reshape(b, (28,28)))
plt.subplot(1,2,2)
plt.imshow(np.reshape(im, (28,28)), cmap="gray")
plt.show()
| 3,554
| 31.614679
| 92
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 6/Gradient_Smilkov.py
|
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import pandas as pd
from pylab import rcParams
### Import Data From TensorFlow
rcParams['figure.figsize'] = 8, 10
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
batch_size = 50
total_batch = int(mnist.train.num_examples/batch_size)
num_epochs = 5
### Construct a CNN
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10], name="truth")
#Set the weights for the network
xavier = tf.contrib.layers.xavier_initializer_conv2d()
conv1_weights = tf.get_variable(name="c1", initializer=xavier, shape=[5, 5, 1, 10])
conv1_biases = tf.Variable(tf.zeros([10]))
conv2_weights = tf.get_variable(name="c2", initializer=xavier, shape=[5, 5, 10, 25])
conv2_biases = tf.Variable(tf.zeros([25]))
conv3_weights = tf.get_variable(name="c3", initializer=xavier, shape=[4, 4, 25, 100])
conv3_biases = tf.Variable(tf.zeros([100]))
fc1_weights = tf.Variable(tf.truncated_normal([4 * 4 * 100, 10], stddev=0.1))
fc1_biases = tf.Variable(tf.zeros([10]))
#Stack the Layers
reshaped_input = tf.reshape(x, [-1, 28, 28, 1], name="absolute_input")
#layer 1
conv1 = tf.nn.conv2d(reshaped_input, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 2
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 3
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 4
pool_shape = pool3.get_shape().as_list()
reshaped = tf.reshape(pool3, [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]])
y = tf.add(tf.matmul(reshaped, fc1_weights), fc1_biases, name="absolute_output")
# Define loss and optimizer
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
grads = tf.gradients(cross_entropy,x)
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
### Import Trained Model
saver = tf.train.Saver()
sess = tf.InteractiveSession()
saver.restore(sess,"save_model/MNIST.ckpt")
### Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_acc = []
train_acc = []
for i in tqdm(range(total_batch)):
batch_x, batch_y = mnist.test.next_batch(batch_size)
test_acc.append(sess.run(accuracy, feed_dict={x: batch_x, y_: batch_y}))
print (np.mean(test_acc))
### Apply Smilkov Gradient
Gradients = np.zeros((10,784))
batch_x = Test_input[0:10]
batch_y = Test_label[0:10]
N = 10
for i in np.arange(N):
noise = np.random.normal(0,0.1,(10,784))
print(noise[1,2])
batch_x_noised = batch_x + noise
tmp = sess.run(grads, feed_dict={x: batch_x_noised, y_: batch_y})
Gradients = Gradients + tmp[0]
Gradients = Gradients / N
im_list_smilkov = Gradients
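# A minimal parameterized sketch of the same SmoothGrad averaging, assuming the `grads`, `x`, `y_`
# tensors and `sess` defined above; the helper name smoothgrad_reference and its arguments are
# hypothetical and not part of the original script.
def smoothgrad_reference(sess, grads, x, y_, inputs, labels, n_samples=10, sigma=0.1):
    # Average input gradients over n_samples Gaussian-noised copies of the inputs.
    acc = np.zeros_like(inputs)
    for _ in range(n_samples):
        noised = inputs + np.random.normal(0.0, sigma, inputs.shape)
        acc += sess.run(grads, feed_dict={x: noised, y_: labels})[0]
    return acc / n_samples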
### Visualize the produced heatmaps
for b, im in zip(batch_x, im_list_smilkov):
plt.figure()
plt.subplot(1,2,1)
plt.imshow(np.reshape(b, (28,28)))
plt.subplot(1,2,2)
plt.imshow(np.reshape(im, (28,28)), cmap="gray")
plt.show()
| 3,777
| 30.22314
| 92
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 6/ShowSaliency.py
|
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import lrp
import pandas as pd
from pylab import rcParams
import h5py
import os
import matplotlib.image as im
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
rcParams['figure.figsize'] = 8, 10
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
batch_x = Test_input[0:10]
batch_y = Test_label[0:10]
csfont = {'fontname':'Times New Roman'}
plt.figure(dpi = 240)
gs1 = gridspec.GridSpec(5, 5)
gs1.update(wspace=0.025, hspace=0.09) # set the spacing between axes.
ax=plt.subplot(gs1[0])
fig=plt.imshow(np.reshape(batch_x[0], (28,28)), origin="upper", cmap='gray')
plt.title('Digit',**csfont,fontsize=8)
plt.show()
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax=plt.subplot(gs1[1])
fig=plt.imshow(np.reshape(im_list_simonyan[0], (28,28)), origin="upper", cmap='gray')
plt.title('Raw Gradient',**csfont,fontsize=8)
plt.show()
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax=plt.subplot(gs1[2])
fig=plt.imshow(np.reshape(im_list_smilkov[0], (28,28)), origin="upper", cmap='gray')
plt.title('SmoothGrad',**csfont,fontsize=8)
plt.show()
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax=plt.subplot(gs1[3])
fig=plt.imshow(np.reshape(im_list_Sundararaju[0], (28,28)), origin="upper", cmap='gray')
plt.title('IntegratedGrad',**csfont,fontsize=8)
plt.show()
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax=plt.subplot(gs1[4])
fig=plt.imshow(np.reshape(im_list_DeepTaylor[0], (28,28)), origin="upper", cmap='gray')
plt.title('Deep Taylor',**csfont,fontsize=8)
plt.show()
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
# Rows 2-5 of the grid: the next four test digits and their saliency maps, in the same column order as
# the titled first row above.
saliency_lists = [batch_x, im_list_simonyan, im_list_smilkov, im_list_Sundararaju, im_list_DeepTaylor]
for row in range(1, 5):
    for col, im_list in enumerate(saliency_lists):
        ax = plt.subplot(gs1[5 * row + col])
        fig = plt.imshow(np.reshape(im_list[row], (28, 28)), origin="upper", cmap='gray')
        plt.show()
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
| 7,312
| 25.305755
| 88
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 6/lrp.py
|
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from tensorflow.python.ops import nn_ops, gen_nn_ops
import matplotlib.pyplot as plt
from scipy.stats.mstats import zscore
#Helper methods for Layer-wise Relevance Propagation (Deep Taylor decomposition) over a TensorFlow graph
def lrp(F, lowest, highest, graph=None, return_flist=False):
"""
Accepts a final output tensor and propagates relevance back from it to compute LRP over a TensorFlow graph.
Performs a Taylor decomposition at each layer to assign a relevance score to each neuron of that layer.
"""
#Assumptions:
#all conv strides are [1,1,1,1]
#all pool strides are [1,2,2,1]
#all pool/conv padding is SAME
#only reshaping that happens is after a pool layer (pool -> fc) or a conv layer (conv -> fc)
F_list = []
traversed, graph, graph_dict, var_dict = get_traversed(graph=graph)
for n in traversed:
val_name = next(I for I in graph_dict[n].input if I in traversed).split("/read")[0] + ":0"
X = graph.get_tensor_by_name(val_name)
if graph_dict[n].op == "MatMul":
weight_name = next(I for I in graph_dict[n].input if not I in traversed).split("/read")[0] + ":0"
W = var_dict[weight_name]
if "absolute_input" in graph_dict[n].input:
F = fprop_first(F, W, X, lowest, highest)
F_list.append(F)
break
else:
F = fprop(F, W, X)
F_list.append(F)
elif graph_dict[n].op == "MaxPool" or graph_dict[n].op == "MaxPoolWithArgmax":
F = fprop_pool(F, X)
F_list.append(F)
elif graph_dict[n].op == "Conv2D":
weight_name = next(I for I in graph_dict[n].input if not I in traversed).split("/read")[0] + ":0"
W = var_dict[weight_name]
if "absolute_input" in graph_dict[n].input:
F = fprop_conv_first(F, W, X, lowest, highest)
F_list.append(F)
break
else:
F = fprop_conv(F, W, X)
F_list.append(F)
if return_flist:
return F_list
else:
return F
def get_traversed(graph = None):
#Get the graph and graph traversal
graph = tf.get_default_graph() if graph is None else graph
graph_dict = {node.name:node for node in graph.as_graph_def().node}
var_dict = {v.name:v.value() for v in tf.get_collection(tf.GraphKeys.VARIABLES)}
return traverse(graph_dict["absolute_output"], [], graph_dict), graph, graph_dict, var_dict
def traverse(node, L, graph_dict):
#Depth First Search the Network Graph
L.append(node.name)
if "absolute_input" in node.name:
return L
inputs = node.input
for nodename in inputs:
if not traverse(graph_dict[nodename], L, graph_dict) is None:
return L
return None
def fprop_first(F, W, X, lowest, highest):
#Propagate from last feedforward layer to input
W,V,U = W,tf.maximum(0.0,W), tf.minimum(0.0,W)
X,L,H = X, X*0+lowest, X*0+highest
Z = tf.matmul(X, W)-tf.matmul(L, V)-tf.matmul(H, U)+1e-9
S = F/Z
F = X*tf.matmul(S,tf.transpose(W))-L*tf.matmul(S, tf.transpose(V))-H*tf.matmul(S,tf.transpose(U))
return F
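# fprop_first appears to implement the z^B rule of Deep Taylor decomposition for a dense layer whose
# inputs live in the box [lowest, highest]:
#   R_i = sum_j (x_i w_ij - l_i w_ij^+ - h_i w_ij^-) / (sum_i' (x_i' w_i'j - l_i' w_i'j^+ - h_i' w_i'j^-)) * R_j
# with w^+ = max(0, w) (V above) and w^- = min(0, w) (U above); fprop_conv_first below is the
# convolutional analogue of the same rule.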
def fprop(F, W, X):
#Propagate over feedforward layer
V = tf.maximum(0.0, W)
Z = tf.matmul(X, V)+1e-9;
S = F/Z
C = tf.matmul(S, tf.transpose(V))
F = X*C
return F
def fprop_conv_first(F, W, X, lowest, highest, strides=None, padding='SAME'):
#Propagate from last conv layer to input
strides = [1, 1, 1, 1] if strides is None else strides
Wn = tf.minimum(0.0, W)
Wp = tf.maximum(0.0, W)
X, L, H = X, X*0+lowest, X*0+highest
c = tf.nn.conv2d(X, W, strides, padding)
cp = tf.nn.conv2d(H, Wp, strides, padding)
cn = tf.nn.conv2d(L, Wn, strides, padding)
Z = c - cp - cn + 1e-9
S = F/Z
g = nn_ops.conv2d_backprop_input(tf.shape(X), W, S, strides, padding)
gp = nn_ops.conv2d_backprop_input(tf.shape(X), Wp, S, strides, padding)
gn = nn_ops.conv2d_backprop_input(tf.shape(X), Wn, S, strides, padding)
F = X*g - L*gp - H*gn
return F
def fprop_conv(F, W, X, strides=None, padding='SAME'):
#Propagate over conv layer
xshape = X.get_shape().as_list()
fshape = F.get_shape().as_list()
if len(xshape) != len(fshape):
F = tf.reshape(F, (-1, xshape[1], xshape[2], fshape[-1] // (xshape[1] * xshape[2])))
strides = [1, 1, 1, 1] if strides is None else strides
W = tf.maximum(0.0, W)
Z = tf.nn.conv2d(X, W, strides, padding) + 1e-9
S = F/Z
C = nn_ops.conv2d_backprop_input(tf.shape(X), W, S, strides, padding)
F = X*C
return F
def fprop_pool(F, X, strides=None, ksize=None, padding='SAME'):
#Propagate over pool layer
xshape = X.get_shape().as_list()
fshape = F.get_shape().as_list()
if len(xshape) != len(fshape):
F = tf.reshape(F, (-1, int(np.ceil(xshape[1]/2.0)),
int(np.ceil(xshape[2]/2.0)), xshape[3]))
ksize = [1, 2, 2, 1] if ksize is None else ksize
strides = [1, 2, 2, 1] if strides is None else strides
Z = tf.nn.max_pool(X, strides=strides, ksize=ksize, padding=padding) + 1e-9
S = F / Z
C = gen_nn_ops.max_pool_grad(X, Z, S, ksize, strides, padding)
F = X*C
return F
def get_lrp_im(sess, F, x, y, xval, yval):
#Compute LRP over the values and labels
im = []
for i in range(0, xval.shape[0]):
im += list(F.eval(session=sess, feed_dict={x: xval[i:i+1], y: yval[i:i+1]}))
return im
def visualize(im_list, xval):
#Visualize the LRPs
for i in range(len(im_list[0])):
plt.figure()
plt.subplot(1,1+len(im_list),1)
plt.title("Image")
plt.imshow(xval[i])
for j in range(len(im_list)):
plt.subplot(1,1+len(im_list),2+j)
plt.title("LRP for network {}".format(j))
I = np.mean(np.maximum(im_list[j][i], 0), -1)
I = np.minimum(I, np.percentile(I, 99))
I = I/np.max(I)
print ("np.linalg.norm(I)", np.linalg.norm(I))
plt.imshow(I, cmap="gray")
plt.show()
return im_list
| 6,252
| 35.354651
| 109
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/main.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.optimize import fmin_ncg
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.keras import backend as k
from tensorflow.contrib.learn.python.learn.datasets import base
from influence.hessians import hessian_vector_product
from influence.dataset import DataSet
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import pandas as pd
from pylab import rcParams
rcParams['figure.figsize'] = 8, 10
########### Data Loading
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
########## The necessary functions
## the purpose of each function is indicated by its name
def get_influence_on_test_loss(sess, grad_total_loss_op, test_indices, train_idx=None,
approx_type='lissa', approx_params=None, force_refresh=True, test_description=None,
X_train = Train_input, Y_train = Train_label, X_test = Test_input, Y_test = Test_label):
# If train_idx is None then use X and Y (phantom points)
# Need to make sure test_idx stays consistent between models
# because mini-batching permutes dataset order
test_grad_loss_no_reg_val = get_test_grad_loss_no_reg_val(sess, grad_loss_no_reg_op, X_test, Y_test, test_indices,batch_size=100 )
print('Norm of test gradient: %s' % np.linalg.norm(test_grad_loss_no_reg_val[0]))
start_time = time.time()
if test_description is None:
test_description = test_indices
inverse_hvp = get_inverse_hvp_lissa(test_grad_loss_no_reg_val, sess, v_placeholder, hessian_vector,
batch_size=None,
scale=10, damping=0.0, num_samples=1, recursion_depth=1000)
duration = time.time() - start_time
print('Inverse HVP took %s sec' % duration)
start_time = time.time()
num_to_remove = 100
predicted_loss_diffs = np.zeros([num_to_remove])
for counter in np.arange(num_to_remove):
print(counter)
single_train_feed_dict = {x: X_train[counter:counter+1, :], y_: Y_train[counter:counter+1, :]}
train_grad_loss_val = sess.run(grad_total_loss_op, feed_dict=single_train_feed_dict)
predicted_loss_diffs[counter] = np.sum([np.dot(np.reshape(a, (1, -1)), np.reshape(b, (-1, 1))) for a, b in zip(inverse_hvp, train_grad_loss_val)]) / num_to_remove  # per-parameter dot products, as in the main script below
duration = time.time() - start_time
print('Multiplying by %s train examples took %s sec' % (num_to_remove, duration))
return predicted_loss_diffs
def get_test_grad_loss_no_reg_val(sess, grad_loss_no_reg_op, Test_input, Test_label, test_indices, batch_size=100):
if test_indices is not None:
num_iter = int(np.ceil(len(test_indices) / batch_size))
test_grad_loss_no_reg_val = None
for i in range(num_iter):
start = i * batch_size
end = int(min((i+1) * batch_size, len(test_indices)))
test_feed_dict = fill_feed_dict_with_some_ex(x, y_, Test_input, Test_label, test_indices[start:end])
temp = sess.run(grad_loss_no_reg_op, feed_dict=test_feed_dict)
if test_grad_loss_no_reg_val is None:
test_grad_loss_no_reg_val = [a * (end-start) for a in temp]
else:
test_grad_loss_no_reg_val = [a + b * (end-start) for (a, b) in zip(test_grad_loss_no_reg_val, temp)]
test_grad_loss_no_reg_val = [a/len(test_indices) for a in test_grad_loss_no_reg_val]
return test_grad_loss_no_reg_val
def fill_feed_dict_with_all_but_one_ex(x, y_, data_images, data_labels, idx_to_remove):
num_examples = data_images.shape[0]
idx = np.array([True] * num_examples, dtype=bool)
idx[idx_to_remove] = False
feed_dict = {
x: data_images[idx, :],
y_: data_labels[idx, :]
}
return feed_dict
def fill_feed_dict_with_some_ex(x, y_, data_images, data_labels, target_indices):
input_feed = data_images[target_indices, :]
labels_feed = data_labels[target_indices,:]
feed_dict = {
x: input_feed,
y_: labels_feed,
}
return feed_dict
def fill_feed_dict_with_batch(x, y_, Test_input, Test_label, batch_size=0):
if batch_size is None:
return fill_feed_dict_with_all_ex(x, y_, Test_input, Test_label)
def fill_feed_dict_with_all_ex(x, y_, data_images, data_labels):
feed_dict = {
x: data_images,
y_: data_labels
}
return feed_dict
def get_inverse_hvp_lissa(v, sess, v_placeholder, hessian_vector,
batch_size=None,
scale=10, damping=0.0, num_samples=1, recursion_depth=10000):
"""
This uses mini-batching; uncomment code for the single sample case.
"""
inverse_hvp = None
print_iter = recursion_depth / 10
for i in range(num_samples):
# samples = np.random.choice(self.num_train_examples, size=recursion_depth)
cur_estimate = v
for j in range(recursion_depth):
# feed_dict = fill_feed_dict_with_one_ex(
# data_set,
# images_placeholder,
# labels_placeholder,
# samples[j])
feed_dict = fill_feed_dict_with_batch(x, y_, Test_input, Test_label, batch_size=batch_size)
feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)
hessian_vector_val = sess.run(hessian_vector, feed_dict=feed_dict)
cur_estimate = [a + (1-damping) * b - c/scale for (a,b,c) in zip(v, cur_estimate, hessian_vector_val)]
# Update: v + (I - Hessian_at_x) * cur_estimate
if (j % print_iter == 0) or (j == recursion_depth - 1):
print("Recursion at depth %s: norm is %.8lf" % (j, np.linalg.norm(cur_estimate[0])))
feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)
if inverse_hvp is None:
inverse_hvp = [b/scale for b in cur_estimate]
else:
inverse_hvp = [a + b/scale for (a, b) in zip(inverse_hvp, cur_estimate)]
inverse_hvp = [a/num_samples for a in inverse_hvp]
return inverse_hvp
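# The recursion above is the LiSSA estimator of an inverse Hessian-vector product: with the Hessian
# rescaled by `scale`, each step computes cur <- v + (I - H/scale) * cur (plus optional damping), whose
# fixed point is scale * H^{-1} v, so dividing by `scale` at the end recovers H^{-1} v; averaging over
# `num_samples` repetitions reduces the variance of the stochastic Hessian-vector products.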
def update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, vec):
for pl_block, vec_block in zip(v_placeholder, vec):
feed_dict[pl_block] = vec_block
return feed_dict
## Define the Model and Path for Gradients
batch_size = 50
total_batch = int(mnist.train.num_examples/batch_size)
num_epochs = 5
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10], name="truth")
#Set the weights for the network
xavier = tf.contrib.layers.xavier_initializer_conv2d()
conv1_weights = tf.get_variable(name="c1", initializer=xavier, shape=[5, 5, 1, 10])
conv1_biases = tf.Variable(tf.zeros([10]))
conv2_weights = tf.get_variable(name="c2", initializer=xavier, shape=[5, 5, 10, 25])
conv2_biases = tf.Variable(tf.zeros([25]))
conv3_weights = tf.get_variable(name="c3", initializer=xavier, shape=[4, 4, 25, 100])
conv3_biases = tf.Variable(tf.zeros([100]))
fc1_weights = tf.Variable(tf.truncated_normal([4 * 4 * 100, 10], stddev=0.1))
fc1_biases = tf.Variable(tf.zeros([10]))
#Stack the Layers
reshaped_input = tf.reshape(x, [-1, 28, 28, 1], name="absolute_input")
#layer 1
conv1 = tf.nn.conv2d(reshaped_input, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 2
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 3
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 4
pool_shape = pool3.get_shape().as_list()
reshaped = tf.reshape(pool3, [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]])
y = tf.add(tf.matmul(reshaped, fc1_weights), fc1_biases, name="absolute_output")
# Define loss and optimizer
total_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
grads = tf.gradients(total_loss,x)
params = tf.trainable_variables()
grad_total_loss_op = tf.gradients(total_loss, params)
grad_loss_no_reg_op = grad_total_loss_op
v_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in params]
u_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in params]
hessian_vector = hessian_vector_product(total_loss, params, v_placeholder)
grad_loss_wrt_input_op = tf.gradients(total_loss, x)
# Because tf.gradients auto accumulates, we probably don't need the add_n (or even reduce_sum)
influence_op = tf.add_n(
[tf.reduce_sum(tf.multiply(a, array_ops.stop_gradient(b))) for a, b in zip(grad_total_loss_op, v_placeholder)])
grad_influence_wrt_input_op = tf.gradients(influence_op, x)
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(total_loss)
########### Import Trained Model
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess,"save_model/MNIST.ckpt")
########### Test Indice is 34
Test_indices = [34]
test_grad_loss_no_reg_val = get_test_grad_loss_no_reg_val(sess, grad_loss_no_reg_op, Test_input, Test_label, Test_indices,batch_size=100 )
print('Norm of test gradient: %s' % np.linalg.norm(test_grad_loss_no_reg_val[0]))
inverse_hvp = get_inverse_hvp_lissa(test_grad_loss_no_reg_val, sess, v_placeholder, hessian_vector,
batch_size=None,
scale=10, damping=0.0, num_samples=1, recursion_depth=50)
########### Compute the Influence function
num_to_remove = 1000
predicted_loss_diffs = np.zeros([num_to_remove])
for counter in np.arange(num_to_remove):
print(counter)
single_train_feed_dict = {x: Train_input[counter:counter+1, :], y_ : Train_label[counter:counter+1,:]}
train_grad_loss_val = sess.run(grad_total_loss_op, feed_dict=single_train_feed_dict)
for q in np.arange(len(inverse_hvp)):
predicted_loss_diffs[counter] = predicted_loss_diffs[counter] + np.dot(np.reshape(inverse_hvp[q],(1,-1)), np.reshape(train_grad_loss_val[q],(-1,1)))
predicted_loss_diffs[counter] = predicted_loss_diffs[counter] / num_to_remove
#%%
plt.rcParams["font.family"] = "Times New Roman"
plt.figure()
plt.subplot(1,3,1)
fig = plt.imshow(np.reshape(Test_input[34,:], (28,28)))
plt.title('Test Image')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.subplot(1,3,2)
fig = plt.imshow(np.reshape(Train_input[0,:], (28,28)))
plt.title('Harmful Image')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.subplot(1,3,3)
fig = plt.imshow(np.reshape(Train_input[68,:], (28,28)))
plt.title('Harmful Image')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.savefig('InfluenceFunctionofDigits.png')
| 12,510
| 35.263768
| 168
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/dataset_poisoning.py
|
import IPython
import numpy as np
import os
import time
from shutil import copyfile
from influence.inceptionModel import BinaryInceptionModel
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.experiments
from influence.dataset import DataSet
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
def get_projection_to_box_around_orig_point(X_orig, box_radius_in_pixels=0.5):
box_radius_in_float = box_radius_in_pixels * 2.0 / 255.0
if X_orig is None:
lower_bound = -1
upper_bound = 1
else:
lower_bound = np.maximum(
-np.ones_like(X_orig),
X_orig - box_radius_in_float)
upper_bound = np.minimum(
np.ones_like(X_orig),
X_orig + box_radius_in_float)
# Automatically enforces -1, 1 as well
def project_fn(X):
return np.clip(X, lower_bound, upper_bound)
return project_fn
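# Minimal usage sketch (values are hypothetical): project_fn = get_projection_to_box_around_orig_point(X_orig)
# followed by X_step = project_fn(X_step) keeps each poisoned example within box_radius_in_pixels of its
# original image and clipped to [-1, 1], as used in poison_with_influence_proj_gradient_step below.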
def select_examples_to_attack(model, num_to_poison, grad_influence_wrt_input_val, step_size):
# diffs = model.data_sets.train.x - np.clip(model.data_sets.train.x - step_size * np.sign(grad_influence_wrt_input_val) * 2.0 / 255.0, -1, 1)
# pred_diff = np.sum(diffs * grad_influence_wrt_input_val, axis = 1)
# This ignores the clipping, but it's faster
pred_diff = np.sum(np.abs(grad_influence_wrt_input_val), axis = 1)
indices_to_poison = np.argsort(pred_diff)[-1:-num_to_poison-1:-1] # First index is the most effective
return indices_to_poison
def poison_with_influence_proj_gradient_step(model, indices_to_poison, grad_influence_wrt_input_val_subset, step_size, project_fn):
"""
Returns poisoned_X_train, a subset of model.data_sets.train (marked by indices_to_poison)
that has been modified by a single gradient step.
"""
poisoned_X_train_subset = project_fn(
model.data_sets.train.x[indices_to_poison, :] - step_size * np.sign(grad_influence_wrt_input_val_subset) * 2.0 / 255.0)
print('-- max: %s, mean: %s, min: %s' % (
np.max(grad_influence_wrt_input_val_subset),
np.mean(grad_influence_wrt_input_val_subset),
np.min(grad_influence_wrt_input_val_subset)))
return poisoned_X_train_subset
def generate_inception_features(model, poisoned_X_train_subset, labels_subset, batch_size=None):
poisoned_train = DataSet(poisoned_X_train_subset, labels_subset)
poisoned_data_sets = base.Datasets(train=poisoned_train, validation=None, test=None)
if batch_size is None:
batch_size = len(labels_subset)
num_examples = poisoned_data_sets.train.num_examples
assert num_examples % batch_size == 0
num_iter = int(num_examples / batch_size)
poisoned_data_sets.train.reset_batch()
inception_features_val = []
for i in xrange(num_iter):
feed_dict = model.fill_feed_dict_with_batch(poisoned_data_sets.train, batch_size=batch_size)
inception_features_val_temp = model.sess.run(model.inception_features, feed_dict=feed_dict)
inception_features_val.append(inception_features_val_temp)
return np.concatenate(inception_features_val)
def iterative_attack(top_model, full_model, top_graph, full_graph, project_fn, test_indices, test_description=None,
indices_to_poison=None,
num_iter=10,
step_size=1,
save_iter=1,
loss_type='normal_loss',
early_stop=None):
# If early_stop is set and it stops early, returns True
# Otherwise, returns False
if test_description is None:
test_description = test_indices
if early_stop is not None:
assert len(test_indices) == 1, 'Early stopping only supported for attacks on a single test index.'
if len(indices_to_poison) == 1:
train_idx_str = indices_to_poison
else:
train_idx_str = len(indices_to_poison)
top_model_name = top_model.model_name
full_model_name = full_model.model_name
print('Test idx: %s' % test_indices)
print('Indices to poison: %s' % indices_to_poison)
# Remove everything but the poisoned train indices from the full model, to save time
full_model.update_train_x_y(
full_model.data_sets.train.x[indices_to_poison, :],
full_model.data_sets.train.labels[indices_to_poison])
eff_indices_to_poison = np.arange(len(indices_to_poison))
labels_subset = full_model.data_sets.train.labels[eff_indices_to_poison]
for attack_iter in range(num_iter):
print('*** Iter: %s' % attack_iter)
print('Calculating grad...')
# Use top model to quickly generate inverse HVP
with top_graph.as_default():
top_model.get_influence_on_test_loss(
test_indices,
[0],
force_refresh=True,
test_description=test_description,
loss_type=loss_type)
copyfile(
'output/%s-cg-%s-test-%s.npz' % (top_model_name, loss_type, test_description),
'output/%s-cg-%s-test-%s.npz' % (full_model_name, loss_type, test_description))
# Use full model to get gradient wrt pixels
with full_graph.as_default():
grad_influence_wrt_input_val_subset = full_model.get_grad_of_influence_wrt_input(
eff_indices_to_poison,
test_indices,
force_refresh=False,
test_description=test_description,
loss_type=loss_type)
poisoned_X_train_subset = poison_with_influence_proj_gradient_step(
full_model,
eff_indices_to_poison,
grad_influence_wrt_input_val_subset,
step_size,
project_fn)
# Update training dataset
with full_graph.as_default():
full_model.update_train_x(poisoned_X_train_subset)
inception_X_train = top_model.data_sets.train.x
inception_X_train_subset = generate_inception_features(full_model, poisoned_X_train_subset, labels_subset)
inception_X_train[indices_to_poison] = inception_X_train_subset
with top_graph.as_default():
top_model.update_train_x(inception_X_train)
# Retrain model
print('Training...')
with top_graph.as_default():
top_model.train()
weights = top_model.sess.run(top_model.weights)
weight_path = 'output/inception_weights_%s_attack_%s_testidx-%s.npy' % (top_model_name, loss_type, test_description)
np.save(weight_path, weights)
with full_graph.as_default():
full_model.load_weights_from_disk(weight_path, do_save=False, do_check=False)
# Print out attack effectiveness if it's not too expensive
test_pred = None
if len(test_indices) < 100:
with full_graph.as_default():
test_pred = full_model.sess.run(full_model.preds, feed_dict=full_model.fill_feed_dict_with_some_ex(
full_model.data_sets.test,
test_indices))
print('Test pred (full): %s' % test_pred)
with top_graph.as_default():
test_pred = top_model.sess.run(top_model.preds, feed_dict=top_model.fill_feed_dict_with_some_ex(
top_model.data_sets.test,
test_indices))
print('Test pred (top): %s' % test_pred)
if ((early_stop is not None) and (len(test_indices) == 1)):
if test_pred[0, int(full_model.data_sets.test.labels[test_indices])] < early_stop:
print('Successfully attacked. Saving and breaking...')
np.savez('output/%s_attack_%s_testidx-%s_trainidx-%s_stepsize-%s_proj_final' % (full_model.model_name, loss_type, test_description, train_idx_str, step_size),
poisoned_X_train_image=poisoned_X_train_subset,
poisoned_X_train_inception_features=inception_X_train_subset,
Y_train=labels_subset,
indices_to_poison=indices_to_poison,
attack_iter=attack_iter + 1,
test_pred=test_pred,
step_size=step_size)
return True
if (attack_iter+1) % save_iter == 0:
np.savez('output/%s_attack_%s_testidx-%s_trainidx-%s_stepsize-%s_proj_iter-%s' % (full_model.model_name, loss_type, test_description, train_idx_str, step_size, attack_iter+1),
poisoned_X_train_image=poisoned_X_train_subset,
poisoned_X_train_inception_features=inception_X_train_subset,
Y_train=labels_subset,
indices_to_poison=indices_to_poison,
attack_iter=attack_iter + 1,
test_pred=test_pred,
step_size=step_size)
return False
| 8,970
| 40.532407
| 188
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/logisticRegressionWithLBFGS.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import os.path
import time
import tensorflow as tf
import math
from tensorflow.python.ops import array_ops
from influence.hessians import hessians
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
class LogisticRegressionWithLBFGS(GenericNeuralNet):
def __init__(self, input_dim, weight_decay, max_lbfgs_iter, **kwargs):
self.weight_decay = weight_decay
self.input_dim = input_dim
self.max_lbfgs_iter = max_lbfgs_iter
super(LogisticRegressionWithLBFGS, self).__init__(**kwargs)
self.set_params_op = self.set_params()
# self.hessians_op = hessians(self.total_loss, self.params)
# Multinomial has weird behavior when it's binary
C = 1.0 / (self.num_train_examples * self.weight_decay)
self.sklearn_model = linear_model.LogisticRegression(
C=C,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
multi_class='multinomial',
warm_start=True, #True
max_iter=max_lbfgs_iter)
C_minus_one = 1.0 / ((self.num_train_examples - 1) * self.weight_decay)
self.sklearn_model_minus_one = linear_model.LogisticRegression(
C=C_minus_one,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
multi_class='multinomial',
warm_start=True, #True
max_iter=max_lbfgs_iter)
def get_all_params(self):
all_params = []
for layer in ['softmax_linear']:
# for var_name in ['weights', 'biases']:
for var_name in ['weights']:
temp_tensor = tf.get_default_graph().get_tensor_by_name("%s/%s:0" % (layer, var_name))
all_params.append(temp_tensor)
return all_params
def placeholder_inputs(self):
input_placeholder = tf.placeholder(
tf.float32,
shape=(None, self.input_dim),
name='input_placeholder')
labels_placeholder = tf.placeholder(
tf.int32,
shape=(None),
name='labels_placeholder')
return input_placeholder, labels_placeholder
def inference(self, input):
with tf.variable_scope('softmax_linear'):
weights = variable_with_weight_decay(
'weights',
[self.input_dim * self.num_classes],
stddev=1.0 / math.sqrt(float(self.input_dim)),
wd=self.weight_decay)
logits = tf.matmul(input, tf.reshape(weights, [self.input_dim, self.num_classes]))
# biases = variable(
# 'biases',
# [self.num_classes],
# tf.constant_initializer(0.0))
# logits = tf.matmul(input, tf.reshape(weights, [self.input_dim, self.num_classes])) + biases
self.weights = weights
# self.biases = biases
return logits
def predictions(self, logits):
preds = tf.nn.softmax(logits, name='preds')
return preds
def set_params(self):
# See if we can automatically infer weight shape
self.W_placeholder = tf.placeholder(
tf.float32,
shape=[self.input_dim * self.num_classes],
name='W_placeholder')
# self.b_placeholder = tf.placeholder(
# tf.float32,
# shape=[self.num_classes],
# name='b_placeholder')
set_weights = tf.assign(self.weights, self.W_placeholder, validate_shape=True)
return [set_weights]
# set_biases = tf.assign(self.biases, self.b_placeholder, validate_shape=True)
# return [set_weights, set_biases]
def retrain(self, num_steps, feed_dict):
self.train_with_LBFGS(
feed_dict=feed_dict,
save_checkpoints=False,
verbose=False)
# super(LogisticRegressionWithLBFGS, self).train(
# num_steps,
# iter_to_switch_to_batch=0,
# iter_to_switch_to_sgd=1000000,
# save_checkpoints=False, verbose=False)
def train(self, num_steps=None,
iter_to_switch_to_batch=None,
iter_to_switch_to_sgd=None,
save_checkpoints=True, verbose=True):
self.train_with_LBFGS(
feed_dict=self.all_train_feed_dict,
save_checkpoints=save_checkpoints,
verbose=verbose)
# super(LogisticRegressionWithLBFGS, self).train(
# num_steps=500,
# iter_to_switch_to_batch=0,
# iter_to_switch_to_sgd=100000,
# save_checkpoints=True, verbose=True)
def train_with_SGD(self, **kwargs):
super(LogisticRegressionWithLBFGS, self).train(**kwargs)
def train_with_LBFGS(self, feed_dict, save_checkpoints=True, verbose=True):
# More sanity checks to see if predictions are the same?
X_train = feed_dict[self.input_placeholder]
Y_train = feed_dict[self.labels_placeholder]
num_train_examples = len(Y_train)
assert len(Y_train.shape) == 1
assert X_train.shape[0] == Y_train.shape[0]
if num_train_examples == self.num_train_examples:
if verbose: print('Using normal model')
model = self.sklearn_model
elif num_train_examples == self.num_train_examples - 1:
if verbose: print('Using model minus one')
model = self.sklearn_model_minus_one
else:
raise ValueError, "feed_dict has incorrect number of training examples"
# print(X_train)
# print(Y_train)
model.fit(X_train, Y_train)
# sklearn returns coefficients in shape num_classes x num_features
# whereas our weights are defined as num_features x num_classes
# so we have to transpose them first.
W = np.reshape(model.coef_.T, -1)
# b = model.intercept_
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
# params_feed_dict[self.b_placeholder] = b
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if save_checkpoints: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
if verbose:
print('LBFGS training took %s iter.' % model.n_iter_)
print('After training with LBFGS: ')
self.print_model_eval()
| 6,886
| 33.094059
| 114
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/image_utils.py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
sns.set(color_codes=True)
def plot_flat_bwimage(X, y=None, pos_class=1, neg_class=-1, side=28):
X = np.reshape(X, (side, side))
if y is not None:
if y == 1:
label = pos_class
else:
label = neg_class
with sns.axes_style("white"):
if y is not None:
plt.title('Label is %s' % label)
plt.imshow(X, cmap='gray', interpolation='none')
def plot_flat_bwgrad(X, side=28):
X = np.reshape(X, (side, side))
max_abs = np.max(np.abs(X))
with sns.axes_style("white"):
f, ax = plt.subplots()
colormap = ax.imshow(X, cmap='coolwarm', vmax=max_abs, vmin=-max_abs, interpolation='none')
f.colorbar(colormap)
def plot_flat_colorimage(X, y, pos_class=1, neg_class=-1, side=32):
X = np.reshape(X, (side, side, 3))
if y == 1:
label = pos_class
else:
label = neg_class
with sns.axes_style("white"):
f, ax = plt.subplots(figsize=(6,6))
ax.set_title('Label is %s' % label)
ax.imshow(X, interpolation='none')
# ax.imshow(X)
plt.show()
def plot_flat_colorgrad(X, side=32):
X = np.reshape(X, (side, side, 3))
with sns.axes_style("white"):
f, ax = plt.subplots()
colormap = ax.imshow(X, interpolation='none')
f.colorbar(colormap)
| 1,739
| 30.636364
| 99
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/all_CNN_c.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import os.path
import time
import IPython
import tensorflow as tf
import math
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
from influence.dataset import DataSet
def conv2d(x, W, r):
return tf.nn.conv2d(x, W, strides=[1, r, r, 1], padding='VALID')
def softplus(x):
return tf.log(tf.exp(x) + 1)
class All_CNN_C(GenericNeuralNet):
def __init__(self, input_side, input_channels, conv_patch_size, hidden1_units, hidden2_units, hidden3_units, weight_decay, **kwargs):
self.weight_decay = weight_decay
self.input_side = input_side
self.input_channels = input_channels
self.input_dim = self.input_side * self.input_side * self.input_channels
self.conv_patch_size = conv_patch_size
self.hidden1_units = hidden1_units
self.hidden2_units = hidden2_units
self.hidden3_units = hidden3_units
super(All_CNN_C, self).__init__(**kwargs)
def conv2d_softplus(self, input_x, conv_patch_size, input_channels, output_channels, stride):
weights = variable_with_weight_decay(
'weights',
[conv_patch_size * conv_patch_size * input_channels * output_channels],
stddev=2.0 / math.sqrt(float(conv_patch_size * conv_patch_size * input_channels)),
wd=self.weight_decay)
biases = variable(
'biases',
[output_channels],
tf.constant_initializer(0.0))
weights_reshaped = tf.reshape(weights, [conv_patch_size, conv_patch_size, input_channels, output_channels])
hidden = tf.nn.tanh(conv2d(input_x, weights_reshaped, stride) + biases)
return hidden
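# Note: despite its name, conv2d_softplus applies a tanh nonlinearity (tf.nn.tanh); the softplus helper
# defined at module level is not used here.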
def get_all_params(self):
all_params = []
for layer in ['h1_a', 'h1_c', 'h2_a', 'h2_c', 'h3_a', 'h3_c', 'softmax_linear']:
for var_name in ['weights', 'biases']:
temp_tensor = tf.get_default_graph().get_tensor_by_name("%s/%s:0" % (layer, var_name))
all_params.append(temp_tensor)
return all_params
def retrain(self, num_steps, feed_dict):
retrain_dataset = DataSet(feed_dict[self.input_placeholder], feed_dict[self.labels_placeholder])
for step in xrange(num_steps):
iter_feed_dict = self.fill_feed_dict_with_batch(retrain_dataset)
self.sess.run(self.train_op, feed_dict=iter_feed_dict)
def placeholder_inputs(self):
input_placeholder = tf.placeholder(
tf.float32,
shape=(None, self.input_dim),
name='input_placeholder')
labels_placeholder = tf.placeholder(
tf.int32,
shape=(None),
name='labels_placeholder')
return input_placeholder, labels_placeholder
def inference(self, input_x):
input_reshaped = tf.reshape(input_x, [-1, self.input_side, self.input_side, self.input_channels])
# Hidden 1
with tf.variable_scope('h1_a'):
h1_a = self.conv2d_softplus(input_reshaped, self.conv_patch_size, self.input_channels, self.hidden1_units, stride=1)
with tf.variable_scope('h1_c'):
h1_c = self.conv2d_softplus(h1_a, self.conv_patch_size, self.hidden1_units, self.hidden1_units, stride=2)
# Hidden 2
with tf.variable_scope('h2_a'):
h2_a = self.conv2d_softplus(h1_c, self.conv_patch_size, self.hidden1_units, self.hidden2_units, stride=1)
with tf.variable_scope('h2_c'):
h2_c = self.conv2d_softplus(h2_a, self.conv_patch_size, self.hidden2_units, self.hidden2_units, stride=2)
# Shared layers / hidden 3
with tf.variable_scope('h3_a'):
h3_a = self.conv2d_softplus(h2_c, self.conv_patch_size, self.hidden2_units, self.hidden3_units, stride=1)
last_layer_units = 10
with tf.variable_scope('h3_c'):
h3_c = self.conv2d_softplus(h3_a, 1, self.hidden3_units, last_layer_units, stride=1)
h3_d = tf.reduce_mean(h3_c, axis=[1, 2])
with tf.variable_scope('softmax_linear'):
weights = variable_with_weight_decay(
'weights',
[last_layer_units * self.num_classes],
stddev=1.0 / math.sqrt(float(last_layer_units)),
wd=self.weight_decay)
biases = variable(
'biases',
[self.num_classes],
tf.constant_initializer(0.0))
logits = tf.matmul(h3_d, tf.reshape(weights, [last_layer_units, self.num_classes])) + biases
return logits
def predictions(self, logits):
preds = tf.nn.softmax(logits, name='preds')
return preds
| 5,279
| 36.183099
| 137
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/experiments.py
|
import numpy as np
import os
import time
import IPython
from scipy.stats import pearsonr
def get_try_check(model, X_train, Y_train, Y_train_flipped, X_test, Y_test):
def try_check(idx_to_check, label):
Y_train_fixed = np.copy(Y_train_flipped)
Y_train_fixed[idx_to_check] = Y_train[idx_to_check]
model.update_train_x_y(X_train, Y_train_fixed)
model.train()
check_num = np.sum(Y_train_fixed != Y_train_flipped)
check_loss, check_acc = model.sess.run(
[model.loss_no_reg, model.accuracy_op],
feed_dict=model.all_test_feed_dict)
print('%20s: fixed %3s labels. Loss %.5f. Accuracy %.3f.' % (
label, check_num, check_loss, check_acc))
return check_num, check_loss, check_acc
return try_check
def test_mislabeled_detection_batch(
model,
X_train, Y_train,
Y_train_flipped,
X_test, Y_test,
train_losses, train_loo_influences,
num_flips, num_checks):
assert num_checks > 0
num_train_examples = Y_train.shape[0]
try_check = get_try_check(model, X_train, Y_train, Y_train_flipped, X_test, Y_test)
# Pick by LOO influence
idx_to_check = np.argsort(train_loo_influences)[-num_checks:]
fixed_influence_loo_results = try_check(idx_to_check, 'Influence (LOO)')
# Pick by top loss to fix
idx_to_check = np.argsort(np.abs(train_losses))[-num_checks:]
fixed_loss_results = try_check(idx_to_check, 'Loss')
# Randomly pick stuff to fix
idx_to_check = np.random.choice(num_train_examples, size=num_checks, replace=False)
fixed_random_results = try_check(idx_to_check, 'Random')
return fixed_influence_loo_results, fixed_loss_results, fixed_random_results
def viz_top_influential_examples(model, test_idx):
model.reset_datasets()
print('Test point %s has label %s.' % (test_idx, model.data_sets.test.labels[test_idx]))
num_to_remove = 10000
indices_to_remove = np.arange(num_to_remove)
predicted_loss_diffs = model.get_influence_on_test_loss(
test_idx,
indices_to_remove,
force_refresh=True)
# If the predicted difference in loss is high (very positive) after removal,
# that means that the point helped it to be correct.
top_k = 10
helpful_points = np.argsort(predicted_loss_diffs)[-top_k:][::-1]
unhelpful_points = np.argsort(predicted_loss_diffs)[:top_k]
for points, message in [
(helpful_points, 'better'), (unhelpful_points, 'worse')]:
print("Top %s training points making the loss on the test point %s:" % (top_k, message))
for counter, idx in enumerate(points):
print("#%s, class=%s, predicted_loss_diff=%.8f" % (
idx,
model.data_sets.train.labels[idx],
predicted_loss_diffs[idx]))
def test_retraining(model, test_idx, iter_to_load, force_refresh=False,
num_to_remove=50, num_steps=1000, random_seed=17,
remove_type='random'):
np.random.seed(random_seed)
model.load_checkpoint(iter_to_load)
sess = model.sess
y_test = model.data_sets.test.labels[test_idx]
print('Test label: %s' % y_test)
## Or, randomly remove training examples
if remove_type == 'random':
indices_to_remove = np.random.choice(model.num_train_examples, size=num_to_remove, replace=False)
predicted_loss_diffs = model.get_influence_on_test_loss(
[test_idx],
indices_to_remove,
force_refresh=force_refresh)
## Or, remove the most influential training examples
elif remove_type == 'maxinf':
predicted_loss_diffs = model.get_influence_on_test_loss(
[test_idx],
np.arange(len(model.data_sets.train.labels)),
force_refresh=force_refresh)
indices_to_remove = np.argsort(np.abs(predicted_loss_diffs))[-num_to_remove:]
predicted_loss_diffs = predicted_loss_diffs[indices_to_remove]
else:
raise ValueError('remove_type not well specified')
actual_loss_diffs = np.zeros([num_to_remove])
# Sanity check
test_feed_dict = model.fill_feed_dict_with_one_ex(
model.data_sets.test,
test_idx)
test_loss_val, params_val = sess.run([model.loss_no_reg, model.params], feed_dict=test_feed_dict)
train_loss_val = sess.run(model.total_loss, feed_dict=model.all_train_feed_dict)
# train_loss_val = model.minibatch_mean_eval([model.total_loss], model.data_sets.train)[0]
model.retrain(num_steps=num_steps, feed_dict=model.all_train_feed_dict)
retrained_test_loss_val = sess.run(model.loss_no_reg, feed_dict=test_feed_dict)
retrained_train_loss_val = sess.run(model.total_loss, feed_dict=model.all_train_feed_dict)
# retrained_train_loss_val = model.minibatch_mean_eval([model.total_loss], model.data_sets.train)[0]
model.load_checkpoint(iter_to_load, do_checks=False)
print('Sanity check: what happens if you train the model a bit more?')
print('Loss on test idx with original model : %s' % test_loss_val)
print('Loss on test idx with retrained model : %s' % retrained_test_loss_val)
print('Difference in test loss after retraining : %s' % (retrained_test_loss_val - test_loss_val))
print('===')
print('Total loss on training set with original model : %s' % train_loss_val)
print('Total loss on training with retrained model : %s' % retrained_train_loss_val)
print('Difference in train loss after retraining : %s' % (retrained_train_loss_val - train_loss_val))
print('These differences should be close to 0.\n')
# Retraining experiment
for counter, idx_to_remove in enumerate(indices_to_remove):
print("=== #%s ===" % counter)
print('Retraining without train_idx %s (label %s):' % (idx_to_remove, model.data_sets.train.labels[idx_to_remove]))
train_feed_dict = model.fill_feed_dict_with_all_but_one_ex(model.data_sets.train, idx_to_remove)
model.retrain(num_steps=num_steps, feed_dict=train_feed_dict)
retrained_test_loss_val, retrained_params_val = sess.run([model.loss_no_reg, model.params], feed_dict=test_feed_dict)
actual_loss_diffs[counter] = retrained_test_loss_val - test_loss_val
print('Diff in params: %s' % np.linalg.norm(np.concatenate(params_val) - np.concatenate(retrained_params_val)))
print('Loss on test idx with original model : %s' % test_loss_val)
print('Loss on test idx with retrained model : %s' % retrained_test_loss_val)
print('Difference in loss after retraining : %s' % actual_loss_diffs[counter])
print('Predicted difference in loss (influence): %s' % predicted_loss_diffs[counter])
# Restore params
model.load_checkpoint(iter_to_load, do_checks=False)
np.savez(
'output/%s_loss_diffs' % model.model_name,
actual_loss_diffs=actual_loss_diffs,
predicted_loss_diffs=predicted_loss_diffs)
print('Correlation is %s' % pearsonr(actual_loss_diffs, predicted_loss_diffs)[0])
return actual_loss_diffs, predicted_loss_diffs, indices_to_remove
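# --- Hedged usage sketch (not part of the original experiments code) ---
# test_retraining() above saves actual_loss_diffs and predicted_loss_diffs to
# output/<model_name>_loss_diffs.npz and prints their Pearson correlation. The helper below
# is a minimal sketch of how one might reload and re-examine that file; the path passed in
# is hypothetical and plotting is deliberately omitted.
def compare_actual_vs_predicted_loss_diffs(npz_path):
    import numpy as np
    from scipy.stats import pearsonr
    saved = np.load(npz_path)
    actual = saved['actual_loss_diffs']
    predicted = saved['predicted_loss_diffs']
    r, p = pearsonr(actual, predicted)
    print('Pearson r = %.4f (p = %.3g) over %d removed training points' % (r, p, len(actual)))
    return r
# Example (hypothetical file): compare_actual_vs_predicted_loss_diffs('output/my_model_loss_diffs.npz')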
| 7,208
| 40.67052
| 125
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/smooth_hinge.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster, svm
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.optimize import fmin_l_bfgs_b, fmin_cg, fmin_ncg
import os.path
import time
import IPython
import tensorflow as tf
import math
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
def log_loss(x, t):
exponents = -(x-1)/t
# exponents = -(x)/t
max_elems = tf.maximum(exponents, tf.zeros_like(exponents))
return t * (max_elems + tf.log(
tf.exp(exponents - max_elems) +
tf.exp(tf.zeros_like(exponents) - max_elems)))
# return t * tf.log(tf.exp(-(x)/t) + 1)
def hinge(x):
return tf.maximum(1-x, 0)
def smooth_hinge_loss(x, t):
# return tf.cond(
# tf.equal(t, 0),
# lambda: hinge(x),
# lambda: log_loss(x,t)
# )
if t == 0:
return hinge(x)
else:
return log_loss(x,t)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_grad(x):
a = sigmoid(x)
return a * (1 - a)
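# --- Hedged numerical sketch (illustration only; not used by the model below) ---
# log_loss(x, t) above is a numerically stable form of the temperature-smoothed hinge
#   t * log(1 + exp((1 - x) / t)),
# written with the max-subtraction (logaddexp) trick. As t -> 0 it approaches the plain
# hinge max(1 - x, 0), which is why smooth_hinge_loss falls back to hinge(x) at t == 0.
def _smooth_hinge_numpy(x, t):
    # Same quantity as log_loss, computed with NumPy's stable logaddexp.
    return t * np.logaddexp(0.0, (1.0 - x) / t)
# Example: for margins x = np.array([-1., 0., 1., 2.]),
# _smooth_hinge_numpy(x, 1e-3) is approximately [2, 1, 0, 0], matching hinge(x).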
class SmoothHinge(GenericNeuralNet):
# Expects labels to be +1 or -1
def __init__(self, input_dim, temp, weight_decay, use_bias, **kwargs):
self.weight_decay = weight_decay
self.input_dim = input_dim
self.temp = temp
self.use_bias = use_bias
super(SmoothHinge, self).__init__(**kwargs)
C = 1.0 / (self.num_train_examples * self.weight_decay)
self.svm_model = svm.LinearSVC(
C=C,
loss='hinge',
tol=1e-6,
fit_intercept=self.use_bias,
random_state=24,
max_iter=5000)
C_minus_one = 1.0 / ((self.num_train_examples - 1) * self.weight_decay)
self.svm_model_minus_one = svm.LinearSVC(
C=C_minus_one,
loss='hinge',
tol=1e-6,
fit_intercept=self.use_bias,
random_state=24,
max_iter=5000)
self.set_params_op = self.set_params()
assert self.num_classes == 2
def get_all_params(self):
all_params = []
for layer in ['softmax_linear']:
# for var_name in ['weights', 'biases']:
for var_name in ['weights']:
temp_tensor = tf.get_default_graph().get_tensor_by_name("%s/%s:0" % (layer, var_name))
all_params.append(temp_tensor)
return all_params
def placeholder_inputs(self):
input_placeholder = tf.placeholder(
tf.float32,
shape=(None, self.input_dim),
name='input_placeholder')
labels_placeholder = tf.placeholder(
tf.int32,
shape=(None),
name='labels_placeholder')
return input_placeholder, labels_placeholder
def inference(self, input):
# Softmax_linear
with tf.variable_scope('softmax_linear'):
# We regularize the bias to keep it in line with sklearn's
# liblinear implementation
if self.use_bias:
weights = variable_with_weight_decay(
'weights',
[self.input_dim + 1],
stddev=5.0 / math.sqrt(float(self.input_dim)),
wd=self.weight_decay)
# biases = variable(
# 'biases',
# [1],
# tf.constant_initializer(0.0))
logits = tf.matmul(
tf.concat([input, tf.ones([tf.shape(input)[0], 1])], axis=1),
tf.reshape(weights, [-1, 1]))# + biases
else:
weights = variable_with_weight_decay(
'weights',
[self.input_dim],
stddev=5.0 / math.sqrt(float(self.input_dim)),
wd=self.weight_decay)
logits = tf.matmul(
input,
tf.reshape(weights, [-1, 1]))
self.weights = weights
return logits
def retrain(self, num_steps, feed_dict):
# self.sess.run(
# self.update_learning_rate_op,
# feed_dict={self.learning_rate_placeholder: 1 * self.initial_learning_rate})
# for step in xrange(num_steps):
# self.sess.run(self.train_op, feed_dict=feed_dict)
if self.temp == 0:
self.train_with_svm(feed_dict, save_checkpoints=False, verbose=False)
else:
self.train_with_fmin(feed_dict, save_checkpoints=False, verbose=False)
def get_train_fmin_loss_fn(self, train_feed_dict):
def fmin_loss(W):
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
loss_val = self.sess.run(self.total_loss, feed_dict=train_feed_dict)
return loss_val
return fmin_loss
def get_train_fmin_grad_fn(self, train_feed_dict):
def fmin_grad(W):
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
grad_val = self.sess.run(self.grad_total_loss_op, feed_dict=train_feed_dict)[0]
return grad_val
return fmin_grad
def get_train_fmin_hvp_fn(self, train_feed_dict):
def fmin_hvp(W, v):
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
feed_dict = self.update_feed_dict_with_v_placeholder(train_feed_dict, self.vec_to_list(v))
hessian_vector_val = self.sess.run(self.hessian_vector, feed_dict=feed_dict)[0]
return hessian_vector_val
return fmin_hvp
def train(self):
if self.temp == 0:
self.train_with_svm(self.all_train_feed_dict)
else:
self.train_with_fmin(self.all_train_feed_dict)
def train_with_fmin(self, train_feed_dict, save_checkpoints=True, verbose=True):
fmin_loss_fn = self.get_train_fmin_loss_fn(train_feed_dict)
fmin_grad_fn = self.get_train_fmin_grad_fn(train_feed_dict)
fmin_hvp_fn = self.get_train_fmin_hvp_fn(train_feed_dict)
x0 = np.array(self.sess.run(self.params)[0])
# fmin_results = fmin_l_bfgs_b(
# # fmin_results = fmin_cg(
# fmin_loss_fn,
# x0,
# fmin_grad_fn
# # gtol=1e-8
# )
fmin_results = fmin_ncg(
f=fmin_loss_fn,
x0=x0,
fprime=fmin_grad_fn,
fhess_p=fmin_hvp_fn,
avextol=1e-8,
maxiter=100)
W = np.reshape(fmin_results, -1)
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if save_checkpoints: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
if verbose:
# print('CG training took %s iter.' % model.n_iter_)
print('After training with CG: ')
self.print_model_eval()
def train_with_svm(self, feed_dict, save_checkpoints=True, verbose=True):
X_train = feed_dict[self.input_placeholder]
Y_train = feed_dict[self.labels_placeholder]
num_train_examples = len(Y_train)
assert len(Y_train.shape) == 1
assert X_train.shape[0] == Y_train.shape[0]
if num_train_examples == self.num_train_examples:
print('Using normal model')
model = self.svm_model
elif num_train_examples == self.num_train_examples - 1:
print('Using model minus one')
model = self.svm_model_minus_one
else:
            raise ValueError("feed_dict has incorrect number of training examples")
model.fit(X_train, Y_train)
# sklearn returns coefficients in shape num_classes x num_features
# whereas our weights are defined as num_features x num_classes
        # so we have to transpose them first.
if self.use_bias:
W = np.concatenate((np.reshape(model.coef_.T, -1), model.intercept_), axis=0)
else:
W = np.reshape(model.coef_.T, -1)
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if save_checkpoints: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
if verbose:
print('SVM training took %s iter.' % model.n_iter_)
print('After SVM training: ')
self.print_model_eval()
# print('Starting SGD')
# for step in xrange(100):
# self.sess.run(self.train_op, feed_dict=feed_dict)
# self.print_model_eval()
def set_params(self):
if self.use_bias:
self.W_placeholder = tf.placeholder(
tf.float32,
shape=[self.input_dim + 1],
name='W_placeholder')
else:
self.W_placeholder = tf.placeholder(
tf.float32,
shape=[self.input_dim],
name='W_placeholder')
set_weights = tf.assign(self.weights, self.W_placeholder, validate_shape=True)
return [set_weights]
def predictions(self, logits):
preds = tf.sign(logits, name='preds')
return preds
def loss(self, logits, labels):
self.margin = tf.multiply(
tf.cast(labels, tf.float32),
tf.reshape(logits, [-1]))
indiv_loss_no_reg = smooth_hinge_loss(self.margin, self.temp)
loss_no_reg = tf.reduce_mean(indiv_loss_no_reg)
tf.add_to_collection('losses', loss_no_reg)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
return total_loss, loss_no_reg, indiv_loss_no_reg
def adversarial_loss(self, logits, labels):
wrong_labels = (labels - 1) * -1 # Flips 0s and 1s
wrong_margins = tf.multiply(
tf.cast(wrong_labels, tf.float32),
tf.reshape(logits, [-1]))
indiv_adversarial_loss = -smooth_hinge_loss(wrong_margins, self.temp)
adversarial_loss = tf.reduce_mean(indiv_adversarial_loss)
return adversarial_loss, indiv_adversarial_loss
def get_accuracy_op(self, logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
          A scalar float tensor with the fraction of examples (out of batch_size)
          that were predicted correctly.
"""
preds = tf.sign(tf.reshape(logits, [-1]))
correct = tf.reduce_sum(
tf.cast(
tf.equal(
preds,
tf.cast(labels, tf.float32)),
tf.int32))
return correct / tf.shape(labels)[0]
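# --- Hedged note on the sklearn C used above (a clarifying remark, not original code) ---
# LinearSVC minimizes 0.5 * ||w||^2 + C * sum_i hinge(margin_i), while this class minimizes
# mean_i hinge(margin_i) + 0.5 * weight_decay * ||w||^2 (the L2 term comes from
# variable_with_weight_decay). Dividing the sklearn objective by C * n shows the two
# coincide exactly when C = 1 / (n * weight_decay), which is how C and C_minus_one are
# chosen in __init__ above (with n and n - 1 training examples respectively).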
| 11,722
| 32.686782
| 114
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/dataset.py
|
# Adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/datasets/mnist.py
import numpy as np
class DataSet(object):
def __init__(self, x, labels):
if len(x.shape) > 2:
x = np.reshape(x, [x.shape[0], -1])
assert(x.shape[0] == labels.shape[0])
x = x.astype(np.float32)
self._x = x
self._x_batch = np.copy(x)
self._labels = labels
self._labels_batch = np.copy(labels)
self._num_examples = x.shape[0]
self._index_in_epoch = 0
@property
def x(self):
return self._x
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
def reset_batch(self):
self._index_in_epoch = 0
self._x_batch = np.copy(self._x)
self._labels_batch = np.copy(self._labels)
def next_batch(self, batch_size):
assert batch_size <= self._num_examples
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._x_batch = self._x_batch[perm, :]
self._labels_batch = self._labels_batch[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
end = self._index_in_epoch
return self._x_batch[start:end], self._labels_batch[start:end]
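# --- Hedged usage sketch (illustration only) ---
# next_batch() above hands out consecutive slices of the copied arrays and, once a batch
# would overrun the epoch, reshuffles _x_batch / _labels_batch and starts over. The toy
# shapes below are assumptions chosen purely for illustration.
def _demo_next_batch():
    x = np.arange(10, dtype=np.float32).reshape(5, 2)   # 5 examples, 2 features
    labels = np.array([0, 1, 0, 1, 0])
    ds = DataSet(x, labels)
    xb1, yb1 = ds.next_batch(3)   # first 3 examples, in the original order
    xb2, yb2 = ds.next_batch(3)   # would pass the end of the epoch, so the data is reshuffled first
    return xb1.shape, xb2.shape   # both (3, 2)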
def filter_dataset(X, Y, pos_class, neg_class):
"""
Filters out elements of X and Y that aren't one of pos_class or neg_class
then transforms labels of Y so that +1 = pos_class, -1 = neg_class.
"""
assert(X.shape[0] == Y.shape[0])
assert(len(Y.shape) == 1)
Y = Y.astype(int)
pos_idx = Y == pos_class
neg_idx = Y == neg_class
Y[pos_idx] = 1
Y[neg_idx] = -1
idx_to_keep = pos_idx | neg_idx
X = X[idx_to_keep, ...]
Y = Y[idx_to_keep]
return (X, Y)
def find_distances(target, X, theta=None):
assert len(X.shape) == 2, "X must be 2D, but it is currently %s" % len(X.shape)
target = np.reshape(target, -1)
assert X.shape[1] == len(target), \
"X (%s) and target (%s) must have same feature dimension" % (X.shape[1], len(target))
if theta is None:
return np.linalg.norm(X - target, axis=1)
else:
theta = np.reshape(theta, -1)
# Project onto theta
return np.abs((X - target).dot(theta))
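# --- Hedged usage sketch (illustration only) ---
# filter_dataset() keeps two chosen classes and relabels them +1 / -1; find_distances()
# then measures distances from a target example, optionally projected onto a direction
# theta. The random data and class ids below are hypothetical.
def _demo_filter_and_distances():
    X = np.random.RandomState(0).randn(6, 3)
    Y = np.array([0, 1, 2, 1, 0, 2])
    X_bin, Y_bin = filter_dataset(X, Y, pos_class=1, neg_class=0)   # Y_bin is now in {+1, -1}
    dists = find_distances(X_bin[0], X_bin)                         # Euclidean distances
    proj = find_distances(X_bin[0], X_bin, theta=np.ones(3))        # |(X - target) . theta|
    return Y_bin, dists, proj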
| 2,619
| 27.172043
| 123
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/imagenet_utils.py
|
# Taken from https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py
import numpy as np
import json
from keras.utils.data_utils import get_file
from keras import backend as K
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
def preprocess_input(x, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
if dim_ordering == 'th':
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
# 'RGB'->'BGR'
x = x[:, ::-1, :, :]
else:
x[:, :, :, 0] -= 103.939
x[:, :, :, 1] -= 116.779
x[:, :, :, 2] -= 123.68
# 'RGB'->'BGR'
x = x[:, :, :, ::-1]
return x
def decode_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
| 1,663
| 31.627451
| 105
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/nlprocessor.py
|
# from spacy.en import English
# import spacy
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from functools import reduce  # reduce is not a builtin under Python 3 (used in get_mean_word_vector)
import en_core_web_sm
class NLProcessor(object):
def __init__(self):
# self.nlp = English()
self.nlp = en_core_web_sm.load()
# self.nlp = spacy.load('en_core_web_sm-1.2.0')
self.vectorizer = CountVectorizer(min_df=5)
self.word_vec_len = 300
def process_spam(self, spam, ham):
"""
Takes in a list of spam emails and a list of ham emails
and returns a tuple (docs, Y), where:
- docs is a list of documents, with each document lemmatized
and stripped of stop and OOV words.
- Y is an array of classes {0, 1}. Each element is an example.
          1 means spam, 0 means ham.
"""
docs = []
for raw_doc in spam + ham:
doc = self.nlp(raw_doc)
docs.append(' '.join(
[token.lemma_ for token in doc if (token.is_alpha and not (token.is_oov or token.is_stop))]))
Y = np.zeros(len(spam) + len(ham))
Y[:len(spam)] = 1
Y[len(spam):] = 0
        docs_Y = list(zip(docs, Y))  # materialize so np.random.shuffle can shuffle in place (zip is lazy in Python 3)
np.random.shuffle(docs_Y)
docs, Y = zip(*docs_Y)
Y = np.array(Y)
return docs, Y
def process_newsgroups(self, newsgroups):
"""
Takes in a newsgroups object returned by fetch_20newsgroups()
and returns a tuple (docs, Y), where:
- docs is a list of documents, with each document lemmatized
and stripped of stop and OOV words.
- Y is an array of classes {+1, -1}. Each element is an example.
"""
docs = []
for raw_doc in newsgroups.data:
doc = self.nlp(raw_doc)
docs.append(' '.join(
[token.lemma_ for token in doc if (token.is_alpha and not (token.is_oov or token.is_stop))]))
# Convert target to {+1, -1}. It is originally {+1, 0}.
Y = (np.array(newsgroups.target) * 2) - 1
return (docs, Y)
def learn_vocab(self, docs):
"""
Learns a vocabulary from docs.
"""
self.vectorizer.fit(docs)
def get_bag_of_words(self, docs):
"""
Takes in a list of documents and converts it into a bag of words
representation. Returns X, a sparse matrix where each row is an example
and each column is a feature (word in the vocab).
"""
X = self.vectorizer.transform(docs)
return X
def get_mean_word_vector(self, docs):
"""
Takes in a list of documents and returns X, a matrix where each row
is an example and each column is the mean word vector in that document.
"""
n = len(docs)
X = np.empty([n, self.word_vec_len])
doc_vec = np.zeros(self.word_vec_len)
for idx, doc in enumerate(docs):
doc_vec = reduce(lambda x, y: x+y, [token.vector for token in self.nlp(doc)])
doc_vec /= n
X[idx, :] = doc_vec
return X
| 3,105
| 33.131868
| 109
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/inception_v3.py
|
# -*- coding: utf-8 -*-
"""Inception V3 model for Keras.
Note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function is also different (same as Xception).
# Reference
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
import numpy as np
from keras.models import Model
from keras import layers
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.preprocessing import image
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
def conv2d_bn(x,
filters,
num_row,
num_col,
padding='same',
strides=(1, 1),
name=None):
"""Utility function to apply conv + BN.
Arguments:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if K.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = Conv2D(
filters, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = Activation('relu', name=name)(x)
return x
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=139,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='inception_v3')
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
# Replace this with a local copy for reproducibility
# weights_path = get_file(
# 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
# WEIGHTS_PATH_NO_TOP,
# cache_subdir='models',
# md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
weights_path = 'inception/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
def preprocess_input(x):
x /= 255.
x -= 0.5
x *= 2.
return x
if __name__ == '__main__':
model = InceptionV3(include_top=True, weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
| 15,178
| 35.753027
| 152
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/genericNeuralNet.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.optimize import fmin_ncg
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import array_ops
from keras import backend as K
from tensorflow.contrib.learn.python.learn.datasets import base
from influence.hessians import hessian_vector_product
from influence.dataset import DataSet
def variable(name, shape, initializer):
dtype = tf.float32
var = tf.get_variable(
name,
shape,
initializer=initializer,
dtype=dtype)
return var
def variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = variable(
name,
shape,
initializer=tf.truncated_normal_initializer(
stddev=stddev,
dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def normalize_vector(v):
"""
Takes in a vector in list form, concatenates it to form a single vector,
normalizes it to unit length, then returns it in list form together with its norm.
"""
norm_val = np.linalg.norm(np.concatenate(v))
norm_v = [a/norm_val for a in v]
return norm_v, norm_val
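# --- Hedged mini-example (illustration only) ---
# normalize_vector() treats a list of parameter blocks as one long vector: it rescales the
# blocks to unit overall L2 norm and also returns the original norm. The block values below
# are arbitrary and chosen purely for illustration.
def _demo_normalize_vector():
    unit_blocks, norm = normalize_vector([np.array([3.0, 0.0]), np.array([4.0])])
    # norm == 5.0, and np.concatenate(unit_blocks) == [0.6, 0.0, 0.8] has unit L2 norm.
    return unit_blocks, norm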
class GenericNeuralNet(object):
"""
Multi-class classification.
"""
def __init__(self, **kwargs):
np.random.seed(0)
tf.set_random_seed(0)
self.batch_size = kwargs.pop('batch_size')
self.data_sets = kwargs.pop('data_sets')
self.train_dir = kwargs.pop('train_dir', 'output')
log_dir = kwargs.pop('log_dir', 'log')
self.model_name = kwargs.pop('model_name')
self.num_classes = kwargs.pop('num_classes')
self.initial_learning_rate = kwargs.pop('initial_learning_rate')
self.decay_epochs = kwargs.pop('decay_epochs')
if 'keep_probs' in kwargs: self.keep_probs = kwargs.pop('keep_probs')
else: self.keep_probs = None
if 'mini_batch' in kwargs: self.mini_batch = kwargs.pop('mini_batch')
else: self.mini_batch = True
if 'damping' in kwargs: self.damping = kwargs.pop('damping')
else: self.damping = 0.0
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
# Initialize session
config = tf.ConfigProto()
self.sess = tf.Session(config=config)
K.set_session(self.sess)
# Setup input
self.input_placeholder, self.labels_placeholder = self.placeholder_inputs()
self.num_train_examples = self.data_sets.train.labels.shape[0]
self.num_test_examples = self.data_sets.test.labels.shape[0]
# Setup inference and training
if self.keep_probs is not None:
self.keep_probs_placeholder = tf.placeholder(tf.float32, shape=(2))
self.logits = self.inference(self.input_placeholder, self.keep_probs_placeholder)
elif hasattr(self, 'inference_needs_labels'):
self.logits = self.inference(self.input_placeholder, self.labels_placeholder)
else:
self.logits = self.inference(self.input_placeholder)
self.total_loss, self.loss_no_reg, self.indiv_loss_no_reg = self.loss(
self.logits,
self.labels_placeholder)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.learning_rate = tf.Variable(self.initial_learning_rate, name='learning_rate', trainable=False)
self.learning_rate_placeholder = tf.placeholder(tf.float32)
self.update_learning_rate_op = tf.assign(self.learning_rate, self.learning_rate_placeholder)
self.train_op = self.get_train_op(self.total_loss, self.global_step, self.learning_rate)
self.train_sgd_op = self.get_train_sgd_op(self.total_loss, self.global_step, self.learning_rate)
self.accuracy_op = self.get_accuracy_op(self.logits, self.labels_placeholder)
self.preds = self.predictions(self.logits)
# Setup misc
self.saver = tf.train.Saver()
# Setup gradients and Hessians
self.params = self.get_all_params()
self.grad_total_loss_op = tf.gradients(self.total_loss, self.params)
self.grad_loss_no_reg_op = tf.gradients(self.loss_no_reg, self.params)
self.v_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in self.params]
self.u_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in self.params]
self.hessian_vector = hessian_vector_product(self.total_loss, self.params, self.v_placeholder)
self.grad_loss_wrt_input_op = tf.gradients(self.total_loss, self.input_placeholder)
# Because tf.gradients auto accumulates, we probably don't need the add_n (or even reduce_sum)
self.influence_op = tf.add_n(
[tf.reduce_sum(tf.multiply(a, array_ops.stop_gradient(b))) for a, b in zip(self.grad_total_loss_op, self.v_placeholder)])
self.grad_influence_wrt_input_op = tf.gradients(self.influence_op, self.input_placeholder)
self.checkpoint_file = os.path.join(self.train_dir, "%s-checkpoint" % self.model_name)
self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
self.all_test_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.test)
init = tf.global_variables_initializer()
self.sess.run(init)
self.vec_to_list = self.get_vec_to_list_fn()
self.adversarial_loss, self.indiv_adversarial_loss = self.adversarial_loss(self.logits, self.labels_placeholder)
if self.adversarial_loss is not None:
self.grad_adversarial_loss_op = tf.gradients(self.adversarial_loss, self.params)
def get_vec_to_list_fn(self):
params_val = self.sess.run(self.params)
self.num_params = len(np.concatenate(params_val))
print('Total number of parameters: %s' % self.num_params)
def vec_to_list(v):
return_list = []
cur_pos = 0
for p in params_val:
return_list.append(v[cur_pos : cur_pos+len(p)])
cur_pos += len(p)
assert cur_pos == len(v)
return return_list
return vec_to_list
def reset_datasets(self):
for data_set in self.data_sets:
if data_set is not None:
data_set.reset_batch()
def fill_feed_dict_with_all_ex(self, data_set):
feed_dict = {
self.input_placeholder: data_set.x,
self.labels_placeholder: data_set.labels
}
return feed_dict
def fill_feed_dict_with_all_but_one_ex(self, data_set, idx_to_remove):
num_examples = data_set.x.shape[0]
idx = np.array([True] * num_examples, dtype=bool)
idx[idx_to_remove] = False
feed_dict = {
self.input_placeholder: data_set.x[idx, :],
self.labels_placeholder: data_set.labels[idx]
}
return feed_dict
def fill_feed_dict_with_batch(self, data_set, batch_size=0):
if batch_size is None:
return self.fill_feed_dict_with_all_ex(data_set)
elif batch_size == 0:
batch_size = self.batch_size
input_feed, labels_feed = data_set.next_batch(batch_size)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def fill_feed_dict_with_some_ex(self, data_set, target_indices):
input_feed = data_set.x[target_indices, :].reshape(len(target_indices), -1)
labels_feed = data_set.labels[target_indices].reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def fill_feed_dict_with_one_ex(self, data_set, target_idx):
input_feed = data_set.x[target_idx, :].reshape(1, -1)
labels_feed = data_set.labels[target_idx].reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def fill_feed_dict_manual(self, X, Y):
X = np.array(X)
Y = np.array(Y)
input_feed = X.reshape(len(Y), -1)
labels_feed = Y.reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def minibatch_mean_eval(self, ops, data_set):
num_examples = data_set.num_examples
assert num_examples % self.batch_size == 0
num_iter = int(num_examples / self.batch_size)
self.reset_datasets()
ret = []
for i in xrange(num_iter):
feed_dict = self.fill_feed_dict_with_batch(data_set)
ret_temp = self.sess.run(ops, feed_dict=feed_dict)
if len(ret)==0:
for b in ret_temp:
if isinstance(b, list):
ret.append([c / float(num_iter) for c in b])
else:
ret.append([b / float(num_iter)])
else:
for counter, b in enumerate(ret_temp):
if isinstance(b, list):
ret[counter] = [a + (c / float(num_iter)) for (a, c) in zip(ret[counter], b)]
else:
ret[counter] += (b / float(num_iter))
return ret
def print_model_eval(self):
params_val = self.sess.run(self.params)
if self.mini_batch == True:
grad_loss_val, loss_no_reg_val, loss_val, train_acc_val = self.minibatch_mean_eval(
[self.grad_total_loss_op, self.loss_no_reg, self.total_loss, self.accuracy_op],
self.data_sets.train)
test_loss_val, test_acc_val = self.minibatch_mean_eval(
[self.loss_no_reg, self.accuracy_op],
self.data_sets.test)
else:
grad_loss_val, loss_no_reg_val, loss_val, train_acc_val = self.sess.run(
[self.grad_total_loss_op, self.loss_no_reg, self.total_loss, self.accuracy_op],
feed_dict=self.all_train_feed_dict)
test_loss_val, test_acc_val = self.sess.run(
[self.loss_no_reg, self.accuracy_op],
feed_dict=self.all_test_feed_dict)
print('Train loss (w reg) on all data: %s' % loss_val)
print('Train loss (w/o reg) on all data: %s' % loss_no_reg_val)
print('Test loss (w/o reg) on all data: %s' % test_loss_val)
print('Train acc on all data: %s' % train_acc_val)
print('Test acc on all data: %s' % test_acc_val)
print('Norm of the mean of gradients: %s' % np.linalg.norm(np.concatenate(grad_loss_val)))
print('Norm of the params: %s' % np.linalg.norm(np.concatenate(params_val)))
def retrain(self, num_steps, feed_dict):
for step in xrange(num_steps):
self.sess.run(self.train_op, feed_dict=feed_dict)
def update_learning_rate(self, step):
assert self.num_train_examples % self.batch_size == 0
num_steps_in_epoch = self.num_train_examples / self.batch_size
epoch = step // num_steps_in_epoch
multiplier = 1
if epoch < self.decay_epochs[0]:
multiplier = 1
elif epoch < self.decay_epochs[1]:
multiplier = 0.1
else:
multiplier = 0.01
self.sess.run(
self.update_learning_rate_op,
feed_dict={self.learning_rate_placeholder: multiplier * self.initial_learning_rate})
def train(self, num_steps,
iter_to_switch_to_batch=20000,
iter_to_switch_to_sgd=40000,
save_checkpoints=True, verbose=True):
"""
Trains a model for a specified number of steps.
"""
if verbose: print('Training for %s steps' % num_steps)
sess = self.sess
for step in xrange(num_steps):
self.update_learning_rate(step)
start_time = time.time()
if step < iter_to_switch_to_batch:
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train)
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
elif step < iter_to_switch_to_sgd:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
else:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_sgd_op, self.total_loss], feed_dict=feed_dict)
duration = time.time() - start_time
if verbose:
if step % 1000 == 0:
# Print status to stdout.
print('Step %d: loss = %.8f (%.3f sec)' % (step, loss_val, duration))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 100000 == 0 or (step + 1) == num_steps:
if save_checkpoints: self.saver.save(sess, self.checkpoint_file, global_step=step)
if verbose: self.print_model_eval()
def load_checkpoint(self, iter_to_load, do_checks=True):
checkpoint_to_load = "%s-%s" % (self.checkpoint_file, iter_to_load)
self.saver.restore(self.sess, checkpoint_to_load)
if do_checks:
print('Model %s loaded. Sanity checks ---' % checkpoint_to_load)
self.print_model_eval()
def get_train_op(self, total_loss, global_step, learning_rate):
"""
Return train_op
"""
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
return train_op
def get_train_sgd_op(self, total_loss, global_step, learning_rate=0.001):
"""
Return train_sgd_op
"""
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
return train_op
def get_accuracy_op(self, logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
          A scalar float tensor with the fraction of examples (out of batch_size)
          that were predicted correctly.
"""
correct = tf.nn.in_top_k(logits, labels, 1)
return tf.reduce_sum(tf.cast(correct, tf.int32)) / tf.shape(labels)[0]
def loss(self, logits, labels):
labels = tf.one_hot(labels, depth=self.num_classes)
# correct_prob = tf.reduce_sum(tf.multiply(labels, tf.nn.softmax(logits)), reduction_indices=1)
cross_entropy = - tf.reduce_sum(tf.multiply(labels, tf.nn.log_softmax(logits)), reduction_indices=1)
indiv_loss_no_reg = cross_entropy
loss_no_reg = tf.reduce_mean(cross_entropy, name='xentropy_mean')
tf.add_to_collection('losses', loss_no_reg)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
return total_loss, loss_no_reg, indiv_loss_no_reg
def adversarial_loss(self, logits, labels):
# Computes sum of log(1 - p(y = true|x))
# No regularization (because this is meant to be computed on the test data)
labels = tf.one_hot(labels, depth=self.num_classes)
wrong_labels = (labels - 1) * -1 # Flips 0s and 1s
wrong_labels_bool = tf.reshape(tf.cast(wrong_labels, tf.bool), [-1, self.num_classes])
wrong_logits = tf.reshape(tf.boolean_mask(logits, wrong_labels_bool), [-1, self.num_classes - 1])
indiv_adversarial_loss = tf.reduce_logsumexp(wrong_logits, reduction_indices=1) - tf.reduce_logsumexp(logits, reduction_indices=1)
adversarial_loss = tf.reduce_mean(indiv_adversarial_loss)
return adversarial_loss, indiv_adversarial_loss #, indiv_wrong_prob
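    # Worked identity for adversarial_loss above (a clarifying note, not original code):
    #   indiv_adversarial_loss = logsumexp(z_wrong) - logsumexp(z_all)
    #                          = log( sum_{y' != y_true} exp(z_{y'}) / sum_y exp(z_y) )
    #                          = log( 1 - p(y_true | x) ),
    # so increasing it moves probability mass away from the true label, which is why this
    # quantity (computed without regularization) is evaluated on test points.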
def update_feed_dict_with_v_placeholder(self, feed_dict, vec):
for pl_block, vec_block in zip(self.v_placeholder, vec):
feed_dict[pl_block] = vec_block
return feed_dict
def get_inverse_hvp(self, v, approx_type='cg', approx_params=None, verbose=True):
assert approx_type in ['cg', 'lissa']
if approx_type == 'lissa':
return self.get_inverse_hvp_lissa(v, **approx_params)
elif approx_type == 'cg':
return self.get_inverse_hvp_cg(v, verbose)
def get_inverse_hvp_lissa(self, v,
batch_size=None,
scale=10, damping=0.0, num_samples=1, recursion_depth=10000):
"""
This uses mini-batching; uncomment code for the single sample case.
"""
inverse_hvp = None
print_iter = recursion_depth / 10
for i in range(num_samples):
# samples = np.random.choice(self.num_train_examples, size=recursion_depth)
cur_estimate = v
for j in range(recursion_depth):
# feed_dict = fill_feed_dict_with_one_ex(
# data_set,
# images_placeholder,
# labels_placeholder,
# samples[j])
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train, batch_size=batch_size)
feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, cur_estimate)
hessian_vector_val = self.sess.run(self.hessian_vector, feed_dict=feed_dict)
cur_estimate = [a + (1-damping) * b - c/scale for (a,b,c) in zip(v, cur_estimate, hessian_vector_val)]
# Update: v + (I - Hessian_at_x) * cur_estimate
if (j % print_iter == 0) or (j == recursion_depth - 1):
print("Recursion at depth %s: norm is %.8lf" % (j, np.linalg.norm(np.concatenate(cur_estimate))))
feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, cur_estimate)
if inverse_hvp is None:
inverse_hvp = [b/scale for b in cur_estimate]
else:
inverse_hvp = [a + b/scale for (a, b) in zip(inverse_hvp, cur_estimate)]
inverse_hvp = [a/num_samples for a in inverse_hvp]
return inverse_hvp
def minibatch_hessian_vector_val(self, v):
num_examples = self.num_train_examples
if self.mini_batch == True:
batch_size = 100
assert num_examples % batch_size == 0
else:
batch_size = self.num_train_examples
num_iter = int(num_examples / batch_size)
self.reset_datasets()
hessian_vector_val = None
for i in xrange(num_iter):
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train, batch_size=batch_size)
# Can optimize this
feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, v)
hessian_vector_val_temp = self.sess.run(self.hessian_vector, feed_dict=feed_dict)
if hessian_vector_val is None:
hessian_vector_val = [b / float(num_iter) for b in hessian_vector_val_temp]
else:
hessian_vector_val = [a + (b / float(num_iter)) for (a,b) in zip(hessian_vector_val, hessian_vector_val_temp)]
hessian_vector_val = [a + self.damping * b for (a,b) in zip(hessian_vector_val, v)]
return hessian_vector_val
def get_fmin_loss_fn(self, v):
def get_fmin_loss(x):
hessian_vector_val = self.minibatch_hessian_vector_val(self.vec_to_list(x))
return 0.5 * np.dot(np.concatenate(hessian_vector_val), x) - np.dot(np.concatenate(v), x)
return get_fmin_loss
def get_fmin_grad_fn(self, v):
def get_fmin_grad(x):
hessian_vector_val = self.minibatch_hessian_vector_val(self.vec_to_list(x))
return np.concatenate(hessian_vector_val) - np.concatenate(v)
return get_fmin_grad
def get_fmin_hvp(self, x, p):
hessian_vector_val = self.minibatch_hessian_vector_val(self.vec_to_list(p))
return np.concatenate(hessian_vector_val)
def get_cg_callback(self, v, verbose):
fmin_loss_fn = self.get_fmin_loss_fn(v)
def fmin_loss_split(x):
hessian_vector_val = self.minibatch_hessian_vector_val(self.vec_to_list(x))
return 0.5 * np.dot(np.concatenate(hessian_vector_val), x), -np.dot(np.concatenate(v), x)
def cg_callback(x):
# x is current params
v = self.vec_to_list(x)
idx_to_remove = 5
single_train_feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, idx_to_remove)
train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
predicted_loss_diff = np.dot(np.concatenate(v), np.concatenate(train_grad_loss_val)) / self.num_train_examples
if verbose:
print('Function value: %s' % fmin_loss_fn(x))
quad, lin = fmin_loss_split(x)
print('Split function value: %s, %s' % (quad, lin))
print('Predicted loss diff on train_idx %s: %s' % (idx_to_remove, predicted_loss_diff))
return cg_callback
def get_inverse_hvp_cg(self, v, verbose):
fmin_loss_fn = self.get_fmin_loss_fn(v)
fmin_grad_fn = self.get_fmin_grad_fn(v)
cg_callback = self.get_cg_callback(v, verbose)
fmin_results = fmin_ncg(
f=fmin_loss_fn,
x0=np.concatenate(v),
fprime=fmin_grad_fn,
fhess_p=self.get_fmin_hvp,
callback=cg_callback,
avextol=1e-8,
maxiter=100)
return self.vec_to_list(fmin_results)
def get_test_grad_loss_no_reg_val(self, test_indices, batch_size=100, loss_type='normal_loss'):
if loss_type == 'normal_loss':
op = self.grad_loss_no_reg_op
elif loss_type == 'adversarial_loss':
op = self.grad_adversarial_loss_op
else:
            raise ValueError('Loss must be specified')
if test_indices is not None:
num_iter = int(np.ceil(len(test_indices) / batch_size))
test_grad_loss_no_reg_val = None
for i in range(num_iter):
start = i * batch_size
end = int(min((i+1) * batch_size, len(test_indices)))
test_feed_dict = self.fill_feed_dict_with_some_ex(self.data_sets.test, test_indices[start:end])
temp = self.sess.run(op, feed_dict=test_feed_dict)
if test_grad_loss_no_reg_val is None:
test_grad_loss_no_reg_val = [a * (end-start) for a in temp]
else:
test_grad_loss_no_reg_val = [a + b * (end-start) for (a, b) in zip(test_grad_loss_no_reg_val, temp)]
test_grad_loss_no_reg_val = [a/len(test_indices) for a in test_grad_loss_no_reg_val]
else:
test_grad_loss_no_reg_val = self.minibatch_mean_eval([op], self.data_sets.test)[0]
return test_grad_loss_no_reg_val
def get_influence_on_test_loss(self, test_indices, train_idx,
approx_type='cg', approx_params=None, force_refresh=True, test_description=None,
loss_type='normal_loss',
X=None, Y=None):
# If train_idx is None then use X and Y (phantom points)
# Need to make sure test_idx stays consistent between models
# because mini-batching permutes dataset order
if train_idx is None:
            if (X is None) or (Y is None): raise ValueError('X and Y must be specified if using phantom points.')
            if X.shape[0] != len(Y): raise ValueError('X and Y must have the same length.')
        else:
            if (X is not None) or (Y is not None): raise ValueError('X and Y cannot be specified if train_idx is specified.')
test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, loss_type=loss_type)
print('Norm of test gradient: %s' % np.linalg.norm(np.concatenate(test_grad_loss_no_reg_val)))
start_time = time.time()
if test_description is None:
test_description = test_indices
approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (self.model_name, approx_type, loss_type, test_description))
if os.path.exists(approx_filename) and force_refresh == False:
inverse_hvp = list(np.load(approx_filename)['inverse_hvp'])
print('Loaded inverse HVP from %s' % approx_filename)
else:
inverse_hvp = self.get_inverse_hvp(
test_grad_loss_no_reg_val,
approx_type,
approx_params)
np.savez(approx_filename, inverse_hvp=inverse_hvp)
print('Saved inverse HVP to %s' % approx_filename)
duration = time.time() - start_time
print('Inverse HVP took %s sec' % duration)
start_time = time.time()
if train_idx is None:
num_to_remove = len(Y)
predicted_loss_diffs = np.zeros([num_to_remove])
for counter in np.arange(num_to_remove):
single_train_feed_dict = self.fill_feed_dict_manual(X[counter, :], [Y[counter]])
train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp), np.concatenate(train_grad_loss_val)) / self.num_train_examples
else:
num_to_remove = len(train_idx)
predicted_loss_diffs = np.zeros([num_to_remove])
for counter, idx_to_remove in enumerate(train_idx):
single_train_feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, idx_to_remove)
train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp), np.concatenate(train_grad_loss_val)) / self.num_train_examples
duration = time.time() - start_time
print('Multiplying by %s train examples took %s sec' % (num_to_remove, duration))
return predicted_loss_diffs
def find_eigvals_of_hessian(self, num_iter=100, num_prints=10):
# Setup
print_iterations = num_iter / num_prints
feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, 0)
# Initialize starting vector
grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=feed_dict)
initial_v = []
for a in grad_loss_val:
initial_v.append(np.random.random(a.shape))
initial_v, _ = normalize_vector(initial_v)
        # Do power iteration to find largest eigenvalue
        print('Starting power iteration to find largest eigenvalue...')
        # Repeatedly normalize and apply the Hessian; the norm converges to the largest eigenvalue.
        cur_estimate = initial_v
        for i in range(num_iter):
            cur_estimate, norm_val = normalize_vector(cur_estimate)
            cur_estimate = self.minibatch_hessian_vector_val(cur_estimate)
            if i % print_iterations == 0:
                print(norm_val)
        largest_eig = norm_val
        print('Largest eigenvalue is %s' % largest_eig)
# Do power iteration to find smallest eigenvalue
print('Starting power iteration to find smallest eigenvalue...')
cur_estimate = initial_v
for i in range(num_iter):
cur_estimate, norm_val = normalize_vector(cur_estimate)
hessian_vector_val = self.minibatch_hessian_vector_val(cur_estimate)
new_cur_estimate = [a - largest_eig * b for (a,b) in zip(hessian_vector_val, cur_estimate)]
if i % print_iterations == 0:
print(-norm_val + largest_eig)
dotp = np.dot(np.concatenate(new_cur_estimate), np.concatenate(cur_estimate))
print("dot: %s" % dotp)
cur_estimate = new_cur_estimate
smallest_eig = -norm_val + largest_eig
assert dotp < 0, "Eigenvalue calc failed to find largest eigenvalue"
print('Largest eigenvalue is %s' % largest_eig)
print('Smallest eigenvalue is %s' % smallest_eig)
return largest_eig, smallest_eig
def get_grad_of_influence_wrt_input(self, train_indices, test_indices,
approx_type='cg', approx_params=None, force_refresh=True, verbose=True, test_description=None,
loss_type='normal_loss'):
"""
If the loss goes up when you remove a point, then it was a helpful point.
So positive influence = helpful.
If we move in the direction of the gradient, we make the influence even more positive,
so even more helpful.
Thus if we want to make the test point more wrong, we have to move in the opposite direction.
"""
# Calculate v_placeholder (gradient of loss at test point)
test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, loss_type=loss_type)
if verbose: print('Norm of test gradient: %s' % np.linalg.norm(np.concatenate(test_grad_loss_no_reg_val)))
start_time = time.time()
if test_description is None:
test_description = test_indices
approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (self.model_name, approx_type, loss_type, test_description))
if os.path.exists(approx_filename) and force_refresh == False:
inverse_hvp = list(np.load(approx_filename)['inverse_hvp'])
if verbose: print('Loaded inverse HVP from %s' % approx_filename)
else:
inverse_hvp = self.get_inverse_hvp(
test_grad_loss_no_reg_val,
approx_type,
approx_params,
verbose=verbose)
np.savez(approx_filename, inverse_hvp=inverse_hvp)
if verbose: print('Saved inverse HVP to %s' % approx_filename)
duration = time.time() - start_time
if verbose: print('Inverse HVP took %s sec' % duration)
grad_influence_wrt_input_val = None
for counter, train_idx in enumerate(train_indices):
# Put in the train example in the feed dict
grad_influence_feed_dict = self.fill_feed_dict_with_one_ex(
self.data_sets.train,
train_idx)
self.update_feed_dict_with_v_placeholder(grad_influence_feed_dict, inverse_hvp)
# Run the grad op with the feed dict
current_grad_influence_wrt_input_val = self.sess.run(self.grad_influence_wrt_input_op, feed_dict=grad_influence_feed_dict)[0][0, :]
if grad_influence_wrt_input_val is None:
grad_influence_wrt_input_val = np.zeros([len(train_indices), len(current_grad_influence_wrt_input_val)])
grad_influence_wrt_input_val[counter, :] = current_grad_influence_wrt_input_val
return grad_influence_wrt_input_val
def update_train_x(self, new_train_x):
assert np.all(new_train_x.shape == self.data_sets.train.x.shape)
new_train = DataSet(new_train_x, np.copy(self.data_sets.train.labels))
self.data_sets = base.Datasets(train=new_train, validation=self.data_sets.validation, test=self.data_sets.test)
self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
self.reset_datasets()
def update_train_x_y(self, new_train_x, new_train_y):
new_train = DataSet(new_train_x, new_train_y)
self.data_sets = base.Datasets(train=new_train, validation=self.data_sets.validation, test=self.data_sets.test)
self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
self.num_train_examples = len(new_train_y)
self.reset_datasets()
def update_test_x_y(self, new_test_x, new_test_y):
new_test = DataSet(new_test_x, new_test_y)
self.data_sets = base.Datasets(train=self.data_sets.train, validation=self.data_sets.validation, test=new_test)
self.all_test_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.test)
self.num_test_examples = len(new_test_y)
self.reset_datasets()
| 33,790
| 39.565426
| 158
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/binaryLogisticRegressionWithLBFGS.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.special import expit
import os.path
import time
import tensorflow as tf
import math
from influence.hessians import hessians
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
from influence.logisticRegressionWithLBFGS import LogisticRegressionWithLBFGS
class BinaryLogisticRegressionWithLBFGS(LogisticRegressionWithLBFGS):
def __init__(self, **kwargs):
super(BinaryLogisticRegressionWithLBFGS, self).__init__(**kwargs)
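        # sklearn's LogisticRegression minimizes 0.5 * ||w||^2 + C * sum_i log_loss_i,
        # so choosing C = 1 / (n * weight_decay) makes the sklearn fit equivalent to
        # minimizing the average log-loss plus 0.5 * weight_decay * ||w||^2, which is
        # presumably the objective the TF graph uses via variable_with_weight_decay.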
C = 1.0 / (self.num_train_examples * self.weight_decay)
self.sklearn_model = linear_model.LogisticRegression(
C=C,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
warm_start=True,
max_iter=1000)
C_minus_one = 1.0 / ((self.num_train_examples - 1) * self.weight_decay)
self.sklearn_model_minus_one = linear_model.LogisticRegression(
C=C_minus_one,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
warm_start=True,
max_iter=1000)
def inference(self, input):
with tf.variable_scope('softmax_linear'):
weights = variable_with_weight_decay(
'weights',
[self.input_dim],
stddev=1.0 / math.sqrt(float(self.input_dim)),
wd=self.weight_decay)
logits = tf.matmul(input, tf.reshape(weights, [self.input_dim, 1])) # + biases
zeros = tf.zeros_like(logits)
logits_with_zeros = tf.concat([zeros, logits], 1)
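            # Concatenating a zero column turns the single-logit binary model into a
            # two-class softmax: softmax([0, w.x])[1] == sigmoid(w.x), so the usual
            # multi-class loss can be applied unchanged.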
self.weights = weights
return logits_with_zeros
def set_params(self):
self.W_placeholder = tf.placeholder(
tf.float32,
shape=[self.input_dim],
name='W_placeholder')
set_weights = tf.assign(self.weights, self.W_placeholder, validate_shape=True)
return [set_weights]
# Special-purpose function for paper experiments
# that has flags for ignoring training error or Hessian
def get_influence_on_test_loss(self, test_indices, train_idx,
approx_type='cg', approx_params=None, force_refresh=True, test_description=None,
loss_type='normal_loss',
ignore_training_error=False,
ignore_hessian=False
):
test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, loss_type=loss_type)
print('Norm of test gradient: %s' % np.linalg.norm(np.concatenate(test_grad_loss_no_reg_val)))
start_time = time.time()
if test_description is None:
test_description = test_indices
approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (self.model_name, approx_type, loss_type, test_description))
if ignore_hessian == False:
if os.path.exists(approx_filename) and force_refresh == False:
inverse_hvp = list(np.load(approx_filename)['inverse_hvp'])
print('Loaded inverse HVP from %s' % approx_filename)
else:
inverse_hvp = self.get_inverse_hvp(
test_grad_loss_no_reg_val,
approx_type,
approx_params)
np.savez(approx_filename, inverse_hvp=inverse_hvp)
print('Saved inverse HVP to %s' % approx_filename)
else:
inverse_hvp = test_grad_loss_no_reg_val
duration = time.time() - start_time
print('Inverse HVP took %s sec' % duration)
start_time = time.time()
num_to_remove = len(train_idx)
predicted_loss_diffs = np.zeros([num_to_remove])
for counter, idx_to_remove in enumerate(train_idx):
if ignore_training_error == False:
single_train_feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, idx_to_remove)
train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
else:
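                # For labels in {0, 1} mapped to y = 2*label - 1, the exact per-example
                # gradient of the log-loss is -sigmoid(-y * x.theta) * y * x; ignoring
                # the training error drops the sigmoid factor and uses -y * x directly.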
train_grad_loss_val = [-(self.data_sets.train.labels[idx_to_remove] * 2 - 1) * self.data_sets.train.x[idx_to_remove, :]]
predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp), np.concatenate(train_grad_loss_val)) / self.num_train_examples
duration = time.time() - start_time
print('Multiplying by %s train examples took %s sec' % (num_to_remove, duration))
return predicted_loss_diffs
def get_loo_influences(self):
X_train = self.data_sets.train.x
Y_train = self.data_sets.train.labels * 2 - 1
theta = self.sess.run(self.params)[0]
# Pre-calculate inverse covariance matrix
n = X_train.shape[0]
dim = X_train.shape[1]
cov = np.zeros([dim, dim])
probs = expit(np.dot(X_train, theta.T))
weighted_X_train = np.reshape(probs * (1 - probs), (-1, 1)) * X_train
cov = np.dot(X_train.T, weighted_X_train) / n
cov += self.weight_decay * np.eye(dim)
cov_lu_factor = slin.lu_factor(cov)
assert(len(Y_train.shape) == 1)
x_train_theta = np.reshape(X_train.dot(theta.T), [-1])
sigma = expit(-Y_train * x_train_theta)
d_theta = slin.lu_solve(cov_lu_factor, X_train.T).T
quad_x = np.sum(X_train * d_theta, axis=1)
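        # Closed-form leave-one-out influence for L2-regularized logistic regression:
        # influence_i ~ sigmoid(-y_i * x_i.theta) * x_i^T H^{-1} x_i, where H is the
        # weight-decay-regularized empirical Hessian assembled above.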
return sigma * quad_x
| 5,853
| 34.695122
| 142
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/inceptionModel.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import os.path
import time
import tensorflow as tf
import math
from influence.hessians import hessians
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
from influence.inception_v3 import InceptionV3
from keras.layers import Flatten
from keras.layers import AveragePooling2D
from keras.utils.data_utils import get_file
from keras import backend as K
class BinaryInceptionModel(GenericNeuralNet):
def __init__(self, img_side, num_channels, weight_decay, **kwargs):
self.weight_decay = weight_decay
self.img_side = img_side
self.num_channels = num_channels
self.input_dim = img_side * img_side * num_channels
self.num_features = 2048 # Hardcoded for inception. For some reason Flatten() doesn't register num_features.
super(BinaryInceptionModel, self).__init__(**kwargs)
self.load_inception_weights()
# Do we need to set trainable to False?
# We might be unnecessarily blowing up the graph by including all of the train operations
# needed for the inception network.
self.set_params_op = self.set_params()
C = 1.0 / ((self.num_train_examples) * self.weight_decay)
self.sklearn_model = linear_model.LogisticRegression(
C=C,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
# multi_class='multinomial',
warm_start=True,
max_iter=1000)
C_minus_one = 1.0 / ((self.num_train_examples - 1) * self.weight_decay)
self.sklearn_model_minus_one = linear_model.LogisticRegression(
C=C_minus_one,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
# multi_class='multinomial',
warm_start=True,
max_iter=1000)
def get_all_params(self):
all_params = []
for layer in ['softmax_linear']:
# for var_name in ['weights', 'biases']:
for var_name in ['weights']:
temp_tensor = tf.get_default_graph().get_tensor_by_name("%s/%s:0" % (layer, var_name))
all_params.append(temp_tensor)
return all_params
def placeholder_inputs(self):
input_placeholder = tf.placeholder(
tf.float32,
shape=(None, self.input_dim),
name='input_placeholder')
labels_placeholder = tf.placeholder(
tf.int32,
shape=(None),
name='labels_placeholder')
return input_placeholder, labels_placeholder
def fill_feed_dict_with_all_ex(self, data_set):
feed_dict = {
self.input_placeholder: data_set.x,
self.labels_placeholder: data_set.labels,
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_all_but_one_ex(self, data_set, idx_to_remove):
num_examples = data_set.x.shape[0]
idx = np.array([True] * num_examples, dtype=bool)
idx[idx_to_remove] = False
feed_dict = {
self.input_placeholder: data_set.x[idx, :],
self.labels_placeholder: data_set.labels[idx],
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_batch(self, data_set, batch_size=0):
if batch_size is None:
return self.fill_feed_dict_with_all_ex(data_set)
elif batch_size == 0:
batch_size = self.batch_size
input_feed, labels_feed = data_set.next_batch(batch_size)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_some_ex(self, data_set, target_indices):
input_feed = data_set.x[target_indices, :].reshape(len(target_indices), -1)
labels_feed = data_set.labels[target_indices].reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_one_ex(self, data_set, target_idx):
input_feed = data_set.x[target_idx, :].reshape(1, -1)
labels_feed = data_set.labels[target_idx].reshape(1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
K.learning_phase(): 0
}
return feed_dict
def load_inception_weights(self):
# Replace this with a local copy for reproducibility
# TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
# weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
# TF_WEIGHTS_PATH_NO_TOP,
# cache_subdir='models',
# md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
weights_path = 'inception/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
self.inception_model.load_weights(weights_path)
def inference(self, input):
reshaped_input = tf.reshape(input, [-1, self.img_side, self.img_side, self.num_channels])
self.inception_model = InceptionV3(include_top=False, weights='imagenet', input_tensor=reshaped_input)
raw_inception_features = self.inception_model.output
pooled_inception_features = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(raw_inception_features)
self.inception_features = Flatten(name='flatten')(pooled_inception_features)
with tf.variable_scope('softmax_linear'):
weights = variable_with_weight_decay(
'weights',
[self.num_features],
stddev=1.0 / math.sqrt(float(self.num_features)),
wd=self.weight_decay)
logits = tf.matmul(self.inception_features, tf.reshape(weights, [-1, 1]))
zeros = tf.zeros_like(logits)
logits_with_zeros = tf.concat([zeros, logits], 1)
self.weights = weights
return logits_with_zeros
def predictions(self, logits):
preds = tf.nn.softmax(logits, name='preds')
return preds
def set_params(self):
# See if we can automatically infer weight shape
self.W_placeholder = tf.placeholder(
tf.float32,
shape=[self.num_features],
name='W_placeholder')
set_weights = tf.assign(self.weights, self.W_placeholder, validate_shape=True)
return [set_weights]
def retrain(self, num_steps, feed_dict):
self.train_with_LBFGS(
feed_dict=feed_dict,
save_checkpoints=False,
verbose=False)
def train(self, num_steps=None,
iter_to_switch_to_batch=None,
iter_to_switch_to_sgd=None,
save_checkpoints=True, verbose=True):
self.train_with_LBFGS(
feed_dict=self.all_train_feed_dict,
save_checkpoints=save_checkpoints,
verbose=verbose)
def train_with_SGD(self, **kwargs):
super(BinaryInceptionModel, self).train(**kwargs)
def minibatch_inception_features(self, feed_dict):
num_examples = feed_dict[self.input_placeholder].shape[0]
batch_size = 100
num_iter = int(np.ceil(num_examples / batch_size))
ret = np.zeros([num_examples, self.num_features])
batch_feed_dict = {}
batch_feed_dict[K.learning_phase()] = 0
        for i in range(num_iter):
start = i * batch_size
end = (i+1) * batch_size
if end > num_examples:
end = num_examples
batch_feed_dict[self.input_placeholder] = feed_dict[self.input_placeholder][start:end]
batch_feed_dict[self.labels_placeholder] = feed_dict[self.labels_placeholder][start:end]
ret[start:end, :] = self.sess.run(self.inception_features, feed_dict=batch_feed_dict)
return ret
def train_with_LBFGS(self, feed_dict, save_checkpoints=True, verbose=True):
# More sanity checks to see if predictions are the same?
# X_train = feed_dict[self.input_placeholder]
# X_train = self.sess.run(self.inception_features, feed_dict=feed_dict)
X_train = self.minibatch_inception_features(feed_dict)
Y_train = feed_dict[self.labels_placeholder]
num_train_examples = len(Y_train)
assert len(Y_train.shape) == 1
assert X_train.shape[0] == Y_train.shape[0]
if num_train_examples == self.num_train_examples:
print('Using normal model')
model = self.sklearn_model
elif num_train_examples == self.num_train_examples - 1:
print('Using model minus one')
model = self.sklearn_model_minus_one
else:
            raise ValueError("feed_dict has incorrect number of training examples")
model.fit(X_train, Y_train)
# sklearn returns coefficients in shape num_classes x num_features
# whereas our weights are defined as num_features x num_classes
        # so we have to transpose them first.
W = np.reshape(model.coef_.T, -1)
# b = model.intercept_
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
# params_feed_dict[self.b_placeholder] = b
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if save_checkpoints: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
if verbose:
print('LBFGS training took %s iter.' % model.n_iter_)
print('After training with LBFGS: ')
self.print_model_eval()
def load_weights_from_disk(self, weights_filename, do_check=True, do_save=True):
W = np.load('%s' % weights_filename)
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if do_save: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
print('Loaded weights from disk.')
if do_check: self.print_model_eval()
def get_hessian(self):
H = self.sess.run(self.hessians_op)
print(H.shape)
# Maybe update Hessian every time main train routine is called?
| 11,113
| 34.059937
| 165
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 3/influence/hessians.py
|
### Adapted from TF repo
import tensorflow as tf
from tensorflow import gradients
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` will return an expression that evaluates to the same values
  as 1/2 (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
# grads = xs
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
grads_with_none = gradients(elemwise_products, xs)
return_grads = [
grad_elem if grad_elem is not None \
else tf.zeros_like(x) \
for x, grad_elem in zip(xs, grads_with_none)]
return return_grads
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
gate_gradients=False, aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(y)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
ValueError: if the arguments are invalid or not supported. Currently,
this function only supports one-dimensional `x` in `xs`.
"""
xs = _AsList(xs)
kwargs = {
'colocate_gradients_with_ops': colocate_gradients_with_ops,
'gate_gradients': gate_gradients,
'aggregation_method': aggregation_method
}
# Compute a hessian matrix for each x in xs
hessians = []
for i, x in enumerate(xs):
# Check dimensions
ndims = x.get_shape().ndims
if ndims is None:
raise ValueError('Cannot compute Hessian because the dimensionality of '
'element number %d of `xs` cannot be determined' % i)
elif ndims != 1:
raise ValueError('Computing hessians is currently only supported for '
'one-dimensional tensors. Element number %d of `xs` has '
'%d dimensions.' % (i, ndims))
with ops.name_scope(name + '_first_derivative'):
# Compute the partial derivatives of the input with respect to all
# elements of `x`
_gradients = tf.gradients(ys, x, **kwargs)[0]
# Unpack the gradients into a list so we can take derivatives with
# respect to each element
      _gradients = array_ops.unstack(_gradients)
with ops.name_scope(name + '_second_derivative'):
# Compute the partial derivatives with respect to each element of the list
_hess = [tf.gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
# Pack the list into a matrix and add to the list of hessians
      hessians.append(array_ops.stack(_hess, name=name))
return hessians
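if __name__ == "__main__":
    # Minimal self-check of hessian_vector_product (illustrative sketch only;
    # assumes a TF1 graph-mode session, matching the rest of this repository).
    x = tf.Variable([1.0, 2.0])
    y = tf.reduce_sum(x * x)  # Hessian of y w.r.t. x is 2 * I
    hvp = hessian_vector_product(y, [x], [tf.constant([1.0, 0.0])])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(hvp))  # expect a single array equal to [2., 0.]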
| 5,137
| 40.772358
| 83
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 17/main_BioLearning.py
|
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import pickle
### Loading CIFAR100
def load_cifar100(filename):
with open(filename, 'rb')as f:
datadict = pickle.load(f, encoding='latin1')
images = datadict['data']
labels = datadict['fine_labels']
labels = np.array(labels)
return images, labels
images_train, labels_train = load_cifar100("./cifar-100-python/train")
images_test, labels_test = load_cifar100("./cifar-100-python/test")
### The function to draw weights
def draw_weights(synapses, Kx, Ky):
yy=np.random.randint(90, size=1)
HM=np.zeros((32*Ky,32*Kx,3))
for y in range(Ky):
for x in range(Kx):
            HM[y*32:(y+1)*32,x*32:(x+1)*32,:]=synapses[yy,:].reshape(3,32,32).transpose((1,2,0)) # CIFAR pixels are stored channel-first
yy += 1
plt.clf()
nc=np.amax(np.absolute(HM))
im=plt.imshow(HM,cmap='bwr',vmin=-nc,vmax=nc)
fig.colorbar(im,ticks=[np.amin(HM), 0, np.amax(HM)])
plt.axis('off')
fig.canvas.draw()
### Model hyperparamters
M = images_train
Nc=10
N=3072
Ns=50000
eps0=2e-2 # learning rate
Kx=10
Ky=10
hid=1000 # number of hidden units (only a Ky-by-Kx subset of them is displayed)
mu=0.0
sigma=1.0
Nep=200 # number of epochs
Num=100 # size of the minibatch
prec=1e-30
delta=0.4 # Strength of the anti-hebbian learning
p=2.0 # Lebesgue norm of the weights
k=2 # ranking parameter, must be an integer greater than or equal to 2
### Bio-learning
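# The training loop below implements a Krotov-Hopfield-style "bio" plasticity rule:
# for each minibatch the input current to hidden unit mu is
#   I_mu = sum_i sign(W_mu_i) * |W_mu_i|**(p-1) * x_i,
# the top-ranked unit per example gets g = 1, the k-th ranked unit gets g = -delta,
# all other units get g = 0, and the weight update is
#   dW_mu_i ~ sum_batch g_mu * x_i - (sum_batch g_mu * I_mu) * W_mu_i,
# normalized by its largest absolute entry before the learning rate eps is applied.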
fig=plt.figure(figsize=(12.9,10))
synapses = np.random.normal(mu, sigma, (hid, N))
for nep in range(Nep):
print(nep)
eps=eps0*(1-nep/Nep)
M=M[np.random.permutation(Ns),:]
for i in range(Ns//Num):
inputs=np.transpose(M[i*Num:(i+1)*Num,:])
sig=np.sign(synapses)
tot_input=np.dot(sig*np.absolute(synapses)**(p-1),inputs)
y=np.argsort(tot_input,axis=0)
yl=np.zeros((hid,Num))
yl[y[hid-1,:],np.arange(Num)]=1.0
yl[y[hid-k],np.arange(Num)]=-delta
xx=np.sum(np.multiply(yl,tot_input),1)
ds=np.dot(yl,np.transpose(inputs)) - np.multiply(np.tile(xx.reshape(xx.shape[0],1),(1,N)),synapses)
nc=np.amax(np.absolute(ds))
if nc<prec:
nc=prec
synapses += eps*np.true_divide(ds,nc)
draw_weights(synapses, Kx, Ky)
### Visualization
Kx = 20
Ky = 50
fig=plt.figure(figsize=(12.9,10))
yy=0
HMI=np.zeros((32*Ky,32*Kx,3))
for y in range(Ky):
for x in range(Kx):
HM = synapses[yy]
HM_max = np.max(HM)
HM_min = np.min(HM)
HM = (HM-HM_min)/(HM_max-HM_min)
HM = np.reshape(HM,(3,32,32)).transpose((1,2,0))
HMI[y*32:(y+1)*32,x*32:(x+1)*32,:]= HM
yy += 1
plt.clf()
nc=np.amax(np.absolute(HMI))
im=plt.imshow(HMI,cmap='bwr',vmin=-nc,vmax=nc)
fig.colorbar(im,ticks=[np.amin(HMI), 0, np.amax(HMI)])
plt.axis('off')
fig.canvas.draw()
### Save Weights
synapse_transpose = np.transpose(synapses)
np.save('FrozenWeights_cifar100.npy', synapse_transpose)
| 3,108
| 23.674603
| 107
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 7/main_rule_extraction.py
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
import numpy as np
import tensorflow as tf
### Import Iris Data to play with
# Sepal Length, Sepal Width, Petal Length and Petal Width
iris = datasets.load_iris()
X = iris.data # use all four features (sepal length/width, petal length/width)
y_temp = iris.target
Y = np.zeros((150,3))
for k in np.arange(150):
Y[k,y_temp[k]] = 1
### Define an MLP Model
tf.reset_default_graph()
# correct labels
y_ = tf.placeholder(tf.float32, [None, 3])
# input data
x = tf.placeholder(tf.float32, [None, 4])
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.001)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.0, shape=shape)
return tf.Variable(initial)
W_fc1 = weight_variable([4, 3])
b_fc1 = bias_variable([3])
h_fc1 = tf.nn.sigmoid(tf.matmul(x, W_fc1) + b_fc1)
W_fc2 = weight_variable([3, 3])
b_fc2 = bias_variable([3])
h_fc2 = tf.nn.sigmoid(tf.matmul(h_fc1, W_fc2) + b_fc2)
y = tf.nn.softmax(h_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
### Import Trained Model
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess,"SaveIrisModel/iris.ckpt")
train_accuracy = sess.run(accuracy, feed_dict={x: X, y_ : Y})
print(train_accuracy)
### Find Rules
# set epsilon
train_accuracy = sess.run(accuracy, feed_dict={x: X, y_ : Y})
epsilon = 0.5
activations = sess.run(h_fc1, feed_dict={x : X, y_ : Y} )
W_fc = sess.run(W_fc2)
b_fc = sess.run(b_fc2)
List_First_Hidden_Unit_1 = []
List_First_Hidden_Unit_2 = []
Average_First_Hidden_1 = 0
Average_First_Hidden_2 = 0
for k in np.arange(150):
if np.abs(activations[k,0]-0) < epsilon :
List_First_Hidden_Unit_1.append([k])
Average_First_Hidden_1 = Average_First_Hidden_1 + activations[k,0]
else :
List_First_Hidden_Unit_2.append([k])
Average_First_Hidden_2 = Average_First_Hidden_2 + activations[k,0]
Average_First_Hidden_1 = Average_First_Hidden_1/len(List_First_Hidden_Unit_1)
Average_First_Hidden_2 = Average_First_Hidden_2/len(List_First_Hidden_Unit_2)
Hidden_1_index = np.array(List_First_Hidden_Unit_1)
Hidden_2_index = np.array(List_First_Hidden_Unit_2)
List_Second_Hidden_Unit_1 = []
List_Second_Hidden_Unit_2 = []
Average_Second_Hidden_1 = 0
Average_Second_Hidden_2 = 0
for k in np.arange(150):
if np.abs(activations[k,1]-0) < epsilon :
List_Second_Hidden_Unit_1.append([k])
Average_Second_Hidden_1 = Average_Second_Hidden_1 + activations[k,1]
else :
List_Second_Hidden_Unit_2.append([k])
Average_Second_Hidden_2 = Average_Second_Hidden_2 + activations[k,1]
Average_Second_Hidden_1 = Average_Second_Hidden_1/len(List_Second_Hidden_Unit_1)
Average_Second_Hidden_2 = Average_Second_Hidden_2/len(List_Second_Hidden_Unit_2)
List_Third_Hidden_Unit_1 = []
List_Third_Hidden_Unit_2 = []
Average_Third_Hidden_1 = 0
Average_Third_Hidden_2 = 0
for k in np.arange(150):
if np.abs(activations[k,2]-0) < epsilon :
List_Third_Hidden_Unit_1.append([k])
Average_Third_Hidden_1 = Average_Third_Hidden_1 + activations[k,2]
else :
List_Third_Hidden_Unit_2.append([k])
Average_Third_Hidden_2 = Average_Third_Hidden_2 + activations[k,2]
Average_Third_Hidden_1 = Average_Third_Hidden_1/len(List_Third_Hidden_Unit_1)
Average_Third_Hidden_2 = Average_Third_Hidden_2/len(List_Third_Hidden_Unit_2)
# Enumerate all 2^3 binary on/off patterns of the three hidden units; this is equivalent to a discretized (binarized) hidden layer.
a = np.reshape(np.arange(8, dtype=np.uint8),(8,1))
b = np.unpackbits(a, axis=1)
b=b[:,5:8]
print(b)
index_x,index_y1 = np.where(b.T==1)
index_x,index_y2 = np.where(b.T==0)
new_activation = np.zeros((8,3))
new_activation[index_y1[0:4],0] = Average_First_Hidden_1
new_activation[index_y2[0:4],0] = Average_First_Hidden_2
new_activation[index_y1[4:8],1] = Average_Second_Hidden_1
new_activation[index_y2[4:8],1] = Average_Second_Hidden_2
new_activation[index_y1[8:12],2] = Average_Third_Hidden_1
new_activation[index_y2[8:12],2] = Average_Third_Hidden_2
digits = np.dot(new_activation,W_fc)+b_fc
Group_0 = []
Group_1 = []
Group_2 = []
Group_3 = []
Group_4 = []
Group_5 = []
Group_6 = []
Group_7 = []
for i in np.arange(150):
if [i] in List_First_Hidden_Unit_1 :
if [i] in List_Second_Hidden_Unit_1 :
if [i] in List_Third_Hidden_Unit_1 :
Group_0.append(i)
else:
Group_1.append(i)
else:
if [i] in List_Third_Hidden_Unit_1 :
Group_2.append(i)
else:
Group_3.append(i)
else:
if [i] in List_Second_Hidden_Unit_1 :
if [i] in List_Third_Hidden_Unit_1 :
Group_4.append(i)
else:
Group_5.append(i)
else:
if [i] in List_Third_Hidden_Unit_1 :
Group_6.append(i)
else:
Group_7.append(i)
Group_0 = tuple(Group_0)
Group_1 = tuple(Group_1)
Group_2 = tuple(Group_2)
Group_3 = tuple(Group_3)
Group_4 = tuple(Group_4)
Group_5 = tuple(Group_5)
Group_6 = tuple(Group_6)
Group_7 = tuple(Group_7)
for j in np.arange(4):
print('The third class', j, np.max(X[Group_0,j])) # the third class
print('The third class', j, np.min(X[Group_0,j]))
print('The second class', j, np.max(X[Group_1,j])) # the second class
print('The second class', j, np.min(X[Group_1,j]))
print('The first class', j, np.max(X[Group_7,j])) # The first class
print('The first class', j, np.min(X[Group_7,j]))
### Evaluate the Effectiveness of Rules
# The rule set (as implemented below):
# if petal length < 1.9: Iris setosa (label 0)
# elif petal length > 3.0 and petal width < 1.7 and petal length < 5.0: Iris versicolor (label 1)
# else: Iris virginica (label 2)
Label_by_rule = np.zeros((150,1))
for j in np.arange(150):
if X[j,2] < 1.9 :
Label_by_rule[j,0] = 0
elif (X[j,2] > 3.0) & (X[j,3] < 1.7) & (X[j,2] < 5.0):
Label_by_rule[j,0] = 1
else:
Label_by_rule[j,0] = 2
summm = 0
for j in np.arange(150):
if Label_by_rule[j,0] == y_temp[j]:
summm +=1
print(summm/150)
| 6,727
| 27.033333
| 96
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 9/main_lime.py
|
from __future__ import print_function
import sklearn
import sklearn.datasets
import sklearn.ensemble
import numpy as np
import lime
import lime.lime_tabular
### Before running this code, you need to install package lime
np.random.seed(1)
iris = sklearn.datasets.load_iris()
train, test, labels_train, labels_test = sklearn.model_selection.train_test_split(iris.data, iris.target, train_size=0.80)
rf = sklearn.ensemble.RandomForestClassifier(n_estimators=500)
rf.fit(train, labels_train)
print(sklearn.metrics.accuracy_score(labels_test, rf.predict(test)))
explainer = lime.lime_tabular.LimeTabularExplainer(train, feature_names=iris.feature_names, class_names=iris.target_names, discretize_continuous=True)
i = np.random.randint(0, test.shape[0])
exp = explainer.explain_instance(test[i], rf.predict_proba, num_features=4, top_labels=3)
exp.as_pyplot_figure()
| 864
| 28.827586
| 150
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 10/main_adjointMethods.py
|
import numpy as np
import numpy.random as npr
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
import tensorflow.contrib.eager as tfe
from main_neural_ode import NeuralODE
### tf.enable_eager_execution must be called at program startup. Please restart your kernel.
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams.update({'font.size': 18})
keras = tf.keras
tfe.enable_eager_execution()
### Initialize parameters
t = np.linspace(0, 25, 200)
h0 = tf.to_float([[1., 0.]])
W = tf.to_float([[-0.1, 1.0], [-0.2, -0.1]])
### Define the Computational Graph
class Lambda(tf.keras.Model):
def call(self, inputs, **kwargs):
t, h = inputs
return tf.matmul(h, W)
neural_ode = NeuralODE(Lambda(), t=t)
hN, states_history = neural_ode.forward(h0, return_states="numpy")
initial_path = np.concatenate(states_history)
### This is a function to plot the trajectory
def plot_trajectory(trajectories, fig=True):
if fig:
plt.figure(figsize=(5, 5))
for path in trajectories:
if type(path) == tuple:
c, label, path = path
plt.plot(*path.T, c, lw=2, label=label)
else:
plt.plot(*path.T, lw=2)
plt.axis("equal")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plot_trajectory([initial_path])
### Define model parameters
optimizer = tf.train.MomentumOptimizer(learning_rate=1e-2, momentum=0.95)
h0_var = tf.contrib.eager.Variable(h0)
hN_target = tf.to_float([[0., 0.5]])
### compute the gradient with respect to the h0 and W
def compute_gradients_and_update():
with tf.GradientTape() as g:
hN = neural_ode.forward(h0_var)
g.watch(hN)
loss = tf.reduce_sum((hN_target - hN)**2)
    dLoss = g.gradient(loss, hN)  # equivalent to 2 * (hN - hN_target)
h0_reconstruction, dfdh0, dWeights = neural_ode.backward(hN, dLoss)
optimizer.apply_gradients(zip([dfdh0], [h0_var]))
return loss
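# Note: only the gradient with respect to the initial state h0 is applied above;
# dWeights is returned by neural_ode.backward but left unused, so this experiment
# optimizes the initial condition rather than the dynamics matrix W.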
### Compile EAGER graph to static (this will be much faster)
compute_gradients_and_update = tfe.defun(compute_gradients_and_update)
### Show the Optimization Process
loss_history = []
for step in tqdm(range(201)):
with tf.GradientTape() as g:
hN = neural_ode.forward(h0_var)
g.watch(hN)
loss = tf.reduce_sum((hN_target - hN)**2)
    dLoss = g.gradient(loss, hN)  # equivalent to 2 * (hN - hN_target)
h0_reconstruction, dfdh0, dWeights = neural_ode.backward(hN, dLoss)
print(dWeights)
optimizer.apply_gradients(zip([dfdh0], [h0_var]))
if step % 50 == 0:
yN, states_history_model = neural_ode.forward(h0_var, return_states="numpy")
plot_trajectory([
("r", "initial", initial_path),
("g", "optimized", np.concatenate(states_history_model))])
plt.show()
print(dfdh0)
print(h0_var)
| 2,914
| 25.026786
| 92
|
py
|
IndependentEvaluation
|
IndependentEvaluation-main/Code For Figure 10/main_neural_ode.py
|
from typing import Optional, List
import numpy as np
import tensorflow as tf
from tensorflow.python.framework.ops import EagerTensor
import tensorflow.contrib.eager as tfe
keras = tf.keras
def zip_map(zipped, update_op):
return [update_op(*elems) for elems in zipped]
def euler_update(h_list, dh_list, dt):
return zip_map(zip(h_list, dh_list), lambda h, dh: h + tf.cast(dt, h.dtype) * dh)
def euler_step(func, dt, state):
return euler_update(state, func(state), dt)
def rk2_step(func, dt, state):
k1 = func(state)
k2 = func(euler_update(state, k1, dt))
return zip_map(zip(state, k1, k2),
lambda h, dk1, dk2: h + tf.cast(dt, h.dtype) * (dk1 + dk2) / 2)
def rk4_step(func, dt, state):
k1 = func(state)
k2 = func(euler_update(state, k1, dt / 2))
k3 = func(euler_update(state, k2, dt / 2))
k4 = func(euler_update(state, k3, dt))
return zip_map(
zip(state, k1, k2, k3, k4),
lambda h, dk1, dk2, dk3, dk4: h + tf.cast(dt, h.dtype) * (
dk1 + 2 * dk2 + 2 * dk3 + dk4) / 6,
)
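# The fixed-step solvers above implement the classical Runge-Kutta updates; for
# rk4_step: k1 = f(h), k2 = f(h + dt/2 * k1), k3 = f(h + dt/2 * k2), k4 = f(h + dt * k3),
# h_next = h + dt/6 * (k1 + 2*k2 + 2*k3 + k4). In this module `state` bundles the
# current time as its first element (its dynamics return a constant 1 for it), so
# time advances with the same update.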
class NeuralODE:
def __init__(
self, model: tf.keras.Model, t=np.linspace(0, 1, 40),
solver=rk4_step
):
self._t = t
self._model = model
self._solver = solver
self._deltas_t = t[1:] - t[:-1]
def forward(self, inputs: tf.Tensor, return_states: Optional[str] = None):
def _forward_dynamics(_state):
"""Used in solver _state == (time, tensor)"""
return [1.0, self._model(inputs=_state)]
states = []
def _append_state(_state):
tensors = _state[1]
if return_states == "numpy":
states.append(tensors.numpy())
elif return_states == "tf":
states.append(tensors)
with tf.name_scope("forward"):
t0 = tf.to_float(self._t[0])
state = [t0, inputs]
_append_state(state)
for dt in self._deltas_t:
state = self._solver(
func=_forward_dynamics, dt=tf.to_float(dt), state=state
)
_append_state(state)
outputs = state[1]
if return_states:
return outputs, states
return outputs
def _backward_dynamics(self, state):
t = state[0]
ht = state[1]
at = -state[2]
with tf.GradientTape() as g:
g.watch(ht)
ht_new = self._model(inputs=[t, ht])
gradients = g.gradient(
target=ht_new, sources=[ht] + self._model.weights,
output_gradients=at
)
return [1.0, ht_new, *gradients]
def backward(self, outputs: tf.Tensor,
output_gradients: Optional[tf.Tensor] = None):
with tf.name_scope("backward"):
grad_weights = [tf.zeros_like(w) for w in self._model.weights]
t0 = tf.to_float(self._t[-1])
if output_gradients is None:
output_gradients = tf.zeros_like(outputs)
state = [t0, outputs, output_gradients, *grad_weights]
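            # Adjoint method: the augmented state [t, h(t), a(t) = dL/dh(t), dL/dtheta]
            # is integrated backwards in time with the same fixed-step solver;
            # _backward_dynamics supplies da/dt = -a^T df/dh and
            # d(dL/dtheta)/dt = -a^T df/dtheta.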
for dt in self._deltas_t[::-1]:
state = self._solver(
self._backward_dynamics, dt=-tf.to_float(dt), state=state
)
inputs = state[1]
dLdInputs = state[2]
dLdWeights = state[3:]
return inputs, dLdInputs, dLdWeights
def forward_odeint(
self,
inputs: tf.Tensor,
rtol=1e-6,
atol=1e-6,
method='dopri5',
return_states: bool = False,
):
"""Do forward with adaptive solver"""
with tf.name_scope("forward_odeint"):
t = tf.to_float(self._t)
if not return_states:
t = tf.to_float([t[0], t[-1]])
outputs, info_dict = tf.contrib.integrate.odeint(
func=lambda _y, _t: self._model(inputs=(_t, _y)),
y0=inputs,
t=t,
rtol=rtol,
atol=atol,
method=method,
full_output=True,
)
if return_states:
return outputs, info_dict
return outputs[-1, ...], info_dict
def defun_neural_ode(node: NeuralODE) -> NeuralODE:
node.forward = tfe.defun(node.forward)
node.backward = tfe.defun(node.backward)
node.forward_odeint = tfe.defun(node.forward_odeint)
return node
| 4,469
| 28.215686
| 85
|
py
|
enpheeph
|
enpheeph-main/.noxfile.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import pathlib
import nox
CI_RUN = "CI" in os.environ
nox.needs_version = ">=2022.11.21"
nox.options.envdir = ".nox"
nox.options.default_venv_backend = "venv"
# nox.options.default_venv_backend = "mamba"
nox.options.error_on_external_run = True
nox.options.error_on_missing_interpreters = True
nox.options.reuse_existing_virtualenvs = False
# nox.options.sessions = ["test"]
nox.options.stop_on_first_error = True
def _select_cache_dir(session):
if not session.interactive or CI_RUN:
cache_dir = pathlib.Path(".logs")
else:
cache_dir = session.cache_dir
return cache_dir
@nox.session
@nox.parametrize(
"python",
["3.10"],
)
def test(session):
cache_dir = _select_cache_dir(session)
session.install("-e", ".[full-dev-cpu]")
# clean previous coverage
# if run here it would clear the other ones
# session.run("python", "-m", "coverage", "erase")
# run pytest with Jenkins and coverage output
session.run(
"python",
"-m",
"pytest",
"--cov=enpheeph",
"--cov-config=pyproject.toml",
f"--junitxml={str(cache_dir)}/tools/pytest/junit-{session.name}-{session.python}.xml",
*session.posargs,
env={"COVERAGE_FILE": f".coverage.{session.name}.{session.python}"},
)
session.notify("coverage")
@nox.session
@nox.parametrize(
"python",
["3.10"],
)
def coverage(session):
cache_dir = _select_cache_dir(session)
session.install("-e", ".[coverage]")
# combine and report the coverage, to generate output file
session.run("python", "-m", "coverage", "combine")
session.run("python", "-m", "coverage", "report")
session.run(
"python",
"-m",
"coverage",
"xml",
"-o",
f"{str(cache_dir)}/tools/coverage/coverage.xml",
)
# clean the coverage afterwards
session.run("python", "-m", "coverage", "erase")
@nox.session
@nox.parametrize(
"python",
["3.10"],
)
def pre_commit_linting(session):
session.install("-e", ".[pre-commit]")
# this will run also nox linting
# it should run the specified hook ids if posargs not empty, otherwise all of them
session.run(
"pre-commit", "run", "--hook-stage", "manual", "--verbose", *session.posargs
)
@nox.session
@nox.parametrize(
"python",
["3.10"],
)
def linting(session):
session.install("-e", ".")
session.install("-e", ".[dev-tools]")
session.run(
"mkinit",
"--recursive",
"--black",
"--lazy",
# "--verbose",
"src/enpheeph",
)
# it runs on the specified files if posargs is non-empty
files = session.posargs if session.posargs else ["."]
session.run(
"ruff",
"--fix",
"--exit-non-zero-on-fix",
*files,
env={"RUFF_CACHE_DIR": str(session.cache_dir / "ruff")},
)
session.run(
"black",
*files,
env={"BLACK_CACHE_DIR": str(session.cache_dir / "black")},
)
# session.run(
# "flake8",
# "--max-line-length=88",
# "--extend-ignore=E203",
# "--max-complexity=10",
# *session.posargs,
# )
# mypy is not ready yet
# session.run("mypy", "src/enpheeph", "tests")
| 4,819
| 27.52071
| 94
|
py
|
enpheeph
|
enpheeph-main/src/enpheeph/__init__.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
import importlib
import os
name_to_submod = {
func: mod for mod, funcs in submod_attrs.items()
for func in funcs
}
def __getattr__(name):
if name in submodules:
attr = importlib.import_module(
'{module_name}.{name}'.format(
module_name=module_name, name=name)
)
elif name in name_to_submod:
submodname = name_to_submod[name]
module = importlib.import_module(
'{module_name}.{submodname}'.format(
module_name=module_name, submodname=submodname)
)
attr = getattr(module, name)
else:
raise AttributeError(
'No {module_name} attribute {name}'.format(
module_name=module_name, name=name))
globals()[name] = attr
return attr
if os.environ.get('EAGER_IMPORT', ''):
for name in name_to_submod.values():
__getattr__(name)
for attrs in submod_attrs.values():
for attr in attrs:
__getattr__(attr)
return __getattr__
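# The generated __getattr__ above relies on PEP 562 (module-level __getattr__,
# Python >= 3.7): submodules and attributes are imported only on first access,
# unless the EAGER_IMPORT environment variable forces everything to load upfront.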
__getattr__ = lazy_import(
__name__,
submodules={
'handlers',
'helpers',
'injections',
'integrations',
'utils',
},
submod_attrs={
'handlers': [
'InjectionHandler',
'LibraryHandlerPluginABC',
'PyTorchHandlerPlugin',
'injectionhandler',
'libraryhandlerpluginabc',
'plugins',
'pytorchhandlerplugin',
],
'helpers': [
'FaultModelABC',
'ModelSummaryABC',
'ModelSummaryTorchinfo',
'abc',
'faultmodel',
'faultmodelabc',
'faultmodels',
'layersummaryabc',
'modelsummaryabc',
'modelsummarytorchinfo',
'plugins',
'sensitivityanalysis',
'summaries',
],
'injections': [
'AutoPyTorchMaskPlugin',
'CSVStoragePluginABC',
'CuPyPyTorchMaskPlugin',
'CustomBase',
'CustomBaseClass',
'DenseSparseOutputPyTorchFault',
'ExperimentRun',
'ExperimentRunBaseMixin',
'ExperimentRunProtocol',
'FPQuantizedOutputPyTorchFault',
'Fault',
'FaultABC',
'FaultBaseMixin',
'FaultProtocol',
'IndexingPlugin',
'IndexingPluginABC',
'Injection',
'InjectionABC',
'InjectionProtocol',
'LowLevelTorchMaskPluginABC',
'Monitor',
'MonitorABC',
'MonitorBaseMixin',
'MonitorProtocol',
'NumPyPyTorchMaskPlugin',
'OutputPyTorchFault',
'OutputPyTorchMonitor',
'PandasCSVStoragePlugin',
'PolymorphicMixin',
'PrunedDenseToSparseWeightPyTorchFault',
'PyTorchInjectionABC',
'PyTorchMaskMixin',
'PyTorchMonitorPostProcessorMixin',
'PyTorchSparseInterfaceMixin',
'PyTorchSparseInterfacePluginABC',
'PyTorchTensorObjectValidatorMixin',
'QuantizedOutputPyTorchFault',
'SNNOutputNorseFault',
'SQLStoragePluginABC',
'SQLiteStoragePlugin',
'Session',
'SessionBaseMixin',
'SessionProtocol',
'StoragePluginABC',
'WeightPyTorchFault',
'abc',
'autopytorchmaskplugin',
'csv',
'csvdataclasses',
'csvstorageplugin',
'csvstoragepluginabc',
'cupypytorchmaskplugin',
'densesparseoutputpytorchfault',
'faultabc',
'fix_pysqlite',
'fpquantizedoutputpytorchfault',
'indexing',
'indexingplugin',
'indexingpluginabc',
'injectionabc',
'lowleveltorchmaskpluginabc',
'mask',
'mixins',
'monitorabc',
'numpypytorchmaskplugin',
'outputpytorchfault',
'outputpytorchmonitor',
'plugins',
'pruneddensetosparseactivationpytorchfault',
'pruneddensetosparseweightpytorchfault',
'pysqlite_begin_emission_fix_on_connect',
'pytorchinjectionabc',
'pytorchmaskmixin',
'pytorchmonitorpostprocessormixin',
'pytorchquantizationmixin',
'pytorchsparseinterfacemixin',
'pytorchsparseinterfacepluginabc',
'pytorchtensorobjectvalidatormixin',
'quantizedoutputpytorchfault',
'set_sqlite_pragma',
'snnoutputnorsefault',
'sparse',
'sql',
'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses',
'sqlitestorageplugin',
'sqlstoragepluginabc',
'sqlutils',
'storage',
'storagepluginabc',
'storagetypings',
'torch_geometric_mean',
'utils',
'weightpytorchfault',
],
'integrations': [
'InjectionCallback',
'injectioncallback',
'pytorchlightning',
],
'utils': [
'ActiveDimensionIndexType',
'AnyIndexType',
'AnyMaskType',
'ArrayType',
'BaseInjectionLocation',
'BitFaultMaskInfo',
'BitFaultValue',
'BitIndexInfo',
'BitWidth',
'DimensionDictType',
'DimensionIndexType',
'DimensionLocationIndexType',
'DimensionLocationMaskType',
'DimensionType',
'Endianness',
'FaultLocation',
'FaultLocationMixin',
'FaultMaskOperation',
'FaultMaskValue',
'HandlerStatus',
'IDGenerator',
'IDGeneratorSubclass',
'Index1DType',
'IndexMultiDType',
'IndexTimeType',
'InjectionLocationABC',
'LocationMixin',
'LocationModuleNameMixin',
'LocationOptionalMixin',
'LowLevelMaskArrayType',
'Mask1DType',
'MaskMultiDType',
'ModelType',
'MonitorLocation',
'MonitorMetric',
'ParameterType',
'PathType',
'ShapeType',
'SkipIfErrorContextManager',
'TensorType',
'camel_to_snake',
'classes',
'compare_version',
'constants',
'dataclasses',
'enums',
'functions',
'get_object_library',
'imports',
'is_module_available',
'typings',
],
},
)
def __dir__():
return __all__
__all__ = ['ActiveDimensionIndexType', 'AnyIndexType', 'AnyMaskType',
'ArrayType', 'AutoPyTorchMaskPlugin', 'BaseInjectionLocation',
'BitFaultMaskInfo', 'BitFaultValue', 'BitIndexInfo', 'BitWidth',
'CSVStoragePluginABC', 'CuPyPyTorchMaskPlugin', 'CustomBase',
'CustomBaseClass', 'DenseSparseOutputPyTorchFault',
'DimensionDictType', 'DimensionIndexType',
'DimensionLocationIndexType', 'DimensionLocationMaskType',
'DimensionType', 'Endianness', 'ExperimentRun',
'ExperimentRunBaseMixin', 'ExperimentRunProtocol',
'FPQuantizedOutputPyTorchFault', 'Fault', 'FaultABC',
'FaultBaseMixin', 'FaultLocation', 'FaultLocationMixin',
'FaultMaskOperation', 'FaultMaskValue', 'FaultModelABC',
'FaultProtocol', 'HandlerStatus', 'IDGenerator',
'IDGeneratorSubclass', 'Index1DType', 'IndexMultiDType',
'IndexTimeType', 'IndexingPlugin', 'IndexingPluginABC', 'Injection',
'InjectionABC', 'InjectionCallback', 'InjectionHandler',
'InjectionLocationABC', 'InjectionProtocol',
'LibraryHandlerPluginABC', 'LocationMixin',
'LocationModuleNameMixin', 'LocationOptionalMixin',
'LowLevelMaskArrayType', 'LowLevelTorchMaskPluginABC', 'Mask1DType',
'MaskMultiDType', 'ModelSummaryABC', 'ModelSummaryTorchinfo',
'ModelType', 'Monitor', 'MonitorABC', 'MonitorBaseMixin',
'MonitorLocation', 'MonitorMetric', 'MonitorProtocol',
'NumPyPyTorchMaskPlugin', 'OutputPyTorchFault',
'OutputPyTorchMonitor', 'PandasCSVStoragePlugin', 'ParameterType',
'PathType', 'PolymorphicMixin',
'PrunedDenseToSparseWeightPyTorchFault', 'PyTorchHandlerPlugin',
'PyTorchInjectionABC', 'PyTorchMaskMixin',
'PyTorchMonitorPostProcessorMixin', 'PyTorchSparseInterfaceMixin',
'PyTorchSparseInterfacePluginABC',
'PyTorchTensorObjectValidatorMixin', 'QuantizedOutputPyTorchFault',
'SNNOutputNorseFault', 'SQLStoragePluginABC', 'SQLiteStoragePlugin',
'Session', 'SessionBaseMixin', 'SessionProtocol', 'ShapeType',
'SkipIfErrorContextManager', 'StoragePluginABC', 'TensorType',
'WeightPyTorchFault', 'abc', 'autopytorchmaskplugin',
'camel_to_snake', 'classes', 'compare_version', 'constants', 'csv',
'csvdataclasses', 'csvstorageplugin', 'csvstoragepluginabc',
'cupypytorchmaskplugin', 'dataclasses',
'densesparseoutputpytorchfault', 'enums', 'faultabc', 'faultmodel',
'faultmodelabc', 'faultmodels', 'fix_pysqlite',
'fpquantizedoutputpytorchfault', 'functions', 'get_object_library',
'handlers', 'helpers', 'imports', 'indexing', 'indexingplugin',
'indexingpluginabc', 'injectionabc', 'injectioncallback',
'injectionhandler', 'injections', 'integrations',
'is_module_available', 'layersummaryabc', 'libraryhandlerpluginabc',
'lowleveltorchmaskpluginabc', 'mask', 'mixins', 'modelsummaryabc',
'modelsummarytorchinfo', 'monitorabc', 'numpypytorchmaskplugin',
'outputpytorchfault', 'outputpytorchmonitor', 'plugins',
'pruneddensetosparseactivationpytorchfault',
'pruneddensetosparseweightpytorchfault',
'pysqlite_begin_emission_fix_on_connect', 'pytorchhandlerplugin',
'pytorchinjectionabc', 'pytorchlightning', 'pytorchmaskmixin',
'pytorchmonitorpostprocessormixin', 'pytorchquantizationmixin',
'pytorchsparseinterfacemixin', 'pytorchsparseinterfacepluginabc',
'pytorchtensorobjectvalidatormixin', 'quantizedoutputpytorchfault',
'sensitivityanalysis', 'set_sqlite_pragma', 'snnoutputnorsefault',
'sparse', 'sql', 'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses', 'sqlitestorageplugin', 'sqlstoragepluginabc',
'sqlutils', 'storage', 'storagepluginabc', 'storagetypings',
'summaries', 'torch_geometric_mean', 'typings', 'utils',
'weightpytorchfault']
# </AUTOGEN_INIT>
| 12,581
| 37.477064
| 88
|
py
|
enpheeph
|
enpheeph-main/src/enpheeph/__about__.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# for defaultdict
import collections
# use importlib.metadata to gather the info from the package information
# these are saved in setup.py until we can use pyproject.toml
import importlib.metadata
import time
# these two variables will not be imported with import * as they start with _
_this_year = time.strftime("%Y")
_start_year = "2020"
# version, to be accessed by setuptools
__version__ = "0.1.0a1"
# metadata taken from the package through importlib.metadata.metadata
try:
_metadata = importlib.metadata.metadata("enpheeph")
_get_all_metadata = _metadata.get_all
except importlib.metadata.PackageNotFoundError:
# using defaultdict with None, we can use the same object to return
# the None value for all the attributes
_metadata = collections.defaultdict(lambda: None)
def _get_all_metadata(x):
return []
# here are all the values of the attributes from the package info
name = _metadata["Name"]
version = _metadata["Version"]
classifiers = _get_all_metadata("Classifier")
authors = _get_all_metadata("Author")
author_emails = _get_all_metadata("Author-email")
| 2,648
| 37.955882
| 77
|
py
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/libraryhandlerpluginabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.injections.abc.injectionabc
import enpheeph.utils.typings
class LibraryHandlerPluginABC(abc.ABC):
@abc.abstractmethod
def library_setup(
self,
model: enpheeph.utils.typings.ModelType,
active_injections: typing.List[
enpheeph.injections.abc.injectionabc.InjectionABC
],
) -> enpheeph.utils.typings.ModelType:
pass
@abc.abstractmethod
def library_teardown(
self,
model: enpheeph.utils.typings.ModelType,
active_injections: typing.List[
enpheeph.injections.abc.injectionabc.InjectionABC
],
) -> enpheeph.utils.typings.ModelType:
pass
| 2,244
| 35.803279
| 77
|
py
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/lowleveltorchmaskpluginabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.utils.enums
import enpheeph.utils.typings
# while 3rd party library should be before custom libraries, we move it down to avoid
# flake8 complaining, since it is a conditional import
if typing.TYPE_CHECKING:
import torch
class LowLevelTorchMaskPluginABC(abc.ABC):
@abc.abstractmethod
def to_torch(
self, array: enpheeph.utils.typings.LowLevelMaskArrayType
) -> "torch.Tensor":
pass
@abc.abstractmethod
def from_torch(
self, tensor: "torch.Tensor"
) -> enpheeph.utils.typings.LowLevelMaskArrayType:
pass
@abc.abstractmethod
def to_bitwise_type(
self, array: enpheeph.utils.typings.LowLevelMaskArrayType
) -> enpheeph.utils.typings.LowLevelMaskArrayType:
pass
@abc.abstractmethod
def to_target_type(
self,
array: enpheeph.utils.typings.LowLevelMaskArrayType,
target: enpheeph.utils.typings.LowLevelMaskArrayType,
) -> enpheeph.utils.typings.LowLevelMaskArrayType:
pass
@abc.abstractmethod
def make_mask_array(
self,
int_mask: int,
# this fill value is already final, as is the int mask
int_fill_value: int,
shape: typing.Sequence[int],
torch_placeholder: "torch.Tensor",
mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
) -> enpheeph.utils.typings.LowLevelMaskArrayType:
pass
| 3,069
| 35.117647
| 85
|
py
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/indexingpluginabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.utils.constants
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
import enpheeph.utils.typings
class IndexingPluginABC(abc.ABC):
active_dimension_index: typing.Optional[
typing.List[enpheeph.utils.typings.ActiveDimensionIndexType]
]
dimension_dict: enpheeph.utils.typings.DimensionDictType
# to select a set of dimensions to be used as active when selecting tensor indices
# by default no dimension is considered active
@abc.abstractmethod
def select_active_dimensions(
self,
dimensions: typing.Sequence[enpheeph.utils.enums.DimensionType],
# if True, we will move all the indices so that the first index is 0
# and the last is -1
autoshift_to_boundaries: bool = False,
# if True we fill the empty indices with the filler
# if False we will skip them
fill_empty_index: bool = True,
# the filler to use, defaults to : for a single dimension,
# which is slice(None, None)
filler: typing.Any = slice(None, None),
) -> typing.List[enpheeph.utils.typings.ActiveDimensionIndexType]:
pass
# to reset the active dimensions to the empty dimension dict
@abc.abstractmethod
def reset_active_dimensions(self) -> None:
pass
# to join indices following the order provided by the active_dimension dict
@abc.abstractmethod
def join_indices(
self,
dimension_indices: enpheeph.utils.typings.DimensionLocationIndexType,
) -> enpheeph.utils.typings.AnyIndexType:
pass
# to filter a size/shape array depending on the active dimension index
# by selecting only the dimensions with the enum
@abc.abstractmethod
def filter_dimensions(
self,
# a normal size/shape array
dimensions: typing.Sequence[int],
) -> typing.Tuple[int, ...]:
pass
| 3,462
| 37.910112
| 86
|
py
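
To make the indexing contract above more concrete, here is a toy, self-contained sketch; it deliberately avoids the enpheeph enums and typings, so every name in it is an assumption. Active dimensions are selected first, join_indices expands per-dimension indices into a full indexing tuple using slice(None, None) as the filler for inactive dimensions, and filter_dimensions keeps only the sizes of the active dimensions.

import typing


# hypothetical stand-in for a concrete IndexingPluginABC implementation
class ToyIndexingPlugin:
    def __init__(self, dimension_positions: typing.Dict[str, int]) -> None:
        # e.g. {"batch": 0, "channel": 1, "height": 2, "width": 3}
        self.dimension_positions = dimension_positions
        self.active_dimensions: typing.List[str] = []

    def select_active_dimensions(
        self, dimensions: typing.Sequence[str]
    ) -> typing.List[str]:
        self.active_dimensions = list(dimensions)
        return self.active_dimensions

    def reset_active_dimensions(self) -> None:
        self.active_dimensions = []

    def join_indices(
        self,
        dimension_indices: typing.Dict[str, typing.Any],
        n_dimensions: int,
        filler: typing.Any = slice(None, None),
    ) -> typing.Tuple[typing.Any, ...]:
        # inactive dimensions get the filler, active ones get their index
        index: typing.List[typing.Any] = [filler] * n_dimensions
        for name in self.active_dimensions:
            index[self.dimension_positions[name]] = dimension_indices[name]
        return tuple(index)

    def filter_dimensions(
        self, dimensions: typing.Sequence[int]
    ) -> typing.Tuple[int, ...]:
        return tuple(
            dimensions[self.dimension_positions[name]]
            for name in self.active_dimensions
        )


# usage: index only the channel dimension of an NCHW activation
plugin = ToyIndexingPlugin({"batch": 0, "channel": 1, "height": 2, "width": 3})
plugin.select_active_dimensions(["channel"])
print(plugin.join_indices({"channel": 5}, n_dimensions=4))
print(plugin.filter_dimensions((8, 16, 32, 32)))  # (16,)
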
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/sqlstoragepluginabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import datetime
import typing
import sqlalchemy
import sqlalchemy.dialects.sqlite
import sqlalchemy.ext.compiler
import sqlalchemy.orm
import sqlalchemy.sql.expression
import sqlalchemy.types
import enpheeph.injections.plugins.storage.sql.utils.sqlutils
import enpheeph.injections.plugins.storage.abc.storagepluginabc
import enpheeph.injections.plugins.storage.utils.storagetypings
import enpheeph.utils.dataclasses
import enpheeph.utils.typings
import enpheeph.injections.plugins.storage.sql.utils.sqldataclasses as sql_data_classes
class SQLStoragePluginABC(
enpheeph.injections.plugins.storage.abc.storagepluginabc.StoragePluginABC,
):
experiment_id: typing.Optional[int]
engine: sqlalchemy.engine.Engine
session_id: typing.Optional[int]
@classmethod
@abc.abstractmethod
def init_engine(
cls, db_url: str, extra_engine_args: typing.Dict[str, typing.Any]
) -> sqlalchemy.engine.Engine:
pass
def get_experiments(
self,
id_: typing.Optional[int] = None,
running: typing.Optional[bool] = None,
completed: typing.Optional[bool] = None,
start_time: typing.Optional[datetime.datetime] = None,
total_duration: typing.Optional[datetime.timedelta] = None,
golden_run_flag: typing.Optional[bool] = None,
injection_locations: typing.Optional[
typing.Sequence[enpheeph.utils.dataclasses.InjectionLocationABC]
] = None,
        # in the future we will also add model_info
) -> typing.List[
enpheeph.injections.plugins.storage.utils.storagetypings.ExperimentRunProtocol,
]:
# first we open the querying session on our engine
with sqlalchemy.orm.Session(self.engine) as session:
# this is the statement for selecting the ExperimentRun
stmt = sqlalchemy.select(sql_data_classes.ExperimentRun)
# filtering by attributes is easily doable with a query on the properties
if id_ is not None:
stmt = stmt.where(sql_data_classes.ExperimentRun.id_ == id_)
if running is not None:
stmt = stmt.where(sql_data_classes.ExperimentRun.running == running)
if completed is not None:
stmt = stmt.where(sql_data_classes.ExperimentRun.completed == completed)
if start_time is not None:
stmt = stmt.where(
sql_data_classes.ExperimentRun.start_time == start_time
)
if total_duration is not None:
stmt = stmt.where(
sql_data_classes.ExperimentRun.total_duration == total_duration
)
if golden_run_flag is not None:
stmt = stmt.where(
sql_data_classes.ExperimentRun.golden_run_flag == golden_run_flag
)
# if we filter by injection locations
if injection_locations is not None:
# we need to create the aliases,
# one for each injection in the input list
inj_aliases = [
sqlalchemy.orm.aliased(sql_data_classes.Injection)
for _ in range(len(injection_locations))
]
# then we join the aliases,
# so that each one of them is bound to a different
# instance of Injection that are connected to ExperimentRun
for inj_al in inj_aliases:
stmt = stmt.join(inj_al, sql_data_classes.ExperimentRun.injections)
# then we add the conditions
for inj_al, inj in zip(inj_aliases, injection_locations):
stmt = stmt.where(inj_al.location == inj)
# we return all the instances of the classes
return typing.cast(
typing.List[
                    # black converts it to a very long line, so we disable it
                    # fmt: off
                    enpheeph.injections.plugins.storage.utils.
                    storagetypings.ExperimentRunProtocol,
                    # fmt: on
],
session.execute(stmt).scalars().all(),
)
def create_experiment(
self,
injection_locations: typing.Sequence[
enpheeph.utils.dataclasses.InjectionLocationABC
],
# in the future also model_info
running: bool = True,
golden_run_flag: bool = False,
# the id for the golden run
# if None we skip this part
golden_run_id: typing.Optional[int] = None,
start_time: typing.Optional[datetime.datetime] = None,
extra_experiment_info: typing.Optional[
typing.Dict[typing.Any, typing.Any]
] = None,
) -> int:
# check to avoid creating an experiment on top of the existing one
if self.experiment_id is not None:
raise ValueError("To create an experiment the current one must be closed")
if self.session_id is None:
raise ValueError(
"To create an experiment you need to create a Session first"
)
# we open a new Session
with sqlalchemy.orm.Session(self.engine) as session:
# we create a new ExperimentRun
            # running is set by default, but the running argument can disable it
experiment = sql_data_classes.ExperimentRun(
running=running,
completed=False,
golden_run_flag=golden_run_flag,
start_time=start_time,
extra_experiment_info=extra_experiment_info,
# we set the ID for the Session the experiment is on
session_id=self.session_id,
)
# we insert all the injection locations
# depending on the class instance we create different objects
for inj_loc in injection_locations:
if isinstance(inj_loc, enpheeph.utils.dataclasses.FaultLocation):
sql_inj_loc = sql_data_classes.Fault(
location=inj_loc, internal_id=inj_loc.unique_instance_id
)
elif isinstance(inj_loc, enpheeph.utils.dataclasses.MonitorLocation):
sql_inj_loc = sql_data_classes.Monitor(
location=inj_loc, internal_id=inj_loc.unique_instance_id
)
else:
raise ValueError(f"Unsupported injection, {inj_loc}")
experiment.injections.append(sql_inj_loc)
session.add(experiment)
session.commit()
# ID is available only after committing
self.experiment_id = experiment.id_
# this must be done outside the session, as experiment is not added yet
# to the database
if golden_run_id is not None:
self.add_experiment_golden_run(golden_run_id)
return self.experiment_id
def create_session(
self,
extra_session_info: typing.Optional[typing.Dict[typing.Any, typing.Any]] = None,
) -> int:
        # checks to avoid creating a Session while an experiment is still open or
        # another Session already exists
if self.experiment_id is not None:
raise ValueError(
"To create a Session the current experiment must be closed"
)
if self.session_id is not None:
raise ValueError("To create a Session the current one must be closed")
# we open a new Session
with sqlalchemy.orm.Session(self.engine) as session:
# we create a new Session
sess = sql_data_classes.Session(
extra_session_info=extra_session_info,
)
session.add(sess)
session.commit()
# ID is available only after committing
self.session_id = sess.id_
return self.session_id
def complete_experiment(
self,
total_duration: typing.Optional[datetime.timedelta] = None,
) -> None:
if self.experiment_id is None:
raise ValueError("There is no experiment to be closed")
with sqlalchemy.orm.Session(self.engine) as session:
# we get the experiment from the session
experiment = (
session.execute(
sqlalchemy.select(sql_data_classes.ExperimentRun).where(
sql_data_classes.ExperimentRun.id_ == self.experiment_id
)
)
.scalars()
.one()
) # we use .one() as there will be only one match
experiment.completed = True
experiment.total_duration = total_duration
experiment.running = False
session.add(experiment)
session.commit()
self.experiment_id = None
def complete_session(
self,
) -> None:
if self.experiment_id is not None:
raise ValueError(
"To close the current Session the current Experiment must be closed"
)
if self.session_id is None:
raise ValueError("There is no current Session to close")
with sqlalchemy.orm.Session(self.engine) as session:
# we get the Session from the engine
sess = (
session.execute(
sqlalchemy.select(sql_data_classes.Session).where(
sql_data_classes.Session.id_ == self.session_id
)
)
.scalars()
.one()
) # we use .one() as there will be only one match
            # **NOTE**: for now we do not do anything here, but it might be useful
            # in the future
            # ideally the Session should represent all the information that is
            # common between Experiments from the same run, but different
            # experiments in the same run might also use different models/datasets,
            # so more details are needed
session.add(sess)
session.commit()
        self.session_id = None
def add_experiment_metrics(
self, metrics: typing.Dict[typing.Any, typing.Any]
) -> None:
if self.experiment_id is None:
raise ValueError("There is no experiment to be closed")
with sqlalchemy.orm.Session(self.engine) as session:
# we get the experiment from the session
experiment = (
session.execute(
sqlalchemy.select(sql_data_classes.ExperimentRun).where(
sql_data_classes.ExperimentRun.id_ == self.experiment_id
)
)
.scalars()
.one()
) # we use .one() as there will be only one match
experiment.metrics = metrics
session.add(experiment)
session.commit()
def add_experiment_golden_run(self, golden_run_id: int) -> None:
if self.experiment_id is None:
raise ValueError("There is no experiment to work on")
with sqlalchemy.orm.Session(self.engine) as session:
# we get the experiment from the session
experiment = (
session.execute(
sqlalchemy.select(sql_data_classes.ExperimentRun).where(
sql_data_classes.ExperimentRun.id_ == self.experiment_id
)
)
.scalars()
.one()
) # we use .one() as there will be only one match
# we cannot get the golden run directly as that would be a circular
# dependency
# so we simply update the ID
experiment.golden_run_id = golden_run_id
session.add(experiment)
session.commit()
def add_payload(
self,
location: enpheeph.utils.dataclasses.InjectionLocationABC,
payload: typing.Dict[typing.Any, typing.Any],
) -> None:
# we create a new session on the engine
with sqlalchemy.orm.Session(self.engine) as session:
# we query the session to get the corresponding element from the current
# experiment
stmt = (
sqlalchemy.select(sql_data_classes.Injection)
.select_from(sql_data_classes.ExperimentRun)
.join(sql_data_classes.Injection)
.where(
sql_data_classes.Injection.experiment_run_id == self.experiment_id
)
.where(sql_data_classes.Injection.location == location)
.where(
sql_data_classes.Injection.internal_id
== location.unique_instance_id
)
)
inj = session.execute(stmt).scalars().one()
inj.payload = payload
session.add(inj)
session.commit()
| 14,542
| 37.070681
| 88
|
py
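
As a hedged sketch of the single abstract method above: a SQLite-backed subclass only needs to build the SQLAlchemy engine from the URL, since sessions, experiments and payload handling are inherited. The class name is an assumption, the inheritance is left out so the sketch stays self-contained, and the real subclass would also create the tables for the declarative classes, which is omitted here. Once a concrete subclass exists, the inherited query helpers can be used directly, e.g. storage.get_experiments(completed=True, golden_run_flag=True).

import typing

import sqlalchemy


# hypothetical sketch; a real plugin would subclass SQLStoragePluginABC
class SQLiteStoragePluginSketch:
    experiment_id: typing.Optional[int] = None
    session_id: typing.Optional[int] = None

    def __init__(self, db_url: str, **extra_engine_args: typing.Any) -> None:
        self.engine = self.init_engine(db_url, extra_engine_args)

    @classmethod
    def init_engine(
        cls, db_url: str, extra_engine_args: typing.Dict[str, typing.Any]
    ) -> sqlalchemy.engine.Engine:
        # e.g. db_url = "sqlite:///results.sqlite"
        return sqlalchemy.create_engine(db_url, **extra_engine_args)


storage = SQLiteStoragePluginSketch("sqlite:///:memory:", echo=False)
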
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/pytorchinjectionabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.injections.abc.injectionabc
# to avoid flake8 complaining, torch is imported here after our own modules, even
# though as a third-party library it would normally come before them
if typing.TYPE_CHECKING:
import torch
class PyTorchInjectionABC(enpheeph.injections.abc.injectionabc.InjectionABC):
handle: typing.Optional["torch.utils.hooks.RemovableHandle"]
@abc.abstractmethod
def setup(
self,
module: "torch.nn.Module",
) -> "torch.nn.Module":
pass
    # we define the teardown here as it should be common to all injections
    # if some injections require particular care, it can be overridden, as long as
    # the signature stays the same
def teardown(
self,
module: "torch.nn.Module",
) -> "torch.nn.Module":
        # safely get the handle attribute, which may not have been defined yet
if getattr(self, "handle", None) is not None:
typing.cast(
"torch.utils.hooks.RemovableHandle",
self.handle,
).remove()
self.handle = None
return module
@property
@abc.abstractmethod
def module_name(self) -> str:
pass
| 2,730
| 34.934211
| 86
|
py
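
A hedged sketch of a concrete injection in the spirit of PyTorchInjectionABC: setup registers a forward hook that replaces the module output (here it simply zeroes the activations), the handle is kept so teardown can remove it, and module_name identifies the target layer. Only the PyTorch hook API is taken from the library; the class name, the zeroing behaviour and the standalone (non-subclassing) form are assumptions made to keep the sketch self-contained.

import typing

import torch
import torch.utils.hooks


# hypothetical injection; a real one would subclass PyTorchInjectionABC and carry an
# InjectionLocation, both omitted here for brevity
class ZeroOutputInjectionSketch:
    def __init__(self, module_name: str) -> None:
        self._module_name = module_name
        self.handle: typing.Optional[torch.utils.hooks.RemovableHandle] = None

    @property
    def module_name(self) -> str:
        return self._module_name

    def setup(self, module: torch.nn.Module) -> torch.nn.Module:
        # returning a tensor from a forward hook replaces the module output
        def hook(mod, inputs, output):
            return torch.zeros_like(output)

        self.handle = module.register_forward_hook(hook)
        return module

    def teardown(self, module: torch.nn.Module) -> torch.nn.Module:
        # mirrors the default teardown of the ABC: remove the hook if it is set
        if self.handle is not None:
            self.handle.remove()
            self.handle = None
        return module


# usage on a toy layer
layer = torch.nn.Linear(4, 4)
injection = ZeroOutputInjectionSketch("layer")
injection.setup(layer)
assert torch.equal(layer(torch.ones(1, 4)), torch.zeros(1, 4))
injection.teardown(layer)
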
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/pytorchsparseinterfacepluginabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
# import typing
# import enpheeph.utils.enums
# import enpheeph.utils.typings
# while 3rd party library should be before custom libraries, we move it down to avoid
# flake8 complaining, since it is a conditional import
# if typing.TYPE_CHECKING:
# import torch
class PyTorchSparseInterfacePluginABC(abc.ABC):
# @abc.abstractmethod
# def to_torch(
# self, array: enpheeph.utils.typings.LowLevelMaskArrayType
# ) -> "torch.Tensor":
# pass
# @abc.abstractmethod
# def from_torch(
# self, tensor: "torch.Tensor"
# ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
# pass
# @abc.abstractmethod
# def to_bitwise_type(
# self, array: enpheeph.utils.typings.LowLevelMaskArrayType
# ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
# pass
# @abc.abstractmethod
# def to_target_type(
# self,
# array: enpheeph.utils.typings.LowLevelMaskArrayType,
# target: enpheeph.utils.typings.LowLevelMaskArrayType,
# ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
# pass
# @abc.abstractmethod
# def make_mask_array(
# self,
# int_mask: int,
# # this fill value is already final, as is the int mask
# int_fill_value: int,
# shape: typing.Sequence[int],
# torch_placeholder: "torch.Tensor",
# mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
# mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
# ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
# pass
pass
| 3,162
| 35.356322
| 85
|
py
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/injectionabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import enpheeph.utils.dataclasses
import enpheeph.utils.typings
class InjectionABC(abc.ABC):
location: enpheeph.utils.dataclasses.InjectionLocationABC
@abc.abstractmethod
def setup(
self,
module: enpheeph.utils.typings.ModelType,
) -> enpheeph.utils.typings.ModelType:
return NotImplemented
@abc.abstractmethod
def teardown(
self,
module: enpheeph.utils.typings.ModelType,
) -> enpheeph.utils.typings.ModelType:
return NotImplemented
| 2,066
| 35.910714
| 77
|
py
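
A minimal sketch of a subclass satisfying the InjectionABC interface above, assuming the enpheeph package is importable under the module paths used in the file itself; the class name and the idea of a deliberately inert injection (useful, for instance, as a golden-run placeholder) are assumptions.

import enpheeph.injections.abc.injectionabc
import enpheeph.utils.dataclasses
import enpheeph.utils.typings


# hypothetical no-op injection that leaves the model untouched
class NoOpInjectionSketch(enpheeph.injections.abc.injectionabc.InjectionABC):
    def __init__(
        self, location: enpheeph.utils.dataclasses.InjectionLocationABC
    ) -> None:
        self.location = location

    def setup(
        self, module: enpheeph.utils.typings.ModelType
    ) -> enpheeph.utils.typings.ModelType:
        return module

    def teardown(
        self, module: enpheeph.utils.typings.ModelType
    ) -> enpheeph.utils.typings.ModelType:
        return module
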
|
enpheeph
|
enpheeph-main/src/enpheeph/abc/faultabc.py
|
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import enpheeph.injections.abc.injectionabc
class FaultABC(enpheeph.injections.abc.injectionabc.InjectionABC):
pass
| 1,662
| 41.641026
| 77
|
py
|