# code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
# |---|---|---|
#!/usr/bin/env python3
# pyEZmock.py: this file is part of the pyEZmock package.
#
# pyEZmock: Python wrapper of Effective Zel'dovich approximation mock (EZmock).
#
# Github repository:
# https://github.com/cheng-zhao/pyEZmock
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import json
class pyEZmock:
"""
Python wrapper of Effective Zel'dovich approximation mock (EZmock).
"""
def __init__(self, workdir, restore=False,
exe='/global/u2/z/zhaoc/work/pyEZmock/bin/EZmock.sh',
pk_exe='/global/u2/z/zhaoc/work/pyEZmock/bin/POWSPEC.sh',
xi_exe='/global/u2/z/zhaoc/work/pyEZmock/bin/FCFC_2PT_BOX.sh',
bk_exe='/global/u2/z/zhaoc/work/pyEZmock/bin/BISPEC_BOX.sh'):
"""
Initialise the `pyEZmock` class.
Parameters
----------
workdir: str
Working directory for generating EZmocks.
restore: bool, optional
Indicate whether to scan `workdir` and restore previous runs.
exe: str, optional
Location of the EZmock executable.
pk_exe: str, optional
Location of the powspec executable.
xi_exe: str, optional
Location of the FCFC executable for periodic boxes.
bk_exe: str, optional
Location of the bispec executable.
"""
# Set paths and filenames
self.workdir = workdir
self.exe = exe
self.pk_exe = pk_exe
self.xi_exe = xi_exe
self.bk_exe = bk_exe
self._conf = 'config.json' # file for storing configurations
self._script = 'run_job.sh' # job script file
self._done = 'DONE' # indicate whether the job is finished
# Initialise EZmock parameters
self._param = dict(
boxsize = None,
num_grid = None,
redshift = None,
num_tracer = None,
pdf_base = None,
dens_scat = None,
rand_motion = None,
dens_cut = None,
seed = 1,
omega_m = None,
z_init = 0,
init_pk = None
)
# Initialise clustering settings
self._pkconf = dict( # Power spectrum (pk) settings
rspace = False, # indicate whether to compute real-space pk
zspace = False, # indicate whether to compute redshift-space pk
rell = [], # real-space pk multipoles to be evaluated
zell = [], # redshift-space pk multipoles to be evaluated
ngrid = 256, # grid size for power spectra evaluation
kmax = 0.3, # maximum k for the power spectra
dk = 0.01, # bin size of k for the power spectra
rref = None, # reference real-space power spectrum
rcol = [], # reference real-space pk multipole columns
zref = None, # reference redshift-space power spectrum
zcol = [] # reference redshift-space pk multipole columns
)
self._xiconf = dict( # 2-point correlation function (xi) settings
rspace = False, # indicate whether to compute real-space xi
zspace = False, # indicate whether to compute redshift-space xi
rell = [], # real-space xi multipoles to be evaluated
zell = [], # redshift-space xi multipoles to be evaluated
rmax = 150, # maximum separation for the 2PCFs
dr = 5, # separation bin size for the 2PCFs
nmu = 60, # number of mu bins
rref = None, # reference real-space xi
rcol = [], # reference real-space xi multipole columns
zref = None, # reference redshift-space xi
zcol = [] # reference redshift-space xi multipole columns
)
self._bkconf = dict( # Bispectrum (bk) settings
rspace = False, # indicate whether to compute real-space bk
zspace = False, # indicate whether to compute redshift-space bk
ngrid = 256, # grid size for bispectra evaluation
k1 = [None, None], # range of the k1 vector for the bispectrum
k2 = [None, None], # range of the k2 vector for the bispectrum
nbin = 20, # number of output bins for the bispectrum
rref = None, # reference real-space bispectrum
rcol = None, # column of the reference real-space bk
zref = None, # reference redshift-space bispectrum
zcol = None # column of the reference redshift-space bk
)
# Default columns for clustering measurements
self._pkcol = 5
self._xicol = 3
self._bkcol = 4
# Default plotting styles
self._alpha = 0.8 # transparency for historical curves
self._ls_ref = 'k:' # line style for the reference
self._ls_curr = 'k-' # line style for the current run
# Other parameters
self._history = [] # history of evaluated parameter sets
self._odir = None # output directory for the current run
self._bname = None # catalogue basename for the current run
# Load previous runs as histories
if restore: self.restore()
def set_param(self, boxsize, num_grid, redshift, num_tracer,
pdf_base=None, dens_scat=None, rand_motion=None, dens_cut=None,
seed=1, omega_m=0.307115, z_init=0,
init_pk='/global/u2/z/zhaoc/work/pyEZmock/data/PlanckDM.linear.pk'):
"""
Set parameters for EZmock evaluation.
Parameters
----------
boxsize: float
Side length of the cubic periodic box.
num_grid: int
Number of grids per side for the density field.
redshift: float
Final redshift of the catalogue.
num_tracer: int
Number of tracers to be generated.
pdf_base: float, optional
Base number for PDF mapping.
dens_scat: float, optional
Density modification parameter.
rand_motion: float, optional
Parameter for the random local motion.
dens_cut: float, optional
The critical density.
seed: int, optional
Random seed.
omega_m: float, optional
Density parameter at z = 0.
z_init: float, optional
Redshift of the initial power spectrum.
init_pk: str, optional
Initial power spectrum.
Reference
---------
https://arxiv.org/abs/2007.08997
"""
self._param['boxsize'] = float(boxsize)
self._param['num_grid'] = int(num_grid)
self._param['redshift'] = float(redshift)
self._param['num_tracer'] = int(num_tracer)
if pdf_base is not None: self._param['pdf_base'] = float(pdf_base)
if dens_scat is not None: self._param['dens_scat'] = float(dens_scat)
if rand_motion is not None: self._param['rand_motion'] = float(rand_motion)
if dens_cut is not None: self._param['dens_cut'] = float(dens_cut)
self._param['seed'] = int(seed)
if init_pk is not None:
if not os.path.isfile(init_pk):
raise FileNotFoundError(f"`init_pk` does not exist: {init_pk}")
self._param['init_pk'] = os.path.abspath(init_pk)
self._param['omega_m'] = float(omega_m)
if self._param['omega_m'] <= 0 or self._param['omega_m'] > 1:
raise ValueError('`omega_m` must be between 0 and 1')
self._param['z_init'] = float(z_init)
if self._param['z_init'] < 0:
raise ValueError('`z_init` must be non-negative')
def set_clustering(self,
pk='none', pk_r_ell=[0], pk_z_ell=[0,2], pk_grid=256, pk_kmax=0.3,
pk_dk=0.01, pk_r_ref=None, pk_r_ref_col=[5], pk_z_ref=None,
pk_z_ref_col=[5,6],
xi='none', xi_r_ell=[0], xi_z_ell=[0,2], xi_rmax=150, xi_dr=5, xi_nmu=60,
xi_r_ref=None, xi_r_ref_col=[4], xi_z_ref=None, xi_z_ref_col=[4,5],
bk='none', bk_grid=256, bk_k1=[0.04,0.06], bk_k2=[0.09,0.11], bk_nbin=20,
bk_r_ref=None, bk_r_ref_col=4, bk_z_ref=None, bk_z_ref_col=4):
"""
Set configurations for clustering measurements.
Parameters
----------
pk: str, optional
Specify the power spectra to be computed. The string has to be one of
'real', 'redshift', 'both', or 'none'. 'real' and 'redshift' indicate
computing real- and redshift-space power spectrum multipoles
respectively; 'both' and 'none' indicate computing both or none of them.
pk_r_ell: tuple of ints, optinal
Legendre multipoles of real-space power spectrum to be evaluated.
pk_z_ell: tuple of ints, optinal
Legendre multipoles of redshift-space power spectrum to be evaluated.
pk_grid: int, optional
Grid size for power spectra evaluations.
pk_kmax: float, optional
Maximum k for the power spectra.
pk_dk: float, optional
Bin size of k for the power spectra.
pk_r_ref: str, optional
File for the reference real-space power spectrum.
The first column must be k.
pk_r_ref_col: tuple of ints, optional
Columns (counting from 0) of the reference real-space power spectrum
multipoles.
pk_z_ref: str, optional
File for the reference redshift-space power spectrum.
The first column must be k.
pk_z_ref_col: tuple of ints, optional
Columns (counting from 0) of the reference redshift-space power spectrum
multipoles.
xi: str, optional
Specify the 2-point correlation functions (2PCFs) to be computed.
The string has to be one of 'real', 'redshift', 'both', or 'none'.
xi_r_ell: tuple of ints, optinal
Legendre multipoles of real-space 2PCF to be evaluated.
xi_z_ell: tuple of ints, optinal
Legendre multipoles of redshift-space 2PCF to be evaluated.
xi_rmax: float, optional
Maximum separation for the 2PCFs.
xi_dr: float, optional
Bin size of separation for the 2PCFs.
xi_nmu: int, optional
Number of mu bins for the 2PCFs.
xi_r_ref: str, optional
File for the reference real-space 2PCF. The first column must be r.
xi_r_ref_col: tuple of ints, optional
Columns (counting from 0) of the reference real-space 2PCF multipoles.
xi_z_ref: str, optional
File for the reference redshift-space 2PCF. The first column must be r.
xi_z_ref_col: tuple of ints, optional
Columns (counting from 0) of the reference redshift-space 2PCF
multipoles.
bk: str, optional
Specify the bispectra to be computed. The string has to be one of
'real', 'redshift', 'both', or 'none'.
bk_grid: int, optional
Grid size for bispectra evaluations.
bk_k1: (float, float), optional
Range of the first k vector for the bispectra.
bk_k2: (float, float), optional
Range of the second k vector for the bispectra.
bk_nbin: int, optional
Number of output bins for the bispectra.
bk_r_ref: str, optional
File for the reference real-space bispectrum.
The first column must be theta.
bk_r_ref_col: int, optional
Column (counting from 0) of the reference real-space bispectrum.
bk_z_ref: str, optional
File for the reference redshift-space bispectrum.
The first column must be theta.
bk_z_ref_col: int, optional
Column (counting from 0) of the reference redshift-space bispectrum.
"""
if self._pk_exe is not None:
if pk not in ('real', 'redshift', 'both', 'none'):
raise ValueError("`pk` must be 'real', 'redshift', 'both' or 'none'")
self._pkconf['rspace'] = (pk == 'real') or (pk == 'both')
self._pkconf['zspace'] = (pk == 'redshift') or (pk == 'both')
if pk != 'none':
self._pkconf['ngrid'] = int(pk_grid)
self._pkconf['kmax'] = float(pk_kmax)
self._pkconf['dk'] = float(pk_dk)
if self._pkconf['rspace']:
try:
if len(pk_r_ell) == 0: raise ValueError('`pk_r_ell` is empty')
except TypeError: pk_r_ell = [pk_r_ell]
self._pkconf['rell'] = pk_r_ell
if pk_r_ref is not None:
if not os.path.isfile(pk_r_ref):
raise FileNotFoundError(f'`pk_r_ref` does not exist: {pk_r_ref}')
if len(pk_r_ell) != len(pk_r_ref_col):
raise ValueError('unequal length of `pk_r_ell` and `pk_r_ref_col`')
self._pkconf['rref'] = os.path.abspath(pk_r_ref)
self._pkconf['rcol'] = pk_r_ref_col
if self._pkconf['zspace']:
try:
if len(pk_z_ell) == 0: raise ValueError('`pk_z_ell` is empty')
except TypeError: pk_z_ell = [pk_z_ell]
self._pkconf['zell'] = pk_z_ell
if pk_z_ref is not None:
if not os.path.isfile(pk_z_ref):
raise FileNotFoundError(f'`pk_z_ref` does not exist: {pk_z_ref}')
if len(pk_z_ell) != len(pk_z_ref_col):
raise ValueError('unequal length of `pk_z_ell` and `pk_z_ref_col`')
self._pkconf['zref'] = os.path.abspath(pk_z_ref)
self._pkconf['zcol'] = pk_z_ref_col
else:
self._pkconf['rspace'] = self._pkconf['zspace'] = False
if self._xi_exe is not None:
if xi not in ('real', 'redshift', 'both', 'none'):
raise ValueError("`xi` must be 'real', 'redshift', 'both' or 'none'")
self._xiconf['rspace'] = (xi == 'real') or (xi == 'both')
self._xiconf['zspace'] = (xi == 'redshift') or (xi == 'both')
if xi != 'none':
self._xiconf['rmax'] = float(xi_rmax)
self._xiconf['dr'] = float(xi_dr)
self._xiconf['nmu'] = int(xi_nmu)
if self._xiconf['rspace']:
try:
if len(xi_r_ell) == 0: raise ValueError('`xi_r_ell` is empty')
except TypeError: xi_r_ell = [xi_r_ell]
self._xiconf['rell'] = xi_r_ell
if xi_r_ref is not None:
if not os.path.isfile(xi_r_ref):
raise FileNotFoundError(f'`xi_r_ref` does not exist: {xi_r_ref}')
if len(xi_r_ell) != len(xi_r_ref_col):
raise ValueError('unequal length of `xi_r_ell` and `xi_r_ref_col`')
self._xiconf['rref'] = os.path.abspath(xi_r_ref)
self._xiconf['rcol'] = xi_r_ref_col
if self._xiconf['zspace']:
try:
if len(xi_z_ell) == 0: raise ValueError('`xi_z_ell` is empty')
except TypeError: xi_z_ell = [xi_z_ell]
self._xiconf['zell'] = xi_z_ell
if xi_z_ref is not None:
if not os.path.isfile(xi_z_ref):
raise FileNotFoundError(f'`xi_z_ref` does not exist: {xi_z_ref}')
if len(xi_z_ell) != len(xi_z_ref_col):
raise ValueError('unequal length of `xi_z_ell` and `xi_z_ref_col`')
self._xiconf['zref'] = os.path.abspath(xi_z_ref)
self._xiconf['zcol'] = xi_z_ref_col
else:
self._xiconf['rspace'] = self._xiconf['zspace'] = False
if self._bk_exe is not None:
if bk not in ('real', 'redshift', 'both', 'none'):
raise ValueError("`bk` must be 'real', 'redshift', 'both' or 'none'")
self._bkconf['rspace'] = (bk == 'real') or (bk == 'both')
self._bkconf['zspace'] = (bk == 'redshift') or (bk == 'both')
if bk != 'none':
self._bkconf['ngrid'] = int(bk_grid)
if len(bk_k1) != 2: raise ValueError('invalid length of `bk_k1`')
if len(bk_k2) != 2: raise ValueError('invalid length of `bk_k2`')
self._bkconf['k1'] = bk_k1
self._bkconf['k2'] = bk_k2
self._bkconf['nbin'] = int(bk_nbin)
if self._bkconf['rspace']:
if bk_r_ref is not None:
if not os.path.isfile(bk_r_ref):
raise FileNotFoundError(f'`bk_r_ref` does not exist: {bk_r_ref}')
self._bkconf['rref'] = os.path.abspath(bk_r_ref)
self._bkconf['rcol'] = int(bk_r_ref_col)
if self._bkconf['zspace']:
if bk_z_ref is not None:
if not os.path.isfile(bk_z_ref):
raise FileNotFoundError(f'`bk_z_ref` does not exist: {bk_z_ref}')
self._bkconf['zref'] = os.path.abspath(bk_z_ref)
self._bkconf['zcol'] = int(bk_z_ref_col)
else:
self._bkconf['rspace'] = self._bkconf['zspace'] = False
def run(self, nthreads, queue=None, walltime=30, partition='haswell',
        boxsize=None, num_grid=None, redshift=None, num_tracer=None,
        pdf_base=None, dens_scat=None, rand_motion=None, dens_cut=None,
        seed=None, omega_m=None, z_init=None, init_pk=None):
    """
    Run the job for EZmock generation and clustering measurements.

    Parameters
    ----------
    nthreads: int
        Number of OpenMP threads used for the run.
    queue: str, optional
        Queue of the job to be submitted to (e.g. 'debug' and 'regular').
        If not provided, the job script has to be run manually.
    walltime: int, optional
        Limit on the total run time (in minutes) of the job.
    partition: str, optional
        Specify the architecture of the nodes for the job.
        It has to be 'haswell' or 'knl'.
    The rest of the parameters are the same as those for `set_param`.

    Return
    ------
    Filename of the job script.
    """
    import copy
    from subprocess import Popen, PIPE
    nthreads = int(nthreads)
    if nthreads <= 0: raise ValueError(f'invalid `nthreads`: {nthreads:d}')
    if queue is not None:
        if walltime is None:
            raise ValueError('`walltime` is required when `queue` is set')
        if partition != 'haswell' and partition != 'knl':
            raise ValueError("`partition` must be 'haswell' or 'knl'")
    # Update EZmock parameters with any values supplied to this call;
    # keep a deep copy of the previous set for the history record below.
    previous_param = copy.deepcopy(self._param)
    if boxsize is not None: self._param['boxsize'] = float(boxsize)
    if num_grid is not None: self._param['num_grid'] = int(num_grid)
    if redshift is not None: self._param['redshift'] = float(redshift)
    if num_tracer is not None: self._param['num_tracer'] = int(num_tracer)
    if pdf_base is not None: self._param['pdf_base'] = float(pdf_base)
    if dens_scat is not None: self._param['dens_scat'] = float(dens_scat)
    if rand_motion is not None: self._param['rand_motion'] = float(rand_motion)
    if dens_cut is not None: self._param['dens_cut'] = float(dens_cut)
    if seed is not None: self._param['seed'] = int(seed)
    if omega_m is not None:
        self._param['omega_m'] = float(omega_m)
        if self._param['omega_m'] <= 0 or self._param['omega_m'] > 1:
            raise ValueError('`omega_m` must be between 0 and 1')
    if z_init is not None:
        self._param['z_init'] = float(z_init)
        if self._param['z_init'] < 0:
            raise ValueError('`z_init` must be non-negative')
    if init_pk is not None:
        if not os.path.isfile(init_pk):
            raise FileNotFoundError(f'`init_pk` does not exist: {init_pk}')
        self._param['init_pk'] = os.path.abspath(init_pk)
    if None in self._param.values():
        raise ValueError('please set EZmock parameters via `set_param` or `run`')
    # Create the path for the current run
    self._bname = self._get_bname(self._param)
    self._odir = f'{self._workdir}/{self._bname}'
    if not os.path.isdir(self._odir):
        try: os.mkdir(self._odir)
        except OSError: raise IOError(f'cannot create directory: {self._odir}')
    # Check if the job exists already
    skip_run = False    # whether the existing job makes a rerun unnecessary
    par = pkconf = xiconf = bkconf = None
    script = f'{self._odir}/{self._script}'
    conf = f'{self._odir}/{self._conf}'
    done = f'{self._odir}/{self._done}'
    if os.path.isfile(script):
        try:
            with open(conf, 'r') as f:
                par, pkconf, xiconf, bkconf = json.load(f)
            if par == self._param and pkconf == self._pkconf and \
                    xiconf == self._xiconf and bkconf == self._bkconf:
                # Identical settings: the job is either done or still pending.
                if os.path.isfile(done): print('The job has been finished already')
                else: self._warn(('the job exists but have not been finished, please '
                    'wait if the job has already been submitted, or submit/run '
                    f'the script manually: \n{script}'))
                skip_run = True
            elif par is not None and par != self._param:
                # Parameters changed: back up the previous configuration file.
                os.rename(conf, f'{conf}.old')
                self._warn(('existing EZmock will be overwritten, due to the '
                    'change of parameters. The previous settings are moved to \n'
                    f'{conf}.old'))
        except FileNotFoundError:
            self._warn(('existing EZmock run detected but the settings are '
                'not found, rerun anyway'))
    if not skip_run and os.path.isfile(done): os.remove(done)
    # Check the previous set of parameters, and record as history if applicable
    if not None in previous_param.values():
        prev_bname = self._get_bname(previous_param)
        if not os.path.isfile(f'{self._workdir}/{prev_bname}/{self._done}'):
            self._warn('the previous run may have not been finished')
        elif previous_param not in self._history:
            self._history.append(previous_param)
    if skip_run: return script
    # Generate contents of the job script file.
    jobstr = ('#!/bin/bash\n#SBATCH -n 1\n#SBATCH -L SCRATCH\n'
        f'#SBATCH -o {self._odir}/stdout_%j.txt\n'
        f'#SBATCH -e {self._odir}/stderr_%j.txt\n')
    if queue is not None:
        jobstr += (f'#SBATCH -q {queue}\n'
            f'#SBATCH -C {partition}\n#SBATCH -c {nthreads:d}\n'
            f'#SBATCH -t {int(walltime):d}\n')
    jobstr += f'\nexport OMP_NUM_THREADS={nthreads:d}\n\ncd {self._odir}\n\n'
    # Skip steps whose outputs already exist with unchanged settings.
    run_mock = True
    ofile = f'{self._odir}/EZmock_{self._bname}.dat'
    if par == self._param and os.path.isfile(ofile):
        self._warn(f'EZmock will not be run, as file exists: {ofile}')
        run_mock = False
    else:   # remeasure clustering measurements if rerunning EZmock
        pkconf = xiconf = bkconf = None
    run_rsd = self._pkconf['zspace'] or self._xiconf['zspace'] or \
        self._bkconf['zspace']
    ofile = f'{self._odir}/EZmock_{self._bname}_RSD.dat'
    if run_rsd and par == self._param and os.path.isfile(ofile): run_rsd = False
    if run_mock or run_rsd:
        jobstr += self._mock_cmd(self._bname, mock=run_mock, rsd=run_rsd)
    if self._pkconf['rspace']:
        ofile = f'{self._odir}/PK_EZmock_{self._bname}.dat'
        if pkconf != self._pkconf or not os.path.isfile(ofile):
            jobstr += self._pk_cmd(rsd=False)
    if self._pkconf['zspace']:
        ofile = f'{self._odir}/PK_EZmock_{self._bname}_RSD.dat'
        if pkconf != self._pkconf or not os.path.isfile(ofile):
            jobstr += self._pk_cmd(rsd=True)
    if self._xiconf['rspace']:
        ofile = f'{self._odir}/2PCF_EZmock_{self._bname}.dat'
        if xiconf != self._xiconf or not os.path.isfile(ofile):
            jobstr += self._xi_cmd(rsd=False)
    if self._xiconf['zspace']:
        ofile = f'{self._odir}/2PCF_EZmock_{self._bname}_RSD.dat'
        if xiconf != self._xiconf or not os.path.isfile(ofile):
            jobstr += self._xi_cmd(rsd=True)
    if self._bkconf['rspace']:
        ofile = f'{self._odir}/BK_EZmock_{self._bname}.dat'
        if bkconf != self._bkconf or not os.path.isfile(ofile):
            jobstr += self._bk_cmd(rsd=False)
    if self._bkconf['zspace']:
        ofile = f'{self._odir}/BK_EZmock_{self._bname}_RSD.dat'
        if bkconf != self._bkconf or not os.path.isfile(ofile):
            jobstr += self._bk_cmd(rsd=True)
    jobstr += f'echo 1 > {self._done}\n'
    # Save the job script and configurations
    with open(script, 'w') as f: f.write(jobstr)
    with open(conf, 'w') as f:
        json.dump([self._param, self._pkconf, self._xiconf, self._bkconf], f,
            indent=2)
    # Submit the job if applicable
    if queue is None:
        print(('Job script generated. Please run the following command manually:'
            f'\nbash {script}'))
    else:
        process = Popen(['/usr/bin/sbatch',script], shell=False,
            stdout=PIPE, stderr=PIPE, text=True)
        sts = process.wait()
        for line in process.stdout: print(line, end='')
        for line in process.stderr: print(line, end='')
        if sts != 0:
            self._warn(('job submission failed. Please resubmit the script '
                f'manually: \nsbatch {script} \nor run the script directly: \n'
                f'bash {script}'))
    return script
def plot(self, fname=None):
    """
    Plot the clustering measurements of the previous runs, the references,
    and the current run.

    Parameters
    ----------
    fname: str, optional
        If fname is provided, the plot is saved as this file.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    # Determine the number of subplots: one panel per pk/xi multipole,
    # plus one panel per requested bispectrum.
    nplot = 0
    if self._pkconf['rspace']: nplot += len(self._pkconf['rell'])
    if self._pkconf['zspace']: nplot += len(self._pkconf['zell'])
    if self._xiconf['rspace']: nplot += len(self._xiconf['rell'])
    if self._xiconf['zspace']: nplot += len(self._xiconf['zell'])
    if self._bkconf['rspace']: nplot += 1
    if self._bkconf['zspace']: nplot += 1
    if nplot == 0:
        raise ValueError(('no clustering measurements specified, '
            'please set via `set_clustering`'))
    # Create subplots on a grid
    ncol = 3
    if nplot <= 4: ncol = nplot
    nrow = int(np.ceil(nplot / ncol))
    figw = min(15, nplot * 5)
    figh = 3 * nrow
    plt.figure(figsize=(figw,figh))
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    ax = [plt.subplot2grid((nrow,ncol), (i//ncol,i%ncol)) for i in range(nplot)]
    for a in ax: a.grid(ls=':', c='dimgray', alpha=0.6)
    iplot = 0
    # Plot power spectra multipoles
    if self._pkconf['rspace']:
        # Plot histories first
        for j, hist in enumerate(self._history):
            bname = self._get_bname(hist)
            ifile = f'{self._workdir}/{bname}/PK_EZmock_{bname}.dat'
            if os.path.isfile(ifile):
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._pkconf['rell']):
                    ax[iplot+i].plot(d[0], d[self._pkcol+i]*d[0]**1.5,
                        alpha=self._alpha, label=f'history {j:d}')
        # Plot the reference
        if self._pkconf['rref'] is not None:
            d = np.loadtxt(self._pkconf['rref'], unpack=True)
            for i, c in enumerate(self._pkconf['rcol']):
                ax[iplot+i].plot(d[0], d[c]*d[0]**1.5, self._ls_ref, label='ref')
        # Plot the current results
        if not None in self._param.values():
            ifile = f'{self._workdir}/{self._bname}/PK_EZmock_{self._bname}.dat'
            try:
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._pkconf['rell']):
                    ax[iplot+i].plot(d[0], d[self._pkcol+i]*d[0]**1.5,
                        self._ls_curr, label='current')
            except FileNotFoundError:
                self._warn('the current job may have not been finished')
        else: self._warn('the current job may have not been initialised')
        # Set axis labels and ranges
        for i, ell in enumerate(self._pkconf['rell']):
            ax[iplot+i].set_xlabel(r'$k$ (real space)')
            ax[iplot+i].set_ylabel(r'$k^{{1.5}} P_{:d} (k)$'.format(ell))
            ax[iplot+i].set_xlim(0, self._pkconf['kmax'])
        iplot += len(self._pkconf['rell'])
    if self._pkconf['zspace']:
        # Plot histories first
        for j, hist in enumerate(self._history):
            bname = self._get_bname(hist)
            ifile = f'{self._workdir}/{bname}/PK_EZmock_{bname}_RSD.dat'
            if os.path.isfile(ifile):
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._pkconf['zell']):
                    ax[iplot+i].plot(d[0], d[self._pkcol+i]*d[0]**1.5,
                        alpha=self._alpha, label=f'history {j:d}')
        # Plot the reference
        if self._pkconf['zref'] is not None:
            d = np.loadtxt(self._pkconf['zref'], unpack=True)
            for i, c in enumerate(self._pkconf['zcol']):
                ax[iplot+i].plot(d[0], d[c]*d[0]**1.5, self._ls_ref, label='ref')
        # Plot the current results
        if not None in self._param.values():
            ifile = f'{self._workdir}/{self._bname}/PK_EZmock_{self._bname}_RSD.dat'
            try:
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._pkconf['zell']):
                    ax[iplot+i].plot(d[0], d[self._pkcol+i]*d[0]**1.5,
                        self._ls_curr, label='current')
            except FileNotFoundError:
                self._warn('the current job may have not been finished')
        else: self._warn('the current job may have not been initialised')
        # Set axis labels and ranges
        for i, ell in enumerate(self._pkconf['zell']):
            ax[iplot+i].set_xlabel(r'$k$ (redshift space)')
            ax[iplot+i].set_ylabel(r'$k^{{1.5}} P_{:d} (k)$'.format(ell))
            ax[iplot+i].set_xlim(0, self._pkconf['kmax'])
        iplot += len(self._pkconf['zell'])
    # Plot 2PCF multipoles
    if self._xiconf['rspace']:
        # Plot histories first
        for j, hist in enumerate(self._history):
            bname = self._get_bname(hist)
            ifile = f'{self._workdir}/{bname}/2PCF_EZmock_{bname}.dat'
            if os.path.isfile(ifile):
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._xiconf['rell']):
                    ax[iplot+i].plot(d[0], d[self._xicol+i]*d[0]**2,
                        alpha=self._alpha, label=f'history {j:d}')
        # Plot the reference
        if self._xiconf['rref'] is not None:
            d = np.loadtxt(self._xiconf['rref'], unpack=True)
            for i, c in enumerate(self._xiconf['rcol']):
                ax[iplot+i].plot(d[0], d[c]*d[0]**2, self._ls_ref, label='ref')
        # Plot the current results
        if not None in self._param.values():
            ifile = f'{self._workdir}/{self._bname}/2PCF_EZmock_{self._bname}.dat'
            try:
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._xiconf['rell']):
                    ax[iplot+i].plot(d[0], d[self._xicol+i]*d[0]**2,
                        self._ls_curr, label='current')
            except FileNotFoundError:
                self._warn('the current job may have not been finished')
        else: self._warn('the current job may have not been initialised')
        # Set axis labels and ranges
        for i, ell in enumerate(self._xiconf['rell']):
            ax[iplot+i].set_xlabel(r'$r$ (real space)')
            ax[iplot+i].set_ylabel(r'$r^{{2}} \xi_{:d} (r)$'.format(ell))
            ax[iplot+i].set_xlim(0, self._xiconf['rmax'])
        iplot += len(self._xiconf['rell'])
    if self._xiconf['zspace']:
        # Plot histories first
        for j, hist in enumerate(self._history):
            bname = self._get_bname(hist)
            ifile = f'{self._workdir}/{bname}/2PCF_EZmock_{bname}_RSD.dat'
            if os.path.isfile(ifile):
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._xiconf['zell']):
                    ax[iplot+i].plot(d[0], d[self._xicol+i]*d[0]**2,
                        alpha=self._alpha, label=f'history {j:d}')
        # Plot the reference
        if self._xiconf['zref'] is not None:
            d = np.loadtxt(self._xiconf['zref'], unpack=True)
            for i, c in enumerate(self._xiconf['zcol']):
                ax[iplot+i].plot(d[0], d[c]*d[0]**2, self._ls_ref, label='ref')
        # Plot the current results
        if not None in self._param.values():
            bname = self._bname
            ifile = f'{self._workdir}/{bname}/2PCF_EZmock_{bname}_RSD.dat'
            try:
                d = np.loadtxt(ifile, unpack=True)
                for i, ell in enumerate(self._xiconf['zell']):
                    ax[iplot+i].plot(d[0], d[self._xicol+i]*d[0]**2,
                        self._ls_curr, label='current')
            except FileNotFoundError:
                self._warn('the current job may have not been finished')
        else: self._warn('the current job may have not been initialised')
        # Set axis labels and ranges
        for i, ell in enumerate(self._xiconf['zell']):
            ax[iplot+i].set_xlabel(r'$s$ (redshift space)')
            ax[iplot+i].set_ylabel(r'$s^{{2}} \xi_{:d} (s)$'.format(ell))
            ax[iplot+i].set_xlim(0, self._xiconf['rmax'])
        iplot += len(self._xiconf['zell'])
    # Plot bispectra (x-axis in units of pi)
    if self._bkconf['rspace']:
        # Plot histories first
        for j, hist in enumerate(self._history):
            bname = self._get_bname(hist)
            ifile = f'{self._workdir}/{bname}/BK_EZmock_{bname}.dat'
            if os.path.isfile(ifile):
                d = np.loadtxt(ifile, unpack=True)
                ax[iplot].plot(d[0]/np.pi, d[self._bkcol], alpha=self._alpha,
                    label=f'history {j:d}')
        # Plot the reference
        if self._bkconf['rref'] is not None:
            d = np.loadtxt(self._bkconf['rref'], unpack=True)
            ax[iplot].plot(d[0]/np.pi, d[self._bkconf['rcol']], self._ls_ref,
                label='ref')
        # Plot the current results
        if not None in self._param.values():
            ifile = f'{self._workdir}/{self._bname}/BK_EZmock_{self._bname}.dat'
            try:
                d = np.loadtxt(ifile, unpack=True)
                ax[iplot].plot(d[0]/np.pi, d[self._bkcol], self._ls_curr,
                    label='current')
            except FileNotFoundError:
                self._warn('the current job may have not been finished')
        else: self._warn('the current job may have not been initialised')
        # Set axis labels
        ax[iplot].set_xlabel(r'$\theta_{12} / \pi$ (real space)')
        ax[iplot].set_ylabel(r'$B (\theta_{12})$')
        iplot += 1
    if self._bkconf['zspace']:
        # Plot histories first
        for j, hist in enumerate(self._history):
            bname = self._get_bname(hist)
            ifile = f'{self._workdir}/{bname}/BK_EZmock_{bname}_RSD.dat'
            if os.path.isfile(ifile):
                d = np.loadtxt(ifile, unpack=True)
                ax[iplot].plot(d[0]/np.pi, d[self._bkcol], alpha=self._alpha,
                    label=f'history {j:d}')
        # Plot the reference
        if self._bkconf['zref'] is not None:
            d = np.loadtxt(self._bkconf['zref'], unpack=True)
            ax[iplot].plot(d[0]/np.pi, d[self._bkconf['zcol']], self._ls_ref,
                label='ref')
        # Plot the current results
        if not None in self._param.values():
            ifile = f'{self._workdir}/{self._bname}/BK_EZmock_{self._bname}_RSD.dat'
            try:
                d = np.loadtxt(ifile, unpack=True)
                ax[iplot].plot(d[0]/np.pi, d[self._bkcol], self._ls_curr,
                    label='current')
            except FileNotFoundError:
                self._warn('the current job may have not been finished')
        else: self._warn('the current job may have not been initialised')
        # Set axis labels
        ax[iplot].set_xlabel(r'$\theta_{12} / \pi$ (redshift space)')
        ax[iplot].set_ylabel(r'$B (\theta_{12})$')
        iplot += 1
    ax[-1].legend()
    if fname is not None: plt.savefig(fname)
def massive_jobs(self, nthreads, seeds, clustering=False, queue=None,
walltime=30, partition='haswell'):
"""
Create the job script for massive production of EZmock,
with the current set of parameters.
Parameters
----------
nthreads: int
Number of OpenMP threads used for each realisation.
seeds: tuple of int
Random seeds for the massive production.
clustering: bool, optional
Indicate whether to perform clustering measurements.
queue: str, optional
Queue of the job to be submitted to (e.g. 'regular')
walltime: int, optional
Limit on the total run time (in minutes) of the job.
partition: str, optional
Specify the architecture of the nodes for the job.
It has to be 'haswell' or 'knl'.
Return
------
Filename of the job script.
"""
import copy
nthreads = int(nthreads)
if nthreads <= 0: raise ValueError(f'invalid `nthreads`: {nthreads:d}')
if queue is not None:
if walltime is None:
raise ValueError('`walltime` is required when `queue` is set')
if partition != 'haswell' and partition != 'knl':
raise ValueError("`partition` must be 'haswell' or 'knl'")
if (len(seeds) <= 1): raise TypeError('seeds must be a list')
if None in self._param.values():
raise ValueError('please set EZmock parameters via `set_param` or `run`')
jobs = []
# Generate the script for each seed.
for s in seeds:
if s <= 0:
self._warn(f'omitting non-positive seed: {s:d}')
continue
params = copy.deepcopy(self._param)
params['seed'] = s
bname = self._get_bname(params)
odir = f'{self._workdir}/{bname}'
if not os.path.isdir(odir):
try: os.mkdir(odir)
except: raise IOError(f'cannot create directory: {odir}')
# Check if the job exists already
par = pkconf = xiconf = bkconf = None
script = f'{odir}/{self._script}'
conf = f'{odir}/{self._conf}'
done = f'{odir}/{self._done}'
if os.path.isfile(script):
try:
with open(conf, 'r') as f:
par, pkconf, xiconf, bkconf = json.load(f)
if par == params and os.path.isfile(done) and \
(clustering == False or (pkconf == self._pkconf and \
xiconf == self._xiconf and bkconf == self._bkconf)):
self._warn(f'omitting existing job for seed {s:d}: {script}')
continue
if par is not None and par != params:
os.rename(conf, f'{conf.old}')
self._warn(('existing EZmock with seed {s:d} will be overwritten, '
'due to the change of parameters. The previous settings '
f'are moved to {conf}.old'))
except FileNotFoundError:
self._warn(('existing EZmock with seed {s:d} detected but the '
'settings are not found, rerun anyway'))
if os.path.isfile(done): os.remove(done)
jobstr = (f'#!/bin/bash\n\nexport OMP_NUM_THREADS={nthreads:d}\n\n'
f'cd {odir}\n\n')
run_mock = True
ofile = f'{odir}/EZmock_{bname}.dat'
if par == params and os.path.isfile(ofile): run_mock = False
else: pkconf = xiconf = bkconf = None
run_rsd = clustering and (self._pkconf['zspace'] or \
self._xiconf['zspace'] or self._bkconf['zspace'])
ofile = f'{odir}/EZmock_{bname}_RSD.dat'
if run_rsd and par == params and os.path.isfile(ofile): run_rsd = False
if run_mock or run_rsd:
jobstr += self._mock_cmd(bname, mock=run_mock, rsd=run_rsd,
params=params)
if clustering:
if self._pkconf['rspace']:
ofile = f'{odir}/PK_EZmock_{bname}.dat'
if pkconf != self._pkconf or not os.path.isfile(ofile):
jobstr += self._pk_cmd(bname=bname, rsd=False)
if self._pkconf['zspace']:
ofile = f'{odir}/PK_EZmock_{bname}_RSD.dat'
if pkconf != self._pkconf or not os.path.isfile(ofile):
jobstr += self._pk_cmd(bname=bname, rsd=True)
if self._xiconf['rspace']:
ofile = f'{odir}/2PCF_EZmock_{bname}.dat'
if xiconf != self._xiconf or not os.path.isfile(ofile):
jobstr += self._xi_cmd(bname=bname, rsd=False)
if self._xiconf['zspace']:
ofile = f'{odir}/2PCF_EZmock_{bname}_RSD.dat'
if xiconf != self._xiconf or not os.path.isfile(ofile):
jobstr += self._xi_cmd(bname=bname, rsd=True)
if self._bkconf['rspace']:
ofile = f'{odir}/BK_EZmock_{bname}.dat'
if bkconf != self._bkconf or not os.path.isfile(ofile):
jobstr += self._bk_cmd(bname=bname, rsd=False)
if self._bkconf['zspace']:
ofile = f'{odir}/BK_EZmock_{bname}_RSD.dat'
if bkconf != self._bkconf or not os.path.isfile(ofile):
jobstr += self._bk_cmd(bname=bname, rsd=True)
jobstr += f'echo 1 > {self._done}\n'
# Save the script and configurations
with open(script, 'w') as f: f.write(jobstr)
with open(conf, 'w') as f:
json.dump([params, self._pkconf, self._xiconf, self._bkconf], f,
indent=2)
jobs.append(script)
# Generate the job script for all realisations.
njob = len(jobs)
if njob < 1: raise ValueError('no valid job is found')
script = f'{self._workdir}/submit_mass_production.sh'
jobstr = '#!/bin/bash\n'
if queue is not None:
jobstr += (f'#SBATCH -n {njob:d}\n#SBATCH -L SCRATCH\n'
f'#SBATCH -o {self._workdir}/massive_stdout_%j.txt\n'
f'#SBATCH -e {self._workdir}/massive_stderr_%j.txt\n')
jobstr += (f'#SBATCH -q {queue}\n#SBATCH -C {partition}\n'
f'#SBATCH -c {nthreads:d}\n#SBATCH -t {int(walltime):d}\n')
for j in jobs:
jobstr += f'srun -n 1 -c {nthreads:d} --cpu_bind=cores bash {j} &\n'
jobstr += 'wait\n'
with open(script, 'w') as f: f.write(jobstr)
print(f'The job script for {njob:d} realisations have been generated.\n'
f'Please check it before submission:\n{script}')
else:
for j in jobs:
jobstr += f'bash {j}\n'
with open(script, 'w') as f: f.write(jobstr)
print(f'The job script for {njob:d} realisations have been generated.\n'
f'Please consider running it with `jobfork`:\n{script}')
return script
def restore(self):
"""
Restore the parameters of previous runs from existing files.
"""
from glob import glob
paths = glob(f'{self._workdir}/B*G*Z*N*_b*d*r*c*_seed*')
for p in paths:
if os.path.isfile(f'{p}/{self._done}'):
with open(f'{p}/{self._conf}', 'r') as f:
par, _, _, _ = json.load(f)
if not par in self._history: self._history.append(par)
def params(self):
"""
Print the current set of EZmock parameters.
"""
for key, value in self._param.items(): print(key, '=', value)
def history(self):
"""
Print the histories of the EZmock parameter sets.
"""
for i, param in enumerate(self._history): print(f'{i:d}:', param)
def clear(self, slicer):
"""
Clear history entries defined by `slicer`.
Parameters
----------
slicer:
The slice of the histories to be cleared.
It must be generated using the `slice` function.
"""
if type(slicer).__name__ != 'slice':
raise TypeError('slicer must be generated using the `slice` function')
del(self._history[slicer])
@property
def workdir(self):
"""
Working directory for generating EZmocks.
"""
return self._workdir
@workdir.setter
def workdir(self, path):
if path is None:
raise ValueError('working directory not set')
if not os.path.isdir(path):
try: os.mkdir(path)
except: raise IOError(f'cannot create working directory: {path}')
self._workdir = os.path.abspath(path)
@property
def exe(self):
"""
Path of the EZmock executable.
"""
return self._ez_exe
@exe.setter
def exe(self, path):
if not (os.path.isfile(path) and os.access(path, os.X_OK)):
raise IOError(f'invalid EZmock executable: {path}')
self._ez_exe = os.path.abspath(path)
@property
def pk_exe(self):
"""
Path of the powspec executable.
"""
return self._pk_exe
@pk_exe.setter
def pk_exe(self, path):
if not (os.path.isfile(path) and os.access(path, os.X_OK)):
self._warn(f'Power spectrum disabled due to invalid executable: {path}')
self._pk_exe = None
else: self._pk_exe = os.path.abspath(path)
@property
def xi_exe(self):
"""
Path of the FCFC executable for periodic boxes.
"""
return self._xi_exe
@xi_exe.setter
def xi_exe(self, path):
if not (os.path.isfile(path) and os.access(path, os.X_OK)):
self._warn(f'2PCF disabled due to invalid executable: {path}')
self._xi_exe = None
else: self._xi_exe = os.path.abspath(path)
@property
def bk_exe(self):
"""
Path of the bispec executable.
"""
return self._bk_exe
@bk_exe.setter
def bk_exe(self, path):
if not (os.path.isfile(path) and os.access(path, os.X_OK)):
self._warn(f'Bispectrum disabled due to invalid executable: {path}')
self._bk_exe = None
else: self._bk_exe = os.path.abspath(path)
def _get_bname(self, param):
"""
Generate the basename of files for a given parameter set.
Parameters
----------
param: dict
Dictionary storing a set of EZmock parameters.
Return
------
The basename as a string.
"""
bname = (f"B{param['boxsize']:g}"
f"G{param['num_grid']:d}"
f"Z{param['redshift']:g}"
f"N{param['num_tracer']:d}_"
f"b{param['pdf_base']:g}"
f"d{param['dens_scat']:g}"
f"r{param['rand_motion']:g}"
f"c{param['dens_cut']:g}_"
f"seed{param['seed']:d}")
return bname
  def _mock_cmd(self, bname, mock=True, rsd=True, params=None):
    """
    Generate the command for constructing the EZmock catalogue.

    Parameters
    ----------
    bname: str
        Basename of the EZmock catalogue.
    mock: bool, optional
        Indicate whether to run EZmock.
    rsd: bool, optional
        Indicate whether to apply redshift space distortions.
    params: dict, optional
        A dictionary of the EZmock parameters (defaults to `self._param`).

    Returns
    -------
    The command as a string.
    """
    from cosmoEZ import flatLCDM
    if params is None: params = self._param
    jobstr = ''
    # Compute structure growth parameters
    # NOTE(review): `growth2`, `hubble` and `growthf` come from the external
    # `cosmoEZ` package; presumably they return the squared linear growth
    # factor, the Hubble parameter and the linear growth rate at scale
    # factor `a` -- confirm against cosmoEZ.
    cosmo = flatLCDM(omega_m = params['omega_m'])
    z1 = 1 + params['redshift']
    a = 1. / z1
    a_init = 1. / (1 + params['z_init'])
    grow2z0 = cosmo.growth2(a, a_init=a_init)
    hubble = cosmo.hubble(a)
    zdist = cosmo.growthf(a) * hubble * a
    # Generate the command for running EZmock
    if mock:
      # The settings are piped to the EZmock executable as a Fortran
      # namelist (`&EZmock_v0_input ... /`) read from stdin.
      jobstr += ("echo '&EZmock_v0_input\n"
          f"datafile_prefix = \"EZmock_{bname}\"\n"
          f"datafile_path = \"./\"\n"
          f"iseed = {params['seed']:d}\n"
          f"boxsize = {params['boxsize']:g}\n"
          f"grid_num = {params['num_grid']:d}\n"
          f"redshift = {params['redshift']:g}\n"
          f"grow2z0 = {grow2z0:g}\n"
          f"expect_sum_pdf = {params['num_tracer']:d}\n"
          f"expect_A_pdf = {params['pdf_base']:g}\n"
          f"density_cut = {params['dens_cut']:g}\n"
          f"scatter2 = {params['dens_scat']:g}\n"
          f"zdist_rate = {zdist:g}\n"
          f"zdist_fog = {params['rand_motion']:g}\n"
          "density_sat = 100\nscatter = 10\nmodify_pk = 0.0\n"
          "modify_pdf = 0\nantidamping = 2\n"
          "use_whitenoise_file = .false.\nwhitenoise_file = \"\"\n"
          f"pkfile = \"{params['init_pk']}\"\n"
          f"pknwfile = \"{params['init_pk']}\"\n"
          "compute_CF = .false.\ncompute_CF_zdist = .false.\n"
          "dilute_factor = 0.3\nskiplines = 0\ntwod_corr_suffix = \"\"\n"
          f"max_r = 50\nbin_size = 5\nom = {params['omega_m']:g}\n/'"
          f" | {self._ez_exe} || exit\n\n")
    if rsd:
      # Generate the command for applying redshift space distortions
      bsize = params['boxsize']
      # Check boundaries and remove non-numerical entries (such as nan):
      # `$3+0==$3` and `$6+0==$6` keep only rows whose third (position)
      # and sixth (velocity) columns are numeric; the third coordinate is
      # then shifted by $6 * (1+z) / hubble and wrapped into [0, boxsize),
      # and rows outside the box are discarded.
      jobstr += ("awk '{CONVFMT=\"%.8g\";OFMT=\"%.8g\"; "
          "if ($3+0==$3 && $6+0==$6) { "
          f"z=($3+$6*{z1:g}/{hubble:.8g}+{bsize:g})%{bsize:g}; "
          f"if ($1>=0 && $1<{bsize:g} && $2>=0 && $2<{bsize:g}"
          f" && z>=0 && z<{bsize:g}) print $1,$2,z; }} }} ' "
          f"EZmock_{bname}.dat > EZmock_{bname}_RSD.dat || exit\n\n")
    return jobstr
def _pk_cmd(self, bname=None, rsd=True):
"""
Generate the command for computing power spectrum of EZmock.
Parameters
----------
bname: str
Basename of the EZmock catalogue.
rsd: bool, optional
Indicate whether to compute the redshift-space power spectrum.
Returns
-------
The command as a string
"""
if bname is None: bname = self._bname
if rsd:
poles = '[' + ','.join(f'{i:d}' for i in self._pkconf['zell']) + ']'
ifile = f'EZmock_{bname}_RSD.dat'
else:
poles = '[' + ','.join(f'{i:d}' for i in self._pkconf['rell']) + ']'
ifile = f'EZmock_{bname}.dat'
ofile = f'PK_{ifile}'
bsize = self._param['boxsize']
jobstr = (f'{self._pk_exe} -d {ifile} --data-formatter "%lf %lf %lf" '
f"-p '[($1+{bsize:g})%{bsize:g},($2+{bsize:g})%{bsize:g},"
f"($3+{bsize:g})%{bsize:g}]' -s T -B {bsize:g} "
f"-G {self._pkconf['ngrid']:d} -n 1 -i F -l '{poles}' -k 0 "
f"-K {self._pkconf['kmax']:g} -b {self._pkconf['dk']:g} "
f"-a {ofile} || exit\n\n")
return jobstr
def _xi_cmd(self, bname=None, rsd=True):
"""
Generate the command for computing 2-point correlation function of EZmock.
Parameters
----------
bname: str
Basename of the EZmock catalogue.
rsd: bool, optional
Indicate whether to compute the redshift-space power spectrum.
Returns
-------
The command as a string
"""
if bname is None: bname = self._bname
if rsd:
poles = '[' + ','.join(f'{i:d}' for i in self._xiconf['zell']) + ']'
ifile = f'EZmock_{bname}_RSD.dat'
else:
poles = '[' + ','.join(f'{i:d}' for i in self._xiconf['rell']) + ']'
ifile = f'EZmock_{bname}.dat'
ofile = f'2PCF_{ifile}'
bsize = self._param['boxsize']
jobstr = (f"{self._xi_exe} -i {ifile} -l D -f '%lf %lf %lf' "
f"-x '[$1,$2,$3]' -b {bsize:g} -B 1 -p DD -P {ofile}.dd -e 'DD/@@-1' "
f"-E {ofile}.xi2d -m '{poles}' -M {ofile} --s-min 0 "
f"--s-max {self._xiconf['rmax']:g} --s-step {self._xiconf['dr']:g} "
f"--mu-num {self._xiconf['nmu']:d} --dist-prec 0 -S 0 || exit\n\n")
return jobstr
def _bk_cmd(self, bname=None, rsd=True):
"""
Generate the command for computing power spectrum of EZmock.
Parameters
----------
bname: str
Basename of the EZmock catalogue.
rsd: bool, optional
Indicate whether to compute the redshift-space power spectrum.
Returns
-------
The command as a string
"""
if bname is None: bname = self._bname
if rsd: ifile = f'EZmock_{bname}_RSD.dat'
else: ifile = f'EZmock_{bname}.dat'
ofile = f'BK_{ifile}'
bsize = self._param['boxsize']
jobstr = (f'{self._bk_exe} -i {ifile} -s 7 --x-min 0 --y-min 0 --z-min 0 '
f'--x-max {bsize:g} --y-max {bsize:g} --z-max {bsize:g} -b 0'
f" -B {bsize:g} -g {self._bkconf['ngrid']:d} -w 1 -x 0 "
f"-p {self._bkconf['k1'][0]:g} -P {self._bkconf['k1'][1]:g} "
f"-q {self._bkconf['k2'][0]:g} -Q {self._bkconf['k2'][1]:g} "
f"-n {self._bkconf['nbin']:d} -o {ofile} -y || exit\n\n")
return jobstr
def _warn(self, message):
"""
Print the warning message.
Parameters
----------
message: str
The message to be printed.
"""
print('\x1b[31;1mWarning:\x1b[0m ' + message)
| [
"copy.deepcopy",
"os.remove",
"subprocess.Popen",
"os.path.isdir",
"cosmoEZ.flatLCDM",
"os.mkdir",
"glob.glob",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"os.rename",
"os.access",
"os.path.isfile",
"os.path.abspath",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure",
"js... | [((18415, 18441), 'copy.deepcopy', 'copy.deepcopy', (['self._param'], {}), '(self._param)\n', (18428, 18441), False, 'import copy\n'), ((20269, 20291), 'os.path.isfile', 'os.path.isfile', (['script'], {}), '(script)\n', (20283, 20291), False, 'import os\n'), ((26004, 26036), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(figw, figh)'}), '(figsize=(figw, figh))\n', (26014, 26036), True, 'import matplotlib.pyplot as plt\n'), ((26040, 26083), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.3)', 'hspace': '(0.3)'}), '(wspace=0.3, hspace=0.3)\n', (26059, 26083), True, 'import matplotlib.pyplot as plt\n'), ((41882, 41930), 'glob.glob', 'glob', (['f"""{self._workdir}/B*G*Z*N*_b*d*r*c*_seed*"""'], {}), "(f'{self._workdir}/B*G*Z*N*_b*d*r*c*_seed*')\n", (41886, 41930), False, 'from glob import glob\n'), ((43245, 43266), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (43260, 43266), False, 'import os\n'), ((43552, 43573), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (43567, 43573), False, 'import os\n'), ((45946, 45981), 'cosmoEZ.flatLCDM', 'flatLCDM', ([], {'omega_m': "params['omega_m']"}), "(omega_m=params['omega_m'])\n", (45954, 45981), False, 'from cosmoEZ import flatLCDM\n'), ((8078, 8102), 'os.path.abspath', 'os.path.abspath', (['init_pk'], {}), '(init_pk)\n', (8093, 8102), False, 'import os\n'), ((19617, 19641), 'os.path.abspath', 'os.path.abspath', (['init_pk'], {}), '(init_pk)\n', (19632, 19641), False, 'import os\n'), ((19911, 19936), 'os.path.isdir', 'os.path.isdir', (['self._odir'], {}), '(self._odir)\n', (19924, 19936), False, 'import os\n'), ((22333, 22354), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (22347, 22354), False, 'import os\n'), ((22762, 22783), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (22776, 22783), False, 'import os\n'), ((24247, 24326), 'json.dump', 'json.dump', (['[self._param, self._pkconf, self._xiconf, 
self._bkconf]', 'f'], {'indent': '(2)'}), '([self._param, self._pkconf, self._xiconf, self._bkconf], f, indent=2)\n', (24256, 24326), False, 'import json\n'), ((24534, 24622), 'subprocess.Popen', 'Popen', (["['/usr/bin/sbatch', script]"], {'shell': '(False)', 'stdout': 'PIPE', 'stderr': 'PIPE', 'text': '(True)'}), "(['/usr/bin/sbatch', script], shell=False, stdout=PIPE, stderr=PIPE,\n text=True)\n", (24539, 24622), False, 'from subprocess import Popen, PIPE\n'), ((25927, 25948), 'numpy.ceil', 'np.ceil', (['(nplot / ncol)'], {}), '(nplot / ncol)\n', (25934, 25948), True, 'import numpy as np\n'), ((26094, 26147), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(nrow, ncol)', '(i // ncol, i % ncol)'], {}), '((nrow, ncol), (i // ncol, i % ncol))\n', (26110, 26147), True, 'import matplotlib.pyplot as plt\n'), ((35332, 35350), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (35343, 35350), True, 'import matplotlib.pyplot as plt\n'), ((36953, 36979), 'copy.deepcopy', 'copy.deepcopy', (['self._param'], {}), '(self._param)\n', (36966, 36979), False, 'import copy\n'), ((37417, 37439), 'os.path.isfile', 'os.path.isfile', (['script'], {}), '(script)\n', (37431, 37439), False, 'import os\n'), ((41960, 41995), 'os.path.isfile', 'os.path.isfile', (['f"""{p}/{self._done}"""'], {}), "(f'{p}/{self._done}')\n", (41974, 41995), False, 'import os\n'), ((43106, 43125), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (43119, 43125), False, 'import os\n'), ((43922, 43943), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (43937, 43943), False, 'import os\n'), ((44298, 44319), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (44313, 44319), False, 'import os\n'), ((44663, 44684), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (44678, 44684), False, 'import os\n'), ((7950, 7973), 'os.path.isfile', 'os.path.isfile', (['init_pk'], {}), '(init_pk)\n', (7964, 7973), False, 'import os\n'), 
((19489, 19512), 'os.path.isfile', 'os.path.isfile', (['init_pk'], {}), '(init_pk)\n', (19503, 19512), False, 'import os\n'), ((19949, 19969), 'os.mkdir', 'os.mkdir', (['self._odir'], {}), '(self._odir)\n', (19957, 19969), False, 'import os\n'), ((21284, 21304), 'os.path.isfile', 'os.path.isfile', (['done'], {}), '(done)\n', (21298, 21304), False, 'import os\n'), ((21306, 21321), 'os.remove', 'os.remove', (['done'], {}), '(done)\n', (21315, 21321), False, 'import os\n'), ((21511, 21571), 'os.path.isfile', 'os.path.isfile', (['f"""{self._workdir}/{prev_bname}/{self._done}"""'], {}), "(f'{self._workdir}/{prev_bname}/{self._done}')\n", (21525, 21571), False, 'import os\n'), ((26551, 26572), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (26565, 26572), False, 'import os\n'), ((26880, 26925), 'numpy.loadtxt', 'np.loadtxt', (["self._pkconf['rref']"], {'unpack': '(True)'}), "(self._pkconf['rref'], unpack=True)\n", (26890, 26925), True, 'import numpy as np\n'), ((28138, 28159), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (28152, 28159), False, 'import os\n'), ((28467, 28512), 'numpy.loadtxt', 'np.loadtxt', (["self._pkconf['zref']"], {'unpack': '(True)'}), "(self._pkconf['zref'], unpack=True)\n", (28477, 28512), True, 'import numpy as np\n'), ((29758, 29779), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (29772, 29779), False, 'import os\n'), ((30085, 30130), 'numpy.loadtxt', 'np.loadtxt', (["self._xiconf['rref']"], {'unpack': '(True)'}), "(self._xiconf['rref'], unpack=True)\n", (30095, 30130), True, 'import numpy as np\n'), ((31343, 31364), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (31357, 31364), False, 'import os\n'), ((31670, 31715), 'numpy.loadtxt', 'np.loadtxt', (["self._xiconf['zref']"], {'unpack': '(True)'}), "(self._xiconf['zref'], unpack=True)\n", (31680, 31715), True, 'import numpy as np\n'), ((32967, 32988), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', 
(32981, 32988), False, 'import os\n'), ((33227, 33272), 'numpy.loadtxt', 'np.loadtxt', (["self._bkconf['rref']"], {'unpack': '(True)'}), "(self._bkconf['rref'], unpack=True)\n", (33237, 33272), True, 'import numpy as np\n'), ((34235, 34256), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (34249, 34256), False, 'import os\n'), ((34495, 34540), 'numpy.loadtxt', 'np.loadtxt', (["self._bkconf['zref']"], {'unpack': '(True)'}), "(self._bkconf['zref'], unpack=True)\n", (34505, 34540), True, 'import numpy as np\n'), ((37096, 37115), 'os.path.isdir', 'os.path.isdir', (['odir'], {}), '(odir)\n', (37109, 37115), False, 'import os\n'), ((38304, 38324), 'os.path.isfile', 'os.path.isfile', (['done'], {}), '(done)\n', (38318, 38324), False, 'import os\n'), ((38538, 38559), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (38552, 38559), False, 'import os\n'), ((38829, 38850), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (38843, 38850), False, 'import os\n'), ((40469, 40543), 'json.dump', 'json.dump', (['[params, self._pkconf, self._xiconf, self._bkconf]', 'f'], {'indent': '(2)'}), '([params, self._pkconf, self._xiconf, self._bkconf], f, indent=2)\n', (40478, 40543), False, 'import json\n'), ((43138, 43152), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (43146, 43152), False, 'import os\n'), ((43423, 43443), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (43437, 43443), False, 'import os\n'), ((43448, 43472), 'os.access', 'os.access', (['path', 'os.X_OK'], {}), '(path, os.X_OK)\n', (43457, 43472), False, 'import os\n'), ((43740, 43760), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (43754, 43760), False, 'import os\n'), ((43765, 43789), 'os.access', 'os.access', (['path', 'os.X_OK'], {}), '(path, os.X_OK)\n', (43774, 43789), False, 'import os\n'), ((44126, 44146), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (44140, 44146), False, 'import os\n'), ((44151, 44175), 
'os.access', 'os.access', (['path', 'os.X_OK'], {}), '(path, os.X_OK)\n', (44160, 44175), False, 'import os\n'), ((44485, 44505), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (44499, 44505), False, 'import os\n'), ((44510, 44534), 'os.access', 'os.access', (['path', 'os.X_OK'], {}), '(path, os.X_OK)\n', (44519, 44534), False, 'import os\n'), ((13275, 13300), 'os.path.abspath', 'os.path.abspath', (['pk_r_ref'], {}), '(pk_r_ref)\n', (13290, 13300), False, 'import os\n'), ((13870, 13895), 'os.path.abspath', 'os.path.abspath', (['pk_z_ref'], {}), '(pk_z_ref)\n', (13885, 13895), False, 'import os\n'), ((14991, 15016), 'os.path.abspath', 'os.path.abspath', (['xi_r_ref'], {}), '(xi_r_ref)\n', (15006, 15016), False, 'import os\n'), ((15586, 15611), 'os.path.abspath', 'os.path.abspath', (['xi_z_ref'], {}), '(xi_z_ref)\n', (15601, 15611), False, 'import os\n'), ((16581, 16606), 'os.path.abspath', 'os.path.abspath', (['bk_r_ref'], {}), '(bk_r_ref)\n', (16596, 16606), False, 'import os\n'), ((16878, 16903), 'os.path.abspath', 'os.path.abspath', (['bk_z_ref'], {}), '(bk_z_ref)\n', (16893, 16903), False, 'import os\n'), ((20379, 20391), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20388, 20391), False, 'import json\n'), ((20531, 20551), 'os.path.isfile', 'os.path.isfile', (['done'], {}), '(done)\n', (20545, 20551), False, 'import os\n'), ((23030, 23051), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (23044, 23051), False, 'import os\n'), ((23227, 23248), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (23241, 23248), False, 'import os\n'), ((23421, 23442), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (23435, 23442), False, 'import os\n'), ((23620, 23641), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (23634, 23641), False, 'import os\n'), ((23812, 23833), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (23826, 23833), False, 'import os\n'), ((24009, 24030), 
'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (24023, 24030), False, 'import os\n'), ((26588, 26618), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (26598, 26618), True, 'import numpy as np\n'), ((27235, 27265), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (27245, 27265), True, 'import numpy as np\n'), ((28175, 28205), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (28185, 28205), True, 'import numpy as np\n'), ((28826, 28856), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (28836, 28856), True, 'import numpy as np\n'), ((29795, 29825), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (29805, 29825), True, 'import numpy as np\n'), ((30440, 30470), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (30450, 30470), True, 'import numpy as np\n'), ((31380, 31410), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (31390, 31410), True, 'import numpy as np\n'), ((32045, 32075), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (32055, 32075), True, 'import numpy as np\n'), ((33004, 33034), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (33014, 33034), True, 'import numpy as np\n'), ((33552, 33582), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (33562, 33582), True, 'import numpy as np\n'), ((34272, 34302), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (34282, 34302), True, 'import numpy as np\n'), ((34824, 34854), 'numpy.loadtxt', 'np.loadtxt', (['ifile'], {'unpack': '(True)'}), '(ifile, unpack=True)\n', (34834, 34854), True, 'import numpy as np\n'), ((37130, 37144), 
'os.mkdir', 'os.mkdir', (['odir'], {}), '(odir)\n', (37138, 37144), False, 'import os\n'), ((38326, 38341), 'os.remove', 'os.remove', (['done'], {}), '(done)\n', (38335, 38341), False, 'import os\n'), ((42072, 42084), 'json.load', 'json.load', (['f'], {}), '(f)\n', (42081, 42084), False, 'import json\n'), ((13009, 13033), 'os.path.isfile', 'os.path.isfile', (['pk_r_ref'], {}), '(pk_r_ref)\n', (13023, 13033), False, 'import os\n'), ((13604, 13628), 'os.path.isfile', 'os.path.isfile', (['pk_z_ref'], {}), '(pk_z_ref)\n', (13618, 13628), False, 'import os\n'), ((14725, 14749), 'os.path.isfile', 'os.path.isfile', (['xi_r_ref'], {}), '(xi_r_ref)\n', (14739, 14749), False, 'import os\n'), ((15320, 15344), 'os.path.isfile', 'os.path.isfile', (['xi_z_ref'], {}), '(xi_z_ref)\n', (15334, 15344), False, 'import os\n'), ((16444, 16468), 'os.path.isfile', 'os.path.isfile', (['bk_r_ref'], {}), '(bk_r_ref)\n', (16458, 16468), False, 'import os\n'), ((16741, 16765), 'os.path.isfile', 'os.path.isfile', (['bk_z_ref'], {}), '(bk_z_ref)\n', (16755, 16765), False, 'import os\n'), ((20896, 20926), 'os.rename', 'os.rename', (['conf', 'f"""{conf.old}"""'], {}), "(conf, f'{conf.old}')\n", (20905, 20926), False, 'import os\n'), ((37533, 37545), 'json.load', 'json.load', (['f'], {}), '(f)\n', (37542, 37545), False, 'import json\n'), ((37577, 37597), 'os.path.isfile', 'os.path.isfile', (['done'], {}), '(done)\n', (37591, 37597), False, 'import os\n'), ((37894, 37924), 'os.rename', 'os.rename', (['conf', 'f"""{conf.old}"""'], {}), "(conf, f'{conf.old}')\n", (37903, 37924), False, 'import os\n'), ((39144, 39165), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (39158, 39165), False, 'import os\n'), ((39358, 39379), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (39372, 39379), False, 'import os\n'), ((39569, 39590), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (39583, 39590), False, 'import os\n'), ((39785, 39806), 'os.path.isfile', 
'os.path.isfile', (['ofile'], {}), '(ofile)\n', (39799, 39806), False, 'import os\n'), ((39994, 40015), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (40008, 40015), False, 'import os\n'), ((40208, 40229), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (40222, 40229), False, 'import os\n')] |
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# BUGFIX: `sklearn.metrics.classification` was a private module removed in
# scikit-learn 0.24; the public import path is `sklearn.metrics`.
from sklearn.metrics import accuracy_score, f1_score
def prediction_score(train_X, train_y, test_X, test_y, metric, model):
    """
    Fit a classifier on the training set and score its test predictions.

    Parameters
    ----------
    train_X, train_y:
        Training features and labels.
    test_X, test_y:
        Test features and labels.
    metric: str
        Either "f1" or "accuracy".
    model: str
        Either "random_forest_classifier" or "logistic_regression".

    Returns
    -------
    The requested score as a float.

    Raises
    ------
    ValueError
        If `metric` or `model` is not recognised (subclass of the
        previously raised generic Exception, so existing handlers still work).
    """
    # Validate arguments up front so that invalid input fails before any
    # (potentially expensive) model fitting.
    if metric not in ("f1", "accuracy"):
        raise ValueError("Invalid metric name.")
    if model not in ("random_forest_classifier", "logistic_regression"):
        raise ValueError("Invalid model name.")
    values_train = set(train_y)
    # if the train labels are always the same
    if len(values_train) == 1:
        # Every classifier would predict that constant value anyway,
        # so skip fitting and predict it directly.
        only_value_train = next(iter(values_train))
        test_pred = np.full_like(test_y, only_value_train)
    # if the train labels have different values
    else:
        # create, fit and predict with the requested model
        if model == "random_forest_classifier":
            m = RandomForestClassifier(n_estimators=10)
        else:  # logistic_regression (validated above)
            m = LogisticRegression()
        m.fit(train_X, train_y)
        test_pred = m.predict(test_X)
    # calculate the score
    if metric == "f1":
        return f1_score(test_y, test_pred)
    return accuracy_score(test_y, test_pred)
| [
"numpy.ones_like",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.classification.f1_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.classification.accuracy_score"
] | [((1018, 1045), 'sklearn.metrics.classification.f1_score', 'f1_score', (['test_y', 'test_pred'], {}), '(test_y, test_pred)\n', (1026, 1045), False, 'from sklearn.metrics.classification import accuracy_score, f1_score\n'), ((479, 499), 'numpy.ones_like', 'np.ones_like', (['test_y'], {}), '(test_y)\n', (491, 499), True, 'import numpy as np\n'), ((669, 708), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (691, 708), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1092, 1125), 'sklearn.metrics.classification.accuracy_score', 'accuracy_score', (['test_y', 'test_pred'], {}), '(test_y, test_pred)\n', (1106, 1125), False, 'from sklearn.metrics.classification import accuracy_score, f1_score\n'), ((770, 790), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (788, 790), False, 'from sklearn.linear_model import LogisticRegression\n')] |
# -*- coding: utf-8 -*-
# Requires PIL (pillow) and NumPy
# Copyright (C) <NAME>, 2016
# License: MIT
import os
import json
import codecs
from PIL import Image
from io import BytesIO
import numpy as np
class Hexagram(object):
    """
    Generate and write a hexagram to PNG.

    The constructor takes an iterable of six binary digits or booleans:
    1 / True produces a solid line, 0 / False a broken line.
    Call .dump() (with an optional filename string argument) to write the
    image to hexagram_output\hexagram.png.
    """
    def __init__(self, pattern, plength=6):
        if len(pattern) != plength:
            raise HexagramException(f"Pass an iterable of {plength} digits or booleans")
        # Geometry of the drawing, in pixels.
        self.bar_height = 8
        self.wbar_height = 4
        # we always want to produce a square hexagram
        self.bar_width = (self.bar_height * 6) + (self.wbar_height * 5)
        self.generated = self.generate(pattern)

    def _black_row(self):
        """Return a solid bar followed by its white separator."""
        bar = np.zeros((self.bar_height, self.bar_width))
        gap = np.ones((self.wbar_height, self.bar_width))
        return np.vstack([bar, gap])

    def _broken_row(self):
        """Return a broken bar (two halves around a gap) plus separator."""
        half = (self.bar_width // 2) - self.bar_height
        piece = np.zeros((self.bar_height, half))
        middle = np.ones((self.bar_height, self.bar_height * 2))
        bar = np.hstack([piece, middle, piece])
        gap = np.ones((self.wbar_height, self.bar_width))
        return np.vstack([bar, gap])

    def trim(self, raw_hexagram):
        """Remove the trailing white separator from the bottom bar."""
        raw_hexagram[-1] = raw_hexagram[-1][:self.bar_height]
        return raw_hexagram

    def generate(self, pattern):
        """Build the scaled black-and-white image as a uint8 array."""
        rows = []
        # hexagrams are grown bottom to top
        for digit in pattern:
            rows.insert(0, self._black_row() if digit else self._broken_row())
        stacked = np.vstack(self.trim(rows))
        # rescale to 256 x 8-bit (0 = black, 255 = white)
        scaled = 255.0 / stacked.max() * (stacked - stacked.min())
        return scaled.astype(np.uint8)

    def dump(self, fname=False):
        """Write the hexagram to a PNG inside <classname>_output/."""
        name = fname or self.__class__.__name__.lower()
        outdir = self.__class__.__name__.lower() + '_output'
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        image = Image.fromarray(self.generated)
        image.save(os.path.join(outdir, name + '.png'))

    def dump_json(self, fname=False):
        """Try to dump a JSON representation of the hexagram to a file."""
        name = fname or self.__class__.__name__.lower()
        payload = json.dumps(
            self.generated.tolist(),
            indent=4,
            separators=(',', ':'),
            sort_keys=True)
        try:
            with codecs.open(name + '.json', 'w', encoding="utf-8") as f:
                f.write(payload)
        except IOError:
            raise WriteException("Couldn't write json! You could also copy the .json property to your clipboard.")

    def dump_image(self):
        """Return the hexagram PNG as an in-memory file object."""
        img_io = BytesIO()
        Image.fromarray(self.generated).save(img_io, 'PNG', quality=100)
        img_io.seek(0)
        return img_io
class Trigram(Hexagram):
    """Same as Hexagram, but with three bars."""
    def __init__(self, pattern):
        # Zero-argument super() instead of super(self.__class__, self):
        # the latter resolves to the *runtime* class, so any subclass of
        # Trigram would recurse into this __init__ forever.
        super().__init__(pattern, plength=3)
class HexagramException(Exception):
    """Raised when the input pattern has the wrong number of lines."""
    pass
class WriteException(Exception):
    """Raised when the JSON dump cannot be written to disk (wraps IOError)."""
    pass
| [
"os.path.exists",
"PIL.Image.fromarray",
"numpy.ones",
"os.makedirs",
"os.path.join",
"io.BytesIO",
"numpy.zeros",
"numpy.vstack",
"codecs.open"
] | [((2132, 2152), 'numpy.vstack', 'np.vstack', (['container'], {}), '(container)\n', (2141, 2152), True, 'import numpy as np\n'), ((2440, 2471), 'PIL.Image.fromarray', 'Image.fromarray', (['self.generated'], {}), '(self.generated)\n', (2455, 2471), False, 'from PIL import Image\n'), ((2629, 2676), 'os.path.join', 'os.path.join', (['outdir', "('%s%s' % (_fname, '.png'))"], {}), "(outdir, '%s%s' % (_fname, '.png'))\n", (2641, 2676), False, 'import os\n'), ((3448, 3457), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3455, 3457), False, 'from io import BytesIO\n'), ((3474, 3505), 'PIL.Image.fromarray', 'Image.fromarray', (['self.generated'], {}), '(self.generated)\n', (3489, 3505), False, 'from PIL import Image\n'), ((2558, 2580), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (2572, 2580), False, 'import os\n'), ((2594, 2613), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (2605, 2613), False, 'import os\n'), ((1020, 1063), 'numpy.zeros', 'np.zeros', (['(self.bar_height, self.bar_width)'], {}), '((self.bar_height, self.bar_width))\n', (1028, 1063), True, 'import numpy as np\n'), ((1077, 1120), 'numpy.ones', 'np.ones', (['(self.wbar_height, self.bar_width)'], {}), '((self.wbar_height, self.bar_width))\n', (1084, 1120), True, 'import numpy as np\n'), ((1491, 1534), 'numpy.ones', 'np.ones', (['(self.wbar_height, self.bar_width)'], {}), '((self.wbar_height, self.bar_width))\n', (1498, 1534), True, 'import numpy as np\n'), ((2888, 2950), 'codecs.open', 'codecs.open', (["('%s%s' % (_fname, '.json'))", '"""w"""'], {'encoding': '"""utf-8"""'}), "('%s%s' % (_fname, '.json'), 'w', encoding='utf-8')\n", (2899, 2950), False, 'import codecs\n'), ((1256, 1322), 'numpy.zeros', 'np.zeros', (['(self.bar_height, self.bar_width // 2 - self.bar_height)'], {}), '((self.bar_height, self.bar_width // 2 - self.bar_height))\n', (1264, 1322), True, 'import numpy as np\n'), ((1342, 1389), 'numpy.ones', 'np.ones', (['(self.bar_height, self.bar_height * 2)'], 
{}), '((self.bar_height, self.bar_height * 2))\n', (1349, 1389), True, 'import numpy as np\n'), ((1407, 1473), 'numpy.zeros', 'np.zeros', (['(self.bar_height, self.bar_width // 2 - self.bar_height)'], {}), '((self.bar_height, self.bar_width // 2 - self.bar_height))\n', (1415, 1473), True, 'import numpy as np\n')] |
import numpy as np
class BriansBrain:
    """Brian's Brain cellular-automaton rule (0 = off, 1 = firing, 2 = dying)."""

    def apply(self, current, neighbors):
        """
        Advance the grid one step: firing (1) -> dying (2), dying (2) ->
        off (0), and an off cell (0) switches on (1) iff exactly two of
        its neighbors are firing.

        `current` is an integer array of cell states; `neighbors` maps
        each key to an equally-shaped state array of one neighbor offset.
        The input array is not modified.
        """
        firing_count = np.zeros_like(current)
        for grid in neighbors.values():
            firing_count += (grid == 1).astype(int)
        state = current.copy()
        # Birth mask must be computed before the transitions below
        # overwrite the state values.
        births = np.logical_and(state == 0, firing_count == 2)
        state[state == 2] = 0   # dying cells switch off
        state[state == 1] = 2   # firing cells start dying
        next_state = np.zeros_like(current)
        next_state += state
        next_state += births.astype(int)
        return next_state
| [
"numpy.zeros_like",
"numpy.logical_and"
] | [((97, 119), 'numpy.zeros_like', 'np.zeros_like', (['current'], {}), '(current)\n', (110, 119), True, 'import numpy as np\n'), ((138, 160), 'numpy.zeros_like', 'np.zeros_like', (['current'], {}), '(current)\n', (151, 160), True, 'import numpy as np\n'), ((490, 517), 'numpy.logical_and', 'np.logical_and', (['zero', 'two_1'], {}), '(zero, two_1)\n', (504, 517), True, 'import numpy as np\n')] |
import numpy as np
def count(LM):
    """
    Recursively count pairings ("pair_count") in the graph whose 0/1
    adjacency matrix is LM (a 1 marks an edge).

    NOTE(review): the recursion branches only on the first five edges
    returned by np.argwhere (``[:5]``), and it zeroes visited edges in
    the *caller's* matrix -- LM is mutated in place, so pass a copy if
    the matrix is needed afterwards.
    """
    co = 0
    if LM.shape[0]>2:
        # Up to five candidate edges to branch on (row, col index pairs).
        index = np.argwhere(LM==1)[:5]
        for it in index:
            # Delete both endpoints of the chosen edge, rows then columns.
            # The second delete uses it[1]-1, presumably because the first
            # delete already shifted indices past it[0] (assumes
            # it[1] > it[0] -- TODO confirm for asymmetric index order).
            lm_ = np.delete(LM,it[0],0)
            lm_ = np.delete(lm_,it[1]-1,0)
            lm_ = np.delete(lm_,it[0],1)
            lm = np.delete(lm_,it[1]-1,1)
            # Mark the edge as used so later branches do not revisit it
            # (this is the in-place mutation noted above).
            LM[it[0],it[1]] = 0
            LM[it[1],it[0]] = 0
            co += count(lm)
    elif LM.shape[0]==2:
        # Base case: the two remaining vertices pair up iff joined by an
        # edge and carry no self-loops.
        if LM[0,0]==0 and LM[1,1]==0 and LM[0,1]==1 and LM[1,0]==1:
            co = 1
        else:
            co = 0
    return co
if __name__ == "__main__":
    # Demo input: a symmetric 12x12 0/1 adjacency matrix.
    adjacency = np.array([
        [0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0],
        [0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1],
        [1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
        [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1],
        [1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],
        [1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1],
        [0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0],
        [0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
        [1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1],
        [0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0],
        [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
    ])
    print("pair_count = ", count(adjacency))
| [
"numpy.array",
"numpy.argwhere",
"numpy.delete"
] | [((1208, 1221), 'numpy.array', 'np.array', (['LMn'], {}), '(LMn)\n', (1216, 1221), True, 'import numpy as np\n'), ((84, 104), 'numpy.argwhere', 'np.argwhere', (['(LM == 1)'], {}), '(LM == 1)\n', (95, 104), True, 'import numpy as np\n'), ((150, 173), 'numpy.delete', 'np.delete', (['LM', 'it[0]', '(0)'], {}), '(LM, it[0], 0)\n', (159, 173), True, 'import numpy as np\n'), ((190, 218), 'numpy.delete', 'np.delete', (['lm_', '(it[1] - 1)', '(0)'], {}), '(lm_, it[1] - 1, 0)\n', (199, 218), True, 'import numpy as np\n'), ((233, 257), 'numpy.delete', 'np.delete', (['lm_', 'it[0]', '(1)'], {}), '(lm_, it[0], 1)\n', (242, 257), True, 'import numpy as np\n'), ((273, 301), 'numpy.delete', 'np.delete', (['lm_', '(it[1] - 1)', '(1)'], {}), '(lm_, it[1] - 1, 1)\n', (282, 301), True, 'import numpy as np\n')] |
#!/opt/anaconda/envs/env_hazard_index/bin/python
import atexit
import cioppy
# Shared cioppy client, used throughout for logging, catalogue search
# and publishing of results.
ciop = cioppy.Cioppy()
import xarray as xr
import numpy as np
import pandas as pd
import geopandas as gpd
import datetime
import sys
import gdal
from urllib.parse import urlparse
import os
# Exit codes understood by clean_exit() below.
SUCCESS = 0
ERR_RESOLUTION = 10
ERR_STAGEIN = 20
ERR_NO_OUTPUT = 30
# add a trap to exit gracefully
def clean_exit(exit_code):
    """Log the human-readable message for *exit_code* at INFO on
    success, ERROR otherwise."""
    messages = {SUCCESS: 'Processing successfully concluded',
                ERR_RESOLUTION: 'Could not resolve Sentinel-1 product enclosure',
                ERR_STAGEIN: 'Could not stage-in Sentinel-1 product',
                ERR_NO_OUTPUT: "Missing output"
               }
    level = 'INFO' if exit_code == SUCCESS else 'ERROR'
    ciop.log(level, messages[exit_code])
def get_vsi_url(enclosure, username=None, api_key=None):
    """
    Turn an HTTP(S) enclosure URL into a GDAL /vsigzip//vsicurl/ path,
    embedding basic-auth credentials when a username is supplied.
    """
    parts = urlparse(enclosure)
    if username is None:
        return '/vsigzip//vsicurl/%s://%s%s' % (parts.scheme,
                                                 parts.netloc,
                                                 parts.path)
    return '/vsigzip//vsicurl/%s://%s:%s@%s%s' % (parts.scheme,
                                                  username,
                                                  api_key,
                                                  parts.netloc,
                                                  parts.path)
def to_ds(search, username, api_key):
    """
    Open every catalogue entry of *search* as a GeoTIFF over /vsicurl
    and stack them into one xarray Dataset along a 'date' dimension.

    Parameters
    ----------
    search : DataFrame-like with 'enclosure' and 'startdate' columns.
    username, api_key : credentials forwarded to get_vsi_url (may be None).

    Returns
    -------
    xarray.Dataset with a single 'rainfall' variable indexed by date.
    """
    # (The unused ``chirps_ds`` accumulator from the original version
    # has been removed.)
    dates = []
    datasets = []
    for _, row in search.iterrows():
        # read the vsicurl geotiff
        da = xr.open_rasterio(get_vsi_url(row['enclosure'], username, api_key))
        # remove the band dimension
        da = da.squeeze().drop(labels='band')
        # add the variable
        da = da.to_dataset(name='rainfall')
        dates.append(row['startdate'])
        datasets.append(da)
    return xr.concat(datasets, dim=pd.DatetimeIndex(dates, name='date'))
def get_weights(ds):
    """
    Per-date weights: each date's rainfall divided by the per-pixel
    total over all dates, with NaN entries replaced by zero so the
    result can be multiplied safely.
    """
    total = ds['rainfall'].sum(dim=['date'])
    dates = ds['rainfall'].date.values
    ratios = [ds['rainfall'].sel(date=d) / total for d in dates]
    ds_w = xr.concat(ratios, dim=pd.DatetimeIndex(dates, name='date'))
    # Zero out NaN entries to keep the weights arithmetically usable.
    drop_nan = np.vectorize(lambda v: 0 if np.isnan(v) else v,
                           otypes=[np.float64])
    return drop_nan(ds_w)
def percentile(x):
    """
    Empirical percentile of every entry of a 1-d array.

    Each value is ranked against the other non-NaN entries (counting
    values <= itself) and the rank is mapped to (rank + 0.5) / n_valid.
    NaN entries yield NaN; an all-NaN input yields an all-NaN result.
    """
    n = x.shape[0]
    if np.isnan(x).all():
        out = np.empty(n)
        out[:] = np.nan
        return out
    valid = ~np.isnan(x)
    n_valid = int(valid.sum())
    ranks = np.zeros(n)
    for i in range(n):
        if not valid[i]:
            continue
        for j in range(n):
            if j != i and valid[j] and x[j] <= x[i]:
                ranks[i] += 1
    out = (ranks + 0.5) / n_valid
    # NaN inputs map to NaN percentiles.
    out[~valid] = np.nan
    return out
def teta_sp(x, y):
    """
    Sum of the element-wise product of two equal-shape arrays, treating
    NaN entries as zero.  Returns NaN when the shapes differ.

    Unlike the previous version, the inputs are NOT mutated: NaNs are
    zeroed on copies instead of being overwritten in the callers'
    arrays.
    """
    if x.shape != y.shape:
        return np.nan
    xc = np.where(np.isnan(x), 0, x)
    yc = np.where(np.isnan(y), 0, y)
    return np.sum(xc * yc)
def inv_logit(p):
    """
    Map a linear-predictor value to a probability (logistic sigmoid).

    Uses the numerically stable two-branch form so large positive p
    saturates to 1.0 instead of overflowing exp() into inf/inf = NaN.
    """
    if p >= 0:
        return 1.0 / (1.0 + np.exp(-p))
    ep = np.exp(p)
    return ep / (1.0 + ep)
def cog(input_tif, output_tif):
    """
    Convert *input_tif* into a Cloud Optimized GeoTIFF at *output_tif*:
    build internal overviews, then translate with tiling, overview
    copying and LZW compression.  The intermediate input file and its
    external .ovr are deleted afterwards.
    """
    translate_options = gdal.TranslateOptions(gdal.ParseCommandLine('-co TILED=YES ' \
                                                                 '-co COPY_SRC_OVERVIEWS=YES ' \
                                                                 ' -co COMPRESS=LZW'))
    # First pass: build (deflate-compressed) overview pyramids.
    ds = gdal.Open(input_tif, gdal.OF_READONLY)
    gdal.SetConfigOption('COMPRESS_OVERVIEW', 'DEFLATE')
    ds.BuildOverviews('NEAREST', [2,4,8,16,32])
    ds = None
    # Second pass: re-open and translate with the COG creation options.
    ds = gdal.Open(input_tif)
    gdal.Translate(output_tif,
                   ds,
                   options=translate_options)
    ds = None
    # Clean up the temporary input and its external overview file.
    os.remove('{}.ovr'.format(input_tif))
    os.remove(input_tif)
def main():
    """
    Node entry point: read catalogue references from stdin, stack the
    referenced rainfall GeoTIFFs into an xarray dataset, compute the
    per-pixel precipitation hazard index (weighted empirical percentile
    mapped through the inverse logit) and publish it as a Cloud
    Optimized GeoTIFF plus a .properties metadata file.
    """
    os.chdir(ciop.tmp_dir)
    parameters = dict()
    parameters['username'] = None if ciop.getparam('_T2Username') == '' else ciop.getparam('_T2Username')
    parameters['api_key'] = None if ciop.getparam('_T2ApiKey') == '' else ciop.getparam('_T2ApiKey')
    ciop.log('INFO', 'username: "{}"'.format(parameters['username']))
    search_params = dict()
    ###################################################################################################
    # This is the version for tg-queue-one-time-series needed for long time-series productions
    #
    #parameters['series_startdate'] = ciop.getparam('series_start_date')
    #parameters['series_enddate'] = ciop.getparam('series_end_date')
    #parameters['catalogue_osd'] = ciop.getparam('catalogue_osd')
    #search_params['start'] = ciop.getparam('series_startdate')
    #search_params['stop'] = ciop.getparam('series_enddate')
    #search_params['count'] = 'unlimited'
    #ciop.log('INFO', 'Looking for data from {} to {}:'.format(search_params['start'],search_params['stop']))
    #if parameters['username'] is not None:
    #    creds = '{}:{}'.format(parameters['username'],
    #                           parameters['api_key'])
    #    search = pd.DataFrame(ciop.search(end_point=parameters['catalogue_osd'],
    #                                  params=search_params,
    #                                  output_fields='self,startdate,enddate,enclosure,title',
    #                                  model='GeoTime',
    #                                  timeout=1200000,
    #                                  creds=creds))
    #else:
    #    search = pd.DataFrame(ciop.search(end_point=parameters['catalogue_osd'],
    #                                  params=search_params,
    #                                  output_fields='self,startdate,enddate,enclosure,title',
    #                                  model='GeoTime',
    #                                  timeout=1200000))
    #ciop.log('INFO', 'Inputs: \n')
    #for row in search.iterrows():
    #    ciop.log('INFO', row[1]['self'])
    ####################################################################################################
    temp_results = []
    # One catalogue self-reference per stdin line; resolve each to its
    # metadata record (using credentials when they were provided).
    for line in sys.stdin:
        ciop.log('INFO', 'Line: {}'.format(line.rstrip()))
        if parameters['username'] is not None:
            creds = '{}:{}'.format(parameters['username'],
                                   parameters['api_key'])
            entry = ciop.search(end_point=line.rstrip(),
                            params=search_params,
                            output_fields='self,startdate,enddate,enclosure,title,wkt',
                            model='GeoTime',
                            timeout=1200000,
                            creds=creds)[0]
        else:
            entry = ciop.search(end_point=line.rstrip(),
                            params=search_params,
                            output_fields='self,startdate,enddate,enclosure,title,wkt',
                            model='GeoTime',
                            timeout=1200000)[0]
        temp_results.append(entry)
    search = gpd.GeoDataFrame(temp_results)
    # Convert startdate to pd.datetime and sort by date
    search['startdate_dt'] = pd.to_datetime(search.startdate)
    search['enddate_dt'] = pd.to_datetime(search.enddate)
    search = search.sort_values(by='startdate_dt')
    ciop.log('DEBUG', 'Create xarray dataset')
    ds = to_ds(search,
               username=parameters['username'],
               api_key=parameters['api_key'])
    # Geo-info (geotransform + projection) retrieved from the first input
    temp = get_vsi_url(search.iloc[0]['enclosure'], parameters['username'], parameters['api_key'])
    temp_ds = gdal.Open(temp)
    geo_transform = temp_ds.GetGeoTransform()
    projection = temp_ds.GetProjection()
    temp_ds = None
    # compute empirical percentile for each pixel over the vector 'date',
    # processing the raster in 1000x1000 tiles to bound memory use
    #my_test=ds['rainfall'][:,400:1000,4000:4600]
    #ds_date_index,ds_x_index, ds_y_index= my_test.shape
    ds_date_index,ds_x_index, ds_y_index= ds['rainfall'].shape
    result=np.zeros((ds_date_index,ds_x_index,ds_y_index),dtype=float)
    x_block=int(np.ceil(ds_x_index/1000))
    y_block=int(np.ceil(ds_y_index/1000))
    for i in range(x_block):
        for j in range(y_block):
            x_low=1000*(i)
            x_high=1000*(i+1)
            y_low=1000*(j)
            y_high=1000*(j+1)
            if x_high>ds_x_index:
                x_high=ds_x_index
            if y_high>ds_y_index:
                y_high=ds_y_index
            result[:,x_low:x_high,y_low:y_high] = np.apply_along_axis(percentile,
                                                                     0,
                                                                     ds['rainfall'][:,x_low:x_high,y_low:y_high])
#            result[:,x_low:x_high,y_low:y_high] = np.apply_along_axis(percentile,
#                                                                     0,
#                                                                     my_test[:,x_low:x_high,y_low:y_high])
    ciop.log('DEBUG', 'Get weights')
    w = get_weights(ds)
    ciop.log('DEBUG','pixel-wise weighted percentile')
    # pixel-wise weighted percentile
    teta=np.zeros((result.shape[1],result.shape[2]),dtype=float)
    for i in range(result.shape[1]):
        for j in range(result.shape[2]):
            teta[i,j]=teta_sp(result[:,i,j],
                              w[:,i,j])
    vfunc_inv_logit = np.vectorize(inv_logit,
                                   otypes=[np.float64])
    ciop.log('DEBUG', 'Precipitation hazard index')
    # Hazard index as a percentage in [0, 100].
    q = 100 * vfunc_inv_logit(teta)
    temp_output_name = 'temp_rainfall_hazard_index_{}_{}.tif'.format(search['startdate_dt'].min().strftime('%Y_%m_%d'),
                                                                     search['enddate_dt'].max().strftime('%Y_%m_%d'))
    ciop.log('DEBUG', 'Save as temp geotiff: {}'.format(temp_output_name))
    cols=q.shape[1]
    rows=q.shape[0]
    drv = gdal.GetDriverByName('GTiff')
    ds_tif = drv.Create(temp_output_name,
                        cols, rows,
                        1,
                        gdal.GDT_Float32)
    ds_tif.SetGeoTransform(geo_transform)
    ds_tif.SetProjection(projection)
    ds_tif.GetRasterBand(1).WriteArray(q)
    ds_tif.GetRasterBand(1).SetDescription('Q')
    ds_tif.FlushCache()
    # Final name drops the leading 'temp_' prefix.
    output_name = '_'.join(temp_output_name.split('_')[1:])
    ciop.log('INFO', 'Creating COG: {}'.format(output_name))
    cog(temp_output_name,output_name)
    #Create properties file
    out_properties = output_name.split('.')[0] + '.properties'
    with open(out_properties, 'w') as file:
        file.write('title=Rainfall-related hazard index for season {0} / {1}\n'.format(search['startdate_dt'].min().strftime('%Y-%m-%d'),
                                                                                       search['enddate_dt'].max().strftime('%Y-%m-%d')))
        date='{}/{}'.format(search['startdate_dt'].min().strftime('%Y-%m-%dT%H:%M:%SZ'),
                            search['enddate_dt'].max().strftime('%Y-%m-%dT%H:%M:%SZ'))
        file.write('date={}\n'.format(date))
        file.write('geometry={0}'.format(search['wkt'].iloc[0]))
    ciop.log('INFO', 'Publishing COG')
    ciop.publish(os.path.join(ciop.tmp_dir, output_name), metalink=True)
    ciop.publish(os.path.join(ciop.tmp_dir, out_properties), metalink=True)
# Run the node; clean_exit() maps exit codes to log messages.
try:
    main()
except SystemExit as e:
    # Log a human-readable reason for the failure, then re-raise so the
    # exit code still propagates to the framework.
    if e.args[0]:
        clean_exit(e.args[0])
    raise
else:
    # Normal completion: report success when the interpreter exits.
    atexit.register(clean_exit, 0)
| [
"gdal.GetDriverByName",
"gdal.ParseCommandLine",
"pandas.to_datetime",
"os.remove",
"gdal.SetConfigOption",
"numpy.exp",
"numpy.empty",
"geopandas.GeoDataFrame",
"atexit.register",
"numpy.ceil",
"gdal.Translate",
"pandas.DatetimeIndex",
"numpy.isnan",
"numpy.vectorize",
"gdal.Open",
"u... | [((85, 100), 'cioppy.Cioppy', 'cioppy.Cioppy', ([], {}), '()\n', (98, 100), False, 'import cioppy\n'), ((854, 873), 'urllib.parse.urlparse', 'urlparse', (['enclosure'], {}), '(enclosure)\n', (862, 873), False, 'from urllib.parse import urlparse\n'), ((2597, 2642), 'numpy.vectorize', 'np.vectorize', (['remove_nan'], {'otypes': '[np.float64]'}), '(remove_nan, otypes=[np.float64])\n', (2609, 2642), True, 'import numpy as np\n'), ((4260, 4298), 'gdal.Open', 'gdal.Open', (['input_tif', 'gdal.OF_READONLY'], {}), '(input_tif, gdal.OF_READONLY)\n', (4269, 4298), False, 'import gdal\n'), ((4304, 4356), 'gdal.SetConfigOption', 'gdal.SetConfigOption', (['"""COMPRESS_OVERVIEW"""', '"""DEFLATE"""'], {}), "('COMPRESS_OVERVIEW', 'DEFLATE')\n", (4324, 4356), False, 'import gdal\n'), ((4434, 4454), 'gdal.Open', 'gdal.Open', (['input_tif'], {}), '(input_tif)\n', (4443, 4454), False, 'import gdal\n'), ((4459, 4516), 'gdal.Translate', 'gdal.Translate', (['output_tif', 'ds'], {'options': 'translate_options'}), '(output_tif, ds, options=translate_options)\n', (4473, 4516), False, 'import gdal\n'), ((4617, 4637), 'os.remove', 'os.remove', (['input_tif'], {}), '(input_tif)\n', (4626, 4637), False, 'import os\n'), ((4656, 4678), 'os.chdir', 'os.chdir', (['ciop.tmp_dir'], {}), '(ciop.tmp_dir)\n', (4664, 4678), False, 'import os\n'), ((8006, 8036), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['temp_results'], {}), '(temp_results)\n', (8022, 8036), True, 'import geopandas as gpd\n'), ((8133, 8165), 'pandas.to_datetime', 'pd.to_datetime', (['search.startdate'], {}), '(search.startdate)\n', (8147, 8165), True, 'import pandas as pd\n'), ((8193, 8223), 'pandas.to_datetime', 'pd.to_datetime', (['search.enddate'], {}), '(search.enddate)\n', (8207, 8223), True, 'import pandas as pd\n'), ((8619, 8634), 'gdal.Open', 'gdal.Open', (['temp'], {}), '(temp)\n', (8628, 8634), False, 'import gdal\n'), ((9010, 9072), 'numpy.zeros', 'np.zeros', (['(ds_date_index, ds_x_index, ds_y_index)'], {'dtype': 
'float'}), '((ds_date_index, ds_x_index, ds_y_index), dtype=float)\n', (9018, 9072), True, 'import numpy as np\n'), ((10091, 10148), 'numpy.zeros', 'np.zeros', (['(result.shape[1], result.shape[2])'], {'dtype': 'float'}), '((result.shape[1], result.shape[2]), dtype=float)\n', (10099, 10148), True, 'import numpy as np\n'), ((10355, 10399), 'numpy.vectorize', 'np.vectorize', (['inv_logit'], {'otypes': '[np.float64]'}), '(inv_logit, otypes=[np.float64])\n', (10367, 10399), True, 'import numpy as np\n'), ((10981, 11010), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (11001, 11010), False, 'import gdal\n'), ((12604, 12634), 'atexit.register', 'atexit.register', (['clean_exit', '(0)'], {}), '(clean_exit, 0)\n', (12619, 12634), False, 'import atexit\n'), ((2846, 2866), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (2854, 2866), True, 'import numpy as np\n'), ((3506, 3526), 'numpy.empty', 'np.empty', (['x.shape[0]'], {}), '(x.shape[0])\n', (3514, 3526), True, 'import numpy as np\n'), ((3764, 3777), 'numpy.sum', 'np.sum', (['(x * y)'], {}), '(x * y)\n', (3770, 3777), True, 'import numpy as np\n'), ((3901, 3910), 'numpy.exp', 'np.exp', (['p'], {}), '(p)\n', (3907, 3910), True, 'import numpy as np\n'), ((4019, 4107), 'gdal.ParseCommandLine', 'gdal.ParseCommandLine', (['"""-co TILED=YES -co COPY_SRC_OVERVIEWS=YES -co COMPRESS=LZW"""'], {}), "(\n '-co TILED=YES -co COPY_SRC_OVERVIEWS=YES -co COMPRESS=LZW')\n", (4040, 4107), False, 'import gdal\n'), ((9086, 9112), 'numpy.ceil', 'np.ceil', (['(ds_x_index / 1000)'], {}), '(ds_x_index / 1000)\n', (9093, 9112), True, 'import numpy as np\n'), ((9128, 9154), 'numpy.ceil', 'np.ceil', (['(ds_y_index / 1000)'], {}), '(ds_y_index / 1000)\n', (9135, 9154), True, 'import numpy as np\n'), ((12354, 12393), 'os.path.join', 'os.path.join', (['ciop.tmp_dir', 'output_name'], {}), '(ciop.tmp_dir, output_name)\n', (12366, 12393), False, 'import os\n'), ((12427, 12469), 'os.path.join', 
'os.path.join', (['ciop.tmp_dir', 'out_properties'], {}), '(ciop.tmp_dir, out_properties)\n', (12439, 12469), False, 'import os\n'), ((2036, 2072), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {'name': '"""date"""'}), "(dates, name='date')\n", (2052, 2072), True, 'import pandas as pd\n'), ((2344, 2401), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["ds['rainfall'].date.values"], {'name': '"""date"""'}), "(ds['rainfall'].date.values, name='date')\n", (2360, 2401), True, 'import pandas as pd\n'), ((2550, 2561), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2558, 2561), True, 'import numpy as np\n'), ((2931, 2945), 'numpy.isnan', 'np.isnan', (['x[i]'], {}), '(x[i])\n', (2939, 2945), True, 'import numpy as np\n'), ((3709, 3720), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (3717, 3720), True, 'import numpy as np\n'), ((3734, 3745), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (3742, 3745), True, 'import numpy as np\n'), ((9554, 9640), 'numpy.apply_along_axis', 'np.apply_along_axis', (['percentile', '(0)', "ds['rainfall'][:, x_low:x_high, y_low:y_high]"], {}), "(percentile, 0, ds['rainfall'][:, x_low:x_high, y_low:\n y_high])\n", (9573, 9640), True, 'import numpy as np\n'), ((2803, 2814), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2811, 2814), True, 'import numpy as np\n'), ((3923, 3932), 'numpy.exp', 'np.exp', (['p'], {}), '(p)\n', (3929, 3932), True, 'import numpy as np\n'), ((3155, 3169), 'numpy.isnan', 'np.isnan', (['x[j]'], {}), '(x[j])\n', (3163, 3169), True, 'import numpy as np\n')] |
import numpy as np
import sys
BEGIN_DEBUG = True  # master switch: set to False to silence log()

def log(string, filename="log.txt"):
    """
    Append *string*, framed by separator lines, to the debug log file
    ``E:\\<filename>`` (only when BEGIN_DEBUG is True).

    Fixes over the previous version: numpy's print threshold is set with
    ``sys.maxsize`` ('nan' raises in modern numpy), the output file is
    closed via a context manager instead of being leaked, and stdout is
    restored in a ``finally`` block even if a print fails.
    """
    # Print arrays in full rather than numpy's truncated summary.
    np.set_printoptions(threshold=sys.maxsize)
    if BEGIN_DEBUG:
        original_stdout = sys.stdout
        with open("E:\\" + filename, "a") as outputfile:
            sys.stdout = outputfile
            try:
                print("------------------------")
                print(string)
                print("------------------------")
            finally:
                sys.stdout = original_stdout
| [
"numpy.set_printoptions"
] | [((89, 125), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '"""nan"""'}), "(threshold='nan')\n", (108, 125), True, 'import numpy as np\n')] |
""" Demonstrates how to define a multivariate Gaussian Random Field,
sample a realization and plot it.
"""
import numpy as np
import torch
from meslas.means import LinearMean
from meslas.covariance.spatial_covariance_functions import Matern32
from meslas.covariance.cross_covariances import UniformMixing
from meslas.covariance.heterotopic import FactorCovariance
from meslas.geometry.grid import TriangularGrid
from meslas.random_fields import GRF, DiscreteGRF
from meslas.sensor_plotting import DiscreteSensor
from meslas.plotting import plot_grid_values, plot_grid_probas
from torch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.utils.cholesky import psd_safe_cholesky
# ------------------------------------------------------
# MODEL SPECIFICATION
# ------------------------------------------------------
# Dimension of the response.
n_out = 2
# Spatial Covariance.
matern_cov = Matern32(lmbda=0.1, sigma=1.0)
# Cross covariance.
cross_cov = UniformMixing(gamma0=0.9, sigmas=[np.sqrt(0.25), np.sqrt(0.6)])
covariance = FactorCovariance(matern_cov, cross_cov, n_out=n_out)
# Specify mean function, here it is a linear trend that decreases with the
# horizontal coordinate.
beta0s = np.array([5.8, 24.0])
beta1s = np.array([
        [0, -4.0],
        [0, -3.8]])
mean = LinearMean(beta0s, beta1s)
# Create the GRF.
myGRF = GRF(mean, covariance)
# ------------------------------------------------------
# DISCRETIZE EVERYTHING
# ------------------------------------------------------
# Create a regular equilateral triangular grid in 2 dims.
# The argument specified the number of cells along 1 dimension, hence total
# size of the grid is roughly the square of this number.
my_grid = TriangularGrid(21)
print("Working on an equilateral triangular grid with {} nodes.".format(my_grid.n_points))
# Discretize the GRF on a grid and be done with it.
# From now on we only consider locations on the grid.
my_discrete_grf = DiscreteGRF.from_model(myGRF, my_grid)
# ------------------------------------------------------
# Sample and plot
# ------------------------------------------------------
# Sample all components at all locations.
sample = my_discrete_grf.sample()
plot_grid_values(my_grid, sample)
# ------------------------------------------------------
# Observe some data.
# ------------------------------------------------------
# Data observations must be specified by a so-called generalized location.
# A generalized location is a couple of vectors (S, L). The first vector
# specifies WHERE the observations have been made, whereas the second vector
# indicates WHICH component was measured at that location (remember we are
# considering multivariate GRFs).
#
# Example consider S = [[0, 0], [-1, -1]] and L = [0, 8].
# Then, the generalized location (S, L) describes a set of observations that
# consists of one observation of the 0-th component of the field at the points
# (0,0) and one observation of the 8-th component of the field at (-1, -1).
# WARNING: python used 0-based indices, to 0-th component translates to first
# component in math language.
S_y = torch.tensor([[0.2, 0.1], [0.2, 0.2], [0.2, 0.3],
        [0.2, 0.4], [0.2, 0.5], [0.2, 0.6],
        [0.2, 0.7], [0.2, 0.8], [0.2, 0.9], [0.2, 1.0],
        [0.6, 0.5]])
L_y = torch.tensor([0, 0, 0, 0, 0, 1, 1, 0 ,0 ,0, 0]).long()
# Now define which value were observed. Here we arbitrarily set all values to
# -5.
y = torch.tensor(11*[-6])
# Now integrate this data to the model (i.e. compute the conditional
# distribution of the GRF.
#
# SUBTLETY: we are here working with a discrete GRF, i.e. a GRF on a grid, that
# maintains a mean vector and covariance matrix for ALL grid points and only
# understands data that is on the grid.
# This means that this GP takes as spatial inputs integers that define the
# index of the given point in the grid array.
#
# Hence, in this setting, coordinates should be projected back to the grid in
# order for the GP to understand them.
S_y_inds = my_grid.get_closest(S_y) # Get grid index of closest nodes.
# Note that working on a grid an never stepping outside of it is the usual
# setting in adaptive designs.
# One can work with the non-discretized GP class otherwise.
#
# Per-response-component observation noise standard deviations.
noise_std = torch.tensor([0.1, 0.1])
# Condition the model.
my_discrete_grf.update(S_y_inds, L_y, y, noise_std=noise_std)
# Plot conditional mean.
plot_grid_values(my_grid, my_discrete_grf.mean_vec)
| [
"meslas.means.LinearMean",
"meslas.random_fields.DiscreteGRF.from_model",
"meslas.plotting.plot_grid_values",
"meslas.covariance.spatial_covariance_functions.Matern32",
"numpy.sqrt",
"numpy.array",
"torch.tensor",
"meslas.covariance.heterotopic.FactorCovariance",
"meslas.random_fields.GRF",
"mesla... | [((782, 812), 'meslas.covariance.spatial_covariance_functions.Matern32', 'Matern32', ([], {'lmbda': '(0.1)', 'sigma': '(1.0)'}), '(lmbda=0.1, sigma=1.0)\n', (790, 812), False, 'from meslas.covariance.spatial_covariance_functions import Matern32\n'), ((924, 976), 'meslas.covariance.heterotopic.FactorCovariance', 'FactorCovariance', (['matern_cov', 'cross_cov'], {'n_out': 'n_out'}), '(matern_cov, cross_cov, n_out=n_out)\n', (940, 976), False, 'from meslas.covariance.heterotopic import FactorCovariance\n'), ((1087, 1108), 'numpy.array', 'np.array', (['[5.8, 24.0]'], {}), '([5.8, 24.0])\n', (1095, 1108), True, 'import numpy as np\n'), ((1118, 1150), 'numpy.array', 'np.array', (['[[0, -4.0], [0, -3.8]]'], {}), '([[0, -4.0], [0, -3.8]])\n', (1126, 1150), True, 'import numpy as np\n'), ((1175, 1201), 'meslas.means.LinearMean', 'LinearMean', (['beta0s', 'beta1s'], {}), '(beta0s, beta1s)\n', (1185, 1201), False, 'from meslas.means import LinearMean\n'), ((1229, 1250), 'meslas.random_fields.GRF', 'GRF', (['mean', 'covariance'], {}), '(mean, covariance)\n', (1232, 1250), False, 'from meslas.random_fields import GRF, DiscreteGRF\n'), ((1591, 1609), 'meslas.geometry.grid.TriangularGrid', 'TriangularGrid', (['(21)'], {}), '(21)\n', (1605, 1609), False, 'from meslas.geometry.grid import TriangularGrid\n'), ((1826, 1864), 'meslas.random_fields.DiscreteGRF.from_model', 'DiscreteGRF.from_model', (['myGRF', 'my_grid'], {}), '(myGRF, my_grid)\n', (1848, 1864), False, 'from meslas.random_fields import GRF, DiscreteGRF\n'), ((2074, 2107), 'meslas.plotting.plot_grid_values', 'plot_grid_values', (['my_grid', 'sample'], {}), '(my_grid, sample)\n', (2090, 2107), False, 'from meslas.plotting import plot_grid_values, plot_grid_probas\n'), ((2987, 3138), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.1], [0.2, 0.2], [0.2, 0.3], [0.2, 0.4], [0.2, 0.5], [0.2, 0.6], [\n 0.2, 0.7], [0.2, 0.8], [0.2, 0.9], [0.2, 1.0], [0.6, 0.5]]'], {}), '([[0.2, 0.1], [0.2, 0.2], [0.2, 0.3], [0.2, 0.4], 
[0.2, 0.5], [\n 0.2, 0.6], [0.2, 0.7], [0.2, 0.8], [0.2, 0.9], [0.2, 1.0], [0.6, 0.5]])\n', (2999, 3138), False, 'import torch\n'), ((3308, 3331), 'torch.tensor', 'torch.tensor', (['(11 * [-6])'], {}), '(11 * [-6])\n', (3320, 3331), False, 'import torch\n'), ((4119, 4143), 'torch.tensor', 'torch.tensor', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (4131, 4143), False, 'import torch\n'), ((4256, 4307), 'meslas.plotting.plot_grid_values', 'plot_grid_values', (['my_grid', 'my_discrete_grf.mean_vec'], {}), '(my_grid, my_discrete_grf.mean_vec)\n', (4272, 4307), False, 'from meslas.plotting import plot_grid_values, plot_grid_probas\n'), ((3164, 3211), 'torch.tensor', 'torch.tensor', (['[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0])\n', (3176, 3211), False, 'import torch\n'), ((880, 893), 'numpy.sqrt', 'np.sqrt', (['(0.25)'], {}), '(0.25)\n', (887, 893), True, 'import numpy as np\n'), ((895, 907), 'numpy.sqrt', 'np.sqrt', (['(0.6)'], {}), '(0.6)\n', (902, 907), True, 'import numpy as np\n')] |
import epics
import time
import pylab as P
import os
import matplotlib.pyplot as plt
import numpy
from scipy import signal
import glob
from datetime import datetime
# Scratch file written by the external `extractdata` binary and read
# back with numpy.loadtxt in plot_most_recent_data().
outfname = "/tmp/data.txt"
pwelch_ratio = 8; # to set size of nperseg (nperseg ~ n_points / pwelch_ratio, rounded down to a power of two)
def extract_data(datafp, first=0, last=0, filt=1):
    """
    Run the external `extractdata` helper to pull channels
    [first, last] out of the .dat file *datafp* into `outfname`,
    optionally applying its filter (filt).

    Raises FileNotFoundError if the helper binary is missing and
    subprocess.CalledProcessError if it exits non-zero.
    """
    import subprocess  # local import keeps the module import block unchanged
    cmd = ["/usr/local/controls/Applications/smurf/smurftestapps/extractdata",
           datafp, outfname, str(first), str(last), str(filt)]
    print("extract command:", " ".join(cmd))
    # An argument list (no shell) means paths with spaces or shell
    # metacharacters can neither break nor inject into the command line,
    # unlike the previous os.system() string; check=True fails loudly
    # instead of silently ignoring the exit status.
    subprocess.run(cmd, check=True)
def plot_most_recent_data(channels, labels = None, downsample = 1, flt=1):
    """
    Extract the given channels from the most recent SMuRF .dat file and
    plot each channel's mean-subtracted timestream (top axes) together
    with its Welch amplitude spectral density (bottom axes, log-log).

    Parameters
    ----------
    channels : iterable of int
        Channel numbers to plot (offset by 2*512 in the mask lookup —
        presumably a band offset; TODO confirm).
    labels : list of str, optional
        Per-channel legend labels; defaults to "Channel <n>".
    downsample : int
        Keep every `downsample`-th sample of the extracted data.
    flt : int
        Filter flag passed straight through to extract_data().
    """
    #list_of_dat_files = glob.glob('/data/smurf_data/%s/*/outputs/*.dat*'%datetime.now().strftime('%Y%m%d'))
    # NOTE(review): date is hard-coded to 20190215 instead of using the
    # commented-out datetime.now() form above.
    list_of_dat_files = glob.glob('/data/smurf_data/%s/*/outputs/*.dat*'%'20190215')
    latest_file = max(list_of_dat_files, key=os.path.getctime)
    datafp=latest_file
    # NOTE(review): ad-hoc substitution of one run number for another.
    datafp=datafp.replace('00005','00004')
    maskfname = datafp.split('.')[0]+'_mask.txt'
    print('latest_file=%s'%latest_file)
    print('maskfname=%s'%maskfname)
    mask = numpy.loadtxt(maskfname)
    fig = plt.figure(figsize = (10,8))
    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    ln = 0
    # Running axis limits accumulated over all plotted channels.
    tmin = numpy.inf
    tmax = 0
    fmin = numpy.inf
    fmax = 0
    for chn in channels:
        try:
            # Map the requested channel number to its position in the mask.
            channel = numpy.arange(len(mask))[mask == (chn+2*512)][0]
        except IndexError:
            # Channel not present in the mask: skip it.
            continue
        print(channel)
        print("Extracting channel %d" %channel)
        extract_data(datafp, channel, channel, flt) # executes C command
        print("done extracting")
        dat = numpy.loadtxt(outfname) # loads text data (numpy)
        print("data shape", dat.shape)
        dat = dat[::downsample,:]
        s = dat.shape
        points = s[0] #how many points in the dataset
        tm = dat[:,0]
        tmin = min(tm.min(),tmin)
        tmax = max(tm.max(),tmax)
        # Mean sample spacing, used as 1/fs for the Welch estimate.
        tmd = numpy.diff(tm)
        tstep = sum(tmd) / len(tmd)
        print("min time", min(tm), "max time", max(tm))
        print(tm[0:10])
        tmp = int(points / pwelch_ratio)
        tmp2 = P.log(tmp) / P.log(2) # get in powers of two
        tmp3 = int(tmp2)
        # NOTE(review): local 'np' is the Welch nperseg value, not numpy
        # (this module imports numpy as 'numpy', so nothing is shadowed).
        np = pow(2,tmp3)
        print("nperseg = ", np)
        print("tstep = ", tstep)
        if labels is None:
            label = "Channel %d" %(chn)
        else:
            label = labels[ln]
        ax1.plot(tm,dat[:,1]-dat[:,1].mean(), label = label, alpha = 0.5)
        #ax1.plot(tm,dat[:,1], label = "Channel %d" %(chn), alpha = 0.5)
        fx, pden = signal.welch(dat[:,1]-dat[:,1].mean(), 1.0/tstep, nperseg = np)
        fmin = min(fmin,fx[1:].min())
        fmax = max(fmax,fx.max())
        #ax2.plot(fx, pden, '-', label = label, alpha = 0.5)
        # Plot amplitude (sqrt of power) spectral density.
        ax2.plot(fx, numpy.sqrt(pden), '-', label = label, alpha = 0.5)
        ln += 1
    #for j in range(0, lastch+1-firstch):
    #    print("plotting", j)
    #    plt.plot(tm, dat[:,j+1])
    ax1.grid()
    ax1.legend()
    ax1.set_xlim(tmin,tmax)
    ax1.set_xlabel("Time (seconds)")
    #ax1.set_ylabel("Phase ($\Phi_0$)")
    ax1.set_ylabel("Phase (?)")
    ax2.grid()
    ax2.set_xlim(fmin,fmax)
    ax2.set_xlabel("Frequency (Hz)")
    #ax2.set_ylabel("Power Spectral Density ($\Phi_0^2$ / Hz)")
    ax2.set_ylabel("Spectral Density (? / $\sqrt{Hz}$)")
    ax2.set_xscale('log')
    ax2.set_yscale('log')
    fig.tight_layout()
    plt.show()
| [
"numpy.sqrt",
"numpy.diff",
"matplotlib.pyplot.figure",
"os.system",
"numpy.loadtxt",
"pylab.log",
"glob.glob",
"matplotlib.pyplot.show"
] | [((482, 496), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (491, 496), False, 'import os\n'), ((711, 773), 'glob.glob', 'glob.glob', (["('/data/smurf_data/%s/*/outputs/*.dat*' % '20190215')"], {}), "('/data/smurf_data/%s/*/outputs/*.dat*' % '20190215')\n", (720, 773), False, 'import glob\n'), ((1039, 1063), 'numpy.loadtxt', 'numpy.loadtxt', (['maskfname'], {}), '(maskfname)\n', (1052, 1063), False, 'import numpy\n'), ((1075, 1102), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1085, 1102), True, 'import matplotlib.pyplot as plt\n'), ((3291, 3301), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3299, 3301), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1597), 'numpy.loadtxt', 'numpy.loadtxt', (['outfname'], {}), '(outfname)\n', (1587, 1597), False, 'import numpy\n'), ((1861, 1875), 'numpy.diff', 'numpy.diff', (['tm'], {}), '(tm)\n', (1871, 1875), False, 'import numpy\n'), ((2041, 2051), 'pylab.log', 'P.log', (['tmp'], {}), '(tmp)\n', (2046, 2051), True, 'import pylab as P\n'), ((2054, 2062), 'pylab.log', 'P.log', (['(2)'], {}), '(2)\n', (2059, 2062), True, 'import pylab as P\n'), ((2666, 2682), 'numpy.sqrt', 'numpy.sqrt', (['pden'], {}), '(pden)\n', (2676, 2682), False, 'import numpy\n')] |
"""Financial projection for photovailtic system."""
# Utilities
import numpy as np
# Local
from .models import FinancialCalc
# Functions
def financial_calc(energy_by_month, kwh_cost, module, inverter, system):
    """
    Returns:
        - The monthly money savings on the electricity bill derived from
          the PV system.
        - The investment return time of the PV system.
    """
    cost_model = FinancialCalc(kwh_cost, module, inverter, system)
    cost_model.calc_cost()
    monthly_savings = _savings(energy_by_month, kwh_cost)
    payback_time = _investment_return(cost_model.total_cost_, monthly_savings)
    return monthly_savings, payback_time
def average_paymet_to_kwh_info(average_payment, fee):
    """
    Estimate the kWh cost and consumption from an average bill.

    Parameters
    ----------
    average_payment : float
        Average amount paid per billing period.
    fee : str
        ``'DAC'`` selects the flat high-consumption fee; any other value
        selects the tiered residential fee.

    Returns
    -------
    tuple(float, float)
        - The kWh cost (weighted average over the tiers consumed).
        - The amount of kWh consumed in the period.

    References
    ----------
    .. [1] https://app.cfe.mx/Aplicaciones/CCFE/Tarifas/TarifasCRECasa/Tarifas/Tarifa1.aspx
    """
    # DAC fee: single flat rate per kWh
    DAC_fee = 4.34
    # Bug fix: the DAC check must run before any local named `fee` exists;
    # previously the parameter was shadowed by the rate list, so the
    # `fee == 'DAC'` branch was unreachable.
    if fee == 'DAC':
        return DAC_fee, average_payment / DAC_fee
    # Tiered fee: kWh limits of the first two tiers (the third tier is
    # unbounded) and the cost per kWh of each tier.
    limit_energy = [128, 171]
    tier_rates = [0.793, 1.13, 2.964]
    count = average_payment   # money still unassigned to a tier
    cost = []
    consume = []
    for i in range(2):
        tier_full_cost = limit_energy[i] * tier_rates[i]
        if tier_full_cost < count:
            # the whole tier is consumed; subtract its *cost* (money),
            # not its energy, from the remaining payment
            cost.append(tier_full_cost)
            consume.append(limit_energy[i])
            count -= tier_full_cost
        else:
            # remaining payment falls entirely inside this tier
            cost.append(count)
            consume.append(count / tier_rates[i])
            count = 0
            break
    if count > 0:
        # leftover money is billed at the top (unbounded) tier
        cost.append(count)
        consume.append(count / tier_rates[2])
    # Total consumption over all tiers
    mean_consume = np.sum(np.array(consume))
    if mean_consume == 0:
        # zero payment: avoid division by zero
        return 0.0, 0.0
    # Weighted-average cost per kWh = total payment / total consumption
    kWh_cost = np.sum(np.array(cost)) / mean_consume
    return kWh_cost, mean_consume
def _savings(energy_by_month, kwh_cost):
"""
Returns savings by month.
"""
savings = {}
for month, energy in energy_by_month.items():
savings[month] = energy*kwh_cost
return savings
def _investment_return(total_cost, savings):
"""
Returns investment return time.
"""
# Simple return
anual_savigs = np.sum(list(savings.values()))
time = total_cost/anual_savigs
return time
| [
"numpy.array"
] | [((1651, 1668), 'numpy.array', 'np.array', (['consume'], {}), '(consume)\n', (1659, 1668), True, 'import numpy as np\n'), ((1716, 1730), 'numpy.array', 'np.array', (['cost'], {}), '(cost)\n', (1724, 1730), True, 'import numpy as np\n')] |
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
# coding: utf-8
"""
Adds extra extendable components to the clustered and simplified network.
Relevant Settings
-----------------
.. code:: yaml
costs:
year:
USD2013_to_EUR2013:
dicountrate:
emission_prices:
electricity:
max_hours:
marginal_cost:
capital_cost:
extendable_carriers:
StorageUnit:
Store:
.. seealso::
Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`,
:ref:`electricity_cf`
Inputs
------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
Outputs
-------
- ``networks/elec_s{simpl}_{clusters}_ec.nc``:
Description
-----------
The rule :mod:`add_extra_components` attaches additional extendable components to the clustered and simplified network. These can be configured in the ``config.yaml`` at ``electricity: extendable_carriers:``. It processes ``networks/elec_s{simpl}_{clusters}.nc`` to build ``networks/elec_s{simpl}_{clusters}_ec.nc``, which in contrast to the former (depending on the configuration) contain with **zero** initial capacity
- ``StorageUnits`` of carrier 'H2' and/or 'battery'. If this option is chosen, every bus is given an extendable ``StorageUnit`` of the corresponding carrier. The energy and power capacities are linked through a parameter that specifies the energy capacity as maximum hours at full dispatch power and is configured in ``electricity: max_hours:``. This linkage leads to one investment variable per storage unit. The default ``max_hours`` lead to long-term hydrogen and short-term battery storage units.
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
"""
import logging
from _helpers import configure_logging
import pypsa
import pandas as pd
import numpy as np
from add_electricity import (load_costs, add_nice_carrier_names,
_add_missing_carriers_from_costs)
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def attach_storageunits(n, costs):
    """Add an extendable StorageUnit of each configured carrier to every bus.

    Carriers and their energy/power ratio (``max_hours``) come from the
    ``electricity`` section of the snakemake config; cost and efficiency
    assumptions come from the `costs` table.
    """
    elec_cfg = snakemake.config['electricity']
    su_carriers = elec_cfg['extendable_carriers']['StorageUnit']
    max_hours = elec_cfg['max_hours']

    _add_missing_carriers_from_costs(n, costs, su_carriers)

    buses_i = n.buses.index

    # technologies whose efficiencies describe charging / discharging
    store_tech = {"H2": "electrolysis", "battery": "battery inverter"}
    dispatch_tech = {"H2": "fuel cell", "battery": "battery inverter"}

    for carrier in su_carriers:
        n.madd("StorageUnit", buses_i, ' ' + carrier,
               bus=buses_i,
               carrier=carrier,
               p_nom_extendable=True,
               capital_cost=costs.at[carrier, 'capital_cost'],
               marginal_cost=costs.at[carrier, 'marginal_cost'],
               efficiency_store=costs.at[store_tech[carrier], 'efficiency'],
               efficiency_dispatch=costs.at[dispatch_tech[carrier], 'efficiency'],
               max_hours=max_hours[carrier],
               cyclic_state_of_charge=True)
def attach_stores(n,n_H2, costs):
    """Attach extendable energy Stores (H2 and/or battery) to network `n`.

    For each configured carrier an auxiliary bus per electrical bus is
    created; the Store sits on that bus and is coupled to the power bus
    via charger/discharger Links.  `n_H2` supplies per-bus salt-cavern
    hydrogen storage potentials that cap the H2 Store energy capacity
    where such potential exists.
    """
    elec_opts = snakemake.config['electricity']
    carriers = elec_opts['extendable_carriers']['Store']
    _add_missing_carriers_from_costs(n, costs, carriers)
    buses_i = n.buses.index
    # coordinates/country copied onto the auxiliary buses
    bus_sub_dict = {k: n.buses[k].values for k in ['x', 'y', 'country']}
    if 'H2' in carriers:
        # One extra H2 bus per electrical bus.  This does not change the
        # existing buses; it only appends rows to the buses table (the new
        # index is e.g. "DE H2" for original bus "DE").
        h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)
        # buses with salt-cavern potential get a capacity-limited Store
        h2_buses_salt_cave_i = n_H2.buses[n_H2.buses.hydrogen_storage_potential_MWh>0].index
        energy_capacity_H2_salt_cave = n_H2.buses\
            .loc[h2_buses_salt_cave_i,'hydrogen_storage_potential_MWh'].tolist()
        h2_buses_salt_cave_i = h2_buses_salt_cave_i + " H2"
        # all remaining H2 buses fall back to (unbounded) tank storage
        h2_buses_tank_i = h2_buses_i.difference(h2_buses_salt_cave_i)
        n.madd("Store", h2_buses_salt_cave_i,
               bus=h2_buses_salt_cave_i,
               carrier='H2',
               e_nom_extendable=True,
               e_nom_max=energy_capacity_H2_salt_cave,
               e_cyclic=True,
               capital_cost=costs.at["hydrogen storage", "capital_cost"])
        # NOTE(review): tank storage probably warrants a different capital
        # cost than underground storage -- confirm before production use
        n.madd("Store", h2_buses_tank_i,
               bus=h2_buses_tank_i,
               carrier='H2',
               e_nom_extendable=True,
               e_cyclic=True,
               capital_cost=costs.at["hydrogen storage", "capital_cost"])
        logger.info('#'*50)
        logger.info('#'*50)
        logger.info('finish adding hydrogen storage')
        # electrolysis converts power -> H2, fuel cells convert H2 -> power
        n.madd("Link", h2_buses_i + " Electrolysis",
               bus0=buses_i,
               bus1=h2_buses_i,
               carrier='H2 electrolysis',
               p_nom_extendable=True,
               efficiency=costs.at["electrolysis", "efficiency"],
               capital_cost=costs.at["electrolysis", "capital_cost"])
        n.madd("Link", h2_buses_i + " Fuel Cell",
               bus0=h2_buses_i,
               bus1=buses_i,
               carrier='H2 fuel cell',
               p_nom_extendable=True,
               efficiency=costs.at["fuel cell", "efficiency"],
               #NB: fixed cost is per MWel
               capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"])
    if 'battery' in carriers:
        # battery Store plus inverter Links (charger/discharger) per bus
        b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict)
        n.madd("Store", b_buses_i,
               bus=b_buses_i,
               carrier='battery',
               e_cyclic=True,
               e_nom_extendable=True,
               capital_cost=costs.at['battery storage', 'capital_cost'])
        n.madd("Link", b_buses_i + " charger",
               bus0=buses_i,
               bus1=b_buses_i,
               carrier='battery charger',
               efficiency=costs.at['battery inverter', 'efficiency'],
               capital_cost=costs.at['battery inverter', 'capital_cost'],
               p_nom_extendable=True)
        n.madd("Link", b_buses_i + " discharger",
               bus0=b_buses_i,
               bus1=buses_i,
               carrier='battery discharger',
               efficiency=costs.at['battery inverter','efficiency'],
               capital_cost=costs.at['battery inverter', 'capital_cost'],
               p_nom_extendable=True)
def attach_hydrogen_pipelines(n, costs):
    """Add extendable H2 pipeline Links between the H2 buses of `n`.

    Pipeline candidates follow the existing electrical topology (AC lines
    plus DC links).  Requires hydrogen storage to be modelled as a
    Store-Link-Bus combination so the " H2" buses exist.
    """
    elec_opts = snakemake.config['electricity']
    ext_carriers = elec_opts['extendable_carriers']
    as_stores = ext_carriers.get('Store', [])
    if 'H2 pipeline' not in ext_carriers.get('Link',[]): return
    assert 'H2' in as_stores, ("Attaching hydrogen pipelines requires hydrogen "
            "storage to be modelled as Store-Link-Bus combination. See "
            "`config.yaml` at `electricity: extendable_carriers: Store:`.")
    # determine bus pairs from the electrical network topology
    attrs = ["bus0","bus1","length"]
    candidates = pd.concat([n.lines[attrs], n.links.query('carrier=="DC"')[attrs]])\
                    .reset_index(drop=True)
    # remove bus pair duplicates regardless of order of bus0 and bus1:
    # np.sort orders each (bus0, bus1) pair so "A-B" and "B-A" collide
    h2_links = candidates[~pd.DataFrame(np.sort(candidates[['bus0', 'bus1']])).duplicated()]
    h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1)
    # add bidirectional (p_min_pu=-1) pipelines between the H2 buses
    n.madd("Link",
           h2_links.index,
           bus0=h2_links.bus0.values + " H2",
           bus1=h2_links.bus1.values + " H2",
           p_min_pu=-1,
           p_nom_extendable=True,
           length=h2_links.length.values,
           capital_cost=costs.at['H2 pipeline','capital_cost']*h2_links.length,
           efficiency=costs.at['H2 pipeline','efficiency'],
           carrier="H2 pipeline")
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('add_extra_components', network='elec',
simpl='', clusters=5)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n_H2 = pypsa.Network(snakemake.input.network_h2)
logger.info('finish loading')
Nyears = n.snapshot_weightings.sum() / 8760.
costs = load_costs(Nyears, tech_costs=snakemake.input.tech_costs,
config=snakemake.config['costs'],
elec_config=snakemake.config['electricity'])
attach_storageunits(n, costs)
attach_stores(n,n_H2, costs)
attach_hydrogen_pipelines(n, costs)
add_nice_carrier_names(n, config=snakemake.config)
n.export_to_netcdf(snakemake.output[0])
| [
"logging.getLogger",
"numpy.sort",
"pypsa.Network",
"add_electricity.load_costs",
"add_electricity._add_missing_carriers_from_costs",
"add_electricity.add_nice_carrier_names",
"_helpers.mock_snakemake",
"_helpers.configure_logging"
] | [((2666, 2693), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2683, 2693), False, 'import logging\n'), ((2886, 2938), 'add_electricity._add_missing_carriers_from_costs', '_add_missing_carriers_from_costs', (['n', 'costs', 'carriers'], {}), '(n, costs, carriers)\n', (2918, 2938), False, 'from add_electricity import load_costs, add_nice_carrier_names, _add_missing_carriers_from_costs\n'), ((3824, 3876), 'add_electricity._add_missing_carriers_from_costs', '_add_missing_carriers_from_costs', (['n', 'costs', 'carriers'], {}), '(n, costs, carriers)\n', (3856, 3876), False, 'from add_electricity import load_costs, add_nice_carrier_names, _add_missing_carriers_from_costs\n'), ((8714, 8742), '_helpers.configure_logging', 'configure_logging', (['snakemake'], {}), '(snakemake)\n', (8731, 8742), False, 'from _helpers import configure_logging\n'), ((8752, 8790), 'pypsa.Network', 'pypsa.Network', (['snakemake.input.network'], {}), '(snakemake.input.network)\n', (8765, 8790), False, 'import pypsa\n'), ((8802, 8843), 'pypsa.Network', 'pypsa.Network', (['snakemake.input.network_h2'], {}), '(snakemake.input.network_h2)\n', (8815, 8843), False, 'import pypsa\n'), ((8939, 9080), 'add_electricity.load_costs', 'load_costs', (['Nyears'], {'tech_costs': 'snakemake.input.tech_costs', 'config': "snakemake.config['costs']", 'elec_config': "snakemake.config['electricity']"}), "(Nyears, tech_costs=snakemake.input.tech_costs, config=snakemake.\n config['costs'], elec_config=snakemake.config['electricity'])\n", (8949, 9080), False, 'from add_electricity import load_costs, add_nice_carrier_names, _add_missing_carriers_from_costs\n'), ((9235, 9285), 'add_electricity.add_nice_carrier_names', 'add_nice_carrier_names', (['n'], {'config': 'snakemake.config'}), '(n, config=snakemake.config)\n', (9257, 9285), False, 'from add_electricity import load_costs, add_nice_carrier_names, _add_missing_carriers_from_costs\n'), ((8599, 8675), '_helpers.mock_snakemake', 
'mock_snakemake', (['"""add_extra_components"""'], {'network': '"""elec"""', 'simpl': '""""""', 'clusters': '(5)'}), "('add_extra_components', network='elec', simpl='', clusters=5)\n", (8613, 8675), False, 'from _helpers import mock_snakemake\n'), ((7895, 7932), 'numpy.sort', 'np.sort', (["candidates[['bus0', 'bus1']]"], {}), "(candidates[['bus0', 'bus1']])\n", (7902, 7932), True, 'import numpy as np\n')] |
import numpy
# scipy.special for the sigmoid function expit()
import scipy.special
# neural network class definition
class neuralNetwork:
    """Fully-connected feed-forward network with a configurable number of
    sigmoid hidden layers, trained by stochastic gradient descent.

    Weights are kept as lists of matrices:
      - ``wih[i]``: maps layer i-1 activations to hidden layer i
        (shape (hnodes, inodes) for i == 0, else (hnodes, hnodes))
      - ``who[i]``: hidden -> output matrices; only ``who[hiddenlayers-1]``
        is used, the list length is kept for attribute compatibility
    """

    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, hiddenlayers, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.hiddenlayers = hiddenlayers
        # sample initial weights from N(0, n_in^-0.5)
        self.wih = []
        self.who = []
        for i in range(0, hiddenlayers):
            n_in = self.inodes if i == 0 else self.hnodes
            self.wih.append(numpy.random.normal(0.0, pow(n_in, -0.5), (self.hnodes, n_in)))
            # bug fix: the output matrix maps hidden -> output, so its shape
            # must be (onodes, hnodes) -- previously (onodes, n_in), which
            # broke whenever inodes != hnodes
            self.who.append(numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                                (self.onodes, self.hnodes)))
        # learning rate
        self.lr = learningrate
        # activation function is the sigmoid function
        self.activation_function = lambda x: scipy.special.expit(x)

    # train the neural network
    def train(self, inputs_list, targets_list):
        """Run one forward/backward pass on a single sample and update the
        weights in place.

        inputs_list / targets_list: 1-D sequences of input values and
        target output values.
        """
        # convert inputs list to 2d column vectors
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # forward pass, keeping each layer's activations for backprop
        activations = [inputs]
        for layer_weights in self.wih:
            activations.append(self.activation_function(
                numpy.dot(layer_weights, activations[-1])))
        out_w = self.who[self.hiddenlayers - 1]
        final_outputs = self.activation_function(numpy.dot(out_w, activations[-1]))
        # output layer error is the (target - actual)
        output_errors = targets - final_outputs
        # error attributed to the last hidden layer (pre-update weights)
        errors = numpy.dot(out_w.T, output_errors)
        out_w += self.lr * numpy.dot(
            output_errors * final_outputs * (1.0 - final_outputs),
            activations[-1].T)
        # bug fix: backpropagate layer by layer with each layer's own
        # activations; the original applied the output-layer update formula
        # to every layer using stale activations
        for layer in range(self.hiddenlayers - 1, -1, -1):
            act = activations[layer + 1]
            prev_errors = numpy.dot(self.wih[layer].T, errors)
            self.wih[layer] += self.lr * numpy.dot(
                errors * act * (1.0 - act), activations[layer].T)
            errors = prev_errors

    # query the neural network
    def query(self, inputs_list):
        """Feed `inputs_list` forward and return the output activations as a
        column vector of shape (onodes, 1)."""
        # convert inputs list to 2d column vector
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.queryHiddenLayers(inputs)
        # bug fix: self.who is a list of matrices; index the one in use
        # (previously numpy.dot received the whole list)
        final_inputs = numpy.dot(self.who[self.hiddenlayers - 1], hidden_outputs)
        return self.activation_function(final_inputs)

    def queryHiddenLayers(self, inputs):
        """Propagate a column vector through all hidden layers and return
        the activations of the last hidden layer."""
        outputs = inputs
        for layer_weights in self.wih:
            # bug fix: feed the *activated* outputs into the next layer
            # (previously the raw weighted sums were forwarded) and drop
            # the leftover debug prints
            outputs = self.activation_function(numpy.dot(layer_weights, outputs))
        return outputs
| [
"numpy.array",
"numpy.dot",
"numpy.transpose"
] | [((1864, 1922), 'numpy.dot', 'numpy.dot', (['self.who[self.hiddenlayers - 1]', 'hidden_outputs'], {}), '(self.who[self.hiddenlayers - 1], hidden_outputs)\n', (1873, 1922), False, 'import numpy\n'), ((3419, 3454), 'numpy.dot', 'numpy.dot', (['self.who', 'hidden_outputs'], {}), '(self.who, hidden_outputs)\n', (3428, 3454), False, 'import numpy\n'), ((1408, 1441), 'numpy.array', 'numpy.array', (['inputs_list'], {'ndmin': '(2)'}), '(inputs_list, ndmin=2)\n', (1419, 1441), False, 'import numpy\n'), ((1462, 1496), 'numpy.array', 'numpy.array', (['targets_list'], {'ndmin': '(2)'}), '(targets_list, ndmin=2)\n', (1473, 1496), False, 'import numpy\n'), ((2426, 2474), 'numpy.dot', 'numpy.dot', (['self.who[layerIndex].T', 'output_errors'], {}), '(self.who[layerIndex].T, output_errors)\n', (2435, 2474), False, 'import numpy\n'), ((3010, 3043), 'numpy.array', 'numpy.array', (['inputs_list'], {'ndmin': '(2)'}), '(inputs_list, ndmin=2)\n', (3021, 3043), False, 'import numpy\n'), ((3729, 3760), 'numpy.dot', 'numpy.dot', (['layerWeights', 'inputs'], {}), '(layerWeights, inputs)\n', (3738, 3760), False, 'import numpy\n'), ((2590, 2621), 'numpy.transpose', 'numpy.transpose', (['hidden_outputs'], {}), '(hidden_outputs)\n', (2605, 2621), False, 'import numpy\n'), ((2834, 2857), 'numpy.transpose', 'numpy.transpose', (['inputs'], {}), '(inputs)\n', (2849, 2857), False, 'import numpy\n')] |
import csv
import cv2
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D
import matplotlib.pyplot as plt
adjust_angles = [0, 0.25, -0.25]
def load_data():
    """
    Load images and steering angles collected in training mode.

    Reads data/driving_log.csv; for every row the center, left and right
    camera frames are loaded from data/IMG/ and preprocessed, and the
    steering angle is shifted per camera via `adjust_angles`.

    :return: (X_train, Y_train) numpy arrays of frames and angles
    """
    lines = []
    with open("data/driving_log.csv", "r") as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            lines.append(line)
    images = []
    measurements = []
    for line in lines:
        # columns 0..2 hold center/left/right image paths, column 3 the angle
        for i in range(3):
            source_path = line[i]
            filename = source_path.split("/")[-1]
            current_path = "data/IMG/" + filename
            image = cv2.imread(current_path)
            images.append(preprocess(image))
            # correct the steering angle for the off-center cameras
            measurement = float(line[3]) + adjust_angles[i]
            measurements.append(measurement)
    X_train = np.array(images)
    Y_train = np.array(measurements)
    return X_train, Y_train
def preprocess(image, verbose=False):
    """
    Crop and resize a single BGR frame for the network.

    :param image: input color frame in BGR format
    :param verbose: if true, show the intermediate images for debugging
    :return: preprocessed frame as float32
    """
    # drop the useless top/bottom bands, keep the road region
    cropped = image[50:140, :, :]
    # scale to the network input size
    resized = cv2.resize(cropped, dsize=(200, 66))
    if verbose:
        for fig_id, frame in enumerate((image, cropped, resized), start=1):
            plt.figure(fig_id)
            plt.imshow(cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB))
        plt.show()
    return resized.astype('float32')
def get_nvidia_model():
    """
    Build the NVIDIA end-to-end driving CNN (Keras 1 functional API).

    Input: (66, 200, 3) frame; output: a single steering-angle value.

    :return: keras Model of NVIDIA architecture
    """
    init = 'glorot_uniform'
    input_frame = Input(shape=(66, 200, 3))
    # standardize input to [-1, 1]
    x = Lambda(lambda z: z / 127.5 - 1.)(input_frame)
    # three strided 5x5 conv blocks, each followed by dropout
    x = Convolution2D(24, 5, 5, border_mode='valid', subsample=(2, 2), init=init, activation="relu")(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2), init=init, activation="relu")(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2), init=init, activation="relu")(x)
    x = Dropout(0.2)(x)
    # two unstrided 3x3 conv blocks
    x = Convolution2D(64, 3, 3, border_mode='valid', init=init, activation="relu")(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(64, 3, 3, border_mode='valid', init=init, activation="relu")(x)
    x = Dropout(0.2)(x)
    # fully-connected head: 100 -> 50 -> 10 -> 1 (steering angle)
    x = Flatten()(x)
    x = Dense(100, init=init)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(50, init=init)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(10, init=init)(x)
    x = Activation('relu')(x)
    out = Dense(1, init=init)(x)
    model = Model(input=input_frame, output=out)
    return model
def get_lenet_model():
    """
    Build a LeNet-style regression CNN (Keras 1 Sequential API).

    Input: (66, 200, 3) frame; output: a single steering-angle value.
    """
    model = Sequential()
    # standardize input to [-1, 1]
    model.add(Lambda(lambda x: x/127.5-1, input_shape=(66,200,3)))
    model.add(Convolution2D(6,5,5,activation="relu"))
    model.add(MaxPooling2D())
    model.add(Convolution2D(6,5,5,activation="relu"))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(120))
    model.add(Dense(84))
    # single regression output (steering angle)
    model.add(Dense(1))
    return model
if __name__ == '__main__':
    # split udacity csv data into training and validation
    X_train, Y_train = load_data()
    # get network model and compile it (default Adam opt)
    # nvidia_net = get_nvidia_model()
    # nvidia_net.compile(optimizer='adam', loss='mse')
    # nvidia_net.summary()
    # nvidia_net.fit(X_train, Y_train, shuffle=True, validation_split=0.2, nb_epoch=3)
    # nvidia_net.save("model.h5")
    # train the (smaller) LeNet variant instead of the NVIDIA net
    lenet = get_lenet_model()
    lenet.compile(optimizer='adam', loss='mse')
    lenet.summary()
    # hold out 20% of the samples for validation
    lenet.fit(X_train, Y_train, shuffle=True, validation_split=0.2, nb_epoch=3)
lenet.save("model1.h5") | [
"keras.layers.Convolution2D",
"cv2.imread",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.layers.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Input",
"keras.layers.Dropout",
"matplotlib.pyplot.figure",
"keras.models.Model",
"keras.layers.Activation",
"cv2.cv... | [((961, 977), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (969, 977), True, 'import numpy as np\n'), ((992, 1014), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (1000, 1014), True, 'import numpy as np\n'), ((1476, 1518), 'cv2.resize', 'cv2.resize', (['frame_cropped'], {'dsize': '(200, 66)'}), '(frame_cropped, dsize=(200, 66))\n', (1486, 1518), False, 'import cv2\n'), ((2026, 2051), 'keras.layers.Input', 'Input', ([], {'shape': '(66, 200, 3)'}), '(shape=(66, 200, 3))\n', (2031, 2051), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3039, 3075), 'keras.models.Model', 'Model', ([], {'input': 'input_frame', 'output': 'out'}), '(input=input_frame, output=out)\n', (3044, 3075), False, 'from keras.models import Model, Sequential\n'), ((3129, 3141), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3139, 3141), False, 'from keras.models import Model, Sequential\n'), ((450, 469), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (460, 469), False, 'import csv\n'), ((1796, 1806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1804, 1806), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2118), 'keras.layers.Lambda', 'Lambda', (['(lambda z: z / 127.5 - 1.0)'], {}), '(lambda z: z / 127.5 - 1.0)\n', (2091, 2118), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2140, 2236), 'keras.layers.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'subsample': '(2, 2)', 'init': 'init', 'activation': '"""relu"""'}), "(24, 5, 5, border_mode='valid', subsample=(2, 2), init=init,\n activation='relu')\n", (2153, 2236), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2244, 2256), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2251, 2256), 
False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2268, 2364), 'keras.layers.Convolution2D', 'Convolution2D', (['(36)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'subsample': '(2, 2)', 'init': 'init', 'activation': '"""relu"""'}), "(36, 5, 5, border_mode='valid', subsample=(2, 2), init=init,\n activation='relu')\n", (2281, 2364), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2372, 2384), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2379, 2384), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2396, 2492), 'keras.layers.Convolution2D', 'Convolution2D', (['(48)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'subsample': '(2, 2)', 'init': 'init', 'activation': '"""relu"""'}), "(48, 5, 5, border_mode='valid', subsample=(2, 2), init=init,\n activation='relu')\n", (2409, 2492), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2500, 2512), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2507, 2512), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2524, 2598), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'init': 'init', 'activation': '"""relu"""'}), "(64, 3, 3, border_mode='valid', init=init, activation='relu')\n", (2537, 2598), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2610, 2622), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2617, 2622), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2634, 2708), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', 
'(3)', '(3)'], {'border_mode': '"""valid"""', 'init': 'init', 'activation': '"""relu"""'}), "(64, 3, 3, border_mode='valid', init=init, activation='relu')\n", (2647, 2708), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2720, 2732), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2727, 2732), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2744, 2753), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2751, 2753), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2766, 2787), 'keras.layers.Dense', 'Dense', (['(100)'], {'init': 'init'}), '(100, init=init)\n', (2771, 2787), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2799, 2817), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2809, 2817), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2829, 2841), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2836, 2841), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2853, 2873), 'keras.layers.Dense', 'Dense', (['(50)'], {'init': 'init'}), '(50, init=init)\n', (2858, 2873), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2885, 2903), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2895, 2903), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2915, 2927), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2922, 2927), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, 
Activation, MaxPooling2D\n'), ((2939, 2959), 'keras.layers.Dense', 'Dense', (['(10)'], {'init': 'init'}), '(10, init=init)\n', (2944, 2959), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((2971, 2989), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2981, 2989), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3003, 3022), 'keras.layers.Dense', 'Dense', (['(1)'], {'init': 'init'}), '(1, init=init)\n', (3008, 3022), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3156, 3213), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 127.5 - 1)'], {'input_shape': '(66, 200, 3)'}), '(lambda x: x / 127.5 - 1, input_shape=(66, 200, 3))\n', (3162, 3213), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3223, 3264), 'keras.layers.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (3236, 3264), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3277, 3291), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (3289, 3291), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3307, 3348), 'keras.layers.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (3320, 3348), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3361, 3375), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (3373, 3375), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), 
((3391, 3400), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3398, 3400), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3416, 3426), 'keras.layers.Dense', 'Dense', (['(120)'], {}), '(120)\n', (3421, 3426), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3442, 3451), 'keras.layers.Dense', 'Dense', (['(84)'], {}), '(84)\n', (3447, 3451), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((3467, 3475), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3472, 3475), False, 'from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, Lambda, Activation, MaxPooling2D\n'), ((772, 796), 'cv2.imread', 'cv2.imread', (['current_path'], {}), '(current_path)\n', (782, 796), False, 'import cv2\n'), ((1543, 1556), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1553, 1556), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1635), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1632, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1722), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (1719, 1722), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1612), 'cv2.cvtColor', 'cv2.cvtColor', (['image'], {'code': 'cv2.COLOR_BGR2RGB'}), '(image, code=cv2.COLOR_BGR2RGB)\n', (1581, 1612), False, 'import cv2\n'), ((1648, 1699), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_cropped'], {'code': 'cv2.COLOR_BGR2RGB'}), '(frame_cropped, code=cv2.COLOR_BGR2RGB)\n', (1660, 1699), False, 'import cv2\n'), ((1735, 1786), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_resized'], {'code': 'cv2.COLOR_BGR2RGB'}), '(frame_resized, code=cv2.COLOR_BGR2RGB)\n', (1747, 1786), False, 'import cv2\n')] |
import numpy as np
import time
nodes = []


class node:
    """A graph vertex; every instance registers itself in the global `nodes`."""

    def __init__(self, symbol):
        self.symbol = symbol
        self.edges = []                       # list of [neighbour, distance]
        self.shortest_distance = float('inf')
        self.shortest_path_via = None
        nodes.append(self)

    def add_edge(self, node, distance):
        """Attach a directed edge to `node`, ignoring exact duplicates."""
        candidate = [node, distance]
        if candidate not in self.edges:
            self.edges.append(candidate)

    def update_edges(self):
        """Relax all outgoing edges of this node (Dijkstra step)."""
        for neighbour, distance in self.edges:
            via_distance = self.shortest_distance + distance
            if via_distance < neighbour.shortest_distance:
                neighbour.shortest_distance = via_distance
                neighbour.shortest_path_via = self
# Couples two nodes
def make_edge(node1, node2, distance):
    """Add a one-way edge of length `distance` from node1 to node2.

    The reverse edge is intentionally not added (directed graph).
    """
    node1.add_edge(node2, distance)
# Does the heavy lifting
# Just a python implementation of dijkstras shortest path
def dijkstra(start, end):
    """Run Dijkstra's shortest path over the global `nodes` list from
    `start` to `end`, then print the resulting path and distance.

    Mutates `shortest_distance` / `shortest_path_via` on the nodes, so the
    global graph must be rebuilt before solving a different query.
    """
    global nodes
    queue = []
    path = []
    queue = nodes.copy()
    start.shortest_distance = 0
    # always process the closest unvisited node first
    queue.sort(key=lambda node: node.shortest_distance)
    while queue[0] != end:
        node = queue[0]
        node.update_edges()
        path.append(queue.pop(0))
        # re-sort after relaxation so the next closest node is at the front
        queue.sort(key=lambda node: node.shortest_distance)
    print(print_path(end))
    print(f"Path array: {get_path_array(end)}")
    print(f"Distance: {end.shortest_distance}")
# Literally just builds the path string, e.g. "A -> B -> C".
def print_path(node):
    """Return the chain of symbols from the search start down to `node`."""
    if node.shortest_path_via is None:
        return f"{node.symbol}"
    return f"{print_path(node.shortest_path_via)} -> {node.symbol}"
# Does what it says on the tin: symbol -> node lookup in the global registry.
def get_node(symbol):
    """Return the node with the given symbol, or 0 when absent
    (callers test the result with `== 0`, so 0 is kept as the sentinel)."""
    for candidate in nodes:
        if candidate.symbol == symbol:
            return candidate
    return 0
# Takes a set of edges (as [symbol_a, symbol_b, distance] triples) plus start
# and end symbols, builds the graph from scratch, and runs the search.
def solve_dijkstra(edges, start, end):
    """Build nodes/edges from `edges` and print the shortest path start->end."""
    global nodes
    nodes = []  # reset the registry so repeated calls start clean
    for sym_a, sym_b, dist in edges:
        a = get_node(sym_a)
        b = get_node(sym_b)
        if a == 0:
            a = node(sym_a)
        if b == 0:
            b = node(sym_b)
        a.add_edge(b, dist)  # directed edge only
    # Solve path
    dijkstra(get_node(start), get_node(end))
def get_path_array(node):
    """Return the list of symbols from the search start down to `node`
    (iterative equivalent of the recursive print_path)."""
    trail = [node.symbol]
    current = node.shortest_path_via
    while current is not None:
        trail.append(current.symbol)
        current = current.shortest_path_via
    trail.reverse()
    return trail
# Small sanity-check graph: directed edges [from, to, distance].
data = [
    ["A", "B", 1],
    ["A", "C", 3],
    ["B", "C", 1],
    ["C", "A", 1],
]
#solve_dijkstra(data, "A", "C")
#solve_dijkstra(data, "C", "A")
tic = time.perf_counter()
# GIVEN FOR FREE
# 11x11 grids: boxes[x][y] == 1 marks a cell containing a box (expensive to
# enter); walls[x][y] == 1 marks a wall cell (never a valid source cell).
boxes = np.array([[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0,],
                  [0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,],
                  [1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0,],
                  [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,],
                  [1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1,],
                  [0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0,],
                  [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0,],
                  [0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0,],
                  [1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0,],
                  [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0,]])
walls = np.array([[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
                  [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
                  [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
                  [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
                  [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
                  [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,]])
# IMPORTANT NEW CODE
# Convert the grid into a directed edge list for solve_dijkstra: every
# non-wall cell "x-y" gets an edge to each of its 4 neighbours; entering a
# neighbour that holds a box costs 33 instead of 1.
wx,wy = walls.shape
path_data = []
for x in range(wx):
    for y in range(wy):
        if walls[x][y] != 1:
            if x > 0:
                dist = 1
                if boxes[x-1][y] == 1:
                    dist = 33
                path_data.append([f"{x}-{y}", f"{x-1}-{y}", dist])
            if x < wx-1:
                dist = 1
                if boxes[x+1][y] == 1:
                    dist = 33
                path_data.append([f"{x}-{y}", f"{x+1}-{y}", dist])
            if y > 0:
                dist = 1
                if boxes[x][y-1] == 1:
                    dist = 33
                path_data.append([f"{x}-{y}", f"{x}-{y-1}", dist])
            if y < wy-1:
                dist = 1
                if boxes[x][y+1] == 1:
                    dist = 33
                path_data.append([f"{x}-{y}", f"{x}-{y+1}", dist])
solve_dijkstra(path_data, "1-1", "9-9")
toc = time.perf_counter()
print(f"{toc-tic:0.4f} seconds")
| [
"numpy.array",
"time.perf_counter"
] | [((2561, 2580), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2578, 2580), False, 'import time\n'), ((2607, 3024), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0], [0, \n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 0, 0, 0, 1, 0, 0, 0], [1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1], [0, 1, 0,\n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [0, 1, 0, 0,\n 0, 1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 1, 0,\n 1, 0, 1, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1, 0, 1, 0,\n 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1],\n [0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [\n 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0,\n 1, 0, 1, 0, 1, 0, 1, 0, 0, 0]])\n', (2615, 3024), True, 'import numpy as np\n'), ((3113, 3530), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, \n 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0,\n 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1,\n 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0,\n 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 1,\n 0, 1, 0, 1, 0, 1]]'], {}), '([[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1,\n 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]])\n', (3121, 3530), True, 'import numpy as np\n'), ((4555, 4574), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', 
(4572, 4574), False, 'import time\n')] |
##################################################################
#
# Trap calibration with stage oscillation (by <NAME>)
# Ref: Tolic-Norrelykke et al. (2006)
#
##################################################################
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.fft import fft
import matplotlib.pyplot as plt
from nptdms import TdmsFile
from scipy.optimize import curve_fit, minimize
from scipy.stats import norm
import os
import shutil
### User input ##################################
# First you need to change directory (cd) to where the file is located
# Update the file name
# TDMS capture files to process; the trailing three digits encode the trap
# laser power in percent (parsed in main() via fname[-3:]).
files = ['Dark_Power000',
         'Bright_Power010',
         'Bright_Power020',
         'Bright_Power030',
         'Bright_Power040',
         'Bright_Power050',
         'Bright_Power060',
         'Bright_Power070',
         'Bright_Power080',
         'Bright_Power090',
         'Bright_Power099',
         'Bright_Power100']
f_sample = 20000 # Sampling frequency (Hz)
dt = 1/f_sample # Time interval during sampling (s)
t_total = 100 # Total time (s)
N_total = int(f_sample * t_total) # Total number of data samples
# PSDs are computed on short windows and then averaged over all of them
t_window = 0.1 # Time for one window in sec
N_window = int(f_sample * t_window) # Num of data samples in a window
df = 1/t_window # Frequency resolution of a window (Hz)
N_avg = int(t_total / t_window) # Num of windows used for averaging
class Data:
    """One trap-calibration measurement at a given laser power.

    Reads a TDMS trace, computes windowed/averaged power spectral densities
    (PSDs) of the x, y and stage channels, and saves a diagnostic figure.
    """
    def __init__(self, fname, power):
        self.fname = fname  # TDMS file name without the .tdms extension
        self.power = power  # trap laser power in percent
    def read(self):
        """Load N_total samples per channel from <fname>.tdms.

        Channel order is assumed to be ch0=x, ch1=y, ch2=stage/sum signal —
        TODO confirm against the acquisition VI. x and y are mean-subtracted.
        """
        # File information below
        tdms_file = TdmsFile(self.fname+'.tdms') # Reads a tdms file.
        group_name = "Trap" # Get the group name
        channels = tdms_file.group_channels(group_name) # Get the channel object
        self.ch = np.zeros((len(channels), N_total)) # Make a 2D array (ch, timetrace) for trap data
        for i, channel in enumerate(channels):
            self.ch[i,] = channel.data[range(N_total)]
        self.x = self.ch[0] - np.mean(self.ch[0])
        self.y = self.ch[1] - np.mean(self.ch[1])
        self.s = self.ch[2]
    def analyze(self):
        """Compute per-window PSDs and average them across all N_avg windows.

        PSD_XY is the cross-spectrum fft(x)*conj(fft(y)) normalised by
        sqrt(PSD_X * PSD_Y). The DC bin (index 0) and the upper half of the
        spectrum are dropped, so self.f runs from df to (N_window/2 - 1)*df.
        """
        x = self.x.reshape((N_avg, N_window))
        y = self.y.reshape((N_avg, N_window))
        s = self.s.reshape((N_avg, N_window))
        PSD_X = np.zeros((N_avg, int(N_window/2)-1))
        PSD_Y = np.zeros((N_avg, int(N_window/2)-1))
        PSD_S = np.zeros((N_avg, int(N_window/2)-1))
        PSD_XY = np.zeros((N_avg, int(N_window/2)-1))
        for j in range(N_avg): # per window
            PSD_X0 = np.abs(fft(x[j]))**2/t_window
            PSD_Y0 = np.abs(fft(y[j]))**2/t_window
            PSD_S0 = np.abs(fft(s[j]))**2/t_window
            PSD_XY0 = fft(x[j])*np.conj(fft(y[j]))/t_window
            PSD_XY0 = PSD_XY0/(PSD_X0*PSD_Y0)**0.5
            PSD_X[j] = PSD_X0[1:int(N_window/2)]
            PSD_Y[j] = PSD_Y0[1:int(N_window/2)]
            PSD_S[j] = PSD_S0[1:int(N_window/2)]
            PSD_XY[j] = PSD_XY0[1:int(N_window/2)]
        self.PSD_X = np.mean(PSD_X, axis=0)
        self.PSD_Y = np.mean(PSD_Y, axis=0)
        self.PSD_S = np.mean(PSD_S, axis=0)
        self.PSD_XY = np.mean(PSD_XY, axis=0)
        self.f = df * np.arange(1, N_window/2)
    def plot(self): # PSD
        """Save a 2x2 figure (PSD_X, PSD_Y, PSD_XY, PSD_S) to <fname>.png."""
        # PSD fitting (log-log)
        # PSD (lin)
        t = dt * np.arange(N_window)
        fig = plt.figure(1, figsize = (20, 10), dpi=300)
        sp = fig.add_subplot(221)
        sp.loglog(self.f, self.PSD_X, 'k', lw=1)
#        sp.set_ylim([1e-12, 5e-9])
        sp.set_xlabel('Frequency (Hz)')
        sp.set_ylabel('PSD_X (V^2/s)')
        sp = fig.add_subplot(222)
        sp.loglog(self.f, self.PSD_Y, 'k', lw=1)
#        sp.set_ylim([1e-12, 5e-9])
        sp.set_xlabel('Frequency (Hz)')
        sp.set_ylabel('PSD_Y (V^2/s)')
        sp = fig.add_subplot(223)
        sp.plot(self.f, self.PSD_XY, 'k', lw=1)
#        sp.set_ylim([1e-12, 5e-9])
        sp.set_xlabel('Frequency (Hz)')
        sp.set_ylabel('PSD_XY')
        sp = fig.add_subplot(224)
        sp.loglog(self.f, self.PSD_S, 'k', lw=1)
#        sp.set_ylim([1e-12, 5e-9])
        sp.set_xlabel('Frequency (Hz)')
        sp.set_ylabel('PSD_S (V^2/s)')
        sp.set_title('Trap power = %d %%' %(self.power))
        fig.savefig(self.fname)
        plt.close(fig)
def main():
    """Process every configured TDMS capture: read, analyze, plot."""
    for fname in files:
        print(fname)
        laser_power = int(fname[-3:])  # trailing digits = trap power (%)
        measurement = Data(fname, laser_power)
        measurement.read()
        measurement.analyze()
        measurement.plot()

if __name__ == "__main__":
    main()
| [
"numpy.mean",
"numpy.arange",
"numpy.fft.fft",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"nptdms.TdmsFile"
] | [((1934, 1964), 'nptdms.TdmsFile', 'TdmsFile', (["(self.fname + '.tdms')"], {}), "(self.fname + '.tdms')\n", (1942, 1964), False, 'from nptdms import TdmsFile\n'), ((3442, 3464), 'numpy.mean', 'np.mean', (['PSD_X'], {'axis': '(0)'}), '(PSD_X, axis=0)\n', (3449, 3464), True, 'import numpy as np\n'), ((3494, 3516), 'numpy.mean', 'np.mean', (['PSD_Y'], {'axis': '(0)'}), '(PSD_Y, axis=0)\n', (3501, 3516), True, 'import numpy as np\n'), ((3540, 3562), 'numpy.mean', 'np.mean', (['PSD_S'], {'axis': '(0)'}), '(PSD_S, axis=0)\n', (3547, 3562), True, 'import numpy as np\n'), ((3587, 3610), 'numpy.mean', 'np.mean', (['PSD_XY'], {'axis': '(0)'}), '(PSD_XY, axis=0)\n', (3594, 3610), True, 'import numpy as np\n'), ((3826, 3866), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(20, 10)', 'dpi': '(300)'}), '(1, figsize=(20, 10), dpi=300)\n', (3836, 3866), True, 'import matplotlib.pyplot as plt\n'), ((4928, 4942), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4937, 4942), True, 'import matplotlib.pyplot as plt\n'), ((2370, 2389), 'numpy.mean', 'np.mean', (['self.ch[0]'], {}), '(self.ch[0])\n', (2377, 2389), True, 'import numpy as np\n'), ((2421, 2440), 'numpy.mean', 'np.mean', (['self.ch[1]'], {}), '(self.ch[1])\n', (2428, 2440), True, 'import numpy as np\n'), ((3635, 3661), 'numpy.arange', 'np.arange', (['(1)', '(N_window / 2)'], {}), '(1, N_window / 2)\n', (3644, 3661), True, 'import numpy as np\n'), ((3777, 3796), 'numpy.arange', 'np.arange', (['N_window'], {}), '(N_window)\n', (3786, 3796), True, 'import numpy as np\n'), ((3107, 3116), 'numpy.fft.fft', 'fft', (['x[j]'], {}), '(x[j])\n', (3110, 3116), False, 'from numpy.fft import fft\n'), ((2957, 2966), 'numpy.fft.fft', 'fft', (['x[j]'], {}), '(x[j])\n', (2960, 2966), False, 'from numpy.fft import fft\n'), ((3009, 3018), 'numpy.fft.fft', 'fft', (['y[j]'], {}), '(y[j])\n', (3012, 3018), False, 'from numpy.fft import fft\n'), ((3061, 3070), 'numpy.fft.fft', 'fft', (['s[j]'], {}), 
'(s[j])\n', (3064, 3070), False, 'from numpy.fft import fft\n'), ((3125, 3134), 'numpy.fft.fft', 'fft', (['y[j]'], {}), '(y[j])\n', (3128, 3134), False, 'from numpy.fft import fft\n')] |
"""
These are some scripts used in testing multiple particles.
"""
import time
import datetime
import numpy as np
from particle import Particle
#==============================================================================
def fill_particles(diameter, density, births, lifetime, initial_positions, u0):
    """Create one Particle per (birth time, inlet position) pair.

    Ordering matches: all inlet positions for births[0], then all inlet
    positions for births[1], and so on, i.e.
    [p, p, ..., p  (Ninlets times for birth1), then birth2, ...].

    Returns:
        ndarray of Particle objects, length len(births)*len(initial_positions).
    """
    all_particles = [Particle(diameter, density, birth, lifetime, pos0, u0)
                     for birth in births
                     for pos0 in initial_positions]
    return np.array(all_particles)
def prepare_initial_positions(x0, list_y):
    """Build the array of [x0, y] starting positions, one row per y value."""
    positions = []
    for y in list_y:
        positions.append([x0, y])
    return np.array(positions)
def compute_particles(particles, flow, factor, printIt, too_far_stop):
    """Integrate every particle's trajectory, logging progress and CPU time.

    Args:
        particles: sequence of Particle objects.
        flow: flow field forwarded to Particle.compute_trajectory.
        factor, printIt, too_far_stop: forwarded to compute_trajectory —
            semantics defined by the Particle class (TODO confirm).
    Returns:
        (particles, captured_ones): the input sequence and an ndarray of the
        particles whose .captured flag is set after integration.
    """
    t1 = time.time()
    cpu_time = 0
    count = 1
    Npar = len(particles)
    for p in particles:
        print('\n---> Particle %d (remaining: %d)' % (count, Npar - count))
        p.compute_trajectory(flow, factor, printIt, too_far_stop)
        # Accumulated wall-clock time since the loop started
        cpu_time = time.time() - t1
        print('Accumulated CPU_TIME = %.2f seconds = %s (hh:mm:ss)' \
              % (cpu_time, datetime.timedelta(seconds=cpu_time)))
        count += 1
    captured_ones = np.array([p for p in particles if p.captured])
    return particles, captured_ones
def spread_particles(fill_args, flow, factor, printIt, too_far_stop):
    """Convenience wrapper: build particles from `fill_args` then integrate.

    `fill_args` is the positional tuple for fill_particles:
    (diameter, density, births, lifetime, initial_positions, u0).
    """
    batch = fill_particles(*fill_args)
    return compute_particles(batch, flow, factor, printIt, too_far_stop)
"numpy.array",
"datetime.timedelta",
"time.time",
"particle.Particle"
] | [((720, 743), 'numpy.array', 'np.array', (['someParticles'], {}), '(someParticles)\n', (728, 743), True, 'import numpy as np\n'), ((799, 834), 'numpy.array', 'np.array', (['[[x0, y] for y in list_y]'], {}), '([[x0, y] for y in list_y])\n', (807, 834), True, 'import numpy as np\n'), ((916, 927), 'time.time', 'time.time', ([], {}), '()\n', (925, 927), False, 'import time\n'), ((1368, 1414), 'numpy.array', 'np.array', (['[p for p in particles if p.captured]'], {}), '([p for p in particles if p.captured])\n', (1376, 1414), True, 'import numpy as np\n'), ((568, 618), 'particle.Particle', 'Particle', (['diameter', 'density', 'b', 'lifetime', 'pos0', 'u0'], {}), '(diameter, density, b, lifetime, pos0, u0)\n', (576, 618), False, 'from particle import Particle\n'), ((1174, 1185), 'time.time', 'time.time', ([], {}), '()\n', (1183, 1185), False, 'import time\n'), ((1288, 1324), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'cpu_time'}), '(seconds=cpu_time)\n', (1306, 1324), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Library with PME calculation functions, provides utility for Private Equity analysis.
@author: <NAME> (<EMAIL>)
"""
import pandas as pd
import numpy as np
import scipy.optimize
from datetime import date
#Helper functions
def nearest(series, lookup, debug = False):
    """Return the element of `series` whose value is closest to `lookup`."""
    position = (series - lookup).abs().argsort()[0]
    closest = series.iloc[position]
    if debug == True:
        print("lookup: " + str(lookup) + " | closest: " + str(closest))
    return closest
def xnpv(rate, values, dates):
    '''Equivalent of Excel's XNPV function.

    >>> from datetime import date
    >>> dates = [date(2010, 12, 29), date(2012, 1, 25), date(2012, 3, 8)]
    >>> values = [-10000, 20, 10100]
    >>> xnpv(0.1, values, dates)
    -966.4345...
    '''
    if rate <= -1.0:
        # Discount factor is undefined at/below -100%: mirror the blow-up.
        return float('inf')
    # Normalise to native datetime.date objects (handles strings/Timestamps).
    datesx = pd.to_datetime(dates).apply(lambda ts: date(ts.year, ts.month, ts.day))
    d0 = datesx[0]  # or min(dates)
    terms = [vi / (1.0 + rate) ** ((di - d0).days / 365.0)
             for vi, di in zip(values, datesx)]
    return sum(terms)
def xirr(values, dates):
    '''Equivalent of Excel's XIRR function.

    >>> from datetime import date
    >>> dates = [date(2010, 12, 29), date(2012, 1, 25), date(2012, 3, 8)]
    >>> values = [-10000, 20, 10100]
    >>> xirr(values, dates)
    0.0100612...
    '''
    def npv_at(rate):
        return xnpv(rate, values, dates)
    try:
        # Fast path: Newton-Raphson from a zero-rate initial guess.
        return scipy.optimize.newton(npv_at, 0.0)
    except RuntimeError:  # Failed to converge?
        # Robust fallback: bracketed root-finding over a huge rate range.
        return scipy.optimize.brentq(npv_at, -1.0, 1e10)
def TVM(value, value_date, money_date, discount_rate):
    ''' Calculates the discounted value of money to date money_date (i.e. either PV or FV depending on date)

    Args:
        value: cash amount as of value_date.
        value_date, money_date: date-likes parseable by pd.to_datetime.
        discount_rate: annual rate; compounding is over fractional years
            (day count / 365).
    '''
    # Year fraction between the two dates. NOTE(review): the .astype(int)
    # assumes the division yields a numpy scalar — confirm this works for the
    # scalar date inputs used by MIRR.
    time_delta = ((pd.to_datetime(money_date) - pd.to_datetime(value_date)) / np.timedelta64(1, 'D')).astype(int)/365
    return value*(1+discount_rate)**time_delta
def xirr2(values, dates):
    """Excel-style XIRR via a hand-rolled halving-step search (no scipy).

    Walks `guess` (= 1 + rate) toward the root of the NPV, halving the step
    each time the residual sign check fires, for at most 100000 iterations.
    NOTE(review): the initial guess of 0.05 (i.e. rate = -0.95) looks
    unusual — confirm convergence behaviour for typical cashflow streams.

    Args:
        values: cashflow amounts, first one at t=0.
        dates: date-likes convertible by pd.to_datetime (as a Series so that
            .apply is available).
    Returns:
        The annualised IRR, i.e. guess - 1.
    """
    datesx = pd.to_datetime(dates).apply(lambda x: date(x.year,x.month,x.day)) # Just in case conversion to native datime date
    transactions = list(zip(datesx,values))
    # Elapsed years of each cashflow relative to the first one
    years = [(ta[0] - transactions[0][0]).days / 365.0 for ta in transactions]
    residual = 1
    step = 0.05
    guess = 0.05
    epsilon = 0.0001
    limit = 100000
    while abs(residual) > epsilon and limit > 0:
        limit -= 1
        residual = 0.0
        for i, ta in enumerate(transactions):
            residual += ta[1] / pow(guess, years[i])
        if abs(residual) > epsilon:
            # Step toward the root and refine the step size
            if residual > 0:
                guess += step
            else:
                guess -= step
            step /= 2.0
    return guess-1
### PME Algorithms
#GENERATE DISCOUNTING TABLE
def discount_table(dates_cashflows, cashflows, cashflows_type, dates_index, index, NAV_scaling = 1):
    ''' Automatically matches cashflow and index dates and subsequently generates discount table (which can be used to calculate Direct Alpha et al.).
    Also useful for debugging and exporting.
    Args:
        dates_cashflows: An ndarray the dates corresponding to cashflows.
        cashflows: An ndarray of the cashflow amounts (sign does not matter)
        cashflows_type: Accepts three types [Distribution \ Capital Call \ Value]
        dates_index: Ndarray of dates for the index, same logic as cashflows.
        index: The index levels corresponding to the dates
        NAV_scaling: Coefficient which can be used to scale the NAV amount (so as to counteract systemic mispricing)
    Returns:
        DataFrame(Date|Amount|Type|Status|Pre-Discounted|Discounted|Index|Index_date|FV_Factor),
        cut off at the NAV (future-value) date.
    '''
    _dates_index = pd.to_datetime(dates_index)
    df_cf = pd.concat([pd.to_datetime(dates_cashflows),cashflows,cashflows_type], axis=1)
    df_cf.columns = ["Date", "Amount","Type"]
    df_cf = df_cf.sort_values('Date')
    df_cf = df_cf.reset_index(drop=True)
    #Get NAV: a 'Value' row of 0 indicates the fund is liquidated
    if(df_cf[(df_cf['Type']=='Value') & (df_cf['Amount']==0)].empty): #No zero valuation -> fund still active
        NAV_record = df_cf[df_cf['Type']=='Value'].sort_values("Date", ascending = False).head(1).copy()
        NAV_date = NAV_record["Date"].iloc[0]
        NAV_index_value = index[_dates_index == nearest(_dates_index, NAV_date)].iloc[0]
        df_cf['Status'] = 'Active'
    else: #A zero valuation exists -> fund liquidated, use last non-Value date with NAV 0
        NAV_record = df_cf[df_cf['Type']!='Value'].sort_values("Date", ascending = False).head(1).copy()
        NAV_record['Amount'].iloc[0] = 0 # force a 0 value
        NAV_date = NAV_record["Date"].iloc[0]
        NAV_index_value = index[_dates_index == nearest(_dates_index, NAV_date)].iloc[0]
        df_cf['Status'] = 'Liquidated'
    #Iterate and assign to table
    df_cf["Pre-Discounted"] = 0
    df_cf["Discounted"] = 0
    df_cf["Index"] = 0
    df_cf["Index_date"] = 0
    df_cf["FV_Factor"] = 0
    for idx, cf in df_cf.iterrows():
        # Let us find the closest index value to the current date
        index_date = nearest(_dates_index, cf["Date"])
        index_value = index[_dates_index == index_date].iloc[0]
        df_cf.loc[idx,"Index"] = index_value
        df_cf.loc[idx,"Index_date"] = index_date
        # FV_Factor compounds each flow forward to the NAV date at index growth
        df_cf.loc[idx,"FV_Factor"] = (NAV_index_value/index_value)
        if cf["Type"] == "Distribution":
            df_cf.loc[idx,'Discounted'] = abs(cf["Amount"])* (NAV_index_value/index_value)
            df_cf.loc[idx,'Pre-Discounted'] = abs(cf["Amount"])
        elif cf["Type"] == "Capital Call":
            df_cf.loc[idx,'Discounted'] = - abs(cf["Amount"])* (NAV_index_value/index_value)
            df_cf.loc[idx,'Pre-Discounted'] = - abs(cf["Amount"])
    # Attach relevant NAV value (optionally rescaled)
    df_cf.loc[(df_cf['Date']==NAV_date)&(df_cf['Type']=='Value'),'Discounted'] = NAV_record['Amount'].iloc[0] * NAV_scaling
    df_cf.loc[(df_cf['Date']==NAV_date)&(df_cf['Type']=='Value'),'Pre-Discounted'] = NAV_record['Amount'].iloc[0] * NAV_scaling
    #cut table at FV date
    df_cf = df_cf[df_cf["Date"]<=NAV_date].copy()
    return df_cf.copy()
#KS-PME
def KS_PME(dates_cashflows, cashflows, cashflows_type, dates_index, index, NAV_scaling = 1, auto_NAV = True):
    """Calculates the Kaplan-Schoar PME. Designed for plug & play with Preqin data.
    Args:
        dates_cashflows: An ndarray the dates corresponding to cashflows.
        cashflows: An ndarray of the cashflow amounts (sign does not matter)
        cashflows_type: Accepts three types [Distribution \ Capital Call \ Value]
        dates_index: Ndarray of dates for the index, same logic as cashflows.
        index: The index levels corresponding to the dates
        NAV_scaling: Coefficient which can be used to scale the NAV amount (so as to counteract systemic mispricing)
        auto_NAV: Toggle for automatic handling of the NAV. If False, NAV is not calculated and function returns a tuple of [sum_fv_distributions, sum_fv_calls]
            (allows for manual completion of the PME formula using appropriate NAV value)
    Returns:
        The KS-PME metric given the inputed index
    """
    _dates_index = pd.to_datetime(dates_index)
    df_cf = pd.concat([pd.to_datetime(dates_cashflows),cashflows,cashflows_type], axis=1)
    df_cf.columns = ["Date", "Amount","Type"]
    #first let us run through the cashflow data and sum up all of the calls and distributions,
    #each deflated by the index level on its date
    sum_fv_distributions = 0
    sum_fv_calls = 0
    for idx, cf in df_cf.iterrows():
        # Let us find the closest index value to the current date
        index_value = index[_dates_index == nearest(_dates_index, cf["Date"])].iloc[0]
        if cf["Type"] == "Distribution":
            sum_fv_distributions = sum_fv_distributions + abs(cf["Amount"])/index_value
        elif cf["Type"] == "Capital Call":
            sum_fv_calls = sum_fv_calls + abs(cf["Amount"])/index_value
    #Now, let us also consider the nav
    if auto_NAV == True:
        #Let us find the nav (latest 'Value' record)
        NAV_record = df_cf[df_cf['Type']=='Value'].sort_values("Date", ascending = False).head(1)
        index_value = index[_dates_index == nearest(_dates_index, NAV_record["Date"].iloc[0])].iloc[0]
        discounted_NAV = (NAV_record['Amount'].iloc[0]/index_value) * NAV_scaling
        #return according to the KSPME formula
        return (sum_fv_distributions+discounted_NAV)/sum_fv_calls
    else:
        return [sum_fv_distributions,sum_fv_calls]
#Direct Alpha
def Direct_Alpha_PME(dates_cashflows, cashflows, cashflows_type, dates_index, index, NAV_scaling = 1):
    """Calculates the Direct Alpha PME. Designed for plug & play with Preqin data.

    Args:
        dates_cashflows: dates corresponding to the cashflows.
        cashflows: cashflow amounts (sign does not matter).
        cashflows_type: one of [Distribution | Capital Call | Value] per flow.
        dates_index: dates for the benchmark index.
        index: index levels corresponding to dates_index.
        NAV_scaling: coefficient used to scale the NAV amount.
    Returns:
        The Direct Alpha metric: ln(1 + IRR of the index-discounted flows).
    """
    # Build the table of cashflows compounded to the NAV date at index growth.
    table = discount_table(dates_cashflows, cashflows, cashflows_type,
                           dates_index, index, NAV_scaling=NAV_scaling)
    # IRR of the discounted stream, then continuously-compounded alpha.
    discounted_irr = xirr2(table['Discounted'], pd.to_datetime(table['Date']))
    return np.log(1 + discounted_irr)
#PME+
def PME_PLUS(dates_cashflows, cashflows, cashflows_type, dates_index, index, return_alpha = 1, NAV_scaling = 1):
    ''' Returns the alpha as generated by the PME+ method. I.e. (IRR - PME+_IRR)

    Args:
        dates_cashflows: dates corresponding to the cashflows.
        cashflows: cashflow amounts (sign does not matter).
        cashflows_type: one of [Distribution | Capital Call | Value] per flow.
        dates_index: dates for the benchmark index.
        index: index levels corresponding to dates_index.
        return_alpha: if truthy (== True), return (fund IRR - PME+ IRR);
            otherwise return the PME+ IRR itself.
        NAV_scaling: coefficient used to scale the NAV amount.
    Returns:
        float: the PME+ alpha, or the PME+ benchmark IRR (see return_alpha).
    '''
    # Table of cashflows with index-based FV factors and the NAV attached.
    df_cf = discount_table(dates_cashflows, cashflows, cashflows_type, dates_index, index, NAV_scaling = NAV_scaling)
    sum_fv_distributions = 0
    sum_fv_calls = 0
    for idx, cf in df_cf.iterrows():
        if cf["Type"] == "Distribution":
            sum_fv_distributions = sum_fv_distributions + abs(cf["Amount"]) * cf["FV_Factor"]
        elif cf["Type"] == "Capital Call":
            sum_fv_calls = sum_fv_calls + abs(cf["Amount"]) * cf["FV_Factor"]
    # Build the PME+ cashflow stream; the NAV (if present as the final row)
    # enters as the terminal positive flow.
    df_cf['PME_PLUS IRR'] = 0
    if (df_cf.tail(1).iloc[0]['Type'] == "Value"):
        NAV_value = df_cf.tail(1).iloc[0]['Discounted']
        # BUG FIX: the original used chained indexing
        # (df_cf['PME_PLUS IRR'].iloc[-1] = NAV_value), which writes into a
        # potentially temporary Series and is unreliable / a silent no-op
        # under pandas copy-on-write. A single .loc assignment always writes
        # into the frame itself.
        df_cf.loc[df_cf.index[-1], 'PME_PLUS IRR'] = NAV_value
    else:
        NAV_value = 0
    # Scale distributions so the PME+ stream nets out against calls + NAV.
    scaling_factor = (sum_fv_calls - NAV_value) / sum_fv_distributions
    #Now lets fill the PME+ IRR calc column
    for idx, cf in df_cf.iterrows():
        if cf["Type"] == "Distribution":
            df_cf.loc[idx,'PME_PLUS IRR'] = abs(cf["Amount"]) * scaling_factor
        elif cf["Type"] == "Capital Call":
            df_cf.loc[idx,'PME_PLUS IRR'] = - abs(cf["Amount"])
    pme_plus = xirr2(df_cf['PME_PLUS IRR'], pd.to_datetime(df_cf['Date']))
    irr = xirr2(df_cf['Pre-Discounted'], pd.to_datetime(df_cf['Date']))
    #check if return benchmark or the alpha
    if return_alpha == True:
        return float(irr-pme_plus)
    else:
        return float(pme_plus)
#MIRR
def MIRR(dates_cashflows, cashflows, cashflows_type, reinvestment_rate, financing_rate, first_date = 0, NAV_scaling = 1):
    ''' Calculates the Modified IRR (MIRR).

    Distributions are compounded forward to the last cashflow date at
    reinvestment_rate; capital calls are discounted back to the start date at
    financing_rate; the result is the annualised ratio of the two.
    NOTE(review): NAV_scaling is accepted but never used in this function.

    Args:
        first_date: optional override for the start date (0 = use the first
            cashflow date).
    '''
    df_cf = pd.concat([pd.to_datetime(dates_cashflows),cashflows,cashflows_type], axis=1)
    df_cf.columns = ["Date", "Amount", "Type"]
    df_cf = df_cf.sort_values('Date')
    df_cf = df_cf.reset_index(drop=True)
    #Pick the start and end dates of the compounding window
    if first_date == 0:
        start_date = df_cf['Date'].iloc[0]
    else:
        start_date = first_date
    end_date = df_cf['Date'].iloc[-1]
    #Calculate the PV and FVs
    sum_pv_calls = 0
    sum_fv_dist = 0
    for idx, cf in df_cf.iterrows():
        if cf["Type"] == "Distribution":
            sum_fv_dist = sum_fv_dist + TVM(cf['Amount'],cf['Date'], end_date, reinvestment_rate)
        elif cf["Type"] == "Capital Call":
            sum_pv_calls = sum_pv_calls + TVM(-cf['Amount'],cf['Date'], start_date, financing_rate)
    #Grab the NAV if available
    if(df_cf[(df_cf['Type']=='Value') & (df_cf['Amount']==0)].empty): #Checks if liquidated by looking at 0 valuations
        NAV_record = df_cf[df_cf['Type']=='Value'].sort_values("Date", ascending = False).head(1).copy()['Amount'].iloc[0]
    else:
        NAV_record = 0
    #Get the Time delta (n), in fractional years
    time_delta = ((pd.to_datetime(end_date) - pd.to_datetime(start_date)) / np.timedelta64(1, 'D')).astype(int)/365
    #Return the discounted CAGR
    return (((sum_fv_dist + NAV_record)/sum_pv_calls)**(1/time_delta))-1
### Index-adjustment functions below:
# Adjust Beta
def BETA_ADJ_INDEX(dates_index, index, rf_date, rf_rate, beta):
    ''' Allows us to re-lever an index for a new beta value using a risk-free rate timeseries.
        - Applies CAPM formula as follows: r_levered = r_f + beta(r_index - r_f)
        - Scales the r_f using the timedelta between dates (arithmetic, not continous) automatically
    Args:
        dates_index: Ndarray of dates for the index
        index: The index levels corresponding to the dates
        rf_date: Ndarray of the risk-free dates
        rf_rate: The rate corresponding to the dates
        beta: The beta level to be simulated
    Returns:
        A DataFrame (Date|Index_new|Index_original|Beta), with the adjusted index
    '''
    df_index = pd.concat([pd.to_datetime(dates_index),index], axis=1)
    df_index.columns = ["Date", "Index"]
    df_index = df_index.sort_values('Date')
    df_index = df_index.reset_index(drop=True)
    df_rf = pd.concat([pd.to_datetime(rf_date),rf_rate], axis=1)
    df_rf.columns = ["Date", "Value"]
    # Period-over-period index return and the elapsed time of each period
    df_index['Delta_amount'] = ( df_index['Index'] / df_index['Index'].shift(1) ) - 1
    df_index['Delta_time_days'] = ((df_index['Date'] - df_index['Date'].shift(1)) / np.timedelta64(1, 'D'))
    df_index['Delta_time_years'] = df_index['Delta_time_days'] / 365
    df_index['Closest_rf'] = 0
    df_index['Adjusted_delta'] = 0
    df_index['Index_beta_adjusted'] = 100
    for idx, record in df_index.iterrows():
        #assign the closest rf (rf series date nearest to this index date)
        closest_rf = float(df_rf.loc[df_rf["Date"] == nearest(pd.to_datetime(df_rf["Date"]), pd.to_datetime(record['Date']), 0), 'Value'])
        try:
            df_index.loc[idx,'Closest_rf'] = closest_rf
        except:
            # NOTE(review): bare except silently drops assignment failures —
            # consider narrowing or removing.
            pass
        #now lets compute the adjusted change via CAPM, with rf scaled to the period length
        closest_rf_adj = closest_rf * record['Delta_time_years']
        adjusted_delta_amount = closest_rf_adj + beta*(record['Delta_amount'] - closest_rf_adj)
        df_index.loc[idx,'Adjusted_delta'] = adjusted_delta_amount
        #finally, let us recompute a new index (rebased at 100 on the first row)
        if(record['Index'] != 100 and int(idx) != 0):
            df_index.loc[idx,'Index_beta_adjusted'] = (1+df_index.loc[idx,'Adjusted_delta']) * df_index.loc[int(idx)-1,'Index_beta_adjusted']
    #clean up, produce new DF, and return it
    df_index_adjusted = pd.concat([df_index['Date'],df_index['Index_beta_adjusted'], df_index['Index']], axis=1)
    df_index_adjusted['Beta']=beta
    df_index_adjusted = df_index_adjusted.reset_index(drop=True)
    df_index_adjusted.columns = ['Date','Index_new','Index_original','Beta']
    return df_index_adjusted#df_index
| [
"numpy.log",
"datetime.date",
"numpy.timedelta64",
"pandas.concat",
"pandas.to_datetime"
] | [((3929, 3956), 'pandas.to_datetime', 'pd.to_datetime', (['dates_index'], {}), '(dates_index)\n', (3943, 3956), True, 'import pandas as pd\n'), ((7370, 7397), 'pandas.to_datetime', 'pd.to_datetime', (['dates_index'], {}), '(dates_index)\n', (7384, 7397), True, 'import pandas as pd\n'), ((10043, 10058), 'numpy.log', 'np.log', (['(1 + irr)'], {}), '(1 + irr)\n', (10049, 10058), True, 'import numpy as np\n'), ((15706, 15800), 'pandas.concat', 'pd.concat', (["[df_index['Date'], df_index['Index_beta_adjusted'], df_index['Index']]"], {'axis': '(1)'}), "([df_index['Date'], df_index['Index_beta_adjusted'], df_index[\n 'Index']], axis=1)\n", (15715, 15800), True, 'import pandas as pd\n'), ((9993, 10022), 'pandas.to_datetime', 'pd.to_datetime', (["df_cf['Date']"], {}), "(df_cf['Date'])\n", (10007, 10022), True, 'import pandas as pd\n'), ((11533, 11562), 'pandas.to_datetime', 'pd.to_datetime', (["df_cf['Date']"], {}), "(df_cf['Date'])\n", (11547, 11562), True, 'import pandas as pd\n'), ((11604, 11633), 'pandas.to_datetime', 'pd.to_datetime', (["df_cf['Date']"], {}), "(df_cf['Date'])\n", (11618, 11633), True, 'import pandas as pd\n'), ((14593, 14615), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (14607, 14615), True, 'import numpy as np\n'), ((857, 878), 'pandas.to_datetime', 'pd.to_datetime', (['dates'], {}), '(dates)\n', (871, 878), True, 'import pandas as pd\n'), ((895, 923), 'datetime.date', 'date', (['x.year', 'x.month', 'x.day'], {}), '(x.year, x.month, x.day)\n', (899, 923), False, 'from datetime import date\n'), ((1985, 2006), 'pandas.to_datetime', 'pd.to_datetime', (['dates'], {}), '(dates)\n', (1999, 2006), True, 'import pandas as pd\n'), ((2023, 2051), 'datetime.date', 'date', (['x.year', 'x.month', 'x.day'], {}), '(x.year, x.month, x.day)\n', (2027, 2051), False, 'from datetime import date\n'), ((3982, 4013), 'pandas.to_datetime', 'pd.to_datetime', (['dates_cashflows'], {}), '(dates_cashflows)\n', (3996, 4013), True, 'import 
pandas as pd\n'), ((7423, 7454), 'pandas.to_datetime', 'pd.to_datetime', (['dates_cashflows'], {}), '(dates_cashflows)\n', (7437, 7454), True, 'import pandas as pd\n'), ((12004, 12035), 'pandas.to_datetime', 'pd.to_datetime', (['dates_cashflows'], {}), '(dates_cashflows)\n', (12018, 12035), True, 'import pandas as pd\n'), ((14128, 14155), 'pandas.to_datetime', 'pd.to_datetime', (['dates_index'], {}), '(dates_index)\n', (14142, 14155), True, 'import pandas as pd\n'), ((14333, 14356), 'pandas.to_datetime', 'pd.to_datetime', (['rf_date'], {}), '(rf_date)\n', (14347, 14356), True, 'import pandas as pd\n'), ((1852, 1874), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1866, 1874), True, 'import numpy as np\n'), ((13174, 13196), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (13188, 13196), True, 'import numpy as np\n'), ((1793, 1819), 'pandas.to_datetime', 'pd.to_datetime', (['money_date'], {}), '(money_date)\n', (1807, 1819), True, 'import pandas as pd\n'), ((1822, 1848), 'pandas.to_datetime', 'pd.to_datetime', (['value_date'], {}), '(value_date)\n', (1836, 1848), True, 'import pandas as pd\n'), ((13117, 13141), 'pandas.to_datetime', 'pd.to_datetime', (['end_date'], {}), '(end_date)\n', (13131, 13141), True, 'import pandas as pd\n'), ((13144, 13170), 'pandas.to_datetime', 'pd.to_datetime', (['start_date'], {}), '(start_date)\n', (13158, 13170), True, 'import pandas as pd\n'), ((14936, 14965), 'pandas.to_datetime', 'pd.to_datetime', (["df_rf['Date']"], {}), "(df_rf['Date'])\n", (14950, 14965), True, 'import pandas as pd\n'), ((14967, 14997), 'pandas.to_datetime', 'pd.to_datetime', (["record['Date']"], {}), "(record['Date'])\n", (14981, 14997), True, 'import pandas as pd\n')] |
import os
import pickle
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
OUT_PATH = "/Users/lindronics/workspace/4th_year/out/kfold"
final_report = {}
for path, _, files in os.walk(OUT_PATH):
for fname in files:
if fname.endswith(".pickle") and "report" in fname:
with open(os.path.join(path, fname), "rb") as f:
report = pickle.load(f)
for c, metrics in report.items():
if c == "accuracy":
continue
final_report[c] = final_report.get(c, {})
for metric, val in metrics.items():
final_report[c][metric] = final_report[c].get(metric, [])
final_report[c][metric].append(val)
for c, metrics in final_report.items():
for metric, vals in metrics.items():
final_report[c][metric] = f"{np.mean(np.array(vals)):.2f}"
pprint(final_report)
| [
"os.path.join",
"pickle.load",
"numpy.array",
"pprint.pprint",
"os.walk"
] | [((204, 221), 'os.walk', 'os.walk', (['OUT_PATH'], {}), '(OUT_PATH)\n', (211, 221), False, 'import os\n'), ((916, 936), 'pprint.pprint', 'pprint', (['final_report'], {}), '(final_report)\n', (922, 936), False, 'from pprint import pprint\n'), ((394, 408), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (405, 408), False, 'import pickle\n'), ((330, 355), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (342, 355), False, 'import os\n'), ((892, 906), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (900, 906), True, 'import numpy as np\n')] |
import drawing
import itertools
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as a3
import matplotlib.animation as animation
class PolyhedronExtension:
@classmethod
def fromBounds(cls, lb, ub):
"""
Return a new Polyhedron representing an n-dimensional box spanning from [lb] to [ub]
"""
lb = np.asarray(lb, dtype=np.float64)
ub = np.asarray(ub, dtype=np.float64)
p = cls()
p.setA(np.vstack((np.eye(lb.size), -np.eye(lb.size))))
p.setB(np.hstack((ub, -lb)))
return p
# For backward compatibility
from_bounds = fromBounds
def getDrawingVertices(self):
return np.hstack(self.generatorPoints()).T
class EllipsoidExtension:
def getDrawingVertices(self):
if self.getDimension() == 2:
theta = np.linspace(0, 2 * np.pi, 100)
y = np.vstack((np.sin(theta), np.cos(theta)))
return (self.getC().dot(y) + self.getD()).T
elif self.getDimension() == 3:
theta = np.linspace(0, 2 * np.pi, 20)
y = np.vstack((np.sin(theta), np.cos(theta), np.zeros_like(theta)))
for phi in np.linspace(0, np.pi, 10):
R = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(phi), -np.sin(phi)],
[0.0, np.sin(phi), np.cos(phi)]])
y = np.hstack((y, R.dot(y)))
x = self.getC().dot(y) + self.getD()
return x.T
else:
raise NotImplementedError("Ellipsoid vertices not implemented for dimension < 2 or > 3")
class IRISDebugDataExtension:
def iterRegions(self):
return itertools.izip(self.polyhedron_history, self.ellipsoid_history)
def animate(self, fig=None, pause=0.5, show=True, repeat_delay=2.0):
dim = self.bounds.getDimension()
if dim < 2 or dim > 3:
raise NotImplementedError("animation is not implemented for dimension < 2 or > 3")
if fig is None:
fig = plt.figure()
if dim == 3:
ax = a3.Axes3D(fig)
else:
ax = plt.gca()
bounding_pts = np.hstack(self.boundingPoints()).T
if bounding_pts.size > 0:
lb = bounding_pts.min(axis=0)
ub = bounding_pts.max(axis=0)
assert(lb.size == dim)
assert(ub.size == dim)
width = ub - lb
ax.set_xlim(lb[0] - 0.1 * width[0], ub[0] + 0.1 * width[0])
ax.set_ylim(lb[1] - 0.1 * width[1], ub[1] + 0.1 * width[1])
if dim == 3:
ax.set_zlim(lb[2] - 0.1 * width[2], ub[2] + 0.1 * width[2])
artist_sets = []
for poly, ellipsoid in self.iterRegions():
artists = []
d = self.ellipsoid_history[0].getD()
if dim == 3:
artists.extend(ax.plot([d[0]], [d[1]], 'go', zs=[d[2]], markersize=10))
else:
artists.extend(ax.plot([d[0]], [d[1]], 'go', markersize=10))
artists.extend(poly.draw(ax))
artists.extend(ellipsoid.draw(ax))
for obs in self.getObstacles():
artists.extend(drawing.draw_convhull(obs.T, ax, edgecolor='k', facecolor='k', alpha=0.5))
artist_sets.append(tuple(artists))
ani = animation.ArtistAnimation(fig, artist_sets, interval=pause*1000, repeat_delay=repeat_delay*1000)
if show:
plt.show()
| [
"numpy.eye",
"numpy.hstack",
"matplotlib.pyplot.gca",
"drawing.draw_convhull",
"numpy.asarray",
"matplotlib.animation.ArtistAnimation",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"itertools.izip",
"numpy.sin",
"numpy.zeros_like",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.p... | [((366, 398), 'numpy.asarray', 'np.asarray', (['lb'], {'dtype': 'np.float64'}), '(lb, dtype=np.float64)\n', (376, 398), True, 'import numpy as np\n'), ((412, 444), 'numpy.asarray', 'np.asarray', (['ub'], {'dtype': 'np.float64'}), '(ub, dtype=np.float64)\n', (422, 444), True, 'import numpy as np\n'), ((1693, 1756), 'itertools.izip', 'itertools.izip', (['self.polyhedron_history', 'self.ellipsoid_history'], {}), '(self.polyhedron_history, self.ellipsoid_history)\n', (1707, 1756), False, 'import itertools\n'), ((3343, 3447), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'artist_sets'], {'interval': '(pause * 1000)', 'repeat_delay': '(repeat_delay * 1000)'}), '(fig, artist_sets, interval=pause * 1000,\n repeat_delay=repeat_delay * 1000)\n', (3368, 3447), True, 'import matplotlib.animation as animation\n'), ((541, 561), 'numpy.hstack', 'np.hstack', (['(ub, -lb)'], {}), '((ub, -lb))\n', (550, 561), True, 'import numpy as np\n'), ((848, 878), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (859, 878), True, 'import numpy as np\n'), ((2040, 2052), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2050, 2052), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3477, 3479), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1081), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(20)'], {}), '(0, 2 * np.pi, 20)\n', (1063, 1081), True, 'import numpy as np\n'), ((1185, 1210), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(10)'], {}), '(0, np.pi, 10)\n', (1196, 1210), True, 'import numpy as np\n'), ((2099, 2113), 'mpl_toolkits.mplot3d.Axes3D', 'a3.Axes3D', (['fig'], {}), '(fig)\n', (2108, 2113), True, 'import mpl_toolkits.mplot3d as a3\n'), ((2153, 2162), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2160, 2162), True, 'import matplotlib.pyplot as plt\n'), ((489, 504), 'numpy.eye', 
'np.eye', (['lb.size'], {}), '(lb.size)\n', (495, 504), True, 'import numpy as np\n'), ((906, 919), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (912, 919), True, 'import numpy as np\n'), ((921, 934), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (927, 934), True, 'import numpy as np\n'), ((3206, 3279), 'drawing.draw_convhull', 'drawing.draw_convhull', (['obs.T', 'ax'], {'edgecolor': '"""k"""', 'facecolor': '"""k"""', 'alpha': '(0.5)'}), "(obs.T, ax, edgecolor='k', facecolor='k', alpha=0.5)\n", (3227, 3279), False, 'import drawing\n'), ((507, 522), 'numpy.eye', 'np.eye', (['lb.size'], {}), '(lb.size)\n', (513, 522), True, 'import numpy as np\n'), ((1109, 1122), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1115, 1122), True, 'import numpy as np\n'), ((1124, 1137), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1130, 1137), True, 'import numpy as np\n'), ((1139, 1159), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (1152, 1159), True, 'import numpy as np\n'), ((1295, 1306), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1301, 1306), True, 'import numpy as np\n'), ((1359, 1370), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1365, 1370), True, 'import numpy as np\n'), ((1372, 1383), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1378, 1383), True, 'import numpy as np\n'), ((1309, 1320), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1315, 1320), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
from config import params, data, w2v
class RNN(nn.Module):
def __init__(self, params, data):
super(RNN, self).__init__()
self.params = params
self.data = data
self.BATCH_SIZE = params["BATCH_SIZE"]
self.SELECTION_SIZE = params["SELECTION_SIZE"]
self.MAX_SENT_LEN = params["MAX_SENT_LEN"]
self.WORD_DIM = params["WORD_DIM"]
self.VOCAB_SIZE = params["VOCAB_SIZE"]
self.CLASS_SIZE = params["CLASS_SIZE"]
self.FILTERS = params["FILTERS"]
self.FILTER_NUM = params["FILTER_NUM"]
self.DROPOUT_EMBED_PROB = params["DROPOUT_EMBED"]
self.DROPOUT_MODEL_PROB = params["DROPOUT_MODEL"]
self.EMBEDDING = params["EMBEDDING"]
self.input_size = self.WORD_DIM
self.hidden_size = params["HIDDEN_SIZE"]
self.hidden_layers = params["HIDDEN_LAYERS"]
self.output_size = params["CLASS_SIZE"]
self.NUM_EMBEDDINGS = self.VOCAB_SIZE + 2
assert (len(self.FILTERS) == len(self.FILTER_NUM))
if self.EMBEDDING != "random":
self.wv_matrix = w2v["w2v"]
self.init_model()
def init_model(self):
self.embed = nn.Embedding(self.NUM_EMBEDDINGS, self.WORD_DIM, padding_idx=self.VOCAB_SIZE + 1)
if self.EMBEDDING != "random":
self.embed.weight.data.copy_(torch.from_numpy(self.wv_matrix))
self.bigru = nn.GRU(self.WORD_DIM, self.hidden_size, dropout=self.DROPOUT_MODEL_PROB, num_layers=self.hidden_layers, bidirectional=True)
self.hidden2label = nn.Linear(self.hidden_size * 2, self.CLASS_SIZE)
self.dropout = nn.Dropout(self.DROPOUT_EMBED_PROB)
if self.params["CUDA"]:
self.cuda()
def forward(self, input):
if len(input.size()) == 1:
input = input.unsqueeze(0)
hidden = self.init_hidden(self.hidden_layers, len(input))
# print(hidden)
input = input.transpose(0, 1)
embed = self.embed(input)
# print(embed)
embed = self.dropout(embed) # add this reduce the acc
input = embed.view(len(input), embed.size(1), -1)
gru_out, hidden = self.bigru(input, hidden)
# gru_out = (59 x 25 x 2400)
gru_out = gru_out.permute(1, 2, 0)
# gru_out = (25 x 2400 x 59)
gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
# gru_out = (25 x 2400)
gru_out = F.relu(gru_out)
y = self.hidden2label(gru_out)
return y
def init_hidden(self, num_layers, batch_size):
hidden = Variable(torch.zeros(num_layers * 2, batch_size, self.hidden_size))
if self.params["CUDA"]:
hidden = hidden.cuda()
return hidden
"""
load word2vec pre trained vectors
"""
def load_word2vec(self):
print("loading word2vec...")
word_vectors = KeyedVectors.load_word2vec_format(
"GoogleNews-vectors-negative300.bin", binary=True)
wv_matrix = []
for word in self.data["vocab"]:
if word in word_vectors.vocab:
wv_matrix.append(word_vectors.word_vec(word))
else:
wv_matrix.append(
np.random.uniform(-0.01, 0.01, 300).astype("float32"))
# one for UNK and one for zero padding
wv_matrix.append(np.random.uniform(-0.01, 0.01, 300).astype("float32"))
wv_matrix.append(np.zeros(300).astype("float32"))
wv_matrix = np.array(wv_matrix)
return wv_matrix
| [
"torch.nn.Dropout",
"gensim.models.keyedvectors.KeyedVectors.load_word2vec_format",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.GRU"
] | [((1360, 1446), 'torch.nn.Embedding', 'nn.Embedding', (['self.NUM_EMBEDDINGS', 'self.WORD_DIM'], {'padding_idx': '(self.VOCAB_SIZE + 1)'}), '(self.NUM_EMBEDDINGS, self.WORD_DIM, padding_idx=self.\n VOCAB_SIZE + 1)\n', (1372, 1446), True, 'import torch.nn as nn\n'), ((1577, 1704), 'torch.nn.GRU', 'nn.GRU', (['self.WORD_DIM', 'self.hidden_size'], {'dropout': 'self.DROPOUT_MODEL_PROB', 'num_layers': 'self.hidden_layers', 'bidirectional': '(True)'}), '(self.WORD_DIM, self.hidden_size, dropout=self.DROPOUT_MODEL_PROB,\n num_layers=self.hidden_layers, bidirectional=True)\n', (1583, 1704), True, 'import torch.nn as nn\n'), ((1729, 1777), 'torch.nn.Linear', 'nn.Linear', (['(self.hidden_size * 2)', 'self.CLASS_SIZE'], {}), '(self.hidden_size * 2, self.CLASS_SIZE)\n', (1738, 1777), True, 'import torch.nn as nn\n'), ((1801, 1836), 'torch.nn.Dropout', 'nn.Dropout', (['self.DROPOUT_EMBED_PROB'], {}), '(self.DROPOUT_EMBED_PROB)\n', (1811, 1836), True, 'import torch.nn as nn\n'), ((2595, 2610), 'torch.nn.functional.relu', 'F.relu', (['gru_out'], {}), '(gru_out)\n', (2601, 2610), True, 'import torch.nn.functional as F\n'), ((3039, 3127), 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['"""GoogleNews-vectors-negative300.bin"""'], {'binary': '(True)'}), "('GoogleNews-vectors-negative300.bin',\n binary=True)\n", (3072, 3127), False, 'from gensim.models.keyedvectors import KeyedVectors\n'), ((3639, 3658), 'numpy.array', 'np.array', (['wv_matrix'], {}), '(wv_matrix)\n', (3647, 3658), True, 'import numpy as np\n'), ((2746, 2803), 'torch.zeros', 'torch.zeros', (['(num_layers * 2)', 'batch_size', 'self.hidden_size'], {}), '(num_layers * 2, batch_size, self.hidden_size)\n', (2757, 2803), False, 'import torch\n'), ((1522, 1554), 'torch.from_numpy', 'torch.from_numpy', (['self.wv_matrix'], {}), '(self.wv_matrix)\n', (1538, 1554), False, 'import torch\n'), ((3506, 3541), 'numpy.random.uniform', 'np.random.uniform', (['(-0.01)', 
'(0.01)', '(300)'], {}), '(-0.01, 0.01, 300)\n', (3523, 3541), True, 'import numpy as np\n'), ((3586, 3599), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (3594, 3599), True, 'import numpy as np\n'), ((3378, 3413), 'numpy.random.uniform', 'np.random.uniform', (['(-0.01)', '(0.01)', '(300)'], {}), '(-0.01, 0.01, 300)\n', (3395, 3413), True, 'import numpy as np\n')] |
from datetime import datetime
import os
import math
import logging
import warnings
from visnav.algo.tools import PositioningException
warnings.filterwarnings("ignore", module='quaternion', lineno=21)
import numpy as np
import cv2
import sys
from visnav.render.render import RenderEngine
from visnav.algo import tools
from visnav.algo.keypoint import KeypointAlgo
from visnav.missions.rosetta import RosettaSystemModel
logger = logging.getLogger(__name__)
def main(outfile='spl-test.log', img_path='67p-imgs', config=None):
logger.info('Setting up renderer and loading the 3d model...')
tconfig = config or {}
config = {'verbose': 1, 'feat': KeypointAlgo.AKAZE, 'v16k': False, 'view_width': 384}
config.update(tconfig)
v16k = config.pop('v16k')
view_width = config.pop('view_width')
sm = RosettaSystemModel(hi_res_shape_model=False, skip_obj_load=True, res_mult=512/1024, view_width=view_width)
re = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.1 * sm.min_distance, 1.1 * sm.max_distance)
obj_path = os.path.join(os.path.dirname(__file__), 'data', '67p-16k.obj' if v16k else '67p-4k.obj')
obj_idx = re.load_object(obj_path)
spl = KeypointAlgo(sm, re, obj_idx)
spl.RENDER_TEXTURES = False
img_path = os.path.join(os.path.dirname(__file__), 'data', img_path)
imgfiles = sorted([fname for fname in os.listdir(img_path) if fname[-4:].lower() in ('.jpg', '.png')])
with open(outfile, 'w') as file:
file.write(' '.join(sys.argv) + '\n' + '\t'.join(log_columns()) + '\n')
# loop through some test images
for i, fname in enumerate(imgfiles):
imgfile = os.path.join(img_path, fname)
lblfile = imgfile[:-4] + '.lbl'
if not os.path.exists(lblfile):
lblfile = imgfile[:-6] + '.lbl'
# load a noisy system state
sm.load_state(lblfile, sc_ast_vertices=False)
initial = {
'time': sm.time.value,
'ast_axis': sm.asteroid_axis,
'sc_rot': sm.spacecraft_rot,
'sc_pos': sm.spacecraft_pos,
'ast_pos': sm.asteroid.real_position,
}
# get result and log result stats
try:
spl.solve_pnp(imgfile, None, scale_cam_img=True, **config)
ok = True
except PositioningException as e:
ok = False
rtime = spl.timer.elapsed
# calculate results
results = calculate_result(sm, spl, fname, ok, initial)
# write log entry
write_log_entry(outfile, i, rtime, 0.0, *results)
def calculate_result(sm, spl, fname, ok, initial):
# save function values from optimization
fvals = getattr(spl, 'extra_values', None)
final_fval = fvals[-1] if fvals else None
real_rel_rot = tools.q_to_lat_lon_roll(sm.real_sc_asteroid_rel_q())
elong, direc = sm.solar_elongation(real=True)
r_ast_axis = sm.real_asteroid_axis
# real system state
params = (sm.time.real_value, *r_ast_axis,
*sm.real_spacecraft_rot, math.degrees(elong), math.degrees(direc),
*sm.real_spacecraft_pos, sm.real_spacecraft_altitude, *map(math.degrees, real_rel_rot),
fname, final_fval)
# calculate added noise
#
time_noise = initial['time'] - sm.time.real_value
ast_rot_noise = (
initial['ast_axis'][0] - r_ast_axis[0],
initial['ast_axis'][1] - r_ast_axis[1],
360 * time_noise / sm.asteroid.rotation_period
+ (initial['ast_axis'][2] - r_ast_axis[2])
)
sc_rot_noise = tuple(np.subtract(initial['sc_rot'], sm.real_spacecraft_rot))
dev_angle = math.degrees(tools.angle_between_lat_lon_roll(map(math.radians, ast_rot_noise),
map(math.radians, sc_rot_noise)))
sc_loc_noise = ('', '', '')
noise = sc_loc_noise + (time_noise,) + ast_rot_noise + sc_rot_noise + (dev_angle,)
if np.all(ok):
ok_pos, ok_rot = True, True
elif not np.any(ok):
ok_pos, ok_rot = False, False
else:
ok_pos, ok_rot = ok
if ok_pos:
pos = sm.spacecraft_pos
pos_err = tuple(np.subtract(pos, sm.real_spacecraft_pos))
else:
pos = float('nan') * np.ones(3)
pos_err = tuple(float('nan') * np.ones(3))
if ok_rot:
rel_rot = tools.q_to_lat_lon_roll(sm.sc_asteroid_rel_q())
rot_err = (math.degrees(tools.wrap_rads(tools.angle_between_lat_lon_roll(rel_rot, real_rel_rot))),)
else:
rel_rot = float('nan') * np.ones(3)
rot_err = (float('nan'),)
alt = float('nan')
if ok_pos and ok_rot:
est_vertices = sm.sc_asteroid_vertices()
max_shift = float('nan') if est_vertices is None else \
tools.sc_asteroid_max_shift_error(est_vertices, sm.asteroid.real_sc_ast_vertices)
alt = sm.spacecraft_altitude or float('nan')
both_err = (max_shift, alt - (sm.real_spacecraft_altitude or float('nan')))
else:
both_err = (float('nan'), float('nan'),)
err = pos_err + rot_err + both_err
return params, noise, pos, alt, map(math.degrees, rel_rot), fvals, err
def log_columns():
return (
'iter', 'date', 'execution time',
'time', 'ast lat', 'ast lon', 'ast rot',
'sc lat', 'sc lon', 'sc rot',
'sol elong', 'light dir', 'x sc pos', 'y sc pos', 'z sc pos', 'sc altitude',
'rel yaw', 'rel pitch', 'rel roll',
'imgfile', 'extra val', 'shape model noise',
'sc pos x dev', 'sc pos y dev', 'sc pos z dev',
'time dev', 'ast lat dev', 'ast lon dev', 'ast rot dev',
'sc lat dev', 'sc lon dev', 'sc rot dev', 'total dev angle',
'x est sc pos', 'y est sc pos', 'z est sc pos', 'altitude est sc',
'yaw rel est', 'pitch rel est', 'roll rel est',
'x err sc pos', 'y err sc pos', 'z err sc pos', 'rot error',
'shift error km', 'altitude error', 'lat error (m/km)', 'dist error (m/km)', 'rel shift error (m/km)',
)
def write_log_entry(logfile, i, rtime, sm_noise, params, noise, pos, alt, rel_rot, fvals, err):
# # save execution time
# self.run_times.append(rtime)
# calculate errors
dist = abs(params[-7])
if not math.isnan(err[0]):
lerr = 1000*math.sqrt(err[0]**2 + err[1]**2) / dist # m/km
derr = 1000*err[2] / dist # m/km
rerr = abs(err[3]) # deg
serr = 1000*err[4] / dist # m/km
fail = 0
else:
lerr = derr = rerr = serr = float('nan')
fail = 1
# self.laterrs.append(lerr)
# self.disterrs.append(abs(derr))
# self.roterrs.append(rerr)
# self.shifterrs.append(serr)
# self.fails.append(fail)
# log all parameter values, timing & errors into a file
with open(logfile, 'a') as file:
file.write('\t'.join(map(str, (
i, datetime.now().strftime("%Y-%m-%d %H:%M:%S"), rtime, *params,
sm_noise, *noise, *pos, alt, *rel_rot, *err, lerr, derr, serr
)))+'\n')
# log opt fun values in other file
if fvals:
with open('fval-'+logfile, 'a') as file:
file.write(str(i)+'\t'+'\t'.join(map(str, fvals))+'\n')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
config = [
('spl-orb-4k-384.log', {
'feat': KeypointAlgo.ORB,
'v16k': False,
'view_width': 384,
}),
('spl-akaze-4k-384.log', {
'feat': KeypointAlgo.AKAZE,
'v16k': False,
'view_width': 384,
}),
('spl-orb-4k-512.log', {
'feat': KeypointAlgo.ORB,
'v16k': False,
'view_width': 512,
}),
('spl-akaze-4k-512.log', {
'feat': KeypointAlgo.AKAZE,
'v16k': False,
'view_width': 512,
}),
('spl-orb-16k-512.log', {
'feat': KeypointAlgo.ORB,
'v16k': True,
'view_width': 512,
}),
('spl-akaze-16k-512.log', {
'feat': KeypointAlgo.AKAZE,
'v16k': True,
'view_width': 512,
}),
]
test_ids = list(map(str, range(1, len(config) + 1)))
if len(sys.argv) != 2 or sys.argv[1] not in test_ids:
logger.error('USAGE: %s %s' % (sys.argv[0], '|'.join(test_ids)))
test_id = int(sys.argv[1]) - 1
main(config[test_id][0], config=config[test_id][1])
| [
"logging.getLogger",
"math.sqrt",
"visnav.algo.keypoint.KeypointAlgo",
"os.path.exists",
"visnav.algo.tools.angle_between_lat_lon_roll",
"os.listdir",
"visnav.missions.rosetta.RosettaSystemModel",
"visnav.render.render.RenderEngine",
"numpy.subtract",
"numpy.ones",
"math.degrees",
"numpy.any",... | [((144, 209), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'module': '"""quaternion"""', 'lineno': '(21)'}), "('ignore', module='quaternion', lineno=21)\n", (167, 209), False, 'import warnings\n'), ((451, 478), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (468, 478), False, 'import logging\n'), ((853, 966), 'visnav.missions.rosetta.RosettaSystemModel', 'RosettaSystemModel', ([], {'hi_res_shape_model': '(False)', 'skip_obj_load': '(True)', 'res_mult': '(512 / 1024)', 'view_width': 'view_width'}), '(hi_res_shape_model=False, skip_obj_load=True, res_mult=\n 512 / 1024, view_width=view_width)\n', (871, 966), False, 'from visnav.missions.rosetta import RosettaSystemModel\n'), ((972, 1036), 'visnav.render.render.RenderEngine', 'RenderEngine', (['sm.view_width', 'sm.view_height'], {'antialias_samples': '(0)'}), '(sm.view_width, sm.view_height, antialias_samples=0)\n', (984, 1036), False, 'from visnav.render.render import RenderEngine\n'), ((1289, 1318), 'visnav.algo.keypoint.KeypointAlgo', 'KeypointAlgo', (['sm', 're', 'obj_idx'], {}), '(sm, re, obj_idx)\n', (1301, 1318), False, 'from visnav.algo.keypoint import KeypointAlgo\n'), ((4095, 4105), 'numpy.all', 'np.all', (['ok'], {}), '(ok)\n', (4101, 4105), True, 'import numpy as np\n'), ((7543, 7582), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (7562, 7582), False, 'import logging\n'), ((1160, 1185), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1175, 1185), False, 'import os\n'), ((1383, 1408), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1398, 1408), False, 'import os\n'), ((1757, 1786), 'os.path.join', 'os.path.join', (['img_path', 'fname'], {}), '(img_path, fname)\n', (1769, 1786), False, 'import os\n'), ((3174, 3193), 'math.degrees', 'math.degrees', (['elong'], {}), '(elong)\n', (3186, 3193), False, 'import math\n'), 
((3195, 3214), 'math.degrees', 'math.degrees', (['direc'], {}), '(direc)\n', (3207, 3214), False, 'import math\n'), ((3710, 3764), 'numpy.subtract', 'np.subtract', (["initial['sc_rot']", 'sm.real_spacecraft_rot'], {}), "(initial['sc_rot'], sm.real_spacecraft_rot)\n", (3721, 3764), True, 'import numpy as np\n'), ((6441, 6459), 'math.isnan', 'math.isnan', (['err[0]'], {}), '(err[0])\n', (6451, 6459), False, 'import math\n'), ((1846, 1869), 'os.path.exists', 'os.path.exists', (['lblfile'], {}), '(lblfile)\n', (1860, 1869), False, 'import os\n'), ((4158, 4168), 'numpy.any', 'np.any', (['ok'], {}), '(ok)\n', (4164, 4168), True, 'import numpy as np\n'), ((4325, 4365), 'numpy.subtract', 'np.subtract', (['pos', 'sm.real_spacecraft_pos'], {}), '(pos, sm.real_spacecraft_pos)\n', (4336, 4365), True, 'import numpy as np\n'), ((4408, 4418), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (4415, 4418), True, 'import numpy as np\n'), ((4710, 4720), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (4717, 4720), True, 'import numpy as np\n'), ((4937, 5023), 'visnav.algo.tools.sc_asteroid_max_shift_error', 'tools.sc_asteroid_max_shift_error', (['est_vertices', 'sm.asteroid.real_sc_ast_vertices'], {}), '(est_vertices, sm.asteroid.\n real_sc_ast_vertices)\n', (4970, 5023), False, 'from visnav.algo import tools\n'), ((1471, 1491), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (1481, 1491), False, 'import os\n'), ((4459, 4469), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (4466, 4469), True, 'import numpy as np\n'), ((6482, 6518), 'math.sqrt', 'math.sqrt', (['(err[0] ** 2 + err[1] ** 2)'], {}), '(err[0] ** 2 + err[1] ** 2)\n', (6491, 6518), False, 'import math\n'), ((4605, 4660), 'visnav.algo.tools.angle_between_lat_lon_roll', 'tools.angle_between_lat_lon_roll', (['rel_rot', 'real_rel_rot'], {}), '(rel_rot, real_rel_rot)\n', (4637, 4660), False, 'from visnav.algo import tools\n'), ((7174, 7188), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7186, 
7188), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
multiple labels classifier
"""
import nltk
from nltk.stem import WordNetLemmatizer
import zipfile
import pandas as pd
import numpy as np
import pickle
from collections import Counter
import gzip
import random
import sklearn
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from nltk.metrics import *
from sklearn.pipeline import Pipeline
def save(obj, filename, protocol=pickle.DEFAULT_PROTOCOL):
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f, protocol)
def load(filename):
with gzip.open(filename, 'rb') as f:
loaded_object = pickle.load(f)
return loaded_object
booklist=load('booklist.pc')
summarylist,excerptlist,genreslist=load('sum_exc_gen.pc')
sumwds,excwds = load('sum_exc_words.pc')
#%% the classifier for training and testing the whole model
class multiclassify():
def __init__(self,sumwds,excwds,tags):
self._get_ids(sumwds,excwds)
self.tags=tags
# def _train_fiction_weights(self, sum_cl, exc_cl):
# '''input the seperate models and train a model for fiction classification'''
# self.fic_sum=sum_cl
# self.fic_exc=exc_cl
def _train_single(self, wordset, genreslist,trainid,testid,trace=True):
models={}
featureset = make_featureset(wordset)
tags=self.tags
for tag in tags:
if trace: print(tag)
train, test = make_data(featureset,tag,genreslist,trainid,testid)
cl = my_clssifier(sklearn.linear_model.LogisticRegression(class_weight='balanced'))
# cl.fit(train, test)
# if trace: print('fit acc:',cl.acc)
cl.pipfit(train,test)
if trace: print('pipfit acc:',cl.pipacc)
models[tag]=cl
return models
def _get_ids(self, sumwds,excwds):
ids = [i for i in excwds]
ids = [i for i in ids if i in sumwds]
self.mix_trainid,self.mix_testid=set_sep(ids)
self.sum_trainid,self.sum_testid=self._extend_ids(sumwds,self.mix_trainid,self.mix_testid)
self.exc_trainid,self.exc_testid=self._extend_ids(excwds,self.mix_trainid,self.mix_testid)
def _extend_ids(self,wds,trainid,testid):
id1=trainid+testid
idmore=[i for i in wds if i not in id1]
trainid2,testid2=set_sep(idmore)
return trainid+trainid2,testid+testid2
def train_tag(self,sumwds,excwds,genreslist,trace=True):
print('training for summary')
self.summodel = self._train_single(sumwds,genreslist,self.sum_trainid,self.sum_testid,trace)
print('training for excerpt')
self.excmodel = self._train_single(excwds,genreslist,self.exc_trainid,self.exc_testid,trace)
def use_tag_model(self,tag,source):
if source=='sum':
return self.summodel[tag]
elif source=='exc':
return self.excmodel[tag]
else:
print('Error!')
def _train_mix_model_single_plot(self,tag,sumwds,excwds,genreslist):
'''train the model to weight sum and exc for a tag'''
ids=self.mix_trainid+self.mix_testid
data=pd.DataFrame(index=ids,columns=['sum','exc','label'])
s_featureset=make_featureset(sumwds)
e_featureset=make_featureset(excwds)
for id in ids:
data.loc[id,'sum']=self.summodel[tag].pipclassifier.prob_classify(s_featureset[id]).prob(True)
data.loc[id,'exc']=self.excmodel[tag].pipclassifier.prob_classify(e_featureset[id]).prob(True)
data.loc[id,'label']= (tag in genreslist[id])
mix_train=data.loc[self.mix_trainid]
mix_test=data.loc[self.mix_testid]
X_train=mix_train.iloc[:,0:2]
X_test=mix_test.iloc[:,0:2]
y_train=list(mix_train.iloc[:,2].values)
y_test=list(mix_test.iloc[:,2].values)
plt.figure()
w=np.array(range(1,1000))/1000
acc=lambda x:((np.sum(X_test.as_matrix()* np.array([x,1-x]),axis=1)>0.5)==(np.array(y_test)==True)).mean()
plt.plot(w,list(map(acc,w)), label='Test')
acc=lambda x:((np.sum(X_train.as_matrix()* np.array([x,1-x]),axis=1)>0.5)==(np.array(y_train)==True)).mean()
plt.plot(w,list(map(acc,w)),':', label='Train')
plt.xlabel('weight')
plt.ylabel('accuracy')
plt.legend()
plt.title('Combination of two models')
def _train_mix_model_single(self,tag,sumwds,excwds,genreslist):
print(tag)
ids=self.mix_trainid+self.mix_testid
data=pd.DataFrame(index=ids,columns=['sum','exc','label'])
s_featureset=make_featureset(sumwds)
e_featureset=make_featureset(excwds)
for id in ids:
data.loc[id,'sum']=self.summodel[tag].pipclassifier.prob_classify(s_featureset[id]).prob(True)
data.loc[id,'exc']=self.excmodel[tag].pipclassifier.prob_classify(e_featureset[id]).prob(True)
data.loc[id,'label']= (tag in genreslist[id])
mix_train=data.loc[self.mix_trainid]
mix_test=data.loc[self.mix_testid]
X_train=mix_train.iloc[:,0:2]
X_test=mix_test.iloc[:,0:2]
y_train=list(mix_train.iloc[:,2].values)
y_test=list(mix_test.iloc[:,2].values)
cl = sklearn.linear_model.LogisticRegression(class_weight='balanced')
cl.fit(X_train, y_train)
cl.acc = cl.score(X_test,y_test)
print('fit acc:',cl.acc)
return cl
def train_mix(self,sumwds,excwds,genreslist):
self.mixmodel={}
tags=self.tags
for tag in tags:
self.mixmodel[tag]=self._train_mix_model_single(tag,sumwds,excwds,genreslist)
print('done.')
def show_acc(self):
tags=list(self.mixmodel.keys())
acc = pd.DataFrame(index=tags,columns=['sum','exc','mix'])
for tag in tags:
acc.loc[tag,'sum']=self.summodel[tag].pipacc
acc.loc[tag,'exc']=self.excmodel[tag].pipacc
acc.loc[tag,'mix']=self.mixmodel[tag].acc
self.acc_table = acc
return acc
def _calc_fscore_single(self,tag,sumwds,excwds,genreslist):
print(tag)
ids=self.mix_testid
data=pd.DataFrame(index=ids,columns=['sum','exc','mix','label'])
s_featureset=make_featureset(sumwds)
e_featureset=make_featureset(excwds)
for id in ids:
data.loc[id,'sum']=self.summodel[tag].pipclassifier.prob_classify(s_featureset[id]).prob(True)
data.loc[id,'exc']=self.excmodel[tag].pipclassifier.prob_classify(e_featureset[id]).prob(True)
data.loc[id,'label']= (tag in genreslist[id])
data.loc[:,'mix']=self.mixmodel[tag].predict(data.loc[:,['sum','exc']])
data.loc[:,'sum']=(data.loc[:,'sum']>0.5)
data.loc[:,'exc']=(data.loc[:,'exc']>0.5)
fun=sklearn.metrics.precision_recall_fscore_support
def fun(y_true,y_pred):
y_true1,y_pred1 = list(y_true.values),list(y_pred.values)
return sklearn.metrics.precision_recall_fscore_support(y_true1,y_pred1,labels=[True,False])
fsum=fun(data['label'],data['sum'])
fexc=fun(data['label'],data['exc'])
fmix=fun(data['label'],data['mix'])
return fsum,fexc,fmix
def show_fscore(self,sumwds,excwds,genreslist):
tags = self.tags
scores = {}
for tag in tags:
fsum,fexc,fmix = self._calc_fscore_single(tag,sumwds,excwds,genreslist)
scores[tag] = {'sum':fsum,'exc':fexc,'mix':fmix}
self.scores=scores
return scores
def get_f1score(self):
scores=self.scores
tags=self.tags
acc = pd.DataFrame(index=tags,columns=['sum','exc','mix'])
for tag in tags:
for c in acc.columns:
acc.loc[tag,c]=scores[tag][c][2].mean()
self.acc_table = acc
return acc
def save(self):
save(self.__dict__,'tag_models.pc')
def load(self):
self.__dict__.update(load('tag_models.pc'))
features = lambda words: {w:n for w,n in Counter(words).items()}
def make_featureset(wordset):
'''make the feature set from a words dictionary'''
return {c:features(d) for c,d in wordset.items()}
def make_data(featureset,tag,genreslist,trainid,testid):
'''after make_featureset, make the data for nltk'''
train=[(featureset[c], tag in genreslist[c]) for c in trainid]
test=[(featureset[c], tag in genreslist[c]) for c in testid]
return train,test
def set_sep(sets0,test_frac=0.1):
sets = sets0.copy()
random.seed(777)
random.shuffle(sets)
n = int(test_frac*len(sets))
return sets[n:], sets[:n]
class my_clssifier():
def __init__(self, skclssif):
self.skclssif=skclssif
self.classif=nltk.classify.scikitlearn.SklearnClassifier(self.skclssif)
# try:
# self.classif._clf.set_params(n_jobs=-1)
# except:
# pass
self.pip=Pipeline([('tfidf', sklearn.feature_extraction.text.TfidfTransformer()),
#('chi2', sklearn.feature_selection.SelectKBest(sklearn.feature_selection.chi2, k=1000)),
('NB',skclssif)])
self.pipclassif=nltk.classify.scikitlearn.SklearnClassifier(self.pip)
def fit(self, train, test):
self.classifier=self.classif.train(train)
self.acc=(nltk.classify.accuracy(self.classifier, test))
def pipfit(self, train, test):
self.pipclassifier=self.pipclassif.train(train)
self.pipacc=(nltk.classify.accuracy(self.pipclassifier, test))
#%% training
tags=set()
for id in genreslist:
tags.update(genreslist[id])
tags.remove('Fiction')
tags.remove('Nonfiction')
tag_cl = multiclassify(sumwds,excwds,tags)
tag_cl.train_tag(sumwds,excwds,genreslist)
tag_cl.train_mix(sumwds,excwds,genreslist)
tag_cl.save()
#%% read the model
# Rebuild the tag set exactly as during training, reload the saved model,
# and assemble accuracy / F1 tables augmented with per-tag sample rates.
tags = set()
for book_id in genreslist:
    tags.update(genreslist[book_id])
tags.remove('Fiction')
tags.remove('Nonfiction')
tag_cl = multiclassify(sumwds, excwds, tags)
tag_cl.load()
tag_cl._train_mix_model_single_plot(list(tags)[0], sumwds, excwds, genreslist)
acctable = tag_cl.show_acc()
acctable.mean()
# tag_cl.load()
# sumtestid=tag_cl.sumtestid
# add sample rate: fraction of books carrying each tag
acctable['per'] = 0
for tag in tags:
    acctable.loc[tag, 'per'] = np.mean([tag in genreslist[book_id] for book_id in genreslist])
#fscores=tag_cl.show_fscore(sumwds,excwds,genreslist)
f1score = tag_cl.get_f1score()
f1score['per'] = 0
for tag in tags:
    f1score.loc[tag, 'per'] = np.mean([tag in genreslist[book_id] for book_id in genreslist])
| [
"sklearn.feature_extraction.text.TfidfTransformer",
"gzip.open",
"matplotlib.pyplot.ylabel",
"nltk.classify.scikitlearn.SklearnClassifier",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame",
"random.shuffle",
"sklearn.metrics.precision_recall_fscore_support",
"pickle.lo... | [((8474, 8490), 'random.seed', 'random.seed', (['(777)'], {}), '(777)\n', (8485, 8490), False, 'import random\n'), ((8495, 8515), 'random.shuffle', 'random.shuffle', (['sets'], {}), '(sets)\n', (8509, 8515), False, 'import random\n'), ((10193, 10248), 'numpy.mean', 'np.mean', (['[(tag in genreslist[id]) for id in genreslist]'], {}), '([(tag in genreslist[id]) for id in genreslist])\n', (10200, 10248), True, 'import numpy as np\n'), ((10396, 10451), 'numpy.mean', 'np.mean', (['[(tag in genreslist[id]) for id in genreslist]'], {}), '([(tag in genreslist[id]) for id in genreslist])\n', (10403, 10451), True, 'import numpy as np\n'), ((474, 499), 'gzip.open', 'gzip.open', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (483, 499), False, 'import gzip\n'), ((514, 543), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'protocol'], {}), '(obj, f, protocol)\n', (525, 543), False, 'import pickle\n'), ((574, 599), 'gzip.open', 'gzip.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (583, 599), False, 'import gzip\n'), ((630, 644), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (641, 644), False, 'import pickle\n'), ((3123, 3179), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ids', 'columns': "['sum', 'exc', 'label']"}), "(index=ids, columns=['sum', 'exc', 'label'])\n", (3135, 3179), True, 'import pandas as pd\n'), ((3828, 3840), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3838, 3840), True, 'import matplotlib.pyplot as plt\n'), ((4227, 4247), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""weight"""'], {}), "('weight')\n", (4237, 4247), True, 'import matplotlib.pyplot as plt\n'), ((4256, 4278), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (4266, 4278), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4299), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4297, 4299), True, 'import matplotlib.pyplot as plt\n'), ((4308, 4346), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Combination of two models"""'], {}), "('Combination of two models')\n", (4317, 4346), True, 'import matplotlib.pyplot as plt\n'), ((4492, 4548), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ids', 'columns': "['sum', 'exc', 'label']"}), "(index=ids, columns=['sum', 'exc', 'label'])\n", (4504, 4548), True, 'import pandas as pd\n'), ((5202, 5266), 'sklearn.linear_model.LogisticRegression', 'sklearn.linear_model.LogisticRegression', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (5241, 5266), False, 'import sklearn\n'), ((5707, 5762), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'tags', 'columns': "['sum', 'exc', 'mix']"}), "(index=tags, columns=['sum', 'exc', 'mix'])\n", (5719, 5762), True, 'import pandas as pd\n'), ((6125, 6188), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ids', 'columns': "['sum', 'exc', 'mix', 'label']"}), "(index=ids, columns=['sum', 'exc', 'mix', 'label'])\n", (6137, 6188), True, 'import pandas as pd\n'), ((7585, 7640), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'tags', 'columns': "['sum', 'exc', 'mix']"}), "(index=tags, columns=['sum', 'exc', 'mix'])\n", (7597, 7640), True, 'import pandas as pd\n'), ((8687, 8745), 'nltk.classify.scikitlearn.SklearnClassifier', 'nltk.classify.scikitlearn.SklearnClassifier', (['self.skclssif'], {}), '(self.skclssif)\n', (8730, 8745), False, 'import nltk\n'), ((9112, 9165), 'nltk.classify.scikitlearn.SklearnClassifier', 'nltk.classify.scikitlearn.SklearnClassifier', (['self.pip'], {}), '(self.pip)\n', (9155, 9165), False, 'import nltk\n'), ((9266, 9311), 'nltk.classify.accuracy', 'nltk.classify.accuracy', (['self.classifier', 'test'], {}), '(self.classifier, test)\n', (9288, 9311), False, 'import nltk\n'), ((9425, 9473), 'nltk.classify.accuracy', 'nltk.classify.accuracy', (['self.pipclassifier', 'test'], {}), '(self.pipclassifier, test)\n', (9447, 9473), False, 'import nltk\n'), ((6931, 7023), 'sklearn.metrics.precision_recall_fscore_support', 
'sklearn.metrics.precision_recall_fscore_support', (['y_true1', 'y_pred1'], {'labels': '[True, False]'}), '(y_true1, y_pred1, labels=[\n True, False])\n', (6978, 7023), False, 'import sklearn\n'), ((1515, 1579), 'sklearn.linear_model.LogisticRegression', 'sklearn.linear_model.LogisticRegression', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (1554, 1579), False, 'import sklearn\n'), ((7979, 7993), 'collections.Counter', 'Counter', (['words'], {}), '(words)\n', (7986, 7993), False, 'from collections import Counter\n'), ((8885, 8935), 'sklearn.feature_extraction.text.TfidfTransformer', 'sklearn.feature_extraction.text.TfidfTransformer', ([], {}), '()\n', (8933, 8935), False, 'import sklearn\n'), ((3963, 3979), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (3971, 3979), True, 'import numpy as np\n'), ((4130, 4147), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4138, 4147), True, 'import numpy as np\n'), ((3930, 3950), 'numpy.array', 'np.array', (['[x, 1 - x]'], {}), '([x, 1 - x])\n', (3938, 3950), True, 'import numpy as np\n'), ((4097, 4117), 'numpy.array', 'np.array', (['[x, 1 - x]'], {}), '([x, 1 - x])\n', (4105, 4117), True, 'import numpy as np\n')] |
import cv2
import random
import numpy as np
import os
#import redis
#from getfile import get_image_size,get_stride,get_image_path,get_image_bs
import argparse
#path=get_image_path()
#stride=get_stride()
#image_size=get_image_size()
#batch_size=get_image_bs()
def parse_args():
    """Parse the command-line options for the image tiling script.

    --dataset is the input image path; --stride and --image_size control
    the tiling grid; --batch_size is accepted but unused here.
    """
    parser = argparse.ArgumentParser(description='inference')
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--stride', required=True, type=int, default=1024)
    parser.add_argument('--image_size', required=True, type=int, default=1024)
    parser.add_argument('--batch_size', type=int, default=1)
    return parser.parse_args()
def crop():
    """Tile the input image into overlapping crops and write them to disk.

    Pads the image with zeros up to the next multiple of `stride`, then
    writes `image_size` x `image_size` crops taken at `stride` intervals
    as numbered PNGs under /sniper/data/demo_batch/images/.

    Raises
    ------
    FileNotFoundError
        If the input image cannot be read.
    """
    args = parse_args()
    path = args.dataset
    image = cv2.imread(path)
    if image is None:
        # FIX: cv2.imread silently returns None on a bad path, which used to
        # surface later as an opaque AttributeError on .shape
        raise FileNotFoundError('cannot read image: %s' % path)
    h, w, _ = image.shape
    # pad to the next stride multiple so every crop window is inside bounds
    padding_h = (h // args.stride + 1) * args.stride
    padding_w = (w // args.stride + 1) * args.stride
    padding_img = np.zeros((padding_h, padding_w, 3), dtype=np.uint8)
    padding_img[0:h, 0:w, :] = image[:, :, :]
    # FIX: removed dead `image = np.asarray(padding_img, 'f')` (never used)
    n = 0
    for i in range(padding_h // args.stride):
        for j in range(padding_w // args.stride):
            crop = padding_img[i * args.stride:i * args.stride + args.image_size,
                               j * args.stride:j * args.stride + args.image_size, :]
            n += 1
            cv2.imwrite('/sniper/data/demo_batch/images/' + str(n) + '.png', crop)
    print('finished!')
if __name__ == '__main__':
    crop()
| [
"numpy.asarray",
"numpy.zeros",
"cv2.imread",
"argparse.ArgumentParser"
] | [((288, 336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""inference"""'}), "(description='inference')\n", (311, 336), False, 'import argparse\n'), ((1298, 1314), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1308, 1314), False, 'import cv2\n'), ((1459, 1510), 'numpy.zeros', 'np.zeros', (['(padding_h, padding_w, 3)'], {'dtype': 'np.uint8'}), '((padding_h, padding_w, 3), dtype=np.uint8)\n', (1467, 1510), True, 'import numpy as np\n'), ((1579, 1607), 'numpy.asarray', 'np.asarray', (['padding_img', '"""f"""'], {}), "(padding_img, 'f')\n", (1589, 1607), True, 'import numpy as np\n')] |
import operator, msgpack, nltk, math, sys, os
from nltk.corpus import stopwords
from datetime import datetime
from tqdm import tqdm
from argparse import ArgumentParser
import numpy as np
import dateutil.parser
from utils import settings, liwc_keys, lsm_keys, open_for_write, DEFAULT_TIMEZONE
from normalizer import expand_text, norms
#from contractions import contractions
from mention_graph import read_mention_names
# Build two lookup tables from the LIWC lexicon file (comma-separated
# "word,CATEGORY" lines): `liwc_words` maps exact words to a 0/1 vector
# over `liwc_keys`, and `liwc_stems` maps prefix patterns ("word*") to the
# same kind of vector, bucketed by prefix length.
liwc_words = {}
liwc_stems = {}
with open(settings['LIWC_PATH']) as handle:
    lines = handle.readlines()
    # First pass: find the shortest and longest stem ("*"-suffixed) lengths
    # so liwc_stems can be pre-bucketed by prefix length.
    longest_stem = 0
    shortest_stem = 99
    for line in lines:
        tline = [part.strip() for part in line.strip().split(",")]
        if tline[0].endswith("*") and len(tline[0])-1 > longest_stem:
            longest_stem = len(tline[0])-1
        elif tline[0].endswith("*") and len(tline[0])-1 < shortest_stem:
            shortest_stem = len(tline[0])-1
    for i in range(shortest_stem, longest_stem+1):
        liwc_stems[i] = {}
    #liwc_keys = ["POSEMO"]#, "NEGEMO"]
    # Second pass: fill the category indicator vectors, keeping only
    # categories listed in `liwc_keys` (imported from utils).
    for line in lines:
        tline = [part.strip() for part in line.strip().split(",")]
        if tline[1] in liwc_keys:
            if tline[0].endswith("*"):
                tpat = tline[0][:-1]
                if tpat not in liwc_stems[len(tpat)]:
                    liwc_stems[len(tpat)][tpat] = [0]*len(liwc_keys)
                liwc_stems[len(tpat)][tpat][liwc_keys.index(tline[1])] = 1
            else:
                if tline[0] not in liwc_words:
                    liwc_words[tline[0]] = [0]*len(liwc_keys)
                liwc_words[tline[0]][liwc_keys.index(tline[1])] = 1
# Summary of what was loaded (sanity check output).
#print("LIWC keys: "+ str(liwc_keys))
print("LIWC words: " + str(len(liwc_words)))
print("LIWC set words: " + str(len(set(liwc_words))))
#print("liwcwords: " + str([key for key in list(liwc_words.keys())[:10]]))
print("LIWC stems: " + str(len(liwc_stems)))
print("LIWC set stems: " + str(len(set(liwc_stems))))
#print("liwcstems: " + str([key for key in list(liwc_stems[3].keys())[:10]]))
print("Stem range: " + str(shortest_stem) + "-" + str(longest_stem))
print("-"*20)
# English stopword set used for per-message stopword counts.
stop_words = set(stopwords.words('english'))
# Mapping from an utterance to its canonical merged form; the file holds
# one "original:::merged" pair per line.
merge_dict = {}
with open('merged_utterances') as handle:
    for raw_line in handle:
        fields = raw_line.strip().split(':::')
        merge_dict[fields[0]] = fields[1]
def get_liwc_parts(token_set):
    """Count LIWC category hits over `token_set`.

    Exact-word matches take priority; only when a token is not in
    `liwc_words` are the stem (prefix) tables consulted.  Returns
    (counts, matches): a per-category count list aligned with `liwc_keys`
    and, per category, the list of tokens that matched it.
    """
    counts = np.zeros(len(liwc_keys), dtype=int)
    matches = [set() for _ in liwc_keys]
    for tok in token_set:
        hits = []
        if tok in liwc_words:
            hits.append(np.array(liwc_words[tok]))
        else:
            # try every stem length the lexicon contains, capped by token length
            limit = min(longest_stem + 1, len(tok))
            for plen in range(shortest_stem, limit):
                prefix = tok[:plen]
                if prefix in liwc_stems[plen]:
                    hits.append(np.array(liwc_stems[plen][prefix]))
        for vec in hits:
            counts += vec
            for cat in vec.nonzero()[0]:
                matches[cat].add(tok)
    return counts.tolist(), [list(mw) for mw in matches]
def main():
    """Augment msgpack conversation files in-place with optional features.

    Each file in DATA_MERGE_DIR is loaded, every message is annotated
    according to the selected command-line flags (LIWC counts, stopwords,
    timing, style matching, frequency, mentions, unique ids, normalized
    and merged text), and the file is written back.  Messages whose
    negative response time trips the sanity check are logged, with a few
    lines of context, to the "error_files" file.
    """
    er_file = open("error_files", "w")
    parser = ArgumentParser()
    parser.add_argument("-liwc", "--liwc-augment", dest="liwc", help="Augment the data with LIWC categories", default=False, action="store_true")
    parser.add_argument("-time", "--time", dest="time", help="Augment the data with time information", default=False, action="store_true")
    parser.add_argument("-normalize", "--normalize", dest="normalize", help="Augment the data with normalized text", default=False, action="store_true")
    parser.add_argument("-merge", "--merge", dest="merged", help="Augment the data with merged utterances for top utterances from file", default=False, action="store_true")
    parser.add_argument("-style", "--style", dest="style", help="Augment the data with style matching (LSM)", default=False, action="store_true")
    parser.add_argument("-freq", "--frequency", dest="freq", help="Augment the data with communication frequency information", default=False, action="store_true")
    parser.add_argument("-stopwords", "--stopword-augment", dest="stopwords", help="Augment the data with stopwords", default=False, action="store_true")
    parser.add_argument("-all", "--all", dest="all", help="Augment with all flags set to true", default=False, action="store_true")
    parser.add_argument("-mentions", "--mentions", dest="mentions", help="Augment with mentions of other tagged people", default=False, action="store_true")
    parser.add_argument("-uid", "--uid", dest="uid", help="Augment with unique message IDs accross entire corpus.", default=False, action="store_true")
    parser.add_argument("-context", "--context", dest="context", help="Number of context utterances to use for LSM (default 100)", default=100, type=int)
    parser.add_argument("-quick", "--quick", dest="quick", help="Augment more quickly by skipping the normalization and merging of utterances", default=False, action="store_true")
    opt = parser.parse_args()
    if opt.all:
        opt.liwc, opt.time, opt.style, opt.freq, opt.stopwords, opt.mentions, opt.uid, opt.normalize, opt.merged = [True]*9
    # MSG_UNIQID: global running counter for unique message ids (-uid)
    MSG_UNIQID = 0
    mention_names, first_names = [None]*2
    if opt.mentions:
        mention_names, first_names = read_mention_names()
    # human-readable names of the enabled augmentations, for the banner below
    aug_set = [lp[1] for lp in [[opt.liwc, "LIWC"], [opt.stopwords, "stopwords"], [opt.time, "time"], [opt.freq, "frequency"], [opt.style, "style"], [opt.mentions, "mentions"], [opt.uid, "unique ID"], [opt.normalize, 'normalized'], [opt.merged, 'merged']] if lp[0]]
    if len(aug_set) == 0:
        print("You did not select any attributes to augment. Check possible flags with --help. Exiting...")
        sys.exit(0)
    print("Augmenting conversation files with " + str(aug_set) + "...")
    count = 1
    list_dir_set = os.listdir(settings['DATA_MERGE_DIR'])
    for filename in list_dir_set:
        tprt_str = "File (" + str(count) + "/" + str(len(list_dir_set)) + "): " + filename
        print("Merged " + tprt_str)
        convo = None
        with open(settings['DATA_MERGE_DIR'] + "/" + filename, "rb") as handle:
            convo = msgpack.unpackb(handle.read())
        # cname: the conversation partner's name (the b"with" field)
        cname = convo[b"with"].decode()
        last_speaker = None
        last_time = None
        msg_counter = 0
        date_q = []
        # for LSM
        # pmst: sliding window of recent messages; the liwc_sum_*/total_*
        # accumulators track category counts and token totals for "me" vs
        # the other speaker over the last opt.context messages.
        pmst = []
        liwc_sum_me = np.array([0]*len(liwc_keys))
        liwc_sum_other = np.array([0]*len(liwc_keys))
        total_me = 0
        total_other = 0
        num_utts = 0
        #### error list
        ctx_for_err = []
        for message in tqdm(convo[b"messages"]):
            pmst.append(message)
            prev_set = pmst[-opt.context:]
            num_utts += 1
            if b"text" not in message:
                continue
            msg_text = message[b"text"]
            if type(msg_text) == bytes:
                msg_text = msg_text.decode()
            # add message date count
            mdate = dateutil.parser.parse(message[b"date"])
            if mdate.tzinfo == None:
                mdate = DEFAULT_TIMEZONE.localize(mdate)
            # td: seconds since the previous message (0 for the first one)
            td = (mdate - last_time).seconds + (mdate - last_time).days*24*60*60 if last_time != None else 0
            current_speaker = message[b"user"].decode()
            if not opt.quick:
                # normalize the message
                if opt.normalize:
                    if b"normalized" not in message:
                        message[b"normalized"] = expand_text(norms, msg_text)
                if opt.merged:
                    merge_str = message[b"normalized"]
                    merge_str = merge_str.decode() if type(merge_str) == bytes else merge_str
                    message[b"merged"] = merge_utt(merge_str.strip())
            #print(msg_text)
            tokens = [_t for _t in nltk.word_tokenize(msg_text)]
            if opt.liwc:
                t_liwc_parts = get_liwc_parts(tokens)
                message[b"liwc_counts"] = t_liwc_parts[0]
                message[b"liwc_words"] = t_liwc_parts[1]
                message[b"words"] = len(tokens)
            if opt.stopwords:
                message[b"stopword_count"] = sum([1 if _t.lower() in stop_words else 0 for _t in tokens])
            #print(message[b'user'].decode() + ' (' + message[b'date'].decode() + '): ' + msg_text)
            # keep the last 5 rendered messages as context for error reports
            ctx_for_err.append(message[b'user'].decode() + ' (' + message[b'date'].decode() + '): ' + msg_text)
            ctx_for_err = ctx_for_err[-5:]
            if opt.time:
                message[b"turn_change"] = last_speaker != current_speaker
                if td < 0: #assert td >= 0
                    er_file.write("\n.\n.\n.\n" + "\n".join(ctx_for_err))
                message[b"response_time"] = td
                if last_speaker != current_speaker:
                    last_time = mdate
            if opt.style:
                # Style matching (LSM): when the window is full, retire the
                # oldest message's contribution before adding the new one.
                if len(pmst) > opt.context:
                    t_msg_text = prev_set[-opt.context][b"text"].decode()
                    t_tokens = [_t for _t in nltk.word_tokenize(t_msg_text)]
                    num_utts -= 1
                    if prev_set[-opt.context][b"user"].decode() in settings['my_name']:
                        liwc_sum_me -= prev_set[-opt.context][b"liwc_counts"]
                        total_me -= len(t_tokens)
                    else:
                        liwc_sum_other -= prev_set[-opt.context][b"liwc_counts"]
                        total_other -= len(t_tokens)
                    assert num_utts == opt.context and num_utts == len(prev_set)
                if message[b"user"].decode() in settings['my_name']:
                    liwc_sum_me += message[b"liwc_counts"]
                    total_me += len(tokens)
                else:
                    liwc_sum_other += message[b"liwc_counts"]
                    total_other += len(tokens)
                lsm_full = 0
                if msg_counter > 0:
                    # per-category usage rates for each speaker, restricted
                    # to the LSM category subset
                    lsm_vec_me = [liwc_sum_me[lv]*1.0/total_me if total_me > 0 else 0.0 for lv in range(len(liwc_keys)) if liwc_keys[lv] in lsm_keys]
                    lsm_vec_other = [liwc_sum_other[lv]*1.0/total_other if total_other > 0 else 0.0 for lv in range(len(liwc_keys)) if liwc_keys[lv] in lsm_keys]
                    lsm_full = np.array([0.0]*len(lsm_keys))
                    for lsm_ind in range(len(lsm_keys)):
                        lsm_full[lsm_ind] = 1.0 - abs(lsm_vec_me[lsm_ind] - lsm_vec_other[lsm_ind]) / (lsm_vec_other[lsm_ind] + lsm_vec_me[lsm_ind] + 0.0001)
                        #print(lsm_keys[lsm_ind] + ": " + str(lsm_full[lsm_ind]))
                    #print("\n\n")
                    lsm_full = np.average(lsm_full)
                message[b"lsm"] = lsm_full
            if opt.freq:
                message[b"all_freq"] = msg_counter
                # rolling message counts in the last month / week / day
                m_mfreq, m_wfreq, m_dfreq = [0]*3
                new_dq = []
                for t_date in date_q:
                    td_t = mdate - t_date
                    #print(td_t)
                    if td_t.days < 30:
                        new_dq.append(t_date)
                        m_mfreq += 1
                        if td_t.days < 1:
                            m_dfreq += 1
                        if td_t.days < 7:
                            m_wfreq += 1
                date_q = new_dq
                message[b"month_freq"] = m_mfreq
                message[b"week_freq"] = m_wfreq
                message[b"day_freq"] = m_dfreq
            if opt.mentions:
                # count name mentions of tagged people other than the partner
                message[b"mentions"] = [0]*len(mention_names)
                for token in tokens:
                    if token in first_names:
                        if first_names[token] != cname:
                            message[b"mentions"][mention_names.index(first_names[token])] += 1
            if opt.uid:
                message[b'id'] = MSG_UNIQID
                MSG_UNIQID += 1
            # end of message loop
            msg_counter += 1
            last_speaker = current_speaker
            date_q.append(mdate)
        handle = open_for_write(settings['DATA_MERGE_DIR'] + "/" + filename, binary=True)
        handle.write(msgpack.packb(convo))
        handle.close()
        count += 1
    er_file.close()
def merge_utt(msg_text):
    """Return the canonical merged form of an utterance, or the utterance
    itself when no merge mapping exists for it."""
    return merge_dict.get(msg_text, msg_text)
if __name__ == "__main__":
    main()
| [
"utils.liwc_keys.index",
"os.listdir",
"nltk.corpus.stopwords.words",
"argparse.ArgumentParser",
"nltk.word_tokenize",
"numpy.average",
"msgpack.packb",
"tqdm.tqdm",
"utils.DEFAULT_TIMEZONE.localize",
"numpy.array",
"sys.exit",
"normalizer.expand_text",
"utils.open_for_write",
"mention_gra... | [((2098, 2124), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2113, 2124), False, 'from nltk.corpus import stopwords\n'), ((3155, 3171), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3169, 3171), False, 'from argparse import ArgumentParser\n'), ((5860, 5898), 'os.listdir', 'os.listdir', (["settings['DATA_MERGE_DIR']"], {}), "(settings['DATA_MERGE_DIR'])\n", (5870, 5898), False, 'import operator, msgpack, nltk, math, sys, os\n'), ((5312, 5332), 'mention_graph.read_mention_names', 'read_mention_names', ([], {}), '()\n', (5330, 5332), False, 'from mention_graph import read_mention_names\n'), ((5742, 5753), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5750, 5753), False, 'import operator, msgpack, nltk, math, sys, os\n'), ((6632, 6656), 'tqdm.tqdm', 'tqdm', (["convo[b'messages']"], {}), "(convo[b'messages'])\n", (6636, 6656), False, 'from tqdm import tqdm\n'), ((12061, 12133), 'utils.open_for_write', 'open_for_write', (["(settings['DATA_MERGE_DIR'] + '/' + filename)"], {'binary': '(True)'}), "(settings['DATA_MERGE_DIR'] + '/' + filename, binary=True)\n", (12075, 12133), False, 'from utils import settings, liwc_keys, lsm_keys, open_for_write, DEFAULT_TIMEZONE\n'), ((2518, 2545), 'numpy.array', 'np.array', (['liwc_words[token]'], {}), '(liwc_words[token])\n', (2526, 2545), True, 'import numpy as np\n'), ((12155, 12175), 'msgpack.packb', 'msgpack.packb', (['convo'], {}), '(convo)\n', (12168, 12175), False, 'import operator, msgpack, nltk, math, sys, os\n'), ((7109, 7141), 'utils.DEFAULT_TIMEZONE.localize', 'DEFAULT_TIMEZONE.localize', (['mdate'], {}), '(mdate)\n', (7134, 7141), False, 'from utils import settings, liwc_keys, lsm_keys, open_for_write, DEFAULT_TIMEZONE\n'), ((1380, 1405), 'utils.liwc_keys.index', 'liwc_keys.index', (['tline[1]'], {}), '(tline[1])\n', (1395, 1405), False, 'from utils import settings, liwc_keys, lsm_keys, open_for_write, DEFAULT_TIMEZONE\n'), ((1575, 1600), 
'utils.liwc_keys.index', 'liwc_keys.index', (['tline[1]'], {}), '(tline[1])\n', (1590, 1600), False, 'from utils import settings, liwc_keys, lsm_keys, open_for_write, DEFAULT_TIMEZONE\n'), ((2857, 2891), 'numpy.array', 'np.array', (['liwc_stems[i][token[:i]]'], {}), '(liwc_stems[i][token[:i]])\n', (2865, 2891), True, 'import numpy as np\n'), ((7858, 7886), 'nltk.word_tokenize', 'nltk.word_tokenize', (['msg_text'], {}), '(msg_text)\n', (7876, 7886), False, 'import operator, msgpack, nltk, math, sys, os\n'), ((10695, 10715), 'numpy.average', 'np.average', (['lsm_full'], {}), '(lsm_full)\n', (10705, 10715), True, 'import numpy as np\n'), ((2569, 2596), 'numpy.array', 'np.array', (['liwc_words[token]'], {}), '(liwc_words[token])\n', (2577, 2596), True, 'import numpy as np\n'), ((7514, 7542), 'normalizer.expand_text', 'expand_text', (['norms', 'msg_text'], {}), '(norms, msg_text)\n', (7525, 7542), False, 'from normalizer import expand_text, norms\n'), ((9066, 9096), 'nltk.word_tokenize', 'nltk.word_tokenize', (['t_msg_text'], {}), '(t_msg_text)\n', (9084, 9096), False, 'import operator, msgpack, nltk, math, sys, os\n'), ((2923, 2957), 'numpy.array', 'np.array', (['liwc_stems[i][token[:i]]'], {}), '(liwc_stems[i][token[:i]])\n', (2931, 2957), True, 'import numpy as np\n')] |
import pickle
import operator
import numpy as np
import csv
import os.path
# Load the test labels saved by the training script.
with open('y_test', 'rb') as f:
    y_test = pickle.load(f)

# Map 0-based vocabulary index -> word (vocab.csv stores 1-based indices).
dicvocab = {}
# FIX: 'with' guarantees the csv handle is closed even if parsing raises
# (the original used a bare open()/close() pair).
with open("data/vocab.csv") as f:
    vocab = csv.reader(f)
    for word in vocab:
        if word[0] != '':
            dicvocab[int(word[0]) - 1] = word[1]

label_size = y_test.shape[1]
topics = ["/Artificial_Intelligence/Machine_Learning/Case-Based/", "/Artificial_Intelligence/Machine_Learning/Genetic_Algorithms/", "/Artificial_Intelligence/Machine_Learning/Neural_Networks/", "/Artificial_Intelligence/Machine_Learning/Probabilistic_Methods/", "/Artificial_Intelligence/Machine_Learning/Reinforcement_Learning/", "/Artificial_Intelligence/Machine_Learning/Rule_Learning/", "/Artificial_Intelligence/Machine_Learning/Theory/"]
# FIX: removed unused `maxlabel = np.argmax(y_test, axis=1)`.
# One saved feature dictionary per class plus one for all classes combined;
# files that were never produced are skipped.
for ind in range(label_size + 1):
    st = 'dictionary' + str(ind)
    if not os.path.isfile(st):
        continue
    if ind < label_size: print("Class ", ind, "enabled")
    else: print("All Classes enabled")
    with open(st, 'rb') as f:
        dic = pickle.load(f)
    for i in range(label_size):
        dic2 = {}
        # FIX: header said "Top 5" but 15 features are printed below.
        print("Top 15 Highest Relevance Features for Class ", topics[i], "->", end='')
        for x in dic[i]:
            if x not in dic2: dic2[x] = 0
            dic2[x] += 1
        # sort by (count, feature id), most frequent first
        k = sorted(dic2.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
        for z in k[:15]:
            print((dicvocab[z[0]], z[1]), end=',')
        print()
    print()
    print()
"pickle.load",
"csv.reader",
"numpy.argmax"
] | [((176, 189), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (186, 189), False, 'import csv\n'), ((760, 785), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (769, 785), True, 'import numpy as np\n'), ((117, 131), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (128, 131), False, 'import pickle\n'), ((1007, 1021), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1018, 1021), False, 'import pickle\n')] |
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import range, zip, map, reduce, filter
import numpy as np
import warnings
from zipfile import ZipFile, ZIP_DEFLATED
from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes
from scipy.ndimage.measurements import find_objects
from skimage.draw import polygon
from csbdeep.utils import _raise
from csbdeep.utils.six import Path
_ocl_kernel = r"""
#ifndef M_PI
#define M_PI 3.141592653589793
#endif
__constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
inline float2 pol2cart(const float rho, const float phi) {
const float x = rho * cos(phi);
const float y = rho * sin(phi);
return (float2)(x,y);
}
__kernel void star_dist(__global float* dst, read_only image2d_t src) {
const int i = get_global_id(0), j = get_global_id(1);
const int Nx = get_global_size(0), Ny = get_global_size(1);
const float2 origin = (float2)(i,j);
const int value = read_imageui(src,sampler,origin).x;
if (value == 0) {
// background pixel -> nothing to do, write all zeros
for (int k = 0; k < N_RAYS; k++) {
dst[k + i*N_RAYS + j*N_RAYS*Nx] = 0;
}
} else {
float st_rays = (2*M_PI) / N_RAYS; // step size for ray angles
// for all rays
for (int k = 0; k < N_RAYS; k++) {
const float phi = k*st_rays; // current ray angle phi
const float2 dir = pol2cart(1,phi); // small vector in direction of ray
float2 offset = 0; // offset vector to be added to origin
// find radius that leaves current object
while (1) {
offset += dir;
const int offset_value = read_imageui(src,sampler,round(origin+offset)).x;
if (offset_value != value) {
const float dist = sqrt(offset.x*offset.x + offset.y*offset.y);
dst[k + i*N_RAYS + j*N_RAYS*Nx] = dist;
break;
}
}
}
}
}
"""
def _ocl_star_dist(a, n_rays=32):
    """Star distances on the GPU via gputools/OpenCL.

    `a` is a label image (object ids, 0 = background); returns a float32
    array of shape a.shape + (n_rays,).
    """
    from gputools import OCLProgram, OCLArray, OCLImage
    if not (np.isscalar(n_rays) and int(n_rays) > 0):
        raise ValueError()
    n_rays = int(n_rays)
    labels_img = OCLImage.from_array(a.astype(np.uint16, copy=False))
    out = OCLArray.empty(a.shape + (n_rays,), dtype=np.float32)
    prog = OCLProgram(src_str=_ocl_kernel,
                      build_options=['-D', 'N_RAYS=%d' % n_rays])
    prog.run_kernel('star_dist', labels_img.shape, None, out.data, labels_img)
    return out.get()
def _cpp_star_dist(a, n_rays=32):
    """Star distances via the compiled C++ extension (CPU)."""
    from .lib.stardist import c_star_dist
    if not (np.isscalar(n_rays) and int(n_rays) > 0):
        raise ValueError()
    return c_star_dist(a.astype(np.uint16, copy=False), int(n_rays))
def _py_star_dist(a, n_rays=32):
(np.isscalar(n_rays) and 0 < int(n_rays)) or _raise(ValueError())
n_rays = int(n_rays)
a = a.astype(np.uint16,copy=False)
dst = np.empty(a.shape+(n_rays,),np.float32)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
value = a[i,j]
if value == 0:
dst[i,j] = 0
else:
st_rays = np.float32((2*np.pi) / n_rays)
for k in range(n_rays):
phi = np.float32(k*st_rays)
dy = np.cos(phi)
dx = np.sin(phi)
x, y = np.float32(0), np.float32(0)
while True:
x += dx
y += dy
ii = int(round(i+x))
jj = int(round(j+y))
if (ii < 0 or ii >= a.shape[0] or
jj < 0 or jj >= a.shape[1] or
value != a[ii,jj]):
dist = np.sqrt(x*x + y*y)
dst[i,j,k] = dist
break
return dst
def star_dist(a, n_rays=32, opencl=False):
    """Compute star-convex distances for a label image.

    'a' assumbed to be a label image with integer values that encode
    object ids. id 0 denotes background.

    Parameters
    ----------
    a : ndarray
        Label image.
    n_rays : int
        Number of radial directions (warns when not a power of 2).
    opencl : bool
        If True, try the GPU implementation first and silently fall back
        to the C++ implementation when it is unavailable or fails.
    """
    if not _is_power_of_2(n_rays):
        warnings.warn("not tested with 'n_rays' not being a power of 2.")
    if opencl:
        try:
            return _ocl_star_dist(a,n_rays)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; keep the deliberate
            # best-effort GPU fallback but only for ordinary errors.
            pass
    return _cpp_star_dist(a,n_rays)
def _is_power_of_2(i):
assert i > 0
e = np.log2(i)
return e == int(e)
def ray_angles(n_rays=32):
    """Return `n_rays` equally spaced ray angles in [0, 2*pi)."""
    return np.linspace(0,2*np.pi,n_rays,endpoint=False)
def dist_to_coord(rhos):
    """Convert radial distances to cartesian (row, col) coordinates.

    `rhos` is (h, w, n_rays) for a single image or (n, h, w, n_rays) for
    multiple images; the result inserts an axis of size 2 (row, col)
    before the ray axis and is squeezed back for single-image input.
    """
    single = rhos.ndim == 3
    if single:
        rhos = rhos[np.newaxis]
    assert rhos.ndim == 4
    n_images, h, w, n_rays = rhos.shape
    coord = np.empty((n_images, h, w, 2, n_rays), dtype=rhos.dtype)
    rows, cols = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
    coord[..., 0, :] = rows.reshape(1, h, w, 1)
    coord[..., 1, :] = cols.reshape(1, h, w, 1)
    # ray angles, inlined from ray_angles(): n_rays evenly spaced in [0, 2*pi)
    phis = np.linspace(0, 2 * np.pi, n_rays, endpoint=False).reshape(1, 1, 1, n_rays)
    coord[..., 0, :] += rhos * np.sin(phis)  # row coordinate
    coord[..., 1, :] += rhos * np.cos(phis)  # col coordinate
    return coord[0] if single else coord
def _edt_prob(lbl_img):
prob = np.zeros(lbl_img.shape,np.float32)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
edt = distance_transform_edt(mask)[mask]
prob[mask] = edt/np.max(edt)
return prob
def edt_prob(lbl_img):
    """Perform EDT on each labeled object and normalize.

    Faster equivalent of _edt_prob: instead of transforming the whole
    image per label, each object's bounding box (grown by one pixel on
    interior sides so the EDT sees the true object border) is processed
    in isolation.
    """
    def grow(sl,interior):
        # widen each bounding-box slice by 1 on sides that are interior
        return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
    def shrink(interior):
        # inverse of grow: slices that cut the grown box back to the original
        return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
    objects = find_objects(lbl_img)
    prob = np.zeros(lbl_img.shape,np.float32)
    for i,sl in enumerate(objects,1):
        # i: object label id, sl: slices of object in lbl_img
        if sl is None: continue
        interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
        # 1. grow object slice by 1 for all interior object bounding boxes
        # 2. perform (correct) EDT for object with label id i
        # 3. extract EDT for object of original slice and normalize
        # 4. store edt for object only for pixels of given label id i
        shrink_slice = shrink(interior)
        grown_mask = lbl_img[grow(sl,interior)]==i
        mask = grown_mask[shrink_slice]
        edt = distance_transform_edt(grown_mask)[shrink_slice][mask]
        prob[sl][mask] = edt/np.max(edt)
    return prob
def polygons_to_label(coord, prob, points, thr=-np.inf):
    """Rasterize star-convex polygons into a uint16 label image.

    Candidates are drawn in order of increasing probability so that more
    confident objects overwrite less confident ones; candidates whose
    probability is below `thr` are skipped (but still consume no label id).
    """
    sh = coord.shape[:2]
    lbl = np.zeros(sh, np.uint16)
    # sort candidate points by ascending probability
    order = np.argsort([prob[p[0], p[1]] for p in points])
    label_id = 1
    for p in points[order]:
        if prob[p[0], p[1]] < thr:
            continue
        rr, cc = polygon(coord[p[0], p[1], 0], coord[p[0], p[1], 1], sh)
        lbl[rr, cc] = label_id
        label_id += 1
    return lbl
def _fill_label_holes(lbl_img, **kwargs):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
mask_filled = binary_fill_holes(mask,**kwargs)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def fill_label_holes(lbl_img, **kwargs):
    """Fill small holes in label image."""
    # TODO: refactor 'fill_label_holes' and 'edt_prob' to share code
    def grow(sl,interior):
        # widen each bounding-box slice by 1 on sides that are interior
        return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
    def shrink(interior):
        # inverse of grow: slices that cut the grown box back to the original
        return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
    objects = find_objects(lbl_img)
    lbl_img_filled = np.zeros_like(lbl_img)
    for i,sl in enumerate(objects,1):
        # i: object label id, sl: slices of object's bounding box in lbl_img
        if sl is None: continue
        interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
        shrink_slice = shrink(interior)
        grown_mask = lbl_img[grow(sl,interior)]==i
        # fill holes on the grown mask so border-touching objects are handled,
        # then crop back to the original bounding box
        mask_filled = binary_fill_holes(grown_mask,**kwargs)[shrink_slice]
        lbl_img_filled[sl][mask_filled] = i
    return lbl_img_filled
def sample_points(n_samples, mask, prob=None, b=2):
    """Sample `n_samples` pixel coordinates (with replacement) from `mask`.

    A border of width `b` is excluded because predictions there may be
    unreliable (pass b=None or b<=0 to keep the border).  If `prob` is
    given, pixels are drawn with probability proportional to it.
    Returns an (n_samples, 2) array of (row, col) pairs.
    """
    if b is None or b <= 0:
        border_ok = True
    else:
        border_ok = np.zeros_like(mask)
        border_ok[b:-b, b:-b] = True
    candidates = np.nonzero(mask & border_ok)
    if prob is not None:
        # weighted sampling proportional to `prob` at the candidate pixels
        weights = prob[candidates[0], candidates[1]].astype(np.float64)
        weights /= np.sum(weights)
        chosen = np.random.choice(len(candidates[0]), n_samples, replace=True, p=weights)
    else:
        chosen = np.random.choice(len(candidates[0]), n_samples, replace=True)
    return np.stack((candidates[0][chosen], candidates[1][chosen]), axis=-1)
def polyroi_bytearray(x,y,pos=None):
    """ Byte array of polygon roi with provided x and y coordinates
        See https://github.com/imagej/imagej1/blob/master/ij/io/RoiDecoder.java
    """
    # big-endian fixed-width integer encoders used by the .roi format
    def i16(v):
        return int(v).to_bytes(2, byteorder='big', signed=True)
    def u16(v):
        return int(v).to_bytes(2, byteorder='big', signed=False)
    def i32(v):
        return int(v).to_bytes(4, byteorder='big', signed=True)
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    assert len(x) == len(y)
    top, left, bottom, right = y.min(), x.min(), y.max(), x.max() # bbox
    n_coords = len(x)
    bytes_header = 64
    B = [0] * (bytes_header + n_coords * 2 * 2)
    B[0:4] = (ord(ch) for ch in 'Iout') # magic start
    B[4:6] = i16(227)                   # version
    B[6:8] = i16(0)                     # roi type (0 = polygon)
    B[8:10] = i16(top)                  # bbox top
    B[10:12] = i16(left)                # bbox left
    B[12:14] = i16(bottom)              # bbox bottom
    B[14:16] = i16(right)               # bbox right
    B[16:18] = u16(n_coords)            # number of coordinates
    if pos is not None:
        B[56:60] = i32(pos)             # position (C, Z, or T)
    # coordinates are stored relative to the bounding box: all x's, then all y's
    for idx in range(n_coords):
        xs = bytes_header + 2 * idx
        ys = xs + 2 * n_coords
        B[xs:xs + 2] = i16(x[idx] - left)
        B[ys:ys + 2] = i16(y[idx] - top)
    return bytearray(B)
def export_imagej_rois(fname, polygons, set_position=True, compression=ZIP_DEFLATED):
    """ polygons assumed to be a list/array of arrays with shape (id,x,y)

    Writes one ImageJ ROI per polygon into a zip archive named
    ``fname`` (a '.zip' suffix is appended; an existing '.zip' suffix is
    stripped first so it is not duplicated). Entries are named
    '<group>_<index>.roi', both 1-based.

    Parameters
    ----------
    fname : str or Path
        Output file name.
    polygons : list/array
        List/array of polygon groups, each of shape (id, x, y).
    set_position : bool
        If True, store the 1-based group index as the ROI position (C, Z, or T).
    compression : int
        Compression mode passed to `zipfile.ZipFile`.
    """
    fname = Path(fname)
    if fname.suffix == '.zip':
        # Bug fix: the previous `Path(fname.stem)` discarded the parent
        # directory, silently writing the archive to the current working
        # directory; `with_suffix('')` only strips the extension.
        fname = fname.with_suffix('')
    with ZipFile(str(fname)+'.zip', mode='w', compression=compression) as roizip:
        for pos,polygroup in enumerate(polygons,start=1):
            for i,poly in enumerate(polygroup,start=1):
                roi = polyroi_bytearray(poly[1],poly[0], pos=(pos if set_position else None))
                roizip.writestr('{pos:03d}_{i:03d}.roi'.format(pos=pos,i=i), roi)
| [
"numpy.sqrt",
"numpy.argsort",
"numpy.sin",
"numpy.arange",
"numpy.isscalar",
"numpy.zeros_like",
"numpy.asarray",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"numpy.empty",
"scipy.ndimage.measurements.find_objects",
"warnings.warn",
"six.moves.map",
"six.moves.zip",
"gputools.OCLPro... | [((2365, 2418), 'gputools.OCLArray.empty', 'OCLArray.empty', (['(a.shape + (n_rays,))'], {'dtype': 'np.float32'}), '(a.shape + (n_rays,), dtype=np.float32)\n', (2379, 2418), False, 'from gputools import OCLProgram, OCLArray, OCLImage\n'), ((2431, 2506), 'gputools.OCLProgram', 'OCLProgram', ([], {'src_str': '_ocl_kernel', 'build_options': "['-D', 'N_RAYS=%d' % n_rays]"}), "(src_str=_ocl_kernel, build_options=['-D', 'N_RAYS=%d' % n_rays])\n", (2441, 2506), False, 'from gputools import OCLProgram, OCLArray, OCLImage\n'), ((2991, 3032), 'numpy.empty', 'np.empty', (['(a.shape + (n_rays,))', 'np.float32'], {}), '(a.shape + (n_rays,), np.float32)\n', (2999, 3032), True, 'import numpy as np\n'), ((3044, 3061), 'six.moves.range', 'range', (['a.shape[0]'], {}), '(a.shape[0])\n', (3049, 3061), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((4432, 4442), 'numpy.log2', 'np.log2', (['i'], {}), '(i)\n', (4439, 4442), True, 'import numpy as np\n'), ((4506, 4555), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_rays'], {'endpoint': '(False)'}), '(0, 2 * np.pi, n_rays, endpoint=False)\n', (4517, 4555), True, 'import numpy as np\n'), ((4870, 4925), 'numpy.empty', 'np.empty', (['(n_images, h, w, 2, n_rays)'], {'dtype': 'rhos.dtype'}), '((n_images, h, w, 2, n_rays), dtype=rhos.dtype)\n', (4878, 4925), True, 'import numpy as np\n'), ((5001, 5009), 'six.moves.range', 'range', (['(2)'], {}), '(2)\n', (5006, 5009), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((5478, 5513), 'numpy.zeros', 'np.zeros', (['lbl_img.shape', 'np.float32'], {}), '(lbl_img.shape, np.float32)\n', (5486, 5513), True, 'import numpy as np\n'), ((6016, 6037), 'scipy.ndimage.measurements.find_objects', 'find_objects', (['lbl_img'], {}), '(lbl_img)\n', (6028, 6037), False, 'from scipy.ndimage.measurements import find_objects\n'), ((6049, 6084), 'numpy.zeros', 'np.zeros', (['lbl_img.shape', 'np.float32'], {}), '(lbl_img.shape, np.float32)\n', 
(6057, 6084), True, 'import numpy as np\n'), ((6919, 6942), 'numpy.zeros', 'np.zeros', (['sh', 'np.uint16'], {}), '(sh, np.uint16)\n', (6927, 6942), True, 'import numpy as np\n'), ((6998, 7044), 'numpy.argsort', 'np.argsort', (['[prob[p[0], p[1]] for p in points]'], {}), '([prob[p[0], p[1]] for p in points])\n', (7008, 7044), True, 'import numpy as np\n'), ((7345, 7367), 'numpy.zeros_like', 'np.zeros_like', (['lbl_img'], {}), '(lbl_img)\n', (7358, 7367), True, 'import numpy as np\n'), ((7960, 7981), 'scipy.ndimage.measurements.find_objects', 'find_objects', (['lbl_img'], {}), '(lbl_img)\n', (7972, 7981), False, 'from scipy.ndimage.measurements import find_objects\n'), ((8003, 8025), 'numpy.zeros_like', 'np.zeros_like', (['lbl_img'], {}), '(lbl_img)\n', (8016, 8025), True, 'import numpy as np\n'), ((8746, 8771), 'numpy.nonzero', 'np.nonzero', (['(mask & mask_b)'], {}), '(mask & mask_b)\n', (8756, 8771), True, 'import numpy as np\n'), ((9132, 9157), 'numpy.stack', 'np.stack', (['points'], {'axis': '(-1)'}), '(points, axis=-1)\n', (9140, 9157), True, 'import numpy as np\n'), ((9915, 9931), 'six.moves.map', 'map', (['ord', '"""Iout"""'], {}), "(ord, 'Iout')\n", (9918, 9931), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((10753, 10764), 'csbdeep.utils.six.Path', 'Path', (['fname'], {}), '(fname)\n', (10757, 10764), False, 'from csbdeep.utils.six import Path\n'), ((3080, 3097), 'six.moves.range', 'range', (['a.shape[1]'], {}), '(a.shape[1])\n', (3085, 3097), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((4175, 4240), 'warnings.warn', 'warnings.warn', (['"""not tested with \'n_rays\' not being a power of 2."""'], {}), '("not tested with \'n_rays\' not being a power of 2.")\n', (4188, 4240), False, 'import warnings\n'), ((4771, 4794), 'numpy.expand_dims', 'np.expand_dims', (['rhos', '(0)'], {}), '(rhos, 0)\n', (4785, 4794), True, 'import numpy as np\n'), ((4946, 4958), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (4955, 
4958), True, 'import numpy as np\n'), ((4959, 4971), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (4968, 4971), True, 'import numpy as np\n'), ((5136, 5187), 'numpy.broadcast_to', 'np.broadcast_to', (['start[i]', '(n_images, h, w, n_rays)'], {}), '(start[i], (n_images, h, w, n_rays))\n', (5151, 5187), True, 'import numpy as np\n'), ((5301, 5313), 'numpy.sin', 'np.sin', (['phis'], {}), '(phis)\n', (5307, 5313), True, 'import numpy as np\n'), ((5360, 5372), 'numpy.cos', 'np.cos', (['phis'], {}), '(phis)\n', (5366, 5372), True, 'import numpy as np\n'), ((7174, 7229), 'skimage.draw.polygon', 'polygon', (['coord[p[0], p[1], 0]', 'coord[p[0], p[1], 1]', 'sh'], {}), '(coord[p[0], p[1], 0], coord[p[0], p[1], 1], sh)\n', (7181, 7229), False, 'from skimage.draw import polygon\n'), ((7467, 7500), 'scipy.ndimage.morphology.binary_fill_holes', 'binary_fill_holes', (['mask'], {}), '(mask, **kwargs)\n', (7484, 7500), False, 'from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes\n'), ((8647, 8666), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (8660, 8666), True, 'import numpy as np\n'), ((8905, 8914), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (8911, 8914), True, 'import numpy as np\n'), ((10400, 10409), 'six.moves.zip', 'zip', (['x', 'y'], {}), '(x, y)\n', (10403, 10409), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((10812, 10828), 'csbdeep.utils.six.Path', 'Path', (['fname.stem'], {}), '(fname.stem)\n', (10816, 10828), False, 'from csbdeep.utils.six import Path\n'), ((2203, 2222), 'numpy.isscalar', 'np.isscalar', (['n_rays'], {}), '(n_rays)\n', (2214, 2222), True, 'import numpy as np\n'), ((2679, 2698), 'numpy.isscalar', 'np.isscalar', (['n_rays'], {}), '(n_rays)\n', (2690, 2698), True, 'import numpy as np\n'), ((2852, 2871), 'numpy.isscalar', 'np.isscalar', (['n_rays'], {}), '(n_rays)\n', (2863, 2871), True, 'import numpy as np\n'), ((5531, 5549), 'numpy.unique', 'np.unique', (['lbl_img'], {}), 
'(lbl_img)\n', (5540, 5549), True, 'import numpy as np\n'), ((5604, 5632), 'scipy.ndimage.morphology.distance_transform_edt', 'distance_transform_edt', (['mask'], {}), '(mask)\n', (5626, 5632), False, 'from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes\n'), ((5664, 5675), 'numpy.max', 'np.max', (['edt'], {}), '(edt)\n', (5670, 5675), True, 'import numpy as np\n'), ((6797, 6808), 'numpy.max', 'np.max', (['edt'], {}), '(edt)\n', (6803, 6808), True, 'import numpy as np\n'), ((7386, 7404), 'numpy.unique', 'np.unique', (['lbl_img'], {}), '(lbl_img)\n', (7395, 7404), True, 'import numpy as np\n'), ((8286, 8325), 'scipy.ndimage.morphology.binary_fill_holes', 'binary_fill_holes', (['grown_mask'], {}), '(grown_mask, **kwargs)\n', (8303, 8325), False, 'from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes\n'), ((9630, 9643), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (9640, 9643), True, 'import numpy as np\n'), ((9660, 9673), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (9670, 9673), True, 'import numpy as np\n'), ((3226, 3256), 'numpy.float32', 'np.float32', (['(2 * np.pi / n_rays)'], {}), '(2 * np.pi / n_rays)\n', (3236, 3256), True, 'import numpy as np\n'), ((3282, 3295), 'six.moves.range', 'range', (['n_rays'], {}), '(n_rays)\n', (3287, 3295), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((6270, 6292), 'six.moves.zip', 'zip', (['sl', 'lbl_img.shape'], {}), '(sl, lbl_img.shape)\n', (6273, 6292), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((6713, 6747), 'scipy.ndimage.morphology.distance_transform_edt', 'distance_transform_edt', (['grown_mask'], {}), '(grown_mask)\n', (6735, 6747), False, 'from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes\n'), ((8150, 8172), 'six.moves.zip', 'zip', (['sl', 'lbl_img.shape'], {}), '(sl, lbl_img.shape)\n', (8153, 8172), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((3323, 
3346), 'numpy.float32', 'np.float32', (['(k * st_rays)'], {}), '(k * st_rays)\n', (3333, 3346), True, 'import numpy as np\n'), ((3370, 3381), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3376, 3381), True, 'import numpy as np\n'), ((3407, 3418), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3413, 3418), True, 'import numpy as np\n'), ((5878, 5895), 'six.moves.zip', 'zip', (['sl', 'interior'], {}), '(sl, interior)\n', (5881, 5895), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7822, 7839), 'six.moves.zip', 'zip', (['sl', 'interior'], {}), '(sl, interior)\n', (7825, 7839), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((3446, 3459), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (3456, 3459), True, 'import numpy as np\n'), ((3461, 3474), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (3471, 3474), True, 'import numpy as np\n'), ((3861, 3883), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (3868, 3883), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxis, MapAxes
from gammapy.utils.array import array_stats_str
from gammapy.utils.nddata import NDDataArray
from gammapy.utils.gauss import Gauss2DPDF
from gammapy.utils.scripts import make_path
__all__ = ["TablePSF", "EnergyDependentTablePSF", "PSF3D"]
log = logging.getLogger(__name__)
class TablePSF:
    """Radially-symmetric table PSF.

    Parameters
    ----------
    rad_axis : `~astropy.units.Quantity` with angle units
        Offset wrt source position
    data : `~astropy.units.Quantity` with sr^-1 units
        PSF value array
    interp_kwargs : dict
        Keyword arguments passed to `ScaledRegularGridInterpolator`
    """

    def __init__(self, rad_axis, data, interp_kwargs=None):
        rad_axis.assert_name("rad")
        self.data = NDDataArray(
            axes=[rad_axis],
            data=u.Quantity(data).to("sr^-1"),
            interp_kwargs=interp_kwargs or {},
        )

    @property
    def rad_axis(self):
        """Offset angle axis."""
        return self.data.axes["rad"]

    @classmethod
    def from_shape(cls, shape, width, rad):
        """Make TablePSF objects with commonly used shapes.

        This function is mostly useful for examples and testing.

        Parameters
        ----------
        shape : {'disk', 'gauss'}
            PSF shape.
        width : `~astropy.units.Quantity` with angle units
            PSF width angle (radius for disk, sigma for Gauss).
        rad : `~astropy.units.Quantity` with angle units
            Offset angle

        Returns
        -------
        psf : `TablePSF`
            Table PSF

        Examples
        --------
        >>> import numpy as np
        >>> from astropy.coordinates import Angle
        >>> from gammapy.irf import TablePSF
        >>> rad = Angle(np.linspace(0, 0.7, 100), 'deg')
        >>> psf = TablePSF.from_shape(shape='gauss', width='0.2 deg', rad=rad)
        """
        width = Angle(width)
        rad = Angle(rad)

        if shape == "gauss":
            data = Gauss2DPDF(sigma=width.radian)(rad.radian)
        elif shape == "disk":
            # flat profile inside the disk, normalized to unit integral
            amplitude = 1 / (np.pi * width.radian ** 2)
            data = np.where(rad < width, amplitude, 0)
        else:
            raise ValueError(f"Invalid shape: {shape}")

        return cls(
            rad_axis=MapAxis.from_nodes(rad, name="rad"),
            data=u.Quantity(data, "sr^-1"),
        )

    def info(self):
        """Print basic info."""
        parts = [array_stats_str(self.rad_axis.center, "offset")]
        parts.append(f"integral = {self.containment(self.rad_axis.edges[-1])}\n")
        for percent in (68, 80, 95):
            radius = self.containment_radius(0.01 * percent)
            parts.append(f"containment radius {radius.deg} deg for {percent}%\n")
        return "".join(parts)

    def evaluate(self, rad):
        r"""Evaluate PSF.

        The following PSF quantities are available:

        * 'dp_domega': PDF per 2-dim solid angle :math:`\Omega` in sr^-1

            .. math:: \frac{dP}{d\Omega}

        Parameters
        ----------
        rad : `~astropy.coordinates.Angle`
            Offset wrt source position

        Returns
        -------
        psf_value : `~astropy.units.Quantity`
            PSF value
        """
        return self.data.evaluate(rad=rad)

    def containment(self, rad_max):
        """Compute PSF containment fraction.

        Parameters
        ----------
        rad_max : `~astropy.units.Quantity`
            Offset angle range

        Returns
        -------
        integral : float
            PSF integral
        """
        return self.data._integrate_rad((np.atleast_1d(rad_max),))

    def containment_radius(self, fraction):
        """Containment radius.

        Parameters
        ----------
        fraction : array_like
            Containment fraction (range 0 .. 1)

        Returns
        -------
        rad : `~astropy.coordinates.Angle`
            Containment radius angle
        """
        # invert the containment curve on a fine radial grid
        grid = np.linspace(0 * u.deg, self.rad_axis.center[-1], 10 * self.rad_axis.nbin)
        rad_max = Angle(grid, "rad")
        containment = self.containment(rad_max=rad_max)
        fraction = np.atleast_1d(fraction)
        # pick the grid point whose containment is closest to each fraction
        idx = np.argmin(np.abs(containment - fraction[:, np.newaxis]), axis=1)
        return rad_max[idx].to("deg")

    def normalize(self):
        """Normalize PSF to unit integral.

        Computes the total PSF integral via the :math:`dP / dr` spline
        and then divides the :math:`dP / dr` array.
        """
        self.data /= self.containment(self.rad_axis.edges[-1])

    def plot_psf_vs_rad(self, ax=None, **kwargs):
        """Plot PSF vs radius.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`, optional
            Axes to plot on (default: current axes).
        kwargs : dict
            Keyword arguments passed to `matplotlib.pyplot.plot`
        """
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.gca()
        ax.plot(
            self.rad_axis.center.to_value("deg"),
            self.data.data.to_value("sr-1"),
            **kwargs,
        )
        ax.set_yscale("log")
        ax.set_xlabel("Radius (deg)")
        ax.set_ylabel("PSF (sr-1)")
class EnergyDependentTablePSF:
    """Energy-dependent radially-symmetric table PSF (``gtpsf`` format).
    TODO: add references and explanations.
    Parameters
    ----------
    energy_axis_true : `MapAxis`
        True energy axis
    rad_axis : `MapAxis`
        Offset angle wrt source position axis
    exposure : `~astropy.units.Quantity`
        Exposure (1-dim), one value per true-energy bin
    data : `~astropy.units.Quantity`
        PSF values (2-dim with axes: psf[energy_index, offset_index])
    interp_kwargs : dict
        Interpolation keyword arguments passed to `ScaledRegularGridInterpolator`.
    """
    def __init__(
        self,
        energy_axis_true,
        rad_axis,
        exposure=None,
        data=None,
        interp_kwargs=None,
    ):
        interp_kwargs = interp_kwargs or {}
        axes = MapAxes([energy_axis_true, rad_axis])
        axes.assert_names(["energy_true", "rad"])
        self.data = NDDataArray(
            axes=axes, data=u.Quantity(data).to("sr^-1"), interp_kwargs=interp_kwargs
        )
        if exposure is None:
            # default: unit exposure for every true-energy bin
            self.exposure = u.Quantity(np.ones(self.energy_axis_true.nbin), "cm^2 s")
        else:
            self.exposure = u.Quantity(exposure).to("cm^2 s")
    @property
    def energy_axis_true(self):
        """True energy axis (`MapAxis`)."""
        return self.data.axes["energy_true"]
    @property
    def rad_axis(self):
        """Offset angle axis (`MapAxis`)."""
        return self.data.axes["rad"]
    def __str__(self):
        """Summary string with axis statistics and example containment radii."""
        ss = "EnergyDependentTablePSF\n"
        ss += "-----------------------\n"
        ss += "\nAxis info:\n"
        ss += "  " + array_stats_str(self.rad_axis.center.to("deg"), "rad")
        ss += "  " + array_stats_str(self.energy_axis_true.center, "energy")
        ss += "\nContainment info:\n"
        # Print some example containment radii
        fractions = [0.68, 0.95]
        energies = u.Quantity([10, 100], "GeV")
        for fraction in fractions:
            rads = self.containment_radius(energy=energies, fraction=fraction)
            for energy, rad in zip(energies, rads):
                ss += f"  {100 * fraction}% containment radius at {energy:3.0f}: {rad:.2f}\n"
        return ss
    @classmethod
    def from_hdulist(cls, hdu_list):
        """Create `EnergyDependentTablePSF` from ``gtpsf`` format HDU list.
        Parameters
        ----------
        hdu_list : `~astropy.io.fits.HDUList`
            HDU list with ``THETA`` and ``PSF`` extensions.
        """
        # TODO: move this to MapAxis.from_table()
        rad = Angle(hdu_list["THETA"].data["Theta"], "deg")
        rad_axis = MapAxis.from_nodes(rad, name="rad")
        energy = u.Quantity(hdu_list["PSF"].data["Energy"], "MeV")
        energy_axis_true = MapAxis.from_nodes(energy, name="energy_true", interp="log")
        exposure = u.Quantity(hdu_list["PSF"].data["Exposure"], "cm^2 s")
        data = u.Quantity(hdu_list["PSF"].data["PSF"], "sr^-1")
        return cls(
            energy_axis_true=energy_axis_true,
            rad_axis=rad_axis,
            exposure=exposure,
            data=data,
        )
    def to_hdulist(self):
        """Convert to FITS HDU list format.
        Returns
        -------
        hdu_list : `~astropy.io.fits.HDUList`
            PSF in HDU list format.
        """
        theta_hdu = self.rad_axis.to_table_hdu(format="gtpsf")
        psf_table = self.energy_axis_true.to_table(format="gtpsf")
        psf_table["Exposure"] = self.exposure.to("cm^2 s")
        psf_table["PSF"] = self.data.data.to("sr^-1")
        psf_hdu = fits.BinTableHDU(data=psf_table, name="PSF")
        return fits.HDUList([fits.PrimaryHDU(), theta_hdu, psf_hdu])
    @classmethod
    def read(cls, filename):
        """Create `EnergyDependentTablePSF` from ``gtpsf``-format FITS file.
        Parameters
        ----------
        filename : str
            File name
        """
        # memmap=False so the file handle is fully released when the context exits
        with fits.open(str(make_path(filename)), memmap=False) as hdulist:
            return cls.from_hdulist(hdulist)
    def write(self, filename, *args, **kwargs):
        """Write to FITS file.
        Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
        """
        self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
    def evaluate(self, energy=None, rad=None, method="linear"):
        """Evaluate the PSF at a given energy and offset
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy value
        rad : `~astropy.coordinates.Angle`
            Offset wrt source position
        method : {"linear", "nearest"}
            Linear or nearest neighbour interpolation.
        Returns
        -------
        values : `~astropy.units.Quantity`
            Interpolated value
        """
        if energy is None:
            energy = self.energy_axis_true.center
        if rad is None:
            rad = self.rad_axis.center
        # broadcast energy (rows) against rad (columns)
        energy = u.Quantity(energy, ndmin=1)[:, np.newaxis]
        rad = u.Quantity(rad, ndmin=1)
        return self.data._interpolate((energy, rad), method=method)
    def table_psf_at_energy(self, energy, method="linear", **kwargs):
        """Create `~gammapy.irf.TablePSF` at one given energy.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy
        method : {"linear", "nearest"}
            Linear or nearest neighbour interpolation.
        Returns
        -------
        psf : `~gammapy.irf.TablePSF`
            Table PSF
        """
        # evaluate() broadcasts to (n_energy, n_rad); take the single energy row
        psf_value = self.evaluate(energy=energy, method=method)[0, :]
        return TablePSF(rad_axis=self.rad_axis, data=psf_value, **kwargs)
    def table_psf_in_energy_range(
        self, energy_range, spectrum=None, n_bins=11, **kwargs
    ):
        """Average PSF in a given energy band.
        Expected counts in sub energy bands given the given exposure
        and spectrum are used as weights.
        Parameters
        ----------
        energy_range : `~astropy.units.Quantity`
            Energy band
        spectrum : `~gammapy.modeling.models.SpectralModel`
            Spectral model used for weighting the PSF. Default is a power law
            with index=2.
        n_bins : int
            Number of energy points in the energy band, used to compute the
            weighted PSF.
        Returns
        -------
        psf : `TablePSF`
            Table PSF
        """
        from gammapy.modeling.models import PowerLawSpectralModel, TemplateSpectralModel
        if spectrum is None:
            spectrum = PowerLawSpectralModel()
        # interpolate the tabulated exposure as a function of energy
        exposure = TemplateSpectralModel(self.energy_axis_true.center, self.exposure)
        e_min, e_max = energy_range
        energy = MapAxis.from_energy_bounds(e_min, e_max, n_bins).edges
        # expected counts per energy point, normalized to unit sum
        weights = spectrum(energy) * exposure(energy)
        weights /= weights.sum()
        psf_value = self.evaluate(energy=energy)
        psf_value_weighted = weights[:, np.newaxis] * psf_value
        return TablePSF(self.rad_axis, psf_value_weighted.sum(axis=0), **kwargs)
    def containment_radius(self, energy, fraction=0.68):
        """Containment radius.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy
        fraction : float
            Containment fraction.
        Returns
        -------
        rad : `~astropy.units.Quantity`
            Containment radius in deg
        """
        # upsample for better precision
        rad_max = Angle(self.rad_axis.upsample(factor=10).center)
        containment = self.containment(energy=energy, rad_max=rad_max)
        # find nearest containment value along the rad axis
        fraction_idx = np.argmin(np.abs(containment - fraction), axis=1)
        return rad_max[fraction_idx].to("deg")
    def containment(self, energy, rad_max):
        """Compute containment of the PSF.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy
        rad_max : `~astropy.coordinates.Angle`
            Maximum offset angle.
        Returns
        -------
        fraction : array_like
            Containment fraction (in range 0 .. 1)
        """
        # broadcast energy (rows) against rad_max (columns)
        energy = np.atleast_1d(u.Quantity(energy))[:, np.newaxis]
        rad_max = np.atleast_1d(u.Quantity(rad_max))
        return self.data._integrate_rad((energy, rad_max))
    def info(self):
        """Print basic info"""
        print(str(self))
    def plot_psf_vs_rad(self, energy=None, ax=None, **kwargs):
        """Plot PSF vs radius.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energies where to plot the PSF.
        ax : `~matplotlib.axes.Axes`, optional
            Axes to plot on (default: current axes).
        **kwargs : dict
            Keyword arguments pass to `~matplotlib.pyplot.plot`.
        """
        import matplotlib.pyplot as plt
        if energy is None:
            # default sample energies: 100 GeV, 1 TeV, 10 TeV
            energy = [100, 1000, 10000] * u.GeV
        ax = plt.gca() if ax is None else ax
        for value in energy:
            psf_value = np.squeeze(self.evaluate(energy=value))
            label = f"{value:.0f}"
            ax.plot(
                self.rad_axis.center.to_value("deg"),
                psf_value.to_value("sr-1"),
                label=label,
                **kwargs,
            )
        ax.set_yscale("log")
        ax.set_xlabel("Offset (deg)")
        ax.set_ylabel("PSF (1 / sr)")
        plt.legend()
        return ax
    def plot_containment_vs_energy(
        self, ax=None, fractions=[0.68, 0.8, 0.95], **kwargs
    ):
        """Plot containment versus energy.

        One curve per containment fraction in ``fractions``.
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax
        for fraction in fractions:
            rad = self.containment_radius(self.energy_axis_true.center, fraction)
            label = f"{100 * fraction:.1f}% Containment"
            ax.plot(
                self.energy_axis_true.center.to("GeV").value,
                rad.to("deg").value,
                label=label,
                **kwargs,
            )
        ax.semilogx()
        ax.legend(loc="best")
        ax.set_xlabel("Energy (GeV)")
        ax.set_ylabel("Containment radius (deg)")
    def plot_exposure_vs_energy(self):
        """Plot exposure versus energy."""
        import matplotlib.pyplot as plt
        plt.figure(figsize=(4, 3))
        plt.plot(self.energy_axis_true.center, self.exposure, color="black", lw=3)
        plt.semilogx()
        plt.xlabel("Energy (MeV)")
        plt.ylabel("Exposure (cm^2 s)")
        # fixed axis ranges (MeV and cm^2 s); presumably tuned for Fermi-LAT -- TODO confirm
        plt.xlim(1e4 / 1.3, 1.3 * 1e6)
        plt.ylim(0, 1.5e11)
        plt.tight_layout()
class PSF3D:
    """PSF with axes: energy, offset, rad.
    Data format specification: :ref:`gadf:psf_table`
    Parameters
    ----------
    energy_axis_true : `MapAxis`
        True energy axis.
    offset_axis : `MapAxis`
        Offset axis
    rad_axis : `MapAxis`
        Rad axis
    data : `~astropy.units.Quantity`
        PSF (3-dim with axes: psf[rad_index, offset_index, energy_index])
    meta : dict
        Meta dict
    """
    # GADF IRF type tag
    tag = "psf_table"
    def __init__(
        self,
        energy_axis_true,
        offset_axis,
        rad_axis,
        data,
        meta=None,
        interp_kwargs=None,
    ):
        interp_kwargs = interp_kwargs or {}
        axes = MapAxes([energy_axis_true, offset_axis, rad_axis])
        axes.assert_names(["energy_true", "offset", "rad"])
        self.data = NDDataArray(
            axes=axes, data=u.Quantity(data).to("sr^-1"), interp_kwargs=interp_kwargs
        )
        self.meta = meta or {}
    @property
    def energy_thresh_lo(self):
        """Low energy threshold"""
        return self.meta["LO_THRES"] * u.TeV
    @property
    def energy_thresh_hi(self):
        """High energy threshold"""
        return self.meta["HI_THRES"] * u.TeV
    @property
    def energy_axis_true(self):
        """True energy axis (`MapAxis`)."""
        return self.data.axes["energy_true"]
    @property
    def rad_axis(self):
        """Rad axis (`MapAxis`)."""
        return self.data.axes["rad"]
    @property
    def offset_axis(self):
        """Offset axis (`MapAxis`)."""
        return self.data.axes["offset"]
    def __repr__(self):
        """Print some basic info.
        """
        info = self.__class__.__name__ + "\n"
        info += "-" * len(self.__class__.__name__) + "\n\n"
        info += f"\tshape : {self.data.data.shape}\n"
        return info
    @classmethod
    def read(cls, filename, hdu="PSF_2D_TABLE"):
        """Create `PSF3D` from FITS file.
        Parameters
        ----------
        filename : str
            File name
        hdu : str
            HDU name
        """
        table = Table.read(make_path(filename), hdu=hdu)
        return cls.from_table(table)
    @classmethod
    def from_table(cls, table):
        """Create `PSF3D` from `~astropy.table.Table`.
        Parameters
        ----------
        table : `~astropy.table.Table`
            Table Table-PSF info.
        """
        axes = MapAxes.from_table(
            table=table, column_prefixes=["ENERG", "THETA", "RAD"], format="gadf-dl3"
        )
        # the table stores the transposed array (see to_hdulist, which writes .T back)
        data = table["RPSF"].quantity[0].transpose()
        return cls(
            energy_axis_true=axes["energy_true"],
            offset_axis=axes["offset"],
            rad_axis=axes["rad"],
            data=data,
            meta=table.meta
        )
    def to_hdulist(self):
        """Convert PSF table data to FITS HDU list.
        Returns
        -------
        hdu_list : `~astropy.io.fits.HDUList`
            PSF in HDU list format.
        """
        table = self.data.axes.to_table(format="gadf-dl3")
        # transpose back and add a leading axis (single-row table column)
        table["RPSF"] = self.data.data.T[np.newaxis]
        hdu = fits.BinTableHDU(table)
        hdu.header["LO_THRES"] = self.energy_thresh_lo.value
        hdu.header["HI_THRES"] = self.energy_thresh_hi.value
        return fits.HDUList([fits.PrimaryHDU(), hdu])
    def write(self, filename, *args, **kwargs):
        """Write PSF to FITS file.
        Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
        """
        self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
    def evaluate(self, energy=None, offset=None, rad=None):
        """Interpolate PSF value at a given offset and energy.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            energy value
        offset : `~astropy.coordinates.Angle`
            Offset in the field of view
        rad : `~astropy.coordinates.Angle`
            Offset wrt source position
        Returns
        -------
        values : `~astropy.units.Quantity`
            Interpolated value
        """
        if energy is None:
            energy = self.energy_axis_true.center
        if offset is None:
            offset = self.offset_axis.center
        if rad is None:
            rad = self.rad_axis.center
        rad = np.atleast_1d(u.Quantity(rad))
        offset = np.atleast_1d(u.Quantity(offset))
        energy = np.atleast_1d(u.Quantity(energy))
        # broadcast the three 1-d arrays against each other:
        # axis 0 = rad, axis 1 = offset, axis 2 = energy
        return self.data._interpolate(
            (
                energy[np.newaxis, np.newaxis, :],
                offset[np.newaxis, :, np.newaxis],
                rad[:, np.newaxis, np.newaxis],
            )
        )
    def to_energy_dependent_table_psf(self, theta="0 deg", rad=None, exposure=None):
        """
        Convert PSF3D in EnergyDependentTablePSF.
        Parameters
        ----------
        theta : `~astropy.coordinates.Angle`
            Offset in the field of view
        rad : `~astropy.coordinates.Angle`
            Offset from PSF center used for evaluating the PSF on a grid.
            Default is the ``rad`` from this PSF.
        exposure : `~astropy.units.Quantity`
            Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
            Default exposure = 1.
        Returns
        -------
        table_psf : `~gammapy.irf.EnergyDependentTablePSF`
            Energy-dependent PSF
        """
        theta = Angle(theta)
        if rad is not None:
            rad_axis = MapAxis.from_edges(rad, name="rad")
        else:
            rad_axis = self.rad_axis
        # evaluate at the single offset theta; squeeze removes the length-1 offset axis
        psf_value = self.evaluate(offset=theta, rad=rad_axis.center).squeeze()
        return EnergyDependentTablePSF(
            energy_axis_true=self.energy_axis_true,
            rad_axis=rad_axis,
            exposure=exposure,
            # transpose so energy becomes the leading axis, as EnergyDependentTablePSF expects
            data=psf_value.transpose(),
        )
    def to_table_psf(self, energy, theta="0 deg", **kwargs):
        """Create `~gammapy.irf.TablePSF` at one given energy.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy
        theta : `~astropy.coordinates.Angle`
            Offset in the field of view. Default theta = 0 deg
        Returns
        -------
        psf : `~gammapy.irf.TablePSF`
            Table PSF
        """
        energy = u.Quantity(energy)
        theta = Angle(theta)
        psf_value = self.evaluate(energy, theta).squeeze()
        return TablePSF(rad_axis=self.rad_axis, data=psf_value, **kwargs)
    def containment_radius(
        self, energy, theta="0 deg", fraction=0.68
    ):
        """Containment radius.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy
        theta : `~astropy.coordinates.Angle`
            Offset in the field of view. Default theta = 0 deg
        fraction : float
            Containment fraction. Default fraction = 0.68
        Returns
        -------
        radius : `~astropy.units.Quantity`
            Containment radius in deg
        """
        energy = np.atleast_1d(u.Quantity(energy))
        theta = np.atleast_1d(u.Quantity(theta))
        radii = []
        # compute one energy-dependent table PSF per field-of-view offset
        for t in theta:
            psf = self.to_energy_dependent_table_psf(theta=t)
            radii.append(psf.containment_radius(energy, fraction=fraction))
        return u.Quantity(radii).T.squeeze()
    def plot_containment_vs_energy(
        self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
    ):
        """Plot containment fraction as a function of energy.

        One curve per (theta, fraction) combination.
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax
        energy = MapAxis.from_energy_bounds(
            self.energy_axis_true.edges[0], self.energy_axis_true.edges[-1], 100
        ).edges
        for theta in thetas:
            for fraction in fractions:
                # copy so setdefault does not leak "label" into subsequent curves
                plot_kwargs = kwargs.copy()
                radius = self.containment_radius(energy, theta, fraction)
                plot_kwargs.setdefault(
                    "label", f"{theta.deg} deg, {100 * fraction:.1f}%"
                )
                ax.plot(energy.value, radius.value, **plot_kwargs)
        ax.semilogx()
        ax.legend(loc="best")
        ax.set_xlabel("Energy (TeV)")
        ax.set_ylabel("Containment radius (deg)")
    def plot_psf_vs_rad(self, theta="0 deg", energy=u.Quantity(1, "TeV")):
        """Plot PSF vs rad.
        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy. Default energy = 1 TeV
        theta : `~astropy.coordinates.Angle`
            Offset in the field of view. Default theta = 0 deg
        """
        theta = Angle(theta)
        table = self.to_table_psf(energy=energy, theta=theta)
        return table.plot_psf_vs_rad()
    def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
        """Plot containment image with energy and theta axes.
        Parameters
        ----------
        fraction : float
            Containment fraction between 0 and 1.
        add_cbar : bool
            Add a colorbar
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax
        energy = self.energy_axis_true.center
        offset = self.offset_axis.center
        # Set up and compute data
        containment = self.containment_radius(energy, offset, fraction)
        # plotting defaults
        kwargs.setdefault("cmap", "GnBu")
        kwargs.setdefault("vmin", np.nanmin(containment.value))
        kwargs.setdefault("vmax", np.nanmax(containment.value))
        # Plotting
        x = energy.value
        y = offset.value
        caxes = ax.pcolormesh(x, y, containment.value.T, **kwargs)
        # Axes labels and ticks, colorbar
        ax.semilogx()
        ax.set_ylabel(f"Offset ({offset.unit})")
        ax.set_xlabel(f"Energy ({energy.unit})")
        ax.set_xlim(x.min(), x.max())
        ax.set_ylim(y.min(), y.max())
        try:
            self._plot_safe_energy_range(ax)
        except KeyError:
            # safe energy thresholds not present in meta; skip the overlay
            pass
        if add_cbar:
            label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
            ax.figure.colorbar(caxes, ax=ax, label=label)
        return ax
    def _plot_safe_energy_range(self, ax):
        """add safe energy range lines to the plot"""
        esafe = self.energy_thresh_lo
        omin = self.offset_axis.center.value.min()
        omax = self.offset_axis.center.value.max()
        # vertical line at the low-energy threshold spanning the offset range
        ax.vlines(x=esafe.value, ymin=omin, ymax=omax)
        label = f"Safe energy threshold: {esafe:3.2f}"
        ax.text(x=0.1, y=0.9 * esafe.value, s=label, va="top")
    def peek(self, figsize=(15, 5)):
        """Quick-look summary plots."""
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
        self.plot_containment(fraction=0.68, ax=axes[0])
        self.plot_containment(fraction=0.95, ax=axes[1])
        self.plot_containment_vs_energy(ax=axes[2])
        # TODO: implement this plot
        # psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
        # psf.plot_components(ax=axes[2])
        plt.tight_layout()
| [
"logging.getLogger",
"matplotlib.pyplot.ylabel",
"numpy.nanmin",
"astropy.coordinates.Angle",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"gammapy.utils.scripts.make_path",
"numpy.linspace",
"numpy.nanmax",
"gammapy.utils.gauss.Gauss2DPDF",
"matplotlib.pyplot.ylim",
... | [((516, 543), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (533, 543), False, 'import logging\n'), ((2150, 2162), 'astropy.coordinates.Angle', 'Angle', (['width'], {}), '(width)\n', (2155, 2162), False, 'from astropy.coordinates import Angle\n'), ((2177, 2187), 'astropy.coordinates.Angle', 'Angle', (['rad'], {}), '(rad)\n', (2182, 2187), False, 'from astropy.coordinates import Angle\n'), ((2545, 2570), 'astropy.units.Quantity', 'u.Quantity', (['data', '"""sr^-1"""'], {}), "(data, 'sr^-1')\n", (2555, 2570), True, 'from astropy import units as u\n'), ((2590, 2625), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['rad'], {'name': '"""rad"""'}), "(rad, name='rad')\n", (2608, 2625), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((2741, 2788), 'gammapy.utils.array.array_stats_str', 'array_stats_str', (['self.rad_axis.center', '"""offset"""'], {}), "(self.rad_axis.center, 'offset')\n", (2756, 2788), False, 'from gammapy.utils.array import array_stats_str\n'), ((3879, 3901), 'numpy.atleast_1d', 'np.atleast_1d', (['rad_max'], {}), '(rad_max)\n', (3892, 3901), True, 'import numpy as np\n'), ((4529, 4552), 'numpy.atleast_1d', 'np.atleast_1d', (['fraction'], {}), '(fraction)\n', (4542, 4552), True, 'import numpy as np\n'), ((6353, 6390), 'gammapy.maps.MapAxes', 'MapAxes', (['[energy_axis_true, rad_axis]'], {}), '([energy_axis_true, rad_axis])\n', (6360, 6390), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((7359, 7387), 'astropy.units.Quantity', 'u.Quantity', (['[10, 100]', '"""GeV"""'], {}), "([10, 100], 'GeV')\n", (7369, 7387), True, 'from astropy import units as u\n'), ((8019, 8064), 'astropy.coordinates.Angle', 'Angle', (["hdu_list['THETA'].data['Theta']", '"""deg"""'], {}), "(hdu_list['THETA'].data['Theta'], 'deg')\n", (8024, 8064), False, 'from astropy.coordinates import Angle\n'), ((8084, 8119), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['rad'], {'name': '"""rad"""'}), "(rad, 
name='rad')\n", (8102, 8119), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((8137, 8186), 'astropy.units.Quantity', 'u.Quantity', (["hdu_list['PSF'].data['Energy']", '"""MeV"""'], {}), "(hdu_list['PSF'].data['Energy'], 'MeV')\n", (8147, 8186), True, 'from astropy import units as u\n'), ((8214, 8274), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['energy'], {'name': '"""energy_true"""', 'interp': '"""log"""'}), "(energy, name='energy_true', interp='log')\n", (8232, 8274), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((8294, 8348), 'astropy.units.Quantity', 'u.Quantity', (["hdu_list['PSF'].data['Exposure']", '"""cm^2 s"""'], {}), "(hdu_list['PSF'].data['Exposure'], 'cm^2 s')\n", (8304, 8348), True, 'from astropy import units as u\n'), ((8364, 8412), 'astropy.units.Quantity', 'u.Quantity', (["hdu_list['PSF'].data['PSF']", '"""sr^-1"""'], {}), "(hdu_list['PSF'].data['PSF'], 'sr^-1')\n", (8374, 8412), True, 'from astropy import units as u\n'), ((9035, 9079), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', ([], {'data': 'psf_table', 'name': '"""PSF"""'}), "(data=psf_table, name='PSF')\n", (9051, 9079), False, 'from astropy.io import fits\n'), ((10476, 10500), 'astropy.units.Quantity', 'u.Quantity', (['rad'], {'ndmin': '(1)'}), '(rad, ndmin=1)\n', (10486, 10500), True, 'from astropy import units as u\n'), ((12089, 12155), 'gammapy.modeling.models.TemplateSpectralModel', 'TemplateSpectralModel', (['self.energy_axis_true.center', 'self.exposure'], {}), '(self.energy_axis_true.center, self.exposure)\n', (12110, 12155), False, 'from gammapy.modeling.models import PowerLawSpectralModel, TemplateSpectralModel\n'), ((14824, 14836), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14834, 14836), True, 'import matplotlib.pyplot as plt\n'), ((15729, 15755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (15739, 15755), True, 'import matplotlib.pyplot as plt\n'), ((15764, 15838), 
'matplotlib.pyplot.plot', 'plt.plot', (['self.energy_axis_true.center', 'self.exposure'], {'color': '"""black"""', 'lw': '(3)'}), "(self.energy_axis_true.center, self.exposure, color='black', lw=3)\n", (15772, 15838), True, 'import matplotlib.pyplot as plt\n'), ((15847, 15861), 'matplotlib.pyplot.semilogx', 'plt.semilogx', ([], {}), '()\n', (15859, 15861), True, 'import matplotlib.pyplot as plt\n'), ((15870, 15896), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy (MeV)"""'], {}), "('Energy (MeV)')\n", (15880, 15896), True, 'import matplotlib.pyplot as plt\n'), ((15905, 15936), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exposure (cm^2 s)"""'], {}), "('Exposure (cm^2 s)')\n", (15915, 15936), True, 'import matplotlib.pyplot as plt\n'), ((15945, 15985), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10000.0 / 1.3)', '(1.3 * 1000000.0)'], {}), '(10000.0 / 1.3, 1.3 * 1000000.0)\n', (15953, 15985), True, 'import matplotlib.pyplot as plt\n'), ((15984, 16011), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(150000000000.0)'], {}), '(0, 150000000000.0)\n', (15992, 16011), True, 'import matplotlib.pyplot as plt\n'), ((16012, 16030), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16028, 16030), True, 'import matplotlib.pyplot as plt\n'), ((16725, 16775), 'gammapy.maps.MapAxes', 'MapAxes', (['[energy_axis_true, offset_axis, rad_axis]'], {}), '([energy_axis_true, offset_axis, rad_axis])\n', (16732, 16775), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((18341, 18438), 'gammapy.maps.MapAxes.from_table', 'MapAxes.from_table', ([], {'table': 'table', 'column_prefixes': "['ENERG', 'THETA', 'RAD']", 'format': '"""gadf-dl3"""'}), "(table=table, column_prefixes=['ENERG', 'THETA', 'RAD'],\n format='gadf-dl3')\n", (18359, 18438), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((19050, 19073), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['table'], {}), '(table)\n', (19066, 19073), False, 'from astropy.io import fits\n'), ((21359, 
21371), 'astropy.coordinates.Angle', 'Angle', (['theta'], {}), '(theta)\n', (21364, 21371), False, 'from astropy.coordinates import Angle\n'), ((22251, 22269), 'astropy.units.Quantity', 'u.Quantity', (['energy'], {}), '(energy)\n', (22261, 22269), True, 'from astropy import units as u\n'), ((22286, 22298), 'astropy.coordinates.Angle', 'Angle', (['theta'], {}), '(theta)\n', (22291, 22298), False, 'from astropy.coordinates import Angle\n'), ((23378, 23398), 'astropy.coordinates.Angle', 'Angle', (['[0, 1]', '"""deg"""'], {}), "([0, 1], 'deg')\n", (23383, 23398), False, 'from astropy.coordinates import Angle\n'), ((24305, 24325), 'astropy.units.Quantity', 'u.Quantity', (['(1)', '"""TeV"""'], {}), "(1, 'TeV')\n", (24315, 24325), True, 'from astropy import units as u\n'), ((24617, 24629), 'astropy.coordinates.Angle', 'Angle', (['theta'], {}), '(theta)\n', (24622, 24629), False, 'from astropy.coordinates import Angle\n'), ((26742, 26789), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': 'figsize'}), '(nrows=1, ncols=3, figsize=figsize)\n', (26754, 26789), True, 'import matplotlib.pyplot as plt\n'), ((27121, 27139), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27137, 27139), True, 'import matplotlib.pyplot as plt\n'), ((2292, 2327), 'numpy.where', 'np.where', (['(rad < width)', 'amplitude', '(0)'], {}), '(rad < width, amplitude, 0)\n', (2300, 2327), True, 'import numpy as np\n'), ((4348, 4421), 'numpy.linspace', 'np.linspace', (['(0 * u.deg)', 'self.rad_axis.center[-1]', '(10 * self.rad_axis.nbin)'], {}), '(0 * u.deg, self.rad_axis.center[-1], 10 * self.rad_axis.nbin)\n', (4359, 4421), True, 'import numpy as np\n'), ((4587, 4632), 'numpy.abs', 'np.abs', (['(containment - fraction[:, np.newaxis])'], {}), '(containment - fraction[:, np.newaxis])\n', (4593, 4632), True, 'import numpy as np\n'), ((5276, 5285), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5283, 5285), True, 'import 
matplotlib.pyplot as plt\n'), ((7166, 7221), 'gammapy.utils.array.array_stats_str', 'array_stats_str', (['self.energy_axis_true.center', '"""energy"""'], {}), "(self.energy_axis_true.center, 'energy')\n", (7181, 7221), False, 'from gammapy.utils.array import array_stats_str\n'), ((10419, 10446), 'astropy.units.Quantity', 'u.Quantity', (['energy'], {'ndmin': '(1)'}), '(energy, ndmin=1)\n', (10429, 10446), True, 'from astropy import units as u\n'), ((12045, 12068), 'gammapy.modeling.models.PowerLawSpectralModel', 'PowerLawSpectralModel', ([], {}), '()\n', (12066, 12068), False, 'from gammapy.modeling.models import PowerLawSpectralModel, TemplateSpectralModel\n'), ((12210, 12258), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['e_min', 'e_max', 'n_bins'], {}), '(e_min, e_max, n_bins)\n', (12236, 12258), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((13171, 13201), 'numpy.abs', 'np.abs', (['(containment - fraction)'], {}), '(containment - fraction)\n', (13177, 13201), True, 'import numpy as np\n'), ((13752, 13771), 'astropy.units.Quantity', 'u.Quantity', (['rad_max'], {}), '(rad_max)\n', (13762, 13771), True, 'from astropy import units as u\n'), ((14361, 14370), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14368, 14370), True, 'import matplotlib.pyplot as plt\n'), ((15060, 15069), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15067, 15069), True, 'import matplotlib.pyplot as plt\n'), ((18030, 18049), 'gammapy.utils.scripts.make_path', 'make_path', (['filename'], {}), '(filename)\n', (18039, 18049), False, 'from gammapy.utils.scripts import make_path\n'), ((20260, 20275), 'astropy.units.Quantity', 'u.Quantity', (['rad'], {}), '(rad)\n', (20270, 20275), True, 'from astropy import units as u\n'), ((20308, 20326), 'astropy.units.Quantity', 'u.Quantity', (['offset'], {}), '(offset)\n', (20318, 20326), True, 'from astropy import units as u\n'), ((20359, 20377), 'astropy.units.Quantity', 'u.Quantity', (['energy'], {}), 
'(energy)\n', (20369, 20377), True, 'from astropy import units as u\n'), ((21424, 21459), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['rad'], {'name': '"""rad"""'}), "(rad, name='rad')\n", (21442, 21459), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((22999, 23017), 'astropy.units.Quantity', 'u.Quantity', (['energy'], {}), '(energy)\n', (23009, 23017), True, 'from astropy import units as u\n'), ((23049, 23066), 'astropy.units.Quantity', 'u.Quantity', (['theta'], {}), '(theta)\n', (23059, 23066), True, 'from astropy import units as u\n'), ((23553, 23562), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (23560, 23562), True, 'import matplotlib.pyplot as plt\n'), ((23603, 23704), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['self.energy_axis_true.edges[0]', 'self.energy_axis_true.edges[-1]', '(100)'], {}), '(self.energy_axis_true.edges[0], self.\n energy_axis_true.edges[-1], 100)\n', (23629, 23704), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((25106, 25115), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (25113, 25115), True, 'import matplotlib.pyplot as plt\n'), ((25438, 25466), 'numpy.nanmin', 'np.nanmin', (['containment.value'], {}), '(containment.value)\n', (25447, 25466), True, 'import numpy as np\n'), ((25502, 25530), 'numpy.nanmax', 'np.nanmax', (['containment.value'], {}), '(containment.value)\n', (25511, 25530), True, 'import numpy as np\n'), ((2385, 2415), 'gammapy.utils.gauss.Gauss2DPDF', 'Gauss2DPDF', ([], {'sigma': 'width.radian'}), '(sigma=width.radian)\n', (2395, 2415), False, 'from gammapy.utils.gauss import Gauss2DPDF\n'), ((6640, 6675), 'numpy.ones', 'np.ones', (['self.energy_axis_true.nbin'], {}), '(self.energy_axis_true.nbin)\n', (6647, 6675), True, 'import numpy as np\n'), ((9110, 9127), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (9125, 9127), False, 'from astropy.io import fits\n'), ((9697, 9716), 'gammapy.utils.scripts.make_path', 'make_path', 
(['filename'], {}), '(filename)\n', (9706, 9716), False, 'from gammapy.utils.scripts import make_path\n'), ((13685, 13703), 'astropy.units.Quantity', 'u.Quantity', (['energy'], {}), '(energy)\n', (13695, 13703), True, 'from astropy import units as u\n'), ((19226, 19243), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (19241, 19243), False, 'from astropy.io import fits\n'), ((19462, 19481), 'gammapy.utils.scripts.make_path', 'make_path', (['filename'], {}), '(filename)\n', (19471, 19481), False, 'from gammapy.utils.scripts import make_path\n'), ((6729, 6749), 'astropy.units.Quantity', 'u.Quantity', (['exposure'], {}), '(exposure)\n', (6739, 6749), True, 'from astropy import units as u\n'), ((9397, 9416), 'gammapy.utils.scripts.make_path', 'make_path', (['filename'], {}), '(filename)\n', (9406, 9416), False, 'from gammapy.utils.scripts import make_path\n'), ((23266, 23283), 'astropy.units.Quantity', 'u.Quantity', (['radii'], {}), '(radii)\n', (23276, 23283), True, 'from astropy import units as u\n'), ((1112, 1128), 'astropy.units.Quantity', 'u.Quantity', (['data'], {}), '(data)\n', (1122, 1128), True, 'from astropy import units as u\n'), ((6503, 6519), 'astropy.units.Quantity', 'u.Quantity', (['data'], {}), '(data)\n', (6513, 6519), True, 'from astropy import units as u\n'), ((16898, 16914), 'astropy.units.Quantity', 'u.Quantity', (['data'], {}), '(data)\n', (16908, 16914), True, 'from astropy import units as u\n')] |
import numpy as np
from scipy.sparse.csgraph import connected_components as inner_conn_comp
def dot_matrix(X):
    """Return the Gram matrix of X after centering and global scaling.

    Columns are centered by their means, then the whole array is divided
    by the largest row norm of the *original* (uncentered) X.  The result
    is ``centered @ centered.T``.
    """
    centered = X - np.mean(X, axis=0)
    scale = np.linalg.norm(X, axis=1).max()
    centered /= scale
    return centered.dot(centered.T)
def connected_components(sym_mat, thresh=1e-4):
    """Split a symmetric affinity matrix into boolean cluster masks.

    Entries larger than ``thresh`` times the global maximum define the
    edges of an undirected graph; each returned boolean array marks the
    member nodes of one connected component.
    """
    adjacency = sym_mat > thresh * sym_mat.max()
    n_components, component_of = inner_conn_comp(
        adjacency, directed=False, return_labels=True)
    return [component_of == comp for comp in range(n_components)]
def log_scale(mat):
    """Return ``log10(1 + 1e3 * |mat|)`` element-wise.

    Magnitudes are scaled by 1e3 and offset by 1 so that zeros map to 0
    and the logarithm stays finite.  The computation is done out of
    place, which also fixes the previous version: its in-place
    ``mat *= 1e3`` raised a casting error for integer-dtype input
    (float64 cannot be written back into an int array).  Float inputs
    produce exactly the same values as before.

    Parameters
    ----------
    mat : np.ndarray
        Input array of any real dtype; it is never modified.

    Returns
    -------
    np.ndarray
        Log-compressed magnitudes.
    """
    return np.log10(np.abs(mat) * 1e3 + 1.0)
| [
"numpy.abs",
"numpy.mean",
"numpy.log10",
"scipy.sparse.csgraph.connected_components",
"numpy.linalg.norm"
] | [((350, 413), 'scipy.sparse.csgraph.connected_components', 'inner_conn_comp', (['binary_mat'], {'directed': '(False)', 'return_labels': '(True)'}), '(binary_mat, directed=False, return_labels=True)\n', (365, 413), True, 'from scipy.sparse.csgraph import connected_components as inner_conn_comp\n'), ((555, 566), 'numpy.abs', 'np.abs', (['mat'], {}), '(mat)\n', (561, 566), True, 'import numpy as np\n'), ((606, 619), 'numpy.log10', 'np.log10', (['mat'], {}), '(mat)\n', (614, 619), True, 'import numpy as np\n'), ((130, 148), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (137, 148), True, 'import numpy as np\n'), ((170, 195), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (184, 195), True, 'import numpy as np\n')] |
"""
:author: hxq, shy
"""
import numpy as np
from mindspore import ops
DIV = ops.Div()
def l2_regularizer(input_x, axis=1):
    """Normalise ``input_x`` to unit L2 norm along ``axis``.

    :param input_x: tensor to normalise.
    :param axis: axis along which the Euclidean norm is taken.
    :return: ``input_x`` divided element-wise by its L2 norm
        (``keepdims`` keeps the norm broadcastable).
    """
    norm = ((input_x ** 2).sum(axis=axis, keepdims=True)) ** 0.5
    return DIV(input_x, norm)
def set_rng_seed(seed):
    """Seed NumPy's global random generator for reproducible runs.

    :param seed: integer seed forwarded to ``np.random.seed``.
    :return: None
    """
    np.random.seed(seed)
| [
"mindspore.ops.Div",
"numpy.random.seed"
] | [((78, 87), 'mindspore.ops.Div', 'ops.Div', ([], {}), '()\n', (85, 87), False, 'from mindspore import ops\n'), ((344, 364), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (358, 364), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# This is designed as a reference implementation of the UVHDF5 specification. It shows a conversion of SMA data in the UVFITS format to the UVHDF5 format.
import argparse
parser = argparse.ArgumentParser(description="Convert SMA FITS files into UVHDF5 file format.")
parser.add_argument("FITS", help="The input UVFITS file.")
parser.add_argument("--out", default="data.hdf5", help="The output UVHDF5 file.")
args = parser.parse_args()
# NOTE: imports placed after argument parsing so `--help` stays fast.
from astropy.io import fits
import h5py
import numpy as np
cc = 2.99792458e10 # speed of light [cm s^-1]; currently unused below
# Reading SMA dataset: primary HDU carries both the visibility table and header
f = fits.open(args.FITS)
data = f[0].data
hdr = f[0].header
nfreq = hdr["NAXIS4"]   # number of frequency channels
dnu = hdr["CDELT4"]     # channel width [Hz]; sign gives the storage order
freqs = hdr["CRVAL4"] + dnu * np.arange(nfreq) # Hz
# Convert each uu, vv coordinate from light seconds to kilolambda by
# multiplying with the per-channel frequency (the 1e-3 factor converts
# lambda to kilolambda) — assumes UU/VV are in seconds; TODO confirm units
uu = 1e-3 * (freqs * np.tile(data["UU"], (nfreq, 1)).T).T
vv = 1e-3 * (freqs * np.tile(data["VV"], (nfreq, 1)).T).T
# uu, vv are now (nfreq, nvis) shape arrays
shape = uu.shape
nvis = uu.shape[1]
# print("Original shape of DATA", data["DATA"].shape)
# Remove all of the "zombie" 1D columns
vis = np.squeeze(data["DATA"])
# Now, vis is stored as an (npoints, nfreqs, 3) array, where last dimension is
# (real, imag, weight)
# Read and convert all of these to (nfreq, nvis) arrays
real = vis[:, :, 0].T
imag = vis[:, :, 1].T
weight = vis[:, :, 2].T
# Assume that flags are negative weight values
flag = weight < 0
if np.any(flag):
    print("We found some negative weights. Setting them to flags.")
# Now, stuff each of these into an HDF5 file.
fid = h5py.File(args.out, "w")
# Add in observational attributes; skip any keyword missing from the header
for key in ["OBJECT", "TELESCOP", "ORIGIN"]:
    try:
        val = hdr[key]
        fid.attrs[key] = val
    except KeyError:
        continue
# Add in format specification version
fid.attrs["FMT_Version"] = "v0.1"
# Are the frequencies stored in increasing or decreasing order in UVFITS?
# UVHDF5 always stores frequencies in increasing order, so a negative
# channel width (dnu < 0) means every array must be flipped along the
# frequency axis before writing.
if dnu > 0:
    fid.create_dataset("freqs", (nfreq,), dtype="float64")[:] = freqs # [Hz]
    fid.create_dataset("uu", shape, dtype="float64")[:,:] = uu # [kilolambda]
    fid.create_dataset("vv", shape, dtype="float64")[:,:] = vv # [kilolambda]
    fid.create_dataset("real", shape, dtype="float64")[:,:] = real # [Jy]
    fid.create_dataset("imag", shape, dtype="float64")[:,:] = imag # [Jy]
    fid.create_dataset("weight", shape, dtype="float64")[:,:] = weight #[1/Jy^2]
    fid.create_dataset("flag", shape, dtype="int")[:,:] = flag # Boolean
else:
    print("UVFITS stored frequencies in decreasing order, flipping to positive for UVHDF5")
    fid.create_dataset("freqs", (nfreq,), dtype="float64")[:] = freqs[::-1] # [Hz]
    fid.create_dataset("uu", shape, dtype="float64")[:,:] = uu[::-1] # [kilolambda]
    fid.create_dataset("vv", shape, dtype="float64")[:,:] = vv[::-1] # [kilolambda]
    fid.create_dataset("real", shape, dtype="float64")[:,:] = real[::-1] # [Jy]
    fid.create_dataset("imag", shape, dtype="float64")[:,:] = imag[::-1] # [Jy]
    fid.create_dataset("weight", shape, dtype="float64")[:,:] = weight[::-1] #[1/Jy^2]
    fid.create_dataset("flag", shape, dtype="int")[:,:] = flag[::-1] # Boolean
fid.close()
| [
"numpy.tile",
"argparse.ArgumentParser",
"numpy.any",
"h5py.File",
"numpy.squeeze",
"astropy.io.fits.open",
"numpy.arange"
] | [((205, 296), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert SMA FITS files into UVHDF5 file format."""'}), "(description=\n 'Convert SMA FITS files into UVHDF5 file format.')\n", (228, 296), False, 'import argparse\n'), ((580, 600), 'astropy.io.fits.open', 'fits.open', (['args.FITS'], {}), '(args.FITS)\n', (589, 600), False, 'from astropy.io import fits\n'), ((1156, 1180), 'numpy.squeeze', 'np.squeeze', (["data['DATA']"], {}), "(data['DATA'])\n", (1166, 1180), True, 'import numpy as np\n'), ((1481, 1493), 'numpy.any', 'np.any', (['flag'], {}), '(flag)\n', (1487, 1493), True, 'import numpy as np\n'), ((1616, 1640), 'h5py.File', 'h5py.File', (['args.out', '"""w"""'], {}), "(args.out, 'w')\n", (1625, 1640), False, 'import h5py\n'), ((710, 726), 'numpy.arange', 'np.arange', (['nfreq'], {}), '(nfreq)\n', (719, 726), True, 'import numpy as np\n'), ((877, 908), 'numpy.tile', 'np.tile', (["data['UU']", '(nfreq, 1)'], {}), "(data['UU'], (nfreq, 1))\n", (884, 908), True, 'import numpy as np\n'), ((935, 966), 'numpy.tile', 'np.tile', (["data['VV']", '(nfreq, 1)'], {}), "(data['VV'], (nfreq, 1))\n", (942, 966), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import h5py
import collections as cl
import numpy as np
from isce3 import antenna as ant
class AntennaParser:
    """Class for parsing NISAR Antenna HDF5 file.

    Parameters
    ----------
    filename : str
        filename of HDF5 antenna file.

    Attributes
    ----------
    filename : str
        Filename of HDF5 antenna file.
    fid : h5py.File
        File object of HDF5.
    frequency : float
        RF frequency in Hz.
    frame : isce3.antenna.frame
        Isce3 Frame object.
    rx_beams : list of str
        List of names of all receive beams.
    tag : str
        Tag name for antenna patterns.
    timestamp : str
        Time stamp in UTC.
    tx_beams : list of str
        List of names of all transmit beams.
    version : str
        Version of the file.

    Raises
    ------
    IOError
        If HDF5 filename does not exist or can not be opened.
    """

    def __init__(self, filename):
        self._filename = filename
        # Read-only, single-writer/multiple-reader mode so the file can be
        # parsed while another process is still writing it.
        self._fid = h5py.File(filename, mode='r', libver='latest',
                              swmr=True)

    def __repr__(self):
        return f"{self.__class__.__name__}({self._filename})"

    def __enter__(self):
        # Support `with AntennaParser(...) as prs:` usage.
        return self

    def __exit__(self, val_, type_, tb_):
        # Close the underlying HDF5 handle on context exit.
        self._fid.close()

    @property
    def filename(self):
        # Path of the HDF5 file this parser was opened on.
        return self._filename

    @property
    def fid(self):
        # Underlying h5py.File object (read-only handle).
        return self._fid

    @property
    def rx_beams(self):
        # Top-level group names containing "RX" (case-insensitive).
        return [item for item in self._fid if "RX" in item.upper()]

    @property
    def tx_beams(self):
        # Top-level group names containing "TX" (case-insensitive).
        return [item for item in self._fid if "TX" in item.upper()]

    @property
    def frequency(self):
        # RF frequency [Hz], read from the first cut's attributes.
        return self._from_cut_attrs("frequency")

    @property
    def timestamp(self):
        # Attribute may be stored as bytes (older files) or str.
        tm = self._from_cut_attrs("timestamp")
        try:
            return tm.decode()
        except AttributeError:
            return tm

    @property
    def version(self):
        # Attribute may be stored as bytes (older files) or str.
        vr = self._from_cut_attrs("version")
        try:
            return vr.decode()
        except AttributeError:
            return vr

    @property
    def tag(self):
        # Attribute may be stored as bytes (older files) or str.
        tg = self._from_cut_attrs("tag")
        try:
            return tg.decode()
        except AttributeError:
            return tg

    @property
    def frame(self):
        # isce3 antenna Frame built from the file's spherical grid type.
        return ant.Frame(self._gridtype())

    def num_beams(self, pol='H'):
        """Number of individual [RX] beams for each pol.

        Parameters
        ----------
        pol : str, default='H'
            Polarization of the beam , either
            `H` or `V'. It is case insensitive.

        Returns
        -------
        int
            Number of beams for the `pol`.

        Raises
        ------
        ValueError
            For bad `pol` value.
        """
        pol = self._check_pol(pol)
        # Exclude composite "DBF" beams; count only individual RX beams
        # whose name carries the requested polarization letter.
        return len([rx for rx in self.rx_beams if 'DBF' not in
                    rx.upper() and pol in rx.upper()])

    def el_cut(self, beam=1, pol='H'):
        """Parse an Elevation cut pattern from a `RX` beam.

        Parse individual RX Elevation-cut 1-D pattern for
        a desired polarization `pol` and beam number `beam`.

        Parameters
        ----------
        beam : int, default=1
            Beam number starting from one.
        pol : str, default='H'
            Polarization of the beam , either
            `H` or `V'. It is case insensitive.

        Returns
        -------
        cl.namedtuple
            angle : np.ndarray (float or complex)
                Elevation angles in radians.
            copol_pattern : np.ndarray (float or complex)
                Co-pol 1-D elevation pattern in V/m.
            cxpol_pattern : np.ndarray (float or complex)
                Cross-pol 1-D elevation pattern in V/m.
                None if there no x-pol pattern!
            cut_angle : float
                Azimuth angle in radians for obtaining elevation cut.

        Raises
        ------
        ValueError
            For bad input arguments
        RuntimeError
            For missing fields/attributes in HDF5
        """
        return self._get_ang_cut(beam, pol, 'elevation')

    def az_cut(self, beam=1, pol='H'):
        """Parse an Azimuth cut pattern from a `RX` beam.

        Parse individual RX Azimuth-cut 1-D pattern for
        a desired polarization `pol` and beam number `beam`.

        Parameters
        ----------
        beam : int, default=1
            Beam number starting from one.
        pol : str, default='H'
            Polarization of the beam , either
            `H` or `V'. It is case insensitive.

        Returns
        -------
        cl.namedtuple
            angle : np.ndarray (float or complex)
                Azimuth angles in radians.
            copol_pattern : np.ndarray (float or complex)
                Co-pol 1-D azimuth pattern in V/m.
            cxpol_pattern : np.ndarray (float or complex)
                Cross-pol 1-D azimuth pattern in V/m.
                None if there no x-pol pattern!
            cut_angle : float
                Elevation angle in radians for obtaining azimuth cut.

        Raises
        ------
        ValueError
            For bad input arguments
        RuntimeError
            For missing fields/attributes in HDF5
        """
        return self._get_ang_cut(beam, pol, 'azimuth')

    def el_cut_all(self, pol='H'):
        """Parse all Co-pol EL cuts.

        Get all uniformly-spaced EL cuts of co-pol store them in a matrix
        with shape `num_beams` by `number of angles`. The number of
        uniformly-spaced angles is determined by min, max angles from first
        and last beams and the spacing from the first beam.

        Parameters
        ----------
        pol : str, default='H'
            Polarization , either 'H' or 'V'. It is case-insensitive!

        Returns
        -------
        cl.namedtuple
            angle : np.ndarray (float)
                Uniformly-spaced elevation angles in radians.
            copol_pattern : np.ndarray (float or complex)
                Interpolated co-pol 1-D elevation pattern in V/m with
                shape (number-of-beams, number-of-EL-angles).
            cut_angle : float
                Mean azimuth angle in radians from which
                elevation patterns are obtained.

        Raises
        ------
        ValueError
            For bad `pol` value.
        """
        num_beam = self.num_beams(pol)
        # determine full angular coverage with uniform spacing over all
        # beams: first beam gives start angle and spacing, last beam gives
        # the end angle (assumes beams are ordered by increasing angle).
        beam_first = self._get_ang_cut(
            1, pol, 'elevation', out_keys=("angle", "copol_pattern"))
        if num_beam > 1:
            beam_last = self._get_ang_cut(
                num_beam, pol, 'elevation', out_keys=("angle",))
        else:
            beam_last = beam_first
        num_ang = int(np.ceil((beam_last.angle[-1] - beam_first.angle[0]) / (
            beam_first.angle[1] - beam_first.angle[0]))) + 1
        # linearly interpolate each beam over full angular coverage with out
        # of range values filled with float or complex zero.
        out = {}
        out["angle"] = np.linspace(
            beam_first.angle[0], beam_last.angle[-1], num_ang)
        out["copol_pattern"] = np.zeros((num_beam, num_ang),
                                       beam_first.copol_pattern.dtype)
        out_of_range_val = 0.0
        cut_ang_ave = 0.0
        for nn in range(num_beam):
            beam = self._get_ang_cut(
                nn + 1, pol, 'elevation', out_keys=("angle", "copol_pattern"))
            out["copol_pattern"][nn, :] = np.interp(
                out["angle"], beam.angle, beam.copol_pattern,
                left=out_of_range_val, right=out_of_range_val)
            cut_ang_ave += beam.cut_angle
        # report the average azimuth cut angle over all beams
        out["cut_angle"] = cut_ang_ave / num_beam
        return cl.namedtuple('el_cut', out)(*out.values())

    # Helper functions listed below this line

    def _check_pol(self, pol: str) -> str:
        """Check and get upper-case polarization type."""
        pol = pol.upper()
        if pol not in ['H', 'V']:
            raise ValueError("'pol' shall be either 'H' or 'V'")
        return pol

    def _from_cut_attrs(self, attr_name: str):
        """Get a value from cut attribute.

        Reads `attr_name` from the attributes of the first cut group of
        the first RX beam (file-wide metadata is assumed to be replicated
        there — TODO confirm against the file spec).
        """
        first_cut_name = list(self._fid[self.rx_beams[0]])[0]
        cut_attr_obj = self._fid[self.rx_beams[0] + '/'
                                 + first_cut_name].attrs
        try:
            return cut_attr_obj[attr_name]
        except KeyError:
            raise RuntimeError(
                f"'{attr_name}' not found in attribute of '{first_cut_name}'!")

    def _get_ang_cut(self, beam: int, pol: str, cut_name: str,
                     out_keys: tuple = ("angle", "copol_pattern",
                                        "cxpol_pattern"),
                     ang_attr: str = "cut_angle") -> cl.namedtuple:
        """Get angle and co/cross 1-D patterns.

        Builds a namedtuple from the datasets named in `out_keys` under
        the HDF5 group 'RX{beam:02d}{pol}/{cut_name}', plus the `ang_attr`
        attribute taken from the "angle" dataset.
        """
        pol = self._check_pol(pol)
        # get number of beams
        num_beam = self.num_beams(pol)
        if num_beam == 0:
            raise ValueError(f"There is no individual pattern for pol {pol}")
        if (beam < 1 or beam > num_beam):
            raise ValueError(f"'beam' shall be within [1,{num_beam}]")
        grp_cut = self._fid[f'RX{beam:02d}{pol}/{cut_name}']
        out = dict.fromkeys(out_keys)
        for key in out_keys:
            # [()] reads the whole dataset into memory as a numpy scalar/array
            out[key] = grp_cut.get(key)[()]
            if ang_attr and key == "angle":
                out[ang_attr] = grp_cut[key].attrs.get(ang_attr)
        return cl.namedtuple(cut_name+'_cut', out)(*out.values())

    def _gridtype(self) -> str:
        """Get spherical grid type.

        Normalizes dashes to underscores (e.g. 'EL-AND-AZ' -> 'EL_AND_AZ');
        handles both bytes and str storage.
        """
        grd = self._from_cut_attrs("grid_type")
        try:
            return grd.decode().replace('-', '_')
        except AttributeError:
            return grd.replace('-', '_')
| [
"numpy.ceil",
"collections.namedtuple",
"h5py.File",
"numpy.linspace",
"numpy.zeros",
"numpy.interp"
] | [((1036, 1093), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""', 'libver': '"""latest"""', 'swmr': '(True)'}), "(filename, mode='r', libver='latest', swmr=True)\n", (1045, 1093), False, 'import h5py\n'), ((7225, 7287), 'numpy.linspace', 'np.linspace', (['beam_first.angle[0]', 'beam_last.angle[-1]', 'num_ang'], {}), '(beam_first.angle[0], beam_last.angle[-1], num_ang)\n', (7236, 7287), True, 'import numpy as np\n'), ((7332, 7393), 'numpy.zeros', 'np.zeros', (['(num_beam, num_ang)', 'beam_first.copol_pattern.dtype'], {}), '((num_beam, num_ang), beam_first.copol_pattern.dtype)\n', (7340, 7393), True, 'import numpy as np\n'), ((7686, 7793), 'numpy.interp', 'np.interp', (["out['angle']", 'beam.angle', 'beam.copol_pattern'], {'left': 'out_of_range_val', 'right': 'out_of_range_val'}), "(out['angle'], beam.angle, beam.copol_pattern, left=\n out_of_range_val, right=out_of_range_val)\n", (7695, 7793), True, 'import numpy as np\n'), ((7930, 7958), 'collections.namedtuple', 'cl.namedtuple', (['"""el_cut"""', 'out'], {}), "('el_cut', out)\n", (7943, 7958), True, 'import collections as cl\n'), ((9683, 9720), 'collections.namedtuple', 'cl.namedtuple', (["(cut_name + '_cut')", 'out'], {}), "(cut_name + '_cut', out)\n", (9696, 9720), True, 'import collections as cl\n'), ((6930, 7032), 'numpy.ceil', 'np.ceil', (['((beam_last.angle[-1] - beam_first.angle[0]) / (beam_first.angle[1] -\n beam_first.angle[0]))'], {}), '((beam_last.angle[-1] - beam_first.angle[0]) / (beam_first.angle[1] -\n beam_first.angle[0]))\n', (6937, 7032), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 15:21:31 2019
@author: qinayan
"""
import os
import numpy as np
def find_DEM_bn(eta):
    """Locate the boundary cells of a DEM and their exterior "ghost" cells.

    A cell is a boundary cell when its elevation is positive while at
    least one of its 8 neighbours is negative (outside the DEM domain).
    The index arrays are saved as .npy files under ./GeoData/ relative to
    the current working directory, and also returned.

    Parameters
    ----------
    eta : np.ndarray
        2-D elevation array; cells with eta <= 0 are outside the domain.

    Returns
    -------
    tuple of np.ndarray
        (unique boundary ids, boundary ids with duplicates,
        ghost-neighbour ids with duplicates) — flattened (row-major)
        grid indices.
    """
    nrows, ncols = eta.shape
    eta_vector = eta.flatten() * 1.0
    valid_ids = np.where(eta_vector > 0)[0]
    # 8-neighbour offsets (row, col)
    xn = [-1, 0, 1, -1, 1, -1, 0, 1]
    yn = [1, 1, 1, 0, 0, -1, -1, -1]
    bn_ID = []
    ghostbn_ID = []
    for i in valid_ids:
        irows = i // ncols
        icols = i % ncols
        for n in range(8):
            r = irows + xn[n]
            c = icols + yn[n]
            if 0 <= r <= nrows - 1 and 0 <= c <= ncols - 1:
                if eta[r, c] < 0.0:
                    # a boundary cell is appended once per exterior
                    # neighbour; duplicates are kept deliberately so the
                    # two *_dup arrays stay aligned
                    bn_ID.append(irows * ncols + icols)
                    ghostbn_ID.append(r * ncols + c)
    # Order-preserving de-duplication in O(n); the previous
    # `item not in list` loop was O(n^2).
    bn_ID_unique_arr = np.array(list(dict.fromkeys(bn_ID)))
    bn_ID_arr = np.array(bn_ID)
    ghostbn_ID_arr = np.array(ghostbn_ID)
    current_dir = os.getcwd()
    os.chdir(os.path.join(current_dir, 'GeoData'))
    try:
        np.save('DEM_Sf_bn_id', bn_ID_unique_arr)
        # save the IDs without deleting duplicate numbers
        np.save('DEM_Sf_bn_id_dup', bn_ID_arr)
        np.save('DEM_Sf_ghostbn_id_dup', ghostbn_ID_arr)
    finally:
        # always restore the caller's working directory, even if a save fails
        os.chdir(current_dir)
    return (bn_ID_unique_arr, bn_ID_arr, ghostbn_ID_arr)
"numpy.where",
"os.getcwd",
"os.chdir",
"numpy.array",
"numpy.save"
] | [((1201, 1223), 'numpy.array', 'np.array', (['bn_ID_unique'], {}), '(bn_ID_unique)\n', (1209, 1223), True, 'import numpy as np\n'), ((1244, 1259), 'numpy.array', 'np.array', (['bn_ID'], {}), '(bn_ID)\n', (1252, 1259), True, 'import numpy as np\n'), ((1285, 1305), 'numpy.array', 'np.array', (['ghostbn_ID'], {}), '(ghostbn_ID)\n', (1293, 1305), True, 'import numpy as np\n'), ((1329, 1340), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1338, 1340), False, 'import os\n'), ((1345, 1380), 'os.chdir', 'os.chdir', (["(current_dir + '/GeoData/')"], {}), "(current_dir + '/GeoData/')\n", (1353, 1380), False, 'import os\n'), ((1389, 1430), 'numpy.save', 'np.save', (['"""DEM_Sf_bn_id"""', 'bn_ID_unique_arr'], {}), "('DEM_Sf_bn_id', bn_ID_unique_arr)\n", (1396, 1430), True, 'import numpy as np\n'), ((1488, 1526), 'numpy.save', 'np.save', (['"""DEM_Sf_bn_id_dup"""', 'bn_ID_arr'], {}), "('DEM_Sf_bn_id_dup', bn_ID_arr)\n", (1495, 1526), True, 'import numpy as np\n'), ((1536, 1584), 'numpy.save', 'np.save', (['"""DEM_Sf_ghostbn_id_dup"""', 'ghostbn_ID_arr'], {}), "('DEM_Sf_ghostbn_id_dup', ghostbn_ID_arr)\n", (1543, 1584), True, 'import numpy as np\n'), ((1593, 1614), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (1601, 1614), False, 'import os\n'), ((299, 323), 'numpy.where', 'np.where', (['(eta_vector > 0)'], {}), '(eta_vector > 0)\n', (307, 323), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics.scorer import _BaseScorer
from solnml.components.utils.constants import CLS_TASKS, IMG_CLS
from solnml.datasets.base_dl_dataset import DLDataset
from solnml.components.evaluators.base_dl_evaluator import get_estimator_with_parameters
from solnml.components.ensemble.dl_ensemble.base_ensemble import BaseEnsembleModel
from solnml.components.models.img_classification.nn_utils.nn_aug.aug_hp_space import get_test_transforms
from functools import reduce
class Bagging(BaseEnsembleModel):
    """Bagging ensemble over already-trained deep-learning models.

    Member models are not trained here: `fit` is a no-op, and `predict`
    reloads each saved estimator (via its stored config and timestamp)
    and averages the per-sample predictions of all members.
    """

    def __init__(self, stats,
                 ensemble_size: int,
                 task_type: int,
                 max_epoch: int,
                 metric: _BaseScorer,
                 timestamp: float,
                 output_dir=None,
                 device='cpu', **kwargs):
        # Delegate bookkeeping to the base class with a fixed
        # ensemble_method of 'bagging'.
        super().__init__(stats=stats,
                         ensemble_method='bagging',
                         ensemble_size=ensemble_size,
                         task_type=task_type,
                         max_epoch=max_epoch,
                         metric=metric,
                         timestamp=timestamp,
                         output_dir=output_dir,
                         device=device)
        if self.task_type == IMG_CLS:
            # Image classification requires an explicit input size;
            # KeyError here means the caller forgot to pass image_size.
            self.image_size = kwargs['image_size']

    def fit(self, train_data):
        # Do nothing, models has been trained and saved.
        return self

    def predict(self, test_data: DLDataset, mode='test'):
        """Average the predictions of all saved member models.

        Parameters
        ----------
        test_data : DLDataset
            Dataset wrapper providing test/train/val datasets.
        mode : str, default='test'
            'test' predicts on the test split; any other value predicts
            on the validation data (subset sampler or val dataset).

        Returns
        -------
        np.ndarray
            Per-sample mean of member predictions (class probabilities
            for classification tasks, raw predictions otherwise).
        """
        model_pred_list = list()
        final_pred = list()
        model_cnt = 0
        for algo_id in self.stats["include_algorithms"]:
            model_configs = self.stats[algo_id]['model_configs']
            for idx, config in enumerate(model_configs):
                if self.task_type == IMG_CLS:
                    # Image data must be (re)loaded with the transforms
                    # matching this particular model's config.
                    test_transforms = get_test_transforms(config, image_size=self.image_size)
                    test_data.load_test_data(test_transforms)
                    test_data.load_data(test_transforms, test_transforms)
                else:
                    test_data.load_test_data()
                    test_data.load_data()
                # Pick the dataset used to rebuild the estimator.
                if mode == 'test':
                    dataset = test_data.test_dataset
                else:
                    if test_data.subset_sampler_used:
                        dataset = test_data.train_dataset
                    else:
                        dataset = test_data.val_dataset
                # Reload the trained model identified by (config, timestamp).
                estimator = get_estimator_with_parameters(self.task_type, config, self.max_epoch,
                                                          dataset, self.timestamp, device=self.device)
                # Classification averages probabilities; regression-like
                # tasks average raw predictions.
                if self.task_type in CLS_TASKS:
                    if mode == 'test':
                        model_pred_list.append(estimator.predict_proba(test_data.test_dataset))
                    else:
                        if test_data.subset_sampler_used:
                            model_pred_list.append(
                                estimator.predict_proba(test_data.train_dataset, sampler=test_data.val_sampler))
                        else:
                            model_pred_list.append(estimator.predict_proba(test_data.val_dataset))
                else:
                    if mode == 'test':
                        model_pred_list.append(estimator.predict(test_data.test_dataset))
                    else:
                        if test_data.subset_sampler_used:
                            model_pred_list.append(
                                estimator.predict(test_data.train_dataset, sampler=test_data.val_sampler))
                        else:
                            model_pred_list.append(estimator.predict(test_data.val_dataset))
                model_cnt += 1
        # Calculate the average of predictions
        for i in range(len(model_pred_list[0])):
            sample_pred_list = [model_pred[i] for model_pred in model_pred_list]
            pred_average = reduce(lambda x, y: x + y, sample_pred_list) / len(sample_pred_list)
            final_pred.append(pred_average)
        return np.array(final_pred)

    def get_ens_model_info(self):
        # Not implemented for bagging ensembles.
        raise NotImplementedError
| [
"functools.reduce",
"numpy.array",
"solnml.components.models.img_classification.nn_utils.nn_aug.aug_hp_space.get_test_transforms",
"solnml.components.evaluators.base_dl_evaluator.get_estimator_with_parameters"
] | [((4078, 4098), 'numpy.array', 'np.array', (['final_pred'], {}), '(final_pred)\n', (4086, 4098), True, 'import numpy as np\n'), ((2462, 2580), 'solnml.components.evaluators.base_dl_evaluator.get_estimator_with_parameters', 'get_estimator_with_parameters', (['self.task_type', 'config', 'self.max_epoch', 'dataset', 'self.timestamp'], {'device': 'self.device'}), '(self.task_type, config, self.max_epoch,\n dataset, self.timestamp, device=self.device)\n', (2491, 2580), False, 'from solnml.components.evaluators.base_dl_evaluator import get_estimator_with_parameters\n'), ((3949, 3993), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'sample_pred_list'], {}), '(lambda x, y: x + y, sample_pred_list)\n', (3955, 3993), False, 'from functools import reduce\n'), ((1826, 1881), 'solnml.components.models.img_classification.nn_utils.nn_aug.aug_hp_space.get_test_transforms', 'get_test_transforms', (['config'], {'image_size': 'self.image_size'}), '(config, image_size=self.image_size)\n', (1845, 1881), False, 'from solnml.components.models.img_classification.nn_utils.nn_aug.aug_hp_space import get_test_transforms\n')] |
"""
Created on Tue Jul 28 12:19:11 2020
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras import backend as K
import horovod.tensorflow.keras as hvd
############
# Parameters
############
# Date tag used in the saved model / history filenames.
date='14_12'
# Training data dimensions
n_rows = 79
n_col = 69
# Data format (channels_last)
input_shape = (n_rows, n_col, 1)
output_shape = (n_rows, n_col, 3)
input_dir = "//home/mrmn/letoumelinl/train"
output_dir = "//scratch/mrmn/letoumelinl/ARPS/"
# Number of filters per convolution layer
n_conv_features = 32
# Loss function: mse = mean squared error
loss="mse"
############
# GPU
############
GPU = True
if GPU:
    # Fail fast if TensorFlow cannot see a GPU.
    device_name = tf.test.gpu_device_name()
    if device_name != '/device:GPU:0':
        raise SystemError('GPU device not found')
    print('Found GPU at: {}'.format(device_name))
    # Horovod: initialize Horovod.
    hvd.init()
    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        # Allocate GPU memory on demand instead of reserving it all up front.
        tf.config.experimental.set_memory_growth(gpu, True)
    if gpus:
        tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
############
# Functions
############
def root_mse(y_true, y_pred):
    """Root-mean-squared-error metric built from Keras backend ops."""
    squared_error = K.square(y_true - y_pred)
    return K.sqrt(K.mean(squared_error))
def build_model(input_shape):
    """Build the convolution/deconvolution (encoder-decoder) Keras model.

    The encoder stacks Conv2D/MaxPool blocks (filter counts 1x, 2x, 4x, 8x
    `n_conv_features`); the decoder mirrors it with Conv2DTranspose/UpSampling2D
    blocks back down to 3 output channels. ZeroPadding2D/Cropping2D adjust for
    the odd input dimensions (79 x 69) so the output matches the input grid.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input sample, e.g. (n_rows, n_col, 1).

    Returns
    -------
    tensorflow.keras.models.Sequential
        The uncompiled model (also printed via ``model.summary()``).
    """
    model = Sequential()
    # Pad 79x69 -> 80x70 so three 2x2 poolings divide evenly.
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1)), input_shape=input_shape))
    # CONVOLUTION
    model.add(Conv2D(n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(Conv2D(n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    model.add(Conv2D(2*n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(Conv2D(2*n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    model.add(Conv2D(4*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(Conv2D(4*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    model.add(Conv2D(8*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(Conv2D(8*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    # DECONVOLUTION
    model.add(Conv2DTranspose(8*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(Conv2DTranspose(8*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    model.add(UpSampling2D(size=(2, 2)))
    # Re-pad the odd column dimension lost during pooling.
    model.add(ZeroPadding2D(padding=((0, 0), (0, 1))))
    model.add(Conv2DTranspose(4*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(Conv2DTranspose(4*n_conv_features, (3, 3), activation='relu', padding="same"))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(ZeroPadding2D(padding=((0, 0), (0, 1))))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    model.add(Conv2DTranspose(2*n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(Conv2DTranspose(2*n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    model.add(Conv2DTranspose(n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(Conv2DTranspose(n_conv_features, (5, 5), activation='relu', padding="same"))
    model.add(Dropout(0.25))
    # Output tensor: 3 channels, linear activation (regression targets).
    model.add(Conv2DTranspose(3, (5, 5), activation='linear', padding="same"))
    # Crop the initial padding back off: output grid matches the input grid.
    model.add(Cropping2D(cropping=((0, 1), (0, 1))))
    # Network summary
    model.summary()
    return(model)
def train_model(model, root_mse, input_dir, txt):
    """Train `model` over 8 cross-validation folds with Horovod data parallelism.

    For each fold the model weights are reset from 'weights.h5', the model is
    recompiled with a Horovod-distributed RMSprop optimizer (learning rate
    scaled by the worker count), the fold's topography/wind arrays are loaded
    from ``output_dir``, reshaped to Keras (samples, rows, cols, channels)
    format and normalised with the *training* mean/std, and the model is fitted
    for 150 epochs. The fitted model and its history are saved per fold.

    Parameters
    ----------
    model : keras model
        Model returned by ``build_model``; must have saved 'weights.h5'
        available for the per-fold reset (see commented-out lines below).
    root_mse : callable
        Metric function compiled into the model.
    txt : bool
        If True, read the fold data as text (``np.loadtxt``); otherwise read
        the binary ``.npy`` files.

    Returns
    -------
    tuple
        (model, history) of the *last* fold only.

    NOTE(review): `input_dir` is accepted but never used here — data is read
    from the module-level ``output_dir``; confirm whether that is intended.
    """
    #model.compile(loss=loss, optimizer=RMSprop(lr=0.001, decay=0.0001), metrics=['mae', root_mse])
    #model.save_weights('weights.h5')
    for fold in range(8):
        print('Fold' + str(fold))
        # Reset to the initial weights so every fold starts from scratch.
        model.load_weights('weights.h5')
        # Horovod: adjust learning rate based on number of GPUs.
        scaled_lr = 0.001 * hvd.size()
        opt = tf.optimizers.RMSprop(scaled_lr)
        # Horovod: add Horovod DistributedOptimizer.
        opt = hvd.DistributedOptimizer(
            opt)
        model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['mae', root_mse],
                      experimental_run_tf_function=False)
        # Path to the data for this fold (pre-processed into numpy format)
        filepath = output_dir + "fold{}/".format(fold)
        # Load the data
        print("LOADING DATA")
        if txt:
            TOPO_TRAIN = np.loadtxt(filepath + "train/topo.txt", dtype=np.float32)
            WIND_TRAIN = np.loadtxt(filepath + "train/wind.txt", dtype=np.float32)
            TOPO_VALID = np.loadtxt(filepath + "validation/topo.txt", dtype=np.float32)
            WIND_VALID = np.loadtxt(filepath + "validation/wind.txt", dtype=np.float32)
        else:
            TOPO_TRAIN = np.load(filepath + "train/topo.npy")
            WIND_TRAIN = np.load(filepath + "train/wind.npy")
            TOPO_VALID = np.load(filepath + "validation/topo.npy")
            WIND_VALID = np.load(filepath + "validation/wind.npy")
        # Print the raw dimensions
        print("Before reshaping")
        print("Training shape: ")
        print(TOPO_TRAIN.shape)
        print(WIND_TRAIN.shape)
        print("Validation shape: ")
        print(TOPO_VALID.shape)
        print(WIND_VALID.shape)
        # Reshape the raw data to the Keras (samples, rows, cols, channels) format
        x_train = TOPO_TRAIN.reshape((TOPO_TRAIN.shape[0], * input_shape))
        y_train = WIND_TRAIN.reshape((WIND_TRAIN.shape[0], * output_shape))
        x_val = TOPO_VALID.reshape((TOPO_VALID.shape[0], * input_shape))
        y_val = WIND_VALID.reshape((WIND_VALID.shape[0], * output_shape))
        print("\n\nAfter reshaping:\n")
        print("Training shape: ")
        print(x_train.shape)
        print(y_train.shape)
        print("Validation shape: ")
        print(np.shape(x_val))
        print(np.shape(y_val))
        # Feature normalisation (validation uses the training statistics).
        train_mean, train_std = np.mean(x_train), np.std(x_train)
        x_train = (x_train - train_mean)/train_std
        x_val = (x_val - train_mean)/train_std
        # Callbacks used during training
        # NOTE(review): this rebinds `filepath` (previously the fold data path)
        # to the checkpoint file name — consider a distinct name.
        filepath="checkpoint.hdf5"
        callbacks = [
            # Horovod: broadcast initial variable states from rank 0 to all other processes.
            # This is necessary to ensure consistent initialization of all workers when
            # training is started with random weights or restored from a checkpoint.
            hvd.callbacks.BroadcastGlobalVariablesCallback(0),
            # Horovod: average metrics among workers at the end of every epoch.
            #
            # Note: This callback must be in the list before the ReduceLROnPlateau,
            # TensorBoard or other metrics-based callbacks.
            hvd.callbacks.MetricAverageCallback(),
            # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
            # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
            # the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
            hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
            ReduceLROnPlateau(monitor='loss', factor=0.1, patience=5, min_lr=1e-10, verbose=1)
        ]
        # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
        if hvd.rank() == 0:
            callbacks.append(ModelCheckpoint(filepath,
                                             monitor='loss',
                                             verbose=0,
                                             save_best_only=True,
                                             mode='min'))
        # Horovod: write logs on worker 0.
        verbose = 1 if hvd.rank() == 0 else 0
        # TRAIN THE NETWORK
        history = model.fit(x_train,
                            y_train,
                            batch_size=32,
                            steps_per_epoch=500 // hvd.size(),
                            epochs=150,
                            verbose=verbose,
                            validation_data=(x_val, y_val),
                            callbacks=callbacks)
        # Save the model and its training history for this fold
        model.save(output_dir+'model_'+date+'_fold_{}.h5'.format(fold))
        np.save(output_dir+'model_'+date+'_fold_{}_history.npy'.format(fold), history.history)
    return(model, history)
if GPU:
    # Time the full build + 8-fold training run.
    start = time.perf_counter()
    model = build_model(input_shape)
    # txt=False: read the binary .npy version of the training data.
    _, history = train_model(model, root_mse, input_dir, False)
    finish = time.perf_counter()
    print(f'\nFinished in {round((finish-start)/60, 2)} minute(s)')
| [
"horovod.tensorflow.keras.size",
"tensorflow.keras.layers.BatchNormalization",
"horovod.tensorflow.keras.callbacks.MetricAverageCallback",
"horovod.tensorflow.keras.callbacks.LearningRateWarmupCallback",
"horovod.tensorflow.keras.init",
"horovod.tensorflow.keras.rank",
"numpy.mean",
"tensorflow.keras.... | [((1339, 1349), 'horovod.tensorflow.keras.init', 'hvd.init', ([], {}), '()\n', (1347, 1349), True, 'import horovod.tensorflow.keras as hvd\n'), ((1432, 1483), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1476, 1483), True, 'import tensorflow as tf\n'), ((1140, 1165), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (1163, 1165), True, 'import tensorflow as tf\n'), ((1505, 1556), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (1545, 1556), True, 'import tensorflow as tf\n'), ((1816, 1828), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1826, 1828), False, 'from tensorflow.keras.models import Sequential\n'), ((9690, 9709), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9707, 9709), False, 'import time\n'), ((9824, 9843), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9841, 9843), False, 'import time\n'), ((1844, 1908), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '((0, 1), (0, 1))', 'input_shape': 'input_shape'}), '(padding=((0, 1), (0, 1)), input_shape=input_shape)\n', (1857, 1908), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((1943, 2009), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_conv_features', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(n_conv_features, (5, 5), activation='relu', padding='same')\n", (1949, 2009), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2025, 2091), 
'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_conv_features', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(n_conv_features, (5, 5), activation='relu', padding='same')\n", (2031, 2091), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2107, 2134), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2116, 2134), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2150, 2170), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2168, 2170), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2186, 2199), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2193, 2199), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2216, 2286), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(2 * n_conv_features)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(2 * n_conv_features, (5, 5), activation='relu', padding='same')\n", (2222, 2286), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2300, 2370), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(2 * n_conv_features)', '(5, 5)'], {'activation': '"""relu"""', 'padding': 
'"""same"""'}), "(2 * n_conv_features, (5, 5), activation='relu', padding='same')\n", (2306, 2370), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2384, 2411), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2393, 2411), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2427, 2447), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2445, 2447), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2463, 2476), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2470, 2476), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2493, 2563), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(4 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (2499, 2563), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2577, 2647), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(4 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (2583, 2647), False, 'from 
tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2661, 2688), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2670, 2688), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2704, 2724), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2722, 2724), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2740, 2753), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2747, 2753), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2770, 2840), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(8 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(8 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (2776, 2840), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2854, 2924), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(8 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(8 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (2860, 2924), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, 
UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2938, 2958), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2956, 2958), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((2974, 2987), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2981, 2987), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3024, 3103), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(8 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(8 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (3039, 3103), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3117, 3196), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(8 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(8 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (3132, 3196), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3210, 3230), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3228, 3230), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3246, 3259), 
'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3253, 3259), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3276, 3301), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (3288, 3301), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3317, 3356), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '((0, 0), (0, 1))'}), '(padding=((0, 0), (0, 1)))\n', (3330, 3356), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3373, 3452), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(4 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(4 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (3388, 3452), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3466, 3545), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(4 * n_conv_features)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(4 * n_conv_features, (3, 3), activation='relu', padding='same')\n", (3481, 3545), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3559, 3584), 'tensorflow.keras.layers.UpSampling2D', 
'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (3571, 3584), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3600, 3639), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '((0, 0), (0, 1))'}), '(padding=((0, 0), (0, 1)))\n', (3613, 3639), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3655, 3675), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3673, 3675), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3691, 3704), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3698, 3704), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3721, 3800), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(2 * n_conv_features)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(2 * n_conv_features, (5, 5), activation='relu', padding='same')\n", (3736, 3800), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3814, 3893), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(2 * n_conv_features)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(2 * n_conv_features, (5, 5), activation='relu', padding='same')\n", 
(3829, 3893), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3907, 3932), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (3919, 3932), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3948, 3968), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3966, 3968), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((3984, 3997), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3991, 3997), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((4014, 4089), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['n_conv_features', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(n_conv_features, (5, 5), activation='relu', padding='same')\n", (4029, 4089), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((4105, 4180), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['n_conv_features', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(n_conv_features, (5, 5), activation='relu', padding='same')\n", (4120, 4180), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, 
Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((4196, 4209), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (4203, 4209), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((4249, 4312), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(3)', '(5, 5)'], {'activation': '"""linear"""', 'padding': '"""same"""'}), "(3, (5, 5), activation='linear', padding='same')\n", (4264, 4312), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((4328, 4365), 'tensorflow.keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((0, 1), (0, 1))'}), '(cropping=((0, 1), (0, 1)))\n', (4338, 4365), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D\n'), ((4864, 4896), 'tensorflow.optimizers.RMSprop', 'tf.optimizers.RMSprop', (['scaled_lr'], {}), '(scaled_lr)\n', (4885, 4896), True, 'import tensorflow as tf\n'), ((4973, 5002), 'horovod.tensorflow.keras.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {}), '(opt)\n', (4997, 5002), True, 'import horovod.tensorflow.keras as hvd\n'), ((1618, 1634), 'horovod.tensorflow.keras.local_rank', 'hvd.local_rank', ([], {}), '()\n', (1632, 1634), True, 'import horovod.tensorflow.keras as hvd\n'), ((1744, 1769), 'tensorflow.keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1752, 1769), True, 'from tensorflow.keras import backend as K\n'), ((4839, 4849), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (4847, 
4849), True, 'import horovod.tensorflow.keras as hvd\n'), ((5443, 5500), 'numpy.loadtxt', 'np.loadtxt', (["(filepath + 'train/topo.txt')"], {'dtype': 'np.float32'}), "(filepath + 'train/topo.txt', dtype=np.float32)\n", (5453, 5500), True, 'import numpy as np\n'), ((5526, 5583), 'numpy.loadtxt', 'np.loadtxt', (["(filepath + 'train/wind.txt')"], {'dtype': 'np.float32'}), "(filepath + 'train/wind.txt', dtype=np.float32)\n", (5536, 5583), True, 'import numpy as np\n'), ((5609, 5671), 'numpy.loadtxt', 'np.loadtxt', (["(filepath + 'validation/topo.txt')"], {'dtype': 'np.float32'}), "(filepath + 'validation/topo.txt', dtype=np.float32)\n", (5619, 5671), True, 'import numpy as np\n'), ((5697, 5759), 'numpy.loadtxt', 'np.loadtxt', (["(filepath + 'validation/wind.txt')"], {'dtype': 'np.float32'}), "(filepath + 'validation/wind.txt', dtype=np.float32)\n", (5707, 5759), True, 'import numpy as np\n'), ((5799, 5835), 'numpy.load', 'np.load', (["(filepath + 'train/topo.npy')"], {}), "(filepath + 'train/topo.npy')\n", (5806, 5835), True, 'import numpy as np\n'), ((5861, 5897), 'numpy.load', 'np.load', (["(filepath + 'train/wind.npy')"], {}), "(filepath + 'train/wind.npy')\n", (5868, 5897), True, 'import numpy as np\n'), ((5923, 5964), 'numpy.load', 'np.load', (["(filepath + 'validation/topo.npy')"], {}), "(filepath + 'validation/topo.npy')\n", (5930, 5964), True, 'import numpy as np\n'), ((5990, 6031), 'numpy.load', 'np.load', (["(filepath + 'validation/wind.npy')"], {}), "(filepath + 'validation/wind.npy')\n", (5997, 6031), True, 'import numpy as np\n'), ((6870, 6885), 'numpy.shape', 'np.shape', (['x_val'], {}), '(x_val)\n', (6878, 6885), True, 'import numpy as np\n'), ((6901, 6916), 'numpy.shape', 'np.shape', (['y_val'], {}), '(y_val)\n', (6909, 6916), True, 'import numpy as np\n'), ((7014, 7030), 'numpy.mean', 'np.mean', (['x_train'], {}), '(x_train)\n', (7021, 7030), True, 'import numpy as np\n'), ((7032, 7047), 'numpy.std', 'np.std', (['x_train'], {}), '(x_train)\n', (7038, 
7047), True, 'import numpy as np\n'), ((7551, 7600), 'horovod.tensorflow.keras.callbacks.BroadcastGlobalVariablesCallback', 'hvd.callbacks.BroadcastGlobalVariablesCallback', (['(0)'], {}), '(0)\n', (7597, 7600), True, 'import horovod.tensorflow.keras as hvd\n'), ((7873, 7910), 'horovod.tensorflow.keras.callbacks.MetricAverageCallback', 'hvd.callbacks.MetricAverageCallback', ([], {}), '()\n', (7908, 7910), True, 'import horovod.tensorflow.keras as hvd\n'), ((8238, 8332), 'horovod.tensorflow.keras.callbacks.LearningRateWarmupCallback', 'hvd.callbacks.LearningRateWarmupCallback', ([], {'initial_lr': 'scaled_lr', 'warmup_epochs': '(3)', 'verbose': '(1)'}), '(initial_lr=scaled_lr,\n warmup_epochs=3, verbose=1)\n', (8278, 8332), True, 'import horovod.tensorflow.keras as hvd\n'), ((8363, 8449), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.1)', 'patience': '(5)', 'min_lr': '(1e-10)', 'verbose': '(1)'}), "(monitor='loss', factor=0.1, patience=5, min_lr=1e-10,\n verbose=1)\n", (8380, 8449), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((8601, 8611), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (8609, 8611), True, 'import horovod.tensorflow.keras as hvd\n'), ((8647, 8736), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=0, save_best_only=True,\n mode='min')\n", (8662, 8736), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((8993, 9003), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (9001, 9003), True, 'import horovod.tensorflow.keras as hvd\n'), ((9221, 9231), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (9229, 9231), True, 'import horovod.tensorflow.keras as hvd\n')] |
from skillmodels.estimation.parse_params import parse_params
from skillmodels.estimation.parse_params import restore_unestimated_quantities
import numpy as np
from skillmodels.fast_routines.kalman_filters import normal_unscented_predict
from skillmodels.fast_routines.kalman_filters import sqrt_unscented_predict
from skillmodels.fast_routines.kalman_filters import normal_linear_update
from skillmodels.fast_routines.kalman_filters import sqrt_linear_update
from skillmodels.fast_routines.kalman_filters import normal_probit_update
from skillmodels.fast_routines.kalman_filters import sqrt_probit_update
from skillmodels.fast_routines.sigma_points import calculate_sigma_points
def log_likelihood_per_individual(
params, like_vec, parse_params_args, stagemap, nmeas_list, anchoring,
square_root_filters, update_types, update_args, predict_args,
calculate_sigma_points_args, restore_args):
"""Return the log likelihood for each individual in the sample.
Users do not have to call this function directly and do not have to bother
about its arguments but the function nicely shows how the likelihood
interpretation of the Kalman filter allow to break the large likelihood
problem of the model into many smaller problems.
First the params vector is parsed into the many quantities that depend on
it. See :ref:`params_and_quants` for details.
Then, for each period of the model first all Kalman updates for the
measurement equations are done. Each Kalman update updates the following
quantities:
* the state array X
* the covariance matrices P
* the likelihood vector like_vec
* the weights for the mixture distribution of the factors W
Then the predict step of the Unscented Kalman filter is applied. The
predict step propagates the following quantities to the next period:
* the state array X
* the covariance matrices P
In the last period an additional update is done to incorporate the
anchoring equation into the likelihood.
"""
like_vec[:] = 1.0
restore_unestimated_quantities(**restore_args)
parse_params(params, **parse_params_args)
k = 0
for t, stage in enumerate(stagemap):
for j in range(nmeas_list[t]):
# measurement updates
update(square_root_filters, update_types[k], update_args[k])
k += 1
if t < len(stagemap) - 1:
calculate_sigma_points(**calculate_sigma_points_args)
predict(stage, square_root_filters, predict_args)
if anchoring is True:
j += 1
# anchoring update
update(square_root_filters, update_types[k], update_args[k])
small = 1e-250
like_vec[like_vec < small] = small
return np.log(like_vec)
def update(square_root_filters, update_type, update_args):
"""Select and call the correct update function.
The actual update functions are implemented in several modules in
:ref:`fast_routines`
"""
if square_root_filters is True:
if update_type == 'linear':
sqrt_linear_update(*update_args)
else:
sqrt_probit_update(**update_args)
else:
if update_type == 'linear':
normal_linear_update(*update_args)
else:
normal_probit_update(**update_args)
def predict(stage, square_root_filters, predict_args):
"""Select and call the correct predict function.
The actual predict functions are implemented in several modules in
:ref:`fast_routines`
"""
if square_root_filters is True:
sqrt_unscented_predict(stage, **predict_args)
else:
normal_unscented_predict(stage, **predict_args)
| [
"skillmodels.fast_routines.kalman_filters.sqrt_unscented_predict",
"skillmodels.estimation.parse_params.restore_unestimated_quantities",
"skillmodels.fast_routines.kalman_filters.sqrt_probit_update",
"numpy.log",
"skillmodels.fast_routines.kalman_filters.normal_unscented_predict",
"skillmodels.fast_routin... | [((2098, 2144), 'skillmodels.estimation.parse_params.restore_unestimated_quantities', 'restore_unestimated_quantities', ([], {}), '(**restore_args)\n', (2128, 2144), False, 'from skillmodels.estimation.parse_params import restore_unestimated_quantities\n'), ((2149, 2190), 'skillmodels.estimation.parse_params.parse_params', 'parse_params', (['params'], {}), '(params, **parse_params_args)\n', (2161, 2190), False, 'from skillmodels.estimation.parse_params import parse_params\n'), ((2776, 2792), 'numpy.log', 'np.log', (['like_vec'], {}), '(like_vec)\n', (2782, 2792), True, 'import numpy as np\n'), ((3603, 3648), 'skillmodels.fast_routines.kalman_filters.sqrt_unscented_predict', 'sqrt_unscented_predict', (['stage'], {}), '(stage, **predict_args)\n', (3625, 3648), False, 'from skillmodels.fast_routines.kalman_filters import sqrt_unscented_predict\n'), ((3667, 3714), 'skillmodels.fast_routines.kalman_filters.normal_unscented_predict', 'normal_unscented_predict', (['stage'], {}), '(stage, **predict_args)\n', (3691, 3714), False, 'from skillmodels.fast_routines.kalman_filters import normal_unscented_predict\n'), ((2453, 2506), 'skillmodels.fast_routines.sigma_points.calculate_sigma_points', 'calculate_sigma_points', ([], {}), '(**calculate_sigma_points_args)\n', (2475, 2506), False, 'from skillmodels.fast_routines.sigma_points import calculate_sigma_points\n'), ((3095, 3127), 'skillmodels.fast_routines.kalman_filters.sqrt_linear_update', 'sqrt_linear_update', (['*update_args'], {}), '(*update_args)\n', (3113, 3127), False, 'from skillmodels.fast_routines.kalman_filters import sqrt_linear_update\n'), ((3154, 3187), 'skillmodels.fast_routines.kalman_filters.sqrt_probit_update', 'sqrt_probit_update', ([], {}), '(**update_args)\n', (3172, 3187), False, 'from skillmodels.fast_routines.kalman_filters import sqrt_probit_update\n'), ((3246, 3280), 'skillmodels.fast_routines.kalman_filters.normal_linear_update', 'normal_linear_update', 
(['*update_args'], {}), '(*update_args)\n', (3266, 3280), False, 'from skillmodels.fast_routines.kalman_filters import normal_linear_update\n'), ((3307, 3342), 'skillmodels.fast_routines.kalman_filters.normal_probit_update', 'normal_probit_update', ([], {}), '(**update_args)\n', (3327, 3342), False, 'from skillmodels.fast_routines.kalman_filters import normal_probit_update\n')] |
import numpy as np
import math
# Convert point-spread function to optical transfer function
def psf2otf(psf, outSize=None):
# Prepare psf for conversion
data = prepare_psf(psf, outSize)
# Compute the OTF
otf = np.fft.fftn(data)
return np.complex64(otf)
def prepare_psf(psf, outSize=None, dtype=None):
if not dtype:
dtype=np.float32
psf = np.float32(psf)
# Determine PSF / OTF shapes
psfSize = np.int32(psf.shape)
if not outSize:
outSize = psfSize
outSize = np.int32(outSize)
# Pad the PSF to outSize
new_psf = np.zeros(outSize, dtype=dtype)
new_psf[:psfSize[0],:psfSize[1]] = psf[:,:]
psf = new_psf
# Circularly shift the OTF so that PSF center is at (0,0)
#shift = -(psfSize / 2)
#psf = circshift(psf, shift)
return psf
# Circularly shift array
def circshift(A, shift):
print(shift.size)
for i in xrange(shift.size):
A = np.roll(A, shift[i], axis=i)
return A
| [
"numpy.roll",
"numpy.int32",
"numpy.fft.fftn",
"numpy.zeros",
"numpy.float32",
"numpy.complex64"
] | [((220, 237), 'numpy.fft.fftn', 'np.fft.fftn', (['data'], {}), '(data)\n', (231, 237), True, 'import numpy as np\n'), ((248, 265), 'numpy.complex64', 'np.complex64', (['otf'], {}), '(otf)\n', (260, 265), True, 'import numpy as np\n'), ((361, 376), 'numpy.float32', 'np.float32', (['psf'], {}), '(psf)\n', (371, 376), True, 'import numpy as np\n'), ((421, 440), 'numpy.int32', 'np.int32', (['psf.shape'], {}), '(psf.shape)\n', (429, 440), True, 'import numpy as np\n'), ((493, 510), 'numpy.int32', 'np.int32', (['outSize'], {}), '(outSize)\n', (501, 510), True, 'import numpy as np\n'), ((551, 581), 'numpy.zeros', 'np.zeros', (['outSize'], {'dtype': 'dtype'}), '(outSize, dtype=dtype)\n', (559, 581), True, 'import numpy as np\n'), ((886, 914), 'numpy.roll', 'np.roll', (['A', 'shift[i]'], {'axis': 'i'}), '(A, shift[i], axis=i)\n', (893, 914), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import mxnet as mx
import numpy as np
from mxnet import autograd
from NLP.src.training import utils
def train(train_data, model, loss, epochs, batch_size,
context, trainer, freeze_embedding=False):
"""
Train an RNN model, given the input list
param train_data: training data, 2D mx array of size (n, batch_size)
param model: RNN model
param loss: loss function, e.g. gluon.loss.SoftmaxCrossEntropyLoss()
param epochs: number of epochs to run
param batch_size: batch size
param context: cpu or (which) gpu
param trainer: trainer (optimizer)
param freeze_embedding: freeze training of embedding layers (input and output)
"""
if freeze_embedding:
for param in model.encoder.collect_params().values():
param.grad_req = 'null'
for param in model.decoder.collect_params().values():
param.grad_req = 'null'
loss_progress = []
for epoch in range(epochs):
total_loss = 0.0
hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size, ctx=context)
for i in range(0, train_data.shape[0] - 1):
data, target = utils.get_batch(train_data, i)
# need this to work? but it doesn't atm
# hidden = detach(hidden)
with autograd.record():
output, hidden = model(data, hidden)
L = loss(output, target)
L.backward()
trainer.step(batch_size)
total_loss += mx.nd.sum(L).asscalar()
# print and record loss every epoch
epoch_loss = total_loss / batch_size / i
print('[Epoch %d] loss %.2f' % (epoch + 1, epoch_loss), end='\r')
loss_progress.append(epoch_loss)
plt.plot(np.arange(epochs), loss_progress)
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('Loss')
| [
"mxnet.nd.sum",
"matplotlib.pyplot.grid",
"mxnet.autograd.record",
"NLP.src.training.utils.get_batch",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.arange"
] | [((1810, 1820), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1818, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1845), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (1835, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1868), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (1860, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1789), 'numpy.arange', 'np.arange', (['epochs'], {}), '(epochs)\n', (1781, 1789), True, 'import numpy as np\n'), ((1181, 1211), 'NLP.src.training.utils.get_batch', 'utils.get_batch', (['train_data', 'i'], {}), '(train_data, i)\n', (1196, 1211), False, 'from NLP.src.training import utils\n'), ((1319, 1336), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (1334, 1336), False, 'from mxnet import autograd\n'), ((1525, 1537), 'mxnet.nd.sum', 'mx.nd.sum', (['L'], {}), '(L)\n', (1534, 1537), True, 'import mxnet as mx\n')] |
#%% [markdown]
# # Contrastive Hebbian Learning
#
# Contrastive Hebbian Learning (CHL) is an algorithm that can be used to perform supervised learning in a neural network. It unites the power of backpropagation with the plausibility of Hebbian learning, and is thus a favorite of researchers in computational neuroscience. In terms of power, Xie (2006) has shown that, under most conditions, CHL is actually equivalent to backpropagation.
#
# ## "Learn" and "Unlearn" phases
#
# CHL works by performing Hebbian updates in two distinct phases, which are indicated as the __Learn__ (+) or the __Unlearn__ (-) phases. Synaptic weights are updated according to the difference of the two phases:
#
# $$
# w_{i,j} \leftarrow w_{i,j} + \eta (y^+_i y^+_j - y^-_i y^-_j)
# $$
#
# Where $y$ represents, as usual, the output of a neuron.
#
# ## Synchronous and Asynchronous
#
# In the canonical equation (above), the two terms $y^+_i y^+_j$ and $y^-_i y^-_j$ are computed at different times but updated at the same moment. Because of this, the canonical form is called __synchronous__. This form is efficient but implausible, because it requires storing the products $y^+_i y^+_j$ and $-y^-_i y^-_j$ until the update is performed.
#
# A more plausible alternative is to perform __asynchronous__ updates, with the product $y_i y_j$ because it is calculated and used immediately (just like in canonical Hebbian learning) with the sign of the update being dependent upon the phase.
#
# $$
# w_{i,j} \leftarrow w_{i,j} +
# \begin{cases}
# + \eta (y_i y_j) & \mathrm{if~phase~is~Learn} \\
# - \eta (y_i y_j) & \mathrm{if~phase~is~Unlearn}
# \end{cases}
# $$
#
# ## Connectivity
#
# Because of its very nature, CHL requires the network to be __recurrent__, that is, synaptic matrices that connect two adjacent layers both forward and backward.
#
# In turn, recurrent networks are intrinsically unstable, as they require multiple passes to converge towards a stable solution. The number of passes is sometimes used as a proxy for response times or similar behavioral measures.
#
# ## The Network
#
# The CHL version of the XOR network is defined in these few lines of code.
#%%
import numpy as np
import matplotlib.pyplot as plt
#%%
n_inputs = 2
n_hidden = 5
n_outputs = 1
eta = 0.2 # Learning rate.
# The patterns to learn
patterns = [[0, 0], [0, 1], [1, 0], [1, 1]]
#%% [markdown]
# Here are the functions that support the network
#%%
def logistic(x, deriv = False):
"""Sigmoid logistic function (with derivative)"""
if deriv:
return x * (1 - x)
else:
return 1 / (1 + np.exp(-x))
def set_inputs(pattern):
"""Sets a given XOR pattern into the input value"""
global x
x = np.array(pattern).reshape((1,n_inputs))
def set_outputs(vals):
"""Sets the output variables"""
global o
o = np.array(vals).reshape((1,n_outputs))
def set_hidden(vals):
"""Sets the output variables"""
global h
h = vals
def reset_hidden_to_rest():
set_hidden(np.zeros((1, n_hidden)))
def target(val):
"""Desired response function, t(p)"""
if val == [0, 1] or val == [1, 0]:
return np.array([[1.0]])
else:
return np.array([[0.0]])
def calculate_error(p1, p2):
"""Calculates the error function"""
return 0.5 * np.sum(((p1 - p2) ** 2))
def propagate(clamped_output = False):
"""Spreads activation through a network"""
global h
global o
# First propagate forward from input to hidden layer
h_input = x @ w_xh
# Then propagate backward from output to hidden layer
h_input += o @ w_ho.T
h = logistic(h_input)
if not clamped_output:
# Propagate from the hidden layer to the output layer
o_input = h @ w_ho
o = logistic(o_input)
def activation(clamped_output = False, convergence = 0.00001, max_cycles = 1000):
"""Repeatedly spreads activation through a network until it settles"""
reset_hidden_to_rest()
previous_h = np.copy(h)
propagate(clamped_output)
diff = calculate_error(previous_h, h)
i = 0
while diff > convergence and i < max_cycles:
previous_h = np.copy(h)
propagate(clamped_output)
diff = calculate_error(previous_h, h)
i += 1
return i
def calculate_response(p):
"""Calculate the response for a given network's input"""
set_inputs(p)
activation(False)
return np.sum(o)
def unlearn(p):
"""Negative, free phase. This is the 'expectation'."""
set_inputs(p)
activation(clamped_output = False)
def learn(p):
"""Positive, clamped phase. This is the 'confirmation'."""
set_inputs(p)
set_outputs(target(p))
activation(clamped_output = True)
def update_weights_positive():
"""Updates weights. Positive Hebbian update (learn)"""
global w_xh, w_ho
w_xh += eta * (x.T @ h)
w_ho += eta * (h.T @ o)
def update_weights_negative():
"""Updates weights. Negative Hebbian update (unlearn)"""
global w_xh, w_ho
w_xh -= eta * (x.T @ h)
w_ho -= eta * (h.T @ o)
def update_weights_synchronous(h_plus, h_minus, o_plus, o_minus):
"""Updates weights. Synchronous Hebbian update."""
global w_xh, w_ho
w_xh += eta * (x.T @ (h_plus - h_minus))
w_ho += eta * (h.T @ (o_plus - o_minus))
def asynchronous_chl(min_error = 0.001, max_epochs = 1000):
"""Learns associations by means applying CHL asynchronously"""
E = [min_error + 1] ## Initial error value > min_error
epochs = 0
while E[-1] > min_error and epochs < max_epochs:
e = 0.0
for p in patterns:
# I cannot get it to converge with positive phase first.
# Maybe that's ok. Movellan (1990) suggests it won't converge
# without negative phase first. Also, Leech PhD (2008)
# Simulation 5 does negative first, too.
# And so does Detorakis et al (2019).)
# negative phase (expectation)
unlearn(p)
update_weights_negative()
# positive phase (confirmation)
learn(p)
update_weights_positive()
# calculate and record error for this epoch
for p in patterns:
e += calculate_error(target(p), calculate_response(p))
E.append(e)
epochs += 1
return E[1:]
def synchronous_chl(min_error = 0.001, max_epochs = 1000):
"""Learns associations by means applying CHL synchronously"""
E = [min_error + 1] ## Initial error value > min_error
epochs = 0
while E[-1] > min_error and epochs < max_epochs:
e = 0.0
for p in patterns:
#positive phase (confirmation)
learn(p)
h_plus = np.copy(h)
o_plus = np.copy(o)
#negative phase (expectation)
unlearn(p)
h_minus = np.copy(h)
o_minus = np.copy(o)
update_weights_synchronous(h_plus, h_minus, o_plus, o_minus)
for p in patterns:
e += calculate_error(target(p), calculate_response(p))
E.append(e)
epochs += 1
return E[1:]
#%% [markdown]
# ### Test of CHL
#
# Here is a simple test of (asynchronous) CHL:
#%%
np.random.seed(1)
x = np.zeros((1, n_inputs)) # Input layer
h = np.zeros((1, n_hidden)) # Hidden layer
o = np.zeros((1, n_outputs)) # Output layer
w_xh = np.random.random((n_inputs, n_hidden)) * 2 - 1.0 # First layer of synapses between input and hidden
w_ho = np.random.random((n_hidden, n_outputs)) * 2 - 1.0 # Second layer of synapses between hidden and output
# The synchronous version works better with more hidden units (8 or 10, say) and learning rate 0.3
E = synchronous_chl(min_error = 0.01, max_epochs=10000)
if E[-1] < 0.01:
print(f'Convergeance reached after {len(E)} epochs.')
else:
print(f'Failed to converge after {len(E)} epochs.')
print(f'Final error = {E[-1]}.')
#%% [markdown]
# And here is a plot of the error function and the network's learned outputs
#%%
# Plot the Error by epoch
plt.plot(E, color="red")
plt.title("CHL: Convergence reached after %d epochs" %(len(E)))
plt.axis([0, len(E) + 10, 0, max(E + [0.7]) + 0.1])
plt.xlabel("Epoch")
plt.ylabel("Error")
plt.show()
## Plot the responses to the XOR patterns
y_end = [calculate_response(p) for p in patterns]
fig, ax = plt.subplots()
ax.axis([-0.5, 3.5, 0, 1])
ax.set_xticks(np.arange(5))
ax.set_xticklabels(["(%s,%s)" % tuple(p) for p in patterns])
ax.set_ylabel("Activation")
ax.set_xlabel("Patterns")
ax.bar(np.arange(4) - 0.25, y_end, 0.5, color='lightblue')
ax.set_title("Responses to XOR patterns (CHL)")
plt.show()
#%%
res = np.zeros((len(patterns), h.size))
for p in patterns:
calculate_response(p)
i = patterns.index(p)
res[i] = h
plt.imshow(res, interpolation="nearest")
plt.title("Hidden layer responses by pattern")
plt.yticks(np.arange(4), patterns)
plt.ylabel("Stimulus pattern")
plt.xlabel("neuron")
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.copy",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"numpy.ar... | [((7251, 7268), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7265, 7268), True, 'import numpy as np\n'), ((7276, 7299), 'numpy.zeros', 'np.zeros', (['(1, n_inputs)'], {}), '((1, n_inputs))\n', (7284, 7299), True, 'import numpy as np\n'), ((7358, 7381), 'numpy.zeros', 'np.zeros', (['(1, n_hidden)'], {}), '((1, n_hidden))\n', (7366, 7381), True, 'import numpy as np\n'), ((7441, 7465), 'numpy.zeros', 'np.zeros', (['(1, n_outputs)'], {}), '((1, n_outputs))\n', (7449, 7465), True, 'import numpy as np\n'), ((8218, 8242), 'matplotlib.pyplot.plot', 'plt.plot', (['E'], {'color': '"""red"""'}), "(E, color='red')\n", (8226, 8242), True, 'import matplotlib.pyplot as plt\n'), ((8359, 8378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (8369, 8378), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8398), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (8389, 8398), True, 'import matplotlib.pyplot as plt\n'), ((8399, 8409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8407, 8409), True, 'import matplotlib.pyplot as plt\n'), ((8514, 8528), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8526, 8528), True, 'import matplotlib.pyplot as plt\n'), ((8806, 8816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8814, 8816), True, 'import matplotlib.pyplot as plt\n'), ((8955, 8995), 'matplotlib.pyplot.imshow', 'plt.imshow', (['res'], {'interpolation': '"""nearest"""'}), "(res, interpolation='nearest')\n", (8965, 8995), True, 'import matplotlib.pyplot as plt\n'), ((8996, 9042), 'matplotlib.pyplot.title', 'plt.title', (['"""Hidden layer responses by pattern"""'], {}), "('Hidden layer responses by pattern')\n", (9005, 9042), True, 'import matplotlib.pyplot as plt\n'), ((9078, 9108), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stimulus pattern"""'], {}), "('Stimulus pattern')\n", (9088, 9108), True, 'import matplotlib.pyplot as plt\n'), ((9109, 
9129), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""neuron"""'], {}), "('neuron')\n", (9119, 9129), True, 'import matplotlib.pyplot as plt\n'), ((9130, 9140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((4009, 4019), 'numpy.copy', 'np.copy', (['h'], {}), '(h)\n', (4016, 4019), True, 'import numpy as np\n'), ((4436, 4445), 'numpy.sum', 'np.sum', (['o'], {}), '(o)\n', (4442, 4445), True, 'import numpy as np\n'), ((8570, 8582), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (8579, 8582), True, 'import numpy as np\n'), ((9054, 9066), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (9063, 9066), True, 'import numpy as np\n'), ((3028, 3051), 'numpy.zeros', 'np.zeros', (['(1, n_hidden)'], {}), '((1, n_hidden))\n', (3036, 3051), True, 'import numpy as np\n'), ((3168, 3185), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (3176, 3185), True, 'import numpy as np\n'), ((3211, 3228), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (3219, 3228), True, 'import numpy as np\n'), ((3316, 3338), 'numpy.sum', 'np.sum', (['((p1 - p2) ** 2)'], {}), '((p1 - p2) ** 2)\n', (3322, 3338), True, 'import numpy as np\n'), ((4177, 4187), 'numpy.copy', 'np.copy', (['h'], {}), '(h)\n', (4184, 4187), True, 'import numpy as np\n'), ((7524, 7562), 'numpy.random.random', 'np.random.random', (['(n_inputs, n_hidden)'], {}), '((n_inputs, n_hidden))\n', (7540, 7562), True, 'import numpy as np\n'), ((7643, 7682), 'numpy.random.random', 'np.random.random', (['(n_hidden, n_outputs)'], {}), '((n_hidden, n_outputs))\n', (7659, 7682), True, 'import numpy as np\n'), ((8706, 8718), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (8715, 8718), True, 'import numpy as np\n'), ((2736, 2753), 'numpy.array', 'np.array', (['pattern'], {}), '(pattern)\n', (2744, 2753), True, 'import numpy as np\n'), ((2861, 2875), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (2869, 2875), True, 'import numpy as 
np\n'), ((6757, 6767), 'numpy.copy', 'np.copy', (['h'], {}), '(h)\n', (6764, 6767), True, 'import numpy as np\n'), ((6789, 6799), 'numpy.copy', 'np.copy', (['o'], {}), '(o)\n', (6796, 6799), True, 'import numpy as np\n'), ((6888, 6898), 'numpy.copy', 'np.copy', (['h'], {}), '(h)\n', (6895, 6898), True, 'import numpy as np\n'), ((6921, 6931), 'numpy.copy', 'np.copy', (['o'], {}), '(o)\n', (6928, 6931), True, 'import numpy as np\n'), ((2621, 2631), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2627, 2631), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 16:23:57 2019
@author: hitansh
"""
import numpy as np
import os
import sys
# import tarfile
import tensorflow as tf
# import zipfile
# from distutils.version import StrictVersion
# from collections import defaultdict
# from io import StringIO
# from matplotlib import pyplot as plt
from PIL import Image
import cv2
# from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
import imutils
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# print('pwd: ' + os.getcwd())
'''
## Variables
Any model exported using the `export_inference_graph.py` tool can be loaded here simply
by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
By default we use an "SSD with Mobilenet" model here.
See the [detection model zoo]
(https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md)
for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
'''
print('Have you already satisfied the protobuf requirement? Check Wiki.')
os.chdir(r'../../../data/TensorFlow/workspace/training_demo/')
# print('Changed to: ' + os.getcwd())
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_FROZEN_GRAPH = 'trained-inference-graphs/output_inference_graph_v1.pb/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
# PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
PATH_TO_LABELS = 'annotations/label_map.pbtxt'
# Loading graph into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
'''
rh: right hand
lh: left hand
ll: left label
rl: right label
'''
# Detection
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
# PATH_TO_TEST_IMAGES_DIR = 'test_images'
PATH_TO_TEST_IMAGES_DIR = '../../../train/XR_HAND'
TEST_IMAGE_PATHS = []
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# loading files list
for r, d, f in os.walk(PATH_TO_TEST_IMAGES_DIR):
for file in f:
if '.png' in file:
# os.remove(os.path.join(r,file))
TEST_IMAGE_PATHS.append(os.path.join(r, file))
total_files = len(TEST_IMAGE_PATHS)
# This is a fucntion which detects the stuff (like hands etc and then returns a dictionary)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.int64)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
return output_dict
# Looping through all images
log = open('../../../../../hand_detection_script_log.txt', 'w')
# this file is one folder behind x_ray folder
print('Starting Script. Check log file at one directory behind your git folder.')
log.write('Starting script.\n')
j = 0
count = 0
SAVE_PATH = r'../../../train/XR_HAND_CENTRED_NEW_2'
if not os.path.exists(os.path.exists(SAVE_PATH)):
os.mkdir(SAVE_PATH)
for image_path in TEST_IMAGE_PATHS:
count += 1
# print(count,end='\r')
log.write(str(count) + ' ')
image = Image.open(image_path)
image_rotate = image.transpose(Image.ROTATE_270)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = cv2.imread(image_path, 1)
image_np_rotate = imutils.rotate_bound(image_np, 90)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_np_rotate_expanded = np.expand_dims(image_np_rotate, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
output_dict_rotate = run_inference_for_single_image(image_np_rotate_expanded, detection_graph)
# Visualization of the results of a detection.
boxes = output_dict['detection_boxes']
boxes_rotate = output_dict_rotate['detection_boxes']
if output_dict_rotate['num_detections'] > output_dict['num_detections']:
image = image_rotate
image_np = image_np_rotate
boxes = boxes_rotate
bool_anything_found = 0
detection_number = 0
for i in range(output_dict['num_detections']):
if(output_dict['detection_scores'][i] > 0.70):
j += 1
detection_number += 1
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
im_width, im_height = image_pil.size
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.plot([left,right],[bottom,top],linewidth=1.0)
# plt.imshow(image_np)
# check if it is a label
if(output_dict['detection_classes'][i] == 3 or output_dict['detection_classes'][i] == 4):
'''
This code can be used to paint labels, however, it is not implemented
mask=np.zeros(image_np.shape,dtype='uint8')
mask[int(top):int(bottom+top), int(left):int(left+right)]=image_np[int(top):int(bottom+top), int(left):int(left+right)]
mask[:int(top)]
'''
# j+=1
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.imshow(mask)
# inpainted_image=cv2.inpaint(image_np,mask,3,cv2.INPAINT_TELEA)
# cv2.imshow(inpainted_image)
# print('Label', end='\r')
pass
# if it is not a label
# will only come here if score>70% and not a label
else:
bool_anything_found = 1
j = j + 1
crop_img = image_np[int(top):int(bottom + top), int(left):int(left + right)]
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.imshow(crop_img)
IMAGE_PATH_DIR = os.path.join(SAVE_PATH, image_path.split('/')[-3], image_path.split('/')[-2])
if not os.path.exists(IMAGE_PATH_DIR):
os.makedirs(IMAGE_PATH_DIR)
IMAGE_PATH_NEW = IMAGE_PATH_DIR + '/' + image_path.split('/')[-1][:-4] + r'_cropped_' + str(detection_number) + '.png'
cv2.imwrite(IMAGE_PATH_NEW, crop_img)
log.flush()
if(not bool_anything_found):
# print('Nothing found in this image')
# save the image as it is
IMAGE_PATH_DIR = os.path.join(SAVE_PATH, image_path.split('/')[-3], image_path.split('/')[-2])
if not os.path.exists(IMAGE_PATH_DIR):
os.makedirs(IMAGE_PATH_DIR)
IMAGE_PATH_NEW = IMAGE_PATH_DIR + '/' + image_path.split('/')[-1][:-4] + r'_undetected.png'
cv2.imwrite(IMAGE_PATH_NEW, image_np)
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.imshow(image_np)
pass
log.write('\nFertig.')
log.close()
| [
"numpy.uint8",
"tensorflow.gfile.GFile",
"sys.path.append",
"os.walk",
"tensorflow.Graph",
"os.path.exists",
"tensorflow.Session",
"tensorflow.GraphDef",
"os.mkdir",
"tensorflow.get_default_graph",
"object_detection.utils.label_map_util.create_category_index_from_labelmap",
"tensorflow.import_... | [((587, 608), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (602, 608), False, 'import sys\n'), ((1176, 1237), 'os.chdir', 'os.chdir', (['"""../../../data/TensorFlow/workspace/training_demo/"""'], {}), "('../../../data/TensorFlow/workspace/training_demo/')\n", (1184, 1237), False, 'import os\n'), ((1779, 1789), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1787, 1789), True, 'import tensorflow as tf\n'), ((2100, 2193), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['PATH_TO_LABELS'], {'use_display_name': '(True)'}), '(PATH_TO_LABELS,\n use_display_name=True)\n', (2150, 2193), False, 'from object_detection.utils import label_map_util\n'), ((2588, 2620), 'os.walk', 'os.walk', (['PATH_TO_TEST_IMAGES_DIR'], {}), '(PATH_TO_TEST_IMAGES_DIR)\n', (2595, 2620), False, 'import os\n'), ((1844, 1857), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1855, 1857), True, 'import tensorflow as tf\n'), ((4560, 4579), 'os.mkdir', 'os.mkdir', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (4568, 4579), False, 'import os\n'), ((4704, 4726), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4714, 4726), False, 'from PIL import Image\n'), ((4936, 4961), 'cv2.imread', 'cv2.imread', (['image_path', '(1)'], {}), '(image_path, 1)\n', (4946, 4961), False, 'import cv2\n'), ((4984, 5018), 'imutils.rotate_bound', 'imutils.rotate_bound', (['image_np', '(90)'], {}), '(image_np, 90)\n', (5004, 5018), False, 'import imutils\n'), ((5132, 5164), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (5146, 5164), True, 'import numpy as np\n'), ((5196, 5235), 'numpy.expand_dims', 'np.expand_dims', (['image_np_rotate'], {'axis': '(0)'}), '(image_np_rotate, axis=0)\n', (5210, 5235), True, 'import numpy as np\n'), ((1867, 1909), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_FROZEN_GRAPH', 
'"""rb"""'], {}), "(PATH_TO_FROZEN_GRAPH, 'rb')\n", (1881, 1909), True, 'import tensorflow as tf\n'), ((2019, 2061), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2038, 2061), True, 'import tensorflow as tf\n'), ((4528, 4553), 'os.path.exists', 'os.path.exists', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (4542, 4553), False, 'import os\n'), ((8428, 8465), 'cv2.imwrite', 'cv2.imwrite', (['IMAGE_PATH_NEW', 'image_np'], {}), '(IMAGE_PATH_NEW, image_np)\n', (8439, 8465), False, 'import cv2\n'), ((2995, 3007), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3005, 3007), True, 'import tensorflow as tf\n'), ((8248, 8278), 'os.path.exists', 'os.path.exists', (['IMAGE_PATH_DIR'], {}), '(IMAGE_PATH_DIR)\n', (8262, 8278), False, 'import os\n'), ((8292, 8319), 'os.makedirs', 'os.makedirs', (['IMAGE_PATH_DIR'], {}), '(IMAGE_PATH_DIR)\n', (8303, 8319), False, 'import os\n'), ((2750, 2771), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (2762, 2771), False, 'import os\n'), ((7950, 7987), 'cv2.imwrite', 'cv2.imwrite', (['IMAGE_PATH_NEW', 'crop_img'], {}), '(IMAGE_PATH_NEW, crop_img)\n', (7961, 7987), False, 'import cv2\n'), ((3089, 3111), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3109, 3111), True, 'import tensorflow as tf\n'), ((3559, 3581), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3579, 3581), True, 'import tensorflow as tf\n'), ((7719, 7749), 'os.path.exists', 'os.path.exists', (['IMAGE_PATH_DIR'], {}), '(IMAGE_PATH_DIR)\n', (7733, 7749), False, 'import os\n'), ((7771, 7798), 'os.makedirs', 'os.makedirs', (['IMAGE_PATH_DIR'], {}), '(IMAGE_PATH_DIR)\n', (7782, 7798), False, 'import os\n'), ((6018, 6033), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (6026, 6033), True, 'import numpy as np\n'), ((3477, 3499), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3497, 3499), True, 
'import tensorflow as tf\n')] |
"""
Multi-node example (GPU)
"""
import os
import numpy as np
import torch
from test_tube import HyperOptArgumentParser, Experiment
from pytorch_lightning import Trainer
from examples.new_project_templates.lightning_module_template import LightningTemplateModel
# Fixed seed so runs of this example are reproducible.
SEED = 2334
torch.manual_seed(SEED)  # seed torch's global RNG
np.random.seed(SEED)  # seed numpy's global RNG
def main(hparams):
    """
    Main training routine specific for this project.

    :param hparams: parsed hyperparameters (argparse-style namespace);
        must provide ``log_dir`` for the test-tube experiment.
    :return: None
    """
    # ------------------------
    # 1 INIT LIGHTNING MODEL
    # ------------------------
    model = LightningTemplateModel(hparams)
    # ------------------------
    # 2 INIT TEST TUBE EXP
    # ------------------------
    # init experiment
    # BUGFIX: use the ``hparams`` argument instead of the module-level
    # ``hyperparams`` global, so this function also works when imported
    # and called from outside the ``__main__`` block.
    exp = Experiment(
        name='test_exp',
        save_dir=hparams.log_dir,
        autosave=False,
        description='test demo'
    )
    # ------------------------
    # 3 INIT TRAINER
    # ------------------------
    trainer = Trainer(
        experiment=exp,
        gpus=8,
        nb_gpu_nodes=2
    )
    # ------------------------
    # 4 START TRAINING
    # ------------------------
    trainer.fit(model)
if __name__ == '__main__':
    # use current dir for logging
    root_dir = os.path.dirname(os.path.realpath(__file__))
    log_dir = os.path.join(root_dir, 'pt_lightning_demo_logs')
    # grid-search parser from test-tube; add_help=False so the model's
    # own parser can add its arguments without conflicts
    parent_parser = HyperOptArgumentParser(strategy='grid_search', add_help=False)
    parent_parser.add_argument('--log_dir', type=str, default=log_dir,
                               help='where to save logs')
    # allow model to overwrite or extend args
    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
    hyperparams = parser.parse_args()
    # ---------------------
    # RUN TRAINING
    # ---------------------
    main(hyperparams)
| [
"torch.manual_seed",
"os.path.join",
"os.path.realpath",
"examples.new_project_templates.lightning_module_template.LightningTemplateModel.add_model_specific_args",
"pytorch_lightning.Trainer",
"numpy.random.seed",
"test_tube.Experiment",
"test_tube.HyperOptArgumentParser",
"examples.new_project_temp... | [((276, 299), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (293, 299), False, 'import torch\n'), ((300, 320), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (314, 320), True, 'import numpy as np\n'), ((546, 577), 'examples.new_project_templates.lightning_module_template.LightningTemplateModel', 'LightningTemplateModel', (['hparams'], {}), '(hparams)\n', (568, 577), False, 'from examples.new_project_templates.lightning_module_template import LightningTemplateModel\n'), ((700, 802), 'test_tube.Experiment', 'Experiment', ([], {'name': '"""test_exp"""', 'save_dir': 'hyperparams.log_dir', 'autosave': '(False)', 'description': '"""test demo"""'}), "(name='test_exp', save_dir=hyperparams.log_dir, autosave=False,\n description='test demo')\n", (710, 802), False, 'from test_tube import HyperOptArgumentParser, Experiment\n'), ((935, 982), 'pytorch_lightning.Trainer', 'Trainer', ([], {'experiment': 'exp', 'gpus': '(8)', 'nb_gpu_nodes': '(2)'}), '(experiment=exp, gpus=8, nb_gpu_nodes=2)\n', (942, 982), False, 'from pytorch_lightning import Trainer\n'), ((1258, 1306), 'os.path.join', 'os.path.join', (['root_dir', '"""pt_lightning_demo_logs"""'], {}), "(root_dir, 'pt_lightning_demo_logs')\n", (1270, 1306), False, 'import os\n'), ((1328, 1390), 'test_tube.HyperOptArgumentParser', 'HyperOptArgumentParser', ([], {'strategy': '"""grid_search"""', 'add_help': '(False)'}), "(strategy='grid_search', add_help=False)\n", (1350, 1390), False, 'from test_tube import HyperOptArgumentParser, Experiment\n'), ((1580, 1651), 'examples.new_project_templates.lightning_module_template.LightningTemplateModel.add_model_specific_args', 'LightningTemplateModel.add_model_specific_args', (['parent_parser', 'root_dir'], {}), '(parent_parser, root_dir)\n', (1626, 1651), False, 'from examples.new_project_templates.lightning_module_template import LightningTemplateModel\n'), ((1216, 1242), 'os.path.realpath', 'os.path.realpath', 
(['__file__'], {}), '(__file__)\n', (1232, 1242), False, 'import os\n')] |
import numpy as np
import multiprocessing
# Empirically chosen upper bound on the eFAST chunk size.
OPTIMAL_CHUNK_SIZE_EFAST = 50


def get_chunk_size_eFAST(num_params):
    """Return the eFAST chunk size: ``num_params`` capped at the optimal size.

    A proper ``def`` replaces the original assigned lambda (PEP 8 E731);
    the callable name and behavior are unchanged.
    """
    return min(OPTIMAL_CHUNK_SIZE_EFAST, num_params)
def random_samples(iterations, num_params, seed=None):
    """Draw i.i.d. standard-uniform samples for every (iteration, input) pair.

    Parameters
    ----------
    iterations : int
        Number of iterations (rows of the output).
    num_params : int
        Number of model inputs (columns of the output).
    seed : int, optional
        Seed forwarded to ``np.random.seed`` for reproducibility.

    Returns
    -------
    samples : array
        Uniform samples of shape ``(iterations, num_params)``.
    """
    np.random.seed(seed)
    return np.random.rand(iterations, num_params)
def custom_unitcube_samples(X_unitcube):
    """Identity wrapper: pass through a user-supplied sampling matrix whose
    entries already lie in the [0,1] unit cube."""
    return X_unitcube
def custom_rescaled_samples(X_rescaled):
    """Identity wrapper: pass through a user-supplied sampling matrix whose
    entries are already rescaled to the model-input ranges."""
    return X_rescaled
def latin_hypercube_samples(iterations, num_params, seed=None):
    """Latin hypercube samples in the [0,1] range.

    Each of the ``num_params`` columns is stratified: exactly one sample
    falls into each of the ``iterations`` equal-width bins.

    Parameters
    ----------
    iterations : int
        Number of iterations (rows of the output, and number of bins).
    num_params : int
        Number of model inputs (columns of the output).
    seed : int, optional
        Seed forwarded to ``np.random.seed`` for reproducibility.

    Returns
    -------
    samples : array
        Latin hypercube samples of shape ``(iterations, num_params)``.
    """
    np.random.seed(seed)
    bin_width = 1 / iterations
    # One uniform offset inside a bin for every (input, iteration) pair.
    offsets = np.random.uniform(low=0, high=bin_width, size=(num_params, iterations))
    starts = np.linspace(start=0, stop=1, num=iterations, endpoint=False)
    # Shuffle the bin starts independently per input, then shift in place.
    for row in range(num_params):
        np.random.shuffle(starts)
        offsets[row] += starts
    return offsets.T
def sobol_samples(iterations, num_params, skip_iterations=1000):
    """Quasi-random Sobol sequence in the [0,1] range.

    The first ``skip_iterations`` points of the sequence are discarded to
    avoid values that hug the boundary of the unit cube.

    Parameters
    ----------
    iterations : int
        Number of iterations.
    num_params : int
        Number of model inputs.
    skip_iterations : int
        Number of leading Sobol sequence samples to drop.

    Returns
    -------
    samples : array
        Sobol samples of size ``iterations x num_params``.
    """
    from .sobol_sequence import SobolSample

    generator = SobolSample(iterations + skip_iterations, num_params, scale=31)
    all_points = generator.generate_all_samples()
    return all_points[skip_iterations:]
def saltelli_samples(iterations, num_params, skip_iterations=1000):
    """Saltelli samples in [0,1] range based on Sobol sequences and radial sampling.
    Parameters
    ----------
    iterations : int
        Number of iterations.
    num_params : int
        Number of model inputs.
    skip_iterations : int
        Number of first Sobol sequence samples to skip.
    Returns
    -------
    samples : array
        Saltelli samples of size ``iterations_per_parameter (num_params + 2) x num_params``,
        where ``iterations_per_parameter = iterations // (num_params + 2)``.
    References
    ----------
    Paper:
        :cite:ts:`saltelli2010variance`
    Link to the original implementation:
        https://github.com/SALib/SALib/blob/master/src/SALib/sample/saltelli.py
    """
    # Use Sobol samples as base
    from .sobol_sequence import SobolSample
    iterations_per_parameter = iterations // (num_params + 2)
    # generate base Sobol sequence samples; first num_params columns form
    # the "A" matrix, the last num_params columns the "B" matrix
    sobol = SobolSample(
        iterations_per_parameter + skip_iterations, num_params * 2, scale=31
    )
    base_samples = sobol.generate_all_samples()
    base_samples = base_samples[skip_iterations:]
    # create saltelli samples with radial basis design:
    # each block of (num_params + 2) rows starts as copies of one A sample
    samples = np.tile(base_samples[:, :num_params], (1, num_params + 2)).reshape(
        iterations_per_parameter * (num_params + 2), -1
    )
    # the last row of every block is the full B sample
    samples[num_params + 1 :: num_params + 2, :] = base_samples[:, num_params:]
    # use a boolean mask for cross sampling of elements: rows 1..num_params
    # of each block replace exactly one coordinate of A with the matching
    # coordinate of B (the "AB" rows of the radial design)
    mask_ = np.full((num_params + 2) * num_params, False)
    mask_[num_params :: num_params + 1] = 1
    mask = np.tile(mask_, iterations_per_parameter).reshape(
        iterations_per_parameter * (num_params + 2), -1
    )
    samples[mask] = base_samples[:, num_params:].flatten()
    return samples
def eFAST_omega(iterations, num_params, M):
    """Compute the eFAST frequency vector ``omega``, one entry per input.

    The first input gets the highest admissible frequency; the remaining
    inputs share the lower complementary frequencies.
    """
    principal = np.floor((iterations - 1) / (2 * M))
    max_complementary = np.floor(principal / (2 * M))
    omega = np.zeros([num_params])
    omega[0] = principal
    if max_complementary >= num_params - 1:
        # Enough head-room: spread complementary frequencies evenly.
        omega[1:] = np.floor(np.linspace(1, max_complementary, num_params - 1))
    else:
        # Otherwise cycle through 1..max_complementary.
        omega[1:] = np.arange(num_params - 1) % max_complementary + 1
    return omega
def eFAST_samples_one_chunk(i, iterations, num_params, M=4, seed=None):
    """Generate the eFAST samples for chunk ``i`` of the model inputs.

    Returns an array of shape ``(N * num_params_curr, num_params)``, where
    ``num_params_curr`` is the number of inputs handled by this chunk and
    ``N`` is the number of points on the search curve.
    """
    np.random.seed(seed)
    iterations_per_parameter = iterations // num_params
    # Determine current chunk
    chunk_size = get_chunk_size_eFAST(num_params)
    num_chunks = int(np.ceil(num_params / chunk_size))
    last_chunk = num_params % chunk_size
    if i < num_chunks - 1 or last_chunk == 0:
        num_params_curr = chunk_size
    elif i == num_chunks - 1:
        # final, possibly smaller, chunk
        num_params_curr = last_chunk
    # Minimum number of iterations is chosen based on the Nyquist criterion, ``N`` in the paper
    N = max(4 * M ** 2 + 1, iterations_per_parameter)
    # Set of frequencies that would be assigned to each input factor
    omega = eFAST_omega(N, num_params, M)
    # Discretization of the frequency space
    s = (2 * np.pi / N) * np.arange(N)
    # Random phase-shift
    phi = 2 * np.pi * np.random.rand(num_params)
    # mask_partial is True everywhere except the diagonal: within a chunk,
    # each input in turn takes the principal frequency omega[0]
    mask_partial = np.ones([chunk_size, chunk_size], dtype=bool)
    np.fill_diagonal(mask_partial, False)
    chunk_before = i * chunk_size
    chunk_after = num_params - (i + 1) * chunk_size
    if i < num_chunks - 1 or last_chunk == 0:
        mask = np.hstack(
            [
                np.ones([chunk_size, chunk_before], dtype=bool),
                mask_partial,
                np.ones([chunk_size, chunk_after], dtype=bool),
            ]
        )
        omega_temp = np.zeros([chunk_size, num_params])
        omega_temp[mask] = np.tile(omega[1:], chunk_size)
        omega_temp[~mask] = omega[0]
    elif i == num_chunks - 1:
        mask = np.hstack(
            [
                np.ones([last_chunk, chunk_before], dtype=bool),
                mask_partial[:last_chunk, :last_chunk],
            ]
        )
        omega_temp = np.zeros([last_chunk, num_params])
        omega_temp[mask] = np.tile(omega[1:], last_chunk)
        omega_temp[~mask] = omega[0]
    start = i * chunk_size
    end = (i + 1) * chunk_size
    phi_chunk = phi[start:end]
    # broadcast phases and frequencies to shape (num_params_curr, num_params, N)
    phi_chunk = np.tile(phi_chunk, [num_params]).reshape(num_params, num_params_curr).T
    phi_chunk = np.tile(phi_chunk, N).reshape(num_params_curr, num_params, N)
    omega2_kron = np.kron(omega_temp, s).reshape(num_params_curr, num_params, N)
    # eFAST search-curve transformation mapping onto [0,1]
    g = 0.5 + (1 / np.pi) * np.arcsin(np.sin(omega2_kron + phi_chunk))
    current_samples = np.transpose(g, (0, 2, 1)).reshape(
        N * num_params_curr, num_params
    )
    return current_samples
def eFAST_samples_many_chunks(
    icpu, iterations, num_params, num_params_per_cpu, M=4, seed=None
):
    """Stack all eFAST sample chunks assigned to worker ``icpu`` into one array."""
    chunk_size = get_chunk_size_eFAST(num_params_per_cpu)
    num_chunks = int(np.ceil(num_params_per_cpu / chunk_size))
    # Seed with an empty array so vstack works even before the first chunk.
    pieces = [np.zeros(shape=(0, num_params))]
    for local_chunk in range(num_chunks):
        global_chunk = icpu * num_chunks + local_chunk
        pieces.append(
            eFAST_samples_one_chunk(global_chunk, iterations, num_params, M, seed)
        )
    return np.vstack(pieces)
def eFAST_samples(iterations, num_params, M=4, seed=None, cpus=None):
    """Extended FAST samples in [0,1] range.
    Parameters
    ----------
    iterations : int
        Number of iterations.
    num_params : int
        Number of model inputs.
    M : int
        Interference factor, usually 4 or higher.
    seed : int
        Random seed.
    cpus : int
        Number of cpus for parallel computation of eFAST samples with ``multiprocessing`` library.
    Returns
    -------
    samples : array
        eFAST samples of size ``iterations x num_params``.
    References
    ----------
    Paper:
        :cite:ts:`saltelli1999quantitative`
    Link to the original implementation:
        https://github.com/SALib/SALib/blob/master/src/SALib/sample/fast_sampler.py
    """
    chunk_size = get_chunk_size_eFAST(num_params)
    # number of chunks each worker has to process
    num_jobs = int(np.ceil(np.ceil(num_params / chunk_size) / cpus))
    # parameter-index boundaries assigned to each cpu
    params_range_per_cpu = np.hstack(
        [np.arange(0, num_params, num_jobs * chunk_size), num_params]
    )
    num_params_per_cpu = params_range_per_cpu[1:] - params_range_per_cpu[:-1]
    cpus_needed = len(num_params_per_cpu)
    # fan the chunks out over a process pool, one task per cpu
    with multiprocessing.Pool(processes=cpus_needed) as pool:
        samples_temp = pool.starmap(
            eFAST_samples_many_chunks,
            [
                (icpu, iterations, num_params, num_params_per_cpu[icpu], M, seed)
                for icpu in range(cpus_needed)
            ],
        )
    samples = np.zeros(shape=(0, num_params))
    for res in samples_temp:
        samples = np.vstack([samples, res])
    return samples
| [
"numpy.tile",
"numpy.ceil",
"numpy.random.rand",
"numpy.ones",
"numpy.sin",
"numpy.floor",
"numpy.fill_diagonal",
"numpy.kron",
"numpy.linspace",
"numpy.zeros",
"numpy.random.seed",
"multiprocessing.Pool",
"numpy.random.uniform",
"numpy.vstack",
"numpy.full",
"numpy.transpose",
"nump... | [((627, 647), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (641, 647), True, 'import numpy as np\n'), ((662, 700), 'numpy.random.rand', 'np.random.rand', (['iterations', 'num_params'], {}), '(iterations, num_params)\n', (676, 700), True, 'import numpy as np\n'), ((1521, 1541), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1535, 1541), True, 'import numpy as np\n'), ((1582, 1648), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': 'step', 'size': '(num_params, iterations)'}), '(low=0, high=step, size=(num_params, iterations))\n', (1599, 1648), True, 'import numpy as np\n'), ((1670, 1730), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(1)', 'num': 'iterations', 'endpoint': '(False)'}), '(start=0, stop=1, num=iterations, endpoint=False)\n', (1681, 1730), True, 'import numpy as np\n'), ((4108, 4153), 'numpy.full', 'np.full', (['((num_params + 2) * num_params)', '(False)'], {}), '((num_params + 2) * num_params, False)\n', (4115, 4153), True, 'import numpy as np\n'), ((4523, 4545), 'numpy.zeros', 'np.zeros', (['[num_params]'], {}), '([num_params])\n', (4531, 4545), True, 'import numpy as np\n'), ((4561, 4597), 'numpy.floor', 'np.floor', (['((iterations - 1) / (2 * M))'], {}), '((iterations - 1) / (2 * M))\n', (4569, 4597), True, 'import numpy as np\n'), ((4606, 4634), 'numpy.floor', 'np.floor', (['(omega[0] / (2 * M))'], {}), '(omega[0] / (2 * M))\n', (4614, 4634), True, 'import numpy as np\n'), ((4889, 4909), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4903, 4909), True, 'import numpy as np\n'), ((5731, 5776), 'numpy.ones', 'np.ones', (['[chunk_size, chunk_size]'], {'dtype': 'bool'}), '([chunk_size, chunk_size], dtype=bool)\n', (5738, 5776), True, 'import numpy as np\n'), ((5781, 5818), 'numpy.fill_diagonal', 'np.fill_diagonal', (['mask_partial', '(False)'], {}), '(mask_partial, False)\n', (5797, 5818), True, 'import numpy as np\n'), ((7473, 7504), 
'numpy.zeros', 'np.zeros', ([], {'shape': '(0, num_params)'}), '(shape=(0, num_params))\n', (7481, 7504), True, 'import numpy as np\n'), ((9185, 9216), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0, num_params)'}), '(shape=(0, num_params))\n', (9193, 9216), True, 'import numpy as np\n'), ((1766, 1799), 'numpy.random.shuffle', 'np.random.shuffle', (['interval_start'], {}), '(interval_start)\n', (1783, 1799), True, 'import numpy as np\n'), ((5067, 5099), 'numpy.ceil', 'np.ceil', (['(num_params / chunk_size)'], {}), '(num_params / chunk_size)\n', (5074, 5099), True, 'import numpy as np\n'), ((5624, 5636), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5633, 5636), True, 'import numpy as np\n'), ((5684, 5710), 'numpy.random.rand', 'np.random.rand', (['num_params'], {}), '(num_params)\n', (5698, 5710), True, 'import numpy as np\n'), ((6197, 6231), 'numpy.zeros', 'np.zeros', (['[chunk_size, num_params]'], {}), '([chunk_size, num_params])\n', (6205, 6231), True, 'import numpy as np\n'), ((6259, 6289), 'numpy.tile', 'np.tile', (['omega[1:]', 'chunk_size'], {}), '(omega[1:], chunk_size)\n', (6266, 6289), True, 'import numpy as np\n'), ((7417, 7457), 'numpy.ceil', 'np.ceil', (['(num_params_per_cpu / chunk_size)'], {}), '(num_params_per_cpu / chunk_size)\n', (7424, 7457), True, 'import numpy as np\n'), ((8874, 8917), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cpus_needed'}), '(processes=cpus_needed)\n', (8894, 8917), False, 'import multiprocessing\n'), ((9264, 9289), 'numpy.vstack', 'np.vstack', (['[samples, res]'], {}), '([samples, res])\n', (9273, 9289), True, 'import numpy as np\n'), ((3830, 3888), 'numpy.tile', 'np.tile', (['base_samples[:, :num_params]', '(1, num_params + 2)'], {}), '(base_samples[:, :num_params], (1, num_params + 2))\n', (3837, 3888), True, 'import numpy as np\n'), ((4209, 4249), 'numpy.tile', 'np.tile', (['mask_', 'iterations_per_parameter'], {}), '(mask_, iterations_per_parameter)\n', (4216, 4249), True, 'import numpy as 
np\n'), ((4694, 4727), 'numpy.linspace', 'np.linspace', (['(1)', 'm', '(num_params - 1)'], {}), '(1, m, num_params - 1)\n', (4705, 4727), True, 'import numpy as np\n'), ((6563, 6597), 'numpy.zeros', 'np.zeros', (['[last_chunk, num_params]'], {}), '([last_chunk, num_params])\n', (6571, 6597), True, 'import numpy as np\n'), ((6625, 6655), 'numpy.tile', 'np.tile', (['omega[1:]', 'last_chunk'], {}), '(omega[1:], last_chunk)\n', (6632, 6655), True, 'import numpy as np\n'), ((6888, 6909), 'numpy.tile', 'np.tile', (['phi_chunk', 'N'], {}), '(phi_chunk, N)\n', (6895, 6909), True, 'import numpy as np\n'), ((6968, 6990), 'numpy.kron', 'np.kron', (['omega_temp', 's'], {}), '(omega_temp, s)\n', (6975, 6990), True, 'import numpy as np\n'), ((7124, 7150), 'numpy.transpose', 'np.transpose', (['g', '(0, 2, 1)'], {}), '(g, (0, 2, 1))\n', (7136, 7150), True, 'import numpy as np\n'), ((8677, 8724), 'numpy.arange', 'np.arange', (['(0)', 'num_params', '(num_jobs * chunk_size)'], {}), '(0, num_params, num_jobs * chunk_size)\n', (8686, 8724), True, 'import numpy as np\n'), ((4759, 4784), 'numpy.arange', 'np.arange', (['(num_params - 1)'], {}), '(num_params - 1)\n', (4768, 4784), True, 'import numpy as np\n'), ((6009, 6056), 'numpy.ones', 'np.ones', (['[chunk_size, chunk_before]'], {'dtype': 'bool'}), '([chunk_size, chunk_before], dtype=bool)\n', (6016, 6056), True, 'import numpy as np\n'), ((6104, 6150), 'numpy.ones', 'np.ones', (['[chunk_size, chunk_after]'], {'dtype': 'bool'}), '([chunk_size, chunk_after], dtype=bool)\n', (6111, 6150), True, 'import numpy as np\n'), ((6800, 6832), 'numpy.tile', 'np.tile', (['phi_chunk', '[num_params]'], {}), '(phi_chunk, [num_params])\n', (6807, 6832), True, 'import numpy as np\n'), ((7069, 7100), 'numpy.sin', 'np.sin', (['(omega2_kron + phi_chunk)'], {}), '(omega2_kron + phi_chunk)\n', (7075, 7100), True, 'import numpy as np\n'), ((8588, 8620), 'numpy.ceil', 'np.ceil', (['(num_params / chunk_size)'], {}), '(num_params / chunk_size)\n', (8595, 8620), 
True, 'import numpy as np\n'), ((6413, 6460), 'numpy.ones', 'np.ones', (['[last_chunk, chunk_before]'], {'dtype': 'bool'}), '([last_chunk, chunk_before], dtype=bool)\n', (6420, 6460), True, 'import numpy as np\n')] |
# MIT Licence
# Methods to predict the SSIM, taken from
# https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, normalised
    so its entries sum to one."""
    center = window_size // 2
    weights = [
        exp(-((x - center) ** 2) / float(2 * sigma ** 2))
        for x in range(window_size)
    ]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a normalised 2-D Gaussian window replicated over ``channel``
    convolution groups, shaped (channel, 1, window_size, window_size)."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    expanded = kernel_2d.expand(channel, 1, window_size, window_size)
    return Variable(expanded.contiguous())
def _ssim(
    img1, img2, window, window_size, channel, mask=None, size_average=True
):
    """Compute SSIM between two image batches with a precomputed window.

    :param img1: tensor of shape (B, C, H, W)
    :param img2: tensor of shape (B, C, H, W)
    :param window: window tensor of shape (C, 1, window_size, window_size)
    :param window_size: side length of the window
    :param channel: number of channels C (convolution group count)
    :param mask: optional weighting mask; when given, returns a per-sample
        masked mean of the SSIM map instead of the plain mean
    :param size_average: if True return a scalar mean over everything,
        else a per-sample mean
    """
    # Local means via grouped convolution (one group per channel).
    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2
    # Local (co)variances: E[x*y] - E[x]E[y].
    sigma1_sq = (
        F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel)
        - mu1_sq
    )
    sigma2_sq = (
        F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel)
        - mu2_sq
    )
    sigma12 = (
        F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel)
        - mu1_mu2
    )
    # Stability constants from the SSIM paper (assumes dynamic range L=1).
    C1 = 0.01 ** 2
    C2 = 0.03 ** 2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
    )
    if mask is not None:
        b = mask.size(0)
        ssim_map = ssim_map.mean(dim=1, keepdim=True) * mask
        # Masked per-sample mean; clamp guards against empty masks.
        ssim_map = ssim_map.view(b, -1).sum(dim=1) / mask.view(b, -1).sum(
            dim=1
        ).clamp(min=1)
        return ssim_map
    # BUGFIX: removed leftover debug cruft (`import pdb` and a bare,
    # uncalled `pdb.set_trace`) that ran as dead no-ops on this path.
    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """nn.Module wrapper around :func:`_ssim` that caches the Gaussian
    window and rebuilds it only when the channel count or dtype of the
    input changes."""

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2, mask=None):
        (_, channel, _, _) = img1.size()
        # Reuse the cached window when it still matches the input.
        reusable = (
            channel == self.channel
            and self.window.data.type() == img1.data.type()
        )
        if reusable:
            window = self.window
        else:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Cache for subsequent calls with the same configuration.
            self.window = window
            self.channel = channel
        return _ssim(
            img1,
            img2,
            window,
            self.window_size,
            channel,
            mask,
            self.size_average,
        )
def ssim(img1, img2, window_size=11, mask=None, size_average=True):
    """One-shot SSIM: build a window matching ``img1`` and delegate to
    :func:`_ssim`."""
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, mask, size_average)
if __name__ == "__main__":
    import cv2
    import numpy as np
    # Smoke test: load the same image twice and compare with SSIM.
    a = cv2.imread("/home/samuel/gaodaiheng/3DFace/unsupervised/1.jpg")
    a = np.expand_dims(a, 0)  # add batch dimension -> (1, H, W, C)
    b = cv2.imread("/home/samuel/gaodaiheng/3DFace/unsupervised/1.jpg")
    b = np.expand_dims(b, 0)
    a, b = a.astype(np.float), b.astype(np.float)
    # HWC -> CHW layout expected by conv2d
    a = torch.from_numpy(np.transpose(a, (0, 3, 1, 2)))
    b = torch.from_numpy(np.transpose(b, (0, 3, 1, 2)))
    # Two identical images give ssim == 1, so a loss should use 1 - ssim.
print(ssim(a, b)) | [
"torch.nn.functional.conv2d",
"numpy.transpose",
"cv2.imread",
"numpy.expand_dims"
] | [((884, 948), 'torch.nn.functional.conv2d', 'F.conv2d', (['img1', 'window'], {'padding': '(window_size // 2)', 'groups': 'channel'}), '(img1, window, padding=window_size // 2, groups=channel)\n', (892, 948), True, 'import torch.nn.functional as F\n'), ((959, 1023), 'torch.nn.functional.conv2d', 'F.conv2d', (['img2', 'window'], {'padding': '(window_size // 2)', 'groups': 'channel'}), '(img2, window, padding=window_size // 2, groups=channel)\n', (967, 1023), True, 'import torch.nn.functional as F\n'), ((3483, 3546), 'cv2.imread', 'cv2.imread', (['"""/home/samuel/gaodaiheng/3DFace/unsupervised/1.jpg"""'], {}), "('/home/samuel/gaodaiheng/3DFace/unsupervised/1.jpg')\n", (3493, 3546), False, 'import cv2\n'), ((3555, 3575), 'numpy.expand_dims', 'np.expand_dims', (['a', '(0)'], {}), '(a, 0)\n', (3569, 3575), True, 'import numpy as np\n'), ((3584, 3647), 'cv2.imread', 'cv2.imread', (['"""/home/samuel/gaodaiheng/3DFace/unsupervised/1.jpg"""'], {}), "('/home/samuel/gaodaiheng/3DFace/unsupervised/1.jpg')\n", (3594, 3647), False, 'import cv2\n'), ((3656, 3676), 'numpy.expand_dims', 'np.expand_dims', (['b', '(0)'], {}), '(b, 0)\n', (3670, 3676), True, 'import numpy as np\n'), ((1128, 1199), 'torch.nn.functional.conv2d', 'F.conv2d', (['(img1 * img1)', 'window'], {'padding': '(window_size // 2)', 'groups': 'channel'}), '(img1 * img1, window, padding=window_size // 2, groups=channel)\n', (1136, 1199), True, 'import torch.nn.functional as F\n'), ((1257, 1328), 'torch.nn.functional.conv2d', 'F.conv2d', (['(img2 * img2)', 'window'], {'padding': '(window_size // 2)', 'groups': 'channel'}), '(img2 * img2, window, padding=window_size // 2, groups=channel)\n', (1265, 1328), True, 'import torch.nn.functional as F\n'), ((1384, 1455), 'torch.nn.functional.conv2d', 'F.conv2d', (['(img1 * img2)', 'window'], {'padding': '(window_size // 2)', 'groups': 'channel'}), '(img1 * img2, window, padding=window_size // 2, groups=channel)\n', (1392, 1455), True, 'import torch.nn.functional as F\n'), 
((3754, 3783), 'numpy.transpose', 'np.transpose', (['a', '(0, 3, 1, 2)'], {}), '(a, (0, 3, 1, 2))\n', (3766, 3783), True, 'import numpy as np\n'), ((3810, 3839), 'numpy.transpose', 'np.transpose', (['b', '(0, 3, 1, 2)'], {}), '(b, (0, 3, 1, 2))\n', (3822, 3839), True, 'import numpy as np\n')] |
'''
XlPy/Link_Finder/search/single
______________________________
Checks if a sequenced peptide matches back to an unsequenced,
theoretical peptide massfingerprint.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load future
from __future__ import division
# load modules
import copy
import numpy as np
from models import params
from xldlib import chemical
from xldlib.utils import iterables, masstools
# load objects/functions
from collections import namedtuple
from models.data import unpack_data
from ..crosslinks import CrossLinkerCombinations #, EndsData
from ..maker import PMFMaker
from ..objects import Ends
from xlpy.peptide_database.combinations import FragmentCombinations
from xlpy.tools.mass import PrecursorMass
class Settings(FragmentCombinations): # EndsData
    '''Base class providing settings for inheritance'''
    # maximum number of intact crosslinks considered per fingerprint
    max_xl = params.MASS_FINGERPRINT['max_crosslink_count']
    # minimum peak intensity, as a fraction of the base peak
    min_int = params.MASS_FINGERPRINT['min_intensity']
    # mass-error threshold for single-link matching
    _threshold = params.MASS_ERRORS['thresholds']["single_link"]
    # record types for theoretical MS1 candidates and search queries
    ms1 = namedtuple("MS1", "ends basemass mass mz")
    query = namedtuple("Search", "ends basemass mass mz ppm exper z")
    # ppm tolerance converted to a fractional value
    _ppm_thresh = params.MASS_ERRORS['thresholds']['ppm'] * 1e-6
    def __init__(self):
        super(Settings, self).__init__()
        self.ms1s = []
        self.queries = []
        self.matched = []
        # apparently Windows improperly does class initiation, have
        # to manually __get__ to class instance.
        self.unpack_data = unpack_data.__get__(self)
        self.calculate_mass = PrecursorMass.calculate_mass.__get__(self)
        self.get_exper_mass_ = PMFMaker.get_exper_mass.__get__(self)
class PMFChecker(Settings):
'''
Class which checks possible crosslinker and deadend combinations
after calculating the theoretical profile and then determines
if any matches can be found.
'''
def __init__(self, scan, indexer, index, parent):
super(PMFChecker, self).__init__()
# here, need to recalculate the number of possible XL
# number based on the fragments with 2 peptides
self.scan = copy.deepcopy(scan)
self.formula = self.scan.formula
self.parent = parent
self.xler = parent.xler
self.data = parent.data
self._nterm = parent.engine['nterm']
self._cterm = parent.engine['cterm']
self.bridge = chemical.Molecule(self.parent.xler['bridge']).mass()
self.fragment_masses = self.get_fragment_masses()
self.site_mass = self.get_sitemass()
self.index = [indexer.total[index]]
self.scan_data = self.get_scan_data()
self.mz = self.scan_data['mz'].value
self.intensity = self.scan_data['intensity'].value
self.xlnum = None
self.theor_charge = None
self.link_ends_base = self.get_link_ends()
self.order = sorted(set(self.xler['react_sites']))
def check_single(self):
'''
Checks a single to identify if any other sequences are possible
Does this via a sequential procedure.
1. Calculates all possible permutations of the other peptides
crosslinker combinations.
2. Iterates over all possibilities to calculate the basemass
to generate a potential mass list.
3. Filter only for transitions with an identified mass in
the m/z list.
'''
# can skip peptide since only one index passed in...
if self.missing_z() <= 0 or self.check_threshold():
return
other_fragments = self.get_other_fragment()
self.iter_fragments(other_fragments)
self.extract_mzs()
# minimize memory usage
del self.mz
del self.intensity
self.search()
def check_threshold(self):
'''
Calculates mass of group of peptides, taking into considering
their sequence, posttranslation modificaitons, and their neutral
losses.
'''
formulas = self.unpack_data('formula')
theor = sum(chemical.Molecule(i).mass for i in formulas)
exper = self.get_exper_mass_(self.index)
return exper - theor < self._threshold
# ------------------
# STEP 1
# ------------------
def get_other_fragment(self):
'''
Calculates all permutations of the other peptide's crosslink
fragments, so that the XL numbers can then be iterated over.
'''
fragments = [range(0, self.max_xl + 1) for i in self.order]
return iterables.num_product(*fragments, min=1, max=self.max_xl)
# ------------------
# STEP 2
# ------------------
def iter_fragments(self, other_fragments):
'''
Step 2 in the check_single algorithm, which iterates over all
the crosslink fragment options and then calculates the total,
intact XL num and calculates the MS2 mass options.
'''
for fragments in other_fragments:
link_ends = self.get_combo_linkends(fragments)
combinations = CrossLinkerCombinations(
self.scan, self, missing=1, fragments=link_ends)
for self.xlnum in combinations:
adjust_z = self.parent.xler.get('delta_charge', 0)
self.theor_charge = sum(self.scan.z) + adjust_z * self.xlnum
try:
ms1 = self.get_missing_mass(link_ends, fragments)
self.ms1s.append(ms1)
except AssertionError:
pass
def get_combo_linkends(self, fragments):
'''Grabs the linkends for a given other fragment set'''
link_ends = self.link_ends_base.copy()
for index, res in enumerate(self.order):
link_ends[res] += fragments[index]
return link_ends
def get_missing_mass(self, link_ends, fragments):
'''
Grabs the missing basemass and initiates a named tuple for
the mass lists form the link and deadends.
'''
max_links = self.max_link_ends()
dead_ends = self.get_dead_ends(link_ends, max_links)
ends = Ends(link_ends, dead_ends, self.xlnum)
# now need to calculate the base mass and mass permutations
basemass = self.missing_ms1_mass(dead_ends)
assert basemass > self._threshold
mass_perm = list(self.get_fragment_combinations(sum(fragments), False))
mzs = []
for mass in mass_perm:
mz_ = masstools.mz(basemass + sum(mass), self.missing_z(), 0)
mzs.append(mz_)
mzs = np.array(mzs).reshape((-1, 1))
ms1 = self.ms1(ends, basemass, mass_perm, mzs)
return ms1
def missing_ms1_mass(self, dead_ends):
'''
Calculates the MS1 mass and linkmass and then compares to the
experimental MS1 mass to find the missing MS1 mass.
'''
linkmass = self.xlnum * self.bridge
for res, count in dead_ends.items():
linkmass += self.site_mass[res] * count
basemass = self.calculate_mass()
theor = linkmass + basemass
exper = self.get_exper_mass()
return exper - theor
# ------------------
# STEP 3
# ------------------
def extract_mzs(self):
'''
Step 3 in the sequence. This extracts all the m/z values from
the given candidate list within the ppm threshold for each
candidate fragment modification possibilities.
'''
mzs = [ms1.mz for ms1 in self.ms1s]
ppms = [abs(self.mz - _mz) / _mz for _mz in mzs]
entries, indexes = self.extract_indexes(ppms)
if not indexes:
return
filt_other = [self.ms1s[index] for index in indexes]
filt_entries = [entries[index] for index in indexes]
filt_ppms = [ppms[index] for index in indexes]
for index, other in enumerate(filt_other):
# need to filter for a certain pct basepeak
rows, cols = filt_entries[index]
ppm = filt_ppms[index]
self.pack_searchable(ppm, other, rows, cols)
def pack_searchable(self, ppm, other, rows, cols):
'''Packs the searchable MS transition for peptide mass fingerprint'''
ppm_list = self.get_ppms(ppm, rows, cols)
mz_ = other.mz[rows].flatten()
mass = tuple(other.mass[row] for row in rows)
exper = self.mz[cols]
query = self.query(other.ends, other.basemass, mass, mz_,
ppm_list, exper, self.missing_z())
self.queries.append(query)
# ------------------
# STEP 4
# ------------------
def search(self):
'''
Step 4 in the search algorithm, by comparing to a theoretical DB.
Searches for all matched transitions, including decoys if on,
first by combining the decoy and standard dbs, creating a search
param and then comparing theoretical to experimental.
'''
for query in self.queries:
mass_keys = [str(i) for i in query.mass]
for index, mass_key in enumerate(mass_keys):
items = self.get_theoretical(mass_key)
if not items:
continue
self.search_items(query, items, index)
def get_theoretical(self, mass_key):
'''Extracts the theoretical peptide databases for searching'''
items = []
for database in self.parent.dbs:
if mass_key in self.parent.searchables[database]:
items.append(self.parent.searchables[database][mass_key])
return items
    def search_items(self, query, items, index):
        '''Generates a search query for the given peptide items'''
        # NOTE(review): this initial value is always overwritten below.
        search = {}
        # Row count taken from the first item's 'id' column; assumes every
        # database block has the same length -- TODO confirm.
        length = items[0]['id'].shape[0]
        for database in items:
            for idx in range(length):
                # Slice row `idx` of every column into a flat dict and hand
                # it to PMFMaker (instantiated for its side effects).
                search = {k: database[k][idx] for k in database}
                PMFMaker(query, search, index, self, self.parent)
# ------------------
# UTILS
# ------------------
    def get_scan_data(self):
        '''Returns the MS2 scan data for link'''
        # Precursor scan number of this hit, then the cached scan record
        # looked up by its string form.
        precursor = self.data['precursor_num'][self.index[0]]
        scan_data = self.parent.precursor_scans[str(precursor)]
        return scan_data
    def get_sitemass(self):
        '''Creates a {res: mass} holder for deadend mods'''
        dead = self.xler['dead_end']
        react = self.xler["react_sites"]
        # Pair each reactive residue with the mass of its dead-end cap
        # formula; mass semantics come from chemical.Molecule (external).
        # Assumes dead_end and react_sites are parallel sequences -- TODO confirm.
        sitemass = {res: chemical.Molecule(dead[i]).mass for i, res
            in enumerate(react)}
        return sitemass
def missing_z(self):
'''Calculates the theoretical missing peptide charge'''
if self.theor_charge is None:
# assume 1 XLer mod
adjust_z = self.xler.get('delta_charge', 0)
charges = sum(self.unpack_data("z"))
return self.scan.precursor_z - charges - adjust_z
else:
return self.scan.precursor_z - self.theor_charge
    def extract_indexes(self, ppms):
        '''
        Extracts all the XL-MS indexes within the default ppm threshold
        of the desired ID and those above a certain percentage of basepeak
        '''
        try:
            # Intensity filter: keep peaks at >= min_int fraction of the
            # base (most intense) peak.
            basepeak = self.intensity.max()
            ints = self.intensity >= basepeak * self.min_int
        except ValueError:
            # no scan data
            return [], []
        # Per candidate: np.where output of positions where the ppm error
        # is inside the threshold AND the intensity filter passes.
        entries = [np.where((ppm <= self._ppm_thresh) & ints) for ppm in ppms]
        # Candidates with at least one surviving position.
        indexes = [i for i, j in enumerate(entries) if j[0].size > 0]
        return entries, indexes
@staticmethod
def get_ppms(ppm_array, rows, cols):
'''Returns the experimental PPMs within the parent threshold'''
ppm_list = []
indexes = zip(rows, cols)
for index in indexes:
# has a zipped (row, col) format for single indexing,
# which is much faster than chained or ndarray.item()
ppm_list.append(ppm_array[index])
return ppm_list
| [
"collections.namedtuple",
"xldlib.utils.iterables.num_product",
"numpy.where",
"xldlib.chemical.Molecule",
"numpy.array",
"models.data.unpack_data.__get__",
"copy.deepcopy",
"xlpy.tools.mass.PrecursorMass.calculate_mass.__get__"
] | [((1147, 1189), 'collections.namedtuple', 'namedtuple', (['"""MS1"""', '"""ends basemass mass mz"""'], {}), "('MS1', 'ends basemass mass mz')\n", (1157, 1189), False, 'from collections import namedtuple\n'), ((1202, 1259), 'collections.namedtuple', 'namedtuple', (['"""Search"""', '"""ends basemass mass mz ppm exper z"""'], {}), "('Search', 'ends basemass mass mz ppm exper z')\n", (1212, 1259), False, 'from collections import namedtuple\n'), ((1612, 1637), 'models.data.unpack_data.__get__', 'unpack_data.__get__', (['self'], {}), '(self)\n', (1631, 1637), False, 'from models.data import unpack_data\n'), ((1668, 1710), 'xlpy.tools.mass.PrecursorMass.calculate_mass.__get__', 'PrecursorMass.calculate_mass.__get__', (['self'], {}), '(self)\n', (1704, 1710), False, 'from xlpy.tools.mass import PrecursorMass\n'), ((2231, 2250), 'copy.deepcopy', 'copy.deepcopy', (['scan'], {}), '(scan)\n', (2244, 2250), False, 'import copy\n'), ((4672, 4729), 'xldlib.utils.iterables.num_product', 'iterables.num_product', (['*fragments'], {'min': '(1)', 'max': 'self.max_xl'}), '(*fragments, min=1, max=self.max_xl)\n', (4693, 4729), False, 'from xldlib.utils import iterables, masstools\n'), ((11571, 11613), 'numpy.where', 'np.where', (['((ppm <= self._ppm_thresh) & ints)'], {}), '((ppm <= self._ppm_thresh) & ints)\n', (11579, 11613), True, 'import numpy as np\n'), ((2499, 2544), 'xldlib.chemical.Molecule', 'chemical.Molecule', (["self.parent.xler['bridge']"], {}), "(self.parent.xler['bridge'])\n", (2516, 2544), False, 'from xldlib import chemical\n'), ((6717, 6730), 'numpy.array', 'np.array', (['mzs'], {}), '(mzs)\n', (6725, 6730), True, 'import numpy as np\n'), ((10631, 10657), 'xldlib.chemical.Molecule', 'chemical.Molecule', (['dead[i]'], {}), '(dead[i])\n', (10648, 10657), False, 'from xldlib import chemical\n'), ((4180, 4200), 'xldlib.chemical.Molecule', 'chemical.Molecule', (['i'], {}), '(i)\n', (4197, 4200), False, 'from xldlib import chemical\n')] |
import numpy as np
def sqsum(a):
    """
    Sum of the squared elements of a sequence.
    Parameters
    ----------
    a: list
        the numbers to square and sum
    Return
    ------
    float
        the squared sum
    Examples
    --------
    >>> a = [1, 2, 3]
    >>> sqsum(a)
    14
    """
    arr = np.asarray(a)
    return (arr * arr).sum()
| [
"numpy.array",
"numpy.sum"
] | [((323, 334), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (331, 334), True, 'import numpy as np\n'), ((346, 360), 'numpy.sum', 'np.sum', (['(a ** 2)'], {}), '(a ** 2)\n', (352, 360), True, 'import numpy as np\n')] |
from __future__ import print_function

import collections
import collections.abc

from typing import Mapping, Union, Sequence, MutableSequence, Tuple, Any
from typing import Optional, Callable, TypeVar, Iterable, List, cast

import numpy
# An initial state may be a dense amplitude vector, an int, or an ndarray.
InitialState = Union[MutableSequence[complex], int, numpy.ndarray]
# This should be anything subscriptable
IndexType = Union[int, Tuple[int, ...]]
# Matrices may arrive as nested lists or ndarrays, hence Any.
MatrixType = Any
T = TypeVar('T')
def kronselect_dot(mats: Mapping[IndexType, MatrixType], vec: MutableSequence[complex], n: int,
                   outputarray: MutableSequence[complex],
                   input_offset: int = 0, output_offset: int = 0, dot_impl: Optional[Callable] = None):
    """
    Efficiently performs the operation: OuterProduct( m1, m2, ..., mn ) dot vec
    for the case where most mj are identity.
    :param mats: { (indices,... #m) : mat(2**m x 2**m) }
    :param vec: vector v of size 2**n
    :param n: total number of matrices (including identities)
    :param outputarray: array in which to store output
    :param dot_impl: implementation of cdot function to use, see dot_loop below for example.
    :param input_offset: offset at which vec starts relative to possible larger context
    :param output_offset: offset at which outputarray starts relative to possible larger context
    """
    if len(vec) + input_offset > 2**n:
        raise ValueError("Input vector size plus offset may be no larger than the total number of qubit states (2^n)")
    if len(outputarray) + output_offset > 2**n:
        raise ValueError("Output vector size plus offset may be no larger than the total number of qubit states (2^n)")
    # Normalize `mats`: every key becomes a tuple of qubit indices and every
    # list value becomes a numpy array, so downstream code sees one format.
    newmats = {}
    for indices in mats:
        # Will be set by one of the paths
        nindices = 0
        m = None
        if type(indices) != tuple and type(indices) != int:
            raise Exception("Type of indices must be tuple: {}".format(indices))
        elif type(indices) == tuple:
            m = mats[indices]
            if type(m) == list:
                m = numpy.array(m)
            newmats[indices] = m
            nindices = len(indices)
        elif type(indices) == int:
            m = mats[indices]
            if type(m) == list:
                m = numpy.array(m)
            newmats[(indices,)] = m
            nindices = 1
        # A matrix acting on k qubits must be exactly 2**k x 2**k.
        if 2**nindices != m.shape[0] or 2**nindices != m.shape[1]:
            raise Exception("Shape of square submatrix must equal 2**(number of indices): "
                            "{}: {}".format(indices, m))
    if dot_impl is not None:
        # Optimized path: hand int32 index arrays and complex128 matrices to
        # the supplied compiled implementation.
        iter_indices = newmats.keys()
        cindices = numpy.array([numpy.array(x, dtype=numpy.int32) for x in iter_indices])
        cmats = numpy.array([newmats[x] if type(newmats[x]) != numpy.ndarray else newmats[x].astype(numpy.complex128)
                     for x in iter_indices])
        dot_impl(cindices, cmats, vec, n, outputarray, input_offset=input_offset, output_offset=output_offset)
    else:
        # Pure-python fallback.
        dot_loop(newmats, vec, n, outputarray, input_offset=input_offset, output_offset=output_offset)
def dot_loop(mats: Mapping[IndexType, MatrixType], vec: MutableSequence[complex], n: int,
             output: MutableSequence[complex], input_offset: int = 0, output_offset: int = 0):
    """Pure-python reference implementation of the sparse kron-product dot.

    `mats` must already be normalized (tuple keys, array values) as produced
    inside kronselect_dot.
    """
    allindices = list(mats.keys())
    # All qubit positions touched by any non-identity matrix, ascending.
    flatindices = list(sorted(set(index for indices in allindices for index in indices)))
    for outputrow in range(len(output)):
        row = outputrow + output_offset
        s = 0
        for rowcol, mijs in gen_valid_col_and_matcol(row, flatindices, n):
            r, c = rowcol
            input_col = c - input_offset
            # Skip columns that fall outside the provided slice of vec.
            if input_col < 0 or input_col >= len(vec):
                continue
            p = 1.0
            # Multiply required entries in each non-identity matrix
            for indices in allindices:
                mat = mats[indices]
                submati = bitarray_to_uint([mijs[index][0] for index in indices])
                submatj = bitarray_to_uint([mijs[index][1] for index in indices])
                p *= mat[submati, submatj]
            s += p*vec[input_col]
        output[outputrow] = s
def gen_valid_col_and_matcol(row: int, matindices: Sequence[int], n: int):
    """For a fixed output `row`, yield every column that can contribute.

    Yields ((row, col), {qubit_index: (row_bit, col_bit)}), where only the
    bits at `matindices` differ between row and column; all other bits of
    the column equal those of the row.
    """
    rowbits = uint_to_bitarray(row, n)
    colbits = rowbits[:]
    matrow = tuple(rowbits[indx] for indx in matindices)
    # Enumerate every bit assignment over the varying positions.
    for i in range(2**len(matindices)):
        matcol = uint_to_bitarray(i, len(matindices))
        for j, indx in enumerate(matindices):
            colbits[indx] = matcol[j]
        yield (row, bitarray_to_uint(colbits)), {matindices[j]: item for j, item in enumerate(zip(matrow, matcol))}
def gen_edit_indices(index_groups: Sequence[Sequence[int]], maxindex: int):
    """Enumerate all bit assignments over the qubits in `index_groups`.

    For each of the 2**(total bits) assignments, yields
    (global_state_index, [per-group integer value, ...]); bits outside the
    groups stay 0. Yields nothing when `index_groups` is empty.
    """
    if len(index_groups) > 0:
        allindices = flatten(index_groups)
        bits = [0] * (maxindex+1)
        for i in range(2**len(allindices)):
            flips = uint_to_bitarray(i, len(allindices))
            # Write this assignment into the global bit vector.
            for j, bit in enumerate(flips):
                bits[allindices[j]] = flips[j]
            # Decode each group's slice of flips into its own integer.
            qbit_state_indices = [0] * len(index_groups)
            indx = 0
            for j, index_group in enumerate(index_groups):
                subflips = flips[indx:indx+len(index_group)]
                qbit_state_indices[j] = bitarray_to_uint(subflips)
                indx += len(index_group)
            yield bitarray_to_uint(bits), qbit_state_indices
def expand_kron_matrix(mats: Mapping[IndexType, MatrixType], n: int) -> numpy.ndarray:
    """Materialize the full 2**n x 2**n matrix represented by `mats` by
    applying kronselect_dot to each basis vector.

    NOTE(review): the result for basis vector e_i is written into row i, so
    for non-symmetric operators this is the transpose of the conventional
    matrix -- confirm intended orientation with callers.
    """
    m = numpy.zeros((2**n, 2**n), dtype=numpy.complex128)
    mats = {i: numpy.array(mats[i], dtype=numpy.complex128) for i in mats}
    for i in range(2**n):
        v = cast(MutableSequence[complex], numpy.zeros((2**n,), dtype=numpy.complex128))
        v[i] = 1.0
        kronselect_dot(mats, v, n, m[i, :])
    return m
def uint_to_bitarray(num: int, n: int) -> Sequence[int]:
    """Return the low `n` bits of `num` as a list, most significant first."""
    return [(num >> shift) & 1 for shift in range(n - 1, -1, -1)]
def bitarray_to_uint(bits: Sequence[int]) -> int:
    """Interpret `bits` (MSB first, truthy = 1) as an unsigned integer."""
    value = 0
    for bit in bits:
        value = (value << 1) | (1 if bit else 0)
    return value
def flatten(lst: Iterable[Iterable[T]]) -> List[T]:
    """Flatten one level of nesting.

    Iterable elements (including strings) are expanded one level; scalar
    elements are kept as-is.

    Fix: `collections.Iterable` (the alias deprecated since Python 3.3) was
    removed in Python 3.10; use `collections.abc.Iterable` instead.
    """
    listgen = [item if isinstance(item, collections.abc.Iterable) else (item,)
               for item in lst]
    return [item for sublist in listgen for item in sublist]
def qubit_index_notation(i: int, *qns: int, n: Optional[int] = None) -> Sequence[int]:
    """Split global state index `i` into per-register indices.

    `qns` gives the bit-width of each register (most significant first);
    `n` is the total bit count and defaults to sum(qns).
    """
    if n is None:
        n = sum(qns)
    index_array = []
    bit_array = uint_to_bitarray(i, n)
    start_indx = 0
    for qubit_size in qns:
        # Consume `qubit_size` bits per register, left to right.
        num = bitarray_to_uint(bit_array[start_indx:start_indx + qubit_size])
        index_array.append(num)
        start_indx += qubit_size
    return index_array
def gen_qubit_prints(state: Sequence[complex], *qs: Union[int, Any]):
    """Yield "|a,b,...> = amplitude" strings for each non-zero amplitude.

    Each element of `qs` is either a register bit-width (int) or an object
    exposing its width as `.n`.
    """
    qubit_sizes = []
    for q in qs:
        if type(q) == int:
            qubit_sizes.append(q)
        else:
            qubit_sizes.append(q.n)
    n = sum(qubit_sizes)
    for i in range(len(state)):
        if state[i] == 0:
            continue
        s = "|"
        s += ",".join(map(str,qubit_index_notation(i, *qubit_sizes, n=n)))
        s += "> = {}".format(state[i])
        yield s
| [
"numpy.array",
"numpy.zeros",
"typing.TypeVar"
] | [((382, 394), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (389, 394), False, 'from typing import Optional, Callable, TypeVar, Iterable, List, cast\n'), ((5447, 5500), 'numpy.zeros', 'numpy.zeros', (['(2 ** n, 2 ** n)'], {'dtype': 'numpy.complex128'}), '((2 ** n, 2 ** n), dtype=numpy.complex128)\n', (5458, 5500), False, 'import numpy\n'), ((5512, 5556), 'numpy.array', 'numpy.array', (['mats[i]'], {'dtype': 'numpy.complex128'}), '(mats[i], dtype=numpy.complex128)\n', (5523, 5556), False, 'import numpy\n'), ((5641, 5687), 'numpy.zeros', 'numpy.zeros', (['(2 ** n,)'], {'dtype': 'numpy.complex128'}), '((2 ** n,), dtype=numpy.complex128)\n', (5652, 5687), False, 'import numpy\n'), ((2591, 2624), 'numpy.array', 'numpy.array', (['x'], {'dtype': 'numpy.int32'}), '(x, dtype=numpy.int32)\n', (2602, 2624), False, 'import numpy\n'), ((1997, 2011), 'numpy.array', 'numpy.array', (['m'], {}), '(m)\n', (2008, 2011), False, 'import numpy\n'), ((2198, 2212), 'numpy.array', 'numpy.array', (['m'], {}), '(m)\n', (2209, 2212), False, 'import numpy\n')] |
from __future__ import print_function
import argparse
import os
# from torchvision.datasets import ImageFolder
import myDataset
import networks
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
from model import ScNet
import torchvision.models as models
from torch.autograd import Variable
from tqdm import tqdm
# GPU ID
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Training settings (all hyperparameters come through argparse)
parser = argparse.ArgumentParser(description='PyTorch NI vs CG')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=32, metavar='N',
                    help='input batch size for testing (default: 32)')
parser.add_argument('--patch-size', type=int, default=96, metavar='N',
                    help='input the patch size of the network during training and testing (default: 96)')
parser.add_argument('--log-dir', default='./log',
                    help='folder to output model checkpoints')
parser.add_argument('--epochs', type=int, default=1200, metavar='N',
                    help='number of epochs to train (default: 1200)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
# NOTE(review): the help text below says "default: Adagrad" but the actual
# default is 'sgd'.
parser.add_argument('--optimizer', default='sgd', type=str,
                    metavar='OPT', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=400, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--loss-adjust', default=400, type=int,
                    help='how many epochs to change the learning rate (default: 400)')
parser.add_argument('--summary-interval', type=int, default=50,
                    help='how many epochs to summary the log')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
if args.cuda:
    cudnn.benchmark = True
kwargs = {'num_workers': 8, 'pin_memory': True} if args.cuda else {}
# The path of data
data_root = './data/'
if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)
# Data loading code
# You need to refine this for your data set directory
train_dir = os.path.join(data_root, 'train_img')
val_dir = os.path.join(data_root, 'validation_img')
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# Training loader: random crops + horizontal flips; the custom loader keeps
# class balance per half batch (see myDataset.DataLoaderHalf).
train_loader = myDataset.DataLoaderHalf(
    myDataset.MyDataset(train_dir,
                        transforms.Compose([
                            transforms.RandomCrop(args.patch_size),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor(),
                            normalize
                        ])),
    batch_size=args.batch_size, shuffle=True, half_constraint=True,
    sampler_type='RandomBalancedSampler', **kwargs)
# Validation loader: deterministic center crop, no shuffling.
val_loader = torch.utils.data.DataLoader(
    myDataset.MyDataset(val_dir,
                        transforms.Compose([
                            transforms.CenterCrop(args.patch_size),
                            transforms.ToTensor(),
                            normalize
                        ])),
    batch_size=args.test_batch_size, shuffle=False, **kwargs)
def main():
    """Build the model, then run the train/validate loop for args.epochs."""
    # instantiate model and initialize weights
    model = ScNet()
    networks.print_network(model)
    networks.init_weights(model, init_type='normal')
    if args.cuda:
        model.cuda()
    print('model load!')
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    # NOTE(review): L1_criterion is passed to train() but never used there;
    # size_average= is also a deprecated argument in modern PyTorch.
    L1_criterion = nn.L1Loss(size_average=False).cuda()
    optimizer = create_optimizer(model, args.lr)
    for epoch in range(1, args.epochs+1):
        # update the optimizer learning rate
        adjust_learning_rate(optimizer, epoch)
        train_acc, train_loss = train(train_loader, model, optimizer, criterion, L1_criterion, epoch)
        # Validate only every summary-interval epochs.
        if epoch % args.summary_interval == 0:
            val_acc, val_loss = test(val_loader, model, criterion, epoch)
def train(train_loader, model, optimizer, criterion, L1_criterion, epoch):
    """Run one training epoch; returns (accuracy %, mean loss).

    NOTE(review): L1_criterion is accepted but never used in this body.
    """
    # switch to train mode
    model.train()
    pbar = tqdm(enumerate(train_loader))
    running_loss = 0
    running_corrects = 0
    for batch_idx, (data, target) in pbar:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data_var, target_var = Variable(data), Variable(target)
        prediction = model(data_var)
        _, preds = torch.max(prediction.data, 1)
        loss = criterion(prediction, target_var)
        # statistics
        running_loss += loss.item()
        running_corrects += torch.sum(preds == target_var.data).cpu().item()
        # compute gradient and update weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            pbar.set_description(
                'Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
                    epoch, batch_idx * len(data_var), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.item()))
    # Checkpoint every `log_interval` epochs -- note the same flag also
    # paces the per-batch progress messages above.
    if epoch % args.log_interval == 0:
        torch.save({'epoch': epoch,
                    'state_dict': model.state_dict()},
                   '{}/checkpoint_{}.pth'.format(args.log_dir, epoch))
    # Mean loss over (approximate) number of batches, accuracy over samples.
    running_loss = running_loss / (len(train_loader.dataset) // args.batch_size)
    ave_corrects = 100. * running_corrects / len(train_loader.dataset)
    print('Train Epoch {}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        epoch, running_loss, running_corrects, len(train_loader.dataset), ave_corrects))
    return ave_corrects, running_loss
def test(val_loader, model, criterion, epoch):
    """Evaluate on the validation loader; returns (accuracy %, mean loss)."""
    # switch to evaluate mode
    model.eval()
    test_loss = 0
    correct = 0
    pbar = tqdm(enumerate(val_loader))
    for batch_idx, (data, target) in pbar:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # NOTE(review): Variable(volatile=True) is the pre-0.4 PyTorch idiom;
        # modern code would wrap the loop in `with torch.no_grad():` instead.
        data, target = Variable(data, volatile=True), Variable(target)
        # compute output
        output = model(data)
        test_loss += criterion(output, target).item() # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
    # Mean loss over (approximate) number of batches, accuracy over samples.
    test_loss = test_loss / (len(val_loader.dataset) // args.test_batch_size)
    ave_correct = 100. * correct / len(val_loader.dataset)
    print('Test Epoch: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss, correct, len(val_loader.dataset), ave_correct))
    return ave_correct, test_loss
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every `args.loss_adjust` epochs (default 400)"""
    lr = args.lr * (0.1 ** (epoch // args.loss_adjust))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def create_optimizer(model, new_lr):
    """Build the optimizer selected by ``args.optimizer`` for ``model``.

    Supported values: 'sgd', 'adam', 'adagrad'. Any other value now raises
    ValueError explicitly (previously it fell through and crashed with an
    UnboundLocalError on the return statement).

    :param model: network whose parameters will be optimized
    :param new_lr: initial learning rate
    :return: a configured torch.optim optimizer
    """
    # setup optimizer
    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=new_lr,
                              momentum=0.9,
                              weight_decay=args.wd)
    elif args.optimizer == 'adam':
        # NOTE(review): args.beta1 is not defined by the argument parser
        # above -- selecting 'adam' would raise AttributeError. TODO: add
        # a --beta1 option.
        optimizer = optim.Adam(model.parameters(), lr=new_lr,
                               weight_decay=args.wd, betas=(args.beta1, 0.999))
    elif args.optimizer == 'adagrad':
        # NOTE(review): args.lr_decay is likewise missing from the parser.
        optimizer = optim.Adagrad(model.parameters(),
                                  lr=new_lr,
                                  lr_decay=args.lr_decay,
                                  weight_decay=args.wd)
    else:
        raise ValueError('unknown optimizer: {}'.format(args.optimizer))
    return optimizer
# Script entry point.
if __name__ == '__main__':
    main()
| [
"torch.nn.CrossEntropyLoss",
"torch.nn.L1Loss",
"torch.max",
"torch.cuda.is_available",
"torch.sum",
"os.path.exists",
"argparse.ArgumentParser",
"networks.init_weights",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torchvision.transforms.RandomHorizontal... | [((530, 585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch NI vs CG"""'}), "(description='PyTorch NI vs CG')\n", (553, 585), False, 'import argparse\n'), ((2585, 2610), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2599, 2610), True, 'import numpy as np\n'), ((2918, 2954), 'os.path.join', 'os.path.join', (['data_root', '"""train_img"""'], {}), "(data_root, 'train_img')\n", (2930, 2954), False, 'import os\n'), ((2965, 3006), 'os.path.join', 'os.path.join', (['data_root', '"""validation_img"""'], {}), "(data_root, 'validation_img')\n", (2977, 3006), False, 'import os\n'), ((3020, 3074), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (3040, 3074), True, 'import torchvision.transforms as transforms\n'), ((2559, 2584), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2582, 2584), False, 'import torch\n'), ((2771, 2799), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (2785, 2799), False, 'import os\n'), ((2805, 2830), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (2816, 2830), False, 'import os\n'), ((3958, 3965), 'model.ScNet', 'ScNet', ([], {}), '()\n', (3963, 3965), False, 'from model import ScNet\n'), ((3971, 4000), 'networks.print_network', 'networks.print_network', (['model'], {}), '(model)\n', (3993, 4000), False, 'import networks\n'), ((4005, 4053), 'networks.init_weights', 'networks.init_weights', (['model'], {'init_type': '"""normal"""'}), "(model, init_type='normal')\n", (4026, 4053), False, 'import networks\n'), ((5134, 5163), 'torch.max', 'torch.max', (['prediction.data', '(1)'], {}), '(prediction.data, 1)\n', (5143, 5163), False, 'import torch\n'), ((4188, 4209), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4207, 4209), True, 'import 
torch.nn as nn\n'), ((4236, 4265), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (4245, 4265), True, 'import torch.nn as nn\n'), ((5043, 5057), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (5051, 5057), False, 'from torch.autograd import Variable\n'), ((5059, 5075), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (5067, 5075), False, 'from torch.autograd import Variable\n'), ((6685, 6714), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (6693, 6714), False, 'from torch.autograd import Variable\n'), ((6716, 6732), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (6724, 6732), False, 'from torch.autograd import Variable\n'), ((3215, 3253), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['args.patch_size'], {}), '(args.patch_size)\n', (3236, 3253), True, 'import torchvision.transforms as transforms\n'), ((3278, 3311), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3309, 3311), True, 'import torchvision.transforms as transforms\n'), ((3336, 3357), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3355, 3357), True, 'import torchvision.transforms as transforms\n'), ((3677, 3715), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['args.patch_size'], {}), '(args.patch_size)\n', (3698, 3715), True, 'import torchvision.transforms as transforms\n'), ((3741, 3762), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3760, 3762), True, 'import torchvision.transforms as transforms\n'), ((5300, 5335), 'torch.sum', 'torch.sum', (['(preds == target_var.data)'], {}), '(preds == target_var.data)\n', (5309, 5335), False, 'import torch\n')] |
import os
import numpy as np
from numpy.lib.stride_tricks import as_strided
import nibabel as nib
def nib_load(file_name):
    '''Load a NIfTI volume as a float32 numpy array, dropping the proxy cache.'''
    proxy = nib.load(file_name)
    # NOTE(review): get_data() is deprecated in recent nibabel -- presumably
    # get_fdata() is the modern equivalent; confirm before upgrading.
    data = proxy.get_data().astype('float32')
    # Free the proxy's cached copy; we keep our own array.
    proxy.uncache()
    return data
def crop(x, ksize, stride=3):
    """Extract a dense grid of overlapping ``ksize``-cubed patches from ``x``.

    Parameters
    ----------
    x : ndarray
        4-D volume; the first three axes are tiled, the last kept intact.
    ksize : int
        Patch edge length along each spatial axis.
    stride : int, optional
        Step between neighbouring patches (default 3).

    Returns
    -------
    ndarray view of shape (h, w, d, ksize, ksize, ksize, x.shape[3]) with
    h = (x.shape[0] - ksize) // stride + 1, etc. No data is copied (stride
    trick), so treat the result as read-only.

    Fixes: '/' produced float shapes under Python 3 (as_strided requires
    integers), and the stride multiplier was hard-coded to 3 instead of
    using the ``stride`` parameter.
    """
    shape = (np.array(x.shape[:3]) - ksize) // stride + 1
    shape = tuple(shape) + (ksize, )*3 + (x.shape[3], )
    strides = np.array(x.strides[:3]) * stride
    strides = tuple(strides) + x.strides
    x = as_strided(x, shape=shape, strides=strides)
    return x
# The four MRI modalities stacked as channels for each subject.
modalities = ('flair', 't1ce', 't1', 't2')
root = '/home/thuyen/Data/brats17/Brats17TrainingData/'
file_list = root + 'file_list.txt'
subjects = open(file_list).read().splitlines()
# Only the first subject is processed in this script.
subj = subjects[0]
name = subj.split('/')[-1]
path = os.path.join(root, subj, name + '_')
x0 = np.stack([
    nib_load(path + modal + '.nii.gz') \
    for modal in modalities], 3)
y0 = nib_load(path + 'seg.nii.gz')[..., None]
# Pad depth by one slice before tiling into patches.
x0 = np.pad(x0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')
y0 = np.pad(y0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')
# Multi-scale patches: pads of 8 = (25-9)/2 and 24 = (57-9)/2, presumably
# to centre the 25- and 57-wide patches on the same grid as the 9-wide ones.
x1 = crop(x0, 9)
x2 = crop(np.pad(x0, ((8, 8), (8, 8), (8, 8), (0, 0)), mode='constant'), 25)
x3 = crop(np.pad(x0, ((24, 24), (24, 24), (24, 24), (0, 0)), mode='constant'), 57)
y1 = crop(y0, 9)
# Keep only patch positions whose 9-wide patch contains any non-zero voxel.
m = x1.reshape(x1.shape[:3] + (-1, )).sum(3) > 0
x1 = x1[m]
x2 = x2[m]
x3 = x3[m]
y1 = y1[m]
print(x1.shape)
print(x2.shape)
print(x3.shape)
print(y1.shape)
| [
"nibabel.load",
"os.path.join",
"numpy.lib.stride_tricks.as_strided",
"numpy.array",
"numpy.pad"
] | [((765, 801), 'os.path.join', 'os.path.join', (['root', 'subj', "(name + '_')"], {}), "(root, subj, name + '_')\n", (777, 801), False, 'import os\n'), ((945, 1006), 'numpy.pad', 'np.pad', (['x0', '((0, 0), (0, 0), (0, 1), (0, 0))'], {'mode': '"""constant"""'}), "(x0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')\n", (951, 1006), True, 'import numpy as np\n'), ((1012, 1073), 'numpy.pad', 'np.pad', (['y0', '((0, 0), (0, 0), (0, 1), (0, 0))'], {'mode': '"""constant"""'}), "(y0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')\n", (1018, 1073), True, 'import numpy as np\n'), ((136, 155), 'nibabel.load', 'nib.load', (['file_name'], {}), '(file_name)\n', (144, 155), True, 'import nibabel as nib\n'), ((471, 514), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['x'], {'shape': 'shape', 'strides': 'strides'}), '(x, shape=shape, strides=strides)\n', (481, 514), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((1102, 1163), 'numpy.pad', 'np.pad', (['x0', '((8, 8), (8, 8), (8, 8), (0, 0))'], {'mode': '"""constant"""'}), "(x0, ((8, 8), (8, 8), (8, 8), (0, 0)), mode='constant')\n", (1108, 1163), True, 'import numpy as np\n'), ((1179, 1246), 'numpy.pad', 'np.pad', (['x0', '((24, 24), (24, 24), (24, 24), (0, 0))'], {'mode': '"""constant"""'}), "(x0, ((24, 24), (24, 24), (24, 24), (0, 0)), mode='constant')\n", (1185, 1246), True, 'import numpy as np\n'), ((395, 418), 'numpy.array', 'np.array', (['x.strides[:3]'], {}), '(x.strides[:3])\n', (403, 418), True, 'import numpy as np\n'), ((282, 303), 'numpy.array', 'np.array', (['x.shape[:3]'], {}), '(x.shape[:3])\n', (290, 303), True, 'import numpy as np\n')] |
# Turn image to animated gif using 3 animation modes: "explode" "melt" "diffuse"
import PIL.Image as Image
import numpy as np
#------------------input parameters----------------------------------------------------------------
InputImage="Input.jpg" # Input image path
OutputGifName="Out.gif" # Output gif file path
Mode="melt" # Animation modes: "explode" "melt" "diffuse"
NumFrames=40 # Number of frames for animation
duration=80 # Frame duration in milliseconds
Reverse = False # Reverse frames order True/False
Palladrum = False # Palindrome-style playback (forward then reverse); name kept as-is
#---------------Read image-------------------------------------------------------------------
pic=Image.open(InputImage)
im=np.array(pic.getdata()).reshape(pic.size[1], pic.size[0], 3)
im=im.astype(np.uint8)
h,w,d=im.shape
print(h,w)
# Image centre (floats under Python 3; only used in comparisons below).
cx=w/2
cy=h/2
#------------------------------diffuse-----------------------------------------------------------------
def diffuse():
    """Animate by repeatedly copying random pixels a few steps away from the
    centre, saving one frame per NumFrames iteration (mutates global `im`)."""
    ImArray=[]
    for i in range(NumFrames):
        print("Frame ", i, "out of ", NumFrames)
        # NOTE(review): the inner loop reuses `i`, shadowing the frame counter.
        for i in range(1000):
            x = np.random.randint(w)
            y = np.random.randint(h)
            for jj in range(30):
                dx = np.random.randint(-4,4)
                dy = np.random.randint(-4,4)
                # Flip the offset sign on the left/top half of the image.
                if x < cx: dx *= -1
                if y < cy: dy *= -1
                x1 = x + dx
                y1 = y + dy
                # Copy only when the destination stays inside the image.
                if not (x1>=w or x1<0 or y1>=h or y1<0):
                    im[y1,x1]=im[y,x]
        ImArray.append(Image.fromarray(im))
    if Reverse: ImArray = ImArray[::-1] # Inverse frames order
    if Palladrum: ImArray = ImArray+ImArray[::-1] # Palindrome frames order
    ImArray[0].save(OutputGifName, save_all=True, append_images=ImArray[1:] , duration=duration, loop=10000)
#--------------------------explode--------------------------------------------------------------------------
def explode():
    """Animate by copying random pixels radially outward from the centre by a
    random distance, saving one frame per NumFrames iteration (mutates `im`)."""
    ImArray = []
    for i in range(NumFrames):
        print("Frame ", i, "out of ", NumFrames)
        # NOTE(review): the inner loop reuses `i`, shadowing the frame counter.
        for i in range(1000):
            x = np.random.randint(w)
            y = np.random.randint(h)
            for iii in range(30):
                r= np.random.randint(10)
                # Unit vector from the centre to (x, y), scaled by r
                # (+0.001 guards against division by zero at the centre).
                dx = x-cx
                dy = y-cy
                dc = (dy**2+dx**2)**0.5
                dx /= dc+0.001
                dy /= dc+0.001
                dx*=r
                dy*=r
                # Random jitter of -1/0/+1 on each axis half the time.
                if np.random.rand()<0.5:
                    dx+=np.random.randint(3)-1
                if np.random.rand() < 0.5:
                    dy += np.random.randint(3) - 1
                x1 = x + int(dx)
                y1 = y + int(dy)
                # Stop this pixel's walk once it leaves the image.
                if (x1 >= w or x1 < 0 or y1 >= h or y1 < 0): break
                im[y1, x1] = im[y, x]
        ImArray.append(Image.fromarray(im))
    if Reverse: ImArray=ImArray[::-1] # Inverse frames order
    if Palladrum: ImArray = ImArray + ImArray[::-1] # Palindrome frames order
    ImArray[0].save(OutputGifName, save_all=True, append_images=ImArray[1:] , duration=duration, loop=10000)
#-----------------------------melt--------------------------------------------------------------------------------------
def melt():
    """Animate by copying random pixels downward (with slight horizontal
    jitter), saving one frame per NumFrames iteration (mutates global `im`)."""
    ImArray = []
    for i in range(NumFrames):
        print("Frame ", i, "out of ", NumFrames)
        # NOTE(review): the inner loop reuses `i`, shadowing the frame counter.
        for i in range(1000):
            x = np.random.randint(w)
            y = np.random.randint(h)
            for iii in range(30):
                # Always drift down (dy >= 0), jitter sideways by up to 3.
                dy = np.random.randint(6)
                dx=np.random.randint(-3,3)
                x1 = x + int(dx)
                y1 = y + int(dy)
                # Stop this pixel's walk once it leaves the image.
                if (x1 >= w or x1 < 0 or y1 >= h or y1 < 0): break
                im[y1, x1] = im[y, x]
        ImArray.append(Image.fromarray(im))
    if Reverse: ImArray = ImArray[::-1] # Inverse frames order
    if Palladrum: ImArray = ImArray + ImArray[::-1] # Palindrome frames order
    ImArray[0].save(OutputGifName, save_all=True, append_images=ImArray[1:] , duration=duration, loop=10000)
#----------------------Main----------------------------------------------------------------
# Dispatch on the Mode string configured at the top of the script.
print("mode")
if Mode=="explode": explode()
if Mode=="melt": melt()
if Mode=="diffuse": diffuse() | [
"numpy.random.randint",
"PIL.Image.open",
"numpy.random.rand",
"PIL.Image.fromarray"
] | [((703, 725), 'PIL.Image.open', 'Image.open', (['InputImage'], {}), '(InputImage)\n', (713, 725), True, 'import PIL.Image as Image\n'), ((1128, 1148), 'numpy.random.randint', 'np.random.randint', (['w'], {}), '(w)\n', (1145, 1148), True, 'import numpy as np\n'), ((1166, 1186), 'numpy.random.randint', 'np.random.randint', (['h'], {}), '(h)\n', (1183, 1186), True, 'import numpy as np\n'), ((1581, 1600), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (1596, 1600), True, 'import PIL.Image as Image\n'), ((2130, 2150), 'numpy.random.randint', 'np.random.randint', (['w'], {}), '(w)\n', (2147, 2150), True, 'import numpy as np\n'), ((2166, 2186), 'numpy.random.randint', 'np.random.randint', (['h'], {}), '(h)\n', (2183, 2186), True, 'import numpy as np\n'), ((2796, 2815), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (2811, 2815), True, 'import PIL.Image as Image\n'), ((3351, 3371), 'numpy.random.randint', 'np.random.randint', (['w'], {}), '(w)\n', (3368, 3371), True, 'import numpy as np\n'), ((3387, 3407), 'numpy.random.randint', 'np.random.randint', (['h'], {}), '(h)\n', (3404, 3407), True, 'import numpy as np\n'), ((3705, 3724), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (3720, 3724), True, 'import PIL.Image as Image\n'), ((1243, 1267), 'numpy.random.randint', 'np.random.randint', (['(-4)', '(4)'], {}), '(-4, 4)\n', (1260, 1267), True, 'import numpy as np\n'), ((1289, 1313), 'numpy.random.randint', 'np.random.randint', (['(-4)', '(4)'], {}), '(-4, 4)\n', (1306, 1313), True, 'import numpy as np\n'), ((2236, 2257), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (2253, 2257), True, 'import numpy as np\n'), ((3461, 3481), 'numpy.random.randint', 'np.random.randint', (['(6)'], {}), '(6)\n', (3478, 3481), True, 'import numpy as np\n'), ((3498, 3522), 'numpy.random.randint', 'np.random.randint', (['(-3)', '(3)'], {}), '(-3, 3)\n', (3515, 3522), True, 'import numpy as np\n'), ((2453, 2469), 
'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2467, 2469), True, 'import numpy as np\n'), ((2535, 2551), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2549, 2551), True, 'import numpy as np\n'), ((2496, 2516), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (2513, 2516), True, 'import numpy as np\n'), ((2582, 2602), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (2599, 2602), True, 'import numpy as np\n')] |
"""Bounds module for functions related to coordinate bounds."""
import collections
from typing import Dict, List, Optional, Tuple
import cf_xarray as cfxr # noqa: F401
import numpy as np
import xarray as xr
from typing_extensions import Literal, get_args
from xcdat.logger import setup_custom_logger
logger = setup_custom_logger("root")
# Type alias for the coordinate keys accepted by the bounds accessor methods.
Coord = Literal["lat", "latitude", "lon", "longitude", "time"]
#: Tuple of supported coordinates in XCDAT functions and methods.
SUPPORTED_COORDS: Tuple[Coord, ...] = get_args(Coord)
@xr.register_dataset_accessor("bounds")
class DatasetBoundsAccessor:
    """A class to represent the DatasetBoundsAccessor.
    Examples
    ---------
    Import:
    >>> from xcdat import bounds
    Return dictionary of coordinate keys mapped to bounds DataArrays:
    >>> ds = xr.open_dataset("file_path")
    >>> bounds = ds.bounds.bounds
    Fill missing coordinate bounds in the Dataset:
    >>> ds = xr.open_dataset("file_path")
    >>> ds = ds.bounds.fill_missing()
    Get coordinate bounds if they exist:
    >>> ds = xr.open_dataset("file_path")
    >>>
    >>> # Throws error if bounds don't exist
    >>> lat_bounds = ds.bounds.get_bounds("lat")  # or pass "latitude"
    >>> lon_bounds = ds.bounds.get_bounds("lon")  # or pass "longitude"
    >>> time_bounds = ds.bounds.get_bounds("time")
    Add coordinates bounds if they don't exist:
    >>> ds = xr.open_dataset("file_path")
    >>>
    >>> # Throws error if bounds exist
    >>> ds = ds.bounds.add_bounds("lat")
    """
    def __init__(self, dataset: xr.Dataset):
        self._dataset: xr.Dataset = dataset
    @property
    def bounds(self) -> Dict[str, Optional[xr.DataArray]]:
        """Returns a mapping of coordinate and axis keys to their bounds.
        The dictionary provides all valid CF compliant keys for a coordinate.
        For example, latitude will include keys for "lat", "latitude", and "Y".
        Returns
        -------
        Dict[str, Optional[xr.DataArray]]
            Dictionary mapping coordinate keys to their bounds.
        """
        ds = self._dataset
        bounds: Dict[str, Optional[xr.DataArray]] = {}
        # cf.bounds maps each coordinate key to a list of bounds variable
        # names; only the first name is looked up (None if absent).
        for coord, bounds_name in ds.cf.bounds.items():
            bound = ds.get(bounds_name[0], None)
            bounds[coord] = bound
        return collections.OrderedDict(sorted(bounds.items()))
    @property
    def names(self) -> List[str]:
        """Returns a list of names for the bounds data variables in the Dataset.
        Returns
        -------
        List[str]
            A list of sorted bounds data variable names.
        """
        return sorted(
            list(
                {
                    name
                    for bound_names in self._dataset.cf.bounds.values()
                    for name in bound_names
                }
            )
        )
    def fill_missing(self) -> xr.Dataset:
        """Fills any missing bounds for supported coordinates in the Dataset.
        Returns
        -------
        xr.Dataset
        """
        for coord in [*self._dataset.coords]:
            if coord in SUPPORTED_COORDS:
                try:
                    self._dataset.cf.get_bounds(coord)
                except KeyError:
                    # cf_xarray raises KeyError when no bounds exist, so
                    # generate them from the coordinate points.
                    self._dataset = self.add_bounds(coord)
        return self._dataset
    def get_bounds(self, coord: Coord) -> xr.DataArray:
        """Get bounds for a coordinate.
        Parameters
        ----------
        coord : Coord
            The coordinate key.
        Returns
        -------
        xr.DataArray
            The coordinate bounds.
        Raises
        ------
        ValueError
            If an incorrect ``coord`` argument is passed.
        KeyError
            If bounds were not found. They must be added.
        """
        if coord not in SUPPORTED_COORDS:
            # NOTE(review): the message below repeats "Supported ... include:";
            # runtime string intentionally left unchanged here.
            raise ValueError(
                "Incorrect `coord` argument. Supported coordinates include: Supported "
                f"arguments include: {', '.join(SUPPORTED_COORDS)}."
            )
        try:
            bounds = self._dataset.cf.get_bounds(coord)
        except KeyError:
            raise KeyError(f"{coord} bounds were not found, they must be added.")
        return bounds
    def add_bounds(self, coord: Coord, width: float = 0.5) -> xr.Dataset:
        """Add bounds for a coordinate using its data points.
        If bounds already exist, they must be dropped first.
        Parameters
        ----------
        coord : Coord
            The coordinate key.
        width : float, optional
            Width of the bounds relative to the position of the nearest points,
            by default 0.5.
        Returns
        -------
        xr.Dataset
            The dataset with bounds added.
        Raises
        ------
        ValueError
            If bounds already exist. They must be dropped first.
        """
        try:
            self._dataset.cf.get_bounds(coord)
            raise ValueError(
                f"{coord} bounds already exist. Drop them first to add new bounds."
            )
        except KeyError:
            dataset = self._add_bounds(coord, width)
        return dataset
    def _add_bounds(self, coord: Coord, width: float = 0.5) -> xr.Dataset:
        """Adds bounds for a coordinate using its data points.
        Parameters
        ----------
        coord : Coord
            The coordinate key.
        width : float, optional
            Width of the bounds relative to the position of the nearest points,
            by default 0.5.
        Returns
        -------
        xr.Dataset
            The dataset with bounds added.
        Raises
        ------
        ValueError
            If coords dimensions does not equal 1.
        ValueError
            If coords are length of <=1.
        Notes
        -----
        Based on [1]_ ``iris.coords._guess_bounds`` and [2]_ ``cf_xarray.accessor.add_bounds``
        References
        ----------
        .. [1] https://scitools-iris.readthedocs.io/en/stable/generated/api/iris/coords.html#iris.coords.AuxCoord.guess_bounds
        .. [2] https://cf-xarray.readthedocs.io/en/latest/generated/xarray.Dataset.cf.add_bounds.html#
        """
        da_coord: xr.DataArray = self._get_coord(coord)
        # Validate coordinate shape and dimensions
        if da_coord.ndim != 1:
            raise ValueError("Cannot generate bounds for multidimensional coordinates.")
        if da_coord.shape[0] <= 1:
            raise ValueError("Cannot generate bounds for a coordinate of length <= 1.")
        # Retrieve coordinate dimension to calculate the diffs between points.
        dim = da_coord.dims[0]
        diffs = da_coord.diff(dim)
        # Add beginning and end points to account for lower and upper bounds.
        diffs = np.insert(diffs, 0, diffs[0])
        diffs = np.append(diffs, diffs[-1])
        # Get lower and upper bounds by using the width relative to nearest point.
        # Transpose both bound arrays into a 2D array.
        lower_bounds = da_coord - diffs[:-1] * width
        upper_bounds = da_coord + diffs[1:] * (1 - width)
        bounds = np.array([lower_bounds, upper_bounds]).transpose()
        # Clip latitude bounds at (-90, 90)
        # NOTE(review): assumes a "units" attribute exists on the coordinate;
        # a KeyError is raised otherwise — confirm upstream guarantees this.
        if (
            da_coord.name in ("lat", "latitude", "grid_latitude")
            and "degree" in da_coord.attrs["units"]
        ):
            if (da_coord >= -90).all() and (da_coord <= 90).all():
                np.clip(bounds, -90, 90, out=bounds)
        # Add coordinate bounds to the dataset
        dataset = self._dataset.copy()
        var_name = f"{coord}_bnds"
        dataset[var_name] = xr.DataArray(
            name=var_name,
            data=bounds,
            coords={coord: da_coord},
            dims=[coord, "bnds"],
            attrs={"is_generated": "True"},
        )
        dataset[da_coord.name].attrs["bounds"] = var_name
        return dataset
    def _get_coord(self, coord: Coord) -> xr.DataArray:
        """Get the matching coordinate in the dataset.
        Parameters
        ----------
        coord : Coord
            The coordinate key.
        Returns
        -------
        xr.DataArray
            Matching coordinate in the Dataset.
        Raises
        ------
        KeyError
            If no matching coordinate is found in the Dataset.
        """
        try:
            matching_coord = self._dataset.cf[coord]
        except KeyError:
            raise KeyError(f"No matching coordinates for coord: {coord}")
        return matching_coord
| [
"numpy.insert",
"numpy.clip",
"typing_extensions.get_args",
"xarray.register_dataset_accessor",
"numpy.append",
"numpy.array",
"xarray.DataArray",
"xcdat.logger.setup_custom_logger"
] | [((313, 340), 'xcdat.logger.setup_custom_logger', 'setup_custom_logger', (['"""root"""'], {}), "('root')\n", (332, 340), False, 'from xcdat.logger import setup_custom_logger\n'), ((509, 524), 'typing_extensions.get_args', 'get_args', (['Coord'], {}), '(Coord)\n', (517, 524), False, 'from typing_extensions import Literal, get_args\n'), ((528, 566), 'xarray.register_dataset_accessor', 'xr.register_dataset_accessor', (['"""bounds"""'], {}), "('bounds')\n", (556, 566), True, 'import xarray as xr\n'), ((6813, 6842), 'numpy.insert', 'np.insert', (['diffs', '(0)', 'diffs[0]'], {}), '(diffs, 0, diffs[0])\n', (6822, 6842), True, 'import numpy as np\n'), ((6859, 6886), 'numpy.append', 'np.append', (['diffs', 'diffs[-1]'], {}), '(diffs, diffs[-1])\n', (6868, 6886), True, 'import numpy as np\n'), ((7662, 7787), 'xarray.DataArray', 'xr.DataArray', ([], {'name': 'var_name', 'data': 'bounds', 'coords': '{coord: da_coord}', 'dims': "[coord, 'bnds']", 'attrs': "{'is_generated': 'True'}"}), "(name=var_name, data=bounds, coords={coord: da_coord}, dims=[\n coord, 'bnds'], attrs={'is_generated': 'True'})\n", (7674, 7787), True, 'import xarray as xr\n'), ((7154, 7192), 'numpy.array', 'np.array', (['[lower_bounds, upper_bounds]'], {}), '([lower_bounds, upper_bounds])\n', (7162, 7192), True, 'import numpy as np\n'), ((7475, 7511), 'numpy.clip', 'np.clip', (['bounds', '(-90)', '(90)'], {'out': 'bounds'}), '(bounds, -90, 90, out=bounds)\n', (7482, 7511), True, 'import numpy as np\n')] |
import numpy as np
from scipy.io import loadmat
from utils.dataset import load_label_files, load_labels, load_weights
from data_loader.util import load_challenge_data
import torch
import torch.nn.functional as F
class ChallengeMetric():
    """
    PhysioNet/CinC 2020 Challenge scoring utilities.

    Loads the scoring weight matrix and the dataset labels on construction,
    then provides accuracy, macro F1, F-beta/G-beta and the official
    challenge metric for thresholded classifier outputs.
    """
    def __init__(self, input_directory, alphas):
        """
        :param input_directory: directory containing the label (header) files.
        :param alphas: per-scored-class decision thresholds used by get_pred.
        """
        # challengeMetric initialization
        weights_file = '../evaluation/weights.csv'
        normal_class = '426783006'
        equivalent_classes = [['713427006', '59118001'], ['284470004', '63593006'], ['427172004', '17338001']]
        # Find the label files.
        print('Finding label...')
        label_files = load_label_files(input_directory)
        # Load the labels and classes.
        print('Loading labels...')
        classes, labels_onehot, labels = load_labels(label_files, normal_class, equivalent_classes)
        num_files = len(label_files)
        print("num_files:", num_files)
        # Load the weights for the Challenge metric.
        print('Loading weights...')
        weights = load_weights(weights_file, classes)
        # Only consider classes that are scored with the Challenge metric.
        indices = np.any(weights, axis=0)  # Find indices of classes in weight matrix.
        classes = [x for i, x in enumerate(classes) if indices[i]]
        weights = weights[np.ix_(indices, indices)]
        self.weights = weights
        self.indices = indices
        self.classes = classes
        self.normal_class = normal_class
        self.alphas = alphas
    def accuracy(self, outputs, labels):
        """Fraction of recordings whose full thresholded label vector is exact."""
        outputs = outputs[:, self.indices]
        labels = labels[:, self.indices]
        outputs = self.get_pred(outputs)
        num_recordings, num_classes = np.shape(labels)
        num_correct_recordings = 0
        for i in range(num_recordings):
            if np.all(labels[i, :] == outputs[i, :]):
                num_correct_recordings += 1
        return float(num_correct_recordings) / float(num_recordings)
    def confusion_matrices(self, outputs, labels, normalize=False):
        """
        Compute a binary confusion matrix for each class k:

            [TN_k FN_k]
            [FP_k TP_k]

        If ``normalize`` is True, each recording's contribution is divided by
        its number of positive labels (at least 1).
        """
        num_recordings, num_classes = np.shape(labels)
        A = np.zeros((num_classes, 2, 2))
        for i in range(num_recordings):
            # Per-recording contribution to each cell (previously two nearly
            # identical loops; merged into one with a variable increment).
            if normalize:
                inc = 1.0 / float(max(np.sum(labels[i, :]), 1))
            else:
                inc = 1.0
            for j in range(num_classes):
                if labels[i, j] == 1 and outputs[i, j] == 1:  # TP
                    A[j, 1, 1] += inc
                elif labels[i, j] == 0 and outputs[i, j] == 1:  # FP
                    A[j, 1, 0] += inc
                elif labels[i, j] == 1 and outputs[i, j] == 0:  # FN
                    A[j, 0, 1] += inc
                elif labels[i, j] == 0 and outputs[i, j] == 0:  # TN
                    A[j, 0, 0] += inc
                else:  # This condition should not happen with binary inputs.
                    raise ValueError('Error in computing the confusion matrix.')
        return A
    def f_measure(self, outputs, labels):
        """Macro-averaged F1 over the scored classes (NaN classes ignored)."""
        outputs = outputs[:, self.indices]
        labels = labels[:, self.indices]
        outputs = self.get_pred(outputs)
        num_recordings, num_classes = np.shape(labels)
        A = self.confusion_matrices(outputs, labels)
        f_measure = np.zeros(num_classes)
        for k in range(num_classes):
            tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
            if 2 * tp + fp + fn:
                f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn)
            else:
                f_measure[k] = float('nan')
        return np.nanmean(f_measure)
    def beta_measures(self, outputs, labels, beta=2):
        """Macro-averaged F-beta and G-beta (label-normalised), beta=2 by default."""
        outputs = outputs[:, self.indices]
        labels = labels[:, self.indices]
        outputs = self.get_pred(outputs)
        num_recordings, num_classes = np.shape(labels)
        A = self.confusion_matrices(outputs, labels, normalize=True)
        f_beta_measure = np.zeros(num_classes)
        g_beta_measure = np.zeros(num_classes)
        for k in range(num_classes):
            tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
            if (1 + beta ** 2) * tp + fp + beta ** 2 * fn:
                f_beta_measure[k] = float((1 + beta ** 2) * tp) / float((1 + beta ** 2) * tp + fp + beta ** 2 * fn)
            else:
                f_beta_measure[k] = float('nan')
            if tp + fp + beta * fn:
                g_beta_measure[k] = float(tp) / float(tp + fp + beta * fn)
            else:
                g_beta_measure[k] = float('nan')
        macro_f_beta_measure = np.nanmean(f_beta_measure)
        macro_g_beta_measure = np.nanmean(g_beta_measure)
        return macro_f_beta_measure, macro_g_beta_measure
    def modified_confusion_matrix(self, outputs, labels):
        """
        Multi-class, multi-label confusion matrix: rows are labels, columns
        are outputs; each recording contributes 1/|positives| partial credit.
        """
        num_recordings, num_classes = np.shape(labels)
        A = np.zeros((num_classes, num_classes))
        # Iterate over all of the recordings.
        for i in range(num_recordings):
            # Calculate the number of positive labels and/or outputs (>= 1).
            normalization = float(max(np.sum(np.any((labels[i, :], outputs[i, :]), axis=0)), 1))
            # Iterate over all of the classes.
            for j in range(num_classes):
                # Assign full and/or partial credit for each positive class.
                if labels[i, j]:
                    for k in range(num_classes):
                        if outputs[i, k]:
                            A[j, k] += 1.0 / normalization
        return A
    def challenge_metric(self, outputs, labels):
        """
        Official challenge score, normalised so a perfect classifier scores 1
        and the always-"normal" classifier scores 0. NaN if the two reference
        scores coincide.
        """
        outputs = outputs[:, self.indices]
        labels = labels[:, self.indices]
        outputs = self.get_pred(outputs)
        num_recordings, num_classes = np.shape(labels)
        normal_index = self.classes.index(self.normal_class)
        # Compute the observed score.
        A = self.modified_confusion_matrix(outputs, labels)
        observed_score = np.nansum(self.weights * A)
        # Compute the score for the model that always chooses the correct label(s).
        correct_outputs = labels
        A = self.modified_confusion_matrix(labels, correct_outputs)
        correct_score = np.nansum(self.weights * A)
        # Compute the score for the model that always chooses the normal class.
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        inactive_outputs = np.zeros((num_recordings, num_classes), dtype=bool)
        inactive_outputs[:, normal_index] = 1
        A = self.modified_confusion_matrix(labels, inactive_outputs)
        inactive_score = np.nansum(self.weights * A)
        if correct_score != inactive_score:
            normalized_score = float(observed_score - inactive_score) / float(correct_score - inactive_score)
        else:
            normalized_score = float('nan')
        return normalized_score
    def get_pred(self, output):
        """Binarise raw scores with the per-class thresholds ``self.alphas``."""
        # Vectorised equivalent of the original per-element loop:
        # labels[i, j] = 1 if output[i, j] >= alphas[j] else 0 (as floats).
        return (output >= np.asarray(self.alphas)).astype(float)
def load_data(label_dir, data_dir, split_index):
    """Load ECG recordings and one-hot labels and split into train/val/test.

    :param label_dir: directory with the label/header files.
    :param data_dir: directory with the recording files.
    :param split_index: path to a .mat file holding 'train_index',
        'val_index' and 'test_index' arrays of file indices.
    :return: three (data, label) tuples for train, val and test.
    """
    print('Loading data...')
    weights_file = '../evaluation/weights.csv'
    normal_class = '426783006'
    equivalent_classes = [['713427006', '59118001'], ['284470004', '63593006'], ['427172004', '17338001']]
    # Find the label files.
    label_files = load_label_files(label_dir)
    # Load the labels and classes.
    classes, labels_onehot, labels = load_labels(label_files, normal_class, equivalent_classes)
    # Load the weights for the Challenge metric.
    weights = load_weights(weights_file, classes)
    # Classes that are scored with the Challenge metric.
    indices = np.any(weights, axis=0)  # Find indices of classes in weight matrix.
    # NOTE(review): classes_scored is computed but never used or returned.
    classes_scored = [x for i, x in enumerate(classes) if indices[i]]
    split_idx = loadmat(split_index)
    train_index, val_index, test_index = split_idx['train_index'], split_idx['val_index'], split_idx['test_index']
    # Flatten the 1 x N index arrays produced by the MATLAB file format.
    train_index = train_index.reshape((train_index.shape[1],))
    val_index = val_index.reshape((val_index.shape[1],))
    test_index = test_index.reshape((test_index.shape[1],))
    train_data = list()
    val_data = list()
    test_data = list()
    train_label = list()
    val_label = list()
    test_label = list()
    num_files = len(label_files)
    for i in range(num_files):
        recording, header, name = load_challenge_data(label_files[i], data_dir)
        # Replace NaNs in the signal with zeros.
        recording[np.isnan(recording)] = 0
        if i in train_index:
            # Each row of the recording becomes its own sample, all sharing
            # the recording's label (presumably one row per lead — confirm).
            for j in range(recording.shape[0]):
                train_data.append(recording[j])
                train_label.append(labels_onehot[i])
        elif i in val_index:
            for j in range(recording.shape[0]):
                val_data.append(recording[j])
                val_label.append(labels_onehot[i])
        else:
            for j in range(recording.shape[0]):
                test_data.append(recording[j])
                test_label.append(labels_onehot[i])
    train_data = np.array(train_data)
    val_data = np.array(val_data)
    test_data = np.array(test_data)
    train_label = np.array(train_label)
    val_label = np.array(val_label)
    test_label = np.array(test_label)
    return (train_data, train_label), (val_data, val_label), (test_data, test_label)
def load_checkpoint(resume_path):
    """
    Load a training checkpoint saved with torch.save.

    :param resume_path: Checkpoint path to be resumed
    :return: the checkpoint dict (contains e.g. 'epoch' and 'monitor_best')
    """
    # The previous version copied 'epoch' and 'monitor_best' into unused
    # local variables; callers consume the whole checkpoint dict.
    checkpoint = torch.load(resume_path)
    return checkpoint
def get_metrics(outputs, targets, challenge_metrics):
    """Evaluate all challenge scores for the given predictions and targets.

    :param outputs: raw model scores.
    :param targets: one-hot ground-truth labels.
    :param challenge_metrics: a ChallengeMetric-like scorer instance.
    :return: (accuracy, macro F1, macro F-beta, macro G-beta, challenge metric).
    """
    acc = challenge_metrics.accuracy(outputs, targets)
    f1 = challenge_metrics.f_measure(outputs, targets)
    fbeta, gbeta = challenge_metrics.beta_measures(outputs, targets)
    score = challenge_metrics.challenge_metric(outputs, targets)
    return acc, f1, fbeta, gbeta, score
def to_np(tensor, device):
    """Detach a torch tensor and return it as a numpy array.

    Moves the tensor to host memory first when it lives on the GPU.
    """
    detached = tensor.detach()
    if device == 'cuda':
        detached = detached.cpu()
    return detached.numpy()
| [
"data_loader.util.load_challenge_data",
"numpy.all",
"utils.dataset.load_label_files",
"numpy.nansum",
"torch.load",
"scipy.io.loadmat",
"numpy.any",
"numpy.ix_",
"numpy.array",
"numpy.zeros",
"numpy.nanmean",
"numpy.isnan",
"utils.dataset.load_weights",
"numpy.sum",
"numpy.shape",
"ut... | [((8954, 8981), 'utils.dataset.load_label_files', 'load_label_files', (['label_dir'], {}), '(label_dir)\n', (8970, 8981), False, 'from utils.dataset import load_label_files, load_labels, load_weights\n'), ((9054, 9112), 'utils.dataset.load_labels', 'load_labels', (['label_files', 'normal_class', 'equivalent_classes'], {}), '(label_files, normal_class, equivalent_classes)\n', (9065, 9112), False, 'from utils.dataset import load_label_files, load_labels, load_weights\n'), ((9176, 9211), 'utils.dataset.load_weights', 'load_weights', (['weights_file', 'classes'], {}), '(weights_file, classes)\n', (9188, 9211), False, 'from utils.dataset import load_label_files, load_labels, load_weights\n'), ((9283, 9306), 'numpy.any', 'np.any', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (9289, 9306), True, 'import numpy as np\n'), ((9439, 9459), 'scipy.io.loadmat', 'loadmat', (['split_index'], {}), '(split_index)\n', (9446, 9459), False, 'from scipy.io import loadmat\n'), ((10616, 10636), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (10624, 10636), True, 'import numpy as np\n'), ((10652, 10670), 'numpy.array', 'np.array', (['val_data'], {}), '(val_data)\n', (10660, 10670), True, 'import numpy as np\n'), ((10687, 10706), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (10695, 10706), True, 'import numpy as np\n'), ((10725, 10746), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (10733, 10746), True, 'import numpy as np\n'), ((10763, 10782), 'numpy.array', 'np.array', (['val_label'], {}), '(val_label)\n', (10771, 10782), True, 'import numpy as np\n'), ((10800, 10820), 'numpy.array', 'np.array', (['test_label'], {}), '(test_label)\n', (10808, 10820), True, 'import numpy as np\n'), ((11064, 11087), 'torch.load', 'torch.load', (['resume_path'], {}), '(resume_path)\n', (11074, 11087), False, 'import torch\n'), ((617, 650), 'utils.dataset.load_label_files', 'load_label_files', (['input_directory'], {}), 
'(input_directory)\n', (633, 650), False, 'from utils.dataset import load_label_files, load_labels, load_weights\n'), ((767, 825), 'utils.dataset.load_labels', 'load_labels', (['label_files', 'normal_class', 'equivalent_classes'], {}), '(label_files, normal_class, equivalent_classes)\n', (778, 825), False, 'from utils.dataset import load_label_files, load_labels, load_weights\n'), ((1011, 1046), 'utils.dataset.load_weights', 'load_weights', (['weights_file', 'classes'], {}), '(weights_file, classes)\n', (1023, 1046), False, 'from utils.dataset import load_label_files, load_labels, load_weights\n'), ((1141, 1164), 'numpy.any', 'np.any', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (1147, 1164), True, 'import numpy as np\n'), ((1739, 1755), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (1747, 1755), True, 'import numpy as np\n'), ((2433, 2449), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2441, 2449), True, 'import numpy as np\n'), ((4352, 4368), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (4360, 4368), True, 'import numpy as np\n'), ((4444, 4465), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (4452, 4465), True, 'import numpy as np\n'), ((4772, 4793), 'numpy.nanmean', 'np.nanmean', (['f_measure'], {}), '(f_measure)\n', (4782, 4793), True, 'import numpy as np\n'), ((5044, 5060), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (5052, 5060), True, 'import numpy as np\n'), ((5157, 5178), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (5165, 5178), True, 'import numpy as np\n'), ((5204, 5225), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (5212, 5225), True, 'import numpy as np\n'), ((5791, 5817), 'numpy.nanmean', 'np.nanmean', (['f_beta_measure'], {}), '(f_beta_measure)\n', (5801, 5817), True, 'import numpy as np\n'), ((5849, 5875), 'numpy.nanmean', 'np.nanmean', (['g_beta_measure'], {}), '(g_beta_measure)\n', (5859, 5875), True, 'import 
numpy as np\n'), ((6251, 6267), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (6259, 6267), True, 'import numpy as np\n'), ((6281, 6317), 'numpy.zeros', 'np.zeros', (['(num_classes, num_classes)'], {}), '((num_classes, num_classes))\n', (6289, 6317), True, 'import numpy as np\n'), ((7207, 7223), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (7215, 7223), True, 'import numpy as np\n'), ((7409, 7436), 'numpy.nansum', 'np.nansum', (['(self.weights * A)'], {}), '(self.weights * A)\n', (7418, 7436), True, 'import numpy as np\n'), ((7647, 7674), 'numpy.nansum', 'np.nansum', (['(self.weights * A)'], {}), '(self.weights * A)\n', (7656, 7674), True, 'import numpy as np\n'), ((7783, 7837), 'numpy.zeros', 'np.zeros', (['(num_recordings, num_classes)'], {'dtype': 'np.bool'}), '((num_recordings, num_classes), dtype=np.bool)\n', (7791, 7837), True, 'import numpy as np\n'), ((7978, 8005), 'numpy.nansum', 'np.nansum', (['(self.weights * A)'], {}), '(self.weights * A)\n', (7987, 8005), True, 'import numpy as np\n'), ((8353, 8392), 'numpy.zeros', 'np.zeros', (['(num_recordings, num_classes)'], {}), '((num_recordings, num_classes))\n', (8361, 8392), True, 'import numpy as np\n'), ((9996, 10041), 'data_loader.util.load_challenge_data', 'load_challenge_data', (['label_files[i]', 'data_dir'], {}), '(label_files[i], data_dir)\n', (10015, 10041), False, 'from data_loader.util import load_challenge_data\n'), ((1303, 1327), 'numpy.ix_', 'np.ix_', (['indices', 'indices'], {}), '(indices, indices)\n', (1309, 1327), True, 'import numpy as np\n'), ((1847, 1884), 'numpy.all', 'np.all', (['(labels[i, :] == outputs[i, :])'], {}), '(labels[i, :] == outputs[i, :])\n', (1853, 1884), True, 'import numpy as np\n'), ((2493, 2522), 'numpy.zeros', 'np.zeros', (['(num_classes, 2, 2)'], {}), '((num_classes, 2, 2))\n', (2501, 2522), True, 'import numpy as np\n'), ((3240, 3269), 'numpy.zeros', 'np.zeros', (['(num_classes, 2, 2)'], {}), '((num_classes, 2, 2))\n', (3248, 3269), True, 
'import numpy as np\n'), ((10060, 10079), 'numpy.isnan', 'np.isnan', (['recording'], {}), '(recording)\n', (10068, 10079), True, 'import numpy as np\n'), ((3356, 3376), 'numpy.sum', 'np.sum', (['labels[i, :]'], {}), '(labels[i, :])\n', (3362, 3376), True, 'import numpy as np\n'), ((6520, 6565), 'numpy.any', 'np.any', (['(labels[i, :], outputs[i, :])'], {'axis': '(0)'}), '((labels[i, :], outputs[i, :]), axis=0)\n', (6526, 6565), True, 'import numpy as np\n')] |
# coding=utf8
import numpy as np
class LabelSpreading:
    """Semi-supervised label spreading on a similarity graph.

    Iterates F(t+1) = alpha * S * F(t) + (1 - alpha) * Y with
    S = D^(-1/2) W D^(-1/2) until convergence (Zhou et al., 2004).
    """
    def __init__(self, alpha=0.2, max_iter=30, tol=1e-3):
        """
        :param alpha: clamping factor between (0,1)
        :param max_iter: maximum number of iterations
        :param tol: convergence tolerance
        """
        self.alpha = alpha
        self.max_iter = max_iter
        self.tol = tol
        self.dist = None
    def fit(self, w, y):
        """
        fit label spreading algorithm
        :param w: similarity matrix of n x n shape with n samples
        :param y: labels of n x c shape with c labels, where 1
        denotes label of x_i or 0 otherwise. Unlabeled samples
        have labels set to 0.
        """
        if type(w) != np.ndarray or type(y) != np.ndarray or len(w) != len(y):
            raise Exception("w and y should be numpy array with equal length")
        # BUG FIX: the original check `0 > self.alpha > 1` could never be
        # True (alpha cannot be both < 0 and > 1), so invalid alphas were
        # silently accepted.
        if not 0 < self.alpha < 1 or self.max_iter < 0 or self.tol < 0:
            raise Exception("Parameters are set incorrectly")
        # construct the matrix S = D^(-1/2) W D^(-1/2)
        d = np.sum(w, axis=1)
        d[d == 0] = 1  # avoid division by zero for isolated nodes
        np.power(d, -1 / 2., d)
        d = np.diag(d)
        s = np.dot(np.dot(d, w), d)
        # Iterate F(t+1) until convergence
        cur_iter = 0
        err = self.tol
        f0 = y
        f1 = None
        while cur_iter < self.max_iter and err >= self.tol:
            f1 = self.alpha * np.dot(s, f0) + (1 - self.alpha) * y
            err = np.max(np.abs(f1 - f0))
            f0 = f1
            cur_iter += 1
        self.dist = f1  # set distributions
        return self
    def predict(self, y):
        """
        use model to create predictions
        :param y: labels of n x c shape with c labels, where 1
        denotes label of x_i or 0 otherwise. Unlabeled samples
        have labels set to 0.
        :return: list with predictions
        """
        # BUG FIX: the original tested `not np.any(y)` (i.e. an all-zero
        # label matrix) as a proxy for "model not fitted"; check the fitted
        # distribution itself instead.
        if self.dist is None:
            raise Exception("Please fit model first")
        if type(y) != np.ndarray:
            raise Exception("y should be numpy array")
        predictions = []
        for i, labels in enumerate(y):
            index = np.where(labels == 1)[0]
            if len(index) == 1:
                # was labeled before
                predictions.append(index[0])
            else:
                # use label with highest score
                predictions.append(np.argmax(self.dist[i]))
        return predictions
| [
"numpy.abs",
"numpy.power",
"numpy.where",
"numpy.argmax",
"numpy.any",
"numpy.diag",
"numpy.sum",
"numpy.dot"
] | [((1043, 1060), 'numpy.sum', 'np.sum', (['w'], {'axis': '(1)'}), '(w, axis=1)\n', (1049, 1060), True, 'import numpy as np\n'), ((1091, 1115), 'numpy.power', 'np.power', (['d', '(-1 / 2.0)', 'd'], {}), '(d, -1 / 2.0, d)\n', (1099, 1115), True, 'import numpy as np\n'), ((1127, 1137), 'numpy.diag', 'np.diag', (['d'], {}), '(d)\n', (1134, 1137), True, 'import numpy as np\n'), ((1157, 1169), 'numpy.dot', 'np.dot', (['d', 'w'], {}), '(d, w)\n', (1163, 1169), True, 'import numpy as np\n'), ((1878, 1887), 'numpy.any', 'np.any', (['y'], {}), '(y)\n', (1884, 1887), True, 'import numpy as np\n'), ((1447, 1462), 'numpy.abs', 'np.abs', (['(f1 - f0)'], {}), '(f1 - f0)\n', (1453, 1462), True, 'import numpy as np\n'), ((2116, 2137), 'numpy.where', 'np.where', (['(labels == 1)'], {}), '(labels == 1)\n', (2124, 2137), True, 'import numpy as np\n'), ((1385, 1398), 'numpy.dot', 'np.dot', (['s', 'f0'], {}), '(s, f0)\n', (1391, 1398), True, 'import numpy as np\n'), ((2355, 2378), 'numpy.argmax', 'np.argmax', (['self.dist[i]'], {}), '(self.dist[i])\n', (2364, 2378), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from matplotlib import pyplot as P
import numpy as N
from load import ROOT as R
import gna.constructors as C
import pytest
@pytest.mark.parametrize('edges', [N.linspace(0.0, 10.0, 11), N.geomspace(0.1, 1000.0, 5)])
def test_histedges_v01(edges):
    """Check that HistEdges reproduces the edges, centers and widths of a histogram."""
    bin_centers = 0.5 * (edges[1:] + edges[:-1])
    bin_widths = edges[1:] - edges[:-1]
    counts = N.arange(edges.size - 1)
    hist = C.Histogram(edges, counts)
    h2e = R.HistEdges()
    h2e.histedges.hist(hist.hist.hist)
    got_edges = h2e.histedges.edges.data()
    got_centers = h2e.histedges.centers.data()
    got_widths = h2e.histedges.widths.data()
    print('Input:')
    print(edges)
    print('Output:')
    print('Edges', got_edges)
    print('Centers', got_centers)
    print('Widths', got_widths)
    assert (got_edges == edges).all()
    assert (got_centers == bin_centers).all()
    assert (got_widths == bin_widths).all()
if __name__ == "__main__":
    # BUG FIX: this previously called test_histedges(), which does not exist
    # (the function is test_histedges_v01 and requires an `edges` argument).
    # Run both parametrized cases directly when invoked as a script.
    for edges in (N.linspace(0.0, 10.0, 11), N.geomspace(0.1, 1000.0, 5)):
        test_histedges_v01(edges)
| [
"gna.constructors.Histogram",
"numpy.geomspace",
"load.ROOT.HistEdges",
"numpy.linspace",
"numpy.arange"
] | [((363, 387), 'numpy.arange', 'N.arange', (['(edges.size - 1)'], {}), '(edges.size - 1)\n', (371, 387), True, 'import numpy as N\n'), ((398, 422), 'gna.constructors.Histogram', 'C.Histogram', (['edges', 'data'], {}), '(edges, data)\n', (409, 422), True, 'import gna.constructors as C\n'), ((433, 446), 'load.ROOT.HistEdges', 'R.HistEdges', ([], {}), '()\n', (444, 446), True, 'from load import ROOT as R\n'), ((182, 207), 'numpy.linspace', 'N.linspace', (['(0.0)', '(10.0)', '(11)'], {}), '(0.0, 10.0, 11)\n', (192, 207), True, 'import numpy as N\n'), ((209, 236), 'numpy.geomspace', 'N.geomspace', (['(0.1)', '(1000.0)', '(5)'], {}), '(0.1, 1000.0, 5)\n', (220, 236), True, 'import numpy as N\n')] |
# 15/05/2020, <NAME>, Edinburgh
# Tidying up codes that plot rho/Paulin-Henriksson stats
# by having some of the functions in here.
import numpy as np
from scipy.stats import binned_statistic_2d
from astropy.io import fits
import time
import glob
def interpolate2D(X, Y, grid): #(It's linear)
    # Simple bilinear interpolation of 'grid' at fractional pixel positions
    # (X, Y); grid is indexed [row=Y, col=X]. Faster than looping over
    # scipy/numpy interpolators per point.
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int truncates identically for these (non-negative) pixel coords.
    Xi = X.astype(int)
    Yi = Y.astype(int) # these round down to integer
    VAL_XYlo = grid[Yi, Xi]   + (X - Xi)*( grid[Yi, Xi+1]   - grid[Yi, Xi] )
    VAL_XYhi = grid[Yi+1, Xi] + (X - Xi)*( grid[Yi+1, Xi+1] - grid[Yi+1, Xi] )
    # Blend the two row-interpolations in the Y direction.
    VAL_XY = VAL_XYlo + (Y - Yi)*( VAL_XYhi - VAL_XYlo )
    return VAL_XY
def Select_Patch(Q, ra, dec, rlo, rhi, dlo, dhi):
    # Return the entries of Q whose (ra, dec) lie STRICTLY inside the box
    # (rlo, rhi) x (dlo, dhi).
    inside = (ra > rlo) & (ra < rhi) & (dec > dlo) & (dec < dhi)
    keep = np.where(inside)[0]
    return Q[keep]
def MeanQ_VS_XY(Q, w, X, Y, num_XY_bins):
    # Weighted mean of Q on a 2D (Y, X) grid: bin sum(Q*w) and sum(w)
    # separately, then divide. Empty pixels give 0/0 = NaN.
    sum_Qw, yedges, xedges, _ = binned_statistic_2d(Y, X, Q*w, statistic='sum', bins=num_XY_bins)
    sum_w, yedges, xedges, _ = binned_statistic_2d(Y, X, w, statistic='sum', bins=num_XY_bins)
    mean_Q = sum_Qw / sum_w
    return mean_Q, sum_w, yedges, xedges
def Bootstrap_Error(nboot, samples, weights):
N = len(samples)
bt_samples = np.zeros(nboot) # Will store mean of nboot resamples
for i in range(nboot):
idx = np.random.randint(0,N,N) # Picks N random indicies with replacement
bt_samples[i] = np.sum( weights[idx]*samples[idx] ) /np.sum (weights[idx])
return np.std( bt_samples )
def Read_GalData(NorS):
    # Read the K1000 master shear catalogue for the North ('N') or South ('S')
    # patch and return per-galaxy position, size and weight columns.
    data_DIR = '/disk09/KIDS/KIDSCOLLAB_V1.0.0/K1000_CATALOGUES_PATCH/'
    hdul = fits.open('%s/K1000_%s_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_THELI_INT.cat' %(data_DIR,NorS))
    cat = hdul[1].data
    RA = cat['ALPHA_J2000']
    Dec = cat['DELTA_J2000']
    Z = cat['Z_B']
    # Calculated the integrals with wolfram
    # - turns out we need a factor of 4 to make T_g and T_PSF equivalent
    # https://docs.google.com/document/d/1kylhRNInqzofQtZDiLrH-GH5RwhEBB04OZ0HW8pRZOo/edit?usp=sharing
    T_gal = 4*cat['bias_corrected_scalelength_pixels']**2
    # PSF size evaluated at the galaxy position.
    T_PSF = cat['PSF_Q11'] + cat['PSF_Q22']
    # Include the SOM gold-sample flag in the shear weight.
    weight = cat['recal_weight_A']*cat['Flag_SOM_Fid_A']
    return RA, Dec, T_gal, T_PSF, weight, Z
def Interp_deltaT(RA_g, Dec_g, RA_p, Dec_p, delta_TPSF):
	"""Interpolate delta_TPSF (sampled at the PSF-object positions) onto the
	galaxy positions.

	The sky is pixelised on the PSF-data footprint (which spans a wider
	(RA, Dec) range than the galaxy data) with ~5 arcmin pixels; delta_TPSF is
	averaged per pixel and then bilinearly interpolated to each galaxy.
	"""
	pixel_size = 5. / 60. # 5 arcmin in degrees
	ra_lo, ra_hi = RA_p.min(), RA_p.max()
	dec_lo, dec_hi = Dec_p.min(), Dec_p.max()
	nbins_x = int( (ra_hi - ra_lo) / pixel_size )
	nbins_y = int( (dec_hi - dec_lo) / pixel_size )
	# pixel coordinates of the PSF objects
	X_p = nbins_x * (RA_p - ra_lo) / (ra_hi - ra_lo)
	Y_p = nbins_y * (Dec_p - dec_lo) / (dec_hi - dec_lo)
	# per-pixel (unweighted) mean of delta_TPSF; empty pixels give 0/0 -> NaN
	dT_grid, _, _, _ = MeanQ_VS_XY(delta_TPSF, np.ones_like(delta_TPSF),
	                               X_p, Y_p, [nbins_y, nbins_x])
	dT_grid = np.nan_to_num( dT_grid, nan=0. )
	# duplicate the last column and row so bilinear lookups at the upper
	# grid edge never index out of bounds
	dT_grid = np.column_stack([ dT_grid, dT_grid[:, -1] ])
	dT_grid = np.vstack([ dT_grid, dT_grid[-1, :] ])
	# pixel coordinates of the galaxies (same mapping as for the PSF objects)
	X_g = nbins_x * (RA_g - ra_lo) / (ra_hi - ra_lo)
	Y_g = nbins_y * (Dec_g - dec_lo) / (dec_hi - dec_lo)
	return interpolate2D(X_g, Y_g, dT_grid)
def Calc_Important_Tquantities(LFver,zbounds, nboot):
	"""Calculate the size-related PSF-systematics quantities per tomo bin:
	< deltaT_PSF / T_gal > with its bootstrap error, and < 1/T_gal >^2 with
	an error propagated from the bootstrap error on < 1/T_gal >.

	Cross-bin values are estimated by averaging the two auto-bin values
	(errors combined in quadrature).

	Parameters
	----------
	LFver : str
		Lensfit version label used to locate the PSF data catalogues.
	zbounds : sequence of float
		Tomographic bin edges; len(zbounds)-1 bins are analysed.
	nboot : int
		Number of bootstrap resamples for the error estimates.

	Returns
	-------
	deltaT_ratio, Tg_invsq : arrays [2, num_zbins] (row 0 mean, row 1 error)
	deltaT_ratio_tot, Tg_invsq_tot : arrays [2, num_zbins_tot] incl. cross-bins
	"""
	# This functions calculates:
	# < deltaT_PSF / T_gal > & SIGMA[ deltaT_PSF / T_gal ]
	# < T_gal^-2 > & SIGMA[ T_gal^-2 ]
	num_zbins = len(zbounds)-1
	num_zbins_tot = np.sum( range(num_zbins+1) ) # Includes cross bins
	# Read in N and S PSF data catalogues
	# '_p' means at position of objects used for PSF modelling
	bgdir='/home/bengib/KiDS1000_NullTests/Codes_4_KiDSTeam_Eyes/PSF_systests'
	RA_Np, Dec_Np, _,_,_,_, TPSF_Np, delta_TPSF_Np, _,_ = np.load('%s/LFver%s/Catalogues/PSF_Data_N.npy'%(bgdir,LFver)).transpose()
	RA_Sp, Dec_Sp, _,_,_,_, TPSF_Sp, delta_TPSF_Sp, _,_ = np.load('%s/LFver%s/Catalogues/PSF_Data_S.npy'%(bgdir,LFver)).transpose()
	# Read in N and S galaxy data catalogues ('_g' = at galaxy positions)
	RA_Ng, Dec_Ng, T_Ng, T_PSF_Ng, weight_Ng, z_Ng = Read_GalData('N')
	RA_Sg, Dec_Sg, T_Sg, T_PSF_Sg, weight_Sg, z_Sg = Read_GalData('S')
	# The fact that the S RA data crosses zero causes issues interpolating onto a grid.
	# So shift rescale all S RA's in [300,360] to be negative, making the field continuous.
	RA_Sg[ ((RA_Sg<360) & (RA_Sg>300)) ] += -360.
	RA_Sp[ ((RA_Sp<360) & (RA_Sp>300)) ] += -360.
	# Now scroll through the z-bins, calculating the T-quantities in each.
	deltaT_ratio = np.zeros([ 2, num_zbins ]) # 1st-row = mean, 2nd-row = error
	Tg_invsq = np.zeros_like( deltaT_ratio ) # Same
	for i in range(num_zbins):
		t1 = time.time()
		print('On tomo bin %s-%s' %(zbounds[i],zbounds[i+1]))
		# Redshift cut:
		idx_N = ( (z_Ng>zbounds[i]) & (z_Ng<zbounds[i+1]) )
		idx_S = ( (z_Sg>zbounds[i]) & (z_Sg<zbounds[i+1]) )
		# Interpolate the PSF-size residual map onto the selected galaxies
		delta_TPSF_Ng = Interp_deltaT(RA_Ng[idx_N], Dec_Ng[idx_N], RA_Np, Dec_Np, delta_TPSF_Np)
		delta_TPSF_Sg = Interp_deltaT(RA_Sg[idx_S], Dec_Sg[idx_S], RA_Sp, Dec_Sp, delta_TPSF_Sp)
		# 1. < deltaT_PSF / T_gal > & SIGMA[ deltaT_PSF / T_gal ]
		# NOTE: T_g is on the DENOMINATOR here, so the OPTIMAL weight is not weight_g,
		# but thats what we're using.
		T_g = np.append(T_Ng[idx_N], T_Sg[idx_S])
		dT_p = np.append(delta_TPSF_Ng, delta_TPSF_Sg)
		weight_g = np.append(weight_Ng[idx_N], weight_Sg[idx_S])
		weight_q1 = weight_g #(T_g**2 * weight_g ) / dT_p
		#weight_q1 = np.nan_to_num( weight_q1, nan=0., posinf=0. ) # get rid of nan/inf weights from where dT_p=0
		# ^ that weight computed with error propagation:
		# if y=a/x, sigma_y^2=(-a/x^2)sigma_x^2, sigma_(x/y)^2=1/weight_(x/y)
		# then x=T_g, a=dT_p, y=dT_p/T_g
		deltaT_ratio[0,i] = np.average( dT_p/T_g, weights=weight_q1 )
		# This is a weighted mean, so lets use a bootstrap estimate for the error on the mean
		print("Bootstrapping deltaT-ratio with nboot=%s" %nboot)
		deltaT_ratio[1,i] = Bootstrap_Error(nboot, dT_p/T_g, weights=weight_q1)
		# 2. < T_gal^-2 > & SIGMA[ T_gal^-2 ]
		weight_q2 = weight_g #weight_g #T_g**2 * weight_g
		# ^ computed with error prop: y=1/x, sigma_y^2=(dy/dx)sigma_x^2, sigma_x^2=1/weight_x
		# with x=T_g, weight_x=weight_g
		Tg_inv = np.average( 1./T_g, weights=weight_q2 )
		Tg_invsq[0,i] = Tg_inv**2
		print("Bootstrapping Tgal_invsq with nboot=%s" %nboot)
		Tg_inverr = Bootstrap_Error(nboot, 1/T_g, weights=weight_q2 )
		# Need to convert ^this error on 1/T_g to an error on 1/T_g^2
		# do error propagation again:
		# z=y^2, sigma_z=2y*sigma_y^2
		# NOTE(review): standard propagation for z = y^2 gives sigma_z = 2*y*sigma_y,
		# whereas the line below uses sqrt(2*y)*sigma_y — confirm this is intended.
		Tg_invsq[1,i] = np.sqrt(2 * Tg_inv) * Tg_inverr
		t2 = time.time()
		print('For tomo bin %s-%s, got the following T-quantities (took %0.f s)' %(zbounds[i],zbounds[i+1],(t2-t1)) )
		print ('%8.3e,%8.3e,%8.3e,%8.3e'%(deltaT_ratio[0,i], deltaT_ratio[1,i], Tg_invsq[0,i], Tg_invsq[1,i]))
	# For the cross-bins, for now just average the T-quantities in the individual bins
	deltaT_ratio_tot = np.zeros([ 2, num_zbins_tot ]) # 1st-row = mean, 2nd-row = error
	Tg_invsq_tot = np.zeros_like( deltaT_ratio_tot ) # Same
	k=0
	for i in range(num_zbins):
		for j in range(num_zbins):
			if j>= i:
				deltaT_ratio_tot[0,k] = (deltaT_ratio[0,i]+deltaT_ratio[0,j])/2
				deltaT_ratio_tot[1,k] = np.sqrt( deltaT_ratio[1,i]**2 + deltaT_ratio[1,j]**2 )
				Tg_invsq_tot[0,k] = (Tg_invsq[0,i] + Tg_invsq[0,j])/2
				Tg_invsq_tot[1,k] = np.sqrt(Tg_invsq[0,i]**2 + Tg_invsq[0,j]**2)
				k+=1
	return deltaT_ratio, Tg_invsq, deltaT_ratio_tot, Tg_invsq_tot
def Read_rho_Or_PH(LFver, keyword, ThBins, Res):
	"""Read either the rho statistics or the Paulin-Henriksson (PH) terms.

	Parameters
	----------
	LFver : sequence of str
		Lensfit version labels; one set of statistics is read per version.
	keyword : str
		'rho' reads the 5 rho statistics; anything else reads the 3 PH terms.
	ThBins : int
		Number of theta bins per statistic.
	Res : int
		Resolution (Res x Res) of the N/S patch splits used for the errors.

	Returns
	-------
	Plot_Labels : list of str
		One human-readable label per Lensfit version.
	theta : array
		The theta sampling of the very first statistic (all others are
		interpolated onto it).
	php_mean, php_err : arrays [len(LFver), num_stat, ThBins]
		Weighted N+S combination of each statistic and its error from the
		scatter between the patch splits.
	"""
	# Note the unfortunate variable naming here 'ph' (Paulin-Henriksson)
	# even in the case of rho stats being read in.
	if keyword == 'rho':
		print("Reading in the rho stats")
		num_stat = 5
		DIR=['rho1','rho2','rho3','rho4','rho5']
		stat='rho'
	else:
		print("Reading in the Paulin-Henriksson terms")
		num_stat = 3
		DIR=['PHterms','PHterms','PHterms','PHterms','PHterms']
		stat='ph'
	# Per-version lists of the N/S patch-split files (used only for the errors)
	NFiles = []
	numN = []
	SFiles = []
	numS = []
	Plot_Labels = []
	for lfv in range(len(LFver)):
		NFiles.append( glob.glob('LFver%s/%s/%s1_KiDS_N_*of%sx%s.dat'%(LFver[lfv],DIR[0],stat,Res,Res)) )
		numN.append(len( NFiles[lfv] ))
		SFiles.append( glob.glob('LFver%s/%s/%s1_KiDS_S_*of%sx%s.dat'%(LFver[lfv],DIR[0],stat,Res,Res)) )
		numS.append(len( SFiles[lfv] ))
		# Human-readable blending-ratio label per Lensfit version
		if LFver[lfv] == "309b":
			Plot_Labels.append(" 3:1")
		elif LFver[lfv] == "319":
			Plot_Labels.append(LFver[lfv] + " 3:1")
		elif LFver[lfv] == "319b":
			Plot_Labels.append(" 4:1")
		elif LFver[lfv] == "319c":
			Plot_Labels.append(" 3:2")
		elif LFver[lfv] == "319d":
			Plot_Labels.append(" 5:1")
		elif LFver[lfv] == "321":
			Plot_Labels.append("New 4:1")
	php_mean = np.zeros([len(LFver),num_stat,ThBins])
	php_err = np.zeros_like(php_mean)
	# Fix theta to the values saved for the very first statistic:
	theta = np.loadtxt('LFver%s/%s/%s%s_KiDS_N.dat'%(LFver[0],DIR[0],stat,1), usecols=(0,), unpack=True)
	for lfv in range(len(LFver)):
		# BUGFIX: was hard-coded 5; use num_stat so the array is sized
		# consistently for both rho (5) and PH (3) statistics.
		php_split = np.zeros([ numN[lfv]+numS[lfv], num_stat, ThBins ])
		for i in range(num_stat):
			try:
				tmp_theta, phpN = np.loadtxt('LFver%s/%s/%s%s_KiDS_N.dat'%(LFver[lfv],DIR[i],stat,i+1), usecols=(0,1), unpack=True)
				# If the above exists, try to read in the weight (only saved this for LFver321 of the rho stats)
				try:
					weightN = np.loadtxt('LFver%s/%s/%s%s_KiDS_N.dat'%(LFver[lfv],DIR[i],stat,i+1), usecols=(3,), unpack=True)
				except IndexError:
					weightN = 1.
			except IOError:
				# BUGFIX: ThBins is an int, so len(ThBins) raised a TypeError here.
				weightN = np.ones(ThBins)
				phpN = np.zeros(ThBins)
				# BUGFIX: tmp_theta was undefined on this branch, crashing the
				# np.interp below; the zero-filled phpN makes theta itself fine.
				tmp_theta = theta
			# Interp to the fixed_theta scale
			phpN = np.interp( theta, tmp_theta, phpN )
			# keep the weights as they are - don't interp.
			try:
				tmp_theta, phpS = np.loadtxt('LFver%s/%s/%s%s_KiDS_S.dat'%(LFver[lfv],DIR[i],stat,i+1), usecols=(0,1), unpack=True)
				# If the above exists, try to read in the weight (only saved this for LFver321 of the rho stats)
				try:
					weightS = np.loadtxt('LFver%s/%s/%s%s_KiDS_S.dat'%(LFver[lfv],DIR[i],stat,i+1), usecols=(3,), unpack=True)
				except IndexError:
					weightS = 1.
			except IOError:
				# Same two BUGFIXes as the North branch above.
				weightS = np.ones(ThBins)
				phpS = np.zeros(ThBins)
				tmp_theta = theta
			# Interp to the fixed_theta scale
			phpS = np.interp( theta, tmp_theta, phpS )
			# keep the weights as they are - don't interp.
			# Weighted N+S combination of this statistic
			php_mean[lfv,i,:] = (weightN*phpN + weightS*phpS) / (weightN+weightS)
			# Read the patch splits; these feed only the error estimate below.
			for j in range(numN[lfv]):
				tmp_theta, tmp_php_splitN = np.loadtxt(NFiles[lfv][j], usecols=(0,1), unpack=True)
				php_split[j,i,:] = np.interp( theta, tmp_theta, tmp_php_splitN )
			for j in range(numS[lfv]):
				tmp_theta, tmp_php_splitS = np.loadtxt(SFiles[lfv][j], usecols=(0,1), unpack=True)
				php_split[numN[lfv]+j,i,:] = np.interp( theta, tmp_theta, tmp_php_splitS )
			# Error on the mean from the scatter between the patch splits
			php_err[lfv,i,:] = np.sqrt( np.diag( np.cov(php_split[:,i,:], rowvar = False) ) / (numN[lfv]+numS[lfv]) )
	return Plot_Labels, theta, php_mean, php_err
def Read_alpha_per_bin(LFver):
	"""Read the alpha (PSF leakage) values for the 5 tomographic bins and
	estimate values for the cross-bins by averaging the two auto-bin values.

	Parameters
	----------
	LFver : sequence of str
		Lensfit version labels; only "321" and "309c" have saved alpha files
		(anything else exits).

	Returns
	-------
	alpha, alpha_err : arrays of shape [len(LFver), 15]
		Leakage and its error for the 15 auto+cross bin combinations.
	"""
	# This functions reads in the alpha (PSF leakage) values for the 5 tomo bins
	# and avg's to estimate values for the cross-bins,
	num_zbins = 5
	num_zbins_tot = np.sum( range(num_zbins+1) ) # Number source bins including cross-bins
	alpha = np.zeros([ len(LFver), num_zbins_tot ])
	alpha_err = np.zeros([ len(LFver), num_zbins_tot ])
	for lfv in range(len(LFver)):
		# Filename tag depends on the Lensfit version
		if LFver[lfv] == "321":
			tmp = "glab_%s"%LFver[lfv]
		elif LFver[lfv] == "309c":
			tmp = "svn_%s"%LFver[lfv]
		else:
			print("Currently only have saved alpha values for 2 LF versions: 321 and 309c. EXITING")
			sys.exit()
		tmp_a1, tmp_1_err, tmp_a2, tmp_a2_err = np.loadtxt('KAll.autocal.BlindA.alpha_VS_ZB.ZBcut0.1-1.2_LF_%s_2Dbins.dat'%tmp, usecols=(1,2,3,4), unpack=True)
		tmp_a = (tmp_a1 + tmp_a2) / 2. # Just taking the avg of the alpha per ellipticity component
		# Also calculate the alphas in the cross-bins as
		# the combination of values in the auto-bins
		tmp_a_err = 0.5 * np.sqrt( tmp_1_err**2 + tmp_a2_err**2 )
		k=0
		for i in range(num_zbins):
			for j in range(num_zbins):
				if j>= i:
					alpha[lfv,k] = ( tmp_a[i]+tmp_a[j] )/2.
					# The error propagation formula HAS to change if j==i
					# since the differentiation would take a different form in this case.
					if i==j:
						alpha_err[lfv,k] = tmp_a_err[j]
					else:
						alpha_err[lfv,k] = 0.5* np.sqrt( tmp_a_err[j]**2 + tmp_a_err[i]**2 )
					#print("%s : %s %s : %s" %(k, tmp_a1[i], tmp_a1[j], alpha[lfv,k]) )
					k+=1
	return alpha, alpha_err
def Read_In_Theory_Vector(hi_lo_fid):
	"""Return the theoretical xi+ data vector for all 15 tomographic bin pairs.

	Parameters
	----------
	hi_lo_fid : str
		One of 'high', 'low' or 'fid', selecting the S8 value of the
		prediction to read.

	Returns
	-------
	theta_theory : array
		Theta sampling of the 1-1 bin (all pairs are interpolated onto it).
	xip_theory_stack : array
		The 15 xi+ predictions, flattened bin-pair-major into one 1D vector.
	"""
	num_zbins_tot = 15
	# This function returns the theoretical xi+ predictions with either a fiducial, high or low S8 value
	# hi_lo_fid must be one of 'high', 'low' or 'fid'
	indir_theory = '/home/bengib/KiDS1000_NullTests/Cat_to_Obs_K1000_P1/Predictions/kcap_xi/outputs/test_output_S8_%s_test/chain/output_test_A/shear_xi_plus_binned/' %hi_lo_fid
	theta_theory = np.loadtxt('%s/theta_bin_1_1.txt' %indir_theory)
	xip_theory_stack = np.zeros( [num_zbins_tot,len(theta_theory)] )
	# ^This will store all auto & cross xi_p for the 5 tomo bins
	idx = 0
	for i in range(1,6):
		for j in range(1,6):
			if i >= j: # Only read in bins 1-1, 2-1, 2-2, 3-1, 3-2,...
				tmp_theta_theory = np.loadtxt('%s/theta_bin_%s_%s.txt' %(indir_theory,i,j))
				tmp_xip_theory = np.loadtxt('%s/bin_%s_%s.txt' %(indir_theory,i,j))
				xip_theory_stack[idx,:] = np.interp( theta_theory, tmp_theta_theory, tmp_xip_theory )
				# pretty sure theta_theory is the same for every tomo bin combination
				# but just in case, we interpolate everything to be sampled at the values for the 1_1 bin.
				idx+=1
	# Flatten to a single 1D data vector (bin-pair-major ordering)
	xip_theory_stack = xip_theory_stack.flatten()
	return theta_theory, xip_theory_stack
| [
"numpy.sqrt",
"astropy.io.fits.open",
"numpy.cov",
"scipy.stats.binned_statistic_2d",
"glob.glob",
"numpy.average",
"numpy.interp",
"numpy.std",
"time.time",
"numpy.intersect1d",
"numpy.ones_like",
"numpy.logical_and",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",... | [((1190, 1221), 'numpy.intersect1d', 'np.intersect1d', (['idx_ra', 'idx_dec'], {}), '(idx_ra, idx_dec)\n', (1204, 1221), True, 'import numpy as np\n'), ((1431, 1498), 'scipy.stats.binned_statistic_2d', 'binned_statistic_2d', (['Y', 'X', '(Q * w)'], {'statistic': '"""sum"""', 'bins': 'num_XY_bins'}), "(Y, X, Q * w, statistic='sum', bins=num_XY_bins)\n", (1450, 1498), False, 'from scipy.stats import binned_statistic_2d\n'), ((1538, 1601), 'scipy.stats.binned_statistic_2d', 'binned_statistic_2d', (['Y', 'X', 'w'], {'statistic': '"""sum"""', 'bins': 'num_XY_bins'}), "(Y, X, w, statistic='sum', bins=num_XY_bins)\n", (1557, 1601), False, 'from scipy.stats import binned_statistic_2d\n'), ((1767, 1782), 'numpy.zeros', 'np.zeros', (['nboot'], {}), '(nboot)\n', (1775, 1782), True, 'import numpy as np\n'), ((2093, 2111), 'numpy.std', 'np.std', (['bt_samples'], {}), '(bt_samples)\n', (2099, 2111), True, 'import numpy as np\n'), ((2326, 2461), 'astropy.io.fits.open', 'fits.open', (["('%s/K1000_%s_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_THELI_INT.cat'\n % (data_DIR, NorS))"], {}), "(\n '%s/K1000_%s_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_THELI_INT.cat'\n % (data_DIR, NorS))\n", (2335, 2461), False, 'from astropy.io import fits\n'), ((4507, 4546), 'numpy.nan_to_num', 'np.nan_to_num', (['delta_TPSF_grid'], {'nan': '(0.0)'}), '(delta_TPSF_grid, nan=0.0)\n', (4520, 4546), True, 'import numpy as np\n'), ((6642, 6666), 'numpy.zeros', 'np.zeros', (['[2, num_zbins]'], {}), '([2, num_zbins])\n', (6650, 6666), True, 'import numpy as np\n'), ((6723, 6750), 'numpy.zeros_like', 'np.zeros_like', (['deltaT_ratio'], {}), '(deltaT_ratio)\n', (6736, 6750), True, 'import numpy as np\n'), ((9283, 9311), 'numpy.zeros', 'np.zeros', (['[2, num_zbins_tot]'], {}), '([2, num_zbins_tot])\n', (9291, 9311), True, 'import numpy as np\n'), ((9372, 9403), 'numpy.zeros_like', 'np.zeros_like', (['deltaT_ratio_tot'], {}), 
'(deltaT_ratio_tot)\n', (9385, 9403), True, 'import numpy as np\n'), ((11524, 11547), 'numpy.zeros_like', 'np.zeros_like', (['php_mean'], {}), '(php_mean)\n', (11537, 11547), True, 'import numpy as np\n'), ((11626, 11727), 'numpy.loadtxt', 'np.loadtxt', (["('LFver%s/%s/%s%s_KiDS_N.dat' % (LFver[0], DIR[0], stat, 1))"], {'usecols': '(0,)', 'unpack': '(True)'}), "('LFver%s/%s/%s%s_KiDS_N.dat' % (LFver[0], DIR[0], stat, 1),\n usecols=(0,), unpack=True)\n", (11636, 11727), True, 'import numpy as np\n'), ((16484, 16533), 'numpy.loadtxt', 'np.loadtxt', (["('%s/theta_bin_1_1.txt' % indir_theory)"], {}), "('%s/theta_bin_1_1.txt' % indir_theory)\n", (16494, 16533), True, 'import numpy as np\n'), ((1902, 1928), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N', 'N'], {}), '(0, N, N)\n', (1919, 1928), True, 'import numpy as np\n'), ((4381, 4405), 'numpy.ones_like', 'np.ones_like', (['delta_TPSF'], {}), '(delta_TPSF)\n', (4393, 4405), True, 'import numpy as np\n'), ((6810, 6821), 'time.time', 'time.time', ([], {}), '()\n', (6819, 6821), False, 'import time\n'), ((7428, 7463), 'numpy.append', 'np.append', (['T_Ng[idx_N]', 'T_Sg[idx_S]'], {}), '(T_Ng[idx_N], T_Sg[idx_S])\n', (7437, 7463), True, 'import numpy as np\n'), ((7479, 7518), 'numpy.append', 'np.append', (['delta_TPSF_Ng', 'delta_TPSF_Sg'], {}), '(delta_TPSF_Ng, delta_TPSF_Sg)\n', (7488, 7518), True, 'import numpy as np\n'), ((7538, 7583), 'numpy.append', 'np.append', (['weight_Ng[idx_N]', 'weight_Sg[idx_S]'], {}), '(weight_Ng[idx_N], weight_Sg[idx_S])\n', (7547, 7583), True, 'import numpy as np\n'), ((7969, 8010), 'numpy.average', 'np.average', (['(dT_p / T_g)'], {'weights': 'weight_q1'}), '(dT_p / T_g, weights=weight_q1)\n', (7979, 8010), True, 'import numpy as np\n'), ((8508, 8548), 'numpy.average', 'np.average', (['(1.0 / T_g)'], {'weights': 'weight_q2'}), '(1.0 / T_g, weights=weight_q2)\n', (8518, 8548), True, 'import numpy as np\n'), ((8931, 8942), 'time.time', 'time.time', ([], {}), '()\n', (8940, 8942), 
False, 'import time\n'), ((11779, 11823), 'numpy.zeros', 'np.zeros', (['[numN[lfv] + numS[lfv], 5, ThBins]'], {}), '([numN[lfv] + numS[lfv], 5, ThBins])\n', (11787, 11823), True, 'import numpy as np\n'), ((14886, 15006), 'numpy.loadtxt', 'np.loadtxt', (["('KAll.autocal.BlindA.alpha_VS_ZB.ZBcut0.1-1.2_LF_%s_2Dbins.dat' % tmp)"], {'usecols': '(1, 2, 3, 4)', 'unpack': '(True)'}), "('KAll.autocal.BlindA.alpha_VS_ZB.ZBcut0.1-1.2_LF_%s_2Dbins.dat' %\n tmp, usecols=(1, 2, 3, 4), unpack=True)\n", (14896, 15006), True, 'import numpy as np\n'), ((1085, 1119), 'numpy.logical_and', 'np.logical_and', (['(ra < rhi)', '(ra > rlo)'], {}), '(ra < rhi, ra > rlo)\n', (1099, 1119), True, 'import numpy as np\n'), ((1143, 1179), 'numpy.logical_and', 'np.logical_and', (['(dec < dhi)', '(dec > dlo)'], {}), '(dec < dhi, dec > dlo)\n', (1157, 1179), True, 'import numpy as np\n'), ((2023, 2058), 'numpy.sum', 'np.sum', (['(weights[idx] * samples[idx])'], {}), '(weights[idx] * samples[idx])\n', (2029, 2058), True, 'import numpy as np\n'), ((2060, 2080), 'numpy.sum', 'np.sum', (['weights[idx]'], {}), '(weights[idx])\n', (2066, 2080), True, 'import numpy as np\n'), ((5868, 5932), 'numpy.load', 'np.load', (["('%s/LFver%s/Catalogues/PSF_Data_N.npy' % (bgdir, LFver))"], {}), "('%s/LFver%s/Catalogues/PSF_Data_N.npy' % (bgdir, LFver))\n", (5875, 5932), True, 'import numpy as np\n'), ((6000, 6064), 'numpy.load', 'np.load', (["('%s/LFver%s/Catalogues/PSF_Data_S.npy' % (bgdir, LFver))"], {}), "('%s/LFver%s/Catalogues/PSF_Data_S.npy' % (bgdir, LFver))\n", (6007, 6064), True, 'import numpy as np\n'), ((8885, 8904), 'numpy.sqrt', 'np.sqrt', (['(2 * Tg_inv)'], {}), '(2 * Tg_inv)\n', (8892, 8904), True, 'import numpy as np\n'), ((10721, 10811), 'glob.glob', 'glob.glob', (["('LFver%s/%s/%s1_KiDS_N_*of%sx%s.dat' % (LFver[lfv], DIR[0], stat, Res, Res))"], {}), "('LFver%s/%s/%s1_KiDS_N_*of%sx%s.dat' % (LFver[lfv], DIR[0], stat,\n Res, Res))\n", (10730, 10811), False, 'import glob\n'), ((10867, 10957), 
'glob.glob', 'glob.glob', (["('LFver%s/%s/%s1_KiDS_S_*of%sx%s.dat' % (LFver[lfv], DIR[0], stat, Res, Res))"], {}), "('LFver%s/%s/%s1_KiDS_S_*of%sx%s.dat' % (LFver[lfv], DIR[0], stat,\n Res, Res))\n", (10876, 10957), False, 'import glob\n'), ((12523, 12556), 'numpy.interp', 'np.interp', (['theta', 'tmp_theta', 'phpN'], {}), '(theta, tmp_theta, phpN)\n', (12532, 12556), True, 'import numpy as np\n'), ((13299, 13332), 'numpy.interp', 'np.interp', (['theta', 'tmp_theta', 'phpS'], {}), '(theta, tmp_theta, phpS)\n', (13308, 13332), True, 'import numpy as np\n'), ((15305, 15346), 'numpy.sqrt', 'np.sqrt', (['(tmp_1_err ** 2 + tmp_a2_err ** 2)'], {}), '(tmp_1_err ** 2 + tmp_a2_err ** 2)\n', (15312, 15346), True, 'import numpy as np\n'), ((9639, 9697), 'numpy.sqrt', 'np.sqrt', (['(deltaT_ratio[1, i] ** 2 + deltaT_ratio[1, j] ** 2)'], {}), '(deltaT_ratio[1, i] ** 2 + deltaT_ratio[1, j] ** 2)\n', (9646, 9697), True, 'import numpy as np\n'), ((9800, 9850), 'numpy.sqrt', 'np.sqrt', (['(Tg_invsq[0, i] ** 2 + Tg_invsq[0, j] ** 2)'], {}), '(Tg_invsq[0, i] ** 2 + Tg_invsq[0, j] ** 2)\n', (9807, 9850), True, 'import numpy as np\n'), ((11910, 12019), 'numpy.loadtxt', 'np.loadtxt', (["('LFver%s/%s/%s%s_KiDS_N.dat' % (LFver[lfv], DIR[i], stat, i + 1))"], {'usecols': '(0, 1)', 'unpack': '(True)'}), "('LFver%s/%s/%s%s_KiDS_N.dat' % (LFver[lfv], DIR[i], stat, i + 1),\n usecols=(0, 1), unpack=True)\n", (11920, 12019), True, 'import numpy as np\n'), ((12686, 12795), 'numpy.loadtxt', 'np.loadtxt', (["('LFver%s/%s/%s%s_KiDS_S.dat' % (LFver[lfv], DIR[i], stat, i + 1))"], {'usecols': '(0, 1)', 'unpack': '(True)'}), "('LFver%s/%s/%s%s_KiDS_S.dat' % (LFver[lfv], DIR[i], stat, i + 1),\n usecols=(0, 1), unpack=True)\n", (12696, 12795), True, 'import numpy as np\n'), ((13577, 13632), 'numpy.loadtxt', 'np.loadtxt', (['NFiles[lfv][j]'], {'usecols': '(0, 1)', 'unpack': '(True)'}), '(NFiles[lfv][j], usecols=(0, 1), unpack=True)\n', (13587, 13632), True, 'import numpy as np\n'), ((13667, 13710), 
'numpy.interp', 'np.interp', (['theta', 'tmp_theta', 'tmp_php_splitN'], {}), '(theta, tmp_theta, tmp_php_splitN)\n', (13676, 13710), True, 'import numpy as np\n'), ((13813, 13868), 'numpy.loadtxt', 'np.loadtxt', (['SFiles[lfv][j]'], {'usecols': '(0, 1)', 'unpack': '(True)'}), '(SFiles[lfv][j], usecols=(0, 1), unpack=True)\n', (13823, 13868), True, 'import numpy as np\n'), ((13913, 13956), 'numpy.interp', 'np.interp', (['theta', 'tmp_theta', 'tmp_php_splitS'], {}), '(theta, tmp_theta, tmp_php_splitS)\n', (13922, 13956), True, 'import numpy as np\n'), ((16975, 17034), 'numpy.loadtxt', 'np.loadtxt', (["('%s/theta_bin_%s_%s.txt' % (indir_theory, i, j))"], {}), "('%s/theta_bin_%s_%s.txt' % (indir_theory, i, j))\n", (16985, 17034), True, 'import numpy as np\n'), ((17065, 17118), 'numpy.loadtxt', 'np.loadtxt', (["('%s/bin_%s_%s.txt' % (indir_theory, i, j))"], {}), "('%s/bin_%s_%s.txt' % (indir_theory, i, j))\n", (17075, 17118), True, 'import numpy as np\n'), ((17158, 17215), 'numpy.interp', 'np.interp', (['theta_theory', 'tmp_theta_theory', 'tmp_xip_theory'], {}), '(theta_theory, tmp_theta_theory, tmp_xip_theory)\n', (17167, 17215), True, 'import numpy as np\n'), ((12172, 12279), 'numpy.loadtxt', 'np.loadtxt', (["('LFver%s/%s/%s%s_KiDS_N.dat' % (LFver[lfv], DIR[i], stat, i + 1))"], {'usecols': '(3,)', 'unpack': '(True)'}), "('LFver%s/%s/%s%s_KiDS_N.dat' % (LFver[lfv], DIR[i], stat, i + 1),\n usecols=(3,), unpack=True)\n", (12182, 12279), True, 'import numpy as np\n'), ((12948, 13055), 'numpy.loadtxt', 'np.loadtxt', (["('LFver%s/%s/%s%s_KiDS_S.dat' % (LFver[lfv], DIR[i], stat, i + 1))"], {'usecols': '(3,)', 'unpack': '(True)'}), "('LFver%s/%s/%s%s_KiDS_S.dat' % (LFver[lfv], DIR[i], stat, i + 1),\n usecols=(3,), unpack=True)\n", (12958, 13055), True, 'import numpy as np\n'), ((14009, 14049), 'numpy.cov', 'np.cov', (['php_split[:, i, :]'], {'rowvar': '(False)'}), '(php_split[:, i, :], rowvar=False)\n', (14015, 14049), True, 'import numpy as np\n'), ((15842, 15888), 
'numpy.sqrt', 'np.sqrt', (['(tmp_a_err[j] ** 2 + tmp_a_err[i] ** 2)'], {}), '(tmp_a_err[j] ** 2 + tmp_a_err[i] ** 2)\n', (15849, 15888), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : basic_utils.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 09.08.2019
# Last Modified Date: 15.08.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# Distributed under MIT license
#
# python basic utilities
import sys
import numpy as np
def dict_gather(dicts):
    """Merge a sequence of dicts: for each key, collect its values across all
    dicts (in order of appearance) into one numpy array."""
    collected = {}
    for one in dicts:
        for key, value in one.items():
            collected.setdefault(key, []).append(value)
    return {key: np.array(values) for key, values in collected.items()}
def contains(x, elements):
    """Return True if any item of `elements` occurs in `x`."""
    return any(item in x for item in elements)
def only_contains(x, elements):
    """Return True if every item of `x` occurs in `elements`."""
    return all(member in elements for member in x)
def belongs_to(stats, query):
    """Return the first category (key) of `stats` whose item collection
    contains `query`, or None if no category matches."""
    return next(
        (category for category, members in stats.items() if query in members),
        None,
    )
def intersection(*arg, as_set=False):
    """Intersect multiple iterables.

    With `as_set=True` the result is a set; otherwise it is a list that keeps
    the order and multiplicity of the first argument.
    """
    if as_set:
        result = set(arg[0])
        for other in arg[1:]:
            result &= set(other)
    else:
        result = list(arg[0])
        for other in arg[1:]:
            members = set(other)
            result = [item for item in result if item in members]
    return result
def union(*arg, as_set=False):
    """Union of multiple iterables.

    With `as_set=True` the result is a set; otherwise it is a list containing
    the first argument followed by the not-yet-seen items of each subsequent
    argument, in order.
    """
    if as_set:
        combined = set(arg[0])
        for other in arg[1:]:
            combined |= set(other)
    else:
        combined = list(arg[0])
        for other in arg[1:]:
            present = set(combined)
            combined = combined + [item for item in other if item not in present]
    return combined
def sum_list(*arg):
    """Fold the arguments together with `+` (e.g. list concatenation or
    numeric summation); requires at least one argument."""
    total, rest = arg[0], arg[1:]
    for item in rest:
        total = total + item
    return total
def difference(x, y):
    """Elements of `x` not present in `y`.

    A set input returns a set; any other iterable returns a list that keeps
    the original order and multiplicity.
    """
    if isinstance(x, set):
        return x - set(y)
    excluded = set(y)
    return [item for item in x if item not in excluded]
def get_size(obj, seen=None):
    """Deep memory footprint of `obj` in bytes, recursing into dict entries,
    instance attributes and iterable members.

    `seen` tracks ids of objects already counted so shared or cyclic
    references contribute only once.
    """
    if seen is None:
        seen = set()
    marker = id(obj)
    if marker in seen:
        return 0
    seen.add(marker)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        total += sum(get_size(v, seen) for v in obj.values())
        total += sum(get_size(k, seen) for k in obj.keys())
    elif hasattr(obj, '__dict__'):
        total += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and \
            not isinstance(obj, (str, bytes, bytearray)):
        total += sum(get_size(i, seen) for i in obj)
    return total
class matrix_dict:
    """
    A 2D lookup table: values are addressed by a pair of keys, one along each
    axis (`keys_x` indexes rows of `values`, `keys_y` indexes columns).
    """
    def __init__(self, keys_x, keys_y, values):
        self.keys_x = keys_x
        self.keys_y = keys_y
        self.values = values
        self.build_dict()
        self.assert_values()

    def build_dict(self):
        # map each key to its position along its axis
        self.dict_x = {key: pos for pos, key in enumerate(self.keys_x)}
        self.dict_y = {key: pos for pos, key in enumerate(self.keys_y)}

    def assert_values(self):
        # values must be a dim_x-long list of dim_y-long lists
        assert isinstance(self.values, list) and len(self.values) == self.dim_x
        for row in self.values:
            assert isinstance(row, list)
            assert len(row) == self.dim_y

    @property
    def dim_x(self):
        return len(self.keys_x)

    @property
    def dim_y(self):
        return len(self.keys_y)

    def __getitem__(self, query):
        key_x, key_y = query
        return self.values[self.dict_x[key_x]][self.dict_y[key_y]]
| [
"numpy.array",
"sys.getsizeof"
] | [((2502, 2520), 'sys.getsizeof', 'sys.getsizeof', (['obj'], {}), '(obj)\n', (2515, 2520), False, 'import sys\n'), ((634, 650), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (642, 650), True, 'import numpy as np\n')] |
import gym
import pybulletgym
from gym import Wrapper,spaces
from torch.optim import Adam
from nn_builder.pytorch.NN import NN
import torch.nn.functional as F
import random
import numpy as np
from stable_baselines3 import SAC
import torch
from torch import nn
class DIAYN_Skill_Wrapper(Wrapper):
    """Gym wrapper implementing DIAYN ("Diversity Is All You Need") skills.

    At every reset a skill id is sampled uniformly and appended to the
    observation; the environment's reward is replaced by a discriminator-based
    pseudo-reward that encourages states to be distinguishable per skill.
    """
    def __init__(self, env, num_skills):
        Wrapper.__init__(self, env)
        self.num_skills = num_skills
        self.state_size = env.observation_space.shape[0]
        self.hidden_size = 128
        # The discriminator maps a state to (unnormalised) skill logits.
        self.discriminator = NN(input_dim = self.state_size,
                        layers_info = [self.hidden_size,self.hidden_size,self.num_skills],
                        hidden_activations = "relu",
                        output_activation = 'none',
                        )
        self.discriminator_optimizer = Adam(self.discriminator.parameters())
        # The prior distribution over skills is uniform.
        self.prior_probability_of_skill = 1.0 / self.num_skills
        # Extend the observation space by one dimension carrying the skill id.
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf,shape=(env.observation_space.shape[0]+1,), dtype=np.float32)
    def reset(self, **kwargs):
        # Sample a skill uniformly at random for the new episode.
        observation = self.env.reset(**kwargs)
        self.skill = random.randint(0, self.num_skills - 1)
        return self.observation(observation)
    def observation(self, observation):
        # Return the state with the current skill id appended.
        return np.concatenate((np.array(observation), np.array([self.skill])))
    def step(self, action):
        # The environment's native reward is discarded here.
        next_state, _, done, _ = self.env.step(action)
        # Replace it with the DIAYN pseudo-reward.
        new_reward, discriminator_outputs = self.calculate_new_reward(next_state)
        # Train the discriminator to predict the active skill from the state.
        self.disciminator_learn(self.skill, discriminator_outputs)
        return self.observation(next_state), new_reward, done, _
    def calculate_new_reward(self, next_state):
        # Get the discriminator logits and the probability assigned to the active skill.
        probability_correct_skill, disciminator_outputs = self.get_predicted_probability_of_skill(self.skill, next_state)
        # Reward as in the DIAYN paper: log q(skill|state) - log p(skill).
        new_reward = np.log(probability_correct_skill + 1e-8) - np.log(self.prior_probability_of_skill)
        return new_reward, disciminator_outputs
    def disciminator_learn(self, skill, discriminator_outputs):
        # Cross-entropy between the discriminator logits and the true skill.
        loss = nn.CrossEntropyLoss()(discriminator_outputs, torch.Tensor([skill]).long())
        # Backpropagate...
        loss.backward()
        # ...take one optimiser step...
        self.discriminator_optimizer.step()
        # ...and clear the gradients for the next update.
        self.discriminator_optimizer.zero_grad()
    def get_predicted_probability_of_skill(self, skill, next_state):
        # Discriminator predicts skill logits from next_state.
        predicted_probabilities_unnormalised = self.discriminator(torch.Tensor(next_state).unsqueeze(0))
        # Softmax probability assigned to the correct skill.
        probability_of_correct_skill = F.softmax(predicted_probabilities_unnormalised,dim=-1)[:, skill]
        return probability_of_correct_skill.item(), predicted_probabilities_unnormalised
| [
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss",
"numpy.log",
"torch.Tensor",
"gym.spaces.Box",
"numpy.array",
"nn_builder.pytorch.NN.NN",
"gym.Wrapper.__init__",
"random.randint"
] | [((346, 373), 'gym.Wrapper.__init__', 'Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (362, 373), False, 'from gym import Wrapper, spaces\n'), ((577, 735), 'nn_builder.pytorch.NN.NN', 'NN', ([], {'input_dim': 'self.state_size', 'layers_info': '[self.hidden_size, self.hidden_size, self.num_skills]', 'hidden_activations': '"""relu"""', 'output_activation': '"""none"""'}), "(input_dim=self.state_size, layers_info=[self.hidden_size, self.\n hidden_size, self.num_skills], hidden_activations='relu',\n output_activation='none')\n", (579, 735), False, 'from nn_builder.pytorch.NN import NN\n'), ((1117, 1220), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(env.observation_space.shape[0] + 1,)', 'dtype': 'np.float32'}), '(low=-np.inf, high=np.inf, shape=(env.observation_space.shape[0] +\n 1,), dtype=np.float32)\n', (1127, 1220), False, 'from gym import Wrapper, spaces\n'), ((1336, 1374), 'random.randint', 'random.randint', (['(0)', '(self.num_skills - 1)'], {}), '(0, self.num_skills - 1)\n', (1350, 1374), False, 'import random\n'), ((2206, 2247), 'numpy.log', 'np.log', (['(probability_correct_skill + 1e-08)'], {}), '(probability_correct_skill + 1e-08)\n', (2212, 2247), True, 'import numpy as np\n'), ((2249, 2288), 'numpy.log', 'np.log', (['self.prior_probability_of_skill'], {}), '(self.prior_probability_of_skill)\n', (2255, 2288), True, 'import numpy as np\n'), ((2461, 2482), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2480, 2482), False, 'from torch import nn\n'), ((2997, 3052), 'torch.nn.functional.softmax', 'F.softmax', (['predicted_probabilities_unnormalised'], {'dim': '(-1)'}), '(predicted_probabilities_unnormalised, dim=-1)\n', (3006, 3052), True, 'import torch.nn.functional as F\n'), ((1517, 1538), 'numpy.array', 'np.array', (['observation'], {}), '(observation)\n', (1525, 1538), True, 'import numpy as np\n'), ((1540, 1562), 'numpy.array', 'np.array', (['[self.skill]'], {}), 
'([self.skill])\n', (1548, 1562), True, 'import numpy as np\n'), ((2506, 2527), 'torch.Tensor', 'torch.Tensor', (['[skill]'], {}), '([skill])\n', (2518, 2527), False, 'import torch\n'), ((2897, 2921), 'torch.Tensor', 'torch.Tensor', (['next_state'], {}), '(next_state)\n', (2909, 2921), False, 'import torch\n')] |
from unittest import TestCase
import unittest
from equadratures import *
import numpy as np
from copy import deepcopy
def model(x):
    """Bivariate polynomial test function: x0^2 + x1^3 - x0*x1^2."""
    quadratic = x[0] ** 2
    cubic = x[1] ** 3
    cross = x[0] * x[1] ** 2
    return quadratic + cubic - cross
class TestF(TestCase):
    """Checks that NaN entries in the model evaluations do not change the
    mean and variance recovered from a tensor-grid polynomial fit."""
    def test_tensor_grid_with_nans(self):
        """Fit the same 2D tensor-grid polynomial with and without NaNs in
        the model evaluations and require identical output moments."""
        # Reference fit: no NaNs in the model evaluations.
        param = Parameter(distribution='uniform', lower=-1., upper=1., order=4)
        basis = Basis('tensor-grid')
        poly = Poly(parameters=[param, param], basis=basis, method='numerical-integration')
        pts, wts = poly.get_points_and_weights()
        model_evals = evaluate_model(pts, model)
        poly.set_model(model_evals)
        mean, variance = poly.get_mean_and_variance()
        # Same fit, but with a few evaluations replaced by NaN.
        model_evals_with_NaNs = deepcopy(model_evals)
        indices_to_set_to_NaN = np.asarray([1, 3, 9, 13])
        model_evals_with_NaNs[indices_to_set_to_NaN] = np.nan * indices_to_set_to_NaN.reshape(len(indices_to_set_to_NaN),1)
        basis2 = Basis('tensor-grid')
        poly2 = Poly(parameters=[param, param], basis=basis2, method='numerical-integration')
        poly2.set_model(model_evals_with_NaNs)
        # BUGFIX: the original re-assigned `mean, variance` from poly2 here,
        # overwriting the NaN-free reference, so the asserts below compared
        # poly2 with itself and could never fail.
        mean_with_nans, variance_with_nans = poly2.get_mean_and_variance()
        # The moments must agree with the NaN-free reference.
        np.testing.assert_almost_equal(mean, mean_with_nans, decimal=7, err_msg='Problem!')
        np.testing.assert_almost_equal(variance, variance_with_nans, decimal=7, err_msg='Problem!')
if __name__== '__main__':
unittest.main() | [
"unittest.main",
"numpy.testing.assert_almost_equal",
"numpy.asarray",
"copy.deepcopy"
] | [((1472, 1487), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1485, 1487), False, 'import unittest\n'), ((718, 739), 'copy.deepcopy', 'deepcopy', (['model_evals'], {}), '(model_evals)\n', (726, 739), False, 'from copy import deepcopy\n'), ((772, 797), 'numpy.asarray', 'np.asarray', (['[1, 3, 9, 13]'], {}), '([1, 3, 9, 13])\n', (782, 797), True, 'import numpy as np\n'), ((1257, 1345), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['mean', 'mean_with_nans'], {'decimal': '(7)', 'err_msg': '"""Problem!"""'}), "(mean, mean_with_nans, decimal=7, err_msg=\n 'Problem!')\n", (1287, 1345), True, 'import numpy as np\n'), ((1349, 1444), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['variance', 'variance_with_nans'], {'decimal': '(7)', 'err_msg': '"""Problem!"""'}), "(variance, variance_with_nans, decimal=7,\n err_msg='Problem!')\n", (1379, 1444), True, 'import numpy as np\n')] |
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: <NAME>
## ECE Department, Rutgers University
## Email: <EMAIL>
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
import torch
from torch.autograd import Variable, gradcheck
import encoding
EPS = 1e-3
ATOL = 1e-3
def _assert_tensor_close(a, b, atol=ATOL, rtol=EPS):
npa, npb = a.cpu().numpy(), b.cpu().numpy()
assert np.allclose(npa, npb, rtol=rtol, atol=atol), \
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
def test_aggregate():
B,N,K,D = 2,3,4,5
A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5),
requires_grad=True)
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
input = (A, X, C)
test = gradcheck(encoding.functions.aggregate, input, eps=EPS, atol=ATOL)
print('Testing aggregate(): {}'.format(test))
def test_scaled_l2():
B,N,K,D = 2,3,4,5
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
requires_grad=True)
input = (X, C, S)
test = gradcheck(encoding.functions.scaled_l2, input, eps=EPS, atol=ATOL)
print('Testing scaled_l2(): {}'.format(test))
def test_moments():
B,C,H = 2,3,4
X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5,0.5),
requires_grad=True)
input = (X,)
test = gradcheck(encoding.functions.moments, input, eps=EPS, atol=ATOL)
print('Testing moments(): {}'.format(test))
def test_non_max_suppression():
def _test_nms(cuda):
# check a small test case
boxes = torch.Tensor([
[[10.2, 23., 50., 20.],
[11.3, 23., 52., 20.1],
[23.2, 102.3, 23.3, 50.3],
[101.2, 32.4, 70.6, 70.],
[100.2, 30.9, 70.7, 69.]],
[[200.3, 234., 530., 320.],
[110.3, 223., 152., 420.1],
[243.2, 240.3, 50.3, 30.3],
[243.2, 236.4, 48.6, 30.],
[100.2, 310.9, 170.7, 691.]]])
scores = torch.Tensor([
[0.9, 0.7, 0.11, 0.23, 0.8],
[0.13, 0.89, 0.45, 0.23, 0.3]])
if cuda:
boxes = boxes.cuda()
scores = scores.cuda()
expected_output = (
torch.ByteTensor(
[[1, 1, 0, 0, 1], [1, 1, 1, 0, 1]]),
torch.LongTensor(
[[0, 4, 1, 3, 2], [1, 2, 4, 3, 0]])
)
mask, inds = encoding.functions.NonMaxSuppression(boxes, scores, 0.7)
_assert_tensor_close(mask, expected_output[0])
_assert_tensor_close(inds, expected_output[1])
_test_nms(False)
_test_nms(True)
if __name__ == '__main__':
import nose
nose.runmodule()
| [
"numpy.abs",
"numpy.allclose",
"numpy.fmax",
"torch.LongTensor",
"torch.Tensor",
"nose.runmodule",
"torch.cuda.DoubleTensor",
"encoding.functions.NonMaxSuppression",
"torch.ByteTensor",
"torch.autograd.gradcheck"
] | [((620, 663), 'numpy.allclose', 'np.allclose', (['npa', 'npb'], {'rtol': 'rtol', 'atol': 'atol'}), '(npa, npb, rtol=rtol, atol=atol)\n', (631, 663), True, 'import numpy as np\n'), ((1197, 1263), 'torch.autograd.gradcheck', 'gradcheck', (['encoding.functions.aggregate', 'input'], {'eps': 'EPS', 'atol': 'ATOL'}), '(encoding.functions.aggregate, input, eps=EPS, atol=ATOL)\n', (1206, 1263), False, 'from torch.autograd import Variable, gradcheck\n'), ((1677, 1743), 'torch.autograd.gradcheck', 'gradcheck', (['encoding.functions.scaled_l2', 'input'], {'eps': 'EPS', 'atol': 'ATOL'}), '(encoding.functions.scaled_l2, input, eps=EPS, atol=ATOL)\n', (1686, 1743), False, 'from torch.autograd import Variable, gradcheck\n'), ((1959, 2023), 'torch.autograd.gradcheck', 'gradcheck', (['encoding.functions.moments', 'input'], {'eps': 'EPS', 'atol': 'ATOL'}), '(encoding.functions.moments, input, eps=EPS, atol=ATOL)\n', (1968, 2023), False, 'from torch.autograd import Variable, gradcheck\n'), ((3280, 3296), 'nose.runmodule', 'nose.runmodule', ([], {}), '()\n', (3294, 3296), False, 'import nose\n'), ((2180, 2496), 'torch.Tensor', 'torch.Tensor', (['[[[10.2, 23.0, 50.0, 20.0], [11.3, 23.0, 52.0, 20.1], [23.2, 102.3, 23.3, \n 50.3], [101.2, 32.4, 70.6, 70.0], [100.2, 30.9, 70.7, 69.0]], [[200.3, \n 234.0, 530.0, 320.0], [110.3, 223.0, 152.0, 420.1], [243.2, 240.3, 50.3,\n 30.3], [243.2, 236.4, 48.6, 30.0], [100.2, 310.9, 170.7, 691.0]]]'], {}), '([[[10.2, 23.0, 50.0, 20.0], [11.3, 23.0, 52.0, 20.1], [23.2, \n 102.3, 23.3, 50.3], [101.2, 32.4, 70.6, 70.0], [100.2, 30.9, 70.7, 69.0\n ]], [[200.3, 234.0, 530.0, 320.0], [110.3, 223.0, 152.0, 420.1], [243.2,\n 240.3, 50.3, 30.3], [243.2, 236.4, 48.6, 30.0], [100.2, 310.9, 170.7, \n 691.0]]])\n', (2192, 2496), False, 'import torch\n'), ((2611, 2685), 'torch.Tensor', 'torch.Tensor', (['[[0.9, 0.7, 0.11, 0.23, 0.8], [0.13, 0.89, 0.45, 0.23, 0.3]]'], {}), '([[0.9, 0.7, 0.11, 0.23, 0.8], [0.13, 0.89, 0.45, 0.23, 0.3]])\n', (2623, 2685), False, 
'import torch\n'), ((3023, 3079), 'encoding.functions.NonMaxSuppression', 'encoding.functions.NonMaxSuppression', (['boxes', 'scores', '(0.7)'], {}), '(boxes, scores, 0.7)\n', (3059, 3079), False, 'import encoding\n'), ((2838, 2890), 'torch.ByteTensor', 'torch.ByteTensor', (['[[1, 1, 0, 0, 1], [1, 1, 1, 0, 1]]'], {}), '([[1, 1, 0, 0, 1], [1, 1, 1, 0, 1]])\n', (2854, 2890), False, 'import torch\n'), ((2921, 2973), 'torch.LongTensor', 'torch.LongTensor', (['[[0, 4, 1, 3, 2], [1, 2, 4, 3, 0]]'], {}), '([[0, 4, 1, 3, 2], [1, 2, 4, 3, 0]])\n', (2937, 2973), False, 'import torch\n'), ((757, 774), 'numpy.abs', 'np.abs', (['(npa - npb)'], {}), '(npa - npb)\n', (763, 774), True, 'import numpy as np\n'), ((892, 924), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['B', 'N', 'K'], {}), '(B, N, K)\n', (915, 924), False, 'import torch\n'), ((989, 1021), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['B', 'N', 'D'], {}), '(B, N, D)\n', (1012, 1021), False, 'import torch\n'), ((1086, 1115), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['K', 'D'], {}), '(K, D)\n', (1109, 1115), False, 'import torch\n'), ((1376, 1408), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['B', 'N', 'D'], {}), '(B, N, D)\n', (1399, 1408), False, 'import torch\n'), ((1473, 1502), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['K', 'D'], {}), '(K, D)\n', (1496, 1502), False, 'import torch\n'), ((1568, 1594), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['K'], {}), '(K)\n', (1591, 1594), False, 'import torch\n'), ((1851, 1883), 'torch.cuda.DoubleTensor', 'torch.cuda.DoubleTensor', (['B', 'C', 'H'], {}), '(B, C, H)\n', (1874, 1883), False, 'import torch\n'), ((803, 822), 'numpy.fmax', 'np.fmax', (['npa', '(1e-05)'], {}), '(npa, 1e-05)\n', (810, 822), True, 'import numpy as np\n')] |
from openwater.split import split_time_series
import numpy as np
def test_create_split_windows():
grp = {
'DummyModel':{
'inputs':np.zeros((1,1,10000))
}
}
BREAKS = [
[100,1000,5000],
[0,100,1000,5000],
[100,1000,5000,10000],
[0,100,1000,5000,10000]
]
for breaks in BREAKS:
windows = split_time_series(grp,10,breaks)
assert len(windows)==4
assert windows[0] == (0,100)
assert windows[1] == (100,1000)
assert windows[2] == (1000,5000)
assert windows[3] == (5000,10000)
BREAKS = [
[],
[0],
[10000],
[0,10000]
]
for breaks in BREAKS:
windows = split_time_series(grp,11,breaks)
assert len(windows)==1
assert windows[0] == (0,10000)
windows = split_time_series(grp,11,None)
assert len(windows)==11
assert windows[0] == (0,909)
assert windows[1] == (909,1818)
assert windows[10] == (9090,10000)
| [
"numpy.zeros",
"openwater.split.split_time_series"
] | [((846, 878), 'openwater.split.split_time_series', 'split_time_series', (['grp', '(11)', 'None'], {}), '(grp, 11, None)\n', (863, 878), False, 'from openwater.split import split_time_series\n'), ((377, 411), 'openwater.split.split_time_series', 'split_time_series', (['grp', '(10)', 'breaks'], {}), '(grp, 10, breaks)\n', (394, 411), False, 'from openwater.split import split_time_series\n'), ((728, 762), 'openwater.split.split_time_series', 'split_time_series', (['grp', '(11)', 'breaks'], {}), '(grp, 11, breaks)\n', (745, 762), False, 'from openwater.split import split_time_series\n'), ((157, 180), 'numpy.zeros', 'np.zeros', (['(1, 1, 10000)'], {}), '((1, 1, 10000))\n', (165, 180), True, 'import numpy as np\n')] |
from server.models.postgis.mapping_issues import MappingIssueCategory
from server.models.postgis.task import TaskMappingIssue, TaskHistory, Task
from server.models.postgis.user import User
from server.models.postgis.project import Project
from server.models.postgis.statuses import TaskStatus
from server.models.dtos.mapping_issues_dto import MappingIssueCategoryDTO
from server.models.dtos.stats_dto import ProjectContributionsDTO
from server.services.stats_service import StatsService
from copy import deepcopy
import numpy as np
import typing
class MappingIssueCategoryService:
@staticmethod
def get_mapping_issue_category(category_id: int) -> MappingIssueCategory:
"""
Get MappingIssueCategory from DB
:raises: NotFound
"""
category = MappingIssueCategory.get_by_id(category_id)
if category is None:
raise NotFound()
return category
@staticmethod
def get_mapping_issue_category_as_dto(category_id: int) -> MappingIssueCategoryDTO:
""" Get MappingIssueCategory from DB """
category = MappingIssueCategoryService.get_mapping_issue_category(category_id)
return category.as_dto()
@staticmethod
def create_mapping_issue_category(category_dto: MappingIssueCategoryDTO) -> int:
""" Create MappingIssueCategory in DB """
new_mapping_issue_category_id = MappingIssueCategory.create_from_dto(category_dto)
return new_mapping_issue_category_id
@staticmethod
def update_mapping_issue_category(category_dto: MappingIssueCategoryDTO) -> MappingIssueCategoryDTO:
""" Create MappingIssueCategory in DB """
category = MappingIssueCategoryService.get_mapping_issue_category(category_dto.category_id)
category.update_category(category_dto)
return category.as_dto()
@staticmethod
def delete_mapping_issue_category(category_id: int):
""" Delete specified license"""
category = MappingIssueCategoryService.get_mapping_issue_category(category_id)
category.delete()
@staticmethod
def get_all_mapping_issue_categories(include_archived):
""" Get all mapping issue categories"""
return MappingIssueCategory.get_all_categories(include_archived)
class MappingIssueExportService:
#fields for testing
category_names_dict = {}
totals = None
single_task_rows = {}
user_issue_totals = {}
def get_mapping_issues(self, project_id: int, detailedView: str, zerosRows: str):
"""
Returns a csv string of all mapping issues associated with the given project summed and sorted by user
raises: NotFound
"""
detailed = False
if (detailedView == "true"):
detailed = True
zeros = False
if (zerosRows == "true"):
zeros = True
user_dict, project_users, num_validated_tasks, tasks_as_tasks_dict = MappingIssueExportService.compile_validated_tasks_by_user(project_id, zeros)
data_table, category_index_dict, category_names_dict, max_category_index, totals = MappingIssueExportService.build_issue_totals_table(user_dict, project_users)
self.totals = totals #for testing
self.category_names_dict = category_names_dict #for testing
"""
Format
Basic
, issue, issue, issue, ...
user, count, count, count, ...
totals ...
Detailed
User , taskID:validated by, issues......
, id : validator , count , count , count ...
, id : validator , count , count , count ...
user totals ...
grand totals at the bottom
"""
""" Build the csv string """
project_contrib_dto = StatsService.get_user_contributions(project_id)
all_rows = []
issue_names_row = []
if (detailed):
issue_names_row.append('Username (tasks mapped)')
issue_names_row.append('taskId: ValidatedBy')
else:
issue_names_row.append('Username (tasks mapped)')
for i in range(1, max_category_index + 1):
issue_names_row.append(MappingIssueExportService.format_field(category_names_dict[i]))
all_rows.append(','.join(issue_names_row))
for user in project_users:
row = []
if (detailed):
row.append('')
i = 0
for task in user_dict[user]:
validator = User.get_by_id(self, tasks_as_tasks_dict[task].validated_by).username
single_task_row = []
if (i == 0):
single_task_row.append(user)
else:
single_task_row.append('')
single_task_row.append(str(task) + ': ' + validator)
single_task_row_issue_counts = np.zeros(max_category_index + 1, dtype='i')
for issue in user_dict[user][task]:
category_index = category_index_dict[issue]
single_task_row_issue_counts[category_index] = user_dict[user][task][issue]
for issue_count in single_task_row_issue_counts:
single_task_row.append(str(issue_count))
single_task_row.pop(2)
all_rows.append(",".join(single_task_row))
self.single_task_rows[task] = single_task_row #for testing
i += 1
num_mapped_tasks = -1
for user_contrib in project_contrib_dto.user_contributions:
if (user_contrib.username == user):
num_mapped_tasks = user_contrib.mapped
break
row.append(user + ' (' + str(num_mapped_tasks) + ')')
for issue_count in data_table[user]:
row.append(str(issue_count))
row.pop(1)
self.user_issue_totals[user] = deepcopy(row) #for testing
if (detailed):
row[1] = ''
row[0] = user + ' totals (' + str(num_mapped_tasks) + ')'
all_rows.append(','.join(row))
if (detailed):
all_rows[-1] = all_rows[-1] + '\n'
totals_row = ['Project Totals']
for value in totals:
totals_row.append(str(value))
if (not detailed):
totals_row.pop(1)
else:
totals_row[1] = ''
all_rows.append(','.join(totals_row))
csv_string = '\n'.join(all_rows)
return csv_string
@staticmethod
def compile_validated_tasks_by_user(project_id: int, zeros: bool) -> typing.Tuple[typing.Dict[str, typing.Dict[int, typing.Dict[str, str]]], typing.List[str], int, typing.Dict[int, Task]]:
"""
:returns: user_dict {str username : {int taskId : {str issue : str issue_count}}}
project_users: Set[str]
int num_validated_tasks
tasks_as_tasks_dict {int taskId : task}
"""
all_project_tasks = Task.get_all_tasks(project_id)
validated_tasks = []
project_users = []
for task in all_project_tasks:
if (task.task_status == TaskStatus.VALIDATED.value):
validated_tasks.append(task)
if task.mapper.username not in project_users:
project_users.append(task.mapper.username)
else:
continue
def mapper_username(task):
return task.mapper.username
validated_tasks.sort(key=mapper_username)
user_dict = {}
for user in project_users:
user_dict[user] = {}
tasks_as_tasks_dict = {}
task_dict = {}
issue_dict = {}
i = 0
current_username = None
for task in validated_tasks:
tasks_as_tasks_dict[task.id] = task
if (i == 0):
current_username = task.mapper.username
if (task.mapper.username != current_username):
user_dict[current_username] = deepcopy(task_dict)
current_username = task.mapper.username
task_dict.clear()
issue_dict.clear()
for hist in task.task_history:
if (len(hist.task_mapping_issues) > 0):
for issue in hist.task_mapping_issues:
issue_dict[issue.issue] = issue.count
if (len(issue_dict.keys()) > 0):
task_dict[task.id] = deepcopy(issue_dict)
elif (zeros):
task_dict[task.id] = {}
i += 1
user_dict[current_username] = task_dict
return user_dict, project_users, len(validated_tasks), tasks_as_tasks_dict
@staticmethod
def build_issue_totals_table(user_dict, project_users) -> typing.Tuple[typing.Dict[str, np.array], typing.Dict[str, int], typing.Dict[int, str], int, np.array]:
""" Get category names and create table of issue totals: users -> arrayOfMappingIssueTotals """
""" Includes a row of overall totals at the bottom """
"""
:returns: data_table {str user : numpy array of user issue totals}
category_index_dict {str issue : int issueId}
category_names_dict {int the new issueId assigned : str issue}
max_category_index: int
totals: numpy array of project issue totals
"""
categories_dto = MappingIssueCategoryService.get_all_mapping_issue_categories(False)
categories = categories_dto.categories
def category_id(categories_dto):
return categories_dto.category_id
categories.sort(key=category_id)
category_index_dict = {}
category_names_dict = {}
max_category_index = 0
i = 0
for category in categories:
i += 1
category.category_id = i
max_category_index = i
for category in categories:
category_index_dict[category.name] = category.category_id
category_names_dict[category.category_id] = category.name
unarchived_categories = category_index_dict.keys()
data_table = {}
totals = np.zeros(max_category_index + 1, dtype='i')
for user in project_users:
data_table[user] = np.zeros(max_category_index + 1, dtype='i')
for task in user_dict[user]:
for issue in user_dict[user][task]: #issue is the name of the issue
if (issue in unarchived_categories):
category_index = category_index_dict[issue]
issue_count = user_dict[user][task][issue]
data_table[user][category_index] += issue_count
totals[category_index] += issue_count
return data_table, category_index_dict, category_names_dict, max_category_index, totals
@staticmethod
def format_field(field):
if field.find(",") == -1:
return field
else:
newField = ["\""]
for char in field:
if (char == "\""):
newField.append("\"\"")
else:
newField.append(char)
newField.append("\"")
quotedField = "".join(newField)
return quotedField
| [
"server.models.postgis.task.Task.get_all_tasks",
"server.models.postgis.mapping_issues.MappingIssueCategory.get_by_id",
"server.services.stats_service.StatsService.get_user_contributions",
"server.models.postgis.mapping_issues.MappingIssueCategory.create_from_dto",
"numpy.zeros",
"copy.deepcopy",
"serve... | [((789, 832), 'server.models.postgis.mapping_issues.MappingIssueCategory.get_by_id', 'MappingIssueCategory.get_by_id', (['category_id'], {}), '(category_id)\n', (819, 832), False, 'from server.models.postgis.mapping_issues import MappingIssueCategory\n'), ((1387, 1437), 'server.models.postgis.mapping_issues.MappingIssueCategory.create_from_dto', 'MappingIssueCategory.create_from_dto', (['category_dto'], {}), '(category_dto)\n', (1423, 1437), False, 'from server.models.postgis.mapping_issues import MappingIssueCategory\n'), ((2208, 2265), 'server.models.postgis.mapping_issues.MappingIssueCategory.get_all_categories', 'MappingIssueCategory.get_all_categories', (['include_archived'], {}), '(include_archived)\n', (2247, 2265), False, 'from server.models.postgis.mapping_issues import MappingIssueCategory\n'), ((3814, 3861), 'server.services.stats_service.StatsService.get_user_contributions', 'StatsService.get_user_contributions', (['project_id'], {}), '(project_id)\n', (3849, 3861), False, 'from server.services.stats_service import StatsService\n'), ((7116, 7146), 'server.models.postgis.task.Task.get_all_tasks', 'Task.get_all_tasks', (['project_id'], {}), '(project_id)\n', (7134, 7146), False, 'from server.models.postgis.task import TaskMappingIssue, TaskHistory, Task\n'), ((10267, 10310), 'numpy.zeros', 'np.zeros', (['(max_category_index + 1)'], {'dtype': '"""i"""'}), "(max_category_index + 1, dtype='i')\n", (10275, 10310), True, 'import numpy as np\n'), ((6039, 6052), 'copy.deepcopy', 'deepcopy', (['row'], {}), '(row)\n', (6047, 6052), False, 'from copy import deepcopy\n'), ((10377, 10420), 'numpy.zeros', 'np.zeros', (['(max_category_index + 1)'], {'dtype': '"""i"""'}), "(max_category_index + 1, dtype='i')\n", (10385, 10420), True, 'import numpy as np\n'), ((8138, 8157), 'copy.deepcopy', 'deepcopy', (['task_dict'], {}), '(task_dict)\n', (8146, 8157), False, 'from copy import deepcopy\n'), ((8584, 8604), 'copy.deepcopy', 'deepcopy', (['issue_dict'], {}), 
'(issue_dict)\n', (8592, 8604), False, 'from copy import deepcopy\n'), ((4950, 4993), 'numpy.zeros', 'np.zeros', (['(max_category_index + 1)'], {'dtype': '"""i"""'}), "(max_category_index + 1, dtype='i')\n", (4958, 4993), True, 'import numpy as np\n'), ((4551, 4611), 'server.models.postgis.user.User.get_by_id', 'User.get_by_id', (['self', 'tasks_as_tasks_dict[task].validated_by'], {}), '(self, tasks_as_tasks_dict[task].validated_by)\n', (4565, 4611), False, 'from server.models.postgis.user import User\n')] |
#!/usr/bin/env python3
'''A reference implementation of Bloom filter-based Iris-Code indexing.'''
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017 Hochschule Darmstadt"
__license__ = "License Agreement provided by Hochschule Darmstadt(https://github.com/dasec/bloom-filter-iris-indexing/blob/master/hda-license.pdf)"
__version__ = "1.0"
import argparse
import copy
import math
import operator
import sys
from pathlib import Path
from timeit import default_timer as timer
from typing import Tuple, List, Set
import numpy as np
parser = argparse.ArgumentParser(description='Bloom filter-based Iris-Code indexing.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
required = parser.add_argument_group('required named arguments')
required.add_argument('-d', '--directory', action='store', type=Path, required=True, help='directory where the binary templates are stored')
required.add_argument('-n', '--enrolled', action='store', type=int, required=True, help='number of enrolled subjects')
required.add_argument('-bh', '--height', action='store', type=int, required=True, help='filter block height')
required.add_argument('-bw', '--width', action='store', type=int, required=True, help='fitler block width')
required.add_argument('-T', '--constructed', action='store', type=int, required=True, help='number of trees constructed')
required.add_argument('-t', '--traversed', action='store', type=int, required=True, help='number of trees traversed')
args = parser.parse_args()
required_python_version = (3, 5)
if (sys.version_info.major, sys.version_info.minor) < required_python_version:
sys.exit("Python {}.{} or newer is required to run this program".format(*required_python_version))
allowed_bf_heights = frozenset(range(8, 13))
allowed_bf_widths = frozenset({8, 16, 32, 64})
class BloomTemplate(object):
'''Represents a Bloom Filter template or a Bloom Filter tree node'''
def __init__(self, bloom_filter_sets: List[Set[int]], source: List[Tuple[str, str, str, str]]):
self.bloom_filter_sets = bloom_filter_sets
self.source = source
def compare(self, other) -> float:
'''Measures dissimilarity between two BloomTemplates'''
return sum(len(s1 ^ s2) / (len(s1) + len(s2)) for s1, s2 in zip(self.bloom_filter_sets, other.bloom_filter_sets)) / len(self)
def __add__(self, other):
'''Merge two BloomTemplates by ORing their bloom filter sets'''
return BloomTemplate([s1 | s2 for s1, s2 in zip(self.bloom_filter_sets, other.bloom_filter_sets)], self.source + [s for s in other.source if s not in self.source])
def __iadd__(self, other):
'''Add (OR) another template to self in-place'''
self.bloom_filter_sets = [s1 | s2 for s1, s2 in zip(self.bloom_filter_sets, other.bloom_filter_sets)]
self.source += (s for s in other.source if s not in self.source)
return self
def __len__(self) -> int:
'''Number of bloom filters in the template'''
return len(self.bloom_filter_sets)
def __getitem__(self, key: int) -> Set[int]:
'''Convenience access for individual bloom filters in the template'''
return self.bloom_filter_sets[key]
def __repr__(self) -> str:
return "Bloom filter template of {}".format(self.source)
# Convenience functions for template source comparison
def is_same_subject(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[0] == o_item[0] for s_item, o_item in zip(self.source, other.source))
def is_same_image(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[1] == o_item[1] for s_item, o_item in zip(self.source, other.source))
def is_same_side(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[2] == o_item[2] for s_item, o_item in zip(self.source, other.source))
def is_same_dataset(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[3] == o_item[3] for s_item, o_item in zip(self.source, other.source))
def is_same_genuine(self, other) -> bool:
return len(self.source) == len(other.source) and self.is_same_subject(other) and self.is_same_side(other) and self.is_same_dataset(other)
def is_same_source(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item == o_item for s_item, o_item in zip(self.source, other.source))
def is_multi_source(self) -> bool:
return len(self.source) > 1
@classmethod
def from_binary_template(cls, binary_template: List[List[int]], height: int, width: int, source: List[Tuple[str, str, str, str]]):
'''Creates a BloomTemplate with specified block size from an iris code represented as a 2-dimensional (row x column) array of 0's and 1's. The source is a list of tuples following format: [(subject, image_number, side, dataset), ...]'''
if height not in allowed_bf_heights or width not in allowed_bf_widths:
raise ValueError("Invalid block size: ({}, {})".format(height, width))
binary_template = np.array(binary_template)
bf_sets = []
bf_real = set()
bf_imaginary = set()
for column_number, column in enumerate(binary_template.T):
real_part = ''.join(map(str, column[:height]))
im_part_start = 10 if height <= 10 else len(binary_template) - height
im_part_end = im_part_start + height
imaginary_part = ''.join(map(str, column[im_part_start:im_part_end]))
bf_value_real = int(real_part, 2)
bf_value_imaginary = int(imaginary_part, 2)
bf_real.add(bf_value_real)
bf_imaginary.add(bf_value_imaginary)
if column_number != 0 and (column_number + 1) % width == 0:
bf_sets.append(bf_real)
bf_sets.append(bf_imaginary)
bf_real = set()
bf_imaginary = set()
return BloomTemplate(bf_sets, source)
BF_TREE = List[BloomTemplate]
class BloomTreeDb(object):
'''Represents a database of BloomTemplate trees'''
def __init__(self, enrolled: List[BloomTemplate], trees_constructed: int):
def is_power_of2(number: int) -> bool:
'''Check if a number is a power of 2.'''
return number > 0 and (number & (number - 1)) == 0
if not is_power_of2(len(enrolled)) or not is_power_of2(trees_constructed):
raise ValueError("Number of subjects ({}) and trees ({}) must both be a power of 2".format(len(enrolled), trees_constructed))
self.enrolled = enrolled
self.trees_constructed = trees_constructed
self.trees = self._build()
def search(self, probe: BloomTemplate, trees_traversed: int) -> Tuple[float, BloomTemplate]:
'''Perform a search for a template matching the probe in the database.'''
def find_promising_trees(probe: BloomTemplate, trees_traversed: int) -> List[BF_TREE]:
'''Preselection step - most promising trees are found based on the scores between the tree roots and the probe'''
if self.trees_constructed == trees_traversed:
return self.trees
else:
root_scores = [(tree[0].compare(probe), index) for index, tree in enumerate(self.trees)]
root_scores.sort(key=operator.itemgetter(0))
promising_tree_indexes = map(operator.itemgetter(1), root_scores[:trees_traversed])
return [self.trees[index] for index in promising_tree_indexes]
def traverse(trees: List[BF_TREE], probe: BloomTemplate) -> Tuple[float, BloomTemplate]:
'''Traverse the selected trees to find the node corresponding to a best score'''
best_score, best_match_node = 1.0, None
for _, tree in enumerate(trees):
step = 0
score = 1.0
for _ in range(int(math.log(len(self.enrolled), 2)) - int(math.log(self.trees_constructed, 2))):
left_child_index, right_child_index = BloomTreeDb.get_node_children_indices(step)
ds_left = tree[left_child_index].compare(probe)
ds_right = tree[right_child_index].compare(probe)
step, score = (left_child_index, ds_left) if ds_left < ds_right else (right_child_index, ds_right)
score, match_node = score, tree[step]
if score <= best_score:
best_score = score
best_match_node = match_node
return best_score, best_match_node
if trees_traversed < 1 or trees_traversed > self.trees_constructed:
raise ValueError("Invalid number of trees to traverse:", trees_traversed)
promising_trees = find_promising_trees(probe, trees_traversed)
return traverse(promising_trees, probe)
def _build(self) -> List[BF_TREE]:
'''Constructs the BloomTemplate trees using the parameters the db has been initiated with'''
def construct_bf_tree(enrolled_part: List[BloomTemplate]) -> BF_TREE:
'''Constructs a single BloomTemplate tree'''
bf_tree = []
for index in range(len(enrolled_part)-1):
node_level = BloomTreeDb.get_node_level(index)
start_index = int(len(enrolled_part) / (1 << node_level) * ((index + 1) % (1 << node_level)))
end_index = int(len(enrolled_part) / (1 << node_level) * ((index + 1) % (1 << node_level)) + len(enrolled_part) / (1 << node_level))
node = copy.deepcopy(enrolled_part[start_index])
for i in range(start_index, end_index):
node += enrolled_part[i]
bf_tree.append(node)
bf_tree += enrolled_part
return bf_tree
trees = []
i = 0
while i != len(self.enrolled):
i_old = i
i += int(len(self.enrolled) / self.trees_constructed)
bf_tree = construct_bf_tree(self.enrolled[i_old:i])
assert len(bf_tree) == int(len(self.enrolled) / self.trees_constructed) * 2 - 1
trees.append(bf_tree)
assert len(trees) == self.trees_constructed
return trees
def __repr__(self) -> str:
return "<BloomTreeDb object containing {} subjects in {} trees>".format(len(self.enrolled), self.trees_constructed)
'''Convenience methods for tree indexing'''
@staticmethod
def get_node_children_indices(index: int) -> Tuple[int, int]:
'''Compute indices of node children based on its index.'''
return 2 * index + 1, 2 * (index + 1)
@staticmethod
def get_node_level(index: int) -> int:
'''Compute the level of a node in a tree based on its index.'''
return int(math.floor(math.log(index + 1, 2)))
def load_binary_template(path: Path) -> List[List[int]]:
'''Reads a text file into an iris code matrix'''
with path.open("r") as f:
return [list(map(int, list(line.rstrip()))) for line in f.readlines()]
def extract_source_data(filename: str) -> List[Tuple[str, str, str, str]]:
'''This function parses the template filename (path.stem) and extract the subject, image number, image side and dataset and return it as list (this is necessary later on) with one tuple element (Subject, Image, Side, Dataset).
e.g. if the filename is "S1001L01.jpg" from Casia-Interval dataset, then the return value should be: [(1001, 01, L, Interval)] or similar, as long as the convention is consistent.
'''
raise NotImplementedError("Implement me!")
def split_dataset(templates: List[BloomTemplate], num_enrolled: int) -> Tuple[List[BloomTemplate], List[BloomTemplate], List[BloomTemplate]]:
'''This function splits the full template list into disjoint lists of enrolled, genuine and impostor templates'''
enrolled, genuine, impostor = [], [], []
raise NotImplementedError("Implement me!")
return enrolled, genuine, impostor
if __name__ == "__main__":
# Data preparation
start = timer()
binary_templates = [(load_binary_template(f), extract_source_data(f.stem)) for f in args.directory.iterdir() if f.is_file() and f.match('*.txt')] # see file example_binary_template.txt for required format
bloom_templates = [BloomTemplate.from_binary_template(template, args.height, args.width, source) for template, source in binary_templates]
enrolled_templates, genuine_templates, impostor_templates = split_dataset(bloom_templates, args.enrolled)
db = BloomTreeDb(enrolled_templates, args.constructed)
end = timer()
print("Total data preparation time: %02d:%02d" % divmod(end - start, 60))
# Lookup
start = timer()
results_genuine = [db.search(genuine_template, args.traversed) for genuine_template in genuine_templates] # List[Tuple[float, BloomTemplate]]
results_impostor = [db.search(impostor_template, args.traversed) for impostor_template in impostor_templates] # List[Tuple[float, BloomTemplate]]
genuine_scores = [result[0] for result in results_genuine] # List[float]
impostor_scores = [result[0] for result in results_impostor] # List[float]
genuine_matches = [result[1] for result in results_genuine] # List[BloomTemplate]
end = timer()
print("Total lookup time: %02d:%02d" % divmod(end - start, 60))
# Results
print("Experiment configuration: {} enrolled, {} trees, {} traversed trees, {} block height, {} block width".format(len(enrolled_templates), args.constructed, args.traversed, args.height, args.width))
print("Genuine distribution: {} scores, min/max {:.4f}/{:.4f}, mean {:.4f} +/- {:.4f}".format(len(genuine_scores), min(genuine_scores), max(genuine_scores), np.mean(genuine_scores), np.std(genuine_scores)))
print("Impostor distribution: {} scores, min/max {:.4f}/{:.4f}, mean {:.4f} +/- {:.4f}".format(len(impostor_scores), min(impostor_scores), max(impostor_scores), np.mean(impostor_scores), np.std(impostor_scores)))
print("Fraction of genuine attempts with correct leaf reached: {:.4f}".format([probe.is_same_genuine(result) for probe, result in zip(genuine_templates, genuine_matches)].count(True) / len(genuine_templates)))
| [
"numpy.mean",
"copy.deepcopy",
"argparse.ArgumentParser",
"timeit.default_timer",
"math.log",
"numpy.array",
"numpy.std",
"operator.itemgetter"
] | [((547, 624), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Bloom filter-based Iris-Code indexing."""'}), "(description='Bloom filter-based Iris-Code indexing.')\n", (570, 624), False, 'import argparse\n'), ((11022, 11029), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11027, 11029), True, 'from timeit import default_timer as timer\n'), ((11546, 11553), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11551, 11553), True, 'from timeit import default_timer as timer\n'), ((11649, 11656), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11654, 11656), True, 'from timeit import default_timer as timer\n'), ((12187, 12194), 'timeit.default_timer', 'timer', ([], {}), '()\n', (12192, 12194), True, 'from timeit import default_timer as timer\n'), ((4922, 4947), 'numpy.array', 'np.array', (['binary_template'], {}), '(binary_template)\n', (4930, 4947), True, 'import numpy as np\n'), ((12632, 12655), 'numpy.mean', 'np.mean', (['genuine_scores'], {}), '(genuine_scores)\n', (12639, 12655), True, 'import numpy as np\n'), ((12657, 12679), 'numpy.std', 'np.std', (['genuine_scores'], {}), '(genuine_scores)\n', (12663, 12679), True, 'import numpy as np\n'), ((12844, 12868), 'numpy.mean', 'np.mean', (['impostor_scores'], {}), '(impostor_scores)\n', (12851, 12868), True, 'import numpy as np\n'), ((12870, 12893), 'numpy.std', 'np.std', (['impostor_scores'], {}), '(impostor_scores)\n', (12876, 12893), True, 'import numpy as np\n'), ((8765, 8806), 'copy.deepcopy', 'copy.deepcopy', (['enrolled_part[start_index]'], {}), '(enrolled_part[start_index])\n', (8778, 8806), False, 'import copy\n'), ((9818, 9840), 'math.log', 'math.log', (['(index + 1)', '(2)'], {}), '(index + 1, 2)\n', (9826, 9840), False, 'import math\n'), ((6929, 6951), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (6948, 6951), False, 'import operator\n'), ((6872, 6894), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (6891, 6894), 
False, 'import operator\n'), ((7396, 7431), 'math.log', 'math.log', (['self.trees_constructed', '(2)'], {}), '(self.trees_constructed, 2)\n', (7404, 7431), False, 'import math\n')] |
# coding: utf-8
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Public API: every benchmark-function class exported by this module.
__all__ = ['Ackley','Sphere','Rosenbrock','Beale','GoldsteinPrice','Booth',
           'BukinN6','Matyas','LeviN13','ThreeHumpCamel','Easom','Eggholder',
           'McCormick','SchafferN2','SchafferN4','StyblinskiTang','DeJongsF1',
           'DeJongsF2','DeJongsF3','DeJongsF4','DeJongsF5','Ellipsoid','KTablet',
           'FiveWellPotential','WeightedSphere','HyperEllipsodic',
           'SumOfDifferentPower','Griewank','Michalewicz','Perm','Rastrigin',
           'Schwefel','SixHumpCamel','Shuberts','XinSheYang','Zakharov']
# Constructors taking no extra argument (fixed dimensionality).
__oneArgument__ = ['Beale','GoldsteinPrice','Booth','BukinN6','Matyas','LeviN13',
                   'ThreeHumpCamel','Easom','Eggholder','McCormick','SchafferN2',
                   'SchafferN4','DeJongsF3','DeJongsF4','DeJongsF5',
                   'FiveWellPotential','SixHumpCamel','Shuberts']
# Constructors taking the number of variables.
__twoArgument__ = ['Ackley','Sphere','Rosenbrock','StyblinskiTang','DeJongsF1',
                   'DeJongsF2','Ellipsoid','KTablet','WeightedSphere',
                   'HyperEllipsodic','SumOfDifferentPower','Griewank',
                   'Michalewicz','Rastrigin','Schwefel','XinSheYang','Zakharov']
# Constructors taking the number of variables plus one extra parameter (beta).
__threeArgument__ = ['Perm']
##### Basic function #####
class OptimalBasic:
    """Common base class for the benchmark functions below.

    Subclasses set the search ranges, the known optimum and override
    ``get_func_val``.  ``plot``/``save_fig`` visualise the first two
    dimensions; the remaining dimensions are pinned to the optimum.
    """
    def __init__(self, variable_num):
        self.variable_num = variable_num
        self.max_search_range = np.array([0]*self.variable_num)
        self.min_search_range = np.array([0]*self.variable_num)
        self.optimal_solution = np.array([0]*self.variable_num)
        self.global_optimum_solution = 0
        self.plot_place = 0.25   # grid step used by plot()/save_fig()
        self.func_name = ''
        # Build the image directory portably (the original hard-coded
        # Windows '\\' separators).  Keep the trailing separator because
        # save_fig() concatenates the file name onto save_dir.
        self.save_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'img') + os.sep
        # makedirs(exist_ok=True) avoids the isdir()/mkdir() race of the
        # original "if isdir(...) == False: mkdir(...)" pattern.
        os.makedirs(self.save_dir, exist_ok=True)
    def get_global_optimum_solution(self):
        """Return the known global optimum value."""
        return self.global_optimum_solution
    def get_optimal_solution(self):
        """Return the argmin of the function."""
        return self.optimal_solution
    def get_search_range(self):
        """Return [max_search_range, min_search_range]."""
        return [self.max_search_range, self.min_search_range]
    def get_func_val(self, variables):
        """Evaluate the function at ``variables``; base class returns -1."""
        return -1
    def _surface(self):
        """Build the (X, Y, Z) mesh shared by plot() and save_fig()."""
        x = np.arange(self.min_search_range[0], self.max_search_range[0],
                      self.plot_place, dtype=np.float32)
        y = np.arange(self.min_search_range[1], self.max_search_range[1],
                      self.plot_place, dtype=np.float32)
        X, Y = np.meshgrid(x, y)
        Z = []
        for xy_list in zip(X, Y):
            z = []
            for xy_input in zip(xy_list[0], xy_list[1]):
                tmp = list(xy_input)
                # Pin the remaining dimensions to the known optimum.
                tmp.extend(list(self.optimal_solution[0:self.variable_num-2]))
                z.append(self.get_func_val(np.array(tmp)))
            Z.append(z)
        return X, Y, np.array(Z)
    def plot(self):
        """Show a wireframe plot over the first two dimensions."""
        X, Y, Z = self._surface()
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.plot_wireframe(X, Y, Z)
        plt.show()
    def save_fig(self):
        """Save the wireframe plot to <save_dir><func_name>.png."""
        X, Y, Z = self._surface()
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.plot_wireframe(X, Y, Z)
        plt.savefig(self.save_dir+self.func_name+'.png')
        plt.close()
##### Optimization benchmark function group #####
##### Class Ackley function #####
class Ackley(OptimalBasic):
    """Ackley function: highly multimodal, global minimum 0 at the origin."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 32.768)
        self.min_search_range = np.full(self.variable_num, -32.768)
        self.optimal_solution = np.array([0] * self.variable_num)
        self.global_optimum_solution = 0
        self.func_name = 'Ackley'
    def get_func_val(self, variables):
        n = self.variable_num
        # Root-mean-square term and mean-cosine term of the Ackley formula.
        rms = np.sqrt(1. / n * np.sum(np.square(variables)))
        mean_cos = 1. / n * np.sum(np.cos(variables * 2. * np.pi))
        return (20. - 20. * np.exp(-0.2 * rms)) + (np.e - np.exp(mean_cos))
##### Class Sphere function #####
class Sphere(OptimalBasic):
    """Sphere function f(x) = sum(x_i^2): unimodal, minimum 0 at the origin."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.array([1000]*self.variable_num) # nearly inf
        self.min_search_range = np.array([-1000]*self.variable_num) # nearly inf
        # The minimizer of sum(x_i^2) is the zero vector (f = 0); the
        # original stored [1]*n, which gives f = n, contradicting
        # global_optimum_solution = 0.
        self.optimal_solution = np.array([0]*self.variable_num)
        self.global_optimum_solution = 0
        self.plot_place = 10
        self.func_name = 'Sphere'
    def get_func_val(self, variables):
        return np.sum(np.square(variables))
##### Class Rosenbrock function #####
class Rosenbrock(OptimalBasic):
    """Rosenbrock's banana-valley function: minimum 0 at (1, ..., 1)."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.array([5] * self.variable_num)
        self.min_search_range = np.array([-5] * self.variable_num)
        self.optimal_solution = np.array([1] * self.variable_num)
        self.global_optimum_solution = 0
        self.plot_place = 0.25
        self.func_name = 'Rosenbrock'
    def get_func_val(self, variables):
        total = 0
        for i in range(self.variable_num - 1):
            # 100*(x_{i+1} - x_i^2)^2 + (x_i - 1)^2 per consecutive pair.
            valley = variables[i + 1] - np.power(variables[i], 2)
            total += 100 * np.power(valley, 2) + np.power(variables[i] - 1, 2)
        return total
##### Class Beale function #####
class Beale(OptimalBasic):
    """Beale function (2-D): minimum 0 at (3, 0.5)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 4.5)
        self.min_search_range = np.full(self.variable_num, -4.5)
        self.optimal_solution = np.array([3.,0.5])
        self.global_optimum_solution = 0
        self.plot_place = 0.25
        self.func_name = 'Beale'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        # Sum of three squared residuals against 1.5, 2.25 and 2.625.
        return (np.power(1.5 - x + x * y, 2)
                + np.power(2.25 - x + x * np.power(y, 2), 2)
                + np.power(2.625 - x + x * np.power(y, 3), 2))
##### Class Goldstein-Price function #####
class GoldsteinPrice(OptimalBasic):
    """Goldstein-Price function (2-D): minimum 3 at (0, -1)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 2.)
        self.min_search_range = np.full(self.variable_num, -2.)
        self.optimal_solution = np.array([0.,-1.])
        self.global_optimum_solution = 3
        self.plot_place = 0.25
        self.func_name = 'GoldsteinPrice'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        # First factor: 1 + (x+y+1)^2 * quadratic polynomial.
        poly_a = (19 - 14 * x + 3 * np.power(x, 2)
                  - 14 * y + 6 * x * y + 3 * np.power(y, 2))
        factor_a = 1 + np.power(x + y + 1, 2) * poly_a
        # Second factor: 30 + (2x-3y)^2 * quadratic polynomial.
        poly_b = (18 - 32 * x + 12 * np.power(x, 2)
                  + 48 * y - 36 * x * y + 27 * np.power(y, 2))
        factor_b = 30 + np.power(2 * x - 3 * y, 2) * poly_b
        return factor_a * factor_b
##### Class Booth function #####
class Booth(OptimalBasic):
    """Booth function (2-D): f = (x+2y-7)^2 + (2x+y-5)^2, minimum 0 at (1, 3)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.array([10.]*self.variable_num)
        self.min_search_range = np.array([-10.]*self.variable_num)
        # Both squared terms vanish only at (1, 3); the original stored
        # (1, -3), where f = 180, contradicting global_optimum_solution = 0.
        self.optimal_solution = np.array([1.,3.])
        self.global_optimum_solution = 0
        self.func_name = 'Booth'
    def get_func_val(self, variables):
        tmp1 = np.power(variables[0]+2*variables[1]-7,2)
        tmp2 = np.power(2*variables[0]+variables[1]-5,2)
        return tmp1+tmp2
##### Class Bukin function N.6 #####
class BukinN6(OptimalBasic):
    """Bukin N.6 (2-D): f = 100*sqrt(|x2 - 0.01*x1^2|) + 0.01*|x1 + 10|,
    minimum 0 at (-10, 1)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.array([-5.,3.])
        self.min_search_range = np.array([-15.,-3.])
        self.optimal_solution = np.array([-10.,1.])
        self.global_optimum_solution = 0
        self.func_name = 'BukinN6'
    def get_func_val(self, variables):
        # |x2 - 0.01*x1^2|: the original squared variables[1] here, which
        # makes f(-10, 1) nonzero instead of the documented minimum 0.
        tmp1 = 100*np.sqrt(np.absolute(variables[1]-0.01*np.power(variables[0],2)))
        tmp2 = 0.01*np.absolute(variables[0]+10)
        return tmp1+tmp2
##### Class Matyas function #####
class Matyas(OptimalBasic):
    """Matyas function (2-D): minimum 0 at the origin."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 10.)
        self.min_search_range = np.full(self.variable_num, -10.)
        self.optimal_solution = np.array([0.,0.])
        self.global_optimum_solution = 0
        self.func_name = 'Matyas'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        quadratic = 0.26 * (np.power(x, 2) + np.power(y, 2))
        cross = 0.48 * x * y
        return quadratic - cross
##### Class Levi function N.13 #####
class LeviN13(OptimalBasic):
    """Levi N.13 function (2-D): minimum 0 at (1, 1)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 10.)
        self.min_search_range = np.full(self.variable_num, -10.)
        self.optimal_solution = np.array([1.,1.])
        self.global_optimum_solution = 0
        self.func_name = 'LeviN13'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        first = np.power(np.sin(3 * np.pi * x), 2)
        second = np.power(x - 1, 2) * (1 + np.power(np.sin(3 * np.pi * y), 2))
        third = np.power(y - 1, 2) * (1 + np.power(np.sin(2 * np.pi * y), 2))
        return first + second + third
##### Class Three-hump camel function #####
class ThreeHumpCamel(OptimalBasic):
    """Three-hump camel function (2-D): minimum 0 at the origin."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 5.)
        self.min_search_range = np.full(self.variable_num, -5.)
        self.optimal_solution = np.array([0.,0.])
        self.global_optimum_solution = 0
        self.func_name = 'ThreeHumpCamel'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        # 2x^2 - 1.05x^4 + x^6/6 + xy + y^2
        return (2 * np.power(x, 2) - 1.05 * np.power(x, 4)
                + np.power(x, 6) / 6 + x * y + np.power(y, 2))
##### Class Easom function #####
class Easom(OptimalBasic):
    """Easom function (2-D): minimum -1 at (pi, pi), nearly flat elsewhere."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 100.)
        self.min_search_range = np.full(self.variable_num, -100.)
        self.optimal_solution = np.array([np.pi,np.pi])
        self.global_optimum_solution = -1
        self.plot_place = 10
        self.func_name = 'Easom'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        dist_sq = np.power(x - np.pi, 2) + np.power(y - np.pi, 2)
        return -1.0 * np.cos(x) * np.cos(y) * np.exp(-dist_sq)
##### Class Eggholder function #####
class Eggholder(OptimalBasic):
    """Eggholder function (2-D): minimum -959.6407 at (512, 404.2319)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 512.)
        self.min_search_range = np.full(self.variable_num, -512.)
        self.optimal_solution = np.array([512.,404.2319])
        self.global_optimum_solution = -959.6407
        self.plot_place = 5
        self.func_name = 'Eggholder'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        shifted = y + 47
        first = -shifted * np.sin(np.sqrt(np.absolute(shifted + x / 2)))
        second = -x * np.sin(np.sqrt(np.absolute(x - shifted)))
        return first + second
##### Class McCormick function #####
class McCormick(OptimalBasic):
    """McCormick function (2-D): minimum -1.9133 at (-0.54719, -1.54719)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 4.)
        self.min_search_range = np.array([-1.5,-3.])
        self.optimal_solution = np.array([-0.54719,-1.54719])
        self.global_optimum_solution = -1.9133
        self.func_name = 'McCormick'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        # sin(x+y) + (x-y)^2 - 1.5x + 2.5y + 1
        return (np.sin(x + y) + np.power(x - y, 2)
                - 1.5 * x + 2.5 * y + 1)
##### Class Schaffer function N.2 #####
class SchafferN2(OptimalBasic):
    """Schaffer N.2 function (2-D): minimum 0 at the origin."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 100.)
        self.min_search_range = np.array([-100]*self.variable_num)
        self.optimal_solution = np.array([0.,0.])
        self.global_optimum_solution = 0
        self.plot_place = 10
        self.func_name = 'SchafferN2'
    def get_func_val(self, variables):
        sq_diff = np.power(variables[0], 2) - np.power(variables[1], 2)
        sq_sum = np.power(variables[0], 2) + np.power(variables[1], 2)
        numerator = np.power(np.sin(sq_diff), 2) - 0.5
        denominator = np.power(1 + 0.001 * sq_sum, 2)
        return 0.5 + numerator / denominator
##### Class Schaffer function N.4 #####
class SchafferN4(OptimalBasic):
    """Schaffer N.4 function (2-D): minimum 0 near (0, 1.25313)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.full(self.variable_num, 100.)
        self.min_search_range = np.array([-100]*self.variable_num)
        self.optimal_solution = np.array([0.,1.25313])
        self.global_optimum_solution = 0
        self.plot_place = 10
        self.func_name = 'SchafferN4'
    def get_func_val(self, variables):
        sq_diff = np.power(variables[0], 2) - np.power(variables[1], 2)
        sq_sum = np.power(variables[0], 2) + np.power(variables[1], 2)
        numerator = np.power(np.cos(np.sin(np.absolute(sq_diff))), 2) - 0.5
        denominator = np.power(1 + 0.001 * sq_sum, 2)
        return 0.5 + numerator / denominator
##### Class Styblinski-Tang function #####
class StyblinskiTang(OptimalBasic):
    """Styblinski-Tang function: minimum -39.166165*n at x_i = -2.903534."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 5.)
        self.min_search_range = np.full(self.variable_num, -5.)
        self.optimal_solution = np.full(self.variable_num, -2.903534)
        self.global_optimum_solution = -39.166165*self.variable_num
        self.func_name = 'StyblinskiTang'
    def get_func_val(self, variables):
        total = 0
        for i in range(self.variable_num):
            xi = variables[i]
            # (x^4 - 16x^2 + 5x) per coordinate, halved at the end.
            total += np.power(xi, 4) - 16 * np.power(xi, 2) + 5 * xi
        return total / 2
##### Class De Jong's function F1 #####
class DeJongsF1(Sphere):
    """De Jong's F1: the sphere function (only the reported name differs)."""
    def __init__(self,variable_num):
        super().__init__(variable_num)
        self.func_name = 'DeJongsF1'
##### Class De Jong's function F2 #####
class DeJongsF2(Rosenbrock):
    """De Jong's F2: the Rosenbrock function (only the reported name differs)."""
    def __init__(self,variable_num):
        super().__init__(variable_num)
        self.func_name = 'DeJongsF2'
##### Class De Jong's function F3 #####
class DeJongsF3(OptimalBasic):
    """De Jong's F3 (5-D step function): sum of floor(x_i)."""
    def __init__(self):
        super().__init__(5)
        self.max_search_range = np.full(self.variable_num, 5.12)
        self.min_search_range = np.full(self.variable_num, -5.12)
        self.optimal_solution = np.full(self.variable_num, -5.12)
        self.global_optimum_solution = 0
        self.func_name = 'DeJongsF3'
    def get_func_val(self, variables):
        total = 0
        for i in range(self.variable_num):
            total += np.floor(variables[i])
        return total
##### Class De Jong's function F4 #####
class DeJongsF4(OptimalBasic):
    """De Jong's F4 (30-D, noisy): sum of (i+1)*x_i^4 plus Gaussian noise."""
    def __init__(self):
        super().__init__(30)
        self.max_search_range = np.full(self.variable_num, 1.28)
        self.min_search_range = np.full(self.variable_num, -1.28)
        self.optimal_solution = np.full(self.variable_num, 0.)
        # The optimum value itself is stochastic (noise term below).
        self.global_optimum_solution = np.random.normal(0,1)
        self.func_name = 'DeJongsF4'
    def get_func_val(self, variables):
        weighted = 0
        for idx in range(self.variable_num):
            weighted += (idx + 1) * np.power(variables[idx], 4)
        # Additive Gaussian noise, as in the original F4 definition.
        return weighted + np.random.normal(0, 1)
##### Class De Jong's function F5 #####
class DeJongsF5(OptimalBasic):
    """De Jong's F5 (Shekel's foxholes, 2 effective variables):
    f = 1 / (0.002 + sum_j 1/(c_j + (x1-a1j)^6 + (x2-a2j)^6))."""
    def __init__(self):
        super().__init__(25)
        self.max_search_range = np.array([65.536]*self.variable_num)
        self.min_search_range = np.array([-65.536]*self.variable_num)
        self.optimal_solution = np.array([-32.32]*self.variable_num)
        self.global_optimum_solution = 1.
        self.plot_place = 1.5
        self.func_name = 'DeJongsF5'
    def get_func_val(self, variables):
        A = np.zeros([2,25])
        # Foxhole lattice {-32,-16,0,16,32}^2: the original listed 16 twice
        # and omitted -16.
        a = [-32,-16,0,16,32]
        A[0,:] = np.tile(a,(1,5))
        tmp = []
        for x in a:
            tmp.extend([x]*5)
        A[1,:] = tmp
        # Renamed from `sum`, which shadowed the builtin.
        denom_total = 0
        for i in range(self.variable_num):
            a1i = A[0,i]
            a2i = A[1,i]
            # c_j is the 1-based foxhole index in the standard definition;
            # the original used the 0-based loop index.
            denom = (i + 1) + np.power(variables[0]-a1i,6) + np.power(variables[1]-a2i,6)
            denom_total += 1/denom
        return 1/(0.002+denom_total)
##### Class Ellipsoid function #####
class Ellipsoid(OptimalBasic):
    """Ellipsoid function: sum of (1000^(i/(n-1)) * x_i)^2, minimum 0 at 0."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 5.12)
        self.min_search_range = np.full(self.variable_num, -5.12)
        self.optimal_solution = np.full(self.variable_num, 0.)
        self.global_optimum_solution = 0.
        self.func_name = 'Ellipsoid'
    def get_func_val(self, variables):
        total = 0
        for i in range(self.variable_num):
            # Condition-number scaling grows geometrically with i.
            scale = np.power(1000, i / (self.variable_num - 1))
            total += np.power(scale * variables[i], 2)
        return total
##### Class k-tablet function #####
class KTablet(OptimalBasic):
    """k-tablet function: sum_{i<k} x_i^2 + sum_{i>=k} (100*x_i)^2,
    with k = n/4; minimum 0 at the origin."""
    def __init__(self,variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.array([5.12]*self.variable_num)
        self.min_search_range = np.array([-5.12]*self.variable_num)
        self.optimal_solution = np.array([0.]*self.variable_num)
        self.global_optimum_solution = 0.
        self.func_name = 'KTablet'
    def get_func_val(self, variables):
        tmp = 0
        k = int(self.variable_num/4)
        # First k coordinates contribute x_i^2; the original added the raw
        # x_i (unsquared), which can even make the "minimum" negative.
        for i in range(k):
            tmp += np.power(variables[i],2)
        for i in range(k,self.variable_num):
            tmp += np.power(100*variables[i],2)
        return tmp
##### Class Five-well potential function #####
class FiveWellPotential(OptimalBasic):
    """Five-well potential function (2-D): five attraction wells times a
    polynomial envelope; minimum -1.4616 at (4.92, -9.89).

    The original implementation was flagged "not yet checked": it squared
    (x^2 + (y-10)) as a whole in the first well and used depth 1 for all
    wells, whereas the published form uses x^2 + (y-10)^2 and well depths
    1, 1, 1.5, 2, 1.
    """
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.array([20.]*self.variable_num)
        self.min_search_range = np.array([-20.]*self.variable_num)
        self.optimal_solution = np.array([4.92,-9.89])
        self.global_optimum_solution = -1.4616
        self.plot_place = 1
        self.func_name = 'FiveWellPotential'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        wells = (
            1.0
            - 1.0 / (1. + 0.05 * (np.power(x, 2) + np.power(y - 10., 2)))
            - 1.0 / (1. + 0.05 * (np.power(x - 10., 2) + np.power(y, 2)))
            - 1.5 / (1. + 0.03 * (np.power(x + 10., 2) + np.power(y, 2)))
            - 2.0 / (1. + 0.05 * (np.power(x - 5., 2) + np.power(y + 10., 2)))
            - 1.0 / (1. + 0.1 * (np.power(x + 5., 2) + np.power(y + 10., 2)))
        )
        envelope = 1 + 0.0001 * np.power(np.power(x, 2) + np.power(y, 2), 1.2)
        return wells * envelope
##### Class Weighted Sphere function or hyper ellipsodic function #####
class WeightedSphere(OptimalBasic):
    """Weighted sphere function: sum of (i+1)*x_i^2, minimum 0 at the origin."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 5.12)
        self.min_search_range = np.full(self.variable_num, -5.12)
        self.optimal_solution = np.full(self.variable_num, 0.)
        self.global_optimum_solution = 0.
        self.func_name = 'WeightedSphere'
    def get_func_val(self, variables):
        total = 0
        for i in range(self.variable_num):
            total += (i + 1) * np.power(variables[i], 2)
        return total
class HyperEllipsodic(WeightedSphere):
    """Hyper-ellipsodic function: identical to WeightedSphere (alias name)."""
    def __init__(self,variable_num):
        super().__init__(variable_num)
        self.func_name = 'HyperEllipsodic'
##### Class Sum of different power function #####
class SumOfDifferentPower(OptimalBasic):
    """Sum-of-different-powers function: sum of |x_i|^(i+2), minimum 0 at 0."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 1.)
        self.min_search_range = np.full(self.variable_num, -1.)
        self.optimal_solution = np.full(self.variable_num, 0.)
        self.global_optimum_solution = 0.
        self.func_name = 'SumOfDifferentPower'
    def get_func_val(self, variables):
        total = 0
        # Exponent starts at 2 for the first coordinate and grows by one.
        for i in range(self.variable_num):
            total += np.power(np.absolute(variables[i]), i + 2)
        return total
##### Class Griewank function #####
class Griewank(OptimalBasic):
    """Griewank function: 1 + sum(x_i^2)/4000 - prod(cos(x_i/sqrt(i+1))),
    minimum 0 at the origin."""
    def __init__(self,variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.array([600.]*self.variable_num)
        self.min_search_range = np.array([-600.]*self.variable_num)
        self.optimal_solution = np.array([0.]*self.variable_num)
        self.global_optimum_solution = 0.
        self.plot_place = 10.
        self.func_name = 'Griewank'
    def get_func_val(self, variables):
        tmp1 = 0
        tmp2 = 1
        for i in range(self.variable_num):
            tmp1 += np.power(variables[i],2)
            tmp2 = tmp2*np.cos(variables[i]/np.sqrt(i+1))
        # The "+ 1" was missing in the original, making f(0) = -1 while
        # global_optimum_solution is 0.
        return tmp1/4000-tmp2+1
##### Class Michalewicz function #####
class Michalewicz(OptimalBasic):
    """Michalewicz function: steep multimodal valleys; -1.8013 for n = 2."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.array([np.pi] * self.variable_num)
        self.min_search_range = np.full(self.variable_num, 0.)
        self.optimal_solution = np.full(self.variable_num, 0.)
        self.global_optimum_solution = -1.8013 # In case of variable_num == 2
        self.plot_place = 0.1
        self.func_name = 'Michalewicz'
    def get_func_val(self, variables):
        steepness = 10   # the usual "m" parameter
        total = 0
        for i in range(self.variable_num):
            xi = variables[i]
            total += np.sin(xi) * np.power(
                np.sin((i + 1) * np.power(xi, 2) / np.pi), 2 * steepness)
        return -total
##### Class Perm function #####
class Perm(OptimalBasic):
    """Perm function with parameter beta: minimum 0 at x_i = 1/(i+1)."""
    def __init__(self, variable_num, beta):
        super().__init__(variable_num)
        self.beta = beta
        self.max_search_range = np.full(self.variable_num, 1.)
        self.min_search_range = np.full(self.variable_num, -1.)
        self.optimal_solution = np.array(
            [1 / (i + 1) for i in range(self.variable_num)])
        self.global_optimum_solution = 0.
        self.plot_place = 0.1
        self.func_name = 'Perm'
    def get_func_val(self, variables):
        total = 0
        for j in range(self.variable_num):
            inner = 0
            for i in range(self.variable_num):
                inner += (i + 1 + self.beta) * (
                    np.power(variables[i], j + 1) - np.power(1 / (i + 1), j + 1))
            total += np.power(inner, 2)
        return total
##### Class Rastrigin function #####
class Rastrigin(OptimalBasic):
    """Rastrigin function: 10n + sum(x_i^2 - 10*cos(2*pi*x_i)), minimum 0 at 0."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 5.12)
        self.min_search_range = np.full(self.variable_num, -5.12)
        self.optimal_solution = np.full(self.variable_num, 0.)
        self.global_optimum_solution = 0.
        self.func_name = 'Rastrigin'
    def get_func_val(self, variables):
        result = 10 * self.variable_num
        for i in range(self.variable_num):
            xi = variables[i]
            result += np.power(xi, 2) - 10 * np.cos(2 * np.pi * xi)
        return result
##### Class Schwefel function #####
class Schwefel(OptimalBasic):
    """Schwefel function: -sum(x_i * sin(sqrt(|x_i|))), minimizer 420.9687."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 500.)
        self.min_search_range = np.full(self.variable_num, -500.)
        self.optimal_solution = np.full(self.variable_num, 420.9687)
        self.global_optimum_solution = -418.9829
        self.plot_place = 10.
        self.func_name = 'Schwefel'
    def get_func_val(self, variables):
        total = 0
        for i in range(self.variable_num):
            xi = variables[i]
            total += xi * np.sin(np.sqrt(np.absolute(xi)))
        return -total
##### Class Six-hump camel function #####
class SixHumpCamel(OptimalBasic):
    """Six-hump camel function (2-D):
    f = (4 - 2.1*x^2 + x^4/3)*x^2 + x*y + (-4 + 4*y^2)*y^2,
    minimum -1.0316 at (-0.0898, 0.7126)."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.array([3.,2.])
        self.min_search_range = np.array([-3.,-2.])
        self.optimal_solution = np.array([-0.0898,0.7126])
        self.global_optimum_solution = -1.0316
        self.func_name = 'SixHumpCamel'
    def get_func_val(self, variables):
        x, y = variables[0], variables[1]
        # The original lost the parentheses around (4 - 2.1x^2 + x^4/3)
        # before multiplying by x^2, which changes the function.
        term1 = (4 - 2.1 * np.power(x, 2) + np.power(x, 4) / 3) * np.power(x, 2)
        term2 = x * y
        term3 = (4 * np.power(y, 2) - 4) * np.power(y, 2)
        return term1 + term2 + term3
##### Class Shuberts function #####
class Shuberts(OptimalBasic):
    """Shubert function (2-D, separable product form): minimum -186.7309."""
    def __init__(self):
        super().__init__(2)
        self.max_search_range = np.array([1000.,10.]) # Set infinite as 1000 for x1
        self.min_search_range = np.array([-10.,-1000]) # Set infinite as -1000 for x2
        self.optimal_solution = np.array([0.,0.])
        self.global_optimum_solution = -186.7309
        self.plot_place = 10.
        self.func_name = 'Shuberts'
    def get_func_val(self, variables):
        # f = (sum_j j*cos(j + (j+1)*x1)) * (sum_j j*cos(j + (j+1)*x2)),
        # j = 1..5.
        factor_x = 0
        factor_y = 0
        for j in range(1, 6):
            factor_x += j * np.cos(j + (j + 1) * variables[0])
            factor_y += j * np.cos(j + (j + 1) * variables[1])
        return factor_x * factor_y
##### Class Xin-She Yang function #####
class XinSheYang(OptimalBasic):
    """Xin-She Yang function: sum(|x_i|) * exp(-sum(sin(x_i^2))), minimum 0."""
    def __init__(self, variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.full(self.variable_num, 2. * np.pi)
        self.min_search_range = np.full(self.variable_num, -2. * np.pi)
        self.optimal_solution = np.full(self.variable_num, 0.)
        self.global_optimum_solution = 0.
        self.func_name = 'XinSheYang'
    def get_func_val(self, variables):
        abs_sum = 0
        sin_sq_sum = 0
        for i in range(self.variable_num):
            abs_sum += np.absolute(variables[i])
            sin_sq_sum += np.sin(np.power(variables[i], 2))
        return abs_sum * np.exp(-sin_sq_sum)
##### Class Zakharov function #####
class Zakharov(OptimalBasic):
    """Zakharov function: sum(x_i^2) + (0.5*sum(i*x_i))^2 + (0.5*sum(i*x_i))^4,
    minimum 0 at the origin."""
    def __init__(self,variable_num):
        super().__init__(variable_num)
        self.max_search_range = np.array([1000.]*self.variable_num) # temporarily set as 1000
        self.min_search_range = np.array([-1000]*self.variable_num) # temporarily set as -1000
        self.optimal_solution = np.array([0.]*self.variable_num)
        self.global_optimum_solution = 0.
        self.plot_place = 10.
        self.func_name = 'Zakharov'
    def get_func_val(self, variables):
        tmp1 = 0
        tmp2 = 0
        for i in range(self.variable_num):
            # First term of Zakharov is sum of squares; the original
            # accumulated the raw x_i here.
            tmp1 += np.power(variables[i],2)
            tmp2 += (i+1)*variables[i]
        return tmp1+np.power(1/2*tmp2,2)+np.power(1/2*tmp2,4)
| [
"numpy.sqrt",
"numpy.array",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.exp",
"os.path.isdir",
"os.mkdir",
"numpy.meshgrid",
"numpy.random.normal",
"numpy.tile",
"matplotlib.pyplot.savefig",
"numpy.floor",
"numpy.square",
"numpy.cos",
"mpl_toolkits.mplot3d.Axes3D",
... | [((1464, 1497), 'numpy.array', 'np.array', (['([0] * self.variable_num)'], {}), '([0] * self.variable_num)\n', (1472, 1497), True, 'import numpy as np\n'), ((1528, 1561), 'numpy.array', 'np.array', (['([0] * self.variable_num)'], {}), '([0] * self.variable_num)\n', (1536, 1561), True, 'import numpy as np\n'), ((1592, 1625), 'numpy.array', 'np.array', (['([0] * self.variable_num)'], {}), '([0] * self.variable_num)\n', (1600, 1625), True, 'import numpy as np\n'), ((2235, 2336), 'numpy.arange', 'np.arange', (['self.min_search_range[0]', 'self.max_search_range[0]', 'self.plot_place'], {'dtype': 'np.float32'}), '(self.min_search_range[0], self.max_search_range[0], self.\n plot_place, dtype=np.float32)\n', (2244, 2336), True, 'import numpy as np\n'), ((2343, 2444), 'numpy.arange', 'np.arange', (['self.min_search_range[1]', 'self.max_search_range[1]', 'self.plot_place'], {'dtype': 'np.float32'}), '(self.min_search_range[1], self.max_search_range[1], self.\n plot_place, dtype=np.float32)\n', (2352, 2444), True, 'import numpy as np\n'), ((2454, 2471), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2465, 2471), True, 'import numpy as np\n'), ((2805, 2816), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (2813, 2816), True, 'import numpy as np\n'), ((2831, 2843), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2857, 2868), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (2863, 2868), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2910, 2920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2918, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2958, 3059), 'numpy.arange', 'np.arange', (['self.min_search_range[0]', 'self.max_search_range[0]', 'self.plot_place'], {'dtype': 'np.float32'}), '(self.min_search_range[0], self.max_search_range[0], self.\n plot_place, dtype=np.float32)\n', (2967, 3059), True, 'import numpy as np\n'), ((3066, 3167), 
'numpy.arange', 'np.arange', (['self.min_search_range[1]', 'self.max_search_range[1]', 'self.plot_place'], {'dtype': 'np.float32'}), '(self.min_search_range[1], self.max_search_range[1], self.\n plot_place, dtype=np.float32)\n', (3075, 3167), True, 'import numpy as np\n'), ((3177, 3194), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3188, 3194), True, 'import numpy as np\n'), ((3528, 3539), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (3536, 3539), True, 'import numpy as np\n'), ((3554, 3566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3564, 3566), True, 'import matplotlib.pyplot as plt\n'), ((3580, 3591), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (3586, 3591), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3633, 3685), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_dir + self.func_name + '.png')"], {}), "(self.save_dir + self.func_name + '.png')\n", (3644, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3690, 3701), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3699, 3701), True, 'import matplotlib.pyplot as plt\n'), ((3923, 3961), 'numpy.array', 'np.array', (['([32.768] * self.variable_num)'], {}), '([32.768] * self.variable_num)\n', (3931, 3961), True, 'import numpy as np\n'), ((3992, 4031), 'numpy.array', 'np.array', (['([-32.768] * self.variable_num)'], {}), '([-32.768] * self.variable_num)\n', (4000, 4031), True, 'import numpy as np\n'), ((4062, 4095), 'numpy.array', 'np.array', (['([0] * self.variable_num)'], {}), '([0] * self.variable_num)\n', (4070, 4095), True, 'import numpy as np\n'), ((4585, 4621), 'numpy.array', 'np.array', (['([1000] * self.variable_num)'], {}), '([1000] * self.variable_num)\n', (4593, 4621), True, 'import numpy as np\n'), ((4665, 4702), 'numpy.array', 'np.array', (['([-1000] * self.variable_num)'], {}), '([-1000] * self.variable_num)\n', (4673, 4702), True, 'import numpy as np\n'), ((4746, 4779), 'numpy.array', 'np.array', (['([1] * 
self.variable_num)'], {}), '([1] * self.variable_num)\n', (4754, 4779), True, 'import numpy as np\n'), ((5146, 5179), 'numpy.array', 'np.array', (['([5] * self.variable_num)'], {}), '([5] * self.variable_num)\n', (5154, 5179), True, 'import numpy as np\n'), ((5210, 5244), 'numpy.array', 'np.array', (['([-5] * self.variable_num)'], {}), '([-5] * self.variable_num)\n', (5218, 5244), True, 'import numpy as np\n'), ((5275, 5308), 'numpy.array', 'np.array', (['([1] * self.variable_num)'], {}), '([1] * self.variable_num)\n', (5283, 5308), True, 'import numpy as np\n'), ((5778, 5813), 'numpy.array', 'np.array', (['([4.5] * self.variable_num)'], {}), '([4.5] * self.variable_num)\n', (5786, 5813), True, 'import numpy as np\n'), ((5844, 5880), 'numpy.array', 'np.array', (['([-4.5] * self.variable_num)'], {}), '([-4.5] * self.variable_num)\n', (5852, 5880), True, 'import numpy as np\n'), ((5911, 5931), 'numpy.array', 'np.array', (['[3.0, 0.5]'], {}), '([3.0, 0.5])\n', (5919, 5931), True, 'import numpy as np\n'), ((6090, 6151), 'numpy.power', 'np.power', (['(1.5 - variables[0] + variables[0] * variables[1])', '(2)'], {}), '(1.5 - variables[0] + variables[0] * variables[1], 2)\n', (6098, 6151), True, 'import numpy as np\n'), ((6524, 6559), 'numpy.array', 'np.array', (['([2.0] * self.variable_num)'], {}), '([2.0] * self.variable_num)\n', (6532, 6559), True, 'import numpy as np\n'), ((6589, 6625), 'numpy.array', 'np.array', (['([-2.0] * self.variable_num)'], {}), '([-2.0] * self.variable_num)\n', (6597, 6625), True, 'import numpy as np\n'), ((6655, 6676), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (6663, 6676), True, 'import numpy as np\n'), ((7362, 7398), 'numpy.array', 'np.array', (['([10.0] * self.variable_num)'], {}), '([10.0] * self.variable_num)\n', (7370, 7398), True, 'import numpy as np\n'), ((7428, 7465), 'numpy.array', 'np.array', (['([-10.0] * self.variable_num)'], {}), '([-10.0] * self.variable_num)\n', (7436, 7465), True, 'import numpy as 
np\n'), ((7495, 7516), 'numpy.array', 'np.array', (['[1.0, -3.0]'], {}), '([1.0, -3.0])\n', (7503, 7516), True, 'import numpy as np\n'), ((7643, 7691), 'numpy.power', 'np.power', (['(variables[0] + 2 * variables[1] - 7)', '(2)'], {}), '(variables[0] + 2 * variables[1] - 7, 2)\n', (7651, 7691), True, 'import numpy as np\n'), ((7700, 7748), 'numpy.power', 'np.power', (['(2 * variables[0] + variables[1] - 5)', '(2)'], {}), '(2 * variables[0] + variables[1] - 5, 2)\n', (7708, 7748), True, 'import numpy as np\n'), ((7918, 7939), 'numpy.array', 'np.array', (['[-5.0, 3.0]'], {}), '([-5.0, 3.0])\n', (7926, 7939), True, 'import numpy as np\n'), ((7969, 7992), 'numpy.array', 'np.array', (['[-15.0, -3.0]'], {}), '([-15.0, -3.0])\n', (7977, 7992), True, 'import numpy as np\n'), ((8022, 8044), 'numpy.array', 'np.array', (['[-10.0, 1.0]'], {}), '([-10.0, 1.0])\n', (8030, 8044), True, 'import numpy as np\n'), ((8463, 8499), 'numpy.array', 'np.array', (['([10.0] * self.variable_num)'], {}), '([10.0] * self.variable_num)\n', (8471, 8499), True, 'import numpy as np\n'), ((8529, 8566), 'numpy.array', 'np.array', (['([-10.0] * self.variable_num)'], {}), '([-10.0] * self.variable_num)\n', (8537, 8566), True, 'import numpy as np\n'), ((8596, 8616), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8604, 8616), True, 'import numpy as np\n'), ((9023, 9059), 'numpy.array', 'np.array', (['([10.0] * self.variable_num)'], {}), '([10.0] * self.variable_num)\n', (9031, 9059), True, 'import numpy as np\n'), ((9089, 9126), 'numpy.array', 'np.array', (['([-10.0] * self.variable_num)'], {}), '([-10.0] * self.variable_num)\n', (9097, 9126), True, 'import numpy as np\n'), ((9156, 9176), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (9164, 9176), True, 'import numpy as np\n'), ((9715, 9750), 'numpy.array', 'np.array', (['([5.0] * self.variable_num)'], {}), '([5.0] * self.variable_num)\n', (9723, 9750), True, 'import numpy as np\n'), ((9780, 9816), 'numpy.array', 
'np.array', (['([-5.0] * self.variable_num)'], {}), '([-5.0] * self.variable_num)\n', (9788, 9816), True, 'import numpy as np\n'), ((9846, 9866), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (9854, 9866), True, 'import numpy as np\n'), ((10282, 10319), 'numpy.array', 'np.array', (['([100.0] * self.variable_num)'], {}), '([100.0] * self.variable_num)\n', (10290, 10319), True, 'import numpy as np\n'), ((10349, 10387), 'numpy.array', 'np.array', (['([-100.0] * self.variable_num)'], {}), '([-100.0] * self.variable_num)\n', (10357, 10387), True, 'import numpy as np\n'), ((10417, 10441), 'numpy.array', 'np.array', (['[np.pi, np.pi]'], {}), '([np.pi, np.pi])\n', (10425, 10441), True, 'import numpy as np\n'), ((10873, 10910), 'numpy.array', 'np.array', (['([512.0] * self.variable_num)'], {}), '([512.0] * self.variable_num)\n', (10881, 10910), True, 'import numpy as np\n'), ((10940, 10978), 'numpy.array', 'np.array', (['([-512.0] * self.variable_num)'], {}), '([-512.0] * self.variable_num)\n', (10948, 10978), True, 'import numpy as np\n'), ((11008, 11035), 'numpy.array', 'np.array', (['[512.0, 404.2319]'], {}), '([512.0, 404.2319])\n', (11016, 11035), True, 'import numpy as np\n'), ((11551, 11586), 'numpy.array', 'np.array', (['([4.0] * self.variable_num)'], {}), '([4.0] * self.variable_num)\n', (11559, 11586), True, 'import numpy as np\n'), ((11616, 11638), 'numpy.array', 'np.array', (['[-1.5, -3.0]'], {}), '([-1.5, -3.0])\n', (11624, 11638), True, 'import numpy as np\n'), ((11669, 11699), 'numpy.array', 'np.array', (['[-0.54719, -1.54719]'], {}), '([-0.54719, -1.54719])\n', (11677, 11699), True, 'import numpy as np\n'), ((12144, 12181), 'numpy.array', 'np.array', (['([100.0] * self.variable_num)'], {}), '([100.0] * self.variable_num)\n', (12152, 12181), True, 'import numpy as np\n'), ((12211, 12247), 'numpy.array', 'np.array', (['([-100] * self.variable_num)'], {}), '([-100] * self.variable_num)\n', (12219, 12247), True, 'import numpy as np\n'), 
((12278, 12298), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (12286, 12298), True, 'import numpy as np\n'), ((12806, 12843), 'numpy.array', 'np.array', (['([100.0] * self.variable_num)'], {}), '([100.0] * self.variable_num)\n', (12814, 12843), True, 'import numpy as np\n'), ((12873, 12909), 'numpy.array', 'np.array', (['([-100] * self.variable_num)'], {}), '([-100] * self.variable_num)\n', (12881, 12909), True, 'import numpy as np\n'), ((12940, 12964), 'numpy.array', 'np.array', (['[0.0, 1.25313]'], {}), '([0.0, 1.25313])\n', (12948, 12964), True, 'import numpy as np\n'), ((13525, 13560), 'numpy.array', 'np.array', (['([5.0] * self.variable_num)'], {}), '([5.0] * self.variable_num)\n', (13533, 13560), True, 'import numpy as np\n'), ((13590, 13626), 'numpy.array', 'np.array', (['([-5.0] * self.variable_num)'], {}), '([-5.0] * self.variable_num)\n', (13598, 13626), True, 'import numpy as np\n'), ((13656, 13697), 'numpy.array', 'np.array', (['([-2.903534] * self.variable_num)'], {}), '([-2.903534] * self.variable_num)\n', (13664, 13697), True, 'import numpy as np\n'), ((14531, 14567), 'numpy.array', 'np.array', (['([5.12] * self.variable_num)'], {}), '([5.12] * self.variable_num)\n', (14539, 14567), True, 'import numpy as np\n'), ((14598, 14635), 'numpy.array', 'np.array', (['([-5.12] * self.variable_num)'], {}), '([-5.12] * self.variable_num)\n', (14606, 14635), True, 'import numpy as np\n'), ((14666, 14703), 'numpy.array', 'np.array', (['([-5.12] * self.variable_num)'], {}), '([-5.12] * self.variable_num)\n', (14674, 14703), True, 'import numpy as np\n'), ((15097, 15133), 'numpy.array', 'np.array', (['([1.28] * self.variable_num)'], {}), '([1.28] * self.variable_num)\n', (15105, 15133), True, 'import numpy as np\n'), ((15164, 15201), 'numpy.array', 'np.array', (['([-1.28] * self.variable_num)'], {}), '([-1.28] * self.variable_num)\n', (15172, 15201), True, 'import numpy as np\n'), ((15232, 15267), 'numpy.array', 'np.array', (['([0.0] * 
self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (15240, 15267), True, 'import numpy as np\n'), ((15304, 15326), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (15320, 15326), True, 'import numpy as np\n'), ((15713, 15751), 'numpy.array', 'np.array', (['([65.536] * self.variable_num)'], {}), '([65.536] * self.variable_num)\n', (15721, 15751), True, 'import numpy as np\n'), ((15782, 15821), 'numpy.array', 'np.array', (['([-65.536] * self.variable_num)'], {}), '([-65.536] * self.variable_num)\n', (15790, 15821), True, 'import numpy as np\n'), ((15852, 15890), 'numpy.array', 'np.array', (['([-32.32] * self.variable_num)'], {}), '([-32.32] * self.variable_num)\n', (15860, 15890), True, 'import numpy as np\n'), ((16050, 16067), 'numpy.zeros', 'np.zeros', (['[2, 25]'], {}), '([2, 25])\n', (16058, 16067), True, 'import numpy as np\n'), ((16113, 16131), 'numpy.tile', 'np.tile', (['a', '(1, 5)'], {}), '(a, (1, 5))\n', (16120, 16131), True, 'import numpy as np\n'), ((16749, 16785), 'numpy.array', 'np.array', (['([5.12] * self.variable_num)'], {}), '([5.12] * self.variable_num)\n', (16757, 16785), True, 'import numpy as np\n'), ((16816, 16853), 'numpy.array', 'np.array', (['([-5.12] * self.variable_num)'], {}), '([-5.12] * self.variable_num)\n', (16824, 16853), True, 'import numpy as np\n'), ((16884, 16919), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (16892, 16919), True, 'import numpy as np\n'), ((17371, 17407), 'numpy.array', 'np.array', (['([5.12] * self.variable_num)'], {}), '([5.12] * self.variable_num)\n', (17379, 17407), True, 'import numpy as np\n'), ((17438, 17475), 'numpy.array', 'np.array', (['([-5.12] * self.variable_num)'], {}), '([-5.12] * self.variable_num)\n', (17446, 17475), True, 'import numpy as np\n'), ((17506, 17541), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (17514, 17541), True, 'import numpy as 
np\n'), ((18093, 18129), 'numpy.array', 'np.array', (['([20.0] * self.variable_num)'], {}), '([20.0] * self.variable_num)\n', (18101, 18129), True, 'import numpy as np\n'), ((18159, 18196), 'numpy.array', 'np.array', (['([-20.0] * self.variable_num)'], {}), '([-20.0] * self.variable_num)\n', (18167, 18196), True, 'import numpy as np\n'), ((18226, 18249), 'numpy.array', 'np.array', (['[4.92, -9.89]'], {}), '([4.92, -9.89])\n', (18234, 18249), True, 'import numpy as np\n'), ((19277, 19313), 'numpy.array', 'np.array', (['([5.12] * self.variable_num)'], {}), '([5.12] * self.variable_num)\n', (19285, 19313), True, 'import numpy as np\n'), ((19344, 19381), 'numpy.array', 'np.array', (['([-5.12] * self.variable_num)'], {}), '([-5.12] * self.variable_num)\n', (19352, 19381), True, 'import numpy as np\n'), ((19412, 19447), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (19420, 19447), True, 'import numpy as np\n'), ((20056, 20091), 'numpy.array', 'np.array', (['([1.0] * self.variable_num)'], {}), '([1.0] * self.variable_num)\n', (20064, 20091), True, 'import numpy as np\n'), ((20121, 20157), 'numpy.array', 'np.array', (['([-1.0] * self.variable_num)'], {}), '([-1.0] * self.variable_num)\n', (20129, 20157), True, 'import numpy as np\n'), ((20187, 20222), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (20195, 20222), True, 'import numpy as np\n'), ((20661, 20698), 'numpy.array', 'np.array', (['([600.0] * self.variable_num)'], {}), '([600.0] * self.variable_num)\n', (20669, 20698), True, 'import numpy as np\n'), ((20728, 20766), 'numpy.array', 'np.array', (['([-600.0] * self.variable_num)'], {}), '([-600.0] * self.variable_num)\n', (20736, 20766), True, 'import numpy as np\n'), ((20796, 20831), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (20804, 20831), True, 'import numpy as np\n'), ((21368, 21405), 'numpy.array', 
'np.array', (['([np.pi] * self.variable_num)'], {}), '([np.pi] * self.variable_num)\n', (21376, 21405), True, 'import numpy as np\n'), ((21436, 21471), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (21444, 21471), True, 'import numpy as np\n'), ((21501, 21536), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (21509, 21536), True, 'import numpy as np\n'), ((22114, 22149), 'numpy.array', 'np.array', (['([1.0] * self.variable_num)'], {}), '([1.0] * self.variable_num)\n', (22122, 22149), True, 'import numpy as np\n'), ((22179, 22215), 'numpy.array', 'np.array', (['([-1.0] * self.variable_num)'], {}), '([-1.0] * self.variable_num)\n', (22187, 22215), True, 'import numpy as np\n'), ((22337, 22350), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (22345, 22350), True, 'import numpy as np\n'), ((22965, 23001), 'numpy.array', 'np.array', (['([5.12] * self.variable_num)'], {}), '([5.12] * self.variable_num)\n', (22973, 23001), True, 'import numpy as np\n'), ((23032, 23069), 'numpy.array', 'np.array', (['([-5.12] * self.variable_num)'], {}), '([-5.12] * self.variable_num)\n', (23040, 23069), True, 'import numpy as np\n'), ((23100, 23135), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (23108, 23135), True, 'import numpy as np\n'), ((23627, 23664), 'numpy.array', 'np.array', (['([500.0] * self.variable_num)'], {}), '([500.0] * self.variable_num)\n', (23635, 23664), True, 'import numpy as np\n'), ((23694, 23732), 'numpy.array', 'np.array', (['([-500.0] * self.variable_num)'], {}), '([-500.0] * self.variable_num)\n', (23702, 23732), True, 'import numpy as np\n'), ((23762, 23802), 'numpy.array', 'np.array', (['([420.9687] * self.variable_num)'], {}), '([420.9687] * self.variable_num)\n', (23770, 23802), True, 'import numpy as np\n'), ((24271, 24291), 'numpy.array', 'np.array', (['[3.0, 2.0]'], {}), '([3.0, 2.0])\n', 
(24279, 24291), True, 'import numpy as np\n'), ((24321, 24343), 'numpy.array', 'np.array', (['[-3.0, -2.0]'], {}), '([-3.0, -2.0])\n', (24329, 24343), True, 'import numpy as np\n'), ((24373, 24400), 'numpy.array', 'np.array', (['[-0.0898, 0.7126]'], {}), '([-0.0898, 0.7126])\n', (24381, 24400), True, 'import numpy as np\n'), ((24860, 24884), 'numpy.array', 'np.array', (['[1000.0, 10.0]'], {}), '([1000.0, 10.0])\n', (24868, 24884), True, 'import numpy as np\n'), ((24944, 24968), 'numpy.array', 'np.array', (['[-10.0, -1000]'], {}), '([-10.0, -1000])\n', (24952, 24968), True, 'import numpy as np\n'), ((25030, 25050), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (25038, 25050), True, 'import numpy as np\n'), ((25602, 25645), 'numpy.array', 'np.array', (['([2.0 * np.pi] * self.variable_num)'], {}), '([2.0 * np.pi] * self.variable_num)\n', (25610, 25645), True, 'import numpy as np\n'), ((25673, 25717), 'numpy.array', 'np.array', (['([-2.0 * np.pi] * self.variable_num)'], {}), '([-2.0 * np.pi] * self.variable_num)\n', (25681, 25717), True, 'import numpy as np\n'), ((25745, 25780), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (25753, 25780), True, 'import numpy as np\n'), ((26283, 26321), 'numpy.array', 'np.array', (['([1000.0] * self.variable_num)'], {}), '([1000.0] * self.variable_num)\n', (26291, 26321), True, 'import numpy as np\n'), ((26377, 26414), 'numpy.array', 'np.array', (['([-1000] * self.variable_num)'], {}), '([-1000] * self.variable_num)\n', (26385, 26414), True, 'import numpy as np\n'), ((26472, 26507), 'numpy.array', 'np.array', (['([0.0] * self.variable_num)'], {}), '([0.0] * self.variable_num)\n', (26480, 26507), True, 'import numpy as np\n'), ((1812, 1840), 'os.path.isdir', 'os.path.isdir', (['self.save_dir'], {}), '(self.save_dir)\n', (1825, 1840), False, 'import os\n'), ((1864, 1887), 'os.mkdir', 'os.mkdir', (['self.save_dir'], {}), '(self.save_dir)\n', (1872, 1887), False, 
'import os\n'), ((4944, 4964), 'numpy.square', 'np.square', (['variables'], {}), '(variables)\n', (4953, 4964), True, 'import numpy as np\n'), ((8262, 8292), 'numpy.absolute', 'np.absolute', (['(variables[0] + 10)'], {}), '(variables[0] + 10)\n', (8273, 8292), True, 'import numpy as np\n'), ((9314, 9346), 'numpy.sin', 'np.sin', (['(3 * np.pi * variables[0])'], {}), '(3 * np.pi * variables[0])\n', (9320, 9346), True, 'import numpy as np\n'), ((9361, 9390), 'numpy.power', 'np.power', (['(variables[0] - 1)', '(2)'], {}), '(variables[0] - 1, 2)\n', (9369, 9390), True, 'import numpy as np\n'), ((9448, 9477), 'numpy.power', 'np.power', (['(variables[1] - 1)', '(2)'], {}), '(variables[1] - 1, 2)\n', (9456, 9477), True, 'import numpy as np\n'), ((10112, 10137), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (10120, 10137), True, 'import numpy as np\n'), ((11838, 11873), 'numpy.sin', 'np.sin', (['(variables[0] + variables[1])'], {}), '(variables[0] + variables[1])\n', (11844, 11873), True, 'import numpy as np\n'), ((11872, 11912), 'numpy.power', 'np.power', (['(variables[0] - variables[1])', '(2)'], {}), '(variables[0] - variables[1], 2)\n', (11880, 11912), True, 'import numpy as np\n'), ((14897, 14919), 'numpy.floor', 'np.floor', (['variables[i]'], {}), '(variables[i])\n', (14905, 14919), True, 'import numpy as np\n'), ((15533, 15555), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (15549, 15555), True, 'import numpy as np\n'), ((16402, 16433), 'numpy.power', 'np.power', (['(variables[0] - a1i)', '(6)'], {}), '(variables[0] - a1i, 6)\n', (16410, 16433), True, 'import numpy as np\n'), ((16451, 16482), 'numpy.power', 'np.power', (['(variables[1] - a2i)', '(6)'], {}), '(variables[1] - a2i, 6)\n', (16459, 16482), True, 'import numpy as np\n'), ((17833, 17864), 'numpy.power', 'np.power', (['(100 * variables[i])', '(2)'], {}), '(100 * variables[i], 2)\n', (17841, 17864), True, 'import numpy as np\n'), ((21074, 
21099), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (21082, 21099), True, 'import numpy as np\n'), ((22730, 22747), 'numpy.power', 'np.power', (['tmp1', '(2)'], {}), '(tmp1, 2)\n', (22738, 22747), True, 'import numpy as np\n'), ((25995, 26020), 'numpy.absolute', 'np.absolute', (['variables[i]'], {}), '(variables[i])\n', (26006, 26020), True, 'import numpy as np\n'), ((26094, 26107), 'numpy.exp', 'np.exp', (['(-tmp2)'], {}), '(-tmp2)\n', (26100, 26107), True, 'import numpy as np\n'), ((26843, 26868), 'numpy.power', 'np.power', (['(1 / 2 * tmp2)', '(4)'], {}), '(1 / 2 * tmp2, 4)\n', (26851, 26868), True, 'import numpy as np\n'), ((1764, 1789), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1779, 1789), False, 'import os\n'), ((5589, 5618), 'numpy.power', 'np.power', (['(variables[i] - 1)', '(2)'], {}), '(variables[i] - 1, 2)\n', (5597, 5618), True, 'import numpy as np\n'), ((6846, 6890), 'numpy.power', 'np.power', (['(variables[0] + variables[1] + 1)', '(2)'], {}), '(variables[0] + variables[1] + 1, 2)\n', (6854, 6890), True, 'import numpy as np\n'), ((7026, 7074), 'numpy.power', 'np.power', (['(2 * variables[0] - 3 * variables[1])', '(2)'], {}), '(2 * variables[0] - 3 * variables[1], 2)\n', (7034, 7074), True, 'import numpy as np\n'), ((8750, 8775), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (8758, 8775), True, 'import numpy as np\n'), ((8775, 8800), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (8783, 8800), True, 'import numpy as np\n'), ((10626, 10646), 'numpy.cos', 'np.cos', (['variables[1]'], {}), '(variables[1])\n', (10632, 10646), True, 'import numpy as np\n'), ((15486, 15511), 'numpy.power', 'np.power', (['variables[i]', '(4)'], {}), '(variables[i], 4)\n', (15494, 15511), True, 'import numpy as np\n'), ((19653, 19678), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (19661, 19678), True, 
'import numpy as np\n'), ((20436, 20461), 'numpy.absolute', 'np.absolute', (['variables[i]'], {}), '(variables[i])\n', (20447, 20461), True, 'import numpy as np\n'), ((21816, 21836), 'numpy.sin', 'np.sin', (['variables[i]'], {}), '(variables[i])\n', (21822, 21836), True, 'import numpy as np\n'), ((23370, 23395), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (23378, 23395), True, 'import numpy as np\n'), ((24684, 24709), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (24692, 24709), True, 'import numpy as np\n'), ((25304, 25342), 'numpy.cos', 'np.cos', (['(i + 1 + (i + 2) * variables[0])'], {}), '(i + 1 + (i + 2) * variables[0])\n', (25310, 25342), True, 'import numpy as np\n'), ((25363, 25401), 'numpy.cos', 'np.cos', (['(i + 1 + (i + 2) * variables[1])'], {}), '(i + 1 + (i + 2) * variables[1])\n', (25369, 25401), True, 'import numpy as np\n'), ((26048, 26073), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (26056, 26073), True, 'import numpy as np\n'), ((26822, 26847), 'numpy.power', 'np.power', (['(1 / 2 * tmp2)', '(2)'], {}), '(1 / 2 * tmp2, 2)\n', (26830, 26847), True, 'import numpy as np\n'), ((6212, 6237), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (6220, 6237), True, 'import numpy as np\n'), ((6302, 6327), 'numpy.power', 'np.power', (['variables[1]', '(3)'], {}), '(variables[1], 3)\n', (6310, 6327), True, 'import numpy as np\n'), ((9400, 9432), 'numpy.sin', 'np.sin', (['(3 * np.pi * variables[1])'], {}), '(3 * np.pi * variables[1])\n', (9406, 9432), True, 'import numpy as np\n'), ((9487, 9519), 'numpy.sin', 'np.sin', (['(2 * np.pi * variables[1])'], {}), '(2 * np.pi * variables[1])\n', (9493, 9519), True, 'import numpy as np\n'), ((10605, 10625), 'numpy.cos', 'np.cos', (['variables[0]'], {}), '(variables[0])\n', (10611, 10625), True, 'import numpy as np\n'), ((11237, 11286), 'numpy.absolute', 'np.absolute', 
(['(variables[1] + variables[0] / 2 + 47)'], {}), '(variables[1] + variables[0] / 2 + 47)\n', (11248, 11286), True, 'import numpy as np\n'), ((11327, 11374), 'numpy.absolute', 'np.absolute', (['(variables[0] - (variables[1] + 47))'], {}), '(variables[0] - (variables[1] + 47))\n', (11338, 11374), True, 'import numpy as np\n'), ((13923, 13948), 'numpy.power', 'np.power', (['variables[i]', '(4)'], {}), '(variables[i], 4)\n', (13931, 13948), True, 'import numpy as np\n'), ((17123, 17166), 'numpy.power', 'np.power', (['(1000)', '(i / (self.variable_num - 1))'], {}), '(1000, i / (self.variable_num - 1))\n', (17131, 17166), True, 'import numpy as np\n'), ((23398, 23430), 'numpy.cos', 'np.cos', (['(2 * np.pi * variables[i])'], {}), '(2 * np.pi * variables[i])\n', (23404, 23430), True, 'import numpy as np\n'), ((2753, 2766), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (2761, 2766), True, 'import numpy as np\n'), ((3476, 3489), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (3484, 3489), True, 'import numpy as np\n'), ((4359, 4390), 'numpy.cos', 'np.cos', (['(variables * 2.0 * np.pi)'], {}), '(variables * 2.0 * np.pi)\n', (4365, 4390), True, 'import numpy as np\n'), ((6979, 7004), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (6987, 7004), True, 'import numpy as np\n'), ((7164, 7189), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (7172, 7189), True, 'import numpy as np\n'), ((10059, 10084), 'numpy.power', 'np.power', (['variables[0]', '(6)'], {}), '(variables[0], 6)\n', (10067, 10084), True, 'import numpy as np\n'), ((10656, 10689), 'numpy.power', 'np.power', (['(variables[0] - np.pi)', '(2)'], {}), '(variables[0] - np.pi, 2)\n', (10664, 10689), True, 'import numpy as np\n'), ((10687, 10720), 'numpy.power', 'np.power', (['(variables[1] - np.pi)', '(2)'], {}), '(variables[1] - np.pi, 2)\n', (10695, 10720), True, 'import numpy as np\n'), ((12475, 12500), 'numpy.power', 'np.power', 
(['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (12483, 12500), True, 'import numpy as np\n'), ((12500, 12525), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (12508, 12525), True, 'import numpy as np\n'), ((12566, 12591), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (12574, 12591), True, 'import numpy as np\n'), ((12591, 12616), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (12599, 12616), True, 'import numpy as np\n'), ((13254, 13279), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (13262, 13279), True, 'import numpy as np\n'), ((13279, 13304), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (13287, 13304), True, 'import numpy as np\n'), ((13951, 13976), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (13959, 13976), True, 'import numpy as np\n'), ((18975, 19000), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (18983, 19000), True, 'import numpy as np\n'), ((19000, 19025), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (19008, 19025), True, 'import numpy as np\n'), ((21143, 21157), 'numpy.sqrt', 'np.sqrt', (['(i + 1)'], {}), '(i + 1)\n', (21150, 21157), True, 'import numpy as np\n'), ((22660, 22689), 'numpy.power', 'np.power', (['variables[i]', '(j + 1)'], {}), '(variables[i], j + 1)\n', (22668, 22689), True, 'import numpy as np\n'), ((22687, 22715), 'numpy.power', 'np.power', (['(1 / (i + 1))', '(j + 1)'], {}), '(1 / (i + 1), j + 1)\n', (22695, 22715), True, 'import numpy as np\n'), ((24062, 24087), 'numpy.absolute', 'np.absolute', (['variables[i]'], {}), '(variables[i])\n', (24073, 24087), True, 'import numpy as np\n'), ((24602, 24627), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (24610, 24627), True, 'import numpy as np\n'), ((24656, 24681), 
'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (24664, 24681), True, 'import numpy as np\n'), ((5561, 5586), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (5569, 5586), True, 'import numpy as np\n'), ((8215, 8240), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (8223, 8240), True, 'import numpy as np\n'), ((10004, 10029), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (10012, 10029), True, 'import numpy as np\n'), ((10034, 10059), 'numpy.power', 'np.power', (['variables[0]', '(4)'], {}), '(variables[0], 4)\n', (10042, 10059), True, 'import numpy as np\n'), ((18548, 18578), 'numpy.power', 'np.power', (['(variables[0] - 10)', '(2)'], {}), '(variables[0] - 10, 2)\n', (18556, 18578), True, 'import numpy as np\n'), ((18576, 18601), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (18584, 18601), True, 'import numpy as np\n'), ((18636, 18666), 'numpy.power', 'np.power', (['(variables[0] + 10)', '(2)'], {}), '(variables[0] + 10, 2)\n', (18644, 18666), True, 'import numpy as np\n'), ((18664, 18689), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (18672, 18689), True, 'import numpy as np\n'), ((18724, 18753), 'numpy.power', 'np.power', (['(variables[0] - 5)', '(2)'], {}), '(variables[0] - 5, 2)\n', (18732, 18753), True, 'import numpy as np\n'), ((18751, 18781), 'numpy.power', 'np.power', (['(variables[1] + 10)', '(2)'], {}), '(variables[1] + 10, 2)\n', (18759, 18781), True, 'import numpy as np\n'), ((18813, 18842), 'numpy.power', 'np.power', (['(variables[0] + 5)', '(2)'], {}), '(variables[0] + 5, 2)\n', (18821, 18842), True, 'import numpy as np\n'), ((18840, 18870), 'numpy.power', 'np.power', (['(variables[1] + 10)', '(2)'], {}), '(variables[1] + 10, 2)\n', (18848, 18870), True, 'import numpy as np\n'), ((24548, 24573), 'numpy.power', 'np.power', (['variables[0]', '(2)'], 
{}), '(variables[0], 2)\n', (24556, 24573), True, 'import numpy as np\n'), ((24577, 24602), 'numpy.power', 'np.power', (['variables[0]', '(4)'], {}), '(variables[0], 4)\n', (24585, 24602), True, 'import numpy as np\n'), ((13161, 13186), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (13169, 13186), True, 'import numpy as np\n'), ((13186, 13211), 'numpy.power', 'np.power', (['variables[1]', '(2)'], {}), '(variables[1], 2)\n', (13194, 13211), True, 'import numpy as np\n'), ((21859, 21884), 'numpy.power', 'np.power', (['variables[i]', '(2)'], {}), '(variables[i], 2)\n', (21867, 21884), True, 'import numpy as np\n'), ((4280, 4300), 'numpy.square', 'np.square', (['variables'], {}), '(variables)\n', (4289, 4300), True, 'import numpy as np\n'), ((6908, 6933), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (6916, 6933), True, 'import numpy as np\n'), ((7091, 7116), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (7099, 7116), True, 'import numpy as np\n'), ((18468, 18493), 'numpy.power', 'np.power', (['variables[0]', '(2)'], {}), '(variables[0], 2)\n', (18476, 18493), True, 'import numpy as np\n')] |
import json
import wml_utils as wmlu
import numpy as np
import os
import cv2 as cv
import sys
import random
from iotoolkit.labelme_toolkit import get_labels_and_bboxes
def get_files(dir_path, sub_dir_name):
    """Pair each polygon-annotation JSON with its image for one dataset split.

    Expected layout (Mapillary Vistas v2.0):
        <dir_path>/<sub_dir_name>/images/<name>.jpg
        <dir_path>/<sub_dir_name>/v2.0/polygons/<name>.json

    Pairs whose image file is missing are reported and dropped.

    Args:
        dir_path: dataset root directory.
        sub_dir_name: split sub-directory, e.g. "training".

    Returns:
        list of (image_path, json_path) tuples.
    """
    img_dir = os.path.join(dir_path, sub_dir_name, 'images')
    label_dir = os.path.join(dir_path, sub_dir_name, "v2.0", "polygons")
    res = []
    json_files = wmlu.recurse_get_filepath_in_dir(label_dir, suffix=".json")
    for jf in json_files:
        base_name = wmlu.base_name(jf)
        igf = os.path.join(img_dir, base_name + ".jpg")
        if os.path.exists(igf):
            res.append((igf, jf))
        else:
            # fixed typo in the diagnostic message ("faild" -> "failed")
            print(f"ERROR: Find {igf} failed, json file is {jf}")
    return res
class MapillaryVistasData(object):
    """Reader for Mapillary Vistas v2.0 polygon annotations.

    After `read_data()` the instance iterates (image, annotation) pairs and
    yields, per image: file path, image size, label ids/names, bounding
    boxes and (optionally) per-object binary masks.
    """

    def __init__(self, label_text2id=None, shuffle=False, sub_dir_name="training",
                 ignored_labels=None, label_map=None,
                 allowed_labels_fn=None):
        """
        Args:
            label_text2id: optional callable mapping a label name to an int id.
            shuffle: if True, shuffle the file list after `read_data()`.
            sub_dir_name: dataset split sub-directory, e.g. "training".
            ignored_labels: label names to drop entirely (default: none).
            label_map: dict renaming labels, applied after filtering
                (default: empty).
            allowed_labels_fn: predicate deciding which labels to keep; a
                list of names is also accepted and converted to a membership
                test. None or an empty list keeps every label.
        """
        self.files = None
        self.label_text2id = label_text2id
        self.shuffle = shuffle
        self.sub_dir_name = sub_dir_name
        # These two previously used shared mutable default arguments ([], {});
        # the None-sentinel form is behaviourally identical because both are
        # only read, never mutated, by this class.
        self.ignored_labels = [] if ignored_labels is None else ignored_labels
        self.label_map = {} if label_map is None else label_map
        # Normalize allowed_labels_fn: None / empty list -> no filtering;
        # a non-empty list -> membership predicate; otherwise use as given.
        if allowed_labels_fn is None or (isinstance(allowed_labels_fn, list)
                                         and len(allowed_labels_fn) == 0):
            self.allowed_labels_fn = None
        elif isinstance(allowed_labels_fn, list):
            allowed = set(allowed_labels_fn)  # O(1) membership tests
            self.allowed_labels_fn = lambda x: x in allowed
        else:
            self.allowed_labels_fn = allowed_labels_fn

    def read_data(self, dir_path):
        """Collect the (image, json) file pairs for `self.sub_dir_name`."""
        self.files = get_files(dir_path, self.sub_dir_name)
        if self.shuffle:
            random.shuffle(self.files)

    def __len__(self):
        return len(self.files)

    def get_items(self, beg=0, end=None, filter=None):
        """Iterate annotations for files[beg:end].

        Args:
            beg, end: slice bounds on the file list (None means full range).
            filter: optional predicate (img_file, json_file) -> bool; pairs
                for which it returns False are skipped. (The name shadows
                the builtin but is kept for interface compatibility.)

        Yields:
            full_path, [height, width], category_ids, category_names, boxes,
            binary_masks, None, None, None
            where binary_masks is an [N, H, W] array of 0/1 values, an empty
            list when the image has no annotations, or None if stacking
            failed.
        """
        if end is None:
            end = len(self.files)
        if beg is None:
            beg = 0
        for i, (img_file, json_file) in enumerate(self.files[beg:end]):
            if filter is not None and not filter(img_file, json_file):
                continue
            print(img_file, json_file)
            # NOTE(review): progress index is relative to the slice while the
            # denominator is the full file count — confirm if intended.
            sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
            sys.stdout.flush()
            image, annotations_list = self.read_json(json_file)
            labels_names, bboxes = get_labels_and_bboxes(image, annotations_list)
            masks = [ann["segmentation"] for ann in annotations_list]
            if len(masks) > 0:
                try:
                    masks = np.stack(masks, axis=0)
                except Exception:  # was a bare except; keep best-effort behaviour
                    print("ERROR: stack masks failed.")
                    masks = None
            if self.label_text2id is not None:
                labels = [self.label_text2id(x) for x in labels_names]
            else:
                labels = None
            yield img_file, [image['height'], image['width']], labels, labels_names, bboxes, masks, None, None, None

    def get_boxes_items(self):
        """Iterate box-level annotations only (no masks rasterised).

        Yields:
            full_path, [height, width], category_ids, category_names, boxes,
            None, None, None, None
        """
        for i, (img_file, json_file) in enumerate(self.files):
            sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
            sys.stdout.flush()
            image, annotations_list = self.read_json(json_file, use_semantic=False)
            labels_names, bboxes = get_labels_and_bboxes(image, annotations_list)
            labels = [self.label_text2id(x) for x in labels_names]
            # file, img_size, category_ids, labels_text, bboxes, binary_mask, area, is_crowd, _
            yield img_file, [image['height'], image['width']], labels, labels_names, bboxes, None, None, None, None

    def read_json(self, file_path, use_semantic=True):
        """Parse one polygon-JSON annotation file.

        Applies ignored_labels / allowed_labels_fn filtering and label_map
        renaming, clips polygon points to the image bounds and computes
        per-object bounding boxes. When use_semantic is True, a binary mask
        is rasterised per object and overlapping pixels are afterwards
        assigned to a single object (later annotations take precedence).

        Args:
            file_path: path of the JSON file.
            use_semantic: rasterise per-object segmentation masks.

        Returns:
            (image, annotations_list): image holds height/width/file_name;
            each annotation holds bbox (x, y, w, h), category_id,
            points_x/points_y and, if use_semantic, a "segmentation" mask.
        """
        annotations_list = []
        image = {}
        with open(file_path, "r", encoding="gb18030") as f:
            print(file_path)
            data_str = f.read()
            try:
                json_data = json.loads(data_str)
                img_width = int(json_data["width"])
                img_height = int(json_data["height"])
                image["height"] = int(img_height)
                image["width"] = int(img_width)
                image["file_name"] = wmlu.base_name(file_path)
                for shape in json_data["objects"]:
                    label = shape["label"]
                    if self.ignored_labels is not None and label in self.ignored_labels:
                        continue
                    if self.allowed_labels_fn is not None and not self.allowed_labels_fn(label):
                        continue
                    if self.label_map is not None and label in self.label_map:
                        label = self.label_map[label]
                    mask = np.zeros(shape=[img_height, img_width], dtype=np.uint8)
                    all_points = np.array([shape["polygon"]]).astype(np.int32)
                    if len(all_points) < 1:
                        continue
                    # (N, 2) point list -> separate x / y vectors.
                    points = np.transpose(all_points[0])
                    x, y = np.vsplit(points, 2)
                    x = np.reshape(x, [-1])
                    y = np.reshape(y, [-1])
                    # Clip polygon coordinates to the image bounds.
                    x = np.minimum(np.maximum(0, x), img_width - 1)
                    y = np.minimum(np.maximum(0, y), img_height - 1)
                    xmin = np.min(x)
                    xmax = np.max(x)
                    ymin = np.min(y)
                    ymax = np.max(y)
                    if use_semantic:
                        segmentation = cv.drawContours(mask, all_points, -1, color=1, thickness=cv.FILLED)
                        annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
                                                 "segmentation": segmentation,
                                                 "category_id": label,
                                                 "points_x": x,
                                                 "points_y": y})
                    else:
                        annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
                                                 "category_id": label,
                                                 "points_x": x,
                                                 "points_y": y})
            except Exception:  # was a bare except; keep best-effort skipping
                print(f"Read file {os.path.basename(file_path)} failed.")
                pass
        if use_semantic:
            '''
            Each pixel only belong to one classes, and the latter annotation will overwrite the previous
            '''
            # NOTE(review): the `> 2` threshold skips overlap resolution when
            # exactly two objects exist — looks like it should be `> 1`; kept
            # as-is to preserve behaviour, TODO confirm.
            if len(annotations_list) > 2:
                mask = 1 - annotations_list[-1]['segmentation']
                for i in reversed(range(len(annotations_list) - 1)):
                    annotations_list[i]['segmentation'] = np.logical_and(annotations_list[i]['segmentation'], mask)
                    mask = np.logical_and(mask, 1 - annotations_list[i]['segmentation'])
        return image, annotations_list
if __name__ == "__main__":
    # Demo / visual-inspection entry point: read the Mapillary Vistas
    # training split and display each image with its boxes and masks.
    id = 0
    # data_statistics("/home/vghost/ai/mldata/qualitycontrol/rdatasv3")
    import img_utils as wmli
    import object_detection_tools.visualization as odv
    import matplotlib.pyplot as plt
    NAME2ID = {}
    ID2NAME = {}

    def name_to_id(x):
        """Assign dense integer ids to label names in first-seen order."""
        global id
        if x not in NAME2ID:
            NAME2ID[x] = id
            ID2NAME[id] = x
            id += 1
        return NAME2ID[x]

    # BUGFIX: a missing comma previously fused 'direction-front' and
    # 'ambiguous' via implicit string concatenation into the single
    # (never-matching) label 'direction-frontambiguous', so neither
    # label was actually ignored.
    ignored_labels = [
        'manhole', 'dashed', 'other-marking', 'static', 'front', 'back',
        'solid', 'catch-basin', 'utility-pole', 'pole', 'street-light',
        'direction-back', 'direction-front', 'ambiguous', 'other', 'text',
        'diagonal', 'left', 'right', 'water-valve', 'general-single',
        'temporary-front', 'wheeled-slow', 'parking-meter',
        'split-left-or-straight', 'split-right-or-straight', 'zigzag',
        'give-way-row', 'ground-animal', 'phone-booth', 'give-way-single',
        'garage', 'temporary-back', 'caravan', 'other-barrier',
        'chevron', 'pothole', 'sand'
    ]
    # Merge all person-like classes into a single 'person' label.
    label_map = {
        'individual': 'person',
        'cyclists': 'person',
        'other-rider': 'person'
    }
    data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False,
                                ignored_labels=ignored_labels, label_map=label_map)
    data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0"))

    def filter(x):
        # Labels kept by the (disabled) filtering branch in the loop below.
        return x in ['general-single', 'parking', 'temporary', 'general-horizontal']

    def text_fn(classes, scores):
        # Show the label name only; scores are unused for ground truth.
        return f"{ID2NAME[classes]}"

    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)
        if False:  # optional label filtering, disabled by default
            is_keep = [filter(x) for x in category_names]
            category_ids = np.array(category_ids)[is_keep]
            boxes = np.array(boxes)[is_keep]
            binary_mask = np.array(binary_mask)[is_keep]
            if len(category_ids) == 0:
                continue
        wmlu.show_dict(NAME2ID)
        odv.draw_bboxes_and_maskv2(
            img=img, classes=category_ids, scores=None, bboxes=boxes,
            masks=binary_mask, color_fn=None,
            text_fn=text_fn, thickness=4,
            show_text=True,
            fontScale=0.8)
        plt.figure()
        plt.imshow(img)
        plt.show()
| [
"wml_utils.recurse_get_filepath_in_dir",
"img_utils.imread",
"numpy.array",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.reshape",
"object_detection_tools.visualization.draw_bboxes_and_maskv2",
"numpy.max",
"numpy.stack",
"numpy.min",
"sys.stdout.flush",
"numpy.maximum",
"wml_utils.h... | [((223, 269), 'os.path.join', 'os.path.join', (['dir_path', 'sub_dir_name', '"""images"""'], {}), "(dir_path, sub_dir_name, 'images')\n", (235, 269), False, 'import os\n'), ((285, 341), 'os.path.join', 'os.path.join', (['dir_path', 'sub_dir_name', '"""v2.0"""', '"""polygons"""'], {}), "(dir_path, sub_dir_name, 'v2.0', 'polygons')\n", (297, 341), False, 'import os\n'), ((370, 429), 'wml_utils.recurse_get_filepath_in_dir', 'wmlu.recurse_get_filepath_in_dir', (['label_dir'], {'suffix': '""".json"""'}), "(label_dir, suffix='.json')\n", (402, 429), True, 'import wml_utils as wmlu\n'), ((475, 493), 'wml_utils.base_name', 'wmlu.base_name', (['jf'], {}), '(jf)\n', (489, 493), True, 'import wml_utils as wmlu\n'), ((508, 549), 'os.path.join', 'os.path.join', (['img_dir', "(base_name + '.jpg')"], {}), "(img_dir, base_name + '.jpg')\n", (520, 549), False, 'import os\n'), ((561, 580), 'os.path.exists', 'os.path.exists', (['igf'], {}), '(igf)\n', (575, 580), False, 'import os\n'), ((8704, 8789), 'wml_utils.home_dir', 'wmlu.home_dir', (['"""ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0"""'], {}), "('ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0'\n )\n", (8717, 8789), True, 'import wml_utils as wmlu\n'), ((9191, 9213), 'img_utils.imread', 'wmli.imread', (['full_path'], {}), '(full_path)\n', (9202, 9213), True, 'import img_utils as wmli\n'), ((9599, 9622), 'wml_utils.show_dict', 'wmlu.show_dict', (['NAME2ID'], {}), '(NAME2ID)\n', (9613, 9622), True, 'import wml_utils as wmlu\n'), ((9631, 9818), 'object_detection_tools.visualization.draw_bboxes_and_maskv2', 'odv.draw_bboxes_and_maskv2', ([], {'img': 'img', 'classes': 'category_ids', 'scores': 'None', 'bboxes': 'boxes', 'masks': 'binary_mask', 'color_fn': 'None', 'text_fn': 'text_fn', 'thickness': '(4)', 'show_text': '(True)', 'fontScale': '(0.8)'}), '(img=img, classes=category_ids, scores=None,\n bboxes=boxes, masks=binary_mask, color_fn=None, text_fn=text_fn,\n thickness=4, 
show_text=True, fontScale=0.8)\n', (9657, 9818), True, 'import object_detection_tools.visualization as odv\n'), ((9868, 9880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9878, 9880), True, 'import matplotlib.pyplot as plt\n'), ((9889, 9904), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (9899, 9904), True, 'import matplotlib.pyplot as plt\n'), ((9913, 9923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9921, 9923), True, 'import matplotlib.pyplot as plt\n'), ((1579, 1605), 'random.shuffle', 'random.shuffle', (['self.files'], {}), '(self.files)\n', (1593, 1605), False, 'import random\n'), ((2313, 2331), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2329, 2331), False, 'import sys\n'), ((2431, 2477), 'iotoolkit.labelme_toolkit.get_labels_and_bboxes', 'get_labels_and_bboxes', (['image', 'annotations_list'], {}), '(image, annotations_list)\n', (2452, 2477), False, 'from iotoolkit.labelme_toolkit import get_labels_and_bboxes\n'), ((3332, 3350), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3348, 3350), False, 'import sys\n'), ((3469, 3515), 'iotoolkit.labelme_toolkit.get_labels_and_bboxes', 'get_labels_and_bboxes', (['image', 'annotations_list'], {}), '(image, annotations_list)\n', (3490, 3515), False, 'from iotoolkit.labelme_toolkit import get_labels_and_bboxes\n'), ((4058, 4078), 'json.loads', 'json.loads', (['data_str'], {}), '(data_str)\n', (4068, 4078), False, 'import json\n'), ((4320, 4345), 'wml_utils.base_name', 'wmlu.base_name', (['file_path'], {}), '(file_path)\n', (4334, 4345), True, 'import wml_utils as wmlu\n'), ((9400, 9422), 'numpy.array', 'np.array', (['category_ids'], {}), '(category_ids)\n', (9408, 9422), True, 'import numpy as np\n'), ((9452, 9467), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (9460, 9467), True, 'import numpy as np\n'), ((9503, 9524), 'numpy.array', 'np.array', (['binary_mask'], {}), '(binary_mask)\n', (9511, 9524), True, 'import numpy as 
np\n'), ((2628, 2651), 'numpy.stack', 'np.stack', (['masks'], {'axis': '(0)'}), '(masks, axis=0)\n', (2636, 2651), True, 'import numpy as np\n'), ((4912, 4967), 'numpy.zeros', 'np.zeros', ([], {'shape': '[img_height, img_width]', 'dtype': 'np.uint8'}), '(shape=[img_height, img_width], dtype=np.uint8)\n', (4920, 4967), True, 'import numpy as np\n'), ((5153, 5180), 'numpy.transpose', 'np.transpose', (['all_points[0]'], {}), '(all_points[0])\n', (5165, 5180), True, 'import numpy as np\n'), ((5208, 5228), 'numpy.vsplit', 'np.vsplit', (['points', '(2)'], {}), '(points, 2)\n', (5217, 5228), True, 'import numpy as np\n'), ((5253, 5272), 'numpy.reshape', 'np.reshape', (['x', '[-1]'], {}), '(x, [-1])\n', (5263, 5272), True, 'import numpy as np\n'), ((5297, 5316), 'numpy.reshape', 'np.reshape', (['y', '[-1]'], {}), '(y, [-1])\n', (5307, 5316), True, 'import numpy as np\n'), ((5481, 5490), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (5487, 5490), True, 'import numpy as np\n'), ((5518, 5527), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (5524, 5527), True, 'import numpy as np\n'), ((5555, 5564), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (5561, 5564), True, 'import numpy as np\n'), ((5592, 5601), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (5598, 5601), True, 'import numpy as np\n'), ((6954, 7011), 'numpy.logical_and', 'np.logical_and', (["annotations_list[i]['segmentation']", 'mask'], {}), "(annotations_list[i]['segmentation'], mask)\n", (6968, 7011), True, 'import numpy as np\n'), ((7039, 7100), 'numpy.logical_and', 'np.logical_and', (['mask', "(1 - annotations_list[i]['segmentation'])"], {}), "(mask, 1 - annotations_list[i]['segmentation'])\n", (7053, 7100), True, 'import numpy as np\n'), ((5352, 5368), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (5362, 5368), True, 'import numpy as np\n'), ((5420, 5436), 'numpy.maximum', 'np.maximum', (['(0)', 'y'], {}), '(0, y)\n', (5430, 5436), True, 'import numpy as np\n'), ((5678, 5745), 'cv2.drawContours', 
'cv.drawContours', (['mask', 'all_points', '(-1)'], {'color': '(1)', 'thickness': 'cv.FILLED'}), '(mask, all_points, -1, color=1, thickness=cv.FILLED)\n', (5693, 5745), True, 'import cv2 as cv\n'), ((5001, 5029), 'numpy.array', 'np.array', (["[shape['polygon']]"], {}), "([shape['polygon']])\n", (5009, 5029), True, 'import numpy as np\n'), ((6506, 6533), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (6522, 6533), False, 'import os\n')] |
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
#%matplotlib inline
from unityagents import UnityEnvironment
import numpy as np
import argparse
import sys
# Launch the pre-built Unity "Banana" environment (Windows x86_64 binary).
env = UnityEnvironment(file_name="Banana_Windows_x86_64/Banana.exe")
# get the default brain (the agent controller exposed by the environment)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment in training mode and grab the initial info block
env_info = env.reset(train_mode=True)[brain_name]
# number of agents in the environment (expected to be 1 for Banana)
print('Number of agents:', len(env_info.agents))
# number of actions available to the agent
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
# examine the state space: the first agent's observation vector
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
#from dqn_agent import Agent
def dqn(n_episodes=2000, max_t=5000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
    """Deep Q-Learning training loop for the Unity Banana environment.

    Uses the module-level ``env``, ``brain_name`` and ``agent`` objects.

    Params
    ======
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting value of epsilon, for epsilon-greedy action selection
        eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for decreasing epsilon

    Returns
    =======
        list: the total reward collected in each episode
    """
    scores = []                        # score from each episode
    scores_window = deque(maxlen=100)  # last 100 scores, for the moving average
    eps = eps_start                    # initialize epsilon
    for i_episode in range(1, n_episodes + 1):
        # Reset the environment and read the initial observation.
        env_info = env.reset(train_mode=True)[brain_name]
        state = env_info.vector_observations[0]
        score = 0
        # BUGFIX: the original looped `while True`, silently ignoring the
        # documented max_t parameter; bound the episode length as intended.
        for _ in range(max_t):
            action = agent.act(state, eps)
            action = action.astype(int)  # Unity expects an integer action index
            env_info = env.step(action)[brain_name]
            next_state = env_info.vector_observations[0]  # get the next state
            reward = env_info.rewards[0]                  # get the reward
            done = env_info.local_done[0]
            # Store the transition and (periodically) learn from replay.
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)  # save most recent score
        scores.append(score)         # save most recent score
        eps = max(eps_end, eps_decay * eps)  # decrease epsilon, floored at eps_end
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
    return scores
def main():
    """Train the agent, persist the network weights, and plot the scores.

    Uses the module-level ``agent`` created in the CLI guard below.
    """
    scores = dqn()
    # BUGFIX: save the checkpoint *before* plt.show(), which blocks until the
    # figure window is closed — previously the weights were only written after
    # the (interactive) plot, so killing the window lost the trained model.
    torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
    # Plot the per-episode scores.
    fig = plt.figure()
    fig.add_subplot(111)  # make a single axes current for plt.plot below
    plt.plot(np.arange(len(scores)), scores)
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("network", help="you can choose to use DDQN by input DDQN or use DuelingDQN by input Duel here")
parser.add_argument("priority", help="you can either train mode with priority exp replay or without it")
args = parser.parse_args()
if args.network not in ('DDQN', 'Duel'):
print('No such network, please make sure you choose either DDQN or Duel as an input for the network argument')
sys.exit()
if args.priority == 'priority':
from dqn_agent_test import Agent
else:
from dqn_agent import Agent
agent = Agent(state_size=37, action_size=4, seed=0, mode = args.network)
main() | [
"numpy.mean",
"collections.deque",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"dqn_agent.Agent",
"unityagents.UnityEnvironment",
"matplotlib.pyplot.figure",
"sys.exit",
"matplotlib.pyplot.show"
] | [((234, 296), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': '"""Banana_Windows_x86_64/Banana.exe"""'}), "(file_name='Banana_Windows_x86_64/Banana.exe')\n", (250, 296), False, 'from unityagents import UnityEnvironment\n'), ((1451, 1468), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (1456, 1468), False, 'from collections import deque\n'), ((3288, 3300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3298, 3300), True, 'import matplotlib.pyplot as plt\n'), ((3380, 3399), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (3390, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3427), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode #"""'], {}), "('Episode #')\n", (3414, 3427), True, 'import matplotlib.pyplot as plt\n'), ((3432, 3442), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3440, 3442), True, 'import matplotlib.pyplot as plt\n'), ((3552, 3577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3575, 3577), False, 'import argparse\n'), ((4157, 4219), 'dqn_agent.Agent', 'Agent', ([], {'state_size': '(37)', 'action_size': '(4)', 'seed': '(0)', 'mode': 'args.network'}), '(state_size=37, action_size=4, seed=0, mode=args.network)\n', (4162, 4219), False, 'from dqn_agent import Agent\n'), ((4011, 4021), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4019, 4021), False, 'import sys\n'), ((2591, 2613), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (2598, 2613), True, 'import numpy as np\n'), ((2731, 2753), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (2738, 2753), True, 'import numpy as np\n')] |
from io import StringIO
from collections import OrderedDict
import numpy as np
import scipy.linalg as sl
import pandas as pd
import pytest
import matmodlab2 as mml
runid = 'simulation_output'
def compare_dataframes(frame1, frame2, tol=1.0e-12):
    """Element-wise compare the columns shared by two DataFrames.

    Parameters
    ----------
    frame1, frame2 : pandas.DataFrame
        Frames to compare.  NOTE: only columns present in BOTH frames are
        checked; columns unique to one frame are silently ignored.
    tol : float, optional
        Tolerance passed to ``np.allclose`` as both ``atol`` and ``rtol``.

    Returns
    -------
    bool
        True when every shared column agrees within ``tol``.
    """
    passed = True
    # `keys()` views are iterable directly; no need for set(list(...)).
    for key in set(frame1.keys()) & set(frame2.keys()):
        print(key)
        arr1 = frame1[key]
        arr2 = frame2[key]
        if not np.allclose(arr1, arr2, atol=tol, rtol=tol):
            passed = False
            # Diagnostic output for the failing column.
            print("Column {0} failed".format(key))
            print(arr1 - arr2)
            print(sum(arr1 - arr2))
    return passed
@pytest.mark.skip()
@pytest.mark.pandas
@pytest.mark.parametrize('stretch', [1.5, 1.001, 0.999, 0.5])
@pytest.mark.parametrize('kappa', [2.0, 1.0, 0.0, -1.0, 2.0])
def test_defgrad_basic(kappa, stretch):
    """Drive a uniaxial deformation-gradient step through matmodlab2 and
    compare the recorded strain/deformation history against the analytic
    Seth-Hill strain for the given kappa (kappa == 0 is the logarithmic
    strain; otherwise E = (stretch**kappa - 1) / kappa)."""
    # Mid-step (t=0.5) and end-of-step (t=1.0) stretches for a linear ramp.
    stretch1 = (stretch - 1.0) / 2.0 + 1.0
    stretch2 = stretch
    if kappa == 0.0:
        strain1 = np.log(stretch1)
        strain2 = np.log(stretch2)
    else:
        strain1 = (stretch1 ** kappa - 1.0) / kappa
        strain2 = (stretch2 ** kappa - 1.0) / kappa
    # Analytic reference solution as a small CSV table (t = 0, 0.5, 1).
    sol_io = StringIO("""Time,E.XX,E.YY,E.ZZ,F.XX,F.YY,F.ZZ
0.0,0.0,0.0,0.0,1.0,1.0,1.0
0.5,{0:.14e},0.0,0.0,{1:.14e},1.0,1.0
1.0,{2:.14e},0.0,0.0,{3:.14e},1.0,1.0""".format(strain1, stretch1, strain2, stretch2))
    sol_df = pd.read_csv(sol_io)
    sol_df.to_csv("defgrad_basic_solution.csv")
    # Initialize the simulator
    mps = mml.MaterialPointSimulator(runid)
    # Initialize the material
    mat = mml.ElasticMaterial(E=8.0, Nu=1.0 / 3.0)
    mps.assign_material(mat)
    # Run the steps: prescribe the full deformation gradient ('FFFFFFFFF')
    # with only F.XX ramped to `stretch`.
    mps.run_step('FFFFFFFFF', [stretch, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
                 frames=20,
                 increment=1.0, kappa=kappa)
    print(sol_df)
    mps.df.to_csv("defgrad_basic.csv", index=False)
    # Shared columns of the simulated and analytic tables must agree.
    assert compare_dataframes(sol_df, mps.df)
if __name__ == '__main__':
    test_defgrad_basic(0.0, 1.5)
| [
"numpy.allclose",
"matmodlab2.MaterialPointSimulator",
"pandas.read_csv",
"pytest.mark.skip",
"numpy.log",
"matmodlab2.ElasticMaterial",
"pytest.mark.parametrize"
] | [((668, 686), 'pytest.mark.skip', 'pytest.mark.skip', ([], {}), '()\n', (684, 686), False, 'import pytest\n'), ((708, 768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stretch"""', '[1.5, 1.001, 0.999, 0.5]'], {}), "('stretch', [1.5, 1.001, 0.999, 0.5])\n", (731, 768), False, 'import pytest\n'), ((770, 830), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kappa"""', '[2.0, 1.0, 0.0, -1.0, 2.0]'], {}), "('kappa', [2.0, 1.0, 0.0, -1.0, 2.0])\n", (793, 830), False, 'import pytest\n'), ((1370, 1389), 'pandas.read_csv', 'pd.read_csv', (['sol_io'], {}), '(sol_io)\n', (1381, 1389), True, 'import pandas as pd\n'), ((1480, 1513), 'matmodlab2.MaterialPointSimulator', 'mml.MaterialPointSimulator', (['runid'], {}), '(runid)\n', (1506, 1513), True, 'import matmodlab2 as mml\n'), ((1555, 1595), 'matmodlab2.ElasticMaterial', 'mml.ElasticMaterial', ([], {'E': '(8.0)', 'Nu': '(1.0 / 3.0)'}), '(E=8.0, Nu=1.0 / 3.0)\n', (1574, 1595), True, 'import matmodlab2 as mml\n'), ((977, 993), 'numpy.log', 'np.log', (['stretch1'], {}), '(stretch1)\n', (983, 993), True, 'import numpy as np\n'), ((1012, 1028), 'numpy.log', 'np.log', (['stretch2'], {}), '(stretch2)\n', (1018, 1028), True, 'import numpy as np\n'), ((458, 501), 'numpy.allclose', 'np.allclose', (['arr1', 'arr2'], {'atol': 'tol', 'rtol': 'tol'}), '(arr1, arr2, atol=tol, rtol=tol)\n', (469, 501), True, 'import numpy as np\n')] |
import numpy as np
import scipy.linalg
from numpy.linalg import cond, norm
from scipy.linalg import toeplitz
from scipy.linalg import solve_triangular
import time
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
# Module-level constants: pi plus ANSI escape codes for coloured output.
PI = np.pi
CRED = '\033[91m'    # red
CGREEN = '\033[32m'  # green
CEND = '\033[0m'     # reset attributes


def red_print(msg):
    """Print *msg* in red, prefixed with 'Partial Time '."""
    print(f'Partial Time {CRED}{msg!s}{CEND}')


def green_print(msg):
    """Print *msg* in green, prefixed with 'Full Time '."""
    print(f'Full Time {CGREEN}{msg!s}{CEND}')
class LU_eliminator(object):
    """LU factorisation of a square matrix with partial or full pivoting.

    mode='partial' computes P @ A = L @ U (row pivoting only);
    mode='full'    computes P @ A @ Q = L @ U (row and column pivoting).

    ``linear_solve`` factorises A and solves A x = b, returning
    ``(x, elapsed_ms)`` where ``elapsed_ms`` times only the elimination loop.
    """

    def __init__(self, mode):
        # Only the two supported pivoting strategies are accepted.
        assert mode in ['partial', 'full']
        self.mode = mode
        return

    def perform_LU_analysis_partial(self, A):
        """Factorise a square A as P @ A = L @ U (partial/row pivoting).

        Returns (L, U, P, elapsed_ms).  L is unit lower triangular, U upper
        triangular, P a dense permutation matrix.
        """
        # Make sure the matrix is square
        assert A.shape[0] == A.shape[1]
        m = A.shape[0]
        # Work in-place on a copy of A; L and U are extracted at the end.
        LU = np.matrix(np.copy(A))
        # Row permutation, kept as an index vector during elimination.
        P = np.arange(m)
        start = time.time()
        for i in range(0, m - 1):
            # Pivot: row with the largest |entry| in column i of the
            # remaining lower-right submatrix.
            p = np.argmax(np.abs(LU[i:, i].ravel()))
            P[[i, p + i]] = P[[p + i, i]]
            LU[[i, p + i], :] = LU[[p + i, i], :]
            # Multipliers that eliminate column i below the diagonal...
            w = LU[i + 1:m, i] / LU[i, i]
            LU[i + 1:m, i:m] = LU[i + 1:m, i:m] - w * LU[i, i:m]
            # ...stored in the strictly-lower (L) part of LU.
            LU[i + 1:m, i] = w
        end = time.time()
        elapsed = end * 1000 - start * 1000
        L = np.tril(LU, -1) + np.eye(m)
        U = np.triu(LU)
        # Expand the index vector into a dense permutation matrix.
        P = np.eye(m)[P, :]
        return L, U, P, elapsed

    def perform_LU_analysis_full(self, A):
        """Factorise a square A as P @ A @ Q = L @ U (full pivoting).

        Returns (L, U, P, Q, elapsed_ms).
        """
        # Make sure the matrix is square
        assert A.shape[0] == A.shape[1]
        m = A.shape[0]
        LU = np.matrix(np.copy(A))
        P = np.arange(m)  # row permutation (index form)
        Q = np.arange(m)  # column permutation (index form)
        start = time.time()
        for i in range(0, m - 1):
            # Pivot: absolute maximum over the whole remaining submatrix,
            # located as a flat index then converted to a (row, col) pair.
            p = np.argmax(np.abs(LU[i:, i:]).ravel())
            p_r, p_c = np.unravel_index(p, LU[i:, i:].shape)
            P[[i, p_r + i]] = P[[p_r + i, i]]
            Q[[i, p_c + i]] = Q[[p_c + i, i]]
            LU[[i, p_r + i], :] = LU[[p_r + i, i], :]
            LU[:, [i, p_c + i]] = LU[:, [p_c + i, i]]
            # Eliminate column i below the diagonal (as in the partial case).
            w = LU[i + 1:m, i] / LU[i, i]
            LU[i + 1:m, i:m] = LU[i + 1:m, i:m] - w * LU[i, i:m]
            LU[i + 1:m, i] = w
        end = time.time()
        elapsed = end * 1000 - start * 1000
        L = np.tril(LU, -1) + np.eye(m)
        U = np.triu(LU)
        P = np.eye(m)[P, :]
        Q = np.eye(m)[:, Q]
        return L, U, P, Q, elapsed

    def linear_solve_partial(self, A, b):
        """Solve A x = b via P A = L U.

        FIX: use solve_triangular (forward/back substitution) instead of the
        general-purpose scipy.linalg.solve — the factors ARE triangular, and
        this matches the substitution style already used in run_exersize_5.
        """
        L, U, P, elapsed = self.perform_LU_analysis_partial(A=A)
        y = solve_triangular(L, P @ b, lower=True)   # forward:  L y = P b
        x = solve_triangular(U, y, lower=False)      # backward: U x = y
        return x, elapsed

    def linear_solve_full(self, A, b):
        """Solve A x = b via P A Q = L U."""
        L, U, P, Q, elapsed = self.perform_LU_analysis_full(A=A)
        z = solve_triangular(L, P @ b, lower=True)   # forward:  L z = P b
        y = solve_triangular(U, z, lower=False)      # backward: U y = z
        # Undo the column permutation: Q^T x = y  =>  x = Q y.  (The original
        # *solved* a linear system against Q.T to the same effect.)
        x = Q @ y
        return x, elapsed

    def linear_solve(self, A, b):
        """Dispatch to the solver matching the configured pivoting mode."""
        if self.mode == 'partial':
            X, elapsed = self.linear_solve_partial(A=A, b=b)
        else:
            X, elapsed = self.linear_solve_full(A=A, b=b)
        return X, elapsed
def run_exersize_2():
    """Benchmark partial vs full pivoting on the pi-based Toeplitz systems.

    For each size, builds the Toeplitz test matrix, solves A x = b with both
    pivoting strategies and returns a DataFrame with condition numbers,
    CPU times, max-norm errors and residuals.
    """
    def k_diag_value_calc(k):
        # Entry of the k-th off-diagonal of the Toeplitz matrix.
        return (4 * (-1) ** k) * (PI ** 2 * k ** 2 - 6) / k ** 4

    def create_toeplitz_matrix(size):
        # First column: pi^4/5 on the diagonal, k_diag_value_calc(k) off it.
        first_col = np.array([PI ** 4 / 5.0] +
                             [k_diag_value_calc(k) for k in range(1, size)])
        return toeplitz(first_col)

    sizes = [64, 128, 256, 512, 1024, 2048]
    x_list = [np.random.randn(n, 1) for n in sizes]
    A_list = [create_toeplitz_matrix(n) for n in sizes]
    b_list = [A @ x for A, x in zip(A_list, x_list)]

    stats = {key: [] for key in ('condition_number', 'cpu_times_partial',
                                 'cpu_times_full', 'error_partial',
                                 'error_full', 'res_partial', 'res_full')}
    for A, b, x in zip(A_list, b_list, x_list):
        print(norm(A, np.inf))  # debug print kept from the original
        stats['condition_number'].append(cond(A, np.inf))
        px, ptime = LU_eliminator(mode='partial').linear_solve(A=A, b=b)
        fx, ftime = LU_eliminator(mode='full').linear_solve(A=A, b=b)
        stats['cpu_times_partial'].append(ptime)
        stats['cpu_times_full'].append(ftime)
        stats['error_partial'].append(norm(px - x, np.inf))
        stats['error_full'].append(norm(fx - x, np.inf))
        stats['res_partial'].append(norm(b - A @ px, np.inf))
        stats['res_full'].append(norm(b - A @ fx, np.inf))

    return pd.DataFrame(data={'size': sizes, **stats})
def run_exersize_4():
    """Benchmark partial vs full pivoting on the classic growth-factor matrix.

    The test matrix has ones on the diagonal, -1 strictly below it and a
    final column of ones.  Returns a DataFrame with condition numbers,
    CPU times, max-norm errors and residuals for both pivoting strategies.
    """
    def create_custom_matrix(size):
        # Identity plus strictly-lower -1 entries, last column set to 1.
        A = np.eye(size) - np.tri(size, size, -1)
        A[:, size - 1] = 1.0
        return A

    sizes = [64, 128, 256, 512, 1024]
    x_list = [np.random.randn(n, 1) for n in sizes]
    A_list = [create_custom_matrix(n) for n in sizes]
    b_list = [A @ x for A, x in zip(A_list, x_list)]

    stats = {key: [] for key in ('condition_number', 'cpu_times_partial',
                                 'cpu_times_full', 'error_partial',
                                 'error_full', 'res_partial', 'res_full')}
    for A, b, x in zip(A_list, b_list, x_list):
        stats['condition_number'].append(cond(A, np.inf))
        px, ptime = LU_eliminator(mode='partial').linear_solve(A=A, b=b)
        fx, ftime = LU_eliminator(mode='full').linear_solve(A=A, b=b)
        stats['cpu_times_partial'].append(ptime)
        stats['cpu_times_full'].append(ftime)
        stats['error_partial'].append(norm(px - x, np.inf))
        stats['error_full'].append(norm(fx - x, np.inf))
        stats['res_partial'].append(norm(b - A @ px, np.inf))
        stats['res_full'].append(norm(b - A @ fx, np.inf))

    return pd.DataFrame(data={'size': sizes, **stats})
def run_exersize_5():
    """Solve rank-one-updated Toeplitz systems (A + u v^T) x = b via the
    Sherman-Morrison formula, reusing a single LU factorisation of A:
        z = A^-1 u,  y = A^-1 b,
        x = y + (v^T y) / (1 - v^T z) * z.
    Returns a DataFrame of condition numbers, errors, CPU times, residuals.
    """
    condition_numbers = []
    cpu_times_partial = []
    error_partial = []
    res_partial = []
    def k_diag_value_calc(k):
        # Entry of the k-th off-diagonal of the Toeplitz matrix.
        return ((4*(-1)**k) * ((PI**2) * (k**2)-6)) / k**4
    def create_toeplitz_matrix(size):
        diag_value = PI ** 4 / 5.0
        diagonals = np.array([diag_value] + [k_diag_value_calc(f) for f in range(1,size)])
        return toeplitz(diagonals)
    def l2_normalize(v):
        # NOTE: the local name `norm` shadows the imported numpy.linalg.norm
        # inside this helper only.
        v = v.astype(float)
        norm = np.linalg.norm(v)
        if norm == 0:
            return v
        return v / norm
    sizes = [64,128,256,512,1024]
    x_list = [np.random.randn(f,1) for f in sizes]
    # u, v are unit vectors defining the rank-one update u v^T.
    u_list = [l2_normalize(np.random.randn(f,1)) for f in sizes]
    v_list = [l2_normalize(np.random.randn(f,1)) for f in sizes]
    A_list = [create_toeplitz_matrix(f) for f in sizes]
    # Right-hand sides built from the *updated* matrix A + u v^T.
    b_list = [np.matmul((A + np.outer(u,v)), x) for A,u,v,x in zip(A_list,u_list,v_list,x_list)]
    for A,b,x,u,v in zip(A_list, b_list, x_list, u_list, v_list):
        condition_numbers.append(cond(A, np.inf))
        partial_solver = LU_eliminator(mode='partial')
        L, U, P, _ = partial_solver.perform_LU_analysis_partial(A=A)
        # Start time here because Sherman-Morisson Assumes
        # Prior Knowledge of LU factorization
        # Start Timer
        start = time.time()
        # Sanity check: u, v should be unit vectors (prints 1.0)
        print(norm(u,2))
        print(norm(v,2))
        # Partial Problem 1 Solve Az = u for z, so z = A^-1 * u
        # Forward substitution: L pp1 = P u
        pp1 = solve_triangular(L,P@u,lower=True)
        # Backward substitution: U z = pp1
        z = solve_triangular(U,pp1,lower=False)
        # Partial Problem 2 Solve Ay = b for y, so y = A^-1 * b
        # Forward substitution: L pp2 = P b
        pp2 = solve_triangular(L,P@b,lower=True)
        # Backward substitution: U y = pp2
        y = solve_triangular(U,pp2,lower=False)
        # Plug into the Sherman-Morrison formula and assemble the solution.
        vz = v.T@z
        vz = vz[0]
        vy = v.T@y
        vy = vy[0]
        calc = vy/(1-vz)
        z = calc * z
        px = y + z
        end = time.time()
        elapsed = (end -start) * 1000
        # Max-norm error against the known solution and residual of the
        # updated system.
        perror = norm(px-x, np.inf)
        res_part = norm(b-A@px, np.inf)
        error_partial.append(perror)
        res_partial.append(res_part)
        cpu_times_partial.append(elapsed)
    df = pd.DataFrame(data={'size':sizes, 'condition_number':condition_numbers, 'error_partial':error_partial, 'cpu_times_partial':cpu_times_partial, 'res_partial':res_partial})
    return df
# Exersize 2
# df = run_exersize_2()
# df.to_csv('Exersize2.csv', index=False)
# cn = df['condition_number'].values
# cpu_part = df['cpu_times_partial'].values
# cpu_full = df['cpu_times_full'].values
# error_partial = df['error_partial'].values
# error_full = df['error_full'].values
# res_part = df['res_partial'].values
# res_full = df['res_full'].values
# plt.figure(figsize=(8,8))
# plt.title('Excxecution Time vs Condition Number')
# plt.plot(cn,cpu_part,'ro--',label='Partial Pivoting Excecution Time(ms)')
# plt.plot(cn,cpu_full,'bo--',label='Full Pivoting Excecution Time(ms)')
# plt.legend(loc=2)
# plt.savefig('Cpu_Time_vs_CN_2.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Error vs Condition Number')
# plt.plot(cn,error_partial,'ro--',label='Partial Pivoting Error')
# plt.plot(cn,error_full,'bo--',label='Full Pivoting Error')
# plt.legend(loc=2)
# plt.savefig('Error_vs_CN_2.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Residual vs Condition Number')
# plt.plot(cn,res_part,'ro--',label='Partial Pivoting Residual')
# plt.plot(cn,res_full,'bo--',label='Full Pivoting Residual')
# plt.legend(loc=2)
# plt.savefig('Residual_vs_CN_2.png')
# plt.show()
# plt.close()
# Exersize 4
# df = run_exersize_4()
# df.to_csv('Exersize4.csv', index=False)
# cn = df['condition_number'].values
# cpu_part = df['cpu_times_partial'].values
# cpu_full = df['cpu_times_full'].values
# error_partial = df['error_partial'].values
# error_full = df['error_full'].values
# res_part = df['res_partial'].values
# res_full = df['res_full'].values
# plt.figure(figsize=(8,8))
# plt.title('Excxecution Time vs Condition Number')
# plt.plot(cn,cpu_part,'ro--',label='Partial Pivoting Excecution Time(ms)')
# plt.plot(cn,cpu_full,'bo--',label='Full Pivoting Excecution Time(ms)')
# plt.legend(loc=2)
# plt.savefig('Cpu_Time_vs_CN_4.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Error vs Condition Number')
# plt.plot(cn,error_partial,'ro--',label='Partial Pivoting Error')
# plt.plot(cn,error_full,'bo--',label='Full Pivoting Error')
# plt.legend(loc=2)
# plt.savefig('Error_vs_CN_4.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Residual vs Condition Number')
# plt.plot(cn,res_part,'ro--',label='Partial Pivoting Residual')
# plt.plot(cn,res_full,'bo--',label='Full Pivoting Residual')
# plt.legend(loc=2)
# plt.savefig('Residual_vs_CN_4.png')
# plt.show()
# plt.close()
# Exersize 5
# Run the Sherman-Morrison experiment (exercise 5) and persist the raw table.
df = run_exersize_5()
df.to_csv('Exersize5.csv', index=False)
# Columns used for the three plots below.
cn = df['condition_number'].values
cpu_part = df['cpu_times_partial'].values
error_partial = df['error_partial'].values
res_part = df['res_partial'].values
# Plot 1: CPU time vs condition number.
plt.figure(figsize=(8,8))
plt.title('Excxecution Time vs Condition Number')
plt.plot(cn,cpu_part,'ro--',label='Partial Pivoting Excecution Time(ms)')
plt.legend(loc=2)
plt.savefig('Cpu_Time_vs_CN_5.png')
plt.show()
plt.close()
# Plot 2: solution error vs condition number.
plt.figure(figsize=(8,8))
plt.title('Error vs Condition Number')
plt.plot(cn,error_partial,'ro--',label='Partial Pivoting Error')
plt.legend(loc=2)
plt.savefig('Error_vs_CN_5.png')
plt.show()
plt.close()
# Plot 3: residual vs condition number.
plt.figure(figsize=(8,8))
plt.title('Residual vs Condition Number')
plt.plot(cn,res_part,'ro--',label='Partial Pivoting Residual')
plt.legend(loc=2)
plt.savefig('Residual_vs_CN_5.png')
plt.show()
plt.close() | [
"numpy.linalg.cond",
"seaborn.set_style",
"numpy.linalg.norm",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.matmul",
"scipy.linalg.solve_triangular",
"numpy.unravel_index",
"pandas.DataFrame",
"numpy.tri",
"numpy.abs",
"numpy.eye",
"matplotlib.pyplot.savefig"... | [((237, 262), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (250, 262), True, 'import seaborn as sns\n'), ((13615, 13641), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (13625, 13641), True, 'import matplotlib.pyplot as plt\n'), ((13641, 13690), 'matplotlib.pyplot.title', 'plt.title', (['"""Excxecution Time vs Condition Number"""'], {}), "('Excxecution Time vs Condition Number')\n", (13650, 13690), True, 'import matplotlib.pyplot as plt\n'), ((13691, 13767), 'matplotlib.pyplot.plot', 'plt.plot', (['cn', 'cpu_part', '"""ro--"""'], {'label': '"""Partial Pivoting Excecution Time(ms)"""'}), "(cn, cpu_part, 'ro--', label='Partial Pivoting Excecution Time(ms)')\n", (13699, 13767), True, 'import matplotlib.pyplot as plt\n'), ((13765, 13782), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (13775, 13782), True, 'import matplotlib.pyplot as plt\n'), ((13783, 13818), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Cpu_Time_vs_CN_5.png"""'], {}), "('Cpu_Time_vs_CN_5.png')\n", (13794, 13818), True, 'import matplotlib.pyplot as plt\n'), ((13819, 13829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13827, 13829), True, 'import matplotlib.pyplot as plt\n'), ((13830, 13841), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13839, 13841), True, 'import matplotlib.pyplot as plt\n'), ((13844, 13870), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (13854, 13870), True, 'import matplotlib.pyplot as plt\n'), ((13870, 13908), 'matplotlib.pyplot.title', 'plt.title', (['"""Error vs Condition Number"""'], {}), "('Error vs Condition Number')\n", (13879, 13908), True, 'import matplotlib.pyplot as plt\n'), ((13909, 13976), 'matplotlib.pyplot.plot', 'plt.plot', (['cn', 'error_partial', '"""ro--"""'], {'label': '"""Partial Pivoting Error"""'}), "(cn, error_partial, 'ro--', 
label='Partial Pivoting Error')\n", (13917, 13976), True, 'import matplotlib.pyplot as plt\n'), ((13974, 13991), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (13984, 13991), True, 'import matplotlib.pyplot as plt\n'), ((13992, 14024), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Error_vs_CN_5.png"""'], {}), "('Error_vs_CN_5.png')\n", (14003, 14024), True, 'import matplotlib.pyplot as plt\n'), ((14025, 14035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14033, 14035), True, 'import matplotlib.pyplot as plt\n'), ((14036, 14047), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14045, 14047), True, 'import matplotlib.pyplot as plt\n'), ((14050, 14076), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (14060, 14076), True, 'import matplotlib.pyplot as plt\n'), ((14076, 14117), 'matplotlib.pyplot.title', 'plt.title', (['"""Residual vs Condition Number"""'], {}), "('Residual vs Condition Number')\n", (14085, 14117), True, 'import matplotlib.pyplot as plt\n'), ((14118, 14183), 'matplotlib.pyplot.plot', 'plt.plot', (['cn', 'res_part', '"""ro--"""'], {'label': '"""Partial Pivoting Residual"""'}), "(cn, res_part, 'ro--', label='Partial Pivoting Residual')\n", (14126, 14183), True, 'import matplotlib.pyplot as plt\n'), ((14181, 14198), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (14191, 14198), True, 'import matplotlib.pyplot as plt\n'), ((14199, 14234), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Residual_vs_CN_5.png"""'], {}), "('Residual_vs_CN_5.png')\n", (14210, 14234), True, 'import matplotlib.pyplot as plt\n'), ((14235, 14245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14243, 14245), True, 'import matplotlib.pyplot as plt\n'), ((14246, 14257), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14255, 14257), True, 'import matplotlib.pyplot as plt\n'), ((6564, 6831), 'pandas.DataFrame', 'pd.DataFrame', ([], 
{'data': "{'size': sizes, 'condition_number': condition_numbers, 'cpu_times_partial':\n cpu_times_partial, 'cpu_times_full': cpu_times_full, 'error_partial':\n error_partial, 'error_full': error_full, 'res_partial': res_partial,\n 'res_full': res_full}"}), "(data={'size': sizes, 'condition_number': condition_numbers,\n 'cpu_times_partial': cpu_times_partial, 'cpu_times_full':\n cpu_times_full, 'error_partial': error_partial, 'error_full':\n error_full, 'res_partial': res_partial, 'res_full': res_full})\n", (6576, 6831), True, 'import pandas as pd\n'), ((8175, 8442), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'size': sizes, 'condition_number': condition_numbers, 'cpu_times_partial':\n cpu_times_partial, 'cpu_times_full': cpu_times_full, 'error_partial':\n error_partial, 'error_full': error_full, 'res_partial': res_partial,\n 'res_full': res_full}"}), "(data={'size': sizes, 'condition_number': condition_numbers,\n 'cpu_times_partial': cpu_times_partial, 'cpu_times_full':\n cpu_times_full, 'error_partial': error_partial, 'error_full':\n error_full, 'res_partial': res_partial, 'res_full': res_full})\n", (8187, 8442), True, 'import pandas as pd\n'), ((10710, 10891), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'size': sizes, 'condition_number': condition_numbers, 'error_partial':\n error_partial, 'cpu_times_partial': cpu_times_partial, 'res_partial':\n res_partial}"}), "(data={'size': sizes, 'condition_number': condition_numbers,\n 'error_partial': error_partial, 'cpu_times_partial': cpu_times_partial,\n 'res_partial': res_partial})\n", (10722, 10891), True, 'import pandas as pd\n'), ((1050, 1062), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (1059, 1062), True, 'import numpy as np\n'), ((1110, 1121), 'time.time', 'time.time', ([], {}), '()\n', (1119, 1121), False, 'import time\n'), ((2037, 2048), 'time.time', 'time.time', ([], {}), '()\n', (2046, 2048), False, 'import time\n'), ((2141, 2152), 'numpy.triu', 'np.triu', (['LU'], {}), '(LU)\n', (2148, 
2152), True, 'import numpy as np\n'), ((2166, 2175), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (2172, 2175), True, 'import numpy as np\n'), ((2644, 2656), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (2653, 2656), True, 'import numpy as np\n'), ((2724, 2736), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (2733, 2736), True, 'import numpy as np\n'), ((2762, 2773), 'time.time', 'time.time', ([], {}), '()\n', (2771, 2773), False, 'import time\n'), ((4141, 4152), 'time.time', 'time.time', ([], {}), '()\n', (4150, 4152), False, 'import time\n'), ((4246, 4257), 'numpy.triu', 'np.triu', (['LU'], {}), '(LU)\n', (4253, 4257), True, 'import numpy as np\n'), ((4271, 4280), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (4277, 4280), True, 'import numpy as np\n'), ((4314, 4323), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (4320, 4323), True, 'import numpy as np\n'), ((5558, 5577), 'scipy.linalg.toeplitz', 'toeplitz', (['diagonals'], {}), '(diagonals)\n', (5566, 5577), False, 'from scipy.linalg import toeplitz\n'), ((5641, 5662), 'numpy.random.randn', 'np.random.randn', (['f', '(1)'], {}), '(f, 1)\n', (5656, 5662), True, 'import numpy as np\n'), ((5748, 5765), 'numpy.matmul', 'np.matmul', (['x1', 'x2'], {}), '(x1, x2)\n', (5757, 5765), True, 'import numpy as np\n'), ((6179, 6199), 'numpy.linalg.norm', 'norm', (['(px - x)', 'np.inf'], {}), '(px - x, np.inf)\n', (6183, 6199), False, 'from numpy.linalg import cond, norm\n'), ((6215, 6235), 'numpy.linalg.norm', 'norm', (['(fx - x)', 'np.inf'], {}), '(fx - x, np.inf)\n', (6219, 6235), False, 'from numpy.linalg import cond, norm\n'), ((6258, 6282), 'numpy.linalg.norm', 'norm', (['(b - A @ px)', 'np.inf'], {}), '(b - A @ px, np.inf)\n', (6262, 6282), False, 'from numpy.linalg import cond, norm\n'), ((6294, 6318), 'numpy.linalg.norm', 'norm', (['(b - A @ fx)', 'np.inf'], {}), '(b - A @ fx, np.inf)\n', (6298, 6318), False, 'from numpy.linalg import cond, norm\n'), ((7117, 7137), 'numpy.tri', 'np.tri', (['*A.shape', '(-1)'], 
{}), '(*A.shape, -1)\n', (7123, 7137), True, 'import numpy as np\n'), ((7150, 7162), 'numpy.eye', 'np.eye', (['size'], {}), '(size)\n', (7156, 7162), True, 'import numpy as np\n'), ((7186, 7199), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (7193, 7199), True, 'import numpy as np\n'), ((7270, 7291), 'numpy.random.randn', 'np.random.randn', (['f', '(1)'], {}), '(f, 1)\n', (7285, 7291), True, 'import numpy as np\n'), ((7375, 7392), 'numpy.matmul', 'np.matmul', (['x1', 'x2'], {}), '(x1, x2)\n', (7384, 7392), True, 'import numpy as np\n'), ((7769, 7789), 'numpy.linalg.norm', 'norm', (['(px - x)', 'np.inf'], {}), '(px - x, np.inf)\n', (7773, 7789), False, 'from numpy.linalg import cond, norm\n'), ((7805, 7825), 'numpy.linalg.norm', 'norm', (['(fx - x)', 'np.inf'], {}), '(fx - x, np.inf)\n', (7809, 7825), False, 'from numpy.linalg import cond, norm\n'), ((7857, 7881), 'numpy.linalg.norm', 'norm', (['(b - A @ px)', 'np.inf'], {}), '(b - A @ px, np.inf)\n', (7861, 7881), False, 'from numpy.linalg import cond, norm\n'), ((7893, 7917), 'numpy.linalg.norm', 'norm', (['(b - A @ fx)', 'np.inf'], {}), '(b - A @ fx, np.inf)\n', (7897, 7917), False, 'from numpy.linalg import cond, norm\n'), ((8838, 8857), 'scipy.linalg.toeplitz', 'toeplitz', (['diagonals'], {}), '(diagonals)\n', (8846, 8857), False, 'from scipy.linalg import toeplitz\n'), ((8931, 8948), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (8945, 8948), True, 'import numpy as np\n'), ((9070, 9091), 'numpy.random.randn', 'np.random.randn', (['f', '(1)'], {}), '(f, 1)\n', (9085, 9091), True, 'import numpy as np\n'), ((9782, 9793), 'time.time', 'time.time', ([], {}), '()\n', (9791, 9793), False, 'import time\n'), ((9970, 10008), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L', '(P @ u)'], {'lower': '(True)'}), '(L, P @ u, lower=True)\n', (9986, 10008), False, 'from scipy.linalg import solve_triangular\n'), ((10036, 10073), 'scipy.linalg.solve_triangular', 'solve_triangular', (['U', 'pp1'], 
{'lower': '(False)'}), '(U, pp1, lower=False)\n', (10052, 10073), False, 'from scipy.linalg import solve_triangular\n'), ((10171, 10209), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L', '(P @ b)'], {'lower': '(True)'}), '(L, P @ b, lower=True)\n', (10187, 10209), False, 'from scipy.linalg import solve_triangular\n'), ((10237, 10274), 'scipy.linalg.solve_triangular', 'solve_triangular', (['U', 'pp2'], {'lower': '(False)'}), '(U, pp2, lower=False)\n', (10253, 10274), False, 'from scipy.linalg import solve_triangular\n'), ((10457, 10468), 'time.time', 'time.time', ([], {}), '()\n', (10466, 10468), False, 'import time\n'), ((10524, 10544), 'numpy.linalg.norm', 'norm', (['(px - x)', 'np.inf'], {}), '(px - x, np.inf)\n', (10528, 10544), False, 'from numpy.linalg import cond, norm\n'), ((10562, 10586), 'numpy.linalg.norm', 'norm', (['(b - A @ px)', 'np.inf'], {}), '(b - A @ px, np.inf)\n', (10566, 10586), False, 'from numpy.linalg import cond, norm\n'), ((971, 981), 'numpy.copy', 'np.copy', (['A'], {}), '(A)\n', (978, 981), True, 'import numpy as np\n'), ((2102, 2117), 'numpy.tril', 'np.tril', (['LU', '(-1)'], {}), '(LU, -1)\n', (2109, 2117), True, 'import numpy as np\n'), ((2119, 2128), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (2125, 2128), True, 'import numpy as np\n'), ((2565, 2575), 'numpy.copy', 'np.copy', (['A'], {}), '(A)\n', (2572, 2575), True, 'import numpy as np\n'), ((3143, 3180), 'numpy.unravel_index', 'np.unravel_index', (['p', 'LU[i:, i:].shape'], {}), '(p, LU[i:, i:].shape)\n', (3159, 3180), True, 'import numpy as np\n'), ((4207, 4222), 'numpy.tril', 'np.tril', (['LU', '(-1)'], {}), '(LU, -1)\n', (4214, 4222), True, 'import numpy as np\n'), ((4224, 4233), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (4230, 4233), True, 'import numpy as np\n'), ((5863, 5878), 'numpy.linalg.norm', 'norm', (['A', 'np.inf'], {}), '(A, np.inf)\n', (5867, 5878), False, 'from numpy.linalg import cond, norm\n'), ((5912, 5927), 'numpy.linalg.cond', 'cond', (['A', 
'np.inf'], {}), '(A, np.inf)\n', (5916, 5927), False, 'from numpy.linalg import cond, norm\n'), ((7078, 7099), 'numpy.ones', 'np.ones', (['(size, size)'], {}), '((size, size))\n', (7085, 7099), True, 'import numpy as np\n'), ((7510, 7525), 'numpy.linalg.cond', 'cond', (['A', 'np.inf'], {}), '(A, np.inf)\n', (7514, 7525), False, 'from numpy.linalg import cond, norm\n'), ((9134, 9155), 'numpy.random.randn', 'np.random.randn', (['f', '(1)'], {}), '(f, 1)\n', (9149, 9155), True, 'import numpy as np\n'), ((9199, 9220), 'numpy.random.randn', 'np.random.randn', (['f', '(1)'], {}), '(f, 1)\n', (9214, 9220), True, 'import numpy as np\n'), ((9496, 9511), 'numpy.linalg.cond', 'cond', (['A', 'np.inf'], {}), '(A, np.inf)\n', (9500, 9511), False, 'from numpy.linalg import cond, norm\n'), ((9835, 9845), 'numpy.linalg.norm', 'norm', (['u', '(2)'], {}), '(u, 2)\n', (9839, 9845), False, 'from numpy.linalg import cond, norm\n'), ((9860, 9870), 'numpy.linalg.norm', 'norm', (['v', '(2)'], {}), '(v, 2)\n', (9864, 9870), False, 'from numpy.linalg import cond, norm\n'), ((9322, 9336), 'numpy.outer', 'np.outer', (['u', 'v'], {}), '(u, v)\n', (9330, 9336), True, 'import numpy as np\n'), ((3018, 3036), 'numpy.abs', 'np.abs', (['LU[i:, i:]'], {}), '(LU[i:, i:])\n', (3024, 3036), True, 'import numpy as np\n')] |
"""siunit - A module to support dimensioned arithmetic using the SI system.
Dimensioned numbers are instances of siunit.Dn(). Arithmetic between Dn's
with incompatible units raises TypeError. Arithmetic between Dn's with
compatible units produces a result with appropriate units. For example, ::
>>> m=Dn('3kg')
>>> a=Dn('2m/s^2')
>>> f=m*a
>>> print(f)
6.0N
"""
# MIT License
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from numbers import Number
from math import sqrt as math_sqrt
import re
try:
from numpy import array
except ImportError:
pass
class Unit:
    """Base class for base unit and derived unit definitions. Not directly
    instantiated.
    :param name: Full name of the unit.
    :param abbreviation: The official unit abbreviation. Used for display
        and parsing.
    :param quantifies: A string (no white space allowed) that describes the
        quantity measured by the unit.
    :param display_order: An integer controlling print-out order; lower
        values print first.
    """
    of = {}
    """Dictionary of all defined units. Each Unit instance will be inserted
    under three keys: name, abbreviation, and quantifies."""
    _all = set()
    """Set of all defined units."""
    _measureable = dict((quantifies, unit_index)
        for (unit_index, quantifies) in enumerate(
            ['length', 'mass', 'time', 'electric_current', 'luminousity',
             'temperature', 'amount_of_substance', 'angle']))
    """Defines the entities quantified by BaseUnits. Also defines the order
    in which they appear in Dimension() exponent vectors."""
    def __init__(self, name, abbreviation, quantifies, display_order):
        self.name = name
        self.abbreviation = abbreviation
        self.quantifies = quantifies
        self.display_order = display_order
        # Register under all three lookup keys. _irredundant() raises on a
        # collision; note that a collision on a later key leaves the earlier
        # keys already registered.
        self.of[self._irredundant(name)] = self
        self.of[self._irredundant(abbreviation)] = self
        self.of[self._irredundant(quantifies)] = self
        self._all.add(self)
    def _irredundant(self, ident):
        """Validates identifier as previously unused. Returns the
        identifier unchanged, or raises ValueError on a duplicate."""
        if ident in self.of:
            raise ValueError(' '.join([ident, 'already defined.']))
        else:
            return ident
    def __repr__(self):
        # Relies on the subclass supplying reprvals(); Unit itself does
        # not define one, so repr() of a bare Unit would raise.
        return ''.join(
            [self.__class__.__name__, '(', self.reprvals(), ')'])
class BaseUnit(Unit):
    """Defines an SI base unit.
    :param name: Full name of the unit.
    :param abbreviation: The official unit abbreviation. Used for display
        and parsing.
    :param quantifies: A string (no white space allowed) that describes the
        quantity measured by the unit.
    :param display_order: A integer that controls the print-out order
        for this unit for the __str__ method.. Lower display_order units
        print first.
    """
    # Registry of BaseUnit instances keyed by full name.
    named = {}
    # One slot per measurable quantity, filled in canonical exponent order.
    index_order = [None] * len(Unit._measureable)
    def __init__(self, name, abbreviation, quantifies, display_order):
        # Resolve the exponent-vector slot first so an unknown *quantifies*
        # fails before any registration side effects happen.
        unit_index = self._measureable[quantifies]
        super().__init__(name, abbreviation, quantifies, display_order)
        self.unit_index = unit_index
        self.named[name] = self
        self.index_order[Unit._measureable[self.quantifies]] = self
        # Pre-built Dimension with an exponent of 1 for this unit only.
        self._dimension = Dimension(**{self.name:1})
    def reprvals(self):
        # Argument list rendered by Unit.__repr__().
        return ', '.join([repr(x) for x in [self.name, self.abbreviation,
            self.quantifies, self.display_order]])
    @property
    def dimension(self):
        """Returns an instance of Dimension() representing this unit."""
        return self._dimension
    def gloss(self):
        """Glossary text for this unit."""
        return [self.name, self.abbreviation, self.quantifies, 'SI base unit']
class DerivedUnit(Unit):
    """Defines an SI derived unit.
    :param name: Full name of the unit.
    :param abbreviation: The official unit abbreviation. Used for display
        and parsing.
    :param quantifies: A string (no white space allowed) that describes the
        quantity measured by the unit.
    :param display_order: A integer that controls the print-out order
        for this unit. Lower display_order units print first.
    :param dimension: An instance of Dimension() that defines the derivation
        of this unit.
    """
    with_basis = {}
    """Dictionary of all DerivedUnit instances, indexed by a Dimension
    vector in tuple() form."""
    _factor_order = []
    """List of DerivedUnit instances in the prefered order for factoring
    out from complex dimension exponent vectors."""
    def __init__(self, name, abbreviation, quantifies, display_order,
            dimension):
        super().__init__(name, abbreviation, quantifies, display_order)
        # Copy-construct so this unit owns its own Dimension instance.
        self.dimension = Dimension(dimension)
        self.with_basis[tuple(self.dimension._exponents)] = self
    def reprvals(self):
        # Argument list rendered by Unit.__repr__().
        return ', '.join([repr(x) for x in [self.name, self.abbreviation,
            self.quantifies, self.display_order, self.dimension]])
    @classmethod
    def factor_order(cls):
        """Return the current preferred factoring order (list of
        DerivedUnit instances)."""
        return cls._factor_order
    @classmethod
    def set_factor_order(cls, derived_units):
        """Sets the order for factoring out instances of DerivedUnit
        from a complex Dimension exponent vector.
        :param derived_units: A list of DerivedUnit instances, in order
            of decreasing preference.
        """
        l = []
        for du in derived_units:
            if du in l:
                raise ValueError(' '.join([du.name,
                    'duplicated in factor_order.']))
            if not isinstance(du, cls):
                raise ValueError(' '.join([repr(du),
                    'is not an instance of',
                    cls.__name__]))
            l.append(du)
        cls._factor_order = l
    def occurs_in(self, other_exponents):
        """Count the numer of times this dimension occurs in some unit
        exponent vector.
        :param other_exponents: An exponent vector.
        :returns: A count of the number of occurances.
        """
        # This is not smart enough to tease appart units where
        # one derived unit contributes a positive exponent and
        # another contributes a negative exponent.
        occurances = []
        for self_exp, other_exp in zip(
                self.dimension._exponents, other_exponents):
            if self_exp == 0:
                continue # Not relevant.
            elif self_exp * other_exp < 0:
                return 0 # Different signs.
            elif abs(self_exp) <= abs(other_exp):
                occurances.append(other_exp // self_exp)
            else:
                return 0 # Too many.
        return min(occurances)
    def extract_from(self, exponents, count=None):
        """Extracts *count* occurances of *self's* exponent vector from
        the vector *exponents*.
        :param exponents: An exponent vector to be reduced.
        :param count: Optional. Number of occurances to extract. Default=1.
        :returns: An updated (reduced) exponent vector.
        """
        count = 1 if count is None else int(count)
        return [y - (count * x)
            for x, y in zip(self.dimension._exponents, exponents)]
    @classmethod
    def factor(cls, exponents):
        """Factors a list of exponents into list of (dimension, count) tuples.
        This prepares an exponent list for display in maximally-factored
        form.
        :param exponents: An exponent vector.
        :returns: List of tuples of the form: (Dimension, exponent)
        """
        factored = []
        residue = exponents
        # Greedily pull out derived units in preference order; whatever is
        # left is expressed in base units.
        for derived_unit in cls.factor_order():
            occurances = derived_unit.occurs_in(residue)
            if occurances:
                #print ('du:', derived_unit.name)
                factored.append((derived_unit, occurances))
                #print('r before:', residue)
                residue = derived_unit.extract_from(residue, occurances)
                #print('r after:', residue)
        dims = [(d, residue[d.unit_index]) for d in BaseUnit.index_order
            if residue[d.unit_index] != 0]
        factored.extend(dims)
        return factored
    def gloss(self):
        """Glossary text for this unit."""
        return [self.name, self.abbreviation, self.quantifies,
            self.dimension.basis()]
class Dimension:
    """The computed dimensions of a number. The dimensions are carried
    internally as a vector of exponents in a canonical order. The vector
    contains one integer per measureable, representing the exponent on a
    unit, in the order defined by Unit._measureable.
    For example: ::
        m*kg   is [1, 1, 0, 0, 0, 0, 0, 0]
        m^2    is [2, 0, 0, 0, 0, 0, 0, 0]
        m/s^2  is [1, 0, -2, 0, 0, 0, 0, 0]
    :param unit_spec: Can be one of:
        - Dimension() instance, which constructs a copy.
        - A string, which will be parsed, potentially raising an error.
        - An iterable of integer-ish things, which will be interpreted as an
          exponent vector.
        - Omitted.
    :param kwargs: Any BaseUnit.name can be a kwarg parameter. If unit_spec
        is omitted, a Dimension is constructed from kwargs. If neither
        unit_spec nor any kwargs are supplied, a "dimensionless" Dimension()
        is constructed.
    """
    # Note: _parser_re is created on first attempt to parse a unit string.
    # If any new units are added after the regular expression is cached,
    # the cached pattern will be invalid. YAGN invalidation?
    _parser_re = None
    "Cached re match pattern use to parse unit strings."
    _preferred = dict()
    """Dictionary of display strings keyed by exponent vector.
    Used to trap out preferred, ie: "natural" display strings for
    the __str__ function.
    """
    def __init__(self, unit_spec=None, **kwargs):
        # If unit_spec is an instance of Dimension, construct copy.
        if isinstance(unit_spec, Dimension):
            # Take a defensive copy so the two instances never share a
            # mutable exponent list (the original aliased it).
            self._exponents = list(unit_spec._exponents)
            return
        # If unit_spec is a string, then try to parse it.
        if isinstance(unit_spec, str):
            self._exponents = self.parse(unit_spec)._exponents
            return
        # If unit_spec is not none, expect a list of integers.
        if unit_spec is not None:
            if len(unit_spec) != len(Unit._measureable):
                raise ValueError(' '.join(['Length of exponent list is',
                    str(len(unit_spec)),
                    'but', str(len(Unit._measureable)), 'required.']))
            else:
                self._exponents = [int(x) for x in unit_spec]
                return
        # Create an empty exponent vector to fill with keywords params, or if
        # there are none, it will default to being a dimensionless quantity.
        self._exponents = [0] * len(Unit._measureable)
        for unit in kwargs:
            try:
                u = BaseUnit.named[unit]
            except KeyError:
                # BUGFIX: a failed dict lookup raises KeyError; the
                # original caught IndexError, so bad unit names escaped
                # as a raw KeyError instead of this ValueError.
                raise ValueError(' '.join([unit, 'is not a BaseUnit name.']))
            else:
                self._exponents[u.unit_index] = int(kwargs[unit])
    def __repr__(self):
        params = ', '.join(['='.join((nm,str(val)))
            for nm, val in [(u.name, self._exponents[u.unit_index])
                for u in BaseUnit.index_order] if val != 0])
        return ''.join([self.__class__.__name__, '(', params, ')'])
    @classmethod
    def prefer(cls, s, delete=None):
        """Add/update a preferred display string.
        :param s: A unit string. The exponent vector that it represents
            will always be display as *s*.
        :param delete: Optional. If truthy, *s* is deleted from the
            preferences dictionary.
        """
        dim = cls.parse(s) # May raise ValueError. Let caller handle.
        v = tuple(dim._exponents)
        if bool(delete):
            del cls._preferred[v]
        else:
            cls._preferred[v] = s
    def __str__(self):
        # See if there is a preferred display override.
        u = self._preferred.get(tuple(self._exponents), None)
        if u:
            return u
        # Look for an exact match to a derived unit for an easy win.
        u = DerivedUnit.with_basis.get(tuple(self._exponents), None)
        if u:
            return u.abbreviation
        # Convert to list of (Dimension, count) tuples, factoring out derived
        # units.
        dims = DerivedUnit.factor(self._exponents)
        # Prep for display: positive exponents in the numerator, negative
        # in the denominator, each sorted by display_order.
        numerator = sorted([(d, v) for d, v in dims if v > 0],
            key=lambda tup: tup[0].display_order)
        numerator = ''.join([
            '^'.join([d.abbreviation, str(v)]) if v > 1 else d.abbreviation
            for d, v in numerator])
        denominator = sorted([(d, v) for d, v in dims if v < 0],
            key=lambda tup: tup[0].display_order)
        denominator = ''.join([
            '^'.join([d.abbreviation, str(abs(v))]) if v < -1 else d.abbreviation
            for d, v in denominator])
        if numerator and denominator:
            return '/'.join([numerator, denominator])
        elif denominator:
            return '/'.join(['1', denominator])
        else:
            return numerator
    def basis(self):
        """Return a human-readabble description of the basis for
        this dimension.
        """
        bases = [(d, self._exponents[d.unit_index])
            for d in BaseUnit.index_order
            if self._exponents[d.unit_index] != 0]
        return ' '.join(
            [''.join([d.name, '^' + str(exp) if exp != 1 else ''])
             for d, exp in bases if exp != 0])
    @classmethod
    def parse(cls, s):
        """Parse *s* as unit abbreviations and exponents, potentially in
        simple fraction notation.
        :param s: Unit specification as a string. No white space.
        :returns: Dimension, or raises ValueError.
        """
        # This parser is too ugly to live long. But it seems to work. The
        # better strategy is probably to replace this kludgery with a proper
        # recursive-descent parser. But, for now.... meh.
        patt = cls._parser_pattern()
        # See if this is in fraction notation.
        f = s.split('/')
        if len(f) > 2:
            raise ValueError('Only simple fractions of units accepted.')
        numerator_str = f.pop(0)
        denominator_str = f.pop(0) if f else None
        # Tokenize the numerator into unit abbreviations and '^n' exponents.
        numl = []
        while numerator_str:
            m = patt.match(numerator_str)
            if not m:
                raise ValueError(' '.join(['Syntax error in:', numerator_str]))
            numl.append(m.group(0))
            numerator_str = numerator_str[m.end(0):]
        # Pair each abbreviation with its (optional) exponent.
        numl2 = []
        while numl:
            d = numl.pop(0)
            if numl and numl[0][0] == '^':
                n = numl.pop(0)
                n = int(n[1:])
            else:
                n = 1
            numl2.append((d, n))
        # Start construction with a dimensionless Dimension()
        rslt = Dimension()
        for abbr, exp in numl2:
            d = Unit.of[abbr].dimension
            if exp < 0:
                # Negative exponent: invert, then raise to |exp|.
                d = Dimension()/d
                exp = -exp
            d = pow(d, exp)
            rslt *= d
        if denominator_str:
            denom = cls.parse(denominator_str)
            rslt = rslt / denom
        return rslt
    @classmethod
    def _parser_pattern(cls):
        "Construct the re pattern from abbreviations for defined units."
        if cls._parser_re is None:
            # re needs to have longest match strings first, so sort
            # abbreviations by decreasing length.
            tokens = sorted(set([u.abbreviation for u in Unit.of.values()]),
                key=lambda s:len(s), reverse=True)
            # In addition to unit abbreviations, we need to look for unit
            # exponents of the form:
            #    up-caret, optional minus-sign, numbers,
            tokens.append(r'\^-?\d+')
            pattern = '|'.join(tokens)
            cls._parser_re = re.compile(''.join(['(', pattern, ')']))
        return cls._parser_re
    # adding/subtracting mismatched units is invalid.
    def __add__(self, other):
        if self == other:
            return self
        raise TypeError('Adding mismatched units.')
    def __sub__(self, other):
        if self == other:
            return self
        raise TypeError('Subtracting mismatched units.')
    # multiply units by adding exponents.
    def __mul__(self, other):
        return self.__class__(
            [x + y for x, y in zip(self._exponents, other._exponents)])
    def __matmul__(self, other):
        return self.__mul__(other)
    def _div(self, other):
        # Divide units by subtracting exponents.
        return self.__class__(
            [x - y for x, y in zip(self._exponents, other._exponents)])
    def __truediv__(self, other):
        return self._div(other)
    def __floordiv__(self, other):
        return self._div(other)
    # Comparision operations only make sense for values of same units.
    # The __eq__ and __ne__ operators are defined so that they can be
    # used to check for equivalence of units. The other comparison
    # operators raise.
    def __eq__(self, other):
        return min([self_exp == other_exp for self_exp, other_exp
            in zip(self._exponents, other._exponents)])
    def __ne__(self, other):
        return not self == other
    _invalid_comparison_msg = 'Dimensions can only be compared for equality.'
    def __lt__(self, other):
        raise TypeError(self._invalid_comparison_msg)
    def __gt__(self, other):
        raise TypeError(self._invalid_comparison_msg)
    def __le__(self, other):
        raise TypeError(self._invalid_comparison_msg)
    def __ge__(self, other):
        raise TypeError(self._invalid_comparison_msg)
    # For pow(), other must be a positive iteger. To raise a Dimension
    # to a power, multiply the current exponent by other.
    def __pow__(self, other):
        o = int(other)
        if o < 0 or o != other:
            raise TypeError(
                'pow() only supported for positive integer exponentiation.')
        exp = [e * o for e in self._exponents]
        return self.__class__(exp)
    def __imul__(self, other):
        self._exponents = [x + y
            for x, y in zip(self._exponents, other._exponents)]
        return self
    def root(self, n=None):
        """Implement n-th root for dimensions. All units must be evenly
        divisible by n.
        :param n: N-th root to take. Default==2.
        """
        n = 2 if n is None else int(n)
        # BUGFIX: the original guard was 'n < 0', which let n == 0 fall
        # through to a ZeroDivisionError in the modulo below.
        if n <= 0:
            raise ValueError('Can only take positive roots of dimensions.')
        fail = max([e % n for e in self._exponents])
        if fail:
            # BUGFIX: the original joined two adjacent string literals with
            # no separating space ("requiresall") and misspelled "divisible".
            raise ValueError(
                'root(n) requires all dimension exponents to be divisible by n.')
        return self.__class__([e // n for e in self._exponents])
# Define the base units for the SI system.
# Instantiating each BaseUnit registers it in Unit.of, BaseUnit.named and
# BaseUnit.index_order as an import-time side effect.
base_units = [
    BaseUnit('meter', 'm', 'length', 20),
    BaseUnit('kilogram', 'kg', 'mass', 10),
    BaseUnit('second', 's', 'time', 30),
    BaseUnit('ampere', 'A', 'electric_current', 40),
    BaseUnit('kelvin', 'K', 'temperature', 50),
    BaseUnit('mole', 'mol', 'amount_of_substance', 60),
    BaseUnit('candela', 'cd', 'luminousity', 70),
    BaseUnit('radian', 'rad', 'angle', 80),
]
"""The canonical list of SI base units."""
# Import-time self-check: Make sure that all base units are accounted for.
assert(len([x for x in BaseUnit.index_order if x is None]) == 0)
# Define some derived units for the SI system.
# NOTE(review): 'pascal' and 'joule' both use display_order 130 -- confirm
# whether the tie is intentional.
derived_units = [
    DerivedUnit('coulomb', 'C', 'charge', 90, Dimension(second=1, ampere=1)),
    DerivedUnit('hertz', 'Hz', 'frequency', 100, Dimension(second=-1)),
    DerivedUnit('newton', 'N', 'force', 110,
        Dimension(kilogram=1, meter=1, second=-2)),
    DerivedUnit('pascal', 'Pa', 'pressure', 130,
        Dimension(kilogram=1, meter=-1, second=-2)),
    DerivedUnit('joule', 'J', 'energy', 130,
        Dimension(kilogram=1, meter=2, second=-2)),
    DerivedUnit('watt', 'W', 'power', 140,
        Dimension(kilogram=1, meter=2, second=-3)),
    DerivedUnit('volt', 'V', 'electromotive_force', 150,
        Dimension(kilogram=1, meter=2, second=-3, ampere=-1)),
    DerivedUnit('farad', 'F', 'capacitance', 160,
        Dimension(kilogram=-1, meter=-2, second=4, ampere=2)),
    DerivedUnit('ohm', 'Ohm', 'resistance', 170,
        Dimension(kilogram=1, meter=2, second=-3, ampere=-2)),
    DerivedUnit('siemens', 'S', 'conductance', 180,
        Dimension(kilogram=-1, meter=-2, second=3, ampere=2)),
    DerivedUnit('weber', 'Wb', 'magnetic_flux', 190,
        Dimension(kilogram=1, meter=2, second=-2, ampere=-1)),
    DerivedUnit('tesla', 'T', 'magnetic_flux_density', 200,
        Dimension(kilogram=1, second=-2, ampere=-1)),
    DerivedUnit('henry', 'H', 'inductance', 210,
        Dimension(kilogram=1, meter=2, second=-2, ampere=-2)),
    DerivedUnit('lux', 'lx', 'illuminance', 220,
        Dimension(meter=-2, candela=1)),
    DerivedUnit('katal', 'kat', 'catalytic_activity', 230,
        Dimension(mole=1, second=-1)),
]
"""SI derived units."""
# FIXME: Design/implement a decent default factoring order.
# Set up the order to factor out derived units.
DerivedUnit.set_factor_order([
    Unit.of['W'],
    Unit.of['N'],
])
# FIXME: Implement a default set of preferences, as necessary.
# Set up display traps for things that come out strangely using the canonical
# conversion in __str__().
#Dimension.prefer('Ohm/m^2')
class Dn:
    """Dimensioned number. The numeric parameter accepts any type, and passes
    through arithmetic operations. This allows smooth operation with complex
    numbers, numpy arrays, etc.
    :param n: A number-ish thing . Can be a numeric value, a Numpy array, or
        if a string an attempt will be made to parse it as a number with
        units. If the string is *only* units, it constructs a dimensioned
        value of 1.
    :param units: Omitted when *n* is a string, as the units are taken from
        parsing the string. Otherwise, if *n* is not a string and *units*
        is omitted, a dimensionless instance of Dimension() is used. Otherwise
        *units* will be passed to the Dimension() constructor.
    """
    _mismatched_units_message = 'Comparing values with different units.'
    _unsupported_op_message = 'operation unsupported for dimensioned numbers.'
    def __init__(self, n, units=None):
        # A string argument carries both the value and its units.
        if isinstance(n, str):
            n, units = self.parse(n)
        self.n = n
        # TODO: So.... do I want to eliminate the redunant copy constructor here?
        self.units = Dimension() if units is None else Dimension(units)
    def __repr__(self):
        params = ', '.join([repr(x) for x in [self.n, self.units]])
        return ''.join([self.__class__.__name__, '(', params, ')'])
    def __str__(self):
        # Value immediately followed by its unit string, e.g. '6.0N'.
        return ''.join([str(x) for x in [self.n, self.units]])
    @classmethod
    def parse(cls, s):
        """Parse a string into tuple of number and Dimension instance. The
        string must parse completely, or else this will raise. Parse makes
        some assumptions about type: If the string starts with a '(', the
        expression is passed to the complex() constructor. If the string
        starts with a '[', the expression is passed to the numpy.array()
        constructor. Otherwise, it is passed to the float() constructor.
        :param s: A string to be parsed.
        :returns: A tuple of (Number, Dimension)
        """
        if s[0] in '([':
            "split point is balancing close bracket"
            p, n = 1, 1
            while n:
                if s[p] in '([':
                    n += 1
                elif s[p] in ')]':
                    n -= 1
                p += 1
        else:
            "split point is first char not part of number."
            p = 0
            # NOTE(review): a string that is entirely numeric (no unit
            # suffix, e.g. '3') runs p past the end of s and raises
            # IndexError here -- TODO confirm units are always present.
            while s[p] in '+-0123456789.eE':
                p += 1
        num, dim = s[0:p], s[p:]
        if not len(num):
            number = 1
        elif num[0] == '[':
            # NOTE(review): numpy.array('[1, 2]') builds a 0-d string
            # array, not numeric data; parsing a numeric list from this
            # literal likely needs ast.literal_eval first -- TODO confirm.
            number = array(num)
        elif num[0] == '(':
            number = complex(num)
        else:
            number = float(num)
        dimension = Dimension.parse(dim)
        return number, dimension
    def _maybe_promote(self, n):
        """If n is dimensionless numeric, turn it into a dimensionless Dn."""
        return self.__class__(n, Dimension()) if isinstance(n, Number) else n
    # Binary arithmetic: the numeric parts combine directly; the Dimension
    # operators enforce unit compatibility (raising TypeError on mismatch).
    def __add__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(self.n + other.n, self.units + other.units)
    def __sub__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(self.n - other.n, self.units - other.units)
    def __mul__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(self.n * other.n, self.units * other.units)
    def __matmul__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(self.n @ other.n, self.units @ other.units)
    def __truediv__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(self.n / other.n, self.units / other.units)
    def __floordiv__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(self.n // other.n, self.units // other.units)
    def __mod__(self, other):
        raise TypeError(' '.join(['mod', self._unsupported_op_message]))
    def __divmod__(self, other):
        raise TypeError(' '.join(['divmod', self._unsupported_op_message]))
    def __pow__(self, other, modulo=None):
        if modulo is not None:
            raise TypeError(' '.join(['pow() with modulo',
                self._unsupported_op_message]))
        # Dimension.__pow__ rejects negative or non-integral exponents.
        o = int(other)
        return self.__class__(self.n ** o, self.units ** o)
    # Comparisions only make sense for Dn()'s with same units.
    def __eq__(self, other):
        other = self._maybe_promote(other)
        if self.units == other.units:
            return self.n == other.n
        raise TypeError(self._mismatched_units_message)
    def __ne__(self, other):
        other = self._maybe_promote(other)
        if self.units == other.units:
            return self.n != other.n
        raise TypeError(self._mismatched_units_message)
    def __lt__(self, other):
        other = self._maybe_promote(other)
        if self.units == other.units:
            return self.n < other.n
        raise TypeError(self._mismatched_units_message)
    def __gt__(self, other):
        other = self._maybe_promote(other)
        if self.units == other.units:
            return self.n > other.n
        raise TypeError(self._mismatched_units_message)
    def __le__(self, other):
        other = self._maybe_promote(other)
        if self.units == other.units:
            return self.n <= other.n
        raise TypeError(self._mismatched_units_message)
    def __ge__(self, other):
        other = self._maybe_promote(other)
        if self.units == other.units:
            return self.n >= other.n
        raise TypeError(self._mismatched_units_message)
    # Reflected operators: promote the left operand and apply the same rules.
    def __radd__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(other.n + self.n, other.units + self.units)
    def __rsub__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(other.n - self.n, other.units - self.units)
    def __rmul__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(other.n * self.n, other.units * self.units)
    def __rmatmul__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(other.n @ self.n, other.units @ self.units)
    def __rtruediv__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(other.n / self.n, other.units / self.units)
    def __rfloordiv__(self, other):
        other = self._maybe_promote(other)
        return self.__class__(other.n // self.n, other.units // self.units)
    # Unary operators: act on the value, keep the units.
    def __neg__(self):
        return self.__class__(-self.n, self.units)
    def __pos__(self):
        # FIXME: Is it really necessary to call a copy constructor here???
        # Verify and simplify if possible.
        return self.__class__(self.n, self.units)
    def __abs__(self):
        return self.__class__(abs(self.n), self.units)
    # In-place operators: mutate both the units and the value.
    def __iadd__(self, other):
        other = self._maybe_promote(other)
        self.units += other.units
        self.n += other.n
        return self
    def __isub__(self, other):
        other = self._maybe_promote(other)
        self.units -= other.units
        self.n -= other.n
        return self
    def __imul__(self, other):
        other = self._maybe_promote(other)
        self.units *= other.units
        self.n *= other.n
        return self
    def __imatmul__(self, other):
        other = self._maybe_promote(other)
        self.units @= other.units
        self.n @= other.n
        return self
    def __itruediv__(self, other):
        other = self._maybe_promote(other)
        self.units /= other.units
        self.n /= other.n
        return self
    def __ifloordiv__(self, other):
        other = self._maybe_promote(other)
        self.units //= other.units
        self.n //= other.n
        return self
    def sqrt(self):
        """Implement sqrt() for dimensioned numbers."""
        return self.__class__(math_sqrt(self.n), self.units.root())
    def root(self, n):
        """Implement n-th root for dimensioned numbers."""
        n = int(n)
        return self.__class__(pow(self.n, float(1.0/n)), self.units.root(n))
# Exported functions
def prefer(dimension_str, delete=None):
    """Set an override string to display a complex dimension as
    explicitly specified.
    :param dimension_str: The dimension as a string.
    :param delete: Optional (new, backward-compatible). If truthy, remove
        *dimension_str* from the preferences instead of adding it; this
        exposes the *delete* flag Dimension.prefer() already supports.
    """
    Dimension.prefer(dimension_str, delete)
def sqrt(dimensioned_number):
    """Square root of a dimensioned number.

    Every dimension exponent must be divisible by 2; math.sqrt() is
    applied to the numeric part. Simply delegates to Dn.sqrt().
    :param dimensioned_number: A Dn() instance.
    :returns: A Dn() instance.
    """
    return dimensioned_number.sqrt()
def root(dimensioned_number, n):
    """N-th root of a dimensioned number.

    Every dimension exponent must be divisible by *n*; the numeric part
    is raised to the power 1.0/float(n). Simply delegates to Dn.root().
    :param dimensioned_number: A Dn() instance.
    :param n: A positive integer.
    :returns: A Dn() instance.
    """
    return dimensioned_number.root(n)
def unit_of(s):
    """Look up the unit registered under *s*, or raise KeyError.
    :param s: A unit name, unit abbrevation, or unit description.
    :returns: An instance of BaseUnit() or DerivedUnit().
    """
    unit = Unit.of[s]
    return unit
def set_factor_order(derived_unit_list):
    """Set the prefered factoring order for attempting to find
    derived units withing a complex dimension.
    :param derived_unit_list: A list of derived unit names and/or
        abbreviations.
    :raises ValueError: if an entry does not name a DerivedUnit, or
        appears more than once.
    """
    du = []
    for s in derived_unit_list:
        u = Unit.of[s]
        if not isinstance(u, DerivedUnit):
            raise ValueError(' '.join([s, 'is not a DerivedUnit.']))
        if u in du:
            # BUGFIX: str.join takes a single iterable; the original
            # passed two positional arguments, so a duplicate entry
            # raised TypeError instead of this ValueError.
            raise ValueError(' '.join([s, 'duplicated.']))
        du.append(u)
    DerivedUnit.set_factor_order(du)
def unit_definitions():
"""Convenience function to get the set of all unit definitions.
:returns: The set of unit definitions.
"""
return Unit._all
__all__ = ['Dn', 'sqrt', 'root', 'unit_of', 'set_factor_order',
'unit_definitions'] | [
"numpy.array",
"math.sqrt"
] | [((30602, 30619), 'math.sqrt', 'math_sqrt', (['self.n'], {}), '(self.n)\n', (30611, 30619), True, 'from math import sqrt as math_sqrt\n'), ((25248, 25258), 'numpy.array', 'array', (['num'], {}), '(num)\n', (25253, 25258), False, 'from numpy import array\n')] |
# coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import matplotlib.pyplot as plt
import numpy as np
plt.figure()
# 定义从-pi到pi之间的数据,平均取64个数据点
x_data = np.linspace(-np.pi, np.pi, 64, endpoint=True) # ①
# 将整个figure分成两行两列,第三个参数表示该图形放在第1个网格
# 沿着正弦曲线绘制散点图
plt.scatter(x_data, np.sin(x_data), c='purple', # 设置点的颜色
#plt.scatter(x_data, np.sin(x_data), cmap=plt.get_cmap('rainbow'), # 设置点的颜色
s=50, # 设置点半径
alpha = 0.5, # 设置透明度
marker='p', # 设置使用五边形标记
linewidths=1, # 设置边框的线宽
edgecolors=['green', 'yellow']) # 设置边框的颜色
# 绘制第二个散点图(只包含一个起点),突出起点
plt.scatter(x_data[0], np.sin(x_data)[0], c='red', # 设置点的颜色
s=150, # 设置点半径
alpha = 1) # 设置透明度
# 绘制第三个散点图(只包含一个结束点),突出结束点
plt.scatter(x_data[63], np.sin(x_data)[63], c='black', # 设置点的颜色
s=150, # 设置点半径
alpha = 1) # 设置透明度
plt.gca().spines['right'].set_color('none')
plt.gca().spines['top'].set_color('none')
plt.gca().spines['bottom'].set_position(('data', 0))
plt.gca().spines['left'].set_position(('data', 0))
plt.title('正弦曲线的散点图')
plt.show() | [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1106, 1118), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1116, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1202), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(64)'], {'endpoint': '(True)'}), '(-np.pi, np.pi, 64, endpoint=True)\n', (1168, 1202), True, 'import numpy as np\n'), ((2008, 2029), 'matplotlib.pyplot.title', 'plt.title', (['"""正弦曲线的散点图"""'], {}), "('正弦曲线的散点图')\n", (2017, 2029), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2041), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2039, 2041), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1295), 'numpy.sin', 'np.sin', (['x_data'], {}), '(x_data)\n', (1287, 1295), True, 'import numpy as np\n'), ((1595, 1609), 'numpy.sin', 'np.sin', (['x_data'], {}), '(x_data)\n', (1601, 1609), True, 'import numpy as np\n'), ((1729, 1743), 'numpy.sin', 'np.sin', (['x_data'], {}), '(x_data)\n', (1735, 1743), True, 'import numpy as np\n'), ((1814, 1823), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1821, 1823), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1868), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1911), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1909, 1911), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1965), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1963, 1965), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) 2019 <NAME> (<EMAIL>)
"""
@author: <NAME>
base class for CG models and solvers
"""
import abc
from typing import Iterable
import numpy as np
#from cgmodsel.models.model_base import get_modeltype
from cgmodsel.models.model_pwsl import ModelPWSL
from cgmodsel.models.model_pw import ModelPW
from cgmodsel.utils import grp_soft_shrink, l21norm
# pylint: disable=W0511 # todos
# pylint: disable=R0914 # too many locals
DUMMY = 'dummy'
DUMMY_RED = 'dummy_red'
INDEX = 'index'
FLAT = 'flat'
def set_sparsity_weights(meta, cat_data, cont_data):
""" use adjusted weights for all groups as suggested by LST2015
(may be essential for "good", "consistent" results)"""
n_data = meta['n_data']
n_cg = meta['n_cg']
n_cat = meta['n_cat']
dim = n_cg + n_cat
# CG variables
mus = cont_data.sum(axis=0) / n_data
sigmas_cg = np.sqrt((cont_data**2).sum(axis=0) / n_data - mus**2)
# categoricals
sigmas_cat = np.empty(n_cat)
freqs = cat_data.sum(axis=0) / n_data
for r in range(n_cat):
sigma_r = 0
for k in range(meta['sizes'][r]):
p_xr_k = freqs[meta['cat_glims'][r] +
k] # relative probability that x_r has value k
sigma_r += p_xr_k * (1 - p_xr_k)
sigmas_cat[r] = np.sqrt(sigma_r)
weights = np.zeros((dim, dim))
for r in range(n_cat):
for j in range(r):
weights[r, j] = sigmas_cat[r] * sigmas_cat[j]
for s in range(n_cg):
weights[n_cat + s, r] = sigmas_cat[r] * sigmas_cg[s]
for j in range(n_cg):
for i in range(j):
weights[n_cat + j, n_cat + i] = sigmas_cg[j] * sigmas_cg[i]
weights += weights.T
# TODO(franknu): weights on diagonal
return weights
###############################################################################
# base class for all CG model solvers
###############################################################################
class BaseCGSolver(abc.ABC):
"""
base class for all CG model solver
provides external interface to drop data along with meta information
about this data
"""
def __init__(self):
"""must call method drop_data after initialization"""
super().__init__()
self.cat_data = None # discrete data, dropped later
self.cat_format_required = None # must override
self.cont_data = None # continuous data, dropped later
# self.problem_vars = None #TODO(franknu)
self.meta = {'n_data': 0}
if not hasattr(self, 'opts'):
# since this may already be defined from other base classes
self.opts = {}
self.name = 'base'
def _postsetup_data(self):
"""called after drop_data"""
# may be overridden in derived classes
# no pass because function has doc string
def drop_data(self, data, meta: dict) -> None:
"""drop data, derived classes may perform additional computations
uses and augments information contained in meta about the data
categorical data must be provided in dummy-encoded form
(leaving out 0-th levels if required by the solver)"""
# process argument data
if isinstance(data, tuple):
assert len(data) == 2
cat_data, cont_data = data
else:
counter = 0
if 'n_cat' in meta and meta['n_cat'] > 0:
counter += 1
cat_data = data
cont_data = np.empty((data.shape[0], 0))
if 'n_cg' in meta and meta['n_cg'] > 0:
counter += 1
cont_data = data
cat_data = np.empty((data.shape[0], 0))
assert counter == 1, 'dictionary meta incompatible with provided data'
self.cont_data = cont_data
self.cat_data = cat_data
self.meta = {}
for key in ('n_cg', 'n_cat'):
if key in meta:
self.meta[key] = meta[key]
else:
meta[key] = 0
if self.meta['n_cat'] > 0:
assert 'sizes' in meta
assert len(meta['sizes']) == meta['n_cat']
self.meta['sizes'] = meta['sizes']
# continue checking validity of meta
if self.meta['n_cg'] > 0:
assert not np.any(np.isnan(cont_data))
assert meta['n_cg'] == cont_data.shape[1]
self.meta['n_data'] = cont_data.shape[0]
if self.meta['n_cat'] == 0:
self.meta['sizes'] = [] # no discrete variables
self.meta['ltot'] = 0
self.meta['red_levels'] = False # value irrelevant, no cat vars
self.meta['cat_glims'] = [0]
else:
if 'n_data' in self.meta:
assert self.meta['n_data'] == cat_data.shape[0]
else:
self.meta['n_data'] = cat_data.shape[0]
ltot = np.sum(meta['sizes'])
if self.cat_format_required == DUMMY:
assert ltot == cat_data.shape[1]
# 0-th levels of the discrete data are contained
# for identifiability, assume that corresponding
# parameters are constrained to zero
self.meta['red_levels'] = False
elif self.cat_format_required == DUMMY_RED:
assert ltot - meta['n_cat'] == cat_data.shape[1]
# assume that 0-th levels are left out in discrete data
# assures identifiability of the model
self.meta['red_levels'] = True
self.meta['sizes'] = [size - 1 for size in meta['sizes']]
elif self.cat_format_required == INDEX:
assert meta['n_cat'] == cat_data.shape[1]
elif self.cat_format_required == FLAT: # MAP solver
assert len(cat_data.shape) == 1
else:
raise Exception('invalid self.cat_format_required')
if self.cat_format_required in (DUMMY, DUMMY_RED):
self.meta['ltot'] = cat_data.shape[1]
self.meta['dim'] = self.meta['ltot'] + self.meta['n_cg']
# calculate cumulative # of levels/ group delimiters
self.meta['cat_glims'] = np.cumsum([0] + self.meta['sizes'])
self.meta['glims'] = list(self.meta['cat_glims']) + \
[1 + self.meta['ltot'] + s for s in range(self.meta['n_cg'])]
# TODO(franknu): self.meta['glims'] for sparse reg only
self.meta['nonbinary'] = (self.meta['ltot'] > self.meta['n_cat'] *
(2 - self.meta['red_levels']))
self.meta['n_catcg'] = self.meta['n_cat'] + self.meta['n_cg']
# self.meta['type'] = get_modeltype(self.n_cat, self.n_cg, self.sizes)
# fac = np.log(self.meta['n_cg'] + self.meta['n_cat'])
fac = np.sqrt(np.log(self.meta['n_catcg']) / self.meta['n_data'])
self.meta['reg_fac'] = fac # potentially used as prescaling factor
# for regularization parameters
self._postsetup_data()
def get_name(self):
"""return model name"""
return self.name
class BaseSparseSolver(BaseCGSolver):
"""
base class that contains proximal operators
"""
def __init__(self, useweights=False):
super().__init__()
self.opts.setdefault('off', 0) # if 1 regularize only off-diagonal
# model options # TODO(franknu): find better place
self.opts.setdefault('use_alpha', 1)
self.opts.setdefault('use_u', 1)
self.useweights = useweights
self.weights = None
self.name = 'base-sparse'
def _postsetup_data(self):
"""called after drop_data"""
if self.useweights:
self.weights = set_sparsity_weights(self.meta,
self.cat_data,
self.cont_data)
else:
self.weights = None
def shrink(self, mat_s, tau):
"""return (group)- soft shrink of matrix mat_s with tau """
if self.meta['nonbinary']:
return grp_soft_shrink(mat_s, tau,
self.meta['n_cat'] + self.meta['n_cg'],
self.meta['glims'],
self.opts['off'],
weights=self.weights)
return grp_soft_shrink(mat_s,
tau,
off=self.opts['off'],
weights=self.weights)
def sparse_norm(self, mat_s):
"""return l21/ l1-norm of mat_s"""
if self.meta['nonbinary']:
return l21norm(mat_s,
self.meta['n_cat'] + self.meta['n_cg'],
self.meta['glims'],
self.opts['off'],
weights=self.weights)
return l21norm(mat_s, off=self.opts['off'], weights=self.weights)
class BaseGradSolver(abc.ABC):
"""
Base solver for iterative (scipy L-BFGS-B) solvers
provides with methods to pack/unpack parameters into vector
"""
def __init__(self):
# print('Init BaseCGSolver')
super().__init__()
self.shapes = None
self.n_params = None
# self.problem_vars = None
if not hasattr(self, 'opts'):
# should already be defined by other class
self.opts = {}
self._set_defaults()
def _set_defaults(self):
"""default solver options"""
self.opts.setdefault('verb', 1) # write output
## objective variants
self.opts.setdefault('use_alpha', 1) # use univariate cts parameters?
self.opts.setdefault('use_u', 1) # use univariate discrete parameters?
self.opts.setdefault('off', 0) # if 1 regularize only off-diagonal
## stopping criteria and tolerancies
# self.opts.setdefault('abstol', 1e-5)
# self.opts.setdefault('reltol', 1e-5)
self.opts.setdefault('tol', 1e-12)
self.opts.setdefault('maxiter', 500)
# self.opts.setdefault('useweights', False)
# self.opts.setdefault('maxrank', -1)
# @abc.abstractmethod
# def get_bounds(self):
# """get bounds"""
# raise NotImplementedError # deferred to BaseHuber class
@abc.abstractmethod
def get_fval_and_grad(self, optvars, verb=0, **kwargs):
"""calculate function value and gradient for solver"""
raise NotImplementedError
def get_params(self, optvars):
"""a function to display the problem parameters"""
params = self.unpack(optvars)
for i, param in enumerate(params):
print('%s:\n' % self.shapes[i][0], param)
return params
def pack(self, components):
"""pack (typically) gradients into vector x"""
grad = np.empty(self.n_params)
offset = 0
for i, component in enumerate(components):
size = np.prod(self.shapes[i][1])
# print(self.shapes[i][0], size, np.prod(component.shape))
assert size == np.prod(component.shape)
grad[offset:offset + size] = component.flatten() # row-wise
offset += size
return grad
def unpack(self, x) -> Iterable[np.ndarray]:
"""unpack model parameters from vector x, save: returns copy"""
offset = 0
params = []
xcopy = x.copy() # allows modifying the copy without modifying x
for _, shapedim in self.shapes:
tmp = np.prod(shapedim)
params.append(xcopy[offset:offset + tmp].reshape(shapedim))
offset += tmp
return params
class BaseSolverSL(BaseSparseSolver):
"""
base class for S+L model solvers
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.alpha, self.beta = None, None
self.lbda, self.rho = None, None
self.problem_vars = None
def __str__(self):
string = '<ADMMsolver> la=%s' % (self.lbda) + ', rho=%s' % (self.rho)
string += ', alpha=%s' % (self.alpha) + ', beta=%s' % (self.beta)
string += ', use_alpha=%d' % (self.opts.setdefault('use_alpha', 1))
string += ', use_u=%d' % (self.opts.setdefault('use_u', 1))
string += ', off=%d' % (self.opts.setdefault('off', 1))
return string
def get_canonicalparams(self):
"""Retrieves the PW S+L CG model parameters from flat parameter vector.
output: Model_PWSL instance"""
mat_s, mat_l, alpha = self.problem_vars
ltot = self.meta['ltot']
mat_lambda = -mat_s[ltot:, ltot:] # cts-cts parameters
# have negative sign in CG pairwise interaction parameter matrix
if self.meta['n_cat'] > 0:
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
mat_q = mat_s[:ltot, :ltot]
mat_r = mat_s[ltot:, :ltot]
vec_u = 0.5 * np.diag(mat_q).copy().reshape(ltot)
for r in range(self.meta['n_cat']): # set block-diagonal to zero
mat_q[glims[r]:glims[r+1],
glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
if self.meta['red_levels']:
fullsizes = [size + 1 for size in sizes]
else:
fullsizes = sizes
else:
mat_q = np.empty(0)
mat_r = np.empty(0)
vec_u = np.empty(0)
fullsizes = []
can_pwsl = vec_u, mat_q, mat_r, alpha, mat_lambda, mat_l
annotations = {
'n': self.meta['n_data'],
'lambda': self.lbda,
'rho': self.rho
}
meta = {
'n_cat': self.meta['n_cat'],
'n_cg': self.meta['n_cg'],
'sizes': fullsizes
}
return ModelPWSL(can_pwsl,
meta,
annotations=annotations,
in_padded=False)
def get_regularization_params(self):
"""get regularization parameters"""
return self.lbda, self.rho
def set_regularization_params(self,
hyperparams,
scales=None,
set_direct=False,
ptype: str = 'std') -> None:
"""set regularization parameters
hyperparams ... pair of regularization parameters
ptype ... if 'std',
set lambda, rho = hyperparams * scaling(n, nvars), where
the parameters are for the problem
min l(S-L) + lambda * ||S||_1 + rho * tr(L)
s.t. S-L>0, L>=0
Here, scaling(n, nvars) is a scaling suggested by
consistency results
Argument <scales> is not used in this case!
if 'direct', directly set lambda, rho = hyperparams
if 'convex' assume that alpha, beta = hyperparams and
alpha, beta are weights in [0,1] and the problem is
min (1-alpha-beta) * l(S-L) + alpha * ||S||_1 + beta * tr(L)
s.t. S-L>0, L>=0
In addition to the specified regularization parameters,
the regularization parameters can be scaled by a fixed value (depending
on the number of data points and variables):
scales ... if None, use standard scaling np.sqrt(log(dg)/n)
else scales must be a two-tuple, and lambda and rho are
scaled according to the elements of this two-tuple
"""
assert len(hyperparams) == 2
assert hyperparams[0] >= 0 and hyperparams[1] >= 0
if ptype == 'std':
# standard regularization parameters
# first for l21, second for nuclear norm
self.lbda, self.rho = hyperparams
elif ptype == 'convex':
alpha, beta = hyperparams
# assert alpha + beta <= 1
assert alpha + beta < 1, "must contain likelihood part"
self.alpha = alpha
self.beta = beta
denom = 1 - alpha - beta
if denom != 0:
self.lbda = alpha / denom
self.rho = beta / denom
# else:
# # no likelihood part
# self.lbda, self.rho = 0, 0
else:
raise Exception('unknown ptype')
if not set_direct:
if not scales is None:
scale_lbda, scale_rho = scales
else:
assert self.meta['n_data'] > 0, \
"data-dependent scaling, drop data first"
# calculate prescaling factor for the regularization parameters
# based on consistency analysis by Chandrasekaran et. al (2010)
# assert 'reg_fac' in self.meta
scale_lbda = self.meta['reg_fac']
scale_rho = self.meta['reg_fac']
self.lbda *= scale_lbda
self.rho *= scale_rho
class BaseSolverPW(BaseSparseSolver):
"""
base class for sparse graphical model solvers
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.alpha = None
self.lbda = None
self.problem_vars = None
def __str__(self):
string = '<ADMMsolver> la=%s' % (self.lbda)
string += ', use_alpha=%d' % (self.opts.setdefault('use_alpha', 1))
string += ', use_u=%d' % (self.opts.setdefault('use_u', 1))
string += ', off=%d' % (self.opts.setdefault('off', 1))
return string
def get_canonicalparams(self):
"""Retrieves the PW CG model parameters from flat parameter vector.
output: Model_PW instance"""
mat_s, alpha = self.problem_vars
ltot = self.meta['ltot']
mat_lambda = -mat_s[ltot:, ltot:] # cts-cts parameters
# have negative sign in CG pairwise interaction parameter matrix
if self.meta['n_cat'] > 0:
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
mat_q = mat_s[:ltot, :ltot]
mat_r = mat_s[ltot:, :ltot]
vec_u = 0.5 * np.diag(mat_q).copy().reshape(ltot)
for r in range(self.meta['n_cat']): # set block-diagonal to zero
mat_q[glims[r]:glims[r+1],
glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
if self.meta['red_levels']:
fullsizes = [size + 1 for size in sizes]
else:
fullsizes = sizes
else:
mat_q = np.empty(0)
mat_r = np.empty(0)
vec_u = np.empty(0)
fullsizes = []
can_params = vec_u, mat_q, mat_r, alpha, mat_lambda
annotations = {
'n': self.meta['n_data'],
'lambda': self.lbda,
}
meta = {
'n_cat': self.meta['n_cat'],
'n_cg': self.meta['n_cg'],
'sizes': fullsizes
}
return ModelPW(can_params,
meta,
annotations=annotations,
in_padded=False)
def get_regularization_params(self):
"""get regularization parameters"""
return self.lbda
def set_regularization_params(self, regparam, set_direct=False,
scale=None, ptype='std'):
"""set regularization parameters for
min l(S) + la*||S||_{2/1}
hyperparams ... pair of regularization parameters
ptype ... if 'std',
set lambda = regparam * scaling(n, nvars), where
Here, scaling(n, nvars) is a scaling suggested by
consistency results
Argument <scales> is not used in this case!
if 'direct', directly set lambda = regparam
if 'convex' assume that alpha = regparam and
min_S (1-alpha) * l(S) + alpha * ||S||_ {2,1}
In addition to the specified regularization parameter,
the regularization parameters can be scaled by a fixed value (depending
on the number of data points and variables):
scales ... if None, use standard scaling np.sqrt(log(dg)/n)
else scales must be a nonnegative number with which lambda
is scaled
"""
if ptype == 'std': # standard regularization parameter for l21-norm
self.lbda = regparam
elif ptype == 'convex': # convex hyperparams are assumed
self.alpha = regparam
assert self.alpha < 1, "must contain likelihood part"
denom = 1 - self.alpha
if denom != 0:
self.lbda = self.alpha/denom
else:
self.lbda = 0
else:
raise Exception('unknown ptype')
if not set_direct:
if scale is None:
assert self.meta['n_data'] > 0, \
"data-dependent scaling, drop data first"
scale = self.meta['reg_fac']
self.lbda *= scale
| [
"cgmodsel.models.model_pw.ModelPW",
"numpy.prod",
"numpy.sqrt",
"numpy.log",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"cgmodsel.utils.grp_soft_shrink",
"numpy.empty",
"numpy.isnan",
"cgmodsel.models.model_pwsl.ModelPWSL",
"cgmodsel.utils.l21norm",
"numpy.cumsum"
] | [((955, 970), 'numpy.empty', 'np.empty', (['n_cat'], {}), '(n_cat)\n', (963, 970), True, 'import numpy as np\n'), ((1328, 1348), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1336, 1348), True, 'import numpy as np\n'), ((1296, 1312), 'numpy.sqrt', 'np.sqrt', (['sigma_r'], {}), '(sigma_r)\n', (1303, 1312), True, 'import numpy as np\n'), ((8435, 8506), 'cgmodsel.utils.grp_soft_shrink', 'grp_soft_shrink', (['mat_s', 'tau'], {'off': "self.opts['off']", 'weights': 'self.weights'}), "(mat_s, tau, off=self.opts['off'], weights=self.weights)\n", (8450, 8506), False, 'from cgmodsel.utils import grp_soft_shrink, l21norm\n'), ((8970, 9028), 'cgmodsel.utils.l21norm', 'l21norm', (['mat_s'], {'off': "self.opts['off']", 'weights': 'self.weights'}), "(mat_s, off=self.opts['off'], weights=self.weights)\n", (8977, 9028), False, 'from cgmodsel.utils import grp_soft_shrink, l21norm\n'), ((10952, 10975), 'numpy.empty', 'np.empty', (['self.n_params'], {}), '(self.n_params)\n', (10960, 10975), True, 'import numpy as np\n'), ((13982, 14049), 'cgmodsel.models.model_pwsl.ModelPWSL', 'ModelPWSL', (['can_pwsl', 'meta'], {'annotations': 'annotations', 'in_padded': '(False)'}), '(can_pwsl, meta, annotations=annotations, in_padded=False)\n', (13991, 14049), False, 'from cgmodsel.models.model_pwsl import ModelPWSL\n'), ((19230, 19297), 'cgmodsel.models.model_pw.ModelPW', 'ModelPW', (['can_params', 'meta'], {'annotations': 'annotations', 'in_padded': '(False)'}), '(can_params, meta, annotations=annotations, in_padded=False)\n', (19237, 19297), False, 'from cgmodsel.models.model_pw import ModelPW\n'), ((4919, 4940), 'numpy.sum', 'np.sum', (["meta['sizes']"], {}), "(meta['sizes'])\n", (4925, 4940), True, 'import numpy as np\n'), ((6231, 6266), 'numpy.cumsum', 'np.cumsum', (["([0] + self.meta['sizes'])"], {}), "([0] + self.meta['sizes'])\n", (6240, 6266), True, 'import numpy as np\n'), ((8152, 8284), 'cgmodsel.utils.grp_soft_shrink', 'grp_soft_shrink', (['mat_s', 'tau', 
"(self.meta['n_cat'] + self.meta['n_cg'])", "self.meta['glims']", "self.opts['off']"], {'weights': 'self.weights'}), "(mat_s, tau, self.meta['n_cat'] + self.meta['n_cg'], self.\n meta['glims'], self.opts['off'], weights=self.weights)\n", (8167, 8284), False, 'from cgmodsel.utils import grp_soft_shrink, l21norm\n'), ((8732, 8850), 'cgmodsel.utils.l21norm', 'l21norm', (['mat_s', "(self.meta['n_cat'] + self.meta['n_cg'])", "self.meta['glims']", "self.opts['off']"], {'weights': 'self.weights'}), "(mat_s, self.meta['n_cat'] + self.meta['n_cg'], self.meta['glims'],\n self.opts['off'], weights=self.weights)\n", (8739, 8850), False, 'from cgmodsel.utils import grp_soft_shrink, l21norm\n'), ((11065, 11091), 'numpy.prod', 'np.prod', (['self.shapes[i][1]'], {}), '(self.shapes[i][1])\n', (11072, 11091), True, 'import numpy as np\n'), ((11639, 11656), 'numpy.prod', 'np.prod', (['shapedim'], {}), '(shapedim)\n', (11646, 11656), True, 'import numpy as np\n'), ((13524, 13535), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (13532, 13535), True, 'import numpy as np\n'), ((13556, 13567), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (13564, 13567), True, 'import numpy as np\n'), ((13588, 13599), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (13596, 13599), True, 'import numpy as np\n'), ((18805, 18816), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18813, 18816), True, 'import numpy as np\n'), ((18837, 18848), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18845, 18848), True, 'import numpy as np\n'), ((18869, 18880), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18877, 18880), True, 'import numpy as np\n'), ((3515, 3543), 'numpy.empty', 'np.empty', (['(data.shape[0], 0)'], {}), '((data.shape[0], 0))\n', (3523, 3543), True, 'import numpy as np\n'), ((3685, 3713), 'numpy.empty', 'np.empty', (['(data.shape[0], 0)'], {}), '((data.shape[0], 0))\n', (3693, 3713), True, 'import numpy as np\n'), ((6872, 6900), 'numpy.log', 'np.log', (["self.meta['n_catcg']"], 
{}), "(self.meta['n_catcg'])\n", (6878, 6900), True, 'import numpy as np\n'), ((11201, 11225), 'numpy.prod', 'np.prod', (['component.shape'], {}), '(component.shape)\n', (11208, 11225), True, 'import numpy as np\n'), ((13309, 13339), 'numpy.zeros', 'np.zeros', (['(sizes[r], sizes[r])'], {}), '((sizes[r], sizes[r]))\n', (13317, 13339), True, 'import numpy as np\n'), ((18590, 18620), 'numpy.zeros', 'np.zeros', (['(sizes[r], sizes[r])'], {}), '((sizes[r], sizes[r]))\n', (18598, 18620), True, 'import numpy as np\n'), ((4330, 4349), 'numpy.isnan', 'np.isnan', (['cont_data'], {}), '(cont_data)\n', (4338, 4349), True, 'import numpy as np\n'), ((13083, 13097), 'numpy.diag', 'np.diag', (['mat_q'], {}), '(mat_q)\n', (13090, 13097), True, 'import numpy as np\n'), ((18364, 18378), 'numpy.diag', 'np.diag', (['mat_q'], {}), '(mat_q)\n', (18371, 18378), True, 'import numpy as np\n')] |
import numpy as np
from scipy.special import j0 as BesselJ0, j1 as BesselJ1, jn as BesselJ
from scipy.optimize import root
def shoot_S1(central_value: float,
w: float,
R: np.ndarray,
coeffs: np.ndarray,
S_harmonics: np.ndarray = None) -> np.ndarray:
"""
Shoots S1 from the center, starting from a central_value and
zero-drivative.
Parameters
----------
central_value : the value of S1 at r=0
w : the frequency of the oscillon
R : the grid of radii
coeffs : the Fourier coefficients of the potential, normalized
S_harmonics : the in-phase perturbative radiative harmonics
Returns
-------
S1 : the values of S1 over the grid when shooting from the center
"""
S1 = np.empty_like(R)
S1[0], S1[1] = central_value, central_value
dr = R[1] - R[0]
if S_harmonics is None:
def f_(i):
return (
S1[i - 1] * w**2 - 2 *
(coeffs * BesselJ1(S1[i - 1] * np.arange(1,
len(coeffs) + 1)) /
np.arange(1,
len(coeffs) + 1)).sum())
else:
N_harmonics = S_harmonics.shape[0]
def f_(i):
return (
S1[i - 1] * w**2 - 2 *
(coeffs * BesselJ1(S1[i - 1] * np.arange(1,
len(coeffs) + 1)) /
np.arange(1,
len(coeffs) + 1)).sum() -
((coeffs[:, np.newaxis] * BesselJ(
2 * np.arange(0, N_harmonics, 1) + 2, S1[i - 1] *
np.arange(1,
len(coeffs) + 1, 1)[:, np.newaxis])).sum(axis=0)
* S_harmonics[:, i - 1]).sum() +
((coeffs[:, np.newaxis] * BesselJ(
2 * np.arange(0, N_harmonics, 1) + 4, S1[i - 1] *
np.arange(1,
len(coeffs) + 1, 1)[:, np.newaxis])).sum(axis=0)
* S_harmonics[:, i - 1]).sum())
for i in range(2, len(S1)):
S1[i] = (
2 * S1[i-1] -
dr**2 * f_(i) +
S1[i-2] * (2 * dr/(2*R[i-1]) - 1)) \
/ (2 * dr/(2*R[i-1]) + 1)
return S1
def initial_S1(w: float,
R: np.ndarray,
coeffs: np.ndarray,
S_harmonics: np.ndarray = None) -> np.ndarray:
"""
Defines the binary search procedure to find the initial condition which
shoots to zero at infinity.
"""
# find the value at the same potential energy as the zero-field. We know
# the true value will be slightly higher due to friction.
c = root(
lambda x: 0.5 * x**2 * w**2 + 2 * (coeffs * (BesselJ0(x * np.arange(
1,
len(coeffs) + 1)) - 1) / np.arange(1,
len(coeffs) + 1)**2).sum(),
10).x[0]
# define the left- and right-boundaries of the search and push
# these values apart until they have the appropriate signs:
left, right = c, c
left_condition = (shoot_S1(left, w, R, coeffs, S_harmonics)[-1] >= 0)
while not left_condition:
left = 0.95 * left
left_condition = (shoot_S1(left, w, R, coeffs, S_harmonics)[-1] >= 0)
right_condition = shoot_S1(right, w, R, coeffs, S_harmonics)[-1] < 0
while not right_condition:
right = 1.1 * right
right_condition = shoot_S1(right, w, R, coeffs, S_harmonics)[-1] < 0
# perform the binary search for 60 steps:
for _ in range(60):
m = (left + right) / 2
S1 = shoot_S1(m, w, R, coeffs, S_harmonics)
if S1[-1] >= 0:
left = m
else:
right = m
# zero-out the far-field:
S1[np.abs(S1).argmin():] = 0.0
return S1
| [
"numpy.abs",
"numpy.empty_like",
"numpy.arange"
] | [((779, 795), 'numpy.empty_like', 'np.empty_like', (['R'], {}), '(R)\n', (792, 795), True, 'import numpy as np\n'), ((3829, 3839), 'numpy.abs', 'np.abs', (['S1'], {}), '(S1)\n', (3835, 3839), True, 'import numpy as np\n'), ((1904, 1932), 'numpy.arange', 'np.arange', (['(0)', 'N_harmonics', '(1)'], {}), '(0, N_harmonics, 1)\n', (1913, 1932), True, 'import numpy as np\n'), ((1621, 1649), 'numpy.arange', 'np.arange', (['(0)', 'N_harmonics', '(1)'], {}), '(0, N_harmonics, 1)\n', (1630, 1649), True, 'import numpy as np\n')] |
from typing import List
from io import BytesIO
import numpy as np
from PIL import Image
from fastapi import FastAPI, Request, File, UploadFile
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
@app.get('/', response_class=HTMLResponse)
async def read_root(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
@app.post('/api/image-processing')
async def create_image_processing(files: List[UploadFile] = File(...)):
# open image
bytes_io = BytesIO(files[0].file.read())
image = Image.open(bytes_io).convert('RGB')
# image processing
data = np.array(image)
h, w, _ = data.shape
h = int(h // 2) * 2
w = int(w // 2) * 2
data = data[:h, :w, :] \
.reshape(h // 2, 2, w // 2, 2, -1) \
.transpose(1, 0, 3, 2, 4) \
.reshape(h, w, -1)
content = BytesIO()
Image.fromarray(data).save(content, format='png')
content.seek(0)
# response
return StreamingResponse(content, media_type='image/png')
| [
"PIL.Image.fromarray",
"fastapi.FastAPI",
"fastapi.responses.StreamingResponse",
"PIL.Image.open",
"io.BytesIO",
"fastapi.templating.Jinja2Templates",
"numpy.array",
"fastapi.staticfiles.StaticFiles",
"fastapi.File"
] | [((304, 313), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (311, 313), False, 'from fastapi import FastAPI, Request, File, UploadFile\n'), ((396, 434), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (411, 434), False, 'from fastapi.templating import Jinja2Templates\n'), ((336, 367), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': '"""static"""'}), "(directory='static')\n", (347, 367), False, 'from fastapi.staticfiles import StaticFiles\n'), ((690, 699), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (694, 699), False, 'from fastapi import FastAPI, Request, File, UploadFile\n'), ((847, 862), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (855, 862), True, 'import numpy as np\n'), ((1087, 1096), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1094, 1096), False, 'from io import BytesIO\n'), ((1198, 1248), 'fastapi.responses.StreamingResponse', 'StreamingResponse', (['content'], {'media_type': '"""image/png"""'}), "(content, media_type='image/png')\n", (1215, 1248), False, 'from fastapi.responses import HTMLResponse, StreamingResponse\n'), ((776, 796), 'PIL.Image.open', 'Image.open', (['bytes_io'], {}), '(bytes_io)\n', (786, 796), False, 'from PIL import Image\n'), ((1101, 1122), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (1116, 1122), False, 'from PIL import Image\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The module for Quantum the Fisher Information."""
from typing import List, Union
import numpy as np
from qiskit.circuit import QuantumCircuit, QuantumRegister, ParameterVector, ParameterExpression
from qiskit.utils.arithmetic import triu_to_dense
from ...operator_base import OperatorBase
from ...list_ops.list_op import ListOp
from ...list_ops.summed_op import SummedOp
from ...operator_globals import I, Z, Y
from ...state_fns.state_fn import StateFn
from ...state_fns.circuit_state_fn import CircuitStateFn
from ..circuit_gradients.lin_comb import LinComb
from .circuit_qfi import CircuitQFI
class LinCombFull(CircuitQFI):
    r"""Compute the full Quantum Fisher Information (QFI).

    Given a pure, parameterized quantum state this class uses the linear combination of unitaries
    approach to compute every matrix element of the QFI, returning a dense ``ListOp[ListOp]``.
    See also :class:`~qiskit.opflow.QFI`.
    """
    # pylint: disable=signature-differs, arguments-differ
    def __init__(
        self,
        aux_meas_op: OperatorBase = Z,
        phase_fix: bool = True,
    ):
        """
        Args:
            aux_meas_op: The operator that the auxiliary qubit is measured with respect to.
                With ``Z`` the real part of the overlap of derivative states is measured,
                with ``-Y`` the imaginary part, and with ``Z - 1j * Y`` the full complex
                overlap.
            phase_fix: Whether or not to compute and add the additional phase fix term
                Re[(d_k<psi|)|psi><psi|(d_l|psi>)].
        Raises:
            ValueError: If the provided auxiliary measurement operator is not supported.
        """
        super().__init__()
        # Only these three measurement operators have a defined interpretation
        # (real part / imaginary part / full complex value).
        if aux_meas_op not in [Z, -Y, (Z - 1j * Y)]:
            raise ValueError(
                "This auxiliary measurement operator is currently not supported. Please choose "
                "either Z, -Y, or Z - 1j * Y. "
            )
        self._aux_meas_op = aux_meas_op
        self._phase_fix = phase_fix
    def convert(
        self,
        operator: CircuitStateFn,
        params: Union[ParameterExpression, ParameterVector, List[ParameterExpression]],
    ) -> ListOp:
        r"""
        Args:
            operator: The operator corresponding to the quantum state :math:`|\psi(\omega)\rangle`
                for which we compute the QFI.
            params: The parameters :math:`\omega` with respect to which we are computing the QFI.
        Returns:
            A ``ListOp[ListOp]`` where the operator at position ``[k][l]`` corresponds to the matrix
            element :math:`k, l` of the QFI.
        Raises:
            TypeError: If ``operator`` is an unsupported type.
        """
        # QFI & phase fix observable: ``aux_meas_op`` acts on the ancilla,
        # the identity on the state register; the factor 4 is part of the
        # QFI definition.
        qfi_observable = StateFn(
            4 * self._aux_meas_op ^ (I ^ operator.num_qubits), is_measurement=True
        )
        # Check if the given operator corresponds to a quantum state given as a circuit.
        if not isinstance(operator, CircuitStateFn):
            raise TypeError(
                "LinCombFull is only compatible with states that are given as "
                f"CircuitStateFn, not {type(operator)}"
            )
        # If a single parameter is given wrap it into a list.
        if isinstance(params, ParameterExpression):
            params = [params]
        elif isinstance(params, ParameterVector):
            params = params[:]  # unroll to list
        if self._phase_fix:
            # First, the operators are computed which can compensate for a potential phase-mismatch
            # between target and trained state, i.e. the overlaps <psi|d_l psi>.
            phase_fix_observable = I ^ operator.num_qubits
            gradient_states = LinComb(aux_meas_op=(Z - 1j * Y))._gradient_states(
                operator,
                meas_op=phase_fix_observable,
                target_params=params,
                open_ctrl=False,
                trim_after_grad_gate=True,
            )
            # _gradient_states returns a bare operator for a single parameter
            # and a ListOp otherwise; normalize to a list of states.
            # pylint: disable=unidiomatic-typecheck
            if type(gradient_states) == ListOp:
                phase_fix_states = gradient_states.oplist
            else:
                phase_fix_states = [gradient_states]
        # Collects one row (a ListOp) of the QFI per parameter.
        qfi_operators = []
        # Add a working qubit: the ancilla of the linear-combination circuits.
        qr_work = QuantumRegister(1, "work_qubit")
        state_qc = QuantumCircuit(*operator.primitive.qregs, qr_work)
        state_qc.h(qr_work)
        # unroll separately from the H gate since we need the H gate to be the first
        # operation in the data attributes of the circuit
        unrolled = LinComb._transpile_to_supported_operations(
            operator.primitive, LinComb.SUPPORTED_GATES
        )
        state_qc.compose(unrolled, inplace=True)
        # Get the circuits needed to compute the overlaps <d_i psi|d_j psi>.
        for i, param_i in enumerate(params):  # loop over parameters
            qfi_ops = []
            # Only the upper triangle (j >= i) is built here; ``triu_to_dense``
            # mirrors it into the full matrix at the end.
            for j, param_j in enumerate(params[i:], i):
                # Get the gates of the quantum state which are parameterized by param_i
                qfi_op = []
                param_gates_i = state_qc._parameter_table[param_i]
                for gate_i, idx_i in param_gates_i:
                    grad_coeffs_i, grad_gates_i = LinComb._gate_gradient_dict(gate_i)[idx_i]
                    # get the location of gate_i, used for trimming
                    location_i = None
                    for idx, (op, _, _) in enumerate(state_qc._data):
                        if op is gate_i:
                            location_i = idx
                            break
                    for grad_coeff_i, grad_gate_i in zip(grad_coeffs_i, grad_gates_i):
                        # Get the gates of the quantum state which are parameterized by param_j
                        param_gates_j = state_qc._parameter_table[param_j]
                        for gate_j, idx_j in param_gates_j:
                            grad_coeffs_j, grad_gates_j = LinComb._gate_gradient_dict(gate_j)[idx_j]
                            # get the location of gate_j, used for trimming
                            location_j = None
                            for idx, (op, _, _) in enumerate(state_qc._data):
                                if op is gate_j:
                                    location_j = idx
                                    break
                            for grad_coeff_j, grad_gate_j in zip(grad_coeffs_j, grad_gates_j):
                                # Combined coefficient: param_i's side enters conjugated.
                                grad_coeff_ij = np.conj(grad_coeff_i) * grad_coeff_j
                                qfi_circuit = LinComb.apply_grad_gate(
                                    state_qc,
                                    gate_i,
                                    idx_i,
                                    grad_gate_i,
                                    grad_coeff_ij,
                                    qr_work,
                                    open_ctrl=True,
                                    trim_after_grad_gate=(location_j < location_i),
                                )
                                # create a copy of the original circuit with the same registers
                                qfi_circuit = LinComb.apply_grad_gate(
                                    qfi_circuit,
                                    gate_j,
                                    idx_j,
                                    grad_gate_j,
                                    1,
                                    qr_work,
                                    open_ctrl=False,
                                    trim_after_grad_gate=(location_j >= location_i),
                                )
                                qfi_circuit.h(qr_work)
                                # Convert the quantum circuit into a CircuitStateFn and add the
                                # coefficients i, j and the original operator coefficient
                                coeff = operator.coeff
                                coeff *= np.sqrt(np.abs(grad_coeff_i) * np.abs(grad_coeff_j))
                                state = CircuitStateFn(qfi_circuit, coeff=coeff)
                                # Chain rule: multiply by the derivative of each gate's
                                # parameter expression w.r.t. the target parameter.
                                param_grad = 1
                                for gate, idx, param in zip(
                                    [gate_i, gate_j], [idx_i, idx_j], [param_i, param_j]
                                ):
                                    param_expression = gate.params[idx]
                                    param_grad *= param_expression.gradient(param)
                                meas = param_grad * qfi_observable
                                term = meas @ state
                                qfi_op.append(term)
                # Phase-fix term: -4 * Re(<d_k psi|psi><psi|d_l psi>)
                def phase_fix_combo_fn(x):
                    return -4 * np.real(x[0] * np.conjugate(x[1]))
                if self._phase_fix:
                    phase_fix_op = ListOp(
                        [phase_fix_states[i], phase_fix_states[j]], combo_fn=phase_fix_combo_fn
                    )
                    # Add the phase fix quantities to the entries of the QFI:
                    # 4 * Re[<d_k psi|d_l psi> - <d_k psi|psi><psi|d_l psi>]
                    qfi_ops += [SummedOp(qfi_op) + phase_fix_op]
                else:
                    qfi_ops += [SummedOp(qfi_op)]
            qfi_operators.append(ListOp(qfi_ops))
        # Return estimate of the full QFI -- A QFI is by definition positive semi-definite.
        return ListOp(qfi_operators, combo_fn=triu_to_dense)
| [
"numpy.abs",
"qiskit.circuit.QuantumCircuit",
"numpy.conj",
"numpy.conjugate",
"qiskit.circuit.QuantumRegister"
] | [((4755, 4787), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(1)', '"""work_qubit"""'], {}), "(1, 'work_qubit')\n", (4770, 4787), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, ParameterVector, ParameterExpression\n'), ((4807, 4857), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['*operator.primitive.qregs', 'qr_work'], {}), '(*operator.primitive.qregs, qr_work)\n', (4821, 4857), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister, ParameterVector, ParameterExpression\n'), ((9287, 9305), 'numpy.conjugate', 'np.conjugate', (['x[1]'], {}), '(x[1])\n', (9299, 9305), True, 'import numpy as np\n'), ((6948, 6969), 'numpy.conj', 'np.conj', (['grad_coeff_i'], {}), '(grad_coeff_i)\n', (6955, 6969), True, 'import numpy as np\n'), ((8459, 8479), 'numpy.abs', 'np.abs', (['grad_coeff_i'], {}), '(grad_coeff_i)\n', (8465, 8479), True, 'import numpy as np\n'), ((8482, 8502), 'numpy.abs', 'np.abs', (['grad_coeff_j'], {}), '(grad_coeff_j)\n', (8488, 8502), True, 'import numpy as np\n')] |
import numpy as np
import cv2
def transform_matrix():
    """Return the forward and inverse perspective-transform matrices.

    The source quadrilateral was hand-picked on a straight-lane
    calibration frame; the destination rectangle is where those corners
    should land in the bird's-eye view.
    """
    # Corners of the lane region in the original (undistorted) image.
    source_pts = np.float32([[557, 475], [729, 475], [961, 630], [345, 630]])
    # Corresponding corners in the top-down view.
    dest_pts = np.float32([[500, 300], [800, 300], [800, 680], [500, 680]])
    forward = cv2.getPerspectiveTransform(source_pts, dest_pts)
    inverse = cv2.getPerspectiveTransform(dest_pts, source_pts)
    return forward, inverse
def transform(tran_matrix, img):
    """Warp ``img`` with the given 3x3 perspective matrix.

    Pass the forward matrix M to map an undistorted image to the
    bird's-eye view; pass the inverse matrix Min to map back.
    ``img.shape[1::-1]`` yields the (width, height) size cv2 expects.
    """
    return cv2.warpPerspective(img, tran_matrix, img.shape[1::-1], flags=cv2.INTER_LINEAR) | [
"cv2.warpPerspective",
"numpy.float32",
"cv2.getPerspectiveTransform"
] | [((184, 244), 'numpy.float32', 'np.float32', (['[[557, 475], [729, 475], [961, 630], [345, 630]]'], {}), '([[557, 475], [729, 475], [961, 630], [345, 630]])\n', (194, 244), True, 'import numpy as np\n'), ((404, 464), 'numpy.float32', 'np.float32', (['[[500, 300], [800, 300], [800, 680], [500, 680]]'], {}), '([[500, 300], [800, 300], [800, 680], [500, 680]])\n', (414, 464), True, 'import numpy as np\n'), ((529, 566), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (556, 566), False, 'import cv2\n'), ((577, 614), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (604, 614), False, 'import cv2\n'), ((865, 944), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'tran_matrix', 'img.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(img, tran_matrix, img.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (884, 944), False, 'import cv2\n')] |
"""
A collection of PyTorch utility functions and module subclasses
"""
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import grad
from torch.optim.lr_scheduler import _LRScheduler
from torch.distributions import constraints
from torch.distributions.transforms import Transform
# Mapping from NumPy scalar dtypes to the corresponding torch dtypes.
_NP_TO_PT = {
    np.float64: torch.float64,
    np.float32: torch.float32,
    np.float16: torch.float16,
    np.int64: torch.int64,
    np.int32: torch.int32,
    np.int16: torch.int16,
    np.int8: torch.int8,
    np.uint8: torch.uint8,
}
def flat_grad(*args, **kwargs):
    """Compute gradients via ``torch.autograd.grad`` and return them
    concatenated into a single flat 1-D tensor."""
    grads = grad(*args, **kwargs)
    flattened = [g.reshape((-1,)) for g in grads]
    return torch.cat(flattened)
def grad_norm(parameters, norm_type=2):
    """Return the total norm of the gradients of ``parameters``.

    Args:
        parameters: A tensor or an iterable of tensors; entries without a
            gradient are ignored.
        norm_type: The p of the p-norm. Accepts a number, ``float("inf")``
            or the string ``"inf"`` for the infinity norm.

    Returns:
        The total gradient norm as a float.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    # Fix: compare against float("inf") AFTER the float() conversion.  The
    # original compared the converted float to the string "inf", which is
    # always False, so the infinity-norm branch was unreachable and the
    # else branch computed nonsense like x ** inf.
    if norm_type == float("inf"):
        total_norm = max(p.grad.data.abs().max() for p in parameters).item()
    else:
        total_norm = 0
        for p in parameters:
            param_norm = p.grad.data.norm(norm_type)
            total_norm += param_norm.item() ** norm_type
        total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
def explained_variance_1d(ypred, y):
    """Fraction of the variance of ``y`` explained by the predictions.

    Returns 1 for a perfect fit and values below 1 otherwise.  When ``y``
    has (numerically) zero variance, returns 1 if the predictions are
    also constant and 0 if they vary.
    """
    assert y.dim() == 1 and ypred.dim() == 1
    vary = y.var().item()
    if np.isclose(vary, 0):
        # Degenerate target: grade purely on whether the prediction is flat.
        return 0 if ypred.var().item() > 1e-8 else 1
    residual_var = (y - ypred).var().item()
    return 1 - residual_var / (vary + 1e-8)
# ==============================
# Schedulers
# ==============================
class LinearLR(_LRScheduler):
    """Linearly decays each param group's learning rate from its base
    value down to 0 over ``total_num_epochs`` epochs.
    """
    def __init__(self, optimizer, total_num_epochs, last_epoch=-1):
        # Fix: this attribute must be set BEFORE super().__init__(), because
        # _LRScheduler.__init__ performs an initial step() which calls
        # self.get_lr() -- the original order raised AttributeError.
        self.total_num_epochs = float(total_num_epochs)
        super().__init__(optimizer, last_epoch=last_epoch)
    def get_lr(self):
        """Current learning rates: base_lr scaled by the remaining fraction
        of the schedule."""
        return [
            base_lr - (base_lr * (self.last_epoch / self.total_num_epochs))
            for base_lr in self.base_lrs
        ]
# ==============================
# Modules
# ==============================
class ToFloat(nn.Module):
    """Cast inputs to float, rescaling uint8 data by 1/250.

    For ``dtype is np.uint8`` the input is assumed to be raw byte data and
    is scaled into roughly [0, 1] (note the original uses 250, not 255);
    all other dtypes pass through unscaled.
    """
    def __init__(self, dtype):
        super().__init__()
        if dtype is np.uint8:
            self.scale = 1 / 250.0
        else:
            self.scale = 1
    def forward(self, x):
        return x.float() * self.scale
class Concat(ToFloat):
    """Concatenate all inputs along the last dimension, then apply the
    ToFloat cast/scaling."""
    def forward(self, *args):
        joined = torch.cat(args, dim=-1)
        return super().forward(joined)
class OneHot(nn.Module):
    """Convert integer class indices into one-hot vectors of size ``n_cat``."""
    def __init__(self, n_cat):
        super().__init__()
        self.n_cat = n_cat
    def forward(self, x):
        # Indexing an identity matrix with the class indices yields the
        # corresponding one-hot rows.
        identity = torch.eye(self.n_cat)
        return identity[x]
class Flatten(nn.Module):
    """Reshape the input to (batch, ``flat_size``), inferring the batch
    dimension."""
    def __init__(self, flat_size):
        super().__init__()
        self.flat_size = flat_size
    def forward(self, x):
        return torch.reshape(x, (-1, self.flat_size))
class ExpandVector(nn.Module):
    """Hold a learnable vector and broadcast it across a batch: the forward
    pass returns the vector repeated once per element of ``x``."""
    def __init__(self, vector):
        super().__init__()
        self.vector = nn.Parameter(vector)
    def forward(self, x):
        batch_size = len(x)
        return self.vector.expand(batch_size, -1)
def update_polyak(from_module, to_module, polyak):
    """Polyak-average the parameters of ``from_module`` into ``to_module``:
    target <- polyak * target + (1 - polyak) * source.

    Operates on ``.data`` in place, so it is safe to call under autograd.
    """
    for source, target in zip(from_module.parameters(), to_module.parameters()):
        # Fix: use the keyword ``alpha=`` form -- the positional
        # ``add_(alpha, tensor)`` overload is deprecated in PyTorch.
        target.data.mul_(polyak).add_(source.data, alpha=1 - polyak)
# ==============================
# Transforms
# ==============================
class TanhTransform(Transform):
    """Bijective tanh transform mapping the real line onto (-1, 1)."""
    domain = constraints.real
    codomain = constraints.interval(-1, +1)
    bijective = True
    sign = +1
    def __eq__(self, other):
        return isinstance(other, TanhTransform)
    def _call(self, x):
        return torch.tanh(x)
    def _inverse(self, y):
        # atanh(y) = (log(1 + y) - log(1 - y)) / 2; nudge exact boundary
        # values by eps so log(0) never occurs.
        eps = torch.finfo(y.dtype).eps
        plus = 1 + y
        minus = 1 - y
        plus[plus == 0] += eps
        minus[minus == 0] += eps
        return (torch.log(plus) - torch.log(minus)) / 2
    def log_abs_det_jacobian(self, x, y):
        # d tanh(x)/dx = 1 - tanh(x)^2 = 1 - y^2; clamp exact zeros by eps
        # before taking the log.
        jac = 1 - y.pow(2)
        jac[jac == 0] += torch.finfo(y.dtype).eps
        return torch.log(jac)
| [
"torch.tanh",
"numpy.isclose",
"torch.log",
"torch.eye",
"torch.nn.Parameter",
"torch.autograd.grad",
"torch.finfo",
"torch.distributions.constraints.interval",
"torch.cat"
] | [((1342, 1361), 'numpy.isclose', 'np.isclose', (['vary', '(0)'], {}), '(vary, 0)\n', (1352, 1361), True, 'import numpy as np\n'), ((3316, 3344), 'torch.distributions.constraints.interval', 'constraints.interval', (['(-1)', '(+1)'], {}), '(-1, +1)\n', (3336, 3344), False, 'from torch.distributions import constraints\n'), ((2865, 2885), 'torch.nn.Parameter', 'nn.Parameter', (['vector'], {}), '(vector)\n', (2877, 2885), True, 'import torch.nn as nn\n'), ((3498, 3511), 'torch.tanh', 'torch.tanh', (['x'], {}), '(x)\n', (3508, 3511), False, 'import torch\n'), ((3909, 3926), 'torch.log', 'torch.log', (['to_log'], {}), '(to_log)\n', (3918, 3926), False, 'import torch\n'), ((2350, 2373), 'torch.cat', 'torch.cat', (['args'], {'dim': '(-1)'}), '(args, dim=-1)\n', (2359, 2373), False, 'import torch\n'), ((2529, 2550), 'torch.eye', 'torch.eye', (['self.n_cat'], {}), '(self.n_cat)\n', (2538, 2550), False, 'import torch\n'), ((3621, 3641), 'torch.finfo', 'torch.finfo', (['y.dtype'], {}), '(y.dtype)\n', (3632, 3641), False, 'import torch\n'), ((3679, 3699), 'torch.finfo', 'torch.finfo', (['y.dtype'], {}), '(y.dtype)\n', (3690, 3699), False, 'import torch\n'), ((3869, 3889), 'torch.finfo', 'torch.finfo', (['y.dtype'], {}), '(y.dtype)\n', (3880, 3889), False, 'import torch\n'), ((631, 652), 'torch.autograd.grad', 'grad', (['*args'], {}), '(*args, **kwargs)\n', (635, 652), False, 'from torch.autograd import grad\n'), ((3720, 3738), 'torch.log', 'torch.log', (['to_log1'], {}), '(to_log1)\n', (3729, 3738), False, 'import torch\n'), ((3741, 3759), 'torch.log', 'torch.log', (['to_log2'], {}), '(to_log2)\n', (3750, 3759), False, 'import torch\n')] |
import torch
import torch.nn as nn
import torchvision
import torch.backends.cudnn as cudnn
import torch.optim
import os
import sys
import argparse
import time
import DCE.dce_model
import numpy as np
from torchvision import transforms
from PIL import Image
import glob
import time
from tqdm import tqdm
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def lowlight(DCE_net, image_path):
    """Enhance one low-light image with a Zero-DCE network and save it.

    The enhanced image is written to the same path with ``train_clip``
    replaced by ``train_clip_enhanced``; missing output directories are
    created on demand.

    Args:
        DCE_net: The enhancement network (expects a CUDA-resident model).
        image_path: Path to the input image under a ``train_clip`` tree.
    """
    scale_factor = 12
    data_lowlight = Image.open(image_path)
    data_lowlight = np.asarray(data_lowlight) / 255.0
    data_lowlight = torch.from_numpy(data_lowlight).float()
    # Crop height and width down to multiples of the network's scale factor.
    h = (data_lowlight.shape[0] // scale_factor) * scale_factor
    w = (data_lowlight.shape[1] // scale_factor) * scale_factor
    data_lowlight = data_lowlight[0:h, 0:w, :]
    # HWC -> CHW, then add the batch dimension.
    data_lowlight = data_lowlight.permute(2, 0, 1)
    data_lowlight = data_lowlight.cuda().unsqueeze(0)
    enhanced_image, params_maps = DCE_net(data_lowlight)
    result_path = image_path.replace('train_clip', 'train_clip_enhanced')
    # Fix: the original derived the output directory from the undefined
    # name ``image`` (leaked from the caller's loop) via a fragile
    # substring replace; use os.path.dirname on the actual output path and
    # a race-free exist_ok makedirs instead.
    os.makedirs(os.path.dirname(result_path), exist_ok=True)
    torchvision.utils.save_image(enhanced_image, result_path)
if __name__ == '__main__':
    # Batch-enhance every frame under the train_clip tree with a
    # pretrained enhancement network, skipping frames already processed.
    with torch.no_grad():
        filePath = '/Your/PATH/NAT2021-train/train_clip/' # the path of original imgs
        file_list = os.listdir(filePath)
        file_list.sort()
        scale_factor = 12
        # Load the pretrained network onto the GPU in inference mode.
        DCE_net = DCE.dce_model.enhance_net_nopool(scale_factor).cuda()
        DCE_net.eval()
        DCE_net.load_state_dict(torch.load('DCE/Epoch99.pth'))
        for file_name in tqdm(file_list):
            test_list = glob.glob(filePath+file_name+"/*")
            for image in test_list:
                # Skip frames whose enhanced counterpart already exists.
                if not os.path.exists(image.replace('train_clip','train_clip_enhanced')):
                    lowlight(DCE_net, image) | [
"PIL.Image.open",
"os.listdir",
"torch.load",
"tqdm.tqdm",
"numpy.asarray",
"torch.from_numpy",
"torch.no_grad",
"torchvision.utils.save_image",
"glob.glob"
] | [((418, 440), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (428, 440), False, 'from PIL import Image\n'), ((1129, 1186), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['enhanced_image', 'result_path'], {}), '(enhanced_image, result_path)\n', (1157, 1186), False, 'import torchvision\n'), ((463, 488), 'numpy.asarray', 'np.asarray', (['data_lowlight'], {}), '(data_lowlight)\n', (473, 488), True, 'import numpy as np\n'), ((1222, 1237), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1235, 1237), False, 'import torch\n'), ((1334, 1354), 'os.listdir', 'os.listdir', (['filePath'], {}), '(filePath)\n', (1344, 1354), False, 'import os\n'), ((1553, 1568), 'tqdm.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (1557, 1568), False, 'from tqdm import tqdm\n'), ((515, 546), 'torch.from_numpy', 'torch.from_numpy', (['data_lowlight'], {}), '(data_lowlight)\n', (531, 546), False, 'import torch\n'), ((1503, 1532), 'torch.load', 'torch.load', (['"""DCE/Epoch99.pth"""'], {}), "('DCE/Epoch99.pth')\n", (1513, 1532), False, 'import torch\n'), ((1585, 1623), 'glob.glob', 'glob.glob', (["(filePath + file_name + '/*')"], {}), "(filePath + file_name + '/*')\n", (1594, 1623), False, 'import glob\n')] |
import pandas as pd
import numpy as np
import random as rd
class Color_learning:
    """A tiny fully connected network (3 -> 5 -> 5) trained with plain
    NumPy batch gradient descent to classify RGB colours.

    Parameter names keep the original (Portuguese) API:
    ``feature``/``labls`` are the training features and labels,
    ``entradas``/``oculta``/``saida`` the input/hidden/output sizes,
    ``inst`` the number of instances, ``lr`` the learning rate,
    ``ephoca`` the number of epochs and ``ohl`` the one-hot labels.
    """
    def __init__(self, feature, labls, entradas, oculta, saida, inst, lr, ephoca, ohl):
        self.feature = feature
        self.labels = labls
        self.input = entradas
        self.output = saida
        self.hidden = oculta
        self.intances = inst
        self.learning_rater = lr
        self.ephoca = ephoca
        self.one_hot_labels = ohl
    @staticmethod
    def sigmoid(sm):
        """Logistic sigmoid (unused by the ReLU forward pass; kept for API)."""
        return 1/(1 + np.exp(-sm))
    def sigmoid_der(self, sm):
        """Derivative of the sigmoid, sigma(x) * (1 - sigma(x))."""
        return self.sigmoid(sm)*(1 - self.sigmoid(sm))
    @staticmethod
    def reluFunction(soma):
        """ReLU activation, max(0, soma), computed without branching."""
        return soma * (soma > 0)
    @staticmethod
    def softmax(sm):
        """Row-wise softmax."""
        ex = np.exp(sm)
        return ex / ex.sum(axis=1, keepdims=True)
    @staticmethod
    def saveFile(wh, wo, bh, bo):
        """Persist weights and biases as plain-text files."""
        np.savetxt('W_hidden', wh)
        np.savetxt('W_output', wo)
        np.savetxt('Bias_hidden', bh)
        np.savetxt('Bias_output', bo)
    def pesos_and_bias(self):
        """Load the fixed initial weights and biases (3x5 hidden, 5x5 output)."""
        self.w_hidden = np.array([
            [3.745401188473624909e-01,2.523058828614708204e+00,7.319939418114050911e-01,2.389864374595511709e+00,-2.569265545780381022e+00],
            [1.559945203362026467e-01,-6.273783669562087439e-01,8.661761457749351800e-01,5.685504174331969196e+00,2.199633592824242090e+00],
            [2.058449429580244683e-02,3.580545654210289808e+00,8.324426408004217404e-01,-6.914150925823431271e-01,5.734765646094067471e+00]])
        self.bias_hidden = np.array([[-5.713801657541415224e-01,1.599035767953929543e+00,-2.612549012693601291e+00,4.021618985447205752e+00,4.077521552978897290e+00]])
        self.w_output = np.array([[1.394938606520418345e-01,2.921446485352181544e-01,3.663618432936917024e-01,4.560699842170359286e-01,7.851759613930135995e-01],
            [-1.357900622441514438e+00,1.062887835078976995e+00,1.666834295713892411e+00,2.228486176073957506e-02,5.662116839433314341e-01],
            [1.705241236872915289e-01,6.505159298527951606e-02,9.488855372533332444e-01,9.656320330745593594e-01,8.083973481164611341e-01],
            [1.410244732192115658e+00,1.849088662447048170e+00,-1.080324002991959631e+00,5.723329450902929494e-02,-5.875330478799644096e-01],
            [1.349525241404493592e+00,-1.235038872119687881e+00,2.177943211965753800e+00,-3.732755317015576391e-02,1.050860711788916130e-01]])
        self.bias_output = np.array([[-1.524395046956204203e+00,8.073278612459058312e-01,-2.031484697797881545e+00,-1.504860077798268136e+00,-1.525953782137729475e-01]])
    def initial(self):
        """Run plain batch gradient descent for ``self.ephoca`` epochs,
        printing the cross-entropy loss once per epoch."""
        for _ in range(self.ephoca):
            # Forward pass.
            sum_sinapse1 = np.dot(self.feature, self.w_hidden) + self.bias_hidden
            layer_hidden = self.reluFunction(sum_sinapse1)
            sum_sinapse2 = np.dot(layer_hidden, self.w_output) + self.bias_output
            layer_output = self.softmax(sum_sinapse2)
            # Backward pass: softmax + cross-entropy gradient.
            error_layer_output = layer_output - self.one_hot_labels
            delta_input_w_h = np.dot(layer_hidden.T, error_layer_output)
            delta_output_xW_h = np.dot(error_layer_output, self.w_output.T)
            # Fix: the ReLU derivative is the indicator of a positive
            # pre-activation (1 where sum_sinapse1 > 0, else 0).  The
            # original multiplied by reluFunction(sum_sinapse1) -- the
            # activation itself -- which scales the gradient incorrectly.
            relu_derivative = (sum_sinapse1 > 0).astype(sum_sinapse1.dtype)
            delta_hiddenXw_o = np.dot(self.feature.T, relu_derivative * delta_output_xW_h)
            delta_layer_hidden = delta_output_xW_h * relu_derivative
            # Gradient-descent updates.
            self.w_hidden -= self.learning_rater * delta_hiddenXw_o
            self.bias_hidden -= self.learning_rater * delta_layer_hidden.sum(axis=0)
            self.w_output -= self.learning_rater * delta_input_w_h
            self.bias_output -= self.learning_rater * error_layer_output.sum(axis=0)
            loss = np.sum(-self.one_hot_labels * np.log(layer_output))
            print('Loss function value:' + str(loss))
# Build 10 random samples per colour class; each RGB channel is drawn from
# a class-specific range and normalised to [0, 1].
red = []
green = []
blue = []
for _ in range(10):
    red.append([rd.randint(200, 255) / 255, rd.randint(100, 180) / 255, rd.randint(0, 99) / 255])
    green.append([rd.randint(0, 99) / 255, rd.randint(200, 255) / 255, rd.randint(100, 180) / 255])
    blue.append([rd.randint(0, 99) / 255, rd.randint(100, 180) / 255, rd.randint(200, 255) / 255])
feature_set = np.vstack([green, red, blue])
labels = np.array([0] * 10 + [1] * 10 + [2] * 10)
# One-hot encode the labels into a 30 x 5 matrix (only 3 of the 5 output
# columns are actually used by the data).
one_hot_labels = np.zeros((30, 5))
one_hot_labels[np.arange(30), labels] = 1
instances = feature_set.shape[0]
entradas = feature_set.shape[1]
camadas_ocultas = 5
camadas_saidas = 5
ephocas = 1000
learning_rate = 1e-4
color = Color_learning(feature_set, labels, entradas, camadas_ocultas, camadas_saidas, instances, learning_rate, ephocas, one_hot_labels)
color.pesos_and_bias()
color.initial()
| [
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.vstack",
"numpy.savetxt",
"random.randint"
] | [((4575, 4604), 'numpy.vstack', 'np.vstack', (['[green, red, blue]'], {}), '([green, red, blue])\n', (4584, 4604), True, 'import numpy as np\n'), ((4615, 4655), 'numpy.array', 'np.array', (['([0] * 10 + [1] * 10 + [2] * 10)'], {}), '([0] * 10 + [1] * 10 + [2] * 10)\n', (4623, 4655), True, 'import numpy as np\n'), ((4670, 4687), 'numpy.zeros', 'np.zeros', (['(30, 5)'], {}), '((30, 5))\n', (4678, 4687), True, 'import numpy as np\n'), ((766, 776), 'numpy.exp', 'np.exp', (['sm'], {}), '(sm)\n', (772, 776), True, 'import numpy as np\n'), ((893, 919), 'numpy.savetxt', 'np.savetxt', (['"""W_hidden"""', 'wh'], {}), "('W_hidden', wh)\n", (903, 919), True, 'import numpy as np\n'), ((929, 955), 'numpy.savetxt', 'np.savetxt', (['"""W_output"""', 'wo'], {}), "('W_output', wo)\n", (939, 955), True, 'import numpy as np\n'), ((965, 994), 'numpy.savetxt', 'np.savetxt', (['"""Bias_hidden"""', 'bh'], {}), "('Bias_hidden', bh)\n", (975, 994), True, 'import numpy as np\n'), ((1004, 1033), 'numpy.savetxt', 'np.savetxt', (['"""Bias_output"""', 'bo'], {}), "('Bias_output', bo)\n", (1014, 1033), True, 'import numpy as np\n'), ((1110, 1446), 'numpy.array', 'np.array', (['[[0.3745401188473625, 2.523058828614708, 0.7319939418114051, \n 2.3898643745955117, -2.569265545780381], [0.15599452033620265, -\n 0.6273783669562087, 0.8661761457749352, 5.685504174331969, \n 2.199633592824242], [0.020584494295802447, 3.58054565421029, \n 0.8324426408004217, -0.6914150925823431, 5.7347656460940675]]'], {}), '([[0.3745401188473625, 2.523058828614708, 0.7319939418114051, \n 2.3898643745955117, -2.569265545780381], [0.15599452033620265, -\n 0.6273783669562087, 0.8661761457749352, 5.685504174331969, \n 2.199633592824242], [0.020584494295802447, 3.58054565421029, \n 0.8324426408004217, -0.6914150925823431, 5.7347656460940675]])\n', (1118, 1446), True, 'import numpy as np\n'), ((1586, 1703), 'numpy.array', 'np.array', (['[[-0.5713801657541415, 1.5990357679539295, -2.6125490126936013, \n 4.021618985447206, 
4.077521552978897]]'], {}), '([[-0.5713801657541415, 1.5990357679539295, -2.6125490126936013, \n 4.021618985447206, 4.077521552978897]])\n', (1594, 1703), True, 'import numpy as np\n'), ((1762, 2337), 'numpy.array', 'np.array', (['[[0.13949386065204183, 0.29214464853521815, 0.3663618432936917, \n 0.45606998421703593, 0.7851759613930136], [-1.3579006224415144, \n 1.062887835078977, 1.6668342957138924, 0.022284861760739575, \n 0.5662116839433314], [0.17052412368729153, 0.06505159298527952, \n 0.9488855372533332, 0.9656320330745594, 0.8083973481164611], [\n 1.4102447321921157, 1.8490886624470482, -1.0803240029919596, \n 0.057233294509029295, -0.5875330478799644], [1.3495252414044936, -\n 1.2350388721196879, 2.177943211965754, -0.037327553170155764, \n 0.10508607117889161]]'], {}), '([[0.13949386065204183, 0.29214464853521815, 0.3663618432936917, \n 0.45606998421703593, 0.7851759613930136], [-1.3579006224415144, \n 1.062887835078977, 1.6668342957138924, 0.022284861760739575, \n 0.5662116839433314], [0.17052412368729153, 0.06505159298527952, \n 0.9488855372533332, 0.9656320330745594, 0.8083973481164611], [\n 1.4102447321921157, 1.8490886624470482, -1.0803240029919596, \n 0.057233294509029295, -0.5875330478799644], [1.3495252414044936, -\n 1.2350388721196879, 2.177943211965754, -0.037327553170155764, \n 0.10508607117889161]])\n', (1770, 2337), True, 'import numpy as np\n'), ((2561, 2683), 'numpy.array', 'np.array', (['[[-1.5243950469562042, 0.8073278612459058, -2.0314846977978815, -\n 1.5048600777982681, -0.15259537821377295]]'], {}), '([[-1.5243950469562042, 0.8073278612459058, -2.0314846977978815, -\n 1.5048600777982681, -0.15259537821377295]])\n', (2569, 2683), True, 'import numpy as np\n'), ((3260, 3309), 'numpy.dot', 'np.dot', (['layer_hidden_traspost', 'error_layer_output'], {}), '(layer_hidden_traspost, error_layer_output)\n', (3266, 3309), True, 'import numpy as np\n'), ((3394, 3439), 'numpy.dot', 'np.dot', (['error_layer_output', 'w_output_traspost'], {}), 
'(error_layer_output, w_output_traspost)\n', (3400, 3439), True, 'import numpy as np\n'), ((3603, 3671), 'numpy.dot', 'np.dot', (['feature_transpost', '(derivada_layer_hidden * delta_output_xW_h)'], {}), '(feature_transpost, derivada_layer_hidden * delta_output_xW_h)\n', (3609, 3671), True, 'import numpy as np\n'), ((522, 533), 'numpy.exp', 'np.exp', (['(-sm)'], {}), '(-sm)\n', (528, 533), True, 'import numpy as np\n'), ((2815, 2850), 'numpy.dot', 'np.dot', (['self.feature', 'self.w_hidden'], {}), '(self.feature, self.w_hidden)\n', (2821, 2850), True, 'import numpy as np\n'), ((2960, 2995), 'numpy.dot', 'np.dot', (['layer_hidden', 'self.w_output'], {}), '(layer_hidden, self.w_output)\n', (2966, 2995), True, 'import numpy as np\n'), ((4276, 4296), 'random.randint', 'rd.randint', (['(200)', '(255)'], {}), '(200, 255)\n', (4286, 4296), True, 'import random as rd\n'), ((4304, 4324), 'random.randint', 'rd.randint', (['(100)', '(180)'], {}), '(100, 180)\n', (4314, 4324), True, 'import random as rd\n'), ((4332, 4349), 'random.randint', 'rd.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (4342, 4349), True, 'import random as rd\n'), ((4377, 4394), 'random.randint', 'rd.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (4387, 4394), True, 'import random as rd\n'), ((4402, 4422), 'random.randint', 'rd.randint', (['(200)', '(255)'], {}), '(200, 255)\n', (4412, 4422), True, 'import random as rd\n'), ((4430, 4450), 'random.randint', 'rd.randint', (['(100)', '(180)'], {}), '(100, 180)\n', (4440, 4450), True, 'import random as rd\n'), ((4477, 4494), 'random.randint', 'rd.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (4487, 4494), True, 'import random as rd\n'), ((4502, 4522), 'random.randint', 'rd.randint', (['(100)', '(180)'], {}), '(100, 180)\n', (4512, 4522), True, 'import random as rd\n'), ((4530, 4550), 'random.randint', 'rd.randint', (['(200)', '(255)'], {}), '(200, 255)\n', (4540, 4550), True, 'import random as rd\n'), ((4125, 4145), 'numpy.log', 'np.log', (['layer_output'], 
{}), '(layer_output)\n', (4131, 4145), True, 'import numpy as np\n')] |
import csv
from scipy import ndimage
import numpy as np
import cv2
# Reading data from CSV: collect every row of the driving log, skipping
# the header row.
lines = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    # Skip the header; next(reader, None) is a no-op on an empty file.
    # (Replaces the original manual first-iteration counter.)
    next(reader, None)
    for line in reader:
        lines.append(line)
# Build the training images and steering measurements from the log rows.
images = []
measurements = []
#Image and angle are extracted and adjusted below with (+/-)0.2 factor
for line in lines:
    # Columns 0-2 hold image paths (presumably center/left/right camera
    # frames -- the +/-0.2 steering correction suggests side-camera
    # offsets; confirm against the driving_log.csv schema).
    for i in range(3):
        source_path = line[i]
        filename = source_path.split('/')[-1]
        # Re-root the recorded path into the local data/IMG directory.
        current_path = 'data/IMG/' + filename
        # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
        # this code requires an old SciPy (or a switch to imageio/PIL).
        image = ndimage.imread(current_path)
        images.append(image)
        if(i == 0):
            measurement = float(line[3])
        elif(i == 1):
            measurement = float(line[3]) + 0.2
        elif(i == 2):
            measurement = float(line[3]) - 0.2
        measurements.append(measurement)
# Augment the data set: every frame is paired with its horizontal mirror
# and the steering angle negated, doubling the number of samples.
augmented_images, augmented_measurements = [], []
for image, measurement in zip(images, measurements):
    augmented_images += [image, cv2.flip(image, 1)]
    augmented_measurements += [measurement, measurement * -1.0]
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
from keras.models import Sequential
from keras.layers import Flatten,Dense,Lambda,Cropping2D,Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
# LeNet-style regression network: normalise, crop, two conv+pool stages,
# then dense layers down to a single steering-angle output.
model = Sequential()
# Normalise pixel values from [0, 255] to [-0.5, 0.5].
model.add(Lambda(lambda x:x/255.0 - 0.5 , input_shape=(160,320,3)))
# Crop 70 rows from the top and 25 from the bottom (presumably sky/hood).
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Convolution2D(6,5,5,activation='relu'))
model.add(MaxPooling2D())
model.add(Convolution2D(6,5,5,activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
# Mean-squared error: the steering angle is a continuous regression target.
model.compile(loss='mse',optimizer='adam')
# NOTE(review): nb_epoch is the Keras 1.x spelling (Keras 2 renamed it to
# epochs) -- confirm against the installed Keras version.
history_object = model.fit(X_train,y_train,validation_split=0.2,shuffle=True,nb_epoch=5,verbose = 1)
model.save('model.h5') | [
"keras.layers.Flatten",
"keras.layers.convolutional.Convolution2D",
"cv2.flip",
"keras.layers.pooling.MaxPooling2D",
"keras.layers.Lambda",
"keras.models.Sequential",
"scipy.ndimage.imread",
"numpy.array",
"keras.layers.Cropping2D",
"keras.layers.Dense",
"csv.reader"
] | [((1301, 1327), 'numpy.array', 'np.array', (['augmented_images'], {}), '(augmented_images)\n', (1309, 1327), True, 'import numpy as np\n'), ((1338, 1370), 'numpy.array', 'np.array', (['augmented_measurements'], {}), '(augmented_measurements)\n', (1346, 1370), True, 'import numpy as np\n'), ((1581, 1593), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1591, 1593), False, 'from keras.models import Sequential\n'), ((165, 184), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (175, 184), False, 'import csv\n'), ((1604, 1664), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (1610, 1664), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\n'), ((1672, 1711), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (1682, 1711), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\n'), ((1720, 1761), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (1733, 1761), False, 'from keras.layers.convolutional import Convolution2D\n'), ((1770, 1784), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (1782, 1784), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((1796, 1837), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (1809, 1837), False, 'from keras.layers.convolutional import Convolution2D\n'), ((1846, 1860), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (1858, 1860), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((1872, 1881), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1879, 1881), False, 'from keras.layers import Flatten, Dense, Lambda, 
Cropping2D, Dropout\n'), ((1893, 1903), 'keras.layers.Dense', 'Dense', (['(120)'], {}), '(120)\n', (1898, 1903), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\n'), ((1915, 1924), 'keras.layers.Dense', 'Dense', (['(84)'], {}), '(84)\n', (1920, 1924), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\n'), ((1936, 1944), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1941, 1944), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\n'), ((618, 646), 'scipy.ndimage.imread', 'ndimage.imread', (['current_path'], {}), '(current_path)\n', (632, 646), False, 'from scipy import ndimage\n'), ((1215, 1233), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (1223, 1233), False, 'import cv2\n')] |
#!/usr/bin/env python
# -*-coding:utf-8-*-
#########################################################################
# > File Name: get_seqdata.py
# > Author: <NAME>
# > Mail: <EMAIL>
# > Created Time: 2019年03月20日 星期三 00时07分18秒
#########################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import pickle
import pdb
import os
import cv2
class seq_dataset(Dataset):
    """Dataset of pickled image sequences, one sample per file.

    Every file in ``data_dir`` is expected to hold two pickled objects in
    order: a sequence of images, then a label.  Note that the pickled label
    is currently discarded and replaced by ``idx % 2`` in ``__getitem__``.
    """
    def __init__(self, data_dir, resize=(64, 64)):
        self.data_dir = data_dir
        self.resize = resize
        # Sort so the sample order is deterministic across runs.
        self.sample_fs = sorted(os.listdir(data_dir))
    def __len__(self):
        return len(self.sample_fs)
    def __getitem__(self, idx):
        sample_path = os.path.join(self.data_dir, self.sample_fs[idx])
        with open(sample_path, 'rb') as f:
            input_data = pickle.load(f)
            label = pickle.load(f)
        # Resize each frame, then convert NHWC uint8 -> NCHW float32.
        frames = [cv2.resize(frame, self.resize) for frame in input_data]
        batch = np.array(frames).astype(np.float32)
        batch = np.transpose(batch, (0, 3, 1, 2))
        # The stored label is overridden by a parity-based label.
        label = idx % 2
        #label ^= 1
        return batch, label
def main():
    """Smoke-test the dataset: walk every sample and tally the labels."""
    dataset = seq_dataset("../faster-rcnn.pytorch/seqdata/")
    positives = 0
    for index in range(len(dataset)):
        _, label = dataset[index]
        positives += label
        print(positives, index)
if __name__ == '__main__':
    main()
| [
"os.listdir",
"os.path.join",
"pickle.load",
"numpy.array",
"cv2.resize",
"numpy.transpose"
] | [((926, 974), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.sample_fs[idx]'], {}), '(self.data_dir, self.sample_fs[idx])\n', (938, 974), False, 'import os\n'), ((790, 810), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (800, 810), False, 'import os\n'), ((1044, 1058), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1055, 1058), False, 'import pickle\n'), ((1079, 1093), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1090, 1093), False, 'import pickle\n'), ((1258, 1296), 'numpy.transpose', 'np.transpose', (['input_data', '(0, 3, 1, 2)'], {}), '(input_data, (0, 3, 1, 2))\n', (1270, 1296), True, 'import numpy as np\n'), ((1120, 1146), 'cv2.resize', 'cv2.resize', (['x', 'self.resize'], {}), '(x, self.resize)\n', (1130, 1146), False, 'import cv2\n'), ((1193, 1213), 'numpy.array', 'np.array', (['input_data'], {}), '(input_data)\n', (1201, 1213), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 16:31:04 2019
@author: macfa
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import librosa
import soundfile as sf
import os
from config import PARAS
import warnings
warnings.filterwarnings('ignore')
# Input / output locations for the source-separation preprocessing pipeline.
audio_path = '../../100_download/separated_data/audio'
mixture_path = '../../Unetdata/mixture_data/audio'
normalize_audio_path = '../../Unetdata/norm_data/audio'
SR = 16000  # sample rate (Hz) used for every librosa.load / sf.write below
audio_list = []
# Collect file names from the leaf directories (no sub-dirs) of the audio tree.
# NOTE(review): the loop variables, in particular `filenames`, leak out of this
# loop and `filenames` is read again in the pairing loop further down — verify
# that this is intentional and that it matches `audio_list` ordering.
for (dirpath, dirnames, filenames) in os.walk(audio_path):
    if not dirnames:
        audio_list.extend(filenames)
from mel_dealer import mel_converter
def frame_feature_extractor(signal, mel_converter=mel_converter):
    """Convert a signal to a mel spectrogram and return its first square chunk.

    The spectrogram is trimmed so its frame count is a multiple of
    2 * PARAS.N_MEL, split into equally sized chunks, and only the first
    chunk is returned.
    """
    spec = mel_converter.signal_to_melspec(signal)
    frames_per_chunk = 2 * PARAS.N_MEL
    remainder = spec.shape[0] % frames_per_chunk
    if remainder != 0:
        # Drop the trailing frames that do not fill a whole chunk.
        spec = spec[:-1 * remainder]
    chunks = np.split(spec, int(spec.shape[0] / frames_per_chunk))
    return chunks[0]
# Pair every audio file with later files from a different source clip,
# normalize both, write the normalized versions and their mixture.
f = audio_list
progress = 0
for i in range(len(f)):
    path1 = f[i]
    name1 = path1[:-4]          # strip the 4-char extension
    path1 = name1 + '.wav'
    path2_count = i + 1
    if path2_count == len(f):
        break
    # NOTE(review): `filenames` here is the leftover loop variable from the
    # os.walk scan above, not `f` — presumably the two lists coincide for the
    # last walked directory; verify this is intentional.  The [:11] prefix
    # check presumably identifies the source clip id — confirm.
    while filenames[path2_count][:11] not in path1:
        path2 = f[path2_count]
        name2 = path2[:-4]
        path2 = name2 + '.wav'
        # Load both clips resampled to SR.
        signal1, _ = librosa.load(audio_path + '/' + path1, sr=SR)
        signal2, _ = librosa.load(audio_path + '/' + path2, sr=SR)
        # Round-trip through the mel representation so both clips share the
        # same time-frequency resolution before mixing.
        mel_spec_1 = frame_feature_extractor(signal1, mel_converter=mel_converter)
        mel_spec_2 = frame_feature_extractor(signal2, mel_converter=mel_converter)
        res_signal_1 = mel_converter.m(mel_spec_1, log=True, phase=None, transpose=True, audio_out=False)
        res_signal_2 = mel_converter.m(mel_spec_2, log=True, phase=None, transpose=True, audio_out=False)
        # L2-normalize each reconstructed signal, then mix by summation.
        signal1_n2 = librosa.util.normalize(res_signal_1, norm=2)
        signal2_n2 = librosa.util.normalize(res_signal_2, norm=2)
        signal3 = signal1_n2 + signal2_n2
        try:
            os.makedirs(normalize_audio_path)
        except FileExistsError:
            pass
        # Write the two normalized sources ...
        dir1 = normalize_audio_path + '/' + name1 + '.wav'
        sf.write(dir1, signal1_n2, samplerate=SR)
        dir2 = normalize_audio_path + '/' + name2 + '.wav'
        sf.write(dir2, signal2_n2, samplerate=SR)
        try:
            os.makedirs(mixture_path)
        except FileExistsError:
            pass
        # ... and their mixture, named "<src1>~<src2>.wav".
        name3 = name1 + '~' + name2 + '.wav'
        dir3 = mixture_path + '/' + name3
        sf.write(dir3, signal3, samplerate=SR)
        if path2_count == len(f)-1:
            break
        path2_count += 1
    # One outer file fully paired.
    progress += 1
print("Progress: {0}-{1}/{2}".format(i,path2_count,len(f))) | [
"librosa.util.normalize",
"os.makedirs",
"mel_dealer.mel_converter.signal_to_melspec",
"soundfile.write",
"numpy.split",
"mel_dealer.mel_converter.m",
"warnings.filterwarnings",
"os.walk",
"librosa.load"
] | [((239, 272), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (262, 272), False, 'import warnings\n'), ((503, 522), 'os.walk', 'os.walk', (['audio_path'], {}), '(audio_path)\n', (510, 522), False, 'import os\n'), ((766, 805), 'mel_dealer.mel_converter.signal_to_melspec', 'mel_converter.signal_to_melspec', (['signal'], {}), '(signal)\n', (797, 805), False, 'from mel_dealer import mel_converter\n'), ((1009, 1031), 'numpy.split', 'np.split', (['S', 'chunk_num'], {}), '(S, chunk_num)\n', (1017, 1031), True, 'import numpy as np\n'), ((1439, 1484), 'librosa.load', 'librosa.load', (["(audio_path + '/' + path1)"], {'sr': 'SR'}), "(audio_path + '/' + path1, sr=SR)\n", (1451, 1484), False, 'import librosa\n'), ((1506, 1551), 'librosa.load', 'librosa.load', (["(audio_path + '/' + path2)"], {'sr': 'SR'}), "(audio_path + '/' + path2, sr=SR)\n", (1518, 1551), False, 'import librosa\n'), ((1767, 1854), 'mel_dealer.mel_converter.m', 'mel_converter.m', (['mel_spec_1'], {'log': '(True)', 'phase': 'None', 'transpose': '(True)', 'audio_out': '(False)'}), '(mel_spec_1, log=True, phase=None, transpose=True, audio_out\n =False)\n', (1782, 1854), False, 'from mel_dealer import mel_converter\n'), ((1873, 1960), 'mel_dealer.mel_converter.m', 'mel_converter.m', (['mel_spec_2'], {'log': '(True)', 'phase': 'None', 'transpose': '(True)', 'audio_out': '(False)'}), '(mel_spec_2, log=True, phase=None, transpose=True, audio_out\n =False)\n', (1888, 1960), False, 'from mel_dealer import mel_converter\n'), ((1986, 2030), 'librosa.util.normalize', 'librosa.util.normalize', (['res_signal_1'], {'norm': '(2)'}), '(res_signal_1, norm=2)\n', (2008, 2030), False, 'import librosa\n'), ((2052, 2096), 'librosa.util.normalize', 'librosa.util.normalize', (['res_signal_2'], {'norm': '(2)'}), '(res_signal_2, norm=2)\n', (2074, 2096), False, 'import librosa\n'), ((2332, 2373), 'soundfile.write', 'sf.write', (['dir1', 'signal1_n2'], {'samplerate': 'SR'}), '(dir1, 
signal1_n2, samplerate=SR)\n', (2340, 2373), True, 'import soundfile as sf\n'), ((2441, 2482), 'soundfile.write', 'sf.write', (['dir2', 'signal2_n2'], {'samplerate': 'SR'}), '(dir2, signal2_n2, samplerate=SR)\n', (2449, 2482), True, 'import soundfile as sf\n'), ((2687, 2725), 'soundfile.write', 'sf.write', (['dir3', 'signal3'], {'samplerate': 'SR'}), '(dir3, signal3, samplerate=SR)\n', (2695, 2725), True, 'import soundfile as sf\n'), ((2182, 2215), 'os.makedirs', 'os.makedirs', (['normalize_audio_path'], {}), '(normalize_audio_path)\n', (2193, 2215), False, 'import os\n'), ((2517, 2542), 'os.makedirs', 'os.makedirs', (['mixture_path'], {}), '(mixture_path)\n', (2528, 2542), False, 'import os\n')] |
import numpy as np
import tensorflow as tf
import json
import pickle
import data_utils
import plotting
import model
import utils
from time import time
from eICU_synthetic_dataset_generation import batch_size
from mmd import median_pairwise_distance, mix_rbf_mmd2_and_ratio
tf.logging.set_verbosity(tf.logging.ERROR)
# --- get settings --- #
# parse command line arguments, or use defaults
parser = utils.rgan_options_parser()
settings = vars(parser.parse_args())
# if a settings file is specified, it overrides command line arguments/defaults
if settings['settings_file']: settings = utils.load_settings_from_file(settings)
# --- get data, split --- #
samples, pdf, labels = data_utils.get_samples_and_labels(settings)
# --- save settings, data --- #
print('Ready to run with settings:')
for (k, v) in settings.items(): print(v, '\t', k)
# add the settings to local environment
# WARNING: at this point a lot of variables appear
# (at module level locals() is globals(), so this injects every setting
# as a global name — e.g. `identifier`, `data`, `batch_size` below)
locals().update(settings)
# NOTE(review): the file handle from open() is never closed explicitly.
json.dump(settings, open('./experiments/settings/' + identifier + '.txt', 'w'), indent=0)
if data == 'eICU_task':
    # eICU samples are flat; reshape into (n, 16 timesteps, 4 signals).
    train_seqs = samples['train'].reshape(-1, 16, 4)
    vali_seqs = samples['vali'].reshape(-1, 16, 4)
    test_seqs = samples['test'].reshape(-1, 16, 4)
    train_targets = labels['train']
    vali_targets = labels['vali']
    test_targets = labels['test']
if not data == 'load':
    # Persist the generated/loaded dataset for reproducibility.
    data_path = './experiments/data/' + identifier + '.data.npy'
    np.save(data_path, {'samples': samples, 'pdf': pdf, 'labels': labels})
    print('Saved training data to', data_path)
# --- build model --- #
# Placeholders: latent input Z, real data X, and conditioning inputs
# for generator (CG), discriminator (CD) and sampling (CS).
Z, X, CG, CD, CS = model.create_placeholders(batch_size, seq_length, latent_dim,
                                           num_signals, cond_dim)
# Sub-dicts of `settings` forwarded to the discriminator / generator builders.
discriminator_vars = ['hidden_units_d', 'seq_length', 'cond_dim', 'batch_size', 'batch_mean']
discriminator_settings = dict((k, settings[k]) for k in discriminator_vars)
generator_vars = ['hidden_units_g', 'seq_length', 'batch_size',
                  'num_generated_features', 'cond_dim', 'learn_scale']
generator_settings = dict((k, settings[k]) for k in generator_vars)
# A conditional GAN is used whenever a conditioning dimension is configured.
CGAN = (cond_dim > 0)
if CGAN: assert not predict_labels
D_loss, G_loss = model.GAN_loss(Z, X, generator_settings, discriminator_settings,
                               kappa, CGAN, CG, CD, CS, wrong_labels=wrong_labels)
# `dp=True` enables differentially-private training via the privacy accountant.
D_solver, G_solver, priv_accountant = model.GAN_solvers(D_loss, G_loss, learning_rate, batch_size,
                                                       total_examples=samples['train'].shape[0],
                                                       l2norm_bound=l2norm_bound,
                                                       batches_per_lot=batches_per_lot, sigma=dp_sigma, dp=dp)
# Sampling op reusing the generator weights.
G_sample = model.generator(Z, **generator_settings, reuse=True, c=CG)
# --- evaluation --- #
# frequency to do visualisations
vis_freq = 50
eval_freq = 50
# get heuristic bandwidth for mmd kernel from evaluation samples
heuristic_sigma_training = median_pairwise_distance(samples['vali'])
best_mmd2_so_far = 1000
# optimise sigma using that (that's t-hat)
batch_multiplier = 5000 // batch_size
eval_size = batch_multiplier * batch_size
eval_eval_size = int(0.2 * eval_size)
eval_real_PH = tf.placeholder(tf.float32, [eval_eval_size, seq_length, num_generated_features])
eval_sample_PH = tf.placeholder(tf.float32, [eval_eval_size, seq_length, num_generated_features])
n_sigmas = 2
# Kernel bandwidths initialised around the heuristic sigma (powers -1..3).
sigma = tf.get_variable(name='sigma', shape=n_sigmas, initializer=tf.constant_initializer(
    value=np.power(heuristic_sigma_training, np.linspace(-1, 3, num=n_sigmas))))
mmd2, that = mix_rbf_mmd2_and_ratio(eval_real_PH, eval_sample_PH, sigma)
# Maximise the t-hat ratio w.r.t. the kernel bandwidths only.
with tf.variable_scope("SIGMA_optimizer"):
    sigma_solver = tf.train.RMSPropOptimizer(learning_rate=0.05).minimize(-that, var_list=[sigma])
    # sigma_solver = tf.train.AdamOptimizer().minimize(-that, var_list=[sigma])
    # sigma_solver = tf.train.AdagradOptimizer(learning_rate=0.1).minimize(-that, var_list=[sigma])
sigma_opt_iter = 2000
sigma_opt_thresh = 0.001
sigma_opt_vars = [var for var in tf.global_variables() if 'SIGMA_optimizer' in var.name]
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Fixed latent vector (and condition) reused at every visualisation epoch,
# so successive plots show the evolution of the same samples.
vis_Z = model.sample_Z(batch_size, seq_length, latent_dim, use_time)
if CGAN:
    vis_C = model.sample_C(batch_size, cond_dim, max_val, one_hot)
    if 'eICU_task' in data:
        # Use real label vectors as conditions for the eICU task.
        vis_C = labels['train'][np.random.choice(labels['train'].shape[0], batch_size, replace=False), :]
    vis_sample = sess.run(G_sample, feed_dict={Z: vis_Z, CG: vis_C})
else:
    vis_sample = sess.run(G_sample, feed_dict={Z: vis_Z})
    vis_C = None
# Plot a handful of real validation samples for side-by-side comparison.
vis_real_indices = np.random.choice(len(samples['vali']), size=6)
vis_real = np.float32(samples['vali'][vis_real_indices, :, :])
if not labels['vali'] is None:
    vis_real_labels = labels['vali'][vis_real_indices]
else:
    vis_real_labels = None
if 'eICU' in data:
    plotting.vis_eICU_patients_downsampled(vis_real, resample_rate_in_min,
                                           identifier=identifier + '_real', idx=0)
else:
    plotting.save_plot_sample(vis_real, 0, identifier + '_real', n_samples=6,
                              num_epochs=num_epochs)
# Training trace file; columns are written below during training.
# NOTE(review): `trace` is opened here but never explicitly closed/flushed.
trace = open('./experiments/traces/' + identifier + '.trace.txt', 'w')
trace.write('epoch time D_loss G_loss mmd2 that pdf real_pdf\n')
# --- train --- #
train_vars = ['batch_size', 'D_rounds', 'G_rounds', 'use_time', 'seq_length',
              'latent_dim', 'num_generated_features', 'cond_dim', 'max_val',
              'WGAN_clip', 'one_hot']
train_settings = dict((k, settings[k]) for k in train_vars)
t0 = time()
best_epoch = 0
print('epoch\ttime\tD_loss\tG_loss')
for epoch in range(num_epochs):
    # One full pass of alternating D/G updates over the training set.
    D_loss_curr, G_loss_curr = model.train_epoch(epoch, samples['train'], labels['train'],
                                                 sess, Z, X, CG, CD, CS,
                                                 D_loss, G_loss,
                                                 D_solver, G_solver,
                                                 **train_settings)
    # -- eval -- #
    # visualise plots of generated samples, with/without labels
    if epoch % vis_freq == 0:
        if CGAN:
            vis_sample = sess.run(G_sample, feed_dict={Z: vis_Z, CG: vis_C})
        else:
            vis_sample = sess.run(G_sample, feed_dict={Z: vis_Z})
        plotting.visualise_at_epoch(vis_sample, data,
                                    predict_labels, one_hot, epoch, identifier, num_epochs,
                                    resample_rate_in_min, multivariate_mnist, seq_length, labels=vis_C)
    # compute mmd2 and, if available, prob density
    if epoch % eval_freq == 0:
        t = time() - t0
        print('%d\t%.2f\t%.4f\t%.4f' % (epoch, t, D_loss_curr, G_loss_curr))
        if 'eICU' in data:
            # Generate a synthetic dataset the size of train+vali, conditioned
            # on the real label batches, and dump it to disk for this epoch.
            gen_samples = []
            labels_gen_samples = []
            for batch_idx in range(int(len(train_seqs) / batch_size)):
                X_mb, Y_mb = data_utils.get_batch(train_seqs, batch_size, batch_idx, train_targets)
                z_ = model.sample_Z(batch_size, seq_length, latent_dim, use_time=use_time)
                gen_samples_mb = sess.run(G_sample, feed_dict={Z: z_, CG: Y_mb})
                gen_samples.append(gen_samples_mb)
                labels_gen_samples.append(Y_mb)
            for batch_idx in range(int(len(vali_seqs) / batch_size)):
                X_mb, Y_mb = data_utils.get_batch(vali_seqs, batch_size, batch_idx, vali_targets)
                z_ = model.sample_Z(batch_size, seq_length, latent_dim, use_time=use_time)
                gen_samples_mb = sess.run(G_sample, feed_dict={Z: z_, CG: Y_mb})
                gen_samples.append(gen_samples_mb)
                labels_gen_samples.append(Y_mb)
            gen_samples = np.vstack(gen_samples)
            labels_gen_samples = np.vstack(labels_gen_samples)
            # NOTE(review): assumes ./synthetic_eICU_datasets already exists.
            wd = './synthetic_eICU_datasets'
            with open(wd + '/samples_' + identifier + '_' + str(epoch) + '.pk', 'wb') as f:
                pickle.dump(file=f, obj=gen_samples)
            with open(wd + '/labels_' + identifier + '_' + str(epoch) + '.pk', 'wb') as f:
                pickle.dump(file=f, obj=labels_gen_samples)
    if shuffle: # shuffle the training data
        perm = np.random.permutation(samples['train'].shape[0])
        samples['train'] = samples['train'][perm]
        if labels['train'] is not None:
            labels['train'] = labels['train'][perm]
model.dump_parameters(identifier + '_' + str(epoch), sess) | [
"utils.rgan_options_parser",
"tensorflow.logging.set_verbosity",
"utils.load_settings_from_file",
"numpy.save",
"plotting.visualise_at_epoch",
"data_utils.get_batch",
"model.train_epoch",
"tensorflow.placeholder",
"tensorflow.Session",
"mmd.mix_rbf_mmd2_and_ratio",
"mmd.median_pairwise_distance"... | [((276, 318), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (300, 318), True, 'import tensorflow as tf\n'), ((402, 429), 'utils.rgan_options_parser', 'utils.rgan_options_parser', ([], {}), '()\n', (427, 429), False, 'import utils\n'), ((680, 723), 'data_utils.get_samples_and_labels', 'data_utils.get_samples_and_labels', (['settings'], {}), '(settings)\n', (713, 723), False, 'import data_utils\n'), ((1593, 1681), 'model.create_placeholders', 'model.create_placeholders', (['batch_size', 'seq_length', 'latent_dim', 'num_signals', 'cond_dim'], {}), '(batch_size, seq_length, latent_dim, num_signals,\n cond_dim)\n', (1618, 1681), False, 'import model\n'), ((2173, 2293), 'model.GAN_loss', 'model.GAN_loss', (['Z', 'X', 'generator_settings', 'discriminator_settings', 'kappa', 'CGAN', 'CG', 'CD', 'CS'], {'wrong_labels': 'wrong_labels'}), '(Z, X, generator_settings, discriminator_settings, kappa,\n CGAN, CG, CD, CS, wrong_labels=wrong_labels)\n', (2187, 2293), False, 'import model\n'), ((2360, 2555), 'model.GAN_solvers', 'model.GAN_solvers', (['D_loss', 'G_loss', 'learning_rate', 'batch_size'], {'total_examples': "samples['train'].shape[0]", 'l2norm_bound': 'l2norm_bound', 'batches_per_lot': 'batches_per_lot', 'sigma': 'dp_sigma', 'dp': 'dp'}), "(D_loss, G_loss, learning_rate, batch_size, total_examples\n =samples['train'].shape[0], l2norm_bound=l2norm_bound, batches_per_lot=\n batches_per_lot, sigma=dp_sigma, dp=dp)\n", (2377, 2555), False, 'import model\n'), ((2725, 2783), 'model.generator', 'model.generator', (['Z'], {'reuse': '(True)', 'c': 'CG'}), '(Z, **generator_settings, reuse=True, c=CG)\n', (2740, 2783), False, 'import model\n'), ((2964, 3005), 'mmd.median_pairwise_distance', 'median_pairwise_distance', (["samples['vali']"], {}), "(samples['vali'])\n", (2988, 3005), False, 'from mmd import median_pairwise_distance, mix_rbf_mmd2_and_ratio\n'), ((3207, 3292), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[eval_eval_size, seq_length, num_generated_features]'], {}), '(tf.float32, [eval_eval_size, seq_length, num_generated_features]\n )\n', (3221, 3292), True, 'import tensorflow as tf\n'), ((3305, 3390), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[eval_eval_size, seq_length, num_generated_features]'], {}), '(tf.float32, [eval_eval_size, seq_length, num_generated_features]\n )\n', (3319, 3390), True, 'import tensorflow as tf\n'), ((3584, 3643), 'mmd.mix_rbf_mmd2_and_ratio', 'mix_rbf_mmd2_and_ratio', (['eval_real_PH', 'eval_sample_PH', 'sigma'], {}), '(eval_real_PH, eval_sample_PH, sigma)\n', (3606, 3643), False, 'from mmd import median_pairwise_distance, mix_rbf_mmd2_and_ratio\n'), ((4110, 4122), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4120, 4122), True, 'import tensorflow as tf\n'), ((4176, 4236), 'model.sample_Z', 'model.sample_Z', (['batch_size', 'seq_length', 'latent_dim', 'use_time'], {}), '(batch_size, seq_length, latent_dim, use_time)\n', (4190, 4236), False, 'import model\n'), ((4675, 4726), 'numpy.float32', 'np.float32', (["samples['vali'][vis_real_indices, :, :]"], {}), "(samples['vali'][vis_real_indices, :, :])\n", (4685, 4726), True, 'import numpy as np\n'), ((5576, 5582), 'time.time', 'time', ([], {}), '()\n', (5580, 5582), False, 'from time import time\n'), ((588, 627), 'utils.load_settings_from_file', 'utils.load_settings_from_file', (['settings'], {}), '(settings)\n', (617, 627), False, 'import utils\n'), ((1430, 1500), 'numpy.save', 'np.save', (['data_path', "{'samples': samples, 'pdf': pdf, 'labels': labels}"], {}), "(data_path, {'samples': samples, 'pdf': pdf, 'labels': labels})\n", (1437, 1500), True, 'import numpy as np\n'), ((3649, 3685), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""SIGMA_optimizer"""'], {}), "('SIGMA_optimizer')\n", (3666, 3685), True, 'import tensorflow as tf\n'), ((4132, 4165), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (4163, 4165), True, 'import tensorflow as tf\n'), ((4258, 4312), 'model.sample_C', 'model.sample_C', (['batch_size', 'cond_dim', 'max_val', 'one_hot'], {}), '(batch_size, cond_dim, max_val, one_hot)\n', (4272, 4312), False, 'import model\n'), ((4870, 4984), 'plotting.vis_eICU_patients_downsampled', 'plotting.vis_eICU_patients_downsampled', (['vis_real', 'resample_rate_in_min'], {'identifier': "(identifier + '_real')", 'idx': '(0)'}), "(vis_real, resample_rate_in_min,\n identifier=identifier + '_real', idx=0)\n", (4908, 4984), False, 'import plotting\n'), ((5034, 5134), 'plotting.save_plot_sample', 'plotting.save_plot_sample', (['vis_real', '(0)', "(identifier + '_real')"], {'n_samples': '(6)', 'num_epochs': 'num_epochs'}), "(vis_real, 0, identifier + '_real', n_samples=6,\n num_epochs=num_epochs)\n", (5059, 5134), False, 'import plotting\n'), ((5698, 5839), 'model.train_epoch', 'model.train_epoch', (['epoch', "samples['train']", "labels['train']", 'sess', 'Z', 'X', 'CG', 'CD', 'CS', 'D_loss', 'G_loss', 'D_solver', 'G_solver'], {}), "(epoch, samples['train'], labels['train'], sess, Z, X, CG,\n CD, CS, D_loss, G_loss, D_solver, G_solver, **train_settings)\n", (5715, 5839), False, 'import model\n'), ((4046, 4067), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4065, 4067), True, 'import tensorflow as tf\n'), ((6328, 6505), 'plotting.visualise_at_epoch', 'plotting.visualise_at_epoch', (['vis_sample', 'data', 'predict_labels', 'one_hot', 'epoch', 'identifier', 'num_epochs', 'resample_rate_in_min', 'multivariate_mnist', 'seq_length'], {'labels': 'vis_C'}), '(vis_sample, data, predict_labels, one_hot,\n epoch, identifier, num_epochs, resample_rate_in_min, multivariate_mnist,\n seq_length, labels=vis_C)\n', (6355, 6505), False, 'import plotting\n'), ((8245, 8293), 'numpy.random.permutation', 'np.random.permutation', (["samples['train'].shape[0]"], {}), "(samples['train'].shape[0])\n", (8266, 
8293), True, 'import numpy as np\n'), ((3706, 3751), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.05)'}), '(learning_rate=0.05)\n', (3731, 3751), True, 'import tensorflow as tf\n'), ((6665, 6671), 'time.time', 'time', ([], {}), '()\n', (6669, 6671), False, 'from time import time\n'), ((7755, 7777), 'numpy.vstack', 'np.vstack', (['gen_samples'], {}), '(gen_samples)\n', (7764, 7777), True, 'import numpy as np\n'), ((7811, 7840), 'numpy.vstack', 'np.vstack', (['labels_gen_samples'], {}), '(labels_gen_samples)\n', (7820, 7840), True, 'import numpy as np\n'), ((4373, 4442), 'numpy.random.choice', 'np.random.choice', (["labels['train'].shape[0]", 'batch_size'], {'replace': '(False)'}), "(labels['train'].shape[0], batch_size, replace=False)\n", (4389, 4442), True, 'import numpy as np\n'), ((6946, 7016), 'data_utils.get_batch', 'data_utils.get_batch', (['train_seqs', 'batch_size', 'batch_idx', 'train_targets'], {}), '(train_seqs, batch_size, batch_idx, train_targets)\n', (6966, 7016), False, 'import data_utils\n'), ((7038, 7107), 'model.sample_Z', 'model.sample_Z', (['batch_size', 'seq_length', 'latent_dim'], {'use_time': 'use_time'}), '(batch_size, seq_length, latent_dim, use_time=use_time)\n', (7052, 7107), False, 'import model\n'), ((7388, 7456), 'data_utils.get_batch', 'data_utils.get_batch', (['vali_seqs', 'batch_size', 'batch_idx', 'vali_targets'], {}), '(vali_seqs, batch_size, batch_idx, vali_targets)\n', (7408, 7456), False, 'import data_utils\n'), ((7478, 7547), 'model.sample_Z', 'model.sample_Z', (['batch_size', 'seq_length', 'latent_dim'], {'use_time': 'use_time'}), '(batch_size, seq_length, latent_dim, use_time=use_time)\n', (7492, 7547), False, 'import model\n'), ((7995, 8031), 'pickle.dump', 'pickle.dump', ([], {'file': 'f', 'obj': 'gen_samples'}), '(file=f, obj=gen_samples)\n', (8006, 8031), False, 'import pickle\n'), ((8140, 8183), 'pickle.dump', 'pickle.dump', ([], {'file': 'f', 'obj': 'labels_gen_samples'}), 
'(file=f, obj=labels_gen_samples)\n', (8151, 8183), False, 'import pickle\n'), ((3535, 3567), 'numpy.linspace', 'np.linspace', (['(-1)', '(3)'], {'num': 'n_sigmas'}), '(-1, 3, num=n_sigmas)\n', (3546, 3567), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class backWarp(nn.Module):
    """
    Backward-warping module.

    Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
    it generates I0 <-- backwarp(F_0_1, I1) by sampling I1 at the
    flow-displaced pixel coordinates.
    ...
    Methods
    -------
    forward(img, flow)
        Returns the backwarped image.
    """

    def __init__(self, H, W):
        """
        Parameters
        ----------
        H : int
            height of the image.
        W : int
            width of the image.
        """
        super(backWarp, self).__init__()
        self.W = W
        self.H = H
        # Base pixel-coordinate grids, kept as non-trainable parameters so
        # they move with the module across devices.
        cols, rows = np.meshgrid(np.arange(W), np.arange(H))
        self.gridX = torch.nn.Parameter(torch.tensor(cols), requires_grad=False)
        self.gridY = torch.nn.Parameter(torch.tensor(rows), requires_grad=False)

    def forward(self, img, flow):
        """
        Warp `img` (frame I1) backwards through `flow` (F_0_1).

        Parameters
        ----------
        img : tensor
            frame I1, shape (N, C, H, W).
        flow : tensor
            optical flow F_0_1, shape (N, 2, H, W).

        Returns
        -------
        tensor
            frame I0.
        """
        # Horizontal / vertical flow components.
        flow_u = flow[:, 0, :, :]
        flow_v = flow[:, 1, :, :]
        # Absolute sampling positions = base grid + displacement.
        sample_x = self.gridX.unsqueeze(0).expand_as(flow_u).float() + flow_u
        sample_y = self.gridY.unsqueeze(0).expand_as(flow_v).float() + flow_v
        # Rescale into grid_sample's normalised [-1, 1] coordinate range.
        sample_x = 2 * (sample_x / self.W - 0.5)
        sample_y = 2 * (sample_y / self.H - 0.5)
        sampling_grid = torch.stack((sample_x, sample_y), dim=3)
        # Bilinear sampling; out-of-range positions clamp to the border.
        return torch.nn.functional.grid_sample(img, sampling_grid,
                                               padding_mode='border')
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
class Coeff(nn.Module):
    """Time-dependent mixing coefficients for intermediate-frame synthesis.

    Holds the vector of intermediate time positions ``t`` (seven evenly
    spaced values in (0, 1)) as a non-trainable parameter and derives
    per-index coefficient tensors shaped (k, 1, 1, 1) so they broadcast
    over NCHW image batches.
    """
    def __init__(self):
        super(Coeff, self).__init__()
        # Seven intermediate positions between reference frames I0 and I1.
        self.t = torch.nn.Parameter(torch.FloatTensor(np.linspace(0.125, 0.875, 7)), requires_grad=False)

    def getFlowCoeff(self, indices):
        """Coefficients for the intermediate optical flows.

        F_t_0 = C00 x F_0_1 + C01 x F_1_0
        F_t_1 = C10 x F_0_1 + C11 x F_1_0
        with C00 = C11 = -(1 - t) * t, C01 = t * t, C10 = (1 - t) * (1 - t).

        Parameters
        ----------
        indices : tensor
            intermediate-frame positions of the samples in the batch.

        Returns
        -------
        tuple of tensors
            (C00, C01, C10, C11), each shaped (k, 1, 1, 1).
        """
        ind = indices.detach()
        t_sel = self.t[ind]
        residual = 1 - t_sel
        C00 = -residual * t_sel
        C01 = t_sel * t_sel
        C10 = residual * residual
        C11 = C00

        def reshape4d(c):
            # (k,) -> (k, 1, 1, 1) for broadcasting against NCHW tensors.
            return c[None, None, None, :].permute(3, 0, 1, 2)

        return reshape4d(C00), reshape4d(C01), reshape4d(C10), reshape4d(C11)

    def getWarpCoeff(self, indices):
        """Coefficients for blending the two backwarped frames.

        It_gen = (C0 x V_t_0 x g0 + C1 x V_t_1 x g1) / (C0 x V_t_0 + C1 x V_t_1)
        with C0 = 1 - t and C1 = t.

        Parameters
        ----------
        indices : tensor
            intermediate-frame positions of the samples in the batch.

        Returns
        -------
        tuple of tensors
            (C0, C1), each shaped (k, 1, 1, 1).
        """
        ind = indices.detach()
        t_sel = self.t[ind]
        C0 = 1 - t_sel
        C1 = t_sel
        return C0[None, None, None, :].permute(3, 0, 1, 2), C1[None, None, None, :].permute(3, 0, 1, 2)
def set_t(self, factor):
ti = 1 / factor
self.t = torch.nn.Parameter(torch.FloatTensor(np.linspace(ti, 1 - ti, factor - 1)), requires_grad=False) | [
"torch.nn.functional.grid_sample",
"torch.stack",
"torch.tensor",
"numpy.linspace",
"numpy.arange"
] | [((1976, 2002), 'torch.stack', 'torch.stack', (['(x, y)'], {'dim': '(3)'}), '((x, y), dim=3)\n', (1987, 2002), False, 'import torch\n'), ((2075, 2140), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['img', 'grid'], {'padding_mode': '"""border"""'}), "(img, grid, padding_mode='border')\n", (2106, 2140), False, 'import torch\n'), ((912, 924), 'numpy.arange', 'np.arange', (['W'], {}), '(W)\n', (921, 924), True, 'import numpy as np\n'), ((926, 938), 'numpy.arange', 'np.arange', (['H'], {}), '(H)\n', (935, 938), True, 'import numpy as np\n'), ((1021, 1040), 'torch.tensor', 'torch.tensor', (['gridX'], {}), '(gridX)\n', (1033, 1040), False, 'import torch\n'), ((1104, 1123), 'torch.tensor', 'torch.tensor', (['gridY'], {}), '(gridY)\n', (1116, 1123), False, 'import torch\n'), ((2418, 2446), 'numpy.linspace', 'np.linspace', (['(0.125)', '(0.875)', '(7)'], {}), '(0.125, 0.875, 7)\n', (2429, 2446), True, 'import numpy as np\n'), ((4930, 4965), 'numpy.linspace', 'np.linspace', (['ti', '(1 - ti)', '(factor - 1)'], {}), '(ti, 1 - ti, factor - 1)\n', (4941, 4965), True, 'import numpy as np\n')] |
import numpy as np
np.random.seed(1337)  # fix the seed so weight initialisation is reproducible
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
# Simple MLP regressor: 1 input (day index) -> normalised confirmed count.
model = Sequential()
model.add(Dense(units=50, input_dim=1, activation='relu'))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
# NOTE(review): stacking a 1-unit linear layer after a 1-unit sigmoid only
# rescales/shifts the sigmoid output — verify this double output layer is
# intentional.
model.add(Dense(units=1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
import csv
# Load the confirmed-case history; one count per row, first column.
with open('data/france_history.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    rows = [row for row in reader]
fr_corn_y = []
for each_y in rows:
    fr_corn_y.append(int(each_y[0]))
dates = len(fr_corn_y)
fr_corn_x = list(range(1, dates + 1))  # day indices 1..N
fr_corn_x = np.array(fr_corn_x)
fr_corn_y = np.array(fr_corn_y)
fr_dates_length = len(fr_corn_x)
# Normalise targets by the most recent count so they fall in (0, 1].
fr_absorb = fr_corn_y[fr_dates_length-1]
corn_y_norm = fr_corn_y / fr_absorb
model.fit(fr_corn_x, corn_y_norm, epochs=10000, shuffle=False)
# Predict on the training range and undo the normalisation for plotting.
corn_y_predict = model.predict(fr_corn_x)
corn_y_predict = corn_y_predict * fr_absorb
# NOTE(review): variable is named fig_italy but the plot is for France.
fig_italy = plt.figure(figsize=(7, 5))
plt.scatter(fr_corn_x, fr_corn_y, label='Real Confirmed')
plt.plot(fr_corn_x, corn_y_predict, label='Predict Result')
plt.title('France Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show() | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"keras.layers.Dense",
"matplotlib.pyplot.title",
"csv.reader",
"matplotlib.pyplot.legend",
... | [((19, 39), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (33, 39), True, 'import numpy as np\n'), ((148, 160), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (158, 160), False, 'from keras.models import Sequential\n'), ((722, 741), 'numpy.array', 'np.array', (['fr_corn_x'], {}), '(fr_corn_x)\n', (730, 741), True, 'import numpy as np\n'), ((754, 773), 'numpy.array', 'np.array', (['fr_corn_y'], {}), '(fr_corn_y)\n', (762, 773), True, 'import numpy as np\n'), ((1048, 1074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (1058, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1132), 'matplotlib.pyplot.scatter', 'plt.scatter', (['fr_corn_x', 'fr_corn_y'], {'label': '"""Real Confirmed"""'}), "(fr_corn_x, fr_corn_y, label='Real Confirmed')\n", (1086, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1192), 'matplotlib.pyplot.plot', 'plt.plot', (['fr_corn_x', 'corn_y_predict'], {'label': '"""Predict Result"""'}), "(fr_corn_x, corn_y_predict, label='Predict Result')\n", (1141, 1192), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1231), 'matplotlib.pyplot.title', 'plt.title', (['"""France Confirmed VS Dates"""'], {}), "('France Confirmed VS Dates')\n", (1202, 1231), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1251), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dates"""'], {}), "('Dates')\n", (1242, 1251), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1272), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amount"""'], {}), "('Amount')\n", (1262, 1272), True, 'import matplotlib.pyplot as plt\n'), ((1273, 1285), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1283, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1294, 1296), True, 'import matplotlib.pyplot as plt\n'), ((171, 218), 'keras.layers.Dense', 'Dense', ([], {'units': '(50)', 'input_dim': '(1)', 
'activation': '"""relu"""'}), "(units=50, input_dim=1, activation='relu')\n", (176, 218), False, 'from keras.layers import Dense\n'), ((230, 264), 'keras.layers.Dense', 'Dense', ([], {'units': '(50)', 'activation': '"""relu"""'}), "(units=50, activation='relu')\n", (235, 264), False, 'from keras.layers import Dense\n'), ((276, 312), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""'}), "(units=1, activation='sigmoid')\n", (281, 312), False, 'from keras.layers import Dense\n'), ((324, 359), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""linear"""'}), "(units=1, activation='linear')\n", (329, 359), False, 'from keras.layers import Dense\n'), ((516, 535), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (526, 535), False, 'import csv\n')] |
#!/usr/bin/env python3
"""Solve the 8x8 linear balance system M @ R = V and print R[6]."""
import numpy as np

# Coefficient matrix: one balance equation per row, eight unknowns.
M = np.array(
    (
        [1, -1, 0, 0, 0, 0, 0, 0],
        [0.4, 0.4, 0, -1, 0, 0, 0, 0],
        [0.6, 0.6, -1, 0, 0, 0, 0, 0],
        [0, 0, 0, -0.75, 0, 1, 0, 0],
        [-1, 0, 0, 0, 1, 1, 0, 0],
        [0, -1, 0, 0, 0, 0, 1, 1],
        [0, 0, 0, -1, 0, 1, 0, 1],
        [1, 1, 0, 0, 0, 0, 0, 0],
    )
)
# Kept for backward compatibility with any code that imports M_inv.
M_inv = np.linalg.inv(M)
# Right-hand side: only the last equation is non-homogeneous.
V = np.array((0, 0, 0, 0, 0, 0, 0, 100))
# np.linalg.solve is faster and numerically more stable than explicitly
# inverting M and multiplying (it avoids amplifying rounding error).
R = np.linalg.solve(M, V)
# print(R)
print("réponse:", R[6])
| [
"numpy.array",
"numpy.linalg.inv",
"numpy.matmul"
] | [((48, 298), 'numpy.array', 'np.array', (['([1, -1, 0, 0, 0, 0, 0, 0], [0.4, 0.4, 0, -1, 0, 0, 0, 0], [0.6, 0.6, -1, 0,\n 0, 0, 0, 0], [0, 0, 0, -0.75, 0, 1, 0, 0], [-1, 0, 0, 0, 1, 1, 0, 0], [\n 0, -1, 0, 0, 0, 0, 1, 1], [0, 0, 0, -1, 0, 1, 0, 1], [1, 1, 0, 0, 0, 0,\n 0, 0])'], {}), '(([1, -1, 0, 0, 0, 0, 0, 0], [0.4, 0.4, 0, -1, 0, 0, 0, 0], [0.6, \n 0.6, -1, 0, 0, 0, 0, 0], [0, 0, 0, -0.75, 0, 1, 0, 0], [-1, 0, 0, 0, 1,\n 1, 0, 0], [0, -1, 0, 0, 0, 0, 1, 1], [0, 0, 0, -1, 0, 1, 0, 1], [1, 1, \n 0, 0, 0, 0, 0, 0]))\n', (56, 298), True, 'import numpy as np\n'), ((370, 386), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (383, 386), True, 'import numpy as np\n'), ((391, 427), 'numpy.array', 'np.array', (['(0, 0, 0, 0, 0, 0, 0, 100)'], {}), '((0, 0, 0, 0, 0, 0, 0, 100))\n', (399, 427), True, 'import numpy as np\n'), ((432, 451), 'numpy.matmul', 'np.matmul', (['M_inv', 'V'], {}), '(M_inv, V)\n', (441, 451), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
"""Generate QR-decomposition test vectors for a hardware testbench.

Writes the input matrices as 16-bit two's-complement hex words in 7Q8
fixed point to ``input.txt`` and the expected R / Q^T entries (as plain
floats, one ``value tlast`` pair per line) to per-element comparison
files.
"""
import numpy as np


def _write_sample(stream, value):
    """Write one comparison line 'value tlast' (tlast is fixed to '0')."""
    print("{val} {tlast}".format(val=value, tlast='0'), file=stream)


fin = open('input.txt', 'w')  # DUT input file (hex words)
# One comparison file per compared element of R and Q^T.
_CMP_NAMES = ('r11', 'r12', 'r22', 'qt11', 'qt12', 'qt21', 'qt22')
cmp_files = {name: open(name + '.txt', 'w') for name in _CMP_NAMES}

matrices = (
    np.array([[1, 2], [3, 4]]),
    np.array([[1, 2], [3, 9]]),
    np.array([[21, 9], [-13, 69]]),
    np.array([[1, 1], [1, 2]]),
)
for A in matrices:
    (Q, R) = np.linalg.qr(A)
    # Force the diagonal of R to be non-negative; flip the matching
    # column of Q so that Q @ R is unchanged.
    if R[0, 0] < 0:
        R[0, :] = -R[0, :]
        Q[:, 0] = -Q[:, 0]
    if R[1, 1] < 0:
        R[1, 1] = -R[1, 1]
        Q[:, 1] = -Q[:, 1]
    # Input file: row-major matrix elements in 7Q8 fixed point,
    # masked to 16-bit two's complement.
    for el in A.reshape(-1):
        word = (el * 2 ** 8) & 0xffff
        print("{0:04x}".format(word), file=fin)
    # Comparison files: upper triangle of R and the entries of Q^T
    # (hence the transposed indexing for qt12 / qt21).
    _write_sample(cmp_files['r11'], R[0, 0])
    _write_sample(cmp_files['r12'], R[0, 1])
    _write_sample(cmp_files['r22'], R[1, 1])
    _write_sample(cmp_files['qt11'], Q[0, 0])
    _write_sample(cmp_files['qt12'], Q[1, 0])
    _write_sample(cmp_files['qt21'], Q[0, 1])
    _write_sample(cmp_files['qt22'], Q[1, 1])

fin.close()
for stream in cmp_files.values():
    stream.close()
| [
"numpy.array",
"numpy.linalg.qr"
] | [((437, 463), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (445, 463), True, 'import numpy as np\n'), ((471, 497), 'numpy.array', 'np.array', (['[[1, 2], [3, 9]]'], {}), '([[1, 2], [3, 9]])\n', (479, 497), True, 'import numpy as np\n'), ((505, 535), 'numpy.array', 'np.array', (['[[21, 9], [-13, 69]]'], {}), '([[21, 9], [-13, 69]])\n', (513, 535), True, 'import numpy as np\n'), ((543, 569), 'numpy.array', 'np.array', (['[[1, 1], [1, 2]]'], {}), '([[1, 1], [1, 2]])\n', (551, 569), True, 'import numpy as np\n'), ((642, 657), 'numpy.linalg.qr', 'np.linalg.qr', (['A'], {}), '(A)\n', (654, 657), True, 'import numpy as np\n')] |
import numpy as np
from sknet.network_construction import KNNConstructor
class ModularityLabelPropagation():
    """
    Semi-supervised method that propagates labels to instances not
    classified using the Modularity Propagation method.
    Attributes
    ----------
    generated_y_ : {ndarray, pandas series}, shape (n_samples, 1)
        The label list
    generated_G_ : NetworkX Network
        The constructed network on the fit of the model
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sknet.network_construction import KNNConstructor
    >>> from sknet.semi_supervised import ModularityLabelPropagation
    >>> X, y = load_iris(return_X_y = True)
    >>> y[10:20] = np.nan
    >>> y[70:80] = np.nan
    >>> y[110:120] = np.nan
    >>> propagator = ModularityLabelPropagation()
    >>> propagator.fit(X, y)
    >>> propagator.generated_y_  # doctest: +SKIP
    References
    ----------
    <NAME> & <NAME>. (2012). Semi-Supervised Learning Guided
    by the Modularity Measure in Complex Networks. Neurocomputing. 78.
    30-37. 10.1016/j.neucom.2011.04.042.
    <NAME> & <NAME>. (2016). Machine Learning in Complex
    Networks. 10.1007/978-3-319-17290-3.
    """
    # Sentinel meaning "no constructor argument given". This lets fit()
    # distinguish an explicit ``constructor=None`` from the default and,
    # unlike the previous ``constructor=KNNConstructor(...)`` default,
    # avoids building one shared mutable instance at definition time.
    _DEFAULT_CONSTRUCTOR = object()

    def __init__(self):
        self.estimator_type = 'classifier'

    def set_params(self, **parameters):
        """Set attributes from keyword arguments and return self."""
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self

    def get_params(self, deep=True):
        """Return the (empty) parameter dict, for sklearn compatibility."""
        return {}

    def fit(self, X=None, y=None, G=None, constructor=_DEFAULT_CONSTRUCTOR):
        """Fit the propagator by using the modularity measure
        to propagate the labels to non-labeled examples
        Parameters
        ----------
        X : {array-like, pandas dataframe} of shape
        (n_samples, n_features), optional (default=None)
            The input data samples. Can be None if G is set.
        y : {ndarray, pandas series}, shape (n_samples,) or
        (n_samples, n_classes), optional (default=None)
            The target classes. Can be None if G is set. Missing labels
            should have the np.nan value
        G : NetworkX Network, optional (default=None)
            The network with missing labels to be propagated. Can be
            None if X and y are not None in which case the constructor
            will be used to generate the network. Labels must be into
            the data of each node with the 'class' key. Missing labels
            should be valued np.nan
        constructor : BaseConstructor inherited class, optional
            A constructor class to transform the tabular data into a
            network; defaults to ``KNNConstructor(5, sep_comp=False)``,
            built lazily on first use. It can be set to None if a complex
            network is directly passed. Notice that you should use
            'sep_comp' as False on the constructor.
        Raises
        ------
        ValueError
            If both ``y`` and ``G`` are None, or if neither a constructor
            nor a network is available.
        """
        if y is None and G is None:
            # Bug fix: the previous code executed ``raise('...')`` which
            # raises a TypeError ("exceptions must derive from
            # BaseException") instead of a meaningful error.
            raise ValueError('Both y and G are None!')
        if constructor is self._DEFAULT_CONSTRUCTOR:
            # Build the default constructor lazily, per call.
            constructor = KNNConstructor(5, sep_comp=False)
        self.constructor = constructor
        if self.constructor is None and G is None:
            raise ValueError('You either have to set the constructor or the network')
        if y is not None and self.constructor is not None:
            G = self.constructor.fit_transform(X, y)
        elif y is None and G is not None:
            y = np.array([node[1]['class'] for node in G.nodes(data=True)])
        missing_elements = len(y[np.isnan(y)])
        # Generate modularity matrix
        Q = self._increment_modularity_matrix(G)
        while missing_elements != 0:
            propagated = False
            while not propagated:
                # Select the i and j of argmax
                i, j = np.unravel_index(Q.argmax(), Q.shape)
                Q[i][j] = -np.inf
                Q[j][i] = -np.inf
                if y[i] != y[j]:
                    # Skip pairs where both labels are already known.
                    # NOTE(review): if both y[i] and y[j] are NaN this branch
                    # "propagates" NaN -- confirm that this is intended.
                    if (~np.isnan(y[i])) and (~np.isnan(y[j])):
                        continue
                    if np.isnan(y[i]):
                        y[i] = y[j]
                        G.nodes[i]['class'] = y[i]
                        propagated = True
                    if np.isnan(y[j]):
                        y[j] = y[i]
                        G.nodes[j]['class'] = y[j]
                        propagated = True
                else:
                    continue
            missing_elements = len(y[np.isnan(y)])
        self.generated_y_ = y
        self.generated_G_ = G
        return self

    def get_propagated_labels(self):
        """
        Return the labels list with the propagated classes
        Returns
        -------
        generated_y_ : {ndarray, pandas series}, shape (n_samples, 1)
            The label list
        """
        return self.generated_y_

    def get_propagated_network(self):
        """
        Returns the generated network with the propagated labels
        Returns
        --------
        generated_G_ : NetworkX Network
            The constructed network on the fit of the model"""
        return self.generated_G_

    def _increment_modularity_matrix(self, G):
        """Build the (dense) modularity matrix Q of network ``G``."""
        N = len(G.nodes)
        E = len(G.edges)
        k = [val for (node, val) in G.degree()]
        Q = [[0 for i in range(N)] for j in range(N)]
        for i in range(N):
            for j in range(N):
                if i not in G.neighbors(j):
                    Q[i][j] = 0
                else:
                    Q[i][j] = (1/(2*E)) - (k[i]*k[j])/((2*E)**2)
        return np.array(Q)
| [
"numpy.array",
"sknet.network_construction.KNNConstructor",
"numpy.isnan"
] | [((2279, 2312), 'sknet.network_construction.KNNConstructor', 'KNNConstructor', (['(5)'], {'sep_comp': '(False)'}), '(5, sep_comp=False)\n', (2293, 2312), False, 'from sknet.network_construction import KNNConstructor\n'), ((6195, 6206), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (6203, 6206), True, 'import numpy as np\n'), ((4152, 4163), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (4160, 4163), True, 'import numpy as np\n'), ((4688, 4702), 'numpy.isnan', 'np.isnan', (['y[i]'], {}), '(y[i])\n', (4696, 4702), True, 'import numpy as np\n'), ((4856, 4870), 'numpy.isnan', 'np.isnan', (['y[j]'], {}), '(y[j])\n', (4864, 4870), True, 'import numpy as np\n'), ((5090, 5101), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (5098, 5101), True, 'import numpy as np\n'), ((4593, 4607), 'numpy.isnan', 'np.isnan', (['y[i]'], {}), '(y[i])\n', (4601, 4607), True, 'import numpy as np\n'), ((4615, 4629), 'numpy.isnan', 'np.isnan', (['y[j]'], {}), '(y[j])\n', (4623, 4629), True, 'import numpy as np\n')] |
import numpy as np
from mlp_train import *
# Predict classes from weights
def output(inputs, weights, biases):
    """Return the final-layer activations of a trained MLP for *inputs*."""
    activations = forward_propagation(inputs, weights, biases)
    return activations[-1]
def predict_single(output):
    """Map MLP output activations to predicted class indices.

    Assumes one output neuron per class; the prediction for each row is
    the index of its largest activation.
    """
    class_indices = np.argmax(output, axis=1)
    return class_indices
# Evaluate network predictions
def get_pred_err(predictions, labels):
    """Return the fraction of predictions that disagree with the labels."""
    accuracy = np.mean(predictions == labels)
    return 1 - accuracy
| [
"numpy.mean",
"numpy.argmax"
] | [((377, 397), 'numpy.argmax', 'np.argmax', (['output', '(1)'], {}), '(output, 1)\n', (386, 397), True, 'import numpy as np\n'), ((556, 586), 'numpy.mean', 'np.mean', (['(predictions == labels)'], {}), '(predictions == labels)\n', (563, 586), True, 'import numpy as np\n')] |
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod, abstractclassmethod
from collections import OrderedDict
import json
import numpy as np
import os
# Global registry mapping names to tensor classes.
TENSOR_CLASS = {}


def register(name):
    """Class-decorator factory that registers the class under *name*."""
    def core(tensor_cls):
        # Item assignment on the module-level dict needs no ``global``.
        TENSOR_CLASS[name] = tensor_cls
        return tensor_cls
    return core


def _get_cls(name):
    """Look up a previously registered tensor class by *name*."""
    return TENSOR_CLASS[name]
class NumpyEncoder(json.JSONEncoder):
    """ Special json encoder for numpy types.

    Integer and floating scalars become plain Python ints/floats; arrays
    become a ``{"__ndarray__": ..., "dtype": ..., "shape": ...}`` dict
    (reversed by ``json_numpy_obj_hook``).
    """

    def default(self, obj):
        # The abstract scalar bases cover every sized int/uint/float type
        # and, unlike the old explicit tuple, do not reference np.float_,
        # which was removed in NumPy 2.0.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape)
        return json.JSONEncoder.default(self, obj)
def json_numpy_obj_hook(dct):
    """``json.load`` object hook reversing NumpyEncoder's ndarray encoding.

    Dicts carrying an ``__ndarray__`` key are rebuilt into numpy arrays
    with the recorded dtype and shape; everything else passes through.
    """
    if not (isinstance(dct, dict) and "__ndarray__" in dct):
        return dct
    flat = np.asarray(dct["__ndarray__"], dtype=dct["dtype"])
    return flat.reshape(dct["shape"])
class Serializable:
    """ Implementation to read/write to file.
    All class the is inherited from this class needs to implement to_dict() and
    from_dict(); reading/writing ``.json`` and ``.npy`` files then comes
    for free via from_file()/to_file().

    NOTE(review): the class does not inherit from ``abc.ABC``, so the
    abstract decorators are not enforced at instantiation time; this is
    kept as-is to preserve behaviour.
    """

    @classmethod
    @abstractmethod
    def from_dict(cls, dict_repr, *args, **kwargs):
        """ Read the object from an ordered dictionary
        :param dict_repr: the ordered dictionary that is used to construct the object
        :type dict_repr: OrderedDict
        :param args, kwargs: the arguments that need to be passed into from_dict()
        :type args, kwargs: additional arguments
        """
        # classmethod + abstractmethod replaces the deprecated
        # abc.abstractclassmethod (deprecated since Python 3.3).
        pass

    @abstractmethod
    def to_dict(self):
        """ Construct an ordered dictionary from the object
        :rtype: OrderedDict
        """
        pass

    @classmethod
    def from_file(cls, path, *args, **kwargs):
        """ Read the object from a file (either .npy or .json)
        :param path: path of the file
        :type path: string
        :param args, kwargs: the arguments that need to be passed into from_dict()
        :type args, kwargs: additional arguments
        """
        if path.endswith(".json"):
            with open(path, "r") as f:
                d = json.load(f, object_hook=json_numpy_obj_hook)
        elif path.endswith(".npy"):
            d = np.load(path, allow_pickle=True).item()
        else:
            assert False, "failed to load {} from {}".format(cls.__name__, path)
        assert d["__name__"] == cls.__name__, "the file belongs to {}, not {}".format(
            d["__name__"], cls.__name__
        )
        return cls.from_dict(d, *args, **kwargs)

    def to_file(self, path: str) -> None:
        """ Write the object to a file (either .npy or .json)
        :param path: path of the file
        :type path: string
        """
        parent = os.path.dirname(path)
        if parent:
            # exist_ok avoids the racy exists()+makedirs() check.
            os.makedirs(parent, exist_ok=True)
        d = self.to_dict()
        d["__name__"] = self.__class__.__name__
        if path.endswith(".json"):
            with open(path, "w") as f:
                json.dump(d, f, cls=NumpyEncoder, indent=4)
        elif path.endswith(".npy"):
            np.save(path, d)
| [
"json.JSONEncoder.default",
"numpy.asarray",
"os.path.dirname",
"json.load",
"numpy.save",
"json.dump",
"numpy.load"
] | [((2719, 2754), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (2743, 2754), False, 'import json\n'), ((2857, 2907), 'numpy.asarray', 'np.asarray', (["dct['__ndarray__']"], {'dtype': "dct['dtype']"}), "(dct['__ndarray__'], dtype=dct['dtype'])\n", (2867, 2907), True, 'import numpy as np\n'), ((4150, 4195), 'json.load', 'json.load', (['f'], {'object_hook': 'json_numpy_obj_hook'}), '(f, object_hook=json_numpy_obj_hook)\n', (4159, 4195), False, 'import json\n'), ((4763, 4784), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (4778, 4784), False, 'import os\n'), ((4862, 4883), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (4877, 4883), False, 'import os\n'), ((5050, 5093), 'json.dump', 'json.dump', (['d', 'f'], {'cls': 'NumpyEncoder', 'indent': '(4)'}), '(d, f, cls=NumpyEncoder, indent=4)\n', (5059, 5093), False, 'import json\n'), ((5142, 5158), 'numpy.save', 'np.save', (['path', 'd'], {}), '(path, d)\n', (5149, 5158), True, 'import numpy as np\n'), ((4814, 4835), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (4829, 4835), False, 'import os\n'), ((4248, 4280), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (4255, 4280), True, 'import numpy as np\n')] |
# coding=utf-8
import json
import re
from typing import List
import pandas as pd
from collections import defaultdict
from dataclasses import dataclass
import numpy as np
from backend.integrations import database
def normalize_test_name(tests: np.ndarray):
    """
    Normalize test names to match database.
    - Replace / with + to support dashboard tests
    - Discard return type
    - Only keep namespace and method name
    :param tests: list of test names
    :return: lazy iterator over the normalized test names
    """
    def _normalize(raw):
        # Drop the return type (token before the first space) and
        # replace '/' so dashboard test names stay matchable.
        signature = raw.replace("/", "+").split(" ")[1]
        namespace, method = re.search(r"(.*)::(.*)\(", signature).groups()
        return ".".join((namespace, method))

    return map(_normalize, tests)
def normalize_iterative_test_name(test: str) -> str:
    """
    Normalize iterative test name, if necessary.

    Strips a trailing ``+suffix`` (iteration marker) from names of the
    form ``namespace.method+iteration``; other names pass through.
    :param test: test name
    :return: normalized test name
    """
    # Match once instead of twice (the previous code ran the same regex
    # in the condition and again in the branch).
    match = re.match(r"(.*\..+)\+.+", test)
    if match:
        return match.group(1)
    return test
def get_historical_metric_map(query_results: pd.DataFrame) -> dict:
    """
    Convert 2-columns query results to a dictionary mapping the test name to the historical metric value.

    Values for iterative variants of the same test (``name+iteration``)
    are accumulated under the normalized test name.
    :param query_results: 2-columns pandas dataframe with the query results
    :return: dictionary mapping the test names to the historical metric values
    """
    metric_map = defaultdict(int)
    for test_name, metric_value in query_results.values:
        metric_map[normalize_iterative_test_name(test_name)] += metric_value
    return metric_map
@dataclass
class ProblemData:
    """Inputs of the test-selection problem.

    Holds the test/method activity matrix loaded from JSON together with
    historical test metrics queried from the database, plus helpers to
    filter the matrix down to the tests/methods touched by a commit.

    NOTE(review): the explicit ``__init__`` below supersedes the
    ``@dataclass``-generated one; the field annotations mainly document
    the attributes.
    """
    # Unfiltered copies of the loaded data, restored by reset().
    original_matrix: np.ndarray
    original_tests: np.ndarray
    original_methods: np.ndarray
    # Working (possibly filtered) views: boolean matrix with one row per
    # test and one column per method, plus the matching name indexes.
    activity_matrix: np.ndarray
    tests_index: np.ndarray
    methods_index: np.ndarray
    # Method id -> method name, exactly as loaded from the JSON file.
    methods_map: dict
    # Normalized test name -> accumulated historical metric.
    history_test_fails: dict
    history_test_execution_times: dict
    # Dotted filenames seen as newly added files in processed changelists.
    new_files: dict
    # Branch path prefix used when matching changelist file paths.
    branch: str
    ignore_tests: list
    # NOTE(review): never assigned in the visible code -- confirm usage.
    swarm_size: int
    def __init__(
        self,
        activity_matrix_path,
        branch,
        fails_start_date,
        from_date,
        to_date,
        ignore_tests=None,
    ):
        """
        ProblemData initialization.
        - Load JSON data for an activity matrix file
        - Filter tests with no activity (zero rows)
        - Query historical fail counts and execution times
        :param activity_matrix_path: path of the activity matrix JSON file
        :param branch: branch path prefix used to match changelist paths
        :param fails_start_date: start date of the fail-count query window
        :param from_date: end of the fail window / start of the execution-time window
        :param to_date: end date of the execution-time query window
        :param ignore_tests: optional list of test names to ignore
        """
        if ignore_tests is None:
            ignore_tests = []
        self.branch = branch
        self.ignore_tests = ignore_tests
        self.load_json_data(activity_matrix_path)
        self.filter_tests_with_no_activity()
        # Load historical data
        self.history_test_fails = get_historical_metric_map(
            database.get_test_name_fails(fails_start_date, from_date)
        )
        self.history_test_execution_times = get_historical_metric_map(
            database.get_test_execution_times(from_date, to_date)
        )
        self.new_files = {}
    def load_json_data(self, activity_matrix):
        """
        Loads JSON data for an activity matrix.
        The loaded JSON data includes:
        - The binary activity matrix itself
        - The tests considered
        - The methods considered
        :param activity_matrix: path of the activity matrix JSON file
        """
        print(f"Loading json data from {activity_matrix}")
        # Find relative path and timestamp to load tests/methods maps.
        # Expects a Windows-style path: ...\actmatrix_<timestamp>.json
        actm_pattern = r"(.*)\\actmatrix_(.*)\.json"
        path, timestamp = re.search(actm_pattern, activity_matrix).groups()
        # activity matrix (stored as 0/1 values, converted to bool)
        with open(activity_matrix) as actm_file:
            self.activity_matrix = np.array(json.load(actm_file), dtype=bool)
        self.original_matrix = self.activity_matrix
        # tests (companion file sharing the matrix timestamp)
        with open(f"{path}\\testids_{timestamp}.json") as tests_file:
            tests = np.array(list(json.load(tests_file).values()))
        self.tests_index = np.array(list(normalize_test_name(tests)))
        self.original_tests = self.tests_index
        # methods (companion file sharing the matrix timestamp)
        with open(f"{path}\\methodids_{timestamp}.json") as methods_file:
            self.methods_map = json.load(methods_file)
        # print(f"methods map: {len(self.methods_map.keys())}")
        self.methods_index = np.array(list(self.methods_map.values()))
        self.original_methods = self.methods_index
    def reset(self):
        """
        Reset current activity matrix, tests and methods data to the originally loaded data.
        """
        self.activity_matrix = self.original_matrix
        self.tests_index = self.original_tests
        self.methods_index = self.original_methods
    def filter_tests_with_no_activity(self):
        """
        Filter tests with no activity (zero rows).
        """
        # Keep only rows (tests) that cover at least one method.
        active_tests = ~np.all(self.activity_matrix == 0, axis=1)
        self.tests_index = self.tests_index[active_tests]
        self.activity_matrix = self.activity_matrix[active_tests]
    def filter_methods_with_no_activity(self):
        """
        Filter methods with no activity (zero columns)
        """
        # Keep only columns (methods) covered by at least one test.
        active_methods = ~np.all(self.activity_matrix == 0, axis=0)
        self.methods_index = self.methods_index[active_methods]
        self.activity_matrix = self.activity_matrix[:, active_methods]
    def filter_data_for_commit(self, changed_methods):
        """
        Filter matrix and indexes based on commit.
        Also, the changed data is filtered for tests/methods with no activity
        :param changed_methods: indexes of methods changed by the commit
        """
        self.activity_matrix = self.activity_matrix[:, changed_methods]
        self.methods_index = self.methods_index[changed_methods]
        # Filter no activity tests/methods
        self.filter_tests_with_no_activity()
        self.filter_methods_with_no_activity()
    def get_changed_indexes_for_changelist(
        self, changelist: List[List], ignore_changes: List
    ) -> object:
        """
        Get the changed method indexes in the activity matrix based on the changelist
        :param changelist: list of changed files (each element is pair with the type of change and the filename)
        :param ignore_changes: list of file paths to be ignored
        :return: on success, returns a list of changed indexes in the activity matrix.
                 on failure, returns a string describing the error case
        """
        # Filter changelist before processing
        changelist = [
            change
            for change in changelist
            if not any(
                (ignore in change[1]) or (change[1] == "/platform/trunk")
                for ignore in ignore_changes
            )
        ]
        # Process changelist: keep .cs files under the branch, excluding
        # *.xaml.cs, and convert their paths to dotted names.
        new_files = []
        changed_files = []
        cs_pattern = self.branch + r"/(.*)\.cs$"
        xaml_cs_pattern = self.branch + r"/(.*)xaml\.cs"
        for x in changelist:
            if re.search(cs_pattern, x[1]):
                # Check if it's not a *.xaml.cs file
                if not re.search(xaml_cs_pattern, x[1]):
                    filename = re.search(cs_pattern, x[1]).group(1)
                    dot_filename = filename.replace("/", ".")
                    changed_files.append(dot_filename)
                    # Check if new file and store in hash table
                    # NOTE(review): "A" presumably marks an added file in
                    # the VCS changelist -- confirm; 123 is an arbitrary
                    # marker value, only key presence matters.
                    if x[0] == "A":
                        self.new_files[dot_filename] = 123
                        new_files.append(dot_filename)
                    # Check if modified an already known new file
                    elif self.new_files.get(dot_filename) is not None:
                        new_files.append(dot_filename)
        # Check if no .cs files were changed
        if not changed_files:
            return "[Error] Changelist contains no covered .cs files"
        # Check if only changed new files
        if len(changed_files) == len(new_files):
            return "[Error] Changelist contains only new files or modified new files"
        # Map files to method indexes
        changed_indexes = []
        for method in self.methods_map.values():
            if any(changed in method for changed in changed_files):
                matched_methods = np.where(self.methods_index == method)
                changed_indexes.append(matched_methods[0][0])
        # Check if there are no method indexes to return
        if not changed_indexes:
            return "[Error] The provided activity matrix has no coverage data for the changed files"
        return changed_indexes
| [
"backend.integrations.database.get_test_execution_times",
"numpy.where",
"re.match",
"collections.defaultdict",
"json.load",
"backend.integrations.database.get_test_name_fails",
"numpy.all",
"re.search"
] | [((870, 902), 're.match', 're.match', (['"""(.*\\\\..+)\\\\+.+"""', 'test'], {}), "('(.*\\\\..+)\\\\+.+', test)\n", (878, 902), False, 'import re\n'), ((1347, 1363), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1358, 1363), False, 'from collections import defaultdict\n'), ((2666, 2723), 'backend.integrations.database.get_test_name_fails', 'database.get_test_name_fails', (['fails_start_date', 'from_date'], {}), '(fails_start_date, from_date)\n', (2694, 2723), False, 'from backend.integrations import database\n'), ((2817, 2870), 'backend.integrations.database.get_test_execution_times', 'database.get_test_execution_times', (['from_date', 'to_date'], {}), '(from_date, to_date)\n', (2850, 2870), False, 'from backend.integrations import database\n'), ((4132, 4155), 'json.load', 'json.load', (['methods_file'], {}), '(methods_file)\n', (4141, 4155), False, 'import json\n'), ((4793, 4834), 'numpy.all', 'np.all', (['(self.activity_matrix == 0)'], {'axis': '(1)'}), '(self.activity_matrix == 0, axis=1)\n', (4799, 4834), True, 'import numpy as np\n'), ((5116, 5157), 'numpy.all', 'np.all', (['(self.activity_matrix == 0)'], {'axis': '(0)'}), '(self.activity_matrix == 0, axis=0)\n', (5122, 5157), True, 'import numpy as np\n'), ((6939, 6966), 're.search', 're.search', (['cs_pattern', 'x[1]'], {}), '(cs_pattern, x[1])\n', (6948, 6966), False, 'import re\n'), ((918, 950), 're.match', 're.match', (['"""(.*\\\\..+)\\\\+.+"""', 'test'], {}), "('(.*\\\\..+)\\\\+.+', test)\n", (926, 950), False, 'import re\n'), ((3469, 3509), 're.search', 're.search', (['actm_pattern', 'activity_matrix'], {}), '(actm_pattern, activity_matrix)\n', (3478, 3509), False, 'import re\n'), ((3639, 3659), 'json.load', 'json.load', (['actm_file'], {}), '(actm_file)\n', (3648, 3659), False, 'import json\n'), ((8212, 8250), 'numpy.where', 'np.where', (['(self.methods_index == method)'], {}), '(self.methods_index == method)\n', (8220, 8250), True, 'import numpy as np\n'), ((7044, 7076), 
're.search', 're.search', (['xaml_cs_pattern', 'x[1]'], {}), '(xaml_cs_pattern, x[1])\n', (7053, 7076), False, 'import re\n'), ((3850, 3871), 'json.load', 'json.load', (['tests_file'], {}), '(tests_file)\n', (3859, 3871), False, 'import json\n'), ((7109, 7136), 're.search', 're.search', (['cs_pattern', 'x[1]'], {}), '(cs_pattern, x[1])\n', (7118, 7136), False, 'import re\n')] |
import numpy as np
import pygame as pg
class Tiles(object):
    """Checkerboard of square tiles centred on the pygame display surface."""

    def __init__(self, size):
        """
        :param size: How many tiles wide and high
        """
        self.size = size
        self.screen_rect = pg.display.get_surface().get_rect()
        self.screen_width = self.screen_rect.width
        self.screen_height = self.screen_rect.height
        # The shorter screen edge limits how large each square tile can be.
        self.tile_size = min(self.screen_width, self.screen_height) // self.size
        offsets = self.calculate_offsets()
        self.tile_rects = self.calculate_rects(*offsets)
        self.bright_green = pg.Color("GreenYellow")
        self.dark_green = pg.Color("LawnGreen")

    def calculate_offsets(self):
        """Return the (x, y) margins that centre the tile grid on screen."""
        board_extent = self.size * self.tile_size
        return ((self.screen_width - board_extent) / 2,
                (self.screen_height - board_extent) / 2)

    def calculate_rects(self, x_offset, y_offset):
        """Build the size-by-size array of per-tile rectangles."""
        rects = np.empty((self.size, self.size), pg.Rect)
        for col in range(self.size):
            for row in range(self.size):
                rects[col][row] = pg.Rect(
                    x_offset + col * self.tile_size,
                    y_offset + row * self.tile_size,
                    self.tile_size,
                    self.tile_size,
                )
        return rects

    def draw(self, surface):
        """Paint every tile, alternating the two greens in a checker pattern."""
        for col in range(self.size):
            for row in range(self.size):
                colour = self.bright_green if (row + col) % 2 == 0 else self.dark_green
                pg.draw.rect(surface, colour, self.tile_rects[col][row])

    def update_window_size(self, width, height):
        """Recompute the tile geometry after the window has been resized."""
        self.screen_width = width
        self.screen_height = height
        self.tile_size = min(self.screen_width, self.screen_height) // self.size
        x_offset, y_offset = self.calculate_offsets()
        self.tile_rects = self.calculate_rects(x_offset, y_offset)

    def get_rectangle(self, coordinate) -> pg.Rect:
        """Return the rectangle for a board *coordinate* (object with .x and .y)."""
        return self.tile_rects[coordinate.x][coordinate.y]
| [
"pygame.display.get_surface",
"pygame.draw.rect",
"numpy.empty",
"pygame.Color",
"pygame.Rect"
] | [((639, 662), 'pygame.Color', 'pg.Color', (['"""GreenYellow"""'], {}), "('GreenYellow')\n", (647, 662), True, 'import pygame as pg\n'), ((689, 710), 'pygame.Color', 'pg.Color', (['"""LawnGreen"""'], {}), "('LawnGreen')\n", (697, 710), True, 'import pygame as pg\n'), ((1015, 1056), 'numpy.empty', 'np.empty', (['(self.size, self.size)', 'pg.Rect'], {}), '((self.size, self.size), pg.Rect)\n', (1023, 1056), True, 'import numpy as np\n'), ((219, 243), 'pygame.display.get_surface', 'pg.display.get_surface', ([], {}), '()\n', (241, 243), True, 'import pygame as pg\n'), ((1167, 1273), 'pygame.Rect', 'pg.Rect', (['(x_offset + i * self.tile_size)', '(y_offset + j * self.tile_size)', 'self.tile_size', 'self.tile_size'], {}), '(x_offset + i * self.tile_size, y_offset + j * self.tile_size, self.\n tile_size, self.tile_size)\n', (1174, 1273), True, 'import pygame as pg\n'), ((1499, 1562), 'pygame.draw.rect', 'pg.draw.rect', (['surface', 'self.bright_green', 'self.tile_rects[i][j]'], {}), '(surface, self.bright_green, self.tile_rects[i][j])\n', (1511, 1562), True, 'import pygame as pg\n'), ((1605, 1666), 'pygame.draw.rect', 'pg.draw.rect', (['surface', 'self.dark_green', 'self.tile_rects[i][j]'], {}), '(surface, self.dark_green, self.tile_rects[i][j])\n', (1617, 1666), True, 'import pygame as pg\n')] |
import pandas as pd
import numpy as np
import netCDF4 as nc
from .subroutines import *
class SpecificDoses(pd.DataFrame):
"""A class for specific dose estimates akin to dosimetry measurements
High resolution data allows for personal and ambient dose estimation without the need for
direct measurement. This class is structured like a table with a set of functions to add
columns ultimately leading to dose estimates. Each row of this table represents a specific
exposure instance, i.e. an individual at a specific location for a specific date and time
with a specific exposure ratio. See Harris et al. 2021
(https://doi.org/10.3390/atmos12020268) for more information on calculations appropriate
for this class.
Parameters
----------
src_filename_format : str
Describes the filename of the netCDF files containing the UV data with 'yyyy' in place
of the year.
src_directory : str
The directory where the data is stored. Must end with a slash.
Notes
-----
Presently, the class is inherited from a pandas.DataFrame which is somewhat restrictive
and will likely be revised in a later update. For the time being, this means that the
parameters cannot be set when initialising a `SpecificDoses` object, they must instead
be adjusted after initialisation, like so::
ExistingExposureMapObject.src_directory = "/new/data/directory/"
Example
-------
In this example, we illustrate the process for calculating the doses in Harris et al. 2021
(https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
spreadsheet contains only local Swiss time and not UTC time. There are four important
functions as part of this class, three for standardising and preparing the columns,
and one for actually loading the data and performing the dose calculations. See below::
import python_tamer as pt
import pandas as pd
example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
header=2,index_col=0,usecols="B:K"))
example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
example = example.standard_column_names()
example = example.schedule_constant_exposure().ER_from_posture()
example = example.calculate_specific_dose()
"""
    # This property ensures that functions return the same subclass:
    # pandas consults ``_constructor`` when it builds results of
    # operations, so slicing/filtering a SpecificDoses stays a
    # SpecificDoses rather than degrading to a plain DataFrame.
    @property
    def _constructor(self):
        return SpecificDoses

    # This adds some useful metadata (self-explanatory).
    # ``_metadata`` tells pandas to propagate these custom attributes
    # through operations; the two class attributes below are their
    # defaults ('yyyy' in the filename is replaced by the year).
    _metadata = ["src_filename_format","src_directory"]
    src_filename_format = 'UVery.AS_ch02.lonlat_yyyy01010000.nc'
    src_directory = 'C:/Data/UV/' # TODO: set up __init__ for these options
# It feels like this should be declared with __init__ as well but idk
def standard_column_names(self) :
"""Limited function to standardise column names
When loading tables to use as the basis for a SpecificDoses table, some columns may have
slightly names to what is expected. This function standardises the names but is very
limited in terms of what it can recognise. The user is encourages to ensure the columns
are correctly labelled themselves and not to rely on this function.
Returns
-------
SpecificDoses
The table has its column names modified.
"""
legend_dict_reverse = {'Point' : ['Lieu de mesure'],
'Date' : ['Date'],
'Time_start' : ['Heure début','Start_time','Start time'],
'Time_end' : ['Heure fin','End_time','End time'],
'Measured_dose' : ['Exposition [MED]','Exposure'],
'Anatomic_zone' : ['Zone anatomique','Body part','Anatomic zone'],
'Posture' : ['Body posture'],
'Latitude' : ['lat'],
'Longitude' : ['lon','lng']}
legend_dict = {keys: old_keys for old_keys, old_values in legend_dict_reverse.items() for keys in old_values}
self = self.rename(columns=legend_dict)
return self
def schedule_constant_exposure(self) :
"""Generates exposure schedules given start and end times.
This function generates exposure schedules based on simple continuous exposure, i.e.
with a start time and an end time. The exposure schedule is a vector with length 24
with each entry representing the proportion of the corresponding hour of the day that
the subject is exposed.
Returns
-------
python_tamer.SpecificDoses
An exposure_schedule column is created and is appended to the input
`SpecificDoses` object or, if that column already exists, it is overwritten.
Notes
-----
The input `SpecificDoses` object must contain the following columns:
* ``Time_start``
* ``Time_end``
Example
-------
In this example, we illustrate the process for calculating the doses in Harris et al. 2021
(https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
spreadsheet contains only local Swiss time and not UTC time. There are four important
functions as part of this class, three for standardising and preparing the columns,
and one for actually loading the data and performing the dose calculations. See below::
import python_tamer as pt
import pandas as pd
example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
header=2,index_col=0,usecols="B:K"))
example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
example = example.standard_column_names()
example = example.schedule_constant_exposure().ER_from_posture()
example = example.calculate_specific_dose()
"""
def schedule_constant_exposure_iter(Start_time,End_time) :
"""Iterates through rows of a SpecificDoses table to generate schedules.
This function is designed to be applied to each row in a datatable to generate an
exposure schedule based on a start time and end time
Parameters
----------
Start_time : datetime.time
UTC time at which exposure period begins
End_time : datetime.time
UTC time at which exposure period end
Returns
-------
numpy.array
24 length vector of values between 0 and 1 indicating proportion
of time exposed for that corresponding hour of the day.
"""
schedule = np.zeros(24)
schedule[Start_time.hour:End_time.hour] = 1
# Modify start and end hours according to proportion of time exposed
if Start_time.minute != 0 :
schedule[Start_time.hour] = (1 - Start_time.minute/60)
if End_time.minute != 0 :
schedule[End_time.hour] = End_time.minute/60
return schedule
# With that function defined, we need just one line to apply it to the whole table
self["Schedule"] = self.apply(lambda x: schedule_constant_exposure_iter(
x["Time_start"],x["Time_end"]),axis='columns')
return self
def ER_from_posture(self,
Vis_table_path=None,
Vis_table=None) :
"""ER_from_posture calculates Exposure Ratios for a given anatomic zone, posture, and date.
This function calculates ER as a percentage between 0 and 100 based on information from an input table.
The input table must contain certain columns at a minimum. Those are: Date, Anatomic_zone, and Posture.
This function contains hard-coded synonyms for certain anatomical zones, e.g. 'Forehead" maps to "Face'.
See Vernez et al., Journal of Exposure Science and Environmental Epidemiology (2015) 25, 113–118
(https://doi.org/10.1038/jes.2014.6) for further details on the model used for the calculation.
Parameters
----------
Vis_table_path : str, optional
The full path to an alternative table for the Vis parameter.
Must be a csv file. Defaults to None.
Vis_table : str, optional
An alternative table for the Vis parameter. Defaults to None.
Returns
-------
SpecificDoses
Returns input table appended with ER column
Notes
-----
The SpecificDoses table used must contain columns for Date, Anatomic_zone, and Posture.
The Date column should contain DateTime entries. The Anatonic_zone column should contain one string per
row describing the exposed body part. The Posture column should contain one string per row describing
one of six accepted postures.
Example
-------
In this example, we illustrate the process for calculating the doses in Harris et al. 2021
(https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
spreadsheet contains only local Swiss time and not UTC time. There are four important
functions as part of this class, three for standardising and preparing the columns,
and one for actually loading the data and performing the dose calculations. See below::
import python_tamer as pt
import pandas as pd
example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
header=2,index_col=0,usecols="B:K"))
example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
example = example.standard_column_names()
example = example.schedule_constant_exposure().ER_from_posture()
example = example.calculate_specific_dose()
"""
# This chunk of code checks if the default Vis table should be used or if the user enters some alternative table.
if Vis_table is None and Vis_table_path is None :
Vis_table = pd.DataFrame.from_records(
columns=['Seated','Kneeling','Standing erect arms down','Standing erect arms up','Standing bowing'],
index=['Face','Skull','Forearm','Upper arm','Neck','Top of shoulders','Belly','Upper back','Hand','Shoulder','Upper leg','Lower leg','Lower back'],
data=[[53.7,28.7,46.6,44.9,19.2],
[56.2,66.6,61.1,58.4,67.5],
[62.3,56.5,49.4,53.1,62.1],
[51.7,60.5,45.9,65.3,61.6],
[58.3,84.3,67.6,65.2,81.6],
[35.9,50.3,48.6,45.7,85.3],
[58.1,45.1,50.3,49.6,15.2],
[35.9,50.3,48.6,45.7,85.3],
[59.2,58.8,42.4,55,58.5],
[68,62,63,67.1,64],
[65.4,45.4,50.9,51,43.5],
[32.8,63.4,49.7,50.3,50],
[44.9,51.6,56.6,53.4,86.9]])
# The 'standing moving' posture must be dealt with somehow...
# Vis_table['Standing moving']= (Vis_table['Standing erect arms down'] + Vis_table['Standing bowing']) / 2
# TODO: add interpeter or force users to conform?
Vis_table['Standing moving']= Vis_table['Standing erect arms down']
elif Vis_table is None :
Vis_table = pd.read_csv(Vis_table_path)
# Below is a dictionary describing a range of synonyms for the anatomical zones defined in the Vis table.
Anatomic_zone_synonyms_reverse = {
'Forearm' : ['wrist',
'Left extern radial',
'Right extern radial',
'Left wrist: radius head',
'Right wrist: radius head',
'Left wrist',
'Right wrist'],
'Face' : ['Forehead'],
'Upper back' : ['Right trapezoid',
'Left trapezoid',
'trapezius'],
'Belly' : ['Chest'],
'Shoulder' : ['Left deltoid',
'Right deltoid',
'Left shoulder',
'Right shoulder'],
'Upper arm' : ['Left elbow',
'Right elbow',
'Left biceps',
'Right biceps'],
'Upper leg' : ['Left thigh',
'Right thigh',
'Left knee',
'Right knee'],
'Lower back' : ['Low back']
}
# The dictionary is reversed so that the multiple synonyms can be mapped to the few correct terms for the Vis table.
Anatomic_zone_synonyms = {keys: old_keys for old_keys, old_values in Anatomic_zone_synonyms_reverse.items() for keys in old_values}
self = self.replace({'Anatomic_zone' : Anatomic_zone_synonyms})
# With the correct anatomic zone names established, we can lookup the Vis values from the table
# TODO: lookup is being depreciated, must replace with something new
Vis = Vis_table.lookup(self['Anatomic_zone'],self['Posture'])
# Next we must calculate the minimal Solar Zenith Angle for the given date
mSZA = min_solar_zenith_angle(self.Date,self.Latitude)
# With the Vis value and the SZA, we can calculate the ER according to the Vernez model
self.loc[:,'ER'] = ER_Vernez_model_equation(Vis,mSZA) / 100
return self
    def calculate_specific_dose(self) :
        """Calculates doses according to exposure schedule, ER, date, and location.

        This function takes the SpecificDoseEstimationTable and calculates the
        specific ambient and personal doses according to the exposure schedule
        and ER. First it reads the Date column to determine which years of
        data must be loaded. It then iterates through each year, loading only
        the necessary dates, and applies the exposure schedule and the ER to
        calculate the ambient and personal doses.

        Returns
        -------
        SpecificDoses
            The input table appended with Ambient_dose and Personal_dose
            columns (in SED).

        Notes
        -----
        The input SpecificDoses object must include Date, Schedule, ER,
        Latitude, and Longitude columns.
        Consult Harris et al. 2021 (https://doi.org/10.3390/atmos12020268)
        for more information on how this function can be used in the context
        of mimicking UV dosimetry measurements.
        """
        # First step is find unique years to avoid loading unnecessary data
        years = pd.DatetimeIndex(self.Date).year
        unique_years = sorted(set(years))
        # Pre-create the output columns so per-year assignments always land.
        self['Ambient_dose'] = np.nan
        self['Personal_dose'] = np.nan
        for year in unique_years :
            # Load netCDF file for this year (src_filename_format has 'yyyy'
            # substituted with the year).
            print("Processing year "+str(year))
            dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year)))
            dataset.set_auto_mask(False) # This is important for nans to import correctly
            # Make temporary table holding only this year's rows.
            temp_table = self[years == year].copy()
            # Find all unique days-of-year to be loaded; unique_days_idx maps
            # each row back to its position in unique_days.
            unique_days,unique_days_idx = np.unique(pd.DatetimeIndex(temp_table.Date).dayofyear,
                return_inverse=True)
            temp_table['unique_days_idx'] = unique_days_idx
            #pd.DatetimeIndex(nc.num2date(dataset.variables["time"][:],dataset.variables["time"].units,only_use_cftime_datetimes=False))
            if dataset.dimensions['time'].size == 24 :
                # needed if just a single day: take every timestep
                time_subset = [True for i in range(dataset.dimensions['time'].size)]
            else :
                # Next we pull a subset from the netCDF file:
                # declare false array with same length as the time dimension
                time_subset = [False for i in range(dataset.dimensions['time'].size)]
                # reshape false array to have first dimension 24 (hours in day)
                time_subset = assert_data_shape_24(time_subset)
                # set the appropriate days as true (dayofyear is 1-based)
                time_subset[:,unique_days-1] = True
                # flatten time_subset array back to one dimension
                time_subset = time_subset.flatten(order='F')
            # Load only the selected timesteps, reshaped to (24, days, lat, lon).
            data = assert_data_shape_24(dataset['UV_AS'][time_subset,:,:])
            # TODO: improve comprehension of raw data units rather than assuming
            # convert lat lon into pixel coordinates
            # TODO: consider is necessary to load entire maps for just a few required pixels
            lat = dataset['lat'][:]
            lon = dataset['lon'][:]
            temp_table['pixel_lat'] = temp_table.apply(lambda x:
                find_nearest(lat,x['Latitude']),axis='columns')
            temp_table['pixel_lon'] = temp_table.apply(lambda x:
                find_nearest(lon,x['Longitude']),axis='columns')
            # Calculate doses: ambient weights the 24 hourly values by the
            # schedule; personal additionally multiplies by the row's ER.
            temp_table['Ambient_dose'] = temp_table.apply(lambda x:
                np.sum(data[:,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] *
                x['Schedule']),axis='columns')
            temp_table['Personal_dose'] = temp_table.apply(lambda x:
                np.sum(data[:,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] *
                (x['Schedule'] * x['ER'])),axis='columns')
            # extra step necessary to ensure correct assignment back to self
            self.loc[temp_table.index,'Ambient_dose'] = temp_table['Ambient_dose'].values
            self.loc[temp_table.index,'Personal_dose'] = temp_table['Personal_dose'].values
        # TODO: improve units options here
        # NOTE(review): conversion assumes hourly source data in UV-index-like
        # units (/40 -> W m^-2, x3600 s -> J m^-2, /100 -> SED) — confirm.
        self['Ambient_dose'] = self['Ambient_dose']/40*3600/100 # SED
        self['Personal_dose'] = self['Personal_dose']/40*3600/100 # SED
        return self
    def analyse_variable(self,
                         variable="UV_AS",
                         statistic="Mean",
                         src_filename_format=None,
                         src_directory=None) :
        """Basic calculations for specific exposure instances.

        Calculates information other than ambient and personal doses that
        corresponds to specific exposure instances.

        Parameters
        ----------
        variable : str, optional
            The name of the variable to be analysed. This informs what data
            should be pulled from the source netCDF files and the name of the
            column(s) that will be created. Defaults to "UV_AS", i.e. the
            All-Sky UV data used in calculate_specific_dose.
        statistic : str or list, optional
            The statistic to be calculated, options include: mean, median,
            stdev, variance, min, max, weighted_mean, and sum. Not case
            sensitive. Can be a single string or a list of strings whereby
            multiple columns will be calculated. Defaults to "Mean".
        src_filename_format : str, optional
            Allows selecting different source data, e.g. comparing doses
            calculated with one dataset to cloud cover from another. Defaults
            to None, where the object's metadata is used.
        src_directory : str, optional
            As above; defaults to None, where the object's metadata is used.

        Returns
        -------
        SpecificDoses
            The table appended with new columns named [variable]_[statistic].

        Raises
        ------
        TypeError
            If `statistic` is neither a str nor a list.
        """
        # users have option to load different files, otherwise defaults to metadata
        if src_filename_format is None :
            src_filename_format = self.src_filename_format
        if src_directory is None :
            src_directory = self.src_directory
        # First step is find unique years to avoid loading unnecessary data
        years = pd.DatetimeIndex(self.Date).year
        unique_years = sorted(set(years))
        if isinstance(statistic,str) :
            self[variable+"_"+statistic] = np.nan
            # convert to list to simplify code later
            statistic = [statistic]
        elif isinstance(statistic,list) :
            for x in statistic :
                self[variable+"_"+x]=np.nan
        else :
            raise TypeError("statistic input must be str or list of str")
        for year in unique_years :
            # Load netCDF file for this year.
            print("Processing year "+str(year))
            dataset=nc.Dataset(src_directory+src_filename_format.replace('yyyy',str(year)))
            dataset.set_auto_mask(False) # This is important for nans to import correctly
            # Make temporary table holding only this year's rows.
            temp_table = self[years == year].copy()
            # Find all unique days-of-year to be loaded.
            unique_days,unique_days_idx = np.unique(pd.DatetimeIndex(temp_table.Date).dayofyear,
                return_inverse=True)
            temp_table['unique_days_idx'] = unique_days_idx
            #pd.DatetimeIndex(nc.num2date(dataset.variables["time"][:],dataset.variables["time"].units,only_use_cftime_datetimes=False))
            if dataset.dimensions['time'].size == 24 :
                # needed if just a single day: take every timestep
                time_subset = [True for i in range(dataset.dimensions['time'].size)]
            else :
                # Next we pull a subset from the netCDF file:
                # declare false array with same length as the time dimension
                time_subset = [False for i in range(dataset.dimensions['time'].size)]
                # reshape false array to have first dimension 24 (hours in day)
                time_subset = assert_data_shape_24(time_subset)
                # set the appropriate days as true (dayofyear is 1-based)
                time_subset[:,unique_days-1] = True
                # flatten time_subset array back to one dimension
                time_subset = time_subset.flatten(order='F')
            # Load only the selected timesteps, reshaped to (24, days, lat, lon).
            data = assert_data_shape_24(dataset[variable][time_subset,:,:])
            # TODO: improve comprehension of raw data units rather than assuming
            # convert lat lon into pixel coordinates
            # TODO: consider is necessary to load entire maps for just a few required pixels
            lat = dataset['lat'][:]
            lon = dataset['lon'][:]
            temp_table['pixel_lat'] = temp_table.apply(lambda x:
                find_nearest(lat,x['Latitude']),axis='columns')
            temp_table['pixel_lon'] = temp_table.apply(lambda x:
                find_nearest(lon,x['Longitude']),axis='columns')
            # Calculate each requested statistic. All statistics except
            # weighted_mean restrict to hours with non-zero scheduled exposure
            # (data[x['Schedule']!=0, ...]); weighted_mean instead weights all
            # 24 hours by the schedule.
            # NOTE(review): a stat string matching none of the branches below
            # is silently skipped, leaving its column as NaN — confirm intent.
            for stat in statistic :
                # mean
                if stat.lower() in ["mean",'average','avg'] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.mean(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # median
                elif stat.lower() in ["median","med"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.median(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # stdev
                elif stat.lower() in ["std","sd","stdev"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.std(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # variance
                elif stat.lower() in ["var","variance"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.var(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # minimum
                elif stat.lower() in ["min",'minimum'] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.amin(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # maximum
                elif stat.lower() in ["max","maximum"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.amax(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # weighted mean
                elif stat.lower() in ["weighted_mean","weighted_average","mean_weighted","average_weighted","avg_weighted"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.average(data[:,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']],weights=x['Schedule']),axis='columns')
                # sum
                elif stat.lower() in ["sum","total"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.sum(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # extra step necessary to ensure correct assignment back to self
                self.loc[temp_table.index,variable+"_"+stat] = temp_table[variable+"_"+stat].values
        return self
| [
"pandas.DataFrame.from_records",
"numpy.mean",
"numpy.median",
"numpy.amin",
"pandas.read_csv",
"numpy.average",
"pandas.DatetimeIndex",
"numpy.sum",
"numpy.zeros",
"numpy.std",
"numpy.amax",
"numpy.var"
] | [((7021, 7033), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (7029, 7033), True, 'import numpy as np\n'), ((10556, 11298), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', ([], {'columns': "['Seated', 'Kneeling', 'Standing erect arms down', 'Standing erect arms up',\n 'Standing bowing']", 'index': "['Face', 'Skull', 'Forearm', 'Upper arm', 'Neck', 'Top of shoulders',\n 'Belly', 'Upper back', 'Hand', 'Shoulder', 'Upper leg', 'Lower leg',\n 'Lower back']", 'data': '[[53.7, 28.7, 46.6, 44.9, 19.2], [56.2, 66.6, 61.1, 58.4, 67.5], [62.3, \n 56.5, 49.4, 53.1, 62.1], [51.7, 60.5, 45.9, 65.3, 61.6], [58.3, 84.3, \n 67.6, 65.2, 81.6], [35.9, 50.3, 48.6, 45.7, 85.3], [58.1, 45.1, 50.3, \n 49.6, 15.2], [35.9, 50.3, 48.6, 45.7, 85.3], [59.2, 58.8, 42.4, 55, \n 58.5], [68, 62, 63, 67.1, 64], [65.4, 45.4, 50.9, 51, 43.5], [32.8, \n 63.4, 49.7, 50.3, 50], [44.9, 51.6, 56.6, 53.4, 86.9]]'}), "(columns=['Seated', 'Kneeling',\n 'Standing erect arms down', 'Standing erect arms up', 'Standing bowing'\n ], index=['Face', 'Skull', 'Forearm', 'Upper arm', 'Neck',\n 'Top of shoulders', 'Belly', 'Upper back', 'Hand', 'Shoulder',\n 'Upper leg', 'Lower leg', 'Lower back'], data=[[53.7, 28.7, 46.6, 44.9,\n 19.2], [56.2, 66.6, 61.1, 58.4, 67.5], [62.3, 56.5, 49.4, 53.1, 62.1],\n [51.7, 60.5, 45.9, 65.3, 61.6], [58.3, 84.3, 67.6, 65.2, 81.6], [35.9, \n 50.3, 48.6, 45.7, 85.3], [58.1, 45.1, 50.3, 49.6, 15.2], [35.9, 50.3, \n 48.6, 45.7, 85.3], [59.2, 58.8, 42.4, 55, 58.5], [68, 62, 63, 67.1, 64],\n [65.4, 45.4, 50.9, 51, 43.5], [32.8, 63.4, 49.7, 50.3, 50], [44.9, 51.6,\n 56.6, 53.4, 86.9]])\n", (10581, 11298), True, 'import pandas as pd\n'), ((16391, 16418), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['self.Date'], {}), '(self.Date)\n', (16407, 16418), True, 'import pandas as pd\n'), ((23264, 23291), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['self.Date'], {}), '(self.Date)\n', (23280, 23291), True, 'import pandas as pd\n'), ((11870, 11897), 'pandas.read_csv', 
'pd.read_csv', (['Vis_table_path'], {}), '(Vis_table_path)\n', (11881, 11897), True, 'import pandas as pd\n'), ((17069, 17102), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['temp_table.Date'], {}), '(temp_table.Date)\n', (17085, 17102), True, 'import pandas as pd\n'), ((18931, 19021), 'numpy.sum', 'np.sum', (["(data[:, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']] * x['Schedule'])"], {}), "(data[:, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']] * x[\n 'Schedule'])\n", (18937, 19021), True, 'import numpy as np\n'), ((19137, 19239), 'numpy.sum', 'np.sum', (["(data[:, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']] * (x[\n 'Schedule'] * x['ER']))"], {}), "(data[:, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']] * (x[\n 'Schedule'] * x['ER']))\n", (19143, 19239), True, 'import numpy as np\n'), ((24240, 24273), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['temp_table.Date'], {}), '(temp_table.Date)\n', (24256, 24273), True, 'import pandas as pd\n'), ((26238, 26330), 'numpy.mean', 'np.mean', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 'pixel_lon']])\n", (26245, 26330), True, 'import numpy as np\n'), ((26523, 26617), 'numpy.median', 'np.median', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 'pixel_lon']])\n", (26532, 26617), True, 'import numpy as np\n'), ((26813, 26904), 'numpy.std', 'np.std', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 'pixel_lon']])\n", (26819, 26904), True, 'import numpy as np\n'), ((27101, 27192), 'numpy.var', 'np.var', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 
'pixel_lon']])\n", (27107, 27192), True, 'import numpy as np\n'), ((27387, 27479), 'numpy.amin', 'np.amin', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 'pixel_lon']])\n", (27394, 27479), True, 'import numpy as np\n'), ((27674, 27766), 'numpy.amax', 'np.amax', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 'pixel_lon']])\n", (27681, 27766), True, 'import numpy as np\n'), ((28036, 28136), 'numpy.average', 'np.average', (["data[:, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {'weights': "x['Schedule']"}), "(data[:, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']],\n weights=x['Schedule'])\n", (28046, 28136), True, 'import numpy as np\n'), ((28326, 28417), 'numpy.sum', 'np.sum', (["data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x['pixel_lon']]"], {}), "(data[x['Schedule'] != 0, x['unique_days_idx'], x['pixel_lat'], x[\n 'pixel_lon']])\n", (28332, 28417), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.preprocessing import MinMaxScaler as mms
import ONN_Simulation_Class as ONN_Cls
from plot_scatter_matrix import plot_scatter_matrix
import ONN_Setups
import training_onn as train
import test_trained_onns as test
import create_datasets
from sklearn import preprocessing
import sys
sys.path.append('../')
import neuroptica as neu
# Load a previously pickled ONN simulation, rerun the forward pass on the
# test set, and report the classification accuracy.
onn = ONN_Cls.ONN_Simulation() # Required for containing training/simulation information
onn.topo = 'ONN'
# onn.FOLDER = '/home/edwar/Documents/Github_Projects/neuroptica/tests/Analysis/iris_augment/4x3'
onn.FOLDER = '/home/edwar/Documents/Github_Projects/neuroptica/tests/Analysis/iris_augment/4x3_test'
onn = onn.pickle_load()
model = onn.model
model.set_all_phases_uncerts_losses(Phases=onn.phases)
# Forward pass over the test set; samples are columns after the transpose.
yhat = model.forward_pass(onn.Xt.T)
# FIX: the original comprehension reused the name `yhat` for its loop
# variable, shadowing the output array; use distinct names instead.
cls = np.array([np.argmax(col) for col in yhat.T])
gt = np.array([np.argmax(tru) for tru in onn.yt])
# print(onn.model.get_all_phases())
# print(cls)
# print(gt)
print(f'Accuracy = {sum(gt == cls)/len(onn.Xt)*100}%')
onn.loss_diff = 0 # Set loss_diff
# For simulation purposes, defines range of loss and phase uncert
onn.loss_dB = np.linspace(0, 2, 3) # set loss/MZI range
onn.phase_uncert_theta = np.linspace(0., 1, 3) # set theta phase uncert range
onn.phase_uncert_phi = np.linspace(0., 1, 3) # set phi phase uncert range
# onn, model = test.test_PT(onn, onn.Xt, onn.yt, model, show_progress=True) # test Phi Theta phase uncertainty accurracy
# onn, model = test.test_LPU(onn, onn.Xt, onn.yt, model, show_progress=True) # test Loss/MZI + Phase uncert accuracy
# onn.saveAll(model) # Save best model information
| [
"numpy.linspace",
"sys.path.append",
"numpy.argmax",
"ONN_Simulation_Class.ONN_Simulation"
] | [((312, 334), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (327, 334), False, 'import sys\n'), ((367, 391), 'ONN_Simulation_Class.ONN_Simulation', 'ONN_Cls.ONN_Simulation', ([], {}), '()\n', (389, 391), True, 'import ONN_Simulation_Class as ONN_Cls\n'), ((1135, 1155), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(3)'], {}), '(0, 2, 3)\n', (1146, 1155), True, 'import numpy as np\n'), ((1202, 1224), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', '(3)'], {}), '(0.0, 1, 3)\n', (1213, 1224), True, 'import numpy as np\n'), ((1278, 1300), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', '(3)'], {}), '(0.0, 1, 3)\n', (1289, 1300), True, 'import numpy as np\n'), ((815, 830), 'numpy.argmax', 'np.argmax', (['yhat'], {}), '(yhat)\n', (824, 830), True, 'import numpy as np\n'), ((867, 881), 'numpy.argmax', 'np.argmax', (['tru'], {}), '(tru)\n', (876, 881), True, 'import numpy as np\n')] |
'''
util.py
the utility functions of the project
'''
import os
import sys
from warnings import warn
from datetime import datetime
from pathlib import Path
from importlib import import_module
import torch
import torch.nn as nn
import frontend
import util.audio as audio
from config import config
import numpy as np
import matplotlib
matplotlib.use('Agg') # To use on linux server without $DISPLAY
from matplotlib import cm
import matplotlib.pyplot as plt
import util.wavenet_util as wavenet_util
import util.mixture as mix
import librosa
# Whether a CUDA-capable GPU is available (module-level flag).
use_cuda = torch.cuda.is_available()
# Audio sample rate (Hz) taken from the global config.
# NOTE(review): `fs` appears unused in the definitions visible here —
# confirm against the rest of the file before removing.
fs = config.sample_rate
def create_model(n_vocab, embed_dim=256, mel_dim=80, linear_dim=513, r=4,
                 downsample_step=1,
                 n_speakers=1, speaker_embed_dim=16, padding_idx=0,
                 dropout=(1 - 0.95), kernel_size=5,
                 encoder_channels=128,
                 decoder_channels=256,
                 converter_channels=256,
                 query_position_rate=1.0,
                 key_position_rate=1.29,
                 use_memory_mask=False,
                 trainable_positional_encodings=False,
                 force_monotonic_attention=True,
                 use_decoder_state_for_postnet_input=True,
                 max_positions=512,
                 embedding_weight_std=0.1,
                 speaker_embedding_weight_std=0.01,
                 freeze_embedding=False,
                 window_ahead=3,
                 window_backward=1,
                 key_projection=False,
                 value_projection=False,
                 ):
    """Build the multi-speaker TTS model.

    Assembles an attention-based Encoder/Decoder seq2seq network plus a
    Converter post-net (either a WaveNet-style converter or a convolutional
    one, depending on ``config.use_wavenet``) into a MultiSpeakerTTSModel.

    Parameters
    ----------
    n_vocab : int
        Size of the text vocabulary.
    embed_dim : int
        Dimension of the text embedding.
    mel_dim, linear_dim : int
        Output dimensions of the decoder (mel) and converter (linear / wavenet).
    r : int
        Number of decoder output frames per step.
    n_speakers, speaker_embed_dim : int
        Speaker-conditioning configuration (n_speakers == 1 disables it
        downstream; behaviour defined in the model module).
    dropout, kernel_size, *_channels, *_rate, window_* :
        Convolution/attention hyper-parameters forwarded to the sub-modules.

    Returns
    -------
    MultiSpeakerTTSModel
        The assembled model (untrained).
    """
    from model import Encoder, Decoder, Converter, AttentionSeq2Seq, MultiSpeakerTTSModel
    # Upsampling factor in time between decoder output and converter input.
    time_upsampling = max(downsample_step // r, 1)
    # Seq2seq
    h = encoder_channels  # hidden dim (channels)
    k = kernel_size  # kernel size
    encoder = Encoder(
        n_vocab, embed_dim, padding_idx=padding_idx,
        n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,
        dropout=dropout, max_positions=max_positions,
        embedding_weight_std=embedding_weight_std,
        # (channels, kernel_size, dilation) — dilations cycle 1/3/9/27.
        convolutions=[(h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27),
                      (h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27),
                      (h, k, 1), (h, k, 3)],
    )
    h = decoder_channels
    decoder = Decoder(
        embed_dim, in_dim=mel_dim, r=r, padding_idx=padding_idx,
        n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,
        dropout=dropout, max_positions=max_positions,
        preattention=[(h, k, 1), (h, k, 3)],
        convolutions=[(h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27),
                      (h, k, 1)],
        # Attention applied only at the first and last decoder layers.
        attention=[True, False, False, False, True],
        force_monotonic_attention=force_monotonic_attention,
        query_position_rate=query_position_rate,
        key_position_rate=key_position_rate,
        use_memory_mask=use_memory_mask,
        window_ahead=window_ahead,
        window_backward=window_backward,
        key_projection=key_projection,
        value_projection=value_projection,
    )
    seq2seq = AttentionSeq2Seq(encoder, decoder)
    # Post net: its input is either the decoder hidden state (h // r channels)
    # or the mel output, depending on the flag.
    if use_decoder_state_for_postnet_input:
        in_dim = h // r
    else:
        in_dim = mel_dim
    h = converter_channels
    if config.use_wavenet:
        # WaveNet-style converter configured entirely from the global config;
        # mel features are the local conditioning input (cin_channels).
        converter = Converter(
            in_channels=in_dim,
            out_channels=config.out_channels,
            # out_channels=linear_dim,
            layers=config.layers,
            stacks=config.stacks,
            residual_channels=config.residual_channels,
            gate_channels=config.gate_channels,
            skip_out_channels=config.skip_out_channels,
            cin_channels=mel_dim,
            gin_channels=speaker_embed_dim,
            weight_normalization=config.weight_normalization,
            n_speakers=n_speakers,
            dropout=config.dropout,
            kernel_size=config.kernel_size,
            upsample_conditional_features=config.upsample_conditional_features,
            upsample_scales=config.upsample_scales,
            freq_axis_kernel_size=config.freq_axis_kernel_size,
            scalar_input=wavenet_util.is_scalar_input(config.input_type),
            legacy=config.legacy
        )
    else:
        # Convolutional converter producing linear spectrogram frames.
        converter = Converter(
            n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,
            in_dim=in_dim, out_dim=linear_dim, dropout=dropout,
            time_upsampling=time_upsampling,
            convolutions=[(h, k, 1), (h, k, 3), (2 * h, k, 1), (2 * h, k, 3)],
        )
    # Seq2seq + post net
    model = MultiSpeakerTTSModel(
        seq2seq, converter, padding_idx=padding_idx,
        mel_dim=mel_dim, linear_dim=linear_dim,
        n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,
        trainable_positional_encodings=trainable_positional_encodings,
        use_decoder_state_for_postnet_input=use_decoder_state_for_postnet_input,
        speaker_embedding_weight_std=speaker_embedding_weight_std,
        freeze_embedding=freeze_embedding)
    return model
def build_model():
    """Construct the multi-speaker TTS model entirely from the global config.

    Resolves the text frontend named by ``config.frontend`` (needed for the
    vocabulary size) and forwards every hyper-parameter to ``create_model``.

    Returns:
        The freshly constructed (untrained) model.
    """
    _frontend = getattr(frontend, config.frontend)
    # Collect all hyper-parameters in one mapping so the call site stays flat.
    model_kwargs = dict(
        n_speakers=config.n_speakers,
        speaker_embed_dim=config.speaker_embed_dim,
        n_vocab=_frontend.n_vocab,
        embed_dim=config.text_embed_dim,
        mel_dim=config.num_mels,
        linear_dim=config.fft_size // 2 + 1,
        r=config.outputs_per_step,
        downsample_step=config.downsample_step,
        padding_idx=config.padding_idx,
        dropout=config.dropout,
        kernel_size=config.kernel_size,
        encoder_channels=config.encoder_channels,
        decoder_channels=config.decoder_channels,
        converter_channels=config.converter_channels,
        use_memory_mask=config.use_memory_mask,
        trainable_positional_encodings=config.trainable_positional_encodings,
        force_monotonic_attention=config.force_monotonic_attention,
        use_decoder_state_for_postnet_input=config.use_decoder_state_for_postnet_input,
        max_positions=config.max_positions,
        speaker_embedding_weight_std=config.speaker_embedding_weight_std,
        freeze_embedding=config.freeze_embedding,
        window_ahead=config.window_ahead,
        window_backward=config.window_backward,
        key_projection=config.key_projection,
        value_projection=config.value_projection,
    )
    return create_model(**model_kwargs)
# def build_config(config_path):
# path = Path(config_path)
# experiment_dir = path.parent
# sys.path.append(str(experiment_dir.resolve()))
# module = import_module(str(path.stem))
#
# config = module.Config()
# return config
def clone_as_averaged_model(device, model, ema):
    """Return a copy of *model* whose parameters are overwritten by EMA shadows.

    Builds a fresh model, loads the current weights, then replaces every
    parameter that has an entry in ``ema.shadow`` with a clone of its
    exponential-moving-average value. The original model is left untouched.
    """
    assert ema is not None
    averaged_model = build_model().to(device)
    averaged_model.load_state_dict(model.state_dict())
    for name, param in averaged_model.named_parameters():
        if name not in ema.shadow:
            continue
        # Clone so the averaged model does not alias the EMA buffer.
        param.data = ema.shadow[name].clone()
    return averaged_model
def save_waveplot(path, y_hat, y_target):
    """Save a two-row PNG comparing the target (top) and predicted (bottom)
    waveforms, both rendered at ``config.sample_rate``."""
    sample_rate = config.sample_rate
    plt.figure(figsize=(16, 6))
    # Row 1: ground truth, row 2: model output.
    for row, waveform in ((1, y_target), (2, y_hat)):
        plt.subplot(2, 1, row)
        librosa.display.waveplot(waveform, sr=sample_rate)
    plt.tight_layout()
    plt.savefig(path, format="png")
    plt.close()
def wavenet_eval_model(global_step, writer, device, model, y, c, g, input_lengths, eval_dir, ema=None):
    """Run slow autoregressive WaveNet generation on one batch example and
    save predicted/target audio plus a comparison waveplot under *eval_dir*.

    A random example is drawn from the batch ``y``; generation is conditioned
    on its local features ``c`` and (optionally) global features ``g``.
    If ``ema`` is given, an EMA-averaged copy of the model is evaluated
    instead of the raw weights.

    NOTE(review): assumes y is (B, T[, 1]) waveforms and c is (B, C, T')
    local conditioning — confirm against the training data loader.
    """
    if ema is not None:
        print("Using averaged model for evaluation")
        model = clone_as_averaged_model(device, model, ema)
    model.make_generation_fast_()
    model.eval()
    # Pick one random batch element; trim everything to its true length.
    idx = np.random.randint(0, len(y))
    length = input_lengths[idx].data.cpu().item()
    # (T,)
    y_target = y[idx].view(-1).data.cpu().numpy()[:length]
    if c is not None:
        # Local conditioning: keep a (1, C, T') slice for the chosen example.
        if config.upsample_conditional_features:
            # Features are at frame rate; divide by hop size to match frames.
            c = c[idx, :, :length // audio.get_hop_size()].unsqueeze(0)
        else:
            c = c[idx, :, :length].unsqueeze(0)
        assert c.dim() == 3
        print("Shape of local conditioning features: {}".format(c.size()))
    if g is not None:
        # TODO: test
        g = g[idx]
        print("Shape of global conditioning features: {}".format(g.size()))
    # Dummy silence — seed value for the autoregressive loop, encoded to
    # match the configured input representation.
    if wavenet_util.is_mulaw_quantize(config.input_type):
        initial_value = audio.mulaw_quantize(0, config.quantize_channels)
    elif wavenet_util.is_mulaw(config.input_type):
        initial_value = audio.mulaw(0.0, config.quantize_channels)
    else:
        initial_value = 0.0
    print("Intial value:", initial_value)
    # (C,) — one-hot for the quantized case, scalar otherwise.
    if wavenet_util.is_mulaw_quantize(config.input_type):
        initial_input = np_utils.to_categorical(
            initial_value, num_classes=config.quantize_channels).astype(np.float32)
        initial_input = torch.from_numpy(initial_input).view(
            1, 1, config.quantize_channels)
    else:
        initial_input = torch.zeros(1, 1, 1).fill_(initial_value)
    initial_input = initial_input.to(device)
    # Run the model in fast eval mode
    with torch.no_grad():
        y_hat = model.incremental_forward(
            initial_input, c=c, g=g, T=length, softmax=True, quantize=True, tqdm=tqdm,
            log_scale_min=config.log_scale_min)
    # Decode both prediction and target back to raw waveforms.
    if wavenet_util.is_mulaw_quantize(config.input_type):
        y_hat = y_hat.max(1)[1].view(-1).long().cpu().data.numpy()
        y_hat = audio.inv_mulaw_quantize(y_hat, config.quantize_channels)
        y_target = audio.inv_mulaw_quantize(y_target, config.quantize_channels)
    elif wavenet_util.is_mulaw(config.input_type):
        y_hat = audio.inv_mulaw(y_hat.view(-1).cpu().data.numpy(), config.quantize_channels)
        y_target = audio.inv_mulaw(y_target, config.quantize_channels)
    else:
        y_hat = y_hat.view(-1).cpu().data.numpy()
    # Save audio
    os.makedirs(eval_dir, exist_ok=True)
    path = os.path.join(eval_dir, "step{:09d}_predicted.wav".format(global_step))
    librosa.output.write_wav(path, y_hat, sr=config.sample_rate)
    path = os.path.join(eval_dir, "step{:09d}_target.wav".format(global_step))
    librosa.output.write_wav(path, y_target, sr=config.sample_rate)
    # save figure
    path = os.path.join(eval_dir, "step{:09d}_waveplots.png".format(global_step))
    save_waveplot(path, y_hat, y_target)
def eval_model(global_step, writer, device, model, checkpoint_dir, ismultispeaker):
    """Synthesize a fixed set of evaluation sentences with the current model
    weights and save alignment plots and audio under ``<checkpoint_dir>/eval``.

    For multi-speaker models, synthesis is repeated for speaker ids 0, 1 and
    10 (hard-coded); otherwise a single unconditioned pass is done. Audio is
    also pushed to the TensorBoard *writer* when supported.
    """
    # hard-coded evaluation sentences
    texts = [
        "Scientists at the CERN laboratory say they have discovered a new particle.",
        "There's a way to measure the acute emotional intelligence that has never gone out of style.",
        "President Trump met with other leaders at the Group of 20 conference.",
        "Generative adversarial network or variational auto-encoder.",
        "Please call Stella.",
        "Some have accepted this as a miracle without any physical explanation.",
    ]
    # Imported lazily to avoid a circular import at module load time —
    # TODO confirm; synthesis needs the same frontend as training.
    import synthesis
    _frontend = getattr(frontend, config.frontend)
    synthesis._frontend = _frontend
    eval_output_dir = os.path.join(checkpoint_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)
    # Prepare model for evaluation: a fresh copy so generation-time graph
    # tweaks don't touch the training model.
    model_eval = build_model().to(device)
    model_eval.load_state_dict(model.state_dict())
    # hard-coded speaker ids for the multi-speaker case
    speaker_ids = [0, 1, 10] if ismultispeaker else [None]
    for speaker_id in speaker_ids:
        speaker_str = "multispeaker{}".format(speaker_id) if speaker_id is not None else "single"
        for idx, text in enumerate(texts):
            signal, alignment, _, mel = synthesis.tts(
                model_eval, text, p=0, speaker_id=speaker_id, fast=True)
            # Peak-normalize before writing to disk.
            signal /= np.max(np.abs(signal))
            # Alignment
            path = os.path.join(eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
                global_step, idx, speaker_str))
            save_alignment(path, alignment, global_step)
            tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
            # writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
            # Mel
            # writer.add_image("(Eval) Predicted mel spectrogram text{}_{}".format(idx, speaker_str),
            #                  prepare_spec_image(mel), global_step)
            # Audio
            path = os.path.join(eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
                global_step, idx, speaker_str))
            audio.save_wav(signal, path)
            # Best-effort TensorBoard logging; some writer backends lack
            # add_audio, so failures are only warned about.
            try:
                writer.add_audio("(Eval) Predicted audio signal {}_{}".format(idx, speaker_str),
                                 signal, global_step, sample_rate=fs)
            except Exception as e:
                warn(str(e))
                pass
def sequence_mask(sequence_length, max_len=None):
    """Build a (batch, max_len) float mask from per-example lengths.

    Entry (b, t) is 1.0 when t < sequence_length[b], else 0.0. When
    *max_len* is omitted, the longest length in the batch is used.
    """
    if max_len is None:
        max_len = sequence_length.data.max()
    batch = sequence_length.size(0)
    # Column indices 0..max_len-1, broadcast over the batch dimension.
    positions = torch.arange(0, max_len).long().unsqueeze(0).expand(batch, max_len)
    if sequence_length.is_cuda:
        positions = positions.cuda()
    lengths = sequence_length.unsqueeze(1).expand_as(positions)
    return positions.lt(lengths).float()
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,
                    train_seq2seq, train_postnet):
    """Persist model weights (and optionally optimizer state) to disk.

    Depending on which sub-networks are being trained, saves the full model,
    only the seq2seq part, or only the postnet part; the checkpoint filename
    is suffixed accordingly ("", "_seq2seq", "_postnet").

    Raises:
        ValueError: if both ``train_seq2seq`` and ``train_postnet`` are
            False. Previously this case fell through with ``suffix``/``m``
            unbound and crashed later with a confusing UnboundLocalError.
    """
    if train_seq2seq and train_postnet:
        suffix = ""
        m = model
    elif train_seq2seq:
        suffix = "_seq2seq"
        m = model.seq2seq
    elif train_postnet:
        suffix = "_postnet"
        m = model.postnet
    else:
        # Nothing would be saved — fail fast with a clear message.
        raise ValueError(
            "At least one of train_seq2seq / train_postnet must be True")
    checkpoint_path = os.path.join(
        checkpoint_dir, "checkpoint_step{:09d}{}.pth".format(step, suffix))
    # Saving the optimizer is optional: it roughly doubles checkpoint size.
    optimizer_state = optimizer.state_dict() if config.save_optimizer_state else None
    torch.save({
        "state_dict": m.state_dict(),
        "optimizer": optimizer_state,
        "global_step": step,
        "global_epoch": epoch,
    }, checkpoint_path)
    print("Saved checkpoint:", checkpoint_path)
def load_checkpoint(checkpoint_path):
    """Load a checkpoint dict from disk, remapping all tensors onto the CPU
    when CUDA is unavailable (``use_cuda`` is a module-level flag)."""
    map_location = None if use_cuda else (lambda storage, loc: storage)
    return torch.load(checkpoint_path, map_location=map_location)
def plot_alignment(alignment, path, info=None):
    """Render an attention alignment matrix as a heatmap and save it as PNG.

    Args:
        alignment: 2-D array of attention weights (encoder x decoder steps).
        path: output PNG path.
        info: optional extra text appended to the x-axis label.
    """
    fig, axis = plt.subplots()
    image = axis.imshow(
        alignment, aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(image, ax=axis)
    label = 'Decoder timestep'
    if info is not None:
        label = label + '\n\n' + info
    plt.xlabel(label)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()
    plt.savefig(path, format='png')
    plt.close()
def save_alignment(path, attn, global_step):
    """Save the transposed attention matrix *attn* as a PNG at *path*,
    annotated with the builder name, timestamp and training step."""
    plot_alignment(attn.T, path, info="{}, {}, step={}".format(
        config.builder, time_string(), global_step))
def time_string():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM'."""
    now = datetime.now()
    return now.strftime('%Y-%m-%d %H:%M')
def prepare_spec_image(spectrogram):
    """Min-max normalize a spectrogram, flip its frequency axis, and colorize
    it with the magma colormap as a uint8 RGBA image for logging."""
    # Scale to [0, 1]
    lo = np.min(spectrogram)
    hi = np.max(spectrogram)
    normalized = (spectrogram - lo) / (hi - lo)
    # flip against freq axis so low frequencies sit at the bottom
    flipped = np.flip(normalized, axis=1)
    return np.uint8(cm.magma(flipped.T) * 255)
def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,
                input_lengths, checkpoint_dir=None):
    """Dump intermediate training artifacts (alignments, spectrogram images,
    audio) for one batch element under *checkpoint_dir*.

    When ``config.use_wavenet`` is set, ``linear_outputs`` is treated as raw
    WaveNet output: it is decoded back to a waveform and saved next to the
    target waveform, then the function returns early. Otherwise, attention
    alignments are plotted per layer and averaged, and the predicted linear
    spectrogram is inverted to audio via Griffin-Lim (``audio.inv_spectrogram``).
    """
    print("Save intermediate states at step {}".format(global_step))
    if config.use_wavenet:
        # idx = np.random.randint(0, len(linear_outputs))
        # length = input_lengths[idx]
        # Deterministically pick the second example (or the only one).
        idx = min(1, len(input_lengths) - 1)
        input_length = input_lengths[idx]
        length = input_length
        # (B, C, T)
        if linear_outputs.dim() == 4:
            linear_outputs = linear_outputs.squeeze(-1)
        if wavenet_util.is_mulaw_quantize(config.input_type):
            # (B, T) — argmax over the class dimension, then mu-law decode.
            linear_outputs = F.softmax(linear_outputs, dim=1).max(1)[1]
            # (T,)
            linear_outputs = linear_outputs[idx].data.cpu().long().numpy()
            y = y[idx].view(-1).data.cpu().long().numpy()
            linear_outputs = audio.inv_mulaw_quantize(linear_outputs, config.quantize_channels)
            y = audio.inv_mulaw_quantize(y, config.quantize_channels)
        else:
            # (B, T) — sample from the mixture-of-logistics output.
            linear_outputs = mix.sample_from_discretized_mix_logistic(
                linear_outputs, log_scale_min=config.log_scale_min)
            # (T,)
            linear_outputs = linear_outputs[idx].view(-1).data.cpu().numpy()
            y = y[idx].view(-1).data.cpu().numpy()
            if wavenet_util.is_mulaw(config.input_type):
                linear_outputs = audio.inv_mulaw(linear_outputs, config.quantize_channels)
                y = audio.inv_mulaw(y, config.quantize_channels)
        # Mask by length
        # linear_outputs[length:] = 0
        # y[length:] = 0
        print('yhat', linear_outputs.shape, linear_outputs)
        print('y', y.shape, y)
        # Save audio
        audio_dir = os.path.join(checkpoint_dir, "audio")
        os.makedirs(audio_dir, exist_ok=True)
        path = os.path.join(audio_dir, "step{:09d}_predicted.wav".format(global_step))
        librosa.output.write_wav(path, linear_outputs, sr=config.sample_rate)
        path = os.path.join(audio_dir, "step{:09d}_target.wav".format(global_step))
        librosa.output.write_wav(path, y, sr=config.sample_rate)
        return
    # idx = np.random.randint(0, len(input_lengths))
    idx = min(1, len(input_lengths) - 1)
    input_length = input_lengths[idx]
    # Alignment
    # Multi-hop attention: attn is (n_layers, B, decoder_T, encoder_T)
    if attn is not None and attn.dim() == 4:
        for i, alignment in enumerate(attn):
            alignment = alignment[idx].cpu().data.numpy()
            tag = "alignment_layer{}".format(i + 1)
            # writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
            # save files as well for now
            alignment_dir = os.path.join(checkpoint_dir, "alignment_layer{}".format(i + 1))
            os.makedirs(alignment_dir, exist_ok=True)
            path = os.path.join(alignment_dir, "step{:09d}_layer_{}_alignment.png".format(
                global_step, i + 1))
            save_alignment(path, alignment, global_step)
        # Save averaged alignment
        alignment_dir = os.path.join(checkpoint_dir, "alignment_ave")
        os.makedirs(alignment_dir, exist_ok=True)
        path = os.path.join(alignment_dir, "step{:09d}_alignment.png".format(global_step))
        alignment = attn.mean(0)[idx].cpu().data.numpy()
        save_alignment(path, alignment, global_step)
        tag = "averaged_alignment"
        # writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
    # Predicted mel spectrogram
    if mel_outputs is not None:
        mel_output = mel_outputs[idx].cpu().data.numpy()
        mel_output = prepare_spec_image(audio._denormalize(mel_output))
        # writer.add_image("Predicted mel spectrogram", mel_output, global_step)
    # Predicted spectrogram
    if linear_outputs is not None:
        linear_output = linear_outputs[idx].cpu().data.numpy()
        spectrogram = prepare_spec_image(audio._denormalize(linear_output))
        # writer.add_image("Predicted linear spectrogram", spectrogram, global_step)
        # Predicted audio signal — peak-normalized before writing.
        signal = audio.inv_spectrogram(linear_output.T)
        signal /= np.max(np.abs(signal))
        path = os.path.join(checkpoint_dir, "step{:09d}_predicted.wav".format(
            global_step))
        # Best-effort TensorBoard audio logging; warn instead of failing.
        try:
            writer.add_audio("Predicted audio signal", signal, global_step, sample_rate=fs)
        except Exception as e:
            warn(str(e))
            pass
        audio.save_wav(signal, path)
    # Target mel spectrogram
    if mel_outputs is not None:
        mel_output = mel[idx].cpu().data.numpy()
        mel_output = prepare_spec_image(audio._denormalize(mel_output))
        # writer.add_image("Target mel spectrogram", mel_output, global_step)
    # Target spectrogram
    if linear_outputs is not None:
        linear_output = y[idx].cpu().data.numpy()
        spectrogram = prepare_spec_image(audio._denormalize(linear_output))
        # writer.add_image("Target linear spectrogram", spectrogram, global_step)
| [
"util.wavenet_util.is_mulaw",
"matplotlib.pyplot.ylabel",
"util.wavenet_util.is_scalar_input",
"util.audio.inv_mulaw",
"torch.from_numpy",
"torch.cuda.is_available",
"util.wavenet_util.is_mulaw_quantize",
"librosa.display.waveplot",
"torch.arange",
"numpy.flip",
"model.Decoder",
"matplotlib.py... | [((333, 354), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (347, 354), False, 'import matplotlib\n'), ((550, 575), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (573, 575), False, 'import torch\n'), ((1819, 2165), 'model.Encoder', 'Encoder', (['n_vocab', 'embed_dim'], {'padding_idx': 'padding_idx', 'n_speakers': 'n_speakers', 'speaker_embed_dim': 'speaker_embed_dim', 'dropout': 'dropout', 'max_positions': 'max_positions', 'embedding_weight_std': 'embedding_weight_std', 'convolutions': '[(h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27), (h, k, 1), (h, k, 3), (h, k, \n 9), (h, k, 27), (h, k, 1), (h, k, 3)]'}), '(n_vocab, embed_dim, padding_idx=padding_idx, n_speakers=n_speakers,\n speaker_embed_dim=speaker_embed_dim, dropout=dropout, max_positions=\n max_positions, embedding_weight_std=embedding_weight_std, convolutions=\n [(h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27), (h, k, 1), (h, k, 3), (h,\n k, 9), (h, k, 27), (h, k, 1), (h, k, 3)])\n', (1826, 2165), False, 'from model import Encoder, Decoder, Converter, AttentionSeq2Seq, MultiSpeakerTTSModel\n'), ((2323, 2970), 'model.Decoder', 'Decoder', (['embed_dim'], {'in_dim': 'mel_dim', 'r': 'r', 'padding_idx': 'padding_idx', 'n_speakers': 'n_speakers', 'speaker_embed_dim': 'speaker_embed_dim', 'dropout': 'dropout', 'max_positions': 'max_positions', 'preattention': '[(h, k, 1), (h, k, 3)]', 'convolutions': '[(h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27), (h, k, 1)]', 'attention': '[True, False, False, False, True]', 'force_monotonic_attention': 'force_monotonic_attention', 'query_position_rate': 'query_position_rate', 'key_position_rate': 'key_position_rate', 'use_memory_mask': 'use_memory_mask', 'window_ahead': 'window_ahead', 'window_backward': 'window_backward', 'key_projection': 'key_projection', 'value_projection': 'value_projection'}), '(embed_dim, in_dim=mel_dim, r=r, padding_idx=padding_idx, n_speakers\n =n_speakers, 
speaker_embed_dim=speaker_embed_dim, dropout=dropout,\n max_positions=max_positions, preattention=[(h, k, 1), (h, k, 3)],\n convolutions=[(h, k, 1), (h, k, 3), (h, k, 9), (h, k, 27), (h, k, 1)],\n attention=[True, False, False, False, True], force_monotonic_attention=\n force_monotonic_attention, query_position_rate=query_position_rate,\n key_position_rate=key_position_rate, use_memory_mask=use_memory_mask,\n window_ahead=window_ahead, window_backward=window_backward,\n key_projection=key_projection, value_projection=value_projection)\n', (2330, 2970), False, 'from model import Encoder, Decoder, Converter, AttentionSeq2Seq, MultiSpeakerTTSModel\n'), ((3093, 3127), 'model.AttentionSeq2Seq', 'AttentionSeq2Seq', (['encoder', 'decoder'], {}), '(encoder, decoder)\n', (3109, 3127), False, 'from model import Encoder, Decoder, Converter, AttentionSeq2Seq, MultiSpeakerTTSModel\n'), ((4594, 5013), 'model.MultiSpeakerTTSModel', 'MultiSpeakerTTSModel', (['seq2seq', 'converter'], {'padding_idx': 'padding_idx', 'mel_dim': 'mel_dim', 'linear_dim': 'linear_dim', 'n_speakers': 'n_speakers', 'speaker_embed_dim': 'speaker_embed_dim', 'trainable_positional_encodings': 'trainable_positional_encodings', 'use_decoder_state_for_postnet_input': 'use_decoder_state_for_postnet_input', 'speaker_embedding_weight_std': 'speaker_embedding_weight_std', 'freeze_embedding': 'freeze_embedding'}), '(seq2seq, converter, padding_idx=padding_idx, mel_dim=\n mel_dim, linear_dim=linear_dim, n_speakers=n_speakers,\n speaker_embed_dim=speaker_embed_dim, trainable_positional_encodings=\n trainable_positional_encodings, use_decoder_state_for_postnet_input=\n use_decoder_state_for_postnet_input, speaker_embedding_weight_std=\n speaker_embedding_weight_std, freeze_embedding=freeze_embedding)\n', (4614, 5013), False, 'from model import Encoder, Decoder, Converter, AttentionSeq2Seq, MultiSpeakerTTSModel\n'), ((7252, 7279), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 6)'}), '(figsize=(16, 
6))\n', (7262, 7279), True, 'import matplotlib.pyplot as plt\n'), ((7284, 7304), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (7295, 7304), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7350), 'librosa.display.waveplot', 'librosa.display.waveplot', (['y_target'], {'sr': 'sr'}), '(y_target, sr=sr)\n', (7333, 7350), False, 'import librosa\n'), ((7355, 7375), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (7366, 7375), True, 'import matplotlib.pyplot as plt\n'), ((7380, 7418), 'librosa.display.waveplot', 'librosa.display.waveplot', (['y_hat'], {'sr': 'sr'}), '(y_hat, sr=sr)\n', (7404, 7418), False, 'import librosa\n'), ((7423, 7441), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7439, 7441), True, 'import matplotlib.pyplot as plt\n'), ((7446, 7477), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': '"""png"""'}), "(path, format='png')\n", (7457, 7477), True, 'import matplotlib.pyplot as plt\n'), ((7482, 7493), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7491, 7493), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8476), 'util.wavenet_util.is_mulaw_quantize', 'wavenet_util.is_mulaw_quantize', (['config.input_type'], {}), '(config.input_type)\n', (8457, 8476), True, 'import util.wavenet_util as wavenet_util\n'), ((8769, 8818), 'util.wavenet_util.is_mulaw_quantize', 'wavenet_util.is_mulaw_quantize', (['config.input_type'], {}), '(config.input_type)\n', (8799, 8818), True, 'import util.wavenet_util as wavenet_util\n'), ((9431, 9480), 'util.wavenet_util.is_mulaw_quantize', 'wavenet_util.is_mulaw_quantize', (['config.input_type'], {}), '(config.input_type)\n', (9461, 9480), True, 'import util.wavenet_util as wavenet_util\n'), ((10000, 10036), 'os.makedirs', 'os.makedirs', (['eval_dir'], {'exist_ok': '(True)'}), '(eval_dir, exist_ok=True)\n', (10011, 10036), False, 'import os\n'), ((10123, 10183), 'librosa.output.write_wav', 
'librosa.output.write_wav', (['path', 'y_hat'], {'sr': 'config.sample_rate'}), '(path, y_hat, sr=config.sample_rate)\n', (10147, 10183), False, 'import librosa\n'), ((10267, 10330), 'librosa.output.write_wav', 'librosa.output.write_wav', (['path', 'y_target'], {'sr': 'config.sample_rate'}), '(path, y_target, sr=config.sample_rate)\n', (10291, 10330), False, 'import librosa\n'), ((11183, 11219), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""eval"""'], {}), "(checkpoint_dir, 'eval')\n", (11195, 11219), False, 'import os\n'), ((11224, 11267), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {'exist_ok': '(True)'}), '(eval_output_dir, exist_ok=True)\n', (11235, 11267), False, 'import os\n'), ((14510, 14524), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14522, 14524), True, 'import matplotlib.pyplot as plt\n'), ((14762, 14780), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (14772, 14780), True, 'import matplotlib.pyplot as plt\n'), ((14785, 14815), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Encoder timestep"""'], {}), "('Encoder timestep')\n", (14795, 14815), True, 'import matplotlib.pyplot as plt\n'), ((14820, 14838), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14836, 14838), True, 'import matplotlib.pyplot as plt\n'), ((14843, 14874), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': '"""png"""'}), "(path, format='png')\n", (14854, 14874), True, 'import matplotlib.pyplot as plt\n'), ((14879, 14890), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14888, 14890), True, 'import matplotlib.pyplot as plt\n'), ((15299, 15327), 'numpy.flip', 'np.flip', (['spectrogram'], {'axis': '(1)'}), '(spectrogram, axis=1)\n', (15306, 15327), True, 'import numpy as np\n'), ((4275, 4509), 'model.Converter', 'Converter', ([], {'n_speakers': 'n_speakers', 'speaker_embed_dim': 'speaker_embed_dim', 'in_dim': 'in_dim', 'out_dim': 'linear_dim', 'dropout': 'dropout', 
'time_upsampling': 'time_upsampling', 'convolutions': '[(h, k, 1), (h, k, 3), (2 * h, k, 1), (2 * h, k, 3)]'}), '(n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,\n in_dim=in_dim, out_dim=linear_dim, dropout=dropout, time_upsampling=\n time_upsampling, convolutions=[(h, k, 1), (h, k, 3), (2 * h, k, 1), (2 *\n h, k, 3)])\n', (4284, 4509), False, 'from model import Encoder, Decoder, Converter, AttentionSeq2Seq, MultiSpeakerTTSModel\n'), ((8502, 8551), 'util.audio.mulaw_quantize', 'audio.mulaw_quantize', (['(0)', 'config.quantize_channels'], {}), '(0, config.quantize_channels)\n', (8522, 8551), True, 'import util.audio as audio\n'), ((8561, 8601), 'util.wavenet_util.is_mulaw', 'wavenet_util.is_mulaw', (['config.input_type'], {}), '(config.input_type)\n', (8582, 8601), True, 'import util.wavenet_util as wavenet_util\n'), ((9228, 9243), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9241, 9243), False, 'import torch\n'), ((9565, 9622), 'util.audio.inv_mulaw_quantize', 'audio.inv_mulaw_quantize', (['y_hat', 'config.quantize_channels'], {}), '(y_hat, config.quantize_channels)\n', (9589, 9622), True, 'import util.audio as audio\n'), ((9642, 9702), 'util.audio.inv_mulaw_quantize', 'audio.inv_mulaw_quantize', (['y_target', 'config.quantize_channels'], {}), '(y_target, config.quantize_channels)\n', (9666, 9702), True, 'import util.audio as audio\n'), ((9712, 9752), 'util.wavenet_util.is_mulaw', 'wavenet_util.is_mulaw', (['config.input_type'], {}), '(config.input_type)\n', (9733, 9752), True, 'import util.wavenet_util as wavenet_util\n'), ((14262, 14289), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (14272, 14289), False, 'import torch\n'), ((14321, 14391), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '(lambda storage, loc: storage)'}), '(checkpoint_path, map_location=lambda storage, loc: storage)\n', (14331, 14391), False, 'import torch\n'), ((15977, 16026), 'util.wavenet_util.is_mulaw_quantize', 
'wavenet_util.is_mulaw_quantize', (['config.input_type'], {}), '(config.input_type)\n', (16007, 16026), True, 'import util.wavenet_util as wavenet_util\n'), ((17198, 17235), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""audio"""'], {}), "(checkpoint_dir, 'audio')\n", (17210, 17235), False, 'import os\n'), ((17244, 17281), 'os.makedirs', 'os.makedirs', (['audio_dir'], {'exist_ok': '(True)'}), '(audio_dir, exist_ok=True)\n', (17255, 17281), False, 'import os\n'), ((17377, 17446), 'librosa.output.write_wav', 'librosa.output.write_wav', (['path', 'linear_outputs'], {'sr': 'config.sample_rate'}), '(path, linear_outputs, sr=config.sample_rate)\n', (17401, 17446), False, 'import librosa\n'), ((17539, 17595), 'librosa.output.write_wav', 'librosa.output.write_wav', (['path', 'y'], {'sr': 'config.sample_rate'}), '(path, y, sr=config.sample_rate)\n', (17563, 17595), False, 'import librosa\n'), ((18522, 18567), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""alignment_ave"""'], {}), "(checkpoint_dir, 'alignment_ave')\n", (18534, 18567), False, 'import os\n'), ((18576, 18617), 'os.makedirs', 'os.makedirs', (['alignment_dir'], {'exist_ok': '(True)'}), '(alignment_dir, exist_ok=True)\n', (18587, 18617), False, 'import os\n'), ((19567, 19605), 'util.audio.inv_spectrogram', 'audio.inv_spectrogram', (['linear_output.T'], {}), '(linear_output.T)\n', (19588, 19605), True, 'import util.audio as audio\n'), ((19938, 19966), 'util.audio.save_wav', 'audio.save_wav', (['signal', 'path'], {}), '(signal, path)\n', (19952, 19966), True, 'import util.audio as audio\n'), ((8627, 8669), 'util.audio.mulaw', 'audio.mulaw', (['(0.0)', 'config.quantize_channels'], {}), '(0.0, config.quantize_channels)\n', (8638, 8669), True, 'import util.audio as audio\n'), ((9866, 9917), 'util.audio.inv_mulaw', 'audio.inv_mulaw', (['y_target', 'config.quantize_channels'], {}), '(y_target, config.quantize_channels)\n', (9881, 9917), True, 'import util.audio as audio\n'), ((11691, 11761), 
'synthesis.tts', 'synthesis.tts', (['model_eval', 'text'], {'p': '(0)', 'speaker_id': 'speaker_id', 'fast': '(True)'}), '(model_eval, text, p=0, speaker_id=speaker_id, fast=True)\n', (11704, 11761), False, 'import synthesis\n'), ((12590, 12618), 'util.audio.save_wav', 'audio.save_wav', (['signal', 'path'], {}), '(signal, path)\n', (12604, 12618), True, 'import util.audio as audio\n'), ((13067, 13091), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (13079, 13091), False, 'import torch\n'), ((15087, 15101), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15099, 15101), False, 'from datetime import datetime\n'), ((15214, 15233), 'numpy.min', 'np.min', (['spectrogram'], {}), '(spectrogram)\n', (15220, 15233), True, 'import numpy as np\n'), ((15238, 15257), 'numpy.max', 'np.max', (['spectrogram'], {}), '(spectrogram)\n', (15244, 15257), True, 'import numpy as np\n'), ((15260, 15279), 'numpy.min', 'np.min', (['spectrogram'], {}), '(spectrogram)\n', (15266, 15279), True, 'import numpy as np\n'), ((15374, 15397), 'matplotlib.cm.magma', 'cm.magma', (['spectrogram.T'], {}), '(spectrogram.T)\n', (15382, 15397), False, 'from matplotlib import cm\n'), ((16304, 16370), 'util.audio.inv_mulaw_quantize', 'audio.inv_mulaw_quantize', (['linear_outputs', 'config.quantize_channels'], {}), '(linear_outputs, config.quantize_channels)\n', (16328, 16370), True, 'import util.audio as audio\n'), ((16387, 16440), 'util.audio.inv_mulaw_quantize', 'audio.inv_mulaw_quantize', (['y', 'config.quantize_channels'], {}), '(y, config.quantize_channels)\n', (16411, 16440), True, 'import util.audio as audio\n'), ((16505, 16602), 'util.mixture.sample_from_discretized_mix_logistic', 'mix.sample_from_discretized_mix_logistic', (['linear_outputs'], {'log_scale_min': 'config.log_scale_min'}), '(linear_outputs, log_scale_min=\n config.log_scale_min)\n', (16545, 16602), True, 'import util.mixture as mix\n'), ((16778, 16818), 'util.wavenet_util.is_mulaw', 
'wavenet_util.is_mulaw', (['config.input_type'], {}), '(config.input_type)\n', (16799, 16818), True, 'import util.wavenet_util as wavenet_util\n'), ((18236, 18277), 'os.makedirs', 'os.makedirs', (['alignment_dir'], {'exist_ok': '(True)'}), '(alignment_dir, exist_ok=True)\n', (18247, 18277), False, 'import os\n'), ((19115, 19145), 'util.audio._denormalize', 'audio._denormalize', (['mel_output'], {}), '(mel_output)\n', (19133, 19145), True, 'import util.audio as audio\n'), ((19396, 19429), 'util.audio._denormalize', 'audio._denormalize', (['linear_output'], {}), '(linear_output)\n', (19414, 19429), True, 'import util.audio as audio\n'), ((19631, 19645), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (19637, 19645), True, 'import numpy as np\n'), ((20118, 20148), 'util.audio._denormalize', 'audio._denormalize', (['mel_output'], {}), '(mel_output)\n', (20136, 20148), True, 'import util.audio as audio\n'), ((20380, 20413), 'util.audio._denormalize', 'audio._denormalize', (['linear_output'], {}), '(linear_output)\n', (20398, 20413), True, 'import util.audio as audio\n'), ((4153, 4200), 'util.wavenet_util.is_scalar_input', 'wavenet_util.is_scalar_input', (['config.input_type'], {}), '(config.input_type)\n', (4181, 4200), True, 'import util.wavenet_util as wavenet_util\n'), ((8977, 9008), 'torch.from_numpy', 'torch.from_numpy', (['initial_input'], {}), '(initial_input)\n', (8993, 9008), False, 'import torch\n'), ((9093, 9113), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (9104, 9113), False, 'import torch\n'), ((11808, 11822), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (11814, 11822), True, 'import numpy as np\n'), ((16853, 16910), 'util.audio.inv_mulaw', 'audio.inv_mulaw', (['linear_outputs', 'config.quantize_channels'], {}), '(linear_outputs, config.quantize_channels)\n', (16868, 16910), True, 'import util.audio as audio\n'), ((16931, 16975), 'util.audio.inv_mulaw', 'audio.inv_mulaw', (['y', 'config.quantize_channels'], 
{}), '(y, config.quantize_channels)\n', (16946, 16975), True, 'import util.audio as audio\n'), ((8061, 8081), 'util.audio.get_hop_size', 'audio.get_hop_size', ([], {}), '()\n', (8079, 8081), True, 'import util.audio as audio\n')] |
"""Fuzzy K-means clustering"""
# ==============================================================================
# Author: <NAME> <ammarsherif90 [at] gmail [dot] com >
# License: MIT
# ==============================================================================
# ==============================================================================
# The file includes an implementation of a fuzzy version of kmeans with sklearn-
# like interface.
# ==============================================================================
import numpy as np
from sklearn.cluster import KMeans
from sklearn.utils import check_random_state
class FuzzyKMeans(KMeans):
"""The class implements the fuzzy version of kmeans
----------------------------------------------------------------------------
Args: same arguments as in SKlearn in addition to
- m: the fuzziness index to determine how fuzzy our boundary is
- eps: the tolerance value for convergence
"""
def __init__(self, m, eps= 0.001,*args, **kwargs):
self.__m = m
self.__eps = eps
self.fmm_ = None
self.__fitted = False
super(FuzzyKMeans, self).__init__(*args, **kwargs)
# --------------------------------------------------------------------------
    def __is_fitted(self) -> bool:
        """Return the internal fitted flag (False until fitting completes —
        presumably toggled by fit(); confirm against the full class)."""
        return self.__fitted
# --------------------------------------------------------------------------
def _check_params(self, X):
if (self.__m <= 1):
raise ValueError(
"the fuzziness index m should be more than 1"
f", got '{self.__m}' instead."
)
super(FuzzyKMeans, self)._check_params(X)
# --------------------------------------------------------------------------
def __compute_dist(self, data, centroids):
"""The method computes the distance matrix for each data point with res-
pect to each cluster centroid.
------------------------------------------------------------------------
Inputs:
- data: the input data points
- centroids: the clusters' centroids
Output:
- distance_m: the distace matrix
"""
n_points = data.shape[0]
n_clusters = centroids.shape[0]
distance_m = np.zeros((n_points, n_clusters))
for i in range(n_clusters):
diff = data-centroids[i,:]
distance_m[:,i] = np.sqrt((diff * diff).sum(axis=1))
return distance_m
# --------------------------------------------------------------------------
def __update_centroids(self, data, fmm):
"""The method computes the updated centroids according the the computed
fuzzy membership matrix <fmm> of the previous centroids.
------------------------------------------------------------------------
Inputs:
- data: the input data points
- fmm: fuzzy membership matrix of each data point
Output:
- centroids: the newly computed centroids
"""
# ----------------------------------------------------------------------
# We start computing the normalizing denominator terms
# ----------------------------------------------------------------------
norm = np.sum(fmm**self.__m,axis=0)
# ----------------------------------------------------------------------
# Initialize the new centroids with zeros
# ----------------------------------------------------------------------
n_clusters = fmm.shape[1]
n_features = data.shape[1]
new_centroids = np.zeros((n_clusters, n_features))
# ----------------------------------------------------------------------
# Loop computing each one
# ----------------------------------------------------------------------
for i in range(n_clusters):
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Notice that we multiply the data points by the ith column of <fmm>
# which represent the probabities of being assigned to the ith clus-
# ter. After that, we sum all the weighted points and average them
# by dividing over the norm of that cluster.
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
new_centroids[i,:] = np.sum(data*(fmm[:,i]**self.__m)[:,None],
axis=0)/norm[i]
return new_centroids
# --------------------------------------------------------------------------
def _compute_membership(self, data, centroids):
"""The method computes the membership matrix of the data according to
the clusters specified by the given centroids
Inputs:
- data: the input data points being clustered
- centroids: numpy array including the cluster centroids;
its shape is (n_clusters, n_features)
Outputs:
- fmm: fuzzy membership matrix"""
# ----------------------------------------------------------------------
# First, compute the distance between the point and the other centroids
# ======================================================================
# Note we also add alpha, 1e-10 very little value, as we are computing
# 1 over the distances, and there might be 0 distance
# ----------------------------------------------------------------------
dist = self.__compute_dist(data, centroids) + 1e-10
# ----------------------------------------------------------------------
# We are computing the below value once because we need it in both the
# numerator and the denominator of the value to be computed
# ----------------------------------------------------------------------
sqr_dist = dist**(-2/(self.__m-1))
# ----------------------------------------------------------------------
# We compute the normalizing term (denominator)
# ----------------------------------------------------------------------
norm_dist = np.expand_dims(np.sum(sqr_dist,axis=1),axis=1)
fmm = sqr_dist / norm_dist
return fmm
# --------------------------------------------------------------------------
def __converged(self,centroids, new_centroids):
"""The method checks convergence"""
# ----------------------------------------------------------------------
# We compute the squarred difference between both; indicate convergence
# if the total distance of the centroids is below the eps
# ----------------------------------------------------------------------
diff = (centroids - new_centroids)**2
return (np.sum(diff) <= self.__eps)
# --------------------------------------------------------------------------
def fit(self, X, y=None, sample_weight=None):
"""The method computes the fuzzy k-means clustering algorithm
Inputs:
- X: training data
- y: ignored
- sample_weight: weights of each data point
"""
# ----------------------------------------------------------------------
# Generate a random_state and do some initializations
# ----------------------------------------------------------------------
X = self._validate_data(
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params(X)
random_state = check_random_state(self.random_state)
# ----------------------------------------------------------------------
# Initialize the centroids
# ----------------------------------------------------------------------
centroids = self._init_centroids(X,x_squared_norms=None,init= self.init,
random_state= random_state)
# ----------------------------------------------------------------------
# Do the first iteration
# ----------------------------------------------------------------------
fmm = self._compute_membership(X, centroids)
new_centroids = self.__update_centroids(X, fmm)
while( not self.__converged(centroids, new_centroids)):
centroids = new_centroids
# ------------------------------------------------------------------
# compute the new fuzzy membership matrix, fmm; then, update the new
# centroids.
# ------------------------------------------------------------------
fmm = self._compute_membership(X, centroids)
new_centroids = self.__update_centroids(X, fmm)
# ----------------------------------------------------------------------
# Save the results
# ----------------------------------------------------------------------
self.cluster_centers_ = new_centroids
self.labels_ = fmm.argmax(axis=1)
self.fmm_ = fmm
self.__fitted = True
return self
# --------------------------------------------------------------------------
def compute_membership(self, data):
"""The method computes the membership matrix of the data according to
the fitted points.
Inputs:
- data: the input data points being clustered
Outputs:
- fmm: fuzzy membership matrix of each data point"""
if not self.__is_fitted():
raise RuntimeError("You did not fit the estimator yet.")
return self._compute_membership(data, self.cluster_centers_)
# --------------------------------------------------------------------------
def predict(self, X, sample_weight=None):
"""The method clusters each data point according to a previously fitted
data."""
if not self.__is_fitted():
raise RuntimeError("You did not fit the estimator yet.")
X = self._check_test_data(X)
return self.compute_membership(X).argmax(axis=1)
# --------------------------------------------------------------------------
def score(self, X, y=None, sample_weight=None):
"""Not supported by this implementation"""
pass
# --------------------------------------------------------------------------
| [
"numpy.sum",
"numpy.zeros",
"sklearn.utils.check_random_state"
] | [((2284, 2316), 'numpy.zeros', 'np.zeros', (['(n_points, n_clusters)'], {}), '((n_points, n_clusters))\n', (2292, 2316), True, 'import numpy as np\n'), ((3285, 3316), 'numpy.sum', 'np.sum', (['(fmm ** self.__m)'], {'axis': '(0)'}), '(fmm ** self.__m, axis=0)\n', (3291, 3316), True, 'import numpy as np\n'), ((3621, 3655), 'numpy.zeros', 'np.zeros', (['(n_clusters, n_features)'], {}), '((n_clusters, n_features))\n', (3629, 3655), True, 'import numpy as np\n'), ((7697, 7734), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (7715, 7734), False, 'from sklearn.utils import check_random_state\n'), ((6180, 6204), 'numpy.sum', 'np.sum', (['sqr_dist'], {'axis': '(1)'}), '(sqr_dist, axis=1)\n', (6186, 6204), True, 'import numpy as np\n'), ((6819, 6831), 'numpy.sum', 'np.sum', (['diff'], {}), '(diff)\n', (6825, 6831), True, 'import numpy as np\n'), ((4383, 4438), 'numpy.sum', 'np.sum', (['(data * (fmm[:, i] ** self.__m)[:, None])'], {'axis': '(0)'}), '(data * (fmm[:, i] ** self.__m)[:, None], axis=0)\n', (4389, 4438), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def penalty(t, k):
return np.log(1+np.exp(k*t))/k
# return k * t / (k - t + 1)
temp_lb = 310
temp = np.linspace(temp_lb-1, temp_lb+2, 1000)
k = 10
diff = temp_lb-temp
p = penalty(diff, k)
plt.close('all')
plt.figure()
plt.plot(diff, 1445*p)
plt.plot(diff, np.zeros(len(diff)))
plt.show()
# counter = np.arange(0, 24)
# max_change = 0.8
# min_change = 0.02
# rate = 0.25
# values = max_change*np.exp(-rate*counter)+min_change
# plt.figure()
# plt.plot(counter,values)
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((163, 206), 'numpy.linspace', 'np.linspace', (['(temp_lb - 1)', '(temp_lb + 2)', '(1000)'], {}), '(temp_lb - 1, temp_lb + 2, 1000)\n', (174, 206), True, 'import numpy as np\n'), ((253, 269), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (262, 269), True, 'import matplotlib.pyplot as plt\n'), ((270, 282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (280, 282), True, 'import matplotlib.pyplot as plt\n'), ((283, 307), 'matplotlib.pyplot.plot', 'plt.plot', (['diff', '(1445 * p)'], {}), '(diff, 1445 * p)\n', (291, 307), True, 'import matplotlib.pyplot as plt\n'), ((342, 352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (350, 352), True, 'import matplotlib.pyplot as plt\n'), ((92, 105), 'numpy.exp', 'np.exp', (['(k * t)'], {}), '(k * t)\n', (98, 105), True, 'import numpy as np\n')] |
import numpy as np
import unittest
from network_attack_simulator.envs.action import Action
from network_attack_simulator.envs.machine import Machine
A_COST = 10
class MachineTestCase(unittest.TestCase):
def setUp(self):
self.test_r = 5000.0
self.services = np.asarray([True, False, True])
self.test_m1 = Machine((1, 1), self.services)
self.test_m2 = Machine((1, 2), self.services, self.test_r)
def test_successful_exploit(self):
# address is ignored as this is controlled and checked at Network level
exploit = Action((1, 1), A_COST, "exploit", 0)
# Test on machine with no sensitive docs (i.e. 0 value)
outcome, reward, services = self.test_m1.perform_action(exploit)
self.assertTrue(outcome)
self.assertEqual(reward, 0)
self.assertTrue((services == self.services).all())
# Test exploit on machine with sensitive docs
outcome, reward, services = self.test_m2.perform_action(exploit)
self.assertTrue(outcome)
self.assertEqual(reward, self.test_r)
self.assertTrue((services == self.services).all())
def test_unsuccessful_exploit(self):
# address is ignored as this is controlled and checked at Network level
exploit = Action((1, 1), A_COST, "exploit", 1)
# Test on machine with no sensitive docs (i.e. 0 value)
outcome, reward, services = self.test_m1.perform_action(exploit)
self.assertFalse(outcome)
self.assertEqual(reward, 0)
self.assertTrue((services == np.asarray([])).all())
# Test exploit on machine with sensitive docs
outcome, reward, services = self.test_m2.perform_action(exploit)
self.assertFalse(outcome)
self.assertEqual(reward, 0)
self.assertTrue((services == np.asarray([])).all())
def test_scan(self):
# address is ignored as this is controlled and checked at Network level
exploit = Action((1, 1), A_COST, "scan", None)
# Test on machine with no sensitive docs (i.e. 0 value)
outcome, reward, services = self.test_m1.perform_action(exploit)
self.assertTrue(outcome)
self.assertEqual(reward, 0)
self.assertTrue((services == self.services).all())
# Test exploit on machine with sensitive docs
outcome, reward, services = self.test_m2.perform_action(exploit)
self.assertTrue(outcome)
self.assertEqual(reward, 0)
self.assertTrue((services == self.services).all())
def test_eq(self):
s1 = np.asarray([True, False, True])
s2 = np.asarray([True, False, True])
m1 = Machine((1, 1), s1)
m2 = Machine((1, 1), s2)
self.assertTrue(m1 == m2)
m3 = Machine((1, 1), s1, self.test_r)
m4 = Machine((1, 1), s2, self.test_r)
self.assertTrue(m3 == m4)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"network_attack_simulator.envs.action.Action",
"numpy.asarray",
"network_attack_simulator.envs.machine.Machine"
] | [((2902, 2917), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2915, 2917), False, 'import unittest\n'), ((281, 312), 'numpy.asarray', 'np.asarray', (['[True, False, True]'], {}), '([True, False, True])\n', (291, 312), True, 'import numpy as np\n'), ((336, 366), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(1, 1)', 'self.services'], {}), '((1, 1), self.services)\n', (343, 366), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((390, 433), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(1, 2)', 'self.services', 'self.test_r'], {}), '((1, 2), self.services, self.test_r)\n', (397, 433), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((572, 608), 'network_attack_simulator.envs.action.Action', 'Action', (['(1, 1)', 'A_COST', '"""exploit"""', '(0)'], {}), "((1, 1), A_COST, 'exploit', 0)\n", (578, 608), False, 'from network_attack_simulator.envs.action import Action\n'), ((1281, 1317), 'network_attack_simulator.envs.action.Action', 'Action', (['(1, 1)', 'A_COST', '"""exploit"""', '(1)'], {}), "((1, 1), A_COST, 'exploit', 1)\n", (1287, 1317), False, 'from network_attack_simulator.envs.action import Action\n'), ((1969, 2005), 'network_attack_simulator.envs.action.Action', 'Action', (['(1, 1)', 'A_COST', '"""scan"""', 'None'], {}), "((1, 1), A_COST, 'scan', None)\n", (1975, 2005), False, 'from network_attack_simulator.envs.action import Action\n'), ((2565, 2596), 'numpy.asarray', 'np.asarray', (['[True, False, True]'], {}), '([True, False, True])\n', (2575, 2596), True, 'import numpy as np\n'), ((2610, 2641), 'numpy.asarray', 'np.asarray', (['[True, False, True]'], {}), '([True, False, True])\n', (2620, 2641), True, 'import numpy as np\n'), ((2655, 2674), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(1, 1)', 's1'], {}), '((1, 1), s1)\n', (2662, 2674), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((2688, 2707), 
'network_attack_simulator.envs.machine.Machine', 'Machine', (['(1, 1)', 's2'], {}), '((1, 1), s2)\n', (2695, 2707), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((2756, 2788), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(1, 1)', 's1', 'self.test_r'], {}), '((1, 1), s1, self.test_r)\n', (2763, 2788), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((2802, 2834), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(1, 1)', 's2', 'self.test_r'], {}), '((1, 1), s2, self.test_r)\n', (2809, 2834), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((1564, 1578), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (1574, 1578), True, 'import numpy as np\n'), ((1822, 1836), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (1832, 1836), True, 'import numpy as np\n')] |
import numpy as np
from datetime import datetime
import System
from System import Array
from DHI.Generic.MikeZero import eumUnit, eumQuantity
from DHI.Generic.MikeZero.DFS import (
DfsFileFactory,
DfsFactory,
DfsSimpleType,
DataValueType,
)
from DHI.Generic.MikeZero.DFS.dfs123 import Dfs1Builder
from .dutil import to_numpy, Dataset, find_item, get_item_info
from .eum import TimeStep, ItemInfo
from .helpers import safe_length
class Dfs1:
def read(self, filename, item_numbers=None, item_names=None):
"""Read data from the dfs1 file
Usage:
read(filename, item_numbers=None, item_names=None)
filename
full path to the dfs1 file.
item_numbers
read only the item_numbers in the array specified (0 base)
item_names
read only the items in the array specified, (takes precedence over item_numbers)
Return:
Dataset(data, time, items)
where data[nt,x]
"""
# NOTE. Item numbers are base 0 (everything else in the dfs is base 0)
# Open the dfs file for reading
dfs = DfsFileFactory.DfsGenericOpen(filename)
if item_names is not None:
item_numbers = find_item(dfs, item_names)
if item_numbers is None:
n_items = safe_length(dfs.ItemInfo)
item_numbers = list(range(n_items))
# Determine the size of the grid
axis = dfs.ItemInfo[0].SpatialAxis
xNum = axis.XCount
nt = dfs.FileInfo.TimeAxis.NumberOfTimeSteps
if nt == 0:
raise Warning("Static dfs1 files (with no time steps) are not supported.")
nt = 1
deleteValue = dfs.FileInfo.DeleteValueFloat
n_items = len(item_numbers)
data_list = []
for item in range(n_items):
# Initialize an empty data block
data = np.ndarray(shape=(nt, xNum), dtype=float)
data_list.append(data)
t = []
startTime = dfs.FileInfo.TimeAxis.StartDateTime
for it in range(dfs.FileInfo.TimeAxis.NumberOfTimeSteps):
for item in range(n_items):
itemdata = dfs.ReadItemTimeStep(item_numbers[item] + 1, it)
src = itemdata.Data
d = to_numpy(src)
d[d == deleteValue] = np.nan
data_list[item][it, :] = d
t.append(
startTime.AddSeconds(itemdata.Time).ToString("yyyy-MM-dd HH:mm:ss")
)
time = [datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in t]
items = get_item_info(dfs, item_numbers)
dfs.Close()
return Dataset(data_list, time, items)
def write(self, filename, data):
"""
Function: write to a pre-created dfs1 file.
filename:
full path and filename to existing dfs1 file
data:
list of matrices. len(data) must equal the number of items in the dfs2.
Each matrix must be of dimension time, x
usage:
write(filename, data) where data(nt, x)
Returns:
Nothing
"""
# Open the dfs file for writing
dfs = DfsFileFactory.Dfs1FileOpenEdit(filename)
# Determine the size of the grid
number_x = dfs.SpatialAxis.XCount
n_time_steps = dfs.FileInfo.TimeAxis.NumberOfTimeSteps
n_items = safe_length(dfs.ItemInfo)
deletevalue = -1e-035
if not all(np.shape(d)[0] == n_time_steps for d in data):
raise Warning(
"ERROR data matrices in the time dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if not all(np.shape(d)[1] == number_x for d in data):
raise Warning(
"ERROR data matrices in the X dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if not len(data) == n_items:
raise Warning(
"The number of matrices in data do not match the number of items in the dfs1 file."
)
for i in range(n_time_steps):
for item in range(n_items):
d = data[item][i, :]
d[np.isnan(d)] = deletevalue
darray = Array[System.Single](np.array(d.reshape(d.size, 1)[:, 0]))
dfs.WriteItemTimeStepNext(0, darray)
dfs.Close()
def create(
self,
filename,
data,
start_time=None,
dt=1,
items=None,
length_x=1,
x0=0,
coordinate=None,
timeseries_unit=TimeStep.SECOND,
title=None,
):
"""
Create a dfs1 file
Parameters
----------
filename: str
Location to write the dfs1 file
data: list[np.array]
list of matrices, one for each item. Matrix dimension: x, time
start_time: datetime, optional
start datetime
timeseries_unit: Timestep, optional
TimeStep unit default TimeStep.SECOND
dt: float
The time step (double based on the timeseries_unit). Therefore dt of 5.5 with timeseries_unit of minutes
means 5 mins and 30 seconds.
items: list[ItemInfo], optional
List of ItemInfo corresponding to a variable types (ie. Water Level).
coordinate:
['UTM-33', 12.4387, 55.2257, 327] for UTM, Long, Lat, North to Y orientation. Note: long, lat in decimal degrees
OR
[TODO: Support not Local Coordinates ...]
x0:
Lower right position
length_x:
length of each grid in the x direction (meters)
title:
title of the dfs2 file (can be blank)
"""
if title is None:
title = ""
n_time_steps = np.shape(data[0])[0]
number_x = np.shape(data[0])[1]
n_items = len(data)
if start_time is None:
start_time = datetime.now()
if coordinate is None:
coordinate = ["LONG/LAT", 0, 0, 0]
if items is None:
items = [ItemInfo(f"temItem {i+1}") for i in range(n_items)]
if not all(np.shape(d)[0] == n_time_steps for d in data):
raise Warning(
"ERROR data matrices in the time dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if not all(np.shape(d)[1] == number_x for d in data):
raise Warning(
"ERROR data matrices in the X dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if len(items) != n_items:
raise Warning(
"names must be an array of strings with the same number as matrices in data list"
)
if not type(start_time) is datetime:
raise Warning("start_time must be of type datetime ")
system_start_time = System.DateTime(
start_time.year,
start_time.month,
start_time.day,
start_time.hour,
start_time.minute,
start_time.second,
)
# Create an empty dfs1 file object
factory = DfsFactory()
builder = Dfs1Builder.Create(title, "mikeio", 0)
# Set up the header
builder.SetDataType(0)
builder.SetGeographicalProjection(
factory.CreateProjectionGeoOrigin(
coordinate[0], coordinate[1], coordinate[2], coordinate[3]
)
)
builder.SetTemporalAxis(
factory.CreateTemporalEqCalendarAxis(
timeseries_unit, system_start_time, 0, dt
)
)
builder.SetSpatialAxis(
factory.CreateAxisEqD1(eumUnit.eumUmeter, number_x, x0, length_x)
)
for i in range(n_items):
builder.AddDynamicItem(
items[i].name,
eumQuantity.Create(items[i].type, items[i].unit),
DfsSimpleType.Float,
DataValueType.Instantaneous,
)
try:
builder.CreateFile(filename)
except IOError:
print("cannot create dfs2 file: ", filename)
dfs = builder.GetFile()
deletevalue = dfs.FileInfo.DeleteValueFloat # -1.0000000031710769e-30
for i in range(n_time_steps):
for item in range(n_items):
d = data[item][i, :]
d[np.isnan(d)] = deletevalue
darray = Array[System.Single](np.array(d.reshape(d.size, 1)[:, 0]))
dfs.WriteItemTimeStepNext(0, darray)
dfs.Close()
| [
"DHI.Generic.MikeZero.DFS.DfsFileFactory.Dfs1FileOpenEdit",
"datetime.datetime.strptime",
"DHI.Generic.MikeZero.eumQuantity.Create",
"DHI.Generic.MikeZero.DFS.DfsFileFactory.DfsGenericOpen",
"datetime.datetime.now",
"DHI.Generic.MikeZero.DFS.dfs123.Dfs1Builder.Create",
"System.DateTime",
"numpy.ndarra... | [((1138, 1177), 'DHI.Generic.MikeZero.DFS.DfsFileFactory.DfsGenericOpen', 'DfsFileFactory.DfsGenericOpen', (['filename'], {}), '(filename)\n', (1167, 1177), False, 'from DHI.Generic.MikeZero.DFS import DfsFileFactory, DfsFactory, DfsSimpleType, DataValueType\n'), ((3210, 3251), 'DHI.Generic.MikeZero.DFS.DfsFileFactory.Dfs1FileOpenEdit', 'DfsFileFactory.Dfs1FileOpenEdit', (['filename'], {}), '(filename)\n', (3241, 3251), False, 'from DHI.Generic.MikeZero.DFS import DfsFileFactory, DfsFactory, DfsSimpleType, DataValueType\n'), ((7068, 7193), 'System.DateTime', 'System.DateTime', (['start_time.year', 'start_time.month', 'start_time.day', 'start_time.hour', 'start_time.minute', 'start_time.second'], {}), '(start_time.year, start_time.month, start_time.day,\n start_time.hour, start_time.minute, start_time.second)\n', (7083, 7193), False, 'import System\n'), ((7335, 7347), 'DHI.Generic.MikeZero.DFS.DfsFactory', 'DfsFactory', ([], {}), '()\n', (7345, 7347), False, 'from DHI.Generic.MikeZero.DFS import DfsFileFactory, DfsFactory, DfsSimpleType, DataValueType\n'), ((7366, 7404), 'DHI.Generic.MikeZero.DFS.dfs123.Dfs1Builder.Create', 'Dfs1Builder.Create', (['title', '"""mikeio"""', '(0)'], {}), "(title, 'mikeio', 0)\n", (7384, 7404), False, 'from DHI.Generic.MikeZero.DFS.dfs123 import Dfs1Builder\n'), ((1903, 1944), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(nt, xNum)', 'dtype': 'float'}), '(shape=(nt, xNum), dtype=float)\n', (1913, 1944), True, 'import numpy as np\n'), ((2533, 2574), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(x, '%Y-%m-%d %H:%M:%S')\n", (2550, 2574), False, 'from datetime import datetime\n'), ((5915, 5932), 'numpy.shape', 'np.shape', (['data[0]'], {}), '(data[0])\n', (5923, 5932), True, 'import numpy as np\n'), ((5955, 5972), 'numpy.shape', 'np.shape', (['data[0]'], {}), '(data[0])\n', (5963, 5972), True, 'import numpy as np\n'), ((6061, 6075), 'datetime.datetime.now', 'datetime.now', 
([], {}), '()\n', (6073, 6075), False, 'from datetime import datetime\n'), ((8056, 8104), 'DHI.Generic.MikeZero.eumQuantity.Create', 'eumQuantity.Create', (['items[i].type', 'items[i].unit'], {}), '(items[i].type, items[i].unit)\n', (8074, 8104), False, 'from DHI.Generic.MikeZero import eumUnit, eumQuantity\n'), ((4285, 4296), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (4293, 4296), True, 'import numpy as np\n'), ((8584, 8595), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (8592, 8595), True, 'import numpy as np\n'), ((3494, 3505), 'numpy.shape', 'np.shape', (['d'], {}), '(d)\n', (3502, 3505), True, 'import numpy as np\n'), ((3746, 3757), 'numpy.shape', 'np.shape', (['d'], {}), '(d)\n', (3754, 3757), True, 'import numpy as np\n'), ((6275, 6286), 'numpy.shape', 'np.shape', (['d'], {}), '(d)\n', (6283, 6286), True, 'import numpy as np\n'), ((6527, 6538), 'numpy.shape', 'np.shape', (['d'], {}), '(d)\n', (6535, 6538), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 14:27:45 2018
@author: aceituno
"""
import data_processing as dp
import matplotlib.pyplot as plt
import numpy as np
path_fig = './'
showPlots = True
savePlots = True
def plot_spiking_rate_with_learning(spikesInitial, spikesFinal, neurons, timePlot=50, bins=25, saveFigName = []):
bins = np.linspace(0, timePlot, bins)
times = dp.list_firing_times(spikesInitial, range(2,neurons))
plt.hist(times, bins, alpha=0.5, label='Before STDP')
times = dp.list_firing_times(spikesFinal, range(2,neurons))
plt.hist(times, bins, alpha=0.5, label='After STDP')
plt.legend()
if not showPlots:
print('Print should work!')
plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
if showPlots:
plt.show()
def rasterPlotPop(list_sim, populations_neuron_idxs, colorList = ['b', 'r', 'g', 'k'], saveFigName = []):
plt.ioff()
for pop_idx in range(0,len(populations_neuron_idxs)):
low_idx = min(populations_neuron_idxs[pop_idx])
high_idx = max(populations_neuron_idxs[pop_idx])
repetitions = []
times = []
for repet in range(0,len(list_sim)):
firing_times_per_repet = dp.list_firing_times(list_sim[repet], range(low_idx, high_idx))
times.extend(firing_times_per_repet)
repetitions.extend([repet]*len(firing_times_per_repet))
plt.scatter(repetitions, times, color = colorList[pop_idx],marker='.')
plt.title("Evolution of Median Latency")
plt.xlabel("Stimulus repetition")
plt.ylabel("Latency")
plt.legend(loc = 'upper right')
if savePlots:
print('Print should work!')
plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
if showPlots:
plt.show()
#def plot_latencyVsNoise(list_times, noise_Lvls, saveFigName = []):
# avg_avgLatency = []
# std_avgLatency = []
# for times in list_times:
# avg_avgLatency.append(np.average(times))
# std_avgLatency.append(np.std(times))
# plt.errorbar(noise_Lvls,avg_avgLatency, yerr = std_avgLatency)
# plt.plot(noise_Lvls,avg_avgLatency, 'bo', markersize=12)
#
# plt.title("Average Latency vs Input Frequency", fontsize=18)
# plt.xlabel("Random Spikes Between Repetitions", fontsize=16)
# plt.ylabel("Average Latency", fontsize=16)
# if saveFigName:
# plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
#
# if showPlots:
# plt.show()
#
#def plot_SpikeCountVsNoise(list_spikeCount, noise_Lvls, saveFigName = []):
# avg_Count = []
# std_Count = []
# for counts in list_spikeCount:
# avg_Count.append(np.average(counts))
# std_Count.append(np.std(counts))
# plt.errorbar(noise_Lvls,avg_Count, yerr = std_Count)
# plt.plot(noise_Lvls,avg_Count, 'bo', markersize=12)
# plt.title("Spike Count vs Input Frequency", fontsize=20)
# plt.xlabel("Random Spikes Between Repetitions", fontsize=16)
# plt.ylabel("Number of Spikes", fontsize=16)
# if saveFigName:
# plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
# if showPlots:
# plt.show()
def plot_firing_times_population(list_sim_results, pop_idx, saveFigName = []):
repetitions = range(len(list_sim_results))
neuron_idxs = range(pop_idx[0], pop_idx[1])
avg_latency, sdt_latency = dp.spike_timing_evolution(list_sim_results, neuron_idxs)
med_latency, upQ_latency, lowQ_latency = dp.spike_timing_evolution_quartiles(list_sim_results, neuron_idxs)
plt.plot(repetitions, S2_to_Pop_max*np.ones(len(repetitions)), 'k', label = 'Stimulus-population latency')
plt.plot(repetitions, S2_to_Pop_min*np.ones(len(repetitions)), 'k')
plt.plot(repetitions, med_latency, label = 'Median Latency')
plt.fill_between(repetitions, lowQ_latency, upQ_latency, alpha=0.5, label = 'Quartiles')
plt.title("Evolution of Median Latency")
plt.xlabel("Stimulus repetition")
plt.ylabel("Latency")
plt.legend(loc = 'upper right')
if saveFigName:
plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
if showPlots:
plt.show()
def plot_firing_times_population_avg(list_sim_results, pop_idx, saveFigName = []):
repetitions = range(len(list_sim_results))
neuron_idxs = range(pop_idx[0], pop_idx[1])
avg_latency, sdt_latency = dp.spike_timing_evolution(list_sim_results, neuron_idxs)
lowBar = [avg_latency[r] - sdt_latency[r] for r in range(0,len(avg_latency))]
highBar = [avg_latency[r] + sdt_latency[r] for r in range(0,len(avg_latency))]
plt.plot(repetitions, S2_to_Pop_max*np.ones(len(repetitions)), 'k', label = 'Stimulus-population latency')
plt.plot(repetitions, S2_to_Pop_min*np.ones(len(repetitions)), 'k')
plt.plot(repetitions, avg_latency, label = 'Median Latency')
plt.fill_between(repetitions, highBar, lowBar, alpha=0.5, label = 'Quartiles')
plt.title("Evolution of Median Latency")
plt.xlabel("Stimulus repetition")
plt.ylabel("Latency")
plt.legend(loc = 'upper right')
if saveFigName:
plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
if showPlots:
plt.show()
def plot_firing_times_percentiles(list_sim_results, population_idxs, percentiles, saveFigName = []):
repetitions = range(len(list_sim_results))
neuron_idxs = range(population_idxs[0], population_idxs[1])
list_perc = dp.spike_timing_evolution_percentiles(list_sim_results, neuron_idxs, percentiles)
for perc_idx in range(len(list_perc) - 1):
label = str(percentiles[perc_idx]) + '% - ' + str(percentiles[perc_idx + 1]) +'% of neurons'
plt.fill_between(repetitions, list_perc[perc_idx], list_perc[perc_idx + 1], label = label)
plt.plot(repetitions, S2_to_Pop_max*np.ones(len(repetitions)), 'k', label = 'Stimulus time window')
plt.plot(repetitions, S2_to_Pop_min*np.ones(len(repetitions)), 'k')
plt.title("Evolution Average Latency")
plt.xlabel("Stimulus repetition")
plt.ylabel("Latency")
plt.legend(loc = 'upper right')
if saveFigName:
plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
if showPlots:
plt.show()
def plot_firing_times_line_percentiles(list_sim_results, population_idxs, percentiles, saveFigName = []):
repetitions = range(len(list_sim_results))
neuron_idxs = range(population_idxs[0], population_idxs[1])
list_perc = dp.spike_timing_evolution_percentiles(list_sim_results, neuron_idxs, percentiles)
for perc_idx in range(len(list_perc)):
label = str(percentiles[perc_idx]) + 'th fastest neuron'
plt.plot(repetitions, list_perc[perc_idx], label=label)
plt.fill_between(repetitions, S2_to_Pop_max*np.ones(len(repetitions)), S2_to_Pop_min*np.ones(len(repetitions)), facecolor='black',alpha=0.2, label = 'Stimulus time window')
plt.title("Evolution of Neuron Latency", fontsize=18)
plt.xlabel("Stimulus repetition", fontsize=16 )
plt.ylabel("Latency", fontsize=16)
plt.legend(loc = 'upper right', fontsize = 12)
if saveFigName:
plt.savefig(path_fig + saveFigName +'.pdf', format = 'pdf')
if showPlots:
plt.show()
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"data_processing.spike_timing_evolution",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"data_processing.spike_timing_evolution_percentiles",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.fill_between",
"nump... | [((371, 401), 'numpy.linspace', 'np.linspace', (['(0)', 'timePlot', 'bins'], {}), '(0, timePlot, bins)\n', (382, 401), True, 'import numpy as np\n'), ((472, 525), 'matplotlib.pyplot.hist', 'plt.hist', (['times', 'bins'], {'alpha': '(0.5)', 'label': '"""Before STDP"""'}), "(times, bins, alpha=0.5, label='Before STDP')\n", (480, 525), True, 'import matplotlib.pyplot as plt\n'), ((599, 651), 'matplotlib.pyplot.hist', 'plt.hist', (['times', 'bins'], {'alpha': '(0.5)', 'label': '"""After STDP"""'}), "(times, bins, alpha=0.5, label='After STDP')\n", (607, 651), True, 'import matplotlib.pyplot as plt\n'), ((656, 668), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (666, 668), True, 'import matplotlib.pyplot as plt\n'), ((958, 968), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (966, 968), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1583), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution of Median Latency"""'], {}), "('Evolution of Median Latency')\n", (1552, 1583), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1621), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Stimulus repetition"""'], {}), "('Stimulus repetition')\n", (1598, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1647), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {}), "('Latency')\n", (1636, 1647), True, 'import matplotlib.pyplot as plt\n'), ((1652, 1681), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1662, 1681), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3508), 'data_processing.spike_timing_evolution', 'dp.spike_timing_evolution', (['list_sim_results', 'neuron_idxs'], {}), '(list_sim_results, neuron_idxs)\n', (3477, 3508), True, 'import data_processing as dp\n'), ((3554, 3620), 'data_processing.spike_timing_evolution_quartiles', 'dp.spike_timing_evolution_quartiles', (['list_sim_results', 'neuron_idxs'], {}), '(list_sim_results, neuron_idxs)\n', (3589, 
3620), True, 'import data_processing as dp\n'), ((3823, 3881), 'matplotlib.pyplot.plot', 'plt.plot', (['repetitions', 'med_latency'], {'label': '"""Median Latency"""'}), "(repetitions, med_latency, label='Median Latency')\n", (3831, 3881), True, 'import matplotlib.pyplot as plt\n'), ((3888, 3979), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['repetitions', 'lowQ_latency', 'upQ_latency'], {'alpha': '(0.5)', 'label': '"""Quartiles"""'}), "(repetitions, lowQ_latency, upQ_latency, alpha=0.5, label=\n 'Quartiles')\n", (3904, 3979), True, 'import matplotlib.pyplot as plt\n'), ((3981, 4021), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution of Median Latency"""'], {}), "('Evolution of Median Latency')\n", (3990, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4026, 4059), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Stimulus repetition"""'], {}), "('Stimulus repetition')\n", (4036, 4059), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4085), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {}), "('Latency')\n", (4074, 4085), True, 'import matplotlib.pyplot as plt\n'), ((4090, 4119), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4100, 4119), True, 'import matplotlib.pyplot as plt\n'), ((4478, 4534), 'data_processing.spike_timing_evolution', 'dp.spike_timing_evolution', (['list_sim_results', 'neuron_idxs'], {}), '(list_sim_results, neuron_idxs)\n', (4503, 4534), True, 'import data_processing as dp\n'), ((4897, 4955), 'matplotlib.pyplot.plot', 'plt.plot', (['repetitions', 'avg_latency'], {'label': '"""Median Latency"""'}), "(repetitions, avg_latency, label='Median Latency')\n", (4905, 4955), True, 'import matplotlib.pyplot as plt\n'), ((4962, 5038), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['repetitions', 'highBar', 'lowBar'], {'alpha': '(0.5)', 'label': '"""Quartiles"""'}), "(repetitions, highBar, lowBar, alpha=0.5, label='Quartiles')\n", (4978, 5038), True, 
'import matplotlib.pyplot as plt\n'), ((5045, 5085), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution of Median Latency"""'], {}), "('Evolution of Median Latency')\n", (5054, 5085), True, 'import matplotlib.pyplot as plt\n'), ((5090, 5123), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Stimulus repetition"""'], {}), "('Stimulus repetition')\n", (5100, 5123), True, 'import matplotlib.pyplot as plt\n'), ((5128, 5149), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {}), "('Latency')\n", (5138, 5149), True, 'import matplotlib.pyplot as plt\n'), ((5154, 5183), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (5164, 5183), True, 'import matplotlib.pyplot as plt\n'), ((5555, 5640), 'data_processing.spike_timing_evolution_percentiles', 'dp.spike_timing_evolution_percentiles', (['list_sim_results', 'neuron_idxs', 'percentiles'], {}), '(list_sim_results, neuron_idxs,\n percentiles)\n', (5592, 5640), True, 'import data_processing as dp\n'), ((6070, 6108), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution Average Latency"""'], {}), "('Evolution Average Latency')\n", (6079, 6108), True, 'import matplotlib.pyplot as plt\n'), ((6113, 6146), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Stimulus repetition"""'], {}), "('Stimulus repetition')\n", (6123, 6146), True, 'import matplotlib.pyplot as plt\n'), ((6151, 6172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {}), "('Latency')\n", (6161, 6172), True, 'import matplotlib.pyplot as plt\n'), ((6177, 6206), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6187, 6206), True, 'import matplotlib.pyplot as plt\n'), ((6583, 6668), 'data_processing.spike_timing_evolution_percentiles', 'dp.spike_timing_evolution_percentiles', (['list_sim_results', 'neuron_idxs', 'percentiles'], {}), '(list_sim_results, neuron_idxs,\n percentiles)\n', (6620, 6668), True, 'import data_processing as 
dp\n'), ((7023, 7076), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution of Neuron Latency"""'], {'fontsize': '(18)'}), "('Evolution of Neuron Latency', fontsize=18)\n", (7032, 7076), True, 'import matplotlib.pyplot as plt\n'), ((7081, 7127), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Stimulus repetition"""'], {'fontsize': '(16)'}), "('Stimulus repetition', fontsize=16)\n", (7091, 7127), True, 'import matplotlib.pyplot as plt\n'), ((7133, 7167), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latency"""'], {'fontsize': '(16)'}), "('Latency', fontsize=16)\n", (7143, 7167), True, 'import matplotlib.pyplot as plt\n'), ((7172, 7214), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(12)'}), "(loc='upper right', fontsize=12)\n", (7182, 7214), True, 'import matplotlib.pyplot as plt\n'), ((740, 798), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_fig + saveFigName + '.pdf')"], {'format': '"""pdf"""'}), "(path_fig + saveFigName + '.pdf', format='pdf')\n", (751, 798), True, 'import matplotlib.pyplot as plt\n'), ((826, 836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (834, 836), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1524), 'matplotlib.pyplot.scatter', 'plt.scatter', (['repetitions', 'times'], {'color': 'colorList[pop_idx]', 'marker': '"""."""'}), "(repetitions, times, color=colorList[pop_idx], marker='.')\n", (1466, 1524), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1804), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_fig + saveFigName + '.pdf')"], {'format': '"""pdf"""'}), "(path_fig + saveFigName + '.pdf', format='pdf')\n", (1757, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1840, 1842), True, 'import matplotlib.pyplot as plt\n'), ((4150, 4208), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_fig + saveFigName + '.pdf')"], {'format': '"""pdf"""'}), "(path_fig + saveFigName + '.pdf', 
format='pdf')\n", (4161, 4208), True, 'import matplotlib.pyplot as plt\n'), ((4236, 4246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4244, 4246), True, 'import matplotlib.pyplot as plt\n'), ((5214, 5272), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_fig + saveFigName + '.pdf')"], {'format': '"""pdf"""'}), "(path_fig + saveFigName + '.pdf', format='pdf')\n", (5225, 5272), True, 'import matplotlib.pyplot as plt\n'), ((5300, 5310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5308, 5310), True, 'import matplotlib.pyplot as plt\n'), ((5799, 5891), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['repetitions', 'list_perc[perc_idx]', 'list_perc[perc_idx + 1]'], {'label': 'label'}), '(repetitions, list_perc[perc_idx], list_perc[perc_idx + 1],\n label=label)\n', (5815, 5891), True, 'import matplotlib.pyplot as plt\n'), ((6237, 6295), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_fig + saveFigName + '.pdf')"], {'format': '"""pdf"""'}), "(path_fig + saveFigName + '.pdf', format='pdf')\n", (6248, 6295), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6333), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6331, 6333), True, 'import matplotlib.pyplot as plt\n'), ((6786, 6841), 'matplotlib.pyplot.plot', 'plt.plot', (['repetitions', 'list_perc[perc_idx]'], {'label': 'label'}), '(repetitions, list_perc[perc_idx], label=label)\n', (6794, 6841), True, 'import matplotlib.pyplot as plt\n'), ((7247, 7305), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_fig + saveFigName + '.pdf')"], {'format': '"""pdf"""'}), "(path_fig + saveFigName + '.pdf', format='pdf')\n", (7258, 7305), True, 'import matplotlib.pyplot as plt\n'), ((7333, 7343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7341, 7343), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
#import simpleaudio as sa
import scipy.io.wavfile as sw
'''
def audioplay(fs, y):
yout = np.iinfo(np.int16).max / np.max(np.abs(y)) * y
yout = yout.astype(np.int16)
play_obj = sa.play_buffer(yout, y.ndim, 2, fs)
'''
def wavread(wavefile):
    """Read a WAV file and return its samples normalised to float32.

    Parameters
    ----------
    wavefile : str or file-like
        Path of (or handle to) the WAV file to read.

    Returns
    -------
    fs : int
        Sampling rate in Hz.
    y : numpy.ndarray of float32
        Samples scaled to roughly [-1, 1].

    Notes
    -----
    uint8 WAVs are offset-binary (silence at 128), hence the -128 shift.
    The final ``else`` branch divides by 32768 regardless of the actual
    dtype, which does NOT normalise e.g. int32 data into [-1, 1] —
    kept as-is for backward compatibility; confirm before relying on it.
    """
    fs, y = sw.read(wavefile)
    if y.dtype == 'float32' or y.dtype == 'float64':
        max_y = 1
    elif y.dtype == 'uint8':
        # Cast up BEFORE subtracting: under NumPy 2.x promotion rules
        # (NEP 50) ``uint8_array - 128`` stays uint8 and wraps around
        # (0 - 128 -> 128), corrupting the signal.
        y = y.astype(np.int16) - 128
        max_y = 128
    elif y.dtype == 'int16':
        max_y = np.abs(np.iinfo(np.int16).min)  # 32768
    else:
        max_y = np.abs(np.iinfo(np.int16).min)  # 32768 (see Notes)
    y = y / max_y
    y = y.astype(np.float32)
    return fs, y
def wavwrite(wavefile, fs, data):
    """Write samples to a WAV file as int16, normalising by peak amplitude.

    Parameters
    ----------
    wavefile : str or file-like
        Output path (or handle).
    fs : int
        Sampling rate in Hz.
    data : numpy.ndarray
        Samples; float, uint8 or int16 input is rescaled to full int16
        range before writing.
    """
    if data.dtype == 'float32' or data.dtype == 'float64':
        max_y = np.max(np.abs(data))
        if max_y == 0:
            # All-silent input: avoid 0/0 -> NaN; write zeros instead.
            max_y = 1
    elif data.dtype == 'uint8':
        # Cast up BEFORE subtracting: under NumPy 2.x promotion rules
        # (NEP 50) ``uint8_array - 128`` stays uint8 and wraps around.
        data = data.astype(np.int16) - 128
        max_y = 128
    elif data.dtype == 'int16':
        max_y = np.abs(np.iinfo(np.int16).min)
    else:
        max_y = np.abs(np.iinfo(np.int16).min)
    info = np.iinfo(np.int16)
    scaled = data / max_y * np.abs(info.min)
    # Clip before casting: a sample at exactly +peak scales to +32768,
    # which would wrap to -32768 (a sign flip at the loudest sample)
    # when converted straight to int16.
    data = np.int16(np.clip(scaled, info.min, info.max))
    sw.write(wavefile, fs, data)
| [
"numpy.abs",
"numpy.iinfo",
"scipy.io.wavfile.read",
"scipy.io.wavfile.write"
] | [((284, 301), 'scipy.io.wavfile.read', 'sw.read', (['wavefile'], {}), '(wavefile)\n', (291, 301), True, 'import scipy.io.wavfile as sw\n'), ((1057, 1085), 'scipy.io.wavfile.write', 'sw.write', (['wavefile', 'fs', 'data'], {}), '(wavefile, fs, data)\n', (1065, 1085), True, 'import scipy.io.wavfile as sw\n'), ((758, 770), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (764, 770), True, 'import numpy as np\n'), ((1028, 1046), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (1036, 1046), True, 'import numpy as np\n'), ((494, 512), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (502, 512), True, 'import numpy as np\n'), ((551, 569), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (559, 569), True, 'import numpy as np\n'), ((905, 923), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (913, 923), True, 'import numpy as np\n'), ((962, 980), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (970, 980), True, 'import numpy as np\n')] |
import numpy as np
# Compute element-wise square of vector
def vsquare(V):
    """Return the element-wise square of vector *V*.

    Parameters
    ----------
    V : array_like
        Input vector (any shape NumPy accepts).

    Returns
    -------
    numpy.ndarray
        ``V ** 2`` element-wise.
    """
    # Bug fix: the original assigned the result to a local and fell off
    # the end, so the function always returned None.
    return np.power(V, 2)
| [
"numpy.power"
] | [((85, 99), 'numpy.power', 'np.power', (['V', '(2)'], {}), '(V, 2)\n', (93, 99), True, 'import numpy as np\n')] |
# author: <NAME>
# institution: MIT
"""Benchmark a Doppler-profile CO2 cross-section calculation with
L-HAPI (HAPILite), report the wall-clock time, and plot the result."""
import matplotlib.pyplot as plt
import time

import numpy as np

try:
    from HAPILite import CalcCrossSection
except ImportError:
    # Narrowed from a bare ``except:``, which would also have swallowed
    # SystemExit/KeyboardInterrupt. Fallback for running inside the package.
    from ..HAPILite import CalcCrossSection

# Wavenumber grid: 0 to 10000 cm^-1 at 0.001 cm^-1 resolution.
WaveNumber = np.arange(0, 10000, 0.001)

StartTime = time.time()
CrossSection = CalcCrossSection("CO2", Temp=1000.0, WN_Grid=WaveNumber,
                               Profile="Doppler", NCORES=-1)
StopTime = time.time()

# Message typo fixed ("time take" -> "time taken").
print("The time taken to calculate the cross-section is %4.3f"
      % (StopTime - StartTime))

plt.figure()
plt.plot(WaveNumber, CrossSection, "k-")
plt.title("L-HAPI")
plt.show()
| [
"matplotlib.pyplot.plot",
"HAPILite.CalcCrossSection",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"time.time",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((211, 237), 'numpy.arange', 'np.arange', (['(0)', '(10000)', '(0.001)'], {}), '(0, 10000, 0.001)\n', (220, 237), True, 'import numpy as np\n'), ((249, 260), 'time.time', 'time.time', ([], {}), '()\n', (258, 260), False, 'import time\n'), ((277, 367), 'HAPILite.CalcCrossSection', 'CalcCrossSection', (['"""CO2"""'], {'Temp': '(1000.0)', 'WN_Grid': 'WaveNumber', 'Profile': '"""Doppler"""', 'NCORES': '(-1)'}), "('CO2', Temp=1000.0, WN_Grid=WaveNumber, Profile='Doppler',\n NCORES=-1)\n", (293, 367), False, 'from HAPILite import CalcCrossSection\n'), ((373, 384), 'time.time', 'time.time', ([], {}), '()\n', (382, 384), False, 'import time\n'), ((475, 487), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (485, 487), True, 'import matplotlib.pyplot as plt\n'), ((488, 528), 'matplotlib.pyplot.plot', 'plt.plot', (['WaveNumber', 'CrossSection', '"""k-"""'], {}), "(WaveNumber, CrossSection, 'k-')\n", (496, 528), True, 'import matplotlib.pyplot as plt\n'), ((529, 548), 'matplotlib.pyplot.title', 'plt.title', (['"""L-HAPI"""'], {}), "('L-HAPI')\n", (538, 548), True, 'import matplotlib.pyplot as plt\n'), ((549, 559), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (557, 559), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import general as gen
def cluster(dataArray, k, dim, dNo, t):
    """Run up to *t* iterations of k-means and print the final clusters.

    Parameters
    ----------
    dataArray : array_like
        The points to cluster.
    k : int
        Number of clusters.
    dim, dNo :
        Passed through to the ``general`` helpers — presumably the point
        dimensionality and the number of points; confirm against callers.
    t : int
        Maximum number of iterations.

    Returns
    -------
    None
    """
    # reps = gen.initializeRandom(dataArray, k, dim, dNo)
    # NOTE(review): representatives are hard-coded (debug leftover?) and
    # only make sense for k == 3; confirm whether the random
    # initialisation above should be restored.
    reps = np.array([[1], [11], [28]])
    print(reps)
    for itr in range(t):
        clusters = gen.clustering_k_means(dataArray, k, reps, dNo)
        n = [gen.findmeanofcluster(clusters[i]) for i in range(k)]
        # Converge when no representative moved by more than the
        # tolerance. Bug fix: the original compared a boolean
        # (``diff.any()``) against the tolerance, which only triggered
        # when the representatives were *exactly* unchanged, and it also
        # ignored the sign of the movement.
        if np.abs(np.array(n) - np.array(reps)).max() <= 1e-8:
            print("Number of iterations = " + str(itr))
            break
        reps = n
    for i in range(k):
        print("Cluster #" + str(i))
        print("--------------------------")
        print(clusters[i])
        print("--------------------------")
    return None
| [
"numpy.array",
"general.findmeanofcluster",
"general.clustering_k_means"
] | [((209, 236), 'numpy.array', 'np.array', (['[[1], [11], [28]]'], {}), '([[1], [11], [28]])\n', (217, 236), True, 'import numpy as np\n'), ((339, 386), 'general.clustering_k_means', 'gen.clustering_k_means', (['dataArray', 'k', 'reps', 'dNo'], {}), '(dataArray, k, reps, dNo)\n', (361, 386), True, 'import general as gen\n'), ((461, 495), 'general.findmeanofcluster', 'gen.findmeanofcluster', (['clusters[i]'], {}), '(clusters[i])\n', (482, 495), True, 'import general as gen\n'), ((511, 522), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (519, 522), True, 'import numpy as np\n'), ((525, 539), 'numpy.array', 'np.array', (['reps'], {}), '(reps)\n', (533, 539), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.