text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
"""test_wave.py.
.. module:: test_wave
:platform: Unix, Windows
.. moduleauthor:: Juan C Galan-Hernandez <jcgalanh@gmail.com>
"""
import unittest
import waveletcodec.wave as wave
import numpy as np
import numpy.testing as npt
import cv2
class TestWave(unittest.TestCase):
    """Test unit of the wave module.

    This is the unit test of the wave module
    """

    def test_WCSet_creation(self):
        """Test case for creation of a WCSet.

        This test case checks for a correct creation of a WCSet from a
        numpy.ndarray instance
        """
        fakewave = np.eye(2 ** 2)
        obj = wave.WCSet(fakewave, 2, wave.CDF97)
        self.assertIsInstance(obj, wave.WCSet, "Obj class missmatch")
        npt.assert_array_equal(obj, fakewave)
        self.assertEqual(obj.level, 2, "Failed to read level")
        self.assertEqual(obj.filter, wave.CDF97, "Failed to read filter type")

    def test_cdf97(self):
        """Check that icdf97 inverts cdf97 to ~6 decimal places."""
        signal = np.ones((2 ** 6, 2 ** 6))
        wavelet = wave.cdf97(signal)
        isignal = wave.icdf97(wavelet)
        npt.assert_array_almost_equal(signal, isignal, 6)

    def test_to_image(self):
        """Check that as_image() yields an 8-bit displayable array.

        Bug fix: the parameter was previously named ``self_``, so the
        ``self.assertEqual`` call below raised NameError when the test ran.
        """
        signal = cv2.imread('docs/lena512color.tiff', cv2.IMREAD_GRAYSCALE)
        wavelet = wave.cdf97(signal, 3)
        display = wavelet.as_image()
        self.assertEqual(display.dtype, np.uint8, "Not an image")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "d7a9c8d19a0f5a0ea17a03bb238a8d5181434f0e", "size": 1415, "ext": "py", "lang": "Python", "max_stars_repo_path": "waveletcodec/test/test_wave.py", "max_stars_repo_name": "zenathark/jg.waveletcodec", "max_stars_repo_head_hexsha": "7994dd18ef5472e7e4d6447062cf4dc3c2f6463f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-05-14T01:42:18.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-14T01:42:18.000Z", "max_issues_repo_path": "waveletcodec/test/test_wave.py", "max_issues_repo_name": "zenathark/jg.waveletcodec", "max_issues_repo_head_hexsha": "7994dd18ef5472e7e4d6447062cf4dc3c2f6463f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "waveletcodec/test/test_wave.py", "max_forks_repo_name": "zenathark/jg.waveletcodec", "max_forks_repo_head_hexsha": "7994dd18ef5472e7e4d6447062cf4dc3c2f6463f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6981132075, "max_line_length": 78, "alphanum_fraction": 0.6530035336, "include": true, "reason": "import numpy", "num_tokens": 373}
|
/**
* \file
* \author Thomas Fischer
* \date 2010-06-16
* \brief Implementation of string helper functions.
*
* \copyright
* Copyright (c) 2012-2019, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*
*/
#include "StringTools.h"
#include <algorithm>
#include <cctype>
#include <cstdarg>
#include <cstdio>
#include <iomanip>
#include <logog/include/logog.hpp>
#include <boost/algorithm/string/replace.hpp>
namespace BaseLib
{
/// Splits a string into whitespace-separated tokens.
/// Consecutive whitespace is treated as a single separator, so no empty
/// tokens are produced.
std::vector<std::string> splitString(std::string const& str)
{
    std::vector<std::string> tokens;
    std::istringstream input(str);
    std::string token;
    // operator>> skips leading whitespace and stops at the next blank
    while (input >> token)
    {
        tokens.push_back(token);
    }
    return tokens;
}
/// Splits a string at every occurrence of the delimiter character.
/// Adjacent delimiters yield empty tokens; a trailing delimiter does not.
std::list<std::string> splitString(const std::string &str, char delim)
{
    std::list<std::string> tokens;
    std::istringstream input(str);
    for (std::string token; std::getline(input, token, delim);)
    {
        tokens.push_back(token);
    }
    return tokens;
}
/// Replaces every occurrence of \c searchString in \c stringToReplace with
/// \c replaceString and returns the result.
///
/// Implemented with plain std::string operations, dropping the boost
/// dependency for this simple task. An empty search pattern is a no-op
/// (which also guards the loop below against never advancing).
std::string replaceString(const std::string &searchString,
                          const std::string &replaceString,
                          std::string stringToReplace)
{
    if (searchString.empty())
    {
        return stringToReplace;
    }
    std::string::size_type pos = 0;
    while ((pos = stringToReplace.find(searchString, pos)) !=
           std::string::npos)
    {
        stringToReplace.replace(pos, searchString.length(), replaceString);
        // continue after the inserted text so the replacement itself is
        // never re-scanned (prevents infinite growth, e.g. "a" -> "aa")
        pos += replaceString.length();
    }
    return stringToReplace;
}
/// Removes every leading and trailing occurrence of \c ch from \c str,
/// in place. A string consisting only of \c ch becomes empty.
void trim(std::string &str, char ch)
{
    auto const last = str.find_last_not_of(ch);
    if (last == std::string::npos)
    {
        // nothing but `ch` (or empty input): clear everything
        str.clear();
        return;
    }
    // drop the trailing run of `ch`, then the leading one
    str.erase(last + 1);
    str.erase(0, str.find_first_not_of(ch));
}
/// Normalizes whitespace in \c str, in place: strips leading/trailing
/// blanks (via trim's default character), then collapses every interior
/// run of spaces to a single space.
void simplify(std::string &str)
{
    trim (str);
    auto const is_double_space =
        [](char a, char b) { return a == ' ' && b == ' '; };
    // std::unique drops the second of every adjacent space pair
    str.erase(std::unique(str.begin(), str.end(), is_double_space),
              str.end());
}
/// Left-pads \c str with \c ch up to a total width of \c maxlen.
/// If \c str is already \c maxlen characters or longer it is returned
/// unchanged.
std::string padLeft(std::string const& str, int maxlen, char ch)
{
    // Previously the stream was seeded with `str`, which was redundant and
    // misleading: the formatted write starts at position 0 and the padded
    // field is never shorter than `str`, so the seed was always fully
    // overwritten. Start from an empty stream instead.
    std::ostringstream ss;
    ss << std::right << std::setw(maxlen) << std::setfill(ch) << str;
    return ss.str();
}
/// Identity overload: returns the string unchanged. Provided so generic
/// code can call tostring() uniformly on string and non-string values.
std::string const& tostring(std::string const& value)
{
    return value;
}
/// printf-style formatting into a std::string.
/// \param format_str printf format specification
/// \return the formatted string, or an empty string if the format is
///         invalid (encoding error reported by vsnprintf).
std::string format(const char* format_str, ... )
{
    va_list args;
    va_start(args, format_str);
    // determine the required length without writing anything; a copy of
    // the va_list is needed because vsnprintf consumes its argument list
    va_list args_tmp;
    va_copy(args_tmp, args);
    int char_length = std::vsnprintf(nullptr, 0, format_str, args_tmp);
    va_end(args_tmp);
    if (char_length < 0)
    {
        // vsnprintf signals an encoding error with a negative return;
        // previously this produced a std::vector of invalid size
        va_end(args);
        return std::string{};
    }
    // +1 for the terminating null character
    std::vector<char> buffer(char_length + 1);
    vsnprintf(buffer.data(), buffer.size(), format_str, args);
    va_end(args);
    return std::string(buffer.data());
}
} // end namespace BaseLib
|
{"hexsha": "a1e10e4a1ec141e1c0c99a7ef635accc1158a4e1", "size": 2809, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "BaseLib/StringTools.cpp", "max_stars_repo_name": "mjamoein/ogs", "max_stars_repo_head_hexsha": "52e4d1bcf3bc21a44ee7710fc9900d8729334ad4", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BaseLib/StringTools.cpp", "max_issues_repo_name": "mjamoein/ogs", "max_issues_repo_head_hexsha": "52e4d1bcf3bc21a44ee7710fc9900d8729334ad4", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BaseLib/StringTools.cpp", "max_forks_repo_name": "mjamoein/ogs", "max_forks_repo_head_hexsha": "52e4d1bcf3bc21a44ee7710fc9900d8729334ad4", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7706422018, "max_line_length": 97, "alphanum_fraction": 0.6318974724, "num_tokens": 703}
|
# classes to hold the physical components of the system
# e.g. the stellar light, dark matter, black hole, globular clusters
import numpy as np
import logging
from dynamite import mges as mge
class System(object):
    """The physical system being modelled

    e.g. system is a galaxy. A system is composed of ``Components`` e.g. the
    galaxy is composed of stars, black hole, dark matter halo. This object is
    automatically created when the configuration file is read.
    """

    def __init__(self, *args):
        self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
        # component bookkeeping: counts are kept in sync by add_component
        self.n_cmp = 0
        self.cmp_list = []
        self.n_pot = 0
        self.n_kin = 0
        self.n_pop = 0
        # filled in later from the configuration file
        self.parameters = None
        self.distMPc = None
        self.name = None
        self.position_angle = None
        for component in args:
            self.add_component(component)

    def add_component(self, cmp):
        """add a component to the system

        Parameters
        ----------
        cmp : a ``dyn.physical_system.Component`` object

        Returns
        -------
        None
            updates the system component attributes
        """
        self.cmp_list += [cmp]
        self.n_cmp += 1
        # bool counts as 0/1 here
        self.n_pot += cmp.contributes_to_potential
        self.n_kin += len(cmp.kinematic_data)
        self.n_pop += len(cmp.population_data)

    def validate(self):
        """
        Validate the system

        Ensures the System has the required attributes: at least one component,
        no duplicate component names, and the ml parameter, and that the
        sformat string for the ml parameter is set.

        Raises
        ------
        ValueError : if required attributes or components are missing, or if
        there is no ml parameter

        Returns
        -------
        None.
        """
        # compare names, not object identities: the error message promises
        # "no duplicate component names", and distinct Component instances
        # never compare equal even if they share a name
        cmp_names = [cmp.name for cmp in self.cmp_list]
        if len(cmp_names) != len(set(cmp_names)):
            raise ValueError('No duplicate component names allowed')
        if (self.distMPc is None) or (self.name is None) \
           or (self.position_angle is None):
            text = 'System needs distMPc, name, and position_angle attributes'
            self.logger.error(text)
            raise ValueError(text)
        if not self.cmp_list:
            text = 'System has no components'
            self.logger.error(text)
            raise ValueError(text)
        # `or`, not `and`: the system is invalid if it has more than one
        # parameter OR its sole parameter is not ml (the previous `and`
        # let e.g. two parameters pass as long as the first was ml)
        if len(self.parameters) != 1 or self.parameters[0].name != 'ml':
            text = 'System needs ml as its sole parameter'
            self.logger.error(text)
            raise ValueError(text)
        self.parameters[0].update(sformat = '01.2f')  # sformat of ml parameter

    def validate_parset(self, par):
        """
        Validates the system's parameter values

        Kept separate from the validate method to facilitate easy calling from
        the ``ParameterGenerator`` class. Returns `True` if all parameters are
        non-negative, except for logarithmic parameters which are not checked.

        Parameters
        ----------
        par : dict
            { "p":val, ... } where "p" are the system's parameters and
            val are their respective raw values

        Returns
        -------
        isvalid : bool
            True if the parameter set is valid, False otherwise
        """
        p_raw_values = [par[p.name]
                        for p in self.parameters if not p.logarithmic]
        isvalid = np.all(np.sign(p_raw_values) >= 0)
        if not isvalid:
            self.logger.debug(f'Invalid system parameters {par}: at least '
                              'one negative non-log parameter.')
        return bool(isvalid)

    def __repr__(self):
        return f'{self.__class__.__name__} with {self.__dict__}'

    def get_component_from_name(self, cmp_name):
        """get_component_from_name

        Parameters
        ----------
        cmp_name : string
            component name (as specified in the config file)

        Returns
        -------
        a ``dyn.physical_system.Component`` object
        """
        cmp_list_list = np.array([cmp0.name for cmp0 in self.cmp_list])
        idx = np.where(cmp_list_list == cmp_name)
        self.logger.debug(f'Checking for 1 and only 1 component {cmp_name}...')
        error_msg = f"There should be 1 and only 1 component named {cmp_name}"
        assert len(idx[0]) == 1, error_msg
        self.logger.debug('...check ok.')
        component = self.cmp_list[idx[0][0]]
        return component

    def get_component_from_class(self, cmp_class):
        """get_component_from_class

        Parameters
        ----------
        cmp_class : string
            name of the component type/class

        Raises
        -------
        ValueError : if there are more than one component of the same class.
        # TODO: remove this limit, e.g. if we had two MGE-based components
        one for stars, one for gas

        Returns
        -------
        a ``dyn.physical_system.Component`` object
        """
        self.logger.debug('Checking for 1 and only 1 component of class '
                          f'{cmp_class}...')
        components = filter(lambda c: isinstance(c,cmp_class), self.cmp_list)
        component = next(components, False)
        # exactly one match required: a second hit (or none at all) is fatal
        if component is False or next(components, False) is not False:
            error_msg = 'Actually... there should be 1 and only 1 ' \
                        f'component of class {cmp_class}'
            self.logger.error(error_msg)
            raise ValueError(error_msg)
        self.logger.debug('...check ok.')
        return component

    def get_all_dark_components(self):
        """Get all components which are Dark

        Returns
        -------
        list
            a list of Component objects, keeping only the dark components
        """
        dark_cmp = [c for c in self.cmp_list if isinstance(c, DarkComponent)]
        return dark_cmp

    def get_all_dark_non_plummer_components(self):
        """Get all Dark components which are not plummer

        Useful in legacy orbit libraries for finding the dark halo component.
        For legacy models, the black hole is always a plummer, so any Dark but
        non plummer components must represent the dark halo.

        Returns
        -------
        list
            a list of Component objects, keeping only the dark components
        """
        dark_cmp = self.get_all_dark_components()
        dark_non_plum_cmp = [c for c in dark_cmp if not isinstance(c, Plummer)]
        return dark_non_plum_cmp

    def get_all_kinematic_data(self):
        """get_all_kinematic_data

        Loop over all components, extract their kinematics into a list.

        Returns
        -------
        list
            all_kinematics in a list
        """
        all_kinematics = []
        for component in self.cmp_list:
            all_kinematics += component.kinematic_data
        return all_kinematics
class Component(object):
    """A component of the physical system

    e.g. the stellar component, black hole, or dark halo of a galaxy

    Parameters
    ----------
    name : string
        a short but descriptive name of the component
    visible : Bool
        whether this is visible <--> whether it has an associated MGE
    contributes_to_potential : Bool
        whether this contributes_to_potential **not currently used**
    symmetry : string
        one of 'spherical', 'axisymm', or 'triax' **not currently used**
    kinematic_data : list
        a list of ``dyn.kinemtics.Kinematic`` data for this component
    parameters : list
        a list of ``dyn.parameter_space.Parameter`` objects for this component
    population_data : list
        a list of ``dyn.populations.Population`` data for this component **not
        currently used**
    """

    def __init__(self,
                 name = None,
                 visible=None,
                 contributes_to_potential=None,
                 symmetry=None,
                 kinematic_data=None,
                 population_data=None,
                 parameters=None):
        self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
        # default to the class name if no explicit name is given
        if name is None:
            self.name = self.__class__.__name__
        else:
            self.name = name
        self.visible = visible
        self.contributes_to_potential = contributes_to_potential
        self.symmetry = symmetry
        # create fresh lists here: mutable default arguments ([]) would be
        # shared between all Component instances
        self.kinematic_data = [] if kinematic_data is None else kinematic_data
        self.population_data = ([] if population_data is None
                                else population_data)
        self.parameters = [] if parameters is None else parameters

    def validate(self, par=None):
        """
        Validate the component

        Ensure it has the required attributes and parameters.
        Additionally, the sformat strings for the parameters are set.

        Parameters
        ----------
        par : a list with parameter names. Mandatory.

        Raises
        ------
        ValueError : if a required attribute is missing or the required
        parameters do not exist

        Returns
        -------
        None.
        """
        errstr = f'Component {self.__class__.__name__} needs attribute '
        if self.visible is None:
            text = errstr + 'visible'
            self.logger.error(text)
            raise ValueError(text)
        if self.contributes_to_potential is None:
            text = errstr + 'contributes_to_potential'
            self.logger.error(text)
            raise ValueError(text)
        if not self.parameters:
            text = errstr + 'parameters'
            self.logger.error(text)
            raise ValueError(text)
        # required parameter names must match exactly (order-insensitive)
        pars = [self.get_parname(p.name) for p in self.parameters]
        if set(pars) != set(par):
            text = f'{self.__class__.__name__} needs parameters ' + \
                   f'{par}, not {pars}.'
            self.logger.error(text)
            raise ValueError(text)

    def validate_parset(self, par):
        """
        Validates the component's parameter values.

        Kept separate from the
        validate method to facilitate easy calling from the parameter
        generator class. This is a `placeholder` method which returns
        `True` if all parameters are non-negative, except for logarithmic
        parameters which are not checked. Specific validation
        should be implemented for each component subclass.

        Parameters
        ----------
        par : dict
            { "p":val, ... } where "p" are the component's parameters and
            val are their respective raw values

        Returns
        -------
        isvalid : bool
            True if the parameter set is valid, False otherwise
        """
        p_raw_values = [par[self.get_parname(p.name)]
                        for p in self.parameters if not p.logarithmic]
        isvalid = np.all(np.sign(p_raw_values) >= 0)
        if not isvalid:
            self.logger.debug(f'Invalid parset {par}: at least one negative '
                              'non-log parameter.')
        # return a plain bool, consistent with System.validate_parset
        return bool(isvalid)

    def get_parname(self, par):
        """
        Strip the component name suffix from the parameter name.

        Parameters
        ----------
        par : str
            The full parameter name "parameter-component".

        Returns
        -------
        pure_parname : str
            The parameter name without the component name suffix.
        """
        try:
            pure_parname = par[:par.rindex(f'-{self.name}')]
        except ValueError:
            # rindex raises ValueError when the suffix is absent; a bare
            # except here would also swallow e.g. KeyboardInterrupt
            self.logger.error(f'Component name {self.name} not found in '
                              f'parameter string {par}')
            raise
        return pure_parname

    def __repr__(self):
        return (f'\n{self.__class__.__name__}({self.__dict__}\n)')
class VisibleComponent(Component):
    """Any visible component of the system, with an MGE

    Parameters
    ----------
    mge_pot : a ``dyn.mges.MGE`` object
        describing the (projected) surface-mass density
    mge_lum : a ``dyn.mges.MGE`` object
        describing the (projected) surface-luminosity density
    """

    def __init__(self, mge_pot=None, mge_lum=None, **kwds):
        # visible components carry MGE surface densities
        self.mge_pot = mge_pot
        self.mge_lum = mge_lum
        super().__init__(visible=True, **kwds)
        self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')

    def validate(self, **kwds):
        """Validate the MGEs in addition to the base-class checks."""
        super().validate(**kwds)
        both_are_mges = (isinstance(self.mge_pot, mge.MGE)
                         and isinstance(self.mge_lum, mge.MGE))
        if not both_are_mges:
            text = f'{self.__class__.__name__}.mge_pot and ' \
                   f'{self.__class__.__name__}.mge_lum ' \
                   'must be mges.MGE objects'
            self.logger.error(text)
            raise ValueError(text)
        if len(self.mge_pot.data) != len(self.mge_lum.data):
            text = f'{self.__class__.__name__}.mge_pot and ' \
                   f'{self.__class__.__name__}.mge_lum ' \
                   'must be of equal length'
            self.logger.error(text)
            raise ValueError(text)
class AxisymmetricVisibleComponent(VisibleComponent):
    """A visible component with axisymmetric symmetry."""

    def __init__(self, **kwds):
        super().__init__(symmetry='axisymm', **kwds)

    def validate(self):
        # required parameter names for this component type
        super().validate(par=['par1', 'par2'])
class TriaxialVisibleComponent(VisibleComponent):
    """Triaxial component with a MGE projected density

    Has parameters (p,q,u) = (b/a, c/a, sigma_obs/sigma_intrinsic) used for
    deprojecting the MGE. A given (p,q,u) correspond to a fixed set of
    `viewing angles` for the triaxial ellipsoid.
    """

    def __init__(self, **kwds):
        super().__init__(symmetry='triax', **kwds)
        self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
        # minimal observed flattening; set by validate() from the MGE data
        self.qobs = np.nan

    def validate(self):
        """
        Validate the TriaxialVisibleComponent

        In addition to validating parameter names and setting their sformat
        strings, also set self.qobs (minimal flattening from mge data)

        Raises
        ------
        ValueError : if the minimal flattening read from the MGE is NaN

        Returns
        -------
        None.
        """
        par = ['q', 'p', 'u']
        super().validate(par=par)
        self.qobs = np.amin(self.mge_pot.data['q'])
        # np.amin returns a fresh np.float64, so the previous identity
        # check (`self.qobs is np.nan`) could never fire; test the value
        if np.isnan(self.qobs):
            raise ValueError(f'{self.__class__.__name__}.qobs is np.nan')

    def validate_parset(self, par):
        """
        Validate the p, q, u parset

        Validates the triaxial component's p, q, u parameter set. Requires
        self.qobs to be set. A parameter set is valid if the resulting
        (theta, psi, phi) are not np.nan.

        Parameters
        ----------
        par : dict
            { "p":val, ... } where "p" are the component's parameters and
            val are their respective values

        Returns
        -------
        bool
            True if the parameter set is valid, False otherwise
        """
        tpp = self.triax_pqu2tpp(par['p'], par['q'], par['u'])
        return bool(not np.any(np.isnan(tpp)))

    def triax_pqu2tpp(self,p,q,u):
        """
        transform axis ratios to viewing angles

        transfer (p, q, u) to the three viewing angles (theta, psi, phi)
        with known flatting self.qobs.
        Taken from schw_basics, same as in vdB et al. 2008, MNRAS 385,2,647
        We should possibly revisit the expressions later

        Returns
        -------
        (theta, psi, phi) in degrees; all np.nan if deprojection fails
        """
        # avoid legacy_fortran's u=1 (rather, phi=psi=90deg) problem
        if u == 1:
            u *= (1-np.finfo(float).epsneg)  # same value as for np.double
        p2 = np.double(p) ** 2
        q2 = np.double(q) ** 2
        u2 = np.double(u) ** 2
        o2 = np.double(self.qobs) ** 2
        # Check for possible triaxial deprojection (v. d. Bosch 2004,
        # triaxpotent.f90 and v. d. Bosch et al. 2008, MNRAS 385, 2, 647)
        # note: renamed from `str`, which shadowed the builtin
        msg = f'{q} <= {p} <= {1}, ' \
              f'{max((q/self.qobs,p))} <= {u} <= {min((p/self.qobs),1)}, ' \
              f'q\'={self.qobs}'
        # 0<=t<=1, t = (1-p2)/(1-q2) and p,q>0 is the same as 0<q<=p<=1 and q<1
        t = (1-p2)/(1-q2)
        if not (0 <= t <= 1) or \
           not (max((q/self.qobs,p)) <= u <= min((p/self.qobs),1)) :
            theta = phi = psi = np.nan
            self.logger.debug(f'DEPROJ FAIL: {msg}')
        else:
            self.logger.debug(f'DEPROJ PASS: {msg}')
            w1 = (u2 - q2) * (o2 * u2 - q2) / ((1.0 - q2) * (p2 - q2))
            w2 = (u2 - p2) * (p2 - o2 * u2) * (1.0 - q2) / ((1.0 - u2) * (1.0 - o2 * u2) * (p2 - q2))
            w3 = (1.0 - o2 * u2) * (p2 - o2 * u2) * (u2 - q2) / ((1.0 - u2) * (u2 - p2) * (o2 * u2 - q2))
            # each angle is only defined when its radicand is non-negative
            if w1 >= 0.0:
                theta = np.arccos(np.sqrt(w1)) * 180 / np.pi
            else:
                theta = np.nan
            if w2 >= 0.0:
                phi = np.arctan(np.sqrt(w2)) * 180 / np.pi
            else:
                phi = np.nan
            if w3 >= 0.0:
                psi = 180 - np.arctan(np.sqrt(w3)) * 180 / np.pi
            else:
                psi = np.nan
        self.logger.debug(f'theta={theta}, phi={phi}, psi={psi}')
        return theta,psi,phi
class DarkComponent(Component):
    """Any dark component of the system, with no observed MGE or kinematics

    This is an abstract layer and none of the attributes/methods are currently
    used.
    """

    def __init__(self, density=None, **kwds):
        # dark components have no observed properties
        # (MGE/kinematics/populations); they are initialised with an input
        # density function instead
        self.density = density
        # self.mge = 'self.fit_mge()'
        super().__init__(visible=False,
                         kinematic_data=[],
                         population_data=[],
                         **kwds)

    def fit_mge(self, density, parameters, xyz_grid=[]):
        # fit an MGE for a given set of parameters;
        # will be used in the potential calculation
        rho = self.density.evaluate(xyz_grid, parameters)
        # self.mge = MGES.intrinsic_MGE_from_xyz_grid(xyz_grid, rho)
class Plummer(DarkComponent):
    """A Plummer sphere

    Defined with parameters: M [mass, Msol] and a [scale length, arcsec]
    """

    def __init__(self, **kwds):
        super().__init__(symmetry='spherical', **kwds)

    @staticmethod
    def density(x, y, z, pars):
        """Evaluate the Plummer density at (x, y, z).

        Declared a staticmethod: previously this was an instance method
        missing ``self``, so calling it through an instance silently passed
        the instance as ``x``. ``Plummer.density(x, y, z, pars)`` keeps
        working unchanged.

        Parameters
        ----------
        x, y, z : array_like
            cartesian coordinates
        pars : tuple
            (M, a) = (total mass, scale length)

        Returns
        -------
        rho : array_like
            the Plummer density at the given coordinates
        """
        M, a = pars
        r = (x**2 + y**2 + z**2)**0.5
        rho = 3*M/4/np.pi/a**3 * (1. + (r/a)**2)**-2.5
        return rho

    def validate(self):
        par = ['m', 'a']
        super().validate(par=par)
class NFW(DarkComponent):
    """An NFW halo

    Defined with parameters: c [concentration, R200/scale] and f
    [dm-fraction, M200/total-stellar-mass]
    """

    def __init__(self, **kwds):
        # numeric profile identifier used by the legacy code
        self.legacy_code = 1
        super().__init__(symmetry='spherical', **kwds)

    def validate(self):
        # required parameter names for an NFW halo
        super().validate(par=['c', 'f'])
class Hernquist(DarkComponent):
    """A Hernquist sphere

    Defined with parameters: rhoc [central density, Msun/km^3] and rc [scale
    length, km]
    """

    def __init__(self, **kwds):
        # numeric profile identifier used by the legacy code
        self.legacy_code = 2
        super().__init__(symmetry='spherical', **kwds)

    def validate(self):
        # required parameter names for a Hernquist sphere
        super().validate(par=['rhoc', 'rc'])
class TriaxialCoredLogPotential(DarkComponent):
    """A TriaxialCoredLogPotential

    see e.g. Binney & Tremaine second edition p.171
    Defined with parameters: p [B/A], q [C/A], Rc [core radius, kpc], Vc
    [asympt. circular velovity, km/s]
    """

    def __init__(self, **kwds):
        # numeric profile identifier used by the legacy code
        self.legacy_code = 3
        super().__init__(symmetry='triaxial', **kwds)

    def validate(self):
        # required parameter names for this potential
        super().validate(par=['Vc', 'Rc', 'p', 'q'])
class GeneralisedNFW(DarkComponent):
    """A GeneralisedNFW halo

    from Zhao (1996)
    Defined with parameters: concentration [R200/NFW scale length], Mvir [Msol],
    inner_log_slope []
    """

    def __init__(self, **kwds):
        # numeric profile identifier used by the legacy code
        self.legacy_code = 5
        super().__init__(symmetry='triaxial', **kwds)

    def validate(self):
        # required parameter names for a generalised NFW halo
        super().validate(par=['c', 'Mvir', 'gam'])

    def validate_parset(self, par):
        """
        Validates the GeneralisedNFW's parameter set.

        Requires c and Mvir >0, and gam leq 1

        Parameters
        ----------
        par : dict
            { "p":val, ... } where "p" are the component's parameters and
            val are their respective values

        Returns
        -------
        bool
            True if the parameter set is valid, False otherwise
        """
        out_of_range = (par['c'] < 0.) or (par['Mvir'] < 0.) \
            or (par['gam'] > 1)
        return not out_of_range
# end
|
{"hexsha": "302694ea697c61ba9ed5a9256e214d8738e61755", "size": 20844, "ext": "py", "lang": "Python", "max_stars_repo_path": "dynamite/physical_system.py", "max_stars_repo_name": "dynamics-of-stellar-systems/dynamite_release", "max_stars_repo_head_hexsha": "a921d8a1bde98f48daeea78213fb17b3edb223bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-10-14T12:22:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T15:32:59.000Z", "max_issues_repo_path": "dynamite/physical_system.py", "max_issues_repo_name": "dynamics-of-stellar-systems/dynamite_release", "max_issues_repo_head_hexsha": "a921d8a1bde98f48daeea78213fb17b3edb223bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dynamite/physical_system.py", "max_forks_repo_name": "dynamics-of-stellar-systems/dynamite_release", "max_forks_repo_head_hexsha": "a921d8a1bde98f48daeea78213fb17b3edb223bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-04T04:36:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-01T01:07:38.000Z", "avg_line_length": 32.0184331797, "max_line_length": 105, "alphanum_fraction": 0.5670696603, "include": true, "reason": "import numpy", "num_tokens": 5009}
|
function [Bx,By,Bz] = magnetic_field_current_loop(x,y,z,x_p,y_p,z_p,a,I0)
%Kilian O'Donoghue
%30th July 2013
%This function calculates the magnetic field resulting from a single
%circular coil of radius a, carrying a currnet I0. The coil points in the z
%direction. The centre of the coil is located at {x_p,y_p,z_p} and the
%magnetic field is calculated at the point or points stored in the arrays
%x, y and z. This code accepts meshgrid inputs or single values along with
%vectors.
%These equations are based on those given in "General relation for the vector
%magnetic field of a circular current loop: a closer look" by Robert
%Schill, this is available from IEEE.
global u0 %permeability of free space is a global variable
%Cylindrical radius of each field point measured from the coil axis.
rc=((x-x_p).^2+(y-y_p).^2).^.5; %Radial component is required for cylindrical coordinate system.
%Elliptic-integral modulus m = k^2 of the loop-field solution.
m=(4.*a.*rc).*(((rc+a).^2)+((z-z_p).^2)).^(-1); %This is a parameter for calculating the Elliptical integrals
%2nd-order Taylor series of the complete elliptic integrals in m;
%accurate for small m, i.e. points not too close to the wire itself.
kofkc=(pi/2)+(pi/8).*m+(9*pi/128).*m.^2; %K(k) elliptical function, this is a taylor expansion of the K elliptical integral.
eofkc=(pi/2)+(-pi/8).*m+(-3*pi/128).*m.^2;%E(k) elliptical function this is a taylor expansion of the E elliptical integral.
%Note for improved accuracy, Matlab has built in elliptical integral
%calculation but these expressions here are still very accurate when rc < a
Brc=(u0.*I0./(2.*pi.*rc)).*(z-z_p).*((((rc+a).^2)+((z-z_p).^2)).^(-.5)).*(-kofkc+eofkc.*((rc.^2+a.^2+(z-z_p).^2)./(((rc-a).^2)+((z-z_p).^2)))); %radial component of B%
Bz=(u0.*I0./(2.*pi)).*((((rc+a).^2)+((z-z_p).^2)).^(-.5)).*(kofkc-eofkc.*((rc.^2-a.^2+(z-z_p).^2)./(((rc-a).^2)+((z-z_p).^2)))); %axial component of B
Bx=Brc.*(x-x_p)./rc; %This converts the polar component into cartesian form.
By=Brc.*(y-y_p)./rc;
%The following sets any terms that result in Inf to zero, this occurs at
%the points near the coil itself.
%NOTE(review): only NaN values (0/0 on the coil axis, rc==0) are zeroed
%here; a true Inf (e.g. exactly on the wire) passes isnan and is kept.
%Confirm whether ~isfinite was intended, per the comment above.
Bx(isnan(Bx)) = 0 ;
By(isnan(By)) = 0 ;
Bz(isnan(Bz)) = 0 ;
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/43394-magnetic-field-of-circular-coils-using-elliptical-integrals/Magnetic field of circular coil using elliptical integrals/magnetic_field_current_loop.m"}
|
from __future__ import absolute_import, division, unicode_literals
import time
from collections import defaultdict
import numpy as np
import panel as pn
from bokeh.models import (
CustomJS, FactorRange, DatetimeAxis, Range1d, DataRange1d,
PolyDrawTool, BoxEditTool, PolyEditTool, FreehandDrawTool,
PointDrawTool
)
from panel.io.state import state
from panel.io.model import hold
from tornado import gen
from ...core import OrderedDict
from ...core.options import CallbackError
from ...core.util import (
datetime_types, dimension_sanitizer, dt64_to_dt
)
from ...element import Table
from ...streams import (
Stream, PointerXY, RangeXY, Selection1D, RangeX, RangeY, PointerX,
PointerY, BoundsX, BoundsY, Tap, SingleTap, DoubleTap, MouseEnter,
MouseLeave, PressUp, PanEnd, PlotSize, Draw, BoundsXY, PlotReset,
BoxEdit, PointDraw, PolyDraw, PolyEdit, CDSStream, FreehandDraw,
CurveEdit, SelectionXY, Lasso, SelectMode
)
from .util import bokeh_version, convert_timestamp
# Bokeh 2.3 renamed the tool tooltip property from 'custom_tooltip'
# to 'description'; pick the attribute name matching the installed version.
CUSTOM_TOOLTIP = (
    'description' if bokeh_version >= '2.3.0' else 'custom_tooltip'
)
class Callback(object):
"""
Provides a baseclass to define callbacks, which return data from
bokeh model callbacks, events and attribute changes. The callback
then makes this data available to any streams attached to it.
The definition of a callback consists of a number of components:
* models : Defines which bokeh models the callback will be
attached on referencing the model by its key in
the plots handles, e.g. this could be the x_range,
y_range, plot, a plotting tool or any other
bokeh mode.
* attributes : The attributes define which attributes to send
back to Python. They are defined as a dictionary
mapping between the name under which the variable
is made available to Python and the specification
of the attribute. The specification should start
with the variable name that is to be accessed and
the location of the attribute separated by
periods. All models defined by the models and can
be addressed in this way, e.g. to get the start of
the x_range as 'x' you can supply {'x':
'x_range.attributes.start'}. Additionally certain
handles additionally make the cb_obj variables
available containing additional information about
the event.
* on_events : If the Callback should listen to bokeh events this
should declare the types of event as a list (optional)
* on_changes : If the Callback should listen to model attribute
changes on the defined ``models`` (optional)
If either on_events or on_changes are declared the Callback will
be registered using the on_event or on_change machinery, otherwise
it will be treated as a regular callback on the model. The
callback can also define a _process_msg method, which can modify
the data sent by the callback before it is passed to the streams.
A callback supports three different throttling modes:
- adaptive (default): The callback adapts the throttling timeout
depending on the rolling mean of the time taken to process each
message. The rolling window is controlled by the `adaptive_window`
value.
- throttle: Uses the fixed `throttle_timeout` as the minimum amount
of time between events.
- debounce: Processes the message only when no new event has been
received within the `throttle_timeout` duration.
"""
# Throttling configuration
adaptive_window = 3
throttle_timeout = 100
throttling_scheme = 'adaptive'
# Attributes to sync
attributes = {}
# The plotting handle(s) to attach the JS callback on
models = []
# Conditions when callback should be skipped
skip_events = []
skip_changes = []
# Callback will listen to events of the supplied type on the models
on_events = []
# List of change events on the models to listen to
on_changes = []
# Internal state
_callbacks = {}
_transforms = []
def __init__(self, plot, streams, source, **params):
    """Attach the callback to ``plot`` and associate it with ``streams``."""
    self.plot = plot
    self.streams = streams
    self.source = source
    # throttling/bookkeeping state
    self._active = False
    self._prev_msg = None
    self._history = []
    self._last_event = time.time()
    # handle_ids must exist before reset() inspects it
    self.handle_ids = defaultdict(dict)
    self.reset()
def _transform(self, msg):
    """Apply each registered transform to ``msg`` in registration order."""
    for apply_transform in self._transforms:
        msg = apply_transform(msg, self)
    return msg
def _process_msg(self, msg):
    """
    Subclassable hook to preprocess the JSON message received in the
    callback before it is passed on to the attached streams.
    """
    return self._transform(msg)
def cleanup(self):
    """Reset state and drop references so this callback can be collected."""
    self.reset()
    self.handle_ids = None
    self.plot = None
    self.source = None
    self.streams = []
    # deregister this instance from the class-level callback registry
    surviving = {key: cb for key, cb in Callback._callbacks.items()
                 if cb is not self}
    Callback._callbacks = surviving
def reset(self):
    """Clear cached plot handles, registered callbacks and queued events."""
    if self.handle_ids:
        handles = self._init_plot_handles()
        for handle_name in self.models:
            if handle_name in handles:
                # drop the per-(handle, callback-type) registry entry
                cb_hash = (id(handles[handle_name]), id(type(self)))
                self._callbacks.pop(cb_hash, None)
    self.plot_handles = {}
    self._queue = []
def _filter_msg(self, msg, ids):
    """
    Filter event values that do not originate from the plotting
    handles associated with a particular stream, matching them by
    their model ids.
    """
    filtered = {}
    for key, value in msg.items():
        is_model_ref = isinstance(value, dict) and 'id' in value
        if not is_model_ref:
            filtered[key] = value
        elif value['id'] in ids:
            # unwrap the payload of references owned by this stream;
            # references to other streams' handles are dropped
            filtered[key] = value['value']
    return filtered
def on_msg(self, msg):
    """Dispatch a frontend message to the attached streams.

    For each stream the message is filtered to the stream's own handles,
    preprocessed, and applied; all updated streams are then triggered
    jointly. CallbackError from the trigger is rendered into the
    notebook/server handle when one is available, otherwise re-raised.
    """
    streams = []
    for stream in self.streams:
        handle_ids = self.handle_ids[stream]
        ids = list(handle_ids.values())
        filtered_msg = self._filter_msg(msg, ids)
        processed_msg = self._process_msg(filtered_msg)
        if not processed_msg:
            continue
        stream.update(**processed_msg)
        stream._metadata = {h: {'id': hid, 'events': self.on_events}
                            for h, hid in handle_ids.items()}
        streams.append(stream)
    try:
        Stream.trigger(streams)
    except CallbackError as e:
        if self.plot.root and self.plot.root.ref['id'] in state._handles:
            handle, _ = state._handles[self.plot.root.ref['id']]
            handle.update({'text/html': str(e)}, raw=True)
        else:
            # bare raise preserves the original traceback; the previous
            # `except Exception as e: raise e` clause was a no-op and has
            # been removed
            raise
    finally:
        # metadata is only valid for the duration of this message
        for stream in streams:
            stream._metadata = {}
    def _init_plot_handles(self):
        """
        Find all requested plotting handles and cache them along
        with the IDs of the models the callbacks will be attached to.
        """
        plots = [self.plot]
        if self.plot.subplots:
            plots += list(self.plot.subplots.values())
        # Subplot handles override the parent plot's handles of the
        # same name since subplots are merged in last
        handles = {}
        for plot in plots:
            for k, v in plot.handles.items():
                handles[k] = v
        self.plot_handles = handles
        # Restrict to the handle names this callback declared in `models`
        requested = {}
        for h in self.models:
            if h in self.plot_handles:
                requested[h] = handles[h]
        self.handle_ids.update(self._get_stream_handle_ids(requested))
        return requested
def _get_stream_handle_ids(self, handles):
"""
Gather the ids of the plotting handles attached to this callback
This allows checking that a stream is not given the state
of a plotting handle it wasn't attached to
"""
stream_handle_ids = defaultdict(dict)
for stream in self.streams:
for h in self.models:
if h in handles:
handle_id = handles[h].ref['id']
stream_handle_ids[stream][h] = handle_id
return stream_handle_ids
@classmethod
def resolve_attr_spec(cls, spec, cb_obj, model=None):
"""
Resolves a Callback attribute specification looking the
corresponding attribute up on the cb_obj, which should be a
bokeh model. If not model is supplied cb_obj is assumed to
be the same as the model.
"""
if not cb_obj:
raise Exception('Bokeh plot attribute %s could not be found' % spec)
if model is None:
model = cb_obj
spec = spec.split('.')
resolved = cb_obj
for p in spec[1:]:
if p == 'attributes':
continue
if isinstance(resolved, dict):
resolved = resolved.get(p)
else:
resolved = getattr(resolved, p, None)
return {'id': model.ref['id'], 'value': resolved}
def skip_event(self, event):
return any(skip(event) for skip in self.skip_events)
def skip_change(self, msg):
return any(skip(msg) for skip in self.skip_changes)
    def _set_busy(self, busy):
        """
        Sets panel.state to busy if available.
        """
        if 'busy' not in state.param:
            return # Check if busy state is supported
        from panel.util import edit_readonly
        # busy is a read-only parameter; temporarily lift that to set it
        with edit_readonly(state):
            state.busy = busy
    def _schedule_callback(self, cb, timeout=None, offset=True):
        """
        Schedule a callback on the current document, deriving the
        timeout (in milliseconds) from the configured throttling scheme
        when none is supplied.
        """
        if timeout is None:
            if self._history and self.throttling_scheme == 'adaptive':
                # Adaptive scheme: use the mean of recent event gaps
                timeout = int(np.array(self._history).mean()*1000)
            else:
                timeout = self.throttle_timeout
        if self.throttling_scheme != 'debounce' and offset:
            # Subtract the time taken since event started
            diff = time.time()-self._last_event
            # Never schedule closer than 50ms out
            timeout = max(timeout-(diff*1000), 50)
        if not pn.state.curdoc:
            # No active document to schedule on; run synchronously
            cb()
        else:
            pn.state.curdoc.add_timeout_callback(cb, int(timeout))
    def on_change(self, attr, old, new):
        """
        Process change events adding timeout to process multiple concerted
        value change at once rather than firing off multiple plot updates.
        """
        self._queue.append((attr, old, new, time.time()))
        if not self._active and self.plot.document:
            self._active = True
            self._set_busy(True)
            if self.plot.renderer.mode == 'server':
                # On the server dispatch asynchronously via a timeout
                self._schedule_callback(self.process_on_change, offset=False)
            else:
                # Otherwise process immediately, holding document events
                # so the changes render as a single batch
                with hold(self.plot.document):
                    self.process_on_change()
    def on_event(self, event):
        """
        Process bokeh UIEvents adding timeout to process multiple concerted
        value change at once rather than firing off multiple plot updates.
        """
        self._queue.append((event, time.time()))
        if not self._active and self.plot.document:
            self._active = True
            self._set_busy(True)
            if self.plot.renderer.mode == 'server':
                # On the server dispatch asynchronously via a timeout
                self._schedule_callback(self.process_on_event, offset=False)
            else:
                # Otherwise process immediately, holding document events
                # so the changes render as a single batch
                with hold(self.plot.document):
                    self.process_on_event()
    def throttled(self):
        """
        Check whether the current event should be throttled.

        Returns the remaining timeout in milliseconds if the event
        arrived too soon after the previous one, otherwise records the
        event time and returns False.
        """
        now = time.time()
        timeout = self.throttle_timeout/1000.
        if self.throttling_scheme in ('throttle', 'adaptive'):
            diff = (now-self._last_event)
            if self._history and self.throttling_scheme == 'adaptive':
                # Adaptive scheme derives the timeout from recent gaps
                timeout = np.array(self._history).mean()
            if diff < timeout:
                return int((timeout-diff)*1000)
        else:
            # Debounce: wait until events stop arriving for the timeout
            prev_event = self._queue[-1][-1]
            diff = (now-prev_event)
            if diff < timeout:
                return self.throttle_timeout
        self._last_event = time.time()
        return False
    @gen.coroutine
    def process_on_event_coroutine(self):
        # Coroutine wrapper allowing process_on_event to be scheduled
        # as a non-blocking callback on the tornado IOLoop
        self.process_on_event()
    def process_on_event(self):
        """
        Trigger callback change event and triggering corresponding streams.
        """
        if not self._queue:
            # Queue drained; mark the callback idle
            self._active = False
            self._set_busy(False)
            return
        throttled = self.throttled()
        if throttled and pn.state.curdoc:
            # Too soon after the previous event; try again later
            self._schedule_callback(self.process_on_event, throttled)
            return
        # Get unique event types in the queue
        events = list(OrderedDict([(event.event_name, event)
                                   for event, dt in self._queue]).values())
        self._queue = []
        # Process event types
        for event in events:
            if self.skip_event(event):
                continue
            msg = {}
            for attr, path in self.attributes.items():
                model_obj = self.plot_handles.get(self.models[0])
                msg[attr] = self.resolve_attr_spec(path, event, model_obj)
            self.on_msg(msg)
        # Record the gap since the last event for adaptive throttling
        w = self.adaptive_window-1
        diff = time.time()-self._last_event
        self._history = self._history[-w:] + [diff]
        if self.plot.renderer.mode == 'server':
            # Keep draining the queue while running on the server
            self._schedule_callback(self.process_on_event)
    @gen.coroutine
    def process_on_change_coroutine(self):
        # Coroutine wrapper allowing process_on_change to be scheduled
        # as a non-blocking callback on the tornado IOLoop
        self.process_on_change()
    def process_on_change(self):
        """
        Drain queued change events, resolve the current values of all
        watched attributes and push them to the streams when they
        differ from the previously seen message.
        """
        if not self._queue:
            self._active = False
            self._set_busy(False)
            return
        throttled = self.throttled()
        if throttled:
            self._schedule_callback(self.process_on_change, throttled)
            return
        self._queue = []
        msg = {}
        for attr, path in self.attributes.items():
            attr_path = path.split('.')
            if attr_path[0] == 'cb_obj':
                # cb_obj paths refer to this callback's first model
                obj_handle = self.models[0]
                path = '.'.join(self.models[:1]+attr_path[1:])
            else:
                obj_handle = attr_path[0]
            cb_obj = self.plot_handles.get(obj_handle)
            msg[attr] = self.resolve_attr_spec(path, cb_obj)
        if self.skip_change(msg):
            equal = True
        else:
            try:
                equal = msg == self._prev_msg
            except Exception:
                # Values (e.g. arrays) may not support equality checks
                equal = False
        if not equal or any(s.transient for s in self.streams):
            self.on_msg(msg)
            # Record the event gap for the adaptive throttling scheme
            w = self.adaptive_window-1
            diff = time.time()-self._last_event
            self._history = self._history[-w:] + [diff]
        self._prev_msg = msg
        if self.plot.renderer.mode == 'server':
            # Keep draining the queue while running on the server
            self._schedule_callback(self.process_on_change)
        else:
            self._active = False
    def set_callback(self, handle):
        """
        Set up on_change events for bokeh server interactions.

        Registers this callback's on_event/on_change methods for each
        declared event and attribute change on the supplied handle.
        """
        if self.on_events:
            for event in self.on_events:
                handle.on_event(event, self.on_event)
        if self.on_changes:
            for change in self.on_changes:
                if change in ['patching', 'streaming']:
                    # Patch and stream events do not need handling on server
                    continue
                handle.on_change(change, self.on_change)
    def initialize(self, plot_id=None):
        """
        Attach this callback to the requested bokeh model handles,
        merging with any callback of the same type that is already
        attached to the same handle.
        """
        handles = self._init_plot_handles()
        for handle_name in self.models:
            if handle_name not in handles:
                warn_args = (handle_name, type(self.plot).__name__,
                             type(self).__name__)
                print('%s handle not found on %s, cannot '
                      'attach %s callback' % warn_args)
                continue
            handle = handles[handle_name]
            # Hash the plot handle with Callback type allowing multiple
            # callbacks on one handle to be merged
            cb_hash = (id(handle), id(type(self)))
            if cb_hash in self._callbacks:
                # Merge callbacks if another callback has already been attached
                cb = self._callbacks[cb_hash]
                cb.streams = list(set(cb.streams+self.streams))
                for k, v in self.handle_ids.items():
                    cb.handle_ids[k].update(v)
                continue
            self.set_callback(handle)
            self._callbacks[cb_hash] = self
class PointerXYCallback(Callback):
    """
    Returns the mouse x/y-position on mousemove event.

    Out-of-bounds positions are clipped to the nearest axis bound and
    values on datetime/categorical axes are converted to the
    corresponding timestamp or factor.
    """
    attributes = {'x': 'cb_obj.x', 'y': 'cb_obj.y'}
    models = ['plot']
    on_events = ['mousemove']
    def _process_out_of_bounds(self, value, start, end):
        "Clips out of bounds values"
        if isinstance(value, np.datetime64):
            # Normalize value and bounds so they are comparable as
            # datetimes before applying the clipping below
            v = dt64_to_dt(value)
            if isinstance(start, (int, float)):
                start = convert_timestamp(start)
            if isinstance(end, (int, float)):
                end = convert_timestamp(end)
            s, e = start, end
            if isinstance(s, np.datetime64):
                s = dt64_to_dt(s)
            if isinstance(e, np.datetime64):
                e = dt64_to_dt(e)
        else:
            v, s, e = value, start, end
        if v < s:
            value = start
        elif v > e:
            value = end
        return value
    def _process_msg(self, msg):
        x_range = self.plot.handles.get('x_range')
        y_range = self.plot.handles.get('y_range')
        xaxis = self.plot.handles.get('xaxis')
        yaxis = self.plot.handles.get('yaxis')
        # Datetime axes report positions as raw timestamps
        if 'x' in msg and isinstance(xaxis, DatetimeAxis):
            msg['x'] = convert_timestamp(msg['x'])
        if 'y' in msg and isinstance(yaxis, DatetimeAxis):
            msg['y'] = convert_timestamp(msg['y'])
        # Categorical axes report positions as numeric factor indexes
        if isinstance(x_range, FactorRange) and isinstance(msg.get('x'), (int, float)):
            msg['x'] = x_range.factors[int(msg['x'])]
        elif 'x' in msg and isinstance(x_range, (Range1d, DataRange1d)):
            xstart, xend = x_range.start, x_range.end
            if xstart > xend:
                # Inverted axis; swap so clipping logic works
                xstart, xend = xend, xstart
            x = self._process_out_of_bounds(msg['x'], xstart, xend)
            if x is None:
                msg = {}
            else:
                msg['x'] = x
        if isinstance(y_range, FactorRange) and isinstance(msg.get('y'), (int, float)):
            msg['y'] = y_range.factors[int(msg['y'])]
        elif 'y' in msg and isinstance(y_range, (Range1d, DataRange1d)):
            ystart, yend = y_range.start, y_range.end
            if ystart > yend:
                ystart, yend = yend, ystart
            y = self._process_out_of_bounds(msg['y'], ystart, yend)
            if y is None:
                msg = {}
            else:
                msg['y'] = y
        return self._transform(msg)
class PointerXCallback(PointerXYCallback):
    """
    Returns the mouse x-position on mousemove event.

    Only the x-coordinate is requested from the frontend.
    """
    attributes = {'x': 'cb_obj.x'}
class PointerYCallback(PointerXYCallback):
    """
    Returns the mouse y-position on mousemove event.

    Only the y-coordinate is requested from the frontend.
    """
    attributes = {'y': 'cb_obj.y'}
class DrawCallback(PointerXYCallback):
    """
    Reports the mouse x/y-position during pan events along with a
    running count of completed strokes (pan gestures).
    """
    on_events = ['pan', 'panstart', 'panend']
    models = ['plot']
    attributes = {'x': 'cb_obj.x', 'y': 'cb_obj.y', 'event': 'cb_obj.event_name'}
    def __init__(self, *args, **kwargs):
        self.stroke_count = 0
        super().__init__(*args, **kwargs)
    def _process_msg(self, msg):
        event_name = msg.pop('event')
        # A completed pan gesture finishes one stroke
        if event_name == 'panend':
            self.stroke_count += 1
        payload = dict(msg, stroke_count=self.stroke_count)
        return self._transform(payload)
class TapCallback(PointerXYCallback):
    """
    Returns the mouse x/y-position on tap event.

    Unlike PointerXYCallback, out-of-bounds positions are reported as
    None rather than being clipped.

    Note: As of bokeh 0.12.5, there is no way to distinguish the
    individual tap events within a doubletap event.
    """
    on_events = ['tap', 'doubletap']
    def _process_out_of_bounds(self, value, start, end):
        "Sets out of bounds values to None"
        if isinstance(value, np.datetime64):
            # Normalize value and bounds so they are comparable as
            # datetimes before the bounds check below
            v = dt64_to_dt(value)
            if isinstance(start, (int, float)):
                start = convert_timestamp(start)
            if isinstance(end, (int, float)):
                end = convert_timestamp(end)
            s, e = start, end
            if isinstance(s, np.datetime64):
                s = dt64_to_dt(s)
            if isinstance(e, np.datetime64):
                e = dt64_to_dt(e)
        else:
            v, s, e = value, start, end
        if v < s or v > e:
            value = None
        return value
class SingleTapCallback(TapCallback):
    """
    Returns the mouse x/y-position on tap event.

    Out-of-bounds positions are set to None (see TapCallback).
    """
    on_events = ['tap']
class PressUpCallback(TapCallback):
    """
    Returns the mouse x/y-position of a pressup mouse event.

    Out-of-bounds positions are set to None (see TapCallback).
    """
    on_events = ['pressup']
class PanEndCallback(TapCallback):
    """
    Returns the mouse x/y-position of a pan end event.

    Out-of-bounds positions are set to None (see TapCallback).
    """
    on_events = ['panend']
class DoubleTapCallback(TapCallback):
    """
    Returns the mouse x/y-position on doubletap event.

    Out-of-bounds positions are set to None (see TapCallback).
    """
    on_events = ['doubletap']
class MouseEnterCallback(PointerXYCallback):
    """
    Returns the mouse x/y-position on mouseenter event, i.e. when
    mouse enters the plot canvas.

    Position processing (clipping, datetime/factor conversion) is
    inherited from PointerXYCallback.
    """
    on_events = ['mouseenter']
class MouseLeaveCallback(PointerXYCallback):
    """
    Returns the mouse x/y-position on mouseleave event, i.e. when
    mouse leaves the plot canvas.

    Position processing (clipping, datetime/factor conversion) is
    inherited from PointerXYCallback.
    """
    on_events = ['mouseleave']
class RangeXYCallback(Callback):
    """
    Returns the x/y-axis ranges of a plot.

    Ranges are normalized so start <= end and converted to datetimes
    on datetime axes.
    """
    attributes = {'x0': 'x_range.attributes.start',
                  'x1': 'x_range.attributes.end',
                  'y0': 'y_range.attributes.start',
                  'y1': 'y_range.attributes.end'}
    models = ['x_range', 'y_range']
    on_changes = ['start', 'end']
    def _process_msg(self, msg):
        data = {}
        if 'x0' in msg and 'x1' in msg:
            x0, x1 = msg['x0'], msg['x1']
            if x0 > x1:
                # Inverted axis; normalize so x0 <= x1
                x0, x1 = x1, x0
            if isinstance(self.plot.handles.get('xaxis'), DatetimeAxis):
                # Datetime axes report bounds as raw timestamps
                if not isinstance(x0, datetime_types):
                    x0 = convert_timestamp(x0)
                if not isinstance(x1, datetime_types):
                    x1 = convert_timestamp(x1)
            data['x_range'] = (x0, x1)
        if 'y0' in msg and 'y1' in msg:
            y0, y1 = msg['y0'], msg['y1']
            if y0 > y1:
                y0, y1 = y1, y0
            if isinstance(self.plot.handles.get('yaxis'), DatetimeAxis):
                if not isinstance(y0, datetime_types):
                    y0 = convert_timestamp(y0)
                if not isinstance(y1, datetime_types):
                    y1 = convert_timestamp(y1)
            data['y_range'] = (y0, y1)
        return self._transform(data)
class RangeXCallback(RangeXYCallback):
    """
    Returns the x-axis range of a plot.

    Only the x_range model is watched.
    """
    attributes = {'x0': 'x_range.attributes.start',
                  'x1': 'x_range.attributes.end'}
    models = ['x_range']
class RangeYCallback(RangeXYCallback):
    """
    Returns the y-axis range of a plot.

    Only the y_range model is watched.
    """
    attributes = {'y0': 'y_range.attributes.start',
                  'y1': 'y_range.attributes.end'}
    models = ['y_range']
class PlotSizeCallback(Callback):
    """
    Returns the actual width and height of a plot once the layout
    solver has executed.
    """
    models = ['plot']
    attributes = {'width': 'cb_obj.inner_width',
                  'height': 'cb_obj.inner_height'}
    on_changes = ['inner_width', 'inner_height']
    def _process_msg(self, msg):
        # Ignore messages until both dimensions are known and non-zero
        width = msg.get('width')
        height = msg.get('height')
        if not (width and height):
            return {}
        return self._transform(msg)
class SelectModeCallback(Callback):
    """
    Syncs the mode of the box_select and lasso_select tools to the
    attached stream.
    """
    attributes = {'box_mode': 'box_select.mode',
                  'lasso_mode': 'lasso_select.mode'}
    models = ['box_select', 'lasso_select']
    on_changes = ['mode']
    def _process_msg(self, msg):
        stream = self.streams[0]
        for key in ('box_mode', 'lasso_mode'):
            if key not in msg:
                continue
            mode = msg.pop(key)
            # Only report the mode when it actually changed
            if mode != stream.mode:
                msg['mode'] = mode
        return msg
class BoundsCallback(Callback):
    """
    Returns the bounds of a box_select tool.

    Bounds are reported as an (x0, y0, x1, y1) tuple with datetime
    axis values converted to timestamps.
    """
    attributes = {'x0': 'cb_obj.geometry.x0',
                  'x1': 'cb_obj.geometry.x1',
                  'y0': 'cb_obj.geometry.y0',
                  'y1': 'cb_obj.geometry.y1'}
    models = ['plot']
    on_events = ['selectiongeometry']
    # Only react to final, rectangular selection geometries
    skip_events = [lambda event: event.geometry['type'] != 'rect',
                   lambda event: not event.final]
    def _process_msg(self, msg):
        if all(c in msg for c in ['x0', 'y0', 'x1', 'y1']):
            # Datetime axes report geometry values as raw timestamps
            if isinstance(self.plot.handles.get('xaxis'), DatetimeAxis):
                msg['x0'] = convert_timestamp(msg['x0'])
                msg['x1'] = convert_timestamp(msg['x1'])
            if isinstance(self.plot.handles.get('yaxis'), DatetimeAxis):
                msg['y0'] = convert_timestamp(msg['y0'])
                msg['y1'] = convert_timestamp(msg['y1'])
            msg = {'bounds': (msg['x0'], msg['y0'], msg['x1'], msg['y1'])}
            return self._transform(msg)
        else:
            return {}
class SelectionXYCallback(BoundsCallback):
    """
    Converts a bounds selection to numeric or categorical x-range
    and y-range selections.

    For numeric axes the selection is the (low, high) tuple; for
    categorical (FactorRange) axes it is the list of selected factors,
    cast back to the element's dtype where possible.
    """
    def _process_msg(self, msg):
        msg = super()._process_msg(msg)
        if 'bounds' not in msg:
            return msg
        el = self.plot.current_frame
        x0, y0, x1, y1 = msg['bounds']
        # The x and y axes are handled identically; see the helper
        msg['x_selection'] = self._axis_selection(
            el, self.plot.handles['x_range'], x0, x1)
        msg['y_selection'] = self._axis_selection(
            el, self.plot.handles['y_range'], y0, y1)
        return msg

    def _axis_selection(self, el, axis_range, low, high):
        """
        Convert (low, high) bounds along one axis into either a list of
        categorical factors (for FactorRange axes) or a numeric range
        tuple.
        """
        if not isinstance(axis_range, FactorRange):
            return (low, high)
        # Categorical bounds arrive as fractional factor indexes
        i0, i1 = int(round(low)), int(round(high))
        factors = axis_range.factors[i0: i1]
        if axis_range.tags and axis_range.tags[0]:
            dim = el.get_dimension(axis_range.tags[0][0][0])
            if dim and hasattr(el, 'interface'):
                dtype = el.interface.dtype(el, dim)
                try:
                    # Cast string factors back to the element's dtype
                    factors = list(np.array(factors).astype(dtype))
                except Exception:
                    # Leave factors as-is if the cast fails
                    pass
        return factors
class BoundsXCallback(Callback):
    """
    Returns the bounds of a xbox_select tool.
    """
    attributes = {'x0': 'cb_obj.geometry.x0', 'x1': 'cb_obj.geometry.x1'}
    models = ['plot']
    on_events = ['selectiongeometry']
    skip_events = [lambda event: event.geometry['type'] != 'rect',
                   lambda event: not event.final]
    def _process_msg(self, msg):
        # Ignore partial messages missing either bound
        if 'x0' not in msg or 'x1' not in msg:
            return {}
        x0, x1 = msg['x0'], msg['x1']
        if isinstance(self.plot.handles.get('xaxis'), DatetimeAxis):
            # Datetime axes report geometry values as raw timestamps
            x0 = convert_timestamp(x0)
            x1 = convert_timestamp(x1)
        return self._transform({'boundsx': (x0, x1)})
class BoundsYCallback(Callback):
    """
    Returns the bounds of a ybox_select tool.
    """
    attributes = {'y0': 'cb_obj.geometry.y0', 'y1': 'cb_obj.geometry.y1'}
    models = ['plot']
    on_events = ['selectiongeometry']
    skip_events = [lambda event: event.geometry['type'] != 'rect',
                   lambda event: not event.final]
    def _process_msg(self, msg):
        # Ignore partial messages missing either bound
        if 'y0' not in msg or 'y1' not in msg:
            return {}
        y0, y1 = msg['y0'], msg['y1']
        if isinstance(self.plot.handles.get('yaxis'), DatetimeAxis):
            # Datetime axes report geometry values as raw timestamps
            y0 = convert_timestamp(y0)
            y1 = convert_timestamp(y1)
        return self._transform({'boundsy': (y0, y1)})
class LassoCallback(Callback):
    """
    Returns the geometry of a final lasso (poly) selection as an Nx2
    array of vertex coordinates.
    """
    attributes = {'xs': 'cb_obj.geometry.x', 'ys': 'cb_obj.geometry.y'}
    models = ['plot']
    on_events = ['selectiongeometry']
    skip_events = [lambda event: event.geometry['type'] != 'poly',
                   lambda event: not event.final]
    def _process_msg(self, msg):
        if 'xs' not in msg or 'ys' not in msg:
            return {}
        xs, ys = msg['xs'], msg['ys']
        # Serialized arrays may arrive as {index: value} dicts; restore
        # ordering by sorting on the integer index
        if isinstance(xs, dict):
            xs = [x for _, x in sorted((int(i), x) for i, x in xs.items())]
        if isinstance(ys, dict):
            ys = [y for _, y in sorted((int(i), y) for i, y in ys.items())]
        if xs is None or ys is None:
            return {}
        return {'geometry': np.column_stack([xs, ys])}
class Selection1DCallback(Callback):
    """
    Returns the current selection on a ColumnDataSource.
    """
    attributes = {'index': 'cb_obj.indices'}
    models = ['selected']
    on_changes = ['indices']
    def _process_msg(self, msg):
        el = self.plot.current_frame
        if 'index' in msg:
            # Coerce selection indices to plain ints
            msg = {'index': [int(v) for v in msg['index']]}
            if isinstance(el, Table):
                # Ensure that explicitly applied selection does not
                # trigger new events
                sel = el.opts.get('plot').kwargs.get('selected')
                if sel is not None and list(sel) == msg['index']:
                    return {}
            return self._transform(msg)
        else:
            return {}
class ResetCallback(Callback):
    """
    Signals the Reset stream if an event has been triggered.
    """
    models = ['plot']
    on_events = ['reset']
    def _process_msg(self, msg):
        # The incoming payload is irrelevant; a reset simply sets the flag
        return self._transform({'resetting': True})
class CDSCallback(Callback):
    """
    A Stream callback that syncs the data on a bokeh ColumnDataSource
    model with Python.
    """
    attributes = {'data': 'source.data'}
    models = ['source']
    on_changes = ['data', 'patching']

    def initialize(self, plot_id=None):
        """Attach the callback and seed the streams with the current data."""
        super().initialize(plot_id)
        plot = self.plot
        data = self._process_msg({'data': plot.handles['source'].data})['data']
        for stream in self.streams:
            stream.update(data=data)

    @staticmethod
    def _deserialize_dict_column(values):
        """
        Convert a JSON-serialized column of the form
        ``{index: value, ..., 'shape': ..., 'dtype': ...}`` back into a
        list (or an ndarray when a dtype is declared).
        """
        shape = values.pop('shape', None)
        dtype = values.pop('dtype', None)
        values.pop('dimension', None)
        # Keys are stringified integer indexes; restore ordering
        items = sorted([(int(k), v) for k, v in values.items()])
        values = [v for k, v in items]
        if dtype is not None:
            values = np.array(values, dtype=dtype).reshape(shape)
        return values

    def _process_msg(self, msg):
        """
        Normalize the serialized ColumnDataSource data, deserializing
        dict-encoded columns and mapping None back to NaN in numeric
        columns.
        """
        if 'data' not in msg:
            return {}
        msg['data'] = dict(msg['data'])
        for col, values in msg['data'].items():
            if isinstance(values, dict):
                # Single dict-encoded (array) column
                values = self._deserialize_dict_column(values)
            elif isinstance(values, list) and values and isinstance(values[0], dict):
                # Ragged column: a list of dict-encoded arrays
                values = [self._deserialize_dict_column(vals)
                          if isinstance(vals, dict) else vals
                          for vals in values]
            elif any(isinstance(v, (int, float)) for v in values):
                # Numeric column: JSON null round-trips as None
                values = [np.nan if v is None else v for v in values]
            msg['data'][col] = values
        return self._transform(msg)
class GlyphDrawCallback(CDSCallback):
    # JS snippet run on CDS 'data' changes which (re)applies the
    # user-defined style columns to every glyph in the data source
    _style_callback = """
    var types = Bokeh.require("core/util/types");
    var length = cb_obj.data[length_var].length;
    for (var i = 0; i < length; i++) {
      for (var style in styles) {
        var value = styles[style];
        if (types.isArray(value)) {
          value = value[i % value.length];
        }
        cb_obj.data[style][i] = value;
      }
    }
    """
    def _create_style_callback(self, cds, glyph, length_var):
        """
        Seed the CDS with one column per style (cycling the supplied
        values) and attach a JS callback which reapplies them whenever
        the data changes.
        """
        stream = self.streams[0]
        for style, values in stream.styles.items():
            cds.data[style] = [
                values[i % len(values)]
                for i in range(len(cds.data[length_var]))]
            # Point the glyph property at the matching style column
            setattr(glyph, style, style)
        cb = CustomJS(code=self._style_callback,
                      args={'styles': stream.styles,
                            'empty': stream.empty_value,
                            'length_var': length_var})
        cds.js_on_change('data', cb)
    def _update_cds_vdims(self, data):
        """
        Add any value dimensions not already in the data ensuring the
        element can be reconstituted in entirety.
        """
        element = self.plot.current_frame
        stream = self.streams[0]
        for d in element.vdims:
            dim = dimension_sanitizer(d.name)
            if dim in data:
                continue
            values = element.dimension_values(d)
            if len(values) != len(list(data.values())[0]):
                # Pad with the empty value so column lengths match
                values = np.concatenate([values, [stream.empty_value]])
            data[dim] = values
class PointDrawCallback(GlyphDrawCallback):
    """
    Attaches a bokeh PointDrawTool to the plot, syncing added, dragged
    and deleted points with the attached stream.
    """
    def initialize(self, plot_id=None):
        plot = self.plot
        stream = self.streams[0]
        cds = plot.handles['source']
        glyph = plot.handles['glyph']
        renderers = [plot.handles['glyph_renderer']]
        kwargs = {}
        if stream.num_objects:
            kwargs['num_objects'] = stream.num_objects
        if stream.tooltip:
            kwargs[CUSTOM_TOOLTIP] = stream.tooltip
        if stream.styles:
            # 'x' is used as the reference column to size style columns
            self._create_style_callback(cds, glyph, 'x')
        if stream.empty_value is not None:
            kwargs['empty_value'] = stream.empty_value
        # Only enable add/drag if every attached stream allows it
        point_tool = PointDrawTool(
            add=all(s.add for s in self.streams),
            drag=all(s.drag for s in self.streams),
            renderers=renderers, **kwargs)
        self.plot.state.tools.append(point_tool)
        self._update_cds_vdims(cds.data)
        # Add any value dimensions not already in the CDS data
        # ensuring the element can be reconstituted in entirety
        super().initialize(plot_id)
    def _process_msg(self, msg):
        self._update_cds_vdims(msg['data'])
        return super()._process_msg(msg)
class CurveEditCallback(GlyphDrawCallback):
    """
    Attaches a PointDrawTool that edits the vertices of a curve via a
    hidden scatter renderer, which is only shown while the tool is
    active or a selection exists.
    """
    def initialize(self, plot_id=None):
        plot = self.plot
        stream = self.streams[0]
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        # Invisible scatter used as the editable vertex renderer
        renderer = plot.state.scatter(glyph.x, glyph.y, source=cds,
                                      visible=False, **stream.style)
        renderers = [renderer]
        kwargs = {}
        if stream.tooltip:
            kwargs[CUSTOM_TOOLTIP] = stream.tooltip
        # Vertices may only be dragged, not added
        point_tool = PointDrawTool(
            add=False, drag=True, renderers=renderers, **kwargs
        )
        # Show the vertices while the tool is active or points are selected
        code="renderer.visible = tool.active || (cds.selected.indices.length > 0)"
        show_vertices = CustomJS(args={'renderer': renderer, 'cds': cds, 'tool': point_tool}, code=code)
        point_tool.js_on_change('change:active', show_vertices)
        cds.selected.js_on_change('indices', show_vertices)
        self.plot.state.tools.append(point_tool)
        self._update_cds_vdims(cds.data)
        super().initialize(plot_id)
    def _process_msg(self, msg):
        self._update_cds_vdims(msg['data'])
        return super()._process_msg(msg)
    def _update_cds_vdims(self, data):
        """
        Add any value dimensions not already in the data ensuring the
        element can be reconstituted in entirety.
        """
        element = self.plot.current_frame
        for d in element.vdims:
            dim = dimension_sanitizer(d.name)
            if dim not in data:
                data[dim] = element.dimension_values(d)
class PolyDrawCallback(GlyphDrawCallback):
    """
    Attaches a bokeh PolyDrawTool to the plot, syncing drawn and
    dragged polygons/paths with the attached stream.
    """
    def initialize(self, plot_id=None):
        plot = self.plot
        stream = self.streams[0]
        cds = self.plot.handles['cds']
        glyph = self.plot.handles['glyph']
        renderers = [plot.handles['glyph_renderer']]
        kwargs = {}
        if stream.num_objects:
            kwargs['num_objects'] = stream.num_objects
        if stream.show_vertices:
            # Scatter renderer used to display the polygon vertices
            vertex_style = dict({'size': 10}, **stream.vertex_style)
            r1 = plot.state.scatter([], [], **vertex_style)
            kwargs['vertex_renderer'] = r1
        if stream.styles:
            # 'xs' is used as the reference column to size style columns
            self._create_style_callback(cds, glyph, 'xs')
        if stream.tooltip:
            kwargs[CUSTOM_TOOLTIP] = stream.tooltip
        if stream.empty_value is not None:
            kwargs['empty_value'] = stream.empty_value
        poly_tool = PolyDrawTool(
            drag=all(s.drag for s in self.streams), renderers=renderers,
            **kwargs
        )
        plot.state.tools.append(poly_tool)
        self._update_cds_vdims(cds.data)
        super().initialize(plot_id)
    def _process_msg(self, msg):
        self._update_cds_vdims(msg['data'])
        return super()._process_msg(msg)
    def _update_cds_vdims(self, data):
        """
        Add any value dimensions not already in the data ensuring the
        element can be reconstituted in entirety.
        """
        element = self.plot.current_frame
        stream = self.streams[0]
        interface = element.interface
        # Multi-geometry interfaces declare uniqueness per geometry
        scalar_kwargs = {'per_geom': True} if interface.multi else {}
        for d in element.vdims:
            scalar = element.interface.isunique(element, d, **scalar_kwargs)
            dim = dimension_sanitizer(d.name)
            if dim not in data:
                if scalar:
                    values = element.dimension_values(d, not scalar)
                else:
                    values = [arr[:, 0] for arr in element.split(datatype='array', dimensions=[dim])]
                if len(values) != len(data['xs']):
                    # Pad with the empty value so column lengths match
                    values = np.concatenate([values, [stream.empty_value]])
                data[dim] = values
class FreehandDrawCallback(PolyDrawCallback):
    """
    Attaches a bokeh FreehandDrawTool to the plot, syncing freehand
    drawn paths with the attached stream.
    """
    def initialize(self, plot_id=None):
        plot = self.plot
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        stream = self.streams[0]
        if stream.styles:
            # 'xs' is used as the reference column to size style columns
            self._create_style_callback(cds, glyph, 'xs')
        kwargs = {}
        if stream.tooltip:
            kwargs[CUSTOM_TOOLTIP] = stream.tooltip
        if stream.empty_value is not None:
            kwargs['empty_value'] = stream.empty_value
        poly_tool = FreehandDrawTool(
            num_objects=stream.num_objects,
            renderers=[plot.handles['glyph_renderer']],
            **kwargs
        )
        plot.state.tools.append(poly_tool)
        self._update_cds_vdims(cds.data)
        # Skip PolyDrawCallback.initialize which would add a PolyDrawTool
        CDSCallback.initialize(self, plot_id)
class BoxEditCallback(GlyphDrawCallback):
    """
    Attaches a bokeh BoxEditTool to the plot, syncing box edits with
    the attached stream. Boxes are stored internally as center/size
    (x, y, width, height) and reported as corner bounds (x0, y0, x1, y1).
    """
    attributes = {'data': 'cds.data'}
    models = ['cds']
    def _path_initialize(self):
        # Convert path (xs/ys) data to rect (x/y/width/height) data and
        # replace the path renderer with a rect renderer the BoxEditTool
        # can operate on
        plot = self.plot
        cds = plot.handles['cds']
        data = cds.data
        element = self.plot.current_frame
        xs, ys, widths, heights = [], [], [], []
        for x, y in zip(data['xs'], data['ys']):
            x0, x1 = (np.nanmin(x), np.nanmax(x))
            y0, y1 = (np.nanmin(y), np.nanmax(y))
            xs.append((x0+x1)/2.)
            ys.append((y0+y1)/2.)
            widths.append(x1-x0)
            heights.append(y1-y0)
        data = {'x': xs, 'y': ys, 'width': widths, 'height': heights}
        data.update({vd.name: element.dimension_values(vd, expanded=False) for vd in element.vdims})
        cds.data.update(data)
        style = self.plot.style[self.plot.cyclic_index]
        # cmap is not a valid rect glyph property
        style.pop('cmap', None)
        r1 = plot.state.rect('x', 'y', 'width', 'height', source=cds, **style)
        if plot.handles['glyph_renderer'] in self.plot.state.renderers:
            self.plot.state.renderers.remove(plot.handles['glyph_renderer'])
        # Seed the streams with the converted data
        data = self._process_msg({'data': data})['data']
        for stream in self.streams:
            stream.update(data=data)
        return r1
    def initialize(self, plot_id=None):
        from .path import PathPlot
        stream = self.streams[0]
        cds = self.plot.handles['cds']
        kwargs = {}
        if stream.num_objects:
            kwargs['num_objects'] = stream.num_objects
        if stream.tooltip:
            kwargs[CUSTOM_TOOLTIP] = stream.tooltip
        renderer = self.plot.handles['glyph_renderer']
        if isinstance(self.plot, PathPlot):
            # Path plots must first be converted to rect glyphs
            renderer = self._path_initialize()
        if stream.styles:
            self._create_style_callback(cds, renderer.glyph, 'x')
        box_tool = BoxEditTool(renderers=[renderer], **kwargs)
        self.plot.state.tools.append(box_tool)
        self._update_cds_vdims(cds.data)
        # Skip CDSCallback.initialize which would re-seed the streams
        super(CDSCallback, self).initialize()
    def _process_msg(self, msg):
        data = super()._process_msg(msg)
        if 'data' not in data:
            return {}
        data = data['data']
        # Convert center/size columns into corner bounds
        x0s, x1s, y0s, y1s = [], [], [], []
        for (x, y, w, h) in zip(data['x'], data['y'], data['width'], data['height']):
            x0s.append(x-w/2.)
            x1s.append(x+w/2.)
            y0s.append(y-h/2.)
            y1s.append(y+h/2.)
        # Carry over all non-geometry (value dimension) columns
        values = {}
        for col in data:
            if col in ('x', 'y', 'width', 'height'):
                continue
            values[col] = data[col]
        msg = {'data': dict(values, x0=x0s, x1=x1s, y0=y0s, y1=y1s)}
        self._update_cds_vdims(msg['data'])
        return self._transform(msg)
class PolyEditCallback(PolyDrawCallback):
    """
    Attaches a bokeh PolyEditTool to the plot, syncing vertex edits of
    existing polygons/paths with the attached stream. When all streams
    are shared, an existing PolyEditTool on the plot is reused.
    """
    def initialize(self, plot_id=None):
        plot = self.plot
        cds = plot.handles['cds']
        vertex_tool = None
        if all(s.shared for s in self.streams):
            # Reuse a previously added PolyEditTool if one exists
            tools = [tool for tool in plot.state.tools if isinstance(tool, PolyEditTool)]
            vertex_tool = tools[0] if tools else None
        stream = self.streams[0]
        kwargs = {}
        if stream.tooltip:
            kwargs[CUSTOM_TOOLTIP] = stream.tooltip
        if vertex_tool is None:
            vertex_style = dict({'size': 10}, **stream.vertex_style)
            r1 = plot.state.scatter([], [], **vertex_style)
            vertex_tool = PolyEditTool(vertex_renderer=r1, **kwargs)
            plot.state.tools.append(vertex_tool)
        vertex_tool.renderers.append(plot.handles['glyph_renderer'])
        self._update_cds_vdims(cds.data)
        # Skip PolyDrawCallback.initialize which would add a PolyDrawTool
        CDSCallback.initialize(self, plot_id)
# Register the Callback implementation for each bokeh-backed stream type
Stream._callbacks['bokeh'].update({
    PointerXY   : PointerXYCallback,
    PointerX    : PointerXCallback,
    PointerY    : PointerYCallback,
    Tap         : TapCallback,
    SingleTap   : SingleTapCallback,
    DoubleTap   : DoubleTapCallback,
    PressUp     : PressUpCallback,
    PanEnd      : PanEndCallback,
    MouseEnter  : MouseEnterCallback,
    MouseLeave  : MouseLeaveCallback,
    RangeXY     : RangeXYCallback,
    RangeX      : RangeXCallback,
    RangeY      : RangeYCallback,
    BoundsXY    : BoundsCallback,
    BoundsX     : BoundsXCallback,
    BoundsY     : BoundsYCallback,
    Lasso       : LassoCallback,
    Selection1D : Selection1DCallback,
    PlotSize    : PlotSizeCallback,
    SelectionXY : SelectionXYCallback,
    Draw        : DrawCallback,
    PlotReset   : ResetCallback,
    CDSStream   : CDSCallback,
    BoxEdit     : BoxEditCallback,
    PointDraw   : PointDrawCallback,
    CurveEdit   : CurveEditCallback,
    FreehandDraw: FreehandDrawCallback,
    PolyDraw    : PolyDrawCallback,
    PolyEdit    : PolyEditCallback,
    SelectMode  : SelectModeCallback
})
|
{"hexsha": "913dc9f09af00dec3a9dcbdcbb4ba4d6b807d6a8", "size": 45066, "ext": "py", "lang": "Python", "max_stars_repo_path": "holoviews/plotting/bokeh/callbacks.py", "max_stars_repo_name": "kgullikson88/holoviews", "max_stars_repo_head_hexsha": "942c5ac7db46d1bc04b21a1fcf837285bbc9bde8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "holoviews/plotting/bokeh/callbacks.py", "max_issues_repo_name": "kgullikson88/holoviews", "max_issues_repo_head_hexsha": "942c5ac7db46d1bc04b21a1fcf837285bbc9bde8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "holoviews/plotting/bokeh/callbacks.py", "max_forks_repo_name": "kgullikson88/holoviews", "max_forks_repo_head_hexsha": "942c5ac7db46d1bc04b21a1fcf837285bbc9bde8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6129032258, "max_line_length": 104, "alphanum_fraction": 0.569054276, "include": true, "reason": "import numpy", "num_tokens": 10106}
|
#=
From https://reference.wolfram.com/language/ref/HypergeometricDistribution.html
"""
Suppose an urn has 100 elements, of which 40 are special.
...
Compute the probability that there are more than 25 special elements in a
draw of 50 elements.
Answer: 0.0120902
Compute the expected number of special elements in a draw of 50 elements.
Answer: 20
"""
mean(sumSpecialOf50): 19.9992
Distributions of variable sumSpecialOf50 (num:0)
19.00000 => 1210 (0.121000)
20.00000 => 1139 (0.113900)
21.00000 => 1118 (0.111800)
18.00000 => 972 (0.097200)
22.00000 => 902 (0.090200)
17.00000 => 794 (0.079400)
23.00000 => 732 (0.073200)
24.00000 => 619 (0.061900)
16.00000 => 596 (0.059600)
15.00000 => 429 (0.042900)
25.00000 => 384 (0.038400)
26.00000 => 256 (0.025600)
14.00000 => 255 (0.025500)
13.00000 => 151 (0.015100)
27.00000 => 151 (0.015100)
28.00000 => 102 (0.010200)
12.00000 => 72 (0.007200)
29.00000 => 43 (0.004300)
11.00000 => 28 (0.002800)
30.00000 => 21 (0.002100)
10.00000 => 13 (0.001300)
31.00000 => 7 (0.000700)
35.00000 => 1 (0.000100)
8.00000 => 1 (0.000100)
32.00000 => 1 (0.000100)
9.00000 => 1 (0.000100)
7.00000 => 1 (0.000100)
34.00000 => 1 (0.000100)
moreThan25SpecialOf50): 0.0583
Cf ~/blog/urn_model1.blog
~/webppl/urn_model1.wppl
=#
using Turing, StatsPlots, Distributions
include("jl_utils.jl")
@model function urn_model1(n=100,NumDraws=50,NumSpecial=40)
    # Hypergeometric urn model: an urn holds `n` elements, `NumSpecial` of
    # which are special; we study how many special elements land in the
    # first `NumDraws` positions of the (exchangeable) element vector.
    nonspecial = 1
    special = 2
    # Each of the n elements is special with probability NumSpecial/n.
    # `simplex` normalizes the weight vector (helper from jl_utils.jl -- TODO confirm).
    element ~ filldist(Categorical(simplex([n-NumSpecial,NumSpecial])),n) # [nonspecial,special]
    # We have (/observe) exactly 40 special elements (no random there!)
    numSpecial ~ Dirac(sum([element[i] == special for i in 1:n]))
    # Compute the expected number of special elements in a draw of 50 elements.
    # The "draw" is the first NumDraws entries of `element`.
    sumSpecialOf50 ~ Dirac(sum([element[i] == special ? 1 : 0 for i in 1:NumDraws]))
    # What's the probability that there are more than 25 special elements in a draw of 50 elements
    moreThan25SpecialOf50 ~ Dirac(sumSpecialOf50 > 25)
end
model = urn_model1()
# Alternative samplers, kept for reference:
# chns = sample(model, Prior(), 10_000)
# chns = sample(model, MH(), 10_000)
# Particle Gibbs with 5 particles handles the discrete latent variables.
chns = sample(model, PG(5), 10_000)
# chns = sample(model, SMC(), 10_000)
# chns = sample(model, IS(), 10_000)
# display(chns)
# display(plot(chns))
# mean_val / show_var_dist_pct are helpers from jl_utils.jl -- TODO confirm.
println("mean(sumSpecialOf50): ", mean_val(chns,:sumSpecialOf50))
show_var_dist_pct(chns, :sumSpecialOf50)
println("moreThan25SpecialOf50): ", mean_val(chns,:moreThan25SpecialOf50))
|
{"hexsha": "437d76e3aaea2110803a199ca62d7ab8af15a8c3", "size": 2722, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/turing/urn_model1.jl", "max_stars_repo_name": "tias/hakank", "max_stars_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 279, "max_stars_repo_stars_event_min_datetime": "2015-01-10T09:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:34:03.000Z", "max_issues_repo_path": "julia/turing/urn_model1.jl", "max_issues_repo_name": "tias/hakank", "max_issues_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-10-05T15:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T12:06:52.000Z", "max_forks_repo_path": "julia/turing/urn_model1.jl", "max_forks_repo_name": "tias/hakank", "max_forks_repo_head_hexsha": "87b7f180c9393afce440864eb9e5fb119bdec1a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2015-01-20T03:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T23:53:06.000Z", "avg_line_length": 29.9120879121, "max_line_length": 98, "alphanum_fraction": 0.6190301249, "num_tokens": 1000}
|
# Hierarchical Model for Abalone Length
Abalone were collected from various sites on the coast of California north of San Francisco. Here I'm going to develop a model to predict abalone lengths based on sites and harvest method - diving or rock-picking. I'm interested in how abalone lengths vary between sites and harvesting methods. This should be a hierarchical model as the abalone at the different sites are from the same population and should exhibit similar effects based on harvesting method. The hierarchical model will be beneficial since some of the sites are missing a harvesting method.
```python
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import sampyl as smp
from sampyl import np
import pandas as pd
```
Load our data here. This is just data collected in 2017.
```python
data = pd.read_csv('Clean2017length.csv')
data.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>data year</th>
<th>full lengths</th>
<th>group_id</th>
<th>site_code</th>
<th>Full_ID</th>
<th>Mode</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>2017</td>
<td>181</td>
<td>1.0</td>
<td>5</td>
<td>2017_06_24_005_30_01_01</td>
<td>R</td>
</tr>
<tr>
<th>1</th>
<td>2017</td>
<td>182</td>
<td>1.0</td>
<td>5</td>
<td>2017_06_24_005_30_01_01</td>
<td>R</td>
</tr>
<tr>
<th>2</th>
<td>2017</td>
<td>183</td>
<td>1.0</td>
<td>5</td>
<td>2017_06_24_005_30_01_01</td>
<td>R</td>
</tr>
<tr>
<th>3</th>
<td>2017</td>
<td>191</td>
<td>1.0</td>
<td>5</td>
<td>2017_06_24_005_30_01_01</td>
<td>R</td>
</tr>
<tr>
<th>4</th>
<td>2017</td>
<td>191</td>
<td>1.0</td>
<td>5</td>
<td>2017_06_24_005_30_01_01</td>
<td>R</td>
</tr>
</tbody>
</table>
</div>
Important columns here are:
* **full lengths:** length of abalone
* **mode:** Harvesting method, R: rock-picking, D: diving
* **site_code:** codes for 15 different sites
First some data preprocessing to get it into the correct format for our model.
```python
# Convert sites from codes into sequential integers starting at 0
unique_sites = data['site_code'].unique()
# Map raw site codes to contiguous indices 0..n_sites-1 (used as array index).
site_map = dict(zip(unique_sites, np.arange(len(unique_sites))))
data = data.assign(site=data['site_code'].map(site_map))
# Convert modes into integers as well
# Filter out 'R/D' modes, bad data collection
data = data[(data['Mode'] != 'R/D')]
mode_map = {'R':0, 'D':1}
data = data.assign(mode=data['Mode'].map(mode_map))
```
## A Hierarchical Linear Model
Here we'll define our model. We want to make a linear model for each site in the data where we predict the abalone length given the mode of catching and the site.
$$ y_s = \alpha_s + \beta_s * x_s + \epsilon $$
where $y_s$ is the predicted abalone length, $x$ denotes the mode of harvesting, $\alpha_s$ and $\beta_s$ are coefficients for each site $s$, and $\epsilon$ is the model error. We'll use this prediction for our likelihood with data $D_s$, using a normal distribution with mean $y_s$ and variance $ \epsilon^2$ :
$$ \prod_s P(D_s \mid \alpha_s, \beta_s, \epsilon) = \prod_s \mathcal{N}\left(D_s \mid y_s, \epsilon^2\right) $$
The abalone come from the same population just in different locations. We can take these similarities between sites into account by creating a hierarchical model where the coefficients are drawn from a higher-level distribution common to all sites.
$$
\begin{align}
\alpha_s & \sim \mathcal{N}\left(\mu_{\alpha}, \sigma_{\alpha}^2\right) \\
\beta_s & \sim \mathcal{N}\left(\mu_{\beta}, \sigma_{\beta}^2\right) \\
\end{align}
$$
```python
sites = data['site'].values        # per-observation site index (0..n_sites-1)
modes = data['mode'].values        # per-observation mode: 0 = rock-picking, 1 = diving
lengths = data['full lengths'].values
# Now define the model (log-probability proportional to the posterior)
def logp(μ_α, μ_β, σ_α, σ_β, site_α, site_β, ϵ):
    """Joint log-probability of the hierarchical linear model.

    site_α/site_β are per-site intercepts/slopes drawn from population
    normals with means μ_α/μ_β and scales σ_α/σ_β; ϵ is the observation
    noise scale.  Returns the accumulated log-probability via sampyl's
    Model object (called as model()).
    """
    model = smp.Model()
    # Population priors - normals for population means and half-Cauchy for population stds
    model.add(smp.normal(μ_α, sig=500),
              smp.normal(μ_β, sig=500),
              smp.half_cauchy(σ_α, beta=5),
              smp.half_cauchy(σ_β, beta=0.5))
    # Priors for site coefficients, sampled from population distributions
    model.add(smp.normal(site_α, mu=μ_α, sig=σ_α),
              smp.normal(site_β, mu=μ_β, sig=σ_β))
    # Prior for likelihood uncertainty
    model.add(smp.half_normal(ϵ))
    # Our estimate for abalone length, α + βx
    length_est = site_α[sites] + site_β[sites]*modes
    # Add the log-likelihood
    model.add(smp.normal(lengths, mu=length_est, sig=ϵ))
    return model()
```
```python
start = {'μ_α': 201., 'μ_β': 5., 'σ_α': 1., 'σ_β': 1.,
'site_α': np.ones(len(site_map))*201,
'site_β': np.zeros(len(site_map)),
'ϵ': 1.}
# Using NUTS is slower per sample, but more likely to give good samples (and converge)
sampler = smp.NUTS(logp, start)
chain = sampler(1100, burn=100, thin=2)
```
Progress: [##############################] 1100 of 1100 samples
There are some checks for convergence you can do, but they aren't implemented yet. Instead, we can visually inspect the chain. In general, the samples should be stable, the first half should vary around the same point as the second half.
```python
plt.plot(chain.σ_β);
```
With the posterior distribution, we can look at many different results. Here I'll make a function that plots the means and 95% credible regions (range that contains central 95% of the probability) for the coefficients $\alpha_s$ and $\beta_s$.
```python
def coeff_plot(coeff, ax=None):
    """Plot per-site posterior means with 2.5-97.5% credible-interval bars.

    coeff is a (n_samples, n_sites) array of posterior samples.  Uses the
    notebook-level `site_map` for the y-axis labels.  Returns the axis.
    """
    if ax is None:
        _, ax = plt.subplots(figsize=(3,5))
    post_means = coeff.mean(axis=0)
    cred_bounds = np.percentile(coeff, [2.5, 97.5], axis=0)
    y_positions = np.arange(len(post_means))
    ax.errorbar(post_means, y_positions,
                xerr=np.abs(post_means - cred_bounds), fmt='o')
    ax.set_yticks(np.arange(len(site_map)))
    ax.set_yticklabels(site_map.keys())
    ax.set_ylabel('Site')
    ax.grid(True, axis='x', color="#CCCCCC")
    ax.tick_params(axis='both', length=0)
    for side in ('top', 'right', 'left', 'bottom'):
        ax.spines[side].set_visible(False)
    return ax
```
Now we can look at how abalone lengths vary between sites for the rock-picking method ($\alpha_s$).
```python
ax = coeff_plot(chain.site_α)
ax.set_xlim(175, 225)
ax.set_xlabel('Abalone Length (mm)');
```
Here I'm plotting the mean and 95% credible regions (CR) of $\alpha$ for each site. This coefficient measures the average length of rock-picked abalones. We can see that the average abalone length varies quite a bit between sites. The CRs give a measure of the uncertainty in $\alpha$, wider CRs tend to result from less data at those sites.
Now, let's see how the abalone lengths vary between harvesting methods (the difference for diving is given by $\beta_s$).
```python
ax = coeff_plot(chain.site_β)
#ax.set_xticks([-5, 0, 5, 10, 15])
ax.set_xlabel('Mode effect (mm)');
```
Here I'm plotting the mean and 95% credible regions (CR) of $\beta$ for each site. This coefficient measures the difference in length of dive picked abalones compared to rock picked abalones. Most of the $\beta$ coefficients are above zero which indicates that abalones harvested via diving are larger than ones picked from the shore. For most of the sites, diving results in 5 mm longer abalone, while at site 72, the difference is around 12 mm. Again, wider CRs mean there is less data leading to greater uncertainty.
Next, I'll overlay the model on top of the data and make sure it looks right. We'll also see that some sites don't have data for both harvesting modes but our model still works because it's hierarchical. That is, we can get a posterior distribution for the coefficient from the population distribution even though the actual data is missing.
```python
def model_plot(data, chain, site, ax=None, n_samples=20):
    """Overlay posterior regression lines on the raw data for one site.

    data: preprocessed DataFrame with 'site', 'mode', 'full lengths' columns.
    chain: posterior samples with site_α/site_β arrays of shape (n_samples, n_sites).
    site: raw site code (translated through site_map below).
    n_samples: number of posterior draws rendered as faint lines.
    Returns the matplotlib axis.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(4,6))
    # Translate the raw site code into the contiguous column index.
    site = site_map[site]
    xs = np.linspace(-1, 3)
    for ii, (mode, m_data) in enumerate(data[data['site'] == site].groupby('mode')):
        # NOTE(review): a and b do not depend on `mode`, so the posterior
        # lines below are redrawn once per mode group (alpha accumulates).
        a = chain.site_α[:, site]
        b = chain.site_β[:, site]
        # now sample from the posterior...
        idxs = np.random.choice(np.arange(len(a)), size=n_samples, replace=False)
        # Draw light lines sampled from the posterior
        for idx in idxs:
            ax.plot(xs, a[idx] + b[idx]*xs, color='#E74C3C', alpha=0.05)
        # Draw the line from the posterior means
        ax.plot(xs, a.mean() + b.mean()*xs, color='#E74C3C')
        # Plot actual data points with a bit of noise for visibility
        mode_label = {0: 'Rock-picking', 1: 'Diving'}
        ax.scatter(ii + np.random.randn(len(m_data))*0.04,
                   m_data['full lengths'], edgecolors='none',
                   alpha=0.8, marker='.', label=mode_label[mode])
    ax.set_xlim(-0.5, 1.5)
    ax.set_xticks([0, 1])
    ax.set_xticklabels('')
    ax.set_ylim(150, 250)
    ax.grid(True, axis='y', color="#CCCCCC")
    ax.tick_params(axis='both', length=0)
    for each in ['top', 'right', 'left', 'bottom']:
        ax.spines[each].set_visible(False)
    return ax
```
```python
fig, axes = plt.subplots(figsize=(10, 5), ncols=4, sharey=True)
for ax, site in zip(axes, [5, 52, 72, 162]):
ax = model_plot(data, chain, site, ax=ax, n_samples=30)
ax.set_title(site)
first_ax = axes[0]
first_ax.legend(framealpha=1, edgecolor='none')
first_ax.set_ylabel('Abalone length (mm)');
```
For site 5, there are few data points for the diving method so there is a lot of uncertainty in the prediction. The prediction is also pulled lower than the data by the population distribution. Similarly, for site 52 there is no diving data, but we still get a (very uncertain) prediction because it's using the population information.
Finally, we can look at the harvesting mode effect for the population. Here I'm going to print out a few statistics for $\mu_{\beta}$.
```python
fig, ax = plt.subplots()
ax.hist(chain.μ_β, bins=30);
b_mean = chain.μ_β.mean()
b_CRs = np.percentile(chain.μ_β, [2.5, 97.5])
p_gt_0 = (chain.μ_β > 0).mean()
print(
"""Mean: {:.3f}
95% CR: [{:.3f}, {:.3f}]
P(mu_b) > 0: {:.3f}
""".format(b_mean, b_CRs[0], b_CRs[1], p_gt_0))
```
We can also look at the population distribution for $\beta_s$ by sampling from a normal distribution with mean and variance sampled from $\mu_\beta$ and $\sigma_\beta$.
$$
\beta_s \sim \mathcal{N}\left(\mu_{\beta}, \sigma_{\beta}^2\right)
$$
```python
import scipy.stats as stats
```
```python
samples = stats.norm.rvs(loc=chain.μ_β, scale=chain.σ_β)
plt.hist(samples, bins=30);
plt.xlabel('Dive harvesting effect (mm)')
```
It's apparent that dive harvested abalone are roughly 5 mm longer than rock-picked abalone. Maybe this is a bias of the divers to pick larger abalone. Or, it's possible that abalone that stay in the water grow larger.
|
{"hexsha": "67fe8aac687949b5cd9c018b051d724a8d2a8261", "size": 343600, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "examples/Abalone Model.ipynb", "max_stars_repo_name": "wilsonify/sampyl", "max_stars_repo_head_hexsha": "fb05a0d04393e4f1691bcc9bc664dbc1b688fc97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-14T21:20:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-14T21:20:53.000Z", "max_issues_repo_path": "examples/Abalone Model.ipynb", "max_issues_repo_name": "wilsonify/sampyl", "max_issues_repo_head_hexsha": "fb05a0d04393e4f1691bcc9bc664dbc1b688fc97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/Abalone Model.ipynb", "max_forks_repo_name": "wilsonify/sampyl", "max_forks_repo_head_hexsha": "fb05a0d04393e4f1691bcc9bc664dbc1b688fc97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 555.9870550162, "max_line_length": 180532, "alphanum_fraction": 0.9416268917, "converted": true, "num_tokens": 3281}
|
import numpy as np
from datetime import datetime
import time
from sklearn import tree
from sklearn.preprocessing import normalize, scale
from sklearn.metrics import accuracy_score
fmt = '%H:%M:%S'

def get_current_time():
    """Return the current local time formatted as HH:MM:SS.

    The original body called time.ctime() and discarded the result — a
    pure no-op that has been removed.
    """
    return time.strftime(fmt)
# Feature columns 42..752 — presumably the pixel/feature columns of the
# CSV; verify against the file header.
r = range(42, 753)
trainX = np.genfromtxt(fname='trainData.csv', delimiter=',', dtype=int, skip_header=1, usecols=r)
trainY = np.genfromtxt(fname='trainLabels.csv', dtype=int, skip_header=1)
# test = np.genfromtxt(fname='testData.csv', delimiter=',', dtype=int, skip_header=1, usecols=r)
testX = np.genfromtxt(fname='kaggleTestSubset.csv', delimiter=',', dtype=int, skip_header=1, usecols=r)
testY = np.genfromtxt(fname='kaggleTestSubsetLabels.csv', dtype=int, skip_header=1)
print('Data loaded')
# L2-normalize each sample (row) independently.
trainX = normalize(trainX, axis=1, copy=True, return_norm=False)
testX = normalize(testX, axis=1, copy=True, return_norm=False)
# trainX = scale(trainX, axis=1, with_mean=False)
# testX = scale(testX, axis=1, with_mean=False)
# NOTE(review): timing via HH:MM:SS wall-clock strings breaks across
# midnight; time.monotonic() would be more robust.
first = get_current_time()
classifier = tree.DecisionTreeClassifier(criterion='entropy', max_depth=10)
classifier.fit(trainX, trainY)
second = get_current_time()
print("Time taken to train(sec):", datetime.strptime(second, fmt) - datetime.strptime(first, fmt))
testpredy = classifier.predict(testX)
third = get_current_time()
print("Time taken to predict(sec):", datetime.strptime(third, fmt) - datetime.strptime(second, fmt))
# np.savetxt('result_tree.csv', np.dstack((np.arange(1, testpredy.size+1), testpredy))[0], "%d,%d", header="ID,Label",
#            comments='')
print("Accuracy is", accuracy_score(testY, testpredy)*100)
|
{"hexsha": "2f64b7c0d32168c58a9122a88cfe12e7d7c8004d", "size": 1638, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/Solution(DecisionTree).py", "max_stars_repo_name": "FarhanShoukat/DigitRecognition1438", "max_stars_repo_head_hexsha": "f1245b393905ee2722b30589acfeac947c69cc8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-24T04:55:29.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-06T09:08:49.000Z", "max_issues_repo_path": "Code/Solution(DecisionTree).py", "max_issues_repo_name": "FarhanShoukat/DigitRecognition1438", "max_issues_repo_head_hexsha": "f1245b393905ee2722b30589acfeac947c69cc8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/Solution(DecisionTree).py", "max_forks_repo_name": "FarhanShoukat/DigitRecognition1438", "max_forks_repo_head_hexsha": "f1245b393905ee2722b30589acfeac947c69cc8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4285714286, "max_line_length": 118, "alphanum_fraction": 0.7387057387, "include": true, "reason": "import numpy", "num_tokens": 432}
|
module OILMMs
# Package entry point: pulls in dependencies and the implementation files.
using AbstractGPs
using ChainRulesCore
using FillArrays
using KernelFunctions
using LinearAlgebra
using Random
using AbstractGPs: AbstractGP, FiniteGP
using KernelFunctions: MOInput
# Implementation, split across three files:
include("util.jl")          # shared helpers
include("oilmm.jl")         # core OILMM definition
include("missing_data.jl")  # handling of missing observations
# Only the model type itself is part of the public API.
export OILMM
end # module
|
{"hexsha": "8ade0c71939dd686d98ce21e1ccebfdc80c2dbcc", "size": 293, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/OILMMs.jl", "max_stars_repo_name": "willtebbutt/OILMMs.jl", "max_stars_repo_head_hexsha": "c860fd6f9266331b4877921ce8147d962bcbd72a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-22T09:09:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-30T20:03:10.000Z", "max_issues_repo_path": "src/OILMMs.jl", "max_issues_repo_name": "willtebbutt/OILMMs.jl", "max_issues_repo_head_hexsha": "c860fd6f9266331b4877921ce8147d962bcbd72a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-07-09T00:39:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-16T20:59:08.000Z", "max_forks_repo_path": "src/OILMMs.jl", "max_forks_repo_name": "willtebbutt/OILMMs.jl", "max_forks_repo_head_hexsha": "c860fd6f9266331b4877921ce8147d962bcbd72a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-06T10:05:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:05:01.000Z", "avg_line_length": 14.65, "max_line_length": 39, "alphanum_fraction": 0.819112628, "num_tokens": 79}
|
import numpy as np
from time import sleep
# Registers
# Memory-mapped register offsets (byte addresses, 4-byte registers).
# Names suggest their roles; confirm against the hardware spec.
reg_id = 0x00         # device identifier
reg_timestamp = 0x04  # timestamp counter
reg_pkt_len = 0x08    # packet length
reg_decay = 0x0C      # decay parameter
reg_wr_en = 0x10      # write enable
reg_rst = 0x14        # reset (pulsed 1 then 0 by run_once/run_int)
reg_n_in = 0x18       # number of inputs
reg_n_out = 0x1C      # number of outputs
# First mean in register
reg_mean_in = 0x20
# First mean out register
reg_mean_out = 0xC60
# Generic functions
def check_register(ip, register, expected_value):
    """Read `register` from `ip` and print whether it matches `expected_value`."""
    actual = ip.read(register)
    if actual != expected_value:
        print("Read register failed, expected %d, got %d" % (expected_value, actual))
    else:
        print("Read register succesful, got %d" % actual)
def set_mean_equal(srbm, n_in, value):
    """Write the same `value` into the first `n_in` input-mean registers."""
    print("Set {} input means to {}".format(n_in, value))
    for offset in range(0, n_in * 0x4, 0x4):
        srbm.write(reg_mean_in + offset, value)
def set_mean(srbm, mean_in):
    """Write each entry of `mean_in` into consecutive input-mean registers."""
    print("Set input means")
    for idx, mean in enumerate(mean_in):
        srbm.write(reg_mean_in + idx * 0x4, int(mean))
def print_mean(srbm, n_out):
    """Print the current output mean of each of the `n_out` neurons on one line."""
    print("Neuron output means:")
    for idx in range(n_out):
        mean = srbm.read(reg_mean_out + idx * 0x4)
        print("{:<3} ".format(mean), end='')
    print("")
def get_mean(srbm, n_out, filename, num_meas=1):
    """Read, print, and save the output means of `n_out` neurons.

    Parameters
    ----------
    srbm : object
        Device handle exposing ``read(register)``.
    n_out : int
        Number of mean registers to read (starting at reg_mean_out).
    filename : str
        Path the means are written to via ``np.savetxt`` (format "%3d").
    num_meas : int, optional
        Number of measurement rows the flat read-out is reshaped into;
        ``n_out`` must be a multiple of it.  Defaults to 1.

    Note: the original code referenced an undefined global ``num_meas``
    and always raised NameError; it is now an explicit parameter.
    """
    mean_out = []
    for i in range(n_out):
        mean = srbm.read(reg_mean_out + i * 0x4)
        mean_out.append(mean)
        print('%3d ' % mean, end='')
    print('')
    mean_out_array = np.reshape(np.array(mean_out), [num_meas, -1])
    np.savetxt(filename, mean_out_array, fmt="%3d")
def print_input(x, width, length):
    """Print the flat sequence `x` as a `length`-row by `width`-column grid.

    `x` is stored row-major, so element (row i, col j) is x[i*width + j].
    The original code indexed with ``x[i*length + j]``, which scrambles
    (and re-reads) entries whenever width != length.
    """
    for i in range(length):
        for j in range(width):
            print('{:<3} '.format(x[i * width + j]), end='')
        print('')
def run_once(srbm, n_out, mean_in, sleep_time):
    """Pulse reset, load the input means, wait, then print the neuron means."""
    for reset_bit in (1, 0):
        srbm.write(reg_rst, reset_bit)
    set_mean(srbm, mean_in)
    sleep(sleep_time)
    print_mean(srbm, n_out)
def get_mean_int(srbm, n_out):
    """Return the `n_out` neuron output means as a float numpy array."""
    readings = [srbm.read(reg_mean_out + i * 0x4) for i in range(n_out)]
    return np.array(readings, dtype=float)
def run_int(srbm, n_out, mean_in, num_samples=100, sleep_time=0.005):
    """Reset the core, load `mean_in`, then average `num_samples` read-outs.

    Returns a length-`n_out` array: the per-neuron mean over all samples.
    """
    srbm.write(reg_rst, 1)
    srbm.write(reg_rst, 0)
    set_mean(srbm, mean_in)
    samples = np.zeros([n_out, num_samples])
    for col in range(num_samples):
        samples[:, col] = get_mean_int(srbm, n_out)
        sleep(sleep_time)
    return np.mean(samples, axis=1)
def run_test(srbm, n_out, mean_in, ref, num_samples=100, sleep_time=0.005):
    """Classify each row of `mean_in` by its max-mean neuron and score vs `ref`.

    Returns (predictions, accuracy) where predictions[i] is the index of
    the neuron with the highest averaged output for image i.
    """
    n_images = np.shape(mean_in)[0]
    output = np.zeros([n_images])
    for this_image in range(n_images):
        print('Testing image %3d' % this_image)
        mean_values = run_int(srbm, n_out, mean_in[this_image],
                              num_samples=num_samples, sleep_time=sleep_time)
        result = np.where(np.max(mean_values) == mean_values)[0][0]
        print('    res %d' % result)
        output[this_image] = result
    test_accuracy = np.sum(np.array(ref).astype('int') == output) / len(output)
    print('    TEST ACCURACY %f' % test_accuracy)
    return output, test_accuracy
def make_dset(srbm, n_out, mean_in, ref, num_samples=100, sleep_time=0.005):
    """Collect the averaged mean vector for every input row.

    `ref` is accepted for signature parity with run_test but is unused.
    Returns an array of shape (n_images, n_out).
    """
    collected = []
    for this_image in range(np.shape(mean_in)[0]):
        print('Testing image %3d' % this_image)
        collected.append(run_int(srbm, n_out, mean_in[this_image],
                                 num_samples=num_samples, sleep_time=sleep_time))
    return np.array(collected)
|
{"hexsha": "e11c324dea94a8c44d2327d50abfec792159930e", "size": 3416, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/gyro.py", "max_stars_repo_name": "federicohyo/Gyro", "max_stars_repo_head_hexsha": "9475b250aa3986948ed78a7ab27e0b86547025a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/gyro.py", "max_issues_repo_name": "federicohyo/Gyro", "max_issues_repo_head_hexsha": "9475b250aa3986948ed78a7ab27e0b86547025a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/gyro.py", "max_forks_repo_name": "federicohyo/Gyro", "max_forks_repo_head_hexsha": "9475b250aa3986948ed78a7ab27e0b86547025a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9252336449, "max_line_length": 112, "alphanum_fraction": 0.6566159251, "include": true, "reason": "import numpy", "num_tokens": 1017}
|
"""
Tensor
======
Tensor is implemented as ``ndarray`` from numpy
``from numpy import ndarray as Tensor``
"""
# TO DO: Implement Tensor class seperate from numpy
from numpy import ndarray as Tensor
|
{"hexsha": "1529f24ef33932b92d5c3d074b2e8153f75783e2", "size": 195, "ext": "py", "lang": "Python", "max_stars_repo_path": "bluebird/tensor.py", "max_stars_repo_name": "Stoick01/bluebird", "max_stars_repo_head_hexsha": "a6ab5fcbf42da24ef8268ba6bc110b9eadd9a2ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-04T10:44:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-04T10:44:51.000Z", "max_issues_repo_path": "bluebird/tensor.py", "max_issues_repo_name": "Stoick01/bluebird", "max_issues_repo_head_hexsha": "a6ab5fcbf42da24ef8268ba6bc110b9eadd9a2ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-02T03:33:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T01:00:23.000Z", "max_forks_repo_path": "bluebird/tensor.py", "max_forks_repo_name": "Stoick01/bluebird", "max_forks_repo_head_hexsha": "a6ab5fcbf42da24ef8268ba6bc110b9eadd9a2ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.25, "max_line_length": 51, "alphanum_fraction": 0.7282051282, "include": true, "reason": "from numpy", "num_tokens": 39}
|
import os, sys
import numpy as np
from pdb import set_trace as st
import pandas as pd
# Merge the 'posneg_small' column produced by the pos+neg pruning run into
# the corresponding per-class CSVs of the base run, writing the combined
# tables to a new directory.
posneg_dir = f"models/prune/lastconv_class_auc_channel_pos+neg"
src_dir = f"models/prune/lastconv_class_auc_channel"  # renamed: `dir` shadowed the builtin
result_dir = f"models/prune/lastconv_class_auc_channel_new"
os.makedirs(result_dir, exist_ok=True)
# Experiment name -> layers involved (only the keys are used below).
exp_configs = {
    "conv12": ["conv2d_12"],
    "block4": ["conv2d_10", "conv2d_11", "conv2d_12"],
    "block4main": ["conv2d_11", "conv2d_12"],
}
for class_id in ["all"] + list(range(10)):
    for exp_name in exp_configs.keys():
        filename = f"allconv_class_{exp_name}_{class_id}.csv"
        # Table from the pos+neg experiment: source of the extra column.
        posneg_csv = pd.read_csv(os.path.join(posneg_dir, filename), index_col=0)
        # Base table the column is grafted onto.
        other_csv = pd.read_csv(os.path.join(src_dir, filename), index_col=0)
        other_csv['posneg_small'] = posneg_csv['posneg_small']
        other_csv.to_csv(os.path.join(result_dir, filename))
|
{"hexsha": "b7e7e0363a62f9325b01cbcf6d8f8d5f0809e680", "size": 949, "ext": "py", "lang": "Python", "max_stars_repo_path": "submissions/available/NNSlicer/finetune/knockoff/prune/post_process/concat_posneg_exps.py", "max_stars_repo_name": "ziqi-zhang/fse20", "max_stars_repo_head_hexsha": "f3998abda2e40d67989ec113340236f3460f0dc3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "submissions/available/NNSlicer/finetune/knockoff/prune/post_process/concat_posneg_exps.py", "max_issues_repo_name": "ziqi-zhang/fse20", "max_issues_repo_head_hexsha": "f3998abda2e40d67989ec113340236f3460f0dc3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "submissions/available/NNSlicer/finetune/knockoff/prune/post_process/concat_posneg_exps.py", "max_forks_repo_name": "ziqi-zhang/fse20", "max_forks_repo_head_hexsha": "f3998abda2e40d67989ec113340236f3460f0dc3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-24T20:43:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-08T07:10:14.000Z", "avg_line_length": 35.1481481481, "max_line_length": 63, "alphanum_fraction": 0.6880927292, "include": true, "reason": "import numpy", "num_tokens": 274}
|
// Copyright (c) 2014-2021 LG Electronics, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#define PBNJSON_USE_DEPRECATED_API
#include <iostream>
#include <sys/ioctl.h>
#include <pbnjson.hpp>
#include <boost/program_options.hpp>
using namespace std;
using namespace pbnjson;
namespace {
// Return the path component after the final '/', or the whole path
// if it contains no separator.
const char *Basename(const char *path)
{
    const char *slash = strrchr(path, (int)('/'));
    if (slash == NULL)
        return path;
    return slash + 1;
}
// Query the controlling terminal (fd 0) for its width in columns.
// The original read `w.ws_col` even when ioctl failed (e.g. stdin is not
// a tty), returning uninitialized garbage; zero-initialize and fall back
// to a conventional 80 columns instead.
int DetectTerminalWidth()
{
    struct winsize w{};
    if (ioctl(0, TIOCGWINSZ, &w) != 0 || w.ws_col == 0)
        return 80;
    return w.ws_col;
}
// Reports syntax/schema/parse problems to stderr.  Implements the pbnjson
// JErrorHandler interface.  The bad* callbacks are intentionally empty:
// per-value diagnostics are not reported individually and failures surface
// through the parser's overall return status instead.
class SchemaErrorHandler
    : public JErrorHandler
{
private:
    // Malformed JSON input.
    virtual void syntax(JParser *, SyntaxError code, const string &reason)
    {
        cerr << "Syntax error " << code << ": " << reason << endl;
    }
    // Input violates the supplied schema.
    virtual void schema(JParser *, SchemaError code, const string &reason)
    {
        cerr << "Schema error " << code << ": " << reason << endl;
    }
    // Any other error category reported by the parser.
    virtual void misc(JParser *, const string &reason)
    {
        cerr << "Unknown error: " << reason << endl;
    }
    virtual void parseFailed(JParser *, const string &reason)
    {
        cerr << "Parse failed: " << reason << endl;
    }
    // Deliberate no-ops (see class comment).
    virtual void badObject(pbnjson::JParser*, JErrorHandler::BadObject){}
    virtual void badArray(pbnjson::JParser*, pbnjson::JErrorHandler::BadArray){}
    virtual void badString(pbnjson::JParser*, const string &str){}
    virtual void badNumber(pbnjson::JParser*, const string& num){}
    virtual void badBoolean(pbnjson::JParser*){}
    virtual void badNull(pbnjson::JParser*){}
};
} //namespace;
// Validate a JSON document (file or stdin) against an optional JSON schema.
// Exit code 0 on success, 1 on any open/parse/validation failure.
// Fix: corrected the user-facing message typo "finalyze" -> "finalize".
int main(int argc, char *argv[])
{
    const char *program_name = Basename(argv[0]);
    int line_length = DetectTerminalWidth();
    string file_name;
    string schema_file;
    try
    {
        using namespace boost::program_options;
        // Describe the command line; wrap help output to the terminal width.
        options_description desc("Options", line_length, line_length / 2);
        desc.add_options()
            ("version,V", "Print program version")
            ("help,h", "Print usage summary")
            ("file,f", value<string>(&file_name)->default_value(file_name),
             "JSON file to validate (skip for stdin)")
            ("schema,s", value<string>(&schema_file)->default_value(schema_file),
             "File with JSON schema")
            ;
        // The JSON file may also be given as the first positional argument.
        positional_options_description p;
        p.add("file", 1);
        variables_map vm;
        store(command_line_parser(argc, argv)
                  .options(desc)
                  .positional(p)
                  .run(),
              vm);
        notify(vm);
        if (vm.count("help"))
        {
            cout << program_name << " -- validate a JSON file against JSON schema\n\n";
            cout << "Usage: " << program_name << " [OPTION] <file.json>\n";
            cout << desc << endl;
            return 0;
        }
        if (vm.count("version"))
        {
            cout << program_name << " " << WEBOS_COMPONENT_VERSION << endl;
            return 0;
        }
        SchemaErrorHandler error_handler;
        // Prepare JSON schema: with no schema file, accept anything ("{}").
        unique_ptr<JSchema> schema;
        if (schema_file.empty())
            schema.reset(new JSchemaFragment("{}"));
        else
        {
            schema.reset(new JSchemaFile(schema_file, schema_file, &error_handler, NULL));
            if (!schema->isInitialized())
            {
                cerr << "Failed to open JSON schema " << schema_file << endl;
                return 1;
            }
        }
        // Try to parse the file to validate
        JDomParser parser;
        if (file_name.empty())
        {
            // No file given: stream stdin through the parser line by line.
            if (!parser.begin(*schema.get(), &error_handler))
            {
                cerr << "Failed to parse JSON stdin" << endl;
                return 1;
            }
            string buf;
            while (getline(cin, buf))
            {
                if (!parser.feed(buf))
                {
                    cerr << "Failed to parse JSON stdin" << endl;
                    return 1;
                }
            }
            if (!parser.end())
            {
                cerr << "Failed to finalize parse" << endl;
                return 1;
            }
        }
        else
        {
            // Try to parse the file
            if (!parser.parseFile(file_name, *schema.get(), JFileOptNoOpt, &error_handler))
            {
                cerr << "Failed to parse JSON file " << file_name << endl;
                return 1;
            }
        }
    }
    catch (const std::exception &e)
    {
        cerr << e.what() << endl;
        return 1;
    }
    return 0;
}
|
{"hexsha": "17260ed8b2830031944b6ca1a2625f811d60598f", "size": 4316, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/pbnjson_validate/pbnjson_validate.cpp", "max_stars_repo_name": "webosose/libpbnjson", "max_stars_repo_head_hexsha": "19c047b28d73c50330c1c97d2fa94aa8fccebcf6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2018-03-20T15:15:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-02T02:30:15.000Z", "max_issues_repo_path": "src/pbnjson_validate/pbnjson_validate.cpp", "max_issues_repo_name": "webosose/libpbnjson", "max_issues_repo_head_hexsha": "19c047b28d73c50330c1c97d2fa94aa8fccebcf6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pbnjson_validate/pbnjson_validate.cpp", "max_forks_repo_name": "webosose/libpbnjson", "max_forks_repo_head_hexsha": "19c047b28d73c50330c1c97d2fa94aa8fccebcf6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2018-03-19T12:43:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-02T02:30:19.000Z", "avg_line_length": 23.9777777778, "max_line_length": 82, "alphanum_fraction": 0.6577849861, "num_tokens": 1157}
|
import numpy as np
import time
import util as u
# simulation parameters
dt = 0.1 # time step
T = 3000 # number of time steps
# parameters of the system
gamma = .00002 # gravitational coefficient
m1 = 300 # mass of the first planet
m2 = 1 # mass of the second planet
# initial conditions: state of the system in the first two time steps
x0 = np.array([0, 0, 1, 0])
x1 = np.array([0, 0, 1, 0.005])
# initializing the vector of system states
xs = [x0, x1]
# defining the function that calculates the forces acting on the planets at a time step
def F(x):
    """Per-unit-mass gravitational forces (accelerations) on both planets.

    Parameters
    ----------
    x : array-like of shape (4,)
        State vector [x1, y1, x2, y2] with both planet positions.

    Returns
    -------
    numpy.ndarray of shape (4,)
        Concatenated accelerations [a1x, a1y, a2x, a2y].
    """
    r1 = x[:2] # position of the first planet
    r2 = x[2:4] # position of the second planet
    posdiff_vec = r2 - r1 # vector of difference of the positions (NOT normalized)
    dist = np.linalg.norm(posdiff_vec) # distance between the planets
    # Newton's law of universal gravitation: a = G * m * r_hat / d**2.
    # Because posdiff_vec is unnormalized, the correct denominator is dist**3.
    # (The original divided by dist**2, which made the force decay as 1/d
    # instead of 1/d**2.)
    F1 = gamma * m2 * posdiff_vec / (dist**3) # force acting on the first planet
    F2 = - gamma * m1 * posdiff_vec / (dist**3) # force acting on the second planet
    return np.concatenate([F1, F2], axis=0)
# Verlet-style integration: x_{t+1} = dt^2 * F(x_t) + 2*x_t - x_{t-1}
for step in range(T):
    # last two states of the trajectory
    curr = xs[-1]
    prev = xs[-2]
    # finite-difference approximation of the second derivative
    accel = F(curr)  # force at the most recent time step
    nxt = dt**2 * accel + 2*curr - prev  # next position estimate
    xs.append(nxt)
####################################################
x, y = np.split(np.array(xs), 2, axis=1)
u.plotAnim(x, y, T, isSaveVideo=False)
# this is slow and not working properly
#u.plotScatter(x, y)
|
{"hexsha": "6af5c2bdecd1c4fa441f13333a6baf4a53ae707a", "size": 1655, "ext": "py", "lang": "Python", "max_stars_repo_path": "planets/planets.py", "max_stars_repo_name": "ricsirke/simulations", "max_stars_repo_head_hexsha": "395722bfe524d3b62a994bbc99c504f2f434d9f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "planets/planets.py", "max_issues_repo_name": "ricsirke/simulations", "max_issues_repo_head_hexsha": "395722bfe524d3b62a994bbc99c504f2f434d9f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "planets/planets.py", "max_forks_repo_name": "ricsirke/simulations", "max_forks_repo_head_hexsha": "395722bfe524d3b62a994bbc99c504f2f434d9f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5535714286, "max_line_length": 87, "alphanum_fraction": 0.6205438066, "include": true, "reason": "import numpy", "num_tokens": 465}
|
# © 2019 University of Illinois Board of Trustees. All rights reserved
import h5py
import MemmapDataLite
import _pickle as pickle
import sys
import logging
import numpy as np
logging.basicConfig(level=logging.INFO)
def compare_tensors(a, b):
    """Assert that two array-likes have the same shape and (near-)identical values."""
    a, b = np.array(a), np.array(b)
    logging.info("Comparing tensors of sizes %s, %s" % (str(a.shape), str(b.shape)))
    assert a.shape == b.shape
    assert np.sum(np.abs(a - b).flatten()) < 1e-8
# Cross-check a pickled dict-style dataset against an HDF5 file that should
# hold identical contents.
# argv[1]: pickled dataset (presumably a MemmapDataLite object exposing
#          `.locations` and item access -- TODO confirm); argv[2]: HDF5 file.
dict_data = pickle.load(open(sys.argv[1], 'rb'))
hdf5_data = h5py.File(sys.argv[2], 'r')
# Per-allele tensor attributes that must match between the two stores.
attributes = [
    'label', 'feature', 'feature2', 'supportingReadsStrict', 'supportingReadsStrict2', 'segment'
]
for location in dict_data.locations:
    site_a = dict_data[location]
    site_b = hdf5_data[location]
    logging.info("Alleles at site %s are %s" % (location, str(site_a.keys())))
    for allele in site_a:
        logging.info("Available attributes for allele %s are %s" %(allele, str(site_a[allele].keys())))
        for attribute in attributes:
            logging.info("Comparing %s for allele %s at location %s" % (attribute, allele, location))
            compare_tensors(
                np.array(site_a[allele][attribute]),
                np.array(site_b[allele][attribute])
            )
# Both stores must cover exactly the same set of locations.
logging.info("Ensuring that all locations are covered")
assert(set(dict_data.locations) == set(hdf5_data.keys()))
print("Passed")
|
{"hexsha": "3b4280bb647875135b8fa7905ad89a0a32dc1d7a", "size": 1394, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/test_dict_data.py", "max_stars_repo_name": "anands-repo/hello", "max_stars_repo_head_hexsha": "743ff49c0de97a985643280a3b5cb562c58e7fb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-03-27T06:33:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:16:47.000Z", "max_issues_repo_path": "python/test_dict_data.py", "max_issues_repo_name": "anands-repo/hello", "max_issues_repo_head_hexsha": "743ff49c0de97a985643280a3b5cb562c58e7fb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-24T08:33:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-10T09:30:21.000Z", "max_forks_repo_path": "python/test_dict_data.py", "max_forks_repo_name": "anands-repo/hello", "max_forks_repo_head_hexsha": "743ff49c0de97a985643280a3b5cb562c58e7fb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6595744681, "max_line_length": 103, "alphanum_fraction": 0.6649928264, "include": true, "reason": "import numpy", "num_tokens": 356}
|
# Accessor for the metric stored in a kernel (assumes the type has a `metric` field).
@inline metric(κ::Kernel) = κ.metric
## Allows to iterate over kernels
# A kernel behaves as a length-1 iterable: it yields itself once, then stops.
Base.length(::Kernel) = 1
Base.iterate(k::Kernel) = (k,nothing)
Base.iterate(k::Kernel, ::Any) = nothing
### Syntactic sugar for creating matrices and using kernel functions
# For each concrete kernel type, generate call overloads so a kernel object can
# be applied directly:
#   κ(d)     -> kappa on a precomputed distance
#   κ(x, y)  -> kappa on the metric distance between transformed vectors
#   κ(X, Y)  -> cross kernel matrix; κ(X) -> kernel matrix of X with itself
for k in [:ExponentialKernel,:SqExponentialKernel,:GammaExponentialKernel,:MaternKernel,:Matern32Kernel,:Matern52Kernel,:LinearKernel,:PolynomialKernel,:ExponentiatedKernel,:ZeroKernel,:WhiteKernel,:ConstantKernel,:RationalQuadraticKernel,:GammaRationalQuadraticKernel]
    @eval begin
        @inline (κ::$k)(d::Real) = kappa(κ,d) #TODO Add test
        @inline (κ::$k)(x::AbstractVector{<:Real},y::AbstractVector{<:Real}) = kappa(κ,evaluate(κ.metric,transform(κ,x),transform(κ,y)))
        @inline (κ::$k)(X::AbstractMatrix{T},Y::AbstractMatrix{T};obsdim::Integer=defaultobs) where {T} = kernelmatrix(κ,X,Y,obsdim=obsdim)
        @inline (κ::$k)(X::AbstractMatrix{T};obsdim::Integer=defaultobs) where {T} = kernelmatrix(κ,X,obsdim=obsdim)
    end
end
### Transform generics
# Accessors/appliers for the transform stored in a kernel
# (assumes the kernel type has a `transform` field).
@inline transform(κ::Kernel) = κ.transform
@inline transform(κ::Kernel,x::AbstractVecOrMat) = transform(κ.transform,x)
@inline transform(κ::Kernel,x::AbstractVecOrMat,obsdim::Int) = transform(κ.transform,x,obsdim)
## Constructors for kernels without parameters
# Generate convenience constructors: from a scalar lengthscale ρ (wrapped in a
# ScaleTransform with a Ref), from a vector of lengthscales, or from any
# pre-built Transform.
for kernel in [:ExponentialKernel,:SqExponentialKernel,:Matern32Kernel,:Matern52Kernel,:ExponentiatedKernel]
    @eval begin
        $kernel(ρ::T=1.0) where {T<:Real} = $kernel{T,ScaleTransform{Base.RefValue{T}}}(ScaleTransform(ρ))
        $kernel(ρ::A) where {A<:AbstractVector{<:Real}} = $kernel{eltype(A),ScaleTransform{A}}(ScaleTransform(ρ))
        $kernel(t::Tr) where {Tr<:Transform} = $kernel{eltype(t),Tr}(t)
    end
end
# Fallback: kernels are not parameter-settable unless a specific method exists.
function set_params!(k::Kernel,x)
    @error "Setting parameters to this kernel is either not possible or has not been implemented"
end
# Parameter tuple of a kernel: by default only its transform's parameters.
params(k::Kernel) = (params(k.transform),)
|
{"hexsha": "bb5ee5a6d8bab00504865ad0e6ed7e8eab6fc26e", "size": 1899, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/generic.jl", "max_stars_repo_name": "UnofficialJuliaMirror/KernelFunctions.jl-ec8451be-7e33-11e9-00cf-bbf324bd1392", "max_stars_repo_head_hexsha": "2765f8e101aed49e4cdf44cca83dba4829d1554b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/generic.jl", "max_issues_repo_name": "UnofficialJuliaMirror/KernelFunctions.jl-ec8451be-7e33-11e9-00cf-bbf324bd1392", "max_issues_repo_head_hexsha": "2765f8e101aed49e4cdf44cca83dba4829d1554b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/generic.jl", "max_forks_repo_name": "UnofficialJuliaMirror/KernelFunctions.jl-ec8451be-7e33-11e9-00cf-bbf324bd1392", "max_forks_repo_head_hexsha": "2765f8e101aed49e4cdf44cca83dba4829d1554b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.3243243243, "max_line_length": 269, "alphanum_fraction": 0.7240652975, "num_tokens": 552}
|
"""
For panda (two-finger) gripper: pushing, pushing-left, pushing-up, pulling, pulling-left, pulling-up
50% all parts closed, 50% middle (for each part, 50% prob. closed, 50% prob. middle)
Simulate until static before starting
"""
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = "1"
import sys
import shutil
import numpy as np
from PIL import Image
from utils import get_global_position_from_camera, save_h5
import cv2
import json
import pickle
from argparse import ArgumentParser
from sapien.core import Pose
from env import Env, ContactError
from camera import Camera
from robots.panda_robot import Robot
# Command-line interface: positional shape/category/count/primitive-action,
# plus optional output directory, trial id, RNG seed and headless flag.
parser = ArgumentParser()
parser.add_argument('shape_id', type=str)
parser.add_argument('category', type=str)
parser.add_argument('cnt_id', type=int)
parser.add_argument('primact_type', type=str)
parser.add_argument('--out_dir', type=str)
parser.add_argument('--trial_id', type=int, default=0, help='trial id')
parser.add_argument('--random_seed', type=int, default=None)
parser.add_argument('--no_gui', action='store_true', default=False, help='no_gui [default: False]')
args = parser.parse_args()
shape_id = args.shape_id
trial_id = args.trial_id
primact_type = args.primact_type
# Headless runs write under --out_dir; GUI runs write under ./results.
if args.no_gui:
    out_dir = os.path.join(args.out_dir, '%s_%s_%d_%s_%d' % (shape_id, args.category, args.cnt_id, primact_type, trial_id))
else:
    out_dir = os.path.join('results', '%s_%s_%d_%s_%d' % (shape_id, args.category, args.cnt_id, primact_type, trial_id))
# Start from a clean output directory for this trial.
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)
os.makedirs(out_dir)
flog = open(os.path.join(out_dir, 'log.txt'), 'w')
out_info = dict()  # accumulates all metadata written to result.json at the end
# set random seed
if args.random_seed is not None:
    np.random.seed(args.random_seed)
    out_info['random_seed'] = args.random_seed
# setup env
env = Env(flog=flog, show_gui=(not args.no_gui))
# setup camera (random viewpoint so data covers many viewing angles)
cam = Camera(env, random_position=True)
out_info['camera_metadata'] = cam.get_metadata_json()
if not args.no_gui:
    env.set_controller_camera_pose(cam.pos[0], cam.pos[1], cam.pos[2], np.pi+cam.theta, -cam.phi)
# load shape
object_urdf_fn = '../data/where2act_original_sapien_dataset/%s/mobility_vhacd.urdf' % shape_id
flog.write('object_urdf_fn: %s\n' % object_urdf_fn)
object_material = env.get_material(4, 4, 0.01)
# 50% chance of fully-closed joints, otherwise 'random-closed-middle'
# (per-part: 50% closed, 50% middle -- see module docstring).
state = 'random-closed-middle'
if np.random.random() < 0.5:
    state = 'closed'
flog.write('Object State: %s\n' % state)
out_info['object_state'] = state
joint_angles = env.load_object(object_urdf_fn, object_material, state=state)
out_info['joint_angles'] = joint_angles
out_info['joint_angles_lower'] = env.joint_angles_lower
out_info['joint_angles_upper'] = env.joint_angles_upper
# Get the joint poses in the global frame
out_info['joints'] = env.joints
# Convert the joint pose from global frame to camera frame
# w2c_mat = np.linalg.inv(cam.get_metadata()['mat44'])
w2c_mat = np.linalg.inv(cam.get_metadata()['mat44'])  # world-to-camera 4x4
# permutation = np.array([[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
for i in range(len(out_info['joints'])):
    pose_global = out_info['joints'][i]['pose_global'].to_transformation_matrix()
    # pose_global = permutation @ pose_global
    out_info['joints'][i]['pose_cam'] = (w2c_mat @ pose_global).tolist()
    out_info['joints'][i]['pose_global'] = pose_global.tolist()
cur_qpos = env.get_object_qpos()
# simulate some steps for the object to stay rest
# "Still" means: 5000 consecutive steps where joint positions moved < 1e-6
# and no contact impulse exceeded the threshold; give up after 20000 steps.
still_timesteps = 0
wait_timesteps = 0
while still_timesteps < 5000 and wait_timesteps < 20000:
    env.step()
    env.render()
    cur_new_qpos = env.get_object_qpos()
    invalid_contact = False
    for c in env.scene.get_contacts():
        for p in c.points:
            # squared impulse magnitude above threshold -> object still moving
            if abs(p.impulse @ p.impulse) > 1e-4:
                invalid_contact = True
                break
        if invalid_contact:
            break
    if np.max(np.abs(cur_new_qpos - cur_qpos)) < 1e-6 and (not invalid_contact):
        still_timesteps += 1
    else:
        still_timesteps = 0  # reset the streak on any motion/contact
    cur_qpos = cur_new_qpos
    wait_timesteps += 1
if still_timesteps < 5000:
    flog.write('Object Not Still!\n')
    flog.close()
    env.close()
    exit(1)
### use the GT vision
# Capture RGB, depth and segmentation from the camera and persist them.
rgb, depth, seg, obj_seg = cam.get_observation()
Image.fromarray((rgb*255).astype(np.uint8)).save(os.path.join(out_dir, 'rgb.png'))
pickle.dump(seg, open(os.path.join(out_dir, 'seg.pkl'), "wb"))
pickle.dump(obj_seg, open(os.path.join(out_dir, 'obj_seg.pkl'), "wb"))
# Back-project depth to per-pixel 3D points in the camera frame.
cam_XYZA_id1, cam_XYZA_id2, cam_XYZA_pts = cam.compute_camera_XYZA(depth)
cam_XYZA = cam.compute_XYZA_matrix(cam_XYZA_id1, cam_XYZA_id2, cam_XYZA_pts, depth.shape[0], depth.shape[1])
save_h5(os.path.join(out_dir, 'cam_XYZA.h5'), \
        [(cam_XYZA_id1.astype(np.uint64), 'id1', 'uint64'), \
         (cam_XYZA_id2.astype(np.uint64), 'id2', 'uint64'), \
         (cam_XYZA_pts.astype(np.float32), 'pc', 'float32'), \
        ])
gt_nor = cam.get_normal_map()
# Normals are in [-1, 1]; remap to [0, 255] for the saved visualization.
Image.fromarray(((gt_nor+1)/2*255).astype(np.uint8)).save(os.path.join(out_dir, 'gt_nor.png'))
object_link_ids = env.movable_link_ids
gt_movable_link_mask = cam.get_movable_link_mask(object_link_ids)
Image.fromarray((gt_movable_link_mask>0).astype(np.uint8)*255).save(os.path.join(out_dir, 'interaction_mask.png'))
# sample a pixel to interact
xs, ys = np.where(gt_movable_link_mask>0)
if len(xs) == 0:
    flog.write('No Movable Pixel! Quit!\n')
    flog.close()
    env.close()
    exit(1)
idx = np.random.randint(len(xs))
x, y = xs[idx], ys[idx]
out_info['pixel_locs'] = [int(x), int(y)]
# mask value is 1-based index into object_link_ids
env.set_target_object_part_actor_id(object_link_ids[gt_movable_link_mask[x, y]-1])
out_info['target_object_part_actor_id'] = env.target_object_part_actor_id
out_info['target_object_part_joint_id'] = env.target_object_part_joint_id
# get pixel 3D pulling direction (cam/world)
direction_cam = gt_nor[x, y, :3]
direction_cam /= np.linalg.norm(direction_cam)
out_info['direction_camera'] = direction_cam.tolist()
flog.write('Direction Camera: %f %f %f\n' % (direction_cam[0], direction_cam[1], direction_cam[2]))
direction_world = cam.get_metadata()['mat44'][:3, :3] @ direction_cam
out_info['direction_world'] = direction_world.tolist()
flog.write('Direction World: %f %f %f\n' % (direction_world[0], direction_world[1], direction_world[2]))
flog.write('mat44: %s\n' % str(cam.get_metadata()['mat44']))
# sample a random direction in the hemisphere (cam/world)
# flip so the gripper approach opposes the surface normal
action_direction_cam = np.random.randn(3).astype(np.float32)
action_direction_cam /= np.linalg.norm(action_direction_cam)
if action_direction_cam @ direction_cam > 0:
    action_direction_cam = -action_direction_cam
out_info['gripper_direction_camera'] = action_direction_cam.tolist()
action_direction_world = cam.get_metadata()['mat44'][:3, :3] @ action_direction_cam
out_info['gripper_direction_world'] = action_direction_world.tolist()
# get pixel 3D position (cam/world)
position_cam = cam_XYZA[x, y, :3]
out_info['position_cam'] = position_cam.tolist()
position_cam_xyz1 = np.ones((4), dtype=np.float32)
position_cam_xyz1[:3] = position_cam
position_world_xyz1 = cam.get_metadata()['mat44'] @ position_cam_xyz1
position_world = position_world_xyz1[:3]
out_info['position_world'] = position_world.tolist()
# compute final pose
# Build an orthonormal gripper frame (forward, left, up) with `up` along the
# approach direction and a random-but-orthogonal `forward`.
up = np.array(action_direction_world, dtype=np.float32)
forward = np.random.randn(3).astype(np.float32)
while abs(up @ forward) > 0.99:
    forward = np.random.randn(3).astype(np.float32)
left = np.cross(up, forward)
left /= np.linalg.norm(left)
forward = np.cross(left, up)
forward /= np.linalg.norm(forward)
out_info['gripper_forward_direction_world'] = forward.tolist()
forward_cam = np.linalg.inv(cam.get_metadata()['mat44'][:3, :3]) @ forward
out_info['gripper_forward_direction_camera'] = forward_cam.tolist()
rotmat = np.eye(4).astype(np.float32)
rotmat[:3, 0] = forward
rotmat[:3, 1] = left
rotmat[:3, 2] = up
# stand-off distance of the gripper from the contact point
final_dist = 0.1
if primact_type == 'pushing-left' or primact_type == 'pushing-up':
    final_dist = 0.11
final_rotmat = np.array(rotmat, dtype=np.float32)
final_rotmat[:3, 3] = position_world - action_direction_world * final_dist
final_pose = Pose().from_transformation_matrix(final_rotmat)
out_info['target_rotmat_world'] = final_rotmat.tolist()
start_rotmat = np.array(rotmat, dtype=np.float32)
start_rotmat[:3, 3] = position_world - action_direction_world * 0.15
start_pose = Pose().from_transformation_matrix(start_rotmat)
out_info['start_rotmat_world'] = start_rotmat.tolist()
# lateral motion direction for -left / -up action variants
action_direction = None
if 'left' in primact_type:
    action_direction = forward
elif 'up' in primact_type:
    action_direction = left
if action_direction is not None:
    end_rotmat = np.array(rotmat, dtype=np.float32)
    end_rotmat[:3, 3] = position_world - action_direction_world * final_dist + action_direction * 0.05
    out_info['end_rotmat_world'] = end_rotmat.tolist()
### viz the EE gripper position
# setup robot
robot_urdf_fn = './robots/panda_gripper.urdf'
robot_material = env.get_material(4, 4, 0.01)
robot = Robot(env, robot_urdf_fn, robot_material, open_gripper=('pulling' in primact_type))
# move to the final pose (only to render a visualization image)
robot.robot.set_root_pose(final_pose)
env.render()
rgb_final_pose, _, _, _ = cam.get_observation()
Image.fromarray((rgb_final_pose*255).astype(np.uint8)).save(os.path.join(out_dir, 'viz_target_pose.png'))
# move back
robot.robot.set_root_pose(start_pose)
env.render()
# activate contact checking
env.start_checking_contact(robot.hand_actor_id, robot.gripper_actor_ids, 'pushing' in primact_type)
if not args.no_gui:
    ### wait to start
    env.wait_to_start()
### main steps
out_info['start_target_part_qpos'] = env.get_target_part_qpos()
target_link_mat44 = env.get_target_part_pose().to_transformation_matrix()
# contact point expressed in the target link's local frame (tracked through motion)
position_local_xyz1 = np.linalg.inv(target_link_mat44) @ position_world_xyz1
# Execute the interaction; any ContactError marks the trial as failed.
success = True
try:
    if 'pushing' in primact_type:
        robot.close_gripper()
    elif 'pulling' in primact_type:
        robot.open_gripper()
    # approach
    robot.move_to_target_pose(final_rotmat, 2000)
    robot.wait_n_steps(2000)
    if 'pulling' in primact_type:
        robot.close_gripper()
        robot.wait_n_steps(2000)
    # lateral motion for the -left/-up variants
    if 'left' in primact_type or 'up' in primact_type:
        robot.move_to_target_pose(end_rotmat, 2000)
        robot.wait_n_steps(2000)
    # plain pulling retreats back to the start pose
    if primact_type == 'pulling':
        robot.move_to_target_pose(start_rotmat, 2000)
        robot.wait_n_steps(2000)
except ContactError:
    success = False
# Record where the tracked contact point ended up after the interaction.
target_link_mat44 = env.get_target_part_pose().to_transformation_matrix()
position_world_xyz1_end = target_link_mat44 @ position_local_xyz1
flog.write('touch_position_world_xyz_start: %s\n' % str(position_world_xyz1))
flog.write('touch_position_world_xyz_end: %s\n' % str(position_world_xyz1_end))
out_info['touch_position_world_xyz_start'] = position_world_xyz1[:3].tolist()
out_info['touch_position_world_xyz_end'] = position_world_xyz1_end[:3].tolist()
if success:
    out_info['result'] = 'VALID'
    out_info['final_target_part_qpos'] = env.get_target_part_qpos()
else:
    out_info['result'] = 'CONTACT_ERROR'
# save results
with open(os.path.join(out_dir, 'result.json'), 'w') as fout:
    json.dump(out_info, fout)
#close the file
flog.close()
if args.no_gui:
    # close env
    env.close()
else:
    if success:
        print('[Successful Interaction] Done. Ctrl-C to quit.')
        ### wait forever
        robot.wait_n_steps(100000000000)
    else:
        print('[Unsuccessful Interaction] invalid gripper-object contact.')
        # close env
        env.close()
|
{"hexsha": "e09b03372b64371a29c425f51ab8d4aa22abedc7", "size": 11460, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/collect_data.py", "max_stars_repo_name": "georgegu1997/where2act", "max_stars_repo_head_hexsha": "90867056e79dc1fce38ccec170c30b7874379593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/collect_data.py", "max_issues_repo_name": "georgegu1997/where2act", "max_issues_repo_head_hexsha": "90867056e79dc1fce38ccec170c30b7874379593", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/collect_data.py", "max_forks_repo_name": "georgegu1997/where2act", "max_forks_repo_head_hexsha": "90867056e79dc1fce38ccec170c30b7874379593", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-05T05:16:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-05T05:16:16.000Z", "avg_line_length": 36.2658227848, "max_line_length": 123, "alphanum_fraction": 0.7308900524, "include": true, "reason": "import numpy", "num_tokens": 3145}
|
// The Open Trading Project - open-trading.org
//
// Copyright (c) 2011 Martin Tapia - martin.tapia@open-trading.org
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/cstdlib.hpp>
#include "Logger.hpp"
#include "tools/ToString.hpp"
#include "core/History.hpp"
#include "core/Bar.hpp"
int main(int ac, char** av)
{
Hischeck::Logger logger;
if (ac <= 1 || !av[1])
{
logger.Log("Usage: hischeck PATH_TO_CSV");
return boost::exit_failure;
}
Core::History history(logger, true);
if (!history.Load(av[1]))
{
logger.Log("Failed to load history.", Logger::Error);
return boost::exit_failure;
}
std::vector<Core::Bar> const& bars = history.GetBars();
logger.Log("Start: " + bars[0].TimeToString() + ".");
logger.Log("End: " + bars[bars.size() - 1].TimeToString() + ".");
logger.Log("Duration:");
time_t diff = bars[bars.size() - 1].time - bars[0].time;
logger.Log(" seconds: " + Tools::ToString(diff));
logger.Log(" minutes: " + Tools::ToString(diff / 60));
logger.Log(" hours: " + Tools::ToString(diff / (60 * 60)));
logger.Log(" days: " + Tools::ToString(diff / (60 * 60 * 24)));
return boost::exit_success;
}
|
{"hexsha": "6fbcd0193735a9948bd2f13e6b88fa30f9a46b23", "size": 2535, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/hischeck/main.cpp", "max_stars_repo_name": "paps/Open-Trading", "max_stars_repo_head_hexsha": "b62f85f391be9975a161713f87aeff0cae0a1e37", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 23.0, "max_stars_repo_stars_event_min_datetime": "2015-07-24T15:45:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T15:35:33.000Z", "max_issues_repo_path": "src/hischeck/main.cpp", "max_issues_repo_name": "paps/Open-Trading", "max_issues_repo_head_hexsha": "b62f85f391be9975a161713f87aeff0cae0a1e37", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hischeck/main.cpp", "max_forks_repo_name": "paps/Open-Trading", "max_forks_repo_head_hexsha": "b62f85f391be9975a161713f87aeff0cae0a1e37", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 21.0, "max_forks_repo_forks_event_min_datetime": "2015-07-12T16:42:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-23T22:56:50.000Z", "avg_line_length": 39.609375, "max_line_length": 76, "alphanum_fraction": 0.6863905325, "num_tokens": 595}
|
# TF CNN Basics
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
sess = tf.InteractiveSession()
# With sess = tf.Session() you would first build the graph (the model) up front
# and then evaluate values inside the session scope via sess.run(Tensor);
# InteractiveSession registers itself as the default session, which makes
# inspecting values convenient.
# InteractiveSession is handy in IPython (Jupyter) notebooks.
# It is equivalent to using tf.Session() together with a `with` block.
# Because InteractiveSession treats the currently open session as the default,
# you can read results with Tensor.eval() or run ops with Operation.run()
# instead of calling sess.run().
image = np.array([[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]], dtype=np.float32)
print(image.shape)
plt.imshow(image.reshape(3, 3), cmap='Greys')
plt.show()
# (1, 3, 3, 1): the leading 1 is the number of instances, the rest is 3 x 3 x 1 (channels)
########################################################################
weight = tf.constant([[[[1.]], [[1.]]], [[[1.]], [[1.]]]])
# Filter = W
print(weight.shape)
conv2d = tf.nn.conv2d(image, weight, strides=[1, 1, 1, 1], padding='SAME')
# input (here: image): [batch, in_height, in_width, in_channels] format, e.g. 28x28x1 handwritten-digit images.
# filter : [filter_height, filter_width, in_channels, out_channels] format, e.g. the 3, 3, 1, 32 weights.
# strides : 1-D list of length 4. Elements [0] and [3] must be 1. Usually [1] and [2] use the same value.
# padding : 'SAME' or 'VALID'. The difference lies in the formula used to add padding.
#           SAME means zero padding (surround the input with zeros) and keeps the output size equal to the input.
#           VALID applies no padding at all.
# ex)
# Creating 32 filters of size 3x3x1 is written in code as [3, 3, 1, 32]:
# width (3), height (3), input channels (1), output channels (32). 32 outputs are produced.
conv2d_img = conv2d.eval()
# Evaluate the image with eval().
# This works because we opened an InteractiveSession above.
print(conv2d_img.shape)
# code written for visualization
conv2d_img = np.swapaxes(conv2d_img, 0 , 3)
for i, one_img in enumerate(conv2d_img):
    print(one_img.reshape(3,3))
    plt.subplot(1, 2, i+1), plt.imshow(one_img.reshape(3,3), cmap='gray')
plt.show()
#####################################################################################33
# Apply 3 filters (2,2,1,3)
weight1 = tf.constant([[[[1., 10., -1.]], [[1., 10., -1.]]], [[[1., 10., -1.]], [[1., 10., -1.]]]])
print(weight1.shape)
conv2d1 = tf.nn.conv2d(image, weight1, strides=[1,1,1,1], padding='SAME')
conv2d1_img = conv2d1.eval()
print(conv2d1_img.shape)
conv2d1_img = np.swapaxes(conv2d1_img, 0, 3)
for i, one_img in enumerate(conv2d1_img):
    print(one_img.reshape(3, 3))
    plt.subplot(1, 3, i + 1), plt.imshow(one_img.reshape(3, 3), cmap='gray')
    # subplot(1, 3 (this slot is the number of filters; it must match the filter count))
plt.show()
# Convolution ends here
#####################################################################33
# Max Pooling
image1 = np.array([[[[4], [3]], [[2], [1]]]], dtype=np.float32)
pool = tf.nn.max_pool(image1, ksize=[1, 2, 2, 1], strides=[1,1,1,1], padding='SAME')
# ksize is the kernel size = W, the filter size. The leading 1 is the count, then 2x2x1.
print(pool.shape)
print(pool.eval())
#########################################################################
# MNIST image loading
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
img = mnist.train.images[0].reshape(28, 28)
plt.imshow(img, cmap='gray')
plt.show()
##########################################################################
# MNIST Convolution layer
sess = tf.InteractiveSession()
img = img.reshape(-1, 28, 28, 1)
# Using -1 here is like saying "feed the instances in one by one"(?)
W1 = tf.Variable(tf.random_normal([3, 3, 1, 5], stddev=0.01))
# 3x3x1 is the shape of W / 5 is the number of filters
conv2d = tf.nn.conv2d(img, W1, strides=[1,2,2,1], padding='SAME')
# Move 2x2 at a time; do not touch elements 0 and 3.
print(conv2d)
sess.run(tf.global_variables_initializer())
conv2d_img = conv2d.eval()
conv2d_img = np.swapaxes(conv2d_img, 0, 3)
for i, one_img in enumerate(conv2d_img):
    plt.subplot(1, 5, i+1), plt.imshow(one_img.reshape(14, 14), cmap='gray')
# plt.show()
#####################################################################################
# MNIST max_pooling
pool = tf.nn.max_pool(conv2d, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# conv2d is 1x14x14x5
print(pool)
sess.run(tf.global_variables_initializer())
pool_img = pool.eval()
pool_img = np.swapaxes(pool_img, 0, 3)
for i, one_img in enumerate(pool_img):
    plt.subplot(1, 5, i + 1), plt.imshow(one_img.reshape(7, 7), cmap='gray')
plt.show()
|
{"hexsha": "29d2df1317fd752f37b837c8984141dfb155190e", "size": 4381, "ext": "py", "lang": "Python", "max_stars_repo_path": "TF_Lab_11_1.py", "max_stars_repo_name": "leedongminAI/DL_Tensorflow_Lab", "max_stars_repo_head_hexsha": "1956429adf29b972ffe638c07db8dd8dde7eabd3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-15T12:08:58.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-15T12:08:58.000Z", "max_issues_repo_path": "TF_Lab_11_1.py", "max_issues_repo_name": "leedongminAI/DL_Tensorflow_Lab", "max_issues_repo_head_hexsha": "1956429adf29b972ffe638c07db8dd8dde7eabd3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TF_Lab_11_1.py", "max_forks_repo_name": "leedongminAI/DL_Tensorflow_Lab", "max_forks_repo_head_hexsha": "1956429adf29b972ffe638c07db8dd8dde7eabd3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-31T11:13:53.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-31T11:13:53.000Z", "avg_line_length": 34.2265625, "max_line_length": 100, "alphanum_fraction": 0.576352431, "include": true, "reason": "import numpy", "num_tokens": 1678}
|
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule SDPA_jll
using Base
using Base: UUID
import JLLWrappers
# The JLLWrappers macros emit the standard JLL boilerplate (artifact paths,
# product accessors, __init__) for the SDPA binary package identified by this UUID.
JLLWrappers.@generate_main_file_header("SDPA")
JLLWrappers.@generate_main_file("SDPA", UUID("7fc90fd6-dbef-5a6a-93f8-169f2a2e705b"))
end  # module SDPA_jll
|
{"hexsha": "7cc1951f6d6a3c0188bcaea102937b774d665a6b", "size": 294, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SDPA_jll.jl", "max_stars_repo_name": "JuliaBinaryWrappers/SDPA_jll.jl", "max_stars_repo_head_hexsha": "8b9692712c6f1e8870499f9c0499b139b7b88b50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/SDPA_jll.jl", "max_issues_repo_name": "JuliaBinaryWrappers/SDPA_jll.jl", "max_issues_repo_head_hexsha": "8b9692712c6f1e8870499f9c0499b139b7b88b50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/SDPA_jll.jl", "max_forks_repo_name": "JuliaBinaryWrappers/SDPA_jll.jl", "max_forks_repo_head_hexsha": "8b9692712c6f1e8870499f9c0499b139b7b88b50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4, "max_line_length": 85, "alphanum_fraction": 0.7993197279, "num_tokens": 101}
|
[STATEMENT]
lemma subset_Ico_iff[simp]:
"{a..<b} \<subseteq> {c..<b} \<longleftrightarrow> b \<le> a \<or> c \<le> a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ({a..<b} \<subseteq> {c..<b}) = (b \<le> a \<or> c \<le> a)
[PROOF STEP]
unfolding atLeastLessThan_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ({a..} \<inter> {..<b} \<subseteq> {c..} \<inter> {..<b}) = (b \<le> a \<or> c \<le> a)
[PROOF STEP]
by auto
|
{"llama_tokens": 205, "file": "Poincare_Bendixson_Analysis_Misc", "length": 2}
|
# MIT license
# Copyright (c) Microsoft Corporation. All rights reserved.
# See LICENSE in the project root for full license information.
# This file contains average properties of the human eye and optical human eye models
# luminance (cd/m2) Multiple Value Item
# 10−6 µcd/m2 1 µcd/m2 Absolute threshold of vision[1]
# 10−5
# 10−4 400 µcd/m2 Darkest sky[2]
# 10−3 mcd/m2 1 mcd/m2 Night sky[3]
# 1.4 mcd/m2 Typical photographic scene lit by full moon[4]
# 5 mcd/m2 Approximate scotopic/mesopic threshold[5]
# 10−2 40 mcd/m2 Phosphorescent markings on a watch dial after 1 h in the dark[6][7]
# 10−1
# 100 cd/m2 2 cd/m2 Floodlit buildings, monuments, and fountains[8]
# 5 cd/m2 Approximate mesopic/photopic threshold[5]
# 101 25 cd/m2 Typical photographic scene at sunrise or sunset[4]
# 30 cd/m2 Green electroluminescent source[2]
# 102 250 cd/m2 Peak luminance of a typical LCD monitor[10][11]
# 700 cd/m2 Typical photographic scene on overcast day[4][8][11]
# 103 kcd/m2 2 kcd/m2 Average cloudy sky[2]
# 5 kcd/m2 Typical photographic scene in full sunlight[4][8]
"""
# Pupil diameter as a function of scene luminance
https://jov.arvojournals.org/article.aspx?articleid=2279420
https://en.wikipedia.org/wiki/Orders_of_magnitude_(luminance)
Pupil diameter is approximately 2.8mm at 100cd/m^2. A typical overcast day is 700cd/m^2
"""
"""computes pupil diameter as a function of scene luminance `L`, in cd/m², and the angular area, `a`, over which this luminance is presented to the eye."""
function 𝐃sd(L, a)  # the first letter of this function name is \bfD not D.
    # Saturating response in (L*a/846)^0.41: diameter shrinks from 7.75mm
    # toward 2mm as luminance-area product grows.
    x = (L * a / 846)^0.41
    return 7.75 - 5.75 * x / (x + 2)
end
export 𝐃sd
"""radius of the (spherical) eyeball model."""
eyeradius() = 12mm
export eyeradius
"""Posterior focal length, i.e., optical distance from entrance pupil to the retina. Focal length will change depending on accomodation. This value is for focus at ∞. When the eye is focused at 25cm focal length will be ≈ 22mm. Because the index of refraction of the vitreous humor is approximately 1.33 the physical distance from the entrance pupil to the retina will be 24mm/1.33 = 18mm."""
eyefocallength() = 24mm
export eyefocallength
vc_epupil() = 3mm #distance from vertex of cornea to entrance pupil
""" distance from vertex of cornea to center of rotation"""
cornea_to_eyecenter() = 13.5mm
export cornea_to_eyecenter
"""optical distance from the entrance pupil to the retina (not exported)."""
entrancepupil_to_retina() = 22.9mm
"""distance from entrance pupil to center of rotation."""
entrancepupil_to_eyecenter() = entrancepupil_to_retina() - eyeradius()
export entrancepupil_to_eyecenter
"""average angle, in degrees, the eye will rotate before users will turn their head"""
comfortable_eye_rotation_angle() = 20°
"""Average one sided translation of the entrance pupil associated with comfortable eye rotation. If you are using this to define an eyebox multiply this value by 2"""
comfortable_entrance_pupil_translation() = sin(comfortable_eye_rotation_angle())*entrancepupil_to_eyecenter()
export comfortable_entrance_pupil_translation
|
{"hexsha": "e0a6559a6fee7818691ce99b1cc1364ae332e8aa", "size": 2982, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Data/HumanEye.jl", "max_stars_repo_name": "LukasACH/OpticSim.jl", "max_stars_repo_head_hexsha": "36aca7c067e4f4754efe649a8f906a64eb682442", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Data/HumanEye.jl", "max_issues_repo_name": "LukasACH/OpticSim.jl", "max_issues_repo_head_hexsha": "36aca7c067e4f4754efe649a8f906a64eb682442", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Data/HumanEye.jl", "max_forks_repo_name": "LukasACH/OpticSim.jl", "max_forks_repo_head_hexsha": "36aca7c067e4f4754efe649a8f906a64eb682442", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8769230769, "max_line_length": 392, "alphanum_fraction": 0.754527163, "num_tokens": 902}
|
include("MeshCreator.jl")
include("MeshTopology.jl")
include("MeshGeometry.jl")
"""
    processopenfoammesh(polymesh)

Build a `Mesh` from a parsed OpenFOAM polyMesh dictionary: instantiate the
node/face/boundary/cell entities, derive their topology and geometrical
quantities, and assemble the final `Mesh` object.
"""
function processopenfoammesh(polymesh)
    println("Generating mesh ...")
    # Instantiate the basic mesh entities from the raw polyMesh arrays.
    meshnodes = createnodes(polymesh["points"])
    meshfaces = createfaces(polymesh["faces"], polymesh["owner"], polymesh["neighbour"])
    meshboundaries = createboundaries(polymesh["boundary"])
    meshcells = createcells(polymesh["owner"])
    # Derive connectivity (topology) between nodes, faces and cells.
    getnodestopology(meshnodes, meshfaces)
    getfacestopology(meshnodes, meshfaces)
    getcellstopology(meshnodes, meshfaces, meshcells)
    # Compute the geometrical quantities of each entity kind.
    getfacesgeometricalquantities(meshnodes, meshfaces)
    getcellsgeometricalquantities(meshnodes, meshfaces, meshcells)
    getnodesgeometricalquantities(meshnodes, meshfaces)
    # Fill in the per-face properties that require full cell data.
    getfacesremainingproperties(meshnodes, meshfaces, meshcells)
    # Assemble and return the final mesh object.
    flicsmesh = Mesh(meshnodes, meshfaces, meshboundaries, meshcells)
    println("FlicsMesh is successfully generated")
    return flicsmesh
end
|
{"hexsha": "a5110ead0c4ba76a424bfd7958f6f8c36b78beb7", "size": 1004, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Mesh/MeshProcessor.jl", "max_stars_repo_name": "andiraarif/FlicsFlow.jl", "max_stars_repo_head_hexsha": "50fa959666b9ed4fde158fc1be4d259f73675b06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Mesh/MeshProcessor.jl", "max_issues_repo_name": "andiraarif/FlicsFlow.jl", "max_issues_repo_head_hexsha": "50fa959666b9ed4fde158fc1be4d259f73675b06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Mesh/MeshProcessor.jl", "max_forks_repo_name": "andiraarif/FlicsFlow.jl", "max_forks_repo_head_hexsha": "50fa959666b9ed4fde158fc1be4d259f73675b06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4242424242, "max_line_length": 85, "alphanum_fraction": 0.7081673307, "num_tokens": 240}
|
include("models.jl")
function test_bernoulli()
    model_bernoulli = bernoulli_example()
    graph_bernoulli = trackdependencies(model_bernoulli)
    @testdependencies(model_bernoulli, w, p, x)
    # The statically extracted conditional for `p` must compose with Gibbs/MH.
    cond_bernoulli_p = StaticConditional(model_bernoulli, :p)
    @test_nothrow sample(model_bernoulli, Gibbs(cond_bernoulli_p, MH(:w)), 500)
    # Analytic tests
    # NOTE(review): the integer indices below depend on the exact statement
    # order of the tracked dependency graph for this model.
    w = graph_bernoulli[4].value
    p = graph_bernoulli[6].value
    x = graph_bernoulli[2].value
    # Unnormalized posterior weights of the two candidate values of p.
    p_1 = w[1] * pdf(Bernoulli(0.3), x)
    p_2 = w[2] * pdf(Bernoulli(0.7), x)
    Z = p_1 + p_2
    # 𝓅(p | w, x) ∝ 𝓅(p | w) * 𝓅(x | p)
    analytic_conditional = DiscreteNonParametric([0.3, 0.7], [p_1 / Z, p_2 / Z])
    @info "Bernoulli analytic" analytic_conditional
    θ = AutoGibbs.sampled_values(graph_bernoulli)
    local calculated_conditional
    @test_nothrow calculated_conditional = conditionals(graph_bernoulli, @varname(p))[@varname(p)]
    @info "Bernoulli calculated" calculated_conditional(θ)
    # The automatically derived conditional must match the analytic one.
    @test issimilar(calculated_conditional(θ), analytic_conditional)
end
function test_gmm()
    model_gmm = gmm_example()
    graph_gmm = trackdependencies(model_gmm)
    @testdependencies(model_gmm, μ, w, z, x[1], x[2], x[3])
    # The extracted conditional for `z` must compose with both MH and HMC.
    cond_gmm_z = StaticConditional(model_gmm, :z)
    @test_nothrow sample(model_gmm, Gibbs(cond_gmm_z, MH(:w, :μ)), 500)
    @test_nothrow sample(model_gmm, Gibbs(cond_gmm_z, HMC(0.01, 10, :w, :μ)), 500)
    # Analytic tests
    # NOTE(review): indices depend on the tracked statement order.
    x = graph_gmm[2].value
    μ = graph_gmm[7].value
    w = graph_gmm[10].value
    z = graph_gmm[14].value
    # Component-wise unnormalized assignment probabilities for each data point.
    p_1 = w[1] .* pdf.(Normal(μ[1], s2_gmm), x)
    p_2 = w[2] .* pdf.(Normal(μ[2], s2_gmm), x)
    (Z1, Z2, Z3) = p_1 .+ p_2
    # 𝓅(zᵢ | μ, w, x, z₋ᵢ) ∝ 𝓅(zᵢ | w) * 𝓅(xᵢ | zᵢ, μ)
    analytic_conditionals = [@varname(z[1]) => Categorical([p_1[1], p_2[1]] ./ Z1),
                             @varname(z[2]) => Categorical([p_1[2], p_2[2]] ./ Z2),
                             @varname(z[3]) => Categorical([p_1[3], p_2[3]] ./ Z3)]
    @info "GMM analytic" analytic_conditionals
    θ = AutoGibbs.sampled_values(graph_gmm)
    local calculated_conditional
    @test_nothrow calculated_conditional = conditionals(graph_gmm, @varname(z))[@varname(z)]
    @info "GMM calculated" calculated_conditional(θ)
    # Derived conditional must match the analytic product distribution.
    @test issimilar(calculated_conditional(θ), Product([D for (vn, D) in analytic_conditionals]))
end
function test_gmm_loopy()
    model_gmm_loopy = gmm_loopy_example()
    graph_gmm_loopy = trackdependencies(model_gmm_loopy)
    @testdependencies(model_gmm_loopy, μ[1], μ[2], w, z[1], z[2], z[3], x[1], x[2], x[3])
    cond_gmm_loopy_z = StaticConditional(model_gmm_loopy, :z)
    @test_nothrow sample(model_gmm_loopy, Gibbs(cond_gmm_loopy_z, MH(:w, :μ)), 500)
    @test_nothrow sample(model_gmm_loopy, Gibbs(cond_gmm_loopy_z, HMC(0.01, 10, :w, :μ)), 500)
    # Analytic tests
    # NOTE(review): indices depend on the tracked statement order; in the
    # loopy model each μ[i] and z[i] is assigned in its own statement.
    x = graph_gmm_loopy[2].value
    μ = [graph_gmm_loopy[19].value, graph_gmm_loopy[28].value]
    w = graph_gmm_loopy[32].value
    z = [graph_gmm_loopy[44].value, graph_gmm_loopy[63].value, graph_gmm_loopy[82].value]
    # Component-wise unnormalized assignment probabilities for each data point.
    p_1 = w[1] .* pdf.(Normal(μ[1], s2_gmm), x)
    p_2 = w[2] .* pdf.(Normal(μ[2], s2_gmm), x)
    (Z1, Z2, Z3) = p_1 .+ p_2
    # 𝓅(zᵢ | μ, w, x, z₋ᵢ) ∝ 𝓅(zᵢ | w) * 𝓅(xᵢ | zᵢ, μ)
    analytic_conditionals = [@varname(z[1]) => Categorical([p_1[1], p_2[1]] ./ Z1),
                             @varname(z[2]) => Categorical([p_1[2], p_2[2]] ./ Z2),
                             @varname(z[3]) => Categorical([p_1[3], p_2[3]] ./ Z3)]
    @info "Loopy GMM analytic" analytic_conditionals
    θ = AutoGibbs.sampled_values(graph_gmm_loopy)
    local calculated_conditionals
    @test_nothrow calculated_conditionals = conditionals(graph_gmm_loopy, @varname(z))
    @info "Loopy GMM calculated" Dict(vn => cond(θ) for (vn, cond) in calculated_conditionals)
    # Each per-element conditional must match its analytic counterpart.
    for (vn, analytic_conditional) in analytic_conditionals
        @test issimilar(calculated_conditionals[vn](θ), analytic_conditional)
    end
end
function test_gmm_shifted()
    # Smoke test only: no analytic check, just that dependency extraction and
    # sampling with the static conditional run without throwing.
    model_gmm_shifted = gmm_shifted_example()
    graph_gmm_shifted = trackdependencies(model_gmm_shifted)
    @testdependencies(model_gmm_shifted, μ[1], μ[2], w, z[1], z[2], z[3], x[1], x[2], x[3])
    cond_gmm_shifted_z = StaticConditional(model_gmm_shifted, :z)
    @test_nothrow sample(model_gmm_shifted, Gibbs(cond_gmm_shifted_z, MH(:w, :μ)), 500)
    @test_nothrow sample(model_gmm_shifted, Gibbs(cond_gmm_shifted_z, HMC(0.01, 10, :w, :μ)), 500)
end
function test_hmm()
    model_hmm = hmm_example()
    graph_hmm = trackdependencies(model_hmm)
    @testdependencies(model_hmm, T[1], T[2], m[1], m[2], s[1], s[2], s[3], x[1], x[2], x[3])
    cond_hmm_s = StaticConditional(model_hmm, :s)
    @test_nothrow sample(model_hmm, Gibbs(cond_hmm_s, MH(:m, :T)), 500)
    @test_nothrow sample(model_hmm, Gibbs(cond_hmm_s, HMC(0.01, 10, :m, :T)), 500)
    # Analytic tests
    # NOTE(review): indices depend on the tracked statement order.
    x = graph_hmm[2].value
    T = [graph_hmm[24].value, graph_hmm[40].value]
    m = [graph_hmm[30].value, graph_hmm[46].value]
    s1 = graph_hmm[50].value
    s2 = graph_hmm[70].value
    s3 = graph_hmm[91].value
    # Emission distributions of the two hidden states.
    D_obs_1 = Normal(m[1], s2_hmm)
    D_obs_2 = Normal(m[2], s2_hmm)
    # Unnormalized full conditionals: prior/transition in, transition out,
    # and emission likelihood for each state value (1 and 2).
    p_s1_1 = pdf(Categorical(2), 1) * pdf(Categorical(T[1]), s2) * pdf(D_obs_1, x[1])
    p_s1_2 = pdf(Categorical(2), 2) * pdf(Categorical(T[2]), s2) * pdf(D_obs_2, x[1])
    p_s2_1 = pdf(Categorical(T[s1]), 1) * pdf(Categorical(T[1]), s3) * pdf(D_obs_1, x[2])
    p_s2_2 = pdf(Categorical(T[s1]), 2) * pdf(Categorical(T[2]), s3) * pdf(D_obs_2, x[2])
    # The last state has no outgoing transition factor.
    p_s3_1 = pdf(Categorical(T[s2]), 1) * pdf(D_obs_1, x[3])
    p_s3_2 = pdf(Categorical(T[s2]), 2) * pdf(D_obs_2, x[3])
    Z_1 = p_s1_1 + p_s1_2
    Z_2 = p_s2_1 + p_s2_2
    Z_3 = p_s3_1 + p_s3_2
    # 𝓅(s₁ | T, m, s₋₁, x) ∝ 𝓅(s₁) 𝓅(s₂ | s₁, T) 𝓅(x₁ | s₁, m)
    # 𝓅(sᵢ | T, m, s₋ᵢ, x) ∝ 𝓅(sᵢ | sᵢ₋₁, T) 𝓅(sᵢ₊₁ | sᵢ, T) 𝓅(xᵢ | sᵢ, m) (for i ≥ 2)
    analytic_conditionals = [@varname(s[1]) => Categorical([p_s1_1, p_s1_2] ./ Z_1),
                             @varname(s[2]) => Categorical([p_s2_1, p_s2_2] ./ Z_2),
                             @varname(s[3]) => Categorical([p_s3_1, p_s3_2] ./ Z_3)]
    θ = AutoGibbs.sampled_values(graph_hmm)
    @info "HMM analytic" analytic_conditionals
    local calculated_conditionals
    @test_nothrow calculated_conditionals = conditionals(graph_hmm, @varname(s))
    @info "HMM calculated" Dict(vn => cond(θ) for (vn, cond) in calculated_conditionals)
    # Each per-state conditional must match its analytic counterpart.
    for (vn, analytic_conditional) in analytic_conditionals
        # @show vn => probs(calculated_conditionals[vn]), probs(analytic_conditional)
        @test issimilar(calculated_conditionals[vn](θ), analytic_conditional)
    end
end
function test_imm_stick()
    model_imm_stick = imm_stick_example()
    graph_imm_stick = trackdependencies(model_imm_stick)
    # we leave out the μs, because there might be 1--3 of them
    @testdependencies(model_imm_stick,
                      v, # 10 - 1 sticks
                      μ, # 10 cluster centers
                      # z, # 9 data points
                      y[1], y[2], y[3], y[4], y[5], y[6], y[7], y[8], y[9])
    cond_imm_stick = StaticConditional(model_imm_stick, :z)
    @test_nothrow sample(model_imm_stick, Gibbs(cond_imm_stick, MH(:μ, :v)), 500)
    @test_nothrow sample(model_imm_stick, Gibbs(cond_imm_stick, HMC(0.01, 10, :μ, :v)), 500)
    # Analytic tests
    # NOTE(review): fixed indices work for scalar statements; `z` and `μ` are
    # recovered below by scanning the graph instead, since their statement
    # positions vary.
    y = graph_imm_stick[2].value
    α = graph_imm_stick[4].value
    K = graph_imm_stick[6].value
    N = graph_imm_stick[7].value
    v = graph_imm_stick[12].value
    # z = graph_imm_stick[16].value
    # Collect all assumption statements subsumed by `z` / `μ` from the graph.
    z = [v.value for v in values(graph_imm_stick.statements)
         if v isa AutoGibbs.Assumption && DynamicPPL.subsumes(@varname(z), v.vn)]
    # μ = graph_imm_stick[19].value
    μ = (v.value for v in values(graph_imm_stick.statements)
         if v isa AutoGibbs.Assumption && DynamicPPL.subsumes(@varname(μ), v.vn)) |> first
    # Mixture weights from the stick-breaking construction.
    D_w = Categorical(stickbreak(v))
    # 𝓅(zₙ | v, μ, y) ∝ 𝓅(zₙ | v) 𝓅(yₙ | zₙ, μ), normalized over the support.
    analytic_conditionals = map(1:N) do n
        p̃ = [exp(logpdf(D_w, z) + logpdf(Normal(μ[z], 1.0), y[n])) for z in support(D_w)]
        @varname(z[n]) => Categorical(p̃ ./ sum(p̃))
    end
    # analytic_conditional = Product([D for (vn, D) in analytic_conditionals])
    # @info "stick-breaking IMM analytic conditionals" analytic_conditionals
    θ = AutoGibbs.sampled_values(graph_imm_stick)
    local calculated_conditionals
    @test_nothrow calculated_conditionals = conditionals(graph_imm_stick, @varname(z))
    # @info "stick-breaking IMM calculated conditionals" Dict(
    #     vn => cond(θ) for (vn, cond) in calculated_conditionals)
    # Each per-assignment conditional must match its analytic counterpart.
    for (vn, analytic_conditional) in analytic_conditionals
        # @show vn => probs(calculated_conditionals[vn]), probs(analytic_conditional)
        @test issimilar(calculated_conditionals[vn](θ), analytic_conditional)
    end
end
function test_changepoint()
    # Changepoint model: dependency extraction plus a short sampling smoke
    # test using the automatic conditional for τ; no analytic check.
    model_changepoint = changepoint([1.1, 0.9, 0.2])
    graph_changepoint = trackdependencies(model_changepoint)
    @testdependencies(model_changepoint, λ₁, λ₂, τ, y[1], y[2], y[3])
    @test_nothrow sample(model_changepoint, Gibbs(AutoConditional(:τ), MH(:λ₁, :λ₂)), 20)
end
# function test_reverseps()
# model_reverse_deps = reverse_deps([0.1, -0.2])
# graph_reverse_deps = trackdependencies(model_reverse_deps)
# @testdependencies(model_reverse_deps, m[1], m[2], x)
# end
##########################################################################
#########################################################################
### TEST TOGGLES
# Run every test case; comment out individual lines to toggle tests off.
test_bernoulli()
test_gmm()
test_gmm_loopy()
test_gmm_shifted()
test_hmm()
test_imm_stick()
test_changepoint()
|
{"hexsha": "9aeb2af04ef7a32052df75c8885f7d576ca5bc24", "size": 9637, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_conditionals.jl", "max_stars_repo_name": "phipsgabler/AutoGibbs.jl", "max_stars_repo_head_hexsha": "de3cb28a00943ca4c3f1b928c391777f69d32fd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-03-06T11:14:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-12T15:01:55.000Z", "max_issues_repo_path": "test/test_conditionals.jl", "max_issues_repo_name": "phipsgabler/AutoGibbs.jl", "max_issues_repo_head_hexsha": "de3cb28a00943ca4c3f1b928c391777f69d32fd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-05-14T09:42:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-25T18:06:22.000Z", "max_forks_repo_path": "test/test_conditionals.jl", "max_forks_repo_name": "phipsgabler/AutoGibbs.jl", "max_forks_repo_head_hexsha": "de3cb28a00943ca4c3f1b928c391777f69d32fd5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-10T13:30:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-10T13:30:42.000Z", "avg_line_length": 41.0085106383, "max_line_length": 98, "alphanum_fraction": 0.640552039, "num_tokens": 3359}
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
from mo.back.replacement import BackReplacementPattern
from mo.middle.passes.eliminate import remove_op_node_with_data_node
class RemoveLastSoftMaxPattern(BackReplacementPattern):
    # Intentionally disabled by default; only activated when the
    # --remove_output_softmax command-line flag was enabled.
    enabled = False

    @staticmethod
    def pattern():
        return dict(
            nodes=[
                ('softmax_node', dict(kind='op', op='SoftMax'))
            ],
            edges=[]
        )

    @staticmethod
    def replace_pattern(graph: nx.MultiDiGraph, match: dict):
        """Remove a SoftMax layer that feeds the network output directly.

        The pattern of interest is: Parent (any type) -> SoftMax -> OpOutput.
        SoftMax nodes anywhere else in the graph are left untouched.

        Parameters
        ----------
        graph : nx.MultiDiGraph
            Graph with loaded model.
        match : dict
            Patterns which were found in graph structure.
        """
        softmax_node = match['softmax_node']
        output_data = softmax_node.out_node()
        # Only drop the SoftMax when its data node is marked as an output.
        if output_data.has_and_set('is_output'):
            remove_op_node_with_data_node(graph, softmax_node)
|
{"hexsha": "488e161433af9df01f9064f9a25846bb12e1f2f7", "size": 1721, "ext": "py", "lang": "Python", "max_stars_repo_path": "model-optimizer/extensions/back/remove_last_softmax_pattern.py", "max_stars_repo_name": "apexxs/dldt", "max_stars_repo_head_hexsha": "17e66dc5a6631d630da454506902bd7c25d4170b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-19T06:08:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-25T02:43:43.000Z", "max_issues_repo_path": "model-optimizer/extensions/back/remove_last_softmax_pattern.py", "max_issues_repo_name": "apexxs/dldt", "max_issues_repo_head_hexsha": "17e66dc5a6631d630da454506902bd7c25d4170b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2022-01-11T18:56:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T13:20:20.000Z", "max_forks_repo_path": "model-optimizer/extensions/back/remove_last_softmax_pattern.py", "max_forks_repo_name": "apexxs/dldt", "max_forks_repo_head_hexsha": "17e66dc5a6631d630da454506902bd7c25d4170b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-05T17:11:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-19T08:33:31.000Z", "avg_line_length": 31.8703703704, "max_line_length": 112, "alphanum_fraction": 0.6740267286, "include": true, "reason": "import networkx", "num_tokens": 355}
|
# Copyright (c) Felix Petersen.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import argparse
import random
from tqdm import tqdm, trange
import time
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import train_test_split
from pprint import pprint
from utils_ import load_data, load_test_names, load_nyc_names
########################################################################################################################
"""
wget https://publicdata1.nyc3.digitaloceanspaces.com/IF_Bios_BERT.tar.gz
tar -xvzf IF_Bios_BERT.tar.gz && rm IF_Bios_BERT.tar.gz
wget https://publicdata1.nyc3.digitaloceanspaces.com/IF_Toxicity_BERT.tar.gz
tar -xvzf IF_Toxicity_BERT.tar.gz && rm IF_Toxicity_BERT.tar.gz
python -u run_coordinate_descent.py -ni 2000 --nloglr 3 --seed 0 --dataset sentiment --lambda_GLIF .1 --lambda_GLIF_NRW .1 --tau 30
python -u run_coordinate_descent.py -ni 10_000 --nloglr 5 --seed 0 --dataset bios --lambda_GLIF 10 --lambda_GLIF_NRW .1 --tau 16 --test_fraction 0.1
python -u run_coordinate_descent.py -ni 10_000 --nloglr 3 --seed 0 --dataset toxicity --lambda_GLIF 30 --lambda_GLIF_NRW .1 --tau .4 --test_fraction 0.05
"""
########################################################################################################################
parser = argparse.ArgumentParser(description='')
parser.add_argument('-ni', '--num_iterations', type=int, default=2_000)
parser.add_argument('--batch_size', type=int, default=1_000)
# Negative log10 of the learning rate — presumably lr = 10 ** (-nloglr);
# TODO(review): confirm against the optimizer setup further down the script.
parser.add_argument('--nloglr', type=float, default=3.)
parser.add_argument('--dataset', default='sentiment', type=str, choices=['sentiment', 'bios', 'toxicity'])
parser.add_argument('--test_fraction', default=None, type=float)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--lambda_GLIF', type=float, required=True)
parser.add_argument('--lambda_GLIF_NRW', type=float, required=True)
# If left empty, a dataset-specific log-spaced grid is used (see below).
parser.add_argument('--taus', nargs='+', type=float, default=[])
parser.add_argument('--coo_epochs', type=int, default=10)
args = parser.parse_args()
print(vars(args))

# Cap CPU parallelism and seed every RNG for reproducibility.
torch.set_num_threads(min(8, torch.get_num_threads()))
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)

device = torch.device('cpu')

if args.taus:
    taus = args.taus
else:
    # Default tau grids: 51 log-spaced values over a dataset-specific range.
    taus = {
        'sentiment': list(np.logspace(np.log10(10), np.log10(100), 51)),
        'toxicity': list(np.logspace(np.log10(.1), np.log10(1), 51)),
        'bios': list(np.logspace(np.log10(1), np.log10(100), 51)),
    }[args.dataset]
########################################################################################################################
# Training #############################################################################################################
########################################################################################################################
def sample_batch_idx(y, n_per_class):
    """Draw a class-balanced batch of row indices.

    For each one-hot column of `y`, sample `n_per_class` distinct row
    indices belonging to that class, then shuffle the combined list.

    :param y: (n_samples, n_classes) one-hot label array
    :param n_per_class: number of indices to draw per class (without replacement)
    :return: shuffled list of n_classes * n_per_class row indices
    """
    batch_idx = []
    for class_idx in range(y.shape[1]):
        members = np.where(y[:, class_idx] == 1)[0]
        chosen = np.random.choice(members, size=n_per_class, replace=False)
        batch_idx.extend(chosen.tolist())
    np.random.shuffle(batch_idx)
    return batch_idx
def get_model():
    """Build the 2-layer MLP classifier for the current `args.dataset`.

    Input width matches the embedding used per dataset (300 for sentiment,
    768 for bios/toxicity); output width matches the number of classes.
    """
    # (input_dim, hidden_dim, output_dim) per dataset.
    dims = {
        'sentiment': (300, 1_000, 2),
        'bios': (768, 2_000, 28),
        'toxicity': (768, 2_000, 2),
    }
    input_dim, hidden_dim, output_dim = dims[args.dataset]
    return nn.Sequential(
        nn.Linear(input_dim, hidden_dim),
        nn.ReLU(),
        nn.Linear(hidden_dim, output_dim)
    ).to(device)
########################################################################################################################
# Basic Evaluation ###################################################################################################
########################################################################################################################
def eval_model(main_pred=None, pred_for_fairness=None, y_test_local_sentiment=None, name='unknown'):
    """Evaluate accuracy and dataset-specific fairness metrics, printing
    them under the given `name` tag.

    NOTE(review): relies on module-level globals set in __main__ (`model`,
    `X_test`, `y_test`, `test_df`, `test_names_embed`, `X_counter_test`,
    `X_test_counter`, `device`, `args`).

    :param main_pred: precomputed logits on X_test; computed here if None
    :param pred_for_fairness: precomputed fairness predictions; computed
        here per dataset if None
    :param y_test_local_sentiment: optional label override (sentiment only)
    :param name: tag appended to each printed metric key
    """
    if main_pred is None:
        main_pred = model(torch.tensor(X_test).float().to(device))
    if pred_for_fairness is None:
        if args.dataset == 'sentiment':
            # Logit gap (positive minus negative class) on name embeddings.
            pred_for_fairness = model(torch.tensor(test_names_embed).float().to(device))
            pred_for_fairness = pred_for_fairness[:, 1] - pred_for_fairness[:, 0]
        elif args.dataset == 'bios':
            # Predictions on the counterfactual (gender-swapped) test bios.
            pred_for_fairness = model(torch.tensor(X_counter_test).float().to(device))
        elif args.dataset == 'toxicity':
            # 51 counterfactual variants per test sentence — presumably one
            # per identity term; TODO confirm against dataset construction.
            assert X_test_counter.shape[0] == 51, X_test_counter.shape
            assert X_test_counter.shape[2] == 768, X_test_counter.shape
            x = torch.tensor(X_test_counter).reshape(-1, 768).float().to(device)
            pred_for_fairness = model(x)
            pred_for_fairness = pred_for_fairness.reshape(51, X_test_counter.shape[1], 2)
    # --------
    if args.dataset == 'sentiment':
        assert len(pred_for_fairness.shape) == 1, pred_for_fairness.shape
        acc = (main_pred.argmax(1) == torch.tensor(
            y_test if y_test_local_sentiment is None else y_test_local_sentiment
        ).float().to(device).argmax(1)).float().mean()
        # Mean logit gaps between demographic groups of the test names.
        test_df['naive_logits'] = pred_for_fairness.cpu().detach().numpy()
        race_gap = (
            test_df[test_df['race'] == 'White']['naive_logits'].mean()
            - test_df[test_df['race'] == 'Black']['naive_logits'].mean()
        )
        gender_gap = (
            test_df[test_df['gender'] == 'Female']['naive_logits'].mean()
            - test_df[test_df['gender'] == 'Male']['naive_logits'].mean()
        )
        pprint({
            'test_acc_{}'.format(name): acc.item(),
            'race_gap_{}'.format(name): race_gap.item(),
            'gender_gap_{}'.format(name): gender_gap.item(),
            'logit_std_{}'.format(name): pred_for_fairness.std().item(),
        })
    # --------
    elif args.dataset == 'bios':
        # Balanced (per-class mean) accuracy over the 28 occupations.
        accs = (main_pred.argmax(1) == torch.tensor(y_test).float().to(device).argmax(1)).float()
        accs = [accs[y_test.argmax(1) == i].mean() for i in range(28)]
        acc = torch.mean(torch.stack(accs))
        # Fraction of samples whose prediction is unchanged on the
        # counterfactual bios.
        consistency = (main_pred.argmax(1) == pred_for_fairness.argmax(1)).float().mean()
        pprint({
            'test_acc_{}'.format(name): acc.item(),
            'test_consistency_{}'.format(name): consistency.item(),
        })
    # --------
    elif args.dataset == 'toxicity':
        # Balanced (per-class mean) accuracy over the two classes.
        accs = (main_pred.argmax(1) == torch.tensor(y_test).float().to(device).argmax(1)).float()
        accs = [accs[y_test.argmax(1) == i].mean() for i in range(2)]
        acc = torch.mean(torch.stack(accs))
        # Fraction of sentences classified identically across all 51
        # counterfactual variants (all 0 or all 1).
        fairness = (pred_for_fairness.argmax(2).float().mean(0) == 0.).float().mean() \
            + (pred_for_fairness.argmax(2).float().mean(0) == 1.).float().mean()
        pprint({
            'test_acc_{}'.format(name): acc.item(),
            'test_consistency_{}'.format(name): fairness.item(),
        })
########################################################################################################################
# Main ###############################################################################################################
########################################################################################################################
if __name__ == '__main__':
####################################################################################################################
# Sentiment w/ Names #############################################################################################
####################################################################################################################
if args.dataset == 'sentiment':
# The files in this repo are the respective relevant files (or relevant parts of the embedding) from:
# http://www.cs.uic.edu/~liub/FBS/opinion-lexicon-English.rar
# http://nlp.stanford.edu/data/glove.42B.300d.zip
data_path = './project_data/'
embeddings_path = './project_data/sentiment_glove.42B.300d.txt'
nyc_names_path = './project_data/Popular_Baby_Names.csv'
# Load data and embeddings
# Loading GloVe might take couple of minutes
embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab = load_data(data_path, embeddings_path,
state=args.seed)
# Load test names and their embeddings
test_df, test_names_embed = load_test_names(embeddings)
# Load Popular NYC Baby Names and their embeddings
nyc_names_embed = load_nyc_names(nyc_names_path, embeddings)
prohibited_subspace = nyc_names_embed
assert args.test_fraction is None, args.test_fraction
####################################################################################################################
# Bios ###########################################################################################################
####################################################################################################################
elif args.dataset == 'bios':
# wget https://publicdata1.nyc3.digitaloceanspaces.com/IF_Bios_BERT.tar.gz
# tar -xvzf IF_Bios_BERT.tar.gz && rm IF_Bios_BERT.tar.gz
bios_train_size = 0.98 if args.test_fraction is None else 1 - args.test_fraction
bios_datafolder = './IF_Bios_BERT/'
y_title = list(map(str, np.load(bios_datafolder + 'bios_titles.npy')))
y_gender = list(map(str, np.load(bios_datafolder + 'bios_gedner.npy')))
categories = ['dietitian', 'physician', 'photographer', 'dentist', 'surgeon', 'journalist',
'pastor', 'yoga_teacher', 'professor', 'accountant', 'architect', 'interior_designer',
'personal_trainer', 'chiropractor', 'poet', 'comedian', 'rapper', 'filmmaker',
'nurse', 'dj', 'painter', 'attorney', 'model', 'software_engineer', 'teacher',
'paralegal', 'composer', 'psychologist']
from sklearn.preprocessing import OneHotEncoder
one_hot_title = OneHotEncoder(sparse=False, categories=[categories])
y_title = one_hot_title.fit_transform([[y] for y in y_title])
one_hot_gender = OneHotEncoder(sparse=False, categories='auto')
y_gender = one_hot_gender.fit_transform(np.array(y_gender).reshape(-1, 1))
## Partition data
np.random.seed(args.seed)
N = y_title.shape[0]
bios_real = np.load(bios_datafolder + 'X_bert_real_seed_%d.npy' % (args.seed%10))
bios_counter = np.load(bios_datafolder + 'X_bert_counter_seed_%d.npy' % (args.seed%10))
idx_train = np.random.choice(N, int(N * bios_train_size), replace=False)
idx_test = np.setdiff1d(range(N), idx_train)
X_real_train, X_real_test = bios_real[idx_train], bios_real[idx_test]
X_counter_train, X_counter_test = bios_counter[idx_train], bios_counter[idx_test]
y_train, y_test, gender_train, gender_test = y_title[idx_train], y_title[idx_test], y_gender[idx_train], y_gender[
idx_test]
X_train, X_test = X_real_train, X_real_test
prohibited_subspace = X_real_train - X_counter_train
####################################################################################################################
# Toxicity #######################################################################################################
####################################################################################################################
elif args.dataset == 'toxicity':
# wget https://publicdata1.nyc3.digitaloceanspaces.com/IF_Toxicity_BERT.tar.gz
# tar -xvzf IF_Toxicity_BERT.tar.gz && rm IF_Toxicity_BERT.tar.gz
toxicity_test_size = 0.003 if args.test_fraction is None else args.test_fraction
data_folder = './IF_Toxicity_BERT/'
with open(data_folder + 'adjectives_people.txt', 'r') as f:
IDENTITY_TERMS = np.array([w.strip() for w in f.readlines()])
X_nid = np.load(data_folder + 'X_bert_nid_kaggle.npy')
group_nid = np.load(data_folder + 'subgroups_nid.npy')
target_nid = np.load(data_folder + 'target_nid.npy')
target_nid = np.column_stack((1 * (target_nid == 0), 1 * (target_nid == 1)))
X_id = np.load(data_folder + 'original_X_bert_id_kaggle.npy')
group_id = np.load(data_folder + 'subgroups_id.npy')
target_id = np.load(data_folder + 'target_id.npy')
target_id = np.column_stack((1 * (target_id == 0), 1 * (target_id == 1)))
id_mask = np.load(data_folder + 'id_mask.npy')
n_nid = X_nid.shape[0]
X_terms = []
for term in IDENTITY_TERMS:
X_term = np.load(data_folder + 'X_bert_kaggle_' + term + '.npy')
X_terms.append(X_term)
np.random.seed(args.seed)
terms_train_idx = np.random.choice(len(IDENTITY_TERMS), size=25, replace=False)
idx_nid_train, idx_nid_test = train_test_split(np.arange(X_nid.shape[0]), test_size=toxicity_test_size)
idx_id_train, idx_id_test = train_test_split(np.arange(X_id.shape[0]), test_size=toxicity_test_size)
id_train_mask = id_mask[idx_id_train][:, terms_train_idx]
idx_train_INactive_id = idx_id_train[id_train_mask.sum(axis=1) == 0]
idx_id_train = idx_id_train[id_train_mask.sum(axis=1) > 0]
X_nid_seed = np.vstack((X_nid, X_id[idx_train_INactive_id]))
target_nid_seed = np.vstack((target_nid, target_id[idx_train_INactive_id]))
group_nid_seed = np.vstack((group_nid, group_id[idx_train_INactive_id]))
idx_nid_train = np.concatenate((idx_nid_train, n_nid + np.arange(len(idx_train_INactive_id))))
X_train = np.vstack((X_nid_seed[idx_nid_train], X_id[idx_id_train]))
X_test = np.vstack((X_nid[idx_nid_test], X_id[idx_id_test]))
y_train = np.vstack((target_nid_seed[idx_nid_train], target_id[idx_id_train]))
y_test = np.vstack((target_nid[idx_nid_test], target_id[idx_id_test]))
groups_train = np.vstack((group_nid_seed[idx_nid_train], group_id[idx_id_train]))
groups_test = np.vstack((group_nid[idx_nid_test], group_id[idx_id_test]))
X_test_counter = [X_id[idx_id_test]] + [X_terms_id[idx_id_test] for X_terms_id in X_terms]
X_test_counter = np.array(X_test_counter)
X_test_all = np.vstack((X_nid[idx_nid_test], X_test_counter.reshape(-1, 768)))
y_test_all = np.vstack((
target_nid[idx_nid_test],
np.repeat(np.expand_dims(target_id[idx_id_test], 0), 51, axis=0).reshape(-1, 2)
))
metric_X = [X_terms[i][idx_id_train] for i in terms_train_idx]
terms_mean = np.mean(metric_X, axis=0)
prohibited_subspace = np.vstack([X_t - terms_mean for X_t in metric_X])
else:
raise NotImplementedError(args.dataset)
if args.dataset == 'sentiment':
num_test_samples = X_test.shape[0]
elif args.dataset == 'bios':
num_test_samples = X_test.shape[0]
print('test_set_size', X_real_test.shape[0] + X_counter_test.shape[0])
elif args.dataset == 'toxicity':
num_test_samples1 = len(idx_nid_test)
num_test_samples2 = len(idx_id_test)
num_test_samples = X_test.shape[0]
assert num_test_samples1 + num_test_samples2 == num_test_samples, (num_test_samples1, num_test_samples2, num_test_samples)
print('num_test_samples1', num_test_samples1)
print('num_test_samples2', num_test_samples2)
print('test_set_size', X_test_all.shape[0])
print('num_test_samples', num_test_samples)
print('num_train_samples', X_train.shape[0])
####################################################################################################################
# Prohibited Subspace ############################################################################################
####################################################################################################################
# Learning sensitive direction from Popular Baby Names
tSVD = TruncatedSVD(n_components={
'sentiment': 50,
'bios': 25,
'toxicity': 25,
}[args.dataset])
tSVD.fit(prohibited_subspace)
svd_sens_directions = tSVD.components_
svd_sens_directions = torch.tensor(svd_sens_directions).float().to(device)
print(svd_sens_directions.shape)
####################################################################################################################
# Baseline Model #################################################################################################
####################################################################################################################
print('')
print('Baseline Model')
print('')
model = get_model()
optim = torch.optim.Adam(model.parameters(), lr=10**(-args.nloglr))
for iter_idx in trange(args.num_iterations):
batch_idx = sample_batch_idx(y_train, args.batch_size // (y_train.shape[1]))
batch_x = torch.tensor(X_train[batch_idx]).float().to(device)
batch_y = torch.tensor(y_train[batch_idx]).float().to(device)
y_pred = model(batch_x)
loss = torch.nn.CrossEntropyLoss()(y_pred, batch_y.argmax(1))
acc = (y_pred.argmax(1) == batch_y.argmax(1)).float().mean()
optim.zero_grad()
loss.backward()
optim.step()
print('train loss / acc', loss.item(), acc.item() * 100., '%')
eval_model(name='baseline')
####################################################################################################################
# GLIF: Laplacian Evaluation (coordinate descent) ################################################################
####################################################################################################################
print('')
print('GLIF: Laplacian Evaluation (coordinate descent)')
print('')
with torch.no_grad():
for tau in taus:
print('tau', tau)
if args.dataset == 'sentiment':
data = torch.cat([
torch.tensor(X_test).float().to(device),
torch.tensor(test_names_embed).float().to(device),
])
elif args.dataset == 'bios':
data = torch.cat([
torch.tensor(X_test).float().to(device),
torch.tensor(X_counter_test).float().to(device),
])
elif args.dataset == 'toxicity':
data = torch.cat([
torch.tensor(X_test_all).float().to(device),
])
else:
raise NotImplementedError(args.dataset)
outputs = model(data)
basis = svd_sens_directions.cpu().numpy().T
proj = np.linalg.inv(np.matmul(basis.T, basis))
proj = np.matmul(basis, proj)
proj = np.matmul(proj, basis.T)
proj_compl = np.eye(proj.shape[0]) - proj
proj_compl = torch.tensor(proj_compl).float().to(device)
fair_space_data = data @ proj_compl
fair_space_data = fair_space_data.cpu().numpy()
def e_dist(A, B, cosine=False, eps=1e-10):
    """Pairwise distances between the rows of two 2-D arrays.

    Returns squared Euclidean distances by default, or cosine distances
    (1 - cosine similarity) when ``cosine`` is True. ``eps`` guards the
    cosine branch against division by zero.
    """
    sq_norms_a = np.sum(A * A, axis=1).reshape(-1, 1)
    sq_norms_b = np.sum(B * B, axis=1).reshape(1, -1)
    gram = A @ B.T
    if not cosine:
        # ||a||^2 - 2<a,b> + ||b||^2 for every (row a, row b) pair.
        return sq_norms_a - 2 * gram + sq_norms_b
    return 1 - gram / (np.sqrt(sq_norms_a * sq_norms_b) + eps)
def e_dist_torch(A, B):
    """Pairwise squared Euclidean distances between rows of two tensors."""
    row_norms = A.pow(2).sum(dim=1, keepdim=True)
    col_norms = B.pow(2).sum(dim=1).unsqueeze(0)
    cross = torch.matmul(A, B.T)
    return row_norms - 2 * cross + col_norms
fair_similarity_Ws = []
fair_similarity_Ds = []
t_s = time.time()
for fair_space_data_split in tqdm(np.array_split(fair_space_data, fair_space_data.shape[0] // 1_500 + 1)):
fair_space_data_squared_distances = e_dist_torch(torch.tensor(fair_space_data_split).to(device), torch.tensor(fair_space_data).to(device))
fair_similarity_W_current = fair_space_data_squared_distances <= tau
fair_similarity_Ds.append(fair_similarity_W_current.float().sum(1).cpu().numpy())
fair_similarity_W_current = np.packbits(fair_similarity_W_current.cpu().numpy(), axis=1)
fair_similarity_Ws.append(fair_similarity_W_current)
fair_similarity_W = np.concatenate(fair_similarity_Ws, axis=0)
fair_similarity_D = np.concatenate(fair_similarity_Ds, axis=0)
y_original = outputs.clone().cpu().numpy()
y_updated = outputs.clone().cpu().numpy()
t_e = time.time()
print('Time (setup) [s]: {:.3f}'.format(t_e - t_s))
t_s = time.time()
batch_size = 512
for iter_idx in trange((y_updated.shape[0] // batch_size) * args.coo_epochs):
selection = np.random.choice(np.arange(y_updated.shape[0]), batch_size, replace=False)
fair_similarity_W_selection = fair_similarity_W[selection]
fair_similarity_W_selection = np.unpackbits(fair_similarity_W_selection, axis=1, count=fair_similarity_W.shape[0]).astype(np.float32)
fair_similarity_W_selection[np.arange(batch_size), selection] = 0
avg_degree = fair_similarity_W_selection.sum(1).mean()
if iter_idx == 0:
print('avg_degree', avg_degree.item())
y_w = (np.expand_dims(y_updated, 0) * np.expand_dims(fair_similarity_W_selection, 2)).sum(1)
y_updated[selection] = (y_original[selection] + args.lambda_GLIF * y_w) / (1 + args.lambda_GLIF * fair_similarity_W_selection.sum(1, keepdims=True))
y_updated = torch.tensor(y_updated).to(device)
t_e = time.time()
print('Time [s]: {:.3f}'.format(t_e - t_s))
if args.dataset == 'sentiment':
names_logits = y_updated[num_test_samples:]
names_logits = names_logits[:, 1] - names_logits[:, 0]
eval_model(y_updated[:num_test_samples], names_logits, name='coo_laplacian_tau_{}'.format(tau))
elif args.dataset == 'bios':
eval_model(y_updated[:num_test_samples], y_updated[num_test_samples:],
name='coo_laplacian_tau_{}'.format(tau))
elif args.dataset == 'toxicity':
pred_for_fairness = y_updated[num_test_samples1:]
pred_for_fairness = pred_for_fairness.reshape(51, X_test_counter.shape[1], 2)
eval_model(y_updated[:num_test_samples], pred_for_fairness,
name='coo_laplacian_tau_{}'.format(tau))
else:
raise NotImplementedError(args.dataset)
####################################################################################################################
# GLIF-NRW: Normalized Random Walk Laplacian Evaluation (coordinate descent) #####################################
####################################################################################################################
print('')
print('GLIF-NRW: Normalized Random Walk Laplacian Evaluation (coordinate descent)')
print('')
with torch.no_grad():
for tau in taus:
print('tau', tau)
if args.dataset == 'sentiment':
data = torch.cat([
torch.tensor(X_test).float().to(device),
torch.tensor(test_names_embed).float().to(device),
])
elif args.dataset == 'bios':
data = torch.cat([
torch.tensor(X_test).float().to(device),
torch.tensor(X_counter_test).float().to(device),
])
elif args.dataset == 'toxicity':
data = torch.cat([
torch.tensor(X_test_all).float().to(device),
])
else:
raise NotImplementedError(args.dataset)
outputs = model(data)
basis = svd_sens_directions.cpu().numpy().T
proj = np.linalg.inv(np.matmul(basis.T, basis))
proj = np.matmul(basis, proj)
proj = np.matmul(proj, basis.T)
proj_compl = np.eye(proj.shape[0]) - proj
proj_compl = torch.tensor(proj_compl).float().to(device)
fair_space_data = data @ proj_compl
fair_space_data = fair_space_data.cpu().numpy()
def e_dist(A, B, cosine=False, eps=1e-10):
    """Row-wise pairwise distances: squared Euclidean by default,
    cosine distance (1 - similarity) when ``cosine`` is set.
    """
    norm_sq_rows = (A * A).sum(axis=1)[:, None]
    norm_sq_cols = (B * B).sum(axis=1)[None, :]
    dots = A.dot(B.T)
    if cosine:
        # eps keeps the denominator nonzero for zero-norm rows.
        return 1 - dots / (np.sqrt(norm_sq_rows * norm_sq_cols) + eps)
    return norm_sq_rows - 2 * dots + norm_sq_cols
def e_dist_torch(A, B):
    """All-pairs squared Euclidean distances between rows of A and rows of B."""
    a_sq = torch.sum(A * A, dim=1).reshape(-1, 1)
    b_sq = torch.sum(B * B, dim=1).reshape(1, -1)
    return a_sq - 2 * (A @ B.T) + b_sq
fair_similarity_Ws = []
fair_similarity_Ds = []
t_s = time.time()
for fair_space_data_split in tqdm(np.array_split(fair_space_data, fair_space_data.shape[0] // 1_500 + 1)):
fair_space_data_squared_distances = e_dist_torch(torch.tensor(fair_space_data_split).to(device), torch.tensor(fair_space_data).to(device))
fair_similarity_W_current = fair_space_data_squared_distances <= tau
fair_similarity_Ds.append(fair_similarity_W_current.float().sum(1).cpu().numpy())
fair_similarity_W_current = np.packbits(fair_similarity_W_current.cpu().numpy(), axis=1)
fair_similarity_Ws.append(fair_similarity_W_current)
fair_similarity_W = np.concatenate(fair_similarity_Ws, axis=0)
fair_similarity_D = np.concatenate(fair_similarity_Ds, axis=0)
D_tildes = []
for selection in tqdm(np.array_split(np.arange(fair_space_data.shape[0]), fair_space_data.shape[0] // 1_500 + 1)):
fair_similarity_W_selection = fair_similarity_W[selection]
fair_similarity_W_selection = np.unpackbits(fair_similarity_W_selection, axis=1, count=fair_similarity_W.shape[0]).astype(np.float32)
W_tilde = fair_similarity_W_selection / np.sqrt(fair_similarity_D[selection].reshape((-1, 1))) / np.sqrt(fair_similarity_D.reshape((1, -1)))
D_tildes.append(W_tilde.sum(1))
D_tilde = np.concatenate(D_tildes, axis=0)
y_original = outputs.clone().cpu().numpy()
y_updated = outputs.clone().cpu().numpy()
t_e = time.time()
print('Time (setup) [s]: {:.3f}'.format(t_e - t_s))
t_s = time.time()
batch_size = 512
for iter_idx in trange((y_updated.shape[0] // batch_size) * args.coo_epochs):
selection = np.random.choice(np.arange(y_updated.shape[0]), batch_size, replace=False)
fair_similarity_W_selection = fair_similarity_W[selection]
fair_similarity_W_selection = np.unpackbits(fair_similarity_W_selection, axis=1, count=fair_similarity_W.shape[0]).astype(np.float32)
avg_degree = fair_similarity_W_selection.sum(1).mean()
if iter_idx == 0:
print('avg_degree', avg_degree.item())
W_tilde = fair_similarity_W_selection / np.sqrt(fair_similarity_D[selection].reshape((-1, 1))) / np.sqrt(fair_similarity_D.reshape((1, -1)))
one_over_D_tilde_plus_one_over_D_tilde_T = \
1 / D_tilde[selection].reshape((-1, 1)) + 1 / D_tilde.reshape((1, -1))
W_new = W_tilde * one_over_D_tilde_plus_one_over_D_tilde_T / 2
y_w = np.expand_dims(y_updated, 0) * np.expand_dims(W_new, 2)
y_w[np.arange(batch_size), selection] = 0
y_w = y_w.sum(1)
y_updated[selection] = (y_original[selection] + args.lambda_GLIF_NRW * avg_degree * y_w) / \
(1 + args.lambda_GLIF_NRW * avg_degree *
(1 - np.expand_dims(W_new[np.arange(batch_size), selection], 1)))
y_updated = torch.tensor(y_updated).to(device)
t_e = time.time()
print('Time [s]: {:.3f}'.format(t_e - t_s))
if args.dataset == 'sentiment':
names_logits = y_updated[num_test_samples:]
names_logits = names_logits[:, 1] - names_logits[:, 0]
eval_model(y_updated[:num_test_samples], names_logits, name='coo_GLIF_NRW_tau_{}'.format(tau))
elif args.dataset == 'bios':
eval_model(y_updated[:num_test_samples], y_updated[num_test_samples:],
name='coo_GLIF_NRW_tau_{}'.format(tau))
elif args.dataset == 'toxicity':
pred_for_fairness = y_updated[num_test_samples1:]
pred_for_fairness = pred_for_fairness.reshape(51, X_test_counter.shape[1], 2)
eval_model(y_updated[:num_test_samples], pred_for_fairness,
name='coo_GLIF_NRW_tau_{}'.format(tau))
else:
raise NotImplementedError(args.dataset)
|
{"hexsha": "4c09672e7eab57c0661f3d21d626295ccf65385c", "size": 30020, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_coordinate_descent.py", "max_stars_repo_name": "Felix-Petersen/fairness-post-processing", "max_stars_repo_head_hexsha": "6c23ed4976544df83d67c0d79feb70a5fff92027", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-16T01:20:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T21:18:11.000Z", "max_issues_repo_path": "run_coordinate_descent.py", "max_issues_repo_name": "Felix-Petersen/fairness-post-processing", "max_issues_repo_head_hexsha": "6c23ed4976544df83d67c0d79feb70a5fff92027", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-16T01:23:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-16T02:19:33.000Z", "max_forks_repo_path": "run_coordinate_descent.py", "max_forks_repo_name": "Felix-Petersen/fairness-post-processing", "max_forks_repo_head_hexsha": "6c23ed4976544df83d67c0d79feb70a5fff92027", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1428571429, "max_line_length": 164, "alphanum_fraction": 0.5421385743, "include": true, "reason": "import numpy", "num_tokens": 6645}
|
from flask import Blueprint, Response, request, send_file
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
from io import BytesIO
from .db import engine
from .auth import checkAuth
bp = Blueprint('expenses', __name__, url_prefix='/api/expenses')
def format_numbers(x):
    """Render a numeric value as a string with exactly two decimal places."""
    return format(x, ".2f")
def get_expenses(start_date, end_date):
    """Fetch expenses with start_date < Date < end_date (both exclusive).

    Joins in the vendor, category, and person display names, and replaces
    underscores in category names with spaces for display.

    Returns:
        pandas.DataFrame indexed by 'Date'.
    """
    sql = "SELECT entry_id, person_id, broad_category_id, narrow_category_id, vendor_id, Date, v.name AS Vendor, Amount, b.name AS Broad_category, n.name AS Narrow_category, p.name AS Person, Notes FROM expenses e \
        LEFT JOIN vendor v ON v.id=e.vendor_id \
        LEFT JOIN broad_category b ON b.id=e.broad_category_id \
        LEFT JOIN person_earner p ON p.id=e.person_id \
        LEFT JOIN narrow_category n ON n.id=e.narrow_category_id \
        WHERE date > %s AND date < %s \
        ORDER BY date;"
    # Fix: parse_dates matches DataFrame column names case-sensitively, and the
    # query aliases the column as 'Date' ('date' silently skipped parsing;
    # expenses_file below already uses the correct casing).
    EXP_report = pd.read_sql(sql, con=engine, params=[start_date, end_date], parse_dates=['Date'])
    EXP_report['Broad_category'] = EXP_report['Broad_category'].str.replace('_', ' ')
    EXP_report['Narrow_category'] = EXP_report['Narrow_category'].str.replace('_', ' ')
    EXP_report.set_index('Date', inplace=True)
    return EXP_report
# Get expenses by month
@bp.route("/<year>/<month>")
def api_expenses(year, month):
    """Return all expenses for one month as JSON in 'table' orient.

    The query window is exclusive on both ends, so the bounds are the last
    day of the previous month and the first day of the following month.
    """
    if not checkAuth(request):
        return Response("Nice Try!", status=401)
    first_of_month = datetime.strptime(year + "-" + month, '%Y-%m')
    lower_bound = (first_of_month - timedelta(days=1)).date()
    upper_bound = (first_of_month + relativedelta(months=+1)).date()
    report = get_expenses(lower_bound, upper_bound)
    return report.to_json(orient="table")
# Get xlsx file with all expenses and income
@bp.route("/file/<start>/<end>") # Dates formatted '%Y-%m-%d'
def expenses_file(start, end):
    """Build and send an .xlsx workbook with an 'Expenses' and an 'Income'
    sheet, both covering the inclusive date range [start, end].
    """
    validToken = checkAuth(request)
    if not validToken:
        return Response("Nice Try!", status=401)
    else:
        start_date = datetime.strptime(start, '%Y-%m-%d')
        end_date = datetime.strptime(end, '%Y-%m-%d')
        # Get Expenses
        sql = "SELECT entry_id, person_id, broad_category_id, narrow_category_id, vendor_id, Date, v.name AS Vendor, Amount, b.name AS Broad_category, n.name AS Narrow_category, p.name AS Person, Notes FROM expenses e \
            LEFT JOIN vendor v ON v.id=e.vendor_id \
            LEFT JOIN broad_category b ON b.id=e.broad_category_id \
            LEFT JOIN person_earner p ON p.id=e.person_id \
            LEFT JOIN narrow_category n ON n.id=e.narrow_category_id \
            WHERE Date >= %s AND Date <= %s \
            ORDER BY Date;"
        EXP_report = pd.read_sql(sql, con=engine, params=[start_date, end_date], parse_dates=['Date'])
        EXP_report['Broad_category'] = EXP_report['Broad_category'].str.replace('_', ' ')
        EXP_report['Narrow_category'] = EXP_report['Narrow_category'].str.replace('_', ' ')
        EXP_report['Date'] = EXP_report['Date'].dt.strftime("%m/%d/%Y")
        # Drop the raw foreign-key columns; the sheet shows display names only.
        drop_columns = [c for c in EXP_report.columns if c[-3:] == '_id']
        EXP_report.drop(columns=drop_columns, inplace=True)
        EXP_report.columns = EXP_report.columns.str.title()
        EXP_report.set_index('Date', inplace=True)
        EXP_report.columns = EXP_report.columns.str.replace("_", " ")
        # Get Income
        INC_sql = "SELECT i.id, i.source_id, i.earner_id as person_id, Date, Amount, s.name AS Source, p.name AS Person\
            FROM income i\
            LEFT JOIN source s ON s.id=i.source_id\
            LEFT JOIN person_earner p ON p.id=i.earner_id\
            WHERE Date >= %s AND Date <= %s\
            ORDER BY Date;"
        INC_report = pd.read_sql(INC_sql, con=engine, params=[start_date, end_date], parse_dates=['Date'])
        INC_report['Date'] = INC_report['Date'].dt.strftime("%m/%d/%Y")
        drop_columns = [c for c in INC_report.columns if c[-2:] == 'id']
        INC_report.drop(columns=drop_columns, inplace=True)
        INC_report.columns = INC_report.columns.str.title()
        INC_report.set_index('Date', inplace=True)
        # Write to file
        # The workbook is built entirely in memory; nothing touches disk.
        buffer = BytesIO()
        writer = pd.ExcelWriter(buffer, engine='xlsxwriter')
        title_format = writer.book.add_format({'bold': True, 'font_size': 20})
        num_format = writer.book.add_format({'num_format': '$#,##0.00'})
        # startrow=2 leaves room for the large title written at the top.
        EXP_report.to_excel(writer, sheet_name='Expenses', startcol = 0, startrow = 2)
        INC_report.to_excel(writer, sheet_name='Income', startcol = 0, startrow = 2)
        # Styling Expenses
        all_expenses = writer.sheets['Expenses']
        all_expenses.set_column('A:G', 18)
        all_expenses.set_row(0, 30)
        all_expenses.set_column('C:C', None, num_format)
        all_expenses.write_string(0, 0, 'Expenses', title_format)
        # Styling Income
        income = writer.sheets['Income']
        income.set_column('A:G', 18)
        income.set_row(0, 30)
        income.set_column('B:B', None, num_format)
        income.write_string(0, 0, 'Income', title_format)
        # Send File
        # NOTE(review): writer.save() and send_file's attachment_filename are
        # deprecated in newer pandas/Flask (writer.close() / download_name) —
        # confirm the pinned versions before upgrading.
        writer.save()
        buffer.seek(0)
        return send_file(buffer, attachment_filename="reports.xlsx", cache_timeout=0)
# Used by post_expense and post_expenses_batch
def insert_expense(json):
    """Insert one expense row, creating its vendor on the fly if needed.

    Expected keys in `json`: 'Date' ('%m/%d/%Y'), 'Amount', 'person_id',
    'broad_category_id', 'narrow_category_id', 'vendor' (a name, not an id),
    and 'notes'.
    """
    # Normalize the incoming date to MySQL's '%Y-%m-%d'.
    date = datetime.strptime(json['Date'], "%m/%d/%Y").strftime("%Y-%m-%d")
    # Falsy values (empty string, 0) are coerced to None so they land as SQL NULL.
    amount = json['Amount'] or None
    person = json['person_id'] or None
    b_cat = json['broad_category_id'] or None
    n_cat = json['narrow_category_id'] or None
    vendor = json['vendor'] or None
    notes = json['notes']
    with engine.connect() as con:
        # Create the vendor if it doesn't exist yet, then look up its id.
        insert_vendor_sql = "INSERT IGNORE INTO vendor(name) VALUES(%s)"
        con.execute(insert_vendor_sql, [vendor])
        vendor_id = con.execute("SELECT id FROM vendor WHERE name=%s", [vendor]).fetchone()[0]
        sql = "INSERT INTO expenses(date, vendor_id, amount, broad_category_id, narrow_category_id, person_id, notes)\
            VALUES(DATE(%s), %s, %s, %s, %s, %s, %s)"
        con.execute(sql, [date, vendor_id, amount, b_cat, n_cat, person, notes])
# Create Expense
@bp.route("/", methods=["POST"])
def post_expense():
    """Create a single expense record from the JSON request body."""
    payload = request.get_json()
    authorized = checkAuth(request)
    print("JSON: ", payload)
    if not authorized:
        return Response("Nice Try!", status=401)
    insert_expense(payload)
    return Response('Record Inserted!', status=200)
# Load in batch of expenses
@bp.route("/batch", methods=["POST"])
def post_batch_expense():
    """Insert every expense record in a JSON array, one row at a time."""
    rows = request.get_json()
    authorized = checkAuth(request)
    print("JSON: ", rows)
    if not authorized:
        return Response("Nice Try!", status=401)
    for entry in rows:
        insert_expense(entry)
    return Response('Records Inserted!', status=200)
# Edit expenses
@bp.route("/<int:id>", methods=['PUT'])
def update_expenses(id):
    """Overwrite every column of the expense row with entry_id == id.

    Expects a JSON body with 'Date' ('%m/%d/%Y'), 'Amount', 'person_id',
    'broad_category_id', 'narrow_category_id', 'vendor_id', and 'Notes'.
    """
    validToken = checkAuth(request)
    if not validToken:
        return Response("Nice Try!", status=401)
    else:
        json = request.get_json()
        # Parse dates
        date = datetime.strptime(json['Date'], "%m/%d/%Y").strftime("%Y-%m-%d")
        # Convert any null values
        # Falsy values (empty string, 0) are coerced to None -> SQL NULL.
        amount = json['Amount'] or None
        person = json['person_id'] or None
        b_cat = json['broad_category_id'] or None
        n_cat = json['narrow_category_id'] or None
        # NOTE(review): reads 'vendor_id' here while insert_expense reads
        # 'vendor' (a name) — confirm the client sends an id on update.
        vendor = json['vendor_id'] or None
        notes = json['Notes']
        sql = "UPDATE expenses \
            SET date=DATE(%s), vendor_id=%s, \
            amount=%s, broad_category_id=%s, \
            narrow_category_id=%s, person_id=%s, \
            notes=%s\
            WHERE entry_id=%s;"
        engine.connect().execute(sql, [date, vendor, amount, b_cat, n_cat, person, notes, id])
        return Response(f'id: {id} Updated', status=200)
# Delete expenses
@bp.route("/<int:id>", methods=['DELETE'])
def delete_expenses(id):
    """Delete the expense row whose entry_id matches the URL parameter."""
    if not checkAuth(request):
        return Response("Nice Try!", status=401)
    engine.connect().execute("DELETE FROM expenses WHERE entry_id=%s;", [id])
    return Response(f'id: {id} Deleted', status=200)
# Search Expenses
# Leading slash added to the rule for consistency with the other routes;
# modern Flask joins it with the blueprint url_prefix the same way.
@bp.route("/search/<string:param>", methods=['GET'])
def search_expenses(param):
    """Substring-search expenses by vendor, category names, or notes.

    Returns all matching rows (unbounded by date) as JSON in 'table'
    orient, indexed by 'Date'.
    """
    if not checkAuth(request):
        return Response("Nice Try!", status=401)
    sql = "SELECT entry_id, person_id, broad_category_id, narrow_category_id, vendor_id, Date, v.name AS Vendor, Amount, b.name AS Broad_category, n.name AS Narrow_category, p.name AS Person, Notes FROM expenses e \
        LEFT JOIN vendor v ON v.id=e.vendor_id \
        LEFT JOIN broad_category b ON b.id=e.broad_category_id \
        LEFT JOIN person_earner p ON p.id=e.person_id \
        LEFT JOIN narrow_category n ON n.id=e.narrow_category_id \
        WHERE v.name LIKE CONCAT('%%', %s, '%%') OR b.name LIKE CONCAT('%%', %s, '%%') OR n.name LIKE CONCAT('%%', %s, '%%') OR e.Notes LIKE CONCAT('%%', %s, '%%') \
        ORDER BY date;"
    # Fix: the selected column is aliased 'Date' and parse_dates is
    # case-sensitive, so 'date' never actually parsed the column.
    search_report = pd.read_sql(sql, con=engine, params=[param, param, param, param], parse_dates=['Date'])
    search_report['Broad_category'] = search_report['Broad_category'].str.replace('_', ' ')
    search_report['Narrow_category'] = search_report['Narrow_category'].str.replace('_', ' ')
    search_report.set_index('Date', inplace=True)
    return search_report.to_json(orient="table")
# Return Pivot Table
@bp.route("/pivot/<year>/<month>")
def api_pivot(year, month):
    """Return a month's expenses pivoted by (broad, narrow) category.

    Each broad category also gets an 'x----TOTAL' pseudo-row holding its
    subtotal; the name is chosen to sort after the real narrow categories.
    Amounts are formatted to two decimal places as strings.
    """
    if not checkAuth(request):
        return Response("Nice Try!", status=401)
    year_month = year + "-" + month
    month = datetime.strptime(year_month, '%Y-%m')
    # Exclusive bounds: last day of the previous month / first of the next.
    start_date = (month - timedelta(days=1)).date()
    end_date = (month + relativedelta(months=+1)).date()
    sql = "SELECT Date, v.name AS Vendor, Amount, b.name AS Broad_category, n.name AS Narrow_category, p.name AS Person, Notes FROM expenses e \
        LEFT JOIN vendor v ON v.id=e.vendor_id \
        LEFT JOIN broad_category b ON b.id=e.broad_category_id \
        LEFT JOIN person_earner p ON p.id=e.person_id \
        LEFT JOIN narrow_category n ON n.id=e.narrow_category_id \
        WHERE date > %s AND date < %s;"
    # Fix: parse_dates is case-sensitive and the column is aliased 'Date'.
    EXP_dataframe = pd.read_sql(sql, con=engine, params=[start_date, end_date], parse_dates=['Date'])
    EXP_dataframe['Broad_category'] = EXP_dataframe['Broad_category'].str.replace('_', ' ')
    EXP_dataframe['Narrow_category'] = EXP_dataframe['Narrow_category'].str.replace('_', ' ')
    # Per-(broad, narrow) totals plus per-broad subtotals, merged and sorted.
    PT_report = pd.pivot_table(EXP_dataframe, values='Amount', index=['Broad_category', 'Narrow_category'], aggfunc=np.sum)
    PT_report_broad = pd.pivot_table(EXP_dataframe, values='Amount', index='Broad_category', aggfunc=np.sum)
    PT_report_broad.index = pd.MultiIndex.from_product([PT_report_broad.index, ['x----TOTAL']], names=['Broad_category', 'Narrow_category'])
    PT_report = pd.concat([PT_report, PT_report_broad]).sort_index()
    PT_report['Amount'] = PT_report['Amount'].apply(format_numbers)
    return PT_report.to_json(orient="table")
|
{"hexsha": "070734ec21ac7e5ba453c7e3cdca25829ddab412", "size": 11873, "ext": "py", "lang": "Python", "max_stars_repo_path": "flaskr/expenses.py", "max_stars_repo_name": "elijah415hz/finances-flask", "max_stars_repo_head_hexsha": "a74f09b7c51fa3f08d88540e75bfffd6ca840563", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flaskr/expenses.py", "max_issues_repo_name": "elijah415hz/finances-flask", "max_issues_repo_head_hexsha": "a74f09b7c51fa3f08d88540e75bfffd6ca840563", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flaskr/expenses.py", "max_forks_repo_name": "elijah415hz/finances-flask", "max_forks_repo_head_hexsha": "a74f09b7c51fa3f08d88540e75bfffd6ca840563", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0970464135, "max_line_length": 220, "alphanum_fraction": 0.6198096522, "include": true, "reason": "import numpy", "num_tokens": 2828}
|
#! /usr/bin/env python
import numpy as np
import gc
import matplotlib.pyplot as plt
from random import seed, sample, randint
from ransac import LineModel, ransac
from time import time
# Benchmark configuration.
random_seed = 0        # seed for Python's `random` module, applied in setup()
num_iterations = 100   # number of timed ransac() calls in run()
num_samples = 1000     # points in the synthetic y = 2x dataset
noise_ratio = 0.8      # fraction of samples replaced with uniform noise
num_noise = int(noise_ratio * num_samples)
def setup():
    """Build a noisy synthetic line dataset (y = 2x) and the line model.

    Replaces `noise_ratio` of the Y values, at random positions, with
    uniform random noise; stores the (x, y) pairs in the module global
    `data` and a fresh LineModel in `model`; plots the raw points.
    """
    global data, model
    seed(random_seed)
    X = np.asarray(range(num_samples))
    Y = 2 * X
    # `range` instead of Python-2-only `xrange`: identical behavior here,
    # and the script now runs under both Python 2 and 3.
    noise = [randint(0, 2 * (num_samples - 1)) for i in range(num_noise)]
    Y[sample(range(len(Y)), num_noise)] = noise
    data = np.asarray([X, Y]).T
    model = LineModel()
    plt.plot(X, Y, 'bx')
def run():
    """Time `num_iterations` ransac() calls over the global dataset.

    Stores the last successful fit in the globals `params`/`residual`
    (None if every iteration failed, so summary() can report cleanly)
    and the mean wall-clock time per call in `mean_time`.
    """
    global params, residual, mean_time
    # Initialize so summary() never hits a NameError when ransac() raises
    # ValueError on every iteration.
    params = None
    residual = None
    gc.disable()  # keep collector pauses out of the timing loop
    try:
        start_time = time()
        for i in range(num_iterations):  # `range` works on Python 2 and 3
            try:
                (params, inliers, residual) = ransac(data, model, 2, (1 - noise_ratio) * num_samples)
            except ValueError:
                # ransac found no acceptable fit this iteration; keep timing.
                pass
        end_time = time()
        mean_time = (end_time - start_time) / num_iterations
    finally:
        # Re-enable collection even if ransac raises something unexpected.
        gc.enable()
def summary():
    """Print the fitted parameters, residual, and mean timing; plot the fit.

    Uses print() calls (valid on both Python 2 and 3, unlike the former
    print statements) and an explicit None check, which is unambiguous
    even if `params` is a sequence/array.
    """
    if params is not None:
        print(' Parameters '.center(40, '='))
        print(params)
        print(' Residual '.center(40, '='))
        print(residual)
        print(' Time '.center(40, '='))
        print('%.1f msecs mean time spent per call' % (1000 * mean_time))
        X = np.asarray([0, num_samples - 1])
        Y = params[0] * X + params[1]
        plt.plot(X, Y, 'k-')
    else:
        print('RANSAC failed to find a sufficiently good fit for the data.')
    plt.show()
if __name__ == '__main__':
    # Run the benchmark end-to-end: build data, time RANSAC, report results.
    setup()
    run()
    summary()
|
{"hexsha": "dd5a89069494ad6a57081f54dc38bbf75eb3a79e", "size": 1575, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "FredrikAppelros/ransac", "max_stars_repo_head_hexsha": "1ce13a963e2fc6d3e86edf2b0e3f6ef22ceec177", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-04-28T03:32:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-03T13:30:25.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "FredrikAppelros/ransac", "max_issues_repo_head_hexsha": "1ce13a963e2fc6d3e86edf2b0e3f6ef22ceec177", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "FredrikAppelros/ransac", "max_forks_repo_head_hexsha": "1ce13a963e2fc6d3e86edf2b0e3f6ef22ceec177", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-04-28T03:32:34.000Z", "max_forks_repo_forks_event_max_datetime": "2016-04-28T03:32:34.000Z", "avg_line_length": 24.609375, "max_line_length": 97, "alphanum_fraction": 0.6006349206, "include": true, "reason": "import numpy", "num_tokens": 436}
|
[STATEMENT]
lemma square_part_square_dvd [simp, intro]: "square_part n ^ 2 dvd n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (square_part n)\<^sup>2 dvd n
[PROOF STEP]
proof (cases "n = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. n = (0::'a) \<Longrightarrow> (square_part n)\<^sup>2 dvd n
2. n \<noteq> (0::'a) \<Longrightarrow> (square_part n)\<^sup>2 dvd n
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
n \<noteq> (0::'a)
goal (2 subgoals):
1. n = (0::'a) \<Longrightarrow> (square_part n)\<^sup>2 dvd n
2. n \<noteq> (0::'a) \<Longrightarrow> (square_part n)\<^sup>2 dvd n
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
n \<noteq> (0::'a)
goal (1 subgoal):
1. (square_part n)\<^sup>2 dvd n
[PROOF STEP]
by (intro multiplicity_le_imp_dvd)
(auto simp: prime_multiplicity_square_part prime_elem_multiplicity_power_distrib)
[PROOF STATE]
proof (state)
this:
(square_part n)\<^sup>2 dvd n
goal (1 subgoal):
1. n = (0::'a) \<Longrightarrow> (square_part n)\<^sup>2 dvd n
[PROOF STEP]
qed auto
|
{"llama_tokens": 472, "file": null, "length": 5}
|
[STATEMENT]
lemma S5n_common_knowledge_induct:
assumes S5n: "S5n M"
assumes w: "w \<in> worlds M"
assumes E: "\<forall>a \<in> set as. \<forall>w \<in> worlds M.
M, w \<Turnstile> \<phi> \<longrightarrow> M, w \<Turnstile> \<^bold>K\<^sub>a (Kand \<phi> \<psi>)"
assumes p: "M, w \<Turnstile> \<phi>"
shows "M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
fix w'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
assume ww': "(w, w') \<in> (\<Union>x\<in>set as. relations M x)\<^sup>+"
[PROOF STATE]
proof (state)
this:
(w, w') \<in> (\<Union> (relations M ` set as))\<^sup>+
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
from ww' S5n E p w
[PROOF STATE]
proof (chain)
picking this:
(w, w') \<in> (\<Union> (relations M ` set as))\<^sup>+
S5n M
\<forall>a\<in>set as. \<forall>w\<in>worlds M. M, w \<Turnstile> \<phi> \<longrightarrow> M, w \<Turnstile> \<^bold>K\<^sub>a Kand \<phi> \<psi>
M, w \<Turnstile> \<phi>
w \<in> worlds M
[PROOF STEP]
have "M, w' \<Turnstile> Kand \<phi> \<psi>"
[PROOF STATE]
proof (prove)
using this:
(w, w') \<in> (\<Union> (relations M ` set as))\<^sup>+
S5n M
\<forall>a\<in>set as. \<forall>w\<in>worlds M. M, w \<Turnstile> \<phi> \<longrightarrow> M, w \<Turnstile> \<^bold>K\<^sub>a Kand \<phi> \<psi>
M, w \<Turnstile> \<phi>
w \<in> worlds M
goal (1 subgoal):
1. M, w' \<Turnstile> Kand \<phi> \<psi>
[PROOF STEP]
by ( induct rule: trancl_induct
, simp_all, blast+)
[PROOF STATE]
proof (state)
this:
M, w' \<Turnstile> Kand \<phi> \<psi>
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
(w, ?w'2) \<in> (\<Union> (relations M ` set as))\<^sup>+ \<Longrightarrow> M, ?w'2 \<Turnstile> Kand \<phi> \<psi>
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
(w, ?w'2) \<in> (\<Union> (relations M ` set as))\<^sup>+ \<Longrightarrow> M, ?w'2 \<Turnstile> Kand \<phi> \<psi>
goal (1 subgoal):
1. M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
M, w \<Turnstile> \<^bold>C\<^bsub>as\<^esub> \<psi>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1291, "file": "KBPs_Kripke", "length": 12}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 06 19:09:58 2014
@author: Morten
"""
import os
from pathlib import Path
import pytest
import numpy as np
from anypytools.tools import (
array2anyscript,
get_anybodycon_path,
define2str,
path2str,
AnyPyProcessOutput,
AnyPyProcessOutputList,
)
@pytest.fixture(scope="module")
def fixture():
    """Module-scoped fixture.

    ``pytest.yield_fixture`` is deprecated (and removed in modern pytest);
    a plain ``pytest.fixture`` has supported ``yield`` since pytest 3.0.
    """
    yield True
def test_define2str():
    """define2str renders (name, value) pairs as AnyBody ``-def`` arguments."""
    result = define2str("test", 2)
    assert result == '-def test="2"'
    result = define2str("Test", "Main.MyStudy")
    assert result == '-def Test="Main.MyStudy"'
    # Embedded quotes get escaped and the value is wrapped in ---"..."
    result = define2str("test", '"This is a string"')
    assert result == '-def test=---"\\"This is a string\\""'
def test_path2str():
    """path2str renders (name, path) pairs as AnyBody ``-p`` arguments."""
    backslash_case = path2str("test", "C:\hallo.txt")
    assert backslash_case == '-p test=---"C:\\\\hallo.txt"'
    forward_case = path2str("Test", "C:/hallo.txt")
    assert forward_case == '-p Test=---"C:/hallo.txt"'
def test_array2anyscript():
    """array2anyscript converts numpy arrays to AnyScript literals."""
    cases = [
        # 3x3 identity matrix
        (np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), "{{1,0,0},{0,1,0},{0,0,1}}"),
        # row vector
        (np.array([[1, 0, 0]]), "{{1,0,0}}"),
        # column vector
        (np.array([[1], [0], [0]]), "{{1},{0},{0}}"),
        # flat float vector keeps full precision, trailing .0 dropped
        (np.array([0.333333333, -1.9999999999, 0.0]), "{0.333333333,-1.9999999999,0}"),
        # string arrays become quoted lists
        (np.array(["hallo", "world"]), '{"hallo","world"}'),
    ]
    for arr, expected in cases:
        assert array2anyscript(arr) == expected
def test_AnyPyProcessOutput():
    """Indexing, slicing, and key access on AnyPyProcessOutputList."""
    entries = [AnyPyProcessOutput({"AAAA": i}) for i in range(1, 6)]
    entries.append(AnyPyProcessOutput({"AAAA": 6, "ERROR": 0}))
    out = AnyPyProcessOutputList(entries)
    assert len(out) == 6
    assert isinstance(out[0], AnyPyProcessOutput)
    # Slicing returns a list-like object of the same element type.
    assert len(out[1:3]) == 2
    assert isinstance(out[0:2][0], AnyPyProcessOutput)
    # Slice assignment can filter out failed runs in place.
    out[:] = [e for e in out if "ERROR" not in e]
    assert len(out) == 5
    # String-key access yields one value per remaining run as an ndarray.
    assert isinstance(out["A"], np.ndarray)
    assert out["A"].shape == (5,)
def test_AnyPyProcessOutput_to_dataframe():
    """Conversion of a single result dict to a pandas DataFrame."""
    time_steps = 6
    data = {
        "Output.Abscissa.t": np.linspace(0, 1, time_steps),
        "int": 8,
        "float": 0.38,
        "str": "Hello world",
        "one_dim_data": np.ones(time_steps),
        "three_dim_data": np.ones((time_steps, 3)),
        "StringArray": np.array("Hello world"),
        "speciel_length": np.arange(5),
    }
    anypydata = AnyPyProcessOutput(data)
    # Default: index on the time abscissa (6 rows).
    assert anypydata.to_dataframe().shape == (6, 14)
    # Explicit index variable with a different length (5 rows).
    assert anypydata.to_dataframe(index_var="speciel_length").shape == (5, 35)
    # No index variable: everything flattened onto a single row.
    assert anypydata.to_dataframe(index_var=None).shape == (1, 39)
def test_AnyPyProcessOutputList_to_dataframe():
    """Concatenation of several simulation results into one DataFrame."""
    time_len = 6
    no_simulations = 10
    single_result = {
        "Output.Abscissa.t": np.linspace(0, 1, time_len),
        "three_dim_data": np.ones((time_len, 3)),
    }
    results = [AnyPyProcessOutput(single_result) for _ in range(no_simulations)]
    appl = AnyPyProcessOutputList(results)
    # One row per (simulation, time step).
    assert appl.to_dataframe().shape == (no_simulations * time_len, 4)
def test_get_anybodycon_path():
    """The resolved AnyBody console path must exist on disk."""
    console_path = get_anybodycon_path()
    assert os.path.exists(console_path)
if __name__ == "__main__":
    # Run a single test directly for quick debugging, anchored at this
    # file's directory so the node id resolves regardless of the CWD.
    os.chdir(Path(__file__).parent)
    # The literal is already a str; the former str(...) wrapper was redundant.
    pytest.main(["test_tools.py::test_AnyPyProcessOutputList_to_dataframe"])
|
{"hexsha": "9516c1eaddf679dc2180e3b53cf92d288e7486e1", "size": 3404, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_tools.py", "max_stars_repo_name": "AnyBody-Research-Group/AnyPyTools", "max_stars_repo_head_hexsha": "042bc454122e9f8e03d96e1ec9e46be41a2dd5c8", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2015-04-15T17:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T03:51:40.000Z", "max_issues_repo_path": "tests/test_tools.py", "max_issues_repo_name": "AnyBody-Research-Group/AnyPyTools", "max_issues_repo_head_hexsha": "042bc454122e9f8e03d96e1ec9e46be41a2dd5c8", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2015-04-08T08:52:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-09T08:57:18.000Z", "max_forks_repo_path": "tests/test_tools.py", "max_forks_repo_name": "AnyBody-Research-Group/AnyPyTools", "max_forks_repo_head_hexsha": "042bc454122e9f8e03d96e1ec9e46be41a2dd5c8", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2015-02-03T09:03:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-27T01:53:37.000Z", "avg_line_length": 26.1846153846, "max_line_length": 81, "alphanum_fraction": 0.6016451234, "include": true, "reason": "import numpy", "num_tokens": 1049}
|
"""Test for kernel functionality."""
import functools
import jax
import jax.numpy as jnp
import pytest
import pytest_cases
from probfindiff.utils import autodiff, kernel, kernel_zoo
def case_exponentiated_quadratic():
    """Hand-written exponentiated-quadratic kernel, batched via batch_gram."""
    def k(x, y):
        diff = x - y
        return jnp.exp(-diff.dot(diff))

    batched, *_ = kernel.batch_gram(k)
    return batched
def case_exponentiated_quadratic_builtin():
    """Library-provided exponentiated-quadratic kernel, batched."""
    batched, *_ = kernel.batch_gram(kernel_zoo.exponentiated_quadratic)
    return batched
def _differentiated_quadratic():
    """Differentiate the squared-distance kernel once; returns the triple
    (k, Lk, LLk) from kernel.differentiate.

    Shared by the three case functions below, which previously triplicated
    this setup. The leading underscore keeps it out of pytest_cases'
    ``case_``-prefixed collection.
    """
    k = lambda x, y: (x - y).dot(x - y)
    return kernel.differentiate(k, L=autodiff.derivative)


def case_differentiate_0():
    """Batched kernel itself (first element of the triple)."""
    return _differentiated_quadratic()[0]


def case_differentiate_1():
    """Kernel differentiated once (second element)."""
    return _differentiated_quadratic()[1]


def case_differentiate_2():
    """Kernel differentiated twice (third element)."""
    return _differentiated_quadratic()[2]
def case_polynomial_builtin():
    """Polynomial kernel with fixed unit coefficients, batched."""
    poly_kernel = functools.partial(kernel_zoo.polynomial, p=jnp.ones((3,)))
    batched, *_ = kernel.batch_gram(poly_kernel)
    return batched
@pytest_cases.parametrize_with_cases("k", cases=".")
def test_vectorize_gram_shapes(k):
    """A batched Gram matrix of (4, d) and (6, d) point sets is 4 x 6."""
    left = jnp.arange(8.0).reshape((4, 2))
    right = jnp.arange(12.0).reshape((6, 2))
    gram = k(left, right.T)
    assert gram.shape == (4, 6)
@pytest.mark.parametrize("L, d, diffop_shape", ([jax.jacfwd, 2, (2,)],))
def test_kernel_batch_shape(L, d, diffop_shape):
    """Differentiated kernels prepend the operator shape to the Gram shape."""
    k = kernel_zoo.exponentiated_quadratic
    k_batch, lk_batch, llk_batch = kernel.differentiate(k, L=L)

    num_xs, num_ys = 4, 3
    xs = jnp.arange(1, 1 + d * num_xs, dtype=float).reshape((num_xs, d))
    ys = jnp.arange(1, 1 + d * num_ys, dtype=float).reshape((num_ys, d))

    gram_shape = (num_xs, num_ys)
    assert k_batch(xs, ys.T).shape == gram_shape
    assert lk_batch(xs, ys.T).shape == diffop_shape + gram_shape
    assert llk_batch(xs, ys.T).shape == diffop_shape + diffop_shape + gram_shape
|
{"hexsha": "d066822dc351a3f259080f5f3b9244114f120d9d", "size": 1786, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_utils/test_kernel.py", "max_stars_repo_name": "schmidtjonathan/probfindiff", "max_stars_repo_head_hexsha": "8d7e3fc991d8589ca70dacac9c01ab93d185dd82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-07T18:09:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T07:17:41.000Z", "max_issues_repo_path": "tests/test_utils/test_kernel.py", "max_issues_repo_name": "schmidtjonathan/probfindiff", "max_issues_repo_head_hexsha": "8d7e3fc991d8589ca70dacac9c01ab93d185dd82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-07T09:12:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T07:20:04.000Z", "max_forks_repo_path": "tests/test_utils/test_kernel.py", "max_forks_repo_name": "schmidtjonathan/probfindiff", "max_forks_repo_head_hexsha": "8d7e3fc991d8589ca70dacac9c01ab93d185dd82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2022-02-08T15:10:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T15:05:02.000Z", "avg_line_length": 27.90625, "max_line_length": 77, "alphanum_fraction": 0.6842105263, "include": true, "reason": "import jax", "num_tokens": 554}
|
[STATEMENT]
lemma "r = \<lparr>xpos = xpos r, ypos = ypos r\<rparr>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r = \<lparr>xpos = xpos r, ypos = ypos r\<rparr>
[PROOF STEP]
by simp
|
{"llama_tokens": 92, "file": null, "length": 1}
|
(*
  Author(s):
    Andrej Dudenhefner (1)
  Affiliation(s):
    (1) Saarland University, Saarbrücken, Germany
  Related Work:
  [1] Giannini, Paola, and Simona Ronchi Della Rocca.
      "Characterization of typings in polymorphic type discipline."
      Proceedings Third Annual Symposium on Logic in Computer Science. IEEE Computer Society, 1988.
*)
Require Import List Lia Relation_Definitions Relation_Operators Operators_Properties.
Import ListNotations.
Require Import Undecidability.SystemF.SysF Undecidability.SystemF.Autosubst.syntax Undecidability.SystemF.Autosubst.unscoped.
Import UnscopedNotations.
From Undecidability.SystemF.Util Require Import Facts poly_type_facts pure_term_facts term_facts typing_facts iipc2_facts.
Require Import ssreflect ssrbool ssrfun.
Set Default Goal Selector "!".
Arguments funcomp {X Y Z} _ _ / _.
Arguments fresh_in _ _ /.
(* ∀x.t contains_step t[x:= s] *)
Inductive contains_step : poly_type -> poly_type -> Prop :=
  | contains_step_subst {s t} : contains_step (poly_abs t) (subst_poly_type (scons s poly_var) t).

(* reflexive-transitive closure of contains_step:
   t contains t' iff t' arises from t by repeatedly instantiating leading quantifiers *)
Definition contains := clos_refl_trans poly_type contains_step.

(*
  system F type erased derivability predicate
  cf. containment type assignment [1]
*)
Inductive pure_typing : environment -> pure_term -> poly_type -> Prop :=
  (* variable: n fresh quantifiers are introduced, the looked-up type is instantiated *)
  | pure_typing_pure_var n {Gamma x t t'} :
      nth_error (map (ren_poly_type (Nat.add n)) Gamma) x = Some t ->
      contains t t' ->
      pure_typing Gamma (pure_var x) (many_poly_abs n t')
  (* application under n fresh quantifiers; the result type may be instantiated *)
  | pure_typing_pure_app n {Gamma M N s t t'} :
      pure_typing (map (ren_poly_type (Nat.add n)) Gamma) M (poly_arr s t) ->
      pure_typing (map (ren_poly_type (Nat.add n)) Gamma) N s ->
      contains t t' ->
      pure_typing Gamma (pure_app M N) (many_poly_abs n t')
  (* abstraction under n fresh quantifiers *)
  | pure_typing_pure_abs n {Gamma M s t} :
      pure_typing (s :: map (ren_poly_type (Nat.add n)) (Gamma)) M t ->
      pure_typing Gamma (pure_abs M) (many_poly_abs n (poly_arr s t)).

(* a pure term is typable if it has some type *)
Definition pure_typable Gamma M := exists t, pure_typing Gamma M t.
Arguments pure_typable : simpl never.
(* case analysis on the pure_typing predicate wrt. pure_term *)
Lemma pure_typingE {Gamma M t} : pure_typing Gamma M t ->
  match M with
  | pure_var x => exists n s t',
      nth_error (map (ren_poly_type (Nat.add n)) Gamma) x = Some s /\
      contains s t' /\ t = many_poly_abs n t'
  | pure_app M N => exists n s t' t'',
      pure_typing (map (ren_poly_type (Nat.add n)) Gamma) M (poly_arr s t') /\
      pure_typing (map (ren_poly_type (Nat.add n)) Gamma) N s /\
      contains t' t'' /\ t = many_poly_abs n t''
  | pure_abs M => exists n s t',
      pure_typing (s :: map (ren_poly_type (Nat.add n)) Gamma) M t' /\
      t = many_poly_abs n (poly_arr s t')
  end.
Proof.
  case => *; [do 3 eexists | do 4 eexists | do 3 eexists]; by eauto.
Qed.

(* case analysis on the pure_typing predicate wrt. pure_term in case t is not an abstraction;
   then the n of each typing rule must be 0, simplifying the inversion *)
Lemma pure_typingE' {Gamma M t} : pure_typing Gamma M t ->
  match M with
  | pure_var x =>
      match t with
      | poly_var _ | poly_arr _ _ =>
          exists s, nth_error Gamma x = Some s /\ contains s t
      | poly_abs _ => True
      end
  | pure_app M N =>
      match t with
      | poly_var _ | poly_arr _ _ =>
          exists s t', pure_typing Gamma M (poly_arr s t') /\ pure_typing Gamma N s /\ contains t' t
      | poly_abs _ => True
      end
  | pure_abs M =>
      match t with
      | poly_var _ => False
      | poly_arr s t => pure_typing (s :: Gamma) M t
      | poly_abs _ => True
      end
  end.
Proof.
  case: M.
  - move=> > /pure_typingE [n] [?] [?] [+] []. case: t; last done.
    all: (case: n; [rewrite map_ren_poly_type_id /=; subst | done]).
    all: (move=> *; subst; eexists; constructor; by eassumption).
  - move=> > /pure_typingE [n] [?] [?] [?] [+] [+] []. case: t; last done.
    all: (case: n; [rewrite map_ren_poly_type_id /=; subst | done]).
    all: (move=> *; subst; (do 2 eexists); constructor; [| constructor]; by eassumption).
  - move=> > /pure_typingE [n] [?] [?] []. case: t; last done.
    all: (case: n; [rewrite map_ren_poly_type_id /=; subst | done]).
    + done.
    + move=> > ? [? ?]; by subst.
Qed.
(* convenient introduction form for a single instantiation step *)
Lemma contains_stepI {s t t'}:
  t' = subst_poly_type (s .: poly_var) t -> contains_step (poly_abs t) t'.
Proof. move=> ->. by apply: contains_step_subst. Qed.

(* contains is compatible with renaming of type variables *)
Lemma contains_ren_poly_typeI ξ {s t} : contains s t -> contains (ren_poly_type ξ s) (ren_poly_type ξ t).
Proof.
  elim.
  - move=> > [] > /=. apply: rt_step. rewrite poly_type_norm /=.
    have := contains_step_subst.
    evar (s' : poly_type) => /(_ s'). evar (t' : poly_type) => /(_ t').
    congr contains_step. subst t'. rewrite poly_type_norm /=.
    apply: ext_poly_type. subst s'. by case.
  - move=> ?. by apply: rt_refl.
  - move=> *. apply: rt_trans; by eassumption.
Qed.

(* contains is compatible with substitution of type variables *)
Lemma contains_subst_poly_typeI σ {s t} : contains s t -> contains (subst_poly_type σ s) (subst_poly_type σ t).
Proof.
  elim.
  - move=> > [] > /=. apply: rt_step. rewrite poly_type_norm /=.
    have := contains_step_subst.
    evar (s' : poly_type) => /(_ s'). evar (t' : poly_type) => /(_ t').
    congr contains_step. subst t'. rewrite poly_type_norm /=.
    apply: ext_poly_type. subst s'. case; first done.
    move=> ?. by rewrite /= poly_type_norm /= subst_poly_type_poly_var.
  - move=> ?. by apply: rt_refl.
  - move=> *. apply: rt_trans; by eassumption.
Qed.

(* a shift by n on the source can be undone by subtracting n on the target *)
Lemma contains_ren_poly_type_addLR {n s t} : contains (ren_poly_type (Nat.add n) s) t ->
  contains s (ren_poly_type (fun x => x - n) t).
Proof.
  move=> /(contains_ren_poly_typeI (fun x => x - n)). congr contains.
  rewrite poly_type_norm /= ren_poly_type_id'; by [|lia].
Qed.
(* inversion principle for contains, by shape of the source type *)
Lemma containsE {t t'} : contains t t' ->
  match t with
  | poly_var _ => t = t'
  | poly_arr _ _ => t = t'
  | poly_abs t => (poly_abs t) = t' \/ exists s, contains (subst_poly_type (scons s poly_var) t) t'
  end.
Proof.
  move=> /clos_rt_rt1n_iff. case.
  - case: t; [done | done | by left].
  - move=> > [] > /clos_rt_rt1n_iff ?. right. eexists. by eassumption.
Qed.

(* contains respect subtypes *)
Lemma contains_sub {n s t t''} :
  contains (many_poly_abs n (poly_arr s t)) t'' ->
  exists n' s' t',
    t'' = many_poly_abs n' (poly_arr s' t') /\
    contains (many_poly_abs n s) (many_poly_abs n' s') /\
    contains (many_poly_abs n t) (many_poly_abs n' t').
Proof.
  move Es1: (many_poly_abs n _) => s1 /clos_rt_rt1n_iff Hs1s2.
  elim: Hs1s2 n s t Es1.
  - move=> > <-. do 3 eexists. constructor; first done.
    constructor; by apply: rt_refl.
  - move=> > [] r > _ IH [|n] > /=; first done.
    move=> [] /(congr1 (subst_poly_type (r .: poly_var))).
    rewrite subst_poly_type_many_poly_abs /= => /IH [?] [?] [?] [->].
    rewrite -?subst_poly_type_many_poly_abs => - [? ?].
    do 3 eexists. constructor; first done.
    constructor; (apply: rt_trans; [| by eassumption]); by apply /rt_step.
Qed.

(* specialization of contains_sub when the target shape is already known *)
Lemma contains_sub' {n s t n' s' t'} :
  contains (many_poly_abs n (poly_arr s t)) (many_poly_abs n' (poly_arr s' t')) ->
  contains (many_poly_abs n s) (many_poly_abs n' s') /\
  contains (many_poly_abs n t) (many_poly_abs n' t').
Proof. by move=> /contains_sub [?] [?] [?] [/many_poly_abs_eqE'] [<-] [<- <-]. Qed.

(* a typing can follow a contains chain via type applications, keeping the erasure *)
Lemma typing_contains {s t Gamma P} : contains s t -> typing Gamma P s ->
  exists Q, erase P = erase Q /\ typing Gamma Q t.
Proof.
  move=> /clos_rt_rt1n_iff => H. elim: H P.
  - move=> ? ? ?. by eexists.
  - move=> ? ? ? [] s' ? _ IH P. by move=> /typing_ty_app => /(_ s') /IH.
Qed.
(* variable rule without generalization (n = 0) or instantiation *)
Lemma pure_typing_pure_var_simpleI {Gamma x t} :
  nth_error Gamma x = Some t -> pure_typing Gamma (pure_var x) t.
Proof.
  move=> Hx. apply: (pure_typing_pure_var 0); last by apply: rt_refl.
  rewrite nth_error_map Hx /=. by rewrite ren_poly_type_id.
Qed.

(* application rule without generalization or instantiation *)
Lemma pure_typing_pure_app_simpleI {Gamma M N s t} :
  pure_typing Gamma M (poly_arr s t) -> pure_typing Gamma N s -> pure_typing Gamma (pure_app M N) t.
Proof.
  move=> HM HN. apply: (pure_typing_pure_app 0 (s := s)); last by apply: rt_refl.
  - by rewrite map_ren_poly_type_id.
  - by rewrite map_ren_poly_type_id.
Qed.

(* abstraction rule without generalization *)
Lemma pure_typing_pure_abs_simpleI {Gamma M s t} :
  pure_typing (s :: Gamma) M t -> pure_typing Gamma (pure_abs M) (poly_arr s t).
Proof.
  move=> HM. apply: (pure_typing_pure_abs 0). by rewrite map_ren_poly_type_id.
Qed.

(* generalization: n quantifiers can be introduced on the result type *)
Lemma pure_typing_many_poly_absI {n Gamma M t} :
  pure_typing (map (ren_poly_type (Nat.add n)) Gamma) M t ->
  pure_typing Gamma M (many_poly_abs n t).
Proof.
  (* two successive shifts of the environment compose additively *)
  have Hnn' : forall Gamma n n',
    (map (ren_poly_type (Nat.add n')) (map (ren_poly_type (Nat.add n)) Gamma)) =
    map (ren_poly_type (Nat.add (n + n'))) Gamma.
  { move=> *. rewrite ?map_map. apply: map_ext => ?.
    rewrite poly_type_norm /=. apply: extRen_poly_type. by lia. }
  case: M.
  - move=> x /pure_typingE [n'] [?] [?] [+] [HC] ->.
    rewrite Hnn' many_poly_abs_many_poly_abs => ?.
    apply: (pure_typing_pure_var (n + n')); by eassumption.
  - move=> ? ? /pure_typingE [n'] [?] [?] [?] [+] [+] [HC] ->.
    rewrite ?Hnn' many_poly_abs_many_poly_abs => ? ?.
    apply: (pure_typing_pure_app (n + n')); by eassumption.
  - move=> ? /pure_typingE [n'] [?] [?] [+] ->.
    rewrite ?Hnn' many_poly_abs_many_poly_abs => ?.
    apply: (pure_typing_pure_abs (n + n')); by eassumption.
Qed.
(* pure_typing is closed under substitution of type variables *)
Lemma pure_typing_subst_poly_type {Gamma M t} σ : pure_typing Gamma M t ->
  pure_typing (map (subst_poly_type σ) Gamma) M (subst_poly_type σ t).
Proof.
  (* σ' n σ lifts σ under n binders *)
  pose σ' n σ := Nat.iter n up_poly_type_poly_type σ.
  have Hσ' : forall n t σ, subst_poly_type (σ' n σ) (ren_poly_type (Nat.add n) t) =
    ren_poly_type (Nat.add n) (subst_poly_type σ t).
  { move=> >. rewrite ?poly_type_norm /σ' /=. apply: ext_poly_type.
    move=> ?. by rewrite iter_up_poly_type_poly_type. }
  move=> H. elim: H σ.
  - move=> n {}Gamma x > Hx + σ => /(contains_subst_poly_typeI (σ' n σ)) HC.
    rewrite subst_poly_type_many_poly_abs. apply: (pure_typing_pure_var n); last by eassumption.
    move: Hx. rewrite ?nth_error_map. case: (nth_error Gamma x); last done.
    move=> ? [<-] /=. by rewrite Hσ'.
  - move=> n > _ + _ + + σ => /(_ (σ' n σ)) + /(_ (σ' n σ)) + /(contains_subst_poly_typeI (σ' n σ)) ?.
    rewrite ?map_map. under map_ext => ? do rewrite Hσ'. move=> ? ?.
    rewrite subst_poly_type_many_poly_abs. apply: (pure_typing_pure_app n); rewrite ?map_map; by eassumption.
  - move=> n > _ + σ => /(_ (σ' n σ)). rewrite /= map_map. under map_ext => ? do rewrite Hσ'. move=> ?.
    rewrite subst_poly_type_many_poly_abs /=. apply: (pure_typing_pure_abs n).
    rewrite ?map_map. by eassumption.
Qed.

(* a typing at a ∀-type can be instantiated at any type s *)
Lemma pure_typing_poly_absE {s Gamma M t} :
  pure_typing Gamma M (poly_abs t) ->
  pure_typing Gamma M (subst_poly_type (s .: poly_var) t).
Proof.
  (* σ n s instantiates the variable under n binders by s *)
  pose σ n s := Nat.iter n up_poly_type_poly_type (s .: poly_var).
  have Hσ: forall n t s, subst_poly_type (σ n s) (ren_poly_type (Nat.add (S n)) t) =
    ren_poly_type (Nat.add n) t.
  { move=> n >. rewrite ?poly_type_norm [RHS]ren_as_subst_poly_type /σ.
    apply: ext_poly_type => y /=. rewrite iter_up_poly_type_poly_type_ge; first by lia.
    by have ->: S (n + y) - n = S y by lia. }
  elim: M s Gamma t.
  - move=> x s Gamma t /pure_typingE [[|n]] [tx] [?] [+] [+] /=.
    + move=> *. subst.
      apply: (pure_typing_pure_var 0); first by eassumption.
      apply: rt_trans; [by eassumption | by apply /rt_step /contains_step_subst].
    + move=> Hx /(contains_subst_poly_typeI (σ n s)) HC [?]. subst.
      rewrite subst_poly_type_many_poly_abs. apply: (pure_typing_pure_var n); last by eassumption.
      move: Hx. rewrite ?nth_error_map. case: (nth_error Gamma x); last done.
      move=> ? /= [<-]. by rewrite Hσ.
  - move=> ? IH1 ? IH2 s Gamma t /pure_typingE [[|n]] [?] [?] [?] [+] [+] [].
    + move=> ? ? ? /= ?. subst.
      apply: (pure_typing_pure_app 0); [by eassumption | by eassumption |].
      apply: rt_trans; [by eassumption | by apply /rt_step /contains_step_subst].
    + move=> /(pure_typing_subst_poly_type (σ n s)) /= + /(pure_typing_subst_poly_type (σ n s)) /= +.
      rewrite ?map_map. under map_ext => ? do rewrite Hσ. move=> ? ?.
      move=> /(contains_subst_poly_typeI (σ n s)) ? [->]. rewrite subst_poly_type_many_poly_abs.
      apply: (pure_typing_pure_app n); by eassumption.
  - move=> ? IH s Gamma t /pure_typingE [[|n]] [?] [?] []; first done.
    move=> + [->]. rewrite subst_poly_type_many_poly_abs.
    move=> /(pure_typing_subst_poly_type (σ n s)) /=.
    rewrite ?map_map. under map_ext => ? do rewrite Hσ. move=> ?.
    by apply: (pure_typing_pure_abs n).
Qed.
(* translation from typing to pure_typing *)
Theorem typing_to_pure_typing {Gamma P t} : typing Gamma P t -> pure_typing Gamma (erase P) t.
Proof.
  elim => *.
  - by apply: pure_typing_pure_var_simpleI.
  - apply: pure_typing_pure_app_simpleI; by eassumption.
  - by apply: pure_typing_pure_abs_simpleI.
  - by apply: pure_typing_poly_absE.
  - by apply: (pure_typing_many_poly_absI (n := 1)).
Qed.

(* translation from pure_typing to typing *)
Theorem pure_typing_to_typing {Gamma M t} : pure_typing Gamma M t -> exists P, M = erase P /\ typing Gamma P t.
Proof.
  elim.
  - move=> > /typing_var /typing_contains H /H {H} [?] /= [->] /typing_many_ty_absI ?.
    eexists. constructor; last by eassumption. by rewrite erase_many_ty_abs.
  - move=> > _ [P] [-> +] _ [Q] [-> +] => /typing_app H /H /typing_contains {}H /H.
    move=> [?] /= [->] /typing_many_ty_absI ?.
    eexists. constructor; last by eassumption. by rewrite erase_many_ty_abs.
  - move=> > _ [?] [->] /typing_abs /typing_many_ty_absI ?.
    eexists. constructor; last by eassumption. by rewrite erase_many_ty_abs.
Qed.

(* pure_typing is closed under contains (via the typing translations) *)
Lemma pure_typing_contains {s t Gamma M} : contains s t -> pure_typing Gamma M s ->
  pure_typing Gamma M t.
Proof. by move=> H /pure_typing_to_typing [?] [->] /(typing_contains H) [?] [->] /typing_to_pure_typing. Qed.

(* pure_typing is closed under renaming of type variables *)
Lemma pure_typing_ren_poly_type {Gamma M t} ξ : pure_typing Gamma M t ->
  pure_typing (map (ren_poly_type ξ) Gamma) M (ren_poly_type ξ t).
Proof.
  move=> /pure_typing_to_typing [?] [->] /(typing_ren_poly_type ξ) /typing_to_pure_typing.
  congr pure_typing. by apply: erase_ren_term_id.
Qed.

(* leading quantifiers of the result type can be removed by shifting Gamma *)
Lemma pure_typing_many_poly_absE {n Gamma M t} :
  pure_typing Gamma M (many_poly_abs n t) ->
  pure_typing (map (ren_poly_type (Nat.add n)) Gamma) M t.
Proof.
  elim: n Gamma t.
  - move=> > /=. by rewrite map_ren_poly_type_id.
  - move=> n IH Gamma t /=.
    move=> /(pure_typing_ren_poly_type S) /pure_typing_to_typing [?] [?] /=.
    move=> /typing_ty_app => /(_ (poly_var 0)). subst M.
    rewrite ?poly_type_norm subst_poly_type_poly_var'; first by case.
    move=> /typing_to_pure_typing /= /IH. congr pure_typing.
    rewrite ?map_map. apply: map_ext => ?. rewrite ?poly_type_norm /=.
    apply: extRen_poly_type. by lia.
Qed.
(* constructs a ∀I (cf. λI) type expression *)
(* tidy removes vacuous quantifiers: poly_abs t where 0 does not occur in t *)
Fixpoint tidy (t: poly_type) :=
  match t with
  | poly_var _ => t
  | poly_arr s t => poly_arr (tidy s) (tidy t)
  | poly_abs t => if fresh_inb 0 t then ren_poly_type (scons 0 id) (tidy t) else (poly_abs (tidy t))
  end.

(* tidy preserves the free type variables *)
Lemma allfv_poly_type_tidy {P t} : allfv_poly_type P (tidy t) <-> allfv_poly_type P t.
Proof.
  elim: t P; [done | by move=> /= ? + ? + ? => -> -> |].
  move=> ? IH ? /=.
  case H: (fresh_inb _ _); last by apply: IH.
  rewrite allfv_poly_type_ren_poly_type IH.
  apply: ext_allfv_poly_type_allfv_poly_type. move: H => /(fresh_inP).
  apply: allfv_poly_type_impl. by case.
Qed.

(* tidy commutes with renaming *)
Lemma tidy_ren_poly_type {ξ t} : tidy (ren_poly_type ξ t) = ren_poly_type ξ (tidy t).
Proof.
  elim: t ξ; [done | by move=> ? + ? + ? /= => -> -> |].
  move=> ? IH ξ /=.
  set b1 := (fresh_inb _ (ren_poly_type _ _)). move Hb2: (fresh_inb _ _) => b2.
  have ->: b1 = b2.
  { apply /is_trueP. rewrite /b1 -Hb2 -?(rwP fresh_inP) /= allfv_poly_type_ren_poly_type.
    apply: ext_allfv_poly_type. by case. }
  case: b2 Hb2; last by rewrite /= IH.
  rewrite IH ?poly_type_norm => /fresh_inP H.
  apply: ext_ren_poly_type_allfv_poly_type. rewrite allfv_poly_type_tidy.
  apply: allfv_poly_type_impl H. by case.
Qed.

(* tidy commutes with substitution, tidying the substituted types as well *)
Lemma tidy_subst_poly_type {σ t} : tidy (subst_poly_type σ t) = subst_poly_type (σ >> tidy) (tidy t).
Proof.
  elim: t σ; [done | by move=> ? + ? + ? /= => -> -> |].
  move=> ? IH σ /=.
  set b1 := (fresh_inb _ (subst_poly_type _ _)). move Hb2: (fresh_inb _ _) => b2.
  have ->: b1 = b2.
  { apply /is_trueP. rewrite /b1 -Hb2 -?(rwP fresh_inP) /= allfv_poly_type_subst_poly_type.
    apply: ext_allfv_poly_type. case; first done.
    move=> ?. rewrite /= allfv_poly_type_ren_poly_type /=. constructor; first done.
    move=> _. by apply: allfv_poly_type_TrueI. }
  case: b2 Hb2.
  - rewrite IH ?poly_type_norm => /fresh_inP H.
    apply: ext_subst_poly_type_allfv_poly_type. rewrite allfv_poly_type_tidy.
    apply: allfv_poly_type_impl H. case; first done.
    move=> ? _ /=. by rewrite tidy_ren_poly_type ?poly_type_norm ren_poly_type_id.
  - move=> _ /=. rewrite IH /=. congr poly_abs. apply: ext_poly_type. case; first done.
    move=> ?. by rewrite /= tidy_ren_poly_type.
Qed.

(* tidy is monotone wrt. contains *)
Lemma contains_tidyI {t t'} : contains t t' -> contains (tidy t) (tidy t').
Proof.
  move=> /clos_rt_rt1n_iff. elim; first by move=> ?; apply: rt_refl.
  move=> > [] s'' t'' _ IH /=. case H: (fresh_inb _ _).
  - move: IH. rewrite tidy_subst_poly_type. congr contains.
    rewrite ren_as_subst_poly_type. apply: ext_subst_poly_type_allfv_poly_type.
    rewrite allfv_poly_type_tidy. move: H => /fresh_inP.
    apply: allfv_poly_type_impl. by case.
  - apply: rt_trans; last by eassumption.
    apply: rt_step. rewrite tidy_subst_poly_type.
    have := contains_step_subst (s := tidy s'') (t := tidy t''). congr contains_step.
    apply: ext_poly_type. by case.
Qed.
(* introduces canonical type derivations (cf. Wells) *)
Lemma typing_tidyI {Gamma P t} : typing Gamma P t -> exists Q, erase P = erase Q /\ typing (map tidy Gamma) Q (tidy t).
Proof.
  elim: P Gamma t.
  - move=> x Gamma t /typingE Hx. exists (var x). constructor; first done.
    apply: typing_var. by rewrite nth_error_map Hx.
  - move=> P IHP Q IHQ Gamma t /typingE [?] /= [/IHP [P'] [->] {}IHP /IHQ [Q'] [->] {}IHQ].
    exists (app P' Q'). constructor; first done.
    apply: typing_app; by eassumption.
  - move=> s P IH Gamma t /typingE [?] /= [->] /IH [P'] [->] HP'.
    exists (abs (tidy s) P'). constructor; first done.
    by apply: typing_abs.
  - move=> P IH s Gamma t /typingE [t'] /= [->] /IH [P'] [->].
    move=> /typing_contains. apply.
    by apply /contains_tidyI /rt_step /contains_step_subst.
  - move=> P IH Gamma t /typingE [t'] /= [->] /IH [P'] [->] /=.
    (* the quantifier is vacuous iff fresh_inb holds; then drop the ty_abs *)
    case Hb: (fresh_inb _ _).
    + move=> /(typing_ren_poly_type (0 .: id)) => H.
      exists (ren_term (0 .: id) id P'). constructor; first by rewrite erase_ren_term_id.
      congr typing: H. rewrite ?map_map. apply: map_ext => ?.
      by rewrite tidy_ren_poly_type ?poly_type_norm /= ren_poly_type_id.
    + move=> H. exists (ty_abs P'). constructor; first done.
      apply: typing_ty_abs. congr typing: H. rewrite ?map_map.
      apply: map_ext => ?. by apply: tidy_ren_poly_type.
Qed.

(* introduce pure canonical typings *)
Lemma pure_typing_tidyI {Gamma M t} : pure_typing Gamma M t -> pure_typing (map tidy Gamma) M (tidy t).
Proof.
  by move=> /pure_typing_to_typing [?] [->] /typing_tidyI [?] [->] /typing_to_pure_typing.
Qed.

(* tidy is idempotent *)
Lemma tidy_tidy {t} : tidy (tidy t) = tidy t.
Proof.
  elim: t; [done | by move=> /= ? -> ? -> |].
  move=> t /=. case Ht: (fresh_inb _ _).
  - by rewrite ?tidy_ren_poly_type => ->.
  - rewrite /=.
    have -> : fresh_inb 0 (tidy t) = fresh_inb 0 t.
    { apply /is_trueP. by rewrite -?(rwP fresh_inP) /= allfv_poly_type_tidy. }
    by rewrite Ht => ->.
Qed.

(* an inner tidy under many_poly_abs is absorbed by the outer tidy *)
Lemma tidy_many_poly_abs_tidy {n t} : tidy (many_poly_abs n (tidy t)) = tidy (many_poly_abs n t).
Proof.
  elim: n; first by rewrite tidy_tidy.
  move=> n /= ->.
  have ->: fresh_inb 0 (many_poly_abs n (tidy t)) = fresh_inb 0 (many_poly_abs n t).
  { apply /is_trueP. by rewrite -?(rwP fresh_inP) /= ?allfv_poly_type_many_poly_abs allfv_poly_type_tidy. }
  done.
Qed.
Lemma tidy_many_poly_abs_le {n t}:
allfv_poly_type (le n) t -> tidy (many_poly_abs n t) = ren_poly_type (fun x => x - n) (tidy t).
Proof.
elim: n t.
- move=> t _ /=. rewrite ren_poly_type_id'; by [|lia].
- move=> n IH t /= Ht.
have ->: fresh_inb 0 (many_poly_abs n t) = true.
{ apply /fresh_inP. rewrite /fresh_in allfv_poly_type_many_poly_abs.
apply: allfv_poly_type_impl Ht. move=> x ?. rewrite iter_scons_ge; by lia. }
rewrite IH.
{ apply: allfv_poly_type_impl Ht. by lia. }
rewrite poly_type_norm. apply: extRen_poly_type.
move=> x /=. case H: (x-n) => /=; by lia.
Qed.
(* tidy commutes with many_poly_abs: there is a computable number n' of
   remaining abstractions and a renaming ξ' such that
   tidy (∀^n s) = ∀^n' (tidy (ξ' s)); stated as a sigma type so the
   witness pair can be extracted. *)
Lemma tidy_many_poly_abs {n s} :
{ n'ξ' | tidy (many_poly_abs n s) = many_poly_abs (n'ξ'.1) (tidy (ren_poly_type (n'ξ'.2) s)) }.
Proof.
elim: n s.
- move=> s. exists (0, id). by rewrite ren_poly_type_id.
- move=> n IH s /=. case Hns: (fresh_inb 0 (many_poly_abs n s)).
  (* variable 0 unused: the binder is dropped by tidy *)
  + have := IH s. move=> [[n' ξ']] ->.
    rewrite ren_poly_type_many_poly_abs -tidy_ren_poly_type ?poly_type_norm.
    evar (n'': nat). evar (ξ'': nat -> nat).
    exists (n'', ξ'') => /=. by subst n'' ξ''.
  (* variable 0 occurs: the binder is kept *)
  + have := IH s. move=> [[n' ξ']] ->.
    by exists ((1+n'), ξ').
Qed.
(* note: hard to make more general because of cases like ∀x.x < ∀x.x < ∀xy.x *)
(* inversion: any instantiation of ∀^n (s1 -> t1) substitutes at most n of
   the outermost quantifiers (the list ts) and keeps the remaining ones *)
Lemma contains_many_poly_absE {n s1 t1 t} : contains (many_poly_abs n (poly_arr s1 t1)) t ->
exists ts, length ts <= n /\
t = subst_poly_type (fold_right scons poly_var ts) (many_poly_abs (n - length ts) (poly_arr s1 t1)).
Proof.
move=> /clos_rt_rtn1_iff. elim.
- exists [] => /=. constructor; first by lia.
  by rewrite (ltac:(lia) : n - 0 = n) subst_poly_type_poly_var.
- move=> > [] r ? _ [ts] [?].
  move Hn': (n - length ts) => n'. case: n' Hn'; first done.
  move=> n' ? [->]. exists (r :: ts) => /=.
  constructor; first by lia.
  have ->: n - S (length ts) = n' by lia.
  rewrite ?poly_type_norm. apply: ext_poly_type. case; first done.
  move=> x /=. by rewrite ?poly_type_norm /= subst_poly_type_poly_var.
Qed.
(* special case: instantiating down to a bare arrow consumes all n quantifiers *)
Lemma contains_poly_arrE {n s1 t1 s2 t2} : contains (many_poly_abs n (poly_arr s1 t1)) (poly_arr s2 t2) ->
exists ts, n = length ts /\ (poly_arr s2 t2) = subst_poly_type (fold_right scons poly_var ts) (poly_arr s1 t1).
Proof.
move=> /contains_many_poly_absE [ts] [?] H.
have Hnts : n - length ts = 0 by case: (n - length ts) H.
exists ts. constructor; [by lia | by rewrite H Hnts].
Qed.
(* inversion: only a (possibly quantified) variable can be instantiated to a variable *)
Lemma contains_poly_varE {t x} : contains t (poly_var x) ->
exists n y, t = many_poly_abs n (poly_var y).
Proof.
have [n [t' [->]]] := many_poly_absI t. case: t'.
- move=> *. by do 2 eexists.
- move=> > _ /contains_many_poly_absE [ts] [_]. by case: (n - length ts).
- by move=> /=.
Qed.
(* instantiating ∀^n (variable pointing past all n binders) again yields a
   type of that shape, splitting the binder count as n = i + m *)
Lemma contains_many_poly_abs_free {n x t} :
contains (many_poly_abs n (poly_var (n + x))) t ->
exists i m, n = i + m /\ t = many_poly_abs m (poly_var (m+x)).
Proof.
elim: n x t.
- move=> /= x ? /containsE <-. by exists 0, 0.
- move=> n IH x t /containsE /= [].
  + move=> <-. by exists 0, (1+n).
  + move=> [?]. rewrite subst_poly_type_many_poly_abs /=.
    have ->: S (n + x) = n + S x by lia. rewrite iter_up_poly_type_poly_type /=.
    move=> /IH [i] [m] [-> ->]. by exists (1+i), m.
Qed.
(* introduction: substituting the list ts for the |ts| outermost binders is a
   valid containment step sequence *)
Lemma contains_subst_poly_type_fold_rightI {ts t} :
contains (many_poly_abs (length ts) t) (subst_poly_type (fold_right scons poly_var ts) t).
Proof.
elim: ts t.
- move=> /= ?. rewrite subst_poly_type_poly_var. by apply: rt_refl.
- move=> r ts IH t. rewrite [length _]/=.
  have ->: S (length ts) = (length ts) + 1 by lia.
  rewrite -many_poly_abs_many_poly_abs /=.
  have {}IH := IH (poly_abs t). apply: rt_trans; first by eassumption.
  apply: rt_step => /=. have := contains_step_subst.
  (* evars let the final substitution be determined by unification *)
  evar (s': poly_type) => /(_ s'). evar (t': poly_type) => /(_ t').
  congr contains_step. subst t'.
  rewrite ?poly_type_norm /=. apply: ext_poly_type.
  case=> /=; first by subst s'.
  move=> ?. by rewrite poly_type_norm /= subst_poly_type_poly_var.
Qed.
(* elimination/transitivity form of the previous lemma: containment from the
   substituted type lifts to containment from the quantified type *)
Lemma contains_subst_poly_type_fold_rightE {ts t t'} :
contains (subst_poly_type (fold_right scons poly_var ts) t) t' ->
contains (many_poly_abs (length ts) t) t'.
Proof.
move Hs: (subst_poly_type (fold_right scons poly_var ts) t) => s /clos_rt_rtn1_iff Hst'.
elim: Hst' t ts Hs.
- move=> ? ? <-. by apply: contains_subst_poly_type_fold_rightI.
- move=> > [] r ? _ IH t ts /IH {}IH.
  apply: rt_trans; first by eassumption. by apply /rt_step /contains_step_subst.
Qed.
(* if all free variables of s are below n, then every substitution instance of
   s is contained in ∀^n s *)
Lemma contains_many_poly_abs_closedI {n s σ} :
allfv_poly_type (gt n) s ->
contains (many_poly_abs n s) (subst_poly_type σ s).
Proof.
move=> Hns. pose ts := (map σ (seq 0 n)).
have -> : n = length ts by rewrite /ts map_length seq_length.
apply: contains_subst_poly_type_fold_rightE.
have ->: subst_poly_type (fold_right scons poly_var ts) s = subst_poly_type σ s.
{
(* on variables below n the list substitution agrees with σ pointwise *)
apply: ext_subst_poly_type_allfv_poly_type. apply: allfv_poly_type_impl Hns => x ?.
rewrite /ts fold_right_length_ts_lt; first by (rewrite map_length seq_length; lia).
rewrite [nth _ _ (poly_var 0)](nth_indep _ _ (σ 0)); first by (rewrite map_length seq_length; lia).
rewrite map_nth seq_nth; by [|lia].
}
by apply: rt_refl.
Qed.
(* pure typing is preserved under renaming *)
(* ξ renames term variables; the context Delta must agree with Gamma along ξ *)
Lemma pure_typing_ren_pure_term {Gamma Delta M t} (ξ : nat -> nat) :
pure_typing Gamma M t ->
(forall n s, nth_error Gamma n = Some s -> nth_error Delta (ξ n) = Some s) ->
pure_typing Delta (ren_pure_term ξ M) t.
Proof.
(* go through the annotated typing judgement, rename there, come back *)
move=> /pure_typing_to_typing [P] [->] /typing_ren_term H /H{H} /typing_to_pure_typing.
by rewrite erase_ren_term.
Qed.
(* pure typing is preserved under renaming of occurring free variables *)
(* weaker premise than above: Gamma/Delta only need to agree on the free
   variables actually occurring in M *)
Lemma pure_typing_ren_pure_term_allfv_pure_term {Gamma Delta M t} (ξ : nat -> nat) :
(allfv_pure_term (fun x => nth_error Gamma x = nth_error Delta (ξ x)) M) ->
pure_typing Gamma M t -> pure_typing Delta (ren_pure_term ξ M) t.
Proof.
move=> + /pure_typing_to_typing [P] [?]. subst M.
move=> /allfv_pure_term_erase /typing_ren_allfv_term H /H /typing_to_pure_typing.
by rewrite erase_ren_term.
Qed.
(* transport pure typing along contains / many_poly_abs *)
(* for a closed assumption s, a typing of variable 0 can be generalized by any
   number of additional quantifiers on the result type *)
Lemma pure_typing_many_poly_abs_closed {s t n} :
allfv_poly_type (fun=> False) s ->
pure_typing [s] (pure_var 0) t ->
pure_typing [s] (pure_var 0) (many_poly_abs n t).
Proof.
move=> Hs /pure_typingE [n1] [?] [?] [[?]] [HC ?]. subst.
rewrite many_poly_abs_many_poly_abs.
apply: pure_typing_pure_var; first by reflexivity.
(* renaming is irrelevant on the closed type s *)
congr contains: HC. apply: ext_ren_poly_type_allfv_poly_type.
by apply: allfv_poly_type_impl Hs.
Qed.
(* transport pure typing along contains / many_poly_abs *)
Lemma pure_typing_many_poly_abs_contains_closed {s t t' n} :
allfv_poly_type (fun=> False) s ->
contains (many_poly_abs n t) t' ->
pure_typing [s] (pure_var 0) t ->
pure_typing [s] (pure_var 0) t'.
Proof.
by move=> /pure_typing_many_poly_abs_closed H /pure_typing_contains HC /H => /(_ n) /HC.
Qed.
(* transport pure typing along contains / many_poly_abs *)
(* tidied variant of pure_typing_many_poly_abs_closed *)
Lemma pure_typing_tidy_many_poly_abs_closed {s t n} :
allfv_poly_type (fun=> False) s ->
pure_typing [s] (pure_var 0) (tidy t) ->
pure_typing [tidy s] (pure_var 0) (tidy (many_poly_abs n t)).
Proof.
move=> Hs Ht. rewrite -tidy_many_poly_abs_tidy -/(map tidy [s]).
apply: pure_typing_tidyI. by apply: pure_typing_many_poly_abs_closed.
Qed.
(* transport pure typing along contains / many_poly_abs *)
Lemma pure_typing_many_poly_abs_contains_tidy_closed {s t t' n} :
allfv_poly_type (fun=> False) s ->
contains (many_poly_abs n t) t' ->
pure_typing [s] (pure_var 0) (tidy t) ->
pure_typing [tidy s] (pure_var 0) (tidy t').
Proof.
move=> Hs /contains_tidyI HC /(pure_typing_tidy_many_poly_abs_closed Hs (n := n)) /pure_typing_contains. by apply.
Qed.
(* weaken an assumption wrt. derivability *)
(* if the closed type s derives s' (as the type of variable 0), then s can
   replace s' anywhere in the context; proof by induction on the annotated
   typing derivation of M *)
Lemma pure_typing_weaken_closed {s s' Gamma1 Gamma2 M t} :
allfv_poly_type (fun=> False) s ->
pure_typing [s] (pure_var 0) s' ->
pure_typing (Gamma1 ++ s' :: Gamma2) M t ->
pure_typing (Gamma1 ++ s :: Gamma2) M t.
Proof.
move=> Hs Hss' /pure_typing_to_typing [P] [->].
elim: P Gamma1 Gamma2 s' t Hss'.
(* variable: three cases depending on where x points in Gamma1 ++ s' :: Gamma2 *)
- move=> x Gamma1 Gamma2 s' t Hss' /typingE Hx.
  have [?|[H'x|?]] : x < length Gamma1 \/ x - length Gamma1 = 1 + (x - length Gamma1 - 1) \/ length Gamma1 = x by lia.
  + apply: pure_typing_pure_var_simpleI. move: Hx. by rewrite ?nth_error_app1.
  + apply: pure_typing_pure_var_simpleI. move: Hx.
    rewrite nth_error_app2; first by lia. rewrite nth_error_app2; [by lia | by rewrite H'x].
  (* x hits the replaced assumption: use the derivation Hss', shifted to x *)
  + move: Hx. rewrite nth_error_app2; first by lia.
    have H'x: x - length Gamma1 = 0 by lia. move: (H'x) => -> [<-].
    move: Hss' => /(pure_typing_ren_pure_term_allfv_pure_term (fun y => y + x)). apply.
    rewrite /= nth_error_app2; first by lia. by rewrite H'x.
(* application *)
- move=> ? IH1 ? IH2 > /copy [/IH1 {}IH1 /IH2 {}IH2] /typingE [?] [/IH1 + /IH2 +].
  by move=> /pure_typing_pure_app_simpleI H /H.
(* term abstraction: extend Gamma1 *)
- move=> ? ? IH > /IH {}IH /typingE [?] [->]. rewrite -/((_ :: _) ++ _).
  by move=> /IH /= /pure_typing_pure_abs_simpleI.
(* type application *)
- move=> > IH > /IH {}IH /typingE [?] [->] /IH. apply: pure_typing_contains.
  by apply /rt_step /contains_step_subst.
(* type abstraction: s is closed, so shifting leaves it unchanged *)
- move=> > IH > Hss' /typingE [?] [->]. rewrite map_app /=.
  move=> /IH. apply: unnest.
  { move: Hss' => /(pure_typing_ren_poly_type S). congr pure_typing => /=.
    by rewrite ren_poly_type_closed_id. }
  move=> {}IH. apply: (pure_typing_many_poly_absI (n := 1)).
  congr pure_typing: IH. by rewrite map_app /= ren_poly_type_closed_id.
Qed.
(* tidy is the identity on simple (quantifier-free) types *)
Lemma tidy_is_simple {t} : is_simple t -> tidy t = t.
Proof. elim: t; [done | by move=> ? IH1 ? IH2 /= [/IH1 -> /IH2 ->] | done]. Qed.
(* simplicity is invariant under renaming *)
Lemma is_simple_ren_poly_type {ξ t} : is_simple (ren_poly_type ξ t) <-> is_simple t.
Proof. elim t; [done | by move=> ? IH1 ? IH2 /=; rewrite IH1 IH2 | done]. Qed.
(* for simple s with free variables below n, the assumption ∀^n s derives its
   tidied form (right-to-left direction) *)
Lemma pure_typing_tidyRL {s n} : is_simple s -> allfv_poly_type (gt n) s ->
pure_typing [many_poly_abs n s] (pure_var 0) (tidy (many_poly_abs n s)).
Proof.
rewrite (svalP tidy_many_poly_abs).
move: (sval _) => [n' ξ'] /= Hs Hns.
apply: (pure_typing_pure_var n'); first done.
rewrite tidy_is_simple; first by rewrite is_simple_ren_poly_type.
rewrite ren_poly_type_many_poly_abs.
have -> : ren_poly_type (Nat.iter n up_ren (Nat.add n')) s = s.
{
rewrite -[RHS]ren_poly_type_id.
apply: ext_ren_poly_type_allfv_poly_type. apply: allfv_poly_type_impl Hns.
move=> ? ?. rewrite iter_up_ren_lt; by [|lia].
}
rewrite ren_as_subst_poly_type. by apply: contains_many_poly_abs_closedI.
Qed.
(* converse direction: the tidied form derives ∀^n s *)
Lemma pure_typing_tidyLR {s n} : is_simple s -> allfv_poly_type (gt n) s ->
pure_typing [tidy (many_poly_abs n s)] (pure_var 0) (many_poly_abs n s).
Proof.
move=> Hs Hns.
apply: (pure_typing_pure_var n); first by reflexivity.
rewrite -tidy_ren_poly_type ren_poly_type_many_poly_abs.
have -> : ren_poly_type (Nat.iter n up_ren (Nat.add n)) s = s.
{
rewrite -[RHS]ren_poly_type_id.
apply: ext_ren_poly_type_allfv_poly_type. apply: allfv_poly_type_impl Hns.
move=> ? ?. rewrite iter_up_ren_lt; by [|lia].
}
rewrite -[s in contains _ s](tidy_is_simple Hs).
apply: contains_tidyI.
rewrite -[s in contains _ s]subst_poly_type_poly_var.
by apply: contains_many_poly_abs_closedI.
Qed.
(* introduction: a typing witness gives typability *)
Lemma pure_typableI {Gamma M t} : pure_typing Gamma M t -> pure_typable Gamma M.
Proof. move=> ?. by exists t. Qed.
(* inversion of typability by the shape of the term *)
Lemma pure_typableE {Gamma M} : pure_typable Gamma M ->
match M with
| pure_var x => if (nth_error Gamma x) is None then False else True
| pure_app M N => exists s t, pure_typing Gamma M (poly_arr s t) /\ pure_typing Gamma N s
| pure_abs M => exists s, pure_typable (s :: Gamma) M
end.
Proof.
move=> [t] [].
- move=> n {}Gamma x >. rewrite nth_error_map.
  by case: (nth_error Gamma x).
(* application: undo the outer generalization by renaming variables back down *)
- move=> n > /(pure_typing_ren_poly_type (fun x => x - n)) H1.
  move=> /(pure_typing_ren_poly_type (fun x => x - n)) H2 _.
  move: H1 H2. rewrite ?map_map. rewrite map_id'.
  { move=> ?. rewrite poly_type_norm ren_poly_type_id' /=; by [|lia]. }
  move=> ? ?. do 2 eexists. constructor; by eassumption.
- move=> n > /(pure_typing_ren_poly_type (fun x => x - n)) /=.
  rewrite ?map_map map_id'.
  { move=> ?. rewrite poly_type_norm ren_poly_type_id' /=; by [|lia]. }
  move=> /pure_typableI ?. eexists. by eassumption.
Qed.
(* typability survives tidying the whole context *)
Lemma pure_typable_tidyI {Gamma M} :
pure_typable Gamma M -> pure_typable (map tidy Gamma) M.
Proof. by move=> [?] /pure_typing_tidyI /pure_typableI. Qed.
(* tidying a context tail of well-shaped types (∀^n s with s simple and free
   variables below n) does not change typability, in either direction *)
Lemma pure_typable_tidy_iff {Gamma1 Gamma2 M} :
Forall (fun t => exists n s, t = many_poly_abs n s /\ is_simple s /\ allfv_poly_type (gt n) s) Gamma2 ->
pure_typable (Gamma1 ++ map tidy Gamma2) M <-> pure_typable (Gamma1 ++ Gamma2) M.
Proof.
elim: Gamma2 Gamma1; first done.
move=> ? Gamma2 IH Gamma1 /Forall_cons_iff [] [n] [s] [->] [Hs Hns] /IH {}IH.
constructor => /=.
(* each direction replaces one assumption via pure_typing_weaken_closed,
   using tidyRL / tidyLR as the inter-derivability witness *)
- move=> [?] /pure_typing_weaken_closed => /(_ (many_poly_abs n s)).
  apply: unnest.
  {
  rewrite allfv_poly_type_many_poly_abs. apply: allfv_poly_type_impl Hns.
  move=> *. rewrite iter_scons_lt; by [|lia].
  }
  apply: unnest; first by apply: pure_typing_tidyRL.
  rewrite -/([_] ++ map tidy Gamma2) -/([_] ++ Gamma2) ?app_assoc.
  by move=> /pure_typableI /IH.
- move=> [?] /pure_typing_weaken_closed => /(_ (tidy (many_poly_abs n s))).
  apply: unnest.
  {
  rewrite allfv_poly_type_tidy allfv_poly_type_many_poly_abs. apply: allfv_poly_type_impl Hns.
  move=> *. rewrite iter_scons_lt; by [|lia].
  }
  apply: unnest; first by apply: pure_typing_tidyLR.
  rewrite -/([_] ++ map tidy Gamma2) -/([_] ++ Gamma2) ?app_assoc.
  by move=> /pure_typableI /IH.
Qed.
(* abstracting over all free variables of a typable term yields a term that is
   typable in the empty context *)
Lemma pure_typable_many_pure_term_abs_allI M {Gamma} :
pure_typable Gamma M -> pure_typable [] (many_pure_term_abs (pure_var_bound M) M).
Proof.
have := pure_var_boundP M. move: (pure_var_bound M) => n.
elim: Gamma n M.
- elim; first done.
  move=> n IH M HnM [tM].
  move=> /(pure_typing_ren_pure_term id (Delta := [poly_var 0])).
  apply: unnest; first by case. rewrite ren_pure_term_id.
  move=> /(pure_typing_pure_abs 0 (Gamma := [])) /pure_typableI /IH.
  rewrite /many_pure_term_abs (ltac:(lia) : S n = n + 1) -iter_plus. apply.
  move=> /=. apply: allfv_pure_term_impl HnM.
  case; first done. move=> /=. by lia.
- move=> s Gamma IH [|n] M.
  + move=> HM [tM].
    move=> /(pure_typing_ren_pure_term_allfv_pure_term id (Delta := [])).
    rewrite ren_pure_term_id. apply: unnest; first by apply: allfv_pure_term_impl HM; lia.
    by move=> /pure_typableI.
  + move=> HnM [tM]. rewrite -[Gamma]map_ren_poly_type_id.
    move=> /(pure_typing_pure_abs 0) /pure_typableI /IH.
    rewrite /many_pure_term_abs (ltac:(lia) : S n = n + 1) -iter_plus. apply.
    move=> /=. apply: allfv_pure_term_impl HnM.
    case; first done. move=> /=. by lia.
Qed.
(* pure typing coincides with standard type assignment *)
Lemma pure_typing_iff_type_assignment {Gamma M t} :
pure_typing Gamma M t <-> type_assignment Gamma M t.
Proof.
constructor.
- by move=> /pure_typing_to_typing [?] [->] /typing_to_type_assignment.
- by move=> /typing_of_type_assignment [?] [->] /typing_to_pure_typing.
Qed.
|
{"author": "uds-psl", "repo": "coq-synthetic-incompleteness", "sha": "cd7d8490f8542bfe85658c465bcb26b2ed163f53", "save_path": "github-repos/coq/uds-psl-coq-synthetic-incompleteness", "path": "github-repos/coq/uds-psl-coq-synthetic-incompleteness/coq-synthetic-incompleteness-cd7d8490f8542bfe85658c465bcb26b2ed163f53/theories/SystemF/Util/pure_typing_facts.v"}
|
#ifndef LEESA_SEQUENCE_HPP
#define LEESA_SEQUENCE_HPP
#ifdef LEESA_SUPPORTS_VARIADIC_TEMPLATES
#include <typeinfo>
namespace boost {
namespace mpl {
struct null_type {};
template <typename... Arg>
struct vector {};
template <typename V> struct front;
template <typename V> struct pop_front;
template <typename Head, typename... Tail>
struct front <vector <Head, Tail...> >
{
typedef Head type;
};
template <>
struct front <vector <> >
{
typedef null_type type;
};
template <typename Head, typename... Tail>
struct pop_front <vector <Head, Tail...> >
{
typedef vector<Tail...> type;
};
template <>
struct pop_front <vector <> >
{
typedef vector<> type;
};
template <typename Vector, typename T> struct push_back;
template <typename T, typename... Args>
struct push_back < vector<Args...>, T>
{
typedef vector<Args..., T> type;
};
template <typename Vector> struct size;
template <typename... Args>
struct size <vector <Args...> >
{
typedef size type;
enum { value = sizeof...(Args) };
};
template <typename Vector> struct empty;
template <typename... Args>
struct empty <vector <Args...> >
{
typedef empty type;
enum { value = 0 };
};
template<>
struct empty <vector<> >
{
typedef empty type;
enum { value = 1 };
};
template <typename Vector, typename What> struct contains;
template <typename What, typename Head, typename... Tail>
struct contains < vector<Head, Tail...>, What> :
std::conditional < std::is_same<Head, What>::value,
std::true_type,
contains < vector<Tail...>, What>
>::type
{
typedef contains type;
};
template <typename What>
struct contains <vector<>, What>
{
typedef contains type;
enum { value = 0 };
};
template <class V1, template <typename> class Pred>
struct copy_if
{
typedef typename front<V1>::type Head;
typedef typename pop_front<V1>::type Tail;
typedef typename std::conditional<
Pred<Head>::value,
typename push_back<typename copy_if<Tail, Pred>::type,
Head
>::type,
typename copy_if<Tail, Pred>::type>::type type;
};
template <template <typename> class Pred>
struct copy_if <vector<>, Pred>
{
typedef vector<> type;
};
template <class V1, template <typename> class Pred>
struct remove_if
{
typedef typename front<V1>::type Head;
typedef typename pop_front<V1>::type Tail;
typedef typename std::conditional<
!Pred<Head>::value,
typename push_back<typename copy_if<Tail, Pred>::type,
Head
>::type,
typename copy_if<Tail, Pred>::type>::type type;
};
template <template <typename> class Pred>
struct remove_if <vector<>, Pred>
{
typedef vector<> type;
};
template <class V1, template <typename> class Pred>
struct count_if
{
typedef count_if type;
typedef typename front<V1>::type Head;
typedef typename pop_front<V1>::type Tail;
enum { value = bool(Pred<Head>::value) + count_if<Tail, Pred>::value };
};
template <template <typename> class Pred>
struct count_if <vector<>, Pred>
{
typedef count_if type;
enum { value = 0 };
};
} // namespace mpl
} // namespace boost
#define LEESA_MPL_VECTOR_N(N,...) boost::mpl::vector<__VA_ARGS__>
#else // LEESA_SUPPORTS_VARIADIC_TEMPLATES
#define LEESA_MPL_VECTOR_N(N,...) boost::mpl::vector##N<__VA_ARGS__>
#if (LEESA_MAX_MPL_VECTOR_SIZE < 20)
// That's fine.
#elif (LEESA_MAX_MPL_VECTOR_SIZE < 30)
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_LIMIT_VECTOR_SIZE 30
#elif (LEESA_MAX_MPL_VECTOR_SIZE < 40)
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_LIMIT_VECTOR_SIZE 40
#elif (LEESA_MAX_MPL_VECTOR_SIZE < 50)
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_LIMIT_VECTOR_SIZE 50
#else
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_LIMIT_VECTOR_SIZE 50
#include <boost/mpl/vector.hpp>
#include <boost/mpl/aux_/config/ctps.hpp>
#include <boost/preprocessor/iterate.hpp>
namespace boost {
namespace mpl {
#define BOOST_PP_ITERATION_PARAMS_1 (3,( \
BOOST_PP_INC(BOOST_MPL_LIMIT_VECTOR_SIZE), \
LEESA_MAX_MPL_VECTOR_SIZE, \
"boost/mpl/vector/aux_/numbered.hpp"))
#include BOOST_PP_ITERATE()
}
}
#endif // LEESA_MAX_MPL_VECTOR_SIZE
#include <boost/mpl/vector.hpp>
#include <boost/mpl/front.hpp>
#include <boost/mpl/pop_front.hpp>
#include <boost/mpl/push_back.hpp>
#include <boost/mpl/empty.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/remove_if.hpp>
#include <boost/mpl/count_if.hpp>
#include <boost/mpl/copy_if.hpp>
#include <boost/mpl/contains.hpp>
#endif // LEESA_SUPPORTS_VARIADIC_TEMPLATES
namespace LEESA {

// Re-export the MPL sequence primitives into the LEESA namespace so the rest
// of the library can use them unqualified, regardless of whether they come
// from the variadic shim above or from the real Boost.MPL headers.
using boost::mpl::front;
using boost::mpl::pop_front;
using boost::mpl::push_back;
using boost::mpl::empty;
using boost::mpl::size;
using boost::mpl::remove_if;
using boost::mpl::count_if;
using boost::mpl::copy_if;
using boost::mpl::contains;

} // namespace LEESA
#endif // LEESA_SEQUENCE_HPP
|
{"hexsha": "d0eb80f17c407af0e211d20965ac606b14919de5", "size": 5236, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/LEESA/Sequence.hpp", "max_stars_repo_name": "sutambe/LEESA", "max_stars_repo_head_hexsha": "fedaf4c0d7887c361d2a79967e3e770f375ddc25", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/LEESA/Sequence.hpp", "max_issues_repo_name": "sutambe/LEESA", "max_issues_repo_head_hexsha": "fedaf4c0d7887c361d2a79967e3e770f375ddc25", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2018-09-17T19:12:54.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-19T16:09:53.000Z", "max_forks_repo_path": "include/LEESA/Sequence.hpp", "max_forks_repo_name": "sutambe/LEESA", "max_forks_repo_head_hexsha": "fedaf4c0d7887c361d2a79967e3e770f375ddc25", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0183486239, "max_line_length": 76, "alphanum_fraction": 0.6650114591, "num_tokens": 1283}
|
import json
import pathlib
from unittest import mock
from click.testing import CliRunner
import numpy as np
import pytest
import yaml
from cabinetry import cli
from cabinetry import fit
class CLIHelpers:
    """Utility helpers shared by the CLI tests."""

    @staticmethod
    def write_config(path, config):
        """Serialize ``config`` as YAML into the file at ``path``."""
        with open(path, "w") as config_file:
            yaml.dump(config, config_file)
@pytest.fixture
def cli_helpers():
    """Provide the CLIHelpers utility class to tests as a pytest fixture."""
    return CLIHelpers
def test_cabinetry():
    """The top-level CLI group runs and shows its help text."""
    runner = CliRunner()
    help_result = runner.invoke(cli.cabinetry, ["--help"])
    assert help_result.exit_code == 0
    assert "Entrypoint to the cabinetry CLI." in help_result.output
# using autospec to catch changes in public API
@mock.patch("cabinetry.template_builder.create_histograms", autospec=True)
@mock.patch("cabinetry.configuration.validate", autospec=True)
def test_templates(mock_validate, mock_create_histograms, cli_helpers, tmp_path):
    """Template production validates the config and forwards the --method flag."""
    config = {"General": {"Measurement": "test_config"}}
    config_path = str(tmp_path / "config.yml")
    cli_helpers.write_config(config_path, config)

    runner = CliRunner()

    # without options, the default backend "uproot" is used
    default_run = runner.invoke(cli.templates, [config_path])
    assert default_run.exit_code == 0
    assert mock_validate.call_args_list == [((config,), {})]
    assert mock_create_histograms.call_args_list == [((config,), {"method": "uproot"})]

    # an explicit --method value is forwarded unchanged
    custom_run = runner.invoke(cli.templates, ["--method", "unknown", config_path])
    assert custom_run.exit_code == 0
    assert mock_create_histograms.call_args_list[-1] == (
        (config,),
        {"method": "unknown"},
    )
@mock.patch("cabinetry.template_postprocessor.run", autospec=True)
@mock.patch("cabinetry.configuration.validate", autospec=True)
def test_postprocess(mock_validate, mock_postprocess, cli_helpers, tmp_path):
    """Postprocessing validates the config and runs the postprocessor once."""
    config = {"General": {"Measurement": "test_config"}}
    config_path = str(tmp_path / "config.yml")
    cli_helpers.write_config(config_path, config)

    cli_result = CliRunner().invoke(cli.postprocess, [config_path])
    assert cli_result.exit_code == 0
    # both helpers receive the parsed config exactly once
    assert mock_validate.call_args_list == [((config,), {})]
    assert mock_postprocess.call_args_list == [((config,), {})]
@mock.patch(
    "cabinetry.workspace.build", return_value={"workspace": "mock"}, autospec=True
)
@mock.patch("cabinetry.configuration.validate", autospec=True)
def test_workspace(mock_validate, mock_build, cli_helpers, tmp_path):
    """Workspace creation builds from the config and writes the result to JSON."""
    config = {"General": {"Measurement": "test_config"}}
    config_path = str(tmp_path / "config.yml")
    cli_helpers.write_config(config_path, config)
    workspace_path = str(tmp_path / "workspace.json")

    cli_result = CliRunner().invoke(cli.workspace, [config_path, workspace_path])
    assert cli_result.exit_code == 0
    assert mock_validate.call_args_list == [((config,), {})]
    assert mock_build.call_args_list == [((config,), {})]
    # the mocked workspace must have been serialized to disk
    assert json.loads(pathlib.Path(workspace_path).read_text()) == {"workspace": "mock"}
@mock.patch("cabinetry.visualize.correlation_matrix", autospec=True)
@mock.patch("cabinetry.visualize.pulls", autospec=True)
@mock.patch(
    "cabinetry.fit.fit",
    return_value=fit.FitResults(
        np.asarray([1.0]), np.asarray([0.1]), ["label"], np.asarray([[1.0]]), 1.0
    ),
    autospec=True,
)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_fit(mock_util, mock_fit, mock_pulls, mock_corrmat, tmp_path):
    """The fit command forwards its flags to cabinetry.fit.fit and visualization."""
    workspace = {"workspace": "mock"}
    bestfit = np.asarray([1.0])
    uncertainty = np.asarray([0.1])
    labels = ["label"]
    corr_mat = np.asarray([[1.0]])
    # mirrors the return_value configured on the fit mock above
    fit_results = fit.FitResults(bestfit, uncertainty, labels, corr_mat, 1.0)

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # default
    result = runner.invoke(cli.fit, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace,), {"asimov": False})]
    assert mock_fit.call_args_list == [
        (("model", "data"), {"minos": None, "goodness_of_fit": False})
    ]

    # Asimov
    result = runner.invoke(cli.fit, ["--asimov", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": True})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {"minos": None, "goodness_of_fit": False},
    )

    # MINOS for one parameter
    result = runner.invoke(cli.fit, ["--minos", "par", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": False})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {"minos": ["par"], "goodness_of_fit": False},
    )

    # MINOS for multiple parameters (repeated --minos flags accumulate)
    result = runner.invoke(
        cli.fit, ["--minos", "par_a", "--minos", "par_b", workspace_path]
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": False})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {"minos": ["par_a", "par_b"], "goodness_of_fit": False},
    )

    # goodness-of-fit
    result = runner.invoke(cli.fit, ["--goodness_of_fit", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": False})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {"minos": None, "goodness_of_fit": True},
    )

    # pull plot
    result = runner.invoke(cli.fit, ["--pulls", workspace_path])
    assert result.exit_code == 0
    assert mock_pulls.call_args_list == [((fit_results,), {"figure_folder": "figures"})]

    # correlation matrix plot
    result = runner.invoke(cli.fit, ["--corrmat", workspace_path])
    assert result.exit_code == 0
    assert mock_corrmat.call_args_list == [
        ((fit_results,), {"figure_folder": "figures"})
    ]

    # both plots, different folder
    result = runner.invoke(
        cli.fit, ["--figfolder", "folder", "--pulls", "--corrmat", workspace_path]
    )
    assert result.exit_code == 0
    assert mock_corrmat.call_args_list[-1] == (
        (fit_results,),
        {"figure_folder": "folder"},
    )
    assert mock_pulls.call_args_list[-1] == (
        (fit_results,),
        {"figure_folder": "folder"},
    )
@mock.patch("cabinetry.visualize.ranking", autospec=True)
@mock.patch(
    "cabinetry.fit.ranking",
    return_value=fit.RankingResults(
        np.asarray([1.0]),
        np.asarray([0.1]),
        ["label"],
        np.asarray([[1.2]]),
        np.asarray([[0.8]]),
        np.asarray([[1.1]]),
        np.asarray([[0.9]]),
    ),
    autospec=True,
)
@mock.patch(
    "cabinetry.fit.fit",
    return_value=fit.FitResults(
        np.asarray([1.0]), np.asarray([0.1]), ["label"], np.asarray([[1.0]]), 1.0
    ),
    autospec=True,
)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_ranking(mock_util, mock_fit, mock_rank, mock_vis, tmp_path):
    """The ranking command runs fit + ranking and forwards results to the plot."""
    workspace = {"workspace": "mock"}
    bestfit = np.asarray([1.0])
    uncertainty = np.asarray([0.1])
    labels = ["label"]
    corr_mat = np.asarray([[1.0]])
    # mirrors the return_value configured on the fit mock above
    fit_results = fit.FitResults(bestfit, uncertainty, labels, corr_mat, 1.0)

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # default
    result = runner.invoke(cli.ranking, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace,), {"asimov": False})]
    assert mock_fit.call_args_list == [(("model", "data"), {})]
    assert mock_rank.call_args_list == [
        (("model", "data"), {"fit_results": fit_results})
    ]
    # the visualization receives the ranking mock's RankingResults
    assert mock_vis.call_count == 1
    assert np.allclose(mock_vis.call_args[0][0].prefit_up, [[1.2]])
    assert np.allclose(mock_vis.call_args[0][0].prefit_down, [[0.8]])
    assert np.allclose(mock_vis.call_args[0][0].postfit_up, [[1.1]])
    assert np.allclose(mock_vis.call_args[0][0].postfit_down, [[0.9]])
    assert mock_vis.call_args[1] == {"figure_folder": "figures", "max_pars": 10}

    # Asimov, maximum amount of parameters, custom folder
    result = runner.invoke(
        cli.ranking,
        ["--asimov", "--max_pars", 3, "--figfolder", "folder", workspace_path],
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": True})
    assert mock_fit.call_args_list[-1] == (("model", "data"), {})
    assert mock_rank.call_args_list[-1] == (
        ("model", "data"),
        {"fit_results": fit_results},
    )
    assert mock_vis.call_args_list[-1][1] == {"figure_folder": "folder", "max_pars": 3}
@mock.patch("cabinetry.visualize.scan", autospec=True)
@mock.patch(
    "cabinetry.fit.scan",
    return_value=fit.ScanResults("par", 1.0, 0.1, np.asarray([1.5]), np.asarray([3.5])),
    autospec=True,
)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_scan(mock_util, mock_scan, mock_vis, tmp_path):
    """The scan command forwards bounds/steps and rejects a single bound."""
    workspace = {"workspace": "mock"}

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    par_name = "par"
    # mirrors the return_value configured on the scan mock above
    scan_results = fit.ScanResults(
        par_name, 1.0, 0.1, np.asarray([1.5]), np.asarray([3.5])
    )

    runner = CliRunner()

    # default
    result = runner.invoke(cli.scan, [workspace_path, par_name])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace,), {"asimov": False})]
    assert mock_scan.call_args_list == [
        (("model", "data", par_name), {"par_range": None, "n_steps": 11})
    ]
    # the visualization receives the scan mock's ScanResults
    assert mock_vis.call_count == 1
    assert mock_vis.call_args[0][0].name == scan_results.name
    assert mock_vis.call_args[0][0].bestfit == scan_results.bestfit
    assert mock_vis.call_args[0][0].uncertainty == scan_results.uncertainty
    assert np.allclose(
        mock_vis.call_args[0][0].parameter_values, scan_results.parameter_values
    )
    assert np.allclose(mock_vis.call_args[0][0].delta_nlls, scan_results.delta_nlls)
    assert mock_vis.call_args[1] == {"figure_folder": "figures"}

    # only one bound
    with pytest.raises(
        ValueError,
        match="Need to either specify both lower_bound and upper_bound, or neither.",
    ):
        runner.invoke(
            cli.scan,
            ["--lower_bound", 1.0, workspace_path, par_name],
            catch_exceptions=False,
        )
    with pytest.raises(
        ValueError,
        match="Need to either specify both lower_bound and upper_bound, or neither.",
    ):
        runner.invoke(
            cli.scan,
            ["--upper_bound", 1.0, workspace_path, par_name],
            catch_exceptions=False,
        )

    # custom bounds, number of steps and Asimov
    result = runner.invoke(
        cli.scan,
        [
            "--lower_bound",
            0.0,
            "--upper_bound",
            2.0,
            "--n_steps",
            21,
            "--asimov",
            "--figfolder",
            "folder",
            workspace_path,
            par_name,
        ],
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": True})
    assert mock_scan.call_args_list[-1] == (
        ("model", "data", par_name),
        {"par_range": (0.0, 2.0), "n_steps": 21},
    )
    assert mock_vis.call_args[1] == {"figure_folder": "folder"}
@mock.patch("cabinetry.visualize.limit", autospec=True)
@mock.patch(
    "cabinetry.fit.limit",
    return_value=fit.LimitResults(
        3.0,
        np.asarray([1.0, 2.0, 3.0, 4.0, 5.0]),
        np.asarray([0.05]),
        np.asarray([0.01, 0.02, 0.05, 0.07, 0.10]),
        np.asarray([3.0]),
    ),
    autospec=True,
)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_limit(mock_util, mock_limit, mock_vis, tmp_path):
    """The limit command forwards tolerance/Asimov and plots the limit results."""
    workspace = {"workspace": "mock"}

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    # mirrors the return_value configured on the limit mock above
    limit_results = fit.LimitResults(
        3.0,
        np.asarray([1.0, 2.0, 3.0, 4.0, 5.0]),
        np.asarray([0.05]),
        np.asarray([0.01, 0.02, 0.05, 0.07, 0.10]),
        np.asarray([3.0]),
    )

    runner = CliRunner()

    # default
    result = runner.invoke(cli.limit, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace,), {"asimov": False})]
    assert mock_limit.call_args_list == [(("model", "data"), {"tolerance": 0.01})]
    # the visualization receives the limit mock's LimitResults
    assert mock_vis.call_count == 1
    assert np.allclose(
        mock_vis.call_args[0][0].observed_limit, limit_results.observed_limit
    )
    assert np.allclose(
        mock_vis.call_args[0][0].expected_limit, limit_results.expected_limit
    )
    assert np.allclose(
        mock_vis.call_args[0][0].observed_CLs, limit_results.observed_CLs
    )
    assert np.allclose(
        mock_vis.call_args[0][0].expected_CLs, limit_results.expected_CLs
    )
    assert np.allclose(mock_vis.call_args[0][0].poi_values, limit_results.poi_values)
    assert mock_vis.call_args[1] == {"figure_folder": "figures"}

    # Asimov, tolerance, custom folder
    result = runner.invoke(
        cli.limit,
        ["--asimov", "--tolerance", "0.1", "--figfolder", "folder", workspace_path],
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": True})
    assert mock_limit.call_args_list[-1] == (("model", "data"), {"tolerance": 0.1})
    assert mock_vis.call_args_list[-1][1] == {"figure_folder": "folder"}
@mock.patch("cabinetry.fit.significance", autospec=True)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_significance(mock_util, mock_sig, tmp_path):
    """Significance builds model/data and triggers the calculation once per run."""
    workspace = {"workspace": "mock"}

    workspace_path = str(tmp_path / "workspace.json")
    # click validates the workspace path, so a real file is required
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # observed data (default)
    observed_run = runner.invoke(cli.significance, [workspace_path])
    assert observed_run.exit_code == 0
    assert mock_util.call_args_list == [((workspace,), {"asimov": False})]
    assert mock_sig.call_args_list == [(("model", "data"), {})]

    # Asimov dataset
    asimov_run = runner.invoke(cli.significance, ["--asimov", workspace_path])
    assert asimov_run.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {"asimov": True})
    assert mock_sig.call_args_list[-1] == (("model", "data"), {})
@mock.patch("cabinetry.visualize.data_mc", autospec=True)
@mock.patch(
    "cabinetry.fit.fit",
    return_value=fit.FitResults(
        np.asarray([1.0]), np.asarray([0.1]), ["label"], np.asarray([[1.0]]), 1.0
    ),
    autospec=True,
)
@mock.patch("cabinetry.configuration.validate", autospec=True)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_data_mc(mock_util, mock_validate, mock_fit, mock_vis, cli_helpers, tmp_path):
    """Data/MC plotting: pre-fit by default, post-fit with --postfit and config."""
    workspace = {"workspace": "mock"}

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # default: no config validation and no fit are performed
    result = runner.invoke(cli.data_mc, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace,), {})]
    assert mock_validate.call_count == 0
    assert mock_fit.call_count == 0
    assert mock_vis.call_args_list == [
        (
            ("model", "data"),
            {"config": None, "figure_folder": "figures", "fit_results": None},
        )
    ]

    # with config, post-fit, custom figure folder
    config = {"General": {"Measurement": "test_config"}}
    config_path = str(tmp_path / "config.yml")
    cli_helpers.write_config(config_path, config)
    # mirrors the return_value configured on the fit mock above
    fit_results = fit.FitResults(
        np.asarray([1.0]), np.asarray([0.1]), ["label"], np.asarray([[1.0]]), 1.0
    )

    result = runner.invoke(
        cli.data_mc,
        [workspace_path, "--config", config_path, "--postfit", "--figfolder", "folder"],
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace,), {})
    assert mock_validate.call_args_list == [((config,), {})]
    assert mock_fit.call_args_list == [(("model", "data"), {})]
    assert mock_vis.call_args_list[-1] == (
        ("model", "data"),
        {"config": config, "figure_folder": "folder", "fit_results": fit_results},
    )
|
{"hexsha": "9374b07c5864c1bd39bfd940c835254737354bb7", "size": 17161, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/cli/test_cli.py", "max_stars_repo_name": "ExternalRepositories/cabinetry", "max_stars_repo_head_hexsha": "2261b603786dd3a5c5963b4e365d04ff250ba012", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/cli/test_cli.py", "max_issues_repo_name": "ExternalRepositories/cabinetry", "max_issues_repo_head_hexsha": "2261b603786dd3a5c5963b4e365d04ff250ba012", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/cli/test_cli.py", "max_forks_repo_name": "ExternalRepositories/cabinetry", "max_forks_repo_head_hexsha": "2261b603786dd3a5c5963b4e365d04ff250ba012", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4522417154, "max_line_length": 88, "alphanum_fraction": 0.6328302546, "include": true, "reason": "import numpy", "num_tokens": 4532}
|
\documentclass[letterpaper]{article}
\usepackage{amsmath, amsfonts, amssymb, amsthm}
\usepackage{enumerate,hyperref}
\usepackage[margin=1in]{geometry}
\usepackage[section]{placeins}
\theoremstyle{definition}
\newtheorem{problem}{Problem}
\newtheorem*{lemma}{Lemma}
\newtheorem*{corollary}{Corollary}
\providecommand{\equationref}[1]{Equation \eqref{eq:#1}}
\providecommand{\needscite}{[\textit{Citation Needed}]}
\setcounter{secnumdepth}{0}
\title{Spell Damage Analysis and Stat Weights}
\author{Balor - Anathema}
\date{Status: DRAFT. Last updated \today}
\begin{document}
\maketitle
This analysis was motivated by determining stat weights for a Balance druid casting Starfire. Wherever possible, however, things were kept general so as to be applicable to other spells and classes.
\section{Assumptions}
We use the following assumptions about how damage works.
\begin{itemize}
\item Spells have a base chance to hit that is purely a function of player level, target level, and Hit gear. Resistance does not affect a spell's chance to hit. For raid bosses, the base spell hit is 83, and thus a spell's percent chance to hit is $(83 + H)$ where $H$ is your total hit bonus from gear or talents.\needscite
\item Whether a spell lands as a critical hit is determined after a spell is known to land. That is, a 10\% chance to crit means that 10\% of all spells \textit{that hit} will be critical hits, not that 10\% of all spells that are cast will crit. \needscite This is in contrast to melee attacks, which use a different system to determine hit and crit chance.
\item Critical hits provide a fixed multiplicative boost to the damage of a spell. This is usually a 1.5 multiplier, but can vary depending on talents. \needscite For Balance Druids, the Vengeance talent gives a 2.0 damage multiplier on critical hits.
\item Spellpower increases the damage of a spell by increasing the damage of a non-resisted, non-critical hit by $c$ times your total spellpower, where $c$ is a fixed constant for a given spell. Usually, this constant is given by the default cast time for that spell divided by 3.5. \needscite
\end{itemize}
\section{The Damage Formula}
Let $B$ be the base damage of a spell, $c$ be that spell's corresponding spell coefficient, $H \in [0, 16]$ be a player's current total hit bonus (as a percentage, so +12\% hit is $H = 12$. Note that player hit chance can not be increased to 100, so only the first 16 are useful \needscite), $P$ be a player's total spellpower that applies to that spell, and $R \in [0, 100]$ be the player's spell crit, also as a percentage. Finally, let $x$ be the crit bonus, or the crit multiplier minus one (for example, if spell crits do 1.5 times damage in the default case, $x = 0.5$). Then the expected damage from one spell cast on a raid boss is given by the following.
\begin{equation}
\left(0.83 + \frac{H}{100}\right)\left(B + cP\right)\left(1 + x\frac{R}{100}\right)
\label{eq:damage}
\end{equation}
To get DPS, we can simply divide this by $T$, the total casting time of the spell. There is one complication here for druids, however. The Nature's Grace talent decreases the cast time of your next spell by 0.5 seconds whenever a spell lands a critical hit. Using assumption 2 above, we know that the probability of one spell resulting in a critical hit is $(0.83 + \frac{H}{100})(\frac{R}{100})$. Therefore, we can calculate an average cast time for the spell over a sufficiently long encounter as the following. Note that $t$ here is the casting time reduction that a critical hit yields. In the case of having Nature's Grace, $t=0.5$. If one does not have Nature's Grace, then $t=0$.
\begin{equation}
T - t\left(0.83 + \frac{H}{100}\right)\frac{R}{100}
\label{eq:time}
\end{equation}
Note that this is somewhat inaccurate, as the first spell in a fight is guaranteed to take $T$ time to cast, and so this is truly only the expected cast time for all subsequent spells. Factoring in the additional time from the first cast would require making assumptions on the total encounter length, which we hope to avoid here. Over sufficiently long encounters, these will converge to the same value, so the effect of this is ignored in the following analysis.
Dividing the expected damage in \equationref{damage} by the expected cast time in \equationref{time} yields our expected total DPS, $D$.
\begin{equation}
D = d\frac{\left(0.83 + \frac{H}{100}\right)\left(mB + cP\right)\left(1 + x\frac{R}{100}\right)}{T - t\left(0.83 + \frac{H}{100}\right)\frac{R}{100}}
\end{equation}
For completeness, we have added in two additional factors, $d$, and $m$. $m$ is any multiplicative modifier on the base damage of a spell that might arise from talents or set bonuses. For example, the Druid talent Moonfury sets $m=1.1$. $d$ is any multiplicative damage modifier on total damage of the spell, including things like Curse of Shadows and the target's resistance. (TODO: add argument for why we can treat resistance, which really determines a probability distribution of multiplicative damage reductions, as one simple average damage reduction. Also verify that either resistance cannot cause full 100\% damage reductions, or, that if it does, a spell can still be a crit while being 100\% resisted. If this is untrue, resistance will have an effect on Nature's Grace proc rates.).
\section{Stat Weightings}
To determine how we should value each stat ($H$, $P$, $R$), we have to examine how DPS varies as you change each stat. To do so, we will use derivatives, which measure the rate of change of the function with respect to a given parameter. The partial derivatives of DPS with respect to $H$, $P$, and $R$ are given below.
\begin{equation}
\frac{\partial D}{\partial P} = d\frac{c\left(83+H\right)\left(100 + xR\right)}{100^2T - t(83 + H)R}
\end{equation}
\begin{equation}
\frac{\partial D}{\partial H} = d\left(mB + cP\right)\left(100+xR\right) \left(\frac{100^2T}{\left(100^2T - t\left(83+H\right)R\right)^2}\right)
\end{equation}
\begin{equation}
\frac{\partial D}{\partial R} = d\left(mB+cP\right)\left(83+H\right) \left(\frac{xT + t\left(0.83 + \frac{H}{100}\right)}{\left(100T - t\left(0.83 + \frac{H}{100}\right)R\right)^2}\right)
\end{equation}
$\frac{\partial D}{\partial P}$ says that, when adding a very small amount of $P$, we expect the function value to change by $\frac{\partial D}{\partial P}$ \textit{per point of $P$ we varied}. It is the limiting value for very small changes of $P$, which gives a sense of how relevant $P$ is to the output function at a given point in the parameter space.
Since we are concerned with stat weights, what we care most about is how these derivatives relate to each other. If we set the value of one spellpower to be 1 by convention, then taking ratios of derivatives will give us values for the other stats, $R$ and $H$. These equations are as follows.
\begin{equation}
\textrm{HitWeight} = \frac{\frac{\partial D}{\partial H}}{\frac{\partial D}{\partial P}} = \frac{\frac{mB}{c} + P}{83 + H} \left(\frac{100^2 T}{100^2T - t(83 + H)R}\right)
\end{equation}
\begin{equation}
\textrm{CritWeight} = \frac{\frac{\partial D}{\partial R}}{\frac{\partial D}{\partial P}} = x\frac{\frac{mB}{c} + P}{100+xR} \left(\frac{T + \frac{t}{x}\left(0.83+\frac{H}{100}\right)}{T - t\left(0.83 + \frac{H}{100}\right)\frac{R}{100}}\right)
\end{equation}
\subsection{No Nature's Grace}
To slightly generalize these to other classes, we can remove Nature's Grace from the equations by setting the casting time reduction from a crit to zero. That is, by setting $t=0$. Note that the equations were already factorized to make the impact of Nature's Grace apparent. Upon doing so, we get the following stat weights, which should be applicable to other classes.
\begin{equation}
\nonumber
\textrm{SpellpowerWeight} = 1
\end{equation}
\begin{equation}
\nonumber
\textrm{HitWeight} = \frac{\frac{mB}{c} + P}{83 + H}
\end{equation}
\begin{equation}
\nonumber
\textrm{CritWeight} = x\frac{\frac{mB}{c} + P}{100 + xR}
\end{equation}
\end{document}
|
{"hexsha": "9dd89e7b899168398a25938a30b9ba776b9e0284", "size": 8029, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "contrib/whitepaper/SpellDamage.tex", "max_stars_repo_name": "kmmiles/libclassic", "max_stars_repo_head_hexsha": "d1cfbb110a49677b8cb1cc82231e4931efa02e63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "contrib/whitepaper/SpellDamage.tex", "max_issues_repo_name": "kmmiles/libclassic", "max_issues_repo_head_hexsha": "d1cfbb110a49677b8cb1cc82231e4931efa02e63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-12-04T20:57:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T07:02:52.000Z", "max_forks_repo_path": "contrib/whitepaper/SpellDamage.tex", "max_forks_repo_name": "ultrabis/libclassic", "max_forks_repo_head_hexsha": "d1cfbb110a49677b8cb1cc82231e4931efa02e63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 79.495049505, "max_line_length": 793, "alphanum_fraction": 0.7430564205, "num_tokens": 2290}
|
# encoding=utf-8
import unittest
import numpy as np
from ..scorers import *
class TestScorers(unittest.TestCase):
    """Unit tests for the ranking scorer functions."""

    def test_average_precision(self):
        # hits at ranks 1 and 3 out of 3 relevant items: (1/1 + 2/3)/3 = 0.5556
        relevant = ["perm2", "perm4", "perm8"]
        ranked = ["perm2", "perm3", "perm4", "perm5"]
        expected = 0.5556
        actual = round(average_precision(relevant, ranked), 4)
        self.assertEqual(expected, actual)

    def test_mean_average_precision(self):
        relevant_lists = [
            ["perm2", "perm4", "perm8"],
            ["perm3"],
            ["perm5", "perm6", "perm9", "perm11", "perm12"],
        ]
        ranked_lists = [
            # AP = 0.555555556
            ["perm2", "perm3", "perm4", "perm5"],
            # AP = 0
            ["perm2", "perm4", "perm5", "perm6"],
            # AP = (0.5 + 2/3 + 3/4) / 5 = 0.3833333
            ["perm2", "perm9", "perm11", "perm6"],
        ]
        expected = 0.3130
        actual = round(mean_average_precision(relevant_lists, ranked_lists), 4)
        self.assertEqual(expected, actual)

    def test_total_recall_ratio(self):
        num_candidates = 45
        # (y_true, y_pred) pairs paired with their expected ratios below
        cases = [
            ([], ["p1", "p2"]),
            (["p1", "p2"], []),
            (["p1"], ["p1", "p2"]),
            (["p1", "p2", "p3"], ["p4", "p3"]),
            (
                np.array(["p1", "p2", "p3"]),
                np.array(["p4", "p3", "p1", "p5", "p2"]),
            ),
        ]
        expected_values = [1, 45 / 2, 1, 45 / 3, 5 / 3]
        for (truth, pred), expected in zip(cases, expected_values):
            actual = total_recall_ratio(truth, pred, num_candidates)
            self.assertEqual(expected, actual)

    def test_average_total_recall_ratio(self):
        num_candidates = 45
        truths = [
            [],
            ["p1", "p2"],
            ["p1"],
            ["p1", "p2", "p3"],
            ["p1", "p2", "p3"],
        ]
        preds = [
            ["p1", "p2"],
            [],
            ["p1", "p2"],
            ["p4", "p3"],
            ["p4", "p3", "p1", "p5", "p2"],
        ]
        # mean of the five per-case ratios from test_total_recall_ratio
        expected = (39.5 + 5 / 3) / 5
        actual = average_total_recall_ratio(truths, preds, num_candidates)
        self.assertEqual(expected, actual)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{"hexsha": "76fc91f63e8d556ca51cc3d48dfa620f7f195db0", "size": 2315, "ext": "py", "lang": "Python", "max_stars_repo_path": "perrec/common/tests/test_scorers.py", "max_stars_repo_name": "Tbabm/PerRec", "max_stars_repo_head_hexsha": "1f711d70df8354156b37857719db0559876be08c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-07-24T12:03:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-28T14:42:51.000Z", "max_issues_repo_path": "perrec/common/tests/test_scorers.py", "max_issues_repo_name": "Tbabm/PerRec", "max_issues_repo_head_hexsha": "1f711d70df8354156b37857719db0559876be08c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "perrec/common/tests/test_scorers.py", "max_forks_repo_name": "Tbabm/PerRec", "max_forks_repo_head_hexsha": "1f711d70df8354156b37857719db0559876be08c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2352941176, "max_line_length": 71, "alphanum_fraction": 0.4064794816, "include": true, "reason": "import numpy", "num_tokens": 661}
|
# Optimize.py
# Created: Feb 2016, M. Vegh
# Modified: Aug 2017, E. Botero
# Aug 2018, T. MacDonald
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
# NOTE: the previous version used ``assert('...message...')``, which asserts a
# non-empty string and therefore can never fail. The message belongs in the
# second position of the assert statement so the check actually fires on a
# version mismatch.
assert SUAVE.__version__ == '2.5.0', \
    'These tutorials only work with the SUAVE 2.5.0 release'
from SUAVE.Core import Units, Data
import numpy as np
import Vehicles
import Analyses
import Missions
import Procedure
import Plot_Mission
import matplotlib.pyplot as plt
from SUAVE.Optimization import Nexus, carpet_plot
import SUAVE.Optimization.Package_Setups.scipy_setup as scipy_setup
# ----------------------------------------------------------------------
# Run the whole thing
# ----------------------------------------------------------------------
def main():
    """Build the optimization problem, solve it with SLSQP, and plot the mission."""
    nexus = setup()
    ## evaluate the objective once at the baseline input values
    baseline = nexus.objective()
    ## Uncomment to view contours of the design space
    #variable_sweep(nexus)
    # run the optimization
    solution = scipy_setup.SciPy_Solve(nexus, solver='SLSQP')
    print(solution)
    print('fuel burn = ', nexus.summary.base_mission_fuelburn)
    print('fuel margin = ', nexus.all_constraints())
    Plot_Mission.plot_mission(nexus)
    return
# ----------------------------------------------------------------------
# Inputs, Objective, & Constraints
# ----------------------------------------------------------------------
def setup():
    """Assemble the Nexus optimization problem.

    Defines the design inputs, the objective, the constraints, and the
    aliases that map optimizer tags onto paths inside the nexus data
    structure, then attaches vehicle configurations, analyses, missions,
    and the solution procedure.

    Returns:
        nexus (Nexus): fully assembled optimization problem container.
    """
    nexus = Nexus()
    problem = Data()
    nexus.optimization_problem = problem
    # -------------------------------------------------------------------
    # Inputs
    # -------------------------------------------------------------------
    # [ tag , initial, lb , ub , scaling , units ]
    problem.inputs = np.array([
        [ 'wing_area' , 92 , 50. , 130. , 100. , 1*Units.meter**2],
        [ 'cruise_altitude' , 8 , 6. , 12. , 10. , 1*Units.km],
    ],dtype=object)
    # -------------------------------------------------------------------
    # Objective
    # -------------------------------------------------------------------
    # [ tag, scaling, units ]
    problem.objective = np.array([
        [ 'fuel_burn', 10000, 1*Units.kg ]
    ],dtype=object)
    # -------------------------------------------------------------------
    # Constraints
    # -------------------------------------------------------------------
    # [ tag, sense, edge, scaling, units ]
    problem.constraints = np.array([
        [ 'design_range_fuel_margin' , '>', 0., 1E-1, 1*Units.less], #fuel margin defined here as fuel
    ],dtype=object)
    # -------------------------------------------------------------------
    # Aliases
    # -------------------------------------------------------------------
    # [ 'alias' , ['data.path1.name','data.path2.name'] ]
    # The '*' wildcard applies the input to every vehicle configuration.
    problem.aliases = [
        [ 'wing_area' , ['vehicle_configurations.*.wings.main_wing.areas.reference',
                         'vehicle_configurations.*.reference_area' ]],
        [ 'cruise_altitude' , 'missions.base.segments.climb_5.altitude_end' ],
        [ 'fuel_burn' , 'summary.base_mission_fuelburn' ],
        [ 'design_range_fuel_margin' , 'summary.max_zero_fuel_margin' ],
    ]
    # -------------------------------------------------------------------
    # Vehicles
    # -------------------------------------------------------------------
    nexus.vehicle_configurations = Vehicles.setup()
    # -------------------------------------------------------------------
    # Analyses
    # -------------------------------------------------------------------
    nexus.analyses = Analyses.setup(nexus.vehicle_configurations)
    # -------------------------------------------------------------------
    # Missions
    # -------------------------------------------------------------------
    nexus.missions = Missions.setup(nexus.analyses)
    # -------------------------------------------------------------------
    # Procedure
    # -------------------------------------------------------------------
    nexus.procedure = Procedure.setup()
    # -------------------------------------------------------------------
    # Summary
    # -------------------------------------------------------------------
    nexus.summary = Data()
    nexus.total_number_of_iterations = 0
    return nexus
def variable_sweep(problem):
    """Sweep the design variables on a grid and plot objective/constraint contours."""
    n_points = 5
    # run the carpet plot sweep, suppressing its default plots
    sweep = carpet_plot(problem, n_points, 0, 0)
    grid_inputs = sweep.inputs
    fuel_burn = sweep.objective
    fuel_margin = sweep.constraint_val
    plt.figure(0)
    # filled contours of the objective (fuel burn)
    obj_contours = plt.contourf(grid_inputs[0, :], grid_inputs[1, :], fuel_burn, 20, linewidths=2, cmap='jet')
    obj_bar = plt.colorbar(obj_contours)
    obj_bar.ax.set_ylabel('fuel burn (kg)')
    # overlay the constraint (fuel margin) as labeled contour lines
    con_contours = plt.contour(grid_inputs[0, :], grid_inputs[1, :], fuel_margin[0, :, :], cmap='jet')
    plt.clabel(con_contours, inline=1, fontsize=10)
    con_bar = plt.colorbar(con_contours)
    con_bar.ax.set_ylabel('fuel margin')
    plt.xlabel('Wing Area (m^2)')
    plt.ylabel('Cruise Altitude (km)')
    plt.show(block=True)
    return
# Run the optimization when this script is executed directly.
if __name__ == '__main__':
    main()
|
{"hexsha": "6c93e799f7a8b9597023826f6a0d577d4abd80ad", "size": 5635, "ext": "py", "lang": "Python", "max_stars_repo_path": "SUAVE/Tutorials-2.5.0/Regional_Jet_Optimization/Optimize.py", "max_stars_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_stars_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SUAVE/Tutorials-2.5.0/Regional_Jet_Optimization/Optimize.py", "max_issues_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_issues_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SUAVE/Tutorials-2.5.0/Regional_Jet_Optimization/Optimize.py", "max_forks_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_forks_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0723684211, "max_line_length": 114, "alphanum_fraction": 0.3998225377, "include": true, "reason": "import numpy", "num_tokens": 1101}
|
import ghalton
import numpy as np
from src.autoks.distance.sampling.sampler import Sampler
from src.autoks.distance.sampling.scramble import scramble_array
def generate_halton(n: int, d: int):
    """Return ``n`` points of the ``d``-dimensional Halton sequence."""
    return ghalton.Halton(d).get(n)
def generate_generalized_halton(n: int, d: int):
    """Return ``n`` points of the ``d``-dimensional generalized (permuted) Halton sequence."""
    gen = ghalton.GeneralizedHalton(ghalton.EA_PERMS[:d])
    return gen.get(n)
def halton_sample(n_samples: int,
                  n_dims: int,
                  scramble: bool = True):
    """Draw Halton-sequence samples, optionally scrambled.

    :param n_samples: number of points to generate
    :param n_dims: dimensionality of each point
    :param scramble: whether to scramble the sequence (in place)
    :return: samples as a numpy array of shape (n_samples, n_dims)
    """
    # Convert to an ndarray *before* scrambling: scramble_array works in
    # place, so the previous version — which scrambled a temporary
    # np.asarray(...) of the Python list and then returned the original
    # list — silently discarded the scramble.
    samples = np.asarray(generate_halton(n_samples, n_dims))
    if scramble:
        scramble_array(samples)
    return samples
def generalized_halton_sample(n_samples: int,
                              n_dims: int):
    """Draw generalized Halton samples.

    :param n_samples: number of points to generate
    :param n_dims: dimensionality of each point (at most 100, limited by
        the available ghalton permutations)
    :raises ValueError: if ``n_dims`` exceeds the supported maximum
    :return: the generated samples
    """
    max_dims = 100
    if n_dims > max_dims:
        # Use __name__ on the class itself: the previous
        # ghalton.GeneralizedHalton.__class__.__name__ named the
        # *metaclass* (e.g. 'type'), not 'GeneralizedHalton'.
        raise ValueError(
            f'{ghalton.GeneralizedHalton.__name__} supports up to {max_dims} spatial dimensions.')

    # Generate samples
    samples = generate_generalized_halton(n_samples, n_dims)
    return samples
class HaltonSampler(Sampler):
    """Sampler backed by the (optionally scrambled) Halton sequence."""

    def sample(self, n_points: int, n_dims: int) -> np.ndarray:
        points = halton_sample(n_points, n_dims)
        return np.asarray(points)
class GeneralizedHaltonSampler(Sampler):
    """Sampler backed by the generalized (permuted) Halton sequence."""

    def sample(self, n_points: int, n_dims: int) -> np.ndarray:
        points = generalized_halton_sample(n_points, n_dims)
        return np.asarray(points)
|
{"hexsha": "fdb08479b2fdc3089d7f511ea2922e6d46714aa0", "size": 1450, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/autoks/distance/sampling/halton.py", "max_stars_repo_name": "lschlessinger1/MS-project", "max_stars_repo_head_hexsha": "e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-29T15:18:11.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-13T18:58:40.000Z", "max_issues_repo_path": "src/autoks/distance/sampling/halton.py", "max_issues_repo_name": "lschlessinger1/MS-project", "max_issues_repo_head_hexsha": "e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 275, "max_issues_repo_issues_event_min_datetime": "2019-02-19T22:59:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-03T08:56:08.000Z", "max_forks_repo_path": "src/autoks/distance/sampling/halton.py", "max_forks_repo_name": "lschlessinger1/MS-project", "max_forks_repo_head_hexsha": "e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8928571429, "max_line_length": 108, "alphanum_fraction": 0.6896551724, "include": true, "reason": "import numpy", "num_tokens": 349}
|
'''
Classifier : K Nearest Neighbour
Dataset : Play Predictor Dataset
Features : Whether, Temperature
Labels : Yes, No
Training Dataset : 30 Entries
Testing Dataset : 1 Entry
Author : Prasad Dangare
Function Name : MarvellousPredictor
'''
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
def MarvellousPredictor(path, sample=(0, 2), n_neighbors=3):
    """Train a KNN classifier on the play-predictor dataset and classify one sample.

    :param path: path to the CSV dataset (columns: Wether, Temperature, Play)
    :param sample: (whether, temperature) pair, already label-encoded, to
                   classify; defaults to (0, 2) as in the original script
    :param n_neighbors: number of neighbours used by the KNN classifier
    :return: the predicted label (1 = can play, 0 = don't play)
    """
    # Step 1 : load the dataset
    data = pd.read_csv(path)
    print("Dataset loaded successfully with the size ", len(data))
    # Step 2 : encode the string features and labels as integers
    Feature_name = ["Whether", "Temprature"]
    print("Feature Name Are ", Feature_name)
    Whether = data.Wether  # column names according to the csv file
    Temprature = data.Temperature
    Play = data.Play
    # one encoder per column so each keeps its own category mapping
    WhetherX = preprocessing.LabelEncoder().fit_transform(Whether)
    TempratureX = preprocessing.LabelEncoder().fit_transform(Temprature)
    Label = preprocessing.LabelEncoder().fit_transform(Play)
    print("Encoded Whether is ")
    print(WhetherX)
    print("Encoded Temprature is ")
    print(TempratureX)
    features = list(zip(WhetherX, TempratureX))
    # Step 3 : train the classifier
    obj = KNeighborsClassifier(n_neighbors=n_neighbors)
    obj.fit(features, Label)
    # Step 4 : classify the requested sample; predict returns an array,
    # so compare its single element rather than the array itself
    output = obj.predict([list(sample)])
    if output[0] == 1:
        print("You Can Play")
    else:
        print("Dont Play")
    return output[0]
def main():
    """Entry point: prompt for the dataset path and run the predictor."""
    print("_____Marvellous Play Predictor_____")
    print("Enter the path of the file which contains dataset : ")
    dataset_path = input()
    MarvellousPredictor(dataset_path)
# Run the predictor when this script is executed directly.
if __name__ == "__main__":
    main()
|
{"hexsha": "d2c63992fb571e957045b0b1b258a9e62ffc97b9", "size": 1574, "ext": "py", "lang": "Python", "max_stars_repo_path": "Application7/Play_Predictor2.py", "max_stars_repo_name": "PRASAD-DANGARE/Machine_Learning-Applications", "max_stars_repo_head_hexsha": "e2fa540d44993dc0750d95ce6ad686facd3bb769", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-07T07:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-12T07:59:13.000Z", "max_issues_repo_path": "Application7/Play_Predictor2.py", "max_issues_repo_name": "PRASAD-DANGARE/Machine_Learning-Applications", "max_issues_repo_head_hexsha": "e2fa540d44993dc0750d95ce6ad686facd3bb769", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Application7/Play_Predictor2.py", "max_forks_repo_name": "PRASAD-DANGARE/Machine_Learning-Applications", "max_forks_repo_head_hexsha": "e2fa540d44993dc0750d95ce6ad686facd3bb769", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-29T04:30:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-29T04:30:00.000Z", "avg_line_length": 22.8115942029, "max_line_length": 66, "alphanum_fraction": 0.6556543837, "include": true, "reason": "import numpy", "num_tokens": 394}
|
import pickle
from collections import deque
import numpy as np
from mher.algos.util import convert_episode_to_batch_major, store_args
class RolloutWorker:
    @store_args
    def __init__(self, venv, policy, dims, logger, T, rollout_batch_size=1,
                 exploit=False, use_target_net=False, compute_Q=False, noise_eps=0,
                 random_eps=0, history_len=100, render=False, monitor=False, **kwargs):
        """Rollout worker generates experience by interacting with one or many environments.
        Args:
            venv: vectorized gym environments.
            policy (object): the policy that is used to act
            dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)
            logger (object): the logger that is used by the rollout worker
            T (int): number of timesteps per rollout (time horizon)
            rollout_batch_size (int): the number of parallel rollouts that should be used
            exploit (boolean): whether or not to exploit, i.e. to act optimally according to the
                current policy without any exploration
            use_target_net (boolean): whether or not to use the target net for rollouts
            compute_Q (boolean): whether or not to compute the Q values alongside the actions
            noise_eps (float): scale of the additive Gaussian noise
            random_eps (float): probability of selecting a completely random action
            history_len (int): length of history for statistics smoothing
            render (boolean): whether or not to render the rollouts
        """
        # @store_args has already copied every constructor argument onto
        # self, so self.T, self.policy, self.venv, etc. are usable below.
        assert self.T > 0
        # keys of the per-step info entries tracked in the episode, derived
        # from the 'info_*' entries of dims
        self.info_keys = [key.replace('info_', '') for key in dims.keys() if key.startswith('info_')]
        # rolling windows for success-rate and mean-Q statistics
        self.success_history = deque(maxlen=history_len)
        self.Q_history = deque(maxlen=history_len)
        self.n_episodes = 0
        self.reset_all_rollouts()
        self.clear_history()
    def reset_all_rollouts(self):
        """Reset all environments and cache initial observations/goals."""
        self.obs_dict = self.venv.reset()
        self.initial_o = self.obs_dict['observation']
        self.initial_ag = self.obs_dict['achieved_goal']
        self.g = self.obs_dict['desired_goal']
    def generate_rollouts(self, random_ac=False):
        """Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
        policy acting on it accordingly.

        Args:
            random_ac (bool): if True, sample uniformly random actions from the
                policy instead of querying it for actions.

        Returns:
            dict: episode in batch-major layout (rollout, steps, dim) with keys
                'o', 'u', 'g', 'ag', 'r' and one 'info_<key>' entry per info key.
                Note 'o' and 'ag' contain T+1 entries (final observation included).
        """
        self.reset_all_rollouts()
        # compute observations
        o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32)  # observations
        ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32)  # achieved goals
        o[:] = self.initial_o
        ag[:] = self.initial_ag
        # generate episodes
        obs, achieved_goals, acts, goals, successes, rewards = [], [], [], [], [], []
        # NOTE(review): dones is accumulated but not included in the returned
        # episode dict below
        dones = []
        info_values = [np.empty((self.T, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
        Qs = []
        for t in range(self.T):
            if random_ac:
                u = self.policy._random_action(self.rollout_batch_size)
            else:
                # exploit mode disables both Gaussian noise and epsilon-random actions
                policy_output = self.policy.get_actions(
                    o, ag, self.g,
                    compute_Q=self.compute_Q,
                    noise_eps=self.noise_eps if not self.exploit else 0.,
                    random_eps=self.random_eps if not self.exploit else 0.,
                    use_target_net=self.use_target_net)
                if self.compute_Q:
                    u, Q = policy_output
                    Qs.append(Q)
                else:
                    u = policy_output
            if u.ndim == 1:
                # The non-batched case should still have a reasonable shape.
                u = u.reshape(1, -1)
            o_new = np.empty((self.rollout_batch_size, self.dims['o']))
            ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
            success = np.zeros(self.rollout_batch_size)
            # compute new states and observations
            obs_dict_new, reward, done, info = self.venv.step(u)
            o_new = obs_dict_new['observation']
            ag_new = obs_dict_new['achieved_goal']
            success = np.array([i.get('is_success', 0.0) for i in info])
            # copy the tracked info entries for this timestep; silently skip
            # entries an environment does not provide
            for i, info_dict in enumerate(info):
                for idx, key in enumerate(self.info_keys):
                    try:
                        info_values[idx][t, i] = info[i][key]
                    except:
                        pass
            dones.append(done)
            obs.append(o.copy())
            achieved_goals.append(ag.copy())
            successes.append(success.copy())
            acts.append(u.copy())
            goals.append(self.g.copy())
            rewards.append(reward.copy())
            o[...] = o_new
            ag[...] = ag_new
            if any(done) or t == self.T-1:
                # here we assume all environments are done is ~same number of steps, so we terminate rollouts whenever any of the envs returns done
                # trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations
                # after a reset
                break
        # append the final observation/achieved goal, so 'o' and 'ag' have
        # one more entry than the other episode keys
        obs.append(o.copy())
        achieved_goals.append(ag.copy())
        episode = dict(o=obs,
                       u=acts,
                       g=goals,
                       ag=achieved_goals,
                       r=rewards)
        for key, value in zip(self.info_keys, info_values):
            episode['info_{}'.format(key)] = value
        # stats: success is judged on the last recorded timestep of each rollout
        successful = np.array(successes)[-1, :]
        assert successful.shape == (self.rollout_batch_size,)
        success_rate = np.mean(successful)
        self.success_history.append(success_rate)
        if self.compute_Q:
            self.Q_history.append(np.mean(Qs))
        self.n_episodes += self.rollout_batch_size
        return convert_episode_to_batch_major(episode)  # change shape to (rollout, steps, dim)
    def clear_history(self):
        """Clears all histories that are used for statistics
        """
        self.success_history.clear()
        self.Q_history.clear()
    def current_success_rate(self):
        """Mean success rate over the recent history window."""
        return np.mean(self.success_history)
    def current_mean_Q(self):
        """Mean Q value over the recent history window."""
        return np.mean(self.Q_history)
    def logs(self, prefix='worker'):
        """Generates a dictionary that contains all collected statistics.

        Args:
            prefix (str): prepended to each key (with a '/') unless empty or
                already ending in '/'.

        Returns:
            list of (key, value) statistic pairs.
        """
        logs = []
        logs += [('success_rate', np.mean(self.success_history))]
        if self.compute_Q:
            logs += [('mean_Q', np.mean(self.Q_history))]
        logs += [('episode', self.n_episodes)]
        if prefix != '' and not prefix.endswith('/'):
            return [(prefix + '/' + key, val) for key, val in logs]
        else:
            return logs
|
{"hexsha": "9c56a3fbad1c99ce1a7d56dfa9fd85ae2cc3a4f8", "size": 6781, "ext": "py", "lang": "Python", "max_stars_repo_path": "mher/rollouts/rollout.py", "max_stars_repo_name": "YangRui2015/Modular_HER", "max_stars_repo_head_hexsha": "77acca83d6849d140ab893ec1b472b71e1da08d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-10-31T15:01:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T06:51:15.000Z", "max_issues_repo_path": "mher/rollouts/rollout.py", "max_issues_repo_name": "YangRui2015/Modular_HER", "max_issues_repo_head_hexsha": "77acca83d6849d140ab893ec1b472b71e1da08d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-22T14:25:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-23T11:47:37.000Z", "max_forks_repo_path": "mher/rollouts/rollout.py", "max_forks_repo_name": "YangRui2015/Modular_HER", "max_forks_repo_head_hexsha": "77acca83d6849d140ab893ec1b472b71e1da08d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-24T03:26:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-14T01:20:57.000Z", "avg_line_length": 42.6477987421, "max_line_length": 147, "alphanum_fraction": 0.5870815514, "include": true, "reason": "import numpy", "num_tokens": 1439}
|
from time import time
import copy
import numpy
from rmgpy.tools.canteraModel import Cantera
try:
#import muq.libmuqModelling
from libmuqModelling import ModPiece
#import muq.libmuqUtilities as libmuqUtilities
#import muq.libmuqApproximation as libmuqApproximation
from libmuqUtilities import LegendrePolynomials1DRecursive, GaussPattersonQuadrature1D, VariableCollection
from libmuqApproximation import SmolyakPCEFactory
except:
ModPiece = object
print 'Could not import MUQ. Please check that it is installed correctly before using the global uncertainty modules.'
# You must install the MUQ library before using this. Add the folder containing
# libmuqUtilities.so, libmuqApproximation.so, etc to your $PYTHONPATH
# For linux users, you can install via 'conda install -c rmg muq' to your environment
# and add the ~/anaconda/envs/your_env/lib folder to your $PYTHONPATH
class ReactorModPiece(ModPiece):
    """MUQ ModPiece wrapping a Cantera reactor simulation.

    Maps uniform random variables for uncertain rate coefficients and
    species free energies onto the final-time mole fractions of the
    selected output species, one output entry per
    (condition, output species) pair.
    """

    def __init__(self, cantera, outputSpeciesList, kReactions, kUncertainty, gSpecies, gUncertainty):
        """
        ======================= ====================================================
        Attribute               Description
        ======================= ====================================================
        `cantera`               A Cantera() object containing CanteraConditions and initialized species and reactions
        `outputSpeciesList`     A list of Species() objects corresponding to the desired observables for uncertainty analysis
        `kReactions`            A list of indices (into cantera.reactionList) of the uncertain input rate coefficients
        `kUncertainty`          A list of uncertainties dlnk corresponding to the reactions in kReactions
        `gSpecies`              A list of indices (into cantera.speciesList) of the uncertain input free energies
        `gUncertainty`          A list of uncertainties dG corresponding to the species in gSpecies in units of kcal/mol
        ============================================================================
        """
        self.cantera = cantera
        self.outputSpeciesList = outputSpeciesList
        # Positions of the observables within the Cantera species list.
        self.outputSpeciesIndices = [cantera.speciesList.index(outputSpecies) for outputSpecies in outputSpeciesList]

        self.kReactions = kReactions
        # Convert dlnk uncertainties into factors applied to a uniform
        # random variable on [-1, 1]: sqrt(3) rescales the uniform
        # variable, log(10) converts from ln(k) to log10(k) (the sampled
        # rate is applied as 10**factor in scaleToKinetics below).
        kUncertaintyFactors = [val*numpy.sqrt(3)/numpy.log(10) for val in kUncertainty]
        # Keyed by reaction index for direct lookup in scaleToKinetics.
        self.kUncertaintyFactors = {}
        for i, rxnIndex in enumerate(kReactions):
            self.kUncertaintyFactors[rxnIndex] = kUncertaintyFactors[i]

        self.gSpecies = gSpecies
        # Free-energy uncertainties (kcal/mol) rescaled for the uniform
        # random variable on [-1, 1].
        gUncertaintyFactors = [val*numpy.sqrt(3) for val in gUncertainty]
        # Keyed by species index for direct lookup in scaleToThermo.
        self.gUncertaintyFactors = {}
        for i, spcIndex in enumerate(gSpecies):
            self.gUncertaintyFactors[spcIndex] = gUncertaintyFactors[i]

        # The size of the uncertain inputs: [reaction rates k, species free energy G]
        self.inputSize = [len(kReactions) + len(gSpecies)]

        # The size of the vector corresponding to the outputs to be analyzed for uncertainty analysis
        # is equal to the number of cantera conditions involved multiplied by the number of desired observables
        outputSize = len(cantera.conditions)*len(outputSpeciesList)

        self.numOutputSpecies = len(outputSpeciesList)
        self.numConditions = len(cantera.conditions)

        # Initialize the ModPiece with some input and output size specifications
        # NOTE(review): two trailing False flags are passed below; verify
        # this matches the ModPiece constructor signature in this MUQ
        # version (one of them may be an accidental duplicate).
        ModPiece.__init__(self,
                          self.inputSize,
                          outputSize,
                          False, # No GradientImpl
                          False, # No JacobianImpl
                          False, # No HessianImpl
                          False, # Not random)
                          False, # Not random)
                          )

    def EvaluateImpl(self, ins):
        """
        Evaluate the desired output mole fractions based on a set of inputs ins = [[k_rv], [G_rv]] which contains the
        random variables attributed to the uncertain kinetics and free energy parameters, respectively.

        The output returned contains [Condition1_outputMoleFraction1, Condition1_outputMoleFraction2, Condition2_output.... ConditionN_output...]
        """
        assert len(ins[0]) == self.inputSize[0], "Number of inputs matches number of uncertain parameters"

        # Split the flat input vector: kinetics variables first, then thermo.
        k_rv = ins[0][0:len(self.kReactions)]
        G_rv = ins[0][len(self.kReactions):]

        ## Check that the number of inputs is correct
        #assert len(k_rv) == len(self.kReactions), "Number of inputs matches number of kReactions"
        #assert len(G_rv) == len(self.gSpecies), "Number of inputs matches number of gSpecies"

        # Make deepcopies of the thermo and kinetics so as to not modify the originals in the speciesList and reactionList
        originalThermo = [copy.deepcopy(self.cantera.speciesList[index].thermo) for index in self.gSpecies]
        originalKinetics = [copy.deepcopy(self.cantera.reactionList[index].kinetics) for index in self.kReactions]

        # print ''
        # print 'Kinetics before'
        # ctReactions = self.cantera.model.reactions()
        # print ctReactions[0].rate
        # print ''
        # print 'Thermo before'
        # ctSpecies = self.cantera.model.species()
        # print ctSpecies[5].thermo.h(298)

        # Scale the thermo and kinetics of the current objects
        for i, rv in enumerate(k_rv):
            self.scaleToKinetics(rv,self.kReactions[i])
        for i, rv in enumerate(G_rv):
            self.scaleToThermo(rv, self.gSpecies[i])

        # The model must be refreshed when there are any thermo changes
        # kinetics can be refreshed automatically so we don't need to recreate the Solution() object.
        if G_rv:
            self.cantera.refreshModel()

        # Run the cantera simulation
        allData = self.cantera.simulate()

        # Create a vector to hold the ModPiece output, which will be the mole fraction of the output species of interest
        # NOTE(review): self.outputSize is presumably stored by the
        # ModPiece base class from the outputSize ctor argument -- confirm.
        output = numpy.zeros(self.outputSize)

        # Extract the final time point for each of the mole fractions within the outputSpeciesList
        for i in range(self.numConditions):
            for j in range(self.numOutputSpecies):
                speciesIndex = self.outputSpeciesIndices[j]
                # allData[i][1] holds per-species data for condition i;
                # the first two entries are skipped.  NOTE(review):
                # presumably non-species columns -- confirm against
                # Cantera.simulate().
                speciesGenericData = allData[i][1][2:]
                # .data[-1] is the value at the final simulated time point.
                output[i*self.numOutputSpecies+j]=speciesGenericData[speciesIndex].data[-1]

        # print ''
        # print 'Kinetics after'
        # ctReactions = self.cantera.model.reactions()
        # print ctReactions[0].rate
        # print ''
        # print 'Thermo after'
        # ctSpecies = self.cantera.model.species()
        # print ctSpecies[5].thermo.h(298)

        # Now reset the cantera object's speciesList and reactionList back to original thermo and kinetics
        for i, thermo in enumerate(originalThermo):
            index = self.gSpecies[i]
            self.cantera.speciesList[index].thermo = thermo
        for i, kinetics in enumerate(originalKinetics):
            index = self.kReactions[i]
            self.cantera.reactionList[index].kinetics = kinetics

        return list(output)

    def scaleToKinetics(self, randomInput, reactionIndex):
        """
        This function takes a random uniform input X = Unif(-1,1) and scales the kinetics within a reaction to that value, given
        that the kinetics has a loguniform distribution where ln(k) = Unif[ln(k_min), ln(k_max)]

        k_sampled = 10^(randomInput * UncertaintyFactor) * k0

        The kinetics is permanently altered in the cantera model and must be reset to its original value after the evaluation is finished.
        """
        rxn = self.cantera.reactionList[reactionIndex]
        uncertaintyFactor = self.kUncertaintyFactors[reactionIndex]
        factor = randomInput*uncertaintyFactor

        # The rate is loguniform in k
        rxn.kinetics.changeRate(10**factor)
        self.cantera.modifyReactionKinetics(reactionIndex, rxn)

    def scaleToThermo(self,randomInput, speciesIndex):
        """
        This function takes a random uniform input X = Unif(-1,1) and scales the thermodynamics for a species to that value,
        given that the thermo has a uniform distribution G = Unif(-Gmin,Gmax)

        G_sampled = randomInput * UncertaintyFactor + G0

        The thermo is permanently altered in the cantera model and must be reset to its original value after the evaluation is finished.
        """
        species = self.cantera.speciesList[speciesIndex]
        uncertaintyFactor = self.gUncertaintyFactors[speciesIndex]
        deltaH = randomInput*uncertaintyFactor*4184.0   # Convert kcal/mol to J/mol

        species.thermo.changeBaseEnthalpy(deltaH)
        self.cantera.modifySpeciesThermo(speciesIndex, species)
class ReactorPCEFactory:
    """
    This class uses MUQ to generate adaptive Polynomial Chaos Expansions for global uncertainty analysis in chemical reaction systems.
    It uses RMG, Cantera, and MUQ dependencies.

    Methodology
        1. Set up reactor conditions and load chemical kinetic mechanism. Select desired outputs
        2. Run local uncertainty analysis
        3. Extract top N most uncertain parameters
        4. Input these set of parameters into a MUQ model class (which is a child of the ModPiece class)
        5. Create EvaluateImpl function within model class that runs simulation based on reactor conditions through Cantera
        6. Perform PCE analysis of desired outputs
    """

    def __init__(self, cantera, outputSpeciesList, kReactions, kUncertainty, gSpecies, gUncertainty):
        # See ReactorModPiece.__init__ for the meaning of each argument.
        self.reactorMod = ReactorModPiece(cantera=cantera,
                                          outputSpeciesList=outputSpeciesList,
                                          kReactions=kReactions,
                                          kUncertainty = kUncertainty,
                                          gSpecies = gSpecies,
                                          gUncertainty = gUncertainty,
                                          )

        # Define the polynomials and quadrature rules in each dimension using a VariableCollection object.
        # We can do this directly using the classes from libmuqUtilities. Build the PCE factory for the ReactorModPiece this way.
        # Uniform random variables used chemical kinetics uncertainty propagation uses Legendre polynomials
        # We select the Gauss-Patterson quadrature as it is recommended as the fastest in the Patrick Conrad, Youssef Marzouk paper

        # Select the polynomial and quadrature families
        polyFamily = LegendrePolynomials1DRecursive()
        quadFamily = GaussPattersonQuadrature1D()

        # Create a random variable collection for each of the uncertain variables
        varCollection = VariableCollection()
        for rxnIndex in kReactions:
            # One uniform variable per uncertain reaction, named "k<index+1>".
            varCollection.PushVariable("k{0}".format(rxnIndex+1), polyFamily, quadFamily)
        for speciesIndex in gSpecies:
            # One uniform variable per uncertain free energy, named "G<index+1>".
            varCollection.PushVariable("G{0}".format(speciesIndex+1), polyFamily, quadFamily)

        # Initialize the PCE Factory
        self.factory = SmolyakPCEFactory(varCollection, self.reactorMod)
        # The expansion itself is built later by generatePCE().
        self.pce = None

    def generatePCE(self, runTime=None, startOrder=2, tolerance=None, fixedTerms=False):
        """
        Generate the PCEs adaptively. There are three methods for doing so.
        `runTime` should be given in seconds

        Option 1: Adaptive for a pre-specified amount of time
        Option 2: Adaptively construct PCE to error tolerance
        Option 3: Used a fixed order, and (optionally) adapt later.
        """
        # Also monitor the amount of time it takes
        start_time = time()

        if runTime:
            # Option 1: Adaptive for a pre-specified amount of time
            self.pce = self.factory.StartAdaptiveTimed(startOrder,runTime)
        elif tolerance:
            # Option 2: adaptively construct PCE to error tolerance
            self.pce = self.factory.StartAdaptiveToTolerance(startOrder,tolerance)
        elif fixedTerms:
            # Option 3: Used a fixed order, and (optionally) adapt later
            self.pce = self.factory.StartFixedTerms(startOrder)
            # # Optionally adapt to tolerance later:
            # pce = self.AdaptToTolerance(tolerance)
        else:
            raise Exception('Must have at least one chosen method')

        end_time = time()
        time_taken = end_time - start_time
        print 'Polynomial Chaos Expansion construction took {0:2f} seconds.'.format(time_taken)

    def compareOutput(self, testPoint):
        """
        Evaluate the PCEs against what the real output might give for a test point.
        testPoint is an array of all the values in terms of factor of f

        Returns a tuple containing the
        (true output mole fractions, pce output mole fractions) evaluated at the test point.
        """
        # Full model evaluation vs. the (cheap) PCE surrogate evaluation.
        trueOutput = self.reactorMod.Evaluate([testPoint])
        pceOutput = self.pce.Evaluate(testPoint)

        reactorMod = self.reactorMod
        for i in range(reactorMod.numConditions):
            print 'Condition {}'.format(i+1)
            print '======================================================='
            print str(reactorMod.cantera.conditions[i])
            print ''

            print 'Condition {} Mole Fractions Evaluated at Test Point'.format(i+1)
            print '========================================'
            print 'Species True output PCE output'
            print '========================================'
            for j, outputSpecies in enumerate(reactorMod.outputSpeciesList):
                # Outputs are laid out condition-major: see ReactorModPiece.EvaluateImpl.
                outputIndex = i*reactorMod.numOutputSpecies+j
                print '{0:10} {1:11.2f} {2:14.2f}'.format(outputSpecies.toChemkin(),trueOutput[outputIndex],pceOutput[outputIndex])
            print ''

        return trueOutput, pceOutput

    def analyzeResults(self):
        """
        Obtain the results: the prediction mean and variance, as well as the global sensitivity indices
        Returns a tuple containing the following statistics

        (mean species mole fractions, variance, covariance, main sensitivity indices, total sensitivity indices)
        """
        reactorMod = self.reactorMod
        pce = self.pce

        # Compute the mean and variance for each of the uncertain parameters
        mean = numpy.array(pce.ComputeMean())
        var = numpy.array(pce.ComputeVariance())
        stddev = numpy.sqrt(var)
        stddev_percent = stddev/mean*100.0
        cov = pce.ComputeCovariance()
        # print "Covariance = ", cov

        # Extract the global sensitivity indices
        mainSens = numpy.array(pce.ComputeAllMainSensitivityIndices())
        totalSens = numpy.array(pce.ComputeAllSobolTotalSensitivityIndices())

        for i in range(reactorMod.numConditions):
            print 'Condition {}'.format(i+1)
            print '======================================================='
            print str(reactorMod.cantera.conditions[i])
            print ''

            print 'Condition {} Mole Fractions'.format(i+1)
            print '=============================================='
            print 'Species Mean Stddev Stddev (%)'
            print '=============================================='
            for j, outputSpecies in enumerate(reactorMod.outputSpeciesList):
                outputIndex = i*reactorMod.numOutputSpecies+j
                print '{0:10} {1:10.3e} {2:10.3e} {3:10.3f}'.format(outputSpecies.toChemkin(),
                                                                    mean[outputIndex],
                                                                    stddev[outputIndex],
                                                                    stddev_percent[outputIndex])
            print ''

            if reactorMod.kReactions:
                print ''
                print 'Condition {} Reaction Sensitivities'.format(i+1)
                print '==============================================================================='
                print 'Description sens_main sens_total'
                print '==============================================================================='
                for j, outputSpecies in enumerate(reactorMod.outputSpeciesList):
                    outputIndex = i*reactorMod.numOutputSpecies+j
                    for k, rxnIndex in enumerate(reactorMod.kReactions):
                        # Kinetics variables occupy the first len(kReactions)
                        # slots of the PCE input vector.
                        parameterIndex=k
                        description = 'dln[{0}]/dln[{1}]'.format(outputSpecies.toChemkin(),
                                                                 reactorMod.cantera.reactionList[rxnIndex].toChemkin(kinetics=False),
                                                                 )
                        print '{0:55} {1:10.3f} {2:10.3f}'.format(description,
                                                                  mainSens[outputIndex][parameterIndex],
                                                                  totalSens[outputIndex][parameterIndex],
                                                                  )

            if reactorMod.gSpecies:
                print ''
                print 'Condition {} Thermo Sensitivities'.format(i+1)
                print '==========================================================='
                print 'Description sens_main sens_total'
                print '==========================================================='
                for j, outputSpecies in enumerate(reactorMod.outputSpeciesList):
                    outputIndex = i*reactorMod.numOutputSpecies+j
                    for g, speciesIndex in enumerate(reactorMod.gSpecies):
                        # Thermo variables follow the kinetics variables in
                        # the PCE input vector.
                        parameterIndex = len(reactorMod.kReactions)+g
                        description = 'dln[{0}]/dlnG[{1}]'.format(outputSpecies.toChemkin(),
                                                                  reactorMod.cantera.speciesList[speciesIndex].toChemkin(),)
                        print '{0:35} {1:10.3f} {2:10.3f}'.format(description,
                                                                  mainSens[outputIndex][parameterIndex],
                                                                  totalSens[outputIndex][parameterIndex],
                                                                  )
            print ''

        return mean, var, cov, mainSens, totalSens
|
{"hexsha": "38a68cc1c155943adb75f375180abc1512634e90", "size": 19390, "ext": "py", "lang": "Python", "max_stars_repo_path": "rmgpy/tools/muq.py", "max_stars_repo_name": "keceli/RMG-Py", "max_stars_repo_head_hexsha": "17c7870195a4feb6e8bf8974292f9bcdca1a1d9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-18T18:43:22.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-18T18:43:22.000Z", "max_issues_repo_path": "rmgpy/tools/muq.py", "max_issues_repo_name": "keceli/RMG-Py", "max_issues_repo_head_hexsha": "17c7870195a4feb6e8bf8974292f9bcdca1a1d9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rmgpy/tools/muq.py", "max_forks_repo_name": "keceli/RMG-Py", "max_forks_repo_head_hexsha": "17c7870195a4feb6e8bf8974292f9bcdca1a1d9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.8923884514, "max_line_length": 145, "alphanum_fraction": 0.5705518308, "include": true, "reason": "import numpy", "num_tokens": 3855}
|
import numpy as np
import matplotlib.pyplot as plt
from random import randint
# TODO: there are a couple of places for optimization:
# 1) Calculate accuracy only once per epoch
# 2) Incorporate testing accuracy within learning
#Entry point into the program
class MLP:
    """A one-hidden-layer perceptron trained on the MNIST csv files.

    Both layers use sigmoid activations scaled by ``beta``; weights are
    updated with mini-batch gradient descent plus momentum.  Call
    ``getData()`` before either training method.
    """

    def __init__(self, hiddenNeurons, beta, momentum, learning, epochs, batchSize):
        #constructor
        self.hiddenNeurons = hiddenNeurons   # number of hidden units
        self.beta = beta                     # sigmoid steepness
        self.momentum = momentum             # momentum coefficient
        self.learning = learning             # learning rate
        self.epochs = epochs                 # number of training epochs
        self.batchSize = batchSize           # rows per mini-batch

    #Helper function that loads and filters train/test data and
    #creates weights (basically initializes data we need)
    def getData(self):
        """Load mnist_train.csv / mnist_test.csv, build 0.1/0.9 one-hot
        target matrices, normalize inputs, append a bias column, and
        initialize both weight matrices uniformly in [-0.5, 0.5]."""
        #open files with train/test data -> store into matrices
        rawTrain = np.loadtxt(open("mnist_train.csv", "rb"), delimiter=",", skiprows=1)
        rawTest = np.loadtxt(open("mnist_test.csv", "rb"), delimiter=",", skiprows=1)

        #grab the target columns and create somewhat of a
        #one hot vector (matrix) with 0.9 and 0.1 instead of 1 and 0
        trainTargets = rawTrain[:,0:1]
        testTargets = rawTest[:,0:1]
        self.trainTargets = np.full((rawTrain.shape[0], 10), 0.1)
        self.testTargets = np.full((rawTest.shape[0], 10), 0.1)
        rowNumber = 0
        for target in trainTargets:
            self.trainTargets[rowNumber, int(target)] = 0.9
            rowNumber +=1
        rowNumber = 0
        for target in testTargets:
            self.testTargets[rowNumber, int(target)] = 0.9
            rowNumber+=1

        #remove first column from the input datas (target column)
        rawTrain = np.delete(rawTrain, 0, 1)
        rawTest = np.delete(rawTest, 0, 1)
        #optimize data
        # Pixel values are scaled from 0-255 into 0-1.
        rawTrain = rawTrain / 255
        rawTest = rawTest / 255

        #create bias vector and concat bias into data
        trainRows = rawTrain.shape[0]
        testRows = rawTest.shape[0]
        trainBias = np.full((1, trainRows), 1)
        testBias = np.full((1, testRows), 1)
        rawTrain = np.concatenate((rawTrain, trainBias.T), axis = 1)
        rawTest = np.concatenate((rawTest, testBias.T), axis = 1)

        #create actual train/test matrices
        self.trainData = rawTrain
        self.testData = rawTest

        #make weights
        cols = self.trainData.shape[1]
        self.weights1 = np.random.uniform(low=-0.5, high = 0.5, size=(cols, self.hiddenNeurons)) #input -> hidden
        self.weights2 = np.random.uniform(low=-0.5, high = 0.5, size=(self.hiddenNeurons+1, 10)) #hidden -> output

    #Function that performs the training by grabbing random batches from the dataset
    def trainRandom(self):
        """Train on randomly positioned mini-batches, recording per-epoch
        accuracy and weight snapshots, then run test(), plot, and print
        the confusion matrix."""
        updatew1 = np.zeros((np.shape(self.weights1)))
        updatew2 = np.zeros((np.shape(self.weights2)))
        self.weightList1 = [] #list that stores weights1 (1 per epoch)
        self.weightList2 = [] #list that stores weights2 (1 per epoch)
        self.trainAccuracyList = [] #list that stores accuracy during the training (1 accuracy per epoch)
        for epoch in range(self.epochs):
            for r in range(self.batchSize):
                batchStart = self.getRandStart() #grabs random number b/w 0-60k -> where batch will start
                batchEnd = batchStart + self.batchSize #where batch will end (depending on the batch size)
                #batch matrix used for this epoch
                inputMatrix = self.trainData[batchStart:batchEnd, :] #e.g. 20 x 785 or batchSize x 785
                self.outputs = self.forward(inputMatrix) #forward step (calls a helper function)
                batchTargets = self.trainTargets[batchStart:batchEnd, :] #grab targets for this particular batch
                correct = 0;
                #Calculate accuracy for this batch
                for i in range (self.batchSize):
                    out = np.argmax(self.outputs[i:i+1, :])
                    exp = np.argmax(batchTargets[i:i+1, :])
                    if out == exp: correct+=1
                accuracy = (correct / self.batchSize) * 100
                # Sum-of-squares error over the batch.
                error = 0.5*np.sum((self.outputs-self.trainTargets[batchStart:batchEnd, :])**2)
                print ("Training: ",epoch, "Accuracy:", accuracy, "%", "Error: ",error)
                #BACKWARD propogation (step):
                #deltao = self.beta*(self.outputs-batchTargets)*self.outputs*(1.0-self.outputs)
                # Output delta: (y - t) * y * (1 - y) / batchSize, written
                # as y*(-y)+y = y*(1-y).
                deltao = (self.outputs-batchTargets)*(self.outputs*(-self.outputs)+self.outputs)/self.batchSize
                deltah = self.hidden*self.beta*(1.0-self.hidden)*(np.dot(deltao,np.transpose(self.weights2)))
                # deltah[:,:-1] drops the bias column of the hidden layer.
                updatew1 = self.learning*(np.dot(np.transpose(inputMatrix),deltah[:,:-1])) + self.momentum*updatew1
                updatew2 = self.learning*(np.dot(np.transpose(self.hidden),deltao)) + self.momentum*updatew2
                weights1 = self.weights1 - updatew1
                weights2 = self.weights2 - updatew2
                self.weights1 = weights1
                self.weights2 = weights2
            self.trainAccuracyList.append(accuracy) #insert latest accuracy
            self.weightList1.append(weights1) #insert latest weight1
            self.weightList2.append(weights2) #insert latest weight2
        #Once we are done with training, call functions for testing and show output
        self.test()
        self.accuracyGraph()
        self.confMat()

    #Function that uses incremental batches (not fully tested)
    def trainWhole(self):
        """Like trainRandom() but walks the training set in sequential,
        evenly sized batches instead of random positions."""
        updatew1 = np.zeros((np.shape(self.weights1)))
        updatew2 = np.zeros((np.shape(self.weights2)))
        self.weightList1 = []
        self.weightList2 = []
        self.trainAccuracyList = []
        increase = int(self.trainData.shape[0] / self.batchSize) #e.g. 20 batches = 3000 rows per batch
        print("BatchSize:", increase)
        for epoch in range(self.epochs):
            for i in range(self.batchSize):
                batchStart = i * increase
                batchEnd = batchStart + increase
                print("Start:", batchStart)
                print("End", batchEnd)
                #batch matrix used for this epoch
                inputMatrix = self.trainData[batchStart:batchEnd, :] #e.g. 20 x 785 or batchSize x 785
                self.outputs = self.forward(inputMatrix)
                batchTargets = self.trainTargets[batchStart:batchEnd, :]
                correct = 0;
                for j in range (increase):
                    out = np.argmax(self.outputs[j:j+1, :])
                    exp = np.argmax(batchTargets[j:j+1, :])
                    if out == exp: correct+=1
                accuracy = (correct / increase) * 100
                error = 0.5*np.sum((self.outputs-self.trainTargets[batchStart:batchEnd, :])**2)
                print ("Training: ",epoch, "Accuracy:", accuracy, "%", "Error: ",error)
                #deltao = self.beta*(self.outputs-batchTargets)*self.outputs*(1.0-self.outputs)
                deltao = (self.outputs-batchTargets)*(self.outputs*(-self.outputs)+self.outputs)/self.batchSize
                deltah = self.hidden*self.beta*(1.0-self.hidden)*(np.dot(deltao,np.transpose(self.weights2)))
                updatew1 = self.learning*(np.dot(np.transpose(inputMatrix),deltah[:,:-1])) + self.momentum*updatew1
                updatew2 = self.learning*(np.dot(np.transpose(self.hidden),deltao)) + self.momentum*updatew2
                weights1 = self.weights1 - updatew1
                weights2 = self.weights2 - updatew2
                self.weights1 = weights1
                self.weights2 = weights2
            self.trainAccuracyList.append(accuracy)
            self.weightList1.append(weights1)
            self.weightList2.append(weights2)
        self.test()
        self.accuracyGraph()
        self.confMat()

    #Function that tests the network
    #We are using different weights from each epoch for output purposes (show how the net learned)
    def test(self):
        """Evaluate the test set once per stored epoch snapshot, filling
        self.testAccuracyList with one accuracy value per epoch."""
        self.testAccuracyList = [] #list that holds accuracy of test per epoch
        for epoch in range (0, self.epochs):
            correct = 0
            #forward step
            in_to_hidden = np.dot(self.testData, self.weightList1[epoch])
            in_to_hidden = 1.0 / (1.0 + np.exp(-self.beta * in_to_hidden))
            #concat bias to hidden
            hiddenBias = np.full((1, in_to_hidden.shape[0]), 1)
            in_to_hidden = np.concatenate((in_to_hidden, hiddenBias.T), axis = 1)
            hidden_to_out = np.dot(in_to_hidden, self.weightList2[epoch])
            hidden_to_out = 1.0 / (1.0 + np.exp(-self.beta * hidden_to_out))
            #compute accuracy for this particular set of weights
            for i in range (0, self.testData.shape[0]):
                out = np.argmax(hidden_to_out[i:i+1, :])
                exp = np.argmax(self.testTargets[i:i+1, :])
                if out == exp:
                    correct += 1
            accuracy = (correct / self.testData.shape[0]) * 100
            print("Test:", epoch, "Accuracy:", accuracy, "%")
            self.testAccuracyList.append(accuracy) #insert accuracy into the list

    #helper function to get random number for a batch
    def getRandStart(self):
        """Return a random batch start index such that a full batch fits
        within the training data."""
        start = randint(0, self.trainData.shape[0])
        # Re-draw until a whole batch fits after `start`.
        while start >= (self.trainData.shape[0] - self.batchSize + 1):
            start = randint(0, self.trainData.shape[0])
        return start

    #Helper function for forward step
    def forward(self, inputMatrix):
        """Run one forward pass; stores the (bias-augmented) hidden
        activations in self.hidden and returns the output activations."""
        #calculate hidden layer input
        self.hidden = np.dot(inputMatrix, self.weights1) #compute dot product for lvl1
        self.hidden = 1.0 / (1.0 + np.exp(-self.beta * self.hidden))
        #concat bias to hidden
        hiddenBias = np.full((1, self.hidden.shape[0]), 1)
        self.hidden = np.concatenate((self.hidden, hiddenBias.T), axis = 1)
        #calculate output
        output = np.dot(self.hidden, self.weights2)
        output = 1.0 / (1.0 + np.exp(-self.beta * output))
        return output

    #helper function that graphs accuracy per epoch for both training
    #and testing data
    def accuracyGraph(self):
        """Plot per-epoch training and test accuracy with matplotlib."""
        plt.title("Accuracy Graph")
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.axis([0,self.epochs,0,100])
        epochList = []
        for i in range (0, self.epochs):
            epochList.append(i)
        plt.plot(epochList, self.trainAccuracyList, color = "green", label="Training")
        plt.plot(epochList, self.testAccuracyList, color = "red", label="Test")
        plt.legend()
        plt.show()

    #helper function that computes the confusion matrix for the testing
    #data using the latest (most accurate) weights
    def confMat(self):
        """Print the labeled confusion matrix for the test set using the
        final weights, plus overall accuracy and the hyperparameters."""
        #create conf matrix
        # Row/column 0 hold the class labels; counts start at [1][1].
        size = self.testTargets.shape[1]
        conf = np.zeros(shape=(size+1, size+1), dtype = int)
        corr = 0  # (unused)
        for i in range(size):
            conf[0,i+1] = i
            conf[i+1,0] = i
        in_to_hidden = np.dot(self.testData, self.weights1)
        in_to_hidden = 1.0 / (1.0 + np.exp(-self.beta * in_to_hidden))
        #concat bias to hidden
        hiddenBias = np.full((1, in_to_hidden.shape[0]), 1)
        in_to_hidden = np.concatenate((in_to_hidden, hiddenBias.T), axis = 1)
        hidden_to_out = np.dot(in_to_hidden, self.weights2)
        hidden_to_out = 1.0 / (1.0 + np.exp(-self.beta * hidden_to_out))
        correct = 0
        for i in range(0, self.testData.shape[0]):
            out = np.argmax(hidden_to_out[i:i+1, :])
            exp = np.argmax(self.testTargets[i:i+1, :])
            if out == exp:
                correct += 1
            conf[exp+1, out+1] += 1
        print(conf)
        print("Accuracy:", (correct / self.testData.shape[0]) * 100, "%")
        print("Learning rate:", self.learning)
        print("Hidden neurons:", self.hiddenNeurons)
        print("Momentum:", self.momentum)
        print("Epochs:", self.epochs)
        print("Batch size:", self.batchSize)
if __name__ == '__main__':
    # Guarded so importing this module for reuse does not trigger a full
    # training run.  Hyperparameters: 100 hidden neurons, beta=1,
    # momentum=0.9, learning rate 0.1, 50 epochs, batch size 100.
    t2 = MLP(100, 1, 0.9, 0.1, 50, 100)
    t2.getData()
    t2.trainRandom()
|
{"hexsha": "582b96cdf6519cb1e74e16c0c21f4a19714f1e7a", "size": 10761, "ext": "py", "lang": "Python", "max_stars_repo_path": "PerceptronTwo.py", "max_stars_repo_name": "gmillia/PerceptronTwo", "max_stars_repo_head_hexsha": "9f2a6add614b04bc5bb9712408df6914d4ee8a52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PerceptronTwo.py", "max_issues_repo_name": "gmillia/PerceptronTwo", "max_issues_repo_head_hexsha": "9f2a6add614b04bc5bb9712408df6914d4ee8a52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PerceptronTwo.py", "max_forks_repo_name": "gmillia/PerceptronTwo", "max_forks_repo_head_hexsha": "9f2a6add614b04bc5bb9712408df6914d4ee8a52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6012861736, "max_line_length": 110, "alphanum_fraction": 0.6725211412, "include": true, "reason": "import numpy", "num_tokens": 3018}
|
#==============================================================================
# Test for lmer
#==============================================================================
#------------------------------------------------------------------------------
# Because the current version of predict.merMod does not support predicting
# with newdata when formula has ".", following test does not use "."
# in the formula.
#
# object <- lmer(Sepal.Length ~ . + (1 | Species), data = iris)
# predict(object, newdata=iris)
#------------------------------------------------------------------------------
# Load the shared test harness, which defines the ma.test reference class
# and the expected() helper.
source("tests.r")

# Build a test case for a lmer() mixed model on the iris data.  The same
# unevaluated call is passed twice: once as the call under test and once
# to construct the expected model.adapter fields for comparison.
test <- ma.test$new(
    call = substitute(
        lmer(
            Sepal.Length ~ Petal.Length + Petal.Width + (1 | Species),
            data = iris
        )
    ),
    "lmer",
    expected.for.call = expected(
        call = substitute(
            lmer(
                Sepal.Length ~ Petal.Length + Petal.Width + (1 | Species),
                data = iris
            )
        ),
        # A gaussian lmer fit is a regression with identity-family links.
        formula = Sepal.Length ~ Petal.Length + Petal.Width + (1|Species),
        data = iris, model.type = "regression", family = "gaussian",
        link = gaussian()$linkfun, linkinv = gaussian()$linkinv
    )
)
# Run every check defined by the harness, then drop the test object.
test$run.all()
rm(test)
|
{"hexsha": "e6be0fdc3e6681a4dd157d7dc04f7637569aa031", "size": 1125, "ext": "r", "lang": "R", "max_stars_repo_path": "tests/test__model.adapter__function__lmer.r", "max_stars_repo_name": "Marchen/model.adapter", "max_stars_repo_head_hexsha": "ace7f78abee9e2ce2b1ee5e09cc8ac59cea66c3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test__model.adapter__function__lmer.r", "max_issues_repo_name": "Marchen/model.adapter", "max_issues_repo_head_hexsha": "ace7f78abee9e2ce2b1ee5e09cc8ac59cea66c3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2018-11-20T10:07:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-28T01:11:12.000Z", "max_forks_repo_path": "tests/test__model.adapter__function__lmer.r", "max_forks_repo_name": "Marchen/model.adapter", "max_forks_repo_head_hexsha": "ace7f78abee9e2ce2b1ee5e09cc8ac59cea66c3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-04T04:46:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-04T04:46:54.000Z", "avg_line_length": 29.6052631579, "max_line_length": 79, "alphanum_fraction": 0.4622222222, "num_tokens": 249}
|
# 1.
# import math
# # a,b,c = map(float,input('Enter a,b,c:').split(','))
# a,b,c = eval(input('Enter a,b,c:'))
# sum=b*b-4*a*c
# if sum >0:
# r1=(-b+math.sqrt(sum))/2*a
# r2=(-b-math.sqrt(sum))/2*a
# print('The roots are %.2f and %.2f'%(r1,r2))
# elif sum == 0:
# r1=r2=(-b+math.sqrt(sum))/2*a
# print('The root is %.2f'%r1)
# else:
# print('The equation has no real roots')
# 2.
# import random
# num1 = random.randint(0,100)
# num2 = random.randint(0,100)
# print(num1,num2)
# sum == num1+num2
# sum1 = input('请输入结果:')
# if sum1 == sum:
# print('结果为真')
# else:
# print('结果为假')
# 3.
# today = int(input('Enter today is day:'))
# num1 = int(input('Enter the number of days elapsed since today: '))
# week = ['Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']
# week1=(today+num1)%7
# print("Today is %s and the future day is %s"%(week[today],week[week1]))
# 4.1
# num1,num2,num3 = eval(input('输入三个数:'))
# if num1 > num2 :
# if num1 > num3:
# if num2 > num3:
# print(num3,num2,num1)
# else:
# print(num2,num3,num1)
# else:
# print(num2,num1,num3)
# else:
# if num1 > num3:
# print(num3,num1,num2)
# elif num2 > num3:
# print(num1,num3,num2)
# else:
# print(num1,num2,num3)
# 5.
# w1,p1 = eval(input('Enter weight and price for package 1:'))
# w2,p2 = eval(input('Enter weight and price for package 2:'))
# if p1%w1 < p2% w2:
# print('Package 1 has the better price.')
# else:
# print('Package 2 has the better price.')
# 6.
# m,y = eval (input('请输入月份,年份:'))
# if m in (1,3,5,7,8,10,12):
# print('%s年%s月的天数为31天'%(y,m))
# elif m in (4, 6 ,9,11):
# print('%s年%s月的天数为30天'%(y,m))
# if m == 2:
# if y % 4 == 0 and y % 100 != 0 or y % 400 == 0:
# print('%s年2月的天数为29天'%y)
# else:
# print('%s年2月的天数为28天'%y)
# 7.
# import numpy as np
# np1 = input('请猜测硬币是正面或是反面:')
# np2 = np.random.choice(['正面' ,'反面'])
# print(np2)
# if np1 == np2:
# print('恭喜你,答对了!😊')
# else:
# print('很遗憾,错了呢!😔')
# 8.
# import random
# user = int(input('请选择:scissor(0),rock(1),paper(2):'))
# computer = random.randint(0,2) #用于生成一个指定范围内的整数
# if user == 0 and computer == 1:
# print('The computer is rock,You are scissor. You lost.')
# elif user == 0 and computer == 2:
# print('The computer is paper,You are scissor. You won.')
# elif user == 0 and computer == 0:
# print('The computer is scissor,You are scissor. It is a draw. ')
# elif user == 1 and computer == 0:
# print('The computer is scissor,You are rock.You won. ')
# elif user == 1 and computer == 2:
# print('The computer is paper,You are rock.You lost. ')
# elif user == 1 and computer == 1:
# print('The computer is rock,You are rock.It is a draw. ')
# elif user == 2 and computer == 0:
# print('The computer is scissor,You are paper.You lost. ')
# elif user == 2 and computer == 1:
# print('The computer is rock,You are paper.You won. ')
# elif user == 2 and computer == 2:
# print('The computer is paper,You are paper.It is a draw.')
9#
# y = int(input('Enter year:'))
# m = int(input('Ether month:1-12:'))
# q = int(input('Ether the day of the month:1-31:'))
# a = ['Saturday' ,'Sunday','Monday','Tuesday','Wednesday','Thurday','Firday']
# if m == 1:
# m = 13
# y = y-1
# elif m == 2:
# m = 14
# y = y-1
# h = (q + ((26 * (m + 1) // 10)) + (y % 100) + ((y % 100) // 4) + ((y // 100) // 4) + (5 * y // 100)) % 7
# # '//'只输出整数,即只输出小数点前的:'/'输出完整运算结果,即会输出小数点后的:
# D=a[h]
# print('Day of the week is %s'%D)
#10.
# import numpy as np
# np1 = np.random.choice(['Ace',2,3,4,5,6,7,8,9,10,'Jack','Queen','King'])
# np2 = np.random.choice(['梅花' , '红桃' ,'黑桃' ,'方块'])
# print('The card you picked is the %s of %s'%(np1,np2))
# 11.
# num = input('Enter a three-digit integer:')
# if num == num [::-1]:
# print('%s is a palindrome'%num)
# else:
# print('%s is not a palindrome'%num)
# 12.
# a,b,c = eval(input('Enter three edges :'))
# if a+b>c and a-b<c:
# d=a+b+c
# print('The perimeter is:%.0f'%d)
# 1.
#2.
# Tuition grows 5% per year starting from 10,000. Print the tuition ten years
# from now, and the total cost of four years of college starting in ten years.
# Fixes: the accumulator was never updated (always printed 0, once per loop
# iteration), and it shadowed the builtin `sum`.
money = 10000
total = 0  # accumulated tuition for the four college years
for i in range(14):
    money = money + money * 0.05
    if i == 9:
        # After 10 increases: the tuition ten years from now.
        print('十年后的大学的学费是:%.2f' % money)
    if 9 <= i <= 12:
        # Years 10..13 of growth are the four college years.
        total += money
print('十年后的大学四年的总学费是:%.2f' % total)
# 4.
# count = 0
# for i in range(100,1001):
# if i >= 100 and i <= 1000:
# if i % 5 == 0 and i % 6 == 0:
# count += 1
# print(i,"\t",end=' ')
# if count % 10 == 0:
# print(" ")
# 5.
# n=1
# while n * n < 1200:
# n=n+1
# print(n)
# while n * n * n >1200:
# n=n-1
# print(n)
|
{"hexsha": "f88dde8030042f5fb5cb91f4286dd07bc28d1294", "size": 4879, "ext": "py", "lang": "Python", "max_stars_repo_path": "homework2.py", "max_stars_repo_name": "TiAmolj/Python-By-Joker", "max_stars_repo_head_hexsha": "df37ddfb58c40d68298b4e862ba23671943de8cf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "homework2.py", "max_issues_repo_name": "TiAmolj/Python-By-Joker", "max_issues_repo_head_hexsha": "df37ddfb58c40d68298b4e862ba23671943de8cf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "homework2.py", "max_forks_repo_name": "TiAmolj/Python-By-Joker", "max_forks_repo_head_hexsha": "df37ddfb58c40d68298b4e862ba23671943de8cf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8, "max_line_length": 107, "alphanum_fraction": 0.5152695224, "include": true, "reason": "import numpy", "num_tokens": 1809}
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime
from datagen import TextgenDatagen
from model import textgen_transformer
# Configuration: corpus path, trained-weights folder, model context length, and
# the sampling cut-off multiplier ("performance", relative to a uniform prior).
text_file = '../datasets/text_generation/deeplearning.txt'
output_path = 'output'
seq_len = 40
performance = 10
datagen = TextgenDatagen()
words, total_vocabs, _, _, _ = datagen.load_data(text_file)
print(len(words), total_vocabs)
model = textgen_transformer(total_vocabs=total_vocabs, seq_len=seq_len)
model.load_weights(output_path + '/weights_.h5')
# Rolling context window, seeded with the single token 'deep'.
output = np.zeros((1, seq_len))
output[0, 0:1] = datagen.encode(['deep'])
# Tokens whose predicted probability exceeds this are sampling candidates.
threshold = performance / total_vocabs
for i in range(1000):
    prediction = model.predict_on_batch(output)
    # Position whose distribution predicts the next token: the current index
    # while the window is still filling, the last position once it is full.
    idx = i if i < seq_len else seq_len - 1
    pred_tokens = [j for j in range(len(prediction[idx])) if prediction[idx, j] > threshold]
    if len(pred_tokens) < 2:
        # No real choice: fall back to the greedy (argmax) token.
        pred_token = np.argmax(prediction[idx], axis=-1)
    else:
        # Sample uniformly among the candidates. Fixed off-by-one: randint's
        # upper bound is exclusive, so the previous `len(pred_tokens) - 1`
        # could never select the last candidate.
        pred_token = pred_tokens[np.random.randint(0, len(pred_tokens))]
    if i >= seq_len - 1:
        # Window is full: shift left and append the new token.
        output[0, :-1] = output[0, 1:]
        output[0, -1] = pred_token
    else:
        output[0, i + 1] = pred_token
    print(datagen.decode([pred_token])[0], end=' ')
|
{"hexsha": "f08e7af060f51b7676d2c226f1430c8b04776104", "size": 1295, "ext": "py", "lang": "Python", "max_stars_repo_path": "transformer/test_textgen.py", "max_stars_repo_name": "hqbao/dlp_tf", "max_stars_repo_head_hexsha": "e8fe3281470faebbe8e36caf55025c270e84c44f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "transformer/test_textgen.py", "max_issues_repo_name": "hqbao/dlp_tf", "max_issues_repo_head_hexsha": "e8fe3281470faebbe8e36caf55025c270e84c44f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transformer/test_textgen.py", "max_forks_repo_name": "hqbao/dlp_tf", "max_forks_repo_head_hexsha": "e8fe3281470faebbe8e36caf55025c270e84c44f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-30T08:55:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-30T08:55:37.000Z", "avg_line_length": 28.152173913, "max_line_length": 126, "alphanum_fraction": 0.7335907336, "include": true, "reason": "import numpy", "num_tokens": 370}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing as mp
import os
import re
import time
import ants
import antspynet
import sys
import numpy as np
from utils.HelperFunctions import Imaging, FileOperations
from dependencies import ROOTDIR, FILEDIR, CONFIGDATA
class AntsPyX:
    """Brain-extraction helpers built on the ANTs / ANTsPyNet environment."""

    def __init__(self):
        # Verbosity flag for the ANTsPyNet routines.
        self.verbose = False

    def extract_list_of_patients(self, subjects):
        """Brain extraction using the routines from the ANTs environment cf.
        https://github.com/ANTsX/ANTsPyNet/blob/master/antspynet/utilities/brain_extraction.py.

        For every T1 image found for the given subjects, creates a
        probabilistic brain mask and writes a skull-stripped volume to the
        subject's 'output' folder.

        :param subjects: iterable of subject identifiers used to collect files
        """
        print('\nExtracting the brain of {} subject(s)'.format(len(subjects)))
        allfiles = FileOperations.get_filelist_as_tuple(f"{FILEDIR}", subjects, subdir='')
        strings2exclude = ['bcorr', 'reg_run', '_ep2d', 'bc_', 'diff_', 'reg_']
        start_extraction = time.time()
        sequences = {'t1': '_MDEFT3D', 't2': '_t2_'}
        list_of_files = {k: [] for k in sequences.keys()}
        # Collect, per sequence, the compressed NIfTI files whose basename
        # matches the sequence keyword but none of the exclusion patterns.
        for seq, keyword in sequences.items():
            list_of_files[seq] = [x for x in allfiles if x[0].endswith('.gz') and
                                  any(re.search(r'\w+(?!_).({})|^({}[\-])\w+.|^({})[a-z\-\_0-9].'.format(z, z, z),
                                                os.path.basename(x[0]),
                                                re.IGNORECASE) for z in [keyword] * 3) and not
                                  any(re.search(r'\w+(?!_).({})|^({}[\-])\w+.|^({})[a-z\-\_0-9].'.format(z, z, z),
                                                os.path.basename(x[0]),
                                                re.IGNORECASE) for z in strings2exclude)]
        # Only T1 images are processed; T2 files are collected but unused here.
        # (Fixed: removed dead locals `filename2save`, `modality` -- which read
        # the stale loop variable `seq` -- `template`/`template_folder`, and a
        # `return_dict` assignment that referenced the undefined name
        # `preprocessed_image` and raised a NameError at runtime.)
        for file in list_of_files['t1']:
            output_folder = os.path.join(FILEDIR, file[1], 'output')
            FileOperations.create_folder(output_folder)
            print(f"creating mask for {file[0]}")
            preprocess_imaged, mask = self.create_brainmask(file[0])
            # NOTE(review): `skullstrip` is not defined in this class; it is
            # presumably provided elsewhere -- confirm before running.
            self.skullstrip(image=preprocess_imaged, mask=mask,
                            output_file=os.path.join(output_folder, 'noskull_' + os.path.split(file[0])[1]))
            print("mask created... ok\n")
        print('\nIn total, a list of {} file(s) was processed \nOverall, brain_extraction took '
              '{:.1f} secs.'.format(len(subjects), time.time() - start_extraction))

    @staticmethod
    def create_brainmask(registered_images, truncate_intensity=(.01, .99), verbose=True, antsxnet_cache_directory=None):
        """Return (preprocessed_image, binary_mask) for a T1 image.

        Truncates intensities to the given quantile range, then runs the
        ANTsPyNet probabilistic brain extraction and thresholds it at 0.5.

        :param registered_images: ants image to process (cloned, not mutated)
        :param truncate_intensity: (low, high) quantiles for clipping, or None
        :param verbose: print progress messages
        :param antsxnet_cache_directory: cache dir for ANTsXNet model weights
        """
        preprocessed_image = ants.image_clone(registered_images)
        if antsxnet_cache_directory is None:
            antsxnet_cache_directory = "ANTsXNet"
        # Truncate intensity to suppress outlier voxels before extraction.
        if truncate_intensity is not None:
            quantiles = (preprocessed_image.quantile(truncate_intensity[0]),
                         preprocessed_image.quantile(truncate_intensity[1]))
            if verbose:
                print("Preprocessing: truncate intensities ( low =", quantiles[0], ", high =", quantiles[1], ").")
            preprocessed_image[preprocessed_image < quantiles[0]] = quantiles[0]
            preprocessed_image[preprocessed_image > quantiles[1]] = quantiles[1]
        # Brain extraction: probability map -> binary mask at 0.5.
        if verbose:
            print("Preprocessing: brain extraction.")
        probability_mask = antspynet.brain_extraction(preprocessed_image,
                                                      antsxnet_cache_directory=antsxnet_cache_directory,
                                                      verbose=verbose)
        mask = ants.threshold_image(probability_mask, 0.5, 1, 1, 0)
        return preprocessed_image, mask
|
{"hexsha": "0ed5811c2ac1b6078b72b90c5cb829fb10ef54f1", "size": 4291, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/BrainExtraction.py", "max_stars_repo_name": "dpedrosac/MRItremor", "max_stars_repo_head_hexsha": "04af7e6024f99ca9392ea40e91650653ede80f9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/BrainExtraction.py", "max_issues_repo_name": "dpedrosac/MRItremor", "max_issues_repo_head_hexsha": "04af7e6024f99ca9392ea40e91650653ede80f9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/BrainExtraction.py", "max_forks_repo_name": "dpedrosac/MRItremor", "max_forks_repo_head_hexsha": "04af7e6024f99ca9392ea40e91650653ede80f9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-13T10:29:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T10:29:08.000Z", "avg_line_length": 45.1684210526, "max_line_length": 120, "alphanum_fraction": 0.5940340247, "include": true, "reason": "import numpy", "num_tokens": 997}
|
%% test2.m - exp03
% This script shows histograms for errors computed in test1.m
%
% 02-08-10 Michal Uricar
% 07-03-11 Michal Uricar
% clc;
close all; clearvars;
%% Timestamp
fprintf(1,'Started on %s\n\n', datestr(now));
%% Add path
addpath('./Functions/');
%% Load errors
% Loads (among others) L, err_maxdist, err_* per-landmark vectors, options, Rtst.
load('./results/errors.mat');
%% Options
% show images
% opt.verbose = true;
opt.verbose = false;
% save images
opt.save = true;
% opt.save = false;
%% Show histograms
if (~exist('./img/', 'dir'))
    mkdir('./img/');
end;
% get screen size
scrsz = get(0,'ScreenSize');
% histogram of mean errors
x = 0:options.bw(1); y = round(L);
hist_mean = hist(y, x);
% cumulative histogram of mean errors
n_elements = histc(y, x); c_elements = cumsum(n_elements);
if (opt.verbose)
    figure; hist(y, x);
    set(gcf, 'OuterPosition', [scrsz(1) scrsz(2) scrsz(3) scrsz(4)]);
    set(gca, 'XTick', x); set(gca, 'xlim', [-0.5 options.bw(1)+0.5]);
    xlabel('Distance in pixels'); ylabel('Count of occurrences');
    set(gca, 'YTick', 0:50:2*c_elements(end));
    title('Histogram of mean errors');
    if (opt.save)
        saveas(gcf, './img/histogram_mean.png');
        saveas(gcf, './img/histogram_mean.fig');
    end;
    figure; bar(x, c_elements / max(c_elements));
    xlabel('RMS [px]'); ylabel('Count of occurrences [%]');
    set(gcf, 'OuterPosition', [scrsz(1) scrsz(2) scrsz(3) scrsz(4)]);
    set(gca, 'XTick', x); set(gca, 'xlim', [-0.5 options.bw(1)+0.5]);
    set(gca, 'YTick', 0:0.05:1);
    % Fixed typo in the figure title: 'historgam' -> 'histogram'.
    title('Cumulative histogram of mean errors');
    if (opt.save)
        saveas(gcf, './img/cumhist_mean.png');
        saveas(gcf, './img/cumhist_mean.fig');
    end;
end;
% histogram of max errors
x = 0:options.bw(1); y = round(err_maxdist);
hist_max = hist(y, x);
% cumulative histogram of max errors
n_elements = histc(y, x); c_elements = cumsum(n_elements);
if (opt.verbose)
    figure; hist(y, x);
    set(gcf, 'OuterPosition', [scrsz(1) scrsz(2) scrsz(3) scrsz(4)]);
    set(gca, 'XTick', x); set(gca, 'xlim', [-0.5 options.bw(1)+0.5]);
    xlabel('Distance in pixels'); ylabel('Count of occurrences');
    set(gca, 'YTick', 0:50:2*c_elements(end));
    title('Histogram of max errors');
    if (opt.save)
        saveas(gcf, './img/histogram_max.png');
        saveas(gcf, './img/histogram_max.fig');
    end;
    figure; bar(x, c_elements / max(c_elements));
    xlabel('RMS [px]'); ylabel('Count of occurrences [%]');
    set(gcf, 'OuterPosition', [scrsz(1) scrsz(2) scrsz(3) scrsz(4)]);
    set(gca, 'XTick', x); set(gca, 'xlim', [-0.5 options.bw(1)+0.5]);
    set(gca, 'YTick', 0:0.05:1);
    % Fixed typo in the figure title: 'historgam' -> 'histogram'.
    title('Cumulative histogram of max errors');
    if (opt.save)
        saveas(gcf, './img/cumhist_max.png');
        saveas(gcf, './img/cumhist_max.fig');
    end;
end;
% save histograms
save('./results/histograms.mat', 'hist_mean', 'hist_max');
%% Show mean and max error for each point from ground truth
mean_nose = mean(err_nose);
mean_canthus_rl = mean(err_canthus_rl);
mean_canthus_lr = mean(err_canthus_lr);
mean_mouth_corner_r = mean(err_mouth_corner_r);
mean_mouth_corner_l = mean(err_mouth_corner_l);
mean_canthus_rr = mean(err_canthus_rr);
mean_canthus_ll = mean(err_canthus_ll);
% NOTE: the max_* values are computed but not printed below.
max_nose = max(err_nose);
max_canthus_rl = max(err_canthus_rl);
max_canthus_lr = max(err_canthus_lr);
max_mouth_corner_r = max(err_mouth_corner_r);
max_mouth_corner_l = max(err_mouth_corner_l);
max_canthus_rr = max(err_canthus_rr);
max_canthus_ll = max(err_canthus_ll);
fprintf('\n\n__________________________________________________________\n');
fprintf('\nResults:\n');
fprintf('Mean error on nose: \t\t\t\t%.4f\t\n', mean_nose);
fprintf('Mean error on mean_canthus_rl: \t\t%.4f\t\n', mean_canthus_rl);
fprintf('Mean error on mean_canthus_lr: \t\t%.4f\t\n', mean_canthus_lr);
fprintf('Mean error on mean_mouth_corner_r: \t%.4f\t\n', mean_mouth_corner_r);
fprintf('Mean error on mean_mouth_corner_l: \t%.4f\t\n', mean_mouth_corner_l);
fprintf('Mean error on mean_canthus_rr: \t\t%.4f\t\n', mean_canthus_rr);
fprintf('Mean error on mean_canthus_ll: \t\t%.4f\t\n', mean_canthus_ll);
fprintf('__________________________________________________________\n');
fprintf('\nR_mean_tst: \t\t\t\t\t%.5f\n', Rtst);
fprintf('R_max_tst: \t\t\t\t\t\t%.5f\n', sum(err_maxdist)/length(err_maxdist));
fprintf('__________________________________________________________\n');
%% Cumulative histograms over the full error range (self-contained axes)
xSC = 0:ceil(max(L));
ySC = round(L);
hist_meanSC = hist(ySC, xSC);
% cumulative histogram of mean errors
n_elementsSC = histc(ySC, xSC);
c_elementsSC = cumsum(n_elementsSC);
% max
xSCmax = 0:ceil(max(L));
ySCmax = round(err_maxdist);
hist_maxSC = hist(ySCmax, xSCmax);
% cumulative histogram of max errors
n_elementsSCmax = histc(ySCmax, xSCmax);
c_elementsSCmax = cumsum(n_elementsSCmax);
fig = figure;
y1 = c_elementsSC / max(c_elementsSC) * 100;
plot(xSC(1:end), y1(1:end), 'b', 'LineWidth', 3); hold on; grid on;
set(gcf, 'OuterPosition', [scrsz(1) scrsz(2) scrsz(3) scrsz(4)]);
set(gca, 'XTick', xSC); set(gca, 'YTick', 0:5:100, 'FontSize', 15); set(gca, 'ylim', [0 105]);
l1 = legend('corners dataset, normalized trn loss', 'Location', 'SouthEast');
xlabel(['relative error [%]'], 'FontSize', 20);
ylabel('Count of occurrences [%]', 'FontSize', 20);
% Fixed typo in the figure title: 'historgam' -> 'histogram'.
title('Cumulative histogram of mean errors', 'FontSize', 20);
% r = 781; c = 731;
r = 801; c = 801;
% Sets position and size of figure on the screen
set(fig, 'Units', 'pixels', 'position', [0 0 c r] );
% Sets axes to fill the figure space
% set(gca, 'Units', 'pixels', 'position', [0 0 c+1 r+1 ]);
% Sets print properties; Looks like 1 pixel = (3/4)th of a point
set(fig, 'paperunits', 'points', 'papersize', [fix((c-1)*(3/4))+1 fix((r-1)*(3/4))+1]);
set(fig, 'paperunits', 'normalized', 'paperposition', [0 0 1 1]);
print( fig, sprintf('-r%d', ceil(72*(4/3))), '-dpng', './img/cumhist-mean.png');
fig = figure;
y2 = c_elementsSCmax / max(c_elementsSCmax) * 100;
plot(xSCmax(1:end), y2(1:end), 'b', 'LineWidth', 3); hold on; grid on;
set(gcf, 'OuterPosition', [scrsz(1) scrsz(2) scrsz(3) scrsz(4)]);
set(gca, 'XTick', xSCmax); set(gca, 'YTick', 0:5:100, 'FontSize', 15);
set(gca, 'ylim', [0 105]);
l1 = legend('corners dataset, normalized trn loss', 'Location', 'SouthEast');
xlabel(['relative error [%]'], 'FontSize', 20);
ylabel('Count of occurrences [%]', 'FontSize', 20);
% Fixed typo in the figure title: 'historgam' -> 'histogram'.
title('Cumulative histogram of max errors', 'FontSize', 20);
r = 801; c = 801;
% Sets position and size of figure on the screen
set(fig, 'Units', 'pixels', 'position', [0 0 c r] );
% Sets axes to fill the figure space
% set(gca, 'Units', 'pixels', 'position', [0 0 c+1 r+1 ]);
% Sets print properties; Looks like 1 pixel = (3/4)th of a point
set(fig, 'paperunits', 'points', 'papersize', [fix((c-1)*(3/4))+1 fix((r-1)*(3/4))+1]);
set(fig, 'paperunits', 'normalized', 'paperposition', [0 0 1 1]);
print( fig, sprintf('-r%d', ceil(72*(4/3))), '-dpng', './img/cumhist-max.png');
%% Timestamp
fprintf(1,'Finished on %s\n\n', datestr(now));
|
{"author": "uricamic", "repo": "flandmark", "sha": "ecf122f93f73504fe7d8faccca525c6b1e98fdcd", "save_path": "github-repos/MATLAB/uricamic-flandmark", "path": "github-repos/MATLAB/uricamic-flandmark/flandmark-ecf122f93f73504fe7d8faccca525c6b1e98fdcd/learning/code/test2.m"}
|
"""Documentation for verify_results.py
This script processes class data in order to calculate accuracy, sensitivity etc. on the overall class. This should match results given in our report.
The script prints these values out to screen.
"""
import os
import sys
import argparse
import glob
import json
import numpy as np
import cv2
from io import BytesIO
import torch
import csv
import itertools
import PIL.Image
from torchvision import transforms
import json
import face_model
from engagement_model import EngageModel
from functions import get_detections
from model.utils import get_model
# Temporarily hop to the repository root so the helper and model folders can
# be appended to sys.path, then restore the original working directory.
currDir = os.getcwd()
os.chdir('..')
sys.path.append(os.getcwd() + '/helper')
sys.path.append(os.getcwd() + '/helper/tinyfaces')
sys.path.append(os.getcwd() + '/models/model-r100-ii')
os.chdir(currDir)
__author__ = "Philip Baker & Keith Spencer-Edgar"
__date__ = "25-10-2020"
# Command-line options for the ArcFace recognition model (defaults are used
# when the script is run without arguments).
parser = argparse.ArgumentParser(description='face model test')
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../models/model-r100-ii/model,0', help='path to load model.')
parser.add_argument('--gpu', default=-1, type=int, help='gpu id (-1 to run on CPU)')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
parser.add_argument('--code', default='my_course', type=str, help='The course code of the lecture')
args_arcface = parser.parse_args()
# load ArcFace model (weights resolved from the --model argument)
model_arcface = face_model.FaceModel(args_arcface)
# load the Tiny Face Detector
class args_eval():
    """Namespace of Tiny Face detector settings (stands in for argparse args)."""

    def __init__(self):
        # All knobs in one place; the attribute names are read by the
        # detection code, so they must not change.
        settings = {
            'nms_thresh': 0.3,
            'prob_thresh': 0.03,
            'checkpoint': "../models/tinyfaces/checkpoint_50.pth",
            'template_file': "../helper/tinyfaces/data/templates.json",
            'threshold_score': 0,
        }
        for key, value in settings.items():
            setattr(self, key, value)
# Instantiate detector settings and load the detection templates.
args_tinyface = args_eval()
threshold_score = 0
# get templates (round-trip the JSON file; handles are now closed properly)
with open(args_tinyface.template_file) as template_fh:
    templates = json.load(template_fh)
with open(args_tinyface.template_file, "w") as template_fh:
    json.dump(templates, template_fh)
templates = np.round_(np.array(templates), decimals=8)
num_templates = templates.shape[0]
# get transforms (ImageNet normalization expected by the detector backbone)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_transforms = transforms.Compose([transforms.ToTensor(), normalize])
# Select the compute device. Fixes two bugs: 'gpu' is not a valid torch device
# type (it must be 'cuda'), and the previous '> 0' test wrongly excluded
# GPU id 0 (the help text says -1 means CPU).
if args_arcface.gpu >= 0:
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
# get model
model_tinyfaces = get_model(args_tinyface.checkpoint, num_templates=num_templates)
# Receptive-field description of the detector (size/stride/offset in pixels).
rf = {
    'size': [859, 859],
    'stride': [8, 8],
    'offset': [-1, -1]
}
# process test cases
classes = ['NZP']
# Loop over each class folder: detect faces in every class photo, embed them
# with ArcFace, and match them against the class's profile embeddings.
output = list()
for this_class in classes:
    class_images = list(glob.glob('verification_images/class_images/' + this_class + '/*.jpg'))
    attendance_sheet = list()
    for class_image in class_images:
        # find detections with tinyface
        img = PIL.Image.open(class_image)
        # NOTE(review): basewidth is assigned but never used here.
        basewidth = 750
        qual = 100
        img_tensor = transforms.functional.to_tensor(img)
        dets = get_detections(model_tinyfaces, img_tensor, templates, rf, val_transforms,
                              prob_thresh=args_tinyface.prob_thresh,
                              nms_thresh=args_tinyface.nms_thresh, device=device)
        class_faces = list()
        class_scores = list()
        for i in range(len(dets)):  # for each detection
            if dets[i][4] > args_tinyface.threshold_score:  # if the tinyfaces score is good
                this_face = np.array(img.crop(dets[i][0:4]))  # get cropped face
                class_faces.append([this_face[:, :, ::-1].copy(), 100])  # append the cropped face (RGB -> BGR)
                class_scores.append(dets[i][4])  # append the score
        # calculate arcface embeddings for each sample picture
        detection_features, face_widths = EngageModel.get_embeddings(model_arcface, class_faces)
        # load student profile embeddings for the class in progress
        names = list()
        features = list()
        for filename in os.listdir('verification_images/class_profiles/' + this_class + '/'):
            if filename.endswith(".jpg"):
                file_path = os.path.join('verification_images/class_profiles/' + this_class + '/', filename)
                img = cv2.imread(file_path)
                img = model_arcface.get_input(img)
                f1 = model_arcface.get_feature(img)
                # Profile filename (without extension) doubles as the student id.
                name = os.path.splitext(filename)[0]
                names.append(name)
                features.append(f1)
        data = list([names, features])
        # compare samples to profile face embeddings, produce roll
        attendance_sheet.append(EngageModel.class_list(model_arcface, detection_features, data,
                                                       this_class, class_image, qual, 100))
        # calculate FP, FN, sensitivity etc
        print(class_image + str(qual) + "didn't fail!")
    output.append(list([list(itertools.chain.from_iterable(attendance_sheet))]))
# Dump the flattened attendance records to CSV for later inspection.
with open("output3.csv", 'w', newline='') as myfile:
    wr = csv.writer(myfile)
    for class_i in output:
        for class_photo in class_i:
            wr.writerows(class_photo)
# Load the ground-truth roll (maps a class session key to enrolled students),
# then compute confusion-matrix metrics per class.
with open("roll_call.json", "r") as read_file:
    data = json.load(read_file)
for i in classes:
    print('------------------------')
    print('PERFORMANCE METRICS FOR:')
    print(i)
    # Confusion-matrix counters for this class.
    fp = 0
    fn = 0
    tp = 0
    tn = 0
    for j in range(len(output[0][0])):
        # NOTE(review): this compares a fixed record's field ([2][2]) rather
        # than record j's ([j][2]); it looks like an indexing bug, but it is
        # kept as-is pending confirmation of the class_list record layout.
        if output[0][0][2][2] == i:
            key = output[0][0][j][3]     # class session identifier
            upi = output[0][0][j][0]     # student identifier
            status = output[0][0][j][1]  # presumably 1 = marked present, 0 = absent
            roll = data.get(key)
            if upi in roll:
                if status == 1:
                    tp += 1
                else:
                    fn += 1
                    print("FALSE NEGATIVE")
                    print(upi)
                    print(status)
                    print(key)
            else:
                if status == 0:
                    tn += 1
                else:
                    fp += 1
                    print("FALSE POSITIVE")
                    print(upi)
                    print(status)
                    print(key)
    print(tp)
    print(fp)
    print(tn)
    print(fn)
    if (tp + tn + fp + fn) > 0:
        accuracy = (tp + tn) / (tp + tn + fp + fn)
        print("Model Accuracy")
        print(accuracy)
    else:
        print("Division by zero - could not calculate accuracy")
    if (tp + fn) > 0:
        sensitivity = tp / (tp + fn)
        print("Model sensitivity / recall")
        print(sensitivity)
    else:
        print("Division by zero could not calculate sensitivity")
    if (tn + fp) > 0:
        specificity = tn / (tn + fp)
        print("Model specificity")
        print(specificity)
    else:
        # Fixed: this branch previously (and wrongly) reported 'sensitivity'.
        print("Division by zero - could not calculate specificity")
|
{"hexsha": "3bf355bdb1566d160b84b6da51219a25d855b192", "size": 7017, "ext": "py", "lang": "Python", "max_stars_repo_path": "verification/verify_results.py", "max_stars_repo_name": "philip-baker/engage", "max_stars_repo_head_hexsha": "dbc4c3278db07fd3b4c678af337a59ab3c319b55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "verification/verify_results.py", "max_issues_repo_name": "philip-baker/engage", "max_issues_repo_head_hexsha": "dbc4c3278db07fd3b4c678af337a59ab3c319b55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "verification/verify_results.py", "max_forks_repo_name": "philip-baker/engage", "max_forks_repo_head_hexsha": "dbc4c3278db07fd3b4c678af337a59ab3c319b55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5665024631, "max_line_length": 153, "alphanum_fraction": 0.6170728231, "include": true, "reason": "import numpy", "num_tokens": 1649}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools to return the installed astropy and photutils
versions.
"""
def _get_version_info():
    """
    Build a human-readable string with the installed astropy and
    photutils version numbers.

    Returns
    -------
    result : str
        The astropy and photutils versions.
    """
    # Imports are deferred to call time so simply importing this module
    # does not pull in the full astropy/photutils packages.
    from astropy import __version__ as astropy_version
    from photutils import __version__ as photutils_version

    result = f'astropy: {astropy_version}, photutils: {photutils_version}'
    return result
|
{"hexsha": "83e41a4b98d43d13f6d459129449314667986afa", "size": 516, "ext": "py", "lang": "Python", "max_stars_repo_path": "photutils/utils/_misc.py", "max_stars_repo_name": "Onoddil/photutils", "max_stars_repo_head_hexsha": "433f3e54d3f53282ae04eadde9e1ddf657944590", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "photutils/utils/_misc.py", "max_issues_repo_name": "Onoddil/photutils", "max_issues_repo_head_hexsha": "433f3e54d3f53282ae04eadde9e1ddf657944590", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "photutils/utils/_misc.py", "max_forks_repo_name": "Onoddil/photutils", "max_forks_repo_head_hexsha": "433f3e54d3f53282ae04eadde9e1ddf657944590", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4545454545, "max_line_length": 72, "alphanum_fraction": 0.7112403101, "include": true, "reason": "from astropy", "num_tokens": 119}
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
from tensorflow.python.ops import control_flow_util
from tensorflow.python.framework import function
class LayerPrepostprocess(tf.keras.Model):
    """Applies a pre-/post-processing command string around a sublayer.

    The sequence string is interpreted character by character in call():
    'a' adds the residual (`previous_value`) to `x`, 'n' applies the
    configured normalization. The string itself comes from
    `hparams.layer_preprocess_sequence` or
    `hparams.layer_postprocess_sequence`, selected by `process_type`.
    """
    def __init__(self,
                 process_type,
                 hparams,):
        super(LayerPrepostprocess, self).__init__()
        # process_type selects which hparams sequence string is used.
        if process_type == "pre":
            self._process_sequence = hparams.layer_preprocess_sequence
        elif process_type == "post":
            self._process_sequence = hparams.layer_postprocess_sequence
        else:
            raise ValueError("Unknown input process_type. It should be selected in ['pre', 'post']")
        # A norm block is only built when the sequence actually uses 'n'.
        # NOTE(review): an unrecognized hparams.norm_type leaves norm_block
        # unset and call() would fail later -- confirm intended norm types.
        if 'n' in self._process_sequence:
            if hparams.norm_type == "layer":
                self.norm_block = LayerNorm(hparams.hidden_size, hparams.norm_epsilon)
            elif hparams.norm_type == "group":
                self.norm_block = GroupNorm(hparams.hidden_size, hparams.norm_epsilon)
    def call(self, previous_value=None, x=None, training=False):
        """Run the command sequence on `x`; the literal string 'none' is a no-op."""
        if self._process_sequence == 'none':
            return x
        for c in self._process_sequence:
            if c == 'a':
                # Residual add.
                x += previous_value
            elif c == 'n':
                # Normalization (layer or group, per constructor).
                x = self.norm_block(x, training)
            else:
                tf.logging.error('Unknown type of layer pre-post processing command.')
                raise ValueError
        return x
class LayerNorm(tf.keras.Model):
    """Layer normalization over the last axis with learned scale and bias."""

    def __init__(self, vector_dim, norm_epsilon):
        """Create the trainable scale/bias variables for layer norm."""
        super(LayerNorm, self).__init__()
        self._epsilon = norm_epsilon
        self._scale = tf.get_variable(
            "layer_norm_scale", [vector_dim], initializer=tf.ones_initializer())
        self._bias = tf.get_variable(
            "layer_norm_bias", [vector_dim], initializer=tf.zeros_initializer())

    def call(self, x, training=False, mask=None):
        """Normalize `x` along its final dimension, then rescale and shift."""
        # Cast parameters to x's dtype so mixed-precision inputs work.
        eps = cast_like(self._epsilon, x)
        gamma = cast_like(self._scale, x)
        beta = cast_like(self._bias, x)
        mu = tf.reduce_mean(x, axis=[-1], keepdims=True)
        var = tf.reduce_mean(tf.squared_difference(x, mu), axis=[-1], keepdims=True)
        normalized = (x - mu) * tf.rsqrt(var + eps)
        return normalized * gamma + beta
class GroupNorm(tf.keras.Model):
    """Group normalization as in https://arxiv.org/abs/1803.08494."""
    def __init__(self, vector_dim, norm_epsilon, num_groups=8):
        # vector_dim: size of the last (channel) axis; the reshape in call()
        # assumes it is divisible by num_groups.
        super(GroupNorm, self).__init__()
        self._epsilon = norm_epsilon
        self._num_groups = num_groups
        self._vector_dim = vector_dim
        # Prepare variables.
        self._scale = tf.get_variable(
            "group_norm_scale", [vector_dim], initializer=tf.ones_initializer())
        self._bias = tf.get_variable(
            "group_norm_bias", [vector_dim], initializer=tf.zeros_initializer())
    def call(self, x, training=False, mask=None):
        # Split channels into groups, normalize within each group, rescale.
        x_shape = shape_list(x)
        filters = x_shape[-1]
        assert filters == self._vector_dim, "Disagreement found here."
        epsilon, scale, bias = [cast_like(t, x) for t in [self._epsilon, self._scale, self._bias]]
        x = tf.reshape(x, x_shape[:-1] + [self._num_groups, filters // self._num_groups])
        # NOTE(review): moment axes [1, 2, 4] assume a rank-4 input (e.g. NHWC)
        # reshaped to rank 5 -- confirm behavior for other input ranks.
        mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
        norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
        return tf.reshape(norm_x, x_shape) * scale + bias
class DenseReluDense(tf.keras.Model):
    """Position-wise feed-forward block: Dense -> ReLU -> Dense."""

    def __init__(self, filter_size, output_size):
        super(DenseReluDense, self).__init__()
        # Expansion layer with a ReLU non-linearity.
        self.dense_input_unit = Dense(filter_size, use_bias=True, activation=tf.nn.relu)
        # Projection back to the output dimension, no activation.
        self.dense_output_unit = Dense(output_size, use_bias=True, activation=None)

    def call(self, inputs, training=False, mask=None):
        """Apply both dense layers in sequence."""
        return self.dense_output_unit(self.dense_input_unit(inputs, training), training)
class Dense(tf.keras.Model):
    """
    Identical to tf.keras.layers.Dense

    Thin wrapper that accepts (and ignores) the extra training/mask call
    arguments used by the other blocks in this file.
    """
    def __init__(self, units, **kwargs):
        super(Dense, self).__init__()
        self.activations = tf.keras.layers.Dense(units, **kwargs)
    def call(self, x, training=False, mask=None):
        # A plain dense layer has no training-specific behavior.
        return self.activations(x)
def shape_list(x):
    """Return the shape of `x` as a list, using static dims where known.

    Dynamic (None) dimensions are replaced by the matching entries of
    tf.shape(x). If the rank itself is unknown, the dynamic shape tensor
    is returned instead of a list.
    """
    x = tf.convert_to_tensor(x)
    # Unknown rank: nothing static to report.
    if x.get_shape().dims is None:
        return tf.shape(x)
    dynamic = tf.shape(x)
    return [dim if dim is not None else dynamic[i]
            for i, dim in enumerate(x.get_shape().as_list())]
def cast_like(x, y):
    """Return `x` cast to `y`'s dtype (no-op when base dtypes already match)."""
    x = tf.convert_to_tensor(x)
    y = tf.convert_to_tensor(y)
    if x.dtype.base_dtype == y.dtype.base_dtype:
        return x
    converted = tf.cast(x, y.dtype)
    if converted.device != x.device:
        # Eager tensors have no .name attribute; use a placeholder label.
        try:
            x_name = x.name
        except AttributeError:
            x_name = "(eager Tensor)"
        tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name,
                           x.device, converted.device)
    return converted
def gather(params, indices, dtype=tf.float32):
    """Version of tf.gather that works faster on tpu."""
    # Outside XLA compilation the native op is correct and fastest.
    if not is_xla_compiled():
        return tf.gather(params, indices)
    # Under XLA, express the gather as a one-hot matmul, which lowers to
    # statically-shaped ops. Requires a static first dimension of params.
    vocab_size = params.get_shape().as_list()[0]
    indices_flat = tf.reshape(indices, [-1])
    out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
    # Restore the leading shape of `indices` (with params' last dim appended).
    out = reshape_like(out, tf.expand_dims(indices, -1))
    return out
def is_xla_compiled():
    """Whether we are building graph that will be compiled by XLA.
    This checks whether the code is executing within an XLA context.
    If True, model authors should ensure the graph they build is compilable by
    XLA. Specifically, they should ensure that all ops have XLA implementations
    and that all shapes are statically known.
    Returns:
      bool, whether the current graph will be compiled for XLA.
    """
    # Relies on a private TensorFlow API; may break across TF versions.
    ctxt = tf.get_default_graph()._get_control_flow_context()  # pylint: disable=protected-access
    return control_flow_util.GetContainingXLAContext(ctxt) is not None
def reshape_like(a, b):
    """Reshapes a to match the shape of b in all but the last dimension.

    The result keeps `a`'s last-dimension size; only the leading dims are
    taken from `b`.
    """
    ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
    # In graph mode, propagate whatever static shape info is available.
    if not tf.executing_eagerly():
        ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
    return ret
def weights_nonzero(labels):
    """Return a float mask: 1.0 where `labels` is non-zero, 0.0 at padding (id 0)."""
    nonpad = tf.not_equal(labels, 0)
    return to_float(nonpad)
def to_float(x):
    """Cast `x` to float32 (replacement for the deprecated tf.to_float)."""
    return tf.cast(x, dtype=tf.float32)
def pad_with_zeros(logits, labels):
    """Pad labels on the length dimension to match logits length.

    For 2-d labels (rank-3 tensors) the second length axis is padded too.
    """
    with tf.name_scope("pad_with_zeros", values=[logits, labels]):
        logits, labels = pad_to_same_length(logits, labels)
        if len(labels.shape) == 3:  # 2-d labels.
            logits, labels = pad_to_same_length(logits, labels, axis=2)
        return logits, labels
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
    """Pad tensors x and y on axis 1 so that they have the same length.

    Args:
        x: tensor to pad.
        y: tensor to pad.
        final_length_divisible_by: if > 1, additionally round the common
            length up to a multiple of this value.
        axis: axis to pad along; only 1 and 2 are supported.

    Returns:
        (x, y) zero-padded along `axis` to the same length.

    Raises:
        ValueError: if axis is not 1 or 2.
    """
    if axis not in [1, 2]:
        raise ValueError("Only axis=1 and axis=2 supported for now.")
    with tf.name_scope("pad_to_same_length", values=[x, y]):
        x_length = shape_list(x)[axis]
        y_length = shape_list(y)[axis]
        # If both lengths are static and already equal, nothing to do.
        if (isinstance(x_length, int) and isinstance(y_length, int) and
                x_length == y_length and final_length_divisible_by == 1):
            return x, y
        max_length = tf.maximum(x_length, y_length)
        if final_length_divisible_by > 1:
            # Find the nearest larger-or-equal integer divisible by given number.
            max_length += final_length_divisible_by - 1
            max_length //= final_length_divisible_by
            max_length *= final_length_divisible_by
        length_diff1 = max_length - x_length
        length_diff2 = max_length - y_length

        def padding_list(length_diff, arg):
            # Pad only the requested axis; all trailing axes get [0, 0].
            if axis == 1:
                return [[[0, 0], [0, length_diff]],
                        tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
            return [[[0, 0], [0, 0], [0, length_diff]],
                    tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]

        paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
        paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
        res_x = tf.pad(x, paddings1)
        res_y = tf.pad(y, paddings2)
        # Static shapes are the same except for axis=1.
        x_shape = x.shape.as_list()
        x_shape[axis] = None
        res_x.set_shape(x_shape)
        y_shape = y.shape.as_list()
        y_shape[axis] = None
        res_y.set_shape(y_shape)
        return res_x, res_y
def smoothing_cross_entropy(logits,
                            labels,
                            vocab_size,
                            confidence,
                            gaussian=False):
    """Cross entropy with label smoothing to limit over-confidence.

    Args:
        logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
        labels: Tensor of shape [batch_size, ?, ?, ?].
        vocab_size: Tensor representing the size of the vocabulary.
        confidence: Used to determine on and off values for label smoothing.
            If `gaussian` is true, `confidence` is the variance to the Gaussian
            distribution.
        gaussian: Uses a Gaussian distribution for label smoothing

    Returns:
        Tensor of shape [batch_size, ?, ?, ?].
    """
    with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
        # Low confidence is given to all non-true labels, uniformly.
        low_confidence = (1.0 - confidence) / to_float(vocab_size - 1)
        # Normalizing constant is the best cross-entropy value with soft targets.
        # We subtract it just for readability, makes no difference on learning.
        normalizing = -(
            confidence * tf.log(confidence) + to_float(vocab_size - 1) *
            low_confidence * tf.log(low_confidence + 1e-20))

        if gaussian and confidence > 0.0:
            # Smooth targets with a Gaussian centered on the true label id.
            labels = tf.cast(labels, tf.float32)
            normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
            # Locations to evaluate the probability distributions.
            soft_targets = normal_dist.prob(
                tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
            # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
            # logits: [batch_size, ?, ?, ?, vocab_size]
            soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
        else:
            # One-hot targets with confidence on the true label and
            # low_confidence everywhere else.
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
        xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=soft_targets)
        return xentropy - normalizing
def should_generate_summaries():
    """Is this an appropriate context to generate summaries.

    Returns:
        a boolean
    """
    name_scope = tf.contrib.framework.get_name_scope()
    if name_scope and "while/" in name_scope:
        # Summaries don't work well within tf.while_loop()
        return False
    if tf.get_variable_scope().reuse:
        # Avoid generating separate summaries for different data shards
        return False
    return True
# The custom python_grad_func densifies the incoming gradient
# (IndexedSlices -> Tensor); the op itself is the identity.
@function.Defun(
    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
    shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
    """Identity operation whose gradient is converted to a `Tensor`.

    Currently, the gradient to `tf.concat` is particularly expensive to
    compute if dy is an `IndexedSlices` (a lack of GPU implementation
    forces the gradient operation onto CPU). This situation occurs when
    the output of the `tf.concat` is eventually passed to `tf.gather`.
    It is sometimes faster to convert the gradient to a `Tensor`, so as
    to get the cheaper gradient for `tf.concat`. To do this, replace
    `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.

    Args:
        x: A `Tensor`.

    Returns:
        The input `Tensor`.
    """
    return x
def flatten4d3d(x):
    """Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
    batch, height, width, channels = shape_list(x)
    return tf.reshape(x, [batch, height * width, channels])
def shift_right_3d(x, pad_value=None):
    """Shift the second dimension of x right by one."""
    if pad_value is None:
        # Prepend a zero timestep.
        padded = tf.pad(x, [[0, 0], [1, 0], [0, 0]])
    else:
        # Prepend the caller-supplied padding timestep.
        padded = tf.concat([pad_value, x], axis=1)
    # Drop the last timestep so the length is unchanged.
    return padded[:, :-1, :]
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
    """Matrix band part of ones.

    Args:
        rows: int determining number of rows in output
        cols: int
        num_lower: int, maximum distance backward. Negative values indicate
            unlimited.
        num_upper: int, maximum distance forward. Negative values indicate
            unlimited.
        out_shape: shape to reshape output by.

    Returns:
        Tensor of size rows * cols reshaped into shape out_shape.
    """
    if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
        # Needed info is constant, so we construct in numpy
        if num_lower < 0:
            num_lower = rows - 1
        if num_upper < 0:
            num_upper = cols - 1
        # The band is the intersection of a lower- and an upper-triangular mask.
        lower_mask = np.tri(cols, rows, num_lower).T
        upper_mask = np.tri(rows, cols, num_upper)
        band = np.ones((rows, cols)) * lower_mask * upper_mask
        if out_shape:
            band = band.reshape(out_shape)
        band = tf.constant(band, tf.float32)
    else:
        # Sizes are dynamic tensors: fall back to the TF op.
        band = tf.matrix_band_part(
            tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
            tf.cast(num_upper, tf.int64))
        if out_shape:
            band = tf.reshape(band, out_shape)
    return band
|
{"hexsha": "bd067621c090fe6dea528b16cd897c6d555597bc", "size": 13635, "ext": "py", "lang": "Python", "max_stars_repo_path": "pct/layers/common_layers.py", "max_stars_repo_name": "EstelleHuang666/mini_transformer", "max_stars_repo_head_hexsha": "dc6fc270c573e613bc2f5cad62534c6416e7dbd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-24T00:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-24T00:35:04.000Z", "max_issues_repo_path": "pct/layers/common_layers.py", "max_issues_repo_name": "EstelleHuang666/mini_transformer", "max_issues_repo_head_hexsha": "dc6fc270c573e613bc2f5cad62534c6416e7dbd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pct/layers/common_layers.py", "max_forks_repo_name": "EstelleHuang666/mini_transformer", "max_forks_repo_head_hexsha": "dc6fc270c573e613bc2f5cad62534c6416e7dbd5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-22T08:25:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-22T08:25:47.000Z", "avg_line_length": 33.75, "max_line_length": 95, "alphanum_fraction": 0.6776677668, "include": true, "reason": "import numpy", "num_tokens": 3463}
|
# -*- coding: utf-8 -*-
import math

import matplotlib.pyplot as pl
import numpy as np
#%% Q1 setup
def binomial(n, p, N):
    """Binomial pmf: probability of n successes in N trials at rate p.

    Args:
        n: number of successes, 0 <= n <= N.
        p: success probability per trial.
        N: number of trials.

    Returns:
        C(N, n) * p**n * (1-p)**(N-n).
    """
    # math.comb replaces the deprecated np.math.factorial alias (np.math was
    # removed from NumPy) and avoids building three huge factorials.
    return math.comb(N, n) * (p ** n) * ((1 - p) ** (N - n))
def uniform(n, p, N):
    """Uniform pmf over the N + 1 outcomes 0..N (n and p are unused)."""
    return 1.0 / (N + 1)
def KL(p, N, distp, distq):
    """Kullback-Leibler divergence D(distp || distq) in bits.

    Args:
        p: parameter forwarded to both distribution callables.
        N: the distributions are supported on the integers 0..N.
        distp: callable (n, p, N) -> probability of n under P.
        distq: callable (n, p, N) -> probability of n under Q.

    Returns:
        sum_n P(n) * log2(P(n) / Q(n)).
    """
    kl = 0
    # The support is 0..N inclusive (N + 1 outcomes); the original
    # range(N) silently dropped the n == N term.
    for n in range(N + 1):
        kl -= distp(n, p, N) * np.log2(distq(n, p, N) / distp(n, p, N))
    return kl
def align_yaxis(ax1, v1, ax2, v2):
    """Adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1."""
    # Convert both anchor values to display (pixel) coordinates.
    _, y1 = ax1.transData.transform((0, v1))
    _, y2 = ax2.transData.transform((0, v2))
    # Map the pixel offset back into ax2's data coordinates.
    inv = ax2.transData.inverted()
    _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
    # Shift ax2's limits by that data-space offset.
    miny, maxy = ax2.get_ylim()
    ax2.set_ylim(miny+dy, maxy+dy)
#%% Q1(c)
# Sweep p over [0, 1] and compute D(uniform || binomial) for N=10 and N=100.
KL_N10 = []
KL_N100 = []
for p in np.linspace(0, 1, 1000):
    KL_N10.append(KL(p, 10, uniform, binomial))
    KL_N100.append(KL(p, 100, uniform, binomial))

# Plot both curves on twin y-axes (their scales differ) and align the
# zeros of the two axes so the curves are visually comparable.
fig, ax = pl.subplots(1, 1)
ax.plot(np.linspace(0, 1, 1000), KL_N10, 'b-', label="N=10")
ax2 = ax.twinx()
ax2.plot(np.linspace(0, 1, 1000), KL_N100, 'r-', label="N=100")
ax.set_xlabel("p")
ax.set_ylabel("KL divergence (N=10)")
ax2.set_ylabel("KL divergence (N=100)")
align_yaxis(ax, 0, ax2, 0)
ax.legend()
ax2.legend()
#%% Q1(d)
def poisson(n, p, N):
    """Poisson pmf with rate lam = p * N, evaluated at n.

    Args:
        n: non-negative integer outcome.
        p: per-trial success probability (sets the rate together with N).
        N: number of trials being approximated.

    Returns:
        lam**n * exp(-lam) / n!.
    """
    lam = p * N
    # math.factorial replaces the deprecated/removed np.math alias.
    return (lam ** n) * (np.exp(-lam)) / math.factorial(n)
# D(Poisson(pN) || Binomial(N, p)) for N=10 and N=100 over p in [0, 1].
KL_lam_N10 = []
KL_lam_N100 = []
for p in np.linspace(0, 1, 1000):
    KL_lam_N10.append(KL(p, 10, poisson, binomial))
    KL_lam_N100.append(KL(p, 100, poisson, binomial))

fig1, ax1 = pl.subplots(1, 1)
ax1.plot(np.linspace(0, 1, 1000), KL_lam_N10, 'b-', label="N=10")
ax3 = ax1.twinx()
ax3.plot(np.linspace(0, 1, 1000), KL_lam_N100, 'r-', label="N=100")
ax1.set_xlabel("p")
ax1.set_ylabel("KL divergence (N=10)")
# Bug fix: the label previously said "N=10000" although the plotted data
# on this axis is the N=100 sweep.
ax3.set_ylabel("KL divergence (N=100)")
align_yaxis(ax1, 0, ax3, 0)
ax1.legend(loc=0)
ax3.legend(loc=6)
#%% Q5(a)
import math
def eff(N):
    """Coding efficiency for N symbols: the ideal code length log2(N)
    divided by the integer code length ceil(log2(N)) actually required."""
    ideal_length = np.log2(N)
    return ideal_length / math.ceil(ideal_length)
# Efficiency of a fixed-length binary code for N = 2..100 symbols.
N_arr = []
for n in range(2, 101):
    N_arr.append(eff(n))

fig2, ax4 = pl.subplots(1, 1)
ax4.plot(np.arange(2, 101), N_arr, 'b-')
ax4.set_xlabel(r"$N$")
ax4.set_ylabel(r"Efficiency")
#%%
# Report the worst N, its efficiency, and the efficiency at N = 100.
N_arr = np.array(N_arr)
print(np.arange(2, 101)[np.argmin(N_arr)])  # N = 5 gives smallest efficiency
print(np.min(N_arr))
print(N_arr[-1])
|
{"hexsha": "15cb5eb58f68cbe18e3650e64391ce786f96e5b7", "size": 2372, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/info_ps3.py", "max_stars_repo_name": "adrielyeung/info-theory", "max_stars_repo_head_hexsha": "89863cf5d704c4c6396647d29d6e0446e8627c4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-06T19:01:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-06T19:01:13.000Z", "max_issues_repo_path": "code/info_ps3.py", "max_issues_repo_name": "adrielyeung/info-theory", "max_issues_repo_head_hexsha": "89863cf5d704c4c6396647d29d6e0446e8627c4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/info_ps3.py", "max_forks_repo_name": "adrielyeung/info-theory", "max_forks_repo_head_hexsha": "89863cf5d704c4c6396647d29d6e0446e8627c4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3555555556, "max_line_length": 100, "alphanum_fraction": 0.6037099494, "include": true, "reason": "import numpy", "num_tokens": 858}
|
# Binary heap over the values `v` that additionally maintains `r`, a
# per-node reduction over each subtree, plus the key <-> index
# permutation (`k2i`, `i2k`) so entries can be addressed by key.
@typeparams struct MutableBinaryHeapWithReduction
    v::{}                 # heap values, one per node, ordered by `order`
    r::{}                 # r[i]: `reduce` folded over the subtree rooted at i
    k2i::{}               # key -> current heap index
    i2k::{}               # heap index -> key
    order::{<:Ordering}   # ordering that defines the heap property
    reduce::{}            # associative reduction used for subtree summaries
end
"""
    MutableBinaryHeapWithReduction(v, [r], [i2k], [k2i]; reduce, lt, by, rev, order)

Heapify `v` in place while tracking the key permutation, precompute the
subtree reductions into `r`, and return the assembled heap.  When no key
maps are supplied, keys default to the initial positions `1:length(v)`.
"""
function MutableBinaryHeapWithReduction(
    v,
    r = Vector{eltype(v)}(undef, length(v)),
    i2k = nothing,
    k2i = nothing;
    reduce,
    lt = isless,
    by = identity,
    rev::Bool = false,
    order::Ordering = Forward
)
    if isnothing(i2k)
        # No maps given: keys are the identity permutation.
        @assert isnothing(k2i)
        n = length(v)
        i2k = collect(1:n)
        k2i = collect(1:n)
    elseif isnothing(k2i)
        # Derive the inverse map from the provided index -> key map.
        k2i = Dict(k=>i for (i,k) in enumerate(i2k))
    end
    order = ord(lt, by, rev, order)
    # PermutationTracker keeps k2i/i2k consistent with every swap that
    # heapify! performs; By orders the (key, value) pairs by their value.
    heapify!(
        PermutationTracker(v,k2i,i2k),
        By(((k,vk),) -> vk, order)
    )
    reduce_subtrees!(v,r,reduce)
    return MutableBinaryHeapWithReduction(
        v, r, k2i, i2k,
        order, reduce
    )
end
"""
    update!(h, k, vk)

Assign the value `vk` to the entry with key `k`, restore the heap
property by bubbling the entry up or down, refresh the affected subtree
reductions, and return the entry's new heap index.
"""
function update!(h::MutableBinaryHeapWithReduction, k, vk)
    @unpack v,r,k2i,i2k,order,reduce = h
    n = length(v)
    if n == 0; return k; end
    i = k2i[k]
    tv = PermutationTracker(v, k2i, i2k)
    tvk = (k,vk)
    torder = By(((k,vk),) -> vk, order)
    if lt(order, vk, v[i])
        # New value sorts before the old one: bubble up, updating the
        # reductions for each node passed (piggyback).
        i = walk(
            piggyback(
                Bubbler!(tv, tvk, torder, Val(:up)),
                Reducer!(v,r,reduce)
            ),
            n, i, Val(:up)
        )
    else
        # New value sorts after the old one: bubble down; the reductions
        # along the path are fixed up afterwards.
        i = walk(
            Bubbler!(tv, tvk, torder, Val(:down)),
            n, i, Val(:down)
        )
    end
    tv[i] = tvk
    # Recompute reductions from the final position up to the root.
    walk(Reducer!(v,r,reduce),n,i, Val(:up))
    return i
end
|
{"hexsha": "4592f316d672b9e88a34f0f7464e83514c5a7a7e", "size": 1525, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MutableBinaryHeapsWithReduction.jl", "max_stars_repo_name": "synchronoustechnologies/BinaryHeaps.jl", "max_stars_repo_head_hexsha": "01066c3a451c25d004111a277e82e72ff3e57332", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MutableBinaryHeapsWithReduction.jl", "max_issues_repo_name": "synchronoustechnologies/BinaryHeaps.jl", "max_issues_repo_head_hexsha": "01066c3a451c25d004111a277e82e72ff3e57332", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MutableBinaryHeapsWithReduction.jl", "max_forks_repo_name": "synchronoustechnologies/BinaryHeaps.jl", "max_forks_repo_head_hexsha": "01066c3a451c25d004111a277e82e72ff3e57332", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4264705882, "max_line_length": 58, "alphanum_fraction": 0.5160655738, "num_tokens": 504}
|
"""doc
# leanai.data.datasets.images_in_classfolders_dataset
> An implementation of a dataset where the images are stored in folders which have the class names.
"""
from typing import NamedTuple
import numpy as np
import os
import cv2
from leanai.core.definitions import SPLIT_TRAIN, SPLIT_VAL, SPLIT_TEST
from leanai.data.dataset import SimpleDataset
InputType = NamedTuple("Input", image=np.ndarray)
OutputType = NamedTuple("Output", class_id=np.ndarray)
class ImagesInClassfoldersDataset(SimpleDataset):
    """Dataset over images stored in folders named after their class.

    A sample token has the form "<class>/<image_name>".  Note that the
    train/val/test split is taken over the class folders, not over
    individual images, so the splits contain disjoint classes.
    """

    def __init__(self, split: str, data_path, data_train_split=0.6, data_val_split=0.2, data_test_split=0.2) -> None:
        super().__init__(InputType, OutputType)
        self.data_path = data_path
        self.classes = os.listdir(data_path)
        sample_tokens = []
        # Map every class name (before splitting) to a stable integer id.
        self.class_mapping = {}
        for idx, class_id in enumerate(self.classes):
            self.class_mapping[class_id] = idx
        # Split the data: the fractions select disjoint ranges of classes.
        assert data_train_split + data_val_split + data_test_split == 1
        train_end = int(data_train_split*len(self.classes))
        val_end = int((data_train_split+data_val_split)*len(self.classes))
        if split == SPLIT_TRAIN:
            self.classes = self.classes[:train_end]
        if split == SPLIT_VAL:
            self.classes = self.classes[train_end:val_end]
        if split == SPLIT_TEST:
            self.classes = self.classes[val_end:]
        # Collect "<class>/<image>" tokens for every image of the kept classes.
        for class_id in self.classes:
            image_names = os.listdir(os.path.join(data_path, class_id))
            image_names = [class_id + "/" + image_name for image_name in image_names]
            sample_tokens.extend(image_names)
        self.set_sample_tokens(sample_tokens)

    def get_image(self, sample_token):
        """Load the image for `sample_token`, converting BGR -> RGB."""
        image = cv2.imread(os.path.join(self.data_path, sample_token))[:,:,::-1]
        return image

    def get_class_id(self, sample_token):
        """Return the class-folder name encoded in the sample token."""
        label = sample_token.split("/")[0]
        return label
def test_visualization(data_path):
    """Visual smoke test: show the first training sample with its label.

    Args:
        data_path: root folder whose subfolders are the class names.
    """
    import matplotlib.pyplot as plt
    # Bug fix: the data_path argument was previously ignored and an empty
    # string was passed to the dataset constructor.
    dataset = ImagesInClassfoldersDataset(SPLIT_TRAIN, data_path)
    image, class_id = dataset[0]
    plt.title(class_id.class_id)
    plt.imshow(image[0])
    plt.show()
# Manual smoke test: python images_in_classfolders_dataset.py <data_path>
if __name__ == "__main__":
    import sys
    test_visualization(sys.argv[1])
|
{"hexsha": "c28d7fc7bea8044400eeb7bb8953475caa79402d", "size": 2265, "ext": "py", "lang": "Python", "max_stars_repo_path": "leanai/data/datasets/images_in_classfolders_dataset.py", "max_stars_repo_name": "penguinmenac3/leanai", "max_stars_repo_head_hexsha": "6d26575b248ff03c4a24009cd82f26ea99d96d15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-28T21:32:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-28T21:32:59.000Z", "max_issues_repo_path": "leanai/data/datasets/images_in_classfolders_dataset.py", "max_issues_repo_name": "penguinmenac3/leanai", "max_issues_repo_head_hexsha": "6d26575b248ff03c4a24009cd82f26ea99d96d15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "leanai/data/datasets/images_in_classfolders_dataset.py", "max_forks_repo_name": "penguinmenac3/leanai", "max_forks_repo_head_hexsha": "6d26575b248ff03c4a24009cd82f26ea99d96d15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8059701493, "max_line_length": 117, "alphanum_fraction": 0.6865342163, "include": true, "reason": "import numpy", "num_tokens": 516}
|
import numpy as np
from mex.simplex.simplex_networks import create_matrix
from mex.simplex.problem_definition import constrain, obj, maxz
from mex.utils.general import generates_matrix, generate_tableau
class Maximizer():
    """
    Maximize the objective function of a linear program using the
    simplex helpers from `mex`.

    Args:
        A (matrix): constraint coefficient matrix.
        b (vector): right-hand-side values of the constraints.
        c (vector): objective-function coefficients (sign convention as
            expected by `maxz`; see the example below).

    >>> A_max = [[1, 0], [0, 2], [3, 2]]
    >>> b_max = [[4], [12], [18]]
    >>> c_max = [[-3], [-5]]
    >>>
    >>> maxim = Maximizer(A_max, b_max, c_max)
    >>> maxim.solve()
    >>> max_approx = maxim.get_max()
    >>> coeff_approx = maxim.get_coeff()
    """

    def __init__(self, A, b, c):
        self.A_matrix = A
        self.b_vector = b
        self.c_vector = c
        # Simplex tableau assembled from the problem data.
        self.matrix = generates_matrix(A, b, c)
        # Populated by solve().
        self.max = None
        self.coeff = None

    def add_constraints(self, lowerbounds, upperbounds):
        """
        Add constraints to current matrix.

        Rebuilds the tableau including per-variable bounds.

        Args:
            lowerbounds (vector): lower bounds of the problem.
            upperbounds (vector): upper bounds of the problem.
        """
        self.matrix = generate_tableau(self.A_matrix, self.b_vector, lowerbounds, upperbounds, self.c_vector, compr=False)

    def solve(self):
        """
        Solve the maximization problem and cache the results.
        """
        solve = maxz(self.matrix)
        self.max = solve['max']
        # Drop the trailing 'max' entry, keeping only the variable values.
        self.coeff = np.array(list(solve.values()))[:-1]

    def get_max(self):
        """
        Obtain the approximated maximum value.

        Returns:
            max (value): Maximum value of the problem (None before solve()).
        """
        return self.max

    def get_coeff(self):
        """
        Obtain the approximated coefficients per variable.

        Returns:
            coeff (numpy.ndarray): Array of the approximated variable
            values, excluding the objective value (None before solve()).
        """
        return self.coeff
|
{"hexsha": "98f8a4a4522d2cfb50c98971dd6d6db35d37bd27", "size": 1842, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mex/simplex/maximizer_class.py", "max_stars_repo_name": "lecepe00/mex_simplex", "max_stars_repo_head_hexsha": "ab103bc7df4a8fa73daac4469c50eae43a55ffca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-12T02:34:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-12T02:34:49.000Z", "max_issues_repo_path": "src/mex/simplex/maximizer_class.py", "max_issues_repo_name": "lecepe00/mex_simplex", "max_issues_repo_head_hexsha": "ab103bc7df4a8fa73daac4469c50eae43a55ffca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-05-14T04:44:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T22:23:12.000Z", "max_forks_repo_path": "src/mex/simplex/maximizer_class.py", "max_forks_repo_name": "lecepe00/mex_simplex", "max_forks_repo_head_hexsha": "ab103bc7df4a8fa73daac4469c50eae43a55ffca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-12T17:04:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-12T17:04:52.000Z", "avg_line_length": 25.5833333333, "max_line_length": 122, "alphanum_fraction": 0.5743756786, "include": true, "reason": "import numpy", "num_tokens": 424}
|
C$Procedure VRELG ( Vector relative difference, general dimension )

      DOUBLE PRECISION FUNCTION VRELG ( V1, V2, NDIM )

C
C     Return the relative difference between two double precision
C     vectors of dimension NDIM:
C
C                      || V1 - V2 ||
C        VRELG  =  ----------------------
C                  MAX ( ||V1||, ||V2|| )
C
C     where || X || denotes the Euclidean norm of X.  The result lies
C     in the interval [0, 2].  If both V1 and V2 are zero vectors the
C     relative difference is defined to be zero.
C
C     This routine is error free.
C
      DOUBLE PRECISION      V1 ( * )
      DOUBLE PRECISION      V2 ( * )
      INTEGER               NDIM

C
C     SPICELIB functions
C
      DOUBLE PRECISION      VNORMG
      DOUBLE PRECISION      VDISTG

C
C     Local variables
C
      DOUBLE PRECISION      NUMER
      DOUBLE PRECISION      DENOM

C
C     The distance between the vectors is the numerator.  When it is
C     zero -- which covers the case of two zero vectors -- the relative
C     difference is zero by definition, and the division (whose
C     denominator would also be zero in that case) is skipped.
C
      NUMER = VDISTG ( V1, V2, NDIM )

      IF ( NUMER .EQ. 0.D0 ) THEN
         VRELG = 0.D0
      ELSE
         DENOM = MAX ( VNORMG(V1,NDIM), VNORMG(V2,NDIM) )
         VRELG = NUMER / DENOM
      END IF

      RETURN
      END
|
{"hexsha": "9c58e70092f6281ff284b5927daa505370d2e59c", "size": 6897, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/nasa_f/vrelg.f", "max_stars_repo_name": "agforero/FTFramework", "max_stars_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-19T21:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:57:25.000Z", "max_issues_repo_path": "source/nasa_f/vrelg.f", "max_issues_repo_name": "agforero/fortran-testing-framework", "max_issues_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-07T21:17:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T02:18:07.000Z", "max_forks_repo_path": "source/nasa_f/vrelg.f", "max_forks_repo_name": "agforero/fortran-testing-framework", "max_forks_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:41:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:41:53.000Z", "avg_line_length": 26.4252873563, "max_line_length": 72, "alphanum_fraction": 0.5489343193, "num_tokens": 1878}
|
from __future__ import division
import numpy as np
import audio_producer
class SpeechModelState(object):
    """Carry-over state between consecutive streamed audio packets."""

    def __init__(self, rnn_state):
        # Recurrent network state produced by the previous packet.
        self.rnn_state = rnn_state
        # Tail of raw audio not yet consumed by the spectrogram windows.
        self.audio = None
        # Tail of feature frames not yet consumed by the temporal context.
        self.frames = None
class StreamingSpeechModel(object):
    """Wraps a trained speech model for packet-by-packet streaming use.

    Each call to `propagate_packet` consumes one chunk of raw audio,
    prepends the leftover audio/feature context carried in the state,
    and returns the per-frame output probabilities.
    """

    def __init__(self, model, session, sample_rate):
        # `sample_rate` is the feature/model rate; packets may arrive at a
        # different rate (passed per call to propagate_packet).
        self.model = model
        self.session = session
        self.sample_rate = sample_rate
        self.context = model.temporal_context
        self.stride = model.stride
        # Spectrogram window and hop lengths in milliseconds.
        self.spec_window = 25
        self.spec_hop = 10
        self.ops = [model.probabilities, model.state]

    def initial_state(self):
        """Build the carry-over state for the first packet of a stream."""
        # NOTE(review): this is a generator expression, not a tuple, so it
        # can only be consumed once — likely intended tuple(...); confirm.
        rnn_state = (i.eval(session=self.session)
                     for i in self.model.initial_state)
        return SpeechModelState(rnn_state)

    def propagate_packet(self, packet, sample_rate, state):
        """Run the model on one audio packet.

        Args:
            packet: 1-d array of raw audio samples at `sample_rate`.
            sample_rate: sample rate of `packet`.
            state: SpeechModelState carried over from the previous call
                (mutated in place and also returned).

        Returns:
            (probs, state): frame probabilities (empty until enough frames
            have accumulated) and the updated carry-over state.
        """
        model = self.model
        # Prepend the leftover audio so spectrogram windows line up across
        # packet boundaries.
        if state.audio is not None:
            packet = np.hstack([state.audio, packet])
        feats = audio_producer.compute_features_raw(packet,
                                                    self.sample_rate,
                                                    sample_rate)
        # Prepend leftover feature frames for the convolution context.
        if state.frames is not None:
            feats = np.hstack([state.frames, feats])
        if feats.shape[1] >= self.context:
            feed_dict = model.feed_dict([feats], rnn_state=state.rnn_state)
            res = self.session.run(self.ops, feed_dict)
            probs, rnn_state = res
            probs = probs.squeeze(axis=1)
        else:
            # Not enough frames yet to evaluate the model.
            probs = np.empty((0, model.output_dim), dtype=np.float32)
            rnn_state = None
        ## Compute the audio overlap pre specgram
        n_window = int(self.spec_window * sample_rate / 1000)
        n_hop = int(self.spec_hop * sample_rate / 1000)
        skip = n_hop - (packet.shape[0] - n_window) % n_hop
        n_overlap = n_window - skip
        state.audio = packet[-n_overlap:]
        ## Compute the features overlap pre convolution
        skip = self.stride - (feats.shape[1] - self.context) % self.stride
        frame_overlap = self.context - skip
        state.frames = feats[:, -frame_overlap:]
        state.rnn_state = rnn_state
        return probs, state
|
{"hexsha": "fa0e5d55619271248ee3cf7604e4c4e552774b72", "size": 2218, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/streaming_model.py", "max_stars_repo_name": "gaoyiyeah/KWS-CTC", "max_stars_repo_head_hexsha": "28fdc2062281996d6408e41a9b49febf3334d730", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 340, "max_stars_repo_stars_event_min_datetime": "2017-12-15T22:41:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:51:05.000Z", "max_issues_repo_path": "models/streaming_model.py", "max_issues_repo_name": "gaoyiyeah/KWS-CTC", "max_issues_repo_head_hexsha": "28fdc2062281996d6408e41a9b49febf3334d730", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2018-03-08T07:52:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:26:52.000Z", "max_forks_repo_path": "models/streaming_model.py", "max_forks_repo_name": "gaoyiyeah/KWS-CTC", "max_forks_repo_head_hexsha": "28fdc2062281996d6408e41a9b49febf3334d730", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 82, "max_forks_repo_forks_event_min_datetime": "2017-12-21T01:05:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T09:27:23.000Z", "avg_line_length": 31.6857142857, "max_line_length": 75, "alphanum_fraction": 0.5987376014, "include": true, "reason": "import numpy", "num_tokens": 474}
|
'''subcommands/test.py'''
"""
Test auto-editor and make sure everything is working.
"""
# Internal Libraries
import os
import sys
import shutil
import platform
import subprocess
from time import perf_counter
# Included Libraries
from auto_editor.utils.func import clean_list
from auto_editor.utils.log import Log
import auto_editor.vanparse as vanparse
def test_options(parser):
parser.add_argument('--ffprobe_location', default='ffprobe',
help='point to your custom ffmpeg file.')
parser.add_argument('--only', '-n', nargs='*')
parser.add_argument('--help', '-h', action='store_true',
help='print info about the program or an option and exit.')
return parser
class FFprobe():
    """Thin wrapper around an ffprobe binary for querying media files."""

    def __init__(self, path):
        # Path (or command name) of the ffprobe executable.
        self.path = path

    def run(self, cmd):
        """Run ffprobe with `cmd` and return its combined stdout/stderr.

        Note: mutates the caller's `cmd` list by inserting the binary path.
        """
        cmd.insert(0, self.path)
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        stdout, __ = process.communicate()
        return stdout.decode('utf-8')

    def pipe(self, cmd):
        """Run ffprobe quietly (-v error) and return its decoded output."""
        full_cmd = [self.path, '-v', 'error'] + cmd
        process = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        stdout, __ = process.communicate()
        return stdout.decode('utf-8')

    def _get(self, file, stream, the_type, track, of='compact=p=0:nk=1'):
        # Query one stream entry of `file`, e.g. _get(f, 'codec_name', 'a', 0).
        return self.pipe(['-select_streams', '{}:{}'.format(the_type, track),
            '-show_entries', 'stream={}'.format(stream), '-of', of, file]).strip()

    def getResolution(self, file):
        """Return the video resolution as an 'AxB' string."""
        return self._get(file, 'height,width', 'v', 0, of='csv=s=x:p=0')

    def getTimeBase(self, file):
        """Return the average frame rate as a 'num/den' fraction string."""
        return self.pipe(['-select_streams', 'v', '-show_entries',
            'stream=avg_frame_rate', '-of', 'compact=p=0:nk=1', file]).strip()

    def getFrameRate(self, file):
        """Return the average frame rate as a float."""
        nums = clean_list(self.getTimeBase(file).split('/'), '\r\t\n')
        return int(nums[0]) / int(nums[1])

    def getAudioCodec(self, file, track=0):
        """Return the codec name of the given audio track."""
        return self._get(file, 'codec_name', 'a', track)

    def getVideoCodec(self, file, track=0):
        """Return the codec name of the given video track."""
        return self._get(file, 'codec_name', 'v', track)

    def getSampleRate(self, file, track=0):
        """Return the sample rate of the given audio track."""
        return self._get(file, 'sample_rate', 'a', track)

    def getVLanguage(self, file):
        """Return index plus language tag for all video streams."""
        return self.pipe(['-show_entries', 'stream=index:stream_tags=language',
            '-select_streams', 'v', '-of', 'compact=p=0:nk=1', file]).strip()

    def getALanguage(self, file):
        """Return index plus language tag for all audio streams."""
        return self.pipe(['-show_entries', 'stream=index:stream_tags=language',
            '-select_streams', 'a', '-of', 'compact=p=0:nk=1', file]).strip()

    def AudioBitRate(self, file):
        """Return the audio bit rate formatted with an SI suffix, e.g. '317k'."""

        def bitrate_format(num):
            # Scale down by thousands and pick the matching suffix.
            magnitude = 0
            while abs(num) >= 1000:
                magnitude += 1
                num /= 1000.0
            num = round(num)
            return '%d%s' % (num, ['', 'k', 'm', 'g', 't', 'p'][magnitude])

        exact_bitrate = self._get(file, 'bit_rate', 'a', 0)
        return bitrate_format(int(exact_bitrate))
def pipe_to_console(cmd):
    """Run `cmd`, wait for it to finish, and return
    (returncode, stdout, stderr) with both streams decoded as UTF-8."""
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return (result.returncode,
            result.stdout.decode('utf-8'),
            result.stderr.decode('utf-8'))
def cleanup(the_dir):
    """Delete auto-editor artifacts from a directory.

    Removes rendered '_ALTERED' files, exported .xml/.json/.fcpxml/.mlt
    files, and any '*_tracks' directories.
    """
    export_exts = ('.xml', '.json', '.fcpxml', '.mlt')
    for name in os.listdir(the_dir):
        full = os.path.join(the_dir, name)
        if '_ALTERED' in full or full.endswith(export_exts):
            os.remove(full)
        if full.endswith('_tracks'):
            shutil.rmtree(full)
def clean_all():
    """Remove test artifacts from both the resources folder and the cwd."""
    for folder in ('resources', os.getcwd()):
        cleanup(folder)
def getRunner():
    """Command prefix that launches auto-editor as a module on this platform."""
    interpreter = 'py' if platform.system() == 'Windows' else 'python3'
    return [interpreter, '-m', 'auto_editor']
def run_program(cmd):
    """Run auto-editor with the given args; raise if it exits non-zero.

    '--no_open' is appended automatically when the first argument contains
    a dot (i.e. looks like a media file), so tests never open a player.
    """
    looks_like_file = '.' in cmd[0]
    full_cmd = getRunner() + cmd
    if looks_like_file:
        full_cmd.append('--no_open')
    returncode, stdout, stderr = pipe_to_console(full_cmd)
    if returncode > 0:
        raise Exception('{}\n{}\n'.format(stdout, stderr))
def check_for_error(cmd, match=None):
    """Assert auto-editor fails *cleanly* (prints 'Error!', doesn't crash).

    If `match` is given, the error text must also contain it.
    """
    returncode, stdout, stderr = pipe_to_console(getRunner() + cmd)
    if returncode <= 0:
        raise Exception('Program should not respond with a code 0.')
    if 'Error!' not in stderr:
        # Non-zero exit but no graceful error message: a real crash.
        raise Exception('Program crashed.\n{}\n{}'.format(stdout, stderr))
    if match is not None and match not in stderr:
        raise Exception('Could\'t find "{}"'.format(match))
def inspect(path, *args):
    """Check properties of a rendered media file.

    Each extra argument is a ``[probe_function, expected_value]`` pair; the
    function is called with ``path`` and its result compared to the expected
    value. Floats are compared loosely (to absorb ffprobe rounding) and
    bitrate strings ending in 'k' may differ by 1.

    Raises an Exception on the first mismatch, or if ``path`` does not exist.
    """
    if(not os.path.exists(path)):
        raise Exception(f"Path '{path}' does not exist.")
    for func, expectedOutput in args:
        value = func(path)  # probe once and reuse (was called up to 3 times)
        if(value == expectedOutput):
            continue
        # Cheating on float numbers to allow 30 to equal 29.99944409236961
        if(isinstance(expectedOutput, float)):
            from math import ceil
            if(ceil(value * 100) == expectedOutput * 100):
                continue
        # Bug fix: only strings have .endswith(); previously a mismatched
        # float/int expectation raised AttributeError here instead of the
        # intended "Inspection Failed" Exception.
        elif(isinstance(expectedOutput, str) and expectedOutput.endswith('k')):
            a = int(value[:-1])
            b = int(expectedOutput[:-1])
            # Allow bitrate to have slight differences.
            if(abs(a - b) < 2):
                continue
        raise Exception(
            f'Inspection Failed. Was {value}, Expected {expectedOutput}.')
def make_np_list(in_file, compare_file, the_speed):
    """Time-stretch a wav file with the phasevocoder and compare the result
    against a saved reference array.

    Prints shape/difference diagnostics and raises an Exception when the
    produced samples do not exactly match the reference ``.npz`` file.
    """
    import numpy as np
    from auto_editor.scipy.wavfile import read
    from auto_editor.audiotsm2 import phasevocoder
    from auto_editor.audiotsm2.io.array import ArrReader, ArrWriter

    samplerate, sped_chunk = read(in_file)
    spedup_audio = np.zeros((0, 2), dtype=np.int16)
    channels = 2
    with ArrReader(sped_chunk, channels, samplerate, 2) as reader:
        with ArrWriter(spedup_audio, channels, samplerate, 2) as writer:
            phasevocoder(reader.channels, speed=the_speed).run(reader, writer)
            spedup_audio = writer.output

    reference = np.load(compare_file)['a']
    if not np.array_equal(spedup_audio, reference):
        if spedup_audio.shape == reference.shape:
            print(f'Both shapes ({spedup_audio.shape}) are same')
        else:
            print(spedup_audio.shape)
            print(reference.shape)
        diff = np.subtract(spedup_audio, reference)
        print('result non-zero: {}'.format(np.count_nonzero(diff)))
        print('len of spedup_audio: {}'.format(len(spedup_audio)))
        print(np.count_nonzero(diff) / spedup_audio.shape[0],
              'difference between arrays')
        raise Exception("file {} doesn't match array.".format(compare_file))
    # np.savez_compressed(out_file, a=spedup_audio)
# np.savez_compressed(out_file, a=spedup_audio)
class Tester():
    """Tiny test harness: runs named test functions, tracks pass/fail
    counts, and honors the command line's --only filter."""

    def __init__(self, args):
        self.passed_tests = 0
        self.failed_tests = 0
        self.args = args

    def run_test(self, name, func, description='', cleanup=None, allow_fail=False):
        # Skip tests that were filtered out with --only.
        if(self.args.only != [] and name not in self.args.only):
            return
        start = perf_counter()
        try:
            func()
            elapsed = perf_counter() - start
        except Exception as e:
            self.failed_tests += 1
            print("Test '{}' failed.".format(name))
            print(e)
            if(not allow_fail):
                # Hard failure: tidy up artifacts and abort the whole run.
                clean_all()
                sys.exit(1)
        else:
            self.passed_tests += 1
            print('{} Passed. {} secs.'.format(name, round(elapsed, 2)))
        if(cleanup is not None):
            cleanup()

    def end(self):
        """Print the final score, clean artifacts, and exit successfully."""
        total = self.passed_tests + self.failed_tests
        print('{}/{}'.format(self.passed_tests, total))
        clean_all()
        sys.exit(0)
def main(sys_args=None):
    """Entry point for the 'test' subcommand.

    Parses the subcommand's arguments, builds an FFprobe wrapper for
    inspecting rendered output, then registers and runs every test suite
    through a Tester instance. Exits via tester.end().
    """
    parser = vanparse.ArgumentParser('test', 'version')
    parser = test_options(parser)

    if(sys_args is None):
        # Bug fix: was sys.args, which does not exist (AttributeError).
        sys_args = sys.argv[1:]

    args = parser.parse_args(sys_args, Log(), 'test')

    ffprobe = FFprobe(args.ffprobe_location)
    tester = Tester(args)

    def help_tests():
        run_program(['--help'])
        run_program(['-h'])
        run_program(['--frame_margin', '--help'])
        run_program(['--frame_margin', '-h'])
        run_program(['exportMediaOps', '--help'])
        run_program(['exportMediaOps', '-h'])
        run_program(['progressOps', '-h'])
        run_program(['--help', '--help'])
        run_program(['-h', '--help'])
        run_program(['--help', '-h'])
        run_program(['-h', '--help'])

    tester.run_test('help_tests', help_tests, description='check the help option, '
        'its short, and help on options and groups.')

    def version_debug():
        run_program(['--version'])
        run_program(['-v'])
        run_program(['-V'])
        run_program(['--debug'])

        # sanity check for example.mp4/ffprobe
        if(ffprobe.getFrameRate('example.mp4') != 30.0):
            print('getFrameRate did not equal 30.0')
            sys.exit(1)

    tester.run_test('version_tests', version_debug)

    def subtitle_tests():
        from auto_editor.render.subtitle import SubtitleParser
        test = SubtitleParser()
        test.contents = [
            [0, 10, "A"],
            [10, 20, "B"],
            [20, 30, "C"],
            [30, 40, "D"],
            [40, 50, "E"],
            [50, 60, "F"],
        ]
        speeds = [99999, 1]
        chunks = [[0, 10, 1], [10, 20, 0], [20, 30, 1], [30, 40, 0], [40, 50, 1],
            [50, 60, 0]]
        test.edit(chunks, speeds)

        if(test.contents != [[0, 10, "A"], [10, 20, "C"], [20, 30, "E"]]):
            raise ValueError('Incorrect subtitle results.')

    tester.run_test('subtitle_tests', subtitle_tests)

    def tsm_1a5_test():
        make_np_list('resources/example_cut_s16le.wav',
            'resources/example_1.5_speed.npz', 1.5)

    def tsm_0a5_test():
        make_np_list('resources/example_cut_s16le.wav',
            'resources/example_0.5_speed.npz', 0.5)

    def tsm_2a0_test():
        make_np_list('resources/example_cut_s16le.wav',
            'resources/example_2.0_speed.npz', 2)

    tester.run_test('tsm_1a5_test', tsm_1a5_test)
    tester.run_test('tsm_0a5_test', tsm_0a5_test, allow_fail=True)
    tester.run_test('tsm_2a0_test', tsm_2a0_test)

    def info_tests():
        run_program(['info', 'example.mp4'])
        run_program(['info', 'resources/man_on_green_screen.mp4'])
        run_program(['info', 'resources/multi-track.mov'])
        run_program(['info', 'resources/newCommentary.mp3'])
        run_program(['info', 'resources/test.mkv'])

    tester.run_test('info_tests', info_tests)

    def level_tests():
        run_program(['levels', 'resources/multi-track.mov'])
        run_program(['levels', 'resources/newCommentary.mp3'])

    tester.run_test('level_tests', level_tests)

    def example_tests():
        run_program(['example.mp4', '--has_vfr', 'no'])
        inspect(
            'example_ALTERED.mp4',
            [ffprobe.getFrameRate, 30.0],
            [ffprobe.getResolution, '1280x720'],
            [ffprobe.getVideoCodec, 'h264'],
            [ffprobe.getSampleRate, '48000'],
            [ffprobe.getVLanguage, '0|eng'],
            [ffprobe.getALanguage, '1|eng'],
        )
        run_program(['example.mp4', '--video_codec', 'uncompressed', '--has_vfr', 'no'])
        inspect(
            'example_ALTERED.mp4',
            [ffprobe.getFrameRate, 30.0],
            [ffprobe.getResolution, '1280x720'],
            [ffprobe.getVideoCodec, 'mpeg4'],
            [ffprobe.getSampleRate, '48000'],
        )

    tester.run_test('example_tests', example_tests)

    # Issue #200
    def url_test():
        run_program(['https://github.com/WyattBlue/auto-editor/raw/master/example.mp4'])

    tester.run_test('url_test', url_test)

    # Issue #172
    def bitrate_test():
        run_program(['example.mp4', '--audio_bitrate', '50k', '--has_vfr', 'no'])
        inspect(
            'example_ALTERED.mp4',
            [ffprobe.AudioBitRate, '50k'],
        )

    tester.run_test('bitrate_test', bitrate_test)

    # Issue #184
    def unit_tests():
        run_program(['example.mp4', '--mark_as_loud', '20s,22sec', '25secs,26.5seconds'])
        run_program(['example.mp4', '--sample_rate', '44100', '--has_vfr', 'no'])
        run_program(['example.mp4', '--sample_rate', '44100 Hz', '--has_vfr', 'no'])
        run_program(['example.mp4', '--sample_rate', '44.1 kHz', '--has_vfr', 'no'])
        run_program(['example.mp4', '--silent_threshold', '4%', '--has_vfr', 'no'])

    tester.run_test('unit_tests', unit_tests,
        description='''
        Make sure all units are working appropriately. That includes:
         - Seconds units: s, sec, secs, second, seconds
         - Frame units:   f, frame, frames
         - Sample units:  Hz, kHz
         - Percent:       %
        ''')

    def backwards_range_test():
        run_program(['example.mp4', '--edit', 'none', '--cut_out', '-5secs,end'])
        run_program(['example.mp4', '--edit', 'all', '--add_in', '-5secs,end'])

    tester.run_test('backwards_range_test', backwards_range_test, description='''
        Cut out the last 5 seconds of a media file by using negative number in the
        range.
        ''')

    def cut_out_test():
        run_program(['example.mp4', '--edit', 'none', '--video_speed', '2',
            '--silent_speed', '3', '--cut_out', '2secs,10secs', '--has_vfr', 'no'])
        run_program(['example.mp4', '--edit', 'all', '--video_speed', '2',
            '--add_in', '2secs,10secs', '--has_vfr', 'no'])

    tester.run_test('cut_out_test', cut_out_test)

    def gif_test():
        run_program(['resources/man_on_green_screen.gif', '--edit', 'none'])
        inspect(
            'resources/man_on_green_screen_ALTERED.gif',
            [ffprobe.getVideoCodec, 'gif'],
        )

    tester.run_test('gif_test', gif_test, description='''
        Feed auto-editor a gif file and make sure it can spit out a correctly formated
        gif. No editing is requested.
        ''',
        cleanup=clean_all)

    def margin_tests():
        run_program(['example.mp4', '-m', '3'])
        run_program(['example.mp4', '--margin', '3'])
        run_program(['example.mp4', '-m', '0.3sec'])
        run_program(['example.mp4', '-m', '6f'])
        run_program(['example.mp4', '-m', '5 frames'])
        run_program(['example.mp4', '-m', '0.4 seconds'])

    tester.run_test('margin_tests', margin_tests)

    def input_extension():
        # Input file must have an extension. Throw error if none is given.
        shutil.copy('example.mp4', 'example')
        check_for_error(['example', '--no_open'], 'must have an extension.')
        os.remove('example')

    tester.run_test('input_extension', input_extension)

    def output_extension():
        # Add input extension to output name if no output extension is given.
        run_program(['example.mp4', '--has_vfr', 'no', '-o', 'out'])
        inspect(
            'out.mp4',
            [ffprobe.getVideoCodec, 'h264']
        )
        os.remove('out.mp4')

        run_program(['resources/test.mkv', '--has_vfr', 'no', '-o', 'out'])
        inspect(
            'out.mkv',
            [ffprobe.getVideoCodec, 'h264']
        )
        os.remove('out.mkv')

    tester.run_test('output_extension', output_extension)

    def progress_ops_test():
        run_program(['example.mp4', '--has_vfr', 'no', 'progressOps', '--machine_readable_progress'])
        run_program(['example.mp4', '--has_vfr', 'no', 'progressOps', '--no_progress'])

    tester.run_test('progress_ops_test', progress_ops_test)

    def silent_threshold():
        run_program(['resources/newCommentary.mp3', '--silent_threshold', '0.1'])

    tester.run_test('silent_threshold', silent_threshold)

    def track_tests():
        run_program(['resources/multi-track.mov', '--cut_by_all_tracks'])
        run_program(['resources/multi-track.mov', '--keep_tracks_seperate'])
        run_program(['example.mp4', '--cut_by_this_audio', 'resources/newCommentary.mp3'])

    tester.run_test('track_tests', track_tests)

    def json_tests():
        run_program(['example.mp4', '--export_as_json'])
        run_program(['example.json'])

    tester.run_test('json_tests', json_tests)

    def scale_tests():
        run_program(['example.mp4', '--scale', '1.5', '--has_vfr', 'no'])
        inspect(
            'example_ALTERED.mp4',
            [ffprobe.getFrameRate, 30.0],
            [ffprobe.getResolution, '1920x1080'],
            [ffprobe.getSampleRate, '48000'],
        )

        run_program(['example.mp4', '--scale', '0.2', '--has_vfr', 'no'])
        inspect(
            'example_ALTERED.mp4',
            [ffprobe.getFrameRate, 30.0],
            [ffprobe.getResolution, '256x144'],
            [ffprobe.getSampleRate, '48000'],
        )

    tester.run_test('scale_tests', scale_tests)

    def various_errors_test():
        check_for_error(['example.mp4', '--zoom', '0', '--cut_out', '60,end'])
        check_for_error(['example.mp4', '--zoom', '0,60', '--cut_out', '60,end'])
        check_for_error(['example.mp4', '--rectangle', '0,60', '--cut_out', '60,end'])

    tester.run_test('various_errors_test', various_errors_test)

    def effect_tests():
        run_program(['create', 'test', '--width', '640', '--height', '360', '-o',
            'testsrc.mp4'])
        inspect(
            'testsrc.mp4',
            [ffprobe.getFrameRate, 30.0],
            [ffprobe.getResolution, '640x360'],
        )
        run_program(['testsrc.mp4', '--mark_as_loud', 'start,end', '--zoom', '10,60,2'])

        run_program(['example.mp4', '--mark_as_loud', 'start,end', '--rectangle',
            'audio>0.05,audio<0.05,20,50,50,100', 'audio>0.1,audio<0.1,120,50,150,100'])

        run_program(['testsrc.mp4', '--mark_as_loud', 'start,end', '--zoom',
            'start,end,1,0.5,centerX,centerY,linear', '--scale', '0.5'])
        inspect(
            'testsrc_ALTERED.mp4',
            [ffprobe.getFrameRate, 30.0],
            [ffprobe.getResolution, '320x180'],
        )

        run_program(['testsrc.mp4', '--mark_as_loud', 'start,end', '--rectangle',
            '0,30,0,200,100,300,#43FA56,10'])
        os.remove('testsrc_ALTERED.mp4')
        os.remove('testsrc.mp4')

    tester.run_test('effect_tests', effect_tests,
        description='test the zoom and rectangle options',
        cleanup=clean_all)

    def export_tests():
        for item in os.listdir('resources'):
            if('man_on_green_screen' in item or item.startswith('.') or '_ALTERED' in item
                or item.endswith('.npz')):
                continue

            item = 'resources/{}'.format(item)
            run_program([item])
            run_program([item, '-exp'])
            run_program([item, '-exr'])
            run_program([item, '-exf'])
            run_program([item, '-exs'])
            run_program([item, '--export_as_clip_sequence'])
            run_program([item, '--preview'])

            cleanup('resources')

    tester.run_test('export_tests', export_tests)

    def codec_tests():
        run_program(['example.mp4', '--video_codec', 'h264', '--preset', 'faster'])
        run_program(['example.mp4', '--audio_codec', 'ac3'])

    tester.run_test('codec_tests', codec_tests)

    def combine_tests():
        run_program(['example.mp4', '--mark_as_silent', '0,171', '-o', 'hmm.mp4'])
        run_program(['example.mp4', 'hmm.mp4', '--combine_files', '--debug'])
        os.remove('hmm.mp4')

    tester.run_test('combine_tests', combine_tests)

    def motion_tests():
        run_program(['resources/man_on_green_screen.mp4', '--edit_based_on', 'motion',
            '--debug', '--frame_margin', '0', '-mcut', '0', '-mclip', '0'])
        run_program(['resources/man_on_green_screen.mp4', '--edit_based_on', 'motion',
            '--motion_threshold', '0'])

    tester.run_test('motion_tests', motion_tests)

    tester.end()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "1097b5f938fd62a01fd454f2bb4776ac3de503a3", "size": 20010, "ext": "py", "lang": "Python", "max_stars_repo_path": "auto_editor/subcommands/test.py", "max_stars_repo_name": "dunganau/auto-editor", "max_stars_repo_head_hexsha": "bdfd8b955f0b35c2061d0f5a9e7d500ee5bf0ec7", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "auto_editor/subcommands/test.py", "max_issues_repo_name": "dunganau/auto-editor", "max_issues_repo_head_hexsha": "bdfd8b955f0b35c2061d0f5a9e7d500ee5bf0ec7", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "auto_editor/subcommands/test.py", "max_forks_repo_name": "dunganau/auto-editor", "max_forks_repo_head_hexsha": "bdfd8b955f0b35c2061d0f5a9e7d500ee5bf0ec7", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.25, "max_line_length": 101, "alphanum_fraction": 0.5850074963, "include": true, "reason": "import numpy", "num_tokens": 5044}
|
\section{Memo}
\memoto{\LaTeX{}studio}
\memofrom{Liam Huang}
\memosubject{Happy \TeX{}ing!}
\memodate{\today}
\memologo{\includegraphics{SUSTeX.pdf}}
\begin{memo}[Memorandum]
hello world.
\end{memo}
|
{"hexsha": "db19cbceb633746dc314aed38b78cf089e911edc", "size": 200, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Contest/2019MCM/sections/memo.tex", "max_stars_repo_name": "iydon/homework", "max_stars_repo_head_hexsha": "253d4746528ef62d33eba1de0b90dcb17ec587ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-10-20T08:18:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-11T12:14:56.000Z", "max_issues_repo_path": "Contest/2019MCM/sections/memo.tex", "max_issues_repo_name": "iydon/homework", "max_issues_repo_head_hexsha": "253d4746528ef62d33eba1de0b90dcb17ec587ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-13T03:04:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:49:10.000Z", "max_forks_repo_path": "Contest/2019MCM/sections/memo.tex", "max_forks_repo_name": "iydon/homework", "max_forks_repo_head_hexsha": "253d4746528ef62d33eba1de0b90dcb17ec587ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-11-02T05:46:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-12T23:11:28.000Z", "avg_line_length": 22.2222222222, "max_line_length": 39, "alphanum_fraction": 0.74, "num_tokens": 74}
|
"""
Chronological Ephemeris
-----------------------
Collate ephemeris data generated by :mod:`~embers.sat_utils.sat_ephemeris` for multiple satellites
and determine all ephemeris present in 30 minute observation windows.
"""
import json
import math
from datetime import datetime, timedelta
from pathlib import Path
import numpy as np
import pytz
from scipy import interpolate
def obs_times(time_zone, start_date, stop_date):
    """Build start/stop times for every 30 minute observation window.

    Given a pytz timezone name and an inclusive local date range, produce,
    for each of the 48 half-hour slots of each day:

    - a human readable local start time (``YYYY-MM-DD-HH:MM``),
    - the start of the slot as a UTC unix timestamp,
    - the end of the slot (start + 30 minutes) as a UTC unix timestamp.

    :param time_zone: A :class:`~str` representing a :samp:`pytz` timezone,
        e.g. ``"Australia/Perth"``
    :param start_date: in :samp:`YYYY-MM-DD` format :class:`~str`
    :param stop_date: in :samp:`YYYY-MM-DD` format :class:`~str`
    :returns:
        A :class:`~tuple` (obs_time, obs_unix, obs_unix_end) of parallel lists
    """
    # Input dates are local time (e.g. Australia/Perth).
    local = pytz.timezone(time_zone)

    first_day = datetime.strptime(start_date, "%Y-%m-%d")
    last_day = datetime.strptime(stop_date, "%Y-%m-%d")
    n_days = (last_day - first_day).days

    obs_time = []
    obs_unix = []
    obs_unix_end = []

    # +1 makes the date range inclusive of the last day.
    for day_idx in range(n_days + 1):
        date_str = (first_day + timedelta(days=day_idx)).strftime("%Y-%m-%d")
        day_start = datetime.strptime(date_str, "%Y-%m-%d")

        # 48 half-hour observations per day.
        for slot in range(48):
            slot_start = day_start + timedelta(minutes=30 * slot)

            # Naive local time -> aware UTC -> unix timestamp; unix time is
            # what the rf explorer data files use internally.
            utc_aware = local.localize(slot_start, is_dst=None).astimezone(pytz.utc)
            unix_start = utc_aware.timestamp()

            obs_time.append(slot_start.strftime("%Y-%m-%d-%H:%M"))
            obs_unix.append(unix_start)
            obs_unix_end.append(unix_start + (30 * 60))

    return (obs_time, obs_unix, obs_unix_end)
def interp_ephem(t_array, s_alt, s_az, interp_type, interp_freq):
    """Resample one satellite pass onto a fixed-rate time grid.

    Interpolates the altitude and azimuth ephemeris so that every RF data
    point (aligned at the same frequency) has a matching ephemeris sample.

    :param t_array: times of the pass samples
    :param s_alt: satellite altitudes at ``t_array``
    :param s_az: satellite azimuths at ``t_array``
    :param interp_type: scipy interpolation kind, e.g. ``"cubic"``/``"linear"``
    :param interp_freq: output sample rate in Hertz
    :returns: tuple ``(time_interp, sat_alt, sat_az)`` of lists
    """
    alt_model = interpolate.interp1d(t_array, s_alt, kind=interp_type)

    # Azimuth may wrap around 2π; unwrap the angles so the interpolator never
    # sees the 0/2π discontinuity, then fold back with a modulus below.
    az_model = interpolate.interp1d(t_array, np.unwrap(s_az), kind=interp_type)

    # Integer endpoints keep the grid clean and inside the data range.
    t_start = math.ceil(t_array[0])
    t_stop = math.floor(t_array[-1])

    time_interp = list(np.double(np.arange(t_start, t_stop, (1 / interp_freq))))

    sat_alt = list(alt_model(time_interp))
    # Modulus by 2π undoes the np.unwrap above.
    sat_az = list(az_model(time_interp) % (2 * np.pi))

    return (time_interp, sat_alt, sat_az)
def write_json(data, filename=None, out_dir=None):
    """writes data to json file in output dir

    :param data: JSON-serializable data to be written to json file
    :param filename: Json filename :class:`~str`
    :param out_dir: Path to output directory :class:`~str`
    """
    # Bug fix: the filename argument was ignored and every call overwrote a
    # single literal file named "(unknown)"; write to out_dir/filename.
    with open(f"{out_dir}/{filename}", "w") as f:
        json.dump(data, f, indent=4)
def save_chrono_ephem(
    time_zone, start_date, stop_date, interp_type, interp_freq, ephem_dir, out_dir
):
    """Save 30 minute ephem from all satellites to file.

    Native skyfiled gps timestamps are converted to unix
    timestamps to match the output of the rf explorers. The
    alt, az data is interpolated to match the cadence of
    :mod:`~embers.rf_tools.align_data`. Make a json file with all the passes
    from each 30 min observation. This will help in the
    next stage, where we identify all sats in each obs.

    :param time_zone: A :class:`~str` representing a :samp:`pytz` `timezones <https://gist.github.com/heyalexej/8bf688fd67d7199be4a1682b3eec7568>`_.
    :param start_date: in :samp:`YYYY-MM-DD` format :class:`~str`
    :param stop_date: in :samp:`YYYY-MM-DD` format :class:`~str`
    :param interp_type: Type of interpolation. Ex: :samp:`cubic`, :samp:`linear` :class:`str`
    :param interp_freq: Frequency at which to interpolate, in Hertz. :class:`~int`
    :param ephem_dir: Directory where :samp:`npz` ephemeris files from :func:`~embers.sat_utils.sat_ephemeris.save_ephem` are saved :class:`~str`
    :param out_dir: Path to output directory where chronological ephemeris files will be saved :class:`~str`
    """

    obs_time, obs_unix, obs_unix_end = obs_times(time_zone, start_date, stop_date)

    # creates output dir, if it doesn't exist
    Path(out_dir).mkdir(parents=True, exist_ok=True)

    # Lets make the a json file for each 30 min observation, with an empty list
    data = []
    for i in range(len(obs_time)):
        write_json(data, filename=f"{obs_time[i]}.json", out_dir=out_dir)

    # Finds all sat ephem json files, and loops over them
    for ephem_npz in list(Path(ephem_dir).glob("*.npz")):

        # Extract data from npz ephem file
        sat_ephem = np.load(ephem_npz, allow_pickle=True)
        t_array = sat_ephem["time_array"]
        s_alt = sat_ephem["sat_alt"]
        s_az = sat_ephem["sat_az"]
        s_id = str(sat_ephem["sat_id"])

        # here, we're looping over each satellite pass with a single sat ephem file
        # to check which observation window it falls in
        for pass_idx in range(len(t_array)):

            # if sat ephem has more than 10 data points
            if t_array[pass_idx].shape[0] >= 10:

                time_interp, sat_alt, sat_az = interp_ephem(
                    t_array[pass_idx],
                    s_alt[pass_idx],
                    s_az[pass_idx],
                    interp_type,
                    interp_freq,
                )

                # Find which sat passes are within a 30 minute obs
                for obs_int in range(len(obs_unix)):

                    # NOTE: this dict reassignment shadows the npz file loaded
                    # above; safe because its fields were already extracted.
                    sat_ephem = {}
                    sat_ephem["sat_id"] = [s_id]
                    sat_ephem["time_array"] = []
                    sat_ephem["sat_alt"] = []
                    sat_ephem["sat_az"] = []

                    # Case I: Satpass occurs completely within the 30min observation
                    if (
                        obs_unix[obs_int] < time_interp[0]
                        and obs_unix_end[obs_int] > time_interp[-1]
                    ):

                        # append the whole pass to the dict
                        sat_ephem["time_array"].extend(time_interp)
                        sat_ephem["sat_alt"].extend(sat_alt)
                        sat_ephem["sat_az"].extend(sat_az)

                    # Case II: Satpass begins before the obs, but ends within it
                    elif (
                        obs_unix[obs_int] > time_interp[0]
                        and obs_unix[obs_int] < time_interp[-1]
                        and obs_unix_end[obs_int] > time_interp[-1]
                    ):

                        # find index of time_interp == obs_unix
                        # NOTE(review): exact equality assumes the obs boundary
                        # always lands on the interpolation grid -- confirm the
                        # grid and obs times share integer-second alignment.
                        start_idx = (
                            np.where(np.asarray(time_interp) == obs_unix[obs_int])
                        )[0][0]

                        # append the end of the pass which is within the obs
                        sat_ephem["time_array"].extend(time_interp[start_idx:])
                        sat_ephem["sat_alt"].extend(sat_alt[start_idx:])
                        sat_ephem["sat_az"].extend(sat_az[start_idx:])

                    # Case III: Satpass begins within the obs, but ends after it
                    elif (
                        obs_unix_end[obs_int] > time_interp[0]
                        and obs_unix_end[obs_int] < time_interp[-1]
                        and obs_unix[obs_int] < time_interp[0]
                    ):

                        # find index of time_interp == obs_unix_end
                        stop_idx = (
                            np.where(np.asarray(time_interp) == obs_unix_end[obs_int])
                        )[0][0]

                        # append the end of the pass which is within the obs
                        sat_ephem["time_array"].extend(time_interp[: stop_idx + 1])
                        sat_ephem["sat_alt"].extend(sat_alt[: stop_idx + 1])
                        sat_ephem["sat_az"].extend(sat_az[: stop_idx + 1])

                    # doesn't create json if there are no satellite passes within it
                    if sat_ephem["time_array"] != []:

                        print(f"Satellite {s_id} in {obs_time[obs_int]}")

                        # open the relevant json file and loads contents to 'data_json'
                        with open(f"{out_dir}/{obs_time[obs_int]}.json") as json_file:
                            data_json = json.load(json_file)

                        # append new satpass ephem data to data_json
                        data_json.append(sat_ephem)

                        # write the combined data back to the original file
                        write_json(
                            data_json,
                            filename=f"{obs_time[obs_int]}.json",
                            out_dir=out_dir,
                        )

                        # clear data_json
                        data_json = []
|
{"hexsha": "a5944c3b14e12f3fbf204368d643a204ab407840", "size": 11870, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/embers/sat_utils/chrono_ephem.py", "max_stars_repo_name": "amanchokshi/mwa-satellites", "max_stars_repo_head_hexsha": "f9e8de353e7eddf28ed715c01d7d3fb5336f0f18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-10T11:42:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-10T11:42:55.000Z", "max_issues_repo_path": "src/embers/sat_utils/chrono_ephem.py", "max_issues_repo_name": "amanchokshi/mwa-satellites", "max_issues_repo_head_hexsha": "f9e8de353e7eddf28ed715c01d7d3fb5336f0f18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-11-16T03:05:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-20T23:49:09.000Z", "max_forks_repo_path": "src/embers/sat_utils/chrono_ephem.py", "max_forks_repo_name": "amanchokshi/mwa-satellites", "max_forks_repo_head_hexsha": "f9e8de353e7eddf28ed715c01d7d3fb5336f0f18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-27T02:34:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T02:34:30.000Z", "avg_line_length": 40.3741496599, "max_line_length": 148, "alphanum_fraction": 0.597978096, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2897}
|
import json
import os
import copy
import numpy as np
import torch
import json
from deer.experiment.base_controllers import Controller
from deer.helper.exploration import calculate_scores
from deer.helper.knn import ranked_avg_knn_scores, batch_count_scaled_knn
from deer.helper.pytorch import device, calculate_large_batch
class ExplorationMetricController(Controller):
    """Plots exploration metrics (ratio of unique states seen, ratio of maze
    states visited) through the agent's plotter. Requires Visdom."""

    def __init__(self, evaluate_on='action', periodicity=1, reset_every='none',
                 env_name='default', experiment_dir=None, baseline_file=None,
                 hyperparams=None, reload_dataset=None, **kwargs):
        """
        Controller for PLOTTING exploration metric. Requires Visdom.

        :param evaluate_on: which hook triggers an update: 'action',
            'episode', or 'epoch' (defaults to 'action' if unrecognized).
        :param periodicity: update every N triggers.
        :param reset_every: when to reset the metric series: 'episode',
            'epoch', or 'none'.
        """
        super(ExplorationMetricController, self).__init__(**kwargs)
        self._periodicity = periodicity
        self._baseline_file = baseline_file
        # Exactly one of these becomes True for a recognized evaluate_on.
        self._on_action = 'action' == evaluate_on
        self._on_episode = 'episode' == evaluate_on
        self._on_epoch = 'epoch' == evaluate_on
        self._env_name = env_name
        self._experiment_dir = experiment_dir
        # One sub-list per episode: exploration factors / visited-state ratios.
        self._exp_factor = []
        self._ratio_visited = []
        if not self._on_action and not self._on_episode and not self._on_epoch:
            # Fall back to per-action evaluation.
            self._on_action = True
        if reload_dataset is not None:
            # NOTE(review): _reload_dataset is not defined in this class as
            # shown here -- confirm it exists on a parent/mixin before
            # passing reload_dataset.
            self._reload_dataset(reload_dataset)
        self._reset_on_episode = 'episode' == reset_every
        self._reset_on_epoch = 'epoch' == reset_every
        # Incremented on each _reset; indexes the per-episode sub-lists.
        self._ep_num = -1
        self._hyperparams = hyperparams

    def _plot_baseline(self, agent):
        # Overlay previously recorded baseline curves, if a file was given.
        if self._baseline_file is not None:
            # If a baseline data is given, we overlay it.
            with open(self._baseline_file, 'r') as f:
                baseline = json.load(f)
            # Average the non-empty per-episode series across episodes.
            exp_factor = np.array([l for l in baseline['exploration_factors'] if l])
            avg_exp_factor = np.average(exp_factor, axis=0)
            agent._plotter.plot("exploration_factor",
                                np.arange(0, len(avg_exp_factor)), avg_exp_factor,
                                "Exploration Factor",
                                ymin=0, ymax=1, name='baseline')
            ratio_visited = np.array([l for l in baseline['ratios_visited'] if l])
            avg_ratio_visited = np.average(ratio_visited, axis=0)
            agent._plotter.plot("states_visited",
                                np.arange(0, len(avg_ratio_visited)), avg_ratio_visited,
                                "Ratio of states visited",
                                ymin=0, ymax=1, name='baseline')

    def onStart(self, agent):
        if (self._active == False):
            return
        self._reset(agent)

    def onEpisodeEnd(self, agent, terminal_reached, reward):
        if (self._active == False):
            return
        if self._reset_on_episode:
            self._reset(agent)
        elif self._on_episode:
            self._update(agent)

    def onEpochEnd(self, agent):
        if (self._active == False):
            return
        if self._reset_on_epoch:
            self._reset(agent)
        elif self._on_epoch:
            self._update(agent)

    def onActionTaken(self, agent):
        if (self._active == False):
            return
        if self._on_action:
            self._update(agent)

    def _reset(self, agent):
        # Start a fresh per-episode series and re-draw the baseline.
        self._count = 0
        self._exp_factor.append([])
        self._ratio_visited.append([])
        self._plot_baseline(agent)
        # If the dataset already holds transitions (e.g. preloaded), plot
        # them and resume counting from the dataset size.
        if agent._dataset.n_elems > len(self._exp_factor[self._ep_num]) and len(self._exp_factor[self._ep_num]) == 0:
            self._plot_dataset(agent)
            self._count = agent._dataset.n_elems
        self._ep_num += 1

    def _plot_dataset(self, agent):
        # Rebuild visited positions from stored observations; a cell with
        # value 0.5 marks the agent's (y, x) location in each observation.
        all_observations = agent._dataset.observationsMatchingBatchDim()[0]
        all_positions = [(y.item(), x.item()) for y, x in zip(*np.where(all_observations == 0.5)[1:])]
        # unique_counts[i] = number of distinct positions among the first i+1.
        unique_counts = []
        for i, pos in enumerate(all_positions):
            if i == 0:
                unique_counts.append(1)
            else:
                unique_counts.append(unique_counts[i - 1])
                if pos not in all_positions[:i]:
                    unique_counts[i] += 1
        # self._exp_factor[self._ep_num] = [c / (i + 1) for i, c in enumerate(unique_counts)]
        # NOTE(review): with the assignment above commented out, this series
        # is empty and the plot below receives empty arrays -- confirm
        # whether that is intended.
        y = np.array(self._exp_factor[self._ep_num])
        x = np.arange(len(self._exp_factor[self._ep_num]))
        agent._plotter.plot("exploration_factor",
                            x, y,
                            "Exploration Factor", ymin=0, ymax=1)
        if hasattr(agent._environment, "_size_maze"):
            # Free cells (value 0.0) in the maze map are the reachable states.
            ys, xs = np.nonzero(agent._environment._map == 0.0)
            total_possible_states = len(ys)
            self._ratio_visited[self._ep_num] = [c / total_possible_states for c in unique_counts]
            y_tps = np.array(self._ratio_visited[self._ep_num])
            agent._plotter.plot("states_visited",
                                x, y_tps,
                                "Ratio of states visited",
                                ymin=0, ymax=1)

    def _update(self, agent):
        self._count += 1
        if self._periodicity <= 1 or self._count % self._periodicity == 0:
            all_observations = agent._dataset.observations()[0]
            if all_observations.shape[0] < 1:
                return
            # Exploration factor = unique observations / total observations.
            unique_observations = np.unique(all_observations, axis=0)
            exp_factor = unique_observations.shape[0] / all_observations.shape[0]
            self._exp_factor[self._ep_num].append(exp_factor)
            x = np.array([self._count])
            y = np.array([exp_factor])
            agent._plotter.plot("exploration_factor",
                                x, y,
                                "Exploration Factor",
                                ymin=0, ymax=1)
            if hasattr(agent._environment, "_size_maze"):
                ys, xs = np.nonzero(agent._environment._map == 0.0)
                total_possible_states = len(ys)
                ratio = unique_observations.shape[0] / total_possible_states
                self._ratio_visited[self._ep_num].append(ratio)
                y_tps = np.array([ratio])
                agent._plotter.plot("states_visited",
                                    x, y_tps,
                                    "Ratio of states visited",
                                    ymin=0, ymax=1)

    def onEnd(self, agent):
        # Average each metric across episodes (skipping empty series), plot
        # the averages, and dump the raw series to results.json.
        exp_factor = np.array([l for l in self._exp_factor if l])
        avg_exp_factor = np.average(exp_factor, axis=0)
        agent._plotter.plot("average exploration factor",
                            np.arange(0, len(avg_exp_factor)), avg_exp_factor,
                            "Average exploration factor over %d episodes" % exp_factor.shape[0],
                            ymin=0, ymax=1)
        ratio_visited = np.array([l for l in self._ratio_visited if l])
        avg_ratio_visited = np.average(ratio_visited, axis=0)
        agent._plotter.plot("average ratios visited",
                            np.arange(0, len(avg_exp_factor)), avg_ratio_visited,
                            "Average ratio of states visited over %d episodes" % ratio_visited.shape[0],
                            ymin=0, ymax=1)
        record = {
            'exploration_factors': self._exp_factor,
            'ratios_visited': self._ratio_visited
        }
        if self._hyperparams is not None:
            record['hyperparameters'] = self._hyperparams
        filename = os.path.join(self._experiment_dir, 'results.json')
        with open(filename, 'w') as f:
            json.dump(record, f)
class RewardController(Controller):
    """Base controller that recomputes rewards on a chosen agent event.

    Subclasses implement ``_update``; this class only routes the selected
    event (``evaluate_on``) to it, honouring ``periodicity``.
    """
    def __init__(self, evaluate_on='train_loop', periodicity=1):
        super(RewardController, self).__init__()
        # Exactly one of these flags is set, based on evaluate_on.
        self._on_train_loop = evaluate_on == 'train_loop'
        self._on_train_step = evaluate_on == 'train_step'
        self._before_action = evaluate_on == 'before_action'
        self._on_action = evaluate_on == 'action'
        self._on_episode = evaluate_on == 'episode'
        self._on_epoch = evaluate_on == 'epoch'
        self._periodicity = periodicity
        self._count = 0
    def _maybe_update(self, agent, enabled):
        # Shared gating: run _update every `periodicity` triggered events.
        if enabled:
            if self._count % self._periodicity == 0:
                self._update(agent)
            self._count += 1
    def onActionTaken(self, agent):
        self._maybe_update(agent, self._on_action)
    def onActionChosen(self, agent, action):
        self._maybe_update(agent, self._before_action)
    def onEpochEnd(self, agent):
        self._maybe_update(agent, self._on_epoch)
    def onTrainLoopTaken(self, agent):
        self._maybe_update(agent, self._on_train_loop)
    def repopulate_rewards(self, agent):
        # Force an immediate reward recomputation, ignoring periodicity.
        self._update(agent)
    def _update(self, agent):
        raise NotImplementedError
class NoveltyRewardController(RewardController):
    """Replaces stored rewards with novelty-based intrinsic rewards.

    Novelty of a state is scored by ``metric_func`` (by default
    k-nearest-neighbour distances computed in the encoder's latent space)
    against all previously observed states.
    """
    def __init__(self, evaluate_on='train_loop', periodicity=1,
                 metric_func=calculate_scores,
                 score_func=ranked_avg_knn_scores, k=10, knn=batch_count_scaled_knn,
                 secondary=False):
        super(NoveltyRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity)
        self._metric_func = metric_func  # maps (states, memory, encoder) -> novelty scores
        self._k = k  # number of nearest neighbours considered
        self._score_func = score_func  # reduces knn distances to a single score
        self._knn = knn  # knn search implementation
        self._secondary = secondary  # if True, write to the secondary reward channel
    def _update(self, agent):
        """Recompute intrinsic rewards for every stored transition plus the
        agent's current (not yet stored) state.

        The reward of transition t is the novelty of s_{t+1}; the current
        environment observation supplies the reward of the final
        transition.
        """
        # Now we have to calculate intrinsic rewards
        for m in agent._learning_algo.all_models: m.eval()
        all_prev_state = agent._dataset.observationsMatchingBatchDim()[0]
        intr_rewards = self._metric_func(all_prev_state,
                                         all_prev_state,
                                         agent._learning_algo.encoder,
                                         dist_score=self._score_func,
                                         k=self._k, knn=self._knn)
        # UPDATE THIS TO TAKE INTO ACCOUNT NON INTRINSIC REWARDS
        # reward clipping for preventing divergence
        # intr_rewards = np.clip(intr_rewards, -1, 1)
        # s_t, a_t, r_t (where r_t is intr_reward of s_{t+1})
        agent._dataset.updateRewards(intr_rewards[1:], np.arange(0, agent._dataset.n_elems - 1), secondary=self._secondary)
        # we still need to calculate most recent reward.
        latest_state = np.array(agent._environment.observe())
        if len(latest_state.shape) != len(all_prev_state.shape):
            # Dataset stores stacks of frames: rebuild the newest stack from
            # the last few stored observations plus the fresh frame.
            latest_obs = latest_state
            obs_per_state = all_prev_state.shape[1]
            n_to_fill = obs_per_state - 1
            n_prev_obs = agent._dataset.observations()[0][-n_to_fill:]
            latest_state = np.expand_dims(np.concatenate((n_prev_obs, latest_obs), axis=0), axis=0)
        latest_obs_intr_reward = self._metric_func(latest_state,
                                                   all_prev_state,
                                                   agent._learning_algo.encoder,
                                                   dist_score=self._score_func,
                                                   k=self._k, knn=self._knn)
        # latest_obs_intr_reward = np.clip(latest_obs_intr_reward, -1, 1)
        agent._dataset.updateRewards(latest_obs_intr_reward, agent._dataset.n_elems - 1, secondary=self._secondary)
class HashStateCounterController(RewardController):
    """Plots a running count of unique states, identified by SimHash codes.

    Each observation is projected through a fixed random matrix and reduced
    to a ``granularity``-length sign pattern; observations sharing a
    pattern are treated as the same state.
    """
    def __init__(self, plotter, evaluate_on='action', periodicity=1, granularity=32,
                 input_dims=(1, 64, 64), **kwargs):
        # The base class initialises _periodicity and _count, so the
        # duplicated assignments that used to precede super().__init__
        # were removed.
        super(HashStateCounterController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity)
        self._granularity = granularity  # size of binary code
        # Random projection used by the hash; input_dims should match
        # agent._environment.inputDimensions()[0].
        self._A = np.random.normal(size=(self._granularity, np.prod(input_dims)))
        self.plotter = plotter
        self._unique_state_count = [0]  # running unique-state totals, one per step
        self._count_table = {}  # hash code -> visit count
    def onStart(self, agent):
        """Seed the hash table from observations already in the dataset
        (e.g. when resuming a run) and plot the accumulated counts.

        NOTE(review): iteration starts at the second observation, so the
        very first stored state is never entered into the count table —
        confirm this is intentional.
        """
        all_obs = agent._dataset.observationsMatchingBatchDim()[0]
        indices_to_update = [0]
        for i, ob in enumerate(all_obs[1:], 1):
            hashed_ob = self.calc_hash(ob)
            to_add = self._unique_state_count[-1]
            if hashed_ob not in self._count_table:
                self._count_table[hashed_ob] = 0
                to_add += 1
            self._unique_state_count.append(to_add)
            self._count_table[hashed_ob] += 1
            indices_to_update.append(i)
            self._count += 1
        if indices_to_update:
            self.plotter.plot('hashed_unique_state_counts', indices_to_update, self._unique_state_count)
    def onEnd(self, agent):
        """Report the final step count."""
        self.plotter.plot_text('ending', f'Environment completed after {self._count} steps')
    def calc_hash(self, obs):
        """Return a SimHash-style string code for one observation.

        The sign pattern of the random projection forms the code; zeros
        are mapped to +1 so every component is strictly in {-1, +1}.
        """
        A_g = np.matmul(self._A, obs.flatten())
        hash_seq = np.sign(A_g).astype(int)
        zero_mask = (hash_seq == 0).astype(int)
        hash_seq += zero_mask
        return str(hash_seq)
    def _update(self, agent):
        """Hash the agent's current observation (stacking recent frames when
        the dataset stores 4-D frame stacks), update counts, and plot."""
        all_obs = agent._dataset.observationsMatchingBatchDim()[0]
        latest_obs = agent._environment.observe()[0]
        if len(all_obs.shape) == 4:
            # Rebuild the frame stack for the newest observation from the
            # last stored stack plus the fresh frame.
            second_to_last = all_obs[-1]
            to_attach = second_to_last[1:]
            latest_obs = np.concatenate((to_attach, latest_obs[None, :, :]), axis=0)
        hashed_obs = self.calc_hash(latest_obs)
        to_add = 0
        if hashed_obs not in self._count_table:
            to_add = 1
            self._count_table[hashed_obs] = 0
        self._count_table[hashed_obs] += 1
        self._unique_state_count.append(self._unique_state_count[-1] + to_add)
        self.plotter.plot('hashed_unique_state_counts', [self._count], [self._unique_state_count[-1]], 'Hashed Unique State Counts')
class HashCountRewardController(RewardController):
    """Count-based exploration bonuses using hashed state codes.

    Each state is hashed (random sign projection, or the exact discretised
    observation when ``discrete``), and every stored transition receives a
    reward of ``bonus_coeff / sqrt(count(next_state))``.
    """
    def __init__(self, evaluate_on='action', periodicity=1, granularity=32,
                 input_dims=(1, 64, 64), secondary=False, discrete=False, **kwargs):
        super(HashCountRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity)
        self._granularity = granularity  # size of binary code
        # Bonus scale shrinks as the code gets longer (more distinct buckets).
        self._bonus_coeff = 0.01 * (256 / self._granularity)
        if discrete:
            self._bonus_coeff = 1
        # self._A = np.random.normal(size=(self._granularity, self._learning_algo.internal_dim))
        # Get input_dims from self._environment.inputDimensions()[0]
        self._A = np.random.normal(size=(self._granularity, np.prod(input_dims)))
        self._count_table = {}  # hash code -> visit count
        self._secondary = secondary  # if True, write to the secondary reward channel
        self._discrete = discrete  # hash exact discretised observations instead of projecting
        # FOR DEBUGGING
        self._all_latest_obs = []
        self._all_obs = []
    def onStart(self, agent):
        """Seed hash counts from observations already in the dataset and
        assign the corresponding count-based rewards.

        NOTE(review): iterates all_obs[1:], so rewards are attached to the
        next-states of transitions; the initial state itself is not
        counted here.
        """
        all_obs = agent._dataset.observationsMatchingBatchDim()[0]
        hashed_obs = []
        indices_to_update = []
        for i, ob in enumerate(all_obs[1:]):
            hashed_ob = self.calc_hash(ob)
            hashed_obs.append(hashed_ob)
            if hashed_ob not in self._count_table:
                self._count_table[hashed_ob] = 0
            self._count_table[hashed_ob] += 1
            indices_to_update.append(i)
        rewards = []
        for hob in hashed_obs:
            rewards.append(self._bonus_coeff / np.sqrt(self._count_table[hob]))
        agent._dataset.updateRewards(rewards, indices_to_update, secondary=self._secondary)
    def calc_hash(self, obs):
        """Return a string hash of one observation.

        Discrete mode stringifies the half-precision observation exactly;
        otherwise a SimHash-style sign pattern of a random projection is
        used, with zeros mapped to +1.
        """
        if self._discrete:
            return np.array2string(obs.astype(np.half).flatten())
        else:
            A_g = np.matmul(self._A, obs.flatten())
            hash_seq = np.sign(A_g).astype(int)
            zero_mask = (hash_seq == 0).astype(int)
            hash_seq += zero_mask
            return str(hash_seq)
    def _update(self, agent):
        """Count the agent's current state and refresh the count-based
        rewards of every stored transition."""
        all_obs = agent._dataset.observationsMatchingBatchDim()[0]
        # hacky as shit
        if len(all_obs) == 1:
            # we need to first count the first state
            hashed_first_ob = self.calc_hash(all_obs[0])
            self._count_table[hashed_first_ob] = 1
        latest_obs = agent._environment.observe()[0]
        self._all_obs.append(copy.deepcopy(latest_obs))
        if len(all_obs.shape) == 4:
            # Frame-stacked observations: rebuild the newest stack from the
            # last stored stack plus the fresh frame.
            second_to_last = all_obs[-1]
            to_attach = second_to_last[1:]
            latest_obs = np.concatenate((to_attach, latest_obs[None, :, :]), axis=0)
        self._all_latest_obs.append(latest_obs)
        hashed_obs = self.calc_hash(latest_obs)
        # FIGURE THIS OUT FOR ACROBOT
        if hashed_obs not in self._count_table:
            self._count_table[hashed_obs] = 0
        self._count_table[hashed_obs] += 1
        # next_states[t] is s_{t+1}; the current observation is the last one.
        next_states = np.concatenate((all_obs[1:],latest_obs[np.newaxis]), axis=0)
        # idx_to_update = [i for i, obs in enumerate(all_obs[1:]) if hashed_obs == self.calc_hash(obs)]
        idx_to_update = np.arange(len(all_obs))
        # idx_to_update = np.array(idx_to_update)
        # THIS MIGHT NEED TO BE REFACTORED
        all_rewards = []
        for i, s in enumerate(next_states):
            hob = self.calc_hash(s)
            all_rewards.append(self._bonus_coeff / np.sqrt(self._count_table[hob]))
        agent._dataset.updateRewards(all_rewards, idx_to_update, secondary=self._secondary)
class CountBasedRewardController(RewardController):
    """Assigns count-based exploration bonuses using a user-supplied hash.

    Every time the current state (as hashed by ``hash_func``) is visited,
    all stored transitions leading into that state are re-rewarded with
    ``bonus / sqrt(visit_count)``.
    """
    def __init__(self, evaluate_on='action', periodicity=1, bonus=1, hash_func=None, secondary=False, **kwargs):
        super(CountBasedRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity)
        self._hash_func = hash_func  # observation -> hashable key
        self._bonus = bonus  # bonus scale
        self._counts = {}  # key -> visit count
        self._secondary = secondary  # if True, write to the secondary reward channel
    def _update(self, agent):
        """Increment the visit count of the current state and refresh the
        rewards of all stored transitions that lead into it.

        :param agent: the agent whose dataset/environment are inspected
        """
        for model in agent._learning_algo.all_models:
            model.eval()
        stored_obs = agent._dataset.observations()[0]
        current_hash = self._hash_func(agent._environment.observe()[0])
        self._counts[current_hash] = self._counts.get(current_hash, 0) + 1
        # Indices of transitions whose next-state matches the current state.
        matching = np.array([i for i, obs in enumerate(stored_obs[1:])
                             if self._hash_func(obs) == current_hash])
        if len(matching) > 0:
            bonus = self._bonus / np.sqrt(self._counts[current_hash])
            agent._dataset.updateRewards(np.repeat(bonus, len(matching)),
                                         matching, secondary=self._secondary)
class TransitionLossRewardController(RewardController):
    """Uses the transition model's prediction error as intrinsic reward.

    For each stored transition, the reward is the squared latent-space
    distance between the transition model's predicted next abstract state
    and the encoder's actual next abstract state.
    """
    def __init__(self, evaluate_on='train_loop', periodicity=1, secondary=False, **kwargs):
        super(TransitionLossRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity)
        self._secondary = secondary  # if True, write to the secondary reward channel
    def _update(self, agent):
        """Recompute transition-error rewards for all stored transitions."""
        for m in agent._learning_algo.all_models: m.eval()
        all_obs = agent._dataset.observationsMatchingBatchDim()[0]
        states = torch.tensor(all_obs, dtype=torch.float).to(device)
        # The current environment observation supplies the final next-state.
        latest_obs = agent._environment.observe()[0][None, :, :]
        if len(all_obs.shape) == 4:
            # Frame-stacked observations: rebuild the newest stack from the
            # last stored stack plus the fresh frame, then add a batch dim.
            second_to_last = all_obs[-1]
            to_attach = second_to_last[1:]
            latest_obs = np.concatenate((to_attach, latest_obs), axis=0)
            latest_obs = latest_obs[None, :, :, :]
        next_states = torch.tensor(np.concatenate((all_obs[1:],latest_obs), axis=0), dtype=torch.float, requires_grad=False).to(device)
        actions = agent._dataset.actions()
        # One-hot encode actions for the transition model input.
        one_hot_actions = np.zeros((actions.shape[0], agent._environment.nActions()))
        one_hot_actions[np.arange(len(actions)), actions] = 1
        one_hot_actions = torch.tensor(one_hot_actions, requires_grad=False).float().to(device)
        encoder = agent._learning_algo.encoder
        transition = agent._learning_algo.transition
        with torch.no_grad():
            abstr_states = encoder(states)
            target_next_states = encoder(next_states)
            transition_input = torch.cat((abstr_states, one_hot_actions), dim=-1)
            abstr_next_states = transition(transition_input)
            # Per-transition squared error in latent space.
            squared_diff = torch.sum((abstr_next_states - target_next_states) ** 2, dim=-1)
        rewards = squared_diff.cpu().numpy()
        idx_to_update = list(range(len(all_obs)))
        agent._dataset.updateRewards(rewards, idx_to_update, secondary=self._secondary)
class RNDRewardController(RewardController):
    """Random Network Distillation intrinsic rewards.

    The prediction error between a trained predictor network and a fixed
    random target network (both fed encoder features) is used as the
    intrinsic reward for every stored transition.
    """
    def __init__(self, evaluate_on='action', periodicity=1,
                 secondary=False):
        super(RNDRewardController, self).__init__(evaluate_on=evaluate_on, periodicity=periodicity)
        self._secondary = secondary  # if True, write to the secondary reward channel
    @torch.no_grad()
    def _update(self, agent):
        """Recompute RND rewards for the whole dataset."""
        for model in agent._learning_algo.all_models:
            model.eval()
        observations = agent._dataset.observationsMatchingBatchDim()[0]
        obs_tensor = torch.tensor(observations, dtype=torch.float, device=device)
        encoded = calculate_large_batch(agent._learning_algo.encoder, obs_tensor)
        rnd = agent._learning_algo.rnd_network
        predicted = calculate_large_batch(rnd.predictor, encoded)
        expected = calculate_large_batch(rnd.target, encoded)
        # Half the squared error per sample, as in the RND formulation.
        rewards = ((expected - predicted).pow(2).sum(1) / 2).cpu().numpy()
        agent._dataset.updateRewards(rewards, list(range(len(observations))),
                                     secondary=self._secondary)
class AbstractRepPlottingController(Controller):
    """Periodically plots the learned abstract representation.

    NOTE(review): relies on ``self._count`` being initialised by the base
    Controller class — it is not set here; confirm.
    """
    def __init__(self, plotter, evaluate_on='train_loop', periodicity=3, **kwargs):
        super(AbstractRepPlottingController, self).__init__(**kwargs)
        self._on_train_loop = evaluate_on == 'train_loop'
        self._on_train_step = evaluate_on == 'train_step'
        self._on_action = evaluate_on == 'action'
        self._on_episode = evaluate_on == 'episode'
        self._on_epoch = evaluate_on == 'epoch'
        self._periodicity = periodicity
        self._plotter = plotter
        self._limit_history = kwargs.get('limit_history', -1)
        self._skip_first = kwargs.get('skip_first', 0)
    def _maybe_update(self, agent, enabled):
        # Shared gating: skip the first _skip_first triggers, then run
        # every _periodicity triggers.
        if enabled:
            if self._count > self._skip_first and self._count % self._periodicity == 0:
                self._update(agent)
            self._count += 1
    def onActionTaken(self, agent):
        self._maybe_update(agent, self._on_action)
    def onTrainLoopTaken(self, agent):
        self._maybe_update(agent, self._on_train_loop)
    def onTrainStepTaken(self, agent):
        self._maybe_update(agent, self._on_train_step)
    def onEnd(self, agent):
        self._update(agent)
    def plot_idx(self, agent, idx):
        # Plot the 4th frame of the idx-th stored observation stack.
        frame = agent._dataset.observations()[0][idx][3].astype('float64')
        agent._environment.plot_state(frame)
    def _update(self, agent):
        fig = agent._environment.summarizePerformance(agent._dataset,
                                                      agent._learning_algo,
                                                      n_observations=self._limit_history)
        self._plotter.plot_plotly_fig("abstr_rep_step_%d" % self._count, fig,
                                      title_name="abstr reps_step %d" % self._count)
class RNNQHistoryController(Controller):
    """Pushes the full observation history into recurrent Q networks after
    every action, so their internal state reflects the whole episode."""
    def __init__(self, evaluate_on='action_taken', periodicity=1):
        super(RNNQHistoryController, self).__init__()
        self._on_train_loop = evaluate_on == 'train_loop'
        self._on_train_step = evaluate_on == 'train_step'
        self._on_action_taken = evaluate_on == 'action_taken'
        self._on_episode = evaluate_on == 'episode'
        self._on_epoch = evaluate_on == 'epoch'
        self._periodicity = periodicity
    def onActionTaken(self, agent):
        if self._on_action_taken:
            self._update(agent)
    def _update(self, agent):
        history = agent._dataset.observations()[0]
        # Keep both the online and target networks in sync with the history.
        agent._learning_algo.Q.set_history(history)
        agent._learning_algo.Q_target.set_history(history)
class ExtrinsicRewardPlottingController(Controller):
    """Plots the latest extrinsic (environment) reward after each action."""
    def __init__(self, plotter):
        super(ExtrinsicRewardPlottingController, self).__init__()
        self._plotter = plotter
        self._count = 0  # actions taken so far (x axis)
    def onActionTaken(self, agent):
        latest_reward = agent._dataset.rewards()[-1]
        self._plotter.plot("extrinsic_rewards",
                           np.array([self._count]),
                           np.array([latest_reward]),
                           title_name="Extrinsic Rewards")
        self._count += 1
class UniqueStateCounterController(Controller):
    """Counts exactly-unique states (keyed on ``str()`` of trajectory
    entries) and plots the running total."""
    def __init__(self, plotter, evaluate_on='action', periodicity=1, **kwargs):
        super(UniqueStateCounterController, self).__init__(**kwargs)
        self._on_train_loop = evaluate_on == 'train_loop'
        self._on_train_step = evaluate_on == 'train_step'
        self._on_action = evaluate_on == 'action'
        self._on_episode = evaluate_on == 'episode'
        self._on_epoch = evaluate_on == 'epoch'
        self._data = []  # unique-state total after each step
        self._states = set()  # stringified states seen so far
        self._count = 0  # steps seen
        self._num_uniques = 0
        self._periodicity = periodicity
        self._plotter = plotter
    def onStart(self, agent):
        """Replay any trajectory already accumulated by the environment
        (e.g. from a resumed run) into the statistics, then plot them."""
        for state in agent._environment._trajectory:
            key = str(state)
            if key not in self._states:
                self._states.add(key)
                self._num_uniques += 1
            self._data.append(self._num_uniques)
            self._count += 1
        self._plotter.plot('unique_state_counter', list(range(self._count)),
                           self._data, title_name='Unique State Counter')
    def onActionTaken(self, agent):
        if self._on_action:
            if self._count > 0 and self._count % self._periodicity == 0:
                self._update(agent)
            self._count += 1
    def onEnd(self, agent):
        self._plotter.plot_text('ending', f'Environment completed after {self._count} steps')
    def _update(self, agent):
        key = str(agent._environment._trajectory[-1])
        if key not in self._states:
            self._states.add(key)
            self._num_uniques += 1
        self._data.append(self._num_uniques)
        self._plotter.plot('unique_state_counter', [self._count],
                           [self._num_uniques], title_name='Unique State Counter')
class MapPlottingController(Controller):
    """Plots maze-wide heatmaps: Q values, state values, visit counts and
    intrinsic rewards, plus (optionally) a quiver plot of the Q values and
    the learned abstract representation.

    Assumes a maze-style environment exposing ``_map`` (1 = wall, 0 = free)
    and observations that mark the agent position with the value 0.5.
    """
    def __init__(self, plotter, evaluate_on='train_loop', periodicity=3,
                 metric_func=calculate_scores, k=10, learn_representation=True,
                 reward_type='novelty_reward', train_q=True,
                 internal_dim=2, plot_quiver=True, **kwargs):
        super(MapPlottingController, self).__init__(**kwargs)
        self._on_train_loop = 'train_loop' == evaluate_on
        self._on_train_step = 'train_step' == evaluate_on
        self._on_action = 'action' == evaluate_on
        self._on_episode = 'episode' == evaluate_on
        self._on_epoch = 'epoch' == evaluate_on
        self._periodicity = periodicity
        self._metric_func = metric_func  # novelty scorer used for reward maps
        self._reward_type = reward_type  # 'novelty_reward' or 'count_reward'
        self._learn_representation = learn_representation
        self._k = k  # nearest neighbours for the novelty metric
        self._train_q = train_q  # whether Q values are available to plot
        self._internal_dim = internal_dim  # latent dim (rep plotted only when < 4)
        self._plot_quiver = plot_quiver
        self._plotter = plotter
    # def onStart(self, agent):
    #     print("starting")
    def onTrainLoopTaken(self, agent):
        if self._on_train_loop:
            if self._count > 0 and self._count % self._periodicity == 0:
                self._update(agent)
            self._count += 1
    def onTrainStepTaken(self, agent):
        if self._on_train_step:
            if self._count > 0 and self._count % self._periodicity == 0:
                self._update(agent)
            self._count += 1
    def onStart(self, agent):
        print("here")
    def onActionTaken(self, agent):
        if self._on_action:
            if self._count > 0 and self._count % self._periodicity == 0:
                self._update(agent)
            self._count += 1
    def _update(self, agent):
        """Build and plot all heatmaps for the current step."""
        for m in agent._learning_algo.all_models: m.eval()
        all_prev_obs = agent._dataset.observationsMatchingBatchDim()[0]
        # intr_rewards = agent._dataset.rewards()[:-1]
        all_possible_obs = all_prev_obs
        if hasattr(agent._environment, 'getAllPossibleStates'):
            all_possible_obs = agent._environment.getAllPossibleStates()
        # all_possible_obs = all_prev_obs
        # Now we might want to map the Q values of the given states.
        example = agent._environment._map
        borders = example == 1  # wall mask; walls rendered with value 1
        intr_rewards_map = np.zeros(example.shape) + borders.astype(int)
        vals_map = np.zeros(example.shape) + borders.astype(int)
        q_vals_map = np.zeros(example.shape + (4,))  # one channel per action
        state_count_map = np.zeros_like(example) + borders.astype(int)
        q_vals = np.zeros((len(all_possible_obs), agent._environment.nActions()))
        if self._train_q:
            with torch.no_grad():
                if agent._bootstrap_q:
                    # Bootstrapped DQN: average the Q estimates over all heads.
                    Q = agent._learning_algo.Q
                    n_heads = Q.n_heads
                    copy_state = copy.deepcopy(all_possible_obs) # Required!
                    all_possible_obs_tensor = torch.tensor(copy_state, dtype=torch.float).to(device)
                    all_possible_abstr = agent._learning_algo.encoder(all_possible_obs_tensor)
                    all_qs = torch.stack(Q(all_possible_abstr, list(range(n_heads))))
                    q_vals = torch.mean(all_qs, dim=0).cpu().detach().numpy()
                else:
                    q_vals = agent._learning_algo.qValues(all_possible_obs).cpu().detach().numpy()
        intr_rewards = np.zeros_like(q_vals)
        if self._reward_type == 'novelty_reward':
            intr_rewards = self._metric_func(all_possible_obs,
                                             all_prev_obs,
                                             agent._learning_algo.encoder,
                                             k=self._k)
            # intr_rewards = np.clip(intr_rewards, -1, 1)
        # Scatter the per-state values onto the maze grid; the agent
        # position is the cell of each observation equal to 0.5.
        for r, q, s in zip(intr_rewards, q_vals, all_possible_obs):
            pos_y, pos_x = np.where(s == 0.5)
            if self._reward_type == 'novelty_reward':
                intr_rewards_map[pos_y, pos_x] = r
            if self._train_q:
                q_vals_map[pos_y, pos_x] = q
                max_vals = np.max(q, axis=-1)
                vals_map[pos_y, pos_x] = max_vals.item()
        for pos_y, pos_x in agent._environment._trajectory:
            state_count_map[pos_y, pos_x] += 1
        if self._reward_type == 'count_reward':
            intr_rewards_map = 1 / (np.sqrt(state_count_map))
            infs = np.isinf(intr_rewards_map)  # unvisited cells divide by zero
            intr_rewards_map[infs] = 0
        heatmaps = [("counts step %d" % self._count, state_count_map)]
        if self._train_q:
            # for logging purposes
            q_up_map = q_vals_map[:, :, 0] + borders
            q_down_map = q_vals_map[:, :, 1] + borders
            q_left_map = q_vals_map[:, :, 2] + borders
            q_right_map = q_vals_map[:, :, 3] + borders
            # Now we make a window for each
            heatmaps = [
                ("Q values (up) for step %d" % self._count, q_up_map),
                ("Q values (down) for step %d" % self._count, q_down_map),
                ("Q values (left) for step %d" % self._count, q_left_map),
                ("Q values (right) for step %d" % self._count, q_right_map),
                ("State value function for step %d" % self._count, vals_map)
            ] + heatmaps
            if self._plot_quiver:
                self._plotter.plot_quiver("quiver_step_%d" % self._count,
                                          q_up_map, q_down_map,
                                          q_left_map, q_right_map,
                                          "Quiver map for step %d" % self._count)
        # if self._reward_type == 'novelty_reward':
        heatmaps.append(("Intrinsic reward map for step %d" % self._count, intr_rewards_map))
        cols = 4 if len(heatmaps) > 4 else len(heatmaps)
        self._plotter.plot_mapping_heatmap(
            "heatmaps_step_%d" % self._count,
            heatmaps,
            title_name="Heatmaps for step %d" % self._count,
            cols=cols)
        # Here we plot abstr representations
        if self._learn_representation and self._internal_dim < 4:
            # abstr_rep_fig, abstr_rep_plt = agent._environment.summarizePerformance(agent._dataset, agent._learning_algo)
            #
            # self._plotter.plot_mpl_fig("abstr_rep_step_%d" % self._count, abstr_rep_fig,
            #                            title_name="abstr reps_step %d" % self._count, replace=True)
            fig = agent._environment.summarizePerformance(agent._dataset, agent._learning_algo)
            self._plotter.plot_plotly_fig("abstr_rep_step_%d" % self._count, fig,
                                          title_name="abstr reps_step %d" % self._count)
        # self._log_trajectory(agent)
    def _log_trajectory(self, agent):
        """Plot the agent's (y, x) trajectory through the maze."""
        trajectory = np.array(agent._environment._trajectory)
        x = trajectory[:, 0]
        y = trajectory[:, 1]
        agent._plotter.plot('trajectory step %d' % self._count,
                            x, y, xmin=0, xmax=agent._environment._size_maze,
                            ymin=0, ymax=agent._environment._size_maze,
                            title_name='Trajectory Plot for step %d' % self._count,
                            markers=True, linecolor=np.array([[255,0,0]]))
class LossPlottingController(Controller):
    """Buffers windowed averages of training losses and plots them.

    Every ``sum_over`` train steps the mean of each loss over the last
    window is appended to a buffer; every ``periodicity`` windows the
    buffers are flushed to the plotter. On shutdown, the total step count
    is written to ``final_results.json``.
    """
    def __init__(self, plotter, evaluate_on='train_step', sum_over=1000, periodicity=1, max_size=1000):
        super(LossPlottingController, self).__init__()
        self._on_train_loop = 'train_loop' == evaluate_on
        self._on_train_step = 'train_step' == evaluate_on
        self._on_action = 'action' == evaluate_on
        self._on_episode = 'episode' == evaluate_on
        self._on_epoch = 'epoch' == evaluate_on
        self._plotter = plotter
        self._periodicity = periodicity
        self._sum_over = sum_over  # window length in train steps
        self._count = 0  # train steps seen
        self._buffer = dict(counts=[])  # loss name -> windowed means; 'counts' holds window indices
        self._max_size = max_size  # soft cap on buffered points per loss
        self._steps = 0  # environment actions taken
    def onTrainStepTaken(self, agent):
        if self._count % self._sum_over == 0 and self._on_train_step:
            self._update_buffer(agent)
            if self._count % (self._sum_over * self._periodicity) == 0:
                self._update()
        self._count += 1
    def onActionTaken(self, agent):
        self._steps += 1
    def _update_buffer(self, agent):
        """Append each loss's mean over the last window to its buffer."""
        for k, v in agent._all_losses.items():
            if v:
                if k not in self._buffer:
                    self._buffer[k] = []
                if len(self._buffer[k]) > self._sum_over + self._max_size:
                    # BUG FIX: previously `self._buffer[1:]`, which slices the
                    # dict itself and raises TypeError; drop the oldest entry
                    # of this loss's list instead.
                    self._buffer[k] = self._buffer[k][1:]
                self._buffer[k] += [np.mean(v[-self._sum_over:])]
        self._buffer['counts'] += [self._count // self._sum_over]
    def onEnd(self, agent):
        """Write final run statistics into the experiment directory."""
        to_save = dict(
            total_steps=self._steps,
            # final_buffer=self._buffer
        )
        exp_dir = self._plotter.experiment_dir
        final_results_file = os.path.join(exp_dir, 'final_results.json')
        with open(final_results_file, 'w') as f:
            json.dump(to_save, f)
    def _update(self):
        """Flush buffered loss curves to the plotter and reset the buffers."""
        counts = self._buffer['counts']
        del self._buffer['counts']
        self._plotter.plot_dict(counts, self._buffer)
        self._buffer = dict(counts=[])
def simple_hash_func(arr):
    """Hash an observation by the location of the agent marker.

    Finds the single cell whose value is 0.5 (the agent's position) and
    returns its coordinates. The previous docstring incorrectly claimed a
    string was returned; the result is a plain tuple.

    :param arr: 2-D array containing exactly one cell equal to 0.5
    :return: ``(x, y)`` tuple of the agent position (column, row)
    :raises ValueError: if the array does not contain exactly one 0.5 cell
        (``ndarray.item`` requires a size-1 array)
    """
    pos_y, pos_x = np.where(arr == 0.5)
    return (pos_x.item(), pos_y.item())
|
{"hexsha": "fc471332781a7d06c92ac28ac92f54b4d03d57c8", "size": 36701, "ext": "py", "lang": "Python", "max_stars_repo_path": "nsrl/experiment/exploration_helpers.py", "max_stars_repo_name": "taodav/novelty-search-repr-space", "max_stars_repo_head_hexsha": "461691104dc3a72b9b4f7ec040b71d95eec434b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-12-03T13:24:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T21:40:14.000Z", "max_issues_repo_path": "nsrl/experiment/exploration_helpers.py", "max_issues_repo_name": "taodav/novelty-search-repr-space", "max_issues_repo_head_hexsha": "461691104dc3a72b9b4f7ec040b71d95eec434b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nsrl/experiment/exploration_helpers.py", "max_forks_repo_name": "taodav/novelty-search-repr-space", "max_forks_repo_head_hexsha": "461691104dc3a72b9b4f7ec040b71d95eec434b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-17T00:42:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-19T12:59:11.000Z", "avg_line_length": 41.4232505643, "max_line_length": 135, "alphanum_fraction": 0.6080760742, "include": true, "reason": "import numpy", "num_tokens": 8427}
|
! Generic "insert entries" operation of the Sparse BLAS interface:
! inserts a batch of values with their row/column indices into the
! sparse matrix referenced by an integer handle, delegating to the
! per-type single-entry routines of mod_uscr_insert_entry.
module mod_uscr_insert_entries
use blas_sparse_namedconstants
use mod_uscr_insert_entry
! Generic name dispatching on the type/kind of `val`:
! i = integer, s = real(sp), d = real(dp), c = complex(sp), z = complex(dp)
interface uscr_insert_entries
module procedure iuscr_insert_entries
module procedure suscr_insert_entries
module procedure duscr_insert_entries
module procedure cuscr_insert_entries
module procedure zuscr_insert_entries
end interface
contains
! **********************************************************************
!!***************************************************************************
! **********************************************************************
! Integer version: insert size(val) entries (val(i) at row indx(i),
! column jndx(i)) into matrix handle `a`. istat is 0 on success;
! insertion stops at the first entry that fails.
subroutine iuscr_insert_entries (a,val,indx,jndx,istat)
implicit none
integer ,dimension(:),intent(in) ::val
integer ,intent(in) ::a
integer,intent(out)::istat
integer,dimension(:),intent(in)::indx,jndx
integer ::i
istat=-1
do i=1,size(val)
call iuscr_insert_entry (a,val(i),indx(i),&
jndx(i),istat)
if(istat.ne.0) return
end do
end subroutine iuscr_insert_entries
! **********************************************************************
!!***************************************************************************
! **********************************************************************
! Single-precision real version; see iuscr_insert_entries for semantics.
subroutine suscr_insert_entries (a,val,indx,jndx,istat)
implicit none
real(KIND=sp) ,dimension(:),intent(in) ::val
integer ,intent(in) ::a
integer,intent(out)::istat
integer,dimension(:),intent(in)::indx,jndx
integer ::i
istat=-1
do i=1,size(val)
call suscr_insert_entry (a,val(i),indx(i),&
jndx(i),istat)
if(istat.ne.0) return
end do
end subroutine suscr_insert_entries
! **********************************************************************
!!***************************************************************************
! **********************************************************************
! Double-precision real version; see iuscr_insert_entries for semantics.
subroutine duscr_insert_entries (a,val,indx,jndx,istat)
implicit none
real(KIND=dp) ,dimension(:),intent(in) ::val
integer ,intent(in) ::a
integer,intent(out)::istat
integer,dimension(:),intent(in)::indx,jndx
integer ::i
istat=-1
do i=1,size(val)
call duscr_insert_entry (a,val(i),indx(i),&
jndx(i),istat)
if(istat.ne.0) return
end do
end subroutine duscr_insert_entries
! **********************************************************************
!!***************************************************************************
! **********************************************************************
! Single-precision complex version; see iuscr_insert_entries for semantics.
subroutine cuscr_insert_entries (a,val,indx,jndx,istat)
implicit none
complex(KIND=sp) ,dimension(:),intent(in) ::val
integer ,intent(in) ::a
integer,intent(out)::istat
integer,dimension(:),intent(in)::indx,jndx
integer ::i
istat=-1
do i=1,size(val)
call cuscr_insert_entry (a,val(i),indx(i),&
jndx(i),istat)
if(istat.ne.0) return
end do
end subroutine cuscr_insert_entries
! **********************************************************************
!!***************************************************************************
! **********************************************************************
! Double-precision complex version; see iuscr_insert_entries for semantics.
subroutine zuscr_insert_entries (a,val,indx,jndx,istat)
implicit none
complex(KIND=dp) ,dimension(:),intent(in) ::val
integer ,intent(in) ::a
integer,intent(out)::istat
integer,dimension(:),intent(in)::indx,jndx
integer ::i
istat=-1
do i=1,size(val)
call zuscr_insert_entry (a,val(i),indx(i),&
jndx(i),istat)
if(istat.ne.0) return
end do
end subroutine zuscr_insert_entries
! **********************************************************************
!!***************************************************************************
! **********************************************************************
end module mod_uscr_insert_entries
|
{"hexsha": "6ab27a70417d129795006070c4c6ee92dba139d5", "size": 4193, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "ext/SOFTWARE/uscr_insert_entries.f90", "max_stars_repo_name": "ittnas/qsc", "max_stars_repo_head_hexsha": "5e23c7f0c2985d79b966f8301a17c2679a5f2c8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-23T09:41:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-23T09:41:11.000Z", "max_issues_repo_path": "ext/SOFTWARE/uscr_insert_entries.f90", "max_issues_repo_name": "ittnas/qsc", "max_issues_repo_head_hexsha": "5e23c7f0c2985d79b966f8301a17c2679a5f2c8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ext/SOFTWARE/uscr_insert_entries.f90", "max_forks_repo_name": "ittnas/qsc", "max_forks_repo_head_hexsha": "5e23c7f0c2985d79b966f8301a17c2679a5f2c8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5148514851, "max_line_length": 78, "alphanum_fraction": 0.4137848796, "num_tokens": 814}
|
"""Script for testing reference files
Usage:
test_ref_file <ref_file> <db_path> [--data=<fname>] [--max_matches=<match>] [--num_cpu=<n>] [--email=<addr>]
Arguments:
    <ref_file>              Absolute path to the reference file to test.
    <db_path>               Absolute path to database.
Options:
-h --help Show this screen.
--version Show version.
--data=<fname> data to run pipeline with
--max_matches=<match> maximum number of data sets to test
--num_cpu=<n> number of cores to use [default: 2]
--email=<addr> email results from job with html table.
"""
from __future__ import print_function
import os
from astropy.io import fits
import crds
from dask import compute, delayed
from dask.diagnostics import ProgressBar
from docopt import docopt
from email.headerregistry import Address
from email.message import EmailMessage
from email.mime.text import MIMEText
from jwst import datamodels
from jwst.pipeline import calwebb_dark, calwebb_image2, calwebb_spec2
try:
from jwst.pipeline import SloperPipeline as Detector1Pipeline
except ImportError:
from jwst.pipeline import Detector1Pipeline
import logging
import numpy as np
import pandas as pd
import psutil
import smtplib
from sqlalchemy import or_
# Remove python 2 dependencies in the future..
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from . import db
# Map exact-match CRDS parkeys to their pattern-match ("P_") counterparts.
# A reference file may declare e.g. META.EXPOSURE.P_EXPTYPE containing a
# '|'-separated list of values it applies to, instead of a single value.
p_mapping = {
    "META.EXPOSURE.TYPE": "META.EXPOSURE.P_EXPTYPE",
    "META.INSTRUMENT.BAND": "META.INSTRUMENT.P_BAND",
    "META.INSTRUMENT.DETECTOR": "META.INSTRUMENT.P_DETECTOR",
    "META.INSTRUMENT.CHANNEL": "META.INSTRUMENT.P_CHANNEL",
    "META.INSTRUMENT.FILTER": "META.INSTRUMENT.P_FILTER",
    "META.INSTRUMENT.PUPIL": "META.INSTRUMENT.P_PUPIL",
    "META.INSTRUMENT.MODULE": "META.INSTRUMENT.P_MODULE",
    "META.SUBARRAY.NAME": "META.SUBARRAY.P_SUBARRAY",
    "META.INSTRUMENT.GRATING": "META.INSTRUMENT.P_GRATING",
    "META.EXPOSURE.READPATT": "META.EXPOSURE.P_READPATT"
}
# Translate JWST datamodel metadata paths to the FITS header keywords that
# are used as columns of the regression database (db.RegressionData).
meta_to_fits = {
    'META.INSTRUMENT.NAME': 'INSTRUME',
    'META.EXPOSURE.READPATT': 'READPATT',
    'META.EXPOSURE.TYPE': 'EXP_TYPE',
    'META.INSTRUMENT.BAND': 'BAND',
    'META.INSTRUMENT.CHANNEL': 'CHANNEL',
    'META.INSTRUMENT.DETECTOR': 'DETECTOR',
    'META.INSTRUMENT.FILTER': 'FILTER',
    'META.INSTRUMENT.GRATING': 'GRATING',
    'META.SUBARRAY.NAME': 'SUBARRAY'
}
# Lowercase EXP_TYPE values that are calibrated with the imaging (Image2)
# pipeline; everything else not DARK/FLAT goes through Spec2.
IMAGING = ['fgs_image', 'fgs_focus', 'fgs_skyflat', 'fgs_intflat', 'mir_image',
           'mir_tacq', 'mir_lyot', 'mir_4qpm', 'mir_coroncal', 'nrc_image',
           'nrc_tacq', 'nrc_coron', 'nrc_taconfirm', 'nrc_focus', 'nrc_tsimage',
           'nis_image', 'nis_dark', 'nis_ami', 'nis_tacq', 'nis_taconfirm', 'nis_focus',
           'nrs_tacq', 'nrs_taslit', 'nrs_taconfirm', 'nrs_confirm', 'nrs_image',
           'nrs_focus', 'nrs_mimf', 'nrs_bota']
def get_pipelines(exp_type):
    """Select the calibration pipeline(s) for a given JWST exposure type.

    Parameters
    ----------
    exp_type: str
        JWST exposure type (FITS EXP_TYPE keyword value).

    Returns
    -------
    list
        Pipeline instance(s) to run, in order.
    """
    # Darks and flats only need the level-1 stage; imaging exposures add
    # Image2, and everything else is treated as spectroscopy (Spec2).
    if 'DARK' in exp_type:
        return [calwebb_dark.DarkPipeline()]
    if 'FLAT' in exp_type:
        return [Detector1Pipeline()]
    if exp_type.lower() in IMAGING:
        return [Detector1Pipeline(), calwebb_image2.Image2Pipeline()]
    return [Detector1Pipeline(), calwebb_spec2.Spec2Pipeline()]
def override_reference_file(ref_file, pipeline):
    """Point every applicable pipeline step at the supplied reference file.

    Opens ``ref_file`` to read its reftype, then sets the matching
    ``override_<reftype>`` attribute on each step of ``pipeline`` that
    exposes one, so CRDS lookups for that reftype are bypassed.

    Parameters
    ----------
    ref_file: str
        Path to the reference file under test.
    pipeline: jwst pipeline instance
        Pipeline whose steps should use the override.

    Returns
    -------
    pipeline: jwst pipeline instance
        The same pipeline object, mutated in place.
    """
    dm = datamodels.open(ref_file)
    for step in pipeline.step_defs.keys():
        # check if a step has an override_<reftype> option
        if hasattr(getattr(pipeline, step), 'override_{}'.format(dm.meta.reftype)):
            setattr(getattr(pipeline, step), 'override_{}'.format(dm.meta.reftype), ref_file)
            print('Setting {} in {} step'.format('override_{}'.format(dm.meta.reftype), step))
    return pipeline
def test_reference_file(ref_file, data_file):
    """Override CRDS reference file with the supplied reference file and run
    pipeline with supplied data file.

    Parameters
    ----------
    ref_file: str
        Path to reference file.
    data_file: str
        Path to data file.

    Returns
    -------
    result_meta: dict
        Dictionary with results from run (Path, Filename, Test_Status and
        Error_Msg keys; Error_Msg holds the raised exception or None).
    """
    # redirect pipeline log from sys.stderr to a string
    log_stream = StringIO()
    stpipe_log = logging.Logger.manager.loggerDict['stpipe']
    # NOTE(review): assumes the 'stpipe' logger already has at least one
    # handler attached -- confirm this holds before the first pipeline run.
    stpipe_log.handlers[0].stream = log_stream
    # allow invalid keyword values
    os.environ['PASS_INVALID_VALUES'] = '1'
    path, filename = os.path.split(data_file)
    result_meta = {'Path': path,
                   'Filename': filename}
    try:
        # Run every pipeline stage appropriate for this exposure type, each
        # with the reference-file override applied.
        for pipeline in get_pipelines(fits.getheader(data_file)['EXP_TYPE']):
            pipeline = override_reference_file(ref_file, pipeline)
            pipeline.run(data_file)
        result_meta['Test_Status'] = 'PASSED'
        result_meta['Error_Msg'] = None
        return result_meta
    except Exception as err:
        # Any pipeline failure is recorded rather than propagated, so one
        # bad data set does not abort a batch run.
        result_meta['Test_Status'] = 'FAILED'
        result_meta['Error_Msg'] = err
        return result_meta
def find_matches(ref_file, session, max_matches=-1):
    """Find matches in user provided database based on header keywords
    inside of user provided reference file.

    Parameters
    ----------
    ref_file: str
        File path to reference file to test
    session: sqlite session object
        A sqlite database session.
    max_matches: int
        Maximum matches to return. (Default=-1, return all matches)

    Returns
    -------
    matches: list
        a list of filenames
    """
    # Create a JWST datamodel based off of the reference file.
    dm = datamodels.open(ref_file)
    # Get the calibration context, pipeline map (pmap), instrument map (imap)
    # and reference map (rmap). For more detail on these maps, visit:
    # https://hst-crds.stsci.edu/static/users_guide/rmap_syntax.html
    context = crds.heavy_client.get_processing_mode('jwst')[1]
    pmap = crds.rmap.load_mapping(context)
    imap = pmap.get_imap(dm.meta.instrument.name)
    rmap = imap.get_rmap(dm.meta.reftype)
    meta_attrs = rmap.get_required_parkeys()
    # Date/time parkeys select by epoch, not instrument mode, so drop them.
    meta_attrs.remove('META.OBSERVATION.DATE')
    meta_attrs.remove('META.OBSERVATION.TIME')
    query_args = []
    keys_used = []
    for attr in meta_attrs:
        # Hack to get around MIRI Dark Issue for now.
        if attr in ['META.EXPOSURE.READPATT', 'META.SUBARRAY.NAME']:
            continue
        if p_mapping[attr].lower() in dm.to_flat_dict():
            p_attr = p_mapping[attr]
            if '|' in dm[p_attr.lower()]:
                # Pattern keyword: trailing '|' means the last split element
                # is empty, hence the [:-1].
                or_vals = dm[p_attr.lower()].split('|')[:-1]
                or_vals = [val.strip() for val in or_vals]
                # BUGFIX: or_() takes clauses as positional arguments, so the
                # comparison list must be unpacked rather than passed as a
                # single generator object.
                clauses = [getattr(db.RegressionData, meta_to_fits[attr]) == val
                           for val in or_vals]
                query_args.append(or_(*clauses))
                keys_used.append([meta_to_fits[attr], dm[p_attr.lower()]])
        # Ignore special CRDS-only values
        elif dm[attr.lower()] in ['GENERIC', 'N/A', 'ANY']:
            pass
        # Normal values
        else:
            query_args.append(getattr(db.RegressionData, meta_to_fits[attr]) == dm[attr.lower()])
            keys_used.append([meta_to_fits[attr], dm[attr.lower()]])
    query_string = '\n'.join(['\t{} = {}'.format(key[0], key[1]) for key in keys_used])
    print('Searching DB for test data with\n'+query_string)
    query_result = session.query(db.RegressionData).filter(*query_args)
    filenames = [os.path.join(result.path, result.filename) for result in query_result]
    print('Found {} instances:'.format(len(filenames)), end="")
    print('\n'+'\n'.join(['\t'+f for f in filenames]))
    if filenames:
        if max_matches > 0:
            print('Using first {} matches'.format(max_matches))
            # BUGFIX: only slice when a positive limit was requested; the old
            # unconditional filenames[:max_matches] with the default -1
            # silently dropped the last match.
            return filenames[:max_matches]
    else:
        print('\tNo matches found')
    return filenames
def send_email(data_for_email, addr):
    """Send nicely formatted pandas dataframe as html table via email when
    reference file test job is done.

    Parameters
    ----------
    data_for_email: list
        List of dictionaries to create dataframe out of
    addr: str
        Email address or bare username; the stsci.edu domain is appended.

    Returns
    -------
    None
    """
    # Make sure to strip the username from the domain if full email given.
    if '@' in addr:
        addr = addr.split("@")[0]
    # Make sure to print full error message... BUGFIX: None (not -1) is the
    # supported way to disable column truncation; -1 is deprecated and
    # rejected by modern pandas.
    pd.set_option('display.max_colwidth', None)
    # Make dataframe
    df = pd.DataFrame(data_for_email)
    # Make df into html table and then put into email.
    html_tb = df.to_html(justify='center',index=False)
    msg = EmailMessage()
    msg['Subject'] = 'Results From JWST Reference File Testing.'
    msg['From'] = Address('', addr, 'stsci.edu')
    msg['To'] = Address('', addr, 'stsci.edu')
    body_str = """
    <html>
        <head></head>
        <body>
            <p><b> Results from run </b></p>
            {}
        </body>
    </html>
    """.format(html_tb)
    msg.add_alternative(body_str, subtype='html')
    with smtplib.SMTP('smtp.stsci.edu') as s:
        s.send_message(msg)
def main():
    """Command line entry point: parse docopt arguments, then either test the
    reference file against a single data file (--data) or query the
    regression database for matching data sets and calibrate them in
    parallel with dask.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Get docopt arguments..
    args = docopt(__doc__, version='0.1')
    ref_file = args['<ref_file>']
    data_file = args['--data']
    # if you only want to test one JWST file against ref file
    # else, search DB for files that will be effected by new ref file
    if data_file is not None:
        file_to_cal = delayed(test_reference_file)(ref_file, data_file)
        tab_data = file_to_cal.compute()
        # None disables column truncation (-1 is rejected by modern pandas).
        pd.set_option('display.max_colwidth', None)
        # BUGFIX: tab_data is a single dict of scalars; wrap it in a list so
        # DataFrame construction does not raise "If using all scalar values".
        print(pd.DataFrame([tab_data]))
    else:
        session = db.load_session(db_path=args['<db_path>'])
        if args['--max_matches']:
            data_files = find_matches(ref_file, session, max_matches=int(args['--max_matches']))
        else:
            data_files = find_matches(ref_file, session)
        # If files are returned, build list of objects to process
        if data_files:
            delayed_data_files = [delayed(test_reference_file)(ref_file, fname)
                                  for fname in data_files]
            # Check to make sure user isn't exceeding number of CPUs.
            if int(args['--num_cpu']) > psutil.cpu_count():
                # Keep the docopt dict intact (old code shadowed `args` with
                # the format tuple before raising).
                err_str = "YOUR MACHINE ONLY HAS {} CPUs! YOU ENTERED {}"
                raise ValueError(err_str.format(psutil.cpu_count(), args['--num_cpu']))
            else:
                # Compute results in parallel.
                print("Performing Calibration...")
                with ProgressBar():
                    tab_data = compute(delayed_data_files, num_workers=int(args['--num_cpu']))[0]
                # If you want to email,
                if args['--email']:
                    send_email(tab_data, args['--email'])
                else:
                    pd.set_option('display.max_colwidth', None)
                    print(pd.DataFrame(tab_data))
|
{"hexsha": "a18a0ee0c1ce7e33f0f4c2e963ca051451745735", "size": 11316, "ext": "py", "lang": "Python", "max_stars_repo_path": "reference_file_testing_tool/reftest.py", "max_stars_repo_name": "pllim/reference-file-testing-tool", "max_stars_repo_head_hexsha": "f6ee7b6d306904c29b6321c738d8ac76c0a75157", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reference_file_testing_tool/reftest.py", "max_issues_repo_name": "pllim/reference-file-testing-tool", "max_issues_repo_head_hexsha": "f6ee7b6d306904c29b6321c738d8ac76c0a75157", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-06-20T20:39:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-09T23:44:38.000Z", "max_forks_repo_path": "reference_file_testing_tool/reftest.py", "max_forks_repo_name": "pllim/reference-file-testing-tool", "max_forks_repo_head_hexsha": "f6ee7b6d306904c29b6321c738d8ac76c0a75157", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-21T14:12:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-09T22:40:04.000Z", "avg_line_length": 32.4240687679, "max_line_length": 112, "alphanum_fraction": 0.6255744079, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2763}
|
import numpy as np
import performance
import dislib as ds
from dislib.cluster import KMeans
def main():
    """Benchmark dislib KMeans on a 300M x 100 random distributed array."""
    sample_count = 300000000
    chunk_count = 1536
    rows_per_chunk = int(np.ceil(sample_count / chunk_count))
    feature_count = 100
    cluster_count = 500
    # Build the random input once, then time only the fit call.
    data = ds.random_array((sample_count, feature_count),
                           (rows_per_chunk, feature_count))
    model = KMeans(n_clusters=cluster_count, max_iter=5, tol=0, arity=48)
    performance.measure("KMeans", "300M", model.fit, data)
if __name__ == "__main__":
    main()
|
{"hexsha": "b95289e3fa4abc5ccb9d1ebede12d6348bcc14d8", "size": 485, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/performance/mn4/tests/kmeans.py", "max_stars_repo_name": "alexbarcelo/dislib", "max_stars_repo_head_hexsha": "989f81f235ae30b17410a8d805df258c7d931b38", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2018-10-22T19:21:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T12:10:01.000Z", "max_issues_repo_path": "tests/performance/mn4/tests/kmeans.py", "max_issues_repo_name": "alexbarcelo/dislib", "max_issues_repo_head_hexsha": "989f81f235ae30b17410a8d805df258c7d931b38", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 329, "max_issues_repo_issues_event_min_datetime": "2018-11-22T18:04:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T01:26:55.000Z", "max_forks_repo_path": "tests/performance/mn4/tests/kmeans.py", "max_forks_repo_name": "alexbarcelo/dislib", "max_forks_repo_head_hexsha": "989f81f235ae30b17410a8d805df258c7d931b38", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-01-10T11:46:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:59:45.000Z", "avg_line_length": 21.0869565217, "max_line_length": 74, "alphanum_fraction": 0.6886597938, "include": true, "reason": "import numpy", "num_tokens": 139}
|
from __future__ import division
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
# This is our custom tesseroid code
from tesseroid_density import tesseroid
# Define functions
def density_function(height):
    """Exponential density profile; relies on module-level ``b_factor``
    and ``thickness`` (defined below at module scope)."""
    exponent = b_factor * height / thickness
    return np.exp(exponent)
def normalized_density(height):
    """Rescale ``density_function`` to [0, 1] using the module-level
    ``rho_min``/``rho_max`` extrema."""
    rho = density_function(height)
    return (rho - rho_min) / (rho_max - rho_min)
def line(height, top, bottom):
    """Straight line through the normalized density at ``bottom`` and
    ``top``, evaluated at ``height``."""
    rho_bottom = normalized_density(bottom)
    rho_top = normalized_density(top)
    slope = (rho_top - rho_bottom) / (top - bottom)
    return slope * (height - bottom) + rho_bottom
# Define tesseroid boundaries
w, e, s, n, top, bottom = -10, 10, -10, 10, 0, -1
thickness = top - bottom
bounds = [w, e, s, n, top, bottom]
# Set b factor for exp density variation
b_factor = 6
# Define heights array and boundary density values
heights = np.linspace(bottom, top, 101)
density = density_function(heights)
rho_min, rho_max = density.min(), density.max()
# Calculate discretizations
# -------------------------
# Decrease delta from 1 until the density-based discretization has produced
# max_divisions tesseroids, recording the subset (and the delta value) each
# time the number of radial divisions grows by exactly one.
delta = 1
delta_step = 0.001
max_divisions = 4
subsets = [[np.array(bounds)]]
deltas = [1]
while True:
    subset = tesseroid._density_based_discretization(bounds,
                                                     density_function,
                                                     delta)
    divisions = len(subset)
    if divisions == len(subsets[-1]) + 1:
        subsets.append(subset)
        deltas.append(delta)
    elif divisions > len(subsets[-1]) + 1:
        # A single delta step added more than one division: warn, since that
        # intermediate discretization stage was skipped.
        print("More discretizations than 1 in delta {}".format(delta))
    if divisions >= max_divisions:
        break
    else:
        delta -= delta_step
# Plot discretization steps
# -------------------------
# Check for DISPLAY variable for matplotlib (headless runs fall back to agg)
try:
    os.environ["DISPLAY"]
except Exception:
    plt.switch_backend('agg')
# Configure LaTeX style for plots
try:
    plt.rcParams['text.usetex'] = True
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['font.serif'] = 'Computer Modern Roman'
    plt.rcParams['xtick.major.size'] = 2
    plt.rcParams['ytick.major.size'] = 2
except Exception as e:
    warnings.warn("Couldn't configure LaTeX style for plots:" + str(e))
# Initialize figure and subplots (one panel per discretization stage)
fig, axes = plt.subplots(1, len(subsets),
                         figsize=(6.66, 2),
                         sharey=True)
labels = ["(a)", "(b)", "(c)", "(d)"]
divisions = np.array([top, bottom])
# Plot first three stages: current division points plus the construction
# (straight line between neighbours and max-difference segment) that
# produces the next stage's new division.
for i in range(len(subsets) - 1):
    ax = axes[i]
    subset = subsets[i]
    tops = [tess[-2] for tess in subset]
    bottoms = [tess[-1] for tess in subset]
    divisions = np.unique(np.array(tops + bottoms))
    tops = [tess[-2] for tess in subsets[i + 1]]
    bottoms = [tess[-1] for tess in subsets[i + 1]]
    new_divisions = np.unique(np.array(tops + bottoms))
    line1 = ax.plot(heights, normalized_density(heights),
                    linewidth=2)
    dots = ax.plot(divisions, normalized_density(divisions), 'o')
    for j in range(1, len(new_divisions) - 1):
        bottom_j = new_divisions[j - 1]
        top_j = new_divisions[j + 1]
        div = new_divisions[j]
        if div not in divisions:
            # This division is new in the next stage: show the vertical
            # max-difference segment and the chord it is measured against.
            line2 = ax.plot([div]*2,
                            [normalized_density(div),
                             line(div, top_j, bottom_j)],
                            '--', color="C3")
            line3 = ax.plot([bottom_j, top_j],
                            [normalized_density(bottom_j),
                             normalized_density(top_j)],
                            '-', color="C1")
            # Annotate the interval length L_r on panels (b) and (c).
            if i == 1:
                ax.plot([bottom_j, top_j],
                        [normalized_density(bottom_j) - 0.08]*2, "|--",
                        color="C7", markersize=8)
                ax.text(0.5*(bottom_j + top_j),
                        normalized_density(bottom_j) - 0.03,
                        '$L_r$',
                        fontdict={'color': "C7"},
                        horizontalalignment='center')
            elif i == 2:
                ax.plot([bottom_j, top_j],
                        [normalized_density(top_j) + 0.08]*2, "|--",
                        color="C7", markersize=8)
                ax.text(0.5*(bottom_j + top_j),
                        normalized_density(top_j) + 0.13,
                        '$L_r$',
                        fontdict={'color': "C7"},
                        horizontalalignment='center')
# Plot final stage
ax = axes[-1]
tops = [tess[-2] for tess in subsets[-1]]
bottoms = [tess[-1] for tess in subsets[-1]]
divisions = np.unique(np.array(tops + bottoms))
ax.plot(heights, normalized_density(heights),
        linewidth=2)
ax.plot(divisions, normalized_density(divisions), 'o')
# Configure axes
axes[0].set_ylabel("Normalized Density")
for ax, label in zip(axes, labels):
    ax.text(0.03, 0.91, label,
            fontdict={'weight': 'bold'},
            verticalalignment="center",
            transform=ax.transAxes)
    ax.set_xticks([-1, 0])
    ax.set_xticklabels([r"$r_1$", r"$r_2$"])
    ax.set_yticks([])
# Create legend
axes[-1].legend((line1[0], line3[0], line2[0], dots[0]),
                ("Norm. density",
                 "Straight Line",
                 "Max. difference",
                 "Discretizations"),
                fontsize="x-small",
                loc=(0.03, 0.46),
                )
plt.tight_layout(pad=1, h_pad=0, w_pad=0)
# Save the figure next to the manuscript sources, then show it.
script_path = os.path.dirname(os.path.abspath(__file__))
figure_fname = os.path.join(
    script_path,
    "../../manuscript/figures/density-based-discretization-algorithm.pdf"
)
plt.savefig(figure_fname)
plt.show()
|
{"hexsha": "f956102eab5914d0a147a2b36b4f9dc634af118d", "size": 5597, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/scripts/density-based-discretization.py", "max_stars_repo_name": "pinga-lab/tesseroid-variable-density", "max_stars_repo_head_hexsha": "7fae8a2d6df645d64ce940f98273aef256de2e38", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-06-05T21:24:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T08:57:54.000Z", "max_issues_repo_path": "code/scripts/density-based-discretization.py", "max_issues_repo_name": "pinga-lab/tesseroid-variable-density", "max_issues_repo_head_hexsha": "7fae8a2d6df645d64ce940f98273aef256de2e38", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2018-09-21T22:09:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-24T12:34:40.000Z", "max_forks_repo_path": "code/scripts/density-based-discretization.py", "max_forks_repo_name": "pinga-lab/tesseroid-variable-density", "max_forks_repo_head_hexsha": "7fae8a2d6df645d64ce940f98273aef256de2e38", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-10-15T02:02:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-25T05:00:32.000Z", "avg_line_length": 31.8011363636, "max_line_length": 77, "alphanum_fraction": 0.5628015008, "include": true, "reason": "import numpy", "num_tokens": 1368}
|
import numpy as np
import pytest
import shetar
# =================== Test input paramaters ===================
@pytest.fixture(scope='module', params=[3, 5])
def input_order(request):
    """Order of the input expansion."""
    return request.param
@pytest.fixture(scope='module', params=[3, 5])
def output_order(request):
    """Order of the output expansion (translations only)."""
    return request.param
@pytest.fixture(scope='module', params=[
    1,  # Single value
    np.linspace(1e-3, 1, 7),  # Unique size for wavenumbers
    np.linspace(1e-3, 1, 9),  # Shared size for all
    # np.linspace(1e-3, 1, 11),  # Shared with only radius
])
def wavenumber(request):
    """Scalar or 1-d array of wavenumbers for the radial bases."""
    return request.param
@pytest.fixture(scope='module', params=[
    1,  # Single value
    np.linspace(0.1, 1, 9),  # Shared size for all
    np.linspace(0.1, 1, 11),  # Unique for radius
])
def radius(request):
    """Scalar or 1-d array of radii."""
    return request.param
@pytest.fixture(scope='module', params=[
    np.pi / 2,  # Single value
    np.linspace(0, np.pi, 9),  # Shared size for all
    np.linspace(0, np.pi, 13),
])
def colatitude(request):
    """Scalar or 1-d array of colatitude angles in [0, pi]."""
    return request.param
@pytest.fixture(scope='module', params=[
    0,  # Single value
    np.linspace(0, 2 * np.pi, 9),  # Shared size for all
    np.linspace(0, 2 * np.pi, 15),
])
def azimuth(request):
    """Scalar or 1-d array of azimuth angles in [0, 2*pi]."""
    return request.param
# ============================ Evaluated parameters ============================
@pytest.fixture(scope='module')
def colatitude_azimuth(colatitude, azimuth):
    """Return a (colatitude, azimuth) pair that is guaranteed to broadcast.

    If the raw parametrized arrays cannot broadcast, one of them gets a
    trailing axis so the two become orthogonal dimensions.
    """
    try:
        np.broadcast(colatitude, azimuth)
    except ValueError:
        # NOTE(review): `radius` here resolves to the module-level fixture
        # *function*, not an array, so np.size(radius) == 1 and this branch
        # is effectively dead (azimuth arrays that fail to broadcast are
        # never size 1). Presumably np.size(colatitude) was intended --
        # verify against the author's intent.
        if np.size(azimuth) == np.size(radius):
            azimuth = azimuth[:, None]
        else:
            colatitude = colatitude[:, None]
    return colatitude, azimuth
@pytest.fixture(scope='module')
def radius_colatitude_azimuth(radius, colatitude_azimuth):
    """Return (radius, colatitude, azimuth) reshaped so all three broadcast."""
    colatitude, azimuth = colatitude_azimuth
    angular_shape = np.broadcast(colatitude, azimuth)
    try:
        np.broadcast(radius, angular_shape)
    except ValueError:
        if np.size(radius) == angular_shape.shape[0]:
            # Radius aligns with the leading angular axis: pad with trailing
            # singleton axes so it broadcasts along the remaining ones.
            radius = np.reshape(radius, radius.shape + (1,) * (angular_shape.ndim - 1))
        else:
            # Otherwise give radius its own axis orthogonal to the angles.
            radius = np.reshape(radius, radius.shape + (1,) * angular_shape.ndim)
    return radius, colatitude, azimuth
# ================================== Helpers ===================================
def class_name(obj):
    """Return the fully qualified class name of ``obj`` as 'module.Class'."""
    cls = obj.__class__
    return '{}.{}'.format(cls.__module__, cls.__name__)
def assert_shape_match(obj, matching):
    """Assert that ``obj`` agrees with ``matching`` on shape and ndim,
    both via its attributes and via the numpy free functions."""
    expected_shape = np.shape(matching)
    expected_ndim = np.ndim(matching)
    assert obj.shape == expected_shape, f"Object of type {class_name(obj)} has shape {obj.shape}, expected {expected_shape}"
    assert np.shape(obj) == obj.shape, f"Object of type {class_name(obj)} has `np.shape(obj)`` {np.shape(obj)} and `obj.shape` {obj.shape}"
    assert obj.ndim == expected_ndim, f"Object of type {class_name(obj)} has ndim {obj.ndim}, expected {expected_ndim}"
    assert np.ndim(obj) == obj.ndim, f"Object of type {class_name(obj)} has `np.ndim(obj)`` {np.ndim(obj)} and `obj.ndim` {obj.ndim}"
# ================================= Test bases =================================
def test_AssociatedLegendrePolynomials(input_order, colatitude):
    """Legendre basis broadcasts like its x argument."""
    obj = shetar.bases.AssociatedLegendrePolynomials(order=input_order, x=np.cos(colatitude))
    assert_shape_match(obj, colatitude)
def test_RegularRadialBase(input_order, radius, wavenumber):
    """Regular radial basis broadcasts like the radius argument."""
    obj = shetar.bases.RegularRadialBase(order=input_order, radius=radius, wavenumber=wavenumber)
    assert_shape_match(obj, radius)
def test_SingularRadialBase(input_order, radius, wavenumber):
    """Singular radial basis broadcasts like the radius argument."""
    obj = shetar.bases.SingularRadialBase(order=input_order, radius=radius, wavenumber=wavenumber)
    assert_shape_match(obj, radius)
def test_DualRadialBase(input_order, radius, wavenumber):
    """Dual radial basis broadcasts like the radius argument."""
    obj = shetar.bases.DualRadialBase(order=input_order, radius=radius, wavenumber=wavenumber)
    assert_shape_match(obj, radius)
def test_SphericalHarmonics(input_order, colatitude_azimuth):
    """Harmonics broadcast over colatitude x azimuth; internal parts keep
    their own argument shapes."""
    colatitude, azimuth = colatitude_azimuth
    obj = shetar.bases.SphericalHarmonics(order=input_order, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj._legendre, colatitude)
    assert_shape_match(obj._phase, azimuth)
    assert_shape_match(obj, np.broadcast(colatitude, azimuth))
def test_RegularBase(input_order, wavenumber, radius_colatitude_azimuth):
    """Full regular basis broadcasts over radius x colatitude x azimuth."""
    radius, colatitude, azimuth = radius_colatitude_azimuth
    obj = shetar.bases.RegularBase(order=input_order, wavenumber=wavenumber, radius=radius, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(radius, colatitude, azimuth))
    assert_shape_match(obj._radial, radius)
    assert_shape_match(obj._angular, np.broadcast(colatitude, azimuth))
    assert_shape_match(obj._angular._legendre, colatitude)
    assert_shape_match(obj._angular._phase, azimuth)
def test_SingularBase(input_order, wavenumber, radius_colatitude_azimuth):
    """Full singular basis broadcasts over radius x colatitude x azimuth."""
    radius, colatitude, azimuth = radius_colatitude_azimuth
    obj = shetar.bases.SingularBase(order=input_order, wavenumber=wavenumber, radius=radius, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(radius, colatitude, azimuth))
    assert_shape_match(obj._radial, radius)
    assert_shape_match(obj._angular, np.broadcast(colatitude, azimuth))
    assert_shape_match(obj._angular._legendre, colatitude)
    assert_shape_match(obj._angular._phase, azimuth)
def test_DualBase(input_order, wavenumber, radius_colatitude_azimuth):
    """Full dual basis broadcasts over radius x colatitude x azimuth."""
    radius, colatitude, azimuth = radius_colatitude_azimuth
    obj = shetar.bases.DualBase(order=input_order, wavenumber=wavenumber, radius=radius, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(radius, colatitude, azimuth))
    assert_shape_match(obj._radial, radius)
    assert_shape_match(obj._angular, np.broadcast(colatitude, azimuth))
    assert_shape_match(obj._angular._legendre, colatitude)
    assert_shape_match(obj._angular._phase, azimuth)
# =============================== Test rotations ===============================
def test_ColatitudeRotation(input_order, colatitude):
    """Colatitude rotation broadcasts like the colatitude argument."""
    obj = shetar.rotations.ColatitudeRotation(order=input_order, colatitude=colatitude)
    assert_shape_match(obj, colatitude)
def test_Rotation(input_order, colatitude_azimuth):
    """Full rotation broadcasts over colatitude x azimuth."""
    colatitude, azimuth = colatitude_azimuth
    obj = shetar.rotations.Rotation(order=input_order, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(colatitude, azimuth))
    assert_shape_match(obj._primary_phase, azimuth)
# ============================= Test translations ==============================
def test_InteriorCoaxialTranslation(input_order, output_order, radius, wavenumber):
    """Coaxial interior translation broadcasts like the radius argument."""
    obj = shetar.translations.InteriorCoaxialTranslation(input_order=input_order, output_order=output_order, radius=radius, wavenumber=wavenumber)
    assert_shape_match(obj, radius)
def test_ExteriorCoaxialTranslation(input_order, output_order, radius, wavenumber):
    """Coaxial exterior translation broadcasts like the radius argument."""
    obj = shetar.translations.ExteriorCoaxialTranslation(input_order=input_order, output_order=output_order, radius=radius, wavenumber=wavenumber)
    assert_shape_match(obj, radius)
def test_ExteriorInteriorCoaxialTranslation(input_order, output_order, radius, wavenumber):
    """Coaxial exterior-to-interior translation broadcasts like radius."""
    obj = shetar.translations.ExteriorInteriorCoaxialTranslation(input_order=input_order, output_order=output_order, radius=radius, wavenumber=wavenumber)
    assert_shape_match(obj, radius)
def test_InteriorTranslation(input_order, output_order, radius_colatitude_azimuth, wavenumber):
    """General interior translation broadcasts over all position arguments."""
    radius, colatitude, azimuth = radius_colatitude_azimuth
    obj = shetar.translations.InteriorTranslation(input_order=input_order, output_order=output_order, wavenumber=wavenumber, radius=radius, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(radius, colatitude, azimuth))
    assert_shape_match(obj._rotation, np.broadcast(colatitude, azimuth))
def test_ExteriorTranslation(input_order, output_order, radius_colatitude_azimuth, wavenumber):
    """General exterior translation broadcasts over all position arguments."""
    radius, colatitude, azimuth = radius_colatitude_azimuth
    obj = shetar.translations.ExteriorTranslation(input_order=input_order, output_order=output_order, wavenumber=wavenumber, radius=radius, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(radius, colatitude, azimuth))
    assert_shape_match(obj._rotation, np.broadcast(colatitude, azimuth))
def test_ExteriorInteriorTranslation(input_order, output_order, radius_colatitude_azimuth, wavenumber):
    """General exterior-to-interior translation broadcasts over all position
    arguments."""
    radius, colatitude, azimuth = radius_colatitude_azimuth
    obj = shetar.translations.ExteriorInteriorTranslation(input_order=input_order, output_order=output_order, wavenumber=wavenumber, radius=radius, colatitude=colatitude, azimuth=azimuth)
    assert_shape_match(obj, np.broadcast(radius, colatitude, azimuth))
    assert_shape_match(obj._rotation, np.broadcast(colatitude, azimuth))
|
{"hexsha": "d0cb6f418b1a2705ede6f3874efa0f10f0b88ca1", "size": 8819, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_broadcasting.py", "max_stars_repo_name": "AppliedAcousticsChalmers/Spherical-Helmholtz-Translation-and-Rotation", "max_stars_repo_head_hexsha": "84c5b51dab5d7ee26886ece44945a5d887bff369", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_broadcasting.py", "max_issues_repo_name": "AppliedAcousticsChalmers/Spherical-Helmholtz-Translation-and-Rotation", "max_issues_repo_head_hexsha": "84c5b51dab5d7ee26886ece44945a5d887bff369", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_broadcasting.py", "max_forks_repo_name": "AppliedAcousticsChalmers/Spherical-Helmholtz-Translation-and-Rotation", "max_forks_repo_head_hexsha": "84c5b51dab5d7ee26886ece44945a5d887bff369", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8756218905, "max_line_length": 187, "alphanum_fraction": 0.7339834448, "include": true, "reason": "import numpy", "num_tokens": 2099}
|
from CTL._tree import _CausalTree
from CTL.causal_tree.sig_diff.sig_base import SigTreeBase
from CTL.causal_tree.sig_diff.sig_val import SigTreeVal
import numpy as np
class SigDiffTree(_CausalTree):
    """Significance-difference causal tree.

    Thin wrapper selecting between the validation-split variant
    (``SigTreeVal``) and the base variant (``SigTreeBase``).

    Parameters
    ----------
    alpha : float
        Significance level used for splitting.
    min_size : int
        Minimum number of samples per leaf.
    max_depth : int
        Maximum tree depth (-1 means unlimited).
    val : bool
        If True, use the validation-split variant with ``split_size``.
    split_size : float
        Fraction of data held out for validation (only used when ``val``).
    seed : int
        Random seed.
    """

    def __init__(self, alpha=0.05, min_size=2, max_depth=-1, val=False, split_size=0.5, seed=724):
        super().__init__()
        params = {
            "alpha": alpha,
            "min_size": min_size,
            "max_depth": max_depth,
            "seed": seed,
        }
        if val:
            params["split_size"] = split_size
            self.tree = SigTreeVal(**params)
        else:
            self.tree = SigTreeBase(**params)
        # Bookkeeping filled in by fit().
        self.column_num = 0
        self.fitted = False
        self.tree_depth = 0
        self.obj = 0

    def fit(self, x, y, t):
        """Fit the underlying significance tree.

        Parameters
        ----------
        x, y, t : array-like
            Covariates (2-d), outcomes and treatment indicators.
        """
        self.column_num = x.shape[1]
        # BUGFIX: np.float was a deprecated alias of the builtin float
        # (float64) and was removed in NumPy 1.24; cast to float64 directly.
        x = x.astype(np.float64)
        y = y.astype(np.float64)
        t = t.astype(np.float64)
        self.tree.fit(x, y, t)
        self.fitted = True
        self.tree_depth = self.tree.tree_depth
        self.obj = self.tree.obj
|
{"hexsha": "0d9db95cc410ba47e887550291f1af66c26dabd5", "size": 1053, "ext": "py", "lang": "Python", "max_stars_repo_path": "CTL/sig_diff_tree.py", "max_stars_repo_name": "Youngyi/CTL", "max_stars_repo_head_hexsha": "3dc578b17adf7ddc4eecb5630ed96b3693c53f68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2019-08-03T08:06:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T17:24:35.000Z", "max_issues_repo_path": "CTL/sig_diff_tree.py", "max_issues_repo_name": "springbarley/CTL", "max_issues_repo_head_hexsha": "3dc578b17adf7ddc4eecb5630ed96b3693c53f68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-09-19T02:43:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-29T03:40:23.000Z", "max_forks_repo_path": "CTL/sig_diff_tree.py", "max_forks_repo_name": "springbarley/CTL", "max_forks_repo_head_hexsha": "3dc578b17adf7ddc4eecb5630ed96b3693c53f68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-10-24T18:31:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T01:31:14.000Z", "avg_line_length": 27.0, "max_line_length": 98, "alphanum_fraction": 0.5745489079, "include": true, "reason": "import numpy", "num_tokens": 275}
|
function TestCart()
% TESTCART Build a small 2-D Cartesian DG grid with an Agnesi mountain
% profile, assemble quadrature/derivative operators and metric terms,
% then plot the face grid.
nx=4;
nz=4;
xL=0;
xU=10;
zL=0;
zU=10;
% Free-slip boundary conditions on bottom/top (BT) and west/east (WE).
Boundary.BT='FreeSlip';
Boundary.WE='FreeSlip';
% Agnesi mountain surface: height hC, center x0C, half-width aC.
Param.hS='AgnesiCart';
Param.hC=1;
Param.x0C=5;
Param.aC=1;
Param.Grid=CartGrid(nx,nz,xU-xL,zU-zL,xL,zL,OrientFaceCart,Boundary,Param);
% Gauss-Lobatto quadrature and derivative matrices for polynomial order 2.
% NOTE(review): the Y quadrature below reuses DG.OrdPolyX -- possibly
% DG.OrdPolyY was intended; harmless here since both equal 2.
DG.OrdPoly=2;
DG.OrdPolyX=2;
DG.OrdPolyY=2;
[DG.w,DG.xw]=GaussLobattoQuad(DG.OrdPoly);
[DG.wX,DG.xwX]=GaussLobattoQuad(DG.OrdPolyX);
[DG.wY,DG.xwY]=GaussLobattoQuad(DG.OrdPolyX);
[DG.DW,DG.DS,DG.DV,DG.DVT,DG.B]=DerivativeMatrixSingle(DG);
[DG.DWX,DG.DSX]=DerivativeMatrixSingle(DG);
[DG.DWY,DG.DSY]=DerivativeMatrixSingle(DG);
% Physical coordinates X, Jacobian J and metric terms dXdx per grid face.
Param.X=zeros(DG.OrdPoly+1,DG.OrdPoly+1,3,Param.Grid.NumFaces);
Param.dXdx=zeros(DG.OrdPoly+1,DG.OrdPoly+1,2,2,Param.Grid.NumFaces);
Param.J=zeros(DG.OrdPoly+1,DG.OrdPoly+1,Param.Grid.NumFaces);
for iF=1:Param.Grid.NumFaces
  [Param.X(:,:,:,iF),Param.J(:,:,iF),Param.dXdx(:,:,:,:,iF)]=...
    JacobiDG1(DG,Param.Grid.Faces(iF),Param.Grid,@Cart,Param);
end
% Plot the resulting face grid (one subdivision per face).
Subs=1;
fig=1;
PlotFaceGrid(Param.Grid,Subs,@JacobiCart,fig)
end
|
{"hexsha": "cc59fd4174fb96f82094bbd11ffd01d82923c71a", "size": 994, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Grid/TestCart.jl", "max_stars_repo_name": "CliMA/CGDycore.jl", "max_stars_repo_head_hexsha": "77297631f8db7775f19daee2d7ac75bc810d9c11", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-05T07:09:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T07:09:16.000Z", "max_issues_repo_path": "test/Grid/TestCart.jl", "max_issues_repo_name": "CliMA/CGDycore.jl", "max_issues_repo_head_hexsha": "77297631f8db7775f19daee2d7ac75bc810d9c11", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Grid/TestCart.jl", "max_forks_repo_name": "CliMA/CGDycore.jl", "max_forks_repo_head_hexsha": "77297631f8db7775f19daee2d7ac75bc810d9c11", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4871794872, "max_line_length": 75, "alphanum_fraction": 0.722334004, "num_tokens": 421}
|
from icecream import ic
import os
import logging
from tqdm import tqdm
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.models import Model
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import numpy as np
def extract_features(file, model):
    """Return the deep feature vector of one image file.

    The image is loaded at the network's expected 224x224 input
    resolution, given a leading batch axis, run through the
    VGG16-specific preprocessing, and pushed through ``model``.
    """
    # load image and make it a numpy array
    image = load_img(file, target_size=(224, 224))
    pixels = np.array(image)
    # prepend a batch axis: shape becomes (1, height, width, channels)
    batch = pixels.reshape(1, 224, 224, 3)
    # VGG16 preprocessing (channel mean subtraction etc.), then predict
    prepared = preprocess_input(batch)
    return model.predict(prepared, use_multiprocessing=True)
# Configure timestamped progress logging for the pipeline below.
logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    level=logging.INFO)

DATADIR = './images'

# Collect every png/jpg/gif image file in the data directory.
with os.scandir(DATADIR) as files:
    filenames = [ifile.name for ifile in files
                 if ifile.name.endswith(('.png', '.jpg', '.gif'))]
logging.info(f'Found {len(filenames)} images in data directory')

# Use VGG16 as a fixed feature extractor: keep everything up to the
# second-to-last layer (a 4096-dimensional fully-connected layer) and
# drop the final classification layer.
model = VGG16()
model = Model(inputs=model.inputs, outputs=model.layers[-2].output)

data = {}
logging.info(f'Loading and preprocessing {len(filenames)} images from datadir')
for ifile in tqdm(filenames):
    load_path = os.path.join(DATADIR, ifile)
    data[ifile] = extract_features(load_path, model)

# Stack all feature vectors into one (n_images, 4096) matrix.
feat = np.array(list(data.values()))
feat = feat.reshape(-1, 4096)

# Reduce dimensionality while retaining 95% of the explained variance.
pca = PCA(n_components=0.95)
pca.fit(feat)
x = pca.transform(feat)
logging.info(f'PCA reduced dimensionality to {pca.n_components_}')

# Elbow search over the cluster count: keep growing k until the
# K-Means objective (inertia) is effectively zero or every image has
# its own cluster.  (The unused `testout` variable was removed.)
i = 1
objective = 1
while i <= len(filenames) and objective > 1e-9:
    kmeans = KMeans(n_clusters=i)
    kmeans.fit(x)
    objective = kmeans.inertia_
    logging.info(f'K-Means with {i} clusters yields inertia {objective}')
    i += 1
|
{"hexsha": "c9fece490a04a2b45746fbfde25162086b4d1b43", "size": 2000, "ext": "py", "lang": "Python", "max_stars_repo_path": "clustering.py", "max_stars_repo_name": "rtkfan/image-clusters", "max_stars_repo_head_hexsha": "18a7aa26079c91c960c1f615c8951e3dd60522ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clustering.py", "max_issues_repo_name": "rtkfan/image-clusters", "max_issues_repo_head_hexsha": "18a7aa26079c91c960c1f615c8951e3dd60522ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clustering.py", "max_forks_repo_name": "rtkfan/image-clusters", "max_forks_repo_head_hexsha": "18a7aa26079c91c960c1f615c8951e3dd60522ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6666666667, "max_line_length": 79, "alphanum_fraction": 0.7075, "include": true, "reason": "import numpy", "num_tokens": 508}
|
\chapter{Linear time invariant DT systems}
\section{DT system representations}
We can mathematically represent, or model, DT systems multiple ways.
\begin{itemize}
\item purely mathematically - in time domain we will use
\begin{itemize}
\item linear, constant coefficient difference equations, e.g.
\[
y[n] = a y[n-1] + b y[n-2] + x[n]
\]
\item DT impulse response $h[n]$
\end{itemize}
\item purely mathematically - in frequency domain we will use
\begin{itemize}
\item frequency response
\item transfer function (complex frequency, covered in ECE 3704)
\end{itemize}
\item graphically, using a mixture of math and block diagrams
\end{itemize}
\section{System properties and classification}
Choosing the right kind of system model is important. Here are some important properties that allow us to broadly classify systems.
\begin{itemize}
\item Memory
\item Invertability
\item Causality
\item Stability
\item Time-invariance
\item Linearity
\end{itemize}
Let's define each in turn.
\subsection{Memory}
The output of a DT system with memory depends on previous or future inputs and is said to be {\it dynamic}. Otherwise the system is memoryless or {\it instantaneous}, and the output $y[n]$ at index $n$ depends only on $x[n]$.
For example:
\[
y[n] = 2x[n]
\]
is a memoryless system, while
\[
y[n+1] + y[n] = x[n]
\]
has memory. To see this, write the difference equation in recursive form
\[
y[n] = -y[n-1] + x[n-1]
\]
and we see explicitly the current output $y[n]$ depends on past values of output and input.
\subsection{Invertability}
A system is invertible if there exists a system that when placed in series with the original recovers the input.
\[
x[n] \overset{T}{\mapsto} y[n] \overset{T^{-1}}{\mapsto} x[n]
\]
where $T^{-1}$ is the inverse system of $T$. For example, consider a system
\[
x[n] \mapsto y[n] = \sum\limits_{m=-\infty}^{n} x[m]
\]
and a system
\[
y[n] \mapsto z[n] = y[n] - y[n-1]
\]
The combination in series $x[n] \mapsto y[n] \mapsto z[n] = x[n]$, since
\[
z[n] = y[n] - y[n-1] = \sum\limits_{m=-\infty}^{n} x[m] - \sum\limits_{m=-\infty}^{n-1} x[m] = x[n]
\]
i.e. the difference undoes the accumulation.
\subsection{Causality}
A DT system is causal if the output at index $n$ depends on the input for index values at or before $n$:
\[
y[n] \;\text{depends on}\; x[m] \;\text{for} \; m \leq n
\]
While all physical CT systems are causal, practical DT systems may not be since we can use memory to "shift time". For CT systems we cannot store the infinite number of values between two time points $t_1$ and $t_2$, but we can store the $n_2-n_1$ values of a DT system between two indices $n_1$ and $n_2$ (assuming infinite precision).
\begin{example}
Consider a DT system whose difference equation is
\[
y[n] = -x[n-1] + 2x[n] - x[n+1]
\]
We see the current output $y[n]$ depends on a "future" value of the input $x[n+1]$. Thus the system \textbf{is not} causal. In practice we can shift the difference equation to
\[
y[n-1] = -x[n-2] + 2x[n-1] - x[n]
\]
and then delay the output by one sample to get $y[n]$.
\end{example}
\begin{example}
Consider a DT system whose difference equation is
\[
y[n] = -y[n-1] + 2x[n]
\]
We see the current output $y[n]$ depends on a "past" value of the output $y[n-1]$ and the current input $x[n]$. Thus the system \textbf{is} causal. In practice we can immediately compute $y[n]$ with no delay.
\end{example}
\subsection{Stability}
A DT system is (BIBO) stable if applying a bounded-input
\[
x[n] < \infty \; \forall \; n
\]
results in a bounded-output $x[n] \mapsto y[n]$ and
\[
y[n] < \infty \; \forall \; n
\]
Note, bounded in practice is limited by the physical situation, e.g. the number of bits used to store values.
For example, a DT system described by the LCCDE
\[
y[n+1] - 2 y[n] = x[n+1]
\]
is unstable because the solution $y[n]$ will have one term of the form $\left( 2\right)^n$, for most non-zero inputs $x[n]$ or any non-zero initial condition, that grows unbounded as $n$ increases.
\subsection{Time-invariance}
A DT system is time(index)-invariant if, given
\[
x[n] \mapsto y[n]
\]
then an index-shift of the input leads to the same index-shift in the output
\[
x[n-m] \mapsto y[n-m]
\]
An important example is a DT system described by a LCCDE, e.g.
\[
y[n+1] - \frac{1}{2} y[n] = x[n+1]
\]
or in recursive form
\[
y[n] = \frac{1}{2} y[n-1] + x[n]
\]
If we index shift the input $x[n - m]$ we replace $n$ by $n-m$ and the difference equation becomes
\[
y[n-m+1] - \frac{1}{2} y[n-m] = x[n-m+1]
\]
which has the same solution shifted by $m$
\[
y[n-m] = \frac{1}{2} y[n-m -1] + x[n-m]
\]
If a coefficient depends on $n$ however, e.g.
\[
y[n+1] - \frac{n}{2} y[n] = x[n+1]
\]
so that it is no longer LCC then the solution depends on $m$ and the system is no longer time-invariant.
\subsection{Linearity}
A DT system is linear if the output due to a sum of scaled individual inputs is the same as the scaled sum of the individual outputs with respect to those inputs. In other words given
\[
x_1[n] \mapsto y_1[n] \;\text{and}\; x_2[n] \mapsto y_2[n]
\]
then
\[
a x_1[n] + b x_2[n] \mapsto a y_1[n] + b y_2[n]
\]
for constants $a$ and $b$.
Note this property extends to sums of arbitrary signals, e.g. if
\[
x_i[n] \mapsto y_i[n] \; \forall\; i \in [1 \cdots N]
\]
then given $N$ constants $a_i$, if the system is linear
\[
\sum\limits_{i = 1}^N a_i x_i[n] \mapsto \sum\limits_{i = 1}^N a_i y_i[n]
\]
This is a very important property, called {\it superposition}, and it simplifies the analysis of systems greatly.
An important non-linear system is one described by a LCCDE with non-zero auxiliary conditions at some $n_0$, $y[n_0] = y_0$. As in CT, such systems will have a term in their solution that depends on $y_0$. Given two inputs, each individual response will have that term in it, so their sum has double that term. However the response due to the sum of the inputs would again only have one and the sum of the responses would not be the same as the response of the sum. Such a system cannot be linear. Thus the system must be "at rest" before applying the input in order to be a linear system.
\section{Stable LTI Systems}
The remainder of this course is about stable, linear, time-invariant (LTI) systems. As we have seen in DT such systems can be described by a LCCDE with zero auxiliary (initial) conditions (the system is \emph{at rest}).
We have seen previously how to find the impulse response, $h[n]$, of such systems. We now note some relationships between the impulse response and the system properties described above.
\begin{itemize}
\item If a system is memoryless then $h[n] = C \delta[n]$ for some constant $C$.
\item If a system is causal then $h[n] = 0$ for $n < 0$.
\item If a system is BIBO stable then
\[
\sum\limits_{n=-\infty}^{\infty} |h[n]| < \infty
\]
\end{itemize}
|
{"hexsha": "69205f3c6817950eb3739d47257b41c7cc0fc520", "size": 6831, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "07-dt-lti.tex", "max_stars_repo_name": "clwyatt/notes-2714", "max_stars_repo_head_hexsha": "4715455db62b5455a05e274f25c5b9fb21ed7573", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "07-dt-lti.tex", "max_issues_repo_name": "clwyatt/notes-2714", "max_issues_repo_head_hexsha": "4715455db62b5455a05e274f25c5b9fb21ed7573", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "07-dt-lti.tex", "max_forks_repo_name": "clwyatt/notes-2714", "max_forks_repo_head_hexsha": "4715455db62b5455a05e274f25c5b9fb21ed7573", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.578125, "max_line_length": 592, "alphanum_fraction": 0.6955057825, "num_tokens": 2130}
|
import numpy as np
from py_wake.site._site import UniformWeibullSite
from py_wake.wind_turbines import WindTurbine
from py_wake.wind_turbines.power_ct_functions import PowerCtTabular
# x-coordinates [m] of the 80 Horns Rev 1 turbines (presumably UTM
# easting — verify against the projection used by the caller).  The
# values come in ten groups of eight, i.e. the grid is listed
# column-by-column.
wt_x = [423974, 424042, 424111, 424179, 424247, 424315, 424384, 424452, 424534,
        424602, 424671, 424739, 424807, 424875, 424944, 425012, 425094, 425162,
        425231, 425299, 425367, 425435, 425504, 425572, 425654, 425722, 425791,
        425859, 425927, 425995, 426064, 426132, 426214, 426282, 426351, 426419,
        426487, 426555, 426624, 426692, 426774, 426842, 426911, 426979, 427047,
        427115, 427184, 427252, 427334, 427402, 427471, 427539, 427607, 427675,
        427744, 427812, 427894, 427962, 428031, 428099, 428167, 428235, 428304,
        428372, 428454, 428522, 428591, 428659, 428727, 428795, 428864, 428932,
        429014, 429082, 429151, 429219, 429287, 429355, 429424, 429492]
# y-coordinates [m] (presumably UTM northing): the same 8-value column
# repeated for each of the ten columns of the grid.
wt_y = [6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,
        6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556]
# 3x3 sub-layout (first three turbines of the first three columns).
wt9_x = np.array(wt_x)[[0, 1, 2, 8, 9, 10, 16, 17, 18]]
wt9_y = np.array(wt_y)[[0, 1, 2, 8, 9, 10, 16, 17, 18]]
# 4x4 sub-layout (first four turbines of the first four columns).
i16 = [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27]
wt16_x = np.array(wt_x)[i16]
wt16_y = np.array(wt_y)[i16]
# V80 power curve: column 0 is wind speed [m/s], column 1 is power
# tabulated in kW; the trailing `* [1, 1000]` scales the power column
# by 1000 (kW -> W), leaving wind speed unchanged.
power_curve = np.array([[3.0, 0.0],
                        [4.0, 66.6],
                        [5.0, 154.0],
                        [6.0, 282.0],
                        [7.0, 460.0],
                        [8.0, 696.0],
                        [9.0, 996.0],
                        [10.0, 1341.0],
                        [11.0, 1661.0],
                        [12.0, 1866.0],
                        [13.0, 1958.0],
                        [14.0, 1988.0],
                        [15.0, 1997.0],
                        [16.0, 1999.0],
                        [17.0, 2000.0],
                        [18.0, 2000.0],
                        [19.0, 2000.0],
                        [20.0, 2000.0],
                        [21.0, 2000.0],
                        [22.0, 2000.0],
                        [23.0, 2000.0],
                        [24.0, 2000.0],
                        [25.0, 2000.0]]) * [1, 1000]
# V80 thrust-coefficient curve: wind speed [m/s] vs. Ct [-].
ct_curve = np.array([[3.0, 0.0],
                     [4.0, 0.818],
                     [5.0, 0.806],
                     [6.0, 0.804],
                     [7.0, 0.805],
                     [8.0, 0.806],
                     [9.0, 0.807],
                     [10.0, 0.793],
                     [11.0, 0.739],
                     [12.0, 0.709],
                     [13.0, 0.409],
                     [14.0, 0.314],
                     [15.0, 0.249],
                     [16.0, 0.202],
                     [17.0, 0.167],
                     [18.0, 0.14],
                     [19.0, 0.119],
                     [20.0, 0.102],
                     [21.0, 0.088],
                     [22.0, 0.077],
                     [23.0, 0.067],
                     [24.0, 0.06],
                     [25.0, 0.053]])
class V80(WindTurbine):
    """Vestas V80 wind turbine (80 m rotor, 70 m hub height) with the
    tabulated power and thrust-coefficient curves defined above."""

    def __init__(self, method='linear'):
        """
        Parameters
        ----------
        method : {'linear', 'pchip'}
            linear(fast) or pchip(smooth and gradient friendly) interpolation
        """
        # 'w' declares the power table unit as watts (the table above is
        # scaled from kW to W); Ct shares the same wind-speed column.
        WindTurbine.__init__(self, name='V80', diameter=80, hub_height=70,
                             powerCtFunction=PowerCtTabular(power_curve[:, 0], power_curve[:, 1], 'w',
                                                            ct_curve[:, 1], method=method))
# Alias kept so callers can refer to the turbine by its site name.
HornsrevV80 = V80
class Hornsrev1Site(UniformWeibullSite):
    """Horns Rev 1 site: a 12-sector uniform Weibull wind rose.

    Parameters
    ----------
    ti : float
        Turbulence intensity (default 0.1).
    shear : optional
        Shear model passed through to UniformWeibullSite.
    """

    def __init__(self, ti=.1, shear=None):
        # Per-sector values (12 sectors of 30 degrees): f are sector
        # weights (normalised below), a and k the Weibull scale [m/s]
        # and shape parameters.
        f = [3.597152, 3.948682, 5.167395, 7.000154, 8.364547, 6.43485,
             8.643194, 11.77051, 15.15757, 14.73792, 10.01205, 5.165975]
        a = [9.176929, 9.782334, 9.531809, 9.909545, 10.04269, 9.593921,
             9.584007, 10.51499, 11.39895, 11.68746, 11.63732, 10.08803]
        k = [2.392578, 2.447266, 2.412109, 2.591797, 2.755859, 2.595703,
             2.583984, 2.548828, 2.470703, 2.607422, 2.626953, 2.326172]
        UniformWeibullSite.__init__(self, np.array(f) / np.sum(f), a, k, ti=ti, shear=shear)
        # Default layout: the full 80-turbine Horns Rev 1 grid as an
        # (80, 2) array of (x, y) positions.
        self.initial_position = np.array([wt_x, wt_y]).T
def main():
    """Demo: print V80 geometry and plot its power and Ct curves on
    twin y-axes over wind speeds from 3 to 20 m/s."""
    wt = V80()
    print('Diameter', wt.diameter())
    print('Hub height', wt.hub_height())
    import matplotlib.pyplot as plt
    ws = np.linspace(3, 20, 100)
    # power converted from W to kW for the left axis
    plt.plot(ws, wt.power(ws) * 1e-3, label='Power')
    # empty placeholder line: reserves a legend entry and a colour for
    # the Ct curve, which is drawn on the twin axis below
    c = plt.plot([], [], label='Ct')[0].get_color()
    plt.ylabel('Power [kW]')
    ax = plt.gca().twinx()
    ax.plot(ws, wt.ct(ws), color=c)
    ax.set_ylabel('Ct')
    plt.xlabel('Wind speed [m/s]')
    # legend drawn on the first (power) axes so both labels appear
    plt.gcf().axes[0].legend(loc=1)
    plt.show()


if __name__ == '__main__':
    main()
|
{"hexsha": "1ca8a437f28d9fb8bf111391ff260642ec9facaf", "size": 5398, "ext": "py", "lang": "Python", "max_stars_repo_path": "py_wake/examples/data/hornsrev1.py", "max_stars_repo_name": "aemoser/PyWake", "max_stars_repo_head_hexsha": "889a2c10882195af21339e9bcf2ede0db9b58319", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2019-03-18T14:10:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T17:39:04.000Z", "max_issues_repo_path": "py_wake/examples/data/hornsrev1.py", "max_issues_repo_name": "aemoser/PyWake", "max_issues_repo_head_hexsha": "889a2c10882195af21339e9bcf2ede0db9b58319", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-12T06:13:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-12T06:43:26.000Z", "max_forks_repo_path": "py_wake/examples/data/hornsrev1.py", "max_forks_repo_name": "aemoser/PyWake", "max_forks_repo_head_hexsha": "889a2c10882195af21339e9bcf2ede0db9b58319", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2019-01-11T14:45:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T19:55:29.000Z", "avg_line_length": 42.171875, "max_line_length": 102, "alphanum_fraction": 0.4944423861, "include": true, "reason": "import numpy", "num_tokens": 2078}
|
section \<open>Implementation\<close>
theory Affine_Code
imports
Executable_Euclidean_Space
Affine_Approximation
begin
text \<open>Implementing partial deviations as sorted lists of coefficients.\<close>
subsection \<open>Reverse Sorted, Distinct Association Lists\<close>
(* Reverse-sorted, duplicate-free association lists: the concrete
   representation used to implement partial deviations (pdevs). *)
typedef (overloaded) ('a, 'b) slist =
  "{xs::('a::linorder \<times> 'b) list. distinct (map fst xs) \<and> sorted (rev (map fst xs))}"
  by (auto intro!: exI[where x="[]"])

setup_lifting type_definition_slist

(* Partial lookup of a coefficient by index. *)
lift_definition map_of_slist::"(nat, 'a::zero) slist \<Rightarrow> nat \<Rightarrow> 'a option" is map_of .

lemma finite_dom_map_of_slist[intro, simp]: "finite (dom (map_of_slist xs))"
  by transfer (auto simp: finite_dom_map_of)

abbreviation "the_default a x \<equiv> (case x of None \<Rightarrow> a | Some b \<Rightarrow> b)"

(* Interpret an association list as a total function; absent indices
   map to 0. *)
definition "Pdevs_raw xs i = the_default 0 (map_of xs i)"

lemma nonzeros_Pdevs_raw_subset: "{i. Pdevs_raw xs i \<noteq> 0} \<subseteq> dom (map_of xs)"
  unfolding Pdevs_raw_def[abs_def]
  by transfer (auto simp: Pdevs_raw_def split: option.split_asm)

(* Constructor of abstract pdevs from the list representation;
   registered as the code-generation representation below. *)
lift_definition Pdevs::"(nat, 'a::zero) slist \<Rightarrow> 'a pdevs"
  is Pdevs_raw
  by (rule finite_subset[OF nonzeros_Pdevs_raw_subset]) (simp add: finite_dom_map_of)

code_datatype Pdevs

subsection \<open>Degree\<close>

(* On a reverse-sorted list the degree (one plus the largest index with
   a nonzero coefficient) is determined by the first nonzero head. *)
primrec degree_list::"(nat \<times> 'a::zero) list \<Rightarrow> nat" where
  "degree_list [] = 0"
| "degree_list (x#xs) = (if snd x = 0 then degree_list xs else Suc (fst x))"

lift_definition degree_slist::"(nat, 'a::zero) slist \<Rightarrow> nat" is degree_list .

lemma degree_list_eq_zeroD:
  assumes "degree_list xs = 0"
  shows "the_default 0 (map_of xs i) = 0"
  using assms
  by (induct xs) (auto simp: Pdevs_raw_def sorted_append split: if_split_asm)

lemma degree_slist_eq_zeroD: "degree_slist xs = 0 \<Longrightarrow> degree (Pdevs xs) = 0"
  unfolding degree_eq_Suc_max
  by transfer (auto dest: degree_list_eq_zeroD simp: Pdevs_raw_def)

lemma degree_slist_eq_SucD: "degree_slist xs = Suc n \<Longrightarrow> pdevs_apply (Pdevs xs) n \<noteq> 0"
proof (transfer, goal_cases)
  case (1 xs n)
  thus ?case
    by (induct xs)
      (auto simp: Pdevs_raw_def sorted_append map_of_eq_None_iff[symmetric]
        split: if_split_asm option.split_asm)
qed

lemma degree_slist_zero:
  "degree_slist xs = n \<Longrightarrow> n \<le> j \<Longrightarrow> pdevs_apply (Pdevs xs) j = 0"
proof (transfer, goal_cases)
  case (1 xs n j)
  thus ?case
    by (induct xs)
      (auto simp: Pdevs_raw_def sorted_append split: if_split_asm option.split)
qed

(* Code equation: compute the degree of a pdevs via the slist. *)
lemma compute_degree[code]: "degree (Pdevs xs) = degree_slist xs"
  by (cases "degree_slist xs")
    (auto dest: degree_slist_eq_zeroD degree_slist_eq_SucD intro!: degree_eqI degree_slist_zero)
subsection \<open>Auxiliary Definitions\<close>

(* Pointwise combination of two reverse-sorted association lists;
   indices present in only one list are combined with the neutral
   element z on the missing side. *)
fun binop where
  "binop f z [] [] = []"
| "binop f z ((i, x)#xs) [] = (i, f x z) # binop f z xs []"
| "binop f z [] ((i, y)#ys) = (i, f z y) # binop f z [] ys"
| "binop f z ((i, x)#xs) ((j, y)#ys) =
    (if (i = j) then (i, f x y) # binop f z xs ys
    else if (i > j) then (i, f x z) # binop f z xs ((j, y)#ys)
    else (j, f z y) # binop f z ((i, x)#xs) ys)"

lemma set_binop_elemD1:
  "(a, b) \<in> set (binop f z xs ys) \<Longrightarrow> (a \<in> set (map fst xs) \<or> a \<in> set (map fst ys))"
  by (induct f z xs ys rule: binop.induct) (auto split: if_split_asm)

lemma set_binop_elemD2:
  "(a, b) \<in> set (binop f z xs ys) \<Longrightarrow>
    (\<exists>x\<in>set (map snd xs). b = f x z) \<or>
    (\<exists>y\<in>set (map snd ys). b = f z y) \<or>
    (\<exists>x\<in>set (map snd xs). \<exists>y\<in>set (map snd ys). b = f x y)"
  by (induct f z xs ys rule: binop.induct) (auto split: if_split_asm)

abbreviation "rsorted\<equiv>\<lambda>x. sorted (rev x)"

(* binop preserves the slist invariants (sortedness and distinctness
   of keys). *)
lemma rsorted_binop:
  fixes xs ys::"('a::linorder * 'b) list"
  assumes "rsorted ((map fst xs))"
  assumes "rsorted ((map fst ys))"
  shows "rsorted ((map fst (binop f z xs ys)))"
  using assms
  by (induct f z xs ys rule: binop.induct) (force simp: sorted_append dest!: set_binop_elemD1)+

lemma distinct_binop:
  fixes xs ys::"('a::linorder * 'b) list"
  assumes "distinct (map fst xs)"
  assumes "distinct (map fst ys)"
  assumes "rsorted ((map fst xs))"
  assumes "rsorted ((map fst ys))"
  shows "distinct (map fst (binop f z xs ys))"
  using assms
  by (induct f z xs ys rule: binop.induct)
    (force dest!: set_binop_elemD1 simp: sorted_append)+

lemma binop_plus:
  fixes b::"(nat * 'a::euclidean_space) list"
  shows
    "(\<Sum>(i, y)\<leftarrow>binop op + 0 b ba. e i *\<^sub>R y) = (\<Sum>(i, y)\<leftarrow>b. e i *\<^sub>R y) + (\<Sum>(i, y)\<leftarrow>ba. e i *\<^sub>R y)"
  by (induct "op +::'a\<Rightarrow>_" "0::'a" b ba rule: binop.induct)
    (auto simp: algebra_simps)

lemma binop_compose:
  "binop (\<lambda>x y. f (g x y)) z xs ys = map (apsnd f) (binop g z xs ys)"
  by (induct "\<lambda>x y. f (g x y)" z xs ys rule: binop.induct) auto

lemma linear_cmul_left[intro, simp]: "linear (op * x::real \<Rightarrow> _)"
  by (auto intro!: linearI simp: algebra_simps)

lemma length_merge_sorted_eq:
  "length (binop f z xs ys) = length (binop g y xs ys)"
  by (induction f z xs ys rule: binop.induct) auto

subsection \<open>Pointswise Addition\<close>

(* Pointwise addition is binop with op + and neutral element 0. *)
lift_definition add_slist::"(nat, 'a::{plus, zero}) slist \<Rightarrow> (nat, 'a) slist \<Rightarrow> (nat, 'a) slist" is
  "\<lambda>xs ys. binop op + 0 xs ys"
  by (auto simp: intro!: distinct_binop rsorted_binop)

lemma map_of_binop[simp]: "rsorted (map fst xs) \<Longrightarrow> rsorted (map fst ys) \<Longrightarrow>
  distinct (map fst xs) \<Longrightarrow> distinct (map fst ys) \<Longrightarrow>
  map_of (binop f z xs ys) i =
    (case map_of xs i of
      Some x \<Rightarrow> Some (f x (case map_of ys i of Some x \<Rightarrow> x | None \<Rightarrow> z))
    | None \<Rightarrow> (case map_of ys i of Some y \<Rightarrow> Some (f z y) | None \<Rightarrow> None))"
  by (induct f z xs ys rule: binop.induct)
    (auto split: option.split option.split_asm simp: sorted_append)

lemma pdevs_apply_Pdevs_add_slist[simp]:
  fixes xs ys::"(nat, 'a::monoid_add) slist"
  shows "pdevs_apply (Pdevs (add_slist xs ys)) i =
    pdevs_apply (Pdevs xs) i + pdevs_apply (Pdevs ys) i"
  by (transfer) (auto simp: Pdevs_raw_def split: option.split)

(* Code equation for pdevs addition. *)
lemma compute_add_pdevs[code]: "add_pdevs (Pdevs xs) (Pdevs ys) = Pdevs (add_slist xs ys)"
  by (rule pdevs_eqI) simp
subsection \<open>Set of Coefficients\<close>

lift_definition set_slist::"(nat, 'a::real_vector) slist \<Rightarrow> (nat * 'a) set" is set .

lemma finite_set_slist[intro, simp]: "finite (set_slist xs)"
  by transfer simp

subsection \<open>Domain\<close>

(* list_of_slist drops explicit zero entries, so its keys are exactly
   the pdevs domain. *)
lift_definition list_of_slist::"('a::linorder, 'b::zero) slist \<Rightarrow> ('a * 'b) list"
  is "\<lambda>xs. filter (\<lambda>x. snd x \<noteq> 0) xs" .

lemma compute_pdevs_domain[code]: "pdevs_domain (Pdevs xs) = set (map fst (list_of_slist xs))"
  unfolding pdevs_domain_def
  by transfer (force simp: Pdevs_raw_def split: option.split_asm)

lemma sort_rev_eq_sort: "distinct xs \<Longrightarrow> sort (rev xs) = sort xs"
  by (rule sorted_distinct_set_unique) auto

(* list_of_pdevs on the slist representation is just filtering out the
   zero entries, since the list is already reverse-sorted. *)
lemma compute_list_of_pdevs[code]: "list_of_pdevs (Pdevs xs) = list_of_slist xs"
proof -
  have "list_of_pdevs (Pdevs xs) =
    map (\<lambda>i. (i, pdevs_apply (Pdevs xs) i)) (rev (sorted_list_of_set (pdevs_domain (Pdevs xs))))"
    by (simp add: list_of_pdevs_def)
  also have "(sorted_list_of_set (pdevs_domain (Pdevs xs))) = rev (map fst (list_of_slist xs))"
    unfolding compute_pdevs_domain sorted_list_of_set_sort_remdups
  proof (transfer, goal_cases)
    case prems: (1 xs)
    hence distinct: "distinct (map fst [x\<leftarrow>xs . snd x \<noteq> 0])"
      by (auto simp: filter_map distinct_map intro: subset_inj_on)
    with prems show ?case
      using sort_rev_eq_sort[symmetric, OF distinct]
      by (auto simp: rev_map rev_filter distinct_map distinct_remdups_id
        intro!: sorted_sort_id sorted_filter)
  qed
  also
  have "map (\<lambda>i. (i, pdevs_apply (Pdevs xs) i)) (rev \<dots>) = list_of_slist xs"
  proof (transfer, goal_cases)
    case (1 xs)
    thus ?case
      unfolding Pdevs_raw_def o_def rev_rev_ident map_map
      by (subst map_cong[where g="\<lambda>x. x"]) (auto simp: map_filter_map_filter)
  qed
  finally show ?thesis .
qed

lift_definition slist_of_pdevs::"'a pdevs \<Rightarrow> (nat, 'a::real_vector) slist" is list_of_pdevs
  by (auto simp: list_of_pdevs_def rev_map rev_filter
    filter_map o_def distinct_map image_def
    intro!: distinct_filter sorted_filter[of "\<lambda>x. x", simplified])

subsection \<open>Application\<close>

lift_definition slist_apply::"('a::linorder, 'b::zero) slist \<Rightarrow> 'a \<Rightarrow> 'b" is
  "\<lambda>xs i. the_default 0 (map_of xs i)" .

(* Code equation: pdevs application is slist lookup with default 0. *)
lemma compute_pdevs_apply[code]: "pdevs_apply (Pdevs x) i = slist_apply x i"
  by transfer (auto simp: Pdevs_raw_def)

subsection \<open>Total Deviation\<close>

(* On the list representation, the total deviation is the sum of the
   absolute values of the stored coefficients. *)
lift_definition tdev_slist::"(nat, 'a::ordered_euclidean_space) slist \<Rightarrow> 'a" is
  "sum_list o map (abs o snd)" .

lemma tdev_slist_sum: "tdev_slist xs = sum (abs \<circ> snd) (set_slist xs)"
  by transfer (auto simp: distinct_map sum_list_distinct_conv_sum_set[symmetric] o_def)

lemma pdevs_apply_set_slist: "x \<in> set_slist xs \<Longrightarrow> snd x = pdevs_apply (Pdevs xs) (fst x)"
  by transfer (auto simp: Pdevs_raw_def)

lemma
  tdev_list_eq_zeroI:
  shows "(\<And>i. pdevs_apply (Pdevs xs) i = 0) \<Longrightarrow> tdev_slist xs = 0"
  unfolding tdev_slist_sum
  by (auto simp: pdevs_apply_set_slist)

lemma inj_on_fst_set_slist: "inj_on fst (set_slist xs)"
  by transfer (simp add: distinct_map)

lemma pdevs_apply_Pdevs_eq_0:
  "pdevs_apply (Pdevs xs) i = 0 \<longleftrightarrow> ((\<forall>x. (i, x) \<in> set_slist xs \<longrightarrow> x = 0))"
  by transfer (safe, auto simp: Pdevs_raw_def split: option.split)

(* Code equation: tdev computed via the stored entries; the chain of
   equalities restricts the defining sum over [0, degree) to the
   indices actually present in the list. *)
lemma compute_tdev[code]: "tdev (Pdevs xs) = tdev_slist xs"
proof -
  have "tdev (Pdevs xs) = (\<Sum>i<degree (Pdevs xs). \<bar>pdevs_apply (Pdevs xs) i\<bar>)"
    by (simp add: tdev_def)
  also have "\<dots> =
    (\<Sum>i <degree (Pdevs xs).
      if pdevs_apply (Pdevs xs) i = 0 then 0 else \<bar>pdevs_apply (Pdevs xs) i\<bar>)"
    by (auto intro!: sum.cong)
  also have "\<dots> =
    (\<Sum>i\<in>{0..<degree (Pdevs xs)} \<inter> {x. pdevs_apply (Pdevs xs) x \<noteq> 0}.
      \<bar>pdevs_apply (Pdevs xs) i\<bar>)"
    by (auto simp: sum.If_cases Collect_neg_eq atLeast0LessThan)
  also have "\<dots> = (\<Sum>x\<in>fst ` set_slist xs. \<bar>pdevs_apply (Pdevs xs) x\<bar>)"
    by (rule sum.mono_neutral_cong_left)
      (force simp: pdevs_apply_Pdevs_eq_0 intro!: imageI degree_gt)+
  also have "\<dots> = (\<Sum>x\<in>set_slist xs. \<bar>pdevs_apply (Pdevs xs) (fst x)\<bar>)"
    by (rule sum.reindex_cong[of fst]) (auto simp: inj_on_fst_set_slist)
  also have "\<dots> = tdev_slist xs"
    by (simp add: tdev_slist_sum pdevs_apply_set_slist)
  finally show ?thesis .
qed
subsection \<open>Minkowski Sum\<close>

lemma dropWhile_rsorted_eq_filter:
  "rsorted (map fst xs) \<Longrightarrow> dropWhile (\<lambda>(i, x). i \<ge> (m::nat)) xs = filter (\<lambda>(i, x). i < m) xs"
  (is "_ \<Longrightarrow> ?lhs xs = ?rhs xs")
proof (induct xs)
  case (Cons x xs)
  hence "?rhs (x#xs) = ?lhs (x#xs)"
    by (auto simp: sorted_append filter_id_conv intro: sym)
  thus ?case ..
qed simp

(* Minkowski sum on lists: shift the indices of ys up by m and keep
   only the entries of xs with index below m; the shifted ys entries
   go first to preserve reverse sortedness. *)
lift_definition msum_slist::"nat \<Rightarrow> (nat, 'a) slist \<Rightarrow> (nat, 'a) slist \<Rightarrow> (nat, 'a) slist"
  is "\<lambda>m xs ys. map (apfst (\<lambda>n. n + m)) ys @ dropWhile (\<lambda>(i, x). i \<ge> m) xs"
proof (safe, goal_cases)
  case (1 n l1 l2)
  thus ?case
    by (auto dest: set_dropWhileD
      simp: dropWhile_rsorted_eq_filter sorted_append rev_map rev_filter sorted_filter distinct_map
      intro!: comp_inj_on
      subset_inj_on[where A="{x \<in> set l1. case x of (i, x) \<Rightarrow> i < n}" and B="set l1"])
next
  case prems: (2 n l1 l2)
  hence "sorted (map ((\<lambda>na. na + n) \<circ> fst) (rev l2))"
    unfolding rev_map
    by (intro sorted_nth_monoI) (auto dest!: sorted_nth_mono)
  with prems show ?case
    by (auto simp: sorted_append dropWhile_rsorted_eq_filter rev_map rev_filter sorted_filter)
qed

lemma slist_apply_msum_slist:
  "slist_apply (msum_slist m xs ys) i =
    (if i < m then slist_apply xs i else slist_apply ys (i - m))"
proof (transfer, goal_cases)
  case prems: (1 m xs ys i)
  thus ?case
  proof (cases "i \<in> dom (map_of (map (\<lambda>(x, y). (x + m, y)) ys))")
    case False
    have "\<And>a. i < m \<Longrightarrow> i \<notin> fst ` {x \<in> set xs. case x of (i, x) \<Rightarrow> i < m} \<Longrightarrow> (i, a) \<notin> set xs"
      "\<And>a. i \<notin> fst ` set xs \<Longrightarrow> (i, a) \<notin> set xs"
      "\<And>a. m \<le> i \<Longrightarrow> i \<notin> fst ` (\<lambda>(x, y). (x + m, y)) ` set ys \<Longrightarrow> (i - m, a) \<notin> set ys"
      by force+
    thus ?thesis
      using prems False
      by (auto simp add: dropWhile_rsorted_eq_filter map_of_eq_None_iff distinct_map_fst_snd_eqD
        split: option.split dest!: map_of_SomeD)
  qed (force simp: map_of_eq_None_iff distinct_map_fst_snd_eqD
    split: option.split
    dest!: map_of_SomeD)
qed

lemma pdevs_apply_msum_slist:
  "pdevs_apply (Pdevs (msum_slist m xs ys)) i =
    (if i < m then pdevs_apply (Pdevs xs) i else pdevs_apply (Pdevs ys) (i - m))"
  by (auto simp: compute_pdevs_apply slist_apply_msum_slist)

(* Code equation for the Minkowski sum of pdevs. *)
lemma compute_msum_pdevs[code]: "msum_pdevs m (Pdevs xs) (Pdevs ys) = Pdevs (msum_slist m xs ys)"
  by (rule pdevs_eqI) (auto simp: pdevs_apply_msum_slist pdevs_apply_msum_pdevs)

subsection \<open>Unary Operations\<close>

(* All coefficient-wise unary operations share one combinator: map a
   function over the stored values (sound whenever f 0 = 0). *)
lift_definition map_slist::"('a \<Rightarrow> 'b) \<Rightarrow> (nat, 'a) slist \<Rightarrow> (nat, 'b) slist" is "\<lambda>f. map (apsnd f)"
  by simp

lemma pdevs_apply_map_slist:
  "f 0 = 0 \<Longrightarrow> pdevs_apply (Pdevs (map_slist f xs)) i = f (pdevs_apply (Pdevs xs) i)"
  by transfer
    (force simp: Pdevs_raw_def map_of_eq_None_iff distinct_map_fst_snd_eqD image_def
      split: option.split dest: distinct_map_fst_snd_eqD)

(* Code equations for all zero-preserving unary pdevs operations. *)
lemma compute_scaleR_pdves[code]: "scaleR_pdevs r (Pdevs xs) = Pdevs (map_slist (\<lambda>x. r *\<^sub>R x) xs)"
  and compute_pdevs_scaleR[code]: "pdevs_scaleR (Pdevs rs) x = Pdevs (map_slist (\<lambda>r. r *\<^sub>R x) rs)"
  and compute_uminus_pdevs[code]: "uminus_pdevs (Pdevs xs) = Pdevs (map_slist (\<lambda>x. - x) xs)"
  and compute_pdevs_inner[code]: "pdevs_inner (Pdevs xs) b = Pdevs (map_slist (\<lambda>x. x \<bullet> b) xs)"
  and compute_pdevs_inner2[code]:
    "pdevs_inner2 (Pdevs xs) b c = Pdevs (map_slist (\<lambda>x. (x \<bullet> b, x \<bullet> c)) xs)"
  and compute_inner_scaleR_pdevs[code]:
    "inner_scaleR_pdevs x (Pdevs ys) = Pdevs (map_slist (\<lambda>y. (x \<bullet> y) *\<^sub>R y) ys)"
  and compute_trunc_pdevs[code]:
    "trunc_pdevs p (Pdevs xs) = Pdevs (map_slist (\<lambda>x. eucl_truncate_down p x) xs)"
  and compute_trunc_err_pdevs[code]:
    "trunc_err_pdevs p (Pdevs xs) = Pdevs (map_slist (\<lambda>x. eucl_truncate_down p x - x) xs)"
  by (auto intro!: pdevs_eqI simp: pdevs_apply_map_slist zero_prod_def)

subsection \<open>Filter\<close>

lift_definition filter_slist::"(nat \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> (nat, 'a) slist \<Rightarrow> (nat, 'a) slist"
  is "\<lambda>P xs. filter (\<lambda>(i, x). (P i x)) xs"
  by (auto simp: o_def filter_map distinct_map rev_map rev_filter sorted_filter
    intro: subset_inj_on)

lemma slist_apply_filter_slist: "slist_apply (filter_slist P xs) i =
    (if P i (slist_apply xs i) then slist_apply xs i else 0)"
  by transfer (force simp: Pdevs_raw_def o_def map_of_eq_None_iff distinct_map_fst_snd_eqD
    dest: map_of_SomeD distinct_map_fst_snd_eqD split: option.split)

lemma pdevs_apply_filter_slist: "pdevs_apply (Pdevs (filter_slist P xs)) i =
    (if P i (pdevs_apply (Pdevs xs) i) then pdevs_apply (Pdevs xs) i else 0)"
  by (simp add: compute_pdevs_apply slist_apply_filter_slist)

(* Code equation for filtering pdevs by a predicate on (index, value). *)
lemma compute_filter_pdevs[code]: "filter_pdevs P (Pdevs xs) = Pdevs (filter_slist P xs)"
  by (auto simp: pdevs_apply_filter_slist intro!: pdevs_eqI)
subsection \<open>Constant\<close>
lift_definition zero_slist::"(nat, 'a) slist" is "[]" by simp
lemma compute_zero_pdevs[code]: "zero_pdevs = Pdevs (zero_slist)"
by transfer (auto simp: Pdevs_raw_def)
lift_definition One_slist::"(nat, 'a::executable_euclidean_space) slist"
is "rev (zip [0..<length (Basis_list::'a list)] (Basis_list::'a list))"
by (simp add: zip_rev[symmetric])
lemma
map_of_rev_zip_upto_length_eq_nth:
assumes "i < length B"
shows "(map_of (rev (zip [0..<length B] B)) i) = Some (B ! i)"
proof -
have "length (rev [0..<length B]) = length (rev B)"
by simp
from map_of_zip_is_Some[OF this, of i] assms
obtain y where y: "map_of (zip (rev [0..<length B]) (rev B)) i = Some y"
by (auto simp: zip_rev)
hence "y = B ! i"
by (auto simp: in_set_zip rev_nth)
with y show ?thesis
by (simp add: zip_rev)
qed
lemma
map_of_rev_zip_upto_length_eq_None:
assumes "\<not>i < length B"
shows "(map_of (rev (zip [0..<length B] B)) i) = None"
using assms
by (auto simp: map_of_eq_None_iff in_set_zip)
lemma pdevs_apply_One_slist:
"pdevs_apply (Pdevs One_slist) i =
(if i < length (Basis_list::'a::executable_euclidean_space list)
then (Basis_list::'a list) ! i
else 0)"
by transfer
(auto simp: Pdevs_raw_def map_of_rev_zip_upto_length_eq_nth map_of_rev_zip_upto_length_eq_None
split: option.split)
lemma compute_One_pdevs[code]: "One_pdevs = Pdevs One_slist"
by (rule pdevs_eqI) (simp add: pdevs_apply_One_slist)
subsection \<open>Update\<close>

(* Insert or overwrite key n in a descending-by-key association list,
   keeping keys distinct and in reverse sorted order. *)
primrec update_list::"nat \<Rightarrow> 'a \<Rightarrow> (nat * 'a) list \<Rightarrow> (nat * 'a) list"
  where
  "update_list n x [] = [(n, x)]"
| "update_list n x (y#ys) =
    (if n > fst y then (n, x)#y#ys
    else if n = fst y then (n, x)#ys
    else y#(update_list n x ys))"

(* update_list behaves like a pointwise map update. *)
lemma map_of_update_list[simp]: "map_of (update_list n x ys) = (map_of ys)(n:=Some x)"
  by (induct ys) auto

lemma in_set_update_listD:
  assumes "y \<in> set (update_list n x ys)"
  shows "y = (n, x) \<or> (y \<in> set ys)"
  using assms
  by (induct ys) (auto split: if_split_asm)

lemma in_set_update_listI:
  "y = (n, x) \<or> (fst y \<noteq> n \<and> y \<in> set ys) \<Longrightarrow> y \<in> set (update_list n x ys)"
  by (induct ys) (auto split: if_split_asm)

lemma in_set_update_list: "(n, x) \<in> set (update_list n x xs)"
  by (induct xs) simp_all

(* Only the updated key can disappear from the set of entries. *)
lemma overwrite_update_list: "(a, b) \<in> set xs \<Longrightarrow> (a, b) \<notin> set (update_list n x xs) \<Longrightarrow> a = n"
  by (induct xs) (auto split: if_split_asm)

lemma insert_update_list:
  "distinct (map fst xs) \<Longrightarrow> rsorted (map fst xs) \<Longrightarrow> (a, b) \<in> set (update_list a x xs) \<Longrightarrow> b = x"
  by (induct xs) (force split: if_split_asm simp: sorted_append)+

lemma set_update_list_eq: "distinct (map fst xs) \<Longrightarrow> rsorted (map fst xs) \<Longrightarrow>
    set (update_list n x xs) = insert (n, x) (set xs - {x. fst x = n})"
  by (auto intro!: in_set_update_listI dest: in_set_update_listD simp: insert_update_list)

(* update_list preserves the slist invariant, so it lifts to slists. *)
lift_definition update_slist::"nat \<Rightarrow> 'a \<Rightarrow> (nat, 'a) slist \<Rightarrow> (nat, 'a) slist" is update_list
proof goal_cases
  case (1 n a l)
  thus ?case
    by (induct l) (force simp: sorted_append distinct_map not_less dest!: in_set_update_listD)+
qed

lemma pdevs_apply_update_slist: "pdevs_apply (Pdevs (update_slist n x xs)) i =
  (if i = n then x else pdevs_apply (Pdevs xs) i)"
  by transfer (auto simp: Pdevs_raw_def)

(* Code equation for pdev_upd. *)
lemma compute_pdev_upd[code]: "pdev_upd (Pdevs xs) n x = Pdevs (update_slist n x xs)"
  by (rule pdevs_eqI) (auto simp: pdevs_apply_update_slist)

subsection \<open>Approximate Total Deviation\<close>

(* Fold a function over the nonzero values stored in an slist. *)
lift_definition fold_slist::"('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> (nat, 'a::zero) slist \<Rightarrow> 'b \<Rightarrow> 'b"
  is "\<lambda>f xs z. fold (f o snd) (filter (\<lambda>x. snd x \<noteq> 0) xs) z" .

lemma Pdevs_raw_Cons:
  "Pdevs_raw ((a, b) # xs) = (\<lambda>i. if i = a then b else Pdevs_raw xs i)"
  by (auto simp: Pdevs_raw_def map_of_eq_None_iff
    dest!: map_of_SomeD
    split: option.split)

lemma zeros_aux: "- (\<lambda>i. if i = a then b else Pdevs_raw xs i) -` {0} \<subseteq>
    - Pdevs_raw xs -` {0} \<union> {a}"
  by auto

(* Code equation: truncated total deviation as a fold with upward rounding. *)
lemma compute_tdev'[code]:
  "tdev' p (Pdevs xs) = fold_slist (\<lambda>a b. eucl_truncate_up p (\<bar>a\<bar> + b)) xs 0"
  unfolding tdev'_def sum_list'_def compute_list_of_pdevs
  by transfer (auto simp: o_def fold_map)
subsection \<open>Equality\<close>

lemma slist_apply_list_of_slist_eq: "slist_apply a i = the_default 0 (map_of (list_of_slist a) i)"
  by (transfer)
    (force split: option.split simp: map_of_eq_None_iff distinct_map_fst_snd_eqD
      dest!: map_of_SomeD)

(* Code equation: pdevs equality reduces to equality of canonical lists. *)
lemma compute_equal_pdevs[code]:
  "equal_class.equal (Pdevs a) (Pdevs b) \<longleftrightarrow> (list_of_slist a) = (list_of_slist b)"
  by (auto intro!: pdevs_eqI simp: equal_pdevs_def compute_pdevs_apply slist_apply_list_of_slist_eq
    compute_list_of_pdevs[symmetric])

subsection \<open>From List of Generators\<close>

(* Build an slist from a plain list, keying each element by its position. *)
lift_definition slist_of_list::"'a::zero list \<Rightarrow> (nat, 'a) slist"
  is "\<lambda>xs. rev (zip [0..<length xs] xs)"
  by (auto simp: rev_map[symmetric] )

lemma slist_apply_slist_of_list:
  "slist_apply (slist_of_list xs) i = (if i < length xs then xs ! i else 0)"
  by transfer (auto simp: map_of_rev_zip_upto_length_eq_nth map_of_rev_zip_upto_length_eq_None)

(* Code equation for pdevs_of_list. *)
lemma compute_pdevs_of_list[code]: "pdevs_of_list xs = Pdevs (slist_of_list xs)"
  by (rule pdevs_eqI)
    (auto simp: compute_pdevs_apply slist_apply_slist_of_list pdevs_apply_pdevs_of_list)
end
|
{"author": "rizaldialbert", "repo": "overtaking", "sha": "0e76426d75f791635cd9e23b8e07669b7ce61a81", "save_path": "github-repos/isabelle/rizaldialbert-overtaking", "path": "github-repos/isabelle/rizaldialbert-overtaking/overtaking-0e76426d75f791635cd9e23b8e07669b7ce61a81/Affine_Arithmetic/Affine_Code.thy"}
|
# Convert a temperature from degrees Fahrenheit to degrees Rankine.
# NOTE(review): uses the common 460.0 engineering approximation; the exact
# offset is 459.67 — confirm which is intended before changing.
function fahrenhetToRankine(f)
    return f + 460.0
end
# Convert a temperature from degrees Rankine to degrees Fahrenheit
# (inverse of fahrenhetToRankine, same 460.0 offset).
function rankineToFahrenheit(r)
    return r - 460.0
end
|
{"hexsha": "85ed0fc96a26a059582de18cd7407aac91264831", "size": 65, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/UnitConversions.jl", "max_stars_repo_name": "leytzher/Shot", "max_stars_repo_head_hexsha": "b35d2dab13e74dc1cd251197029ae31d291fdbe4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/UnitConversions.jl", "max_issues_repo_name": "leytzher/Shot", "max_issues_repo_head_hexsha": "b35d2dab13e74dc1cd251197029ae31d291fdbe4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/UnitConversions.jl", "max_forks_repo_name": "leytzher/Shot", "max_forks_repo_head_hexsha": "b35d2dab13e74dc1cd251197029ae31d291fdbe4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6666666667, "max_line_length": 32, "alphanum_fraction": 0.7538461538, "num_tokens": 27}
|
import cv2
import numpy as np
def _draw_lines_128(lines, thickness):
    """Rasterize [x0, y0, x1, y1] segments onto a 128x128 float32 mask.

    Every pixel touched by a line is set to 1; all others stay 0.
    """
    heatmap = np.zeros((128, 128), np.uint8)
    for x0, y0, x1, y1 in lines:
        p0 = (int(round(x0)), int(round(y0)))
        p1 = (int(round(x1)), int(round(y1)))
        cv2.line(heatmap, p0, p1, (1, 1, 1), thickness, 8)
    return np.array(heatmap, np.float32)


def F1_score_128(pred_lines_128_list, gt_lines_128_list, thickness=3):
    """
    @brief heat F1 score, draw the lines to a 128 * 128 img
    @pred_lines_128 [ [x0, y0, x1, y1], ... ]
    @gt_lines_128_list [ [x0, y0, x1, y1], ... ]

    Both line sets are rasterized to binary masks (shared helper removes
    the previously duplicated drawing loop); F1/recall/precision are then
    computed on mask overlap.
    :return: (fscore, recall, precision)
    """
    pred_heatmap = _draw_lines_128(pred_lines_128_list, thickness)
    gt_heatmap = _draw_lines_128(gt_lines_128_list, thickness)
    intersection = np.sum(gt_heatmap * pred_heatmap)
    eps = 0.001  # avoids division by zero when a mask is empty
    recall = intersection / (np.sum(gt_heatmap) + eps)
    precision = intersection / (np.sum(pred_heatmap) + eps)
    fscore = (2 * precision * recall) / (precision + recall + eps)
    return fscore, recall, precision
def msTPFP(line_pred, line_gt, threshold):
    """Greedily match predicted line segments to ground truth.

    Lines are flat [x0, y0, x1, y1] rows. For each prediction the squared
    endpoint distance to its closest ground-truth line (best of the two
    endpoint pairings) is compared against `threshold`; each ground-truth
    line can be hit at most once, extra matches count as false positives.

    :param line_pred: array reshapeable to (-1, 2, 2) of predicted lines.
    :param line_gt: array reshapeable to (-1, 2, 2) of ground-truth lines.
    :param threshold: squared-distance threshold for a match.
    :return: (tp, fp) float arrays with one 0/1 entry per predicted line.
    """
    line_pred = line_pred.reshape(-1, 2, 2)[:, :, ::-1]
    line_gt = line_gt.reshape(-1, 2, 2)[:, :, ::-1]
    diff = ((line_pred[:, None, :, None] - line_gt[:, None]) ** 2).sum(-1)
    # Take the better of the two endpoint correspondences.
    diff = np.minimum(
        diff[:, :, 0, 0] + diff[:, :, 1, 1], diff[:, :, 0, 1] + diff[:, :, 1, 0]
    )
    choice = np.argmin(diff, 1)
    dist = np.min(diff, 1)
    # np.bool / np.float were deprecated and removed in NumPy >= 1.24;
    # use the supported dtypes instead.
    hit = np.zeros(len(line_gt), dtype=bool)
    tp = np.zeros(len(line_pred), dtype=np.float64)
    fp = np.zeros(len(line_pred), dtype=np.float64)
    for i in range(len(line_pred)):
        if dist[i] < threshold and not hit[choice[i]]:
            hit[choice[i]] = True
            tp[i] = 1
        else:
            fp[i] = 1
    return tp, fp
def TPFP(lines_dt, lines_gt, threshold):
    """Mark each detected line as true positive or false positive.

    NOTE(review): this duplicates msTPFP above (only cosmetic differences);
    consider consolidating the two.

    :param lines_dt: array reshapeable to (-1, 2, 2) of detected lines.
    :param lines_gt: array reshapeable to (-1, 2, 2) of ground-truth lines.
    :param threshold: squared-distance threshold for a match.
    :return: (tp, fp) float arrays with one 0/1 entry per detected line.
    """
    lines_dt = lines_dt.reshape(-1, 2, 2)[:, :, ::-1]
    lines_gt = lines_gt.reshape(-1, 2, 2)[:, :, ::-1]
    diff = ((lines_dt[:, None, :, None] - lines_gt[:, None]) ** 2).sum(-1)
    # Take the better of the two endpoint correspondences.
    diff = np.minimum(
        diff[:, :, 0, 0] + diff[:, :, 1, 1], diff[:, :, 0, 1] + diff[:, :, 1, 0]
    )
    choice = np.argmin(diff, 1)
    dist = np.min(diff, 1)
    # np.bool / np.float were deprecated and removed in NumPy >= 1.24;
    # use the supported dtypes instead.
    hit = np.zeros(len(lines_gt), dtype=bool)
    tp = np.zeros(len(lines_dt), dtype=np.float64)
    fp = np.zeros(len(lines_dt), dtype=np.float64)
    for i in range(lines_dt.shape[0]):
        if dist[i] < threshold and not hit[choice[i]]:
            hit[choice[i]] = True
            tp[i] = 1
        else:
            fp[i] = 1
    return tp, fp
def AP(tp, fp):
    """Average precision from per-detection tp/fp arrays.

    `tp` doubles as the recall curve; precision is made monotonically
    non-increasing (right to left) before integrating over the distinct
    recall steps.
    """
    precision = tp / np.maximum(tp + fp, 1e-9)
    recall = np.concatenate(([0.0], tp, [1.0]))
    precision = np.concatenate(([0.0], precision, [0.0]))
    # Envelope: every point carries the max precision to its right.
    for idx in range(precision.size - 1, 0, -1):
        if precision[idx] > precision[idx - 1]:
            precision[idx - 1] = precision[idx]
    steps = np.where(recall[1:] != recall[:-1])[0]
    return np.sum((recall[steps + 1] - recall[steps]) * precision[steps + 1])
|
{"hexsha": "b9cb8c75a2a348b538f652c3c9d599c2687a7bc3", "size": 3249, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlsd_pytorch/metric.py", "max_stars_repo_name": "michelebechini/mlsd_pytorch", "max_stars_repo_head_hexsha": "fa7dcd10dd1f4e12f33df027c14950c8b655ff06", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2021-06-07T08:07:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T08:33:19.000Z", "max_issues_repo_path": "mlsd_pytorch/metric.py", "max_issues_repo_name": "michelebechini/mlsd_pytorch", "max_issues_repo_head_hexsha": "fa7dcd10dd1f4e12f33df027c14950c8b655ff06", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-06-08T10:44:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T00:06:02.000Z", "max_forks_repo_path": "mlsd_pytorch/metric.py", "max_forks_repo_name": "michelebechini/mlsd_pytorch", "max_forks_repo_head_hexsha": "fa7dcd10dd1f4e12f33df027c14950c8b655ff06", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2021-06-08T02:35:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T01:27:37.000Z", "avg_line_length": 31.8529411765, "max_line_length": 80, "alphanum_fraction": 0.5453985842, "include": true, "reason": "import numpy", "num_tokens": 1092}
|
Require Import Coq.ZArith.ZArith.
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.montgomery32_2e384m5x2e368m1_12limbs.Synthesis.
Local Open Scope Z_scope.
(* TODO : change this to field once field isomorphism happens *)
(* Synthesized zero-test: [nonzero a] is a 1-limb bounded word whose
   "=? 0" test agrees with [a] mapping to 0 under [phiM_small]. *)
Definition nonzero :
  { nonzero : feBW_small -> BoundedWord.BoundedWord 1 adjusted_bitwidth bound1
  | forall a, (BoundedWord.BoundedWordToZ _ _ _ (nonzero a) =? 0) = (if Decidable.dec (phiM_small a = F.of_Z m 0) then true else false) }.
Proof.
  (* Profile the synthesis tactic; timing output appears during the build. *)
  Set Ltac Profiling.
  Time synthesize_nonzero ().
  Show Ltac Profile.
Time Defined.
(* Surface any axioms the synthesized term depends on. *)
Print Assumptions nonzero.
|
{"author": "anonymous-code-submission-01", "repo": "sp2019-54-code", "sha": "8867f5bed0821415ec99f593b1d61f715ed4f789", "save_path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code", "path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code/sp2019-54-code-8867f5bed0821415ec99f593b1d61f715ed4f789/src/Specific/montgomery32_2e384m5x2e368m1_12limbs/fenz.v"}
|
[GOAL]
a b : ℕ
⊢ a ≠ b ↔ ↑a ≠ ↑b
[PROOFSTEP]
simp only [ne_eq, Int.cast_eq_cast_iff_Nat]
|
{"mathlib_filename": "Mathlib.Tactic.Zify", "llama_tokens": 55}
|
import pandas as pd
import argparse
import matplotlib.pyplot as plt
import numpy as np
import json
import os
from sklearn.metrics import r2_score
from scipy import stats
def _check_col_type(df, col, df_type):
"""
Each column we are working with should be of type "float".
Checks to ensure this is true, otherwise raises an error.
:Pandas.DataFrame df: df of interest
:str col: column of interest
:str df_type: df we are looking at
"""
if df[col].dtype != 'float':
raise ValueError(f'{df_type} column not found')
def _get_col(df, col, df_type):
    """
    Resolve and return the column of interest from a csv. The optional
    `col` argument is used when provided; otherwise the second column of
    the df is assumed. Used for getting the value_col and the preds_col.
    :Pandas.DataFrame df: df of interest
    :str col: provided col arg
    :str df_type: label used in error messages
    """
    if col:
        if col not in df.columns:
            raise ValueError(f'{df_type} column not found in test_csv')
        chosen = col
    else:
        # Fall back to the second column of the df.
        chosen = df.columns[1]
    _check_col_type(df, chosen, df_type)
    return df[chosen]
def _get_uncertainty_col(df, uncertainty_col, split_UQ):
    """
    Gets the uncertainty column of the test_csv. Uses the optional
    uncertainty_col arg if provided. If split_UQ is flagged, looks for
    two columns -- 'Aleatoric' and 'Epistemic' -- and returns their names
    as a tuple (callers combine them via _combine_cols). Otherwise the
    column named 'Uncertainty' is returned as a series.
    :Pandas.DataFrame df: the preds_csv
    :str uncertainty_col: provided uncertainty_col arg
    :bool split_UQ: uncertainty is split to aleatoric, epistemic
    :raises ValueError: when the expected column(s) are missing or non-float
    """
    columns = df.columns
    if uncertainty_col:
        if uncertainty_col not in columns:
            raise ValueError('Uncertainty column not found in test_csv')
        _check_col_type(df, uncertainty_col, 'Uncertainty')
        return df[uncertainty_col]
    if split_UQ:
        if 'Aleatoric' not in columns or 'Epistemic' not in columns:
            raise ValueError('Uncertainty appears not to be split')
        _check_col_type(df, 'Aleatoric', 'Aleatoric')
        _check_col_type(df, 'Epistemic', 'Epistemic')
        return 'Aleatoric', 'Epistemic'
    # Fixed: a missing 'Uncertainty' column previously surfaced as an
    # unlabeled ValueError from list.index; raise a clear message instead.
    if 'Uncertainty' not in columns:
        raise ValueError('Uncertainty column not found in test_csv')
    _check_col_type(df, 'Uncertainty', 'Uncertainty')
    return df['Uncertainty']
def _combine_cols(df, col_names):
"""
Sums the aleatoric and epistemic columns to make a new
uncertainty column for analysis.
:Pandas.DataFrame df: preds dataframe
:tuple col_names: strings of names of aleatoric, epistemic cols
"""
# TODO: Allow analysis for both uncertainties?
aleatoric, epistemic = col_names
df['Uncertainty'] = df. \
loc[:, [aleatoric, epistemic]].sum(axis=1)
return df['Uncertainty']
def _get_r2(x, y, rounding=4):
    """
    Compute the r2 value between two columns, reduced to the given
    number of significant figures (default 4).
    :pandas.DataSeries x: first series
    :pandas.DataSeries y: second series
    :int rounding: significant figures to keep
    """
    fmt = '%.{}g'.format(rounding)
    return float(fmt % r2_score(x, y))
def _create_scatter(x, y):
    """
    Display a scatter plot of x vs y with a red dashed least-squares
    fit line.
    :Pandas.DataSeries x: first series (true values)
    :Pandas.DataSeries y: second series (predicted values)
    """
    plt.scatter(x, y)
    coeffs = np.polyfit(x, y, 1)
    fitted = np.poly1d(coeffs)(x)
    plt.plot(x, fitted, 'r--', lw=1)
    plt.xlabel('True Value', fontsize=16)
    plt.ylabel('Predicted Value', fontsize=16)
    plt.title('True vs. Predicted', fontsize=20)
    plt.show()
def _save_analysis(preds_r2, unc_r2, spearman, args):
"""
Saves the values from analysis to a .json file.
Uses the directory given and strips the name of the
predictions file to name this analysis file.
:float preds_r2: r2 value for predictions calculated
:float unc_r2: r2 value for uncertaitny calculated
:float spearman: spearman value calculated
:args: args provided by user
"""
preds_name = args.preds_path
save_dir = args.save_dir
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
prefix = preds_name.split('/')[-1][:-4] + '_analysis.json'
path = os.path.join(save_dir, prefix)
data = {'Predictions R^2': preds_r2,
'Uncertainty R^2': unc_r2,
'Spearman': spearman}
print('Saving analysis to ' + path)
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def analyze(args):
    """
    Run the full analysis: load the test and prediction csvs, compute
    R^2 between true values and predictions, R^2 and Spearman
    correlation between absolute error and uncertainty, then optionally
    save the results (args.save_dir) and show a scatter plot
    (unless args.quiet).
    :args: parsed arguments (see the argparse setup at the bottom)
    """
    test_csv = pd.read_csv(args.test_path)
    preds_csv = pd.read_csv(args.preds_path)

    value_col = _get_col(test_csv, args.value_col, 'Value')
    preds_col = _get_col(preds_csv, args.preds_col, 'Preds')
    uncertainty_col = _get_uncertainty_col(preds_csv, args.uncertainty_col,
                                           args.split_UQ)

    # Combine aleatoric and epistemic to make an uncertainty column.
    # (isinstance is the idiomatic type check, not `type(x) == tuple`.)
    if isinstance(uncertainty_col, tuple):
        uncertainty_col = _combine_cols(preds_csv, uncertainty_col)

    assert value_col.size == uncertainty_col.size == preds_col.size

    abs_err = (value_col - preds_col).abs()
    preds_r2 = _get_r2(value_col, preds_col)
    unc_r2 = _get_r2(abs_err, uncertainty_col)
    print('\nR2 for value <-> predictions: ' + str(preds_r2))
    print('R2 for absolute err <-> uncertainty: ' + str(unc_r2))

    spearman, _ = stats.spearmanr(abs_err, uncertainty_col)
    spearman = float('%.4g' % spearman)
    print('Spearman value for absolute err <-> uncertainty: ' + str(spearman))

    if args.save_dir:
        _save_analysis(preds_r2, unc_r2, spearman, args)
    if not args.quiet:
        _create_scatter(value_col, preds_col)
def analysis_outside(outside_args):
    """
    Entry point for calling this script from another python script.
    Builds an argparse-style namespace from the given dict and runs
    analyze() quietly with default column selection.
    :dict outside_args: dict of args to use
    """
    parser = argparse.ArgumentParser()
    args, _ = parser.parse_known_args()
    args.test_path = outside_args['--test_path']
    args.preds_path = outside_args['--preds_path']
    args.save_dir = outside_args['--save_dir']
    args.quiet = True
    # No explicit column choices: fall back to the defaults in _get_col.
    for optional_attr in ('value_col', 'uncertainty_col', 'preds_col'):
        setattr(args, optional_attr, None)
    args.split_UQ = False
    analyze(args)
if __name__ == '__main__':
    # Command line entry point: collect csv paths / column names and run
    # the analysis defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument('test_path', type=str,
                        help='Path to a csv file containing test data')
    parser.add_argument('preds_path', type=str,
                        help='Path to a csv file containing preds')
    parser.add_argument('--save_dir', type=str, default=None,
                        help='Directory to save analysis')
    parser.add_argument('--value_col', '--v', type=str, default=None,
                        help='Value column of test_csv')
    parser.add_argument('--uncertainty_col', '--UQ', type=str, default=None,
                        help='Uncertainty column of preds_csv')
    parser.add_argument('--preds_col', '--p', type=str, default=None,
                        help='Predictions column of preds_csv')
    parser.add_argument('--split_UQ', action='store_true', default=False,
                        help='Uncertainty is split to aleatoric, epistemic')
    parser.add_argument('--quiet', action='store_true', default=False,
                        help='Supress graphs')
    args = parser.parse_args()
    analyze(args)
|
{"hexsha": "b662cf9f9b3f3ecc04ab5da7ce06bd1dd14072c0", "size": 7740, "ext": "py", "lang": "Python", "max_stars_repo_path": "UQ_analysis.py", "max_stars_repo_name": "opnbnch/chemprop", "max_stars_repo_head_hexsha": "b7c364b108b3421c2bd5a135233f6e36d9cb32cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-21T18:06:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-21T18:06:00.000Z", "max_issues_repo_path": "UQ_analysis.py", "max_issues_repo_name": "opnbnch/chemprop", "max_issues_repo_head_hexsha": "b7c364b108b3421c2bd5a135233f6e36d9cb32cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-12T05:38:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-12T05:38:38.000Z", "max_forks_repo_path": "UQ_analysis.py", "max_forks_repo_name": "opnbnch/chemprop", "max_forks_repo_head_hexsha": "b7c364b108b3421c2bd5a135233f6e36d9cb32cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.25, "max_line_length": 78, "alphanum_fraction": 0.6533591731, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1917}
|
import argparse
import json
import logging
import os
import threading
import time
import traceback
import colors
import docker
import numpy
import psutil
from benchmark.algorithms.definitions import (Definition,
instantiate_algorithm)
from benchmark.datasets import DATASETS
from benchmark.results import store_results
from benchmark.sensors.power_capture import power_capture
from benchmark.t3.helper import t3_create_container
def run_individual_query(algo, X, distance, count, run_count, search_type):
    """Run the query set `run_count` times, keeping the fastest wall time.

    For "knn" searches the number of results must match the number of
    queries. Returns (attrs, results) where attrs describes the run and
    includes any algorithm-specific extras from get_additional().
    """
    best_search_time = float('inf')
    for run_idx in range(run_count):
        print('Run %d/%d...' % (run_idx + 1, run_count))
        start = time.time()
        if search_type == "knn":
            algo.query(X, count)
            elapsed = time.time() - start
            results = algo.get_results()
            assert len(results) == len(X)
        else:
            algo.range_query(X, count)
            elapsed = time.time() - start
            results = algo.get_range_results()
        best_search_time = min(best_search_time, elapsed)

    attrs = {
        "best_search_time": best_search_time,
        "name": str(algo),
        "run_count": run_count,
        "distance": distance,
        "type": search_type,
        "count": int(count),
    }
    attrs.update(algo.get_additional())
    return (attrs, results)
def run(definition, dataset, count, run_count, rebuild):
    """Instantiate `definition`, build or load its index for `dataset`,
    run every query-argument group and store the results on disk.

    :param definition: benchmark Definition describing the algorithm.
    :param dataset: key into DATASETS.
    :param count: k, number of neighbours requested per query.
    :param run_count: how many times each query set is repeated.
    :param rebuild: force rebuilding the index even when one can be loaded.
    """
    algo = instantiate_algorithm(definition)
    assert not definition.query_argument_groups \
        or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)

    ds = DATASETS[dataset]()
    #X_train = numpy.array(D['train'])
    X = ds.get_queries()
    distance = ds.distance()
    search_type = ds.search_type()
    print(f"Running {definition.algorithm} on {dataset}")
    print(fr"Got {len(X)} queries")

    try:
        # Try loading the index from the file
        memory_usage_before = algo.get_memory_usage()
        if rebuild or not algo.load_index(dataset):
            # Build the index if it is not available
            t0 = time.time()
            algo.fit(dataset)
            build_time = time.time() - t0
            print('Built index in', build_time)
        else:
            print("Loaded existing index")

        # Index size is estimated as the memory growth since instantiation.
        index_size = algo.get_memory_usage() - memory_usage_before
        print('Index size: ', index_size)

        query_argument_groups = definition.query_argument_groups
        # Make sure that algorithms with no query argument groups still get run
        # once by providing them with a single, empty, harmless group
        if not query_argument_groups:
            query_argument_groups = [[]]

        for pos, query_arguments in enumerate(query_argument_groups, 1):
            print("Running query argument group %d of %d..." %
                  (pos, len(query_argument_groups)))
            if query_arguments:
                algo.set_query_arguments(*query_arguments)
            descriptor, results = run_individual_query(
                algo, X, distance, count, run_count, search_type)
            # A bit unclear how to set this correctly if we usually load from file
            #descriptor["build_time"] = build_time
            descriptor["index_size"] = index_size
            descriptor["algo"] = definition.algorithm
            descriptor["dataset"] = dataset
            if power_capture.enabled():
                # NOTE(review): power_stats is computed but never stored --
                # confirm whether it should be merged into `descriptor`.
                power_stats = power_capture.run(algo, X, distance, count,
                                                run_count, search_type, descriptor)
            store_results(dataset, count, definition,
                          query_arguments, descriptor, results, search_type)
    finally:
        # Always give the algorithm a chance to release its resources.
        algo.done()
def run_from_cmdline(args=None):
    """Parse command-line flags describing one algorithm run and execute
    it in-process. This is the entry point used inside containers; the
    normal workflow goes through run.py.

    :param args: optional argument list; defaults to sys.argv.
    """
    parser = argparse.ArgumentParser('''
NOTICE: You probably want to run.py rather than this script.
''')
    parser.add_argument(
        '--dataset',
        choices=DATASETS.keys(),
        help=f'Dataset to benchmark on.',
        required=True)
    parser.add_argument(
        '--algorithm',
        help='Name of algorithm for saving the results.',
        required=True)
    parser.add_argument(
        '--module',
        help='Python module containing algorithm. E.g. "ann_benchmarks.algorithms.annoy"',
        required=True)
    parser.add_argument(
        '--constructor',
        help='Constructer to load from module. E.g. "Annoy"',
        required=True)
    parser.add_argument(
        '--count',
        help='k: Number of nearest neighbours for the algorithm to return.',
        required=True,
        type=int)
    parser.add_argument(
        '--rebuild',
        help='re-build index even if it exists',
        action='store_true')
    parser.add_argument(
        '--runs',
        help='Number of times to run the algorihm. Will use the fastest run-time over the bunch.',
        required=True,
        type=int)
    parser.add_argument(
        'build',
        help='JSON of arguments to pass to the constructor. E.g. ["angular", 100]'
    )
    parser.add_argument(
        'queries',
        help='JSON of arguments to pass to the queries. E.g. [100]',
        nargs='*',
        default=[])
    parser.add_argument(
        '--power-capture',
        help='Power capture parameters for the T3 competition. '
        'Format is "ip:port:capture_time_in_seconds (ie, 127.0.0.1:3000:10).',
        default="")
    args = parser.parse_args(args)
    # Constructor arguments and query-argument groups arrive JSON-encoded.
    algo_args = json.loads(args.build)
    print(algo_args)
    query_args = [json.loads(q) for q in args.queries]

    if args.power_capture:
        # Validate the power-capture endpoint before the (possibly long) run.
        power_capture( args.power_capture )
        power_capture.ping()

    definition = Definition(
        algorithm=args.algorithm,
        docker_tag=None, # not needed
        module=args.module,
        constructor=args.constructor,
        arguments=algo_args,
        query_argument_groups=query_args,
        disabled=False
    )
    run(definition, args.dataset, args.count, args.runs, args.rebuild)
def run_docker(definition, dataset, count, runs, timeout, rebuild,
               cpu_limit, mem_limit=None, t3=None, power_capture=None):
    """Run one benchmark definition inside a Docker container.

    Builds the child command line for run_from_cmdline, starts the
    container (a T3-specific one when `t3` is truthy, with the timeout
    forced to 3 days), streams its logs into this process's logger,
    waits for completion and finally removes the container.

    :param definition: benchmark Definition (provides docker_tag etc.).
    :param dataset: dataset key forwarded to the child process.
    :param count: k, forwarded to the child process.
    :param runs: repetition count forwarded to the child process.
    :param timeout: seconds to wait for the container.
    :param rebuild: force index rebuild in the child.
    :param cpu_limit: cpuset string limiting the container's CPUs.
    :param mem_limit: container memory cap; defaults to available RAM.
    :param t3: use the T3-competition container setup when truthy.
    :param power_capture: optional "ip:port:seconds" power-capture spec.
    """
    cmd = ['--dataset', dataset,
           '--algorithm', definition.algorithm,
           '--module', definition.module,
           '--constructor', definition.constructor,
           '--runs', str(runs),
           '--count', str(count)]
    if power_capture:
        cmd += ["--power-capture", power_capture ]
    if rebuild:
        cmd.append("--rebuild")
    cmd.append(json.dumps(definition.arguments))
    cmd += [json.dumps(qag) for qag in definition.query_argument_groups]

    client = docker.from_env()
    if mem_limit is None:
        mem_limit = psutil.virtual_memory().available

    container = None
    if t3:
        container = t3_create_container(definition, cmd, cpu_limit, mem_limit )
        timeout = 3600*24*3 # 3 days
        print("Setting container wait timeout to 3 days")
    else:
        container = client.containers.run(
            definition.docker_tag,
            cmd,
            volumes={
                os.path.abspath('benchmark'):
                    {'bind': '/home/app/benchmark', 'mode': 'ro'},
                os.path.abspath('data'):
                    {'bind': '/home/app/data', 'mode': 'rw'},
                os.path.abspath('results'):
                    {'bind': '/home/app/results', 'mode': 'rw'},
            },
            cpuset_cpus=cpu_limit,
            mem_limit=mem_limit,
            detach=True)
    logger = logging.getLogger(f"annb.{container.short_id}")
    logger.info('Created container %s: CPU limit %s, mem limit %s, timeout %d, command %s' % \
        (container.short_id, cpu_limit, mem_limit, timeout, cmd))

    def stream_logs():
        # Forward the container's stdout/stderr into our logger (in blue).
        for line in container.logs(stream=True):
            logger.info(colors.color(line.decode().rstrip(), fg='blue'))

    t = threading.Thread(target=stream_logs, daemon=True)
    t.start()

    try:
        # NOTE(review): recent docker-py versions return a dict like
        # {'StatusCode': int} from wait(), so the membership test below
        # would never match 0 -- confirm the docker-py version targeted.
        exit_code = container.wait(timeout=timeout)

        # Exit if exit code
        if exit_code not in [0, None]:
            logger.error(colors.color(container.logs().decode(), fg='red'))
            logger.error('Child process for container %s raised exception %d' % (container.short_id, exit_code))
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        logger.error('Container.wait for container %s failed with exception' % container.short_id)
        logger.error('Invoked with %s' % cmd)
        traceback.print_exc()
    finally:
        # The container is always cleaned up, even on timeout/failure.
        container.remove(force=True)
def run_no_docker(definition, dataset, count, runs, timeout, rebuild,
                  cpu_limit, mem_limit=None, t3=False, power_capture=None):
    """Execute a benchmark definition in the current process (no container).

    Mirrors run_docker's command construction, then hands the argument
    list straight to run_from_cmdline. The timeout/cpu/mem/t3 parameters
    are accepted for signature parity with run_docker.
    """
    cmd = [
        '--dataset', dataset,
        '--algorithm', definition.algorithm,
        '--module', definition.module,
        '--constructor', definition.constructor,
        '--runs', str(runs),
        '--count', str(count),
    ]
    if power_capture:
        cmd.extend(["--power-capture", power_capture])
    if rebuild:
        cmd.append("--rebuild")
    cmd.append(json.dumps(definition.arguments))
    cmd.extend(json.dumps(group) for group in definition.query_argument_groups)
    run_from_cmdline(cmd)
|
{"hexsha": "4d139b3ffc4bb348f4692df50106d246f1b00442", "size": 9459, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark/runner.py", "max_stars_repo_name": "uoynac/big-ann-benchmarks", "max_stars_repo_head_hexsha": "8180e0e6ea5b8e36f76c5e34728116f0de23c05e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "benchmark/runner.py", "max_issues_repo_name": "uoynac/big-ann-benchmarks", "max_issues_repo_head_hexsha": "8180e0e6ea5b8e36f76c5e34728116f0de23c05e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark/runner.py", "max_forks_repo_name": "uoynac/big-ann-benchmarks", "max_forks_repo_head_hexsha": "8180e0e6ea5b8e36f76c5e34728116f0de23c05e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6483516484, "max_line_length": 112, "alphanum_fraction": 0.6138069563, "include": true, "reason": "import numpy", "num_tokens": 2012}
|
from __future__ import division, print_function, absolute_import
from keras.models import Sequential, model_from_json
from keras.layers import (
Dense,
Dropout,
Flatten,
Conv3D,
MaxPool3D,
BatchNormalization,
Input,
)
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.callbacks import ReduceLROnPlateau, TensorBoard
import h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
from sklearn.metrics import confusion_matrix, accuracy_score
class ClassifyWith3dCnn(object):
    """3D-CNN binary classifier over 16x16x16 voxel grids.

    Input arrays are colorized to RGB via a matplotlib colormap
    (array_to_color) and fed to a small Conv3D network built with Keras.
    """

    def __init__(self, X_train, y_train, X_test, y_test):
        # Hyper Parameter
        self.batch_size = 86
        self.epochs = 30
        # Set up TensorBoard
        self.tensorboard = TensorBoard(batch_size=self.batch_size)
        self.X_train = self.translate(X_train).reshape(-1, 16, 16, 16, 3)
        self.X_test = self.translate(X_test).reshape(-1, 16, 16, 16, 3)
        self.y_train = to_categorical(y_train, 2)
        self.y_test = y_test
        self.model = self.CNN((16, 16, 16, 3), 2)

    def translate(self, x):
        """Map every sample of x to a (4096, 3) RGB representation."""
        xx = np.ndarray((x.shape[0], 4096, 3))
        for i in range(x.shape[0]):
            xx[i] = self.array_to_color(x[i])
            if i % 1000 == 0:
                # Progress indicator for large inputs.
                print(i)
        # Free Memory
        del x
        return xx

    # Translate data to color
    def array_to_color(self, array, cmap="Oranges"):
        """Apply a matplotlib colormap and drop the alpha channel."""
        s_m = plt.cm.ScalarMappable(cmap=cmap)
        return s_m.to_rgba(array)[:, :-1]

    # Conv3D layer factory
    def Conv(
        self, filters=16, kernel_size=(3, 3, 3), activation="relu", input_shape=None
    ):
        """Build a same-padded Conv3D layer; input_shape only on the first layer."""
        if input_shape:
            return Conv3D(
                filters=filters,
                kernel_size=kernel_size,
                padding="Same",
                activation=activation,
                input_shape=input_shape,
            )
        else:
            return Conv3D(
                filters=filters,
                kernel_size=kernel_size,
                padding="Same",
                activation=activation,
            )

    # Define Model
    def CNN(self, input_dim, num_classes):
        """Assemble the sequential Conv3D architecture."""
        model = Sequential()
        model.add(self.Conv(8, (3, 3, 3), input_shape=input_dim))
        model.add(self.Conv(16, (3, 3, 3)))
        # model.add(BatchNormalization())
        model.add(MaxPool3D())
        # model.add(Dropout(0.25))
        model.add(self.Conv(32, (3, 3, 3)))
        model.add(self.Conv(64, (3, 3, 3)))
        model.add(BatchNormalization())
        model.add(MaxPool3D())
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(4096, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(1024, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation="softmax"))
        return model

    # Train Model
    def train(self, optimizer, scheduler):
        """Compile with the given optimizer and fit on the training data.

        Bug fix: the `optimizer` argument was previously ignored and the
        model was always compiled with the string "adam".
        """
        print("Training...")
        self.model.compile(
            optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
        )
        self.model.fit(
            x=self.X_train,
            y=self.y_train,
            batch_size=self.batch_size,
            epochs=self.epochs,
            validation_split=0.15,
            verbose=2,
            callbacks=[scheduler, self.tensorboard],
        )

    def evaluate(self):
        """Report test accuracy and plot a confusion-matrix heat map."""
        pred = self.model.predict(self.X_test)
        pred = np.argmax(pred, axis=1)
        print("Accuracy: ", accuracy_score(pred, self.y_test))
        # Heat Map
        array = confusion_matrix(self.y_test, pred)
        cm = pd.DataFrame(array, index=range(2), columns=range(2))
        plt.figure(figsize=(20, 20))
        sns.heatmap(cm, annot=True)
        plt.show()

    def cnn_initiate(self):
        """End-to-end run: build optimizer and scheduler, train, evaluate."""
        optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        scheduler = ReduceLROnPlateau(
            monitor="val_acc", patience=3, verbose=1, factor=0.5, min_lr=1e-5
        )
        self.train(optimizer, scheduler)
        self.evaluate()
|
{"hexsha": "361fc87f4c863c6062becf7fd2fecbbb22dd8df8", "size": 4179, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/starter-code/cnn3d.py", "max_stars_repo_name": "kiat/debs2019", "max_stars_repo_head_hexsha": "b1231a0995a154f8549ef23a00f635b81cc3c689", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/starter-code/cnn3d.py", "max_issues_repo_name": "kiat/debs2019", "max_issues_repo_head_hexsha": "b1231a0995a154f8549ef23a00f635b81cc3c689", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-11T23:19:14.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-12T06:39:53.000Z", "max_forks_repo_path": "src/starter-code/cnn3d.py", "max_forks_repo_name": "kiat/debs2019", "max_forks_repo_head_hexsha": "b1231a0995a154f8549ef23a00f635b81cc3c689", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-06T21:54:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-06T21:54:47.000Z", "avg_line_length": 27.86, "max_line_length": 84, "alphanum_fraction": 0.5917683656, "include": true, "reason": "import numpy", "num_tokens": 1036}
|
using compost
using SafeTestsets
using DataFrames
using Test
# Test functions that calculate comparative advantage: ==============
@safetestset "Comparative advantage tests" begin include(normpath(@__DIR__, "test/comparative-advantage-tests.jl")) end
# Test functions that calculate similarity and distance measures: ===
@safetestset "Relatedness tests" begin include(normpath(@__DIR__, "test/relatedness-tests.jl")) end
# Test complexity algorithms: =======================================
# NOTE(review): this set includes "similarity-distance-tests.jl" while the
# similarity/distance banner above covers "relatedness-tests.jl" -- confirm
# the labels and included files are paired as intended.
@safetestset "Complexity algorithm tests" begin include(normpath(@__DIR__, "test/similarity-distance-tests.jl")) end
# Test forecasting functions: =======================================
# TODO:
|
{"hexsha": "c1934fdf3c3c29c46ef36c5e585693c6a1d05812", "size": 692, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "sbpost/compost", "max_stars_repo_head_hexsha": "616d1e123bea38ef5adbcdcf857b6f0b8ddbca44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "sbpost/compost", "max_issues_repo_head_hexsha": "616d1e123bea38ef5adbcdcf857b6f0b8ddbca44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "sbpost/compost", "max_forks_repo_head_hexsha": "616d1e123bea38ef5adbcdcf857b6f0b8ddbca44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4210526316, "max_line_length": 119, "alphanum_fraction": 0.686416185, "num_tokens": 144}
|
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as layers
from tensorflow_graphics.rendering.camera import perspective
from scipy.spatial import Delaunay
from utils.matrix_util import *
from model.nerf import RadianceField, RadianceFieldBase, PositionalEncoding
class MLPDecomposition(tf.Module):
    """Soft spatial decomposition: an MLP mapping 3-D positions to a
    softmax distribution over the decomposition heads."""

    def __init__(self,
                 heads,
                 units=64,
                 depth=3,
                 pos_feature=PositionalEncoding(5)):
        self.n_heads = heads
        self.pos_feature = pos_feature
        # `depth` Dense+ReLU pairs followed by one linear output layer.
        self.layers = []
        for _ in range(depth):
            self.layers.extend([layers.Dense(units), layers.ReLU()])
        self.layers.append(layers.Dense(self.n_heads))

    def __call__(self, positions):
        features = self.pos_feature(positions)
        for layer in self.layers:
            features = layer(features)
        # Cast to float32 before the softmax for numerical stability.
        return tf.nn.softmax(tf.cast(features, tf.float32))
class VoronoiDecomposition(tf.Module):
    """Soft Voronoi decomposition: weights decay with distance to a set of
    learnable head centers, sharpened by a temperature factor."""

    def __init__(self, scene_bbox, n_heads):
        self.n_heads = n_heads
        # Softmax sharpness; not trained through gradients.
        self.temperature = tf.Variable(1.0, trainable=False)
        self.center_scale = 1.0
        # Start the head centers uniformly inside the scene bounding box.
        init_centers = self.center_scale * tf.random.uniform(
            (n_heads, 3)) * (scene_bbox[1] - scene_bbox[0]) + scene_bbox[0]
        self.head_centers = tf.Variable(init_centers, trainable=True)

    def __call__(self, positions):
        # Distance from each query point to each (rescaled) head center.
        distances = tf.linalg.norm(
            positions[..., None, :] - self.head_centers / self.center_scale,
            axis=-1)
        # Closer centers receive exponentially larger weights.
        return tf.nn.softmax(-self.temperature * distances)
class GridDecomposition(tf.Module):
    """Hard decomposition on a regular grid of centers inside the scene
    bounding box; each position is one-hot assigned to its nearest center."""
    def __init__(self, scene_bbox, n_heads):
        self.n_heads = n_heads
        # Cells per axis: smallest cube large enough to hold n_heads centers.
        # NOTE(review): this builds dim**3 centers, which exceeds n_heads when
        # n_heads is not a perfect cube; argmax indices >= n_heads would then
        # yield all-zero one-hot rows in __call__ -- confirm callers always
        # pass a cubic n_heads.
        dim = int(np.ceil(n_heads**(1 / 3)))
        # Interior grid coordinates in (0, 1): drop the two boundary samples.
        i = tf.linspace(0.0, 1.0, dim + 2)[1:-1]
        xyz = tf.reshape(tf.stack(tf.meshgrid(i, i, i), axis=-1), (-1, 3))
        self.center_scale = 1.0
        # Map unit-cube coordinates into the scene bounding box.
        self.head_centers = xyz * (scene_bbox[1] - scene_bbox[0]) + scene_bbox[0]
    def __call__(self, positions):
        # One-hot assignment to the nearest grid center.
        d = tf.linalg.norm(positions[..., None, :] - self.head_centers, axis=-1)
        weights = tf.one_hot(tf.argmax(-d, axis=-1), self.n_heads)
        return weights
class DecomposedRadianceField(RadianceFieldBase):
    """Radiance field split into several spatial 'heads' blended by a
    decomposition model, plus a shared pilot network (used while
    bootstrapping) and a coarse network (importance-sampling proposal)."""
    def __init__(self,
                 decomposition_model,
                 head_constructor=lambda: RadianceField(64)):
        # Maps positions -> per-head weights; also defines n_heads.
        self.decomposition_model = decomposition_model
        self.n_heads = self.decomposition_model.n_heads
        self.pilot = head_constructor()
        self.coarse = head_constructor()
        # While True, all sampling is served by the single pilot network.
        self.using_pilot = True
        self.heads = list(head_constructor() for _ in range(self.n_heads))
    def get_decomposition_vars(self):
        """Trainable variables of the decomposition model only."""
        return self.decomposition_model.trainable_variables
    def get_radiance_vars(self):
        """Trainable variables of every radiance network (coarse, pilot, heads)."""
        rvars = []
        rvars += self.coarse.trainable_variables
        rvars += self.pilot.trainable_variables
        for head in self.heads:
            rvars += head.trainable_variables
        return rvars
    def trace_rays_importance(self,
                              rays,
                              samples,
                              samples_coarse,
                              deterministic=False,
                              **kwargs):
        """Trace rays; when past the pilot phase, first sample the coarse
        network to build a pdf for importance sampling the fine pass."""
        if self.using_pilot:
            # Pilot phase: plain tracing, no coarse-to-fine pdf.
            return self.trace_rays(rays,
                                   samples,
                                   deterministic=deterministic,
                                   **kwargs)
        else:
            coarse_xyz, coarse_dir, _, _, pdfx = self.gen_ray_samples(
                rays, samples_coarse, deterministic=deterministic)
            # Coarse densities act as the importance weights (no noise).
            _, pdfy, _ = self.coarse.sample_field(coarse_xyz,
                                                  coarse_dir,
                                                  density_noise=0.0)
            pdf = tf.stack([pdfx, pdfy], axis=-1)
            return self.trace_rays(rays,
                                   samples,
                                   pdf=pdf,
                                   deterministic=deterministic,
                                   **kwargs)
    def render_importance(self,
                          pose,
                          farplane,
                          focal,
                          principal,
                          res,
                          samples,
                          samples_coarse,
                          chunk=64,
                          **kwargs):
        """Render a full image with importance sampling, tracing the rays
        in fixed-size chunks inside a tf.while_loop."""
        # Build one ray per pixel (same construction as get_im_rays below).
        i = tf.range(res[0])
        j = tf.range(res[1])
        ij = tf.reshape(tf.stack(tf.meshgrid(i, j, indexing="ij"), axis=-1),
                        (-1, 2))
        xy = tf.reshape(tf.cast(ij, tf.float32), (-1, 2))[:, ::-1]
        ip_points = perspective.ray(xy, focal, principal)
        ws_ip_points = apply_transform(pose, ip_points)
        ws_eye = apply_transform(pose, tf.zeros((1, 3)))
        eyes_tiled = tf.tile(ws_eye, (res[0] * res[1], 1))
        ray_dirs, _ = tf.linalg.normalize(ws_ip_points - eyes_tiled, axis=-1)
        rays = tf.concat([eyes_tiled, farplane * ray_dirs], axis=-1)
        # NOTE(review): this overwrites the `chunk` parameter, so callers
        # cannot change the chunk size -- confirm whether intentional.
        chunk = 64
        def cond(i, rgba, depth, attention):
            return i < (1 + (res[0] * res[1]) // chunk)
        def body(i, rgba, depth, attention):
            # Trace one chunk of rays and append the results.
            rgba_i, depth_i, attention_i = self.trace_rays_importance(
                rays[i * chunk:(i + 1) * chunk],
                samples=samples,
                samples_coarse=samples_coarse,
                deterministic=False,
                **kwargs)
            rgba = tf.concat([rgba, rgba_i], axis=0)
            depth = tf.concat([depth, depth_i], axis=0)
            attention = tf.concat([attention, attention_i], axis=0)
            return [i + 1, rgba, depth, attention]
        lvars = [0, tf.zeros((0, 4)), tf.zeros((0, 1)), tf.zeros((0, self.n_heads))]
        _, rgba, depth, attention = tf.while_loop(cond,
                                                  body,
                                                  lvars,
                                                  parallel_iterations=1,
                                                  shape_invariants=[
                                                      tf.TensorShape([]),
                                                      tf.TensorShape([None, 4]),
                                                      tf.TensorShape([None, 1]),
                                                      tf.TensorShape(
                                                          [None, self.n_heads])
                                                  ])
        # Reshape flat per-ray results back into image layout.
        rgba = tf.reshape(rgba, (res[0], res[1], 4))
        depth = tf.reshape(depth, (res[0], res[1], 1))
        attention = tf.reshape(attention, (res[0], res[1], self.n_heads))
        return rgba, depth, attention
    def sample_field(self, positions, directions, density_noise=1e0, **kwargs):
        """Sample radiance/density at positions, blending head outputs by
        the decomposition weights (or delegating to pilot/single head)."""
        if "head_index" in kwargs:
            # Explicit head selection bypasses the decomposition blend.
            i = kwargs["head_index"]
            del kwargs["head_index"]
            return self.sample_head(i, positions, directions, density_noise, **kwargs)
        if "use_coarse" in kwargs and kwargs["use_coarse"]:
            # NOTE(review): this branch's results are unconditionally
            # overwritten below (no early return), so the coarse sampling
            # here is dead code -- likely a missing `return`; confirm.
            decomposition = self.decomposition_model(positions)
            radiance, density, _ = self.coarse.sample_field(positions, directions,
                                                            density_noise, **kwargs)
        if self.using_pilot:
            decomposition = self.decomposition_model(positions)
            radiance, density, _ = self.pilot.sample_field(positions, directions,
                                                           density_noise, **kwargs)
        else:
            decomposition = self.decomposition_model(positions)
            if self.coarse is not None:
                # Don't backprop into the decomposition through the blend.
                decomposition = tf.stop_gradient(decomposition)
            # Weighted sum of every head's radiance and density.
            density = 0.0
            radiance = 0.0
            for i, head in enumerate(self.heads):
                w_i = decomposition[..., i]
                radiance_i, density_i, _ = head.sample_field(positions, directions,
                                                             density_noise, **kwargs)
                density += density_i * w_i
                radiance += radiance_i * w_i[..., None]
        return radiance, density, decomposition
    def sample_head(self, i, positions, directions, density_noise=1e0, **kwargs):
        """Sample a single head (or the pilot during the pilot phase) with a
        trivial all-ones decomposition."""
        if self.using_pilot:
            radiance, density, _ = self.pilot.sample_field(positions, directions,
                                                           density_noise, **kwargs)
        else:
            radiance, density, _ = self.heads[i].sample_field(positions, directions,
                                                              density_noise, **kwargs)
        decomposition = tf.ones_like(radiance[..., :1])
        return radiance, density, decomposition
def get_im_rays(pose, farplane, focal, principal, res):
    """Build one camera ray per pixel: each ray is the world-space eye
    position concatenated with an offset that reaches the far plane."""
    rows = tf.range(res[0])
    cols = tf.range(res[1])
    pixel_ij = tf.reshape(
        tf.stack(tf.meshgrid(rows, cols, indexing="ij"), axis=-1), (-1, 2))
    # Swap (row, col) into (x, y) image coordinates.
    pixel_xy = tf.reshape(tf.cast(pixel_ij, tf.float32), (-1, 2))[:, ::-1]
    # Un-project pixels to points, then move everything into world space.
    cam_points = perspective.ray(pixel_xy, focal, principal)
    world_points = apply_transform(pose, cam_points)
    eye = apply_transform(pose, tf.zeros((1, 3)))
    eyes = tf.tile(eye, (res[0] * res[1], 1))
    directions, _ = tf.linalg.normalize(world_points - eyes, axis=-1)
    return tf.concat([eyes, farplane * directions], axis=-1)
def get_ray_cell_bounds(model, rays):
    """For each ray and each Voronoi cell (head), compute the parametric
    interval [t_min, t_max] (in units of the ray extent) where the ray lies
    inside the cell, plus a mask of cells the ray actually intersects.

    Cells are intersected analytically via the bisector planes between each
    site and its Delaunay neighbours."""
    if model.n_heads == 1:
        # Single head: every ray spans the whole [0, 1] interval.
        nr = tf.shape(rays)[0]
        bounds = tf.stack([tf.zeros((nr, 1)), tf.ones((nr, 1))], axis=-1)
        mask = tf.ones((nr, 1), dtype=tf.bool)
        return bounds, mask
    # Ray origins and direction offsets, broadcast against sites/neighbours.
    o = rays[:, None, None, :3]
    d = rays[:, None, None, 3:]
    def get_delaunay(sites):
        # SciPy runs on CPU via tf.py_function; returns CSR-style
        # (row_starts, flat neighbour indices) adjacency.
        triangulation = Delaunay(sites)
        return triangulation.vertex_neighbor_vertices
    p = model.decomposition_model.head_centers / model.decomposition_model.center_scale
    row_starts, neighbour_inds = tf.py_function(get_delaunay, [p],
                                                (tf.int64, tf.int64))
    row_starts = tf.reshape(row_starts, (-1,))
    neighbour_inds = tf.reshape(neighbour_inds, (-1,))
    # p1: ragged [site, neighbour, 3] tensor of neighbour positions;
    # p0: matching site position for every neighbour.
    p1_vals = tf.gather(p, neighbour_inds)
    p1 = tf.RaggedTensor.from_row_starts(values=p1_vals,
                                         row_starts=row_starts[:-1])
    p0 = tf.ones_like(p1) * p[:, None]
    p0 = p0[None]
    p1 = p1[None]
    def rdot(a, b):
        # Dot product over the last axis, written ragged-tensor friendly.
        prod = a * b
        return prod[..., 0] + prod[..., 1] + prod[..., 2]
    # Intersect each ray with the bisector plane through pm = (p0 + p1) / 2.
    pm = (p0 + p1) / 2
    t = rdot(pm - o, p1 - p0) / rdot(d, p1 - p0)
    # Planes facing away bound the exit; planes facing toward bound the entry.
    backfacing = tf.cast(tf.less(rdot(p1 - p0, d), 0.0), tf.float32)
    back_t = t + backfacing * (1e10 * tf.ones_like(t))
    front_t = t + (1.0 - backfacing) * (-1e10 * tf.ones_like(t))
    head_max = tf.reduce_min(back_t, axis=2).to_tensor()
    head_min = tf.reduce_max(front_t, axis=2).to_tensor()
    # Clamp to the valid ray segment [0, 1].
    head_max = tf.clip_by_value(head_max, 0.0, 1.0)
    head_min = tf.clip_by_value(head_min, 0.0, 1.0)
    bounds = tf.stack([head_min, head_max], axis=-1)
    # Empty (or inverted) intervals mean the ray misses the cell.
    mask = tf.greater(head_max, head_min)
    return bounds, mask
def fast_voronoi_render(model,
                        pose,
                        farplane,
                        focal,
                        principal,
                        res,
                        samples,
                        chunk=64,
                        return_invocations=False,
                        return_layers=False):
    """Render an image by tracing each Voronoi cell's rays with only that
    cell's head, then alpha-compositing the per-cell layers back-to-front
    (cells sorted by distance of their site from the eye).

    Returns (rgba, depth, decomposition); optionally also the total number
    of network invocations or the per-head layers and compositing order."""
    with tf.device("CPU"):
        rays = get_im_rays(pose, farplane, focal, principal, res)
        # Per-ray, per-cell parametric bounds and intersection mask.
        rcell_bounds, rcell_masks = get_ray_cell_bounds(model, rays)
        # Sample budget proportional to the length of the ray inside the cell.
        ray_samples = tf.cast(
            (rcell_bounds[..., 1] - rcell_bounds[..., 0]) * samples, tf.int32)
        ray_samples = tf.clip_by_value(tf.abs(ray_samples), 0, samples)
    rgba_r_layers = []
    depth_r_layers = []
    ind_r_layers = []
    total_invocations = 0
    for j in range(model.n_heads):
        # Restrict to the rays that actually cross cell j, and shrink each
        # ray to its segment inside the cell.
        inds_j = tf.where(rcell_masks[:, j])
        rays_j = tf.gather(rays, inds_j[:, 0])
        bounds_j = tf.gather(rcell_bounds[:, j], inds_j[:, 0])
        starts_j = rays_j[:, :3] + rays_j[:, 3:] * bounds_j[:, :1]
        offsets_j = rays_j[:, 3:] * (bounds_j[:, 1:] - bounds_j[:, :1])
        rays_j = tf.concat([starts_j, offsets_j], axis=-1)
        samples_j = tf.gather(ray_samples[:, j], inds_j[:, 0])
        n_rays_j = tf.shape(rays_j)[0]
        def cond(i, rgba, depth, decomposition, invocations):
            return i < (1 + n_rays_j // chunk)
        def body(i, rgba, depth, decomposition, invocations):
            # Trace one chunk with the largest sample count in the chunk.
            samples_i = tf.reduce_max(samples_j[i * chunk:(i + 1) * chunk])
            samples_i = tf.clip_by_value(samples_i, 1, samples)
            coarse_samples_i = tf.clip_by_value(samples_i // 2, 1, samples)
            rays_i = rays_j[i * chunk:(i + 1) * chunk]
            rgba_i, depth_i, decomposition_i = model.trace_rays_importance(
                rays_i, samples_i, coarse_samples_i, deterministic=True, head_index=j)
            rgba = tf.concat([rgba, rgba_i], axis=0)
            depth = tf.concat([depth, depth_i], axis=0)
            decomposition = tf.concat([decomposition, decomposition_i], axis=0)
            invocations += samples_i * tf.shape(rays_i)[0]
            return [i + 1, rgba, depth, decomposition, invocations]
        lvars = [
            0,
            tf.zeros((0, 4)),
            tf.zeros((0, 1)),
            tf.zeros((0, model.heads[j].n_heads)), 0
        ]
        # Per-chunk decomposition output is accumulated but discarded below.
        _, rgba_j, depth_j, _, invocations_j = tf.while_loop(
            cond,
            body,
            lvars,
            parallel_iterations=1,
            shape_invariants=[
                tf.TensorShape([]),
                tf.TensorShape([None, 4]),
                tf.TensorShape([None, 1]),
                tf.TensorShape([None, model.heads[j].n_heads]),
                tf.TensorShape([])
            ])
        with tf.device("CPU"):
            # Copy results to host memory between heads.
            rgba_r_layers.append(tf.identity(rgba_j))
            depth_r_layers.append(tf.identity(depth_j))
            ind_r_layers.append(tf.identity(inds_j))
            total_invocations += invocations_j
    with tf.device("CPU"):
        # Scatter each cell's sparse ray results back into full images.
        rgba_0 = tf.zeros((tf.shape(rays)[0], 4))
        depth_0 = tf.zeros((tf.shape(rays)[0], 1))
        rgba_layers = []
        depth_layers = []
        for j in range(model.n_heads):
            rgba_j = tf.tensor_scatter_nd_update(rgba_0, ind_r_layers[j],
                                                 rgba_r_layers[j])
            depth_j = tf.tensor_scatter_nd_update(depth_0, ind_r_layers[j],
                                                  depth_r_layers[j])
            rgba_j = tf.reshape(rgba_j, (res[0], res[1], 4))
            depth_j = tf.reshape(depth_j, (res[0], res[1], 1))
            rgba_layers.append(rgba_j)
            depth_layers.append(depth_j)
        rgba_layers = tf.stack(rgba_layers, axis=0)
        depth_layers = tf.stack(depth_layers, axis=0)
        # Composite the layers back-to-front: farthest site first.
        ws_sites = model.decomposition_model.head_centers
        ws_eye = apply_transform(pose, tf.zeros((1, 3)))
        head_order = tf.argsort(tf.linalg.norm(ws_sites - ws_eye, axis=-1),
                                direction="DESCENDING")
        result_rgba = rgba_layers[head_order[0]]
        result_depth = depth_layers[head_order[0]]
        # NOTE(review): the decomposition map is one-hot in the compositing
        # rank i, not the head id head_order[i] -- confirm intended.
        result_decomposition = tf.zeros(
            (res[0], res[1], model.n_heads)) + tf.one_hot(
                tf.constant(0)[None, None], model.n_heads)
        for i in range(1, model.n_heads):
            j = head_order[i]
            rgba = rgba_layers[j]
            alpha = rgba_layers[j, :, :, 3:]
            depth = depth_layers[j]
            decomposition = tf.one_hot(tf.constant(i)[None, None], model.n_heads)
            # Standard over-compositing with premultiplied alpha.
            result_rgba = rgba + (1.0 - alpha) * result_rgba
            result_depth = depth + (1.0 - alpha) * result_depth
            result_decomposition = alpha * decomposition + (
                1.0 - alpha) * result_decomposition
    if return_invocations:
        return result_rgba, result_depth, result_decomposition, total_invocations
    elif return_layers:
        return result_rgba, result_depth, result_decomposition, rgba_layers, head_order
    else:
        return result_rgba, result_depth, result_decomposition
|
{"hexsha": "89ee967e8967904c61b688c5bb87005b18f20294", "size": 15168, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/derf.py", "max_stars_repo_name": "ubc-vision/derf", "max_stars_repo_head_hexsha": "f8f1704fc01ddb42fa2c4853f8bb7607611982b4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2021-06-24T07:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T11:18:15.000Z", "max_issues_repo_path": "model/derf.py", "max_issues_repo_name": "ubc-vision/derf", "max_issues_repo_head_hexsha": "f8f1704fc01ddb42fa2c4853f8bb7607611982b4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/derf.py", "max_forks_repo_name": "ubc-vision/derf", "max_forks_repo_head_hexsha": "f8f1704fc01ddb42fa2c4853f8bb7607611982b4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-27T05:28:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T01:31:15.000Z", "avg_line_length": 34.7889908257, "max_line_length": 85, "alphanum_fraction": 0.5756197257, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3761}
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import json
import uuid
import numpy as np
from os.path import join as pjoin
from typing import Optional, Mapping, Dict, Union
from numpy.random import RandomState
from textworld import g_rng
from textworld.utils import maybe_mkdir, str2bool
from textworld.logic import State
from textworld.generator.chaining import ChainingOptions, sample_quest
from textworld.generator.world import World
from textworld.generator.game import Game, Quest, Event, GameOptions
from textworld.generator.graph_networks import create_map, create_small_map
from textworld.generator.text_generation import generate_text_from_grammar
from textworld.generator import inform7
from textworld.generator.inform7 import generate_inform7_source, compile_inform7_game
from textworld.generator.inform7 import CouldNotCompileGameError
from textworld.generator.data import KnowledgeBase
from textworld.generator.text_grammar import Grammar
from textworld.generator.maker import GameMaker
from textworld.generator.logger import GameLogger
class GenerationWarning(UserWarning):
    """Warning category for non-fatal issues raised during game generation."""
class NoSuchQuestExistError(NameError):
    """Raised when no quest satisfying the chaining options can be generated."""
def make_map(n_rooms, size=None, rng=None, possible_door_states=["open", "closed", "locked"]):
    """ Make a map.

    Parameters
    ----------
    n_rooms : int
        Number of rooms in the map.
    size : tuple of int, optional
        Size (height, width) of the grid delimiting the map; derived from
        `n_rooms` when omitted.
    rng : numpy.random.RandomState, optional
        Source of randomness; defaults to the global generator.
    possible_door_states : list of str, optional
        Possible states doors can have.
    """
    if rng is None:
        rng = g_rng.next()
    if size is None:
        # Square grid just large enough to hold every room plus slack.
        side = int(np.ceil(np.sqrt(n_rooms + 1)))
        size = (side, side)
    return create_map(rng, n_rooms, size[0], size[1], possible_door_states)
def make_small_map(n_rooms, rng=None, possible_door_states=["open", "closed", "locked"]):
    """ Make a small map.

    The map contains one room that connects to all others.

    Parameters
    ----------
    n_rooms : int
        Number of rooms in the map (maximum of 5 rooms).
    rng : numpy.random.RandomState, optional
        Source of randomness; defaults to the global generator.
    possible_door_states : list of str, optional
        Possible states doors can have.
    """
    if rng is None:
        rng = g_rng.next()
    if n_rooms > 5:
        raise ValueError("Nb. of rooms of a small map must be less than 6 rooms.")
    return create_small_map(rng, n_rooms, possible_door_states)
def make_world(world_size, nb_objects=0, rngs=None):
    """ Make a world (map + objects).

    Parameters
    ----------
    world_size : int
        Number of rooms in the world.
    nb_objects : int
        Number of objects in the world.
    rngs : dict of RandomState, optional
        Named random generators ('map' and 'objects'); seeded from the
        global generator when not provided.
    """
    if rngs is None:
        rngs = {}
        seeder = g_rng.next()
        # Independent streams for the map layout and the object placement.
        rngs['map'] = RandomState(seeder.randint(65635))
        rngs['objects'] = RandomState(seeder.randint(65635))
    room_map = make_map(n_rooms=world_size, rng=rngs['map'])
    world = World.from_map(room_map)
    world.set_player_room()
    world.populate(nb_objects=nb_objects, rng=rngs['objects'])
    return world
def make_world_with(rooms, rng=None):
    """ Make a world that contains the given rooms.

    Parameters
    ----------
    rooms : list of textworld.logic.Variable
        Rooms in the map. Variables must have type 'r'.
    rng : numpy.random.RandomState, optional
        Source of randomness for the map layout.
    """
    # Local renamed from `map` to avoid shadowing the builtin.
    room_map = make_map(n_rooms=len(rooms), rng=rng)
    # Name each generated node after the corresponding room variable.
    for (node, data), room in zip(room_map.nodes.items(), rooms):
        data["name"] = room.name
    world = World.from_map(room_map)
    world.set_player_room()
    return world
def make_quest(world: Union[World, State], options: Optional[GameOptions] = None):
    """ Generate quest(s) for the given world.

    Parameters
    ----------
    world : World or State
        Either a World (its `.state` is read, and updated when backward
        chaining is enabled) or a raw logical State.
    options : GameOptions, optional
        Generation options; a default GameOptions is used when omitted.

    Returns
    -------
    list of Quest

    Raises
    ------
    NoSuchQuestExistError
        When no action chain satisfying the options can be sampled.
    """
    state = getattr(world, "state", world)
    if options is None:
        options = GameOptions()
    # By default, exclude quests finishing with: go, examine, look and inventory.
    exclude = ["go.*", "examine.*", "look.*", "inventory.*"]
    options.chaining.rules_per_depth = [options.kb.rules.get_matching(".*", exclude=exclude)]
    options.chaining.rng = options.rngs['quest']
    chains = []
    for _ in range(options.nb_parallel_quests):
        chain = sample_quest(state, options.chaining)
        if chain is None:
            msg = "No quest can be generated with the provided options."
            raise NoSuchQuestExistError(msg)
        chains.append(chain)
        state = chain.initial_state  # State might have changed, i.e. options.create_variable is True.
    # Backward chaining may have introduced new variables; push the final
    # state back onto the world.
    if options.chaining.backward and hasattr(world, "state"):
        world.state = state
    quests = []
    actions = []
    for chain in reversed(chains):
        # A change in node breadth marks a sub-quest boundary: emit the
        # actions accumulated so far as one win event.
        # NOTE(review): the same `actions` list object is passed to every
        # Event, so later appends also mutate earlier events unless Event
        # copies its input -- confirm Event's constructor semantics.
        for i in range(1, len(chain.nodes)):
            actions.append(chain.actions[i - 1])
            if chain.nodes[i].breadth != chain.nodes[i - 1].breadth:
                event = Event(actions)
                quests.append(Quest(win_events=[event]))
        actions.append(chain.actions[-1])
        event = Event(actions)
        quests.append(Quest(win_events=[event]))
    return quests
def make_grammar(options: Mapping = {}, rng: Optional[RandomState] = None) -> Grammar:
    """ Create and validate a text grammar, seeded from the global RNG
    when no generator is supplied. """
    if rng is None:
        rng = g_rng.next()
    grammar = Grammar(options, rng)
    # Fail fast on malformed grammar definitions.
    grammar.check()
    return grammar
def make_game_with(world, quests=None, grammar=None):
    """ Assemble a Game from a world and optional quests/grammar.

    With a grammar, entity names and descriptions are generated from it;
    without one, entities fall back to their variable names.
    """
    game = Game(world, grammar, quests)
    if grammar is not None:
        return generate_text_from_grammar(game, grammar)
    for variable, infos in game.infos.items():
        infos.name = variable.name
    return game
def make_game(options: GameOptions) -> Game:
    """
    Make a game (map + objects + quest).

    Arguments:
        options:
            For customizing the game generation (see
            :py:class:`textworld.GameOptions <textworld.generator.game.GameOptions>`
            for the list of available options).

    Returns:
        Generated game.
    """
    rngs = options.rngs
    # Generate only the map for now (i.e. without any objects)
    world = make_world(options.nb_rooms, nb_objects=0, rngs=rngs)
    # Generate quest(s).
    # By default, exclude quests finishing with: go, examine, look and inventory.
    exclude = ["go.*", "examine.*", "look.*", "inventory.*"]
    options.chaining.rules_per_depth = [options.kb.rules.get_matching(".*", exclude=exclude)]
    # Backward chaining from a goal state, allowed to create new variables,
    # except for rooms and doors which stay fixed.
    options.chaining.backward = True
    options.chaining.create_variables = True
    options.chaining.rng = rngs['quest']
    options.chaining.restricted_types = {"r", "d"}
    quests = make_quest(world, options)
    # If needed, add distractors objects (i.e. not related to the quest) to reach options.nb_objects.
    # Rooms, doors, the inventory and the player do not count as objects.
    nb_objects = sum(1 for e in world.entities if e.type not in {'r', 'd', 'I', 'P'})
    nb_distractors = options.nb_objects - nb_objects
    if nb_distractors > 0:
        world.populate(nb_distractors, rng=rngs['objects'])
    grammar = make_grammar(options.grammar, rng=rngs['grammar'])
    game = Game(world, grammar, quests)
    game.metadata["uuid"] = options.uuid
    return game
def compile_game(game: Game, options: Optional[GameOptions] = None):
    """
    Compile a game.

    Arguments:
        game: Game object to compile.
        options:
            For customizing the game generation (see
            :py:class:`textworld.GameOptions <textworld.generator.game.GameOptions>`
            for the list of available options).

    Returns:
        The path to compiled game.
    """
    options = options or GameOptions()
    folder, filename = os.path.split(options.path)
    if not filename:
        # No explicit filename: fall back to the game's uuid (or a new one).
        filename = game.metadata.get("uuid", str(uuid.uuid4()))
    filename, ext = os.path.splitext(filename)
    if not ext:
        ext = options.file_ext  # Add default extension, if needed.
    source = generate_inform7_source(game)
    maybe_mkdir(folder)
    game_json = pjoin(folder, filename + ".json")
    game_file = pjoin(folder, filename + ext)
    already_compiled = False  # Check if game is already compiled.
    if not options.force_recompile and os.path.isfile(game_file) and os.path.isfile(game_json):
        # Reuse the existing build only if the stored game matches exactly.
        already_compiled = game == Game.load(game_json)
        msg = ("It's highly unprobable that two games with the same id have different structures."
               " That would mean the generator has been modified."
               " Please clean already generated games found in '{}'.".format(folder))
        assert already_compiled, msg
    if not already_compiled or options.force_recompile:
        game.save(game_json)
        compile_inform7_game(source, game_file)
    return game_file
|
{"hexsha": "0f300807fa5fd162adfdda680f152286f93afa1c", "size": 8436, "ext": "py", "lang": "Python", "max_stars_repo_path": "textworld/generator/__init__.py", "max_stars_repo_name": "HakiRose/TextWorld", "max_stars_repo_head_hexsha": "2b38411e6492ac0e57d8c89482c1b36ff2f20067", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "textworld/generator/__init__.py", "max_issues_repo_name": "HakiRose/TextWorld", "max_issues_repo_head_hexsha": "2b38411e6492ac0e57d8c89482c1b36ff2f20067", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "textworld/generator/__init__.py", "max_forks_repo_name": "HakiRose/TextWorld", "max_forks_repo_head_hexsha": "2b38411e6492ac0e57d8c89482c1b36ff2f20067", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9545454545, "max_line_length": 102, "alphanum_fraction": 0.6702228544, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1958}
|
# -*- coding:utf-8 -*-
"""
获取疫情数据接口
# Created on 2021/12/15
# @author: WHT
# @group : datafactory
# @contact: whtsf@foxmail.com
"""
from __future__ import division
import time
import json
import re
import pandas as pd
import numpy as np
from datafactory.disease import cons as ct
from datafactory.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib3 import urlopen, Request # 王
def _fill_today_fields(row_dict, today, fields):
    """Copy the requested today_* columns from a node's 'today' dict."""
    for field in fields:
        row_dict[field] = int(today[field[6:]])  # strip the 'today_' prefix


def _fill_total_fields(row_dict, totals, fields, grade_optional=False):
    """Copy the total_* columns from a node's 'total' dict.

    Rates become floats, 'grade' a string and everything else an int.
    With ``grade_optional`` (city rows) the grade column is only written
    when present in the payload; otherwise it defaults to ''.
    """
    for field in fields:
        key = field[6:]  # strip the 'total_' prefix
        if field in ('total_deadRate', 'total_healRate'):
            row_dict[field] = float(totals[key])
        elif field == 'grade':
            if not grade_optional:
                row_dict[field] = ''
            elif 'grade' in totals:
                row_dict[field] = str(totals['grade'])
        else:
            row_dict[field] = int(totals[key])


def get_ncov_tencent_rt():
    '''
    Fetch Tencent's real-time epidemic data and flatten it into a table.

    Returns
    -------
    pandas.DataFrame
        One row for the national aggregate, one per province and one per
        city, with columns ``ct.TENCENT_NCOV_AREA_RT_COLUMNS``.

    Raises
    ------
    ValueError
        If the API responds with a non-zero 'ret' status code.
    '''
    listed_columns = ct.TENCENT_NCOV_AREA_RT_COLUMNS
    request = ct.TENCENT_NCOV_RT_URL % (
        ct.P_TYPE['http'], ct.DOMAINS['tencent'])
    text = urlopen(request, timeout=10).read().decode('utf-8')
    org_js = json.loads(text)
    if int(org_js['ret']) != 0:
        # A non-zero 'ret' means the API refused the request.
        raise ValueError(str(org_js['ret']))
    # The payload is itself a JSON document; decode it directly instead of
    # the previous replace-booleans-then-eval() approach, which executed
    # remote data. JSON booleans decode to True/False, which int() maps to
    # the same 1/0 the old code produced.
    data = json.loads(org_js['data'])
    dict_data = list()
    # National aggregate row.
    for row in data['areaTree']:
        if row['name'] == '中国':
            row_dict = {'lastUpdateTime': data['lastUpdateTime']}
            row_dict['area'] = '中国'
            for key in row['today']:
                row_dict['today_' + key] = int(row['today'][key])
            # These two fields are absent from the national 'today' data;
            # mark them explicitly as missing. (The old code stored a set
            # under a tuple key, leaving both columns NaN.)
            row_dict['today_confirmCuts'] = -1
            row_dict['today_wzz_add'] = -1
            _fill_total_fields(row_dict, row['total'], listed_columns[6:])
            dict_data.append(row_dict)
    # Province rows and, nested under them, city rows.
    for province in data['areaTree'][0]['children']:
        row_dict = {'lastUpdateTime': data['lastUpdateTime']}
        row_dict['area'] = province['name']
        _fill_today_fields(row_dict, province['today'], listed_columns[2:5])
        _fill_total_fields(row_dict, province['total'], listed_columns[6:])
        dict_data.append(row_dict)
        for city in province.get('children', []):
            row_dict = {'lastUpdateTime': data['lastUpdateTime']}
            row_dict['area'] = city['name']
            _fill_today_fields(row_dict, city['today'], listed_columns[2:5])
            _fill_total_fields(row_dict, city['total'], listed_columns[6:],
                               grade_optional=True)
            dict_data.append(row_dict)
    return pd.DataFrame(dict_data, columns=ct.TENCENT_NCOV_AREA_RT_COLUMNS)
|
{"hexsha": "e9ca5f89ee9d57cdfb50340032757262397d228c", "size": 4415, "ext": "py", "lang": "Python", "max_stars_repo_path": "datafactory/disease/ncov.py", "max_stars_repo_name": "taotaotututu/ncov_data", "max_stars_repo_head_hexsha": "58a40ac4b66d93f1a75a10907ae82f2cadb40d02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datafactory/disease/ncov.py", "max_issues_repo_name": "taotaotututu/ncov_data", "max_issues_repo_head_hexsha": "58a40ac4b66d93f1a75a10907ae82f2cadb40d02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datafactory/disease/ncov.py", "max_forks_repo_name": "taotaotututu/ncov_data", "max_forks_repo_head_hexsha": "58a40ac4b66d93f1a75a10907ae82f2cadb40d02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.32, "max_line_length": 86, "alphanum_fraction": 0.5306908267, "include": true, "reason": "import numpy", "num_tokens": 1124}
|
"""
Implementation of a class for the analysis of hyperfine structure spectra with isomeric presence.
.. moduleauthor:: Wouter Gins <wouter.gins@kuleuven.be>
.. moduleauthor:: Ruben de Groote <ruben.degroote@kuleuven.be>
"""
import copy
import lmfit as lm
from satlas.models.basemodel import BaseModel
from satlas.utilities import poisson_interval
import matplotlib.pyplot as plt
import numpy as np
__all__ = ['SumModel']
class SumModel(BaseModel):
"""Create a model that sums all the underlying models for a single input variable."""
def __init__(self, models):
"""Initializes the HFS by providing a list of :class:`.HFSModel`
objects.
Parameters
----------
models: list of :class:`.HFSModel` instances
A list containing the models."""
super(SumModel, self).__init__()
self.models = models
for i, model in enumerate(self.models):
model._add_prefix('s' + str(i) + '_')
self._set_params()
self.shared = []
def _set_params(self):
for model in self.models:
try:
p.add_many(*model.params.values())
except:
p = model.params.copy()
self.params = p
def _add_prefix(self, value):
for model in self.models:
model._add_prefix(value)
self._set_params()
def get_chisquare_mapping(self):
return np.hstack([f.get_chisquare_mapping() for f in self.models])
def get_lnprior_mapping(self, params):
return sum([f.get_lnprior_mapping(params) for f in self.models])
    @property
    def shared(self):
        """Contains all parameters which share the same value among all models."""
        return self._shared
    @shared.setter
    def shared(self, value):
        # Work on a copy, then reassign through the *params* setter so the
        # submodels are refreshed with the new constraints.
        params = self.params.copy()
        self._shared = value
        for name in self._shared:
            # All parameters (one per submodel, thanks to the prefixes)
            # whose full name contains the shared name.
            selected_list = [p for p in params.keys() if name in p]
            try:
                # Constrain every later match to mirror the first one.
                selected_name = selected_list[0]
                for p in selected_list[1:]:
                    params[p].expr = selected_name
            except IndexError:
                # No parameter matches *name*: silently skip it.
                pass
        self.params = params
    @property
    def params(self):
        """Instance of lmfit.Parameters object characterizing the
        shape of the HFS."""
        return self._parameters
    @params.setter
    def params(self, params):
        # Keep a private copy and push an independent copy down to every
        # submodel so they stay in sync with the combined set.
        self._parameters = params.copy()
        for spec in self.models:
            spec.params = self._parameters.copy()
    def seperate_response(self, x, background=False):
        """Get the response of each separate spectrum for the values *x*.

        Note: the method name keeps its historical misspelling for
        backward compatibility.

        Parameters
        ----------
        x : float or array_like
            Frequency in MHz.

        Other parameters
        ----------------
        background: boolean
            If True, the background of each spectrum is kept in its
            response. If False, the background is subtracted out.

        Returns
        -------
        list of floats or NumPy arrays
            Separate responses of the models to the input *x*."""
        # Each model's background is the polynomial whose coefficients are
        # its 'Background*' parameters, evaluated at x.
        # NOTE(review): np.polyval expects highest-degree-first coefficient
        # order -- confirm the parameter iteration order matches.
        background_vals = [np.polyval([s.params[par_name].value for par_name in s.params if par_name.startswith('Background')], x) for s in self.models]
        # b * (1 - background): subtract the background only when
        # background=False.
        return [s(x) - b * (1-background) for s, b in zip(self.models, background_vals)]
###############################
# PLOTTING ROUTINES #
###############################
def plot(self, x=None, y=None, yerr=None, ax=None, plot_kws={}, plot_seperate=True, show=True, legend=None, data_legend=None, xlabel='Frequency (MHz)', ylabel='Counts'):
    """Routine that plots the hfs of all the models,
    possibly on top of experimental data.

    Parameters
    ----------
    x: list of arrays
        Experimental x-data. If list of Nones, a suitable region around
        the peaks is chosen to plot the hfs.
    y: list of arrays
        Experimental y-data.
    yerr: list of arrays
        Experimental errors on y.
    plot_seperate: boolean, optional
        Controls if the underlying models are drawn as well, or only
        the sum. Defaults to True.
    no_of_points: int
        Number of points to use for the plot of the hfs if
        experimental data is given.
    ax: matplotlib axes object
        If provided, plots on this axis.
    show: boolean
        If True, the plot will be shown at the end.
    legend: string, optional
        If given, an entry in the legend will be made for the spectrum.
    data_legend: string, optional
        If given, an entry in the legend will be made for the experimental
        data.
    xlabel: string, optional
        If given, sets the xlabel to this string. Defaults to 'Frequency (MHz)'.
    ylabel: string, optional
        If given, sets the ylabel to this string. Defaults to 'Counts'.
    indicate: boolean, optional
        If set to True, dashed lines are drawn to indicate the location of the
        transitions, and the labels are attached. Defaults to False.
    model: boolean, optional
        If given, the region around the fitted line will be shaded, with
        the luminosity indicating the pmf of the Poisson
        distribution characterized by the value of the fit. Note that
        the argument *yerr* is ignored if *model* is True.
    normalized: Boolean
        If True, the data and fit are plotted normalized such that the highest
        data point is one.
    distance: float, optional
        Controls how many FWHM deviations are used to generate the plot.
        Defaults to 4.

    Note
    ----
    *model*, *colormap*, *indicate*, *normalized* and *distance* are read
    from *plot_kws*, not from the signature. NOTE(review): *plot_kws* is a
    mutable default argument and is mutated below (keys set and popped), so
    state can leak between calls that rely on the default — consider
    switching the default to None.

    Returns
    -------
    fig, ax: matplotlib figure and axes"""
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    else:
        fig = ax.get_figure()
    toReturn = fig, ax
    if x is not None and y is not None:
        try:
            # Asymmetric errors given as a {'low': ..., 'high': ...} mapping.
            ax.errorbar(x, y, yerr=[y - yerr['low'], yerr['high'] - y], fmt='o', label=data_legend)
        except:
            # Fall back to symmetric (or absent) error bars.
            ax.errorbar(x, y, yerr=yerr, fmt='o', label=data_legend)
    # Submodels are drawn without their own background; the summed curve
    # below carries it.  NOTE(review): this mutates the caller's dict.
    plot_kws['background'] = False
    plot_copy = copy.deepcopy(plot_kws)
    plot_copy['model'] = False
    x_points = np.array([])
    line_counter = 1
    for m in self.models:
        plot_copy['legend'] = 'I=' + str(m.I)
        try:
            # Continue with the color of the last drawn line ...
            color = ax.lines[-1].get_color()
        except IndexError:
            # ... or start the color cycle if nothing was drawn yet.
            color = next(ax._get_lines.prop_cycler)['color']
        m.plot(x=x, y=y, yerr=yerr, show=False, ax=ax, plot_kws=plot_copy)
        # plot_kws['indicate'] = False
        # Remember the x-values the submodel chose, so the summed curve
        # can be evaluated on the union of all of them.
        x_points = np.append(x_points, ax.lines[-1].get_xdata())
        if not plot_seperate:
            # Hide the submodel's own curve; only the sum is wanted.
            ax.lines.pop(-1)
        if x is not None:
            # Drop the duplicated data line the submodel plotted
            # (the boolean *plot_seperate* is used as an integer offset).
            ax.lines.pop(-1 - plot_seperate)
        # Advance the prop_cycler until it wraps back to *color*, keeping
        # the color cycle consistent for subsequent artists.
        while not next(ax._get_lines.prop_cycler)['color'] == color:
            pass
        if plot_seperate:
            # Give every line belonging to this submodel one fresh color.
            c = next(ax._get_lines.prop_cycler)['color']
            for l in ax.lines[line_counter:]:
                l.set_color(c)
            while not next(ax._get_lines.prop_cycler)['color'] == c:
                pass
        line_counter = len(ax.lines)
    x = np.sort(x_points)
    model = plot_kws.pop('model', False)
    if model:
        colormap = plot_kws.pop('colormap', 'bone_r',)
        min_loc = [s.locations.min() for s in self.models]
        max_loc = [s.locations.max() for s in self.models]
        # NOTE(review): shadows the builtin *range* for the rest of this branch.
        range = (min(min_loc), max(max_loc))
        from scipy import optimize
        # Peak of the summed response, found by brute force + simplex polish.
        max_counts = np.ceil(-optimize.brute(lambda x: -self(x), (range,), full_output=True, Ns=1000, finish=optimize.fmin)[1])
        min_counts = [self.params[par_name].value for par_name in self.params if par_name.endswith('Background0')][0]
        min_counts = np.floor(max(0, min_counts - 3 * min_counts ** 0.5))
        y = np.arange(min_counts, max_counts + 3 * max_counts ** 0.5 + 1)
        X, Y = np.meshgrid(x, y)
        from scipy import stats
        # Column-normalized Poisson pmf around the fitted curve: darker
        # pixels mark more probable count values given the fit.
        z = stats.poisson(self(X)).pmf(Y)
        z = z / z.sum(axis=0)
        ax.imshow(z, extent=(x.min(), x.max(), y.min(), y.max()), cmap=plt.get_cmap(colormap))
        line, = ax.plot(x, self(x), label=legend, lw=0.5)
    else:
        ax.plot(x, self(x))
    ax.legend(loc=0)
    # ax.set_xlabel(xlabel)
    # ax.set_ylabel(ylabel)
    if show:
        plt.show()
    return toReturn
def plot_spectroscopic(self, x=None, y=None, plot_kws=None, **kwargs):
    """Routine that plots the hfs of all the models, possibly on
    top of experimental data. It assumes that the y data is drawn from
    a Poisson distribution (e.g. counting data).

    Parameters
    ----------
    x: list of arrays
        Experimental x-data. If list of Nones, a suitable region around
        the peaks is chosen to plot the hfs.
    y: list of arrays
        Experimental y-data.
    yerr: list of arrays
        Experimental errors on y.
    no_of_points: int
        Number of points to use for the plot of the hfs.
    ax: matplotlib axes object
        If provided, plots on this axis
    show: Boolean
        if True, the plot will be shown at the end.

    Returns
    -------
    fig, ax: matplotlib figure and axes"""
    if plot_kws is None:
        # Use a fresh dict per call: *plot* mutates this mapping (sets and
        # pops keys), so a shared mutable default would leak state between
        # calls. The previous default of ``{}`` had exactly that problem.
        plot_kws = {}
    if y is not None:
        # Asymmetric Poisson confidence bounds on the counting data.
        ylow, yhigh = poisson_interval(y)
        yerr = {'low': ylow, 'high': yhigh}
    else:
        yerr = None
    return self.plot(x=x, y=y, yerr=yerr, plot_kws=plot_kws, **kwargs)
def __add__(self, other):
    """Adding an SumModel results in a new SumModel
    with the new spectrum added.

    Returns
    -------
    SumModel"""
    if isinstance(other, SumModel):
        return SumModel(self.models + other.models)
    try:
        # Delegate to the other operand: e.g. adding a single Model relies
        # on that class's __add__ knowing how to absorb this SumModel.
        return other.__add__(self)
    except (AttributeError, TypeError):
        # Narrowed from a bare ``except``: only genuine "cannot add"
        # failures are converted; unrelated errors (KeyboardInterrupt,
        # bugs inside other.__add__) now propagate instead of being masked.
        raise TypeError('unsupported operand type(s)')
def __call__(self, x):
"""Get the response for frequency *x* (in MHz) of the spectrum.
Parameters
----------
x : float or array_like
Frequency in MHz
Returns
-------
float or NumPy array
Response of the spectrum for each value of *x*."""
return np.sum([s(x) for s in self.models], axis=0)
|
{"hexsha": "877407b4f1a20c7b7217501244fa98f72198eb5d", "size": 10710, "ext": "py", "lang": "Python", "max_stars_repo_path": "satlas/models/summodel.py", "max_stars_repo_name": "woutergins/SATLAS", "max_stars_repo_head_hexsha": "2f7a4be557165f8f7f12e5cace71881b0046c19d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-08-26T14:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T02:37:21.000Z", "max_issues_repo_path": "satlas/models/summodel.py", "max_issues_repo_name": "eatradish/satlas", "max_issues_repo_head_hexsha": "29f01bc2994c45bb32bdcb63668ed9afab95dbda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2015-06-02T13:13:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-15T08:28:06.000Z", "max_forks_repo_path": "satlas/models/summodel.py", "max_forks_repo_name": "eatradish/satlas", "max_forks_repo_head_hexsha": "29f01bc2994c45bb32bdcb63668ed9afab95dbda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-11-18T01:13:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-25T21:52:04.000Z", "avg_line_length": 36.5529010239, "max_line_length": 173, "alphanum_fraction": 0.568907563, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2421}
|
import torch
import numpy as np
def generate_field(f, xi, H, W, el, roll, device):
    """Generate unit direction vectors for every pixel of a H x W image.

    Pixels are back-projected onto the unit sphere with focal length *f*
    and distortion parameter *xi* (unified-camera-model style projection),
    after which the sphere is rotated by elevation *el* and roll *roll*.

    Parameters
    ----------
    f : float
        Focal length in pixels.
    xi : float
        Distortion parameter of the projection model.
    H, W : int
        Image height and width in pixels.
    el, roll : torch scalar tensors
        Elevation and roll angles in radians (kept as tensors so
        gradients can flow through ``torch.cos``/``torch.sin``).
    device : torch device (or device string)
        Device on which the coordinate grids are created.

    Returns
    -------
    torch.Tensor
        Shape ``(3, H*W)``; columns are unit-norm (x, y, z) directions.

    Cleanup notes: the original body contained ~30 unreachable lines after
    ``return coords`` (including a second ``return`` and references to the
    undefined names ``m``, ``ImPano_W``, ``ImPano_H``), unused locals
    ``x_ref``/``y_ref``, and large commented-out debug blocks; all removed
    with no behavior change.
    """
    u0 = W / 2.
    v0 = H / 2.
    grid_x, grid_y = torch.meshgrid(torch.arange(0, W).float().to(device),
                                    torch.arange(0, H).float().to(device))

    # 1. Normalized camera coordinates (image y axis flipped to point up).
    X_Cam = (grid_x - u0) / f
    Y_Cam = -(grid_y - v0) / f

    # 2. Projection onto the sphere.
    AuxVal = X_Cam * X_Cam + Y_Cam * Y_Cam
    alpha_cam = xi + torch.sqrt(1 + (1 - xi * xi) * AuxVal)
    alpha_cam_div = alpha_cam / (AuxVal + 1)
    X_Sph = X_Cam * alpha_cam_div
    Y_Sph = Y_Cam * alpha_cam_div
    Z_Sph = alpha_cam_div - xi

    # 3. Rotation of the sphere (elevation, then roll).
    # NOTE(review): kept bit-for-bit from the original. The Z update uses
    # the *already rotated* Y (and the Y roll update uses the already
    # rotated X), so this is not an exact rotation-matrix application.
    # Confirm whether downstream models depend on this exact formulation
    # before rewriting it with temporaries.
    cosel, sinel = torch.cos(el), torch.sin(el)
    Y_Sph = Y_Sph * cosel - Z_Sph * sinel
    Z_Sph = Y_Sph * sinel + Z_Sph * cosel
    cosroll, sinroll = torch.cos(roll), torch.sin(roll)
    X_Sph = X_Sph * cosroll - Y_Sph * sinroll
    Y_Sph = X_Sph * sinroll + Y_Sph * cosroll

    # 4. Stack into (3, H*W) and renormalize to unit length.
    coords = torch.stack((X_Sph.reshape(-1), Y_Sph.reshape(-1), Z_Sph.reshape(-1)))
    coords = coords / torch.sqrt(torch.sum(coords ** 2, dim=0))
    return coords
|
{"hexsha": "b7bf2337745fc5b8c00536784df58abcdacc4e41", "size": 3909, "ext": "py", "lang": "Python", "max_stars_repo_path": "RELEASE_ScaleNet_minimal/utils/compute_vectors.py", "max_stars_repo_name": "Jerrypiglet/scalenet", "max_stars_repo_head_hexsha": "6a5db480e650d10d1e575e93b6cee366d1912c8a", "max_stars_repo_licenses": ["Adobe-2006", "Adobe-Glyph"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2020-08-27T09:06:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:27:31.000Z", "max_issues_repo_path": "RELEASE_ScaleNet_minimal/utils/compute_vectors.py", "max_issues_repo_name": "Jerrypiglet/scalenet", "max_issues_repo_head_hexsha": "6a5db480e650d10d1e575e93b6cee366d1912c8a", "max_issues_repo_licenses": ["Adobe-2006", "Adobe-Glyph"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-12-28T04:41:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-07T06:03:36.000Z", "max_forks_repo_path": "RELEASE_ScaleNet_minimal/utils/compute_vectors.py", "max_forks_repo_name": "Jerrypiglet/scalenet", "max_forks_repo_head_hexsha": "6a5db480e650d10d1e575e93b6cee366d1912c8a", "max_forks_repo_licenses": ["Adobe-2006", "Adobe-Glyph"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-22T03:02:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T09:46:01.000Z", "avg_line_length": 30.5390625, "max_line_length": 144, "alphanum_fraction": 0.5840368381, "include": true, "reason": "import numpy", "num_tokens": 1401}
|
[STATEMENT]
lemma BSIA_implies_SIA_for_modified_view :
"\<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N = {} , C = C\<^bsub>\<V>\<^esub> \<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
assume "BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>"
and "\<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N = {} , C = C\<^bsub>\<V>\<^esub> \<rparr>"
and "\<rho> \<V> = \<rho>' \<V>'"
[PROOF STATE]
proof (state)
this:
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
\<rho> \<V> = \<rho>' \<V>'
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
\<rho> \<V> = \<rho>' \<V>'
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
fix \<alpha> \<beta> c
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
assume "c \<in> C\<^bsub>\<V>\<^esub>"
and "\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>"
and "\<alpha>\<upharpoonleft>C\<^bsub>\<V>\<^esub> = []"
and "Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c"
[PROOF STATE]
proof (state)
this:
c \<in> C\<^bsub>\<V>\<^esub>
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
\<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []
Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>c \<in> C\<^bsub>\<V>\<^esub>\<close> \<open>\<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N ={} , C = C\<^bsub>\<V>\<^esub> \<rparr>\<close>
[PROOF STATE]
proof (chain)
picking this:
c \<in> C\<^bsub>\<V>\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
[PROOF STEP]
have "c \<in> C\<^bsub>\<V>'\<^esub>"
[PROOF STATE]
proof (prove)
using this:
c \<in> C\<^bsub>\<V>\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
goal (1 subgoal):
1. c \<in> C\<^bsub>\<V>'\<^esub>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
c \<in> C\<^bsub>\<V>'\<^esub>
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>\<alpha>\<upharpoonleft>C\<^bsub>\<V>\<^esub> = []\<close> \<open>\<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N ={} , C = C\<^bsub>\<V>\<^esub> \<rparr>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
[PROOF STEP]
have "\<alpha>\<upharpoonleft>C\<^bsub>\<V>'\<^esub> = []"
[PROOF STATE]
proof (prove)
using this:
\<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
goal (1 subgoal):
1. \<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c\<close> \<open>\<rho> \<V> = \<rho>' \<V>'\<close>
[PROOF STATE]
proof (chain)
picking this:
Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c
\<rho> \<V> = \<rho>' \<V>'
[PROOF STEP]
have "Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c"
[PROOF STATE]
proof (prove)
using this:
Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c
\<rho> \<V> = \<rho>' \<V>'
goal (1 subgoal):
1. Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c
[PROOF STEP]
by (simp add: Adm_def)
[PROOF STATE]
proof (state)
this:
Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>c \<in> C\<^bsub>\<V>'\<^esub>\<close> \<open>\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>\<close> \<open>\<alpha>\<upharpoonleft>C\<^bsub>\<V>'\<^esub> = []\<close> \<open>Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c\<close>
[PROOF STATE]
proof (chain)
picking this:
c \<in> C\<^bsub>\<V>'\<^esub>
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
\<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c
[PROOF STEP]
obtain \<alpha>' where "\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>"
and " \<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>"
and " \<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []"
[PROOF STATE]
proof (prove)
using this:
c \<in> C\<^bsub>\<V>'\<^esub>
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
\<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c
goal (1 subgoal):
1. (\<And>\<alpha>'. \<lbrakk>\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>; \<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>; \<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using \<open>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>\<close>
[PROOF STATE]
proof (prove)
using this:
c \<in> C\<^bsub>\<V>'\<^esub>
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
\<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
goal (1 subgoal):
1. (\<And>\<alpha>'. \<lbrakk>\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>; \<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>; \<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding BSIA_def
[PROOF STATE]
proof (prove)
using this:
c \<in> C\<^bsub>\<V>'\<^esub>
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
\<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c
\<forall>\<alpha> \<beta>. \<forall>c\<in>C\<^bsub>\<V>'\<^esub>. \<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub> \<and> \<alpha> \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = [] \<and> Adm \<V>' \<rho>' Tr\<^bsub>ES\<^esub> \<beta> c \<longrightarrow> (\<exists>\<alpha>'. \<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub> \<and> \<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub> \<and> \<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = [])
goal (1 subgoal):
1. (\<And>\<alpha>'. \<lbrakk>\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>; \<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>; \<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>
\<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>
\<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
(*Show that \<alpha>'=\<alpha>*)
[PROOF STATE]
proof (state)
this:
\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>
\<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>
\<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>\<close> validES
[PROOF STATE]
proof (chain)
picking this:
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
ES_valid ES
[PROOF STEP]
have alpha_consists_of_events: "set \<alpha> \<subseteq> E\<^bsub>ES\<^esub>"
[PROOF STATE]
proof (prove)
using this:
\<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
ES_valid ES
goal (1 subgoal):
1. set \<alpha> \<subseteq> E\<^bsub>ES\<^esub>
[PROOF STEP]
by (auto simp add: ES_valid_def traces_contain_events_def)
[PROOF STATE]
proof (state)
this:
set \<alpha> \<subseteq> E\<^bsub>ES\<^esub>
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>\<close> validES
[PROOF STATE]
proof (chain)
picking this:
\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>
ES_valid ES
[PROOF STEP]
have alpha'_consists_of_events: "set \<alpha>' \<subseteq> E\<^bsub>ES\<^esub>"
[PROOF STATE]
proof (prove)
using this:
\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>
ES_valid ES
goal (1 subgoal):
1. set \<alpha>' \<subseteq> E\<^bsub>ES\<^esub>
[PROOF STEP]
by (auto simp add: ES_valid_def traces_contain_events_def)
[PROOF STATE]
proof (state)
this:
set \<alpha>' \<subseteq> E\<^bsub>ES\<^esub>
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
from \<open>\<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>\<close> \<open>\<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N = {} , C = C\<^bsub>\<V>\<^esub> \<rparr>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
[PROOF STEP]
have "\<alpha>'\<upharpoonleft>(V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>)=\<alpha>\<upharpoonleft>(V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>)"
[PROOF STATE]
proof (prove)
using this:
\<alpha>' \<upharpoonleft> V\<^bsub>\<V>'\<^esub> = \<alpha> \<upharpoonleft> V\<^bsub>\<V>'\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
goal (1 subgoal):
1. \<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>)
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
with \<open>\<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []\<close> \<open>\<alpha>\<upharpoonleft>C\<^bsub>\<V>\<^esub> = []\<close> \<open>\<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N = {} , C = C\<^bsub>\<V>\<^esub> \<rparr>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
\<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>)
[PROOF STEP]
have "\<alpha>'\<upharpoonleft>(V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)=\<alpha>\<upharpoonleft>(V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)"
[PROOF STATE]
proof (prove)
using this:
\<alpha>' \<upharpoonleft> C\<^bsub>\<V>'\<^esub> = []
\<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>)
goal (1 subgoal):
1. \<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)
[PROOF STEP]
by (simp add: projection_on_union)
[PROOF STATE]
proof (state)
this:
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
with VIsViewOnE alpha_consists_of_events alpha'_consists_of_events
[PROOF STATE]
proof (chain)
picking this:
isViewOn \<V> E\<^bsub>ES\<^esub>
set \<alpha> \<subseteq> E\<^bsub>ES\<^esub>
set \<alpha>' \<subseteq> E\<^bsub>ES\<^esub>
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)
[PROOF STEP]
have "\<alpha>'=\<alpha>"
[PROOF STATE]
proof (prove)
using this:
isViewOn \<V> E\<^bsub>ES\<^esub>
set \<alpha> \<subseteq> E\<^bsub>ES\<^esub>
set \<alpha>' \<subseteq> E\<^bsub>ES\<^esub>
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)
goal (1 subgoal):
1. \<alpha>' = \<alpha>
[PROOF STEP]
unfolding isViewOn_def
[PROOF STATE]
proof (prove)
using this:
V_valid \<V> \<and> V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub> = E\<^bsub>ES\<^esub>
set \<alpha> \<subseteq> E\<^bsub>ES\<^esub>
set \<alpha>' \<subseteq> E\<^bsub>ES\<^esub>
\<alpha>' \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>) = \<alpha> \<upharpoonleft> (V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> \<union> C\<^bsub>\<V>\<^esub>)
goal (1 subgoal):
1. \<alpha>' = \<alpha>
[PROOF STEP]
by (simp add: list_subset_iff_projection_neutral)
[PROOF STATE]
proof (state)
this:
\<alpha>' = \<alpha>
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
hence "\<beta> @ [c] @ \<alpha> \<in> Tr\<^bsub>ES\<^esub> "
[PROOF STATE]
proof (prove)
using this:
\<alpha>' = \<alpha>
goal (1 subgoal):
1. \<beta> @ [c] @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
using \<open>\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>\<close>
[PROOF STATE]
proof (prove)
using this:
\<alpha>' = \<alpha>
\<beta> @ [c] @ \<alpha>' \<in> Tr\<^bsub>ES\<^esub>
goal (1 subgoal):
1. \<beta> @ [c] @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<beta> @ [c] @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?c2 \<in> C\<^bsub>\<V>\<^esub>; ?\<beta>2 @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>; ?\<alpha>2 \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []; Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> ?\<beta>2 ?c2\<rbrakk> \<Longrightarrow> ?\<beta>2 @ [?c2] @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>
goal (1 subgoal):
1. \<lbrakk>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>; \<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>; \<rho> \<V> = \<rho>' \<V>'\<rbrakk> \<Longrightarrow> SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
with \<open>BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
\<lbrakk>?c2 \<in> C\<^bsub>\<V>\<^esub>; ?\<beta>2 @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>; ?\<alpha>2 \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []; Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> ?\<beta>2 ?c2\<rbrakk> \<Longrightarrow> ?\<beta>2 @ [?c2] @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
\<lbrakk>?c2 \<in> C\<^bsub>\<V>\<^esub>; ?\<beta>2 @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>; ?\<alpha>2 \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []; Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> ?\<beta>2 ?c2\<rbrakk> \<Longrightarrow> ?\<beta>2 @ [?c2] @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>
goal (1 subgoal):
1. SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
unfolding SIA_def
[PROOF STATE]
proof (prove)
using this:
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
\<lbrakk>?c2 \<in> C\<^bsub>\<V>\<^esub>; ?\<beta>2 @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>; ?\<alpha>2 \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []; Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> ?\<beta>2 ?c2\<rbrakk> \<Longrightarrow> ?\<beta>2 @ [?c2] @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>
goal (1 subgoal):
1. \<forall>\<alpha> \<beta>. \<forall>c\<in>C\<^bsub>\<V>\<^esub>. \<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub> \<and> \<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = [] \<and> Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c \<longrightarrow> \<beta> @ [c] @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
using \<open>\<V>' = \<lparr> V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub> , N ={} , C = C\<^bsub>\<V>\<^esub> \<rparr>\<close>
[PROOF STATE]
proof (prove)
using this:
BSIA \<rho>' \<V>' Tr\<^bsub>ES\<^esub>
\<lbrakk>?c2 \<in> C\<^bsub>\<V>\<^esub>; ?\<beta>2 @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>; ?\<alpha>2 \<upharpoonleft> C\<^bsub>\<V>\<^esub> = []; Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> ?\<beta>2 ?c2\<rbrakk> \<Longrightarrow> ?\<beta>2 @ [?c2] @ ?\<alpha>2 \<in> Tr\<^bsub>ES\<^esub>
\<V>' = \<lparr>V = V\<^bsub>\<V>\<^esub> \<union> N\<^bsub>\<V>\<^esub>, N = {}, C = C\<^bsub>\<V>\<^esub>\<rparr>
goal (1 subgoal):
1. \<forall>\<alpha> \<beta>. \<forall>c\<in>C\<^bsub>\<V>\<^esub>. \<beta> @ \<alpha> \<in> Tr\<^bsub>ES\<^esub> \<and> \<alpha> \<upharpoonleft> C\<^bsub>\<V>\<^esub> = [] \<and> Adm \<V> \<rho> Tr\<^bsub>ES\<^esub> \<beta> c \<longrightarrow> \<beta> @ [c] @ \<alpha> \<in> Tr\<^bsub>ES\<^esub>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
SIA \<rho> \<V> Tr\<^bsub>ES\<^esub>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 10006, "file": "Modular_Assembly_Kit_Security_Verification_Basics_BSPTaxonomy", "length": 46}
|
#include <catch.hpp>
#include <random>
#include <vector>
#include <Eigen/Core>
#include <torch/torch.h>
#include <renderer_utils.cuh>
#include <losses.h>
#include "check_adjoint.h"
#include "test_utils.h"
// Fallback stream-insertion operator for std::vector, needed by the
// INFO("values: " << values) messages further down.  It is disabled because
// an operator<< for std::vector is apparently already provided by one of the
// included headers (TODO confirm: likely the torch/c10 headers); re-enable it
// if these tests are ever built without that dependency.
//namespace std {
//	template < class T >
//	inline std::ostream& operator << (std::ostream& os, const std::vector<T>& v)
//	{
//		os << "[";
//		for (typename std::vector<T>::const_iterator ii = v.begin(); ii != v.end(); ++ii)
//		{
//			os << " " << *ii;
//		}
//		os << " ]";
//		return os;
//	}
//}
TEST_CASE("LogSumExp-double", "[math]")
{
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    for (int count = 1; count <= 10; ++count)
    {
        DYNAMIC_SECTION("N=" << count)
        {
            //draw the random inputs
            std::vector<double> samples(count);
            for (auto& s : samples) s = dist(engine);
            const auto fetch = [samples](int i) { return samples[i]; };
            const double actual = kernel::logSumExp<double>(count, fetch);
            //reference: straightforward sum of exponentials, then the log
            double expected = 0;
            for (double s : samples) expected += rexp(s);
            expected = rlog(expected);
            REQUIRE(actual == Approx(expected));
        }
    }
}
TEST_CASE("LogSumExp-double4", "[math]")
{
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    for (int count = 1; count <= 10; ++count)
    {
        DYNAMIC_SECTION("N=" << count)
        {
            //draw one random double4 per entry
            std::vector<double4> samples(count);
            for (auto& s : samples)
                s = make_double4(dist(engine), dist(engine),
                                 dist(engine), dist(engine));
            const auto fetch = [samples](int i) { return samples[i]; };
            const double4 actual = kernel::logSumExp<double4>(count, fetch);
            //reference computed channel-wise via the vectorized rexp/rlog
            double4 expected = make_double4(0);
            for (const double4& s : samples) expected += rexp(s);
            expected = rlog(expected);
            REQUIRE(actual.x == Approx(expected.x));
            REQUIRE(actual.y == Approx(expected.y));
            REQUIRE(actual.z == Approx(expected.z));
            REQUIRE(actual.w == Approx(expected.w));
        }
    }
}
TEST_CASE("LogSumExp-scaling-double", "[math]")
{
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    for (int count = 1; count <= 10; ++count)
    {
        DYNAMIC_SECTION("N=" << count)
        {
            //draw values plus non-negative per-entry scaling factors
            std::vector<double> values(count);
            std::vector<double> scaling(count);
            for (int i = 0; i < count; ++i) {
                values[i] = dist(engine);
                scaling[i] = fabs(dist(engine));
            }
            INFO("values: " << values);
            INFO("scaling: " << scaling);
            const auto valueAt = [values](int i) { return values[i]; };
            const auto scaleAt = [scaling](int i) { return scaling[i]; };
            const double actual = kernel::logSumExpWithScaling<double>(
                count, valueAt, scaleAt);
            //reference: weighted sum of exponentials, then the log
            double expected = 0;
            for (int i = 0; i < count; ++i)
                expected += scaling[i] * rexp(values[i]);
            expected = rlog(expected);
            REQUIRE(actual == Approx(expected));
        }
    }
}
TEST_CASE("LogSumExp-scaling-double4", "[math]")
{
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    for (int count = 1; count <= 10; ++count)
    {
        DYNAMIC_SECTION("N=" << count)
        {
            //double4 values with one shared non-negative scale per entry
            std::vector<double4> values(count);
            std::vector<double> scaling(count);
            for (int i = 0; i < count; ++i) {
                values[i] = make_double4(dist(engine), dist(engine),
                                         dist(engine), dist(engine));
                scaling[i] = fabs(dist(engine));
            }
            const auto valueAt = [values](int i) { return values[i]; };
            const auto scaleAt = [scaling](int i) { return scaling[i]; };
            const double4 actual = kernel::logSumExpWithScaling<double4>(
                count, valueAt, scaleAt);
            //reference computed channel-wise
            double4 expected = make_double4(0);
            for (int i = 0; i < count; ++i)
                expected += scaling[i] * rexp(values[i]);
            expected = rlog(expected);
            REQUIRE(actual.x == Approx(expected.x));
            REQUIRE(actual.y == Approx(expected.y));
            REQUIRE(actual.z == Approx(expected.z));
            REQUIRE(actual.w == Approx(expected.w));
        }
    }
}
TEST_CASE("LogMSE-double", "[math]")
{
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    for (int trial = 1; trial <= 50; ++trial)
    {
        INFO("i: " << trial);
        const double logX = dist(engine);
        const double logY = dist(engine);
        const double x = exp(logX);
        const double y = exp(logY);
        INFO("x=" << x << ", y=" << y);
        //logMSE works entirely in log-space; exponentiate its result and
        //compare against the plain squared error of x and y
        const double diff = x - y;
        const double mseExpected = diff * diff;
        const double mseActual = exp(kernel::logMSE(logX, logY));
        REQUIRE(mseActual == Approx(mseExpected));
    }
}
TEST_CASE("LogL1-double", "[math]")
{
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    for (int trial = 1; trial <= 50; ++trial)
    {
        INFO("i: " << trial);
        const double logX = dist(engine);
        const double logY = dist(engine);
        const double x = exp(logX);
        const double y = exp(logY);
        INFO("x=" << x << ", y=" << y);
        //logL1 works in log-space; exponentiate its result and compare
        //against the plain absolute difference of x and y
        const double l1Expected = fabs(x - y);
        const double l1Actual = exp(kernel::logL1(logX, logY));
        REQUIRE(l1Actual == Approx(l1Expected));
    }
}
TEST_CASE("Adjoint-LogMSE", "[adjoint]")
{
    typedef empty TmpStorage_t;
    typedef Eigen::VectorXd Vector_t;
    //forward pass: wrap kernel::logMSE into checkAdjoint's vector interface
    const auto forward = [](const Vector_t& in, TmpStorage_t* tmp) -> Vector_t
    {
        Vector_t result(1);
        result[0] = kernel::logMSE(in[0], in[1]);
        return result;
    };
    //adjoint pass: propagate the output gradient g[0] back to both inputs
    const auto adjoint = [](const Vector_t& in, const Vector_t& e, const Vector_t& g,
        Vector_t& z, const TmpStorage_t& tmp)
    {
        double adjLogX, adjLogY;
        kernel::adjLogMSE(in[0], in[1], g[0], adjLogX, adjLogY);
        z[0] = adjLogX;
        z[1] = adjLogY;
    };
    std::default_random_engine engine(42);
    std::uniform_real_distribution<double> dist(-5, +5);
    const int numTests = 20;
    for (int test = 0; test < numTests; ++test)
    {
        INFO("N=" << test);
        Vector_t input(2);
        input[0] = dist(engine);
        input[1] = dist(engine);
        //compare the analytic adjoint against finite differences
        checkAdjoint<Vector_t, TmpStorage_t>(input, forward, adjoint,
            1e-5, 1e-5, 1e-6);
    }
}
// NOTE(review): the test name says "Log1" but the body exercises
// kernel::logL1 / kernel::adjLogL1 -- presumably a typo for "Adjoint-LogL1".
// Renaming would change the Catch test-filter string, so it is only flagged.
TEST_CASE("Adjoint-Log1", "[adjoint]")
{
    typedef empty TmpStorage_t;
    typedef Eigen::VectorXd Vector_t;
    // forward pass: evaluate logL1 on the two inputs, returned as a 1-vector
    auto forward = [](const Vector_t& x, TmpStorage_t* tmp) -> Vector_t
    {
        double logX = x[0], logY = x[1];
        double res = kernel::logL1(logX, logY);
        Vector_t rese(1);
        rese[0] = res;
        return rese;
    };
    // adjoint pass: back-propagate the output gradient g[0] to both inputs
    auto adjoint = [](const Vector_t& x, const Vector_t& e, const Vector_t& g,
        Vector_t& z, const TmpStorage_t& tmp)
    {
        double logX = x[0], logY = x[1];
        double adjOut = g[0];
        double adjLogX, adjLogY;
        kernel::adjLogL1(logX, logY, adjOut, adjLogX, adjLogY);
        z[0] = adjLogX;
        z[1] = adjLogY;
    };
    std::default_random_engine rnd(42);
    std::uniform_real_distribution<double> distr(-5, +5);
    int N = 20;
    for (int i = 0; i < N; ++i)
    {
        INFO("N=" << i);
        Vector_t x(2);
        for (int j = 0; j < 2; ++j) x[j] = distr(rnd);
        // compare the analytic adjoint against finite differences
        checkAdjoint<Vector_t, TmpStorage_t>(x, forward, adjoint,
            1e-5, 1e-5, 1e-6);
    }
}
TEST_CASE("Adjoint-LogMSE-Full", "[adjoint]")
{
    // End-to-end check of renderer::logMSE through the PyTorch autograd
    // graph on CUDA tensors.  The previous version only printed the
    // gradient and asserted nothing; now we at least require that the
    // backward pass produces defined, finite gradients of the right shape.
    torch::Tensor logX = torch::randn({ 4,5 },
        at::TensorOptions().dtype(c10::kDouble).device(c10::kCUDA))
        .requires_grad_(true);
    torch::Tensor logY = torch::randn({ 4,5 },
        at::TensorOptions().dtype(c10::kDouble).device(c10::kCUDA))
        .requires_grad_(true);
    torch::Tensor out = renderer::logMSE(logX, logY);
    torch::Tensor grad_out = torch::rand_like(out);
    auto grad_inputs = torch::autograd::grad(
        { out }, { logX, logY }, { grad_out });
    torch::Tensor grad_logX = grad_inputs[0];
    torch::Tensor grad_logY = grad_inputs[1];
    REQUIRE(grad_logX.defined());
    REQUIRE(grad_logY.defined());
    REQUIRE(grad_logX.sizes() == logX.sizes());
    REQUIRE(grad_logY.sizes() == logY.sizes());
    REQUIRE(torch::all(torch::isfinite(grad_logX)).item<bool>());
    REQUIRE(torch::all(torch::isfinite(grad_logY)).item<bool>());
}
|
{"hexsha": "323695eceedbfdf3eaf8623e53e318cddcf55302", "size": 7557, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "unittests/testMath.cpp", "max_stars_repo_name": "shamanDevel/DiffDVR", "max_stars_repo_head_hexsha": "99fbe9f114d0097daf402bde2ae35f18dade335d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2021-08-02T04:51:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T18:02:27.000Z", "max_issues_repo_path": "unittests/testMath.cpp", "max_issues_repo_name": "shamanDevel/DiffDVR", "max_issues_repo_head_hexsha": "99fbe9f114d0097daf402bde2ae35f18dade335d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-11-04T14:23:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T10:30:13.000Z", "max_forks_repo_path": "unittests/testMath.cpp", "max_forks_repo_name": "shamanDevel/DiffDVR", "max_forks_repo_head_hexsha": "99fbe9f114d0097daf402bde2ae35f18dade335d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2021-07-16T10:23:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T02:51:43.000Z", "avg_line_length": 25.8801369863, "max_line_length": 85, "alphanum_fraction": 0.6282916501, "num_tokens": 2456}
|
from whoosh.analysis import *
from whoosh.index import create_in
from whoosh.qparser import *
from whoosh.fields import *
from whoosh import scoring
from whoosh import index
import csv
import statistics
import numpy as np
import matplotlib.pyplot as plt
import math
import itertools
def mrr(gt, se):
    """Mean Reciprocal Rank of the search-engine results.

    For every query in `se`, find the 1-based rank of the first returned
    document that is relevant according to the ground truth, accumulate
    1/rank, and average over the number of ground-truth queries.

    :param gt: dict mapping query_id -> list of relevant doc_ids
    :param se: dict mapping query_id -> ranked list of returned doc_ids
    :return: MRR as a float
    """
    total = 0.0  # renamed from `sum` to avoid shadowing the builtin
    for query_id, doc_ids in se.items():
        relevant = gt.get(query_id)
        if relevant is None:
            continue  # query absent from ground truth contributes nothing
        for rank, doc_id in enumerate(doc_ids, start=1):
            if doc_id in relevant:
                total += 1 / rank  # only the first relevant hit counts
                break
    return total / len(gt)
def pak(gt, se, k, q):
    """Precision at k for a single query.

    Counts how many of the first `k` returned documents are relevant and
    normalizes by min(k, number of relevant documents).

    :param gt: dict mapping query_id -> list of relevant doc_ids
    :param se: dict mapping query_id -> ranked list of returned doc_ids
    :param k: cutoff rank
    :param q: query id
    :return: p@k as a float, or -1 if `q` is not in the ground truth
    """
    if q not in gt:
        return -1
    relevant = gt[q]
    # the early return above makes any further `q in gt` checks redundant
    hits = sum(1 for doc_id in se[q][:k] if doc_id in relevant)
    return hits / min(k, len(relevant))
def r_precision(gt, se, q):
    """R-precision for a single query.

    Precision over the first R returned documents, where R is the number
    of relevant documents for the query in the ground truth.

    :param gt: dict mapping query_id -> list of relevant doc_ids
    :param se: dict mapping query_id -> ranked list of returned doc_ids
    :param q: query id
    :return: R-precision as a float, or -1 if `q` is not in the ground truth
    """
    if q not in gt:
        return -1
    relevant = gt[q]
    # the early return above makes the duplicated `q in gt` checks of the
    # previous implementation unnecessary
    hits = sum(1 for doc_id in se[q][:len(relevant)] if doc_id in relevant)
    return hits / len(relevant)
def ndcgak(gt, se, k, q):
    """Normalized Discounted Cumulative Gain at k for a single query.

    Binary relevance: a returned document gains 1/log2(rank+1) when it is
    relevant.  The ideal DCG assumes the first min(k, R) results are all
    relevant, where R is the number of relevant documents.

    :param gt: dict mapping query_id -> list of relevant doc_ids
    :param se: dict mapping query_id -> ranked list of returned doc_ids
    :param k: cutoff rank
    :param q: query id
    :return: nDCG@k as a float, or -1 if `q` is not in the ground truth
             or the ideal DCG is zero (no relevant documents)
    """
    if q not in gt:
        return -1
    relevant = gt[q]
    dcg = 0.0
    for pos, doc_id in enumerate(se[q][:k], start=1):
        if doc_id in relevant:
            dcg += 1 / math.log2(pos + 1)
    # ideal DCG: the first min(k, |relevant|) positions are all relevant
    idcg = sum(1 / math.log2(pos + 1)
               for pos in range(1, min(k, len(relevant)) + 1))
    if idcg != 0.0:
        return dcg / idcg
    return -1
# Load the ground truth (query_id -> list of relevant doc_ids) into a dict
# so the metric functions above can do fast lookups.
filename = open("./Cranfield_DATASET/cran_Ground_Truth.tsv")
ground_truth = csv.reader(filename, delimiter="\t")
next(ground_truth)  # skip the header row
gt = {}
for row in ground_truth:
    gt.setdefault(row[0], []).append(row[1])
filename.close()
# print(gt)
# Load the queries (query_id -> raw query text).
query = {}
filename = open("./Cranfield_DATASET/cran_Queries.tsv")
reader = csv.reader(filename, delimiter='\t')
next(reader)  # skip the header line with the field names
for record in reader:
    query[record[0]] = record[1]
filename.close()
# The search-engine configurations under evaluation.  Each entry pairs a
# whoosh text analyzer with a scoring function; the last element is a
# human-readable label.  The dict key is the configuration index.
configurations = {0: [SimpleAnalyzer(), scoring.Frequency(), "SimpleAnalyzer, Frequency"],
                  1: [SimpleAnalyzer(), scoring.TF_IDF(), "SimpleAnalyzer, TF_IDF"],
                  2: [SimpleAnalyzer(), scoring.BM25F(), "SimpleAnalyzer, BM25F"],
                  3: [StandardAnalyzer(), scoring.TF_IDF(), "StandardAnalyzer, TF_IDF"],
                  4: [StandardAnalyzer(), scoring.BM25F(), "StandardAnalyzer, BM25F"],
                  5: [StemmingAnalyzer(), scoring.TF_IDF(), "StemmingAnalyzer, TF_IDF"],
                  6: [StemmingAnalyzer(), scoring.BM25F(), "StemmingAnalyzer, BM25F"],
                  7: [KeywordAnalyzer(lowercase=True), scoring.TF_IDF(), "KeywordAnalyzer, TF_IDF"],
                  8: [KeywordAnalyzer(lowercase=True), scoring.BM25F(), "KeywordAnalyzer, BM25F"],
                  9: [FancyAnalyzer(), scoring.TF_IDF(), "FancyAnalyzer, TF_IDF"],
                  10: [FancyAnalyzer(), scoring.BM25F(), "FancyAnalyzer, BM25F"],
                  11: [LanguageAnalyzer("en"), scoring.BM25F(), "LanguageAnalyzer, BM25F"],
                  }
# For every configuration: rebuild the index over the 1400 Cranfield HTML
# documents, run all queries, dump the ranked results to a TSV file, and
# accumulate MRR and R-precision statistics per search engine.
mean = {}  # dictionary with mrr for each search engine
temp = 0  # index of each search engine
r_mean = {}  # mean r-precision for each search engine
r_prec = {}  # all r-precisions for each search engine
max_ = {}  # dictionary with max r-precision for each search engine
min_ = {}  # dictionary with min r-precision for each search engine
all_se = {}  # dictionary of dictionaries containing the search results
for conf in range(len(configurations)):
    # Define a Text-Analyzer
    selected_analyzer = configurations[conf][0]
    # Create a Schema
    schema = Schema(id=ID(stored=True), content=TEXT(
        stored=False, analyzer=selected_analyzer))
    # Create an empty-Index according to the just defined Schema
    # (the same './index' directory is overwritten on every iteration)
    directory_containing_the_index = './index'
    create_in(directory_containing_the_index, schema)
    ix = index.open_dir(directory_containing_the_index)
    num_docs = 0
    # Fill the Index
    # NOTE(review): `writer` first holds the whoosh index writer and is later
    # rebound to a csv writer -- two unrelated roles sharing one name.
    writer = ix.writer()
    for x in range(1, 1401):  # for every html file
        html_files = "./Cranfield_DATASET/DOCUMENTS/______"+str(x)+".html"
        num_docs += 1
        id = str(x)
        # print(id)
        with open(html_files, "r", encoding='latin1') as filename:
            file_data = filename.readline()
            # i dont care about the info before <body>
            while(file_data.startswith('<body>') == False):
                file_data = filename.readline()
            content = ''
            while(file_data != '</body>\n'):  # i stop when body ends
                file_data = filename.readline()
                content += file_data
            # print(content)
            writer.add_document(id=id, content=content)
            # print("-----------------------------------")
    writer.commit()
    # NOTE(review): redundant -- the `with` block already closed this file.
    filename.close()
    # Select a Scoring-Function
    scoring_function = configurations[conf][1]
    temp += 1  # count search engines
    # Create a QueryParser for parsing the input_query
    qp = QueryParser("content", ix.schema)
    # Create a Searcher for the Index with the selected Scoring-Function
    searcher = ix.searcher(weighting=scoring_function)
    # Create tsv file to save results (all possible combinations of queries and documents)
    with open('./search_engines/results'+str(temp)+'.tsv', 'w', newline='') as filename:
        writer = csv.writer(filename, delimiter='\t')
        writer.writerow(['Query_ID', 'Doc_ID', 'Rank', 'Score'])
        se = {}  # save search engine results in dictionary to save time
        sum_r = 0
        min_r = float("inf")
        max_r = float("-inf")
        tmp = 0
        for x in range(1, 226):  # for each query
            if str(x) in query:  # make sure that i dont get a keyerror e.g. 31 doesn't exist
                input_query = query[str(x)]
                parsed_query = qp.parse(input_query)  # parsing the query
                # Perform a Search
                results = searcher.search(parsed_query)
                # Save results in tsv file
                for hit in results:
                    writer.writerow(
                        [str(x), hit['id'], str(hit.rank + 1), str(hit.score)])
                    if str(x) in se.keys():
                        se[str(x)].append(hit['id'])
                    else:
                        se[str(x)] = [hit['id']]
                # print(temp,str(x),pak(gt, se, k,str(x)))
                tmp = r_precision(gt, se, str(x))
                if tmp != -1:  # -1 if not in ground truth
                    if temp in r_prec.keys():
                        r_prec[temp].append(tmp)
                    else:
                        r_prec[temp] = [tmp]
                    sum_r += tmp
                    if tmp > max_r:
                        max_r = tmp
                    if tmp < min_r:
                        min_r = tmp
        # per-engine aggregates, keyed by the 1-based engine counter `temp`
        max_[temp] = max_r
        min_[temp] = min_r
        r_mean[temp] = sum_r/len(gt)
        all_se[temp] = se
        mean[temp] = mrr(gt, se)
        # print(temp, mean[temp])
    # NOTE(review): redundant -- the `with` block already closed this file.
    filename.close()
    searcher.close()
# Summary statistics of the R-precision distribution per search engine.
med = {}
quar1 = {}
quar3 = {}
print("MRR:")
print(mean)
print("--------------")
print("Mean R-precision:")
print(r_mean)
print("--------------")
print("Max R-precision:")
print(max_)
print("--------------")
print("Min R-precision:")
print(min_)
print("--------------")
for conf in r_prec:
    med[conf] = statistics.median(sorted(r_prec[conf]))
    quar1[conf] = np.percentile(r_prec[conf], 25)
    quar3[conf] = np.percentile(r_prec[conf], 75)
    # print("median",np.percentile(r_prec[conf], 50)) #check that median is correct
print("1st quartile: ")
print(quar1)
print("--------------")
print("3rd quartile: ")
print(quar3)
print("--------------")
print("Median R-precision:")
print(med)
# Keep only the five configurations with the highest MRR.
sorted_mrr = {k: v for k, v in sorted(
    mean.items(), key=lambda x: x[1], reverse=True)}  # sort by mrr
sorted_mrr = dict(itertools.islice(sorted_mrr.items(), 5)
                  )  # take top five search engines
print("--------------")
print("Top 5 search engine configurations:")
print(sorted_mrr)
y = {}  # NOTE(review): appears unused below -- TODO confirm and remove
X = 1.0  # x-axis tick step for the plots
x = [1, 3, 5, 10]  # the k cutoffs evaluated
plt.xlabel("k")
plt.ylabel("average p@k")
plt.title("Average p@k for top 5 search engines")
for key, se in all_se.items():  # key = search engine index
    if key in sorted_mrr:  # search engine is in top five
        total1 = 0
        total2 = 0
        total3 = 0
        total4 = 0
        # print(key)
        for q in se:  # for every query
            result1 = pak(gt, se, 1, q)  # k = 1
            result2 = pak(gt, se, 3, q)  # k = 3
            result3 = pak(gt, se, 5, q)  # k = 5
            result4 = pak(gt, se, 10, q)  # k = 10
            if result1 != -1:  # if result = -1 ,that means thats the query doesnt exist in ground truth
                total1 += result1
            if result2 != -1:
                total2 += result2
            if result3 != -1:
                total3 += result3
            if result4 != -1:
                total4 += result4
        #print(total1, total2, total3, total4, len(gt))
        plt.plot(x, [total1/len(gt), total2/len(gt), total3/len(gt),
                     total4/len(gt)], label="search engine no "+str(key), marker='o')
plt.legend()
plt.xticks(np.arange(min(x), max(x)+1, X))
plt.show()
# Same plot for average nDCG at the same k cutoffs.
plt.xlabel("k")
plt.ylabel("average nDCG")
plt.title("Average nDCG for top 5 search engines")
for key, se in all_se.items():
    if key in sorted_mrr:
        total1 = 0
        total2 = 0
        total3 = 0
        total4 = 0
        for q in se:
            result1 = ndcgak(gt, se, 1, q)
            result2 = ndcgak(gt, se, 3, q)
            result3 = ndcgak(gt, se, 5, q)
            result4 = ndcgak(gt, se, 10, q)
            if result1 != -1:
                total1 += result1
            if result2 != -1:
                total2 += result2
            if result3 != -1:
                total3 += result3
            if result4 != -1:
                total4 += result4
        # print(total1,total2,total3,total4)
        plt.plot(x, [total1/len(gt), total2/len(gt), total3/len(gt),
                     total4/len(gt)], label="search engine no "+str(key), marker='o')
plt.legend()
plt.xticks(np.arange(min(x), max(x)+1, X))
plt.show()
print("--------------")
print("Indexed documents:")
print(num_docs)
print("Number of queries:")
print(len(query))
print("Number of queries in ground truth:")
print(len(gt))
|
{"hexsha": "c18e202e8868f0b62a06a735c9a14bdf191ff0a3", "size": 11939, "ext": "py", "lang": "Python", "max_stars_repo_path": "search-engine-evaluation/search_engine.py", "max_stars_repo_name": "NefeliTav/Search-Engine-Evaluation", "max_stars_repo_head_hexsha": "bdc8ceb316448ad334410e5f8ef227f355ef4e3b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "search-engine-evaluation/search_engine.py", "max_issues_repo_name": "NefeliTav/Search-Engine-Evaluation", "max_issues_repo_head_hexsha": "bdc8ceb316448ad334410e5f8ef227f355ef4e3b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "search-engine-evaluation/search_engine.py", "max_forks_repo_name": "NefeliTav/Search-Engine-Evaluation", "max_forks_repo_head_hexsha": "bdc8ceb316448ad334410e5f8ef227f355ef4e3b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9093567251, "max_line_length": 104, "alphanum_fraction": 0.5552391323, "include": true, "reason": "import numpy", "num_tokens": 3128}
|
import numpy as np
import numba
def argminrelmin(a, axis=None, out=None):
    """Index of the lowest relative minimum of an array.

    A relative minimum is either an element strictly lower than both of its
    neighbours, or the central element of a flat run of equal values whose
    outer neighbours are both higher.  Ties are broken by taking the first
    occurrence; if no relative minimum exists the result is -1.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : array, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.
    """
    # TODO the axis parameter does not work when `out` is specified.
    arr = np.asarray(a)
    # the gufunc scans its last axis, so either flatten or move `axis` there
    arr = arr.reshape(-1) if axis is None else np.moveaxis(arr, axis, -1)
    return _argminrelmin(arr, out=out)
@numba.guvectorize(['(f8[:],i8[:])'], '(n)->()', cache=True)
def _argminrelmin(a, out):
    # Single left-to-right scan of the 1-d signal.
    #   idx  -- position of the best (lowest) relative minimum so far, -1 if none
    #   val  -- its value (only meaningful once idx >= 0)
    #   wide -- start index of a flat run entered on a descent, -1 otherwise
    # Endpoints are never candidates: the loop runs over interior indices only.
    idx = -1
    val = 0
    wide = -1
    for i in range(1, len(a) - 1):
        if a[i - 1] > a[i] < a[i + 1]:
            # strict local minimum at i
            if a[i] < val or idx < 0:
                idx = i
                val = a[i]
        elif a[i - 1] > a[i] == a[i + 1]:
            # descending edge into a plateau: remember where it starts
            wide = i
        elif wide >= 0 and a[i - 1] == a[i] < a[i + 1]:
            # ascending edge out of a plateau entered on a descent:
            # the minimum is the central element of the flat run
            if a[i] < val or idx < 0:
                idx = (wide + i) // 2
                val = a[i]
        elif a[i] != a[i + 1]:
            # the flat run (if any) ended without being a minimum; reset
            wide = -1
    out[0] = idx
def test_argminrelmin():
    # a ramp with a sharp minimum at index 29 (value 0) and a shallower
    # second dip that must not win
    signal = np.concatenate([
        np.linspace(-1, 1, 20),
        np.linspace(1, 0, 10),
        np.linspace(0, 1, 10),
        np.linspace(1, 0.5, 10),
        np.linspace(0.5, 1, 10),
    ])
    idx = argminrelmin(signal)
    assert idx == 29, idx
    assert signal[idx] == 0, signal[idx]
    # table-driven edge cases: plateaus and monotone ends
    cases = [
        ([3, 2, 1, 2, 3], 2),
        ([3, 2, 1, 1, 2, 3], 2),
        ([3, 2, 1, 1, 1, 2, 3], 3),
        ([3, 2, 1, 1, 0], -1),
        ([0, 1, 1, 2, 3], -1),
    ]
    for data, expected in cases:
        idx = argminrelmin(data)
        assert idx == expected, idx


if __name__ == '__main__':
    test_argminrelmin()
|
{"hexsha": "7ec48a0f363eabb9a7a6c34e8887127b116f69ca", "size": 2468, "ext": "py", "lang": "Python", "max_stars_repo_path": "argminrelmin.py", "max_stars_repo_name": "Gattocrucco/sipmfilter", "max_stars_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "argminrelmin.py", "max_issues_repo_name": "Gattocrucco/sipmfilter", "max_issues_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "argminrelmin.py", "max_forks_repo_name": "Gattocrucco/sipmfilter", "max_forks_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.367816092, "max_line_length": 79, "alphanum_fraction": 0.5356564019, "include": true, "reason": "import numpy,import numba", "num_tokens": 777}
|
import json
import numpy as np
from scipy.stats import norm
import math
from scipy.linalg import cholesky
def basis(y, t) -> float:
    """t-th Metalog basis function evaluated at probability y.

    The terms follow the metalog series: constant, logit ln(y/(1-y)),
    (y-0.5)*logit, (y-0.5), then alternating pure powers of (y-0.5)
    (odd t >= 5) and powers of (y-0.5) times the logit (even t >= 6).

    Note: the previous signature claimed ``-> int`` but every non-trivial
    branch returns a float.

    :param y: probability, must lie in the open interval (0, 1)
    :param t: 1-based index of the basis term
    :return: value of the basis term (0 for t < 1)
    """
    if t == 1:
        return 1
    if t == 2:
        return math.log(y / (1 - y))
    if t == 3:
        return (y - 0.5) * math.log(y / (1 - y))
    if t == 4:
        return y - 0.5
    if t >= 5 and t % 2 == 1:
        return math.pow(y - 0.5, (t - 1) // 2)
    if t >= 6 and t % 2 == 0:
        return math.pow(y - 0.5, (t - 1) // 2) * math.log(y / (1 - y))
    return 0  # t < 1: no such basis term
def metalog(y, a, bl="", bu=""):
    """Metalog quantile function M(y) for coefficient vector `a`.

    The variant is selected by the bounds: both `bl` and `bu` left as the
    default string means unbounded; a numeric `bl` alone gives a
    lower-bounded (log) metalog; a numeric `bu` alone an upper-bounded one;
    both numeric a fully bounded (logit) metalog.

    :param y: cumulative probability in the open interval (0, 1)
    :param a: list of metalog a-coefficients
    :param bl: numeric lower bound, or "" for none
    :param bu: numeric upper bound, or "" for none
    :return: quantile at probability y, as a float
    """
    np_a = np.array(a).reshape(-1, 1)
    vector = np.array([basis(y, t) for t in range(1, len(a) + 1)], dtype=object)
    # the inner product of basis terms with the a-coefficients
    mky = np.matmul(vector, np_a).item()
    # Unbounded
    if type(bl) == str and type(bu) == str:
        return float(mky)
    # Bounded lower: bl + e^M
    elif type(bl) != str and type(bu) == str:
        return bl + math.exp(mky)
    # Bounded upper: bu - e^(-M)
    elif type(bl) == str and type(bu) != str:
        return bu - math.exp(-mky)
    # Bounded: (bl + bu*e^M) / (1 + e^M), per Keelin (2016).
    # BUGFIX: the previous code computed bl + bu*e^M/(1+e^M), which tends to
    # bl+bu instead of bu as y -> 1, so the upper bound was violated
    # whenever bl != 0.
    else:
        return (bl + bu * math.exp(mky)) / (1 + math.exp(mky))
def hdr(pm_trials, var=0, ent=0, attr1=0, attr2=0, round_off=15, Seedlist=None):
    """Hubbard Decision Research counter-based PRNG (HDR 2.0).

    Produces `pm_trials` deterministic pseudo-uniform values in (0, 1).
    The four seeds may be passed either as the scalars `var`, `ent`,
    `attr1`, `attr2`, or via `Seedlist` (a list of up to four integers,
    implicitly padded with zeros), which takes precedence when it is a list.

    BUGFIX: a short `Seedlist` is no longer padded in place -- the caller's
    list is left untouched.  The two previously duplicated formula branches
    are also merged into one.

    :param pm_trials: number of values to generate
    :param round_off: decimal digits each value is rounded to
    :return: list of `pm_trials` floats in the open interval (0, 1)
    """
    if type(Seedlist) == list:
        # pad a *copy* to four seeds instead of appending to the argument
        s1, s2, s3, s4 = (list(Seedlist) + [0, 0, 0, 0])[:4]
    else:
        s1, s2, s3, s4 = var, ent, attr1, attr2
    values = []
    for pm_index in range(1, pm_trials + 1):
        # two counter-based hash streams, mixed and mapped into (0, 1)
        eq = ((((
            (((999999999999989 % ((
                (pm_index * 2499997 + s1 * 1800451 + s2 * 2000371 + s3 * 1796777 + s4 * 2299603)
                % 7450589) * 4658 + 7450581)) * 383) % 99991) * 7440893 +
            (((999999999999989 % ((
                (pm_index * 2246527 + s1 * 2399993 + s2 * 2100869 + s3 * 1918303 + s4 * 1624729)
                % 7450987) * 7580 + 7560584)) * 17669) % 7440893)) * 1343)
            % 4294967296) + 0.5) / 4294967296
        values.append(round(eq, ndigits=round_off))
    return values
def convertMx(correlationMatrix):
    """Convert a sparse list-of-entries correlation matrix to a dense array.

    Each entry is a dict with keys "row", "col" and "value"; the result is a
    symmetric numpy array with variables indexed in first-appearance order.

    BUGFIX: variables were previously collected from the "row" keys only, so
    a variable appearing exclusively as a "col" crashed `.index()`.  Such
    variables are now appended after all row variables, which preserves the
    original ordering for every previously-working input.

    :param correlationMatrix: list of {"row", "col", "value"} dicts
    :return: symmetric numpy.ndarray of shape (n, n)
    """
    variables = []
    for entry in correlationMatrix:  # row variables first (original order)
        if entry["row"] not in variables:
            variables.append(entry["row"])
    for entry in correlationMatrix:  # then any column-only variables
        if entry["col"] not in variables:
            variables.append(entry["col"])
    n = len(variables)
    returnArray = np.zeros(shape=[n, n])
    for entry in correlationMatrix:
        i = variables.index(entry["row"])
        j = variables.index(entry["col"])
        value = entry["value"]
        # fill both halves to keep the matrix symmetric
        returnArray[i][j] = value
        returnArray[j][i] = value
    return returnArray
class ImportJSON:
    """Wrapper around a SIPmath-style JSON document.

    Parses the document, exposes its random-number generators (``U01.rng``),
    copulas (``U01.copula``) and SIPs (``sips``), and simulates SIP values
    through the module-level ``hdr``/``metalog`` helpers.
    """

    def __init__(self, jsonfile):
        """Parse `jsonfile` (a JSON string) and cache its sections.

        Missing sections are tolerated; the corresponding private field
        simply stays ``None``.
        """
        # Viewable attributes
        self.jsonData = json.loads(jsonfile)
        self.randoms = {}   # name -> list of generated U(0,1) samples
        self.copula = None  # result of the last generateCopula() call
        self.SIPs = {}      # result of the last simulateSIP() call
        # Hidden document sections (None when absent from the document)
        self.__rng = None
        self.__copulaValues = None
        self.__SIPs = None
        # note: (misspelled) attribute name kept for backward compatibility
        self.enviroment = "generic"
        # NOTE(review): parses the document a second time, as before; kept so
        # later mutation of self.jsonData cannot affect internal state
        self.__x = json.loads(jsonfile)
        # BUGFIX: these clauses were written `except: Exception: ...`, which
        # Python parses as a bare except whose body is an annotation
        # statement -- an accidental catch-everything.
        try:
            self.__rng = self.__x["U01"]["rng"]
        except Exception:
            pass  # no rng section in the document
        try:
            self.__copulaValues = self.__x["U01"]['copula']
        except Exception:
            pass  # no copula section
        try:
            self.__SIPs = self.__x['sips']
        except Exception:
            pass  # no sips section

    def randomList(self):
        """Print the name of every RNG declared in the document."""
        for i in self.__rng:
            print(i['name'])

    def generateRandom(self, random):
        """Generate 1000 U(0,1) samples for the RNG named `random`.

        Seeds declared in the document as objects (rather than numbers) must
        be supplied through the keyword arguments of simulateSIP(); calling
        this method directly in that case fails because self.__kwargDict
        only exists after simulateSIP() has run -- TODO confirm intended use.

        :param random: name of an entry of the document's U01.rng list
        :return: list of 1000 floats; also stored in self.randoms[random]
        """
        randomIndex = [item.get("name") for item in self.__rng].index(random)
        fxn = self.__rng[randomIndex]["function"]
        # TODO: Check if arguments all have the same dictionary labels
        arguments = self.__rng[randomIndex]["arguments"]
        entity = arguments["entity"]
        varId = arguments["varId"]
        seed3 = arguments["seed3"]
        seed4 = arguments["seed4"]
        if type(seed3) == dict:
            if "seed3" in self.__kwargDict[random]:
                seed3 = self.__kwargDict[random]["seed3"]
            else:
                raise ValueError(seed3["name"], ". Seed3 needs to have a value.")
        if type(seed4) == dict:
            if "seed4" in self.__kwargDict[random]:
                seed4 = self.__kwargDict[random]["seed4"]
            else:
                raise ValueError(seed4["name"], "Seed4 needs to have a value.")
        args = [varId, entity, seed3, seed4]
        if fxn == "HDR_2_0":
            val = hdr(1000, *args)
            self.randoms[random] = val
            return val
        # SECURITY: eval() executes an arbitrary expression taken from the
        # document -- only load documents from trusted sources.
        return eval(fxn.format(*args, all=args))

    def generateCopula(self, random):
        """Generate the copula layer named `random` (1000 samples).

        Only "GaussianCopula" entries are supported: the referenced RNG
        streams are correlated through the Cholesky factor of the document's
        correlation matrix.

        :param random: an entry of the copula's "copulaLayer" list
        :return: list of 1000 floats; also stored in self.copula
        """
        ret = []
        # TODO: need to check for copula in document
        # Do all of this for all copulas in document
        for copulas in self.__copulaValues:
            if copulas["function"] != "GaussianCopula":
                raise TypeError("The function type for this copula is unsupported. ")
            # name of the global variable that holds the correlation matrix
            specifyCorrelationMatrix = copulas["arguments"]["correlationMatrix"]["value"]
            copulaArgs = copulas["arguments"]['rng']
            # stack the raw U(0,1) streams row by row (first row is padding)
            randomMatrix = np.zeros(shape=(1, 1000))
            for name in copulaArgs:
                randomMatrix = np.append(
                    randomMatrix, [self.generateRandom(name)], axis=0)
            # locate the correlation matrix among the global variables
            whichCorrelationMatrix = None
            for index, item in enumerate(self.__x["globalVariables"]):
                if item["name"] == specifyCorrelationMatrix:
                    whichCorrelationMatrix = index
                    break
            if whichCorrelationMatrix is None:
                # BUGFIX: previously this fell through and raised NameError
                raise ValueError(
                    "Global variable '%s' (correlation matrix) not found."
                    % specifyCorrelationMatrix)
            thisCorrelationMatrix = self.__x["globalVariables"][whichCorrelationMatrix]["value"]["matrix"]
            correlationMatrix = convertMx(thisCorrelationMatrix)
            # Find the Cholesky Factors
            cho = np.matrix(cholesky(correlationMatrix, lower=False))
            # Apply the Cholesky Factors to the randoms, trial by trial.
            # The column is chosen by the requested layer; hoisted out of the
            # per-trial loop because it is loop-invariant.
            col = copulas["copulaLayer"].index(random)
            choSubSample = cho[:col + 1, col]
            for i in range(0, 1000):
                trial = randomMatrix[1:col + 2, i]
                invCdf = norm.ppf(trial).reshape(-1, col + 1)
                mMult = np.dot(invCdf, choSubSample)
                ret.append(float(norm.cdf(mMult)))
        # Add the result of the cholesky to the copula variable
        self.copula = ret
        return ret

    def sipList(self):
        """Print the name of every SIP declared in the document."""
        for i in self.__SIPs:
            print(i['name'])

    def simulateSIP(self, sip, **kwargs):
        """Simulate the SIP named `sip` and return its sampled values.

        Keyword arguments supply seeds for RNGs whose seeds are declared as
        objects in the document, e.g.::

            simulateSIP("Variable1", HDR2={"seed3": 0, "seed4": 1})

        :param sip: name of an entry of the document's "sips" list
        :return: list of simulated values; also stored in self.SIPs
        """
        # TODO: Add an all option for doing all sips
        self.__kwargDict = kwargs
        sipIndex = [item.get("name") for item in self.__SIPs].index(sip)
        ref = self.__SIPs[sipIndex]["ref"]
        randomarray = []
        if ref["source"] == "copula":
            randomarray = self.generateCopula(ref["copulaLayer"])
        elif ref["source"] == "rng":
            randomarray = self.generateRandom(ref["name"])
        arguments = self.__SIPs[sipIndex]['arguments']
        args = arguments["aCoefficients"]
        # optional bounds; "" means "unbounded" on that side (see metalog()).
        # Replaces the previous silent try/except-pass pairs.
        lowerBound = arguments.get("lowerBound", "")
        upperBound = arguments.get("upperBound", "")
        function = self.__SIPs[sipIndex]["function"]
        returnValue = []
        if function == "Metalog_1_0":
            # TODO: change for loop into a vectorized numpy function
            for trial in randomarray:
                returnValue.append(
                    metalog(trial, a=args, bl=lowerBound, bu=upperBound))
        self.SIPs = returnValue
        return returnValue
|
{"hexsha": "63e47d2bbbcdee82ca4174d723ee168377b4ca25", "size": 9703, "ext": "py", "lang": "Python", "max_stars_repo_path": "PySIP/chanceCalc.py", "max_stars_repo_name": "knownboyofno/PySIPmath", "max_stars_repo_head_hexsha": "22521d0acaa74250fb7684b8eedbb3e6d044b4af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-11T03:47:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T07:46:24.000Z", "max_issues_repo_path": "PySIP/chanceCalc.py", "max_issues_repo_name": "knownboyofno/PySIPmath", "max_issues_repo_head_hexsha": "22521d0acaa74250fb7684b8eedbb3e6d044b4af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PySIP/chanceCalc.py", "max_forks_repo_name": "knownboyofno/PySIPmath", "max_forks_repo_head_hexsha": "22521d0acaa74250fb7684b8eedbb3e6d044b4af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-11T18:14:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T07:53:30.000Z", "avg_line_length": 34.2862190813, "max_line_length": 133, "alphanum_fraction": 0.5222096259, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2518}
|
[STATEMENT]
lemma lnull_inf_step2inf_step_table [simp]:
"lnull (inf_step2inf_step_table s tls) \<longleftrightarrow> lnull tls"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lnull (inf_step2inf_step_table s tls) = lnull tls
[PROOF STEP]
by(simp add: inf_step2inf_step_table_def)
|
{"llama_tokens": 122, "file": "JinjaThreads_Framework_LTS", "length": 1}
|
import os
import math
import time
import numpy as np
from skimage.io import imread
from skimage.measure import regionprops,label
from skimage.transform import resize
from . import DataGenerator
class ArtefactGenerator(DataGenerator):
    """Data generator for the artefact-segmentation task.

    Loads tile images (files containing '_rgb') and their annotation masks
    (same names with '_anno') from ``directory/folder_x`` and
    ``directory/folder_y``, then yields augmented, randomly cropped
    mini-batches with a two-channel one-hot mask
    (channel 0 = background, channel 1 = artefact).

    Images are read from disk on the fly (the dataset does not fit in RAM).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.timeit: self.log_time += [{'event': 'init artefact generator', 'time': time.time()}]
        # Accept both spellings of the 2.5x magnification.  Previously the
        # check only allowed '2.50x' while the filter below only matched
        # '2.5x', so the 2.5x subset could never actually be selected.
        if self.mag not in ('1.25x', '2.5x', '2.50x', 'both'):
            raise ValueError('mag must be "1.25x", "2.5x" or "both"')
        x_dir = os.path.join(self.directory, self.folder_x)
        y_dir = os.path.join(self.directory, self.folder_y)
        all_tiles = [f for f in os.listdir(x_dir) if '_rgb' in f]
        if self.mag == '1.25x':
            all_tiles = [f for f in all_tiles if '1.25x' in f]
        elif self.mag in ('2.5x', '2.50x'):
            # NOTE(review): tile filenames may use either spelling — match both.
            all_tiles = [f for f in all_tiles if '2.5x' in f or '2.50x' in f]
        self.files_x = [os.path.join(x_dir, f) for f in all_tiles]
        self.files_y = [os.path.join(y_dir, f.replace('_rgb', '_anno')) for f in all_tiles]
        # Can't put all the dataset in RAM here -> have to load it on the fly
        # -> really need to re-do the threading stuff to improve performance
        self.idxs = np.arange(len(self.files_x))
        self.keep_idxs = self.idxs.copy()
        if self.pPositive is not False:
            # Keep only tiles whose mask contains at least one positive pixel.
            self.keep_idxs = []
            for idx, f in enumerate(self.files_y):
                if imread(f).max() > 0:
                    self.keep_idxs += [idx]
            self.keep_idxs = np.array(self.keep_idxs)
        self.isset = True
        self.split_train_val()
        self.batches_per_epoch = len(self.keep_idxs) // self.batch_size
        if self.timeit: self.log_time += [{'event': 'end init', 'time': time.time()}]

    def get_epoch(self, idxs, e):
        """Yield augmented, randomly cropped mini-batches for one epoch.

        Parameters
        ----------
        idxs : sequence of int
            Indices into files_x/files_y to draw this epoch's batches from.
        e : int
            Epoch number (unused here; kept for interface compatibility).
        """
        if self.timeit: self.log_time += [{'event': 'in get_epoch', 'time': time.time()}]
        if self.pPositive is not False:
            # we need to have x % of the idxs from positive samples (keep_idxs),
            # and the rest from negative. By doing the selection here, we ensure
            # that each epoch has the right balance, but the balance may be
            # wrong at the level of the mini-batch.
            take_positive = np.random.random((len(idxs),)) <= self.pPositive  # all True if pPositive == 1
            idxs = [idx for idx, pos in zip(idxs, take_positive)
                    if (idx in self.keep_idxs and pos) or (idx not in self.keep_idxs and not pos)]
        for idb in range(self.batches_per_epoch):
            if self.timeit: self.log_time += [{'event': 'in batches loop', 'time': time.time()}]
            batch_idxs = idxs[idb * self.batch_size:(idb + 1) * self.batch_size]
            batch_x = np.array([self._preprocess(imread(self.files_x[idx])) for idx in batch_idxs])
            batch_y_1 = np.array([imread(self.files_y[idx]) > 0 for idx in batch_idxs])
            if self.timeit: self.log_time += [{'event': 'opened images from disk', 'time': time.time()}]
            # One-hot masks: channel 1 = artefact, channel 0 = background.
            batch_y = np.zeros(list(batch_y_1.shape) + [2, ])
            batch_y[:, :, :, 1] = batch_y_1
            batch_y[:, :, :, 0] = 1 - batch_y[:, :, :, 1]
            if self.gamodel is not False:
                # With probability pNoise, replace mostly-negative targets
                # with the generator model's predictions.
                for i in range(batch_y.shape[0]):
                    if batch_y[i, :, :, 1].mean() < self.maxPositiveAreaForGenerator \
                            and np.random.random() < self.pNoise:
                        batch_y[i] = self.gamodel.predict(np.array([batch_x[i]]))
            yield self._augment(*self._random_crop(batch_x, batch_y), idb)

    def _random_crop(self, batch_x, batch_y):
        """Randomly take a crop of self.tile_size from each image in the mini batch."""
        if self.timeit: self.log_time += [{'event': 'pre-crop', 'time': time.time()}]
        shape_images = batch_y[0].shape
        shape_tiles = self.batch_y_shape[1:3]
        # Maximum top-left corner so the crop stays inside the image.
        max_rt = np.array([shape_images[0] - shape_tiles[0], shape_images[1] - shape_tiles[1]])
        rts = (np.random.random((batch_x.shape[0], 2)) * max_rt).astype('int')
        cropped_batch_x = np.zeros((len(batch_x),) + self.batch_x_shape[1:])
        cropped_batch_y = np.zeros((len(batch_x),) + self.batch_y_shape[1:])
        for i, (bx, by, rt) in enumerate(zip(batch_x, batch_y, rts)):
            cropped_batch_x[i] = bx[rt[0]:rt[0] + self.tile_size[0], rt[1]:rt[1] + self.tile_size[1]]
            cropped_batch_y[i] = by[rt[0]:rt[0] + self.tile_size[0], rt[1]:rt[1] + self.tile_size[1]]
        if self.timeit: self.log_time += [{'event': 'random crop', 'time': time.time()}]
        return cropped_batch_x, cropped_batch_y

    def get_validation_set(self):
        """Load and randomly crop the whole validation split.

        Returns (Xval, Yval) with Yval one-hot encoded as in get_epoch.
        """
        if not self.isset:
            raise ValueError("Cannot get validation set before dataset has been loaded")
        Xval = np.array([self._preprocess(imread(self.files_x[idx])) for idx in self.val_idxs])
        Yval = np.zeros(Xval.shape[:3] + (2,))
        Yval[:, :, :, 1] = np.array([imread(self.files_y[idx]) > 0 for idx in self.val_idxs])
        Yval[:, :, :, 0] = 1 - Yval[:, :, :, 1]
        return self._random_crop(Xval, Yval)
|
{"hexsha": "71930c4b619a1ba71c114df5c68c0a46667e3871", "size": 5379, "ext": "py", "lang": "Python", "max_stars_repo_path": "generator/ArtefactGenerator.py", "max_stars_repo_name": "adfoucart/deephisto", "max_stars_repo_head_hexsha": "f70fbaad9f95a9b9f2e420c9c33d46bdfab5bdf9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-05T13:44:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-05T13:44:43.000Z", "max_issues_repo_path": "generator/ArtefactGenerator.py", "max_issues_repo_name": "adfoucart/deephisto", "max_issues_repo_head_hexsha": "f70fbaad9f95a9b9f2e420c9c33d46bdfab5bdf9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generator/ArtefactGenerator.py", "max_forks_repo_name": "adfoucart/deephisto", "max_forks_repo_head_hexsha": "f70fbaad9f95a9b9f2e420c9c33d46bdfab5bdf9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-02T14:25:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T14:25:13.000Z", "avg_line_length": 50.2710280374, "max_line_length": 141, "alphanum_fraction": 0.5999256367, "include": true, "reason": "import numpy", "num_tokens": 1418}
|
import string
import chainer
import cupy as cp
class HashMap(object):
    """GPU hash table mapping int32 key vectors to dense indices.

    Keys are int32 vectors of length ``dim``, stored per batch element in a
    fixed-size open-addressing table (linear probing, max 100 probes) on
    the GPU.  The table is built once from ``data`` in ``__init__``;
    ``find`` returns, for each query vector, the dense index assigned at
    construction time, or -1 when the key is absent.
    """

    def __init__(self, data, table_size=2 ** 24):
        # data is expected to be (batch_size, num_points, dim) int32.
        xp = chainer.cuda.get_array_module(data)
        # Multiplier of the rolling hash computed over each key vector.
        self.hash_factor = 2531011
        self.batch_size, self.num_points, self.dim = data.shape
        self.table_size = table_size
        # indices[b, slot]: dense index assigned to the key in that slot; -1 = empty.
        self.indices = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size,), 'int32')) - 1
        # values[b, slot]: the key vector stored in that slot.
        self.values = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size, self.dim), 'int32'))
        # value_list[b, n]: key vectors in dense-index order (n < size).
        self.value_list = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size, self.dim), 'int32'))
        # Max number of distinct keys over the batch; filled in by init_keys.
        self.size = None
        self.init_keys(data)

    def init_keys(self, data):
        """Insert every key vector of ``data`` into the table with one CUDA kernel.

        Raises a bare Exception if linear probing exceeds 100 steps for
        any key (table too full / pathological collisions).
        """
        data = cp.ascontiguousarray(data)
        # used[b, slot]: slot has been claimed; written[b, slot]: key copy finished.
        used = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size), 'int32'))
        written = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size), 'int32'))
        # count[b]: number of distinct keys inserted for batch element b.
        count = cp.ascontiguousarray(cp.zeros((self.batch_size,), 'int32'))
        # ok[0] is set to -1 by the kernel when probing runs out of attempts.
        ok = cp.zeros((1,), 'int32')
        # One kernel invocation per key vector.
        # NOTE(review): data.size / self.dim is float division under Python 3 —
        # presumably always exact given the shapes above; verify.
        loop_indices = cp.arange(data.size / self.dim).astype('int32')
        chainer.cuda.elementwise(
            'int32 j, raw int32 data, raw int32 indices, raw int32 values, ' +
            'raw int32 value_list, raw int32 used, raw int32 written, raw int32 count, raw int32 ok',
            '',
            string.Template('''
            int* value_init;
            int* value;
            value_init = &data[i * ${dim}];
            int bn = i / ${num_points};
            /* compute initial key */
            unsigned int key = 0;
            value = value_init;
            for (int k = 0; k < ${dim}; k++) key = (key + *value++) * ${hash_factor};
            key = key % ${table_size};
            for (int l = 0; l < 100; l++) {
                /* check if the key is used */
                int ret;
                ret = used[bn * ${table_size} + key];
                if (ret == 0) ret = atomicExch(&used[bn * ${table_size} + key], 1);
                if (ret == 0) {
                    /* register true key */
                    int* value_ref = &values[(bn * ${table_size} + key) * ${dim}];
                    value = value_init;
                    for (int k = 0; k < ${dim}; k++) *value_ref++ = *value++;
                    written[bn * ${table_size} + key] = 1;
                    int num = atomicAdd(&count[bn], 1);
                    indices[bn * ${table_size} + key] = num;
                    value_ref = &value_list[(bn * ${table_size} + num) * ${dim}];
                    value = value_init;
                    for (int k = 0; k < ${dim}; k++) *value_ref++ = *value++;
                    break;
                } else {
                    bool match = true;
                    while (atomicAdd(&written[bn * ${table_size} + key], 0) == 0) {}
                    int* value_ref = &values[(bn * ${table_size} + key) * ${dim}];
                    value = value_init;
                    for (int k = 0; k < ${dim}; k++) if (*value_ref++ != *value++) match = false;
                    if (match) {
                        break;
                    } else {
                        key = (key + 1) % ${table_size};
                    }
                }
                if (l == 99) {
                    ok[0] = -1;
                }
            }
            ''').substitute(
                table_size=self.table_size,
                hash_factor=self.hash_factor,
                num_points=self.num_points,
                dim=self.dim,
            ),
            'kernel',
        )(loop_indices, data, self.indices, self.values, self.value_list, used, written, count, ok)
        self.size = int(count.max())
        if int(ok[0]) < 0:
            raise Exception

    def find(self, data):
        """Look up each key vector of ``data`` in the table.

        Returns an int32 array shaped like data.shape[:-1] holding the
        dense index of each key, or -1 where the key is not present.
        Raises a bare Exception if probing exceeds 100 steps.
        """
        ret = cp.ascontiguousarray(cp.zeros(data.shape[:-1], 'int32')) - 1
        data = cp.ascontiguousarray(data)
        loop_indices = cp.arange(data.size / self.dim).astype('int32')
        ok = cp.zeros((1,), 'int32')
        chainer.cuda.elementwise(
            'int32 j, raw int32 data, raw int32 indices, raw int32 values, raw int32 ret, raw int32 ok',
            '',
            string.Template('''
            /* */
            int* value = &data[j * ${dim}];
            int bn = i / ${num_points};
            /* compute initial key */
            unsigned int key = 0;
            for (int k = 0; k < ${dim}; k++) key = (key + value[k]) * ${hash_factor};
            key = key % ${table_size};
            for (int l = 0; l < 100; l++) {
                if (indices[bn * ${table_size} + key] < 0) {
                    ret[j] = -1;
                    break;
                }
                bool match = true;
                for (int k = 0; k < ${dim}; k++)
                    if (values[(bn * ${table_size} + key) * ${dim} + k] != value[k])
                        match = false;
                if (match) {
                    ret[j] = indices[bn * ${table_size} + key];
                    break;
                } else {
                    key = (key + 1) % ${table_size};
                }
                if (l == 99) {
                    ok[0] = -1;
                }
            }
            ''').substitute(
                table_size=self.table_size,
                hash_factor=self.hash_factor,
                num_points=data.shape[1],
                dim=self.dim,
            ),
            'function',
        )(loop_indices, data, self.indices, self.values, ret, ok)
        if int(ok[0]) < 0:
            raise Exception
        return ret
|
{"hexsha": "c69f919f46a55ba2e415d6d53ba8fb4152802e1a", "size": 5862, "ext": "py", "lang": "Python", "max_stars_repo_path": "fast_gaussian_filter/hash_table.py", "max_stars_repo_name": "hiroharu-kato/fast_gaussian_filter", "max_stars_repo_head_hexsha": "2ffc62c8ba1d7bf9559559e102a8eb84bf3a65dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-01-08T06:08:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-30T07:56:39.000Z", "max_issues_repo_path": "fast_gaussian_filter/hash_table.py", "max_issues_repo_name": "hiroharu-kato/fast_gaussian_filter", "max_issues_repo_head_hexsha": "2ffc62c8ba1d7bf9559559e102a8eb84bf3a65dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-14T07:00:47.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-14T07:00:47.000Z", "max_forks_repo_path": "fast_gaussian_filter/hash_table.py", "max_forks_repo_name": "hiroharu-kato/fast_gaussian_filter", "max_forks_repo_head_hexsha": "2ffc62c8ba1d7bf9559559e102a8eb84bf3a65dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-01-09T03:15:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T03:15:07.000Z", "avg_line_length": 41.2816901408, "max_line_length": 111, "alphanum_fraction": 0.4324462641, "include": true, "reason": "import cupy", "num_tokens": 1343}
|
push!(LOAD_PATH,"../src/") # load module from local directory
using TheDancer
N, Edisorder, Jdisorder, modelJ, B, dipolestrength, r = init!(50, 0.0, 0.0, 0.001, 298, 7.5)
function main()
    # Cycle counts for the self-consistent-field stage and the
    # subsequent unitary-evolution stage of each run.
    scfcycles = 200
    unitarycycles = 500
    # Sweep the same three dampening factors, in the same order.
    for damp in (0.07, 0.025, 0.05)
        # PNG=true would dump .png frames for movie making
        SCFthenUnitary(damp, scfcycles, unitarycycles, PNG=false)
    end
end
main() # Party like it's C99!
|
{"hexsha": "aea4ce5482ce76a9f00f1fb9c0119ee58d43fe03", "size": 424, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "example/example.jl", "max_stars_repo_name": "jarvist/1D-TimeDependentPolaron", "max_stars_repo_head_hexsha": "e71e52ae58b6c9a5ad81cfe33f4017e1b7ba8cc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/example.jl", "max_issues_repo_name": "jarvist/1D-TimeDependentPolaron", "max_issues_repo_head_hexsha": "e71e52ae58b6c9a5ad81cfe33f4017e1b7ba8cc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-06-22T12:54:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-17T10:45:18.000Z", "max_forks_repo_path": "example/example.jl", "max_forks_repo_name": "jarvist/TheDancer.jl", "max_forks_repo_head_hexsha": "e71e52ae58b6c9a5ad81cfe33f4017e1b7ba8cc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3157894737, "max_line_length": 108, "alphanum_fraction": 0.679245283, "num_tokens": 150}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 17:42:34 2018
@author: Arvinder Shinh
"""
import numpy as np
import matplotlib.pyplot as plt
# 30 evenly spaced samples from 2 to 2*pi (both endpoints included)
x = np.linspace(2, 2 * np.pi, 30)
# Baseline G(x) = 0, same shape as x
G = np.zeros_like(x)
# F(x) = e^x evaluated at each sample
F = lambda x: np.exp(x)
fig, vax = plt.subplots(1, 1, figsize=(6, 3))
vax.plot(x, F(x), 'go', label='F(x)=exp(x)')
vax.plot(x, G, 'ro', label='G(x)=0')
# Vertical stems from the baseline up to F(x)
vax.vlines(x, [0], F(x))
plt.legend()
plt.show()
|
{"hexsha": "89a380ab066a1a3a8d9c934bb8df157cac4c3255", "size": 453, "ext": "py", "lang": "Python", "max_stars_repo_path": "Mandatory Documents/plot.py", "max_stars_repo_name": "arvindershinh/ExponentialWave", "max_stars_repo_head_hexsha": "0b79f84bd7ae64b60758e05b623e7d00c04670bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Mandatory Documents/plot.py", "max_issues_repo_name": "arvindershinh/ExponentialWave", "max_issues_repo_head_hexsha": "0b79f84bd7ae64b60758e05b623e7d00c04670bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Mandatory Documents/plot.py", "max_forks_repo_name": "arvindershinh/ExponentialWave", "max_forks_repo_head_hexsha": "0b79f84bd7ae64b60758e05b623e7d00c04670bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.65, "max_line_length": 51, "alphanum_fraction": 0.6225165563, "include": true, "reason": "import numpy", "num_tokens": 170}
|
import sys
import os
import argparse
# import numpy as np
# import pandas as pd
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.util.time.Timer import Timer
# from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil
from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
# from fr.tagc.rainet.core.util.data.DataManager import DataManager
from fr.tagc.rainet.core.data.RNACrossReference import RNACrossReference
from fr.tagc.rainet.core.data.RNA import RNA
#===============================================================================
# Started 03-January-2017
# Diogo Ribeiro
DESC_COMMENT = "Script to read and ID map data from lnc2cancer database."
SCRIPT_NAME = "ParseLnc2cancer.py"
#===============================================================================
#===============================================================================
# General plan:
# 1) Read Rainet DB containing tables containing RNA ID mapping
# 2) Read lnc2cancer file
# 3) Return lnc2cancer data with extra column harboring Ensembl IDs
#===============================================================================
#===============================================================================
# Processing notes:
# 1) Use of a greedy approach that tries to map any lncRNA name / symbol / synonym to any ENST*
# 2) Output a line per transcript match, even if what is matched is the gene and not the transcript
#===============================================================================
class ParseLnc2cancer(object):
    """Maps lnc2cancer lncRNA identifiers to Ensembl transcript IDs.

    Reads ID cross-references from a Rainet SQLite database
    (read_rainet_db) and rewrites the lnc2cancer association file with an
    extra first column holding the mapped Ensembl transcript ID, one
    output line per mapped transcript (read_lnc2cancer).
    """

    # Name of the output file created inside the output folder.
    OUTPUT_FILE = "/lncRNA_cancer_association_ensemblID.txt"

    def __init__(self, rainetDB, lnc2cancerData, outputFolder):
        self.rainetDB = rainetDB
        self.lnc2cancerData = lnc2cancerData
        self.outputFolder = outputFolder
        # Build a SQL session to DB
        SQLManager.get_instance().set_DBpath(self.rainetDB)
        self.sql_session = SQLManager.get_instance().get_session()
        # make output folder
        if not os.path.exists(self.outputFolder):
            os.mkdir(self.outputFolder)

    # #
    # Build self.rnaCrossReference from RainetDB:
    # any known ID / name / synonym -> set of Ensembl transcript IDs.
    def read_rainet_db(self):
        """Populate self.rnaCrossReference from the RNACrossReference and RNA tables."""
        # query table containing all annotation mappings of rna and crossreference
        query = self.sql_session.query(RNACrossReference.transcriptID, RNACrossReference.crossReferenceID).all()
        # key -> cross reference ID, val -> set of ensembl transcript IDs.
        # Sets are used for ALL values: the previous code stored lists here
        # but sets for the RNA-table keys below, which crashed with
        # AttributeError (.add on a list) whenever a cross-reference ID
        # collided with a gene/transcript name, and allowed duplicates.
        rnaCrossReference = {}
        setOfTranscriptIDs = set()
        for ensemblID, crossRef in query:
            if crossRef not in rnaCrossReference:
                rnaCrossReference[crossRef] = set()
            rnaCrossReference[crossRef].add(str(ensemblID))
            setOfTranscriptIDs.add(ensemblID)
        print("read_rainet_db: Read RNACrossReference table.")
        print("read %s entries" % len(query))
        print("read %s cross references for %s transcripts" % (len(rnaCrossReference), len(setOfTranscriptIDs)))
        # Query RNA table, which contains gene ID, external gene and transcript names
        query = self.sql_session.query(RNA.transcriptID, RNA.geneID, RNA.externalGeneName, RNA.externalTranscriptName).all()
        # Note: a gene name can point to several ensembl IDs
        for ensemblID, geneID, geneName, txName in query:
            for key in (geneName, txName, geneID, ensemblID):
                if key not in rnaCrossReference:
                    rnaCrossReference[key] = set()
            # Register the transcript under its external gene name, external
            # transcript name, gene ID, and the transcript ID itself (some
            # lnc2cancer entries carry ENST* identifiers directly).
            rnaCrossReference[geneName].add(str(ensemblID))
            rnaCrossReference[txName].add(str(ensemblID))
            rnaCrossReference[geneID].add(str(ensemblID))
            rnaCrossReference[ensemblID].add(str(ensemblID))
            setOfTranscriptIDs.add(ensemblID)
        print("read_rainet_db: Read RNA table.")
        print("read %s entries" % len(query))
        print("read %s cross references for %s transcripts" % (len(rnaCrossReference), len(setOfTranscriptIDs)))
        self.rnaCrossReference = rnaCrossReference

    # #
    # Read lnc2cancer data and map IDs to Ensembl transcript IDs
    def read_lnc2cancer(self):
        """Map each lnc2cancer entry to Ensembl transcript IDs and write the output file.

        Greedy mapping priority per entry: lncRNA name, then synonyms,
        then Ensembl ID, then RefSeq ID.  One output line is written per
        mapped transcript, so gene-level matches expand to every
        transcript of that gene.  Entries with no mapping are reported.
        """
        # lnc2cancer provides one line per lncRNA-disease association;
        # several lines may repeat the same lncRNA for different diseases.
        notFound = set()
        found = set()
        # counters for mapping provenience
        namCount = 0
        synCount = 0
        ensCount = 0
        refCount = 0
        # Same as input file, but with an extra first column holding the
        # mapped Ensembl transcript ID.
        outFile = open(self.outputFolder + ParseLnc2cancer.OUTPUT_FILE, "w")
        with open(self.lnc2cancerData, "r") as inFile:
            # write header of output file; the first column of the original
            # header is empty and is replaced with the new column name
            header = inFile.readline()
            newHeader = header.split("\t")
            newList = ["transcriptID"]
            newList.extend(newHeader[1:])
            outFile.write("\t".join(newList))
            for line in inFile:
                spl = line.strip().split("\t")
                # first column of original file is just an incrementing number
                lncRNAName = spl[1]
                synonyms = spl[2].split(";")
                ensemblID = spl[3]
                refseqID = spl[4]
                mappedTranscripts = set()
                # 1) Try to map lncRNA name (i.e. mostly external gene name)
                if lncRNAName in self.rnaCrossReference:
                    mappedTranscripts.update(self.rnaCrossReference[lncRNAName])
                    found.add(lncRNAName)
                    namCount += 1
                else:
                    # 2) Try to map synonym lncRNA names / IDs
                    foundSyn = 0  # whether any synonym is found
                    for syn in synonyms:
                        syn = syn.strip()  # the file puts a space after each ";"
                        if syn in self.rnaCrossReference:
                            mappedTranscripts.update(self.rnaCrossReference[syn])
                            foundSyn = 1
                            found.add(lncRNAName)
                            synCount += 1
                            break
                    if foundSyn == 0:
                        # 3) Try to map Ensembl IDs
                        if ensemblID in self.rnaCrossReference:
                            mappedTranscripts.update(self.rnaCrossReference[ensemblID])
                            found.add(lncRNAName)
                            ensCount += 1
                        # 4) Try to map RefSeq IDs
                        elif refseqID in self.rnaCrossReference:
                            mappedTranscripts.update(self.rnaCrossReference[refseqID])
                            found.add(lncRNAName)
                            refCount += 1
                        else:
                            # if no mapping is found
                            notFound.add(lncRNAName)
                # write one output line per mapped transcript
                for transcript in mappedTranscripts:
                    newLine = [transcript]
                    newLine.extend(spl)
                    outFile.write("\t".join(newLine) + "\n")
        outFile.close()
        print("read_lnc2cancer: lnc2cancer IDs found: %s" % len(found))
        print("read_lnc2cancer: lnc2cancer IDs not found: %s" % len(notFound))
        print("List of transcripts not found: ")
        print(notFound)
# Command-line entry point: parse arguments, run the DB read and the
# lnc2cancer mapping, timing each step.
if __name__ == "__main__":
    try:
        # Start chrono
        Timer.get_instance().start_chrono()
        print "STARTING " + SCRIPT_NAME
        #===============================================================================
        # Get input arguments, initialise class
        #===============================================================================
        parser = argparse.ArgumentParser(description= DESC_COMMENT)
        # positional args
        parser.add_argument('rainetDB', metavar='rainetDB', type=str,
                            help='Rainet database to be used.')
        parser.add_argument('lnc2cancerData', metavar='lnc2cancerData', type=str,
                            help='Data from lnc2cancer database.')
        parser.add_argument('outputFolder', metavar='outputFolder', type=str,
                            help='Output folder.')
        # gets the arguments
        args = parser.parse_args( )
        # Initialise class
        parseLnc2cancer = ParseLnc2cancer( args.rainetDB, args.lnc2cancerData, args.outputFolder)
        #===============================================================================
        # Run analysis / processing
        #===============================================================================
        # Step 1: build the ID cross-reference map from the database.
        Timer.get_instance().step( "Read RainetDB table..")
        parseLnc2cancer.read_rainet_db( )
        # Step 2: map the lnc2cancer entries and write the output file.
        Timer.get_instance().step( "Read lnc2cancer file..")
        parseLnc2cancer.read_lnc2cancer( )
        # Stop the chrono
        Timer.get_instance().stop_chrono( "FINISHED " + SCRIPT_NAME )
    # Use RainetException to catch errors
    except RainetException as rainet:
        Logger.get_instance().error( "Error during execution of %s. Aborting :\n" % SCRIPT_NAME + rainet.to_string())
|
{"hexsha": "6a4a1c3b01d8d5b2201ec3fb056073f4c13c3d81", "size": 13789, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/fr/tagc/rainet/core/execution/analysis/EnrichmentAnalysis/ParseLnc2cancer.py", "max_stars_repo_name": "TAGC-Brun/RAINET-RNA", "max_stars_repo_head_hexsha": "4d5a6658c41d4ab28d7c3d168eed65fe79233b48", "max_stars_repo_licenses": ["Linux-OpenIB"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/fr/tagc/rainet/core/execution/analysis/EnrichmentAnalysis/ParseLnc2cancer.py", "max_issues_repo_name": "TAGC-Brun/RAINET-RNA", "max_issues_repo_head_hexsha": "4d5a6658c41d4ab28d7c3d168eed65fe79233b48", "max_issues_repo_licenses": ["Linux-OpenIB"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fr/tagc/rainet/core/execution/analysis/EnrichmentAnalysis/ParseLnc2cancer.py", "max_forks_repo_name": "TAGC-Brun/RAINET-RNA", "max_forks_repo_head_hexsha": "4d5a6658c41d4ab28d7c3d168eed65fe79233b48", "max_forks_repo_licenses": ["Linux-OpenIB"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2132867133, "max_line_length": 913, "alphanum_fraction": 0.5312205381, "include": true, "reason": "import numpy", "num_tokens": 2859}
|
import os
import numpy as np
import deepchem as dc
import pandas as pd
from rdkit import Chem
def load_pdbbind_labels(labels_file):
  """Loads pdbbind labels as dataframe

  Parameters
  ----------
  labels_file: str
    Location of PDBbind datafile.

  Returns
  -------
  contents_df: pd.DataFrame
    Dataframe containing contents of PDBbind datafile.
  """
  rows = []
  with open(labels_file) as handle:
    for raw_line in handle:
      # Lines starting with '#' are comments/header — skip them.
      if raw_line.startswith("#"):
        continue
      fields = raw_line.split()
      # A well-formed data row has exactly 8 whitespace-separated fields.
      if len(fields) == 8:
        rows.append(fields)
      else:
        print("Incorrect data format")
        print(fields)
  column_names = ("PDB code", "resolution", "release year", "-logKd/Ki",
                  "Kd/Ki", "ignore-this-field", "reference", "ligand name")
  return pd.DataFrame(rows, columns=column_names)
# Fix the RNG seed so the stratified split below is reproducible.
seed = 123
np.random.seed(seed)
# All paths are resolved relative to the current working directory.
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, "refined_atomconv")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
pdbbind_dir = os.path.join(base_dir, "v2015")
print("Loading ids from %s" % data_dir)
# Previously featurized PDBbind dataset written to disk.
d = dc.data.DiskDataset(data_dir)
ids = d.ids
pdbbind_data_file = os.path.join(pdbbind_dir, "INDEX_general_PL_data.2015")
contents_df = load_pdbbind_labels(pdbbind_data_file)
# Parallel containers (same row order): PDB codes and their release years.
df_ids = contents_df["PDB code"].values.tolist()
df_years = contents_df["release year"].values
def shard_generator():
for ind, pdb_code in enumerate(ids):
i = df_ids.index(pdb_code)
y = df_years[i]
X = np.zeros((1, 5))
w = np.ones((1, 1))
yield X, y, w, pdb_code
print("Generating year dataset")
temp_d = dc.data.DiskDataset.create_dataset(shard_generator())
print("Performing Stratified split on year dataset")
s = dc.splits.SingletaskStratifiedSplitter()
train_ind, test_ind = s.train_test_indices(temp_d)
print("Using indices from Stratified splitter on pdbbind dataset")
splitter = dc.splits.IndiceSplitter(test_indices=test_ind)
train_dataset, test_dataset = splitter.train_test_split(d, train_dir, test_dir)
|
{"hexsha": "382c14a2b76bec4d3631cd2a96f55007ad7cab3f", "size": 2090, "ext": "py", "lang": "Python", "max_stars_repo_path": "contrib/atomicconv/splits/pdbbind_temporal_split.py", "max_stars_repo_name": "cjgalvin/deepchem", "max_stars_repo_head_hexsha": "64993a129e7f0f78fed9500298b1828ac8a0757a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3782, "max_stars_repo_stars_event_min_datetime": "2016-02-21T03:53:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:10:26.000Z", "max_issues_repo_path": "contrib/atomicconv/splits/pdbbind_temporal_split.py", "max_issues_repo_name": "cjgalvin/deepchem", "max_issues_repo_head_hexsha": "64993a129e7f0f78fed9500298b1828ac8a0757a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2666, "max_issues_repo_issues_event_min_datetime": "2016-02-11T01:54:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:14:33.000Z", "max_forks_repo_path": "contrib/atomicconv/splits/pdbbind_temporal_split.py", "max_forks_repo_name": "cjgalvin/deepchem", "max_forks_repo_head_hexsha": "64993a129e7f0f78fed9500298b1828ac8a0757a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1597, "max_forks_repo_forks_event_min_datetime": "2016-02-21T03:10:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T13:21:28.000Z", "avg_line_length": 25.8024691358, "max_line_length": 79, "alphanum_fraction": 0.6966507177, "include": true, "reason": "import numpy", "num_tokens": 542}
|
[STATEMENT]
lemma AE_restr_to_subalg:
assumes "subalgebra M F"
"AE x in (restr_to_subalg M F). P x"
shows "AE x in M. P x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
obtain A where A: "\<And>x. x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P x" "A \<in> null_sets (restr_to_subalg M F)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>A. \<lbrakk>\<And>x. x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P x; A \<in> null_sets (restr_to_subalg M F)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using AE_E3[OF assms(2)]
[PROOF STATE]
proof (prove)
using this:
(\<And>N. \<lbrakk>\<And>x. x \<in> space (restr_to_subalg M F) - N \<Longrightarrow> P x; N \<in> null_sets (restr_to_subalg M F)\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>A. \<lbrakk>\<And>x. x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P x; A \<in> null_sets (restr_to_subalg M F)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P ?x
A \<in> null_sets (restr_to_subalg M F)
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P ?x
A \<in> null_sets (restr_to_subalg M F)
[PROOF STEP]
have "A \<in> null_sets M"
[PROOF STATE]
proof (prove)
using this:
?x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P ?x
A \<in> null_sets (restr_to_subalg M F)
goal (1 subgoal):
1. A \<in> null_sets M
[PROOF STEP]
using null_sets_restr_to_subalg[OF assms(1)]
[PROOF STATE]
proof (prove)
using this:
?x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P ?x
A \<in> null_sets (restr_to_subalg M F)
null_sets (restr_to_subalg M F) = null_sets M \<inter> sets F
goal (1 subgoal):
1. A \<in> null_sets M
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
A \<in> null_sets M
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
A \<in> null_sets M
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
have "\<And>x. x \<in> space M - A \<Longrightarrow> P x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. x \<in> space M - A \<Longrightarrow> P x
[PROOF STEP]
using space_restr_to_subalg A(1)
[PROOF STATE]
proof (prove)
using this:
space (restr_to_subalg ?M ?F) = space ?M
?x \<in> space (restr_to_subalg M F) - A \<Longrightarrow> P ?x
goal (1 subgoal):
1. \<And>x. x \<in> space M - A \<Longrightarrow> P x
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
?x \<in> space M - A \<Longrightarrow> P ?x
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
A \<in> null_sets M
?x \<in> space M - A \<Longrightarrow> P ?x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
A \<in> null_sets M
?x \<in> space M - A \<Longrightarrow> P ?x
goal (1 subgoal):
1. almost_everywhere M P
[PROOF STEP]
unfolding eventually_ae_filter
[PROOF STATE]
proof (prove)
using this:
A \<in> null_sets M
?x \<in> space M - A \<Longrightarrow> P ?x
goal (1 subgoal):
1. Bex (null_sets M) ((\<subseteq>) {x \<in> space M. \<not> P x})
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
almost_everywhere M P
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1513, "file": null, "length": 17}
|
module Main

||| Program entry point: prints a fixed greeting to standard output.
main : IO ()
main = do
  putStrLn "Hello world"
|
{"hexsha": "50e1db5ee36147f4132e1c7f56590b8b2910fdbb", "size": 53, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "fp/idris/tutorial/src/hello.idr", "max_stars_repo_name": "lonelyhentai/workspace", "max_stars_repo_head_hexsha": "2a996af58d6b9be5d608ed040267398bcf72403b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-26T16:37:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T01:26:19.000Z", "max_issues_repo_path": "fp/idris/tutorial/src/hello.idr", "max_issues_repo_name": "lonelyhentai/workspace", "max_issues_repo_head_hexsha": "2a996af58d6b9be5d608ed040267398bcf72403b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fp/idris/tutorial/src/hello.idr", "max_forks_repo_name": "lonelyhentai/workspace", "max_forks_repo_head_hexsha": "2a996af58d6b9be5d608ed040267398bcf72403b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-15T01:26:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T01:26:23.000Z", "avg_line_length": 17.6666666667, "max_line_length": 29, "alphanum_fraction": 0.7169811321, "num_tokens": 14}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.