input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
individual
annulus is stored as an array in the ``zones`` attribute, inherited
from the ``multizone`` class.
By default, they will be named where "zone0" is the zero'th element of
the ``zones`` attribute, corresponding to the innermost zone. The
second innermost zone will be the first element of the ``zones``
attribute, and by default will be named "zone1", and so on.
Example Code
------------
>>> import vice
>>> mw = vice.milkyway(name = "example", zone_width = 0.2)
>>> mw.annuli
[0.0,
0.2,
0.4,
...,
19.6,
19.8,
20.0]
"""
return self.migration.stars.radial_bins
@property
def zone_width(self):
    r"""
    Type : ``float``

    Default : 0.5

    The width of each annulus in kpc. This value can only be set at
    initialization of the ``milkyway`` object.

    .. seealso:: vice.milkyway.annuli

    Example Code
    ------------
    >>> import vice
    >>> mw = vice.milkyway(name = "example", zone_width = 0.2)
    >>> mw.zone_width
    0.2
    """
    # Annuli are uniformly spaced, so the width of the innermost ring is
    # the width of every ring.
    return self.annuli[1] - self.annuli[0]
@property
def evolution(self):
    r"""
    Type : ``<function>``

    Default : vice.milkyway.default_evolution

    As a function of radius in kpc and time in Gyr, respectively, either
    the surface density of gas in :math:`M_\odot kpc^{-2}`, the surface
    density of star formation in :math:`M_\odot kpc^{-2} yr^{-1}`, or the
    surface density of infall in :math:`M_\odot kpc^{-2} yr^{-1}`. As in
    the ``singlezone`` object, the interpretation is set by the attribute
    ``mode``.

    .. seealso:: vice.milkyway.default_evolution

    .. note:: This attribute will always be expected to accept radius in
        kpc and time in Gyr as parameters, in that order. However, surface
        densities of star formation and infall will always be interpreted
        as having units of :math:`M_\odot yr^{-1} kpc^{-2}` according to
        convention.

    .. note:: This is the **only** object in the current version of VICE
        which formulates an evolutionary parameter in terms of surface
        densities. This is done because many physical quantities are
        reported as surface densities in the astronomical literature. The
        ``singlezone`` and ``multizone`` objects, however, formulate
        parameters in terms of mass, out of necessity for the
        implementation.

    Example Code
    ------------
    >>> import vice
    >>> mw = vice.milkyway(name = "example")
    >>> mw.evolution
    <function vice.milkyway.milkyway.milkyway.default_evolution(radius, time)>
    """
    # Stored by the setter only after every zone accepted the wrapped
    # function, so this is always a validated callable.
    return self._evolution
@evolution.setter
def evolution(self, value):
    """Assign the surface-density evolution function to every annulus."""
    # ``mass_from_surface_density`` and the underlying ``singlezone``
    # objects perform the error handling on ``value``.
    for zone_idx in range(self.n_zones):
        inner = self.annuli[zone_idx]
        outer = self.annuli[zone_idx + 1]
        midpoint = (inner + outer) / 2
        area = m.pi * (outer**2 - inner**2)
        self.zones[zone_idx].func = mass_from_surface_density(
            value, midpoint, area)
    # Reaching this point means every zone accepted the function.
    self._evolution = value
@staticmethod
def default_evolution(radius, time):
r"""
The default evolutionary function of the ``milkyway`` object.
**Signature**: vice.milkyway.default_evolution(radius, time)
Parameters
----------
radius : float
Galactocentric radius in kpc.
time : float
Simulation time in Gyr.
Returns
-------
value : float
Always returns the value of 1.0. The interpretation of this is set
by the attribute ``mode``. With the default value of "ifr", this
represents a uniform surface of infall of 1.0
:math:`M_\odot yr^{-1} kpc^{-2}`.
Example Code
------------
>>> import vice
>>> mw = vice.milkyway(name = "example")
>>> mw.evolution
<function vice.milkyway.milkyway.milkyway.default_evolution(radius, time)>
>>> vice.milkyway.default_evolution(10, 1)
1.0
>>> vice.milkyway.default_evolution(5, 4)
1.0
"""
return 1.0
@property
def mode(self):
    r"""
    Type : ``str`` [case-insensitive]

    Default : "ifr"

    The interpretation of the attribute ``evolution``.

    * mode = "ifr": The value returned from the attribute ``evolution``
      represents the surface density of gas infall into the interstellar
      medium in :math:`M_\odot kpc^{-2} yr^{-1}`.
    * mode = "sfr": The value returned from the attribute ``evolution``
      represents the surface density of star formation in
      :math:`M_\odot kpc^{-2} yr^{-1}`.
    * mode = "gas": The value returned from the attribute ``evolution``
      represents the surface density of the interstellar medium in
      :math:`M_\odot kpc^{-2}`.

    .. note:: The attribute ``evolution`` will always be expected to accept
        radius in kpc and time in Gyr as the first and second parameters,
        respectively. However, infall and star formation histories will be
        interpreted as having units of :math:`M_\odot yr^{-1} kpc^{-2}`
        according to convention.

    .. note:: Updating the value of this attribute also updates the
        corresponding attribute of the ``J21_sf_law`` star formation law
        where it has been assigned the attribute ``tau_star``.

    Example Code
    ------------
    >>> import vice
    >>> mw = vice.milkyway(name = "example")
    >>> mw.mode
    "ifr"
    >>> mw.mode = "sfr"
    >>> mw.mode
    "sfr"
    """
    # All zones share the same mode (the setter assigns them in lockstep),
    # so the first zone is representative.
    return self.zones[0].mode
@mode.setter
def mode(self, value):
    """Propagate the new mode to every zone (singlezone validates it)."""
    for zone in self.zones:
        zone.mode = value
        # The J21 star formation law caches the mode internally, so keep
        # it synchronized whenever it is the assigned tau_star.
        if isinstance(zone.tau_star, J21_sf_law):
            # ``value`` passed singlezone's checks above, so it is a string
            zone.tau_star._mode = value.lower()
@property
def elements(self):
    r"""
    Type : ``tuple`` [elements of type str [case-insensitive]]

    Default : ("fe", "sr", "o")

    The symbols of the elements to track the enrichment for
    (case-insensitive). The more elements that are tracked, the longer the
    simulation will take, but the better calibrated is the total
    metallicity of the ISM in handling metallicity-dependent yields.

    .. tip::
        The order in which the elements appear in this tuple will dictate
        the abundance ratios that are quoted in the final stellar
        metallicity distribution function. That is, if element X appears
        before element Y, then VICE will determine the MDF in
        :math:`dN/d[Y/X]` as opposed to :math:`dN/d[X/Y]`. The elements
        that users intend to use as "reference elements" should come
        earliest in this list.

    .. note::
        All versions of VICE support the simulation of all 76
        astrophysically produced elements between carbon ("c") and
        bismuth ("bi"). Versions >= 1.1.0 also support helium ("he").

    .. note::
        Some of the heaviest elements that VICE recognizes have
        statistically significant enrichment from r-process
        nucleosynthesis [1]_. Simulations of these elements with realistic
        parameters and realistic nucleosynthetic yields will underpredict
        the absolute abundances of these elements. However, if these
        nuclei are assumed to be produced promptly following the formation
        of a single stellar population, the yield can be added to the
        yield from core collapse supernovae, which in theory can describe
        the total yield from all prompt sources [2]_.

    Example Code
    ------------
    >>> import vice
    >>> mw = vice.milkyway(name = "example")
    >>> mw.elements
    ("fe", "sr", "o")
    >>> mw.elements = ["mg", "fe", "n", "c", "o"]
    >>> mw.elements
    ("mg", "fe", "n", "c", "o")

    .. [1] Johnson (2019), Science, 363, 474
    .. [2] Johnson & Weinberg (2020), MNRAS, 498, 1364
    """
    # All zones track the same element list (the setter assigns them in
    # lockstep), so the first zone is representative.
    return self.zones[0].elements
@elements.setter
def elements(self, value):
    """Assign the tracked elements to each annulus (singlezone validates)."""
    zone_count = self.n_zones
    for idx in range(zone_count):
        self.zones[idx].elements = value
@property
def IMF(self):
    r"""
    Type : ``str`` [case-insensitive] or ``<function>``

    Default : "kroupa"

    .. versionadded:: 1.2.0
        In version >= 1.2.0, users may construct a function of mass to
        describe the IMF.

    The assumed stellar initial mass function (IMF). If assigned a string,
    VICE will adopt a built-in IMF. Functions must accept stellar mass as
    the only parameter and are expected to return the value of the IMF at
    that mass (it need not be normalized).

    Built-in IMFs:

    - "kroupa" [1]_
    - "salpeter" [2]_

    .. note::
        VICE has analytic solutions to the
        :ref:`cumulative return fraction <crf>` and the
        :ref:`main sequence mass fraction <msmf>` for built-in IMFs. If
        assigned a function, VICE will calculate these quantities
        numerically, increasing the required integration time.

    Example Code
    ------------
    >>> import vice
    >>> mw = vice.milkyway(name = "example")
    >>> mw.IMF = "salpeter"
    >>> def f(m):
        if m < 0.5:
            return m**-1.2
        else:
            return m**-2.2
    >>> mw.IMF = f

    .. [1] Kroupa (2001), MNRAS, 322, 231
    .. [2] Salpeter (1955), ApJ, 121, 161
    """
    # All zones share one IMF (the setter assigns them in lockstep), so
    # the first zone is representative.
    return self.zones[0].IMF
@IMF.setter
def IMF(self, value):
    """Assign the IMF to every annulus (singlezone validates the value)."""
    zone_count = self.n_zones
    for idx in range(zone_count):
        self.zones[idx].IMF = value
@property
def mass_loading(self):
    r"""
    Type : ``<function>``

    Default : vice.milkyway.default_mass_loading

    The mass-loading factor as a function of galactocentric radius in kpc
    describing the efficiency of outflows. For a given star formation rate
    :math:`\dot{M}_\star` and an outflow rate :math:`\dot{M}_\text{out}`,
    the mass-loading factor is defined as the unitless ratio:

    .. math:: \eta \equiv \dot{M}_\text{out} / \dot{M}_\star

    This function must return a non-negative real number for all radii
    defined in the disk model.

    .. note:: This formalism assumes a time-independent mass-loading
        factor at each radius. To implement a time-dependent alternative,
        users should modify the attribute ``eta`` of the ``singlezone``
        objects corresponding to each annulus in this model. See example
        below.

    .. seealso:: vice.singlezone.eta

    Example Code
    ------------
    >>> import math as m
    >>> import vice
    >>> mw = vice.milkyway(name = "example")
    >>> def f(r):
        return 0.5 * m.exp(r / 3)
    >>> mw.mass_loading = f
    >>> def g(t): # a time-dependent mass-loading factor
        return 3.0 * m.exp(-t / 3)
    >>> # assign each individual annulus a time-dependent value
    >>> for i in range(mw.n_zones):
    >>>     mw.zones[i].eta = g
    """
    # Stored by the setter only after it was successfully evaluated at
    # every annulus midpoint.
    return self._mass_loading
@mass_loading.setter
def mass_loading(self, value):
    """Set eta(r), evaluated at each annulus midpoint.

    Parameters
    ----------
    value : callable
        Function of galactocentric radius in kpc returning the unitless
        mass-loading factor. The ``singlezone`` ``eta`` setter performs
        the numeric error handling on the returned values.

    Raises
    ------
    TypeError
        If ``value`` is not callable.
    """
    if callable(value):
        for i in range(self.n_zones):
            midpoint = (self.annuli[i] + self.annuli[i + 1]) / 2
            self.zones[i].eta = value(midpoint)
        # If the code gets here, the function passed error handling at
        # every annulus.
        self._mass_loading = value
    else:
        # Bug fix: the previous message was a backslash-continued
        # triple-quoted string, which embedded source indentation into
        # the error text shown to the user.
        raise TypeError(
            "Attribute 'mass_loading' must be a callable object. "
            "Got: %s" % (type(value)))
@staticmethod
def default_mass_loading(rgal):
r"""
The default mass loading factor as a function of galactocentric
radius in kpc.
**Signature**: vice.milkyway.default_mass_loading(rgal)
Parameters
----------
rgal : real number
Galactocentric radius in kpc.
Returns
-------
eta : real number
The mass loading factor at that radius, defined by:
.. math:: \eta(r) = (y_\text{O}^\text{CC}) / Z_\text{O}^\odot
10^{0.08(r - 4\text{ kpc}) - 0.3} - 0.6
where :math:`Z_\text{O}^\odot` is the solar abundance by mass of
oxygen and :math:`y_\text{O}^\text{CC}` is the IMF-averaged CCSN
yield of oxygen. While these values are customizable through
``vice.solar_z`` and ``vice.yields.ccsne.settings``, this function
assumes a value of :math:`Z_\text{O}^\odot` = 0.00572 (Asplund et
al. 2009 [1]_) and :math:`y_\text{O}^\text{CC}` = 0.015 (Johnson &
Weinberg 2020 [2]_, Johnson et al. 2021 [3]_).
.. seealso:: vice.milkyway.mass_loading
Example Code
------------
>>> import vice
>>> vice.milkyway.default_mass_loading(0)
0.029064576665950193
>>> vice.milkyway.default_mass_loading(8)
2.1459664721614495
.. [1] Asplund et al. (2009), ARA&A, 47, 481
.. [2] <NAME> (2020), MNRAS, 498, 1364
.. [3] Johnson et al. (2021), arxiv:2103.09838
"""
return 0.015 / 0.00572 * (10**(0.08 * (rgal - 4) - 0.3)) - 0.6
@property
def dt(self):
r"""
Type : ``float``
Default: 0.01
The | |
import os
import numpy as np
from netCDF4 import Dataset
from grid_resolution import grid_resolution
import multiprocessing as mp
class numerical_model:
    """Driver for the SPEEDY atmospheric model: manages the folder layout,
    compiles the model, builds perturbed ensembles and runs forecasts."""

    # Class-level defaults; most are overwritten in __init__.
    name = 'speedy';
    path = None;  # root working directory
    res = None;  # spectral resolution name, e.g. 't21'
    source_model = None;  # pristine model sources for this resolution
    source_local = None;  # local copy of the sources (compiled here)
    ensemble_0 = None;  # per-member working folders / collected outputs
    snapshots = None;  # reference-trajectory snapshot folder
    model_local = None;  # scratch folder where single runs execute
    free_run = None;  # free-run trajectory folder
    initial_condition = None;  # archived initial-condition folder
    Nens = None;  # ensemble size
    # Prognostic fields at two time levels (suffix 0/1); TRG* are tracers
    # and PSG* is surface pressure.
    var_names = ['UG0','VG0','TG0','TRG0','PSG0','UG1','VG1','TG1','TRG1','PSG1'];
    x_ic = None;  # initial-condition state (list of arrays)
    X = None;  # ensemble states (list of per-member lists of arrays)
def __init__(self, path, gs, Nens, par=True):
    """Configure the folder layout and create the working directories.

    path - root working directory (with trailing separator)
    gs   - grid_resolution instance describing the model grid
    Nens - number of ensemble members
    par  - run member forecasts in parallel when True
    """
    self.path = path
    self.gs = gs
    self.res = gs.name
    self.Nens = Nens
    self.par = par
    self.test = 1  # verbosity flag: 1 prints progress messages
    # Folder layout rooted at ``path``.
    self.source_model = '../models/speedy/' + self.res + '/'
    self.source_local = self.path + 'source_local/'
    self.ensemble_0 = self.path + 'ensemble_0/'
    self.snapshots = self.path + 'snapshots/'
    self.model_local = self.path + 'model_local/'
    self.free_run = self.path + 'free_run/'
    self.initial_condition = self.path + 'initial_condition/'
    self.create_all_folders()
def clear_all_folders(self):
    """Remove the ensemble, local-source and local-model directories."""
    for folder in (self.ensemble_0, self.source_local, self.model_local):
        os.system(f'rm -rf {folder}')
def create_all_folders(self):
    """Create the working directory tree and copy in the model sources.

    Fix: the previous implementation shelled out to plain ``mkdir``,
    which fails (and prints an error) whenever a directory already
    exists. ``os.makedirs(..., exist_ok=True)`` is silent, idempotent
    and avoids one shell invocation per directory.
    """
    folders = [self.snapshots, self.ensemble_0, self.source_local,
               self.free_run, self.initial_condition, self.model_local]
    # One working sub-directory per ensemble member inside ensemble_0.
    folders += [f'{self.ensemble_0}ens_{e}' for e in range(0, self.Nens)]
    for folder in folders:
        os.makedirs(folder, exist_ok=True)
    # The copy stays in the shell because it relies on glob expansion.
    os.system(f'cp {self.source_model}* {self.source_local}')
def set_resol_variables(self):
    """Build [(lat, lon) grid, n_levels] metadata for every state variable."""
    self.var_resol = []
    for var_name in self.var_names:
        # Surface pressure (PSG*) is a single-level field; all other
        # fields carry 8 vertical levels.
        levels = 1 if 'PSG' in var_name else 8
        self.var_resol.append([self.gs.get_resolution(self.res), levels])
def set_time_integration(self, args):
    """Write the run-control headers and recompile the local model source.

    args - sequence [nmonths, days, restart]; extra entries are ignored.
    """
    nmonths, days, restart = args[0], args[1], args[2]
    self.create_cls_instep_file(nmonths, days, restart)
    self.create_cls_indyns_file()
    # Move the freshly written headers into the local source and rebuild.
    os.system(f'mv cls_instep.h {self.source_local}cls_instep.h; '
              f'mv cls_indyns.h {self.source_local}cls_indyns.h; '
              f'cd {self.source_local}/ ; sh compile.sh>out.txt;')
def load_netcdf_file(self,nc_f):
    """Read one model state from the NetCDF file ``nc_f``.

    Returns a list with one array per entry of ``var_names``: tracer
    fields (TRG*) are reduced to their first tracer slot, surface
    pressure (PSG*) is kept 2-D, and every other field is kept 3-D.
    """
    var_names = self.var_names;
    n_vars = len(var_names);
    x = [];
    nc_fid = Dataset(nc_f, 'r');
    for v in range(0,n_vars):
        var_name = var_names[v];
        var_full = nc_fid.variables[var_name][:];
        if 'TRG' in var_names[v]:
            # Tracer fields carry a leading tracer dimension; keep slot 0.
            x.append(var_full[0,:,:,:]);
        elif 'PSG' in var_names[v]:
            # NOTE(review): original comment asked whether record 0 or -1
            # should be read here -- TODO confirm against the model output.
            x.append(var_full[:,:]); #ask for 0 or -1
        else:
            x.append(var_full[:,:,:]);
    nc_fid.close();
    return x;
def create_initial_condition(self, ini0):
    """Spin the model up once and archive the resulting initial condition.

    ini0 - [nmonths, days, restart] spin-up configuration.
    """
    self.set_time_integration(ini0)
    # Refresh the scratch folder with the compiled sources and run once.
    os.system(f'cp {self.source_local}* {self.model_local}')
    os.system(f'cd {self.model_local}; sh remove_grad_ctl.sh; ./imp.exe>out.txt')
    # Archive the NetCDF state and the Fortran restart file.
    state_file = self.model_local + '/ensemble_member.nc'
    restart_file = self.model_local + '/fort.10'
    os.system(f'cp {state_file} {self.initial_condition}initial_condition.nc')
    os.system(f'cp {restart_file} {self.initial_condition}fort.3')
def load_initial_condition(self):
    """Read the archived initial condition into ``self.x_ic``."""
    self.x_ic = self.load_netcdf_file(
        self.initial_condition + 'initial_condition.nc')
def create_perturbed_ensemble(self, per, Nens):
    """Build ``Nens`` members by multiplicative Gaussian perturbation.

    Each member is x_ic * (1 + per * N(0, 1)), element-wise, written to
    its own ``ens_{e}/ensemble_member.nc`` file and kept in ``self.X``.

    per  - relative perturbation amplitude
    Nens - number of members to generate
    """
    # Fixed seed so the initial ensemble is reproducible across runs.
    np.random.seed(seed=10); #To replicate the initial ensemble
    self.load_initial_condition();
    n_vars = len(self.var_names);
    self.X = [];
    for e in range(0, Nens):
        Xe = [];
        for v in range(0, n_vars):
            if self.test == 1: print('* Creating xb^[{0}] var {1}'.format(e, self.var_names[v]));
            # Flatten, perturb proportionally to the field value, restore.
            dim_v = self.x_ic[v].shape
            ne = np.prod(dim_v);
            x_e_v = self.x_ic[v].reshape((ne,1)) + per*np.random.randn(ne,1)*self.x_ic[v].reshape((ne,1));
            Xe.append(x_e_v.reshape(dim_v));
        fn_e = self.ensemble_0+f'ens_{e}/'+f'ensemble_member.nc';
        # Write the perturbed member into its ensemble_0 working folder.
        self.map_state_netcdf(Xe, fn_e);
        self.X.append(Xe);
        if self.test==1: print('* ENDJ - Stored the {0}-th ensemble member in = {1}'.format(e, fn_e));
def map_state_netcdf(self, xs, fn):
    """Write a model state ``xs`` (ordered like ``var_names``) to ``fn``.

    Creates the NetCDF dimensions/variables expected by the SPEEDY
    wrapper: 3-D (lev, lat, lon) prognostic fields, 4-D tracers with a
    single tracer slot, and 2-D (lat, lon) surface pressure.
    """
    ds = Dataset(fn, 'w', format='NETCDF4');
    # Dimensions: one tracer slot, grid sizes from the grid_resolution.
    ntr = ds.createDimension('ntr',1);
    lev = ds.createDimension('lev', self.gs.lev);
    lat = ds.createDimension('lat', self.gs.lat);
    lon = ds.createDimension('lon', self.gs.lon);
    # One variable per entry of var_names, same order as ``xs``.
    UG0 = ds.createVariable('UG0', np.float64, ('lev','lat', 'lon'));
    VG0 = ds.createVariable('VG0', np.float64, ('lev','lat', 'lon'));
    TG0 = ds.createVariable('TG0', np.float64, ('lev','lat', 'lon'));
    TRG0 = ds.createVariable('TRG0', np.float64, ('ntr','lev','lat', 'lon'));
    PSG0 = ds.createVariable('PSG0', np.float64, ('lat', 'lon',))
    UG1 = ds.createVariable('UG1', np.float64, ('lev','lat', 'lon'));
    VG1 = ds.createVariable('VG1', np.float64, ('lev','lat', 'lon'));
    TG1 = ds.createVariable('TG1', np.float64, ('lev','lat', 'lon'));
    TRG1 = ds.createVariable('TRG1', np.float64, ('ntr','lev', 'lat', 'lon'))
    PSG1 = ds.createVariable('PSG1', np.float64, ('lat', 'lon',))
    # Copy the arrays; tracers go into slot 0 of the ntr dimension.
    UG0[:,:,:] = xs[0][:,:,:];
    VG0[:,:,:] = xs[1][:,:,:];
    TG0[:,:,:] = xs[2][:,:,:];
    TRG0[0,:,:,:] = xs[3][:,:,:];
    PSG0[:,:] = xs[4][:,:];
    UG1[:,:,:] = xs[5][:,:,:];
    VG1[:,:,:] = xs[6][:,:,:];
    TG1[:,:,:] = xs[7][:,:,:];
    TRG1[0,:,:,:] = xs[8][:,:,:];
    PSG1[:,:] = xs[9][:,:];
    ds.close();
###############################################################################
###############################################################################
#Routines regarding ensembles
###############################################################################
###############################################################################
def copy_reference_restart(self, Nens):
    """Give every ensemble member a copy of the reference restart file."""
    restart = self.initial_condition + '/fort.3'
    for member in range(0, Nens):
        os.system(f'cp {restart} {self.ensemble_0}ens_{member}/fort.3')
def update_model_ensemble_folders(self, Nens):
    """Copy the compiled local model sources into every member folder."""
    for member in range(0, Nens):
        member_dir = f'{self.ensemble_0}ens_{member}/'
        os.system(f'cp {self.source_local}/* {member_dir}')
def update_members_ensemble_folders(self, Nens):
    """Move collected member files back into their per-member folders."""
    for member in range(0, Nens):
        if self.test == 1:
            print('* ENDJ - Updating ensemble member {0}'.format(member))
        member_dir = f'{self.ensemble_0}ens_{member}/'
        nc_src = f'{self.ensemble_0}ensemble_member_{member}.nc'
        restart_src = f'{self.ensemble_0}fort_{member}.3'
        os.system(f'mv {nc_src} {member_dir}ensemble_member.nc')
        os.system(f'mv {restart_src} {member_dir}fort.3')
def perform_forecast(self, e):
    """Run one forecast inside member ``e``'s folder; return that folder."""
    if self.test == 1:
        print('* ENDJ - Performing forecast ensemble member {0}'.format(e))
    member_dir = f'{self.ensemble_0}ens_{e}/'
    # Run the model, then promote the new restart (fort.10 -> fort.3).
    os.system(f'cd {member_dir}; sh remove_grad_ctl.sh; ./imp.exe>out.txt; mv fort.10 fort.3')
    return member_dir
def forecast_ensemble(self, Nens):
    """Forecast every ensemble member serially."""
    for member in range(0, Nens):
        self.perform_forecast(member)
def forecast_ensemble_parallel(self, Nens):
    """Forecast all ensemble members in parallel, one task per member.

    Fix: the pool is now managed by a ``with`` block so that worker
    processes are reclaimed even if a forecast raises, and ``join`` is
    called so the method does not return before every member finished.
    """
    with mp.Pool(mp.cpu_count()) as pool:
        pool.map(self.perform_forecast, list(range(0, Nens)))
        pool.close()
        pool.join()
def collect_ensemble_members(self):
    """Pull each member's output up into ensemble_0 and delete its folder."""
    for e in range(0, self.Nens):
        member_dir = f'{self.ensemble_0}/ens_{e}'
        os.system(f'mv {member_dir}/ensemble_member.nc {self.ensemble_0}ensemble_member_{e}.nc')
        os.system(f'mv {member_dir}/fort.3 {self.ensemble_0}fort_{e}.3')
        os.system(f'rm -rf {member_dir}')
    if self.test == 1:
        print('* ENDJ - All ensemble members have been collected')
def load_ensemble(self):
    """Read every member's NetCDF state into ``self.X``."""
    self.X = [
        self.load_netcdf_file(f'{self.ensemble_0}ens_{e}/ensemble_member.nc')
        for e in range(0, self.Nens)
    ]
def create_initial_ensemble(self, ini0, args, per, Nens):
    """Build, spin up and re-configure the initial perturbed ensemble.

    ini0 - spin-up time configuration [nmonths, days, restart]
    args - assimilation time configuration [nmonths, days, restart]
    per  - relative perturbation amplitude
    Nens - ensemble size
    """
    # Spin-up configuration, perturbed members, shared restart file.
    self.set_time_integration(ini0)
    self.update_model_ensemble_folders(Nens)
    self.create_perturbed_ensemble(per, Nens)
    self.copy_reference_restart(Nens)
    # Forecast every member through the spin-up window.
    if self.par:
        self.forecast_ensemble_parallel(Nens)
    else:
        self.forecast_ensemble(Nens)
    # Switch to the assimilation time step and refresh the member folders.
    self.set_time_integration(args)
    self.update_model_ensemble_folders(Nens)
    if self.test == 1:
        print('* ENDJ - The initial ensemble has been created Nens = {0}'.format(Nens))
###############################################################################
###############################################################################
#Routines regarding reference and background trajectories
###############################################################################
###############################################################################
def create_reference_snapshots(self, ini0, args, M):
    """Generate M reference-trajectory snapshots from the initial condition."""
    initial_nc = self.initial_condition + 'initial_condition.nc'
    initial_restart = self.initial_condition + 'fort.3'
    self.create_snapshots(initial_nc, initial_restart, self.snapshots,
                          'reference_solution', ini0, args, M)
def create_free_run(self, ini0, args, M):
    """Propagate the ensemble-mean state to produce a free-run trajectory."""
    self.load_ensemble()
    mean_state = self.compute_snapshot_mean(self.X)
    # Write the mean state where the model expects its input state.
    nc_path = self.model_local + 'ensemble_member.nc'
    self.map_state_netcdf(mean_state, nc_path)
    restart = self.initial_condition + 'fort.3'
    os.system(f'cp {restart} {self.model_local}fort.10')
    self.create_snapshots(nc_path, restart, self.free_run, 'free_run',
                          ini0, args, M, dyn_cons = False)
def create_snapshots(self, nc_f, gr_f, folder_dest, name_conv, ini0, args, M, dyn_cons = True):
    """Propagate a state forward and archive M+1 snapshots of it.

    nc_f / gr_f  - initial NetCDF state and Fortran restart file
    folder_dest  - folder receiving the ``{name_conv}_{s}.nc`` snapshots
    name_conv    - file-name prefix for the snapshots
    ini0         - spin-up time configuration [nmonths, days, restart]
    args         - per-snapshot time configuration [nmonths, days, restart]
    M            - number of snapshot propagation steps
    dyn_cons     - when True, run an extra spin-up integration (with
                   ``ini0``) before taking snapshots
    """
    # CAUTION: the os.system sequence below is order-dependent; each run
    # consumes the files the previous step moved into model_local.
    if dyn_cons:
        self.set_time_integration(ini0);
    # Stage snapshot 0 in the destination and scratch folders.
    os.system(f'cp {nc_f} {folder_dest}{name_conv}.nc');
    os.system(f'cp {gr_f} {folder_dest}fort.3');
    os.system(f'cp {self.source_local}/* {self.model_local}');
    os.system(f'cp {folder_dest}{name_conv}.nc {self.model_local}ensemble_member.nc');
    os.system(f'cp {folder_dest}fort.3 {self.model_local}fort.3');
    # Spin-up run.
    os.system(f'cd {self.model_local}; sh remove_grad_ctl.sh; ./imp.exe>out.txt');
    # Archive the spun-up state as snapshot 0.
    os.system(f'mv {self.model_local}ensemble_member.nc {folder_dest}{name_conv}_0.nc');
    os.system(f'mv {self.model_local}fort.10 {folder_dest}fort_0.3');
    # Switch to the per-snapshot integration window.
    self.set_time_integration(args);
    # Refresh the scratch folder with the recompiled sources.
    os.system(f'cp {self.source_local}/* {self.model_local}');
    # Take the M snapshots, each starting from the previous one.
    for s in range(0, M):
        print('* Working on snapshot {0}'.format(s));
        # Copy snapshot s into the scratch model folder.
        os.system(f'cp {folder_dest}{name_conv}_{s}.nc {self.model_local}ensemble_member.nc');
        os.system(f'cp {folder_dest}fort_{s}.3 {self.model_local}fort.3');
        # Run the model one window forward.
        os.system(f'cd {self.model_local}; sh remove_grad_ctl.sh; ./imp.exe>out.txt');
        # Archive the propagated state as snapshot s+1.
        os.system(f'mv {self.model_local}ensemble_member.nc {folder_dest}{name_conv}_{s+1}.nc');
        os.system(f'mv {self.model_local}fort.10 {folder_dest}fort_{s+1}.3');
    if self.test == 1: print(f'* ENDJ - Finishing creating the {name_conv} trajectory for M = {M}');
def get_empty_state(self):
    """Return a zeroed model state shaped like ``var_resol`` describes.

    Multi-level fields get a (lev, lat, lon) array; single-level fields
    (surface pressure) get a (lat, lon) array.
    """
    state = []
    for v in range(0, len(self.var_names)):
        (lat, lon), lev = self.var_resol[v][0], self.var_resol[v][1]
        shape = (lev, lat, lon) if lev > 1 else (lat, lon)
        state.append(np.zeros(shape))
    return state
def map_vector_state(self, X_all, e):
    """Unpack column ``e`` of the stacked vectors ``X_all`` into a state.

    X_all - list of 2-D blocks (flattened variables x ensemble members)
    e     - ensemble-member (column) index to extract

    NOTE(review): relies on ``self.mask_cor``, which is not assigned
    anywhere in this file's visible code -- presumably it is built
    elsewhere and describes, per block, the (variable index, level) and
    (lat, lon) layout of each flattened segment. TODO confirm.
    """
    X = self.get_empty_state();
    for msk_cor, X_block in zip(self.mask_cor, X_all):
        # ``ini`` walks down the rows of this block, one variable/level
        # segment of lat*lon entries at a time.
        ini = 0;
        for var in msk_cor:
            var_info = var[0];
            var_reso = var[1];
            var_index = var_info[0];
            var_level = var_info[1];
            lat, lon = var_reso[0], var_reso[1];
            X_v = X[var_index];
            n = lat * lon;
            fin = ini+n;
            if 'PSG' in self.var_names[var_index]:
                # Surface pressure is 2-D: the segment is the whole field.
                X_v = X_block[ini:fin, e].reshape((lat, lon));
            else:
                # 3-D fields are filled one vertical level at a time.
                X_v[var_level,:,:] = X_block[ini:fin, e].reshape((lat, lon));
            X[var_index] = X_v;
            ini+=n;
    return X;
def compute_snapshot_mean(self, X):
    """Return the element-wise mean state over a list of member states.

    X - list of states, each a list of arrays ordered like ``var_names``.
    """
    samples = len(X)
    mean_state = []
    for v in range(0, len(self.var_names)):
        accumulator = np.zeros(X[0][v].shape)
        for member_state in X:
            accumulator += member_state[v]
        mean_state.append(accumulator / samples)
    return mean_state
###############################################################################
###############################################################################
#Routines regarding model parameters
###############################################################################
###############################################################################
def create_cls_instep_file(self, nmonths, days, restart):
    """Write the SPEEDY ``cls_instep.h`` run-control header.

    nmonths - NMONTS, number of months to integrate
    days    - NDAYSL, number of days in the last month
    restart - ISTART flag (read the NetCDF state when nonzero)

    Fixes: the file is now opened in a ``with`` block, so the handle is
    always closed/flushed (previously it leaked on the invalid-resolution
    path); the resolution is validated *before* the file is created, so
    an invalid resolution no longer leaves a truncated header behind.
    """
    # Time steps per day for each supported spectral resolution.
    steps_per_day = {'t21': 36, 't30': 36, 't47': 72, 't63': 96, 't106': 106}
    if self.res not in steps_per_day:
        print('* Invalid resolution ' + self.res)
        exit()
    nsteps = steps_per_day[self.res]
    with open('cls_instep.h', 'w') as f:
        f.write(' NMONTS = ' + str(nmonths) + '\n')
        f.write(' NDAYSL = ' + str(days) + '\n')
        f.write(' HOURS = 0' + '\n')
        f.write(' ISTART = ' + str(restart) + '\n')
        f.write(' NSTPPR = 6' + '\n')
        f.write(' NSTOUT = -1' + '\n')
        f.write(' IDOUT = 0' + '\n')
        f.write(' NMONRS = -1' + '\n')
        f.write(' ISEASC = 1' + '\n')
        f.write(' IYEAR0 = 1979' + '\n')
        f.write(' IMONT0 = 1' + '\n')
        f.write(' NSTRAD = 3' + '\n')
        f.write(' NSTRDF = 0' + '\n')
        f.write(' INDRDF = 1' + '\n')
        f.write(' ICLAND = 1' + '\n')
        f.write(' ICSEA = 0' + '\n')
        f.write(' ICICE = 1' + '\n')
        f.write(' ISSTAN = 1' + '\n')
        f.write(' ISSTY0 = 1870' + '\n')
        f.write(' ISST0 = (IYEAR0-ISSTY0)*12+IMONT0' + '\n')
        f.write(' LPPRES = .true.' + '\n')
        f.write(' LCO2 = .false.' + '\n')
        f.write(' NSTEPS = ' + str(nsteps) + '\n')
        f.write(' NSTDIA = ' + str(nsteps) + '*5' + '\n')
        f.write(' BLOCKHOURS = 24./FLOAT(NSTEPS)' + '\n')
    print('* cls_instep.h has been created')
def create_cls_indyns_file(self):
f = open('cls_indyns.h','w')
f.write(' GAMMA | |
sigh_unflat = sigh.reshape(mlats.shape)
if return_f107:
return sigp_unflat, sigh_unflat, f107
else:
return sigp_unflat, sigh_unflat
class AverageEnergyEstimator(object):
    """A class which estimates average energy by estimating both
    energy and number flux (average energy = energy flux / number flux).
    """

    def __init__(self, atype, numflux_threshold=5.0e7):
        """
        atype - str
            Type of aurora, passed through to the two FluxEstimators.
        numflux_threshold - float, optional
            Number fluxes below this value are considered too small to
            form a reliable average energy; those points are zeroed.
        """
        self.numflux_threshold = numflux_threshold
        self.numflux_estimator = FluxEstimator(atype, 'number')
        self.energyflux_estimator = FluxEstimator(atype, 'energy')

    def get_eavg_for_time(self, dt, hemi='N', return_dF=False, combine_hemispheres=True):
        """Return (grid_mlats, grid_mlts, eavg[, dF]) in keV for time dt."""
        kwargs = {
            'hemi': hemi,
            'combine_hemispheres': combine_hemispheres,
            'return_dF': True
        }
        grid_mlats, grid_mlts, gridnumflux, dF = self.numflux_estimator.get_flux_for_time(dt, **kwargs)
        grid_mlats, grid_mlts, gridenergyflux, dF = self.energyflux_estimator.get_flux_for_time(dt, **kwargs)
        # Energy flux -> eV flux (1 erg = 1/1.6e-12 eV), then eV -> keV.
        grideavg = (gridenergyflux / 1.6e-12) / gridnumflux
        grideavg = grideavg / 1000.
        # Zero out averages built on unreliably small number fluxes.
        n_pts = len(grideavg.flatten())
        low_numflux = gridnumflux < self.numflux_threshold
        n_low_numflux = np.count_nonzero(low_numflux)
        grideavg[low_numflux] = 0.
        log.debug(('Zeroed {:d}/{:d} average energies'.format(n_low_numflux, n_pts)
                   + 'with numflux below {:e}'.format(self.numflux_threshold)))
        # Limit to the DMSP SSJ channels' energy range.
        # Fix: the under-count previously used a 0.5 keV threshold while
        # the zeroing below uses 0.2 keV, and the log claimed energies
        # over 30 keV were "zeroed" when they are actually capped. Both
        # counts and both messages now match what is done.
        n_over = np.count_nonzero(grideavg > 30)
        n_under = np.count_nonzero(grideavg < .2)
        log.debug('Capped {:d}/{:d} average energies over 30 keV'.format(n_over, n_pts))
        log.debug('Zeroed {:d}/{:d} average energies under .2 keV'.format(n_under, n_pts))
        grideavg[grideavg > 30.] = 30.  # Cap at 30 keV
        grideavg[grideavg < .2] = 0.  # Zero below 0.2 keV
        if not return_dF:
            return grid_mlats, grid_mlts, grideavg
        else:
            return grid_mlats, grid_mlts, grideavg, dF
class FluxEstimator(object):
    """
    A class which estimates auroral flux
    based on the Ovation Prime regressions,
    at arbitrary locations and times.

    Locations are in magnetic latitude and local
    time, and are interpolated using a B-spline
    representation
    """

    def __init__(self, atype, energy_or_number, seasonal_estimators=None):
        """
        atype - str, ['diff','mono','wave','ions']
            type of aurora for which to load regression coeffients

        energy_or_number - str, ['energy','number']
            Type of flux you want to estimate

        seasonal_estimators - dict, optional
            A dictionary of SeasonalFluxEstimators for seasons
            'spring','fall','summer','winter', if you
            don't want to create them
            (for efficiency across multi-day calls)
        """
        self.atype = atype  # Type of aurora
        # Check for legacy values of this argument
        _check_for_old_jtype(self, energy_or_number)
        self.energy_or_number = energy_or_number  # Type of flux
        seasons = ['spring', 'summer', 'fall', 'winter']
        if seasonal_estimators is None:
            # Make a seasonal estimator for each season
            self.seasonal_flux_estimators = {season: SeasonalFluxEstimator(season, atype, energy_or_number)
                                             for season in seasons}
        else:
            # Ensure the passed seasonal estimators are appropriate for
            # this atype and flux type.
            # Bug fixes: ``jtype_atype_ok`` was read before it was ever
            # assigned (NameError), the comparison referenced the
            # nonexistent ``self.jtype`` attribute, and the validated
            # estimators were never stored on the instance (so any later
            # use raised AttributeError).
            for season, estimator in seasonal_estimators.items():
                if (self.energy_or_number != estimator.energy_or_number
                        or self.atype != estimator.atype):
                    raise RuntimeError(
                        'Auroral and flux type of SeasonalFluxEstimators do not match {0} and {1}!'.format(
                            self.atype, self.energy_or_number))
            self.seasonal_flux_estimators = seasonal_estimators

    def season_weights(self, doy):
        """
        Determines the relative weighting of the
        model coeffecients for the various seasons for a particular
        day of year (doy). Nominally, weights the seasons
        based on the difference between the doy and the peak
        of the season (solstice/equinox)

        Returns:
            a dictionary with a key for each season.
            Each value in the dicionary is a float between 0 and 1
        """
        weight = OrderedDict(winter=0.,
                             spring=0.,
                             summer=0.,
                             fall=0.)
        if doy >= 79. and doy < 171:
            weight['summer'] = 1. - (171. - doy) / 92.
            weight['spring'] = 1. - weight['summer']
        elif doy >= 171. and doy < 263.:
            weight['fall'] = 1. - (263. - doy) / 92.
            weight['summer'] = 1. - weight['fall']
        elif doy >= 263. and doy < 354.:
            weight['winter'] = 1. - (354. - doy) / 91.
            weight['fall'] = 1. - weight['winter']
        elif doy >= 354 or doy < 79:
            # For days of year > 354, subtract 365 to get negative
            # day of year values for computation
            doy0 = doy - 365. if doy >= 354 else doy
            weight['spring'] = 1. - (79. - doy0) / 90.
            weight['winter'] = 1. - weight['spring']
        return weight

    def get_season_fluxes(self, dF, weights):
        """
        Extract the flux for each season and hemisphere and
        store them in a dictionary

        Return positive latitudes, since northern and southern
        latitude/localtime grids are the same
        """
        seasonfluxesN, seasonfluxesS = OrderedDict(), OrderedDict()
        gridmlats, gridmlts = None, None
        for season, estimator in self.seasonal_flux_estimators.items():
            if weights[season] == 0.:
                continue  # Skip calculation for seasons with zero weight
            flux_outs = estimator.get_gridded_flux(dF)
            gridmlatsN, gridmltsN, gridfluxN = flux_outs[:3]
            gridmlatsS, gridmltsS, gridfluxS = flux_outs[3:]
            seasonfluxesN[season] = gridfluxN
            seasonfluxesS[season] = gridfluxS
            # Northern and southern grids are identical (positive lats).
            gridmlats = gridmlatsN
            gridmlts = gridmltsN
        return gridmlats, gridmlts, seasonfluxesN, seasonfluxesS

    def get_flux_for_time(self, dt,
                          hemi='N', return_dF=False, combine_hemispheres=True):
        """
        Return (grid_mlats, grid_mlts, gridflux[, dF]) for datetime dt.

        The weighting of the seasonal flux for the different hemispheres
        is a bit counterintuitive: the 'summer' coefficients file contains
        the northern hemisphere coefficients for Boreal Summer
        (roughly May-August) and the southern hemisphere coefficients for
        Austral Summer (roughly November-February). Hence the southern
        hemisphere uses season weights computed at 365 - doy.

        In the IDL version of this model, the flux produced for
        Northern and Southern hemispheres is averaged; combining
        hemispheres is probably nessecary because there are data gaps
        (particularly in the northern hemisphere dawn), so this is the
        default behavior here as well. This can be overriden by passing
        combine_hemispheres=False.
        """
        doy = dt.timetuple().tm_yday
        if not combine_hemispheres:
            log.warning(('Warning: IDL version of OP2010 always combines hemispheres.'
                         + 'know what you are doing before switching this behavior'))
        if hemi == 'N':
            weights = self.season_weights(doy)
        elif hemi == 'S':
            # Mirror the day of year for the opposite hemisphere's seasons
            weights = self.season_weights(365. - doy)
        else:
            raise ValueError('Invalid hemisphere {0} (use N or S)'.format(hemi))
        dF = ovation_utilities.calc_dF(dt)
        if hasattr(self, '_dF'):
            log.warning(('Warning: Overriding real Newell Coupling {0}'.format(dF)
                         + 'with secret instance property _dF {0}'.format(self._dF)
                         + 'this is for debugging and will not'
                         + 'produce accurate results for a particular date'))
            dF = self._dF
        season_fluxes_outs = self.get_season_fluxes(dF, weights)
        grid_mlats, grid_mlts, seasonfluxesN, seasonfluxesS = season_fluxes_outs
        gridflux = np.zeros_like(grid_mlats)
        for season, W in weights.items():
            if W == 0.:
                continue
            gridfluxN = seasonfluxesN[season]
            gridfluxS = seasonfluxesS[season]
            if combine_hemispheres:
                gridflux += W * (gridfluxN + gridfluxS) / 2
            elif hemi == 'N':
                gridflux += W * gridfluxN
            elif hemi == 'S':
                gridflux += W * gridfluxS
        if hemi == 'S':
            grid_mlats = -1. * grid_mlats  # by default returns positive latitudes
        if not return_dF:
            return grid_mlats, grid_mlts, gridflux
        else:
            return grid_mlats, grid_mlts, gridflux, dF
class SeasonalFluxEstimator(object):
"""
A class to hold and calculate predictions from the regression coefficients
which are tabulated in the data/premodel/{season}_{atype}_*.txt
files.
Given a particular season, type of aurora (one of ['diff', 'mono', 'wave'])
and type of flux, computes the estimated flux from the tabulated
regression coefficients.
"""
_valid_atypes = ['diff', 'mono', 'wave', 'ions']
def __init__(self, season, atype, energy_or_number):
"""
season - str, ['winter','spring','summer','fall']
season for which to load regression coefficients
atype - str, ['diff','mono','wave','ions']
type of aurora for which to load regression coefficients, ions
are not implemented
energy_or_number - str, ['energy','number']
type of flux you want to estimate
"""
nmlt = 96 # number of mag local times in arrays (resolution of 15 minutes)
nmlat = 160 # number of mag latitudes in arrays (resolution of 1/4 of a degree (.25))
ndF = 12 # number of coupling strength bins
self.n_mlt_bins, self.n_mlat_bins, self.n_dF_bins = nmlt, nmlat, ndF
self.atype = atype
if atype not in self._valid_atypes:
raise ValueError(('Not a valid aurora type {}.'.format(atype)
+ 'valid values {}'.format(self._valid_atypes)))
# Check for legacy values of this argument
_check_for_old_jtype(self, energy_or_number)
self.energy_or_number = energy_or_number
# The mlat bins are organized like -50:-dlat:-90, 50:dlat:90
self.mlats = np.concatenate([np.linspace(-90., -50., self.n_mlat_bins // 2)[::-1],
np.linspace(50., 90., self.n_mlat_bins // 2)])
self.mlts = np.linspace(0., 24., self.n_mlt_bins)
# Determine file names
file_suffix = '_n' if energy_or_number == 'number' else ''
self.afile = os.path.join(ovation_datadir, 'premodel/{0}_{1}{2}.txt'.format(season, atype, file_suffix))
self.pfile = os.path.join(ovation_datadir, 'premodel/{0}_prob_b_{1}.txt'.format(season, atype))
# Default values of header (not yet clear why these are needed)
# b1 = 0.
| |
for name, version in manager.session.query(Network.name, Network.version).filter(Network.id_in(network_ids))
]
return jsonify(rv)
@api_blueprint.route('/api/query/<int:query_id>/parent')
def get_query_parent(query_id):
    """Return the parent of the query.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
    """
    query = manager.cu_get_query_by_id_or_404(query_id=query_id)
    has_parent = bool(query.parent)
    # When there is no parent, echo back the query's own identifier.
    return jsonify({
        'id': query.parent.id if has_parent else query.id,
        'parent': has_parent,
    })
@api_blueprint.route('/api/query/<int:query_id>/ancestor')
def get_query_oldest_ancestry(query_id):
    """Return the oldest ancestor of the query.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
    """
    query = manager.cu_get_query_by_id_or_404(query_id=query_id)
    # get_ancestor() walks the parent chain to the root query.
    ancestor = query.get_ancestor()
    return jsonify({
        'id': ancestor.id,
        'parent': bool(query.parent),
    })
def add_pipeline_entry(query_id: int, name: str, *args, **kwargs) -> Response:
    """Add an entry to the pipeline.

    :param query_id: The identifier of the query
    :param name: The name of the function to append
    """
    query = manager.cu_get_query_by_id_or_404(query_id=query_id)
    try:
        appended = query.build_appended(name, *args, **kwargs)
    except MissingPipelineFunctionError:
        logger.error('missing pipeline function: %s', name)
        return abort(403, f'Invalid function name: {name}')
    if current_user.is_authenticated:
        appended.user = current_user
    manager.session.add(appended)
    manager.session.commit()
    return jsonify({'status': 200, 'id': appended.id})
@api_blueprint.route('/api/query/<int:query_id>/isolated_node/<node_hash>')
def get_query_from_isolated_node(query_id: int, node_hash: str):
    """Create a query with a single node hash.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
      - name: node_hash
        in: path
        description: The PyBEL hash of a node
        required: true
        type: string
    """
    parent_query = manager.cu_get_query_by_id_or_404(query_id=query_id)
    node = manager.get_dsl_by_hash(node_hash)
    # Build a transient query over the same networks as the parent, seeded
    # by induction over just this node.
    child_query = Query(network_ids=[
        network.id
        for network in parent_query.assembly.networks
    ])
    child_query.append_seeding_induction([node])
    # Create a persistable model that shares the parent's assembly, then copy
    # the seeding and pipeline from the transient query onto it.
    child_query_model = parent_query.get_assembly_query()
    child_query_model.set_seeding_from_query(child_query)
    child_query_model.set_pipeline_from_query(child_query)
    if current_user.is_authenticated:
        child_query_model.user = current_user
    manager.session.add(child_query_model)
    manager.session.commit()
    return jsonify(child_query_model.to_json())
@api_blueprint.route('/api/query/<int:query_id>/add_applier/<name>')
def add_applier_to_query(query_id: int, name: str):
    """Build a new query with the applier in the url and adds it to the end of the pipeline.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
      - name: name
        in: path
        description: The name of the function to apply
        required: true
        type: string
    """
    # Delegate to the shared pipeline-append helper.
    return add_pipeline_entry(query_id=query_id, name=name)
@api_blueprint.route('/api/query/<int:query_id>/add_node_list_applier/<name>/<node_hashes>')
def add_node_list_applier_to_query(query_id: int, name: str, node_hashes: str):
    """Build a new query with a node list applier added to the end of the pipeline.

    :param query_id: A query's database identifier
    :param name: The name of the function to apply at the end of the query
    :param node_hashes: The node identifiers to use as the argument to the function
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
      - name: name
        in: path
        description: The name of the function to apply
        required: true
        type: string
      - name: node_hashes
        in: path
        description: A list of comma-separated PyBEL node hashes
        required: true
        type: string
    """
    # NOTE(review): node_hashes is forwarded as the raw comma-separated
    # string; confirm the pipeline function expects that rather than a list.
    return add_pipeline_entry(query_id, name, node_hashes)
@api_blueprint.route('/api/query/<int:query_id>/add_node_applier/<name>/<node_hash>')
def add_node_applier_to_query(query_id: int, name: str, node_hash: str):
    """Build a new query with a node applier added to the end of the pipeline.

    :param int query_id: A query's database identifier
    :param str name: The name of the function to apply at the end of the query
    :param str node_hash: The node identifier to use as the argument to the function
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
      - name: name
        in: path
        description: The name of the function to apply
        required: true
        type: string
      - name: node_hash
        in: path
        description: The PyBEL hash of a node
        required: true
        type: string
    """
    return add_pipeline_entry(query_id, name, node_hash)
@api_blueprint.route('/api/query/<int:query_id>/add_annotation_filter/')
def add_annotation_filter_to_query(query_id: int):
    """Build a new query with the annotation in the arguments.

    If 'and' is passed as an argument, it performs a AND query. By default it uses the OR condition.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
    """
    filters = {
        key: request.args.getlist(key)
        for key in request.args
        if key not in BLACK_LIST
    }
    # No filters at all: the query is unchanged, so just echo its id.
    if not filters:
        return jsonify({'id': query_id})
    # True -> OR semantics (default); the 'and' argument switches to AND.
    use_or = not request.args.get(AND)
    return add_pipeline_entry(query_id, get_subgraph_by_annotations, filters, use_or)
####################################
# USER
####################################
@api_blueprint.route('/api/user/count')
def get_number_users():
    """Return the number of users."""
    user_count = manager.session.query(func.count(User.id)).scalar()
    return jsonify({
        'time': str(time.asctime()),
        'count': user_count,
    })
@api_blueprint.route('/api/user')
@roles_required('admin')
def get_all_users():
    """Return all users.
    ---
    tags:
        - user
    """
    users = manager.session.query(User).all()
    return jsonify([u.to_json(include_id=True) for u in users])
@api_blueprint.route('/api/user/current')
@login_required
def get_current_user():
    """Return the current user.
    ---
    tags:
        - user
    """
    # current_user is provided by flask-login/flask-security.
    return jsonify(current_user.to_json())
@api_blueprint.route('/api/user/<user>/add_role/<role>')
@roles_required('admin')
def add_user_role(user, role):
    """Add a role to a user.
    ---
    tags:
        - user
    """
    manager.user_datastore.add_role_to_user(user, role)
    manager.user_datastore.commit()
    return jsonify({'status': 200})
@api_blueprint.route('/api/user/<user>/remove_role/<role>')
@roles_required('admin')
def drop_user_role(user, role):
    """Remove a role from a user.
    ---
    tags:
        - user
    """
    datastore = manager.user_datastore
    datastore.remove_role_from_user(user, role)
    datastore.commit()
    return jsonify({'status': 200})
@api_blueprint.route('/api/user/<int:user_id>', methods=['DELETE'])
@roles_required('admin')
def drop_user(user_id: int) -> Response:
    """Drop a user.
    ---
    tags:
        - user
    parameters:
      - name: user_id
        in: path
        description: The database identifier of a user
        required: true
        type: integer
    """
    user = manager.get_user_by_id_or_404(user_id)
    manager.user_datastore.delete_user(user)
    manager.user_datastore.commit()
    payload = {'id': user.id, 'email': user.email}
    return next_or_jsonify(f'Dropped user: {user}', user=payload)
####################################
# Analysis
####################################
@api_blueprint.route('/api/query/<int:query_id>/analysis/<int:experiment_id>/')
def get_analysis(query_id: int, experiment_id: int):
    """Return data from analysis.
    ---
    tags:
        - experiment
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
      - name: experiment_id
        in: path
        description: The database identifier of an experiment
        required: true
        type: integer
    """
    graph = manager.cu_get_graph_from_query_id_or_404(query_id)
    experiment = manager.get_experiment_by_id_or_404(experiment_id)
    # Experiment results are stored pickled in the database.
    scores = pickle.loads(experiment.result)
    results = [
        {'node': node.md5, 'data': scores[node]}
        for node in graph
        if node in scores
    ]
    return jsonify(results)
@api_blueprint.route('/api/query/<int:query_id>/analysis/<int:experiment_id>/median')
def get_analysis_median(query_id: int, experiment_id: int):
    """Return data from analysis.
    ---
    tags:
        - query
        - experiment
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
      - name: experiment_id
        in: path
        description: The database identifier of an experiment
        required: true
        type: integer
    """
    graph = manager.cu_get_graph_from_query_id_or_404(query_id)
    experiment = manager.get_experiment_by_id_or_404(experiment_id)
    scores = pickle.loads(experiment.result)
    # position 3 is the 'median' score
    return jsonify({
        node.md5: scores[node][3]
        for node in graph
        if node in scores
    })
@api_blueprint.route('/api/experiment/<int:experiment_id>', methods=['DELETE'])
@login_required
def drop_experiment_by_id(experiment_id: int):
    """Delete an experiment.
    ---
    tags:
        - experiment
    parameters:
      - name: experiment_id
        in: path
        description: The identifier of the experiment
        required: true
        type: integer
        format: int32
    """
    experiment = manager.get_experiment_by_id_or_404(experiment_id)
    # Only owners (or equivalently privileged users) may delete.
    if not current_user.has_experiment_rights(experiment):
        abort(403)
    manager.session.delete(experiment)
    manager.session.commit()
    return next_or_jsonify(
        f'Dropped experiment {experiment.id}',
        experiment={'id': experiment.id, 'description': experiment.description},
    )
####################################
# RIGHTS MANAGEMENT
####################################
@api_blueprint.route('/api/network/<int:network_id>/grant_project/<int:project_id>')
@login_required
def grant_network_to_project(network_id: int, project_id: int):
    """Add the rights to a network to a project.
    ---
    tags:
        - network
    parameters:
      - name: network_id
        in: path
        description: The database identifier of a network
        required: true
        type: integer
        format: int32
      - name: project_id
        in: path
        description: The identifier of a project
        required: true
        type: integer
        format: int32
    """
    network = manager.cu_owner_get_network_by_id_or_404(network_id=network_id)
    project = manager.cu_authenticated_get_project_by_id_or_404(project_id=project_id)
    # Granting rights = associating the network with the project.
    project.networks.append(network)
    manager.session.commit()
    return next_or_jsonify(
        f'Added rights for {network} to {project}',
        network={'id': network.id},
    )
@api_blueprint.route('/api/network/<int:network_id>/grant_user/<int:user_id>')
@login_required
def grant_network_to_user(network_id: int, user_id: int):
    """Add the rights to a network to a user.
    ---
    tags:
        - network
        - user
    parameters:
      - name: network_id
        in: path
        description: The identifier of a network
        required: true
        type: integer
        format: int32
      - name: user_id
        in: path
        description: The identifier of a user
        required: true
        type: integer
        format: int32
    """
    network = manager.cu_owner_get_network_by_id_or_404(network_id=network_id)
    # Use the _or_404 variant (as drop_user does) so a missing user yields a
    # clean 404 instead of an AttributeError/500 on the append below.
    user = manager.get_user_by_id_or_404(user_id)
    user.networks.append(network)
    manager.session.commit()
    return next_or_jsonify(f'Added rights for {network} to {user}')
@api_blueprint.route('/api/project')
@roles_required('admin')
def get_all_projects():
    """Return all project as a JSON file.
    ---
    tags:
        - project
    """
    projects = manager.session.query(Project).all()
    return jsonify([p.to_json(include_id=True) for p in projects])
@api_blueprint.route('/api/project/<int:project_id>')
@login_required
def get_project_metadata(project_id: int):
    """Return the project as a JSON file.
    ---
    tags:
        - project
    parameters:
      - name: project_id
        in: path
        description: The identifier of a project
        required: true
        type: integer
        format: int32
    """
    project = manager.cu_authenticated_get_project_by_id_or_404(project_id=project_id)
    metadata = project.to_json()
    return jsonify(**metadata)
@api_blueprint.route('/api/project/<int:project_id>', methods=['DELETE'])
@login_required
def drop_project_by_id(project_id: int):
    """Drop a project.
    ---
    tags:
        - project
    parameters:
      - name: project_id
        in: path
        description: The identifier of a project
        required: true
        type: integer
        format: int32
    """
    project = manager.cu_authenticated_get_project_by_id_or_404(project_id=project_id)
    # Capture the message before the row is deleted.
    message = f'Dropped project {project.id}: {project.name}'
    # FIXME cascade on project/users
    manager.session.delete(project)
    manager.session.commit()
    return next_or_jsonify(message)
@api_blueprint.route('/api/project/<int:project_id>/summarize')
@login_required
def summarize_project(project_id: int):
"""Provide a summary of all networks in a project as a CSV file.
---
tags:
- project
parameters:
- name: project_id
in: path
description: The identifier of a project
required: true
type: integer
format: int32
"""
project = manager.cu_authenticated_get_project_by_id_or_404(project_id=project_id)
si = StringIO()
cw = csv.writer(si)
csv_list = [
('Name', 'Version', | |
import logging
log = logging.getLogger(__name__)
from copy import deepcopy
from functools import partial
import itertools
import pickle as pickle
import numpy as np
from atom.api import Typed, Bool, Str, observe, Property
from enaml.application import deferred_call
from enaml.layout.api import InsertItem, InsertTab
from enaml.workbench.plugin import Plugin
from psi.core.enaml.api import load_manifests
from ..util import get_tagged_values
from .context_item import (
ContextItem, ContextGroup, ContextSet, Expression, Parameter, ContextMeta
)
from psi.core.enaml.api import PSIPlugin
from .expression import ExpressionNamespace
from .selector import BaseSelector
from .symbol import Symbol
SELECTORS_POINT = 'psi.context.selectors'
SYMBOLS_POINT = 'psi.context.symbols'
ITEMS_POINT = 'psi.context.items'
def get_preferences(obj):
    """Return a deep copy of *obj*'s preference-tagged values."""
    tagged = get_tagged_values(obj, 'preference')
    return deepcopy(tagged)
class ContextLookup:
    """Attribute-style proxy around a ContextPlugin.

    Reading an undefined attribute resolves it as a context value via the
    plugin, so client code can write ``context.some_parameter``.
    """

    def __init__(self, context_plugin):
        # Name-mangled so the proxy namespace stays clear for __getattr__.
        self.__context_plugin = context_plugin

    def __getattr__(self, name):
        # Only invoked for attributes not found normally; delegate the
        # lookup to the plugin's expression namespace.
        return self.__context_plugin.get_value(name)

    def unique_values(self, item_name, iterator='default'):
        return self.__context_plugin.unique_values(item_name, iterator)

    def lookup(self, attr):
        # Deferred getter: calling the returned callable re-reads the value.
        getter = partial(getattr, self, attr)
        getter.is_lookup = True
        return getter

    def get_selector(self, name='default'):
        return self.__context_plugin.get_selector(name)

    def get_names(self, name='default'):
        return self.__context_plugin.get_names(name)

    def get_range(self, item_name, iterator='default'):
        return self.__context_plugin.get_range(item_name, iterator)
context_initialized_error = '''
Context not initialized
Your experiment must call the `psi.context.initialize` command at the
appropriate time (usually in response to the `experiment_initialize` action).
See the manual on creating your own experiment if you need further guidance.
'''
class ContextPlugin(PSIPlugin):
'''
Plugin that provides a sequence of values that can be used by a controller
to determine the experiment context_items.
'''
context_groups = Typed(dict, {})
context_items = Typed(dict, {})
context_meta = Typed(dict, {})
context_expressions = Typed(dict, {})
# True if some of the context_meta items are user-configurable.
context_meta_editable = Bool(False)
selectors = Typed(dict, ())
symbols = Typed(dict, ())
# Reflects state of selectors and context_items as currently applied.
_context_item_state = Typed(dict, ())
_selector_state = Typed(dict, ())
_selectors = Typed(dict, ())
changes_pending = Bool(False)
# Used to track whether context has properly been initialized. Since all
# experiments must explicitly initialize context, this is very common to
# forget. Need to be able to check this to provide better error message.
initialized = Bool(False)
_iterators = Typed(dict, ())
_namespace = Typed(ExpressionNamespace, ())
# Subset of context_items that are parameters
parameters = Property()
# Return expressions for non-roved parameters
expressions = Property()
# Return all expressions, including those for roved parameters
all_expressions = Property()
lookup = Typed(ContextLookup)
def _default_lookup(self):
    # Atom default for the `lookup` member: a proxy bound to this plugin.
    return ContextLookup(self)
def start(self):
    """Plugin startup: load all extension contributions, then begin
    observing the extension points (refreshes happen before observers
    are bound)."""
    self._refresh_selectors()
    self._refresh_items()
    self._refresh_symbols()
    self._bind_observers()
def stop(self):
    """Plugin shutdown: stop listening for extension-point changes."""
    self._unbind_observers()
def _refresh_selectors(self, event=None):
    """Collect BaseSelector contributions from the selectors extension
    point, raising on duplicate names."""
    # Hidden here to avoid circular import since selectors define a
    # reference to the context plugin.
    log.debug('Refreshing selectors')
    found = {}
    point = self.workbench.get_extension_point(SELECTORS_POINT)
    for extension in point.extensions:
        for selector in extension.get_children(BaseSelector):
            if selector.name in found:
                raise ValueError(f'Already have a selector named "{selector.name}"')
            found[selector.name] = selector
            selector.load_manifest(self.workbench)
    self.selectors = found
def _refresh_symbols(self, event=None):
    """Collect Symbol contributions into the `symbols` mapping (later
    contributions win on name collision, as with plain dict updates)."""
    point = self.workbench.get_extension_point(SYMBOLS_POINT)
    self.symbols = {
        symbol.name: symbol.get_object()
        for extension in point.extensions
        for symbol in extension.get_children(Symbol)
    }
def _refresh_items(self, event=None):
    """Rebuild the item/group/set/meta/expression registries from plugin
    contributions to the items extension point.

    Raises ValueError on a missing group reference or a duplicate
    item/set name.
    """
    # Find all plugin contributions
    context_groups = self.load_plugins(ITEMS_POINT, ContextGroup, 'name')
    context_sets = self.load_plugins(ITEMS_POINT, ContextSet, 'name')
    context_items = self.load_plugins(ITEMS_POINT, ContextItem, 'name')
    context_meta = self.load_plugins(ITEMS_POINT, ContextMeta, 'name')
    context_expressions = self.load_plugins(ITEMS_POINT, Expression, 'parameter')
    groups_updated = set()
    # At this point, `context_items` is only the "orphan" context items
    # where the group has not yet been assigned.
    for item in itertools.chain(context_items.values(), context_sets.values()):
        if item.group_name not in context_groups:
            valid_names = ', '.join(context_groups.keys())
            m = f'Missing group "{item.group_name}" for item {item.name}. Valid groups are {valid_names}.'
            raise ValueError(m)
        group = context_groups[item.group_name]
        item.set_parent(group)
        groups_updated.add(group)
    # Now, loop through the groups and find all ContextItems defined under
    # the group. If the group has already been defined in another
    # contribution, raise an error. Also, build up the ContextItems
    # dictionary so that we have a list of all the context items we want to
    # display.
    context_items = {}
    for group in context_groups.values():
        for item in group.children:
            if isinstance(item, ContextItem):
                if item.name in context_items:
                    m = f'Context item {item.name} already defined'
                    raise ValueError(m)
                context_items[item.name] = item
            elif isinstance(item, ContextSet):
                # BUGFIX: the original tested `item.name in
                # context_sets.values()` (a str against ContextSet objects),
                # which never matched, so duplicate sets were never caught.
                # Compare against the registered object identity so a set
                # re-encountered after being parented above is not flagged.
                existing = context_sets.get(item.name)
                if existing is not None and existing is not item:
                    m = f'Context set {item.name} already defined'
                    raise ValueError(m)
                context_sets[item.name] = item
    for cset in context_sets.values():
        for item in cset.children:
            if isinstance(item, ContextItem):
                if item.name in context_items:
                    m = f'Context item {item.name} already defined'
                    raise ValueError(m)
                context_items[item.name] = item
    # Items claimed by an Expression contribution are removed from the
    # visible item set (their value is computed, not edited).
    for expression in context_expressions.values():
        try:
            item = context_items.pop(expression.parameter)
            groups_updated.add(item.parent)
            item.set_parent(None)
        except KeyError:
            # log.warn is deprecated; use warning.
            log.warning('%s referenced by expression %s does not exist',
                        expression.parameter, expression.expression)
    load_manifests(context_groups.values(), self.workbench)
    # NOTE(review): error-level dump of item names looks like leftover
    # debugging; kept to preserve behavior.
    log.error('\n'.join(context_items.keys()))
    self.context_expressions = context_expressions
    self.context_items = context_items
    self.context_groups = context_groups
    self.context_meta = context_meta
    self.context_meta_editable = len(self.get_metas(editable=True)) > 0
    for group in context_groups.values():
        group.updated = True
def _bind_observers(self):
    """Start watching the extension points so runtime contribution changes
    trigger the matching refresh."""
    bindings = (
        (SELECTORS_POINT, self._refresh_selectors),
        (ITEMS_POINT, self._refresh_items),
        (SYMBOLS_POINT, self._refresh_symbols),
    )
    for point_id, callback in bindings:
        self.workbench.get_extension_point(point_id) \
            .observe('extensions', callback)
def _unbind_observers(self):
    """Mirror of _bind_observers: detach the refresh callbacks."""
    bindings = (
        (SELECTORS_POINT, self._refresh_selectors),
        (ITEMS_POINT, self._refresh_items),
        (SYMBOLS_POINT, self._refresh_symbols),
    )
    for point_id, callback in bindings:
        self.workbench.get_extension_point(point_id) \
            .unobserve('extensions', callback)
@observe('context_items')
def _bind_context_items(self, change):
    """(Un)hook expression/rove observers when the context_items dict is
    replaced, and unrove/rove items whose object identity changed."""
    # Note that theoretically we shouldn't have to check if the item is
    # roving or not, but in the event that we change one of the output
    # tokens (which contribute their own set of parameters), the ID of the
    # parameters may change (even if we're just reloading the token).
    # Perhaps there's a more intelligent approach?
    # This triggers an interesting "bug" where ContextMeta is set to track
    # roving items and we change one of the tokens. I don't have a great
    # work-around right now.
    oldvalue = change.get('oldvalue', {})
    newvalue = change.get('value', {})
    for i in oldvalue.values():
        i.unobserve('expression', self._item_updated)
        i.unobserve('rove', self._item_roved)
        # Identity (not equality) comparison: a reloaded item with the same
        # name is a different object and must be unroved here.
        if getattr(i, 'rove', False):
            if id(i) != id(newvalue.get(i.name, None)):
                self.unrove_item(i)
    for i in newvalue.values():
        i.observe('expression', self._item_updated)
        i.observe('rove', self._item_roved)
        if getattr(i, 'rove', False):
            if id(i) != id(oldvalue.get(i.name, None)):
                self.rove_item(i)
@observe('symbols')
def _update_selectors(self, event):
    """Push the symbol table into every selector."""
    for selector in self.selectors.values():
        # Each selector gets its own shallow copy so local mutation of the
        # mapping is isolated.
        selector.symbols = dict(self.symbols)
def _item_updated(self, event):
    # Any expression edit may invalidate the currently-applied state.
    self._check_for_changes()
def _item_roved(self, event):
    """React to an item's `rove` flag flipping on or off."""
    item = event['object']
    if event['value']:
        log.debug('Roving {}'.format(item.name))
        self.rove_item(item)
    else:
        log.debug('Unroving {}'.format(item.name))
        self.unrove_item(item)
@observe('selectors')
def _bind_selectors(self, change):
    """Re-hook the 'updated' observer when the selectors dict is replaced."""
    log.debug('Binding selectors')
    for selector in change.get('oldvalue', {}).values():
        selector.unobserve('updated', self._selector_updated)
    for selector in change.get('value', {}).values():
        selector.observe('updated', self._selector_updated)
def _selector_updated(self, event):
    # Selector edits may leave the applied state stale.
    log.debug('Selectors updated')
    self._check_for_changes()
def _get_iterators(self, cycles=np.inf):
    """Return a fresh setting iterator for every selector, keyed by name."""
    log.debug('Getting iterators')
    iterators = {}
    for name, selector in self.selectors.items():
        iterators[name] = selector.get_iterator(cycles)
    return iterators
def iter_settings(self, iterator='default', cycles=np.inf):
    """Yield one dict of evaluated context values per selector setting.

    If `iterator` is falsy, a single dict computed from the base
    expressions is yielded instead.
    """
    log.debug('Iterating through settings for %s iterator', iterator)
    # Some paradigms may not actually have an iterator.
    namespace = ExpressionNamespace(self.expressions, self.symbols)
    if iterator:
        selector = self.selectors[iterator].get_iterator(cycles=cycles)
        for setting in selector:
            expressions = {i.name: i.to_expression(e) for i, e in setting.items()}
            namespace.update_expressions(expressions)
            # Evaluate, then reset so the next setting re-evaluates cleanly.
            yield namespace.get_values()
            namespace.reset()
    else:
        yield namespace.get_values()
def n_values(self, iterator='default'):
    """Return the number of settings the named iterator produces in a
    single cycle (consumes a fresh iterator to count)."""
    # Count without materializing the whole list of settings.
    return sum(1 for _ in self.iter_settings(iterator, 1))
def unique_values(self, item_names, iterator='default'):
    """Return the set of unique values produced by the iterator.

    For a single item name (str), returns a set of scalar values; for a
    list of names, returns a set of value tuples.
    """
    # Normalize to a list; remember whether to unwrap 1-tuples at the end.
    extract = isinstance(item_names, str)
    if extract:
        item_names = [item_names]
    values = set()
    for setting in self.iter_settings(iterator, 1):
        # (The original re-checked isinstance(item_names, str) here, which
        # was always False after the normalization above — dead branch
        # removed; the tuple path is the only one that ever ran.)
        values.add(tuple(setting[n] for n in item_names))
    log.debug('Found %d unique values: %r', len(values), values)
    if extract:
        values = {v[0] for v in values}
    return values
def get_range(self, item_name, iterator='default'):
    """Return (min, max) over the unique values of `item_name`."""
    observed = self.unique_values(item_name, iterator)
    return min(observed), max(observed)
def get_names(self, iterator='default'):
    """Return the names of the items managed by the given selector."""
    selector = self.get_selector(iterator)
    return [item.name for item in selector.context_items]
def get_item(self, item_name):
    """Return the registered ContextItem (raises KeyError if absent)."""
    return self.context_items[item_name]
def get_item_info(self, item_name):
    """Return a summary dict (dtype, labels, default, rove) for an item."""
    item = self.get_item(item_name)
    info = {
        'dtype': item.dtype,
        'label': item.label,
        'compact_label': item.compact_label,
        # Not every item type defines these; fall back gracefully.
        'default': getattr(item, 'default', None),
        'rove': getattr(item, 'rove', False),
    }
    return info
def rove_item(self, item):
    """Add `item` to every selector and to rove-linked meta groups."""
    log.debug('Roving item %r', item)
    for selector in self.selectors.values():
        if item in selector.context_items:
            continue
        selector.append_item(item)
    for meta in self.context_meta.values():
        if not meta.link_rove:
            continue
        meta.add_item(item)
def unrove_item(self, item):
    """Remove `item` from every selector and rove-linked meta groups."""
    log.debug('Unroving item %r', item)
    for selector in self.selectors.values():
        if item not in selector.context_items:
            continue
        selector.remove_item(item)
    for meta in self.context_meta.values():
        if not meta.link_rove:
            continue
        meta.remove_item(item)
def get_context_info(self):
    """Return a dict mapping each context item name to its info dict."""
    # Dict comprehension replaces dict() over a generator of pairs.
    return {name: self.get_item_info(name) for name in self.context_items}
def next(self, save_prior, selector, results):
    '''
    Shortcut for advancing to the next setting.

    NOTE(review): `save_prior` is forwarded as next_setting's `selector`
    argument — confirm this is intentional. Also note this method name
    shadows the builtin `next` (as a method, which is harmless).
    '''
    log.debug('Next')
    self.next_setting(save_prior)
    self.next_selector_setting(selector)
    self.set_values(results)
def next_setting(self, selector=None):
    '''
    Load next set of expressions. If there are no selectors defined, then
    this essentially clears the namespace and allows expressions to be
    recomputed.
    '''
    log.debug('Loading next setting')
    self._namespace.reset()
    if selector is None:
        return
    try:
        log.info('Configuring next setting from selector %s', selector)
        expressions = next(self._iterators[selector])
        expressions = {i.name: e for i, e in expressions.items()}
        self._namespace.update_expressions(expressions)
    except KeyError:
        # Typo fixed in the log message ('Avaliable' -> 'Available').
        m = 'Available selectors include {}'.format(self._iterators.keys())
        log.debug(m)
        raise
def get_value(self, context_name):
    """Evaluate and return a single context value by name.

    Raises ValueError if the context was never initialized or the name
    is not defined.
    """
    if not self.initialized:
        raise ValueError(context_initialized_error)
    try:
        return self._namespace.get_value(context_name)
    except KeyError as exc:
        raise ValueError(f'{context_name} not defined.') from exc
def get_values(self, context_names=None):
    """Evaluate and return all (or the named subset of) context values."""
    if not self.initialized:
        raise ValueError(context_initialized_error)
    return self._namespace.get_values(names=context_names)
def set_value(self, context_name, value):
    """Directly set a value in the expression namespace."""
    self._namespace.set_value(context_name, value)
def set_values(self, values):
    """Directly set multiple values in the expression namespace."""
    self._namespace.set_values(values)
def _check_for_changes(self):
log.debug('Checking for changes')
for name, state in self._context_item_state.items():
if name not in self.context_items:
log.debug('%s not | |
# repo: Elaoed/iplives
# encoding=utf8
"""Redis pool using one module in different files"""
import json
import redis
import time
import os
from functools import wraps
from kits.log import get_logger
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
REDIS_LOGGER = get_logger('redis')
def redis_excepts(orig_func):
    """Decorator wrapping each redis query in error handling.

    Connection and timeout errors are logged and swallowed (the wrapper
    then returns None); any other exception is logged as critical and
    re-raised as a generic Exception.
    """
    @wraps(orig_func)
    def wrapper(*args, **kwargs):
        try:
            return orig_func(*args, **kwargs)
        except (redis.exceptions.ConnectionError,
                redis.exceptions.TimeoutError) as err:
            REDIS_LOGGER.error(err)
        except Exception:
            REDIS_LOGGER.critical("Exception", exc_info=True)
            raise Exception("Error on redis. Check the log file")
    return wrapper
def get_connection():
    """Return a StrictRedis client backed by the shared connection pool.

    The pool is created lazily on first call from config/redis.conf;
    a missing config file raises immediately.
    """
    if not Redispool.redis_pool:
        try:
            with open(ROOT_PATH + '/config/redis.conf') as config:
                conf = json.load(config)
            Redispool.redis_pool = redis.ConnectionPool(
                host=conf['redis']['host'],
                port=conf['redis']['port'],
                db=conf['redis']['db'],
                password=conf['redis']['password']
            )
        except IOError:
            # No sensible default connection exists; fail loudly.
            raise Exception(ROOT_PATH + '/config/redis.conf does not exist')
    return redis.StrictRedis(connection_pool=Redispool.redis_pool)
class Redispool(object):
"""Full function of Redis Beckend
"""
redis_pool = None
def __init__(self, *args, **kwargs):
    """Optionally bind a default queue name via the `queue` keyword."""
    # Equivalent to: None unless a 'queue' keyword was supplied.
    self.queue = kwargs.get('queue')
#################################################
# Commands in key-value
#################################################
@redis_excepts
def set(self, key, value):
    """Store ``value`` under ``key``."""
    REDIS_LOGGER.info("set %s %s", key, value)
    conn = get_connection()
    return conn.set(key, value)
@redis_excepts
def get(self, key):
    """Return the value stored at ``key`` (None if absent)."""
    REDIS_LOGGER.info("get %s", key)
    conn = get_connection()
    return conn.get(key)
@redis_excepts
def setnx(self, key, value):
    """Set ``key`` to ``value`` only if the key does not already exist."""
    REDIS_LOGGER.info("setnx %s %s", key, value)
    conn = get_connection()
    return conn.setnx(key, value)
@redis_excepts
def setex(self, key, ttl, value):
    """Set ``key`` to ``value`` with an expiry of ``ttl`` seconds."""
    REDIS_LOGGER.info("setex %s, %s, %s", key, ttl, value)
    conn = get_connection()
    return conn.setex(key, ttl, value)
@redis_excepts
def delete(self, key):
    """Delete ``key``."""
    REDIS_LOGGER.info("del %s", key)
    conn = get_connection()
    return conn.delete(key)
@redis_excepts
def incr(self, key):
    """Increment the integer value at ``key`` by one."""
    REDIS_LOGGER.info("incr %s", key)
    conn = get_connection()
    return conn.incr(key)
@redis_excepts
def expire(self, key, ttl):
    """Set a time-to-live of ``ttl`` seconds on ``key``."""
    REDIS_LOGGER.info("expire %s %s", key, ttl)
    conn = get_connection()
    return conn.expire(key, ttl)
@redis_excepts
def exists(self, key):
    """Return whether ``key`` exists."""
    REDIS_LOGGER.info("exists %s", key)
    conn = get_connection()
    return conn.exists(key)
@redis_excepts
def mset(self, **kwargs):
    """Set several keys at once.

    example: r.mset(hello="h", world="w")
    """
    REDIS_LOGGER.info("mset %s", kwargs)
    conn = get_connection()
    return conn.mset(kwargs)
@redis_excepts
def getset(self, key, value):
    """Atomically set ``key`` to ``value`` and return the previous value
    (None if the key had no value before)."""
    REDIS_LOGGER.info("getset %s %s", key, value)
    conn = get_connection()
    return conn.getset(key, value)
@redis_excepts
def mget(self, *keys):
    """Return the values of all the given keys."""
    # Accept either mget(k1, k2, ...) or mget([k1, k2, ...]).
    if isinstance(keys[0], list):
        keys = keys[0]
    keys = [str(k) for k in keys]
    REDIS_LOGGER.info("mget %s", ' '.join(keys))
    conn = get_connection()
    return conn.mget(keys)
@redis_excepts
def append(self, key, value):
    """Append ``value`` to the string stored at ``key``."""
    REDIS_LOGGER.info("append %s %s", key, value)
    conn = get_connection()
    return conn.append(key, value)
@redis_excepts
def substr(self, key, start, stop):
    """Return the substring of the value of ``key`` between start and stop."""
    REDIS_LOGGER.info("substr %s %d %d", key, start, stop)
    conn = get_connection()
    return conn.substr(key, start, stop)
@redis_excepts
def incrby(self, key, number):
    """Increment the integer value at ``key`` by ``number``."""
    REDIS_LOGGER.info("incrby %s %d", key, number)
    conn = get_connection()
    return conn.incrby(key, number)
############################################################
# Commands in list
############################################################
@redis_excepts
def push(self, value, queue=None):
    """Append ``value`` to the right end of ``queue`` (defaults to the
    instance queue); raises if neither is set."""
    queue = queue or self.queue
    if not queue:
        raise Exception("queue does not exist")
    REDIS_LOGGER.info("rpush %s %s", queue, value)
    conn = get_connection()
    return conn.rpush(queue, value)
@redis_excepts
def pop(self, queue=None):
    """Blocking-pop the next item from ``queue`` (defaults to the instance
    queue); returns the item value, or None."""
    if not queue:
        queue = self.queue
    if not queue:
        raise Exception("queue does not exist")
    # BUGFIX: log the queue actually popped from; the original logged
    # self.queue, which is wrong when an explicit queue argument is given.
    REDIS_LOGGER.info("blpop:%s", queue)
    # blpop returns a (queue, value) pair, or None on timeout.
    item = get_connection().blpop(queue, timeout=None)
    return item[1] if item else None
@redis_excepts
def lrange(self, key, start, stop):
"""decrease value of given key by number
"""
REDIS_LOGGER.info("lrange %s %d %d", key, start, stop)
return get_connection().lrange(key, start, stop)
@redis_excepts
def lindex(self, key, pos):
"""fetch individual items from the list with LINDEX.
"""
REDIS_LOGGER.info("lindex %s %d", key, pos)
return get_connection().lindex(key, pos)
################################
# Commands used on SET values
################################
@redis_excepts
def sadd(self, name, member):
"""add a item into a set
"""
REDIS_LOGGER.info("sadd %s %s", name, member)
return get_connection().sadd(name, member)
@redis_excepts
def smembers(self, name):
"""list members of a set
"""
REDIS_LOGGER.info("smembers %s", name)
return get_connection().smembers(name)
@redis_excepts
def sismembers(self, name, member):
"""determine if item in set collection
"""
REDIS_LOGGER.info("sismembers %s, %s", name, member)
return get_connection().sismember(name, member)
@redis_excepts
def srem(self, name, member):
"""remove a item from set
"""
REDIS_LOGGER.info("srem %s %s", name, member)
return get_connection().srem(name, member)
#########################################
# Hashes in Redis
#########################################
@redis_excepts
def hset(self, name, key, value):
"""Store the value at the key in the hash
"""
REDIS_LOGGER.info("hset %s %s %s", name, key, value)
return get_connection().hset(name, key, value)
@redis_excepts
def hmset(self, name, mapping):
"""multiple hset
example: redispool.hmset(key, {'a':1, 'b':2})
"""
REDIS_LOGGER.info("hmset %s %s", name, str(mapping))
return get_connection().hmset(name, mapping)
@redis_excepts
def hget(self, name, key):
"""Fetche the value at the given hash key
"""
REDIS_LOGGER.info("hget %s %s", name, key)
return get_connection().hget(name, key)
@redis_excepts
def hgetall(self, name):
"""Fetche the entire hash
"""
REDIS_LOGGER.info("hgetall %s", name)
return get_connection().hgetall(name)
@redis_excepts
def hdel(self, name, key):
"""Remove a key from the hash, if it exists
"""
REDIS_LOGGER.info("hdel %s %s", name, key)
return get_connection().hdel(name, key)
@redis_excepts
def hincrby(self, name, key, increment):
"""add increment into filed of key
"""
REDIS_LOGGER.info("hincrby %s %s %d", name, key, increment)
return get_connection().hincrby(name, key, increment)
#########################################
# ZSET in Redis
#########################################
    @redis_excepts
    def zadd(self, key, score, member):
        """Add *member* with the given *score* to the sorted set at *key*.

        NOTE(review): the arguments are forwarded positionally as
        (key, score, member).  redis-py >= 3.0 requires
        ``zadd(name, {member: score})``, and 2.x releases used a
        different positional ordering -- confirm the installed redis-py
        version and the intended argument order (the in-file tests call
        this as ``zadd(key, member, score)``).
        """
        REDIS_LOGGER.info("zadd %s %s %s", key, score, member)
        return get_connection().zadd(key, score, member)
@redis_excepts
def zrem(self, key, member):
"""Remove the item from the ZSET, if it exists
"""
REDIS_LOGGER.info("zrem %s %s", key, member)
return get_connection().zrem(key, member)
@redis_excepts
def zrange(self, key, start, stop, withscores=False):
"""Fetche the items in the ZSET from their positions in sorted order
"""
REDIS_LOGGER.info("zrange %s %s %s %s", key, start, stop, withscores)
return get_connection().zrange(key, start, stop, withscores=withscores)
@redis_excepts
def zrevrange(self, key, start, stop, withscores=False):
"""reverse range function
"""
REDIS_LOGGER.info("zrevrange: %s %s %s %s", key, start, stop, withscores)
return get_connection().zrevrange(key, start, stop, withscores=withscores)
@redis_excepts
def zrangebyscore(self, key, start, stop, withscores=False):
"""Fetche items in the ZSET based on a range of scores. can you sort yourself?
"""
REDIS_LOGGER.info("zrangebyscore %s %s %s %s", key, start, stop, withscores)
return get_connection().zrangebyscore(key, start, stop, withscores=withscores)
@redis_excepts
def zrevrangebyscore(self, key, start, stop, withscores=False):
"""pass
"""
REDIS_LOGGER.info("zrevrangebyscore: %s %s %s", key, start, stop)
return get_connection().zrevrangebyscore(key, start, stop, withscores=withscores)
@redis_excepts
def zscore(self, key, member):
"""return the ordered collection.
"""
REDIS_LOGGER.info("zscore: %s %s", key, member)
return get_connection().zscore(key, member)
@redis_excepts
def zincrby(self, key, member, increment):
"""Increment the score of a member in a sorted set
"""
REDIS_LOGGER.info("zincrby: %s %s %f", key, member, increment)
return get_connection().zincrby(key, member, increment)
# @redis_excepts
# def zinterstore(self, dest_zsets, sets_num, *args, aggregation='max'):
# """find those entries that are in all of the SETs and ZSETs, combining their scores
# """
# REDIS_LOGGER.info("zinterstore: dest_zets:%s" % (dest_zsets))
# return get_connection().zinterstore(dest, keys)
@redis_excepts
def zrank(self, key, member):
"""return the position of the given member in the ZSET.
return None if not exists
"""
REDIS_LOGGER.info("zrank: %s %s", key, member)
return get_connection().zrank(key, member)
def test():
"""Just test
"""
r = Redispool()
key = 'test'
value = "value"
# test key-value ################################
def test_key_value():
print("Test starts. key:%s ........." % key)
r.delete(key)
assert(r.exists(key) is False)
r.set(key, 234)
assert(r.exists(key) is True)
r.set(key, 1)
r.incr(key)
assert(int(r.get(key)) == 2)
r.incrby(key, 3)
assert(int(r.get(key)) == 5)
r.expire(key, 3)
assert(r.exists(key) is True)
time.sleep(3)
assert(r.exists(key) is False)
r.mset(hello="h", world="w")
assert(r.mget("hello", "world")[0] in "h")
assert(r.mget("hello", "world")[1] in "w")
r.append("hello", "eworld")
assert(r.get("hello") in "heworld")
assert(r.substr("hello", 0, 3) in "hewo")
r.delete(key)
assert(r.getset(key, "value") is None)
assert(r.get(key) in "value")
print("All key-value functions pass the test......\n")
# test_key_value()
# test List ######################################
def test_list():
r.delete(key)
r.push(value, key)
assert(r.lrange(key, 0, 0)[0] == value)
assert(r.lindex(key, 0) == value)
assert(r.pop(key) == value)
r2 = Redispool(queue=key)
r2.push(value, key)
assert(r2.pop(key) == value)
print("All list functions pass the test......\n")
test_list()
# test Set ######################################
def test_set():
r.delete(key)
r.sadd(key, value)
assert(value in r.smembers(key))
assert(r.sismembers(key, value) is True)
r.srem(key, value)
assert(value not in r.smembers(key))
assert(r.sismembers(key, value) is False)
print("All set functions pass the test......\n")
test_set()
# test Hash ######################################
def test_hash():
r.delete(key)
r.hset(key, "q", "q")
r.hset(key, "w", "w")
r.hset(key, "e", 1)
assert(r.hget(key, "q") == "q")
assert(r.hget(key, "w") == "w")
assert(r.hget(key, "e") == "1")
r.hincrby(key, "e", 3)
assert(r.hget(key, "e") == "4")
print(r.hgetall(key))
r.hdel(key, "e")
print(r.hgetall(key))
r.delete(key)
assert(r.exists(key) is False)
r.hmset(key, {"a": "a", "b": "b", "c": 1})
print(r.hgetall(key))
# assert(r.exists(key) is False)
print("All hash functions pass the test......\n")
test_hash()
# test Zset ######################################
def test_zset():
r.delete(key)
r.zadd(key, value, 100)
r.zadd(key, value + "2", 200)
r.zadd(key, value + "3", 300)
r.zadd(key, value + "4", 150)
# ==============================================
assert("value4" in r.zrange(key, 0, -1))
r.zrem(key, value + "4")
assert("value4" not in r.zrange(key, 0, -1))
| |
from NER.utils import *
from NER.model import *
import re
import os
class Handler():
    """Shared state and data-loading helpers for the NER pipeline.

    Subclasses populate the token/tag index maps (``cti``/``wti``/``tti*``)
    and call :meth:`load_data` to build batched tensors via ``dataloader``.
    """

    def __init__(self):
        # token/tag <-> index maps (filled in by subclasses)
        self.cti = None        # character -> index
        self.wti = None        # word -> index
        self.tti_iob = None    # IOB tag -> index
        self.tti_ner = None    # entity tag -> index
        self.tti = None        # full tag (e.g. "b-protein") -> index
        self.itt = None        # inverse maps of the above
        self.itt_iob = None
        self.itt_ner = None
        self.itw = None
        self.itc = None
        self.data = None
        self.batch_handler = None
        self.data_handler = dataloader()
        self.BATCH_SIZE = None
        self.HRE = False

    def readtsv(self, filename):
        """Read a CoNLL-style TSV file into ``[[[word, tag], ...], ...]``.

        Sentences are separated by blank lines; ``-DOCSTART-`` lines are
        ignored.  All text is lower-cased.  A one-token sentence gets a
        trailing ``['.', 'o']`` appended so it still forms a sequence.
        The returned structure is suitable for :meth:`load_data`.
        """
        data = []
        with open(filename) as f:
            sentence = []
            for line in f:
                line = line.lower().strip()
                if line and not line.startswith('-docstart-'):
                    # raw string: '\s+' as a plain literal is an invalid
                    # escape sequence (DeprecationWarning on Python 3.6+)
                    sentence.append(re.split(r'\s+', line))
                else:
                    if sentence:
                        if len(sentence) == 1:
                            # pad single-word sentences with a closing dot
                            sentence.append(['.', 'o'])
                        data.append(sentence.copy())
                        sentence.clear()
            # fix: flush the final sentence when the file does not end
            # with a blank line (previously it was silently dropped)
            if sentence:
                if len(sentence) == 1:
                    sentence.append(['.', 'o'])
                data.append(sentence.copy())
        return data

    def load_data(self, cti, wti, tti_iob, tti_ner, tti, sentences, BATCH_SIZE, HRE=False, load_percentage=1):
        """Index, sort, batch and tensorize *sentences*.

        Results are stored on the instance: ``self.batch_handler`` is a
        list of (xc, xw, yiob, yner, y0) tensor tuples, and
        ``self.data_handler`` keeps the underlying ``dataloader``.
        *load_percentage* allows loading only a leading fraction of the
        corpus.
        """
        data = dataloader()
        batch = []
        block = []
        tsl = 0  # total sentences actually loaded
        for si, s in enumerate(sentences):
            # stop early once the requested fraction has been consumed
            if (si + 1) / len(sentences) > load_percentage:
                print('%.2f is loaded / %d' % (load_percentage, len(sentences)))
                break
            xy = []
            for w, t in s:
                w = w.lower()
                t = t.lower()
                wxc = [cti[c] for c in w]  # per-character indices
                if '-' in t:
                    # composite tags like "b-protein" split into IOB + entity
                    iob, ner = t.split('-')
                    xy.append((wxc, wti[w], tti_iob[iob], tti_ner[ner], tti[t]))
                else:
                    # plain tags (e.g. "o") map identically in every scheme
                    xy.append((wxc, wti[w], tti_iob[t], tti_ner[t], tti[t]))
            # * unzips the per-token tuples into parallel sequences
            xc, xw, yiob, yner, y0 = zip(*xy)
            block.append((len(s), xc, xw, yiob, yner, y0))
            tsl = si + 1
        # longest sentences first so per-batch padding is minimal
        block.sort(key=lambda x: -x[0])
        for s in block:
            data.append_item(xc=[list(s[1])], xw=[list(s[2])], yiob=s[3], yner=s[4], y0=s[5])
            data.append_row()
        data.strip()
        for _batch in data.split(BATCH_SIZE, HRE):
            xc, xw = data.tensor(_batch.xc, _batch.xw, _batch.lens)
            _, yiob = data.tensor(None, _batch.yiob, sos=True)
            _, yner = data.tensor(None, _batch.yner, sos=True)
            _, y0 = data.tensor(None, _batch.y0, sos=True)
            batch.append((xc, xw, yiob, yner, y0))
        print('%d/%d sentenced is loaded' % (tsl, len(sentences)))
        self.batch_handler = batch
        self.data_handler = data
class ModelHandler(Handler):
    def __init__(self , MODE , params , db_location = "datasets/JNLPBA/", sample_test = False , load_percentage = 1):
        """Build vocabularies and batched training data for *MODE*.

        MODE selects the ``<MODE>.tsv`` file under *db_location* (e.g.
        "train"); ``MODE == 'load'`` skips all data preparation.
        *sample_test* substitutes a tiny hard-coded corpus instead of
        reading from disk.  *load_percentage* limits how much of the
        corpus is batched.  Relies on PAD/SOS/EOS/UNK/O/B/I constants
        imported from NER.utils.
        """
        super().__init__()
        self.MODE = MODE
        if MODE != 'load':
            # total embedding width is the sum of the per-feature sizes
            params['EMBED_SIZE'] = sum(params['EMBED'].values())
            params['HRE'] = (params['UNIT'] == "sent")
            self.BATCH_SIZE = params["BATCH_SIZE"]
            self.HRE = params["HRE"]
            self.params = params
            self.JNLPBA_LOCATION = db_location
            if sample_test:
                # two hard-coded JNLPBA sentences for a quick smoke test
                self.data = [
                    [('IL-2','B-DNA'),('gene','I-DNA'),('expression','O'),('and','O'),('NF-kappa','B-protein'),('B','I-protein'),('activation','O'),('through','O'),('CD28','B-protein'),('requires','O'),('reactive','O'),('oxygen','O'),('production','O'),('by','O'),('5-lipoxygenase','B-protein'),('.','O')],
                    [('Activation','O'),('of','O'),('the','O'),('CD28','B-protein'),('surface','I-protein'),('receptor','I-protein'),('provides','O'),('a','O'),('major','O'),('costimulatory','O'),('signal','O'),('for','O'),('T','O'),('cell','O'),('activation','O'),('resulting','O'),('in','O'),('enhanced','O'),('production','O'),('of','O'),('interleukin-2','B-protein'),('(','O'),('IL-2','B-protein'),('),','O'),('and','O'),('cell','O'),('proliferation','O'),('.','O')]
                ]
                self.devel_sentences = ["Number of glucocorticoid receptors in lymphocytes and their sensitivity to hormone action ." ,
                    "The study demonstrated a decreased level of glucocorticoid receptors ( GR ) in peripheral blood lymphocytes from hypercholesterolemic subjects , and an elevated level in patients with acute myocardial infarction ."]
                self.devel_target_iob = [
                    ['O' ,'O' ,'B' ,'I' ,'O' ,'B' ,'O' ,'O' ,'O' ,'O' ,'O' ,'O' ,'O'],
                    ['O','O','O','O','O','O','O','B','I','O','B','O','O','B','I','I','O','O','O','O','O','O','O','O','O','O','O','O','O','O','O']
                ]
                self.devel_target_ner = [
                    ['O' ,'O' ,'protein' ,'protein' ,'O' ,'cell_type' ,'O' ,'O' ,'O' ,'O' ,'O' ,'O' ,'O'],
                    ['O','O','O','O','O','O','O','protein','protein','O','protein','O','O','cell_type','cell_type','cell_type','O','O','O','O','O','O','O','O','O','O','O','O','O','O','O']
                ]
                self.devel_target = [
                    ['O' ,'O' ,'b-protein' ,'i-protein' ,'O' ,'b-cell_type' ,'O' ,'O' ,'O' ,'O' ,'O' ,'O' ,'O'],
                    ['O','O','O','O','O','O','O','b-protein','i-protein','O','b-protein','O','O','b-cell_tyoe','i-cell_type','i-cell_type','O','O','O','O','O','O','O','O','O','O','O','O','O','O','O']
                ]
            else:
                self.data = self.readtsv(self.JNLPBA_LOCATION + MODE +".tsv")
            # vocabularies seeded with the special symbols, then grown
            # from the corpus in first-seen order
            self.cti = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX, UNK: UNK_IDX}
            self.wti = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX, UNK: UNK_IDX}
            self.tti_iob = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX , O:O_IDX, B:B_IDX , I:I_IDX }
            self.tti_ner = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX, O:O_IDX}
            self.tti = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX}
            for s in self.data:
                for w , t in s:
                    w = w.lower()
                    t = t.lower()
                    if w not in self.wti:
                        self.wti[w] = len(self.wti)
                    if t not in self.tti:
                        self.tti[t] = len(self.tti)
                    if '-' in t:
                        # composite tag: register the entity and IOB parts
                        iob, ner = t.split('-')
                        if ner not in self.tti_ner:
                            self.tti_ner[ner] = len(self.tti_ner)
                        if iob not in self.tti_iob:
                            self.tti_iob[iob] = len(self.tti_iob)
                    for c in w:
                        if c not in self.cti:
                            self.cti[c] = len(self.cti)
            #save_tkn_to_idx(self.JNLPBA_LOCATION +self.MODE+".wti" , self.wti)
            #save_tkn_to_idx(self.JNLPBA_LOCATION +self.MODE+".cti" , self.cti)
            #save_tkn_to_idx(self.JNLPBA_LOCATION +self.MODE+".tti_iob" , self.tti_iob)
            #save_tkn_to_idx(self.JNLPBA_LOCATION +self.MODE+".tti_ner" , self.tti_ner)
            #save_tkn_to_idx(self.JNLPBA_LOCATION +self.MODE+".tti" , self.tti)
            # inverse (index -> token) maps for decoding predictions
            self.itt = {v:k for k,v in self.tti.items()}
            self.itt_iob = {v:k for k,v in self.tti_iob.items()}
            self.itt_ner = {v:k for k,v in self.tti_ner.items()}
            self.itw = {v:k for k,v in self.wti.items()}
            self.itc = {v:k for k,v in self.cti.items()}
            # NOTE(review): these resets appear to clobber the devel_*
            # lists populated in the sample_test branch above; combined
            # with the `not sample_test` guard below, the sample devel
            # data seems unused -- confirm intended.
            self.devel_sentences = []
            self.devel_target_ner = []
            self.devel_target_iob = []
            self.devel_target = []
            self.load_data(self.cti,self.wti,self.tti_iob , self.tti_ner , self.tti, self.data , self.BATCH_SIZE,self.HRE , load_percentage= load_percentage)
            print('TRAIN Data is loaded.')
            EVAL_EVERY = self.params["EVAL_EVERY"]
            if EVAL_EVERY and not sample_test:
                assert isfile(self.JNLPBA_LOCATION + 'devel.tsv') , 'devel.tsv is not avaiable in %s'%self.JNLPBA_LOCATION
                deval_data = self.readtsv(self.JNLPBA_LOCATION + 'devel.tsv')
                print('DEVEL Data is loaded.')
                for s in deval_data:
                    self.devel_sentences.append(' '.join([w.lower() for w,t in s]).strip())
                    self.devel_target.append([t for w ,t in s])
                    self.devel_target_ner.append([ re.sub('b-|i-', '' , t.strip().lower()) for w,t in s])
                    self.devel_target_iob.append([ re.split('-', t.strip().lower())[0] for w,t in s])
    def train(self , output_path = '' , retrain = False , model_path = ''):
        """Train (or resume training) the configured NER model.

        Builds the model named by ``params['model_name']``, runs
        ``params['EPOCH']`` epochs over ``self.batch_handler``, saving
        checkpoints every SAVE_EVERY epochs and evaluating on the devel
        set every EVAL_EVERY epochs.  When *retrain* is True, the model
        and starting epoch are recovered from *model_path* (the epoch is
        parsed from an "epochN" substring of the filename).

        NOTE(review): no ``optim.zero_grad()`` call is visible between
        batches, so gradients accumulate across batches -- confirm this
        is intended.
        """
        assert self.MODE.lower().strip() == 'train' , "To train please make sure you have your MODE = train and also have the train.tsv avaiable in the dataset/JNLPBA directory"
        model = None
        LEARNING_RATE = self.params["LEARNING_RATE"]
        num_epoch = self.params["EPOCH"]
        SAVE_EVERY = self.params["SAVE_EVERY"]
        model_name = self.params["model_name"]
        EVAL_EVERY = self.params["EVAL_EVERY"]
        model = None
        init_epoch = 1
        if retrain:
            print(''.join(['=']*20) + 'Continue Training' + ''.join(['=']*20))
            model = self.load_model(model_path , ACTIVE_DEVICE)
            # resume epoch is parsed from the "epochN" part of the filename
            init_epoch = int(re.findall('epoch\d+' , model_path.lower())[0].replace('epoch','')) + 1
            print('Continue at ' + str(init_epoch) )
        else:
            assert model_name.strip() , "model name is empty, choose one of the available model rnn_two_crf_par , rnn_two_crf, rnn_two_crf_seq, rnn_single_crf"
            # instantiate the requested architecture
            if model_name.lower() == 'rnn_two_crf_par':
                model = rnn_two_crf_par(len(self.cti), len(self.wti), len(self.tti_iob) , len(self.tti_ner) , self.params)
            elif model_name.lower() == 'rnn_two_crf':
                model = rnn_two_crf(len(self.cti), len(self.wti), max(len(self.tti_iob) , len(self.tti_ner)) , len(self.tti_iob) , len(self.tti_ner) , self.params)
            elif model_name.lower() == 'rnn_two_crf_seq':
                model = rnn_two_crf_seq(len(self.cti), len(self.wti), len(self.tti_iob) , len(self.tti_ner) , self.params)
            elif model_name.lower() == 'rnn_two_crf_seq2':
                model = rnn_two_crf_seq2(len(self.cti), len(self.wti), len(self.tti_iob) , len(self.tti_ner) , self.params)
            elif model_name.lower() == 'rnn_single_crf':
                model = rnn_single_crf(len(self.cti), len(self.wti) , len(self.tti) , self.params)
        print(model)
        optim = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)
        print("Adam Optimizer is using with learning rate of : %.4f"%LEARNING_RATE)
        print("training %s model..."%model_name)
        for e in range(init_epoch , num_epoch+1):
            loss_sum_iob , loss_sum_ner = 0 , 0
            loss_sum = 0
            timer = time()
            for xc, xw, yiob, yner, y0 in self.batch_handler:
                # each architecture returns its loss(es) differently
                if model_name.lower() == 'rnn_two_crf_par' or model_name.lower() == 'rnn_two_crf':
                    loss = model(xc,xw,yiob,yner)
                    loss.backward()
                    loss_sum +=loss.item()
                elif model_name == 'rnn_single_crf':
                    loss = model(xc,xw,y0)
                    loss.backward()
                    loss_sum +=loss.item()
                elif model_name == 'rnn_two_crf_seq' or model_name == 'rnn_two_crf_seq2':
                    # two separate losses; retain_graph so the second
                    # backward pass can reuse the shared graph
                    loss_iob , loss_ner = model(xc,xw,yiob , yner)
                    loss_iob.backward(retain_graph=True)
                    loss_ner.backward()
                    loss_sum_iob += loss_iob.item()
                    loss_sum_ner += loss_ner.item()
                optim.step()
            timer = time() - timer
            if model_name == 'rnn_two_crf_seq' or model_name == 'rnn_two_crf_seq2':
                loss_sum_iob /= len(self.batch_handler)
                loss_sum_ner /= len(self.batch_handler)
                # NOTE(review): prints the *last batch* losses, not the
                # epoch averages computed just above -- confirm intended
                print('loss_iob :%.2f loss_ner:%.2f'%(loss_iob,loss_ner))
                loss_sum = (loss_sum_iob + loss_sum_ner)/2
            else:
                loss_sum /= len(self.batch_handler)
            # NOTE(review): on non-multiple epochs save_checkpoint is
            # called with an empty filename -- presumably log-only;
            # confirm against save_checkpoint's implementation
            if e % SAVE_EVERY and e != num_epoch:
                save_checkpoint("", None, e, loss_sum, timer, None)
            else:
                if output_path and not os.path.isdir(output_path):
                    os.mkdir(output_path)
                save_checkpoint(os.path.join(output_path , model_name) , model, e, loss_sum, timer , {"params": self.params , "cti" : self.cti , "wti" : self.wti , "tti" : self.tti , "tti_iob": self.tti_iob , "tti_ner": self.tti_ner })
            if EVAL_EVERY and (e % EVAL_EVERY == 0 or e == num_epoch):
                # NOTE(review): model_name == '' is unreachable given the
                # assert above when not retraining -- confirm
                if model_name == '':
                    model.evaluate(self.devel_sentences, self.cti, self.wti, self.itt,
                        y0 = self.devel_target,
                        parameters =['amacro_f1'],
                        model_name =model_name,
                        save = True ,
                        filename = model_name)
                else:
                    if output_path and not os.path.isdir(output_path):
                        os.mkdir(output_path)
                    if model_name == 'rnn_single_crf':
                        model.evaluate(self.devel_sentences , self.cti , self.wti , self.itt,
                            self.devel_target, parameters=['amacro_f1'] , model_name=model_name + '_' + str(e) , save=True , filename= os.path.join(output_path , model_name) )
                    else:
                        model.evaluate(self.devel_sentences , self.cti , self.wti , self.itt_iob , self.itt_ner ,
                            self.devel_target_iob , self.devel_target_ner , parameters=['amacro_f1'] , model_name=model_name + '_' + str(e) , save=True , filename= os.path.join(output_path , model_name) )
            print()
def retrain(self, model_path , init_epoch | |
# Basic libraries
import pandas as pd
import requests, json
import time
import numpy as np
import warnings
import random
# Visualization
import seaborn as sns
import scipy.stats as ss
import IPython
from IPython.display import HTML, display, Markdown, IFrame, FileLink
from itertools import combinations
from scipy import stats
# Data analysis
from sklearn.decomposition import PCA
from sklearn.preprocessing import quantile_transform
from sklearn import cluster
from sklearn.metrics import silhouette_score
from sklearn.manifold import TSNE
import umap
from rpy2 import robjects
from rpy2.robjects import r, pandas2ri
import scanpy as sc
import anndata
from maayanlab_bioinformatics.dge.characteristic_direction import characteristic_direction
from maayanlab_bioinformatics.dge.limma_voom import limma_voom_differential_expression
from maayanlab_bioinformatics.enrichment.crisp import enrich_crisp, fisher_overlap
from statsmodels.stats.multitest import multipletests
from scipy.stats.mstats import gmean
# Bokeh
from bokeh.io import output_notebook
from bokeh.plotting import figure, show
from bokeh.models import HoverTool, CustomJS, ColumnDataSource, Span, Select, Legend, PreText, Paragraph, LinearColorMapper, ColorBar, CategoricalColorMapper
from bokeh.layouts import layout, row, column, gridplot
from bokeh.palettes import all_palettes
import colorcet as cc
from bokeh.palettes import Category20
sc.settings.verbosity = 0  # minimum scanpy log verbosity (silences info output)
def check_files(fname):
    """Validate an input filename.

    Raises IOError when *fname* is empty or does not end in one of the
    supported tabular extensions (.txt, .csv, .tsv); returns None when
    the name is acceptable.
    """
    if fname == "":
        raise IOError("Empty filename")
    # single endswith call with a tuple replaces the chained == False checks
    if not fname.endswith((".txt", ".csv", ".tsv")):
        raise IOError(f"Unsupported file extension (expected .txt/.csv/.tsv): {fname}")
def check_df(df, col):
    """Raise IOError (with a helpful message) if *col* is not a column of *df*."""
    if col not in df.columns:
        raise IOError(f"Column '{col}' is missing from the dataframe")
def display_statistics(data, description=""):
    """Print a short summary (sample and feature counts) for an
    AnnData-like object exposing n_obs / n_vars."""
    for line in (description,
                 f"Sample size: {data.n_obs}",
                 f"Feature size: {data.n_vars}"):
        print(line)
def load_seurat_files(mtx_filename, gene_filename, barcodes_filename, bool_adt=False):
    """Load a 10x-style triplet (.mtx + genes + barcodes) into AnnData.

    When *bool_adt* is True, the matrix is assumed to hold both RNA and
    ADT features: rows annotated "Antibody Capture" in the gene file are
    split out and ``(adata_rna, adata_adt)`` is returned.  Otherwise a
    single AnnData is returned.
    """
    adata = anndata.read_mtx(mtx_filename).T  # transpose -> cells x genes
    with open(barcodes_filename, "r") as handle:
        cells = [line.strip() for line in handle]
    genes = pd.read_csv(gene_filename, header=None, sep='\t')
    adata.var['gene_ids'] = genes.iloc[:, 0].values
    # symbol column falls back to the id column for single-column files
    symbol_col = 1 if genes.shape[1] > 1 else 0
    adata.var['gene_symbols'] = genes.iloc[:, symbol_col].values
    adata.var_names = adata.var['gene_symbols']
    adata.var_names_make_unique(join="-")
    adata.obs['barcode'] = cells
    adata.obs_names = cells
    adata.obs_names_make_unique(join="-")
    if bool_adt == True:
        # assumes the gene file has exactly three columns here
        genes.columns = ["gene_ids", "gene_symbols", "gene_annot"]
        adt_list = genes.loc[genes["gene_annot"] == "Antibody Capture", "gene_symbols"].tolist()
        adata_adt = adata[:, adata.var["gene_symbols"].isin(adt_list)]
        adata = adata[:, ~adata.var["gene_symbols"].isin(adt_list)]
        return adata, adata_adt
    return adata
def load_metadata(adata, meta_data_filename, meta_class_column_name):
    """Attach the sample-class column from a metadata table to ``adata.obs``.

    *meta_data_filename* may be None/"" (no-op apart from making var
    names unique).  CSV files use a comma separator, anything else is
    read as TSV.  Raises Exception when no class column name is given or
    the column is absent from the metadata.
    """
    if meta_data_filename is not None and meta_data_filename != "":
        if meta_data_filename.endswith(".csv"):
            meta_df = pd.read_csv(meta_data_filename, index_col=0)
        else:
            meta_df = pd.read_csv(meta_data_filename, sep="\t", index_col=0)
        if meta_class_column_name == "":
            raise Exception ("Run time error: Please provide a proper column name for sample classes in metadata")
        try:
            check_df(meta_df, meta_class_column_name)
        except Exception:  # fix: bare except also swallowed KeyboardInterrupt/SystemExit
            raise Exception (f"Error! Column '{meta_class_column_name}' is not in metadata")
        adata.obs[meta_class_column_name] = meta_df.loc[:, meta_class_column_name]
    adata.var_names_make_unique()
    return adata
def load_data(dataset_name, rnaseq_data_filename, adt_data_filename, mtx_data_filename_rna, gene_data_filename_rna, barcode_data_filename_rna, mtx_data_filename_adt, gene_data_filename_adt, barcode_data_filename_adt, meta_data_filename=None, meta_class_column_name=None, table_counter=1):
    """Load paired RNA and ADT (protein) expression data plus metadata.

    Accepts either plain-text expression tables (csv/tsv, genes x
    samples) or 10x-style mtx triplets.  Both modalities are restricted
    to their common samples, summary tables are displayed, and
    ``(adata, adata_adt, table_counter, meta_class_column_name)`` is
    returned; the AnnData objects stay None when no input was given.
    """
    adata = None
    adata_adt = None
    # plain text
    if rnaseq_data_filename != "":
        display(Markdown(f"### Loading...{dataset_name}"))
        check_files(rnaseq_data_filename)
        check_files(adt_data_filename)
        # load rna
        try:
            if rnaseq_data_filename.endswith(".csv"):
                expr_df = pd.read_csv(rnaseq_data_filename, index_col=0).sort_index()
            else:
                expr_df = pd.read_csv(rnaseq_data_filename, index_col=0, sep="\t").sort_index()
            # convert df into anndata
            # adata matrix: sample x gene
            adata = anndata.AnnData(expr_df.T)
            adata.X = adata.X.astype('float64')
        except:
            print("Error! Input files are in a wrong format. \
            Please check if the index of the expression data are genes and the columns are sample IDs. \
            Sample IDs in the expression data and the metadata should be matched")
        # NOTE(review): if the read above failed, expr_df is unbound and
        # this del raises NameError -- confirm intended
        del expr_df
        # load adt
        try:
            if adt_data_filename.endswith(".csv"):
                expr_df = pd.read_csv(adt_data_filename, index_col=0).sort_index()
            else:
                expr_df = pd.read_csv(adt_data_filename, index_col=0, sep="\t").sort_index()
            # convert df into anndata
            # adata matrix: sample x gene
            adata_adt = anndata.AnnData(expr_df.T)
            adata_adt.X = adata_adt.X.astype('float64')
        except:
            print("Error! Input files are in a wrong format. \
            Please check if the index of the expression data are genes and the columns are sample IDs. \
            Sample IDs in the expression data and the metadata should be matched")
        del expr_df
    # mtx files
    elif mtx_data_filename_rna != "":
        display(Markdown(f"### Loading...{dataset_name}"))
        # NOTE(review): when no separate ADT .mtx is supplied, the RNA
        # matrix is assumed to contain both RNA and ADT features
        # (bool_adt=True) -- confirm this condition is not inverted
        if mtx_data_filename_adt == "":
            adata, adata_adt = load_seurat_files(mtx_data_filename_rna, gene_data_filename_rna, barcode_data_filename_rna, bool_adt=True)
        else:
            adata = load_seurat_files(mtx_data_filename_rna, gene_data_filename_rna, barcode_data_filename_rna)
            adata_adt = load_seurat_files(mtx_data_filename_adt, gene_data_filename_adt, barcode_data_filename_adt)
    if adata is not None:
        # load metadata
        adata = load_metadata(adata, meta_data_filename, meta_class_column_name)
        adata_adt = load_metadata(adata_adt, meta_data_filename, meta_class_column_name)
        # add batch info when no class column was provided
        if meta_class_column_name == "":
            meta_class_column_name = "batch"
            adata.obs["batch"] = dataset_name
            adata_adt.obs["batch"] = dataset_name
        # suffix sample names with the dataset so multiple datasets can coexist
        adata.obs.index = adata.obs.index + "-" + dataset_name
        adata_adt.obs.index = adata_adt.obs.index + "-" + dataset_name
        # common samples: keep only barcodes present in both modalities
        common_samples = list(set(adata_adt.obs.index.tolist()).intersection(adata.obs.index.tolist()))
        if len(common_samples) == 0:
            raise Exception("There are no matched samples.")
        adata = adata[common_samples, :]
        adata_adt = adata_adt[common_samples, :]
        table_counter = display_object(table_counter, f"Raw RNA data of {dataset_name}. The table displays the first 5 rows of the quantified RNA-seq expression dataset. Rows represent genes, columns represent samples, and values show the number of mapped reads.", adata.to_df().iloc[:10,:5].T.head(), istable=True)
        table_counter = display_object(table_counter, f"Raw protein data of {dataset_name}. The table displays the first 5 rows of the protein dataset. Rows represent genes, columns represent samples, and values show the number of mapped reads.", adata_adt.to_df().iloc[:10,:5].T.head(), istable=True)
        table_counter = display_object(table_counter, f"Metadata in {dataset_name}. The table displays the metadata associated with the samples in the RNA-seq dataset. Rows represent RNA-seq samples, columns represent metadata categories.", adata.obs.head(), istable=True)
        table_counter = display_object(table_counter, f"Sample size for each class in {dataset_name}. The table displays the number of samples in each class.", adata.obs.reset_index().groupby(meta_class_column_name).count(), istable=True)
        display_statistics(adata, f"### Statistics of RNA data in {dataset_name} ###")
        display_statistics(adata_adt, f"### Statistics of protein data in {dataset_name} ###")
    return adata, adata_adt, table_counter, meta_class_column_name
def create_download_link(df, title = "Download CSV file: {}", filename = "data.csv"):
    """Write *df* to *filename* (.csv via to_csv, .h5ad via write) and
    return an HTML anchor linking to the file."""
    if filename.endswith(".csv"):
        df.to_csv(filename)
    elif filename.endswith(".h5ad"):  # AnnData object
        df.write(filename)
    label = title.format(filename)
    html = f"<a href=\"./{filename}\" target='_blank'>{label}</a>"
    return HTML(html)
def display_link(url, title=None):
    """Render *url* as a clickable link; the label defaults to the URL itself."""
    label = url if title is None else title
    raw_html = '<a href="%s" target="_blank">%s</a>' % (url, label)
    return display(HTML(raw_html))
def display_object(counter, caption, df=None, istable=True):
    """Display *df* (when given) followed by a numbered Table/Figure
    caption, and return the incremented counter."""
    if df is not None:
        display(df)
    kind = "Table" if istable == True else "Figure"
    display(Markdown("*{} {}. {}*".format(kind, counter, caption)))
    return counter + 1
def autoselect_color_by(sample_metadata):
    '''Automatically select a column in the sample_metadata df for coloring.

    Returns ``(column_name_or_None, color_type)`` where color_type is
    "categorical" or "continuous".  Prefers the first low-cardinality
    column; otherwise falls back to the first numeric column.
    '''
    color_by = None
    color_type = 'categorical'
    meta_col_nuniques = sample_metadata.nunique()
    # pick a column with cardinality between 1 and 30
    # (fix: the old comment claimed "2 and 10", disagreeing with the code)
    meta_col_nuniques = meta_col_nuniques.loc[meta_col_nuniques.between(1, 30)]
    if len(meta_col_nuniques) > 0:
        color_by = meta_col_nuniques.index[0]
    else:  # pick a numeric column
        is_number = np.vectorize(lambda x: np.issubdtype(x, np.number))
        meta_col_dtypes = sample_metadata.dtypes
        try:
            meta_col_is_number = is_number(meta_col_dtypes)
            if meta_col_is_number.sum() > 0:
                color_by = meta_col_dtypes.loc[meta_col_is_number].index[0]
                color_type = 'continuous'
        except Exception:  # fix: bare except also caught KeyboardInterrupt/SystemExit
            pass
    return color_by, color_type
import hashlib
def str_to_int(string, mod):
    """Deterministically map *string* to an integer in [0, mod) via SHA-256."""
    digest = hashlib.sha256(string.encode("utf8")).hexdigest()
    return int(digest, 16) % mod
def clr(x):
    """Centered log-ratio transform: log(x) minus the log of x's geometric mean."""
    logged = np.log(x)
    return logged - np.log(gmean(x))
def normalize(adata, normalization_method, log_normalization):
    """Return a normalized copy of *adata*.

    "Seurat": scanpy filter/normalize/scale pipeline (log1p optional via
    *log_normalization*).  "CLR": centered log-ratio on counts + 1.
    Any other method returns an unmodified copy.
    """
    result = adata.copy()
    if normalization_method == "Seurat":
        sc.pp.filter_cells(result, min_genes=200)
        sc.pp.filter_genes(result, min_cells=3)
        sc.pp.normalize_total(result, target_sum=1e4)
        if log_normalization:
            sc.pp.log1p(result)
        sc.pp.scale(result, max_value=10)
    elif normalization_method == "CLR":
        result.X = clr((result.to_df() + 1).values)
    return result
def run_clustergrammer(dataset, meta_class_column_name, magic_normalization=False, nr_genes=800, metadata_cols=None, filter_samples=True,gene_list=None):
    """Upload an expression submatrix to Clustergrammer and return the
    heatmap URL (response text of the POST).

    Uses the MAGIC-imputed matrix from ``dataset.uns["magic"]`` when
    *magic_normalization* is True, otherwise the raw expression.  Rows
    are the *nr_genes* highest-variance genes unless a newline-separated
    *gene_list* is given; columns are capped at 50 random samples.

    NOTE(review): *metadata_cols* and *filter_samples* are accepted but
    never used.  The file handle passed to requests.post is never
    closed, and the matrix is written to a fixed filename in the CWD.
    """
    # Subset the expression DataFrame using top 800 genes with largest variance
    if magic_normalization == True:
        data = dataset.uns["magic"]
    else:
        data = dataset.to_df().T
    meta_df = dataset.obs
    variances = np.var(data, axis=1)
    srt_idx = variances.argsort()[::-1]
    if gene_list == None or len(gene_list) == 0:
        expr_df_sub = data.iloc[srt_idx].iloc[:nr_genes]
    else:
        gene_list = gene_list.split("\n")
        common_gene_list = list(set(gene_list).intersection(set(data.index)))
        expr_df_sub = data.loc[common_gene_list, :]
        assert len(expr_df_sub.index) > 0
    # prettify sample names
    sample_names = ['::'.join([y, x]) for x,y in
                    zip(meta_df[meta_class_column_name], expr_df_sub.columns)]
    expr_df_sub.columns = sample_names
    expr_df_sub.index = ["Gene: "+str(x) for x in expr_df_sub.index]
    sample_name = ["Sample: "+x for x in sample_names]
    expr_df_sub.columns = sample_name
    # class row shown as an extra category header in the heatmap
    treatment_type = ["Class: "+ x.split("::")[1] for x in sample_names]
    new_series = pd.DataFrame(treatment_type).T
    new_series.columns = expr_df_sub.columns
    expr_df_sub = pd.concat([new_series, expr_df_sub], axis=0)
    index_list = list(expr_df_sub.index)
    index_list = ["" if "Gene" not in str(x) else x for x in index_list]
    expr_df_sub.index = index_list
    #subset of expr_df_sub
    if len(expr_df_sub.columns) > 50:
        print("Input data is too large. Random sampling (n=50) is performed.")
        expr_df_sub = expr_df_sub.sample(50, axis=1)
    expr_df_sub_file = "expr_df_sub_file.txt"
    expr_df_sub.to_csv("expr_df_sub_file.txt", sep='\t')
    # POST the expression matrix to Clustergrammer and get the URL
    clustergrammer_url = 'https://maayanlab.cloud/clustergrammer/matrix_upload/'
    r = requests.post(clustergrammer_url, files={'file': open(expr_df_sub_file, 'rb')}).text
    return r
#############################################
########## 2. Plot
#############################################
def plot_clustergrammar(clustergrammer_url):
    """Show a clickable link to a Clustergrammer heatmap and embed it inline."""
    secure_url = clustergrammer_url.replace("http:", "https:")
    display_link(secure_url, secure_url)
    # Embed the interactive heatmap in the notebook output
    display(IPython.display.IFrame(secure_url, width="1000", height="1000"))
def plot_scatter(umap_df, values_dict, option_list, sample_names, caption_text, category_list_dict=None, location='right', category=True, dropdown=False, figure_counter=0, x_axis_label="UMAP_1", y_axis_label="UMAP_2"):
# init plot
source = ColumnDataSource(data=dict(x=umap_df["x"], y=umap_df["y"], values=values_dict[option_list[0]], names=sample_names))
if location == 'right':
plot = figure(plot_width=800, plot_height=600)
else:
plot = figure(plot_width=600, plot_height=600+20*len(category_list_dict[option_list[0]]))
if category == True:
unique_category_dict = dict()
for option in option_list:
unique_category_dict[option] = sorted(list(set(values_dict[option])))
# map category to color
# color is mapped by its category name
# if a color is used by other categories, use another color
factors_dict = dict()
colors_dict = dict()
for key in values_dict.keys():
unused_color = list(Category20[20])
factors_dict[key] = category_list_dict[key]
colors_dict[key] = list()
for category_name in factors_dict[key]:
color_for_category = Category20[20][str_to_int(category_name, 20)]
if color_for_category not in unused_color:
if len(unused_color) > 0:
color_for_category | |
sel_model.ClearAndSelect)
class CallFrontListBox(ControlledCallFront):
    def action(self, value):
        """Synchronize the list box selection with *value*.

        *value* may be a single row index (select only that row) or a
        collection of indices; non-ControlledList collections are first
        wrapped and written back to the master attribute.
        """
        if value is None:
            return
        if isinstance(value, int):
            for row in range(self.control.count()):
                self.control.item(row).setSelected(row == value)
            return
        if not isinstance(value, ControlledList):
            setattr(
                self.control.ogMaster,
                self.control.ogValue,
                ControlledList(value, self.control),
            )
        for row in range(self.control.count()):
            wanted = row in value
            if wanted != self.control.item(row).isSelected():
                self.control.item(row).setSelected(wanted)
class CallFrontListBoxLabels(ControlledCallFront):
    """Refills a QListWidget with labels. Accepts plain values,
    ``(text, icon)`` tuples (icon may be an int index into the shared
    attribute icon dict) or Orange ``Variable`` instances."""

    unknownType = None

    def action(self, values):
        self.control.clear()
        if values:
            for value in values:
                if isinstance(value, tuple):
                    text, icon = value
                    if isinstance(icon, int):
                        # int icon indexes the shared attribute icon dict
                        item = QtWidgets.QListWidgetItem(attributeIconDict[icon], text)
                    else:
                        item = QtWidgets.QListWidgetItem(icon, text)
                elif isinstance(value, Variable):
                    item = QtWidgets.QListWidgetItem(*attributeItem(value))
                else:
                    item = QtWidgets.QListWidgetItem(value)
                    # keep the raw value retrievable from the item
                    item.setData(Qt.UserRole, value)
                self.control.addItem(item)
class CallFrontLabel:
    """Callback that refreshes a label widget from a master object's state.

    ``label`` is a %-style format string; every call formats it against
    ``master.__dict__`` and pushes the resulting text to ``control``.
    """

    def __init__(self, control, label, master):
        self.control = control
        self.label = label
        self.master = master

    def __call__(self, *_):
        text = self.label % self.master.__dict__
        self.control.setText(text)
##############################################################################
## Disabler is a call-back class for check box that can disable/enable other
## widgets according to state (checked/unchecked, enabled/disable) of the
## given check box
##
## Tricky: if self.propagateState is True (default), then if check box is
## disabled the related widgets will be disabled (even if the checkbox is
## checked). If self.propagateState is False, the related widgets will be
## disabled/enabled if check box is checked/cleared, regardless of whether the
## check box itself is enabled or not. (If you don't understand, see the
## code :-)
DISABLER = 1  # Disabler mode: enable/disable the dependent widgets
HIDER = 2  # Disabler mode: show/hide the dependent widgets
# noinspection PyShadowingBuiltins
class Disabler:
    """
    Check-box callback that enables/disables (``type=DISABLER``) or
    shows/hides (``type=HIDER``) the widgets listed in
    ``widget.disables``.

    If ``propagateState`` is True (default) the dependent widgets are also
    disabled whenever the check box itself is disabled, regardless of its
    checked state.
    """

    def __init__(self, widget, master, valueName, propagateState=True, type=DISABLER):
        self.widget = widget
        self.master = master
        self.valueName = valueName
        self.propagateState = propagateState
        self.type = type

    def __call__(self, *value):
        currState = self.widget.isEnabled()
        if currState or not self.propagateState:
            if len(value):
                # state was pushed by the signal
                disabled = not value[0]
            else:
                # fall back to the master's stored value
                disabled = not getdeepattr(self.master, self.valueName)
        else:
            # check box itself is disabled -> disable dependents too
            disabled = True
        for w in self.widget.disables:
            if isinstance(w, tuple):
                if isinstance(w[0], int):
                    i = 1
                    if w[0] == -1:
                        # -1 inverts the sense; note the flip persists for
                        # subsequent entries of the loop as well
                        disabled = not disabled
                else:
                    i = 0
                if self.type == DISABLER:
                    w[i].setDisabled(disabled)
                elif self.type == HIDER:
                    if disabled:
                        w[i].hide()
                    else:
                        w[i].show()
                if hasattr(w[i], "makeConsistent"):
                    w[i].makeConsistent()
            else:
                if self.type == DISABLER:
                    w.setDisabled(disabled)
                elif self.type == HIDER:
                    if disabled:
                        w.hide()
                    else:
                        w.show()
##############################################################################
# some table related widgets
# noinspection PyShadowingBuiltins
class tableItem(QTableWidgetItem):
    """QTableWidgetItem that configures itself (icon, flags, background)
    and inserts itself into ``table`` at row ``x``, column ``y`` on
    construction."""

    def __init__(
        self,
        table,
        x,
        y,
        text,
        editType=None,
        backColor=None,
        icon=None,
        type=QTableWidgetItem.Type,
    ):
        super().__init__(type)
        if icon:
            self.setIcon(QtGui.QIcon(icon))
        if editType is not None:
            self.setFlags(editType)
        else:
            # default: enabled, checkable and selectable -- but not editable
            self.setFlags(
                Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsSelectable
            )
        if backColor is not None:
            self.setBackground(QtGui.QBrush(backColor))
        # we add it this way so that text can also be int and sorting will be
        # done properly (as integers and not as text)
        self.setData(Qt.DisplayRole, text)
        table.setItem(x, y, self)
# Custom Qt item-data roles used by the table views/delegates below.
TableValueRole = next(OrangeUserRole)  # Role to retrieve orange.Value
TableClassValueRole = next(OrangeUserRole)  # Retrieve class value for the row
TableDistribution = next(OrangeUserRole)  # Retrieve distribution of the column
TableVariable = next(OrangeUserRole)  # Role to retrieve the column's variable
BarRatioRole = next(OrangeUserRole)  # Ratio for drawing distribution bars
BarBrushRole = next(OrangeUserRole)  # Brush for distribution bar
SortOrderRole = next(OrangeUserRole)  # Used for sorting
class TableBarItem(QItemDelegate):
    """Item delegate that paints a horizontal distribution bar beneath the
    cell text; the bar's length is the cell's ``BarRole`` ratio."""

    BarRole = next(OrangeUserRole)  # ratio (float) controlling bar length
    BarColorRole = next(OrangeUserRole)  # per-cell override for bar colour

    def __init__(
        self, parent=None, color=QtGui.QColor(255, 170, 127), color_schema=None
    ):
        """
        :param QObject parent: Parent object.
        :param QColor color: Default color of the distribution bar.
        :param color_schema:
            If not None it must be an instance of
            :class:`OWColorPalette.ColorPaletteGenerator` (note: this
            parameter, if set, overrides the ``color``)
        :type color_schema: :class:`OWColorPalette.ColorPaletteGenerator`
        """
        super().__init__(parent)
        self.color = color
        self.color_schema = color_schema

    def paint(self, painter, option, index):
        painter.save()
        self.drawBackground(painter, option, index)
        ratio = index.data(TableBarItem.BarRole)
        # Treat NaN the same as "no bar"
        if isinstance(ratio, float):
            if math.isnan(ratio):
                ratio = None

        color = None
        if ratio is not None:
            if self.color_schema is not None:
                # colour by the row's (discrete) class value, if available
                class_ = index.data(TableClassValueRole)
                if (
                    isinstance(class_, Orange.data.Value)
                    and class_.variable.is_discrete
                    and not math.isnan(class_)
                ):
                    color = self.color_schema[int(class_)]
            else:
                color = index.data(self.BarColorRole)
        if color is None:
            color = self.color
        rect = option.rect
        if ratio is not None:
            # draw the bar as a thick round-capped line along the cell bottom
            pw = 5
            hmargin = 3 + pw / 2  # + half pen width for the round line cap
            vmargin = 1
            textoffset = pw + vmargin * 2
            baseline = rect.bottom() - textoffset / 2
            width = (rect.width() - 2 * hmargin) * ratio
            painter.save()
            painter.setRenderHint(QtGui.QPainter.Antialiasing)
            painter.setPen(
                QtGui.QPen(QtGui.QBrush(color), pw, Qt.SolidLine, Qt.RoundCap)
            )
            line = QtCore.QLineF(
                rect.left() + hmargin, baseline, rect.left() + hmargin + width, baseline
            )
            painter.drawLine(line)
            painter.restore()
            # reserve vertical space so text does not overlap the bar
            text_rect = rect.adjusted(0, 0, 0, -textoffset)
        else:
            text_rect = rect
        text = str(index.data(Qt.DisplayRole))
        self.drawDisplay(painter, option, text_rect, text)
        painter.restore()
class BarItemDelegate(QtWidgets.QStyledItemDelegate):
    """Delegate that renders a float cell value as a filled horizontal bar,
    scaled into [0, 1] via the (min, max) pair in ``self.scale``.
    Non-float values get only the default background painting."""

    def __init__(
        self, parent, brush=QtGui.QBrush(QtGui.QColor(255, 170, 127)), scale=(0.0, 1.0)
    ):
        super().__init__(parent)
        self.brush = brush
        self.scale = scale

    def paint(self, painter, option, index):
        if option.widget is not None:
            style = option.widget.style()
        else:
            style = QApplication.style()
        # Draw the row/item background first
        style.drawPrimitive(QStyle.PE_PanelItemViewRow, option, painter, option.widget)
        style.drawPrimitive(QStyle.PE_PanelItemViewItem, option, painter, option.widget)
        rect = option.rect
        val = index.data(Qt.DisplayRole)
        if isinstance(val, float):
            # Normalise the value into [0, 1] for the bar width
            minv, maxv = self.scale
            val = (val - minv) / (maxv - minv)
            painter.save()
            if option.state & QStyle.State_Selected:
                painter.setOpacity(0.75)
            painter.setBrush(self.brush)
            painter.drawRect(rect.adjusted(1, 1, -rect.width() * (1.0 - val) - 2, -2))
            painter.restore()
class IndicatorItemDelegate(QtWidgets.QStyledItemDelegate):
    """Delegate that paints a small filled circle ("indicator") at the
    centre of any cell whose data for ``role`` is truthy."""

    IndicatorRole = next(OrangeUserRole)

    def __init__(self, parent, role=IndicatorRole, indicatorSize=2):
        super().__init__(parent)
        self.role = role
        self.indicatorSize = indicatorSize

    def paint(self, painter, option, index):
        # Default painting first, then overlay the indicator dot
        super().paint(painter, option, index)
        if not index.data(self.role):
            return
        painter.save()
        painter.setRenderHints(QtGui.QPainter.Antialiasing)
        painter.setBrush(QtGui.QBrush(Qt.black))
        radius = self.indicatorSize
        painter.drawEllipse(option.rect.center(), radius, radius)
        painter.restore()
class LinkStyledItemDelegate(QStyledItemDelegate):
    """Delegate that renders cells carrying a ``LinkRole`` string as
    clickable hyperlinks: link-coloured text, pointing-hand cursor over the
    text, and ``webbrowser.open`` on click."""

    LinkRole = next(OrangeUserRole)  # role carrying the URL string

    def __init__(self, parent):
        super().__init__(parent)
        # (index, position) of the last mouse press, used to tell a click
        # from a drag on release
        self.mousePressState = QtCore.QModelIndex(), QtCore.QPoint()
        parent.entered.connect(self.onEntered)

    def sizeHint(self, option, index):
        size = super().sizeHint(option, index)
        # enforce a minimum row height of 20px
        return QtCore.QSize(size.width(), max(size.height(), 20))

    def linkRect(self, option, index):
        """Return the bounding rect of the (elided) link text within the cell."""
        if option.widget is not None:
            style = option.widget.style()
        else:
            style = QApplication.style()
        text = self.displayText(index.data(Qt.DisplayRole), QtCore.QLocale.system())
        self.initStyleOption(option, index)
        textRect = style.subElementRect(
            QStyle.SE_ItemViewItemText, option, option.widget
        )
        if not textRect.isValid():
            textRect = option.rect
        margin = (
            style.pixelMetric(QStyle.PM_FocusFrameHMargin, option, option.widget) + 1
        )
        textRect = textRect.adjusted(margin, 0, -margin, 0)
        font = index.data(Qt.FontRole)
        if not isinstance(font, QtGui.QFont):
            font = option.font
        metrics = QtGui.QFontMetrics(font)
        elideText = metrics.elidedText(text, option.textElideMode, textRect.width())
        return metrics.boundingRect(textRect, option.displayAlignment, elideText)

    def editorEvent(self, event, model, option, index):
        """Handle press/release/move to implement click-to-open behaviour."""
        if event.type() == QtCore.QEvent.MouseButtonPress and self.linkRect(
            option, index
        ).contains(event.pos()):
            # remember where the press happened
            self.mousePressState = (
                QtCore.QPersistentModelIndex(index),
                QtCore.QPoint(event.pos()),
            )
        elif event.type() == QtCore.QEvent.MouseButtonRelease:
            link = index.data(LinkRole)
            if not isinstance(link, str):
                link = None
            pressedIndex, pressPos = self.mousePressState
            # open only if press and release are on the same cell and the
            # pointer barely moved (i.e. a click, not a drag)
            if (
                pressedIndex == index
                and (pressPos - event.pos()).manhattanLength() < 5
                and link is not None
            ):
                import webbrowser

                webbrowser.open(link)
            self.mousePressState = QtCore.QModelIndex(), event.pos()
        elif event.type() == QtCore.QEvent.MouseMove:
            link = index.data(LinkRole)
            if not isinstance(link, str):
                link = None
            # hand cursor while hovering over the link text only
            if link is not None and self.linkRect(option, index).contains(event.pos()):
                self.parent().viewport().setCursor(Qt.PointingHandCursor)
            else:
                self.parent().viewport().setCursor(Qt.ArrowCursor)
        return super().editorEvent(event, model, option, index)

    def onEntered(self, index):
        # reset the cursor when entering a non-link cell
        link = index.data(LinkRole)
        if not isinstance(link, str):
            link = None
        if link is None:
            self.parent().viewport().setCursor(Qt.ArrowCursor)

    def paint(self, painter, option, index):
        link = index.data(LinkRole)
        if not isinstance(link, str):
            link = None
        if link is not None:
            if option.widget is not None:
                style = option.widget.style()
            else:
                style = QApplication.style()
            style.drawPrimitive(
                QStyle.PE_PanelItemViewRow, option, painter, option.widget
            )
            style.drawPrimitive(
                QStyle.PE_PanelItemViewItem, option, painter, option.widget
            )
            text = self.displayText(index.data(Qt.DisplayRole), QtCore.QLocale.system())
            textRect = style.subElementRect(
                QStyle.SE_ItemViewItemText, option, option.widget
            )
            if not textRect.isValid():
                textRect = option.rect
            margin = (
                style.pixelMetric(QStyle.PM_FocusFrameHMargin, option, option.widget)
                + 1
            )
            textRect = textRect.adjusted(margin, 0, -margin, 0)
            elideText = QtGui.QFontMetrics(option.font).elidedText(
                text, option.textElideMode, textRect.width()
            )
            painter.save()
            font = index.data(Qt.FontRole)
            if not isinstance(font, QtGui.QFont):
                font = option.font
            painter.setFont(font)
            # link colour, unless the row is selected
            if option.state & QStyle.State_Selected:
                color = option.palette.highlightedText().color()
            else:
                color = option.palette.link().color()
            painter.setPen(QtGui.QPen(color))
            painter.drawText(textRect, option.displayAlignment, elideText)
            painter.restore()
        else:
            super().paint(painter, option, index)
LinkRole = LinkStyledItemDelegate.LinkRole
class ColoredBarItemDelegate(QtWidgets.QStyledItemDelegate):
""" Item delegate that can also draws a distribution bar
"""
def __init__(self, parent=None, decimals=3, color=Qt.red):
super().__init__(parent)
self.decimals = decimals
self.float_fmt = "%%.%if" % decimals
self.color = QtGui.QColor(color)
def displayText(self, value, locale=QtCore.QLocale()):
if value is None or isinstance(value, float) and math.isnan(value):
return "NA"
if isinstance(value, float):
return self.float_fmt % value
return str(value)
def sizeHint(self, option, index):
font = self.get_font(option, index)
metrics = QtGui.QFontMetrics(font)
height = metrics.lineSpacing() + 8 # 4 pixel margin
width = (
metrics.width(
self.displayText(index.data(Qt.DisplayRole), QtCore.QLocale())
)
+ 8
)
return QtCore.QSize(width, height)
def paint(self, painter, option, index):
self.initStyleOption(option, index)
text = self.displayText(index.data(Qt.DisplayRole))
ratio, have_ratio = self.get_bar_ratio(option, index)
rect = option.rect
if have_ratio:
# The text is raised 3 pixels above the bar.
# TODO: Style dependent margins?
text_rect = rect.adjusted(4, 1, -4, -4)
else:
text_rect = rect.adjusted(4, 4, -4, -4)
painter.save()
font = self.get_font(option, index)
painter.setFont(font)
if option.widget is not None:
style = option.widget.style()
else:
style = QApplication.style()
style.drawPrimitive(QStyle.PE_PanelItemViewRow, option, painter, option.widget)
style.drawPrimitive(QStyle.PE_PanelItemViewItem, option, painter, option.widget)
# TODO: Check ForegroundRole.
if option.state & QStyle.State_Selected:
color = option.palette.highlightedText().color()
else:
color = option.palette.text().color()
painter.setPen(QtGui.QPen(color))
align = self.get_text_align(option, index)
metrics | |
this function is taken to be a conservative
representation of the damping values presented in Figure 9(a) of
the latter's paper _"Lateral excitation of bridges by balancing
pedestrians"_
If `conservative=True` is used then the negative 300Ns/m value proposed
by Dallard et al will be used. This value is taken to be independent
of frequency.
"""
if conservative:
f_vals = [0.0,2.0]
cp_vals = [-300,-300]
else:
f_vals = [0.0,0.5,1.0,1.5,2.0]
cp_vals = [0,-300,-300,0,0]
cp_func = scipy.interpolate.interp1d(f_vals,cp_vals,kind='linear',
bounds_error=False,
fill_value=0.0)
return cp_func
def init_mp_func(self):
    """
    Initialise the default function defining the frequency-dependence of
    the effective added mass per pedestrian (kg per pedestrian).

    The piecewise-linear curve defined here is a simplified representation
    of the added mass values presented in Figure 9(b) of the 2008
    Proc. of the Royal Society paper
    _"Lateral excitation of bridges by balancing pedestrians"_.
    Outside the tabulated frequency range the function evaluates to 0.0.
    """
    freqs = [0.0, 0.5, 0.8, 1.25, 2.0]
    added_mass = [+70, -45, -45, 0, 0]
    return scipy.interpolate.interp1d(freqs, added_mass,
                                      kind='linear',
                                      bounds_error=False,
                                      fill_value=0.0)
def calc_pedestrian_effect(self, f=1.0, num=1000):
    """
    Calculate the modal damping and mass matrices due to pedestrians,
    per the method presented in McRobie's paper.

    Returns a dict with keys 'C_pa', 'M_pa' (mode-generalised damping and
    mass matrices for a single pedestrian smeared along the deck) and
    'cp', 'mp' (the per-pedestrian damping/mass values at frequency `f`).
    The distributed-effect matrix is computed once and cached on
    ``self.ped_effect_mtrx``.
    """
    mtrx = self.ped_effect_mtrx
    if mtrx is None:
        sys_obj = self.modalsys
        L = sys_obj.Ltrack
        dL = L / num
        # midpoints of `num` equal segments along the deck
        x = numpy.linspace(0, L, num, endpoint=False) + dL / 2
        # modeshape ordinates at those positions
        phi = sys_obj.modeshapeFunc(x)
        # mode-generalised participation matrix (phi^T phi, length-averaged)
        mtrx = dL / L * (phi.T @ phi)
        self.ped_effect_mtrx = mtrx

    # Evaluate cp and mp at the (positive) frequency provided
    f = numpy.abs(f)
    cp = self.cp_func(f)
    mp = self.mp_func(f)

    return {
        'C_pa': cp * mtrx,
        'M_pa': mp * mtrx,
        'cp': cp,
        'mp': mp,
    }
def run(self, Np_vals, verbose=True,
        calc_Np_crit=True, append_rslts=False,
        **kwargs):
    """
    Run analysis to explore eigenvalues of the system state matrix for
    various assumed pedestrian crowd densities.

    ***
    Required:

    * `Np_vals`, _array-like_: each value defines the total number of
      pedestrians on the bridge; a range of values will typically be
      provided, to allow exploration of how system eigenproperties (in
      particular effective damping) vary with pedestrian numbers.

    Returns the critical pedestrian number (or None when
    `calc_Np_crit=False`).
    """
    if verbose:
        print("Running lat sync eigenvalues analysis...")

    # Sweep over the requested crowd sizes
    self._run_analysis(Np_vals, append_rslts, **kwargs)

    # Optionally locate the onset of instability
    Np_crit = self.calc_Np_crit(verbose=verbose) if calc_Np_crit else None

    if verbose:
        print("Analysis complete!")

    return Np_crit
def _run_analysis(self,Np_vals,
                  append_rslts=True,
                  **kwargs):
    """
    Run analysis for various pedestrian numbers, as provided to function.

    To cater for the frequency-dependent nature of the pedestrian
    damping/mass effect an iterative procedure is adopted, similar to the
    p-k method devised by Theodorsen for solving aeroelastic problems.

    Results are stored on ``self`` (eigenvalues, damping_ratios,
    damped_freqs, eigenvectors, N_pedestrians, cp_vals, mp_vals and,
    when ``self.store_mtrxs`` is set, the system matrices), either
    overwriting or appending to any previous results.
    """
    modalsys = self.modalsys
    store_mtrxs = self.store_mtrxs

    # Take copy of system damping (and mass) matrix with no pedestrians,
    # so they can be restored after the sweep
    C0 = deepcopy(modalsys._C_mtrx)
    M0 = deepcopy(modalsys._M_mtrx)

    # Define function to iterate with; returns the frequency mismatch so
    # a root-finder can solve for a self-consistent damped frequency
    def calc_modal_properties(f,mode_index,Np,return_rslts=False):

        # Calculate change in modal damping matrix
        # due to smeared effect of N=1 pedestrian at trial frequency f
        rslts = self.calc_pedestrian_effect(f=f,**kwargs)
        C_pa = rslts['C_pa']
        M_pa = rslts['M_pa']

        # Adjust bridge damping and mass matrices for Np pedestrians
        modalsys._C_mtrx = C0 + Np * C_pa
        modalsys._M_mtrx = M0 + Np * M_pa

        # Carry out eigevalue analysis using updated system matrices
        eig_props = modalsys.CalcEigenproperties()
        f_new = eig_props['f_d'][mode_index]
        f_error = f_new - f
        #print("Calculating: Np=%d, mode=%d, f=%.3f, f_error=%.3f"
        #      % (Np,mode_index,f,f_error))

        if return_rslts:
            d = {}
            d['eig_props']=eig_props
            d = {**d, **rslts}
            d['C_mtrx'] = modalsys._C_mtrx
            d['M_mtrx'] = modalsys._M_mtrx
            return f_error, d
        else:
            return f_error

    # Carry out eigevalue analysis of system with no pedestrians; this
    # seeds the iteration with the bare-bridge damped frequencies
    eig_props = modalsys.CalcEigenproperties()
    fd_vals_last = eig_props['f_d']

    # Loop over all pedestrian numbers provided to function
    # (outer lists collect one entry per pedestrian count)
    cp_vals = []
    mp_vals = []
    s_vals = []
    eta_vals = []
    fd_vals = []
    X_vals = []
    if store_mtrxs:
        C_mtrx_list = []
        M_mtrx_list = []

    for _Np in Np_vals:

        # Inner lists collect one entry per mode
        cp_vals_inner = []
        mp_vals_inner = []
        s_vals_inner = []
        eta_vals_inner = []
        fd_vals_inner = []
        X_vals_inner = []
        if store_mtrxs:
            C_mtrx_inner_list = []
            M_mtrx_inner_list = []

        for _mode_index, _fd in enumerate(fd_vals_last):

            # Solve for consistent frequencies, allowing for
            # frequency-dependence of pedestrian-related mass and damping
            fd_sol = scipy.optimize.newton(func=calc_modal_properties,
                                           x0=_fd,
                                           args=(_mode_index,_Np))

            # Rerun for converged frequency of given mode
            ferr, rslts = calc_modal_properties(fd_sol,
                                                _mode_index,_Np,
                                                return_rslts=True)

            eig_props = rslts['eig_props']
            cp = rslts['cp']
            mp = rslts['mp']
            C_mtrx = rslts['C_mtrx']
            M_mtrx = rslts['M_mtrx']

            # Unpack results for this mode and append to inner lists
            cp_vals_inner.append(cp)
            mp_vals_inner.append(mp)
            s_vals_inner.append(eig_props['s'][_mode_index])
            eta_vals_inner.append(eig_props['eta'][_mode_index])
            fd_vals_inner.append(eig_props['f_d'][_mode_index])
            X_vals_inner.append(numpy.ravel(eig_props['X'][:,_mode_index]))
            if store_mtrxs:
                C_mtrx_inner_list.append(C_mtrx)
                M_mtrx_inner_list.append(M_mtrx)

        # Update last frequencies; converged values seed the next count
        fd_vals_last = fd_vals_inner

        # Append inner lists to outer lists
        cp_vals.append(cp_vals_inner)
        mp_vals.append(mp_vals_inner)
        s_vals.append(s_vals_inner)
        eta_vals.append(eta_vals_inner)
        fd_vals.append(fd_vals_inner)
        X_vals.append(X_vals_inner)
        if store_mtrxs:
            C_mtrx_list.append(C_mtrx_inner_list)
            M_mtrx_list.append(M_mtrx_inner_list)

    # Convert nested lists to numpy ndarray type
    cp_vals = numpy.array(cp_vals)
    mp_vals = numpy.array(mp_vals)
    s_vals = numpy.array(s_vals)
    eta_vals = numpy.array(eta_vals)
    fd_vals = numpy.array(fd_vals)
    X_vals = numpy.array(X_vals)

    # Restore original no-pedestrians system matrices
    modalsys._C_mtrx = C0
    modalsys._M_mtrx = M0

    # Check to see if previous results exist
    if not hasattr(self,'eigenvalues'):
        append_rslts = False # no previous results available

    # Record key results as attributes, or append to previous results
    if not append_rslts:
        self.eigenvalues = s_vals
        self.damping_ratios = eta_vals
        self.damped_freqs = fd_vals
        self.eigenvectors = X_vals
        self.N_pedestrians = Np_vals
        self.cp_vals = cp_vals
        self.mp_vals = mp_vals
        if store_mtrxs:
            self.C_mtrx_list = C_mtrx_list
            self.M_mtrx_list = M_mtrx_list

    if append_rslts:
        self.eigenvalues = numpy.vstack((self.eigenvalues,s_vals))
        self.damping_ratios = numpy.vstack((self.damping_ratios,eta_vals))
        self.damped_freqs = numpy.vstack((self.damped_freqs,fd_vals))
        self.eigenvectors = numpy.vstack((self.eigenvectors,X_vals))
        self.N_pedestrians = numpy.hstack((self.N_pedestrians,Np_vals))
        self.cp_vals = numpy.vstack((self.cp_vals,cp_vals))
        self.mp_vals = numpy.vstack((self.mp_vals,mp_vals))
        if store_mtrxs:
            # NOTE(review): append() nests the new list rather than
            # extending, unlike the vstack handling above -- confirm intent
            self.C_mtrx_list.append(C_mtrx_list)
            self.M_mtrx_list.append(M_mtrx_list)
def plot_results(self):
    """
    Plot the full set of results figures from the above analysis.

    N.b: figures intentionally emulate the format of the figures in
    McRobie's paper, as these have been used to validate the routines.
    """
    fig = plt.figure()
    fig.set_size_inches((14,9))

    layout = gridspec.GridSpec(3, 2, height_ratios=[1.5, 1, 1])
    layout.update(hspace=0.7)

    # Top row: damping-vs-frequency and pole plots
    self.plot_damping_vs_freq(ax=fig.add_subplot(layout[0, 0]))
    self.plot_poles(ax=fig.add_subplot(layout[0, 1]))

    # Lower-left: damping vs crowd size
    self.plot_damping_vs_pedestrians(ax=fig.add_subplot(layout[1:, 0]))

    # Lower-right: pedestrian damping/mass functions, sharing an x-axis
    freq_ax = fig.add_subplot(layout[1, 1])
    self.plot_cp_func(ax=freq_ax)
    self.plot_mp_func(ax=fig.add_subplot(layout[2, 1], sharex=freq_ax))

    return fig
def plot_damping_vs_freq(self, ax=None):
    """
    Plot damping ratio against damped natural frequency
    (per Figure 5 in McRobie's paper)
    """
    if ax is not None:
        fig = ax.get_figure()
    else:
        fig, ax = plt.subplots()

    damping = self.damping_ratios
    freqs = self.damped_freqs

    # All poles; every 10th highlighted; no-pedestrian case in blue
    ax.plot(damping, freqs, 'k.', markersize=0.5)
    ax.plot(damping[::10], freqs[::10], 'ko', markersize=1.5)
    ax.plot(damping[0], freqs[0], 'bo', markersize=3.0)

    ax.axvline(x=0.0, color='r', alpha=0.3)  # denotes stability limit

    ax.set_ylim([0.0, ax.get_ylim()[1]])
    ax.set_xlabel("Damping ratio")
    ax.set_ylabel("Damped natural frequency (Hz)")
    ax.set_title("Frequency vs Effective Damping\n")

    if self.eta_crit is not None:
        # mark the critical point
        ax.plot(self.eta_crit, self.fd_crit, 'r.')

    return fig
def plot_poles(self, ax=None):
    """
    Plot eigenvalues (poles) on the complex plane
    (per Figure 4 in paper)
    """
    if ax is not None:
        fig = ax.get_figure()
    else:
        fig, ax = plt.subplots()

    poles = self.eigenvalues

    # All poles; every 10th highlighted; no-pedestrian case in blue
    ax.plot(real(poles), imag(poles), 'k.', markersize=0.5)
    ax.plot(real(poles[::10]), imag(poles[::10]), 'ko', markersize=1.5)
    ax.plot(real(poles[0]), imag(poles[0]), 'bo', markersize=3.0)

    ax.axvline(x=0.0, color='r', alpha=0.3)  # stability limit
    ax.axhline(y=0.0, color='k', linewidth=0.5)  # illustrates Im(z)=0 axis

    ax.set_xlabel("Real(s)")
    ax.set_ylabel("Imag(s)")
    ax.set_title("Eigenvalues of system state matrix")

    s_crit = self.s_crit
    if s_crit is not None:
        ax.plot(real(s_crit), imag(s_crit), 'r.')

    return fig
def plot_damping_vs_pedestrians(self, ax=None):
    """
    Plots effective damping ratio of poles against number of pedestrians.

    Fixes: previously the ``ax=None`` default crashed (no axes were ever
    created); axes are now created when omitted, matching the sibling
    plot methods, and the figure is returned for consistency with them.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()

    Np_vals = self.N_pedestrians
    eta_vals = self.damping_ratios

    ax.plot(Np_vals, eta_vals, 'k.', markersize=0.5)
    ax.axhline(y=0.0, color='r', alpha=0.3)  # stability limit

    ax.set_xlim([0, ax.get_xlim()[1]])
    ax.set_xlabel("Number of pedestrians")
    ax.set_ylabel("Effective damping ratio")
    ax.set_title("Effect of pedestrians on effective damping")

    Np_crit = self.Np_crit
    if Np_crit is not None:
        # mark the critical crowd size
        ax.axvline(x=Np_crit, color='r', alpha=0.3)
        ax.plot(Np_crit, self.eta_crit, 'r.')

    return fig
def plot_cp_func(self,ax=None,f_vals=None):
"""
Plots function defining how negative damping per pedestrian varies
with frequency
"""
cp_func = self.cp_func
if f_vals is None:
if hasattr(cp_func,'x'):
f_vals = cp_func.x
else:
raise ValueError("`f_vals` must be provided!")
cp_vals = cp_func(f_vals)
if ax is None:
fig, ax = plt.subplots()
else:
| |
#%%
import os
cwd = os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
import argparse
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision.utils
import numpy as np
import os.path
from scipy.io import loadmat
from vae_models import *
from utils import *
from args_python import *
from matplotlib import pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
import hdf5storage
# Sizes of the pose/gain segments within the network's latent output vector
EulerN=3  # three Euler angles
QuaternionN=4  # four quaternion components
ScaleSpaceAndGainN=2  # spatial scale + gain
class CustomDataset(Dataset):
    """Dataset wrapping a (inputs, labels) pair of tensors, with support
    for an optional transform applied to each input sample on access.

    All tensors must share the same first (sample) dimension.
    """

    def __init__(self, tensors, transform=None):
        first_len = tensors[0].size(0)
        assert all(t.size(0) == first_len for t in tensors)
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        sample = self.tensors[0][index]
        if self.transform:
            sample = self.transform(sample)
        label = self.tensors[1][index]
        return sample, label

    def __len__(self):
        return self.tensors[0].size(0)
#%%
def train(args, model, device, train_loader, optimizer, epoch, writer, Rbeta, zipped_vals, scheduler, kl_weight=None, anneal_rate=None):
    """
    Train the VAE for one epoch.

    The total loss combines a pose loss (rotation via quaternions or Euler
    angles, plus spatial-scale and gain terms), an MSE reconstruction loss
    and a KL-divergence loss whose weight is linearly annealed per batch
    (capped at 1.0).

    Returns the updated ``kl_weight`` so the caller can carry it over to
    the next epoch. ``Rbeta`` and ``zipped_vals`` are accepted for
    interface compatibility but are not used here.
    """
    model.train()
    # Fix: the defaults were None, which crashed the annealing update below
    # (None + None); treat missing settings as "zero weight, no annealing".
    if kl_weight is None:
        kl_weight = 0.0
    if anneal_rate is None:
        anneal_rate = 0.0
    run_angle_loss = 0.0
    run_recon_loss = 0.0
    run_kl_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        mu, logvar, angle_gain_scale, in_data, output = model(data)
        if args.UseQuaternionNotEuler:
            # Rotation represented as a quaternion (4 values), followed by
            # spatial scale and gain
            R_est = quaternion2R(angle_gain_scale[:,0:QuaternionN])
            R_target = quaternion2R(target[:,0:QuaternionN])
            gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
            gain_scale_loss = getlossspacescale(angle_gain_scale[:,QuaternionN],target[:,QuaternionN]) + getlossgain(angle_gain_scale[:,QuaternionN+1],target[:,QuaternionN+1])
            angle_loss = rot_loss + gain_scale_loss
        else:
            # Rotation represented as Euler angles (3 values), followed by
            # spatial scale and gain
            R_est = euler2R(angle_gain_scale[:,0:EulerN])
            R_target = euler2R(target[:,0:EulerN])
            gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
            gain_scale_loss = getlossspacescale(angle_gain_scale[:,EulerN],target[:,EulerN]) + getlossgain(angle_gain_scale[:,EulerN+1],target[:,EulerN+1])
            angle_loss = rot_loss + gain_scale_loss
        recon_loss = nn.MSELoss()(torch.flatten(output,1), torch.flatten(in_data,1))
        # KL divergence of q(z|x) from the unit Gaussian, per sample
        kl_loss = (-0.5 * torch.sum(1 + logvar - mu.pow(2) - torch.exp(logvar)))/data.shape[0]
        if args.test:
            print("Ground truth : {} \n Predicted values : {}".format(torch.transpose(gt,1,2), pred))
            # also need to show reconstructed images
            break
        run_angle_loss += angle_loss.item()
        run_recon_loss += recon_loss.item()
        run_kl_loss += kl_loss.item()
        # Linear KL annealing, capped at full weight
        kl_weight = min(1.0, kl_weight + anneal_rate)
        tot_loss = args.coeff_angle_loss*angle_loss + args.coeff_recon_loss*recon_loss + kl_weight*kl_loss
        tot_loss.backward()
        optimizer.step()
        scheduler.step()
        if (batch_idx+1) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAngle Loss: {:.8f}, Recon loss: {:.8f}, KL Loss: {:.8f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx * len(data) / len(train_loader.dataset), run_angle_loss/args.log_interval, run_recon_loss/args.log_interval, run_kl_loss/args.log_interval))
            writer.add_scalar('Training/Angle_loss', run_angle_loss/args.log_interval, epoch*len(train_loader)+batch_idx)
            writer.add_scalar('Training/Reconstruction_loss', run_recon_loss/args.log_interval, epoch*len(train_loader)+batch_idx)
            writer.add_scalar('Training/KL_loss', run_kl_loss/args.log_interval, epoch*len(train_loader)+batch_idx)
            writer.add_graph(model, data)
            # Log parameter and gradient histograms for all weights
            for tag, value in model.named_parameters():
                tag = tag.replace('.', '/')
                writer.add_histogram(tag, value.detach().cpu().numpy(), batch_idx+1)
                writer.add_histogram(tag+'/grad', value.grad.detach().cpu().numpy(), batch_idx+1)
            # Reset running losses for the next logging window
            run_angle_loss = 0.0
            run_recon_loss = 0.0
            run_kl_loss = 0.0
    return kl_weight
def validate(args, model, device, val_loader, Rbeta, zipped_vals):
    """
    Evaluate the model on the validation set.

    Returns the mean (per batch) angle, reconstruction and KL losses as
    tensors. ``Rbeta`` and ``zipped_vals`` are accepted for interface
    compatibility but are not used here.

    Note: unlike ``train``, the KL term here is NOT divided by the batch
    size -- NOTE(review): confirm whether this asymmetry is intended.
    """
    model.eval()
    val_angle_loss = 0.0
    val_recon_loss = 0.0
    val_kl_loss = 0.0
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            mu, logvar, angle_gain_scale, in_data, output = model(data)
            if args.UseQuaternionNotEuler:
                # Rotation represented as a quaternion (4 values)
                R_est = quaternion2R(angle_gain_scale[:,0:QuaternionN])
                R_target = quaternion2R(target[:,0:QuaternionN])
                gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
                gain_scale_loss = getlossspacescale(angle_gain_scale[:,QuaternionN],target[:,QuaternionN]) + getlossgain(angle_gain_scale[:,QuaternionN+1],target[:,QuaternionN+1])
                loss_value = rot_loss + gain_scale_loss
            else:
                # Rotation represented as Euler angles (3 values)
                R_est = euler2R(angle_gain_scale[:,0:EulerN])
                R_target = euler2R(target[:,0:EulerN])
                gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
                gain_scale_loss = getlossspacescale(angle_gain_scale[:,EulerN],target[:,EulerN]) + getlossgain(angle_gain_scale[:,EulerN+1],target[:,EulerN+1])
                loss_value = rot_loss + gain_scale_loss
            val_angle_loss += loss_value
            val_recon_loss += nn.MSELoss()(torch.flatten(output,1), torch.flatten(in_data,1))
            val_kl_loss += -0.5 * torch.sum(1 + logvar - mu.pow(2) - torch.exp(logvar))
    # Mean losses over batches
    val_angle_loss /= len(val_loader)
    val_recon_loss /= len(val_loader)
    val_kl_loss /= len(val_loader)
    print('\nValidation set: Angle loss: {:.8f}, Recon loss: {:.8f}, KL loss: {:.8f}\n'.format(val_angle_loss.item(), val_recon_loss.item(), val_kl_loss.item()))
    if args.test:
        # gt/pred come from the final batch of the loop above
        print("Ground truth : {} \n\n Predicted values : {} \n".format(torch.transpose(gt,1,2), pred))
    return val_angle_loss, val_recon_loss, val_kl_loss
def test(args, model, device, test_loader, Rbeta, zipped_vals, data_stat):
    """
    Evaluate the model on the test set.

    With ``args.get_pred_only`` the network outputs are simply collected
    and written to ``pred_labels.mat``; otherwise the mean losses are
    reported and a set of diagnostic images (reconstructions, latent-space
    interpolations and random samples) is produced from the first batch.

    ``data_stat`` is a (mean, std) pair used to un-normalise images for
    display. ``Rbeta`` and ``zipped_vals`` are accepted for interface
    compatibility but are not used here.
    """
    if args.get_pred_only:
        # Prediction-only mode: no labels available
        model.eval()
        test_out_list = []
        with torch.no_grad():
            for data in test_loader:
                data = data.to(device)
                _, _, output, _, _ = model(data)
                # NOTE(review): .numpy() assumes CPU tensors -- confirm device
                test_out_list.append(output.detach().numpy())
        save_mat = np.concatenate(test_out_list)
        hdf5storage.savemat(args.pred_folder+'/pred_labels.mat', {'labeldata':save_mat})
    else:
        model.eval()
        test_angle_loss = 0.0
        test_recon_loss = 0.0
        test_kl_loss = 0.0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                mu, logvar, angle_gain_scale, in_data, output = model(data)
                if args.UseQuaternionNotEuler:
                    # Rotation represented as a quaternion (4 values)
                    R_est = quaternion2R(angle_gain_scale[:,0:QuaternionN])
                    R_target = quaternion2R(target[:,0:QuaternionN])
                    gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
                    gain_scale_loss = getlossspacescale(angle_gain_scale[:,QuaternionN],target[:,QuaternionN]) + getlossgain(angle_gain_scale[:,QuaternionN+1],target[:,QuaternionN+1])
                    loss_value = rot_loss + gain_scale_loss
                else:
                    # Rotation represented as Euler angles (3 values)
                    R_est = euler2R(angle_gain_scale[:,0:EulerN])
                    R_target = euler2R(target[:,0:EulerN])
                    gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
                    gain_scale_loss = getlossspacescale(angle_gain_scale[:,EulerN],target[:,EulerN]) + getlossgain(angle_gain_scale[:,EulerN+1],target[:,EulerN+1])
                    loss_value = rot_loss + gain_scale_loss
                test_angle_loss += loss_value
                test_recon_loss += nn.MSELoss()(torch.flatten(output,1), torch.flatten(in_data,1))
                test_kl_loss += -0.5 * torch.sum(1 + logvar - mu.pow(2) - torch.exp(logvar))
        # Mean losses over batches
        test_angle_loss /= len(test_loader)
        test_recon_loss /= len(test_loader)
        test_kl_loss /= len(test_loader)
        print('\nTest set: Angle loss: {:.8f}, Recon loss: {:.8f}, KL loss: {:.8f}\n'.format(test_angle_loss.item(), test_recon_loss.item(), test_kl_loss.item()))
        if args.test:
            # gt/pred come from the final batch of the loop above
            print("Ground truth : {} \n\n Predicted values : {} \n".format(torch.transpose(gt,1,2), pred))
        # Diagnostic images from the first batch only (loop breaks at end);
        # assumes the batch holds at least 64 samples -- TODO confirm
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            _, _, _, in_data, output_net = model(data)
            output = torch.reshape(output_net[0:64,:], (64, data.shape[1], data.shape[2], data.shape[3]))
            data_unnorm = torch.zeros_like(data)
            # un-normalise with the dataset (mean, std) for display
            data_unnorm[0:64,:,:,:] = data[0:64,:,:,:]*data_stat[1] + data_stat[0]
            output[0:64,:,:,:] = output[0:64,:,:,:]*data_stat[1] + data_stat[0]
            grid = torchvision.utils.make_grid(data_unnorm[0:64,:,:,:].detach())
            matplotlib_imshow(grid, name="org_image.png", one_channel=True)
            grid = torchvision.utils.make_grid(output[0:64,:,:,:].detach())
            matplotlib_imshow(grid, name="vae_recon.png", one_channel=True)
            # latent space interpolation between 2 images from test-dataset
            start = [0,2,4,6,8,10]
            dest = [30,32,34,36,38,40]
            alpha = np.linspace(0,1,11)
            dec_out = torch.zeros((len(alpha)*len(start), data.shape[1], data.shape[2], data.shape[3]))
            for ii in range(len(start)):
                data_interp1 = torch.unsqueeze(data[start[ii],:,:,:],dim=0)
                data_interp2 = torch.unsqueeze(data[dest[ii],:,:,:],dim=0)
                z_mu1, z_logvar1, z_euler1, _, dec_out1 = model(data_interp1)
                z_mu2, z_logvar2, z_euler2, _, dec_out2 = model(data_interp2)
                # reparameterised latent codes for both endpoint images
                std1 = torch.exp(0.5*z_logvar1)
                eps1 = torch.randn_like(std1)
                rep1 = z_mu1 + eps1*std1
                std2 = torch.exp(0.5*z_logvar2)
                eps2 = torch.randn_like(std2)
                rep2 = z_mu2 + eps2*std2
                for a in range(len(alpha)):
                    # NOTE(review): only the pose vector is interpolated; the
                    # appearance code is taken from image 2 (rep2) -- confirm
                    z_euler_interp = (1-alpha[a])*z_euler1 + alpha[a]*z_euler2
                    feat_vec_interp = torch.cat([z_euler_interp, rep2],dim=1)
                    lin_out_interp = model.dec_in(feat_vec_interp)
                    lin_out_interp = torch.reshape(lin_out_interp, (-1,model.ch_factor_6out6,7,7))
                    d_out = model.decoder(lin_out_interp)
                    dec_out[(len(alpha)*ii)+a,:,:,:] = torch.squeeze(d_out)
            # random sampling of latent space by fixing euler angle fed to it.
            dec_sample = torch.zeros_like(data[0:len(alpha),:,:,:])
            for idx in range(len(alpha)):
                rep_sample = torch.randn_like(std1)
                # fixed pose vector so only appearance varies across samples
                z_euler1 = torch.tensor([[0.39644146, 0.75766391, 0.77631556, 1.00424026, 1.0780347]], device=rep_sample.device)
                feat_vec_interp = torch.cat([z_euler1, rep_sample],dim=1)
                lin_out_interp = model.dec_in(feat_vec_interp)
                lin_out_interp = torch.reshape(lin_out_interp, (-1,model.ch_factor_6out6,7,7))
                d_out = model.decoder(lin_out_interp)
                dec_sample[idx,:,:,:] = torch.squeeze(d_out)
            grid = torchvision.utils.make_grid(dec_out.detach(),nrow=len(alpha))
            matplotlib_imshow(grid, name="interpolations.png", one_channel=True)
            grid = torchvision.utils.make_grid(dec_sample.detach(),len(alpha))
            matplotlib_imshow(grid, name="sample_from_gaussian.png", one_channel=True)
            break
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=600, metavar='N',
help='number of epochs to train (default: 600)')
parser.add_argument('--no-cuda', action='store_false', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--arch', default='EulerGainConvVAE',help='the architecture to use. options are VGG, MLP for now. Can add more')
parser.add_argument('--UseQuaternionNotEuler', action='store_true', default=False, help='give this flag in order to use the Quaternion representation, otherwise the Euler angles representation will be used')
parser.add_argument('--ScaleSpaceMin', type=float, default=0.8, help='minimum value of the space scaling')
parser.add_argument('--ScaleSpaceMax', type=float, default=1.2, help='maximum value of the space scaling')
parser.add_argument('--GainMin', type=float, default=0.8, help='minimum value of the gain')
parser.add_argument('--GainMax', type=float, default=1.2, help='maximum value of the gain')
parser.add_argument('--RootDirectory4Data', default='./', help='the name of the root director for the data')
parser.add_argument('--carve_val', action='store_false', default=True, help='Whether validation set has to be carved out from the training set. Default is true')
parser.add_argument('--test', action='store_true', default=False, help='Whether train or test mode. Default is train mode.')
parser.add_argument('--coeff_angle_loss', type=float, default=1, help='Lagrangian multiplier for the angle loss term')
parser.add_argument('--coeff_recon_loss', type=float, default=2, help='Lagrangian multiplier for the reconstruction loss term')
parser.add_argument('--coeff_kl_loss', type=float, default=1, help='Lagrangian multiplier for the KL divergence loss term')
parser.add_argument('--get_pred_only', action='store_true', default=False, help='Get only predictions from images')
parser.add_argument('--pred_folder', default='./', help='Directory of file with test images.')
args = parser.parse_args()
# args=Args()
#
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
trainingdirectory = args.RootDirectory4Data+"/"+"training"
trainingimagefile="imagefile.mat"
traininglabelfile="labelfile.mat"
train_images = hdf5storage.loadmat(os.path.join(trainingdirectory, trainingimagefile))['imagedata']
train_labels = hdf5storage.loadmat(os.path.join(trainingdirectory, traininglabelfile))['labeldata']
if args.carve_val:
print("Carving out validation set from training set")
train_images, val_images, train_labels, val_labels = train_test_split(train_images, train_labels, test_size=0.1, random_state=42)
else:
print("Loading validation set")
validationdirectory = args.RootDirectory4Data+"/"+"validation"
validationimagefile="imagefile.mat"
validationlabelfile="labelfile.mat"
val_images = hdf5storage.loadmat(os.path.join(validationdirectory, validationimagefile))['imagedata']
val_labels = hdf5storage.loadmat(os.path.join(validationdirectory, validationlabelfile))['labeldata']
train_images = np.expand_dims(train_images,1)
val_images = np.expand_dims(val_images,1)
mean = np.mean(train_images)
std = np.std(train_images)
data_stat = [mean, std]
print("Dataset mean is {}".format(mean))
print("Dataset std is {}".format(std))
norm_train_images = (train_images - mean)/std
norm_val_images = (val_images - mean)/std
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_dataset = torch.utils.data.TensorDataset(torch.Tensor(norm_train_images), torch.Tensor(train_labels))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
val_dataset = torch.utils.data.TensorDataset(torch.Tensor(norm_val_images), torch.Tensor(val_labels))
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
# torch.autograd.set_detect_anomaly(True)
if args.arch == "EulerGainConvVAE":
model = EulerGainConvVAE(args).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=5e-4, amsgrad=True)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-5, cycle_momentum=False, steps_per_epoch=len(train_loader), epochs=100)
'''
STILL IN DEVELOPMENT
if args.arch == "EulerGainVAE":
model = EulerGainVAE(args).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=5e-4, amsgrad=True)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-5, cycle_momentum=False, steps_per_epoch=len(train_loader), epochs=args.epochs)
if args.arch == "EulerGainConvVAE2":
model = EulerGainConvVAE2(args).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=5e-4, amsgrad=True)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, | |
'REB',
'AST',
'TOV',
'STL',
'BLK',
'BLKA',
'PF',
'PFD',
'PTS',
'PLUS_MINUS',
'GP_RANK',
'W_RANK',
'L_RANK',
'W_PCT_RANK',
'MIN_RANK',
'FGM_RANK',
'FGA_RANK',
'FG_PCT_RANK',
'FG3M_RANK',
'FG3A_RANK',
'FG3_PCT_RANK',
'FTM_RANK',
'FTA_RANK',
'FT_PCT_RANK',
'OREB_RANK',
'DREB_RANK',
'REB_RANK',
'AST_RANK',
'TOV_RANK',
'STL_RANK',
'BLK_RANK',
'BLKA_RANK',
'PF_RANK',
'PFD_RANK',
'PTS_RANK',
'PLUS_MINUS_RANK',
'CFID',
'CFPARAMS']
assert list(example_overall.keys()) == ['GROUP_SET',
'GROUP_VALUE',
'GP',
'W',
'L',
'W_PCT',
'MIN',
'FGM',
'FGA',
'FG_PCT',
'FG3M',
'FG3A',
'FG3_PCT',
'FTM',
'FTA',
'FT_PCT',
'OREB',
'DREB',
'REB',
'AST',
'TOV',
'STL',
'BLK',
'BLKA',
'PF',
'PFD',
'PTS',
'PLUS_MINUS',
'GP_RANK',
'W_RANK',
'L_RANK',
'W_PCT_RANK',
'MIN_RANK',
'FGM_RANK',
'FGA_RANK',
'FG_PCT_RANK',
'FG3M_RANK',
'FG3A_RANK',
'FG3_PCT_RANK',
'FTM_RANK',
'FTA_RANK',
'FT_PCT_RANK',
'OREB_RANK',
'DREB_RANK',
'REB_RANK',
'AST_RANK',
'TOV_RANK',
'STL_RANK',
'BLK_RANK',
'BLKA_RANK',
'PF_RANK',
'PFD_RANK',
'PTS_RANK',
'PLUS_MINUS_RANK',
'CFID',
'CFPARAMS']
assert list(example_scorediff.keys()) == columns
assert list(example_scored.keys()) == columns
assert list(example_against.keys()) == columns
def test_team_yoy():
    """ tests the teamdashboardbyyearoveryear endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teamdashboardbyyearoveryear')
    data = example_team.data
    assert 'OverallTeamDashboard' in data.keys()
    assert 'ByYearTeamDashboard' in data.keys()
    # Both tables share the standard dashboard layout: grouping columns,
    # box-score stats, one matching *_RANK column per stat, then the
    # trailing CF metadata fields.
    stats = ['GP', 'W', 'L', 'W_PCT', 'MIN', 'FGM', 'FGA', 'FG_PCT',
             'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB',
             'DREB', 'REB', 'AST', 'TOV', 'STL', 'BLK', 'BLKA', 'PF',
             'PFD', 'PTS', 'PLUS_MINUS']
    columns = (['GROUP_SET', 'GROUP_VALUE'] + stats +
               [s + '_RANK' for s in stats] + ['CFID', 'CFPARAMS'])
    for table in ('OverallTeamDashboard', 'ByYearTeamDashboard'):
        assert list(data[table][0].keys()) == columns
def test_team_lineups():
    """ tests the teamdashlineups endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teamdashlineups',
                        game_id='0021700608',
                        team_id='1610612745',
                        player_id='2772')
    data = example_team.data
    assert 'Overall' in data.keys()
    assert 'Lineups' in data.keys()
    # Shared box-score stats; each one also appears as a *_RANK column.
    stats = ['GP', 'W', 'L', 'W_PCT', 'MIN', 'FGM', 'FGA', 'FG_PCT',
             'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB',
             'DREB', 'REB', 'AST', 'TOV', 'STL', 'BLK', 'BLKA', 'PF',
             'PFD', 'PTS', 'PLUS_MINUS']
    ranks = [s + '_RANK' for s in stats]
    # The two tables differ only in their leading identification columns.
    overall_columns = (['GROUP_SET', 'GROUP_VALUE', 'TEAM_ID',
                        'TEAM_ABBREVIATION', 'TEAM_NAME'] + stats + ranks)
    lineup_columns = ['GROUP_SET', 'GROUP_ID', 'GROUP_NAME'] + stats + ranks
    assert list(data['Overall'][0].keys()) == overall_columns
    assert list(data['Lineups'][0].keys()) == lineup_columns
def test_team_pass():
    """ tests the teamdashptpass endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teamdashptpass')
    data = example_team.data
    assert 'PassesMade' in data.keys()
    assert 'PassesReceived' in data.keys()
    # The two tables are identical except for the direction column:
    # PASS_FROM on passes made vs PASS_TO on passes received.
    head = ['TEAM_ID', 'TEAM_NAME', 'PASS_TYPE', 'G']
    tail = ['PASS_TEAMMATE_PLAYER_ID', 'FREQUENCY', 'PASS', 'AST',
            'FGM', 'FGA', 'FG_PCT', 'FG2M', 'FG2A', 'FG2_PCT',
            'FG3M', 'FG3A', 'FG3_PCT']
    assert (list(data['PassesMade'][0].keys()) ==
            head + ['PASS_FROM'] + tail)
    assert (list(data['PassesReceived'][0].keys()) ==
            head + ['PASS_TO'] + tail)
def test_team_reb():
    """ tests the teamdashptreb endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teamdashptreb')
    data = example_team.data
    # Rebounding stats shared by every table in this endpoint.
    reb_stats = ['REB_FREQUENCY', 'OREB', 'DREB', 'REB', 'C_OREB',
                 'C_DREB', 'C_REB', 'C_REB_PCT', 'UC_OREB', 'UC_DREB',
                 'UC_REB', 'UC_REB_PCT']
    # Each table is keyed by a different grouping column; the overall
    # table is special-cased below because it carries no SORT_ORDER.
    table_ranges = [('OverallRebounding', 'OVERALL'),
                    ('ShotTypeRebounding', 'SHOT_TYPE_RANGE'),
                    ('NumContestedRebounding', 'REB_NUM_CONTESTING_RANGE'),
                    ('ShotDistanceRebounding', 'SHOT_DIST_RANGE'),
                    ('RebDistanceRebounding', 'REB_DIST_RANGE')]
    for table, range_col in table_ranges:
        assert table in data.keys()
        if table == 'OverallRebounding':
            head = ['TEAM_ID', 'TEAM_NAME', 'G', 'OVERALL']
        else:
            head = ['TEAM_ID', 'TEAM_NAME', 'SORT_ORDER', 'G', range_col]
        assert list(data[table][0].keys()) == head + reb_stats
def test_team_shot():
    """ tests the teamdashptshots endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teamdashptshots')
    data = example_team.data
    # Shooting stats shared by every table in this endpoint.
    shot_stats = ['FGA_FREQUENCY', 'FGM', 'FGA', 'FG_PCT', 'EFG_PCT',
                  'FG2A_FREQUENCY', 'FG2M', 'FG2A', 'FG2_PCT',
                  'FG3A_FREQUENCY', 'FG3M', 'FG3A', 'FG3_PCT']
    # Each table differs only in the column naming its grouping range;
    # both closest-defender tables use CLOSE_DEF_DIST_RANGE.
    table_ranges = [('GeneralShooting', 'SHOT_TYPE'),
                    ('ShotClockShooting', 'SHOT_CLOCK_RANGE'),
                    ('DribbleShooting', 'DRIBBLE_RANGE'),
                    ('ClosestDefenderShooting', 'CLOSE_DEF_DIST_RANGE'),
                    ('ClosestDefender10ftPlusShooting', 'CLOSE_DEF_DIST_RANGE'),
                    ('TouchTimeShooting', 'TOUCH_TIME_RANGE')]
    for table, range_col in table_ranges:
        assert table in data.keys()
        expected = (['TEAM_ID', 'TEAM_NAME', 'SORT_ORDER', 'G', range_col] +
                    shot_stats)
        assert list(data[table][0].keys()) == expected
def test_team_gamelog():
    """ tests the teamgamelog endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teamgamelog')
    data = example_team.data
    assert 'TeamGameLog' in data.keys()
    # Per-game log row: game identification followed by box-score totals.
    # NOTE: TOV comes after BLK here, unlike the dashboard endpoints.
    expected = ['Team_ID', 'Game_ID', 'GAME_DATE', 'MATCHUP', 'WL',
                'W', 'L', 'W_PCT', 'MIN', 'FGM', 'FGA', 'FG_PCT',
                'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT',
                'OREB', 'DREB', 'REB', 'AST', 'STL', 'BLK', 'TOV',
                'PF', 'PTS']
    assert list(data['TeamGameLog'][0].keys()) == expected
def test_team_info():
    """ tests the teaminfocommon endpoint of the Team class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        endpoint='teaminfocommon')
    data = example_team.data
    # Expected column layout per table returned by this endpoint.
    expected = {
        'TeamInfoCommon': ['TEAM_ID', 'SEASON_YEAR', 'TEAM_CITY',
                           'TEAM_NAME', 'TEAM_ABBREVIATION',
                           'TEAM_CONFERENCE', 'TEAM_DIVISION',
                           'TEAM_CODE', 'W', 'L', 'PCT', 'CONF_RANK',
                           'DIV_RANK', 'MIN_YEAR', 'MAX_YEAR'],
        'TeamSeasonRanks': ['LEAGUE_ID', 'SEASON_ID', 'TEAM_ID',
                            'PTS_RANK', 'PTS_PG', 'REB_RANK', 'REB_PG',
                            'AST_RANK', 'AST_PG', 'OPP_PTS_RANK',
                            'OPP_PTS_PG'],
        'AvailableSeasons': ['SEASON_ID'],
    }
    for table, columns in expected.items():
        assert table in data.keys()
        assert list(data[table][0].keys()) == columns
def test_team_player():
    """ tests the teamplayerdashboard endpoint of the Player class
    """
    time.sleep(1)
    example_team = Team(headers=HEADERS,
                        player_id='203954',
                        endpoint='teamplayerdashboard')
    data = example_team.data
    assert 'TeamOverall' in data.keys()
    assert 'PlayersSeasonTotals' in data.keys()
    # Box-score stats; each one also appears as a *_RANK column.
    stats = ['GP', 'W', 'L', 'W_PCT', 'MIN', 'FGM', 'FGA', 'FG_PCT',
             'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB',
             'DREB', 'REB', 'AST', 'TOV', 'STL', 'BLK', 'BLKA', 'PF',
             'PFD', 'PTS', 'PLUS_MINUS']
    team_columns = (['GROUP_SET', 'TEAM_ID', 'TEAM_NAME', 'GROUP_VALUE'] +
                    stats + [s + '_RANK' for s in stats])
    # Player rows carry three extra fantasy/double-double stats, each with
    # a matching rank column appended after the standard ranks.
    player_stats = stats + ['NBA_FANTASY_PTS', 'DD2', 'TD3']
    player_columns = (['GROUP_SET', 'PLAYER_ID', 'PLAYER_NAME'] +
                      player_stats + [s + '_RANK' for s in player_stats])
    assert list(data['TeamOverall'][0].keys()) == team_columns
    assert list(data['PlayersSeasonTotals'][0].keys()) == player_columns
def test_team_onoff_details():
""" tests the teamplayeronoffdetails endpoint of the Team class
"""
time.sleep(1)
example_team = Team(headers=HEADERS,
endpoint='teamplayeronoffdetails')
table_names = example_team.data.keys()
assert 'OverallTeamPlayerOnOffDetails' in table_names
assert 'PlayersOnCourtTeamPlayerOnOffDetails' in table_names
assert 'PlayersOffCourtTeamPlayerOnOffDetails' in table_names
example_overall = example_team.data['OverallTeamPlayerOnOffDetails'][0]
example_on = example_team.data['PlayersOnCourtTeamPlayerOnOffDetails'][0]
example_off = example_team.data['PlayersOffCourtTeamPlayerOnOffDetails'][0]
assert list(example_overall.keys()) == ['GROUP_SET',
'GROUP_VALUE',
'TEAM_ID',
'TEAM_ABBREVIATION',
'TEAM_NAME',
'GP',
'W',
'L',
'W_PCT',
'MIN',
'FGM',
'FGA',
'FG_PCT',
'FG3M',
'FG3A',
'FG3_PCT',
'FTM',
'FTA',
'FT_PCT',
'OREB',
'DREB',
'REB',
'AST',
'TOV',
'STL',
'BLK',
'BLKA',
'PF',
'PFD',
'PTS',
'PLUS_MINUS',
'GP_RANK',
'W_RANK',
'L_RANK',
'W_PCT_RANK',
'MIN_RANK',
'FGM_RANK',
'FGA_RANK',
'FG_PCT_RANK',
'FG3M_RANK',
'FG3A_RANK',
'FG3_PCT_RANK',
'FTM_RANK',
'FTA_RANK',
'FT_PCT_RANK',
'OREB_RANK',
'DREB_RANK',
'REB_RANK',
'AST_RANK',
'TOV_RANK',
'STL_RANK',
'BLK_RANK',
'BLKA_RANK',
'PF_RANK',
'PFD_RANK',
'PTS_RANK',
'PLUS_MINUS_RANK']
assert list(example_on.keys()) == ['GROUP_SET',
'TEAM_ID',
'TEAM_ABBREVIATION',
'TEAM_NAME',
'VS_PLAYER_ID',
'VS_PLAYER_NAME',
'COURT_STATUS',
'GP',
'W',
'L',
'W_PCT',
'MIN',
'FGM',
'FGA',
'FG_PCT',
'FG3M',
'FG3A',
'FG3_PCT',
'FTM',
'FTA',
'FT_PCT',
'OREB',
'DREB',
'REB',
'AST',
'TOV',
'STL',
'BLK',
'BLKA',
'PF',
'PFD',
'PTS',
'PLUS_MINUS',
'GP_RANK',
'W_RANK',
'L_RANK',
'W_PCT_RANK',
'MIN_RANK',
'FGM_RANK',
'FGA_RANK',
'FG_PCT_RANK',
'FG3M_RANK',
'FG3A_RANK',
'FG3_PCT_RANK',
'FTM_RANK',
'FTA_RANK',
'FT_PCT_RANK',
'OREB_RANK',
'DREB_RANK',
'REB_RANK',
'AST_RANK',
'TOV_RANK',
'STL_RANK',
'BLK_RANK',
'BLKA_RANK',
'PF_RANK',
'PFD_RANK',
'PTS_RANK',
'PLUS_MINUS_RANK']
assert list(example_off.keys()) == ['GROUP_SET',
'TEAM_ID',
'TEAM_ABBREVIATION',
'TEAM_NAME',
'VS_PLAYER_ID',
'VS_PLAYER_NAME',
'COURT_STATUS',
'GP',
'W',
'L',
'W_PCT',
'MIN',
'FGM',
'FGA',
'FG_PCT',
'FG3M',
'FG3A',
'FG3_PCT',
'FTM',
'FTA',
'FT_PCT',
'OREB',
'DREB',
'REB',
'AST',
'TOV',
'STL',
'BLK',
'BLKA',
'PF',
'PFD',
'PTS',
'PLUS_MINUS',
'GP_RANK',
'W_RANK',
'L_RANK',
'W_PCT_RANK',
'MIN_RANK',
'FGM_RANK',
'FGA_RANK',
'FG_PCT_RANK',
'FG3M_RANK',
'FG3A_RANK',
| |
Overwrite the file currently on disk, if it exists. Default is false.
:return: None
'''
try:
assert not add_local_density or self.gadget_loc # gadget loc must be nonzero here!
assert not add_particles or self.gadget_loc
except:
raise AssertionError(
'Particle location not specified; please specify gadget location for %s' % self.simname)
if add_local_density or add_particles:
all_snapdirs = sorted(glob(path.join(self.gadget_loc, 'snapdir*')))
snapdirs = [all_snapdirs[idx] for idx in
self.sf_idxs] # only the snapdirs for the scale factors were interested in .
else:
snapdirs = ['' for i in self.scale_factors]
for a, z, fname, cache_fnames, snapdir in izip(self.scale_factors, self.redshifts, self.filenames,
self.cache_filenames, snapdirs):
# TODO get right reader for each halofinder.
if scale_factors != 'all' and a not in scale_factors:
continue
reader = RockstarHlistReader(fname, self.columns_to_keep, cache_fnames, self.simname,
self.halo_finder, z, self.version_name, self.Lbox, self.pmass,
overwrite=overwrite)
reader.read_halocat(self.columns_to_convert)
if add_local_density or add_particles:
particles = self._read_particles(snapdir, downsample_factor=downsample_factor)
if add_local_density:
self.add_local_density(reader, particles, downsample_factor) # TODO how to add radius?
reader.write_to_disk() # do these after so we have a halo table to work off of
reader.update_cache_log()
if add_particles:
self.cache_particles(particles, a, downsample_factor=downsample_factor)
def _read_particles(self, snapdir, downsample_factor):
    """
    Read in particles from a snapshot, and return them.

    Iterates over every ``snap*`` file in the snapshot directory, reads
    particle positions, randomly downsamples them, and accumulates all
    surviving positions into one array.

    :param snapdir:
        Location of the particles (a Gadget snapshot directory).
    :param downsample_factor:
        The fraction of particles to keep, in [0, 1].
    :return: all_particles, a numpy array of shape (N, 3) that lists all
        retained particle positions.
    """
    from .readGadgetSnapshot import readGadgetSnapshot
    # Guard against a mis-specified fraction (e.g. a percentage).
    assert 0 <= downsample_factor <= 1
    # Seed from the wall clock, so results differ between runs.
    np.random.seed(int(time()))  # TODO pass in seed?
    all_particles = np.array([], dtype='float32')
    # TODO should fail gracefully if memory is exceeded or if p is too small.
    for file in glob(path.join(snapdir, 'snap*')):
        print 'Reading %s' % file
        # TODO should find out which is "fast" axis and use that.
        # Numpy uses fortran ordering.
        particles = readGadgetSnapshot(file, read_pos=True)[
            1]  # Think this returns some type of tuple; should check
        # NOTE(review): np.random.choice defaults to sampling WITH
        # replacement, so duplicates are possible — confirm intended.
        downsample_idxs = np.random.choice(particles.shape[0], size=int(particles.shape[0] * downsample_factor))
        particles = particles[downsample_idxs, :]
        # particles = particles[np.random.rand(particles.shape[0]) < p] # downsample
        if particles.shape[0] == 0:
            continue
        # Grow the accumulator and copy this file's particles into the tail.
        all_particles = np.resize(all_particles, (all_particles.shape[0] + particles.shape[0], 3))
        all_particles[-particles.shape[0]:, :] = particles
    return all_particles
def cache_particles(self, particles, scale_factor, downsample_factor):
    """
    Add the particle to the halocatalog, so loading it will load the corresponding particles.

    :param particles:
        A (N,3) shaped numpy array of all particle positions
    :param scale_factor:
        Scale factor of the snapshot these particles belong to; used for
        the redshift and embedded in the cache filename.
    :param downsample_factor:
        Fraction by which the particles were downsampled; encoded (as its
        negative log10) in the cached version name.
    """
    # Convert scale factor to redshift: z = 1/a - 1.
    z = 1.0 / scale_factor - 1.0
    ptcl_catalog = UserSuppliedPtclCatalog(redshift=z, Lbox=self.Lbox, particle_mass=self.pmass, \
                                           x=particles[:, 0], y=particles[:, 1], z=particles[:, 2])
    ptcl_cache_loc = self.cache_loc
    # Filename includes scale factor, sim name and version name so
    # different catalogs never collide in the cache.
    ptcl_cache_filename = 'ptcl_%.2f.list.%s_%s.hdf5' % (
        scale_factor, self.simname, self.version_name)  # make sure we don't have redunancies.
    ptcl_cache_filename = path.join(ptcl_cache_loc, ptcl_cache_filename)
    print ptcl_cache_filename
    ptcl_catalog.add_ptclcat_to_cache(ptcl_cache_filename, self.simname,
                                      self.version_name + '_particle_%.2f' % (-1 * np.log10(downsample_factor)),
                                      str(downsample_factor),
                                      overwrite=True)  # TODO would be nice to make a note of the downsampling without having to do some voodoo to get it.
def add_local_density(self, reader, all_particles, downsample_factor=1e-2, radius=[10]):  # [1,5,10]
    """
    Calculates the local density around each halo and adds it to the halo table, to be cached.

    For each radius, counts the downsampled particles within that radius
    of every halo and normalizes by the sphere volume times the mean
    (downsampled) particle density, storing the result in a
    ``halo_local_density_<r>`` column on the reader's halo table.

    :param reader:
        A RockstarHlistReader object from Halotools; its halo_table is
        modified in place.
    :param all_particles:
        (N, 3) array of (downsampled) particle positions to count against.
    :param downsample_factor:
        Fraction by which all_particles was downsampled; used to correct
        the mean density. Default is 1e-2.
    :param radius:
        Radius (in Mpc/h) around which to search for particles to estimate local density.
        May be a float, list, or array. Default is [10] Mpc.
        NOTE(review): a mutable default list — harmless here since it is
        only rebound (never mutated), but worth cleaning up.
    :return: None
    """
    # doing imports here since these are both files i've stolen from Yao
    # Possible this will be slow
    from fast3tree import fast3tree
    # Normalize radius to a numpy array whatever form was passed in.
    if type(radius) == float:
        radius = np.array([radius])
    elif type(radius) == list:
        radius = np.array(radius)
    # One density column per halo per radius, filled in below.
    densities = np.zeros((reader.halo_table['halo_x'].shape[0], radius.shape[0]))
    # Mean particle density of the downsampled set, per (Mpc/h)^3.
    mean_particle_density = downsample_factor * (self.npart / self.Lbox) ** 3
    with fast3tree(all_particles) as tree:
        for r_idx, r in enumerate(radius):
            print 'Calculating Densities for radius %d' % r
            # densities[:, r_idx] = densities[:, r_idx]/ (downsample_factor * 4 * np.pi / 3 * r ** 3)
            for idx, halo_pos in enumerate(
                    izip(reader.halo_table['halo_x'], reader.halo_table['halo_y'], reader.halo_table['halo_z'])):
                # print idx
                # Count particles within r of this halo (periodic box).
                particle_idxs = tree.query_radius(halo_pos, r, periodic=True)
                densities[idx, r_idx] += len(particle_idxs)
            volume = (4 * np.pi / 3 * r ** 3)
            # Density in units of the mean density (overdensity).
            reader.halo_table['halo_local_density_%d' % (int(r))] = densities[:, r_idx] / (
                    volume * mean_particle_density)
            # NOTE(review): hard-coded scratch path — debugging artifact?
            np.save('/scratch/users/swmclau2/mdpl2_densities_tmp.npy', densities[:, r_idx] / (
                    volume * mean_particle_density))
# adding **kwargs cuz some invalid things can be passed in, hopefully not a pain
# TODO some sort of spell check in the input file
def load(self, scale_factor, HOD='zheng07', biased_satellites=False, tol=0.01, particles=False, downsample_factor=1e-2, hod_kwargs=None,
         **kwargs):
    '''
    Load both a halocat and a model to prepare for population and calculation.
    :param scale_factor:
        Scale factor of the catalog to load. If no exact match found, searches for nearest in tolerance.
    :param HOD:
        HOD model to load. Currently available options are redMagic, stepFunc, and the halotools defaults.
    :param biased_satellites:
        Whether to allow the satellite profile to be biased relative to the NFW fit. Default is False.
    :param tol:
        Tolerance within which to match scale_factor. Default is 0.01.
    :param particles:
        Whether to also load the cached particle catalog. Default is False.
    :param downsample_factor:
        Downsample factor of the cached particles to load. Default is 1e-2.
    :param hod_kwargs:
        Kwargs passed through to the HOD model. Default is None (treated as empty).
    :return: None
    '''
    assert 'model' not in kwargs, 'model has been deprecated, use HOD'
    # Guard against the shared-mutable-default bug: load_model may insert
    # keys into hod_kwargs (e.g. modulate_with_cenocc), which with a {}
    # default would leak into every subsequent call.
    if hod_kwargs is None:
        hod_kwargs = {}
    a = self._return_nearest_sf(scale_factor, tol)
    if a is None:
        raise ValueError('Scale factor %.3f not within given tolerance.' % scale_factor)
    # Scale factor already matched above, so skip re-checking downstream.
    self.load_catalog(a, tol, check_sf=False, particles=particles, downsample_factor=downsample_factor)
    self.load_model(a, HOD, biased_satellites=biased_satellites, check_sf=False, hod_kwargs=hod_kwargs)
def load_catalog(self, scale_factor, tol=0.05, check_sf=True, particles=False, downsample_factor=1e-2):
    '''
    Load only a specific catalog. Not recommended to use separately from the broader load function.
    It is possible for the redshift in the model to be different from the one from the catalog,
    though the difference will be small.
    :param scale_factor:
        The scale factor of the catalog of interest.
    :param tol:
        Tolerance within which to match scale_factor. Default is 0.05.
    :param check_sf:
        Boolean whether or not to use the passed in scale_factor blindly. Default is True.
    :param particles:
        Whether to load the cached particle catalog alongside the halos. Default is False.
    :param downsample_factor:
        Downsample factor of the cached particle catalog to load. Default is 1e-2.
    :return: None
    '''
    if type(downsample_factor) is not float:
        downsample_factor = float(downsample_factor)  # will occasionally get an errant string input
    if check_sf:
        # Snap to the nearest cached scale factor within tolerance.
        a = self._return_nearest_sf(scale_factor, tol)
        if a is None:
            raise ValueError('Scale factor %.3f not within given tolerance.' % scale_factor)
    else:
        a = scale_factor  # YOLO
    z = 1.0 / a - 1
    if not particles:
        self.halocat = CachedHaloCatalog(simname=self.simname, halo_finder=self.halo_finder,
                                         version_name=self.version_name,
                                         redshift=z, dz_tol=tol)
    else:
        self._downsample_factor = downsample_factor
        print self.version_name + '_particle_%.2f' % (-1 * np.log10(downsample_factor))
        # Particle version name encodes -log10(downsample_factor), matching
        # the naming used when the particles were cached.
        self.halocat = CachedHaloCatalog(simname=self.simname, halo_finder=self.halo_finder,
                                         version_name=self.version_name,
                                         ptcl_version_name=self.version_name + '_particle_%.2f' % (
                                                 -1 * np.log10(downsample_factor)),
                                         redshift=z, dz_tol=tol)
    # reflect the current catalog
    self.z = z
    self.a = a
    self.populated_once = False  # no way this one's been populated!
# TODO not sure if assembias should be boolean, or keep it as separate HODs?
def load_model(self, scale_factor, HOD='zheng07', biased_satellites=False, check_sf=True, hod_kwargs={}):
'''
Load an HOD model. Not reccomended to be used separately from the load function. It
is possible for the scale_factor of the model and catalog to be different.
:param scale_factor:
Scale factor for the model
:param HOD:
HOD model to load. Currently available options are redMagic, stepFunc, and the halotools defatuls.
Also may pass in a tuple of cens and sats classes, which will be instantiated here.
:param biased_satellites:
Whether or not add a parameter to the HOD to allow the satellite population to be biased relative
to the NFW fit to the halo. Default is false.
:param check_sf:
Boolean whether or not to use the passed in scale_factor blindly. Default is false.
:param hod_kwargs:
Kwargs to pass into the HOD model being loaded. Default is none.
:return: None
'''
if check_sf:
a = self._return_nearest_sf(scale_factor)
if a is None:
raise ValueError('Scale factor %.3f not within given tolerance.' % scale_factor)
else:
a = scale_factor # YOLO
z = 1.0 / a - 1
#min_conc, max_conc = np.min(self.halocat.halo_table['halo_nfw_conc']), np.max(self.halocat.halo_table[np.isfinite(self.halocat.halo_table['halo_nfw_conc'])]['halo_nfw_conc'])
min_conc, max_conc = 1, 100
concentration_bins = np.arange(min_conc, max_conc, 1)
if biased_satellites:
sat_phase_space = BiasedNFWPhaseSpace(redshift=z, concentration_bins = concentration_bins)
else:
sat_phase_space = NFWPhaseSpace(redshift=z, concentration_bins = concentration_bins)
if type(HOD) is str:
assert HOD in VALID_HODS
if HOD in VALID_HODS-DEFAULT_HODS: # my custom ones
cens_occ = HOD_DICT[HOD][0](redshift=z, **hod_kwargs)
# TODO this is a hack, something better would be better
try: #hack for central modulation
# the ab ones need to modulated with the baseline model
if HOD=='zheng07':
hod_kwargs['modulate_with_cenocc']=True
sats_occ = HOD_DICT[HOD][1](redshift=z, cenocc_model=cens_occ,
**hod_kwargs)
except: #assume the error is a cenocc issue
#print 'B'
sats_occ = HOD_DICT[HOD][1](redshift=z, **hod_kwargs)
sats_occ._suppress_repeated_param_warning = True
self.model = HodModelFactory(
centrals_occupation=cens_occ,
centrals_profile=TrivialPhaseSpace(redshift=z),
satellites_occupation=sats_occ,
satellites_profile= sat_phase_space)
else:
self.model = PrebuiltHodModelFactory(HOD,redshift=z, **hod_kwargs)
if biased_satellites:
self.model = HodModelFactory(baseline_model_instance=self.model, satellites_profile = sat_phase_space,
modulate_with_cenocc=True)
else:
cens_occ = HOD[0](redshift=z, **hod_kwargs)
try:
sats_occ = HOD[1](redshift=z, cenocc_model=cens_occ, **hod_kwargs)
except TypeError: #not all models accept cenoccs, but it shoudl be the default
sats_occ = HOD[1](redshift=z, **hod_kwargs)
sats_occ._suppress_repeated_param_warning = True
| |
'{key}' not found in RuntimeVirtualMachineVirtualMachineConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
    # Run the deprecated-key-name check (which may warn the caller to use
    # the property getter instead), then delegate to normal mapping access.
    RuntimeVirtualMachineVirtualMachineConfig.__key_warning(key)
    return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
    # Same key-name check as __getitem__, then delegate to dict.get with
    # the caller-supplied default.
    RuntimeVirtualMachineVirtualMachineConfig.__key_warning(key)
    return super().get(key, default)
def __init__(__self__, *,
             data_disk: 'outputs.RuntimeVirtualMachineVirtualMachineConfigDataDisk',
             machine_type: str,
             accelerator_config: Optional['outputs.RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig'] = None,
             container_images: Optional[Sequence['outputs.RuntimeVirtualMachineVirtualMachineConfigContainerImage']] = None,
             encryption_config: Optional['outputs.RuntimeVirtualMachineVirtualMachineConfigEncryptionConfig'] = None,
             guest_attributes: Optional[Mapping[str, str]] = None,
             internal_ip_only: Optional[bool] = None,
             labels: Optional[Mapping[str, str]] = None,
             metadata: Optional[Mapping[str, str]] = None,
             network: Optional[str] = None,
             nic_type: Optional[str] = None,
             shielded_instance_config: Optional['outputs.RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig'] = None,
             subnet: Optional[str] = None,
             tags: Optional[Sequence[str]] = None,
             zone: Optional[str] = None):
    """
    Virtual machine configuration for a notebook runtime (Pulumi output type).

    :param 'RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs' data_disk: Data disk option configuration settings. Structure is documented below.
    :param str machine_type: The Compute Engine machine type used for runtimes.
    :param 'RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs' accelerator_config: The Compute Engine accelerator configuration for this runtime. Structure is documented below.
    :param Sequence['RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs'] container_images: List of container images used to start the notebook instance. Structure is documented below.
    :param 'RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs' encryption_config: Encryption settings for the virtual machine data disk. Structure is documented below.
    :param Mapping[str, str] guest_attributes: The Compute Engine guest attributes (see
        https://cloud.google.com/compute/docs/storing-retrieving-metadata#guest_attributes).
    :param bool internal_ip_only: If true, the runtime only gets internal IP addresses. Only
        possible on subnetwork-enabled networks whose dependencies are reachable
        without external IPs; by default runtimes get ephemeral external IPs.
    :param Mapping[str, str] labels: Labels to apply to this disk (modifiable later via
        disks.setLabels; persistent disks only).
    :param Mapping[str, str] metadata: The Compute Engine metadata entries to add to the virtual machine (see
        https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata).
    :param str network: The Compute Engine network for machine communications; mutually
        exclusive with `subnet`. Full URL or partial URI. If neither `network`
        nor `subnet` is given, the project's "default" network is used if it
        exists. Supported configurations: Google managed network (both empty),
        consumer-project VPC or shared VPC (both required, Private Service
        Access configured).
    :param str nic_type: The vNIC type for this interface. Possible values are
        `UNSPECIFIED_NIC_TYPE`, `VIRTIO_NET`, and `GVNIC`.
    :param 'RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded VM instance configuration settings. Structure is documented below.
    :param str subnet: The Compute Engine subnetwork for machine communications; mutually
        exclusive with `network`. Full URL or partial URI.
    :param Sequence[str] tags: The Compute Engine tags to add to the runtime (see
        https://cloud.google.com/compute/docs/label-or-tag-resources#tags).
    :param str zone: The zone where the virtual machine is located.
    """
    # Required fields are always stored.
    pulumi.set(__self__, "data_disk", data_disk)
    pulumi.set(__self__, "machine_type", machine_type)
    # Optional fields are stored only when supplied, in the original
    # (generated) order so the resulting dict layout is unchanged.
    _optionals = (
        ("accelerator_config", accelerator_config),
        ("container_images", container_images),
        ("encryption_config", encryption_config),
        ("guest_attributes", guest_attributes),
        ("internal_ip_only", internal_ip_only),
        ("labels", labels),
        ("metadata", metadata),
        ("network", network),
        ("nic_type", nic_type),
        ("shielded_instance_config", shielded_instance_config),
        ("subnet", subnet),
        ("tags", tags),
        ("zone", zone),
    )
    for _attr, _value in _optionals:
        if _value is not None:
            pulumi.set(__self__, _attr, _value)
@property
@pulumi.getter(name="dataDisk")
def data_disk(self) -> 'outputs.RuntimeVirtualMachineVirtualMachineConfigDataDisk':
    """
    Data disk option configuration settings.
    Structure is documented below.
    """
    return pulumi.get(self, "data_disk")
@property
@pulumi.getter(name="machineType")
def machine_type(self) -> str:
    """
    The Compute Engine machine type used for runtimes.
    """
    return pulumi.get(self, "machine_type")
@property
@pulumi.getter(name="acceleratorConfig")
def accelerator_config(self) -> Optional['outputs.RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig']:
    """
    The Compute Engine accelerator configuration for this runtime.
    Structure is documented below.
    """
    return pulumi.get(self, "accelerator_config")
@property
@pulumi.getter(name="containerImages")
def container_images(self) -> Optional[Sequence['outputs.RuntimeVirtualMachineVirtualMachineConfigContainerImage']]:
    """
    List of container images used to start the notebook instance.
    Structure is documented below.
    """
    return pulumi.get(self, "container_images")
@property
@pulumi.getter(name="encryptionConfig")
def encryption_config(self) -> Optional['outputs.RuntimeVirtualMachineVirtualMachineConfigEncryptionConfig']:
    """
    Encryption settings for the virtual machine data disk.
    Structure is documented below.
    """
    return pulumi.get(self, "encryption_config")
@property
@pulumi.getter(name="guestAttributes")
def guest_attributes(self) -> Optional[Mapping[str, str]]:
    """
    The Compute Engine guest attributes. (See [Project and instance
    guest attributes](https://cloud.google.com/compute/docs/
    storing-retrieving-metadata#guest_attributes).)
    """
    return pulumi.get(self, "guest_attributes")
@property
@pulumi.getter(name="internalIpOnly")
def internal_ip_only(self) -> Optional[bool]:
    """
    If true, the runtime only has internal IP addresses. By default,
    runtimes are not restricted to internal IPs and are assigned an
    ephemeral external IP per VM. This restriction can only be enabled
    for subnetwork-enabled networks, and all dependencies must be
    configured to be accessible without external IP addresses.
    """
    return pulumi.get(self, "internal_ip_only")
@property
@pulumi.getter
def labels(self) -> Optional[Mapping[str, str]]:
    """
    Labels to apply to this disk. These can be modified later via the
    disks.setLabels method. This field is only applicable for
    persistent disks.
    """
    return pulumi.get(self, "labels")
@property
@pulumi.getter
def metadata(self) -> Optional[Mapping[str, str]]:
    """
    The Compute Engine metadata entries to add to the virtual machine.
    (See [Project and instance metadata](https://cloud.google.com
    /compute/docs/storing-retrieving-metadata#project_and_instance
    _metadata).)
    """
    return pulumi.get(self, "metadata")
@property
@pulumi.getter
def network(self) -> Optional[str]:
    """
    The Compute Engine network to be used for machine communications.
    Cannot be specified with subnetwork. If neither `network` nor
    `subnet` is specified, the "default" network of the project is
    used, if it exists. A full URL or partial URI. Examples:
    * `https://www.googleapis.com/compute/v1/projects/[project_id]/
      regions/global/default`
    * `projects/[project_id]/regions/global/default`
    Runtimes are managed resources inside Google Infrastructure.
    Runtimes support the following network configurations:
    * Google Managed Network (network & subnet are empty)
    * Consumer Project VPC (network & subnet are required). Requires
      configuring Private Service Access.
    * Shared VPC (network & subnet are required). Requires
      configuring Private Service Access.
    """
    return pulumi.get(self, "network")
@property
@pulumi.getter(name="nicType")
def nic_type(self) -> Optional[str]:
    """
    The type of vNIC to be used on this interface. This may be gVNIC
    or VirtioNet.
    Possible values are `UNSPECIFIED_NIC_TYPE`, `VIRTIO_NET`, and `GVNIC`.
    """
    return pulumi.get(self, "nic_type")
@property
@pulumi.getter(name="shieldedInstanceConfig")
def shielded_instance_config(self) -> Optional['outputs.RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig']:
    """
    Shielded VM instance configuration settings.
    Structure is documented below.
    """
    return pulumi.get(self, "shielded_instance_config")
@property
@pulumi.getter
def subnet(self) -> Optional[str]:
    """
    The Compute Engine subnetwork to be used for machine
    communications. Cannot be specified with network. A full URL or
    partial URI are valid. Examples:
    * `https://www.googleapis.com/compute/v1/projects/[project_id]/
      regions/us-east1/subnetworks/sub0`
    * `projects/[project_id]/regions/us-east1/subnetworks/sub0`
    """
    return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence[str]]:
    """
    The Compute Engine tags to add to the runtime. (See [Tagging
    instances](https://cloud.google.com/compute/docs/
    label-or-tag-resources#tags).)
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter
def zone(self) -> Optional[str]:
    """
    The zone where the virtual machine is located.
    """
    return pulumi.get(self, "zone")
@pulumi.output_type
class RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(dict):
    """Accelerator (GPU) configuration attached to a runtime VM."""

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to its snake_case property name and
        # warn the caller to use the property getter instead.
        suggest = "core_count" if key == "coreCount" else None
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 core_count: Optional[int] = None,
                 type: Optional[str] = None):
        """
        :param int core_count: Count of cores of this accelerator.
        :param str type: Accelerator model. For valid values, see
               `https://cloud.google.com/vertex-ai/docs/workbench/reference/
               rest/v1/projects.locations.runtimes#AcceleratorType`
        """
        # Only store the fields that were actually supplied.
        if core_count is not None:
            pulumi.set(__self__, "core_count", core_count)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="coreCount")
    def core_count(self) -> Optional[int]:
        """
        Count of cores of this accelerator.
        """
        return pulumi.get(self, "core_count")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Accelerator model. For valid values, see
        `https://cloud.google.com/vertex-ai/docs/workbench/reference/
        rest/v1/projects.locations.runtimes#AcceleratorType`
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class RuntimeVirtualMachineVirtualMachineConfigContainerImage(dict):
def __init__(__self__, *,
repository: str,
tag: Optional[str] = None):
"""
:param str repository: The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
:param str tag: The tag of the container image. If not specified, this | |
# Repository: vieee/Scraper_Myntra (extraction metadata; was a malformed <reponame>/<gh_stars> tag)
"""Testcases for cssutils.CSSSerializer"""
from . import basetest
import cssutils
class PreferencesTestCase(basetest.BaseTestCase):
"""
testcases for cssutils.serialize.Preferences
"""
def setUp(self):
    # cssutils.ser is a module-global serializer; reset its preferences so
    # state leaked from other tests cannot influence this one.
    cssutils.ser.prefs.useDefaults()
def tearDown(self):
    # Restore default preferences so later tests see a clean serializer.
    cssutils.ser.prefs.useDefaults()
# def testkeepUnkownAtRules(self):
# "Preferences.keepUnkownAtRules"
# # py >=2.6 only
# # v = sys.version_info; if v[0]*10+v[1] >= 26:
# from warnings import catch_warnings
# with catch_warnings(record=True) as log:
# x = cssutils.ser.prefs.keepUnkownAtRules
#
# if log:
# # unpack the only member of log
# warning, = log
# self.assertEqual(warning.category, DeprecationWarning)
def test_resolveVariables(self):
    "Preferences.resolveVariables"
    # Default: @variables references are substituted into declarations.
    self.assertEqual(cssutils.ser.prefs.resolveVariables, True)
    cssutils.ser.prefs.resolveVariables = False

    vars = '''
        @variables {
            c1: red;
            c2: #0f0;
            px: 1px 2px;
            }
    '''
    # input stylesheet -> expected serialization with variables resolved;
    # unknown variables (var(unknown)) are left untouched.
    tests = {
        '''a {\n    color: var(c1)\n    }''': '''a {\n    color: red\n    }''',
        '''a {\n    color: var(c1)\n; color: var( c2 ) }''': '''a {\n    color: red;\n    color: #0f0\n    }''',
        '''a {\n    margin: var(px)\n    }''': '''a {\n    margin: 1px 2px\n    }''',
        '''@media all {
            a {
                margin: var(px) var(px);
                color: var(c1);
                left: var(unknown)
            }
        }''': '''@media all {\n    a {\n        margin: 1px 2px 1px 2px;\n'''
        '''        color: red;\n        left: var(unknown)\n        }\n    }''',
    }
    # NOTE(review): the pref is flipped back to True *before* the loop, so
    # the resolveVariables=False branch set above is never actually asserted.
    cssutils.ser.prefs.resolveVariables = True
    for test, exp in list(tests.items()):
        s = cssutils.parseString(vars + test)
        self.assertEqual(exp.encode(), s.cssText)
    cssutils.ser.prefs.resolveVariables = True  # redundant; already True
def test_useDefaults(self):
    "Preferences.useDefaults()"
    # Switch to minified first so the assertions below prove that
    # useDefaults() actually restores every preference.
    cssutils.ser.prefs.useMinified()
    cssutils.ser.prefs.useDefaults()
    self.assertEqual(cssutils.ser.prefs.defaultAtKeyword, True)
    self.assertEqual(cssutils.ser.prefs.defaultPropertyName, True)
    self.assertEqual(cssutils.ser.prefs.defaultPropertyPriority, True)
    self.assertEqual(cssutils.ser.prefs.importHrefFormat, None)
    self.assertEqual(cssutils.ser.prefs.indent, 4 * ' ')
    self.assertEqual(cssutils.ser.prefs.indentClosingBrace, True)
    self.assertEqual(cssutils.ser.prefs.keepAllProperties, True)
    self.assertEqual(cssutils.ser.prefs.keepComments, True)
    self.assertEqual(cssutils.ser.prefs.keepEmptyRules, False)
    self.assertEqual(cssutils.ser.prefs.keepUnknownAtRules, True)
    self.assertEqual(cssutils.ser.prefs.keepUsedNamespaceRulesOnly, False)
    self.assertEqual(cssutils.ser.prefs.lineNumbers, False)
    self.assertEqual(cssutils.ser.prefs.lineSeparator, '\n')
    self.assertEqual(cssutils.ser.prefs.listItemSpacer, ' ')
    self.assertEqual(cssutils.ser.prefs.minimizeColorHash, True)
    self.assertEqual(cssutils.ser.prefs.omitLastSemicolon, True)
    self.assertEqual(cssutils.ser.prefs.omitLeadingZero, False)
    # NOTE: "paranthesisSpacer" is the historical (misspelled) pref name.
    self.assertEqual(cssutils.ser.prefs.paranthesisSpacer, ' ')
    self.assertEqual(cssutils.ser.prefs.propertyNameSpacer, ' ')
    self.assertEqual(cssutils.ser.prefs.selectorCombinatorSpacer, ' ')
    self.assertEqual(cssutils.ser.prefs.spacer, ' ')
    self.assertEqual(cssutils.ser.prefs.validOnly, False)
    # Round-trip a messy stylesheet; empty rules are dropped by default
    # (keepEmptyRules=False) and spacing is normalized.
    css = '''
    /*1*/
    @import url(x) tv , print;
    @namespace prefix "uri";
    @namespace unused "unused";
    @media all {}
    @media all {
        a {}
    }
    @media all {
        a { color: red; }
    }
    @page { left: 0; }
    a {}
    prefix|x, a + b > c ~ d , b { top : 1px ;
        font-family : arial ,'some'
        }
    '''
    parsedcss = '''/*1*/
@import url(x) tv, print;
@namespace prefix "uri";
@namespace unused "unused";
@media all {
    a {
        color: red
        }
    }
@page {
    left: 0
    }
prefix|x, a + b > c ~ d, b {
    top: 1px;
    font-family: arial, "some"
    }'''
    s = cssutils.parseString(css)
    self.assertEqual(s.cssText, parsedcss.encode())
    # With defaults, leading zeros are kept and ".1" is normalized to "0.1".
    tests = {
        '0.1 .1 0.1px .1px 0.1% .1% +0.1 +.1 +0.1px +.1px +0.1% +.1% '
        '-0.1 -.1 -0.1px -.1px -0.1% -.1%': '0.1 0.1 0.1px 0.1px 0.1% 0.1% +0.1 +0.1 +0.1px +0.1px +0.1% '
        '+0.1% -0.1 -0.1 -0.1px -0.1px -0.1% -0.1%'
    }
    cssutils.ser.prefs.useDefaults()
    for test, exp in list(tests.items()):
        s = cssutils.parseString('a{x:%s}' % test)
        self.assertEqual(('a {\n    x: %s\n    }' % exp).encode(), s.cssText)
def test_useMinified(self):
    "Preferences.useMinified()"
    cssutils.ser.prefs.useDefaults()
    cssutils.ser.prefs.useMinified()
    # Minified keeps canonical names/priorities but drops all optional
    # whitespace, comments, empty rules and unused namespace rules.
    self.assertEqual(cssutils.ser.prefs.defaultAtKeyword, True)
    self.assertEqual(cssutils.ser.prefs.defaultPropertyName, True)
    self.assertEqual(cssutils.ser.prefs.importHrefFormat, 'string')
    self.assertEqual(cssutils.ser.prefs.indent, '')
    self.assertEqual(cssutils.ser.prefs.keepAllProperties, True)
    self.assertEqual(cssutils.ser.prefs.keepComments, False)
    self.assertEqual(cssutils.ser.prefs.keepEmptyRules, False)
    self.assertEqual(cssutils.ser.prefs.keepUnknownAtRules, False)
    self.assertEqual(cssutils.ser.prefs.keepUsedNamespaceRulesOnly, True)
    self.assertEqual(cssutils.ser.prefs.lineNumbers, False)
    self.assertEqual(cssutils.ser.prefs.lineSeparator, '')
    self.assertEqual(cssutils.ser.prefs.listItemSpacer, '')
    self.assertEqual(cssutils.ser.prefs.omitLastSemicolon, True)
    self.assertEqual(cssutils.ser.prefs.omitLeadingZero, True)
    self.assertEqual(cssutils.ser.prefs.paranthesisSpacer, '')
    self.assertEqual(cssutils.ser.prefs.propertyNameSpacer, '')
    self.assertEqual(cssutils.ser.prefs.selectorCombinatorSpacer, '')
    self.assertEqual(cssutils.ser.prefs.spacer, '')
    self.assertEqual(cssutils.ser.prefs.validOnly, False)
    css = '''
    /*1*/
    @import url(x) tv , print;
    @namespace prefix "uri";
    @namespace unused "unused";
    @media all {}
    @media all {
        a {}
    }
    @media all "name" {
        a { color: red; }
    }
    @page:left {
        left: 0
        }
    a {}
    prefix|x, a + b > c ~ d , b { top : 1px ;
        font-family : arial , 'some'
        }
    @x x;
    '''
    s = cssutils.parseString(css)
    # '@x x;' is an unknown at-rule: kept or dropped per this pref.
    cssutils.ser.prefs.keepUnknownAtRules = True
    self.assertEqual(
        s.cssText,
        '''@import"x"tv,print;@namespace prefix"uri";@media all"name"'''
        '''{a{color:red}}@page :left{left:0}prefix|x,a+b>c~d,b{top:1px;'''
        '''font-family:arial,"some"}@x x;'''.encode(),
    )
    cssutils.ser.prefs.keepUnknownAtRules = False
    self.assertEqual(
        s.cssText,
        '''@import"x"tv,print;@namespace prefix"uri";@media all"name"'''
        '''{a{color:red}}@page :left{left:0}prefix|x,a+b>c~d,b{top:1px;'''
        '''font-family:arial,"some"}'''.encode(),
    )
    # Values: minified drops leading zeros ('0.1' -> '.1') and collapses
    # zero dimensions ('0px' -> '0'), but keeps explicit signs.
    valuetests = {
        ' a a1 a-1 a-1a ': 'a a1 a-1 a-1a',
        'a b 1 c 1em d -1em e': 'a b 1 c 1em d -1em e',
        ' 1em / 5 ': '1em/5',
        '1em/5': '1em/5',
        'a 0 a .0 a 0.0 a -0 a -.0 a -0.0 a +0 a +.0 a +0.0': 'a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0',
        'a 0px a .0px a 0.0px a -0px a -.0px a -0.0px a +0px '
        'a +.0px a +0.0px ': 'a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0',
        'a 1 a .1 a 1.0 a 0.1 a -1 a -.1 a -1.0 a -0.1 a '
        '+1 a +.1 a +1.0': 'a 1 a .1 a 1 a .1 a -1 a -.1 a -1 a -.1 a +1 a +.1 a +1',
        ' url(x) f()': 'url(x) f()',
        '#112233': '#123',
        '#112234': '#112234',
        '#123': '#123',
        '#123 url() f()': '#123 url() f()',
        '1 +2 +3 -4': '1 +2 +3 -4',  # ?
        '0.1 .1 0.1px .1px 0.1% .1% +0.1 +.1 +0.1px +.1px +0.1% '
        '+.1% -0.1 -.1 -0.1px -.1px -0.1% -.1%': '.1 .1 .1px .1px .1% .1% +.1 +.1 +.1px +.1px +.1% +.1% '
        '-.1 -.1 -.1px -.1px -.1% -.1%',
    }
    for test, exp in list(valuetests.items()):
        s = cssutils.parseString('a{x:%s}' % test)
        self.assertEqual(('a{x:%s}' % exp).encode(), s.cssText)
def test_defaultAtKeyword(self):
    "Preferences.defaultAtKeyword"
    # '@im\port' is an escaped but valid spelling of '@import'; the pref
    # controls whether the canonical keyword is emitted instead.
    s = cssutils.parseString('@im\\port "x";')
    self.assertEqual('@import "x";'.encode(), s.cssText)
    cssutils.ser.prefs.defaultAtKeyword = True
    self.assertEqual('@import "x";'.encode(), s.cssText)
    cssutils.ser.prefs.defaultAtKeyword = False
    self.assertEqual('@im\\port "x";'.encode(), s.cssText)
def test_defaultPropertyName(self):
    "Preferences.defaultPropertyName"
    cssutils.ser.prefs.keepAllProperties = False

    # does not actually work as once the name is set it is used also
    # if used with a backslash in it later...
    s = cssutils.parseString(r'a { c\olor: green; }')
    self.assertEqual('a {\n    color: green\n    }'.encode(), s.cssText)
    cssutils.ser.prefs.defaultPropertyName = True
    self.assertEqual('a {\n    color: green\n    }'.encode(), s.cssText)
    cssutils.ser.prefs.defaultPropertyName = False
    self.assertEqual('a {\n    c\\olor: green\n    }'.encode(), s.cssText)

    # With keepAllProperties=False only the last declaration survives.
    s = cssutils.parseString(r'a { color: red; c\olor: green; }')
    self.assertEqual('a {\n    c\\olor: green\n    }'.encode(), s.cssText)
    cssutils.ser.prefs.defaultPropertyName = False
    self.assertEqual('a {\n    c\\olor: green\n    }'.encode(), s.cssText)
    cssutils.ser.prefs.defaultPropertyName = True
    self.assertEqual('a {\n    color: green\n    }'.encode(), s.cssText)
def test_defaultPropertyPriority(self):
    "Preferences.defaultPropertyPriority"
    # '!IM\portant' is an escaped spelling of '!important'; the pref
    # controls whether the canonical priority keyword is emitted.
    css = 'a {\n    color: green !IM\\portant\n    }'
    s = cssutils.parseString(css)
    self.assertEqual(s.cssText, 'a {\n    color: green !important\n    }'.encode())
    cssutils.ser.prefs.defaultPropertyPriority = False
    self.assertEqual(s.cssText, css.encode())
def test_importHrefFormat(self):
    "Preferences.importHrefFormat"
    # r0 was authored with url(...), r1 with a plain string.
    r0 = cssutils.css.CSSImportRule()
    r0.cssText = '@import url("not");'
    r1 = cssutils.css.CSSImportRule()
    r1.cssText = '@import "str";'
    # None (default): each rule keeps its authored form.
    self.assertEqual('@import url(not);', r0.cssText)
    self.assertEqual('@import "str";', r1.cssText)
    cssutils.ser.prefs.importHrefFormat = 'string'
    self.assertEqual('@import "not";', r0.cssText)
    self.assertEqual('@import "str";', r1.cssText)
    cssutils.ser.prefs.importHrefFormat = 'uri'
    self.assertEqual('@import url(not);', r0.cssText)
    self.assertEqual('@import url(str);', r1.cssText)
    # An unrecognized value behaves like None: authored form is kept.
    cssutils.ser.prefs.importHrefFormat = 'not defined'
    self.assertEqual('@import url(not);', r0.cssText)
    self.assertEqual('@import "str";', r1.cssText)
def test_indent(self):
    "Preferences.indent"
    s = cssutils.parseString('a { left: 0 }')
    # Expected serializations for 4-space and 1-space indents (the
    # closing brace is indented too: indentClosingBrace defaults True).
    exp4 = '''a {
    left: 0
    }'''
    exp1 = '''a {
 left: 0
 }'''
    cssutils.ser.prefs.indent = ' '
    self.assertEqual(exp1.encode(), s.cssText)
    cssutils.ser.prefs.indent = 4 * ' '
    self.assertEqual(exp4.encode(), s.cssText)
def test_indentClosingBrace(self):
    "Preferences.indentClosingBrace"
    s = cssutils.parseString('@media all {a {left: 0}} b { top: 0 }')
    # True (default): '}' aligned with the rule body's indent level.
    expT = '''@media all {
    a {
        left: 0
        }
    }
b {
    top: 0
    }'''
    # False: '}' aligned with the rule's opening line.
    expF = '''@media all {
    a {
        left: 0
    }
}
b {
    top: 0
}'''
    cssutils.ser.prefs.useDefaults()
    self.assertEqual(expT.encode(), s.cssText)
    cssutils.ser.prefs.indentClosingBrace = False
    self.assertEqual(expF.encode(), s.cssText)
def test_keepAllProperties(self):
    "Preferences.keepAllProperties"
    # Duplicate declarations of (effectively) the same property, two of
    # them using the escaped name 'c\olor'.
    css = r'''a {
        color: pink;
        color: red;
        c\olor: blue;
        c\olor: green;
        }'''
    s = cssutils.parseString(css)
    # keep only last
    cssutils.ser.prefs.keepAllProperties = False
    self.assertEqual('a {\n    color: green\n    }'.encode(), s.cssText)
    # keep all
    cssutils.ser.prefs.keepAllProperties = True
    self.assertEqual(
        'a {\n    color: pink;\n    color: red;\n    c\\olor: blue;\n    '
        'c\\olor: green\n    }'.encode(),
        s.cssText,
    )
def test_keepComments(self):
    "Preferences.keepComments"
    s = cssutils.parseString('/*1*/ a { /*2*/ }')
    cssutils.ser.prefs.keepComments = False
    # With comments gone the rule is empty and dropped entirely
    # (keepEmptyRules defaults to False).
    self.assertEqual(''.encode(), s.cssText)
    cssutils.ser.prefs.keepEmptyRules = True
    self.assertEqual('a {}'.encode(), s.cssText)
def test_keepEmptyRules(self):
    "Preferences.keepEmptyRules"
    # CSSStyleRule: a truly empty rule, a comment-only rule, and a
    # rule with a declaration.
    css = '''a {}
a {
    /*1*/
    }
a {
    color: red
    }'''
    s = cssutils.parseString(css)
    cssutils.ser.prefs.useDefaults()
    cssutils.ser.prefs.keepEmptyRules = True
    self.assertEqual(css.encode(), s.cssText)
    # False: only the truly empty rule is dropped; a comment-only rule
    # still counts as non-empty while comments are kept.
    cssutils.ser.prefs.keepEmptyRules = False
    self.assertEqual(
        'a {\n    /*1*/\n    }\na {\n    color: red\n    }'.encode(), s.cssText
    )
    cssutils.ser.prefs.keepComments = False
    self.assertEqual('a {\n    color: red\n    }'.encode(), s.cssText)
    # CSSMediaRule: same combinations nested inside @media blocks.
    css = '''@media tv {
    }
@media all {
    /*1*/
    }
@media print {
    a {}
    }
@media print {
    a {
        /*1*/
        }
    }
@media all {
    a {
        color: red
        }
    }'''
    s = cssutils.parseString(css)
    cssutils.ser.prefs.useDefaults()
    cssutils.ser.prefs.keepEmptyRules = True
    # self.assertEqual(css, s.cssText)
    cssutils.ser.prefs.keepEmptyRules = False
    self.assertEqual(
        '''@media all {
    /*1*/
    }
@media print {
    a {
        /*1*/
        }
    }
@media all {
    a {
        color: red
        }
    }'''.encode(),
        s.cssText,
    )
    cssutils.ser.prefs.keepComments = False
    self.assertEqual(
        '''@media all {
    a {
        color: red
        }
    }'''.encode(),
        s.cssText,
    )
def test_keepUnknownAtRules(self):
"Preferences.keepUnknownAtRules"
tests = {
'''@three-dee {
@background-lighting {
azimuth: 30deg;
elevation: 190deg;
}
| |
# Repository: SkandanC/simphony — file: simphony/libraries/sipann.py
# (extraction metadata; was a malformed <reponame>/<filename> tag)
# Copyright © Simphony Project Contributors
# Licensed under the terms of the MIT License
# (see simphony/__init__.py for details)
"""
simphony.libraries.sipann
=========================
This package contains wrappers for models defined in the
SiPANN (Silicon Photonics with Artificial Neural Networks)
project, another project by CamachoLab at BYU. It leverages
machine learning to simulate photonic devices, giving
greater speed and similar accuracy to a full FDTD
simulation.
The wrappers defined here integrate SiPANN models into
Simphony for easier use.
"""
from typing import Callable, Dict, TypeVar, Union
import numpy as np
from SiPANN import comp, scee
from SiPANN.scee_opt import premade_coupler
from simphony import Model
from simphony.tools import freq2wl
class SipannWrapper(Model):
    """Allows wrapping models from SCEE for use in simphony. This class should
    be extended, with each extending class wrapping one model.

    Each extending class should convert parameters passed in
    from meters (which simphony uses) to nanometers (which
    SiPANN uses). Each extending class should also define a
    class-wide field for 'pin_count', equal to the number of
    pins the subcircuit has.

    Note that the wrapped SCEE models cannot have varying
    geometries; such a device can't be cascaded properly.

    Parameters
    -----------
    `model`
        Model from `SiPANN.scee` or `SiPANN.comp` modules, must
        have the 'sparams' method
    `sigmas`
        Dictionary mapping parameters to sigma values for
        Monte-Carlo simulations, values should be in meters. If
        Monte-Carlo simulations are not needed, pass in
        an empty dictionary.
    """

    # Valid frequency band in Hz (roughly the 1.46-1.64 um wavelength
    # window the underlying SiPANN fits were trained on — TODO confirm).
    freq_range = (
        182800279268292.0,
        205337300000000.0,
    )

    def __init__(self, model: TypeVar("M"), sigmas: Dict[str, float], **kwargs) -> None:
        super().__init__(**kwargs)
        self.model = model
        self.sigmas = sigmas
        # catch varying geometries
        args = self.model._clean_args(None)
        if len(args[0]) != 1:
            raise ValueError(
                "You have changing geometries, use in simphony doesn't make sense!"
            )
        # Snapshot of the model's nominal parameters (as stored by SiPANN,
        # i.e. already in nm); restored after each Monte-Carlo evaluation.
        self.params = self.model.__dict__.copy()
        self.rand_params = dict()
        self.regenerate_monte_carlo_parameters()

    def s_parameters(self, freqs: np.ndarray) -> np.ndarray:
        """Get the s-parameters of the SCEE Model.

        Parameters
        ----------
        `freqs`
            Frequency array to calculate s-parameters over, in
            Hz

        Returns
        -------
        `s`
            The s-parameter matrix
        """
        # freq2wl yields meters; SiPANN expects wavelength in nm.
        wl = freq2wl(freqs) * 1e9
        return self.model.sparams(wl)

    def monte_carlo_s_parameters(self, freqs: np.ndarray) -> np.ndarray:
        """Get the s-parameters of the SCEE Model, influenced by noise from
        sigma values.

        Parameters
        ----------
        `freqs`
            Frequency array to calculate s-parameters over, in
            Hz

        Returns
        -------
        `s`
            The s-parameter matrix
        """
        wl = freq2wl(freqs) * 1e9
        # Change to noise params for monte carlo, then change back
        self.model.update(**self.rand_params)
        sparams = self.model.sparams(wl)
        self.model.update(**self.params)
        return sparams

    def regenerate_monte_carlo_parameters(self) -> None:
        """For each sigma value given to the wrapper, will apply noise the
        matching parameter."""
        # sigma is given in meters; scale to nm to match the stored params.
        for param, sigma in self.sigmas.items():
            self.rand_params[param] = np.random.normal(self.params[param], sigma * 1e9)
# Convert gap funcs from meters to nanometers
def convert_func_to_nm(func: Callable[[float], float]) -> Callable[[float], float]:
    """Return a wrapper around *func* whose result is scaled from meters
    to nanometers (multiplied by 1e9)."""

    def scaled(z: float) -> float:
        return func(z) * 1e9

    return scaled
class GapFuncSymmetric(SipannWrapper):
    r"""Symmetric directional coupler, meaning both waveguides are the same
    shape.

    A gap function must describe the shape of the two waveguides, where the
    vertical distance between the waveguides is the return of the gap
    function at every horizontal point from left to right. The derivative
    of the gap function is also required.

    Ports are numbered as:

    | 2---\      /---4 |
    |      ------      |
    |      ------      |
    | 1---/      \---3 |

    Parameters
    ----------
    `width`
        Width of waveguides in meters (valid from 400e-9 to 600e-9)
    `thickness`
        Thickness of waveguides in meters (valid from 180e-9 to 240e-9)
    `gap`
        Gap function along the waveguide, returns meters (must always be
        greater than 100e-9)
    `dgap`
        Derivative of the gap function
    `zmin`
        Real number at which to begin integration of gap function
    `zmax`
        Real number at which to end integration of gap function
    `sw_angle, optional`
        Sidewall angle of waveguide from horizontal in degrees (valid from
        80 to 90, defaults to 90)
    `sigmas, optional`
        Dictionary mapping parameters to sigma values for Monte-Carlo
        simulations, values in meters; defaults to no noise
    """

    pin_count = 4

    def __init__(
        self,
        width: Union[float, np.ndarray],
        thickness: Union[float, np.ndarray],
        gap: Callable[[float], float],
        dgap: Callable[[float], float],
        zmin: float,
        zmax: float,
        sw_angle: Union[float, np.ndarray] = 90,
        sigmas: Union[Dict[str, float], None] = None,
        **kwargs
    ) -> None:
        # Normalize here instead of using a shared mutable default (dict()).
        sigmas = {} if sigmas is None else sigmas
        # SiPANN works in nanometers; simphony callers pass meters.
        super().__init__(
            scee.GapFuncSymmetric(
                width * 1e9,
                thickness * 1e9,
                convert_func_to_nm(gap),
                convert_func_to_nm(dgap),
                zmin,
                zmax,
                sw_angle,
            ),
            sigmas,
            **kwargs
        )
class GapFuncAntiSymmetric(SipannWrapper):
    r"""Antisymmetric directional coupler, meaning both waveguides are
    differently shaped.

    A gap function describing the vertical distance between the two
    waveguides at any horizontal point, and arc lengths from each port to
    the coupling point, describe the shape of the device.

    Ports are numbered as:

    | 2---\      /---4 |
    |      ------      |
    |      ------      |
    | 1---/      \---3 |

    Parameters
    ----------
    `width`
        Width of the waveguide in meters (valid from 400e-9 to 600e-9)
    `thickness`
        Thickness of waveguide in meters (valid from 180e-9 to 240e-9)
    `gap`
        Gap function along the waveguide, returns meters (must always be
        greater than 100e-9)
    `zmin`
        Real number at which to begin integration of gap function
    `zmax`
        Real number at which to end integration of gap function
    `arc1` .. `arc4`
        Arc lengths (meters) from ports 1-4 to the minimum coupling point
    `sw_angle, optional`
        Sidewall angle of waveguide from horizontal in degrees (valid from
        80 to 90, defaults to 90)
    `sigmas, optional`
        Dictionary mapping parameters to sigma values for Monte-Carlo
        simulations, values in meters; defaults to no noise
    """

    pin_count = 4

    def __init__(
        self,
        width: Union[float, np.ndarray],
        thickness: Union[float, np.ndarray],
        gap: Callable[[float], float],
        zmin: float,
        zmax: float,
        arc1: float,
        arc2: float,
        arc3: float,
        arc4: float,
        sw_angle: Union[float, np.ndarray] = 90,
        sigmas: Union[Dict[str, float], None] = None,
        **kwargs
    ) -> None:
        # Normalize here instead of using a shared mutable default (dict()).
        sigmas = {} if sigmas is None else sigmas
        # SiPANN works in nanometers; simphony callers pass meters.
        super().__init__(
            scee.GapFuncAntiSymmetric(
                width * 1e9,
                thickness * 1e9,
                convert_func_to_nm(gap),
                zmin,
                zmax,
                arc1 * 1e9,
                arc2 * 1e9,
                arc3 * 1e9,
                arc4 * 1e9,
                sw_angle,
            ),
            sigmas,
            **kwargs
        )
class HalfRing(SipannWrapper):
    r"""Half of a ring resonator.

    Uses a radius and a gap to describe the shape.

    Ports are numbered as:

    | 2 \     / 4 |
    |    \   /    |
    |     ---     |
    | 1---------3 |

    Parameters
    ----------
    `width`
        Width of the waveguide in meters (valid from 400e-9 to 600e-9)
    `thickness`
        Thickness of waveguide in meters (valid from 180e-9 to 240e-9)
    `radius`
        Distance from center of ring to middle of waveguide, in meters
    `gap`
        Minimum distance from ring waveguide edge to straight waveguide
        edge, in meters (must be greater than 100e-9)
    `sw_angle, optional`
        Sidewall angle of waveguide from horizontal in degrees (valid from
        80 to 90, defaults to 90)
    `sigmas, optional`
        Dictionary mapping parameters to sigma values for Monte-Carlo
        simulations, values in meters; defaults to no noise
    """

    pin_count = 4

    def __init__(
        self,
        width: Union[float, np.ndarray],
        thickness: Union[float, np.ndarray],
        radius: Union[float, np.ndarray],
        gap: Union[float, np.ndarray],
        sw_angle: Union[float, np.ndarray] = 90,
        sigmas: Union[Dict[str, float], None] = None,
        **kwargs
    ) -> None:
        # Normalize here instead of using a shared mutable default (dict()).
        sigmas = {} if sigmas is None else sigmas
        # SiPANN works in nanometers; simphony callers pass meters.
        super().__init__(
            scee.HalfRing(
                width * 1e9, thickness * 1e9, radius * 1e9, gap * 1e9, sw_angle
            ),
            sigmas,
            **kwargs
        )
class HalfRacetrack(SipannWrapper):
    r"""Half of a racetrack ring resonator, similar to the HalfRing class.

    Uses a radius, gap and length to describe the shape of the device.

    Ports are numbered as::

        | 2 \           / 4 |
        |    \         /    |
        |     ---------     |
        | 1-----------------3 |

    Parameters
    ----------
    `width`
        Width of the waveguide in meters (Valid from 400e-9 to
        600e-9)
    `thickness`
        Thickness of waveguide in meters (Valid for 180e-9 to
        240e-9)
    `radius`
        Distance from center of ring to middle of waveguide, in
        meters
    `gap`
        Minimum distance from ring waveguide edge to straight
        waveguide edge, in meters (must be greater than 100e-9)
    `length`
        Length of straight portion of ring waveguide, in meters
    `sw_angle, optional`
        Sidewall angle of waveguide from horizontal in degrees
        (Valid from 80 to 90, defaults to 90)
    `sigmas, optional`
        Dictionary mapping parameters to sigma values for
        Monte-Carlo simulations, values should be in meters.
        Defaults to an empty dictionary.
    """

    pin_count = 4

    def __init__(
        self,
        width: Union[float, np.ndarray],
        thickness: Union[float, np.ndarray],
        radius: Union[float, np.ndarray],
        gap: Union[float, np.ndarray],
        length: Union[float, np.ndarray],
        sw_angle: Union[float, np.ndarray] = 90,
        sigmas: Union[Dict[str, float], None] = None,
        **kwargs
    ) -> None:
        # `sigmas=None` (rather than a mutable `dict()` default) prevents all
        # instances from silently sharing one default dictionary.
        if sigmas is None:
            sigmas = {}
        # The SCEE solver works in nanometers; the public interface is meters.
        super().__init__(
            scee.HalfRacetrack(
                width * 1e9,
                thickness * 1e9,
                radius * 1e9,
                gap * 1e9,
                length * 1e9,
                sw_angle,
            ),
            sigmas,
            **kwargs
        )
class StraightCoupler(SipannWrapper):
"""Straight directional coupler, both | |
# Repository: GuoQiang-Fu/UQpy
"""
The module currently contains the following classes:
* ``SRM``: Class for simulation of Gaussian stochastic processes and random fields using the Spectral Representation
Method.
* ``BSRM``: Class for simulation of third-order non-Gaussian stochastic processes and random fields using the
Bispectral Representation Method.
* ``KLE``: Class for simulation of stochastic processes using the Karhunen-Loeve Expansion.
* ``Translation``: Class for transforming a Gaussian stochastic process to a non-Gaussian stochastic process with
prescribed marginal probability distribution.
* ``InverseTranslation``: Class for identifying an underlying Gaussian stochastic process for a non-Gaussian process with
prescribed marginal probability distribution and autocorrelation function / power spectrum.
"""
import itertools
from scipy.linalg import sqrtm
from scipy.stats import norm
from UQpy.Distributions import *
from UQpy.Utilities import *
# TODO: add non-stationary-methods for all the classes
class SRM:
    r"""
    A class to simulate stochastic processes from a given power spectrum density using the Spectral Representation
    Method. This class can simulate uni-variate, multi-variate, and multi-dimensional stochastic processes. The class
    uses Singular Value Decomposition, as opposed to Cholesky Decomposition, to ensure robust, near-positive definite
    multi-dimensional power spectra.

    **Input:**

    * **nsamples** (`int`):
        Number of samples of the stochastic process to be simulated.
        The ``run`` method is automatically called if `nsamples` is provided. If `nsamples` is not provided, then the
        ``SRM`` object is created but samples are not generated.

    * **power_spectrum** (`list or numpy.ndarray`):
        The discretized power spectrum.
        For uni-variate, one-dimensional processes `power_spectrum` will be `list` or `ndarray` of length
        `number_frequency_intervals`.
        For multi-variate, one-dimensional processes, `power_spectrum` will be a `list` or `ndarray` of size
        (`number_of_variables`, `number_of_variables`, `number_frequency_intervals`).
        For uni-variate, multi-dimensional processes, `power_spectrum` will be a `list` or `ndarray` of size
        (`number_frequency_intervals[0]`, ..., `number_frequency_intervals[number_of_dimensions-1]`)
        For multi-variate, multi-dimensional processes, `power_spectrum` will be a `list` or `ndarray` of size
        (`number_of_variables`, `number_of_variables`, `number_frequency_intervals[0]`, ...
        `number_frequency_intervals[number_of_dimensions-1]`).

    * **time_interval** (`list or numpy.ndarray`):
        Length of time discretizations (:math:`\Delta t`) for each dimension of size `number_of_dimensions`.

    * **frequency_interval** (`list or numpy.ndarray`):
        Length of frequency discretizations (:math:`\Delta \omega`) for each dimension of size
        `number_of_dimensions`.

    * **number_frequency_intervals** (`list or numpy.ndarray`):
        Number of frequency discretizations for each dimension of size `number_of_dimensions`.

    * **number_time_intervals** (`list or numpy.ndarray`):
        Number of time discretizations for each dimension of size `number_of_dimensions`.

    * **random_state** (None or `int` or ``numpy.random.RandomState`` object):
        Random seed used to initialize the pseudo-random number generator. Default is None.
        If an integer is provided, this sets the seed of the global NumPy generator. If a
        ``numpy.random.RandomState`` object is provided, it is used directly for phase-angle generation.

    * **verbose** (Boolean):
        A boolean declaring whether to write text to the terminal.

    **Attributes:**

    * **samples** (`ndarray`):
        Generated samples.
        The shape of the samples is (`nsamples`, `number_of_variables`, `number_time_intervals[0]`, ...,
        `number_time_intervals[number_of_dimensions-1]`)

    * **number_of_dimensions** (`int`):
        The dimensionality of the stochastic process.

    * **number_of_variables** (`int`):
        Number of variables in the stochastic process.

    * **phi** (`ndarray`):
        The random phase angles used in the simulation of the stochastic process.
        The shape of the phase angles is (`nsamples`, `number_of_variables`, `number_frequency_intervals[0]`, ...,
        `number_frequency_intervals[number_of_dimensions-1]`)

    **Methods**
    """

    def __init__(self, nsamples, power_spectrum, time_interval, frequency_interval, number_time_intervals,
                 number_frequency_intervals, random_state=None, verbose=False):
        # Coerce to ndarray up-front: the docstring allows `list` input, but the
        # uni/multi-variate dispatch below relies on the `.shape` attribute.
        self.power_spectrum = np.asarray(power_spectrum)
        # Scalars describe a one-dimensional process; wrap them so all four
        # discretization parameters are treated uniformly as sequences.
        # Plain ints are accepted alongside floats for convenience.
        if isinstance(time_interval, (int, float)) and isinstance(frequency_interval, (int, float)) and \
                isinstance(number_time_intervals, int) and isinstance(number_frequency_intervals, int):
            time_interval = [time_interval]
            frequency_interval = [frequency_interval]
            number_time_intervals = [number_time_intervals]
            number_frequency_intervals = [number_frequency_intervals]
        self.time_interval = np.array(time_interval)
        self.frequency_interval = np.array(frequency_interval)
        self.number_time_intervals = np.array(number_time_intervals)
        self.number_frequency_intervals = np.array(number_frequency_intervals)
        self.nsamples = nsamples

        # Error check: the time step must not exceed the limit implied by the
        # frequency discretization, otherwise aliasing would corrupt the samples.
        t_u = 2 * np.pi / (2 * self.number_frequency_intervals * self.frequency_interval)
        if (self.time_interval > t_u).any():
            raise RuntimeError('UQpy: Aliasing might occur during execution')

        self.verbose = verbose

        self.random_state = random_state
        if isinstance(self.random_state, int):
            np.random.seed(self.random_state)
        elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
            raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')

        self.samples = None
        self.number_of_variables = None
        self.number_of_dimensions = len(self.number_frequency_intervals)
        self.phi = None

        # A power spectrum with exactly `number_of_dimensions` axes is
        # uni-variate; two extra leading axes indicate a multi-variate process.
        if self.number_of_dimensions == len(self.power_spectrum.shape):
            self.case = 'uni'
        else:
            self.number_of_variables = self.power_spectrum.shape[0]
            self.case = 'multi'

        # Run Spectral Representation Method
        if self.nsamples is not None:
            self.run(nsamples=self.nsamples)

    def run(self, nsamples):
        """
        Execute the random sampling in the ``SRM`` class.

        The ``run`` method is the function that performs random sampling in the ``SRM`` class. If `nsamples` is
        provided when the ``SRM`` object is defined, the ``run`` method is automatically called. The user may also
        call the ``run`` method directly to generate samples. The ``run`` method of the ``SRM`` class can be invoked
        many times and each time the generated samples are appended to the existing samples.

        **Input:**

        * **nsamples** (`int`):
            Number of samples of the stochastic process to be simulated in this call.
            If the ``run`` method is invoked multiple times, the newly generated samples will be appended to the
            existing samples.

        **Output/Returns:**

        The ``run`` method has no returns, although it creates and/or appends the `samples` attribute of the ``SRM``
        class.
        """
        if nsamples is None:
            raise ValueError('UQpy: Stochastic Process: Number of samples must be defined.')
        if not isinstance(nsamples, int):
            raise ValueError('UQpy: Stochastic Process: nsamples should be an integer.')

        if self.verbose:
            print('UQpy: Stochastic Process: Running Spectral Representation Method.')

        # Honor a user-supplied RandomState; otherwise fall back to the global
        # generator (seeded in __init__ when an integer seed was given).
        rng = self.random_state if isinstance(self.random_state, np.random.RandomState) else np.random

        samples = None
        phi = None
        if self.case == 'uni':
            if self.verbose:
                print('UQpy: Stochastic Process: Starting simulation of uni-variate Stochastic Processes.')
                print('UQpy: The number of dimensions is :', self.number_of_dimensions)
            # Use the `nsamples` argument (not self.nsamples) so that repeated
            # calls generate exactly the number of samples requested each time.
            phi = rng.uniform(
                size=np.append(nsamples, np.ones(self.number_of_dimensions, dtype=np.int32)
                               * self.number_frequency_intervals)) * 2 * np.pi
            samples = self._simulate_uni(phi)

        elif self.case == 'multi':
            if self.verbose:
                print('UQpy: Stochastic Process: Starting simulation of multi-variate Stochastic Processes.')
                print('UQpy: Stochastic Process: The number of variables is :', self.number_of_variables)
                print('UQpy: Stochastic Process: The number of dimensions is :', self.number_of_dimensions)
            phi = rng.uniform(size=np.append(nsamples, np.append(
                np.ones(self.number_of_dimensions, dtype=np.int32) * self.number_frequency_intervals,
                self.number_of_variables))) * 2 * np.pi
            samples = self._simulate_multi(phi)

        if self.samples is None:
            self.samples = samples
            self.phi = phi
        else:
            # Append newly generated realizations along the sample axis.
            self.samples = np.concatenate((self.samples, samples), axis=0)
            self.phi = np.concatenate((self.phi, phi), axis=0)

        if self.verbose:
            print('UQpy: Stochastic Process: Spectral Representation Method Complete.')

    def _simulate_uni(self, phi):
        # Classical SRM: random-phase Fourier coefficients scaled by the
        # discretized power spectrum, transformed to the time domain via FFT.
        fourier_coefficient = np.exp(phi * 1.0j) * np.sqrt(
            2 ** (self.number_of_dimensions + 1) * self.power_spectrum * np.prod(self.frequency_interval))
        samples = np.fft.fftn(fourier_coefficient, self.number_time_intervals)
        samples = np.real(samples)
        # Insert a singleton variable axis so uni- and multi-variate samples
        # share the shape (nsamples, number_of_variables, ...).
        samples = samples[:, np.newaxis]
        return samples

    def _simulate_multi(self, phi):
        # Move the (variable, variable) axes last so linalg routines broadcast
        # over the frequency grid.
        power_spectrum = np.einsum('ij...->...ij', self.power_spectrum)
        coefficient = np.sqrt(2 ** (self.number_of_dimensions + 1)) * np.sqrt(np.prod(self.frequency_interval))
        # SVD (rather than Cholesky) keeps the decomposition robust for
        # near-positive-definite cross-spectral matrices.
        u, s, v = np.linalg.svd(power_spectrum)
        power_spectrum_decomposed = np.einsum('...ij,...j->...ij', u, np.sqrt(s))
        fourier_coefficient = coefficient * np.einsum('...ij,n...j -> n...i',
                                                      power_spectrum_decomposed, np.exp(phi * 1.0j))
        # NaNs can appear where the spectrum is exactly zero; zero them out.
        fourier_coefficient[np.isnan(fourier_coefficient)] = 0
        samples = np.real(np.fft.fftn(fourier_coefficient, s=self.number_time_intervals,
                                      axes=tuple(np.arange(1, 1 + self.number_of_dimensions))))
        # Reorder to (nsamples, number_of_variables, time axes...).
        samples = np.einsum('n...m->nm...', samples)
        return samples
class BSRM:
"""
A class to simulate non-Gaussian stochastic processes from a given power spectrum and bispectrum based on the 3-rd
order Spectral Representation Method. This class can simulate uni-variate, one-dimensional and multi-dimensional
stochastic processes.
**Input:**
* **nsamples** (`int`):
Number of samples of the stochastic process to be simulated.
The ``run`` method is automatically called if `nsamples` is provided. If `nsamples` is not provided, then the
``BSRM`` object is created but samples are not generated.
* **power_spectrum** (`list or numpy.ndarray`):
The discretized power spectrum.
For uni-variate, one-dimensional processes `power_spectrum` will be `list` or `ndarray` of length
`number_frequency_intervals`.
For uni-variate, multi-dimensional processes, `power_spectrum` will be a `list` or `ndarray` of size
(`number_frequency_intervals[0]`, ..., `number_frequency_intervals[number_of_dimensions-1]`)
* **bispectrum** (`list or numpy.ndarray`):
The prescribed bispectrum.
For uni-variate, one-dimensional processes, `bispectrum` will be a `list` or `ndarray` of size
(`number_frequency_intervals`, `number_frequency_intervals`)
For uni-variate, multi-dimensional processes, `bispectrum` will be a `list` or `ndarray` of size
(`number_frequency_intervals[0]`, ..., `number_frequency_intervals[number_of_dimensions-1]`,
`number_frequency_intervals[0]`, ..., `number_frequency_intervals[number_of_dimensions-1]`)
* **time_interval** (`list or numpy.ndarray`):
Length of time discretizations (:math:`\Delta t`) for each dimension of size `number_of_dimensions`.
* **frequency_interval** (`list or numpy.ndarray`):
Length of frequency discretizations (:math:`\Delta \omega`) for each dimension of size `number_of_dimensions`.
* **number_frequency_intervals** (`list or numpy.ndarray`):
Number of frequency discretizations for each dimension of size `number_of_dimensions`.
* **number_time_intervals** (`list or numpy.ndarray`):
Number of time discretizations for each dimensions of size `number_of_dimensions`.
* **random_state** (None or `int` or ``numpy.random.RandomState`` object):
Random seed used to initialize the pseudo-random number generator. Default is None.
If an integer is provided, this sets the seed for an object of ``numpy.random.RandomState``. Otherwise, the
object itself can be passed directly.
* **verbose** (Boolean):
A boolean declaring whether to write text to the terminal.
**Attributes:**
* **samples** (`ndarray`):
Generated samples.
The shape of the samples is (`nsamples`, `number_of_variables`, `number_time_intervals[0]`, ...,
`number_time_intervals[number_of_dimensions-1]`)
* **number_of_dimensions** (`int`):
The dimensionality of the stochastic process.
* **number_of_variables** (`int`):
Number of | |
"serif"
elif "mono" in family or family in FONTS_MONO:
family = "monospace"
else:
family = "serif"
matches = self.fonts.get(family, self.fonts.get("seif"))
if matches is None:
return None
# find style
style = style or FONT_STYLE_NORMAL
matches_out = [match for match in matches if match.style == style]
if not matches_out:
matches_out = [match for match in matches if match.style == FONT_STYLE_NORMAL]
if not matches_out:
return None
matches = matches_out
# find weight
weight = weight or 400
matches = list(sorted(matches, key=lambda f: abs(f.weight - weight)))
return matches[0]
# ------------------------------------------------------------------------------
# SVG
# ------------------------------------------------------------------------------
# Gradient/pattern/mask coordinate-space identifiers from the SVG spec.
SVG_UNITS_USER = "userSpaceOnUse"
SVG_UNITS_BBOX = "objectBoundingBox"
# Hex color, e.g. "#fff" or "ffcc00" (leading "#" optional, anchored at end).
COLOR_RE = re.compile("#?([0-9A-Fa-f]+)$")
# Functional color notation: rgb(...), rgba(...) or hsl(...).
COLOR_RGB_RE = re.compile(r"\s*(rgba?|hsl)\(([^\)]+)\)\s*")
# A single SVG transform command, e.g. "translate(10, 20)" or "matrix(...)".
TRANSFORM_RE = re.compile(r"\s*(translate|scale|rotate|skewX|skewY|matrix)\s*\(([^\)]+)\)\s*")
# Presentation attributes inherited from parent to child elements, mapped to
# their initial (default) values; None means "no default value".
SVG_INHERIT = {
    "color": None,
    "fill": "black",
    "fill-rule": PATH_FILL_NONZERO,
    "fill-opacity": None,
    "stroke": None,
    "stroke-opacity": None,
    "stroke-width": "1",
    "stroke-linecap": STROKE_CAP_BUTT,
    "stroke-linejoin": STROKE_JOIN_MITER,
    "stroke-miterlimit": "4",
    "font-family": "serif",
    "font-size": "12",
    "font-weight": "400",
    "text-anchor": None,
}
# fmt: off
# CSS/SVG named colors mapped to their hex values.
SVG_COLORS = {
    "aliceblue": "#f0f8ff", "antiquewhite": "#faebd7", "aqua": "#00ffff",
    "aquamarine": "#7fffd4","azure": "#f0ffff", "beige": "#f5f5dc",
    "bisque": "#ffe4c4", "black": "#000000", "blanchedalmond": "#ffebcd",
    "blue": "#0000ff", "blueviolet": "#8a2be2", "brown": "#a52a2a",
    "burlywood": "#deb887", "cadetblue": "#5f9ea0", "chartreuse": "#7fff00",
    "chocolate": "#d2691e", "coral": "#ff7f50", "cornflowerblue": "#6495ed",
    "cornsilk": "#fff8dc", "crimson": "#dc143c", "cyan": "#00ffff",
    "darkblue": "#00008b", "darkcyan": "#008b8b", "darkgoldenrod": "#b8860b",
    "darkgray": "#a9a9a9", "darkgrey": "#a9a9a9", "darkgreen": "#006400",
    "darkkhaki": "#bdb76b", "darkmagenta": "#8b008b", "darkolivegreen": "#556b2f",
    "darkorange": "#ff8c00", "darkorchid": "#9932cc", "darkred": "#8b0000",
    "darksalmon": "#e9967a", "darkseagreen": "#8fbc8f", "darkslateblue": "#483d8b",
    "darkslategray": "#2f4f4f", "darkslategrey": "#2f4f4f",
    "darkturquoise": "#00ced1", "darkviolet": "#9400d3", "deeppink": "#ff1493",
    "deepskyblue": "#00bfff", "dimgray": "#696969", "dimgrey": "#696969",
    "dodgerblue": "#1e90ff", "firebrick": "#b22222", "floralwhite": "#fffaf0",
    "forestgreen": "#228b22", "fuchsia": "#ff00ff", "gainsboro": "#dcdcdc",
    "ghostwhite": "#f8f8ff", "gold": "#ffd700", "goldenrod": "#daa520",
    "gray": "#808080", "grey": "#808080", "green": "#008000",
    "greenyellow": "#adff2f", "honeydew": "#f0fff0", "hotpink": "#ff69b4",
    "indianred": "#cd5c5c", "indigo": "#4b0082", "ivory": "#fffff0",
    "khaki": "#f0e68c", "lavender": "#e6e6fa", "lavenderblush": "#fff0f5",
    "lawngreen": "#7cfc00", "lemonchiffon": "#fffacd", "lightblue": "#add8e6",
    "lightcoral": "#f08080", "lightcyan": "#e0ffff",
    "lightgoldenrodyellow": "#fafad2", "lightgray": "#d3d3d3",
    "lightgrey": "#d3d3d3", "lightgreen": "#90ee90", "lightpink": "#ffb6c1",
    "lightsalmon": "#ffa07a", "lightseagreen": "#20b2aa", "lightskyblue": "#87cefa",
    "lightslategray": "#778899", "lightslategrey": "#778899",
    "lightsteelblue": "#b0c4de", "lightyellow": "#ffffe0", "lime": "#00ff00",
    "limegreen": "#32cd32", "linen": "#faf0e6", "magenta": "#ff00ff",
    "maroon": "#800000", "mediumaquamarine": "#66cdaa", "mediumblue": "#0000cd",
    "mediumorchid": "#ba55d3", "mediumpurple": "#9370db",
    "mediumseagreen": "#3cb371", "mediumslateblue": "#7b68ee",
    "mediumspringgreen": "#00fa9a", "mediumturquoise": "#48d1cc",
    "mediumvioletred": "#c71585", "midnightblue": "#191970", "mintcream": "#f5fffa",
    "mistyrose": "#ffe4e1", "moccasin": "#ffe4b5", "navajowhite": "#ffdead",
    "navy": "#000080", "oldlace": "#fdf5e6", "olive": "#808000",
    "olivedrab": "#6b8e23", "orange": "#ffa500", "orangered": "#ff4500",
    "orchid": "#da70d6", "palegoldenrod": "#eee8aa", "palegreen": "#98fb98",
    "paleturquoise": "#afeeee", "palevioletred": "#db7093", "papayawhip": "#ffefd5",
    "peachpuff": "#ffdab9", "peru": "#cd853f", "pink": "#ffc0cb", "plum": "#dda0dd",
    "powderblue": "#b0e0e6", "purple": "#800080", "rebeccapurple": "#663399",
    "red": "#ff0000", "rosybrown": "#bc8f8f", "royalblue": "#4169e1",
    "saddlebrown": "#8b4513", "salmon": "#fa8072", "sandybrown": "#f4a460",
    "seagreen": "#2e8b57", "seashell": "#fff5ee", "sienna": "#a0522d",
    "silver": "#c0c0c0", "skyblue": "#87ceeb", "slateblue": "#6a5acd",
    "slategray": "#708090", "slategrey": "#708090", "snow": "#fffafa",
    "springgreen": "#00ff7f", "steelblue": "#4682b4", "tan": "#d2b48c",
    "teal": "#008080", "thistle": "#d8bfd8", "tomato": "#ff6347",
    "turquoise": "#40e0d0", "violet": "#ee82ee", "wheat": "#f5deb3",
    "white": "#ffffff", "whitesmoke": "#f5f5f5", "yellow": "#ffff00",
    "yellowgreen": "#9acd32",
}
# fmt: on
def svg_scene(file, fg=None, width=None, fonts=None):
"""Load SVG scene from a file object"""
fonts = FontsDB() if fonts is None else fonts
def svg_scene_rec(element, inherit, top=False, width=None):
tag = element.tag.split("}")[-1]
attrs = svg_attrs(element.attrib, inherit)
inherit = {k: v for k, v in attrs.items() if k in SVG_INHERIT}
group = []
if tag == "svg":
for child in element:
group.extend(svg_scene_rec(child, inherit))
if not group:
return group
scene = Scene.group(group)
# determine size and transform
x = svg_size(attrs.get("x", "0"))
y = svg_size(attrs.get("y", "0"))
w = svg_size(attrs.get("width"))
h = svg_size(attrs.get("height"))
# override height
viewbox = None
if w is not None and h is not None:
viewbox = [0, 0, w, h]
if width is not None:
if w is not None and h is not None:
w, h = width, int(width * h / w)
else:
w, h = width, None
# viewbox transform
viewbox = svg_floats(attrs.get("viewBox"), 4, 4) or viewbox
if viewbox is not None:
transform = svg_viewbox_transform((x, y, w, h), viewbox)
scene = scene.transform(transform)
_vx, _vy, vw, vh = viewbox
if h is None and w is None:
h, w = vh, vw
elif h is None:
h = vh * w / vw
elif w is None:
w = vw * h / vh
elif x > 0 and y > 0:
scene = scene.transform(Transform().translate(x, y))
if w is not None and h is not None:
if top:
nonlocal size
size = (w, h)
else:
clip = [
(PATH_LINE, [[x, y], [x + w, y]]),
(PATH_LINE, [[x + w, y], [x + w, y + h]]),
(PATH_LINE, [[x + w, y + h], [x, y + h]]),
(PATH_CLOSED, [[x, y + h], [x, y]]),
]
scene = scene.clip(Scene.fill(Path([clip]), np.ones(4)))
group = [scene]
elif tag == "path":
group.extend(svg_path(attrs, ids, fg))
elif tag == "g":
for child in element:
group.extend(svg_scene_rec(child, inherit))
elif tag == "defs":
for child in element:
svg_scene_rec(child, inherit)
elif tag in ("linearGradient", "radialGradient"):
id = attrs.get("id")
if id is not None:
is_linear = tag == "linearGradient"
ids[id] = svg_grad(element, None, is_linear)
return []
elif tag == "clipPath":
id = attrs.get("id")
inherit.setdefault("fill-rule", attrs.get("clip-rule"))
if id is not None:
for child in element:
group.extend(svg_scene_rec(child, inherit))
if group:
scene, group = Scene.group(group), []
transform = svg_transform(attrs.get("transform"))
if transform is not None:
scene = scene.transform(transform)
bbox_units = attrs.get("clipPathUnits")
ids[id] = (scene, bbox_units == SVG_UNITS_BBOX)
return []
elif tag == "mask":
id = attrs.get("id")
if id is not None:
for child in element:
group.extend(svg_scene_rec(child, inherit))
scene, group = Scene.group(group), []
transform = svg_transform(attrs.get("transform"))
if transform is not None:
scene = scene.transform(transform)
bbox_units = attrs.get("maskContentUnits")
ids[id] = (scene, bbox_units == SVG_UNITS_BBOX)
elif tag == "filter":
id = attrs.get("id")
if id is not None:
ids[id] = svg_filter(attrs, element)
elif tag == "pattern":
id = attrs.get("id")
if id is not None:
x = svg_float(attrs.get("x", "0"))
y = svg_float(attrs.get("y", "0"))
width = svg_float(attrs.get("width"))
height = svg_float(attrs.get("height"))
if width is None or height is None:
return []
for child in element:
group.extend(svg_scene_rec(child, inherit))
scene, group = Scene.group(group), []
scene_view_box = svg_floats(attrs.get("viewBox"), 4, 4)
scene_bbox_units = (
attrs.get("patternContentUnits", SVG_UNITS_USER) == SVG_UNITS_BBOX
)
# view_box = svg_floats(attrs.get("viewBox"), 4, 4)
# if view_box is not None:
# scene_transform = svg_viewbox_transform((x, y, width, height), view_box)
# scene = scene.transform(scene_transform)
# scene_bbox_units = False
transform = svg_transform(attrs.get("patternTransform"))
if transform is None:
transform = Transform()
bbox_units = attrs.get("patternUnits", SVG_UNITS_BBOX) == SVG_UNITS_BBOX
ids[id] = Pattern(
scene,
scene_bbox_units,
scene_view_box,
x,
y,
width,
height,
transform,
bbox_units,
)
# shapes
elif tag == "rect":
x = svg_size(attrs.pop("x", "0"))
y = svg_size(attrs.pop("y", "0"))
width = svg_size(attrs.pop("width"))
height = svg_size(attrs.pop("height"))
rx = svg_size(attrs.get("rx"))
ry = svg_size(attrs.get("ry"))
attrs["d"] = svg_rect_to_path(x, y, width, height, rx, ry)
group.extend(svg_path(attrs, ids, fg))
elif tag == "circle":
cx = svg_size(attrs.pop("cx", "0"))
cy = svg_size(attrs.pop("cy", "0"))
r = svg_size(attrs.pop("r"))
attrs["d"] = svg_ellipse_to_path(cx, cy, r, r)
group.extend(svg_path(attrs, ids, fg))
elif tag == "ellipse":
cx = svg_size(attrs.pop("cx", "0"))
cy = svg_size(attrs.pop("cy", "0"))
rx = svg_size(attrs.pop("rx"))
ry = svg_size(attrs.pop("ry"))
attrs["d"] = svg_ellipse_to_path(cx, cy, rx, ry)
group.extend(svg_path(attrs, ids, fg))
elif tag == "polygon":
points = attrs.pop("points")
attrs["d"] = f"M{points}z"
group.extend(svg_path(attrs, ids, fg))
elif tag == "polyline":
points = attrs.pop("points")
attrs["d"] = f"M{points}"
group.extend(svg_path(attrs, ids, fg))
elif tag == "line":
x1 = svg_size(attrs.pop("x1", "0"))
y1 = svg_size(attrs.pop("y1", "0"))
x2 = svg_size(attrs.pop("x2", "0"))
y2 = svg_size(attrs.pop("y2", "0"))
attrs["d"] = f"M{x1},{y1} {x2},{y2}"
group.extend(svg_path(attrs, ids, fg))
elif tag in ("title", "desc", "metadata"):
return []
elif tag == "font":
font = svg_font(element)
id = attrs.get("id")
fonts.register(font, id)
if id is not None:
ids[id] = font
return []
elif tag == "text":
group.extend(svg_text(element, attrs, fonts, ids, fg))
elif tag == "use":
x = attrs.get("x")
y = attrs.get("y")
if x is not None or y is not None:
attrs["transform"] = attrs.get("transform", "") + f" translate({x}, {y})"
href = attrs.get("href")
if href is None:
for key, value in attrs.items():
if key.endswith("}href"):
href = value
break
if href and href.startswith("#"):
item = ids.get(href[1:])
if isinstance(item, Scene):
group.append(item)
else:
warnings.warn(f"unsupported element type: {tag}")
if not group:
return group
filter_name = attrs.get("filter")
if filter_name is not None:
flt = svg_url(filter_name, ids)
if not isinstance(flt, Filter):
warnings.warn(f"not a filter referenced {filter_name}: {type(flt)}")
else:
group = [Scene.group(group).filter(flt)]
opacity = svg_float(attrs.get("opacity"))
if opacity is not None:
# create isolated group if opacity is present
group = [Scene.group(group).opacity(opacity)]
clip_path = attrs.get("clip-path")
if clip_path is not None:
clip = svg_url(clip_path, ids)
if clip is None or not isinstance(clip, tuple):
warnings.warn(f"clip path expected {clip_path}: {type(clip)}")
else:
clip, bbox_units = clip
group = [Scene.group(group).clip(clip, bbox_units)]
mask_url = attrs.get("mask")
if mask_url is not None:
mask = svg_url(mask_url, ids)
if mask is None or not isinstance(mask, | |
#
#
# Public Archive of Days Since Timers
# User Account Model Helper Unit Tests
#
#
from django.test import TestCase
from django.utils import timezone
from padsweb.helpers import PADSUserHelper, PADSWriteUserHelper
from padsweb.models import PADSUser
from padsweb.settings import defaults
import secrets # For token_urlsafe()
settings = defaults
#
# Shared Test Data
#
blank_inputs = {
# These inputs are in a dictionary, so that they can be cycled through
# automatically with the dict.values() iterable. The keys are an
# alternative to comments.
'newlines_only_8' : '\n\n\n\n\n\n\n\n',
'whitespace_mix' : '\f\n\r\t\v ',
'none' : None,
'spaces_only_8' : ' ',
'tabs_only_8' : '\t\t\t\t\t\t\t\t',
'zero_length_string' : '',
}
#
# User Account Retrieval Helper Tests
#
class PADSUserHelperCheckPasswordTests(TestCase):
    """Unit tests for PADSUserHelper.check_password()"""

    @classmethod
    def setUpTestData(cls):
        # Register an account whose password the tests will check against
        cls.username = 'test-dave-cp'
        cls.password = ' <PASSWORD>'
        cls.write_user_helper = PADSWriteUserHelper()
        cls.write_user_helper.new(cls.username, cls.password)
        # Point a read-only helper at the freshly created account
        cls.read_user_helper = PADSUserHelper()
        cls.read_user_helper.set_user_id_by_username(cls.username)

    def test_check_password_valid(self):
        result = self.read_user_helper.check_password(self.password)
        self.assertTrue(result, 'The correct password must validate')

    def test_check_password_wrong_password(self):
        # A random token is effectively guaranteed not to match
        bad_password = secrets.token_urlsafe(
            settings['message_max_length_short'])
        result = self.read_user_helper.check_password(bad_password)
        self.assertFalse(result,
            'An incorrect password must fail to validate')

    def test_check_password_blank_input(self):
        # Every flavor of blank input must be rejected
        for blank in blank_inputs.values():
            result = self.read_user_helper.check_password(blank)
            self.assertFalse(result,
                'A blank password must fail to validate')

    def test_check_password_no_user(self):
        # A helper never bound to a user has nothing to validate against
        orphan_helper = PADSUserHelper()
        result = orphan_helper.check_password(self.password)
        self.assertFalse(result,
            'A User Helper with no User must fail to validate passwords')
class PADSUserHelperCheckQlPasswordTests(TestCase):
    """Unit tests for PADSUserHelper.check_ql_password()"""

    @classmethod
    def setUpTestData(cls):
        # Sign Up Quick List User. Calling new() with no arguments creates a
        # Quick List account and returns its generated password.
        write_user_helper = PADSWriteUserHelper()
        cls.ql_password = write_user_helper.new()
        # Prepare User Helper for Test QL User; the user id is embedded in the
        # first segment of the Quick List password.
        cls.read_user_helper = PADSUserHelper()
        cls.ql_user_id = cls.read_user_helper.split_ql_password(
            cls.ql_password)[0]
        cls.read_user_helper.set_user_id(cls.ql_user_id)

    def test_check_ql_password_valid(self):
        # Assertion
        check = self.read_user_helper.check_ql_password(self.ql_password)
        self.assertTrue(check,'A valid Quick List password must validate')

    def test_check_ql_password_wrong_password(self):
        # Assertion
        ql_password_wrong = secrets.token_urlsafe(
            settings['message_max_length_short'])
        check = self.read_user_helper.check_ql_password(ql_password_wrong)
        self.assertFalse(check,
            'An incorrect Quick List password must fail to validate')

    def test_check_ql_password_blank_input(self):
        # Multi-Assertion. Iterate the *values* of blank_inputs — the keys are
        # descriptive labels, not blank strings.
        for i in blank_inputs.values():
            check = self.read_user_helper.check_ql_password(i)
            self.assertFalse(check,
                'A blank Quick List password must fail to validate')

    def test_check_ql_password_no_user(self):
        read_user_helper_orphan = PADSUserHelper()
        # Assertions
        check = read_user_helper_orphan.check_ql_password(self.ql_password)
        self.assertFalse(check,
            'A User Helper with no User must fail to validate passwords')
class PADSUserHelperSetUserIdByUsernameTests(TestCase):
    """Unit tests for PADSUserHelper.set_user_id_by_username()"""

    @classmethod
    def setUpTestData(cls):
        # Sign up test User
        cls.username = 'test-dave-suiu'
        cls.password = ' <PASSWORD>'
        cls.write_user_helper = PADSWriteUserHelper()
        cls.write_user_helper.new(cls.username, cls.password)
        cls.user = PADSUser.objects.get(nickname_short=cls.username)

    def test_set_user_id_by_username_valid(self):
        read_user_helper = PADSUserHelper()
        read_user_helper.set_user_id_by_username(self.username)
        # Assertion (assertEqual: assertEquals is a deprecated unittest alias)
        self.assertEqual(read_user_helper.user_id, self.user.id,
            'A User Helper must be able to be assigned to a registered user')

    def test_set_user_id_by_username_invalid(self):
        read_user_helper_i = PADSUserHelper()
        # A random token is effectively guaranteed not to be a real username
        random_username = secrets.token_urlsafe(
            settings['message_max_length_short'])
        read_user_helper_i.set_user_id_by_username(random_username)
        # Assertions
        self.assertEqual(read_user_helper_i.user_id,
            settings['user_id_signed_out'],
            'A User Helper cannot be assigned a User without a valid username')
        self.assertFalse(read_user_helper_i.user_is_present(),
            'User Helper must indicate failure setting invalid user by username')

    def test_set_user_id_by_username_blank_input(self):
        read_user_helper_i2 = PADSUserHelper()
        # Multi-Assertion. Iterate the *values* of blank_inputs — the keys are
        # descriptive labels, not blank strings.
        for i in blank_inputs.values():
            read_user_helper_i2.set_user_id_by_username(i)
            self.assertEqual(read_user_helper_i2.user_id,
                settings['user_id_signed_out'],
                'User Helper cannot be assigened a User following blank input')
            self.assertFalse(read_user_helper_i2.user_is_present(),
                'User Helper must indicate it failed to set username')
#
# User Account Creation and Configuration Helper Tests
#
# Write Helper Tests
# TODO: PADSWriteUserHelper.generate_ql_password()
# TODO: PADSWriteUserHelper.merge_users_by_id() tests (valid, invalid ids)
class PADSWriteUserHelperDeleteTests(TestCase):
    """Unit tests for PADSWriteUserHelper.delete()"""

    @classmethod
    def setUpTestData(cls):
        # Sign up Test Users
        cls.username_a = 'test_jess_d'
        cls.password_a = ' <PASSWORD>'
        cls.username_b = 'not_jess'
        # Both users unknowingly have the same password
        cls.password_b = cls.password_a
        cls.write_user_helper = PADSWriteUserHelper()
        cls.write_user_helper.new(cls.username_a, cls.password_a)
        cls.write_user_helper.new(cls.username_b, cls.password_b)

    def test_delete_valid(self):
        user_a = PADSUser.objects.get(nickname_short=self.username_a)
        self.write_user_helper.set_user_id(user_a.id)
        op_result = self.write_user_helper.delete()
        # Assertions
        user_a_is_in = PADSUser.objects.filter(
            nickname_short=self.username_a).exists()
        user_b_is_in = PADSUser.objects.filter(
            nickname_short=self.username_b).exists()
        self.assertFalse(user_a_is_in,
            'Test User must no longer exist after deletion')
        self.assertTrue(user_b_is_in,
            'User not targeted for deletion must remain in database')
        self.assertTrue(op_result,
            'Helper must indicate success of User deletion')

    def test_delete_no_such_user(self):
        write_user_helper_x = PADSWriteUserHelper(-99999) # Invalid user id
        op_result = write_user_helper_x.delete()
        # Assertions
        user_a_is_in = PADSUser.objects.filter(
            nickname_short=self.username_a).exists()
        user_b_is_in = PADSUser.objects.filter(
            nickname_short=self.username_b).exists()
        self.assertTrue(user_a_is_in,
            'Test User must remain in database after failed deletion')
        self.assertTrue(user_b_is_in,
            'Other User must remain in database after failed deletion')
        self.assertFalse(op_result,
            'Helper must indicate failure of User deletion')
class PADSWriteUserHelperNewTests(TestCase):
    """Unit tests for PADSWriteUserHelper.new() which creates user accounts"""
    @classmethod
    def setUpTestData(cls):
        cls.write_user_helper = PADSWriteUserHelper()
        cls.read_user_helper = PADSUserHelper()
    def test_new_valid(self):
        """A valid username/password pair creates a User in the database."""
        username = 'test_jess_nv'
        # Password deliberately contains special characters
        password = 'test-password-nv!@#$()&;'
        self.write_user_helper.new(username, password)
        # Assertions
        user_test = PADSUser.objects.get(nickname_short=username)
        # assertEqual: assertEquals is a deprecated alias
        self.assertEqual(user_test.nickname_short, username,
            'User must be in database after account creation')
    def test_new_ql_valid(self):
        """new() without arguments creates a Quick List user and returns its password."""
        ql_password = self.write_user_helper.new()  # Sign up to get password
        ql_user_id = self.read_user_helper.split_ql_password(ql_password)[0]
        # Assertions
        ql_user_test = PADSUser.objects.get(pk=ql_user_id)
        self.assertEqual(ql_user_test.id, ql_user_id,
            'Quick List user must be in database with correct id after creation')
    def test_new_blank_usernames(self):
        """Every flavour of blank username must be rejected."""
        password = 'test-password-bu'
        usernames = blank_inputs.values()
        # Multi-Assertion
        for u in usernames:
            op_result = self.write_user_helper.new(u, password)
            user_is_in = PADSUser.objects.filter(nickname_short=u).exists()
            self.assertIsNone(op_result,
                'Helper must fail creating User with blank username')
            self.assertFalse(user_is_in,
                'User with blank username must not be in database')
    def test_new_fake_ql_user(self):
        """Usernames imitating the reserved Quick List prefix must be rejected."""
        username = ''.join( (settings['ql_user_name_prefix'], '1234') )
        password = 'test-password-fq'
        # Assertions
        op_result = self.write_user_helper.new(username, password)
        user_is_in = PADSUser.objects.filter(nickname_short=username).exists()
        self.assertFalse(user_is_in,
            'Fake Quick List User must not be in database')
        self.assertIsNone(op_result,
            'Helper must fail creating fake Quick List account')
    def test_new_long_username(self):
        """Usernames longer than the configured maximum must be rejected."""
        name_length = settings['name_max_length_short'] + 1
        username = secrets.token_urlsafe(name_length)  # Random long username
        password = 'test-password-lu'
        op_result = self.write_user_helper.new(username, password)
        user_is_in = PADSUser.objects.filter(nickname_short=username).exists()
        # Assertions
        self.assertFalse(user_is_in,
            'User with excessively long username must not be in database')
        self.assertIsNone(op_result,
            'Helper must fail creating User with excessively long username')
    def test_taken_username_different_case(self):
        """A taken username must stay taken regardless of letter case."""
        username_a = 'test_jess_tu'
        password_a = 'test-password-tua'
        username_b = 'TEST_JESS_TU'  # Username A but in all caps
        password_b = 'test-password-tub'
        # Sign up User A
        self.write_user_helper.new(username_a, password_a)
        # Assertions
        op_result = self.write_user_helper.new(username_b, password_b)
        user_is_in = PADSUser.objects.filter(
            nickname_short=username_b).exists()
        self.assertIsNone(op_result,
            'Helper must fail adding User with taken username (different case)')
        self.assertFalse(user_is_in,
            'User with taken username (different case) must not be in database')
class PADSWriteUserHelperSetNicknameTests(TestCase):
    """Unit tests for PADSWriteUserHelper.set_nickname_long()"""
    @classmethod
    def setUpTestData(cls):
        # Sign up test User
        cls.username = 'test_jess_snl'
        password = 'test-password-snl'
        cls.write_user_helper = PADSWriteUserHelper()
        cls.write_user_helper.new(cls.username, password)
        # Get and assign User id to Write User Helper
        cls.user = PADSUser.objects.get(nickname_short=cls.username)
        cls.write_user_helper.set_user_id(cls.user.id)
    # Reminder: The default long nickname for a new User is the same as the
    # username (or short nickname)
    def test_set_nickname_long_valid(self):
        """A valid long nickname can be set and is persisted."""
        nickname_long_new = 'Jessica the Test User'
        op_result = self.write_user_helper.set_nickname_long(nickname_long_new)
        user_reloaded = PADSUser.objects.get(pk=self.user.id)
        # Assertions (assertEqual: assertEquals is a deprecated alias)
        self.assertEqual(user_reloaded.nickname, nickname_long_new,
            'User must be able to set a valid nickname')
        self.assertTrue(op_result,
            'Helper must indicate success in long nickname change')
    def test_set_nickname_long_blank_input(self):
        """Blank long nicknames must be rejected and leave the nickname unchanged."""
        # Multi-Assertions
        for n in blank_inputs.values():
            op_result = self.write_user_helper.set_nickname_long(n)
            user_reloaded = PADSUser.objects.get(pk=self.user.id)
            self.assertEqual(user_reloaded.nickname, self.username,
                'User\'s nickname must not change after failure to set nickname')
            self.assertFalse(op_result,
                'Helper must indicate failure to set blank long nicknames')
    def test_set_nickname_long_too_long(self):
        """Long nicknames over the configured maximum length must be rejected."""
        length = settings['name_max_length_long'] + 1
        nickname_too_long = ''.join( ['Jessica the Test User',
            secrets.token_urlsafe(length)] )
        op_result = self.write_user_helper.set_nickname_long(
            nickname_too_long)
        user_reloaded = PADSUser.objects.get(pk=self.user.id)
        # Assertions
        self.assertEqual(user_reloaded.nickname, self.username,
            'User\'s nickname must not change after failure to set nickname')
        self.assertFalse(op_result,
            'Helper must indicate failure to set excessively long nickname')
class PADSWriteUserHelperSetPasswordTests(TestCase):
    """Unit tests for PADSWriteUserHelper.set_password()"""
    @classmethod
    def setUpTestData(cls):
        # Sign up test User, get details
        cls.username = 'test_jess_sp'
        cls.password = 'test-password-sp'
        cls.write_user_helper = PADSWriteUserHelper()
        cls.write_user_helper.new(cls.username, cls.password)
        cls.user = PADSUser.objects.get(nickname_short=cls.username)
        # Set User Write Helper User id to test User's
        cls.write_user_helper.set_user_id(cls.user.id)
    def test_set_password_valid(self):
        """A valid new password replaces the old one."""
        password_new = ''.join( [self.password, secrets.token_urlsafe(7)] )
        op_result = self.write_user_helper.set_password(password_new)
        # Reload user info
        # Beginner's PROTIP: After making changes to the database via the
        # Django database model objects, the model object still contains old
        # information. Thus, it is necessary to reload the record from the
        # database.
        user_reloaded = PADSUser.objects.get(nickname_short=self.username)
        # Assertions
        self.assertFalse(self.write_user_helper.password_hasher.verify(
            self.password, user_reloaded.password_hash),
            'User\'s old valid password must fail to validate')
        self.assertTrue(self.write_user_helper.password_hasher.verify(
            password_new, user_reloaded.password_hash),
            'User\'s new valid password must validate')
        self.assertTrue(op_result,
            'Helper must indicate success in password change')
    def test_set_password_blank_input(self):
        """Blank passwords must be rejected and leave the stored hash unchanged."""
        # Multi-Assertion
        for p in blank_inputs.values():
            op_result = self.write_user_helper.set_password(p)
            # Bug fix: reload AFTER the attempted change so the assertions
            # check the post-operation database state, not a stale snapshot.
            user_reloaded = PADSUser.objects.get(nickname_short=self.username)
            if p is None:
                # Workaround PBKDF2PasswordHasher not accepting None as input
                p = ''
            self.assertFalse(self.write_user_helper.password_hasher.verify(
                p, user_reloaded.password_hash),
                'Blank password must fail to validate for User')
            self.assertTrue(self.write_user_helper.password_hasher.verify(
                self.password, user_reloaded.password_hash),
                'User\'s old password must continue to validate')
            self.assertFalse(op_result,
                'Helper must indicate failure to change to blank password')
class PADSWriteUserHelperSetTimezoneTests(TestCase):
@classmethod
def setUpTestData(cls):
# Set Up User Helpers
read_user_helper = PADSUserHelper()
write_user_helper = PADSWriteUserHelper()
# Sign Up Test Quick List User
ql_password = <PASSWORD>_<PASSWORD>_<PASSWORD>()
cls.ql_user_id = read_user_helper.split_ql_password(ql_password)[0]
cls.ql_user = PADSUser.objects.get(pk=cls.ql_user_id)
def test_set_timezone_valid(self):
tz_name = 'Australia/Sydney'
write_user_helper_stz = PADSWriteUserHelper(self.ql_user_id)
result = write_user_helper_stz.set_time_zone(tz_name)
ql_user = PADSUser.objects.get(pk=self.ql_user_id) # Reload User
# Assertion
| |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# pylint: disable=too-many-lines
import asyncio
import base64
import json
import os
import uuid
from http import HTTPStatus
from typing import List, Callable, Awaitable, Union, Dict
from msrest.serialization import Model
from botframework.connector import Channels, EmulatorApiClient
from botframework.connector.aio import ConnectorClient
from botframework.connector.auth import (
AuthenticationConfiguration,
AuthenticationConstants,
ChannelValidation,
ChannelProvider,
ClaimsIdentity,
GovernmentChannelValidation,
GovernmentConstants,
MicrosoftAppCredentials,
JwtTokenValidation,
CredentialProvider,
SimpleCredentialProvider,
SkillValidation,
AppCredentials,
SimpleChannelProvider,
MicrosoftGovernmentAppCredentials,
)
from botframework.connector.token_api import TokenApiClient
from botframework.connector.token_api.models import (
TokenStatus,
TokenExchangeRequest,
SignInUrlResponse,
)
from botbuilder.schema import (
Activity,
ActivityTypes,
ChannelAccount,
ConversationAccount,
ConversationParameters,
ConversationReference,
ExpectedReplies,
TokenResponse,
ResourceResponse,
DeliveryModes,
CallerIdConstants,
)
from . import __version__
from .bot_adapter import BotAdapter
from .oauth import (
ConnectorClientBuilder,
ExtendedUserTokenProvider,
)
from .turn_context import TurnContext
from .invoke_response import InvokeResponse
from .conversation_reference_extension import get_continuation_activity
# User agent sent on Bot Framework HTTP requests, per Bot Framework SDK convention.
USER_AGENT = f"Microsoft-BotFramework/3.1 (BotBuilder Python/{__version__})"
# OAuth API endpoints for the public Azure cloud and the US Government cloud.
OAUTH_ENDPOINT = "https://api.botframework.com"
US_GOV_OAUTH_ENDPOINT = "https://api.botframework.azure.us"
class TokenExchangeState(Model):
    """TokenExchangeState

    Serializable state carried through the OAuth sign-in flow; this object is
    round-tripped by the bot itself via msrest using ``_attribute_map``.

    :param connection_name: The connection name that was used.
    :type connection_name: str
    :param conversation: Gets or sets a reference to the conversation.
    :type conversation: ~botframework.connector.models.ConversationReference
    :param relates_to: Gets or sets a reference to a related parent conversation for this token exchange.
    :type relates_to: ~botframework.connector.models.ConversationReference
    :param bot_url: The URL of the bot messaging endpoint.
    :type bot_url: str
    :param ms_app_id: The bot's registered application ID.
    :type ms_app_id: str
    """

    _attribute_map = {
        "connection_name": {"key": "connectionName", "type": "str"},
        "conversation": {"key": "conversation", "type": "ConversationReference"},
        "relates_to": {"key": "relatesTo", "type": "ConversationReference"},
        # Bug fix: bot_url was previously mapped to the serialized key
        # "connectionName", colliding with connection_name and corrupting the
        # serialized state; it now uses its own key.
        "bot_url": {"key": "botUrl", "type": "str"},
        "ms_app_id": {"key": "msAppId", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_name: str = None,
        conversation=None,
        relates_to=None,
        bot_url: str = None,
        ms_app_id: str = None,
        **kwargs,
    ) -> None:
        super(TokenExchangeState, self).__init__(**kwargs)
        self.connection_name = connection_name
        self.conversation = conversation
        self.relates_to = relates_to
        self.bot_url = bot_url
        self.ms_app_id = ms_app_id
class BotFrameworkAdapterSettings:
    def __init__(
        self,
        app_id: str,
        app_password: str = None,
        channel_auth_tenant: str = None,
        oauth_endpoint: str = None,
        open_id_metadata: str = None,
        channel_provider: ChannelProvider = None,
        auth_configuration: AuthenticationConfiguration = None,
        app_credentials: AppCredentials = None,
        credential_provider: CredentialProvider = None,
    ):
        """
        Contains the settings used to initialize a :class:`BotFrameworkAdapter` instance.

        :param app_id: The bot application ID.
        :type app_id: str
        :param app_password: The bot application password. Defaults to
            the value of the `MicrosoftAppPassword` parameter in the `config.py` file.
        :type app_password: str
        :param channel_auth_tenant: The channel tenant to use in conversation
        :type channel_auth_tenant: str
        :param oauth_endpoint:
        :type oauth_endpoint: str
        :param open_id_metadata:
        :type open_id_metadata: str
        :param channel_provider: The channel provider
        :type channel_provider: :class:`botframework.connector.auth.ChannelProvider`. Defaults to SimpleChannelProvider
            if one isn't specified.
        :param auth_configuration:
        :type auth_configuration: :class:`botframework.connector.auth.AuthenticationConfiguration`
        :param credential_provider: Defaults to SimpleCredentialProvider if one isn't specified.
        :param app_credentials: Allows for a custom AppCredentials. Used, for example, for CertificateAppCredentials.
        """
        self.app_id = app_id
        # Bug fix: this was previously the placeholder "<PASSWORD>" (a syntax
        # error); the parameter value is stored as-is.
        self.app_password = app_password
        self.app_credentials = app_credentials
        self.channel_auth_tenant = channel_auth_tenant
        self.oauth_endpoint = oauth_endpoint
        self.channel_provider = (
            channel_provider if channel_provider else SimpleChannelProvider()
        )
        self.credential_provider = (
            credential_provider
            if credential_provider
            else SimpleCredentialProvider(self.app_id, self.app_password)
        )
        self.auth_configuration = auth_configuration or AuthenticationConfiguration()
        # If no open_id_metadata values were passed in the settings, check the
        # process' Environment Variable.
        self.open_id_metadata = (
            open_id_metadata
            if open_id_metadata
            else os.environ.get(AuthenticationConstants.BOT_OPEN_ID_METADATA_KEY)
        )
class BotFrameworkAdapter(
BotAdapter, ExtendedUserTokenProvider, ConnectorClientBuilder
):
"""
Defines an adapter to connect a bot to a service endpoint.
.. remarks::
The bot adapter encapsulates authentication processes and sends activities to and
receives activities from the Bot Connector Service. When your bot receives an activity,
the adapter creates a context object, passes it to your bot's application logic, and
sends responses back to the user's channel.
The adapter processes and directs incoming activities in through the bot middleware
pipeline to your bot’s logic and then back out again.
As each activity flows in and out of the bot, each piece of middleware can inspect or act
upon the activity, both before and after the bot logic runs.
"""
_INVOKE_RESPONSE_KEY = "BotFrameworkAdapter.InvokeResponse"
def __init__(self, settings: BotFrameworkAdapterSettings):
"""
Initializes a new instance of the :class:`BotFrameworkAdapter` class.
:param settings: The settings to initialize the adapter
:type settings: :class:`BotFrameworkAdapterSettings`
"""
super(BotFrameworkAdapter, self).__init__()
self.settings = settings or BotFrameworkAdapterSettings("", "")
self._credentials = self.settings.app_credentials
self._credential_provider = SimpleCredentialProvider(
self.settings.app_id, self.settings.app_password
)
self._channel_provider = self.settings.channel_provider
self._is_emulating_oauth_cards = False
if self.settings.open_id_metadata:
ChannelValidation.open_id_metadata_endpoint = self.settings.open_id_metadata
GovernmentChannelValidation.OPEN_ID_METADATA_ENDPOINT = (
self.settings.open_id_metadata
)
# There is a significant boost in throughput if we reuse a ConnectorClient
self._connector_client_cache: Dict[str, ConnectorClient] = {}
# Cache for appCredentials to speed up token acquisition (a token is not requested unless is expired)
self._app_credential_map: Dict[str, AppCredentials] = {}
    async def continue_conversation(
        self,
        reference: ConversationReference,
        callback: Callable,
        bot_id: str = None,
        claims_identity: ClaimsIdentity = None,
        audience: str = None,
    ):
        """
        Continues a conversation with a user.

        :param reference: A reference to the conversation to continue
        :type reference: :class:`botbuilder.schema.ConversationReference`
        :param callback: The method to call for the resulting bot turn
        :type callback: :class:`typing.Callable`
        :param bot_id: The application Id of the bot. This is the appId returned by the Azure portal registration,
            and is generally found in the `MicrosoftAppId` parameter in `config.py`.
        :type bot_id: :class:`typing.str`
        :param claims_identity: The bot claims identity
        :type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`
        :param audience: The OAuth audience (scope) to use for outbound communication
        :type audience: :class:`typing.str`
        :raises: TypeError when `reference` or `callback` is None, or when neither
            `bot_id` nor `claims_identity` is supplied.
        :return: A task that represents the work queued to execute.

        .. remarks::
            This is often referred to as the bots *proactive messaging* flow as it lets the bot proactively
            send messages to a conversation or user that are already in a communication.
            Scenarios such as sending notifications or coupons to a user are enabled by this function.
        """
        if not reference:
            raise TypeError(
                "Expected reference: ConversationReference but got None instead"
            )
        if not callback:
            raise TypeError("Expected callback: Callable but got None instead")
        # This has to have either a bot_id, in which case a ClaimsIdentity will be created, or
        # a ClaimsIdentity. In either case, if an audience isn't supplied one will be created.
        if not (bot_id or claims_identity):
            raise TypeError("Expected bot_id or claims_identity")
        # Synthesize a minimal ClaimsIdentity from the bot id when one wasn't supplied.
        if bot_id and not claims_identity:
            claims_identity = ClaimsIdentity(
                claims={
                    AuthenticationConstants.AUDIENCE_CLAIM: bot_id,
                    AuthenticationConstants.APP_ID_CLAIM: bot_id,
                },
                is_authenticated=True,
            )
        if not audience:
            audience = self.__get_botframework_oauth_scope()
        # Build a turn context from the stored conversation reference and stash
        # identity, callback and scope where downstream middleware expects them.
        context = TurnContext(self, get_continuation_activity(reference))
        context.turn_state[BotAdapter.BOT_IDENTITY_KEY] = claims_identity
        context.turn_state[BotAdapter.BOT_CALLBACK_HANDLER_KEY] = callback
        context.turn_state[BotAdapter.BOT_OAUTH_SCOPE_KEY] = audience
        # If we receive a valid app id in the incoming token claims, add the channel service URL to the
        # trusted services list so we can send messages back.
        # The service URL for skills is trusted because it is applied by the SkillHandler based on the original
        # request received by the root bot
        app_id_from_claims = JwtTokenValidation.get_app_id_from_claims(
            claims_identity.claims
        )
        if app_id_from_claims:
            if SkillValidation.is_skill_claim(
                claims_identity.claims
            ) or await self._credential_provider.is_valid_appid(app_id_from_claims):
                AppCredentials.trust_service_url(reference.service_url)
        client = await self.create_connector_client(
            reference.service_url, claims_identity, audience
        )
        context.turn_state[BotAdapter.BOT_CONNECTOR_CLIENT_KEY] = client
        return await self.run_pipeline(context, callback)
async def create_conversation(
self,
reference: ConversationReference,
logic: Callable[[TurnContext], Awaitable] = None,
conversation_parameters: ConversationParameters = None,
channel_id: str = None,
service_url: str = None,
credentials: AppCredentials = None,
):
"""
Starts a new conversation with a user. Used to direct message to a member of a group.
:param reference: The conversation reference that contains the tenant
:type reference: :class:`botbuilder.schema.ConversationReference`
:param logic: The logic to use for the creation of the conversation
:type logic: :class:`typing.Callable`
:param conversation_parameters: The information to use to create the conversation
:type conversation_parameters:
:param channel_id: The ID for the channel.
:type channel_id: :class:`typing.str`
:param service_url: The channel's service URL endpoint.
:type service_url: :class:`typing.str`
:param credentials: The application credentials for the bot.
:type credentials: :class:`botframework.connector.auth.AppCredentials`
:raises: It raises a generic exception error.
:return: A task representing the work queued to execute.
.. remarks::
To start a conversation, your bot must know its account information and the user's
account information on that channel.
Most channels only support initiating a direct message (non-group) conversation.
The adapter attempts to create a new conversation on the channel, and
then sends a conversation update activity through its middleware pipeline
to the the callback method.
If the conversation is established with the specified users, the ID of the activity
will contain the ID of the new conversation.
"""
try:
if not service_url:
service_url = reference.service_url
if not service_url:
raise TypeError(
"BotFrameworkAdapter.create_conversation(): service_url or reference.service_url is required."
)
if not channel_id:
channel_id = reference.channel_id
if not channel_id:
raise TypeError(
"BotFrameworkAdapter.create_conversation(): channel_id or reference.channel_id is required."
)
parameters = (
conversation_parameters
if conversation_parameters
else ConversationParameters(
bot=reference.bot, members=[reference.user], is_group=False
)
)
# Mix in | |
# Repository: alexvonduar/gtec-demo-framework
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#****************************************************************************************************************************************************
# Copyright 2017 NXP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the NXP. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
#from typing import cast
from typing import List
from typing import Optional
import xml.etree.ElementTree as ET
from FslBuildGen import IOUtil
from FslBuildGen.DataTypes import BuildRecipePipelineCommand
from FslBuildGen.DataTypes import BuildRecipeValidateCommand
from FslBuildGen.DataTypes import BuildRecipeValidateMethod
from FslBuildGen.DataTypes import BuildVariantConfig
from FslBuildGen.DataTypes import CMakeTargetType
from FslBuildGen.Version import Version
from FslBuildGen.Log import Log
from FslBuildGen import Util
#from FslBuildGen.Xml.Exceptions import XmlUnsupportedPlatformException
from FslBuildGen.Xml.XmlBase import XmlBase
#from FslBuildGen.Xml.XmlBase2 import XmlBase2
#from FslBuildGen import PackageConfig
# Command names permitted in each recipe pipeline stage.
g_validJoinCommands = ["Copy", "Unpack", "GitApply", "Delete"]
g_validFetchCommands = ["GitClone", "Download", "Source"]
g_validCommands = ["Unpack", "CMakeBuild", "Combine", "Copy"]
g_validValidateCommands = ["EnvironmentVariable", "Path", "FindFileInPath", "FindExecutableFileInPath", "AddHeaders", "AddLib", "AddDLL", "AddTool"]
g_validValidCombineCommands = ["CMakeBuild"]
# Well-known build-tool package names.
g_CMAKE_PACKAGE_NAME = "Recipe.BuildTool.CMake"
g_GIT_PACKAGE_NAME = "Recipe.BuildTool.Git"
# making this list is impossible but lets just check for some obvious bad ones
# Bug fix: a missing comma after 'chown' silently concatenated it with 'chmod'
# into the single entry 'chownchmod', leaving both real commands unbanned.
# Also removed a duplicate 'cmd' entry; list is now sorted for readability.
g_bannedCommands = [
    'attrib',
    'bash',
    'cd',
    'chmod',
    'chown',
    'cmd',
    'copy',
    'cp',
    'dd',
    'del',
    'delete',
    'fdisk',
    'format',
    'mkfs',
    'mv',
    'rd',
    'reg',
    'regedit',
    'remove',
    'ren',
    'rm',
    'wget',
    ]
class XmlRecipeFileDependency(XmlBase):
    """Represents a dependency element in a recipe file; exposes its 'Name' attribute."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement)
        self.Name = self._ReadAttrib(xmlElement, 'Name')  # type: str
class XmlRecipeValidateCommand(XmlBase):
    """Base class for all recipe <Validate> commands.

    Stores the command's display name, its BuildRecipeValidateCommand enum
    value, and the optional 'Help' attribute shared by every validate command.
    """
    def __init__(self, log: Log, xmlElement: ET.Element, commandName: str, commandType: BuildRecipeValidateCommand) -> None:
        super().__init__(log, xmlElement)
        self.CommandName = commandName
        self.CommandType = commandType
        # Optional; None when the 'Help' attribute is absent.
        self.Help = self._TryReadAttrib(xmlElement, 'Help')
class XmlRecipeValidateCommandEnvironmentVariable(XmlRecipeValidateCommand):
    """Validate command that checks an environment variable using the configured method."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "EnvironmentVariable", BuildRecipeValidateCommand.EnvironmentVariable)
        self.Name = self._ReadAttrib(xmlElement, 'Name') # type: str
        method = self._ReadAttrib(xmlElement, 'Method')
        # When False, a trailing slash in the variable's value is treated as invalid.
        self.AllowEndSlash = self._ReadBoolAttrib(xmlElement, 'AllowEndSlash', False) # type: bool
        # Converted from the textual 'Method' attribute to its enum value.
        self.Method = BuildRecipeValidateMethod.FromString(method) # type: int
class XmlRecipeValidateCommandPath(XmlRecipeValidateCommand):
    """Validate command that checks a path using the configured validation method."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "Path", BuildRecipeValidateCommand.Path)
        rawName = self._ReadAttrib(xmlElement, 'Name')
        methodName = self._ReadAttrib(xmlElement, 'Method')
        self.Name = rawName
        self.Method = BuildRecipeValidateMethod.FromString(methodName)
        # Reject malformed paths before normalizing: forward slashes only,
        # no trailing slash.
        if '\\' in rawName:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(rawName))
        if rawName.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(rawName))
        self.Name = IOUtil.NormalizePath(rawName)
class XmlRecipeValidateCommandFindFileInPath(XmlRecipeValidateCommand):
    """Validate command that searches the PATH for a plain file.

    'Name' must be a bare filename; the optional 'ExpectedPath' pins where the
    file is expected to be found (stored normalized, forward slashes only).
    """
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "FindFileInPath", BuildRecipeValidateCommand.FindFileInPath)
        self.Name = self._ReadAttrib(xmlElement, 'Name')
        self.ExpectedPath = self._TryReadAttrib(xmlElement, 'ExpectedPath')
        # 'Name' is a filename, not a path, so it may not contain separators.
        if '\\' in self.Name or '/' in self.Name:
            raise Exception("A filename can not contain backslash '\\' or slash '/': '{0}'".format(self.Name))
        if not self.ExpectedPath is None:
            # 'ExpectedPath' must be a relative, forward-slash path with no
            # trailing slash; it is normalized for later comparisons.
            if '\\' in self.ExpectedPath:
                raise Exception("A path can not contain backslash '\\': '{0}'".format(self.ExpectedPath))
            if self.ExpectedPath.startswith('/'):
                raise Exception("A path can not start with a slash '/': '{0}'".format(self.ExpectedPath))
            if self.ExpectedPath.endswith('/'):
                raise Exception("A path can not end with a slash '/': '{0}'".format(self.ExpectedPath))
            self.ExpectedPath = IOUtil.NormalizePath(self.ExpectedPath)
class XmlRecipeValidateCommandFindExecutableFileInPathAddOnErrorWarning(XmlBase):
    """A version-range warning entry attached to a FindExecutableFileInPath command."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement)
        self.StartVersion = self._ReadAttrib(xmlElement, 'StartVersion')
        self.EndVersion = self._TryReadAttrib(xmlElement, 'EndVersion')
        self.Help = self._ReadAttrib(xmlElement, 'Help')
        # Version strings must not carry stray surrounding whitespace.
        if self.StartVersion != self.StartVersion.strip():
            raise Exception("StartVersion contained leading or ending whitespaces")
        if self.EndVersion is not None and self.EndVersion != self.EndVersion.strip():
            raise Exception("EndVersion contained leading or ending whitespaces")
class XmlRecipeValidateCommandFindExecutableFileInPath(XmlRecipeValidateCommand):
    """Validate command that locates an executable in the PATH, optionally
    checking its version via VersionCommand/VersionRegEx/MinVersion and
    allowing alternative executable names.
    """
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        # Bug fix: the command name previously said "FindFileInPath" (copied
        # from the sibling class); it now matches this command's element name
        # and its BuildRecipeValidateCommand enum value.
        super().__init__(log, xmlElement, "FindExecutableFileInPath", BuildRecipeValidateCommand.FindExecutableFileInPath)
        self.Name = self._ReadAttrib(xmlElement, 'Name')
        alternatives = self._TryReadAttrib(xmlElement, 'Alternatives')
        self.ExpectedPath = self._TryReadAttrib(xmlElement, 'ExpectedPath')
        self.MinVersion = self._TryReadAttrib(xmlElement, 'MinVersion')
        self.VersionCommand = self._TryReadAttrib(xmlElement, 'VersionCommand')
        self.VersionRegEx = self._TryReadAttrib(xmlElement, 'VersionRegEx')
        self.AddOnErrorWarning = self.__ParseAddOnErrorWarning(log, xmlElement)
        self.Alternatives = self.__ParseAlternatives(alternatives)
        # 'Name' is a filename, not a path, so it may not contain separators.
        if '\\' in self.Name or '/' in self.Name:
            raise Exception("A filename can not contain backslash '\\' or slash '/': '{0}'".format(self.Name))
        if not self.ExpectedPath is None:
            # 'ExpectedPath' must be a relative, forward-slash path with no
            # trailing slash; it is normalized for later comparisons.
            if '\\' in self.ExpectedPath:
                raise Exception("A path can not contain backslash '\\': '{0}'".format(self.ExpectedPath))
            if self.ExpectedPath.startswith('/'):
                raise Exception("A path can not start with a slash '/': '{0}'".format(self.ExpectedPath))
            if self.ExpectedPath.endswith('/'):
                raise Exception("A path can not end with a slash '/': '{0}'".format(self.ExpectedPath))
            self.ExpectedPath = IOUtil.NormalizePath(self.ExpectedPath)
        self.__ValidateName()
        self.__ValidateVersionCheck()
    def __ParseAddOnErrorWarning(self, log: Log, xmlElement: ET.Element) -> List[XmlRecipeValidateCommandFindExecutableFileInPathAddOnErrorWarning]:
        # Collect all <AddOnErrorWarning> child elements.
        entries = xmlElement.findall('AddOnErrorWarning')
        return [XmlRecipeValidateCommandFindExecutableFileInPathAddOnErrorWarning(log, entry) for entry in entries]
    def __ParseAlternatives(self, alternatives: Optional[str]) -> List[str]:
        # The 'Alternatives' attribute is a comma-separated list of names.
        if alternatives is None:
            return []
        return alternatives.split(",")
    def __ValidateName(self) -> None:
        # The executable name must be a non-empty, trimmed, valid command name
        # that is not on the banned-command list.
        name = self.Name
        trimmed = name.strip()
        if trimmed != name:
            raise Exception("Name contained leading or ending whitespaces'{0}'".format(name))
        if len(name) <= 0:
            raise Exception("Name length must be greater than zero")
        if not Util.IsValidCommandName(name):
            raise Exception("Name must start with a a-z or A-Z and can only contain a-z,A-Z,0-9,_ and - '{0}'".format(name))
        if name.lower() in g_bannedCommands:
            raise Exception("The command '{0}' is banned".format(name))
    def __ValidateVersionCheck(self) -> None:
        # The version attributes come as a set: if any is present then
        # VersionCommand and VersionRegEx are required, and MinVersion is
        # required unless AddOnErrorWarning entries supply the version ranges.
        if self.MinVersion is None and self.VersionCommand is None and self.VersionRegEx is None:
            return
        if (self.MinVersion is None and len(self.AddOnErrorWarning) == 0) or self.VersionCommand is None or self.VersionRegEx is None:
            missingAttribs = []
            if self.MinVersion is None:
                missingAttribs.append("MinVersion")
            if self.VersionCommand is None:
                missingAttribs.append("VersionCommand")
            if self.VersionRegEx is None:
                missingAttribs.append("VersionRegEx")
            raise Exception("{0} are not defined".format(", ".join(missingAttribs)))
        # None of the version attributes may carry surrounding whitespace.
        if self.MinVersion is not None:
            trimmed = self.MinVersion.strip()
            if trimmed != self.MinVersion:
                raise Exception("MinVersion contained leading or ending whitespaces")
        trimmed = self.VersionCommand.strip()
        if trimmed != self.VersionCommand:
            raise Exception("VersionCommand contained leading or ending whitespaces")
        trimmed = self.VersionRegEx.strip()
        if trimmed != self.VersionRegEx:
            raise Exception("VersionRegEx contained leading or ending whitespaces")
class XmlRecipeValidateCommandAddHeaders(XmlRecipeValidateCommand):
    """Validate command that registers a header directory; 'Name' is stored normalized."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "AddHeaders", BuildRecipeValidateCommand.AddHeaders)
        self.Name = self._ReadAttrib(xmlElement, 'Name')
        # The path must use forward slashes only and have no trailing slash.
        if '\\' in self.Name:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(self.Name))
        if self.Name.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(self.Name))
        self.Name = IOUtil.NormalizePath(self.Name)
class XmlRecipeValidateCommandAddLib(XmlRecipeValidateCommand):
    """Validate command that registers a library; 'DebugName' defaults to 'Name'."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "AddLib", BuildRecipeValidateCommand.AddLib)
        self.Name = self._ReadAttrib(xmlElement, 'Name') # type:str
        self.DebugName = self._ReadAttrib(xmlElement, 'DebugName', self.Name) # type:str
        if '\\' in self.Name:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(self.Name))
        if self.Name.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(self.Name))
        # Bug fix: these two messages previously reported self.Name instead of
        # the offending self.DebugName.
        if '\\' in self.DebugName:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(self.DebugName))
        if self.DebugName.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(self.DebugName))
class XmlRecipeValidateCommandAddDLL(XmlRecipeValidateCommand):
    """Validate command that registers a DLL; 'DebugName' defaults to 'Name'."""
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "AddDLL", BuildRecipeValidateCommand.AddDLL)
        self.Name = self._ReadAttrib(xmlElement, 'Name') # type:str
        self.DebugName = self._ReadAttrib(xmlElement, 'DebugName', self.Name) # type:str
        if '\\' in self.Name:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(self.Name))
        if self.Name.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(self.Name))
        # Bug fix: these two messages previously reported self.Name instead of
        # the offending self.DebugName.
        if '\\' in self.DebugName:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(self.DebugName))
        if self.DebugName.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(self.DebugName))
class XmlRecipeValidateCommandAddTool(XmlRecipeValidateCommand):
    """Validate command that registers a tool, with an optional version check.

    If any of MinVersion/VersionCommand/VersionRegEx is present then all three
    are required; none may carry surrounding whitespace.
    """
    def __init__(self, log: Log, xmlElement: ET.Element) -> None:
        super().__init__(log, xmlElement, "AddTool", BuildRecipeValidateCommand.AddTool)
        self.Name = self._ReadAttrib(xmlElement, 'Name') # type:str
        self.MinVersion = self._TryReadAttrib(xmlElement, 'MinVersion')
        self.VersionCommand = self._TryReadAttrib(xmlElement, 'VersionCommand')
        self.VersionRegEx = self._TryReadAttrib(xmlElement, 'VersionRegEx')
        # The path must use forward slashes only and have no trailing slash.
        if '\\' in self.Name:
            raise Exception("A path can not contain backslash '\\': '{0}'".format(self.Name))
        if self.Name.endswith('/'):
            raise Exception("A path can not end with a slash '/': '{0}'".format(self.Name))
        self.__ValidateVersionCheck()
    def __ValidateVersionCheck(self) -> None:
        # All three version attributes absent: no version check requested.
        if self.MinVersion is None and self.VersionCommand is None and self.VersionRegEx is None:
            return
        # Partial specification is an error; report exactly which are missing.
        if self.MinVersion is None or self.VersionCommand is None or self.VersionRegEx is None:
            missingAttribs = []
            if self.MinVersion is None:
                missingAttribs.append("MinVersion")
            if self.VersionCommand is None:
                missingAttribs.append("VersionCommand")
            if self.VersionRegEx is None:
                missingAttribs.append("VersionRegEx")
            raise Exception("{0} are not defined".format(", ".join(missingAttribs)))
        # None of the version attributes may carry surrounding whitespace.
        if self.MinVersion is not None:
            trimmed = self.MinVersion.strip()
            if trimmed != self.MinVersion:
                raise Exception("MinVersion contained leading or ending whitespaces")
        trimmed = self.VersionCommand.strip()
        if trimmed != self.VersionCommand:
            raise Exception("VersionCommand contained leading or ending whitespaces")
        trimmed = self.VersionRegEx.strip()
        if trimmed != self.VersionRegEx:
            raise Exception("VersionRegEx contained leading or ending whitespaces")
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
import json
import mock
from common.waterfall import buildbucket_client
from common.waterfall import failure_type
from common.waterfall import try_job_error
from libs import analysis_status
from model.flake.flake_try_job import FlakeTryJob
from model.flake.flake_try_job_data import FlakeTryJobData
from model.wf_try_job import WfTryJob
from model.wf_try_job_data import WfTryJobData
from waterfall import buildbot
from waterfall import monitor_try_job_pipeline
from waterfall import swarming_util
from waterfall import waterfall_config
from waterfall.monitor_try_job_pipeline import MonitorTryJobPipeline
from waterfall.test import wf_testcase
class MonitorTryJobPipelineTest(wf_testcase.WaterfallTestCase):
def testDictsAreEqual(self):
self.assertTrue(monitor_try_job_pipeline._DictsAreEqual(None, None))
self.assertTrue(monitor_try_job_pipeline._DictsAreEqual({}, {}))
self.assertTrue(monitor_try_job_pipeline._DictsAreEqual({'a': 1}, {'a': 1}))
self.assertTrue(monitor_try_job_pipeline._DictsAreEqual(
{'a': 1},
{'a': 1, 'b': 2},
exclude_keys=['b']))
self.assertTrue(monitor_try_job_pipeline._DictsAreEqual(
{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
exclude_keys=['b']))
self.assertTrue(monitor_try_job_pipeline._DictsAreEqual(
{'a': 1},
{},
exclude_keys=['a']))
self.assertFalse(monitor_try_job_pipeline._DictsAreEqual(
{'a': 1},
{'a': 2}))
self.assertFalse(monitor_try_job_pipeline._DictsAreEqual(
{'a': 1, 'b': 2},
{'a': 1}))
self.assertFalse(monitor_try_job_pipeline._DictsAreEqual(
{'a': 1},
{'a': 1, 'b': 2}))
def testUpdateTryJobMetadataForBuildError(self):
error_data = {
'reason': 'BUILD_NOT_FOUND',
'message': 'message'
}
error = buildbucket_client.BuildbucketError(error_data)
try_job_data = WfTryJobData.Create('1')
try_job_data.try_job_key = WfTryJob.Create('m', 'b', 123).key
monitor_try_job_pipeline._UpdateTryJobMetadata(
try_job_data, failure_type.COMPILE, None, error, False)
self.assertEqual(try_job_data.error, error_data)
  def testUpdateTryJobMetadata(self):
    """Tests that _UpdateTryJobMetadata stores build info from a completed
    build, and records a timeout error when monitoring is abandoned."""
    try_job_id = '1'
    url = 'url'
    build_data = {
        'id': try_job_id,
        'url': url,
        'status': 'COMPLETED',
        # Microsecond-precision epoch timestamps (buildbucket format).
        'completed_ts': '1454367574000000',
        'created_ts': '1454367570000000',
    }
    report = {
        'result': {
            'rev1': 'passed',
            'rev2': 'failed'
        },
        'metadata': {
            'regression_range_size': 2
        }
    }
    build = buildbucket_client.BuildbucketBuild(build_data)
    expected_error_dict = {
        'message': 'Try job monitoring was abandoned.',
        'reason': ('Timeout after %s hours' %
                   waterfall_config.GetTryJobSettings().get(
                       'job_timeout_hours'))
    }
    try_job_data = WfTryJobData.Create(try_job_id)
    try_job_data.try_job_key = WfTryJob.Create('m', 'b', 123).key
    # First call: normal completed-build update, no error expected.
    monitor_try_job_pipeline._UpdateTryJobMetadata(
        try_job_data, failure_type.COMPILE, build, None, False, report)
    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNone(try_job_data.error)
    self.assertEqual(try_job_data.regression_range_size, 2)
    self.assertEqual(try_job_data.number_of_commits_analyzed, 2)
    # Datetimes below are the UTC equivalents of the timestamps above.
    self.assertEqual(try_job_data.end_time, datetime(2016, 2, 1, 22, 59, 34))
    self.assertEqual(try_job_data.request_time,
                     datetime(2016, 2, 1, 22, 59, 30))
    self.assertEqual(try_job_data.try_job_url, url)
    # Second call with timed_out=True: should record the timeout error.
    monitor_try_job_pipeline._UpdateTryJobMetadata(
        try_job_data, failure_type.COMPILE, build, None, True)
    self.assertEqual(try_job_data.error, expected_error_dict)
    self.assertEqual(try_job_data.error_code, try_job_error.TIMEOUT)
  @mock.patch.object(buildbot, 'GetStepLog')
  @mock.patch.object(monitor_try_job_pipeline, 'buildbucket_client')
  def testGetTryJobsForCompileSuccess(self, mock_buildbucket, mock_report):
    """A completed compile try job is surfaced as the pipeline output.

    Note: decorators apply bottom-up, so mock_buildbucket is the patched
    buildbucket_client and mock_report is the patched buildbot.GetStepLog.
    """
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'
    regression_range_size = 2
    try_job = WfTryJob.Create(master_name, builder_name, build_number)
    try_job_data = WfTryJobData.Create(try_job_id)
    try_job_data.try_job_key = try_job.key
    try_job_data.try_job_url = (
        'https://build.chromium.org/p/m/builders/b/builds/1234')
    try_job_data.put()
    # Pre-existing result entry with no report yet; the pipeline should fill
    # it in once the build completes.
    try_job.compile_results = [
        {
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    build_response = {
        'id': '1',
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'status': 'COMPLETED',
    }
    report = {
        'result': {
            'rev1': 'passed',
            'rev2': 'failed'
        },
        'metadata': {
            'regression_range_size': 2
        }
    }
    mock_buildbucket.GetTryJobs.return_value = [
        (None, buildbucket_client.BuildbucketBuild(build_response))]
    mock_report.return_value = json.dumps(report)
    pipeline = MonitorTryJobPipeline()
    pipeline.start_test()
    pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
    pipeline.callback(callback_params=pipeline.last_params)
    # Reload from ID to get all internal properties in sync.
    pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
    pipeline.finalized()
    compile_result = pipeline.outputs.default.value
    expected_compile_result = {
        'report': {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': regression_range_size
            }
        },
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'try_job_id': '1',
    }
    self.assertEqual(expected_compile_result, compile_result)
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_compile_result, try_job.compile_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)
    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertEqual(try_job_data.regression_range_size, regression_range_size)
  @mock.patch.object(buildbot, 'GetStepLog')
  @mock.patch.object(monitor_try_job_pipeline, 'buildbucket_client')
  def testGetTryJobsForCompileSuccessSerializedCallback(
      self, mock_buildbucket, mock_report):
    """Same as testGetTryJobsForCompileSuccess, but the callback params are
    passed as a JSON-serialized string instead of a dict."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'
    regression_range_size = 2
    try_job = WfTryJob.Create(master_name, builder_name, build_number)
    try_job_data = WfTryJobData.Create(try_job_id)
    try_job_data.try_job_key = try_job.key
    try_job_data.try_job_url = (
        'https://build.chromium.org/p/m/builders/b/builds/1234')
    try_job_data.put()
    try_job.compile_results = [
        {
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    build_response = {
        'id': '1',
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'status': 'COMPLETED',
        # Microsecond-precision epoch timestamps (buildbucket format).
        'completed_ts': '1454367574000000',
        'created_ts': '1454367570000000',
        'updated_ts': '1454367574000000',
    }
    report = {
        'result': {
            'rev1': 'passed',
            'rev2': 'failed'
        },
        'metadata': {
            'regression_range_size': 2
        }
    }
    mock_buildbucket.GetTryJobs.return_value = [
        (None, buildbucket_client.BuildbucketBuild(build_response))]
    mock_report.return_value = json.dumps(report)
    pipeline = MonitorTryJobPipeline()
    pipeline.start_test()
    pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
    # Serialized (string) callback params must be accepted as well.
    pipeline.callback(callback_params=json.dumps(pipeline.last_params))
    # Reload from ID to get all internal properties in sync.
    pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
    pipeline.finalized()
    compile_result = pipeline.outputs.default.value
    expected_compile_result = {
        'report': {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': regression_range_size
            }
        },
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'try_job_id': '1',
    }
    self.assertEqual(expected_compile_result, compile_result)
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_compile_result, try_job.compile_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)
    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertEqual(try_job_data.regression_range_size, regression_range_size)
    self.assertIsInstance(try_job_data.start_time, datetime)
  @mock.patch.object(buildbot, 'GetStepLog')
  @mock.patch.object(monitor_try_job_pipeline, 'buildbucket_client')
  def testGetTryJobsForTestMissingTryJobData(
      self, mock_buildbucket, mock_report):
    """The pipeline keeps polling through transient buildbucket errors even
    when no WfTryJobData entity exists for the try job."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '3'
    try_job = WfTryJob.Create(master_name, builder_name, build_number)
    try_job.test_results = [
        {
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': try_job_id,
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    # Alternating in-progress builds and BUILD_NOT_FOUND errors, ending with
    # a completed build.
    data = [
        {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        },
        {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        },
        {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        },
        {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        },
        {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'COMPLETED',
            }
        }
    ]
    report = {
        'result': {
            'rev1': {
                'a_test': {
                    'status': 'passed',
                    'valid': True
                }
            },
            'rev2': {
                'a_test': {
                    'status': 'failed',
                    'valid': True,
                    'failures': ['test1', 'test2']
                }
            }
        }
    }
    get_tryjobs_responses = [
        [(None, buildbucket_client.BuildbucketBuild(data[0]['build']))],
        [(buildbucket_client.BuildbucketError(data[1]['error']), None)],
        [(None, buildbucket_client.BuildbucketBuild(data[2]['build']))],
        [(buildbucket_client.BuildbucketError(data[3]['error']), None)],
        [(None, buildbucket_client.BuildbucketBuild(data[4]['build']))],
    ]
    mock_buildbucket.GetTryJobs.side_effect = get_tryjobs_responses
    mock_report.return_value = json.dumps(report)
    pipeline = MonitorTryJobPipeline()
    pipeline.start_test()
    # NOTE(review): run() is invoked twice — presumably to simulate a
    # duplicate task execution; confirm against the pipeline framework.
    pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
    pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
    # Since run() calls callback() immediately, we use -1.
    for _ in range (len(get_tryjobs_responses) - 1):
      pipeline.callback(callback_params=pipeline.last_params)
    # Reload from ID to get all internal properties in sync.
    pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
    pipeline.finalized()
    test_result = pipeline.outputs.default.value
    expected_test_result = {
        'report': {
            'result': {
                'rev1': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['test1', 'test2']
                    }
                }
            }
        },
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'try_job_id': '3',
    }
    self.assertEqual(expected_test_result, test_result)
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_test_result, try_job.test_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)
  @mock.patch.object(buildbot, 'GetStepLog')
  @mock.patch.object(monitor_try_job_pipeline, 'buildbucket_client')
  def testGetTryJobsForTestSuccess(self, mock_buildbucket, mock_report):
    """A test try job completes after transient buildbucket errors; the final
    report becomes the pipeline output."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '3'
    try_job = WfTryJob.Create(master_name, builder_name, build_number)
    try_job.test_results = [
        {
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': try_job_id,
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    try_job_data = WfTryJobData.Create(try_job_id)
    try_job_data.try_job_key = try_job.key
    try_job_data.try_job_url = (
        'https://build.chromium.org/p/m/builders/b/builds/1234')
    try_job_data.put()
    # Alternating in-progress builds and BUILD_NOT_FOUND errors, ending with
    # a completed build.
    data = [
        {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        },
        {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        },
        {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        },
        {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        },
        {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'COMPLETED',
            }
        }
    ]
    report = {
        'result': {
            'rev1': {
                'a_test': {
                    'status': 'passed',
                    'valid': True
                }
            },
            'rev2': {
                'a_test': {
                    'status': 'failed',
                    'valid': True,
                    'failures': ['test1', 'test2']
                }
            }
        }
    }
    get_tryjobs_responses = [
        [(None, buildbucket_client.BuildbucketBuild(data[0]['build']))],
        [(buildbucket_client.BuildbucketError(data[1]['error']), None)],
        [(None, buildbucket_client.BuildbucketBuild(data[2]['build']))],
        [(buildbucket_client.BuildbucketError(data[3]['error']), None)],
        [(None, buildbucket_client.BuildbucketBuild(data[4]['build']))],
    ]
    mock_buildbucket.GetTryJobs.side_effect = get_tryjobs_responses
    mock_report.return_value = json.dumps(report)
    pipeline = MonitorTryJobPipeline()
    pipeline.start_test()
    # NOTE(review): run() is invoked twice — presumably to simulate a
    # duplicate task execution; confirm against the pipeline framework.
    pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
    pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
    # Since run() calls callback() immediately, we use -1.
    for _ in range (len(get_tryjobs_responses) - 1):
      pipeline.callback(callback_params=pipeline.last_params)
    # Reload from ID to get all internal properties in sync.
    pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
    pipeline.finalized()
    test_result = pipeline.outputs.default.value
    expected_test_result = {
        'report': {
            'result': {
                'rev1': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['test1', 'test2']
                    }
                }
            }
        },
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'try_job_id': '3',
    }
    self.assertEqual(expected_test_result, test_result)
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_test_result, try_job.test_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)
  @mock.patch.object(buildbot, 'GetStepLog')
  @mock.patch.object(monitor_try_job_pipeline, 'buildbucket_client')
  def testGetTryJobsForFlakeSuccess(self, mock_buildbucket, mock_report):
    """A completed flake try job is surfaced as the pipeline output, and the
    last buildbucket response is cached on the FlakeTryJobData entity."""
    master_name = 'm'
    builder_name = 'b'
    step_name = 's'
    test_name = 't'
    git_hash = 'a1b2c3d4'
    try_job_id = '1'
    try_job = FlakeTryJob.Create(
        master_name, builder_name, step_name, test_name, git_hash)
    try_job.flake_results = [
        {
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }
    ]
    try_job.status = analysis_status.RUNNING
    try_job.put()
    try_job_data = FlakeTryJobData.Create(try_job_id)
    try_job_data.try_job_key = try_job.key
    try_job_data.try_job_url = (
        'https://build.chromium.org/p/m/builders/b/builds/1234')
    try_job_data.put()
    build_response = {
        'id': '1',
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'status': 'COMPLETED',
    }
    report = {
        'result': {
            'r0': {
                'gl_tests': {
                    'status': 'passed',
                    'valid': True,
                    'pass_fail_counts': {
                        'Test.One': {
                            'pass_count': 100,
                            'fail_count': 0
                        }
                    }
                }
            }
        }
    }
    mock_buildbucket.GetTryJobs.return_value = [
        (None, buildbucket_client.BuildbucketBuild(build_response))]
    mock_report.return_value = json.dumps(report)
    pipeline = MonitorTryJobPipeline()
    pipeline.start_test()
    pipeline.run(try_job.key.urlsafe(), failure_type.FLAKY_TEST, try_job_id)
    pipeline.callback(callback_params=pipeline.last_params)
    # Reload from ID to get all internal properties in sync.
    pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
    pipeline.finalized()
    flake_result = pipeline.outputs.default.value
    expected_flake_result = {
        'report': {
            'result': {
                'r0': {
                    'gl_tests': {
                        'status': 'passed',
                        'valid': True,
                        'pass_fail_counts': {
                            'Test.One': {
                                'pass_count': 100,
                                'fail_count': 0
                            }
                        }
                    }
                }
            }
        },
        'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
        'try_job_id': '1',
    }
    self.assertEqual(expected_flake_result, flake_result)
    try_job = FlakeTryJob.Get(
        master_name, builder_name, step_name, test_name, git_hash)
    self.assertEqual(expected_flake_result, try_job.flake_results[-1])
    self.assertEqual(analysis_status.RUNNING, try_job.status)
    try_job_data = FlakeTryJobData.Get(try_job_id)
    self.assertEqual(try_job_data.last_buildbucket_response, build_response)
def testUpdateTryJobResultAnalyzing(self):
master_name = 'm'
builder_name = 'b'
build_number = 1
try_job_id = '3'
try_job = WfTryJob.Create(master_name, builder_name, build_number)
try_job.put()
pipeline = MonitorTryJobPipeline()
| |
30000, 300, nan, 0.52, nan ],
[ nan, 40000, 400, nan, 1.30, nan ],
[ nan, 50000, 500, nan, 1.76, nan ],
[ nan, 60000, 600, nan, 2.13, nan ],
[ nan, 70000, 700, nan, 3.08, nan ],
[ nan, 80000, 800, nan, 4.29, nan ],
[ nan, 90000, 900, nan, 6.18, nan ],
[ nan, 100000, 1000, nan, 9.25, nan ],
[ nan, 200000, 2000, nan, 49.88, nan ],
[ nan, 100, 10000, nan, 0.04, nan ],
[ nan, 200, 20000, nan, 0.19, nan ],
[ nan, 300, 30000, nan, 0.48, nan ],
[ nan, 400, 40000, nan, 0.94, nan ],
[ nan, 500, 50000, nan, 1.62, nan ],
[ nan, 600, 60000, nan, 2.68, nan ],
[ nan, 700, 70000, nan, 4.05, nan ],
[ nan, 800, 80000, nan, 6.02, nan ],
[ nan, 900, 90000, nan, 7.48, nan ],
[ nan, 1000, 100000, nan, 9.54, nan ],
[ nan, 2000, 200000, nan, 58.30, nan ],
])
# ------------------------------------------------------------
# file: v1.6.1/cuda7.0-k40c/cgetrf.txt
# numactl --interleave=all ./testing_cgetrf -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# Columns appear to be: M, N, CPU Gflop/s, CPU time (s), GPU Gflop/s,
# GPU time (s), residual — TODO confirm against MAGMA testing_cgetrf output.
cgetrf = array([
	[ 10, 10, nan, nan, 0.29, 0.00, nan ],
	[ 20, 20, nan, nan, 0.77, 0.00, nan ],
	[ 30, 30, nan, nan, 1.88, 0.00, nan ],
	[ 40, 40, nan, nan, 3.53, 0.00, nan ],
	[ 50, 50, nan, nan, 4.87, 0.00, nan ],
	[ 60, 60, nan, nan, 5.66, 0.00, nan ],
	[ 70, 70, nan, nan, 1.18, 0.00, nan ],
	[ 80, 80, nan, nan, 1.77, 0.00, nan ],
	[ 90, 90, nan, nan, 2.35, 0.00, nan ],
	[ 100, 100, nan, nan, 3.09, 0.00, nan ],
	[ 200, 200, nan, nan, 12.78, 0.00, nan ],
	[ 300, 300, nan, nan, 28.80, 0.00, nan ],
	[ 400, 400, nan, nan, 46.76, 0.00, nan ],
	[ 500, 500, nan, nan, 68.58, 0.00, nan ],
	[ 600, 600, nan, nan, 90.26, 0.01, nan ],
	[ 700, 700, nan, nan, 115.63, 0.01, nan ],
	[ 800, 800, nan, nan, 142.59, 0.01, nan ],
	[ 900, 900, nan, nan, 168.11, 0.01, nan ],
	[ 1000, 1000, nan, nan, 195.86, 0.01, nan ],
	[ 2000, 2000, nan, nan, 498.64, 0.04, nan ],
	[ 3000, 3000, nan, nan, 846.24, 0.09, nan ],
	[ 4000, 4000, nan, nan, 1107.91, 0.15, nan ],
	[ 5000, 5000, nan, nan, 1258.45, 0.26, nan ],
	[ 6000, 6000, nan, nan, 1537.05, 0.37, nan ],
	[ 7000, 7000, nan, nan, 1707.15, 0.54, nan ],
	[ 8000, 8000, nan, nan, 1860.49, 0.73, nan ],
	[ 9000, 9000, nan, nan, 1918.42, 1.01, nan ],
	[ 10000, 10000, nan, nan, 2029.80, 1.31, nan ],
	[ 12000, 12000, nan, nan, 2200.71, 2.09, nan ],
	[ 14000, 14000, nan, nan, 2323.94, 3.15, nan ],
	[ 16000, 16000, nan, nan, 2422.94, 4.51, nan ],
	[ 18000, 18000, nan, nan, 2476.52, 6.28, nan ],
	[ 20000, 20000, nan, nan, 2520.95, 8.46, nan ],
])
# numactl --interleave=all ./testing_cgetrf_gpu -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# GPU-interface variant of cgetrf; same column layout as cgetrf above
# (presumed M, N, CPU Gflop/s, CPU time, GPU Gflop/s, GPU time, residual).
cgetrf_gpu = array([
	[ 10, 10, nan, nan, 0.07, 0.00, nan ],
	[ 20, 20, nan, nan, 0.39, 0.00, nan ],
	[ 30, 30, nan, nan, 0.99, 0.00, nan ],
	[ 40, 40, nan, nan, 2.12, 0.00, nan ],
	[ 50, 50, nan, nan, 2.81, 0.00, nan ],
	[ 60, 60, nan, nan, 3.82, 0.00, nan ],
	[ 70, 70, nan, nan, 0.73, 0.00, nan ],
	[ 80, 80, nan, nan, 0.93, 0.00, nan ],
	[ 90, 90, nan, nan, 1.23, 0.00, nan ],
	[ 100, 100, nan, nan, 1.95, 0.00, nan ],
	[ 200, 200, nan, nan, 8.98, 0.00, nan ],
	[ 300, 300, nan, nan, 23.12, 0.00, nan ],
	[ 400, 400, nan, nan, 41.40, 0.00, nan ],
	[ 500, 500, nan, nan, 67.82, 0.00, nan ],
	[ 600, 600, nan, nan, 90.69, 0.01, nan ],
	[ 700, 700, nan, nan, 116.90, 0.01, nan ],
	[ 800, 800, nan, nan, 149.23, 0.01, nan ],
	[ 900, 900, nan, nan, 181.15, 0.01, nan ],
	[ 1000, 1000, nan, nan, 229.07, 0.01, nan ],
	[ 2000, 2000, nan, nan, 590.46, 0.04, nan ],
	[ 3000, 3000, nan, nan, 1028.34, 0.07, nan ],
	[ 4000, 4000, nan, nan, 1328.21, 0.13, nan ],
	[ 5000, 5000, nan, nan, 1458.62, 0.23, nan ],
	[ 6000, 6000, nan, nan, 1688.65, 0.34, nan ],
	[ 7000, 7000, nan, nan, 1900.68, 0.48, nan ],
	[ 8000, 8000, nan, nan, 2107.74, 0.65, nan ],
	[ 9000, 9000, nan, nan, 2129.66, 0.91, nan ],
	[ 10000, 10000, nan, nan, 2228.35, 1.20, nan ],
	[ 12000, 12000, nan, nan, 2443.86, 1.89, nan ],
	[ 14000, 14000, nan, nan, 2583.52, 2.83, nan ],
	[ 16000, 16000, nan, nan, 2643.80, 4.13, nan ],
	[ 18000, 18000, nan, nan, 2683.00, 5.80, nan ],
	[ 20000, 20000, nan, nan, 2724.53, 7.83, nan ],
])
# ------------------------------------------------------------
# file: v1.6.1/cuda7.0-k40c/cheevd.txt
# numactl --interleave=all ./testing_cheevd -JN -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# -JN = eigenvalues only. Columns appear to be: N, CPU time (s),
# GPU time (s) — TODO confirm against MAGMA testing_cheevd output.
cheevd_JN = array([
	[ 10, nan, 0.0000 ],
	[ 20, nan, 0.0001 ],
	[ 30, nan, 0.0001 ],
	[ 40, nan, 0.0001 ],
	[ 50, nan, 0.0002 ],
	[ 60, nan, 0.0003 ],
	[ 70, nan, 0.0005 ],
	[ 80, nan, 0.0007 ],
	[ 90, nan, 0.0009 ],
	[ 100, nan, 0.0011 ],
	[ 200, nan, 0.0108 ],
	[ 300, nan, 0.0203 ],
	[ 400, nan, 0.0351 ],
	[ 500, nan, 0.0492 ],
	[ 600, nan, 0.0680 ],
	[ 700, nan, 0.0878 ],
	[ 800, nan, 0.1119 ],
	[ 900, nan, 0.1375 ],
	[ 1000, nan, 0.1640 ],
	[ 2000, nan, 0.5694 ],
	[ 3000, nan, 1.2952 ],
	[ 4000, nan, 2.3348 ],
	[ 5000, nan, 3.7632 ],
	[ 6000, nan, 5.6444 ],
	[ 7000, nan, 8.1125 ],
	[ 8000, nan, 11.0722 ],
	[ 9000, nan, 14.8193 ],
	[ 10000, nan, 19.3837 ],
	[ 12000, nan, 31.0675 ],
	[ 14000, nan, 45.9363 ],
	[ 16000, nan, 65.4731 ],
	[ 18000, nan, 90.3068 ],
	[ 20000, nan, 119.1788 ],
])
# numactl --interleave=all ./testing_cheevd -JV -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# -JV = eigenvalues and eigenvectors; same column layout as cheevd_JN above.
cheevd_JV = array([
	[ 10, nan, 0.0002 ],
	[ 20, nan, 0.0002 ],
	[ 30, nan, 0.0003 ],
	[ 40, nan, 0.0005 ],
	[ 50, nan, 0.0006 ],
	[ 60, nan, 0.0008 ],
	[ 70, nan, 0.0011 ],
	[ 80, nan, 0.0014 ],
	[ 90, nan, 0.0017 ],
	[ 100, nan, 0.0021 ],
	[ 200, nan, 0.0176 ],
	[ 300, nan, 0.0302 ],
	[ 400, nan, 0.0499 ],
	[ 500, nan, 0.0678 ],
	[ 600, nan, 0.0891 ],
	[ 700, nan, 0.1137 ],
	[ 800, nan, 0.1438 ],
	[ 900, nan, 0.1778 ],
	[ 1000, nan, 0.2086 ],
	[ 2000, nan, 0.7005 ],
	[ 3000, nan, 1.4685 ],
	[ 4000, nan, 2.6798 ],
	[ 5000, nan, 4.3095 ],
	[ 6000, nan, 6.5492 ],
	[ 7000, nan, 9.4248 ],
	[ 8000, nan, 12.9559 ],
	[ 9000, nan, 17.4774 ],
	[ 10000, nan, 22.7936 ],
	[ 12000, nan, 36.9305 ],
	[ 14000, nan, 54.6267 ],
	[ 16000, nan, 78.2375 ],
	[ 18000, nan, 109.1700 ],
	[ 20000, nan, 144.4371 ],
])
# numactl --interleave=all ./testing_cheevd_gpu -JN -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# GPU-interface variant, eigenvalues only; same column layout as cheevd_JN.
cheevd_gpu_JN = array([
	[ 10, nan, 0.0001 ],
	[ 20, nan, 0.0001 ],
	[ 30, nan, 0.0001 ],
	[ 40, nan, 0.0002 ],
	[ 50, nan, 0.0003 ],
	[ 60, nan, 0.0004 ],
	[ 70, nan, 0.0007 ],
	[ 80, nan, 0.0009 ],
	[ 90, nan, 0.0012 ],
	[ 100, nan, 0.0014 ],
	[ 200, nan, 0.0124 ],
	[ 300, nan, 0.0235 ],
	[ 400, nan, 0.0395 ],
	[ 500, nan, 0.0542 ],
	[ 600, nan, 0.0755 ],
	[ 700, nan, 0.0957 ],
	[ 800, nan, 0.1225 ],
	[ 900, nan, 0.1491 ],
	[ 1000, nan, 0.1775 ],
	[ 2000, nan, 0.6002 ],
	[ 3000, nan, 1.3481 ],
	[ 4000, nan, 2.3962 ],
	[ 5000, nan, 3.8576 ],
	[ 6000, nan, 5.7451 ],
	[ 7000, nan, 8.2436 ],
	[ 8000, nan, 11.2046 ],
	[ 9000, nan, 14.9632 ],
	[ 10000, nan, 19.5209 ],
	[ 12000, nan, 31.2815 ],
	[ 14000, nan, 46.2188 ],
	[ 16000, nan, 65.6012 ],
	[ 18000, nan, 90.6014 ],
	[ 20000, nan, 119.1913 ],
])
# numactl --interleave=all ./testing_cheevd_gpu -JV -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
cheevd_gpu_JV = array([
[ 10, nan, 0.0002 ],
[ 20, nan, 0.0002 ],
[ 30, nan, 0.0004 ],
[ 40, nan, 0.0005 ],
[ 50, nan, 0.0006 ],
[ 60, nan, 0.0009 ],
[ 70, nan, 0.0012 ],
[ 80, nan, 0.0014 ],
[ 90, nan, 0.0018 ],
[ 100, nan, 0.0021 ],
[ 200, nan, 0.0170 ],
[ 300, nan, 0.0293 ],
[ 400, nan, 0.0490 ],
[ 500, nan, 0.0665 ],
[ 600, nan, 0.0878 ],
[ 700, nan, 0.1103 ],
[ 800, nan, 0.1409 ],
[ 900, nan, 0.1734 ],
[ 1000, nan, 0.2023 ],
[ 2000, nan, 0.6659 ],
[ 3000, nan, 1.5009 ],
[ 4000, nan, 2.7090 ],
[ 5000, nan, 4.4119 ],
[ 6000, nan, 6.5324 ],
[ 7000, nan, 9.3413 ],
[ 8000, nan, 13.0797 ],
[ 9000, | |
import pymysql as pymysql
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException
from wordcloud import WordCloud, STOPWORDS
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import time
import random
def click_url(url, header):
    """Fetch *url* and return the parsed document as BeautifulSoup.

    header: dict of HTTP request headers (e.g. User-Agent).
    """
    # Bug fix: requests.get(url, header) passed the headers as the second
    # positional argument, which is `params` (the query string), so the
    # headers were never actually sent. Pass them via the `headers` keyword.
    req = requests.get(url, headers=header)
    soup = BeautifulSoup(req.content, 'html.parser')
    return soup
def pagination(page, search, city):
    """Fetch one Indeed search-results page and return it as BeautifulSoup.

    page: result offset for Indeed's pagination ('start' query parameter).
    search: job search query; spaces are URL-encoded.
    city: city name (the state is hard-coded to TX).
    """
    my_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
        "referrer": "www.Google.com"}
    search = search.replace(" ", "%20")
    # url = 'https://www.indeed.com/jobs?q=software%20developer&l=McKinney%2C%20TX&start=' + str(page)
    url = 'https://www.indeed.com/jobs?q=' + search + '&l=' + city + ',%20TX&start=' + str(page)
    # Bug fix: headers must be passed via the `headers` keyword; passing them
    # as the second positional argument sends them as query params instead.
    # NOTE(review): the "referrer" key is not a standard header name (the
    # real HTTP header is "Referer") — left as-is, confirm intent.
    req = requests.get(url, headers=my_header)
    soup = BeautifulSoup(req.content, 'html.parser')
    return soup
def is_state(txt):
    """Return True if *txt* is a US state/territory postal abbreviation.

    The comparison is case-insensitive.
    """
    states_abbrev = ('AK', 'AL', 'AR', 'AS', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'GU', 'HI', 'IA', 'ID',
                     'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MP', 'MS', 'MT', 'NC', 'ND',
                     'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX',
                     'UM', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY')
    # All abbreviations are stored uppercase, so a single membership test
    # replaces the original index loop over range(len(...)).
    return txt.upper() in states_abbrev
def split_location(loc):
    """Split a scraped location string into city, state and zip code.

    Returns a dict with keys 'city', 'state' and 'zip_code'; any part absent
    from *loc* is returned as "".
    """
    # US state/territory postal abbreviations (inlined so the function is
    # self-contained; all entries are uppercase).
    states_abbrev = ('AK', 'AL', 'AR', 'AS', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'GU', 'HI', 'IA', 'ID',
                     'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MP', 'MS', 'MT', 'NC', 'ND',
                     'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX',
                     'UM', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY')
    city_parts = []
    state = zip_code = ""
    # Bug fix: separators are replaced with spaces (not deleted, which merged
    # adjacent tokens), and all alphabetic tokens are collected so multi-word
    # cities such as "Fort Worth" are no longer truncated to their last word.
    for token in loc.replace(",", " ").replace("-", " ").replace("•", " ").split():
        if token.isnumeric():
            zip_code = token
        elif token.upper() in states_abbrev:
            state = token
        elif token.isalpha():
            city_parts.append(token)
    return {
        'city': " ".join(city_parts),
        'state': state,
        'zip_code': zip_code,
    }
def salary_format(sal_split):
    """Normalize scraped salary strings into yearly/hourly figures.

    sal_split: list of one (single figure) or two (min/max) strings; every
    non-digit character is stripped in place. Amounts below 1000 are treated
    as hourly rates, otherwise as yearly salaries. Unused fields stay 0.
    """
    # Strip non-digits in place — callers may observe the mutated list.
    for idx, raw in enumerate(sal_split):
        sal_split[idx] = ''.join(ch for ch in raw if ch.isdigit())
    result = {
        'salary_year': 0,
        'salary_hourly': 0,
        'salary_min_year': 0,
        'salary_max_year': 0,
        'salary_min_hourly': 0,
        'salary_max_hourly': 0,
    }
    if len(sal_split) == 1:
        amount = int(sal_split[0])
        if amount < 1000:
            result['salary_hourly'] = amount
        else:
            result['salary_year'] = amount
    else:
        low = int(sal_split[0])
        high = int(sal_split[1])
        if high < 1000:
            result['salary_min_hourly'] = low
            result['salary_max_hourly'] = high
        else:
            result['salary_min_year'] = low
            result['salary_max_year'] = high
    return result
def check_languages(desc):
    """Return the distinct programming languages mentioned in *desc*.

    Results are uppercased and in first-mention order. Matching is by exact
    whitespace-delimited token, so punctuation glued to a word (e.g.
    "Python,") prevents a match — same limitation as the original tokenizer.
    """
    all_languages = [
        "JAVA", "C", "C++", "C#", "Python", ".NET", "JavaScript", "PHP",
        "SQL", "OBJECTIVE-C", "ASSEMBLY", "MATLAB", "PERL", "PASCAL", "R",
        "RUBY", "VISUAL BASIC", "GO", "GROOVY", "SWIFT", "SAS", "LUA",
        "DART", "FORTRAN", "COBOL", "SCRATCH", "SCALA", "ABAP", "LISP",
        "ADA", "RUST", "KOTLIN", "HASKELL", "G", "JULIA", "TCL",
        "POSTSCRIPT", "ERLANG", "BASH", "HTML", "CSS", "ANGULAR", "REACT",
        "VUE", "NODE.JS", "NODE", "NODEJS"]
    known = {language.upper() for language in all_languages}
    languages_mentioned = []
    for token in desc.split(" "):
        candidate = token.upper()
        if candidate in known and candidate not in languages_mentioned:
            languages_mentioned.append(candidate)
    return languages_mentioned
def check_degree(desc):
    """Return normalized degree levels mentioned in *desc*.

    Apostrophes are stripped (so "Bachelor's" matches "Bachelors"), synonyms
    are collapsed to a canonical form (e.g. 'B.S.' -> 'BACHELORS'), and
    results keep first-mention order.
    """
    # Canonical spelling for each recognized synonym; unlisted tokens
    # (CERTIFICATE, ASSOCIATES, BACHELORS, MASTERS, PHD) are already canonical.
    canonical = {
        'CERTIFICATION': 'CERTIFICATE',
        'ASSOCIATE': 'ASSOCIATES',
        'A.S.': 'ASSOCIATES',
        'BACHELOR': 'BACHELORS',
        'B.S.': 'BACHELORS',
        'MASTER': 'MASTERS',
        'M.S.': 'MASTERS',
        'PH.D': 'PHD',
        'DOCTORATE': 'PHD',
        'DOCTORATES': 'PHD',
        'DOCTORAL': 'PHD',
    }
    recognized = {"CERTIFICATE", "CERTIFICATION", "ASSOCIATE", "ASSOCIATES",
                  "A.S.", "BACHELOR", "BACHELORS", "B.S.", "MASTER",
                  "MASTERS", "M.S.", "PHD", "PH.D", "DOCTORATE",
                  "DOCTORATES", "DOCTORAL"}
    mentioned = []
    for token in desc.replace("'", "").split(" "):
        candidate = token.upper()
        # Dedupe happens BEFORE normalization (as in the original), so two
        # synonyms of the same degree may both survive into the output.
        if candidate in recognized and candidate not in mentioned:
            mentioned.append(candidate)
    return [canonical.get(degree, degree) for degree in mentioned]
def check_keywords(desc):
    """Return distinct soft-skill / IDE keywords mentioned in *desc*.

    Results are uppercased and in first-mention order. Matching is by exact
    whitespace-delimited token, so multi-word vocabulary entries (e.g.
    'Visual Studio') can never match — same limitation as the original.
    """
    interpersonal_skills = [
        'Assertiveness', 'Assertiveness', 'Bodylanguage', 'Bullying',
        'Charisma', 'Clarification', 'Collaboration', 'Communication',
        'Communication', 'Interpersonal', 'Communication, Barriers to Effective',
        'Communication, Improving', 'Communication, Non-Verbal', 'Verbal',
        'Effective', 'Confidentiality', 'Conflict', 'Managing', 'Conflict',
        'Resolution', 'Mediation', 'Conversational', 'Criticism',
        'Constructive', 'Criticism', 'Customer', 'Telephone', 'Emotional',
        'Intelligence', 'Empathy', 'Employability', 'Feedback', 'Group',
        'Behaviours', 'Cohesiveness', 'Life-Cycle', 'Groups', 'Teams',
        'Harassment']
    intrapersonal_skills = [
        'verbal communication', 'non-verbal communication', 'listening',
        'negotiation', 'solving', 'decision-making', 'assertiveness',
        'patience', 'empathy']
    ide = [
        'Jupyter ', 'JupyterLab', 'Jupyter-Notebooks', 'RStudio', 'PyCharm',
        'Notepad++', 'Spyder', 'Sublime Text', 'Vim', 'Emacs', 'MATLAB',
        'Atom', 'Eclipse', 'NetBeans', 'IntelliJ', 'BlueJ', 'JDeveloper',
        'DrJava', 'JCreator', 'jGRASP', 'Greenfoot', 'Xcode', 'Codenvy',
        'RAD', 'Visual Studio', 'Visual Studio Code', 'CodeBlocks',
        'CodeLite', 'CLion', 'Qt Creator', 'Nuclide', 'WebStorm', 'Sublime']
    # One combined vocabulary replaces the original's three parallel loops;
    # per-word check order (interpersonal, intrapersonal, ide) is preserved.
    vocabulary = interpersonal_skills + intrapersonal_skills + ide
    mentioned = []
    for token in desc.replace("'", "").split(" "):
        upper = token.upper()
        for key in vocabulary:
            if upper == key.upper():
                mentioned.append(key.upper())
    return list(dict.fromkeys(mentioned))  # dedupe, keep first-mention order
# //////////////////////////////////////////////////////////////////////////////////////// get_post() start
def get_post(page):
    """Scrape every job card from an Indeed search-results page.

    Parameters
    ----------
    page : bs4.BeautifulSoup
        Parsed search-results page containing 'jobsearch-SerpJobCard' divs.

    Returns
    -------
    list of dict
        One dict per posting with url, title, company, location, remote
        flag, salary, full description, most-common words, keyword /
        language / degree matches, and posting date.
    """
    # Browser-like header forwarded to click_url when fetching each posting.
    my_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'}
    entries = []
    post = page.find_all('div', class_='jobsearch-SerpJobCard')
    for element in post:
        url_post = element.find('a', attrs={'class': 'jobtitle turnstileLink'})['href']
        url_post = "https://www.indeed.com" + url_post
        title = element.find('a', attrs={'class': 'jobtitle'}).text.strip()
        company = element.find('span', attrs={'class': 'company'}).text.strip()
        location_dict = []
        try:
            location = element.find(class_='location').text.strip()
            location_dict = split_location(location)
        # NOTE(review): NoSuchElementException is a Selenium exception, but
        # element.find here is BeautifulSoup-style and returns None for a
        # missing tag, which raises AttributeError instead -- confirm this
        # except clause can ever fire.
        except NoSuchElementException:
            location = ""
        remote = False
        try:
            # Presence of the 'remote' span marks the posting as remote.
            remote_check = element.find('span', attrs={'class': 'remote'}).text.strip()
            remote = True
        except AttributeError:
            remote = False
        if title.upper().find("REMOTE") != -1:
            remote = True
        salary = {}
        try:
            # e.g. "$50,000 - $70,000" -> ["50000 ", " 70000"]
            salary_split = element.find('span', attrs={'class': 'salaryText'}).text.strip().replace(',', "").replace(
                '$', "").strip().split("-")
            salary = salary_format(salary_split)
        except AttributeError:
            # No salary listed; store an all-zero placeholder record.
            salary = {
                'salary_year': 0,
                'salary_hourly': 0,
                'salary_min_year': 0,
                'salary_max_year': 0,
                'salary_min_hourly': 0,
                'salary_max_hourly': 0
            }
        # The card's short summary is immediately replaced by the full
        # description fetched from the posting page below.
        job_description = element.find('div', attrs={'class': 'summary'}).text.strip().replace("\n", "")
        # NOTE(review): this rebinds the `page` parameter to the posting page,
        # shadowing the search-results page; `post` was already materialized
        # above, so iteration still works -- consider a different name.
        page = click_url(url_post, my_header)
        job_description = get_description(page)
        date_days = 0
        date_text = element.find(class_='date').text.strip()
        if date_text.upper() == "TODAY" or date_text.upper() == "JUST POSTED":
            date = datetime.datetime.now()
        else:
            # "5 days ago" -> 5 (any digits in the text).
            date_days = int(''.join(i for i in date_text if i.isdigit()))
            date = datetime.datetime.now() - datetime.timedelta(days=date_days)  # subtract 'days ago' from current date
            date = date.strftime("%x")  # change to mm/dd/yy format
        popular_words = most_common_word(job_description, 10)
        entry = {
            'url': url_post,
            'title': title,
            'company': company,
            'location': location_dict,
            'remote': remote,
            'salary': salary,
            'job_description': job_description,
            'most_common_words': popular_words,
            'keywords': check_keywords(job_description),
            'languages': check_languages(job_description),
            'degrees': check_degree(job_description),
            'date': str(date)
        }
        entries.append(entry)
        print(entry)
    return entries
# //////////////////////////////////////////////////////////////////////////////////////// get_post() end
def get_description(page):
    """Extract the full job-description text from a posting page.

    Parameters
    ----------
    page : bs4.BeautifulSoup
        Parsed HTML of an individual job posting.

    Returns
    -------
    str
        Description with newlines flattened to spaces and backslashes
        removed; empty string when the description element is missing.
    """
    job_description = ""
    try:
        job_description = page.find('div', attrs={'id': 'jobDescriptionText'}).text.strip().replace("\n", " ").replace('\\', "")
    except AttributeError:
        # `page` is a BeautifulSoup object with no `current_url` attribute,
        # so the original `print(page.current_url)` raised AttributeError
        # inside this handler. Use a safe lookup instead, and drop the stale
        # hard-coded "LINE 200" from the message.
        print(getattr(page, 'current_url', '<unknown url>'))
        print("get_description -- job_description not found")
    return job_description
def most_common_word(desc, num_results):
    """Return the `num_results` most frequent whitespace-separated tokens.

    Parameters
    ----------
    desc : str
        Text to tokenize (split on single spaces, like the original).
    num_results : int
        Maximum number of tokens to return.

    Returns
    -------
    list of str
        Tokens ordered by descending count; ties keep first-appearance
        order (both `sorted` and `Counter.most_common` are stable).
    """
    from collections import Counter  # stdlib replacement for the hand-rolled counting dict
    counts = Counter(desc.split(" "))
    return [word for word, _ in counts.most_common(num_results)]
def connect_to_db():
    """Open a connection to the local `indeeddb` MySQL database.

    Returns
    -------
    pymysql.connections.Connection or None
        An open connection, or None when the connection attempt fails.
    """
    # pymysql.connect() raises on failure rather than returning a falsy
    # value, so the original `if conn: ... else: ...` failure branch was
    # unreachable. Catch the driver error so that path actually runs.
    try:
        conn = pymysql.connect(
            host='127.0.0.1',
            port=3306,
            user='root',
            passwd='###',  # placeholder credential -- configure before use
            db='indeeddb'
        )
    except pymysql.MySQLError:
        print("Failed to connect to database")
        return
    print("\nConnected to database")
    return conn
def insert_to_db(data, conn):
    """Insert scraped postings (from get_post) into the MySQL tables.

    Writes one row per posting into `post`, plus child rows into
    `languages`, `degrees`, `most_common_words`, `keywords` and `salary`,
    all keyed by the posting url. Commits once at the end and closes the
    connection, so `conn` is unusable after this call.

    Parameters
    ----------
    data : list of dict
        Entries shaped like the dicts built by get_post.
    conn : pymysql.connections.Connection
        Open database connection (closed by this function).
    """
    if len(data) < 1:
        print("INVALID DATA")
        return
    my_cursor = conn.cursor()
    for entry in data:
        # NOTE(review): entry['location'] is indexed as a dict with
        # city/state/zip_code keys, but get_post initializes it to [] when
        # no location is found -- confirm split_location always returns a
        # dict, otherwise this raises on such entries.
        sql = "INSERT IGNORE INTO post (url,title,company,city,state,zip_code,remote,job_description, date_posted) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
        val = (str(entry['url']), str(entry['title']), str(entry['company']), str(entry['location']['city']),
               str(entry['location']['state']), str(entry['location']['zip_code']), entry['remote'],
               str(entry['job_description']), str(entry['date']))
        my_cursor.execute(sql, val)
        for lang in entry['languages']:
            sql = "INSERT IGNORE INTO languages (url,language) VALUES (%s, %s)"
            val = (str(entry['url']), str(lang))
            my_cursor.execute(sql, val)
        for degree in entry['degrees']:
            sql = "INSERT IGNORE INTO degrees (url,degree) VALUES (%s, %s)"
            val = (str(entry['url']), str(degree))
            my_cursor.execute(sql, val)
        # `word` here is the rank index (0 = most common), not the token.
        for word in range(len(entry['most_common_words'])):
            sql = "INSERT IGNORE INTO most_common_words (url,ranking,word) VALUES (%s, %s, %s)"
            val = (str(entry['url']), int(word), str(entry['most_common_words'][word]))
            my_cursor.execute(sql, val)
        for word in entry['keywords']:
            sql = "INSERT IGNORE INTO keywords (url,word) VALUES (%s, %s)"
            val = (str(entry['url']), str(word))
            my_cursor.execute(sql, val)
        sql = "INSERT IGNORE INTO salary(url,salary_year,salary_hourly,salary_min_year,salary_max_year,salary_min_hourly,salary_max_hourly) VALUES (%s, %s, %s, %s, %s, %s, %s)"
        val = (str(entry['url']), str(entry['salary']['salary_year']), str(entry['salary']['salary_hourly']),
               str(entry['salary']['salary_min_year']), str(entry['salary']['salary_max_year']),
               str(entry['salary']['salary_min_hourly']), str(entry['salary']['salary_max_hourly']))
        my_cursor.execute(sql, val)
    # NOTE(review): commit() seldom raises IntegrityError (execute() does);
    # duplicate keys are already suppressed by INSERT IGNORE -- confirm this
    # handler is the intended failure signal.
    try:
        conn.commit()
        print("Successfully inserted data into database")
    except pymysql.IntegrityError:
        print("Failed to Insert data into database")
    conn.close()
def select_from_db(sql, conn):
    """Execute a SELECT statement and return all resulting rows."""
    print("~ SELECTING FROM DATABASE")
    cursor = conn.cursor()
    cursor.execute(sql)
    return cursor.fetchall()
def bar_graph(data, x_title, legend_title):
    """Render a green vertical bar chart from (label, count) rows.

    `data` is a sequence of 2-tuples (e.g. rows from select_from_db);
    does nothing but print a notice when it is empty.
    """
    if not data:
        print("Not enough data to make a bar graph")
        return
    print("********************************")
    print("Status: CREATING BAR GRAPH")
    print("********************************")
    labels = [str(row[0]) for row in data]
    counts = [int(row[1]) for row in data]
    frame = pd.DataFrame({x_title: labels, legend_title: counts})
    frame.plot.bar(x=x_title, y=legend_title, rot=0, color='green', figsize=(15, 7))
    plt.xticks(rotation=90)
    plt.tight_layout()
    plt.show()
def pie_chart(data, title):
    """Render a pie chart from (label, count) rows.

    Parameters
    ----------
    data : sequence of tuple
        Rows of (label, value) pairs, e.g. from select_from_db.
    title : str
        Series name used for the chart.
    """
    if len(data) < 1:
        # Fixed copy-paste error: this message previously said "bar graph".
        print("Not enough data to make a pie chart")
        return
    print("********************************")
    print("Status: CREATING PIE CHART")
    print("********************************")
    names = []
    vals = []
    for i in range(len(data)):
        names.append(str(data[i][0]))
        vals.append(int(data[i][1]))
    df = pd.DataFrame({title: vals}, index=names)
    df.plot.pie(y=title, figsize=(7, 7))
    plt.show()
def line_graph(data, title):
    """Render a line graph from (label, value) rows.

    Parameters
    ----------
    data : sequence of tuple
        Rows of (label, value) pairs, e.g. from select_from_db.
    title : str
        Series name used for the plot.
    """
    if len(data) < 1:
        # Fixed copy-paste error: this message previously said "bar graph".
        print("Not enough data to make a line graph")
        return
    print("********************************")
    print("Status: CREATING LINE GRAPH")
    print("********************************")
    names = []
    vals = []
    for i in range(len(data)):
        names.append(str(data[i][0]))
        vals.append(int(data[i][1]))
    # (Removed stray debug `print(names)` left in the original.)
    df = pd.DataFrame({title: vals}, index=names)
    df.plot.line()
    plt.tight_layout()
    plt.show()
def word_cloud(data):
new_str = ""
for desc in data:
new_str += str(desc).replace("'", "")
wordcloud = WordCloud(width=800, height=400, max_font_size=100, max_words=100, background_color="white").generate(new_str)
plt.figure(figsize=(20,10))
| |
10*m.b623 + 10*m.b624 + 10*m.b625 + 10*m.b626 + 10*m.b627 + 10*m.b628 + 10*m.b629
+ 10*m.b630 + 10*m.b631 + 10*m.b632 + 10*m.b633 + 10*m.b634 + 10*m.b635 + 10*m.b636 + 10*m.b637
+ 10*m.b638 + 10*m.b639 + 10*m.b640 + 10*m.b641 + 10*m.b642 + 10*m.b643 + 10*m.b644 + 10*m.b645
+ 10*m.b646 + 10*m.b647 + 10*m.b648 + 10*m.b649 + 8*m.b650 + 8*m.b651 + 8*m.b652 + 8*m.b653
+ 8*m.b654 + 8*m.b655 + 8*m.b656 + 8*m.b657 + 8*m.b658 + 8*m.b659 + 8*m.b660 + 8*m.b661
+ 8*m.b662 + 8*m.b663 + 8*m.b664 + 8*m.b665 + 8*m.b666 + 8*m.b667 + 8*m.b668 + 8*m.b669
+ 8*m.b670 + 8*m.b671 + 8*m.b672 + 8*m.b673 + 8*m.b674 + 8*m.b675 + 8*m.b676 + 8*m.b677
+ 8*m.b678 + 8*m.b679 + 8*m.b680 + 8*m.b681 + 8*m.b682 + 8*m.b683 + 8*m.b684 + 8*m.b685
+ 8*m.b686 + 8*m.b687 + 8*m.b688 + 8*m.b689 + 8*m.b690 + 8*m.b691 + 8*m.b692 + 8*m.b693
+ 8*m.b694 + 8*m.b695 + 8*m.b696 + 8*m.b697 + 8*m.b698 + 8*m.b699 + 8*m.b700 + 8*m.b701
+ 8*m.b702 + 8*m.b703 + 8*m.b704 + 8*m.b705 + 8*m.b706 + 8*m.b707 + 8*m.b708 + 8*m.b709
+ 8*m.b710 + 8*m.b711 + 8*m.b712 + 8*m.b713 + 8*m.b714 + 8*m.b715 + 8*m.b716 + 8*m.b717
+ 8*m.b718 + 8*m.b719 + 8*m.b720 + 8*m.b721 + 10*m.b722 + 10*m.b723 + 10*m.b724 + 10*m.b725
+ 10*m.b726 + 10*m.b727 + 10*m.b728 + 10*m.b729 + 10*m.b730 + 10*m.b731 + 10*m.b732 + 10*m.b733
+ 10*m.b734 + 10*m.b735 + 10*m.b736 + 10*m.b737 + 10*m.b738 + 10*m.b739 + 10*m.b740 + 10*m.b741
+ 10*m.b742 + 10*m.b743 + 10*m.b744 + 10*m.b745 + 10*m.b746 + 10*m.b747 + 10*m.b748 + 10*m.b749
+ 10*m.b750 + 10*m.b751 + 10*m.b752 + 10*m.b753 + 10*m.b754 + 10*m.b755 + 10*m.b756 + 10*m.b757
+ 10*m.b758 + 10*m.b759 + 10*m.b760 + 10*m.b761 + 10*m.b762 + 10*m.b763 + 10*m.b764 + 10*m.b765
+ 10*m.b766 + 10*m.b767 + 10*m.b768 + 10*m.b769 + 8*m.b770 + 8*m.b771 + 8*m.b772 + 8*m.b773
+ 8*m.b774 + 8*m.b775 + 8*m.b776 + 8*m.b777 + 8*m.b778 + 8*m.b779 + 8*m.b780 + 8*m.b781
+ 8*m.b782 + 8*m.b783 + 8*m.b784 + 8*m.b785 + 8*m.b786 + 8*m.b787 + 8*m.b788 + 8*m.b789
+ 8*m.b790 + 8*m.b791 + 8*m.b792 + 8*m.b793 + 8*m.b794 + 8*m.b795 + 8*m.b796 + 8*m.b797
+ 8*m.b798 + 8*m.b799 + 8*m.b800 + 8*m.b801 + 8*m.b802 + 8*m.b803 + 8*m.b804 + 8*m.b805
+ 8*m.b806 + 8*m.b807 + 8*m.b808 + 8*m.b809 + 8*m.b810 + 8*m.b811 + 8*m.b812 + 8*m.b813
+ 8*m.b814 + 8*m.b815 + 8*m.b816 + 8*m.b817 + 8*m.b818 + 8*m.b819 + 8*m.b820 + 8*m.b821
+ 8*m.b822 + 8*m.b823 + 8*m.b824 + 8*m.b825 + 8*m.b826 + 8*m.b827 + 8*m.b828 + 8*m.b829
+ 8*m.b830 + 8*m.b831 + 8*m.b832 + 8*m.b833 + 8*m.b834 + 8*m.b835 + 8*m.b836 + 8*m.b837
+ 8*m.b838 + 8*m.b839 + 8*m.b840 + 8*m.b841 + 10*m.b842 + 10*m.b843 + 10*m.b844 + 10*m.b845
+ 10*m.b846 + 10*m.b847 + 10*m.b848 + 10*m.b849 + 10*m.b850 + 10*m.b851 + 10*m.b852 + 10*m.b853
+ 10*m.b854 + 10*m.b855 + 10*m.b856 + 10*m.b857 + 10*m.b858 + 10*m.b859 + 10*m.b860 + 10*m.b861
+ 10*m.b862 + 10*m.b863 + 10*m.b864 + 10*m.b865 + 10*m.b866 + 10*m.b867 + 10*m.b868 + 10*m.b869
+ 10*m.b870 + 10*m.b871 + 10*m.b872 + 10*m.b873 + 10*m.b874 + 10*m.b875 + 10*m.b876 + 10*m.b877
+ 10*m.b878 + 10*m.b879 + 10*m.b880 + 10*m.b881 + 10*m.b882 + 10*m.b883 + 10*m.b884 + 10*m.b885
+ 10*m.b886 + 10*m.b887 + 10*m.b888 + 10*m.b889 + 8*m.b890 + 8*m.b891 + 8*m.b892 + 8*m.b893
+ 8*m.b894 + 8*m.b895 + 8*m.b896 + 8*m.b897 + 8*m.b898 + 8*m.b899 + 8*m.b900 + 8*m.b901
+ 8*m.b902 + 8*m.b903 + 8*m.b904 + 8*m.b905 + 8*m.b906 + 8*m.b907 + 8*m.b908 + 8*m.b909
+ 8*m.b910 + 8*m.b911 + 8*m.b912 + 8*m.b913 + 8*m.b914 + 8*m.b915 + 8*m.b916 + 8*m.b917
+ 8*m.b918 + 8*m.b919 + 8*m.b920 + 8*m.b921 + 8*m.b922 + 8*m.b923 + 8*m.b924 + 8*m.b925
+ 8*m.b926 + 8*m.b927 + 8*m.b928 + 8*m.b929 + 8*m.b930 + 8*m.b931 + 8*m.b932 + 8*m.b933
+ 8*m.b934 + 8*m.b935 + 8*m.b936 + 8*m.b937 + 8*m.b938 + 8*m.b939 + 8*m.b940 + 8*m.b941
+ 8*m.b942 + 8*m.b943 + 8*m.b944 + 8*m.b945 + 8*m.b946 + 8*m.b947 + 8*m.b948 + 8*m.b949
+ 8*m.b950 + 8*m.b951 + 8*m.b952 + 8*m.b953 + 8*m.b954 + 8*m.b955 + 8*m.b956 + 8*m.b957
+ 8*m.b958 + 8*m.b959 + 8*m.b960 + 8*m.b961, sense=minimize)
# Demand constraints: for each demand node, the ten supply variables that can
# serve it (indices stepping by 24) must sum to the demand (stated negated on
# both sides, hence the == -<demand> form typical of generated Pyomo models).
m.c2 = Constraint(expr= - m.x2 - m.x26 - m.x50 - m.x74 - m.x98 - m.x122 - m.x146 - m.x170 - m.x194 - m.x218 == -700)
m.c3 = Constraint(expr= - m.x3 - m.x27 - m.x51 - m.x75 - m.x99 - m.x123 - m.x147 - m.x171 - m.x195 - m.x219 == -750)
m.c4 = Constraint(expr= - m.x4 - m.x28 - m.x52 - m.x76 - m.x100 - m.x124 - m.x148 - m.x172 - m.x196 - m.x220 == -850)
m.c5 = Constraint(expr= - m.x5 - m.x29 - m.x53 - m.x77 - m.x101 - m.x125 - m.x149 - m.x173 - m.x197 - m.x221 == -950)
m.c6 = Constraint(expr= - m.x6 - m.x30 - m.x54 - m.x78 - m.x102 - m.x126 - m.x150 - m.x174 - m.x198 - m.x222 == -1000)
m.c7 = Constraint(expr= - m.x7 - m.x31 - m.x55 - m.x79 - m.x103 - m.x127 - m.x151 - m.x175 - m.x199 - m.x223 == -1100)
m.c8 = Constraint(expr= - m.x8 - m.x32 - m.x56 - m.x80 - m.x104 - m.x128 - m.x152 - m.x176 - m.x200 - m.x224 == -1150)
m.c9 = Constraint(expr= - m.x9 - m.x33 - m.x57 - m.x81 - m.x105 - m.x129 - m.x153 - m.x177 - m.x201 - m.x225 == -1200)
m.c10 = Constraint(expr= - m.x10 - m.x34 - m.x58 - m.x82 - m.x106 - m.x130 - m.x154 - m.x178 - m.x202 - m.x226 == -1300)
m.c11 = Constraint(expr= - m.x11 - m.x35 - m.x59 - m.x83 - m.x107 - m.x131 - m.x155 - m.x179 - m.x203 - m.x227 == -1400)
m.c12 = Constraint(expr= - m.x12 - m.x36 - m.x60 - m.x84 - m.x108 - m.x132 - m.x156 - m.x180 - m.x204 - m.x228 == -1450)
m.c13 = Constraint(expr= - m.x13 - m.x37 - m.x61 - m.x85 - m.x109 - m.x133 - m.x157 - m.x181 - m.x205 - m.x229 == -1500)
m.c14 = Constraint(expr= - m.x14 - m.x38 - m.x62 - m.x86 - m.x110 - m.x134 - m.x158 - m.x182 - m.x206 - m.x230 == -1400)
m.c15 = Constraint(expr= - m.x15 - m.x39 - m.x63 - m.x87 - m.x111 - m.x135 - m.x159 - m.x183 - m.x207 - m.x231 == -1300)
m.c16 = Constraint(expr= - m.x16 - m.x40 - m.x64 - m.x88 - m.x112 - m.x136 - m.x160 - m.x184 - m.x208 - m.x232 == -1200)
m.c17 = Constraint(expr= - m.x17 - m.x41 - m.x65 - m.x89 - m.x113 - m.x137 - m.x161 - m.x185 - m.x209 - m.x233 == -1050)
m.c18 = Constraint(expr= - m.x18 - m.x42 - m.x66 - m.x90 - m.x114 - m.x138 - m.x162 - m.x186 - m.x210 - m.x234 == -1000)
m.c19 = Constraint(expr= - m.x19 - m.x43 - m.x67 - m.x91 - m.x115 - m.x139 - m.x163 - m.x187 - m.x211 - m.x235 == -1100)
m.c20 = Constraint(expr= - m.x20 - m.x44 - m.x68 - m.x92 - m.x116 - m.x140 - m.x164 - m.x188 - m.x212 - m.x236 == -1200)
m.c21 = Constraint(expr= - m.x21 - m.x45 - m.x69 - m.x93 - m.x117 - m.x141 - m.x165 - m.x189 - m.x213 - m.x237 == -1400)
m.c22 = Constraint(expr= - m.x22 - m.x46 - m.x70 - m.x94 - m.x118 - m.x142 - m.x166 - m.x190 - m.x214 - m.x238 == -1300)
m.c23 = Constraint(expr= - m.x23 - m.x47 - m.x71 - m.x95 - m.x119 - m.x143 - m.x167 - m.x191 - m.x215 - m.x239 == -1100)
m.c24 = Constraint(expr= - m.x24 - m.x48 - m.x72 - m.x96 - m.x120 - m.x144 - m.x168 - m.x192 - m.x216 - m.x240 == -900)
m.c25 = Constraint(expr= - m.x25 - m.x49 - m.x73 - m.x97 - m.x121 - m.x145 - m.x169 - m.x193 - m.x217 - m.x241 == -800)
# Covering constraint over the binary selection variables (weighted capacities
# must meet the stated minimum).
m.c26 = Constraint(expr= - 455*m.b242 - 455*m.b266 - 130*m.b290 - 130*m.b314 - 162*m.b338 - 80*m.b362 - 85*m.b386
                        - 55*m.b410 - 55*m.b434 - 55*m.b458 <= -770)
m.c27 = Constraint(expr= | |
are handled. When set to "nan", predicted y-values
will be NaN. When set to "clip", predicted y-values will be
set to the value corresponding to the nearest train interval endpoint.
When set to "raise", allow ``interp1d`` to throw ValueError.
References
----------
    .. [1] Transforming Classifier Scores into Accurate Multiclass
        Probability Estimates, Zadrozny & Elkan, (KDD 2002)
    .. [2] Predicting Good Probabilities with Supervised Learning,
        Niculescu-Mizil & Caruana, ICML 2005
"""
    def __init__(self, out_of_bounds="clip"):
        """Store the out-of-range policy ("nan", "clip" or "raise") that is
        forwarded to sklearn's IsotonicRegression in :meth:`fit`."""
        super().__init__()
        self.out_of_bounds = out_of_bounds
def fit(self, X, y, n_jobs=None):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
Returns
-------
self : object
Returns an instance of self.
"""
if X.ndim == 1:
raise ValueError("Calibration training data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
self.isotonic_regressor_ = sklearn.isotonic.IsotonicRegression(increasing=True,
out_of_bounds=self.out_of_bounds)
self.isotonic_regressor_.fit(X[:, 1], y)
elif np.shape(X)[1] > 2:
self.onevsrest_calibrator_ = OneVsRestCalibrator(calibrator=clone(self), n_jobs=n_jobs)
self.onevsrest_calibrator_.fit(X, y)
return self
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
if X.ndim == 1:
raise ValueError("Calibration data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
check_is_fitted(self, "isotonic_regressor_")
p1 = self.isotonic_regressor_.predict(X[:, 1])
return np.column_stack([1 - p1, p1])
elif np.shape(X)[1] > 2:
check_is_fitted(self, "onevsrest_calibrator_")
return self.onevsrest_calibrator_.predict_proba(X)
class HistogramBinning(CalibrationMethod):
    """
    Probability calibration using histogram binning

    Histogram binning [1]_ is a nonparametric approach to probability calibration. Classifier scores are binned into a
    given number of bins either based on fixed width or frequency. Classifier scores are then computed based on the
    empirical frequency of class 1 in each bin.

    Parameters
    ----------
    mode : str, default='equal_width'
        Binning mode used. One of ['equal_width', 'equal_freq'].
    n_bins : int, default=20
        Number of bins to bin classifier scores into.
    input_range : tuple, shape (2,), default=(0, 1)
        Range of the classifier scores.

    .. [1] Zadrozny, B. & Elkan, C. Obtaining calibrated probability estimates from decision trees and naive Bayesian
        classifiers in Proceedings of the 18th International Conference on Machine Learning (ICML, 2001), 609–616.
    """

    def __init__(self, mode='equal_width', n_bins=20, input_range=(0, 1)):
        super().__init__()
        if mode in ['equal_width', 'equal_freq']:
            self.mode = mode
        else:
            # Error message typo fixed ("Choose on of" -> "Choose one of").
            raise ValueError("Mode not recognized. Choose one of 'equal_width', or 'equal_freq'.")
        self.n_bins = n_bins
        # Tuple default replaces the original mutable `[0, 1]` default, which
        # was a shared mutable-default-argument pitfall.
        self.input_range = input_range

    def fit(self, X, y, n_jobs=None):
        """
        Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_classes)
            Training data, i.e. predicted probabilities of the base classifier on the calibration set.
        y : array-like, shape (n_samples,)
            Target classes.
        n_jobs : int or None, optional (default=None)
            The number of jobs to use for the computation in the multiclass
            (one-vs-rest) case. ``None`` means 1; ``-1`` uses all processors.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        if X.ndim == 1:
            raise ValueError("Calibration training data must have shape (n_samples, n_classes).")
        elif np.shape(X)[1] == 2:
            return self._fit_binary(X, y)
        elif np.shape(X)[1] > 2:
            self.onevsrest_calibrator_ = OneVsRestCalibrator(calibrator=clone(self), n_jobs=n_jobs)
            self.onevsrest_calibrator_.fit(X, y)
        return self

    def _fit_binary(self, X, y):
        # Estimate the empirical frequency of class 1 inside each bin of the
        # positive-class score X[:, 1].
        if self.mode == 'equal_width':
            binned_stat = scipy.stats.binned_statistic(x=X[:, 1], values=np.equal(1, y), statistic='mean',
                                                       bins=self.n_bins, range=self.input_range)
            self.prob_class_1 = binned_stat.statistic
            self.binning = binned_stat.bin_edges
        elif self.mode == 'equal_freq':
            # Bin edges are score quantiles, so each bin holds roughly the
            # same number of calibration samples.
            self.binning = np.quantile(X[:, 1],
                                       q=np.linspace(self.input_range[0], self.input_range[1], self.n_bins + 1))
            digitized = np.digitize(X[:, 1], bins=self.binning)
            digitized[digitized == len(self.binning)] = len(self.binning) - 1  # include rightmost edge in partition
            self.prob_class_1 = [y[digitized == i].mean() for i in range(1, len(self.binning))]
        return self

    def predict_proba(self, X):
        """
        Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
        classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_classes)
            The uncalibrated posterior probabilities.

        Returns
        -------
        P : array, shape (n_samples, n_classes)
            The predicted probabilities.
        """
        if X.ndim == 1:
            raise ValueError("Calibration data must have shape (n_samples, n_classes).")
        elif np.shape(X)[1] == 2:
            check_is_fitted(self, ["binning", "prob_class_1"])
            # Find bin of predictions
            digitized = np.digitize(X[:, 1], bins=self.binning)
            digitized[digitized == len(self.binning)] = len(self.binning) - 1  # include rightmost edge in partition
            # Transform to empirical frequency of class 1 in each bin
            p1 = np.array([self.prob_class_1[j] for j in (digitized - 1)])
            # Bins that received no calibration samples have NaN frequency;
            # keep the uncalibrated score for those predictions.
            p1 = np.where(np.isfinite(p1), p1, X[:, 1])
            assert np.all(np.isfinite(p1)), "Predictions are not all finite."
            return np.column_stack([1 - p1, p1])
        elif np.shape(X)[1] > 2:
            check_is_fitted(self, "onevsrest_calibrator_")
            return self.onevsrest_calibrator_.predict_proba(X)
class BayesianBinningQuantiles(CalibrationMethod):
"""
Probability calibration using Bayesian binning into quantiles
Bayesian binning into quantiles [1]_ considers multiple equal frequency binning models and combines them through
Bayesian model averaging. Each binning model :math:`M` is scored according to
:math:`\\text{Score}(M) = P(M) \\cdot P(D | M),` where a uniform prior :math:`P(M)` is assumed. The marginal likelihood
:math:`P(D | M)` has a closed form solution under the assumption of independent binomial class distributions in each
bin with beta priors.
Parameters
----------
C : int, default = 10
Constant controlling the number of binning models.
input_range : list, shape (2,), default=[0, 1]
Range of the scores to calibrate.
    .. [1] Naeini, M. P., Cooper, G. F. & Hauskrecht, M. Obtaining Well Calibrated Probabilities Using Bayesian Binning
        in Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, Austin, Texas, USA.
"""
def __init__(self, C=10, input_range=[0, 1]):
super().__init__()
self.C = C
self.input_range = input_range
def _binning_model_logscore(self, probs, y, partition, N_prime=2):
"""
Compute the log score of a binning model
Each binning model :math:`M` is scored according to :math:`Score(M) = P(M) \\cdot P(D | M),` where a uniform prior
:math:`P(M)` is assumed and the marginal likelihood :math:`P(D | M)` has a closed form solution
under the assumption of a binomial class distribution in each bin with beta priors.
Parameters
----------
probs : array-like, shape (n_samples, )
Predicted posterior probabilities.
y : array-like, shape (n_samples, )
Target classes.
partition : array-like, shape (n_bins + 1, )
Interval partition defining a binning.
N_prime : int, default=2
Equivalent sample size expressing the strength of the belief in the prior distribution.
Returns
-------
log_score : float
Log of Bayesian score for a given binning model
"""
# Setup
B = len(partition) - 1
p = (partition[1:] - partition[:-1]) / 2 + partition[:-1]
# Compute positive and negative samples in given bins
N = np.histogram(probs, bins=partition)[0]
digitized = np.digitize(probs, bins=partition)
digitized[digitized == len(partition)] = len(partition) - 1 # include rightmost edge in partition
m = [y[digitized == i].sum() for i in range(1, len(partition))]
n = N - m
# Compute the parameters of the Beta priors
tiny = np.finfo(np.float).tiny # Avoid scipy.special.gammaln(0), which can arise if bin has zero width
alpha = N_prime / B * p
alpha[alpha == 0] = tiny
beta = N_prime / B * (1 - p)
beta[beta == 0] = tiny
# Prior for a given binning model (uniform)
log_prior = - np.log(self.T)
# Compute the marginal log-likelihood for the given binning model
log_likelihood = np.sum(
scipy.special.gammaln(N_prime / B) + scipy.special.gammaln(m + alpha) + scipy.special.gammaln(n + beta) - (
scipy.special.gammaln(N + N_prime / B) + scipy.special.gammaln(alpha) + scipy.special.gammaln(
beta)))
# Compute score for the given binning model
log_score = log_prior + log_likelihood
return log_score
def fit(self, X, y, n_jobs=None):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training | |
# repo: yogabonito/seir_hawkes
# from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin_l_bfgs_b
from sympy import derive_by_array, exp, lambdify, log, Piecewise, symbols
def exp_intensity_sigma_neq_gamma(history, sum_less_equal=True):
    """
    Build the (exponential) intensity of a (SEIR-)HawkesN process as a
    symbolic expression, for the case sigma != gamma.

    Parameters
    ----------
    history : sequence of float
        Observed event times.
    sum_less_equal : bool, default: True
        If True, events at exactly time t are included in the kernel sum;
        otherwise only events strictly before t contribute.

    Returns
    -------
    exp_intensity_ : sympy.core.mul.Mul
        A sympy expression containing the symbols beta, sigma, gamma, n,
        and t.
    """
    beta, sigma, gamma, n, t = symbols("beta sigma gamma n t")
    # Number of events observed up to (and including) time t.
    counted = sum(Piecewise((1, h <= t), (0, True)) for h in history)
    # Difference-of-exponentials kernel summed over the history.
    kernel_sum = sum(
        Piecewise(
            (
                exp(-sigma * (t - h)) - exp(-gamma * (t - h)),
                h <= t if sum_less_equal else h < t
            ),
            (0, True)
        )
        for h in history
    )
    return (1 - counted / n) * (beta * sigma / (gamma - sigma)) * kernel_sum
def exp_intensity_sigma_eq_gamma(history, sum_less_equal=True):
    """
    Build the (exponential) intensity of a (SEIR-)HawkesN process as a
    symbolic expression, for the degenerate case sigma == gamma.

    Parameters
    ----------
    history : sequence of float
        Observed event times.
    sum_less_equal : bool, default: True
        If True, events at exactly time t are included in the kernel sum;
        otherwise only events strictly before t contribute.

    Returns
    -------
    exp_intensity_ : sympy.core.mul.Mul
        A sympy expression containing the symbols beta, gamma, n, and t.
        No sigma symbol appears because sigma=gamma in this case.
    """
    beta, gamma, n, t = symbols("beta gamma n t")
    # Number of events observed up to (and including) time t.
    counted = sum(Piecewise((1, h <= t), (0, True)) for h in history)
    # (t - h) * exp(-gamma (t - h)) kernel summed over the history.
    kernel_sum = sum(
        Piecewise(
            (
                (t - h) * exp(-gamma * (t - h)),
                h <= t if sum_less_equal else h < t
            ),
            (0, True)
        )
        for h in history
    )
    return (1 - counted / n) * beta * gamma * kernel_sum
def plot_exp_intensity(history, t_max, beta, sigma, gamma, n, step=0.01,
                       width=5.51, height=4, n_xticks=6, fname=None,
                       sum_less_equal=True):
    """
    Plot (or save the plot of) the exponential intensity function from t=0
    until t=t_max.

    Parameters
    ----------
    history : sequence of float
        Event times of the observed process.
    t_max : float
        Define the time horizon of the plot. The time axis will contain
        values from 0 to t_max.
    beta : float
        Parameter beta of the SEIR model.
    sigma : float or None
        Parameter sigma of the SEIR model. If None, then sigma=gamma is
        assumed.
    gamma : float
        Parameter gamma of the SEIR model.
    n : int
        Population size.
    step : float, default: 0.01
        Interval on the x-axis between two successive points.
    width : float, default: 5.51
        Width of the plot.
    height : float, default: 4.0
        Height of the plot.
    n_xticks : int (must be non-negative)
        Number of ticks on the time axis.
    fname : str or None
        Name (without extension) of the file the plot is saved to. If
        `None`, the plot is not saved.
    sum_less_equal : bool
        This arg is used in :func:`exp_intensity`.
    """
    if sigma is None:
        sigma = gamma
    subs_list = [("beta", beta), ("sigma", sigma), ("gamma", gamma),
                 ("n", n)]
    if sigma == gamma:
        exp_intensity = exp_intensity_sigma_eq_gamma(
            history, sum_less_equal=sum_less_equal).subs(subs_list)
    else:
        exp_intensity = exp_intensity_sigma_neq_gamma(
            history, sum_less_equal=sum_less_equal).subs(subs_list)
    exp_intensity = lambdify("t", exp_intensity)
    time = np.arange(0, t_max, step)
    plt.figure(dpi=300, figsize=(width, height))
    plt.plot(time, exp_intensity(time))
    plt.xlabel("$t$")
    plt.xlim(0, t_max)
    plt.xticks(np.linspace(0, t_max, n_xticks))
    plt.ylabel("Intensity")
    plt.grid()
    title = "Intensity of a HawkesN process"
    if history is not None and beta is not None and sigma is not None \
            and gamma is not None and n is not None:
        # "\{" / "\}" in the original were invalid escape sequences
        # (SyntaxWarning on recent Pythons) and rendered a stray backslash
        # in the title; plain braces are what was intended.
        title += " with event history {" \
                 + ",".join(str(i) for i in history[:4]) \
                 + (", ..." if len(history) > 4 else "") \
                 + "} \nand parameters: beta=" + str(beta) \
                 + ", sigma=" + str(sigma) + ", gamma=" + str(gamma) \
                 + ", $N$=" + str(n)
    title += "."
    plt.title(title)
    if fname is not None:
        plt.savefig(fname + ".pdf")
def llf_sigma_neq_gamma(history, sum_less_equal=True):
    """
    Build the log-likelihood of a (SEIR-)HawkesN model symbolically, for
    the case sigma != gamma.

    Parameters
    ----------
    history : sequence of float
        Event times; assumed sorted ascending (the integral term pairs
        history[i] with history[i + 1]) -- TODO confirm, cf. the docstring
        of fit_sigma_neq_gamma.
    sum_less_equal : bool, default: True
        This arg is used in :func:`exp_intensity_sigma_neq_gamma`.

    Returns
    -------
    llf : sympy.core.add.Add
        The log-likelihood function as symbolic expression (containing the
        symbols `beta`, `sigma`, `gamma`, and `n`).
    """
    beta, sigma, gamma, n = symbols("beta sigma gamma n")
    intensity = exp_intensity_sigma_neq_gamma(history, sum_less_equal)
    # for h in self.his:
    #     print("intensity at", h, "is:", intensity.subs("t", h))
    # Events at times <= 0 contribute no log-intensity term.
    first_event = len(history) - sum(1 for t in history if t > 0)
    his_pos = history[first_event:]
    # Sum of log-intensities at the (positive-time) event times.
    addend_sum = sum(log(intensity.subs("t", h)) for h in his_pos)
    # print("SUM PART", addend_sum.subs([("scale", .5), ("decay", .5), ("n", 100)]))
    # Closed-form integral of the intensity, accumulated over consecutive
    # inter-event intervals [history[i], history[i+1]] and all prior events j.
    addend_int = (beta * sigma / (gamma-sigma)) * sum(
        (n - (i + 1)) / n * (
            (
                exp(-sigma * (history[i] - history[j]))
                -
                exp(-sigma * (history[i + 1] - history[j]))
            ) / sigma
            -
            (
                exp(-gamma * (history[i] - history[j]))
                -
                exp(-gamma * (history[i + 1] - history[j]))
            ) / gamma
        )
        for i in range(len(history)-1)
        for j in range(i+1))
    # print("INT PART", addend_int.subs([("scale", .5), ("decay", .5), ("n", 100)]))
    return addend_sum - addend_int
def llf_sigma_eq_gamma(history, sum_less_equal=True):
    """
    Build the log-likelihood of a (SEIR-)HawkesN model symbolically, for
    the degenerate case sigma == gamma.

    Parameters
    ----------
    history : sequence of float
        Event times; assumed sorted ascending (the integral term pairs
        history[i] with history[i + 1]) -- TODO confirm, cf. the docstring
        of fit_sigma_neq_gamma.
    sum_less_equal : bool, default: True
        This arg is used in :meth:`self.exp_intensity_sigma_eq_gamma`.

    Returns
    -------
    llf : sympy.core.add.Add
        The log-likelihood function as symbolic expression (containing the
        symbols `beta`, `gamma`, and `n`).
    """
    beta, gamma, n = symbols("beta gamma n")
    intensity = exp_intensity_sigma_eq_gamma(history, sum_less_equal)
    # for h in history:
    #     print("intensity at", h, "is:", intensity.subs("t", h))
    # Events at times <= 0 contribute no log-intensity term.
    first_event = len(history) - sum(1 for t in history if t > 0)
    his_pos = history[first_event:]
    # Sum of log-intensities at the (positive-time) event times.
    addend_sum = sum(log(intensity.subs("t", h)) for h in his_pos)
    # print("SUM PART", addend_sum.subs([("scale", .5), ("decay", .5), ("n", 100)]))
    # Closed-form integral of the intensity, accumulated over consecutive
    # inter-event intervals [history[i], history[i+1]] and all prior events j.
    addend_int = beta / gamma * sum(
        (n - (i + 1)) / n * (
            (
                exp(-gamma * (history[i] - history[j]))
                * (gamma * (history[i] - history[j]) + 1)
                -
                exp(-gamma * (history[i + 1] - history[j]))
                * (gamma * (history[i + 1] - history[j]) + 1)
            )
        )
        for i in range(len(history)-1)
        for j in range(i+1))
    # print("INT PART", addend_int.subs([("scale", .5), ("decay", .5), ("n", 100)]))
    return addend_sum - addend_int
def llf_gradient_sigma_neq_gamma(history, sum_less_equal=True):
    """
    Symbolic gradient of the log-likelihood for the sigma != gamma case.

    Parameters
    ----------
    history : sequence of float
        Event times, passed through to :func:`llf_sigma_neq_gamma`.
    sum_less_equal : bool, default: True
        This arg is passed to :func:`llf_sigma_neq_gamma`.

    Returns
    -------
    gradient : sympy.Array
        Four entries: the derivatives of the log-likelihood w.r.t. beta,
        sigma, gamma, and N, in that order.
    """
    beta, sigma, gamma, n = symbols("beta sigma gamma n")
    log_likelihood = llf_sigma_neq_gamma(history, sum_less_equal)
    return derive_by_array(log_likelihood, [beta, sigma, gamma, n])
def llf_gradient_sigma_eq_gamma(history, sum_less_equal=True):
    """
    Symbolic gradient of the log-likelihood for the sigma == gamma case.

    Parameters
    ----------
    history : sequence of float
        Event times, passed through to :func:`llf_sigma_eq_gamma`.
    sum_less_equal : bool, default: True
        This arg is passed to :func:`llf_sigma_eq_gamma`.

    Returns
    -------
    gradient : sympy.Array
        Three entries: the derivatives of the log-likelihood w.r.t. beta,
        gamma, and N, in that order. There is no sigma derivative because
        sigma is taken equal to gamma in this case.
    """
    beta, gamma, n = symbols("beta gamma n")
    log_likelihood = llf_sigma_eq_gamma(history, sum_less_equal)
    return derive_by_array(log_likelihood, [beta, gamma, n])
# def fit(scale_start, decay_start, n_start):
# """
# Parameters
# ----------
# scale_start : float
# Starting value for the likelihood maximization.
# decay_start : float
# Starting value for the likelihood maximization.
# n_start : float
# Starting value for the likelihood maximization.
#
# Returns
# -------
# ...
# """
# llf_sym = self.llf()
# llf_grad_sym = self.llf_gradient()
# def negative_llf(scale_decay_n):
# """
# Parameters
# ----------
# scale_decay_n : np.array (shape (3))
# Values for the scale and decay parameter and the parameter N
# a single array.
#
# Returns
# -------
# neg_llf : float
# The negative log-likelihood.
# """
# result = llf_sym.subs([("scale", scale_decay_n[0]),
# ("decay", scale_decay_n[1]),
# ("n", scale_decay_n[2])])
# print("llf", result)
# return result
#
# def negative_llf_gradient(scale_decay_n):
# result = -llf_grad_sym.subs([("scale", scale_decay_n[0]),
# ("decay", scale_decay_n[1]),
# ("n", scale_decay_n[2])])
# print("-grad:", result)
# return np.array(result, dtype=np.float64)
#
# eps = np.finfo(float).eps
#
# return fmin_l_bfgs_b(
# func=negative_llf, # minimize this
# x0=np.array([scale_start, decay_start, n_start]), # initial guess
# fprime=negative_llf_gradient,
# bounds=[(eps, None), (eps, None), (len(self.his), None)],
# iprint=101
# )
def fit_sigma_neq_gamma(history, beta_start=None, sigma_start=None,
gamma_start=None, n_start=None, estimate_n_only=False):
"""
Parameters
----------
history : np.array
1-dimensional array containing the event times in ascending order.
beta_start : float
Starting value for the likelihood optimization.
sigma_start : float
Starting value for the likelihood optimization.
gamma_start : float
Starting value for the likelihood optimization.
n_start : float or None, default: None
Starting value for the likelihood optimization. If None, | |
object to user
user.lives.add(obj.id)
#adding this live object to that employee user id
getus.lives.add(obj.id)
messages.success(request,"Success")
return redirect(dashboard)
else:
messages.error(request,"Change to a Paid Plan First!")
return redirect('dashboard')
else:
return redirect(login)
parms = {
"title":title,
'flag':flag,
'allot':allot,
'date':datetime.date.today(),
}
return render(request,'book.html',parms)
#bmi calculator --
def bmic(request):
    """Render the BMI page and handle BMI (re)calculation.

    GET: for an authenticated user with at least one stored record, shows
    the latest BMI, its classification and the BMI history for charting.
    POST: computes a new BMI from the metric or imperial form fields,
    estimates body fat from BMI/age/gender, stores a new ``bmi`` record and
    redirects back to this view (POST never renders directly).

    Fixes over the previous version: ``bf``/``bmilist``/``bmidate`` were
    undefined (NameError) for anonymous users, indexing ``[0]`` crashed for
    users without records, and a POST with neither weight field raised
    UnboundLocalError.
    """
    headtitle = "BMI | Lifestyles"
    user = request.user
    # Defaults so the template context is always fully defined.
    bmii = 0.0
    state = ""
    bf = 0.0
    bmilist = []
    bmidate = []
    if user.is_authenticated:
        history = bmi.objects.filter(us=user)
        latest = history.order_by('-id').first()
        if latest is not None:
            bmii = latest
            bf = latest.bodyfat
            for record in history:
                bmilist.append(record.bmi)
                bmidate.append(record.date)
            state = _bmi_classification(latest.bmi)
    if request.method == "POST":
        weight_metric = request.POST.get("weight-metric")
        weight_imperial = request.POST.get("weight-imperial")
        weight = height = None
        if weight_metric:
            weight = float(weight_metric)
            height = float(request.POST.get("height-metric"))
        elif weight_imperial:
            # 2.205 lb per kg; feet/inches converted to metres.
            weight = float(weight_imperial) / 2.205
            height = (float(request.POST.get("feet")) * 30.48
                      + float(request.POST.get("inches")) * 2.54) / 100
        if weight is not None:
            cont = bmicalc(weight, height)
            bmii = cont[1]
            state = cont[0]
            user.weight = weight
            user.height = height
            # Deurenberg body-fat estimate; the constant differs by gender.
            if user.gender == "Female":
                bf = (1.20 * bmii) + (0.23 * user.age) - 5.4
            elif user.gender == "Male":
                bf = (1.20 * bmii) + (0.23 * user.age) - 16.2
            bmi.objects.create(us=user, bmi=round(bmii), bodyfat=bf,
                               date=datetime.date.today())
            user.save()
        return redirect('bmic')
    parms = {
        'title': headtitle,
        'bmi': bmii,
        'bf': bf,
        'state': state,
        'bmilist': json.dumps(bmilist),
        'bmidate': json.dumps(bmidate, indent=4, sort_keys=True, default=str),
    }
    return render(request, 'bmi.html', parms)


def _bmi_classification(value):
    """Map a BMI value to its weight-classification label."""
    if value <= 16.0:
        return "Severe Thinness"
    if value <= 17.0:
        return "Moderate Thinness"
    if value <= 18.0:
        return "Mild Thinness"
    if value <= 25.0:
        return "Normal"
    if value <= 30.0:
        return "Overweight"
    if value <= 35.0:
        return "Obese Class I"
    if value <= 40.0:
        return "Obese Class II"
    return "Obese Class III"
#subs plan -- will be added in future only rendering subs page right now
def subs(request):
    """Render the subscription-plans page (plans themselves are TBD)."""
    context = {
        'title': "Subs Plan | Lifestyles",
    }
    return render(request, 'sub.html', context)
#growth page rendered only in this.
def growth(request,id):
    """Render the growth page (BMI and BMR history charts) for user ``id``.

    Viewable by the user themselves or by any active staff member; the
    ``flag`` context entry tells the template whether a staff member is
    viewing. Everyone else is redirected to login.
    """
    try:
        userr = MyUser.objects.get(id=id)
    except ObjectDoesNotExist:
        return render(request,'404.html')
    user = request.user
    if user.is_authenticated == True:
        # owner, or active staff, may view this page
        if (user.id == id) or (user.is_staff == True and user.is_active == True):
            if user.is_staff:
                flag = True
            else:
                flag = False
            title = "Growth | Lifestyles"
            # latest BMI/BMR records for the headline numbers
            # NOTE(review): raises IndexError when the user has no bmi/bmr
            # records yet -- confirm records are always created at signup.
            bmii = bmi.objects.filter(us=userr).order_by('-id')[0]
            bmrr = bmr.objects.filter(us=userr).order_by('-id')[0]
            # full history, serialised below for the chart widgets
            bmiobjlist = bmi.objects.filter(us=userr)
            bmilist = []
            bmidate = []
            for i in bmiobjlist:
                bmilist.append(i.bmi)
                bmidate.append(i.date)
            bmrobjlist = bmr.objects.filter(us=userr)
            bmrlist = []
            bmrdate = []
            for i in bmrobjlist:
                bmrlist.append(i.bmr)
                bmrdate.append(i.date)
            parms = {
                'title':title,
                'bmi':bmii,
                'bmr':bmrr,
                'flag':flag,
                # default=str makes date objects JSON-serialisable
                'bmilist':json.dumps(bmilist),
                'bmidate':json.dumps(bmidate,indent=4, sort_keys=True, default=str),
                'bmrlist':json.dumps(bmrlist),
                'bmrdate':json.dumps(bmrdate,indent=4, sort_keys=True, default=str),
            }
        else:
            messages.error(request,'Not authorized')
            return redirect('login')
    else:
        return redirect('login')
    return render(request,'growth.html',parms)
#grocery function according to user id!
def grocery(request, id):
    """Show the grocery list for user ``id``, split into paid and unpaid
    items, with the running total of the unpaid ones.

    Only the logged-in owner of the list may view it; anyone else now gets
    the 404 page (previously the unauthorized path fell through with an
    undefined template context).
    """
    title = "Grocery | Lifestyles"
    user = request.user
    # Only the authenticated owner may view this grocery list.
    if not (user.is_authenticated and user.id == id):
        return render(request, '404.html')
    try:
        grocery = grocerylist.objects.filter(groid=user.id)
        paid = []
        unpaid = []
        unpaidtot = 0
        for gro in grocery:
            if gro.billitem.paid == True:
                paid.append(gro)
            else:
                unpaid.append(gro)
                unpaidtot += gro.billitem.price
    except ObjectDoesNotExist:
        return render(request, '404.html')
    parms = {
        'title': title,
        'grocery': grocery,
        'paid': paid,
        'unpaid': unpaid,
        'unpaidtot': unpaidtot,
    }
    return render(request, 'grocery.html', parms)
#allocate function to allocate unallocated customers to free dieticians nutritionist and trainers.
def allocate(request,id):
    """Allocate free dieticians, nutritionists and fitness trainers to a
    subscribed customer (staff-only view).

    GET builds the lists of employees with spare capacity for each role the
    customer's plan entitles them to (trainer <= 100 clients, nutritionist
    <= 50, dietician <= 25). POST performs the allocation chosen in the
    form and flips the corresponding ``allot*`` flags on the customer.
    """
    title = "Allocate | Lifestyles"
    user = request.user
    # look up the employee record of the logged-in user
    try:
        emp = employeecontrol.objects.get(id=user)
    except ObjectDoesNotExist:
        return render(request,'404.html')
    # security check: only active staff of type 'employee' may allocate
    if user.is_authenticated and user.is_staff == True and user.is_active == True and emp.employeetype == 'employee':
        # the customer being allocated
        # NOTE(review): unlike the emp lookup above, this .get() is not
        # guarded -- an invalid id raises DoesNotExist; confirm callers
        # always pass a valid customer id.
        target = MyUser.objects.get(id=id)
        # lists of employees with spare capacity, per role
        freenutpeeps = []
        freefitpeeps = []
        freediepeeps = []
        # branch on the customer's subscription plan
        if target.sub.plan == 'Free Plan':
            messages.error(request,"User subbed to Free Plan, Not Applicable!")
            return redirect(edashboard)
        elif target.sub.plan == 'Basic Plan':
            # if no fitness trainer is allotted to that user yet
            if target.allottrain == False:
                totfitpeeps = employeecontrol.objects.filter(employeetype="Fitness Trainer")
                # collect trainers with spare capacity (<= 100 clients)
                for peep in totfitpeeps:
                    counter = peep.alloted.count()
                    if counter <= 100:
                        freefitpeeps.append(peep)
            # if no nutritionist is allotted to the user yet
            if target.allotnutri == False:
                totnutpeeps = employeecontrol.objects.filter(employeetype="Nutritionist")
                for peep in totnutpeeps:
                    counter = peep.alloted.count()
                    if counter <= 50:
                        freenutpeeps.append(peep)
        # Semi-Premium/Premium customers additionally get a dietician
        elif target.sub.plan == 'Semi-Premium Plan' or target.sub.plan == 'Premium Plan':
            if target.allottrain == False:
                totfitpeeps = employeecontrol.objects.filter(employeetype="Fitness Trainer")
                for peep in totfitpeeps:
                    counter = peep.alloted.count()
                    if counter <= 100:
                        freefitpeeps.append(peep)
            if target.allotnutri == False:
                totnutpeeps = employeecontrol.objects.filter(employeetype="Nutritionist")
                for peep in totnutpeeps:
                    counter = peep.alloted.count()
                    if counter <= 50:
                        freenutpeeps.append(peep)
            if target.allotdieti == False:
                totdiepeeps = employeecontrol.objects.filter(employeetype="Dietician")
                for peep in totdiepeeps:
                    counter = peep.alloted.count()
                    if counter <= 25:
                        freediepeeps.append(peep)
        else:
            return render(request,'404.html')
        # form handling: perform the allocation selected by the employee
        if request.method == 'POST':
            if target.sub.plan == 'Basic Plan':
                # NOTE(review): request.POST['fit'] / ['nut'] raise KeyError
                # if the form omits the field -- confirm the template always
                # posts both keys.
                if target.allottrain == False:
                    fit = request.POST['fit']
                else:
                    fit = None
                if target.allotnutri == False:
                    nut = request.POST['nut']
                else:
                    nut = None
                if fit:
                    getus = MyUser.objects.get(username=fit)
                    getuser = employeecontrol.objects.get(id=getus.id)
                    getuser.alloted.add(id)
                    target.allottrain = True
                    target.save()
                    messages.success(request,'Fitness Trainer Added')
                if nut:
                    getus = MyUser.objects.get(username=nut)
                    getuser = employeecontrol.objects.get(id=getus.id)
                    getuser.alloted.add(id)
                    target.allotnutri = True
                    target.save()
                    messages.success(request,'Nutritionist Added')
            elif target.sub.plan == 'Semi-Premium Plan' or target.sub.plan == 'Premium Plan':
                if target.allotdieti == False:
                    diet = request.POST['diet']
                else:
                    diet = None
                if target.allottrain == False:
                    fit = request.POST['fit']
                else:
                    fit = None
                if target.allotnutri == False:
                    nut = request.POST['nut']
                else:
                    nut = None
                if fit:
                    getus = MyUser.objects.get(username=fit)
                    getuser = employeecontrol.objects.get(id=getus.id)
                    getuser.alloted.add(id)
                    target.allottrain = True
                    target.save()
                    messages.success(request,'Fitness Trainer Added')
                if nut:
                    getus = MyUser.objects.get(username=nut)
                    getuser = employeecontrol.objects.get(id=getus.id)
                    getuser.alloted.add(id)
                    target.allotnutri = True
                    target.save()
                    messages.success(request,'Nutritionist Added')
                if diet:
                    getus = MyUser.objects.get(username=diet)
                    getuser = employeecontrol.objects.get(id=getus.id)
                    getuser.alloted.add(id)
                    target.allotdieti = True
                    target.save()
                    messages.success(request,'Dietician Added')
            else:
                messages.error(request,'Error, User not Subscribed!')
        parms = {
            'title':title,
            'target':target,
            'freediepeeps':freediepeeps,
            'freenutpeeps':freenutpeeps,
            'freefitpeeps':freefitpeeps,
        }
    else:
        messages.error(request,"Not Authorized!")
        return render(request,'404.html')
    return render(request, 'allocate.html',parms)
#contact us function to render the html
def contactus(request):
    """Render the static contact-us page."""
    context = {
        "title": "Contact | Lifestyles",
    }
    return render(request, 'contact.html', context)
#check contact entries for employee
def contactchecker(request):
    """List unchecked contact-form entries for active staff members.

    Fix: the unauthorized path previously fell through to the final render
    with ``contacts`` undefined, raising NameError; it now shows the error
    message and the 404 page instead.
    """
    title = "Check Contact | Lifestyles"
    user = request.user
    if not (user.is_authenticated and user.is_staff == True and user.is_active == True):
        messages.error(request, "Not Authorized")
        return render(request, '404.html')
    # only entries not yet marked as checked
    contacts = contact.objects.filter(check=False)
    parms = {
        "title": title,
        'contacts': contacts,
    }
    return render(request, 'contactchecker.html', parms)
#get the specific contact entry!
def contid(request, id):
    """Show a single contact entry and let staff mark it as checked.

    Fixes: the unauthorized path previously reached the final render with
    ``cont`` undefined (NameError); the entry lookup is now guarded like
    the sibling views instead of raising DoesNotExist for a bad id.
    """
    title = "Check Contact | Lifestyles"
    user = request.user
    if not (user.is_authenticated and user.is_staff == True and user.is_active == True):
        messages.error(request, "Not Authorized")
        return render(request, '404.html')
    try:
        cont = contact.objects.get(id=id)
    except ObjectDoesNotExist:
        return render(request, '404.html')
    if request.method == "POST":
        # mark the entry as handled and go back to the list
        cont.check = True
        cont.save()
        messages.success(request, "Checked")
        return redirect(contactchecker)
    parms = {
        "title": title,
        'cont': cont,
    }
    return render(request, 'contid.html', parms)
#get unallocated users for employee and shows free diet, nut and fitness
def unalo(request, emptype):
    """List customers with no allocated employee of type ``emptype``, plus
    the employees of that type that still have spare capacity.

    Staff-only (employeetype 'employee'). Capacity caps per employee:
    Dietician 25, Nutritionist 50, Fitness Trainer 100 clients.

    Fixes: the unauthorized path previously fell through to the render with
    ``parms`` undefined (NameError); the dietician branch compared the
    subscription *object* to a plan-name string (always False) instead of
    ``us.sub.plan`` as used in ``allocate``; the title now uses the
    "Lifestyles" branding like every other view (was "KOWI").
    """
    title = "Unallocated | Lifestyles"
    user = request.user
    try:
        emp = employeecontrol.objects.get(id=user)
    except ObjectDoesNotExist:
        return render(request, '404.html')
    if not (user.is_authenticated and user.is_staff == True
            and user.is_active == True and emp.employeetype == 'employee'):
        return render(request, '404.html')
    # customers still lacking an employee of the requested type
    unal = []
    for us in MyUser.objects.all():
        if us.is_staff:
            continue
        if emptype == 'Dietician' and us.allotdieti == False:
            # only Semi-Premium/Premium plans include a dietician
            if us.sub.plan == 'Semi-Premium Plan' or us.sub.plan == 'Premium Plan':
                unal.append(us)
        elif emptype == 'Nutritionist' and us.allotnutri == False:
            unal.append(us)
        elif emptype == 'Fitness Trainer' and us.allottrain == False:
            unal.append(us)
    # employees of the requested type with spare capacity
    capacity = {'Dietician': 25, 'Nutritionist': 50, 'Fitness Trainer': 100}
    limit = capacity.get(emptype)
    freepeeps = []
    if limit is not None:
        for peep in employeecontrol.objects.filter(employeetype=emptype):
            if peep.alloted.count() <= limit:
                freepeeps.append(peep)
    parms = {
        "title": title,
        'unal': unal,
        'freepeeps': freepeeps,
        'emptype': emptype,
    }
    return render(request, 'unalo.html', parms)
#bmr calculate
def bmrmain(weight, height, age, gender, status):
    """Return daily calorie need from the Harris-Benedict BMR equation
    scaled by an activity multiplier.

    Parameters
    ----------
    weight : float
        Body weight in kilograms.
    height : float
        Body height in metres (converted to cm internally).
    age : float
        Age in years.
    gender : str
        'male' or 'female' (any capitalisation).
    status : str
        Activity-level description, one of the keys below.

    Returns
    -------
    float
        BMR multiplied by the activity factor.

    Raises
    ------
    ValueError
        For an unknown gender or activity status (the previous version
        crashed with UnboundLocalError instead; in particular the male
        branch used the malformed key 'sedentary(little or no exercise',
        so sedentary males always crashed).
    """
    multipliers = {
        'sedentary (little or no exercise)': 1.1,
        # legacy malformed spelling kept for backward compatibility
        'sedentary(little or no exercise': 1.1,
        'lightly active (light exercise/sports 1-3 days/week)': 1.275,
        'moderately active (moderate exercise/sports 3-5 days/week)': 1.35,
        'very active (hard exercise/sports 6-7 days a week)': 1.525,
    }
    heightincm = height * 100
    g = gender.lower()
    if g == 'male':
        bmr = 66.47 + (13.75 * weight) + (5.003 * heightincm) - (6.755 * age)
    elif g == 'female':
        bmr = 655.1 + (9.563 * weight) + (1.85 * heightincm) - (4.676 * age)
    else:
        raise ValueError("unknown gender: %r" % (gender,))
    try:
        return bmr * multipliers[status]
    except KeyError:
        raise ValueError("unknown activity status: %r" % (status,)) from None
#bmr calculator!
def bmrcal(request):
headtitle = "Life Styles | Bmr"
user = | |
k.startswith('rnn')}
self.embed_lead.weight.data.copy_(state_dict[1]['embed.weight'])
#self.rnn_title.load_state_dict(rnn_weight)
#print ('lead model loaded.')
#self.embed_lead.weight.data.copy_(state_dict[1]['embed.weight'])
#self.rnn_lead.load_state_dict(rnn_weight)
#checkpoint = torch.load(art_model_path)
#state_dict = checkpoint['model']
#for k,v in state_dict[1].items():
# print (k,v.shape)
#rnn_weight = {k[4:]:v for k,v in state_dict[1].items() if k.startswith('rnn')}
#self.embed_art.weight.data.copy_(state_dict[1]['embed.weight'])
#self.rnn_art.load_state_dict(rnn_weight)
''' # load pretrained 1 layer RNN weights
#self.rnn = nn.GRU(self.word_dim, embed_size, num_layers, batch_first=True)
self.pool_cap = nn.AdaptiveMaxPool1d(1)
self.pool_art = nn.AdaptiveMaxPool1d(1)
self.pool_title = nn.AdaptiveMaxPool1d(1)
self.pool_lead = nn.AdaptiveMaxPool1d(1)
self.fuse_pool = nn.AdaptiveMaxPool1d(1)
self.non_linear_fc = nn.Sequential(
nn.Linear(1024*4,1024*4),
nn.ReLU(),
nn.Linear(1024*4,1024),
nn.ReLU()
)
#self.attention = Attention(self.embed_size)
#print (self.attention)
# some filters to reduce embed size
#self.cnn_reduce_size = cnn_reduce_size(args)
self.init_weights()
    def init_weights(self):
        """Initialise the word-embedding weights.

        When pre-trained embeddings are disabled -- or when testing or
        resuming, in which case weights come from a checkpoint anyway --
        the embeddings keep their default initialisation. Otherwise a
        pre-computed fastText embedding matrix is loaded from disk and
        copied into ``self.embed``.
        """
        if (not self.pretrained_emb) or (self.test) or (self.resume):
            print ('randomly init embedding weights...')
            #self.embedding_sum.init_weights(self.embeddings.embedding_weights(), self.vocab.vocab())
            #self.embed.weight.data.uniform_(-0.1, 0.1) # original init
            #self.embed_cap.weight.data.uniform_(-0.1, 0.1) # original init
            #self.embed_art.weight.data.uniform_(-0.1, 0.1) # original init
            #self.embed_title.weight.data.uniform_(-0.1, 0.1) # original init
            #self.embed_lead.weight.data.uniform_(-0.1, 0.1) # original init
        else:
            #print ('loading pretrained embedding weights...')
            # NOTE(review): this gensim model is only referenced inside the
            # disabled triple-quoted block below; the active path loads the
            # matrix from .npy instead, so this expensive load looks
            # removable -- confirm before deleting.
            model = gensim.models.KeyedVectors.load_word2vec_format('/home/fangyu/downloads/wiki.de.vec')
            #model = gensim.models.KeyedVectors.load_word2vec_format('/data/retina_data/wiki.de.vec')
            #'/mnt/storage01/fangyu/fasttext_embeddings/wiki.de.vec' )
            #print ('pretrained wordvec loaded')
            '''
            # for previous FastText pre-trained embed
            count1,count2 = 0,0
            emb_matrix = np.random.uniform(-0.1,0.1,(self.vocab_size,self.word_dim))
            print ('emb_matrix:',emb_matrix.shape)
            #emb_matrix = np.zeros((self.vocab_size, self.word_dim),dtype=np.float32) # use original (loaded vocab) vocab size
            for i,(key,value) in enumerate(self.vocab.word2idx.items()):
                #rint (key)
                try:
                    emb_matrix[i,:] = model[key]
                    #print (key,'replaced')
                    count1 += 1
                except:
                    #print (key,'not in pretrained')
                    count2 += 1
            #self.embed.weight.data.copy_( torch.from_numpy(emb_matrix))
            #self.ulm_encoder.encoder.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.ulm_encoder.encoder_with_dropout.embed.weight.data.copy_(torch.from_numpy(emb_matrix))
            #print (emb_matrix.shape)
            #print (self.VDCNN.embedding.weight.data.size())
            #self.VDCNN.embedding.weight.data.copy_(torch.from_numpy(emb_matrix))
            print (count1, 'pre-trained vec used')
            print (count2, 'words in vocab not using pre-trained weights')
            np.save('fasttext_word_embed.npy',emb_matrix)
            print ('fasttext word embbeding saved.')
            #self.embed.weight.data.copy_(torch.from_numpy(emb_matrix))
            '''
            # load from numpy
            # assumes the .npy matrix rows follow self.vocab's word order and
            # match self.embed's dimensions -- TODO confirm
            emb_matrix = np.load('/home/fangyu/data/fasttext_word_embed.npy')
            #self.embed.weight.data.copy_(torch.from_numpy(emb_matrix))
            #if self.freeze_emb:
            #    self.embed.weight.requires_grad = False
            #self.embed = nn.Embedding(self.vocab_size, word_dim)
            #self.embed.weight.data.copy_(torch.from_numpy(new_w))
            #prev_states = self.ulm_encoder.state_dict()
            #prev_states['encoder.weight'].copy_(torch.from_numpy(new_w))
            #prev_states['encoder_with_dropout.embed.weight'].copy_(torch.from_numpy(new_w))
            #self.ulm_encoder.load_state_dict(prev_states)
            self.embed.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.embed_cap.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.embed_art.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.embed_title.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.embed_lead.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.ulm_encoder.encoder.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.ulm_encoder.encoder_with_dropout.embed.weight.data.copy_(torch.from_numpy(emb_matrix))
            #self.gated_cnn.embedding.weight.data.copy_(torch.from_numpy(emb_matrix))
        '''# Gated CNN
        #self.gated_cnn = GatedCNN(32,100000,self.embed_size,10,(5,self.embed_size),64,5,1024)
        self.gated_cnn = torch.load('/home/fangyu/data/gated_cnn_de_wiki_LM_best-32seqlen_55.55.pt')
        #prev_weights = torch.load('/home/fangyu/data/gated_cnn_de_wiki_LM_best-32seqlen_55.55.pt').parameters()
        # substitute last layer
        self.gated_cnn.fc = nn.Linear(2048,1024)
        print (self.gated_cnn)
        itos_gcnn = pickle.load(open('./vocab/wiki_100000_vocab.pkl','rb'))
        stoi_gcnn = collections.defaultdict(lambda:-1, {v:k for k,v in enumerate(itos_gcnn)})
        #print (stoi_ulm)
        #sys.exit(0)
        # load ulm pretrained embed
        for name,param in self.gated_cnn.embedding.named_parameters():
            print (name,param.size())
            embed_weights = param.data # loaded pre-trained embedding
        row_m = embed_weights.mean(0) # to be assigned to embed's rows
        #vs = embed_weights.shape[0] # 60002
        word_dim = embed_weights.shape[1]
        #new_w = np.zeros((vs, word_dim),dtype=np.float32)
        new_w = np.zeros((self.vocab_size, word_dim),dtype=np.float32) # use original (loaded vocab) vocab size
        print ('original vocab_size:',len(self.vocab.word2idx))
        print (new_w.shape)
        count1, count2 = 0,0
        for i,(key,val) in enumerate(self.vocab.word2idx.items()):
            #print (i,key,val)
            r = stoi_gcnn[key]
            if r>= 0:
                count1 += 1
                new_w[i] = embed_weights[r]
            else:
                count2 += 1
                new_w[i] = row_m
            #new_w[i] = embed_weights[r] if r>=0 else row_m
        print ('new word embedding size:',new_w.shape)
        print (count1, 'pre-trained vec used')
        print (count2, 'words in vocab not using pre-trained weights')
        '''
    def forward(self, x):
        """Encode a batch of (caption, article, title, lead) texts into the
        joint embedding space; handles variable size captions.

        Args:
            x: sequence of eight per-sample batches, consumed as
               (ids, offsets) pairs -- x[0]/x[1] caption, x[2]/x[3] article,
               x[4]/x[5] title, x[6]/x[7] lead.
               # layout inferred from the paired indexing below -- TODO confirm

        Returns:
            L2-normalised (and, if ``self.use_abs``, absolute-valued)
            embedding tensor, one row per sample.
        """
        l = len(x[0])
        #print (len(x[0]),len(x[1]),len(x[2]),len(x[3]),len(x[4]),len(x[5]),len(x[6]),len(x[7]))
        # embed each field of each sample via the shared embedding_sum, then
        # stack the per-sample results back into batch tensors
        caps=[self.embedding_sum(x[0][i],x[1][i]) for i in range(l)]
        arts=[self.embedding_sum(x[2][i],x[3][i]) for i in range(l)]
        tits=[self.embedding_sum(x[4][i],x[5][i]) for i in range(l)]
        leds=[self.embedding_sum(x[6][i],x[7][i]) for i in range(l)]
        #arts=[self.embedding_sum(i_o[0],i_o[1]) for i_o in x[1]]
        #tits=[self.embedding_sum(i_o[0],i_o[1]) for i_o in x[2]]
        #leds=[self.embedding_sum(i_o[0],i_o[1]) for i_o in x[3]]
        #caps = [ self.embedding_sum(i,o) for i,o in x[0]]
        #print (tmp.size())
        caps = torch.stack(caps)
        arts = torch.stack(arts)
        tits = torch.stack(tits)
        leds = torch.stack(leds)
        #sys.exit(0)
        #x_cap = self.embedding_sum(x[0])
        #x_art = self.embedding_sum(x[1])
        #x_title = self.embedding_sum(x[2])
        #x_lead = self.embedding_sum(x[3])
        #print(x_cap.size())
        #x_cap = self.embed(x[0])
        #x_art = self.embed(x[1])
        #x_title = self.embed(x[2])
        #x_lead = self.embed(x[3])
        x_cap = caps
        x_art = arts
        x_title = tits
        x_lead = leds
        # one RNN per text field
        out_cap, _ = self.rnn_cap(x_cap)
        out_art, _ = self.rnn_art(x_art)
        out_title, _ = self.rnn_title(x_title)
        out_lead, _ = self.rnn_lead(x_lead)
        # max-pool each RNN output over the time axis
        # NOTE(review): pool_art is reused for title/lead although dedicated
        # pool_title/pool_lead modules exist; harmless since all are
        # AdaptiveMaxPool1d(1), but likely a copy-paste slip.
        pooled_cap = self.pool_cap(out_cap.transpose(1,2)).squeeze(2) # use pooling rather than nn.gather
        pooled_art = self.pool_art(out_art.transpose(1,2)).squeeze(2) # use pooling rather than nn.gather
        pooled_title = self.pool_art(out_title.transpose(1,2)).squeeze(2) # use pooling rather than nn.gather
        pooled_lead = self.pool_art(out_lead.transpose(1,2)).squeeze(2) # use pooling rather than nn.gather
        #concated_feature = torch.stack([pooled_cap,pooled_art,pooled_title,pooled_lead]).transpose(0,1).transpose(1,2)
        # fuse the four field vectors through the MLP head
        concated_feature = torch.cat((pooled_cap,pooled_art,pooled_title,pooled_lead),1)
        #concated_feature = torch.stack([pooled_cap,pooled_art]).transpose(0,1).transpose(1,2)
        out = self.non_linear_fc(concated_feature)
        #out = self.fuse_pool(concated_feature).squeeze(2)
        #print ('pooled art:',pooled_art.size(),'out art:',out_art.size())
        #print (attn_pooled_art.size())
        # Embed word ids to vectors
        #print (x[0].size(),x[1].size())
        #print (lengths[0],lengths[1])
        #print ('embed size:',x.size())
        #x = self.cnn_reduce_size(x)
        #print ('after conv size:',x.size())
        # textcnn
        #out = self.cnn_enc(x)
        ''' TCN
        out = self.cnn_enc(x.transpose(1,2))
        #print (out.size())
        out = self.pool(out).transpose(1,2)
        #print (out.size())
        out = out.squeeze(1)
        #print (out.size())
        '''
        ''' cnn 0.0
        x = self.m1(x.transpose(1,2))
        x = self.m2(x)
        x = self.m3(x)
        x = x.transpose(1,2)
        out = x.view(len(lengths),-1)
        '''
        # RNNs
        #'''# for GRU/LSTM
        #print ('after conv1d size:',x.size())
        #print ('lengths[0]:',lengths[0])
        #lengths = [l/2 for l in lengths] # lengths are now [64,...,64]
        #packed_cap = pack_padded_sequence(x_cap, lengths[0], batch_first=True)
        #packed_art = pack_padded_sequence(x_art, lengths[1], batch_first=True)
        #print ('packed:',Variable(packed).size())
        # Forward propagate RNN
        #out_cap, _ = self.rnn(x_cap)
        #out_art, _ = self.rnn_art(x_art)
        #print ('out_cap:',out_cap.size(),'out_art:',out_art.size())
        # Reshape *final* output to (batch_size, hidden_size)
        #padded_cap = pad_packed_sequence(out_cap, batch_first=True)
        #padded_art = pad_packed_sequence(out_art, batch_first=True)
        #print (padded[0].size())
        #I = torch.LongTensor(lengths).view(-1, 1, 1)
        #I = Variable(I.expand(x.size(0), 1, self.embed_size)-1).cuda()
        #out = torch.gather(padded[0], 1, I).squeeze(1)
        #out_cap = self.pool_cap(out_cap.transpose(1,2)).squeeze(2) # use pooling rather than nn.gather
        #out_art = self.pool_art(out_art.transpose(1,2)).squeeze(2) # use pooling rather than nn.gather
        #print (out_cap.size(),out_art.size())
        #print (out.size())
        #del padded_cap
        #del padded_art
        #del packed_cap
        #del packed_art
        #del I
        #'''# for GRU/LSTM end
        # normalization in the joint embedding space
        out = l2norm(out)
        # take absolute value, used by order embeddings
        if self.use_abs:
            out = torch.abs(out)
        return out
def cosine_sim(im, s):
    """Pairwise similarity between all image and sentence embeddings.

    Returns the (n_images, n_sentences) dot-product matrix; this equals
    cosine similarity because embeddings are L2-normalised upstream.
    """
    sentences_t = s.t()
    return im.mm(sentences_t)
def order_sim(im, s):
    """Order-embeddings similarity measure $max(0, s-im)$.

    Returns an (n_images, n_sentences) score matrix where each entry is the
    negated Euclidean norm of the positive part of (sentence - image).
    """
    n_sent, n_img, dim = s.size(0), im.size(0), s.size(1)
    diff = (s.unsqueeze(1).expand(n_sent, n_img, dim)
            - im.unsqueeze(0).expand(n_sent, n_img, dim))
    violation = diff.clamp(min=0).pow(2).sum(2).sqrt()
    return -violation.t()
class ContrastiveLoss(nn.Module):
    """Hinge-based triplet ranking loss over an image-sentence score matrix.

    ``measure='order'`` selects the order-embedding score; anything else
    falls back to cosine similarity. With ``max_violation`` only the
    hardest negative per query contributes instead of the sum.
    """

    def __init__(self, margin=0, measure=False, max_violation=False):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        self.sim = order_sim if measure == 'order' else cosine_sim
        self.max_violation = max_violation

    def forward(self, im, s):
        # full pairwise image-sentence score matrix
        scores = self.sim(im, s)
        positives = scores.diag().view(im.size(0), 1)
        pos_cols = positives.expand_as(scores)
        pos_rows = positives.t().expand_as(scores)
        # caption retrieval: every column compared against its diagonal
        cost_s = (self.margin + scores - pos_cols).clamp(min=0)
        # image retrieval: every row compared against its diagonal
        cost_im = (self.margin + scores - pos_rows).clamp(min=0)
        # zero out the diagonal (the positive pairs themselves)
        diag_mask = Variable(torch.eye(scores.size(0)) > .5)
        if torch.cuda.is_available():
            diag_mask = diag_mask.cuda()
        cost_s = cost_s.masked_fill_(diag_mask, 0)
        cost_im = cost_im.masked_fill_(diag_mask, 0)
        # optionally keep only the maximum-violating negative per query
        if self.max_violation:
            cost_s = cost_s.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_s.sum() + cost_im.sum()
class VSE(object):
"""
rkiros/uvs model
"""
    def __init__(self, opt, vocab, embeddings):
        """Build the image/text encoders, the contrastive loss and the
        Adam optimizer from the run options.

        Parameters
        ----------
        opt : options object (argparse-style). Optional attributes that may
            be missing from older option files (pretrained_emb, freeze_emb,
            test, resume) are defaulted below.
        vocab : vocabulary object passed through to the text encoder.
        embeddings : pre-computed embeddings passed to the text encoder.
        """
        # tutorials/09 - Image Captioning
        # Build Models
        self.grad_clip = opt.grad_clip
        self.img_enc = EncoderImage(opt.data_name, opt.img_dim, opt.embed_size,
                                    opt.finetune, opt.cnn_type,
                                    use_abs=opt.use_abs,
                                    no_imgnorm=opt.no_imgnorm)
        #vocab = pickle.load(open(opt.vocab_path,'rb'))
        self.vocab = vocab
        self.opt = opt
        self.label = opt.label
        self.text_encoder = opt.text_encoder
        # Backfill optional attributes for option sets created before these
        # flags existed.
        try:
            _ = opt.pretrained_emb
        except:
            opt.pretrained_emb = False
        try:
            _ = opt.freeze_emb
        except:
            opt.freeze_emb = False
        try:
            _ = opt.test
        except:
            # change to true when testing
            opt.test = False
        resume = None
        # NOTE(review): when opt.resume exists but equals '', resume stays
        # None rather than False -- confirm downstream only tests truthiness.
        try:
            if opt.resume != '':
                resume = True
        except:
            resume = False
        both = False
        if self.opt.label == 'both':
            both = True
        # Select the text-encoder implementation requested by the options.
        self.txt_enc = None
        if opt.text_encoder == 'default':
            self.txt_enc = EncoderText(vocab,opt.vocab_size, opt.word_dim,
                                       opt.embed_size, opt.num_layers,
                                       use_abs=opt.use_abs,
                                       pretrained_emb=opt.pretrained_emb,
                                       freeze_emb=opt.freeze_emb,
                                       test = opt.test,
                                       resume = resume,
                                       both=both,
                                       embeddings=embeddings
                                       )
        elif opt.text_encoder in ['caption','article','title','lead']:
            # single text field only
            self.txt_enc = EncoderTextUnimodal(vocab,opt.vocab_size, opt.word_dim,
                                               opt.embed_size, opt.num_layers,
                                               use_abs=opt.use_abs,
                                               pretrained_emb=opt.pretrained_emb,
                                               freeze_emb=opt.freeze_emb,
                                               test = opt.test,
                                               resume = resume,
                                               both=both,
                                               embeddings=embeddings,
                                               label=opt.text_encoder
                                               )
        elif opt.text_encoder == 'transformer' and opt.label != "joint":
            print ('[using Transormer as text encoder!]')
            self.txt_enc = EncoderTextTransformer(vocab, opt.word_dim,
                                                  opt.embed_size, opt.num_layers,
                                                  use_abs=opt.use_abs,
                                                  pretrained_emb=opt.pretrained_emb,
                                                  freeze_emb=opt.freeze_emb,
                                                  test = opt.test,
                                                  resume = resume,
                                                  both=both,
                                                  label=opt.label,
                                                  embeddings=embeddings,
                                                  lang=opt.lang
                                                  )
        elif opt.text_encoder == 'transformer' and opt.label == "joint":
            print ('[using Multimodal Transormer as text encoder!]')
            self.txt_enc = EncoderTextMultimodalTransformer(vocab,opt.vocab_size, opt.word_dim,
                                                            opt.embed_size, opt.num_layers,
                                                            use_abs=opt.use_abs,
                                                            pretrained_emb=opt.pretrained_emb,
                                                            freeze_emb=opt.freeze_emb,
                                                            test = opt.test,
                                                            resume = resume,
                                                            both=both,
                                                            label=opt.label,
                                                            embeddings=embeddings
                                                            )
        if torch.cuda.is_available():
            self.img_enc.cuda()
            self.txt_enc.cuda()
            cudnn.benchmark = True
        # Loss and Optimizer
        self.criterion = ContrastiveLoss(margin=opt.margin,
                                         measure=opt.measure,
                                         max_violation=opt.max_violation)
        # train the text encoder and the image-encoder head; the CNN body
        # is only trained when finetuning is requested
        params = list(self.txt_enc.parameters())
        params += list(self.img_enc.fc.parameters())
        if opt.finetune:
            params += list(self.img_enc.cnn.parameters())
        self.txt_enc.requires_grad = True
        # to avoid pass in freezed params
        params = filter(lambda p: p.requires_grad, params)
        self.params = params
        self.optimizer = torch.optim.Adam(params, lr=opt.learning_rate)
        self.Eiters = 0
def state_dict(self):
state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict()]
return state_dict
def load_state_dict(self, state_dict):
self.img_enc.load_state_dict(state_dict[0])
self.txt_enc.load_state_dict(state_dict[1])
def train_start(self):
"""switch to train mode
"""
self.img_enc.train()
self.txt_enc.train()
def val_start(self):
"""switch to evaluate | |
executor.register_quantize_delegate(cfg, delegator)
block_params.extend(delegator.collect_params())
optimizer = torch.optim.Adam([param for param in block_params if param.requires_grad], lr=self.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [int(self.epochs / 2), int(self.epochs * 2 / 3)])
for _ in tqdm(range(self.epochs), total=self.epochs, desc=f'Optimize block {blk_idx + 1}/{len(blocks)}'):
epoch_loss = {name: 0.0 for name in output_names}
for idx,data in enumerate(block_dataloader):
fp_outputs, quant_input = data
quant_outputs = executor.partial_graph_forward(blk.rps, {blk.sp.inputs[0].name: quant_input}, output_names)
if str(executor._device) != self.collecting_device:
quant_input = quant_input.to(executor._device)
fp_outputs = [output.to(executor._device) for output in fp_outputs]
optimizer.zero_grad()
batch_loss = 0.0
for name, fp_output, quant_output in zip(output_names, fp_outputs, quant_outputs):
loss = torch_mean_square_error(fp_output, quant_output)
batch_loss += loss
epoch_loss[name] += loss.detach().item()
batch_loss.backward()
optimizer.step()
scheduler.step()
for name in epoch_loss:
logger.debug(f'Epoch {_ + 1} || output variable {name} || avg MSE loss = {epoch_loss[name] / (idx + 1) :.5f}')
logger.debug(f'Total avg MSE loss {sum(list(epoch_loss.values())) / (idx + 1) :.5f}')
original_block_loss = sum(list(original_loss.values()))
lsq_block_loss = sum(list(epoch_loss.values())) / (idx + 1)
logger.info(f'Original Loss {original_block_loss :.5f} || Optimized Loss {lsq_block_loss :.5f}')
for cfg, delegator in params.items():
if lsq_block_loss < original_block_loss:
delegator.finalize()
executor.remove_quantize_delegate(cfg)
self.disable_grad(blk)
if original_block_loss < lsq_block_loss:
logger.warning('Loss not improved, abandon trained values...')
self.recover(blk)
def optimize(self, processor: GraphCommandProcessor,
dataloader: Iterable, executor: BaseGraphExecutor,
collate_fn: Callable,
**kwargs) -> None:
graph = processor.graph
logger.info(f'Launch Learned Step Quantization ...')
blocks = find_all_blocks(graph, executor._executing_order)
logger.info(f'Graph partition finished, {len(blocks)} trainable blocks in total')
if len(self.interested_layers) == 0:
logger.info('NO INTERESTED LAYERS GIVENS, ALL BLOCKS WILL BE TUNED BY DEFAULT')
final_blocks = blocks
else:
final_blocks = []
for blk in blocks:
if any([op.name in self.interested_layers for op in blk.rps]):
final_blocks.append(blk)
self.LSQ_optimize(final_blocks, graph, dataloader, collate_fn, executor)
class AdvancedQuantOptimization(TrainingBasedPass):
"""PPQ Advanced Quantization Optimization.
This optimization pass minimize the quantization errors of each subgraph separately
by optimizing its parameters over the calibration set.
Where:
qout = quant( quant(W + W_offset) * quant(X) + quant(bias + bias_offset) )
fout = W * B + bias
error = Mean((qout - fout)^2)
This training procedure tries to solve best W_offest and bias_offset to minimize error
Based on your setting and network size, the training procedure will takes 5~120 minutes.
This function will treat your network as series of subgraphs, you should notice that
ONLY THE OUTPUT VALUE OF A SUBGRAPH IS OPTIMIZED IN THIS PASS,
ACTIVATIONS THAT INSIDE YOUR SUBGRAPH MIGHT BE GREATLY CHANGED!
DO NOT ATTEMPT TO COMPARE THOSE INTERNAL VALUE WITH ITS FP32 VERSION.
We use graph search engine to build subgraph from your network with pattern below,
see function build_block_from_start for detail information
Args:
TrainingBasedPass ([type]): [description]
"""
def __init__(self, collecting_device: str, limit: float = 3.0, steps: int = 5000,
lr: float = 3e-4, interested_outputs: List[str] = None,
interested_layers: List[str] = None,
verbose: bool = True, check: bool = True) -> None:
super().__init__(
name='PPQ Advanced Optimization Procedure',
interested_outputs=interested_outputs, verbose=verbose)
if not PPQ_CONFIG.USING_CUDA_KERNEL:
raise NotImplementedError(
'Advanced Quant Optimization requires compliation of ppq cuda kernels. '
'This method is no longer available with pure torch execution Since PPQ 0.6.4, '
'set PPQ.PPQ_CONFIG.USING_CUDA_KERNEL = True or use LSQ optimization instead.')
self.lr = lr
self.collecting_device = collecting_device
self.check_flag = check
self.limit = limit
self.interested_layers = interested_layers
self.target_step = steps
self._bidx = 0
self._num_of_blocks = 0
if isinstance(self.interested_layers, list) and len(self.interested_layers) == 0:
self.interested_layers = None
def collect_training_data(
self, output_name: str,
dataloader: Iterable,
executor: BaseGraphExecutor,
collate_fn: Callable) -> List[List[torch.Tensor]]:
output_collector = []
for data in dataloader:
if collate_fn is not None: data = collate_fn(data)
[output] = executor.forward(data, output_names=[output_name])
output_collector.append(output.to(self.collecting_device))
return output_collector
@ empty_ppq_cache
def finetune(
    self, quant_inputs: List[torch.Tensor], fp32_outputs: List[torch.Tensor],
    executor: TorchExecutor, block: TrainableBlock,
    dataloader: Iterable, collate_fn:Callable) -> None:
    """Train rounding parameters of one block to match its fp32 outputs.

    Pairs of (quantized block input, fp32 block output) are sampled from a
    random-access memory dataset; the block is re-executed on the quantized
    input and its output is regressed (MSE) towards the fp32 output, plus a
    rounding regularizer. Only parameters of computing ops inside the block
    are made trainable, via RQTDelegator objects registered on the executor.

    After training, delegators are finalized and removed; if the optional
    checkpoint check fails, every delegator withdraws its change. Finally all
    touched parameter tensors are detached from the autograd graph.
    """
    # initialize training environment.
    loss_ema = EMARecorder(beta=0.98)  # smoothed loss for the progress bar
    cur_iter = 0
    delegators = []
    device = executor._executing_context.executing_device
    # Single-input / single-output block assumption: tune sp.inputs[0] -> ep.outputs[0].
    output_var = block.ep.outputs[0]
    input_var = block.sp.inputs[0]
    dataset = RandomMemDataset(data=[[qt, fp] for qt, fp in zip(quant_inputs, fp32_outputs)])
    # create trainable delegators for each parameter.
    trainable_params = []
    for operation in block.rps:
        if operation.is_computing_op and isinstance(operation, QuantableOperation):
            for cfg, var in operation.config_with_variable:
                if not var.is_parameter: continue
                trainable_params.append((var, cfg))
    delegators = [RQTDelegator(config=cfg, limit=self.limit, binding=var) for var, cfg in trainable_params]
    optimizer = torch.optim.Adam(params=[d.binding.value for d in delegators], lr=self.lr)
    # Halves the learning rate every 5000 steps (1 / 2^(t // 5000)).
    shcduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=lambda t: 1 / (1 << (t // 5000)))
    # register all quantization delegators
    for d in delegators: executor.register_quantize_delegate(d.config, d)
    with tqdm(total=self.target_step) as t:
        while cur_iter < self.target_step:
            qt_input, fp_output = dataset.pop()
            qt_input, fp_output = qt_input.to(device), fp_output.to(device)
            # Forward only the block's operations, feeding the cached input.
            qt_output = executor.partial_graph_forward(
                operations=block.rps, feed_dict={input_var.name: qt_input},
                output_names=[output_var.name])[0]
            # compute loss: output-matching MSE plus weighted rounding loss.
            optimizer.zero_grad()
            round_loss = torch.sum(torch.cat([PPQRoundingLoss(d.binding.value, d.config) for d in delegators]))
            quant_loss = torch_mean_square_error(qt_output, fp_output)
            total_loss = quant_loss + round_loss * OPTIM_ADVOPT_RLOSS_MULTIPLIER
            total_loss.backward()
            loss_ema.push(total_loss.item())
            optimizer.step()
            if OPTIM_ADVOPT_USING_SCEHDULER: shcduler.step()
            cur_iter += 1
            # Refresh the progress bar every 50 iterations.
            if cur_iter % 50 == 0:
                t.set_description(desc=f'Block [{self._bidx + 1}/{self._num_of_blocks}]')
                t.set_postfix(loss = loss_ema.pop())
                t.update(50)
    # finalize all delegates and unhook them from the executor.
    for delegator in delegators:
        assert isinstance(delegator, RQTDelegator)
        delegator.finalize()
        executor.remove_quantize_delegate(delegator.config)
    # Check: if the tuned block degrades the checkpoint metric, undo it.
    if self.check_flag:
        if not self.check(executor=executor, dataloader=dataloader, collate_fn=collate_fn):
            for delegator in delegators:
                assert isinstance(delegator, RQTDelegator)
                delegator.withdraw()
    # detach weight so later passes do not carry autograd history.
    for delegator in delegators:
        assert isinstance(delegator, RQTDelegator)
        delegator.binding.value = delegator.binding.value.detach()
def optimize(
    self, processor: GraphCommandProcessor, dataloader: Iterable,
    executor: TorchExecutor, collate_fn: Callable, **kwargs) -> None:
    """Entry point: split the graph into trainable blocks and finetune each.

    For every block the pass (1) dequantizes the whole graph and records the
    block's fp32 outputs, (2) restores quantization and records the block's
    quantized inputs, then (3) calls ``finetune`` to train the block so its
    quantized output matches the fp32 reference.

    Raises:
        PermissionError: If any quantization config in the graph is in a
            BAKED state (baked weights cannot be retrained).
    """
    if self._verbose: self.report()
    if self._interested_outputs is None:
        self._interested_outputs = [name for name in processor.graph.outputs]
    # 'executor' is a sentinel meaning: cache activations on the executor's device.
    if self.collecting_device == 'executor':
        self.collecting_device = executor._device
    graph = processor.graph
    block_builder = BlockBuilder(graph=graph, topo_order=executor._executing_order)
    # check if there is any baked value inside your graph
    for operation in graph.operations.values():
        if isinstance(operation, QuantableOperation):
            for cfg, var in operation.config_with_variable:
                if cfg.state in {QuantizationStates.BAKED, QuantizationStates.PASSIVE_BAKED}:
                    raise PermissionError('Can not apply advanced optimization pass when weight value is baked. '
                                          f'Variable {var.name} has a baked value.')
    # find all operations that need to be finetuned.
    interested_ops = []
    for target_op in graph.topological_sort():
        if isinstance(target_op, QuantableOperation) and target_op.is_computing_op:
            if self.interested_layers is None: interested_ops.append(target_op)
            elif self.interested_layers is not None and target_op.name in self.interested_layers:
                interested_ops.append(target_op)
    # build all blocks, drop overlapped layers.
    blocks, visited = [], set()
    for op in interested_ops:
        if op in visited: continue
        block = block_builder.build(op, limit=OPTIM_ADVOPT_GRAPH_MAXDEPTH)
        # PATCH 20220317 drop block that has no computing op.
        if all([rp.is_computing_op == False for rp in block.rps]): continue
        if block.sp.is_computing_op == False: continue
        # Mark interior ops as visited; sp/ep stay available as another block's start.
        for rp in block.rps:
            if rp != block.sp and rp != block.ep:
                visited.add(rp)
        blocks.append(block)
    # set up checkpoints
    if self.check_flag:
        self.initialize_checkpoints(
            graph=graph, executor=executor,
            dataloader=dataloader, collate_fn=collate_fn)
    for bidx, block in enumerate(blocks):
        self._bidx, self._num_of_blocks = bidx, len(blocks)
        assert isinstance(block, TrainableBlock)
        end_op = block.ep
        block_input = block.sp.inputs[0]
        block_output = end_op.outputs[0]
        # dequantize prefix operations and block operations
        for op in graph.operations.values():
            if isinstance(op, QuantableOperation):
                op.dequantize()
                # can not use dequantize_immediately cause weight has been changed.
                # self.dequantize_immediately(op)
        fp32_outputs = self.collect_training_data(
            output_name=block_output.name, dataloader=dataloader,
            executor=executor, collate_fn=collate_fn)
        # quantize prefix operations and block operations
        for op in graph.operations.values():
            if isinstance(op, QuantableOperation):
                op.restore_quantize_state()
        quant_inputs = self.collect_training_data(
            output_name= block_input.name, dataloader=dataloader,
            executor=executor, collate_fn=collate_fn)
        # start training, solve the best parameters
        self.finetune(
            quant_inputs=quant_inputs, fp32_outputs=fp32_outputs,
            executor=executor, block=block,
            dataloader=dataloader, collate_fn=collate_fn)
        # empty cache.
        fp32_outputs.clear()
        quant_inputs.clear()
        empty_cache()
def report(self):
    """Print the configuration of this pass before tuning starts."""
    summary = [
        '',
        'Check your Configuration Again, PPQ fine-tuning procedure will take a few minutes.',
        '-----------------------------------------',
        f'Learning Rate: {self.lr}',
        f'Block Depth: {OPTIM_ADVOPT_GRAPH_MAXDEPTH}',
        f'Check Flag: {self.check_flag}',
        f'Training Steps: {self.target_step}',
        f'Interested Layers: {self.interested_layers}',
        f'Finetune Limit: {self.limit}',
        f'Cache Device: {self.collecting_device}',
        '-----------------------------------------',
    ]
    for line in summary:
        print(line)
class LearningToCalibPass(TrainingBasedPass):
"""This is an Experimental Pass, do not invoke.
PPQ Learning Based Calibration Pass
For int8 quantization, you need to calibrate or estimate the value range,
i.e, (min, max) of all floating-point tensors in the model.
Choosing the value range carefully is a really important procedure during quantization.
Usually we use methods like MSE, Percentile, KL to solve a good value range
from prospective view, while this pass offers you another possibility.
This pass will make all your quantization range as trainable, and learn to quantize
your network with sampling methods.
ATTENTION: YOU SHALL USE THIS FUNCTION AFTER ACTIVATIONS HAVE BEEN CORRECTLY CALIBRATED
SINCE THIS FUNCTION NEEDS A SCALE AND OFFSET AS INITIALIZED VALUE.
ATTENTION: ONLY CONFIGURATION WITH STATE "ACTIVATED" WILL BE TUNED VIA THIS FUNCTION.
"""
def __init__(self, method: str = 'e-greedy',
calib_act: bool = True, calib_weight: bool = True) -> None:
self.method = method
self.calib_act = calib_act
self.calib_weight = calib_weight
self.target_step = 7500
self.e = 0.1
self.collecting_device = 'cuda'
self.arms = [1, 0.9, 1.1, 0.7, 1.3]
# for power-of-2 policy, | |
"""
Heads are build on Brains.
Like in real life, heads do all the difficult part of receiving stimuli,
being above everything else and not falling apart.
You take brains out and they just do nothing. Lazy.
The most common use case is when one head contains one brain.
But who are we to say what you can and cannot do.
You want two brains and a head within your head? Sure, go crazy.
What we're trying to do here is to keep things relatively simple.
Unfortunately, not everything can be achieved [citation needed] with a serial
topology and at some point you'll need branching.
Heads are "special" in that each is built on networks/brains and will likely need
some special piping when attaching to your agent.
"""
from functools import lru_cache, reduce
from operator import mul
from typing import Callable, List, Optional, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from ai_traineree.networks import NetworkType, NetworkTypeClass
from ai_traineree.networks.bodies import FcNet, NoisyNet
from ai_traineree.types import FeatureType
from ai_traineree.utils import to_numbers_seq
class NetChainer(NetworkType):
    """Chains nets into a one happy family.

    A thin wrapper around ``torch.nn.ModuleList`` that forwards data through
    every contained net in order and exposes a unified API for resetting
    parameters and noise.
    """

    def __init__(self, net_classes: List[NetworkTypeClass], **kwargs):
        super(NetChainer, self).__init__()
        self.nets = nn.ModuleList(net_classes)
        first_layer = self.nets[0].layers[0]
        last_layer = self.nets[-1].layers[-1]
        self.in_features = self._determin_feature_size(first_layer, is_in=True)
        self.out_features = self._determin_feature_size(last_layer, is_in=False)

    @staticmethod
    def _determin_feature_size(layer, is_in=True):
        # Conv layers expose channel counts; other layers expose feature counts.
        if "Conv" in str(layer):
            return layer.in_channels if is_in else layer.out_channels
        return layer.in_features if is_in else layer.out_features

    def reset_parameters(self):
        """Reset parameters of every chained net that supports it."""
        for net in self.nets:
            if hasattr(net, "reset_parameters"):
                net.reset_parameters()

    def reset_noise(self):
        """Reset noise of every chained net that supports it (e.g. NoisyNet)."""
        for net in self.nets:
            if hasattr(net, "reset_noise"):
                net.reset_noise()

    def forward(self, x):
        out = x
        for net in self.nets:
            out = net(out)
        return out
class DoubleCritic(NetworkType):
    """Two independent critics with identical architecture (twin Q networks)."""

    def __init__(self, in_features: Sequence[int], action_size: int, body_cls: NetworkTypeClass, **kwargs):
        super(DoubleCritic, self).__init__()
        hidden_layers = kwargs.pop("hidden_layers", (200, 200))

        def build_critic():
            # Both critics share the same construction arguments.
            return body_cls(
                in_features=in_features, inj_action_size=action_size, hidden_layers=hidden_layers, **kwargs
            )

        self.critic_1 = build_critic()
        self.critic_2 = build_critic()

    def reset_parameters(self):
        """Re-initialize both critics."""
        self.critic_1.reset_parameters()
        self.critic_2.reset_parameters()

    def act(self, states, actions):
        """Return the pair of critic estimates in inference mode."""
        return (self.critic_1.act(states, actions), self.critic_2.act(states, actions))

    def forward(self, state, actions):
        """Return the pair of critic estimates."""
        return (self.critic_1(state, actions), self.critic_2(state, actions))
class DuelingNet(NetworkType):
    """Dueling architecture: separate value and advantage streams combined into Q.

    Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); subtracting the mean keeps the
    decomposition identifiable.
    """

    def __init__(
        self,
        in_features: Sequence[int],
        out_features: Sequence[int],
        hidden_layers: Sequence[int],
        net_fn: Optional[Callable[..., NetworkType]] = None,
        net_class: Optional[NetworkTypeClass] = None,
        **kwargs
    ):
        """
        Parameters:
            in_features (tuple of ints): Dimension of the input features.
            out_features (tuple of ints): Dimension of critic's action. Default: (1,).
            hidden_layers (tuple of ints): Shape of the hidden layers.
            net_fn (optional func): Factory used to build the value/advantage nets.
            net_class (optional class): Class used when `net_fn` is not provided.

        Keyword arguments:
            device: Device where to allocate memory. CPU or CUDA. Default CUDA if available.
        """
        super(DuelingNet, self).__init__()
        device = kwargs.get("device")
        # We only care about the leading size, e.g. (4,) -> 4
        if net_fn is not None:
            # NOTE(review): this branch does not forward `device` to net_fn —
            # assumed intentional (the factory decides placement); confirm.
            self.value_net = net_fn(in_features, (1,), hidden_layers=hidden_layers)
            self.advantage_net = net_fn(in_features, out_features, hidden_layers=hidden_layers)
        elif net_class is not None:
            self.value_net = net_class(in_features, (1,), hidden_layers=hidden_layers, device=device)
            self.advantage_net = net_class(in_features, out_features, hidden_layers=hidden_layers, device=device)
        else:
            self.value_net = FcNet(
                in_features, (1,), hidden_layers=hidden_layers, gate_out=nn.Identity(), device=device
            )
            self.advantage_net = FcNet(
                in_features, out_features, hidden_layers=hidden_layers, gate_out=nn.Identity(), device=device
            )

    @staticmethod
    def _dueling_q(value, advantage):
        """Combine value and advantage streams into Q (mean over dim 1 of advantages)."""
        return value.expand_as(advantage) + (advantage - advantage.mean(1, keepdim=True).expand_as(advantage))

    def reset_parameters(self) -> None:
        """Re-initialize both streams."""
        self.value_net.reset_parameters()
        self.advantage_net.reset_parameters()

    def act(self, x):
        """Inference-mode Q estimate (uses the sub-nets' `act` methods)."""
        value = self.value_net.act(x).float()
        advantage = self.advantage_net.act(x).float()
        return self._dueling_q(value, advantage)

    def forward(self, x):
        """Training-mode Q estimate."""
        value = self.value_net(x).float()
        advantage = self.advantage_net(x).float()
        return self._dueling_q(value, advantage)
class CategoricalNet(NetworkType):
    """
    Computes discrete probability distribution for the state-action Q function.

    CategoricalNet [1] learns significantly different compared to other nets here.
    For this reason it won't be suitable for simple replacement in most (current) agents.
    Please check the Agent whether it supports.
    The algorithm is used in the RainbowNet but not this particular net.

    References:
        .. [1] "A Distributional Perspective on Reinforcement Learning" (2017).
            Link: http://arxiv.org/abs/1707.06887
    """

    def __init__(
        self,
        num_atoms: int = 21,
        v_min: float = -20.0,
        v_max: float = 20.0,
        in_features: Optional[FeatureType] = None,
        out_features: Optional[FeatureType] = None,
        hidden_layers: Sequence[int] = (200, 200),
        net: Optional[NetworkType] = None,
        device: Optional[torch.device] = None,
    ):
        """
        Parameters:
            num_atoms: Number of atoms that discretize the probability distribution.
            v_min: Minimum (edge) value of the shifted distribution.
            v_max: Maximum (edge) value of the shifted distribution.
            in_features: Size of the observation.
            out_features: Length of the output.
            hidden_layers: Shape of the hidden layers that are fully connected networks.
            net: (Optional) A network used for estimation. If `net` is provided then
                `hidden_layers` has no effect.

            *Note* that either `net` or both (`in_features`, `out_features`) need to
            be not None. If `in_features` and `out_features` are provided then the
            default net is created as a fully connected network with `hidden_layers` size.
        """
        super(CategoricalNet, self).__init__()
        self.device = device
        self.num_atoms = num_atoms
        self.v_min = v_min
        self.v_max = v_max
        # Fixed support of the distribution and the distance between atoms.
        self.z_atoms = torch.linspace(v_min, v_max, num_atoms, device=device)
        self.z_delta = self.z_atoms[1] - self.z_atoms[0]

        if net is not None:
            self.net = net
        elif in_features is not None and out_features is not None:
            assert len(out_features) == 1, "Expecting single dimension for output features"
            # One head per action, each emitting `num_atoms` logits.
            _out_features = (out_features[0] * self.num_atoms,)
            self.net = FcNet(in_features, _out_features, hidden_layers=hidden_layers, device=self.device)
        else:
            raise ValueError(
                "CategoricalNet needs to be instantiated either with `net` or (`obs_space` and `action_size`)"
            )
        assert len(self.net.out_features) == 1, "Expecting single dimension for output features"
        self.in_features = self.net.in_features
        # Backward-compatible alias for the original misspelled attribute name.
        self.in_featores = self.in_features
        self.out_features = (self.net.out_features[0] // self.num_atoms, self.num_atoms)
        # Small per-instance cache for `_offset`. Replaces `@lru_cache` on the
        # method, which kept every instance alive for the cache's lifetime (B019).
        self._offset_cache = {}
        self.to(device=device)

    def reset_parameters(self):
        """Reset the underlying network's parameters."""
        self.net.reset_parameters()

    # Backward-compatible alias for the original misspelled method name.
    reset_paramters = reset_parameters

    def forward(self, *args) -> torch.Tensor:
        """
        Passes *args through the net with proper handling.
        """
        return self.net(*args).view((-1,) + self.out_features)

    def _offset(self, batch_size, device=None):
        """Row offsets (batch index * num_atoms) used for flat `index_add_` scatters.

        Bug fix: the `device` argument was previously ignored in favour of
        `self.device`; it is now honoured when provided (None still falls
        back to `self.device`, preserving existing call sites).
        """
        if device is None:
            device = self.device
        key = (batch_size, str(device))
        if key not in self._offset_cache:
            if len(self._offset_cache) >= 5:  # same small footprint as the old lru_cache(maxsize=5)
                self._offset_cache.clear()
            offset = torch.linspace(0, ((batch_size - 1) * self.num_atoms), batch_size, device=device)
            self._offset_cache[key] = offset.unsqueeze(1).expand(batch_size, self.num_atoms)
        return self._offset_cache[key]

    def mean(self, values):
        """Mean of the support weighted by `values` (element-wise product, then mean)."""
        return (self.z_atoms * values).mean()

    def dist_projection(
        self, rewards: torch.Tensor, masks: torch.Tensor, discount: float, prob_next: torch.Tensor
    ) -> torch.Tensor:
        """Project the Bellman-updated distribution back onto the fixed support.

        Parameters:
            rewards: Tensor containing rewards that are used as offsets for each distribution.
            masks: Tensor indicating whether the iteration is terminal. Usually `masks = 1 - dones`.
            discount: Discounting value for added Q distributional estimate. Typically gamma or gamma^(n_steps).
            prob_next: Probability estimates based on transitioned (next) states.
        """
        batch_size = rewards.shape[0]
        Tz = rewards + discount * masks * self.z_atoms.view(1, -1)
        assert Tz.shape == (batch_size, self.num_atoms)
        Tz.clamp_(self.v_min, self.v_max - 1e-4)  # In place. Tiny eps required for num stability e.g. ceil(1.00000001)
        # Fractional position of each projected atom on the support grid.
        b_idx = (Tz - self.v_min) / self.z_delta
        l_idx = b_idx.floor().to(torch.int64)
        u_idx = b_idx.ceil().to(torch.int64)

        # Fix disappearing probability mass when l = b = u (b is int)
        # Checking twice `l_idx == u_idx` is on purpose, since we first want to distribute to the left
        # but in cases we can't go any lower (already on the boundary) we will move them higher.
        l_idx[torch.logical_and(l_idx == u_idx, u_idx > 0)] -= 1
        u_idx[torch.logical_and(l_idx == u_idx, l_idx < self.num_atoms - 1)] += 1

        offset = self._offset(batch_size)
        l_offset_idx = (l_idx + offset).type(torch.int64)
        u_offset_idx = (u_idx + offset).type(torch.int64)

        # Distribute probability of Tz
        m = rewards.new_zeros(batch_size * self.num_atoms)

        # Dealing with indices. *Note* not to forget batches.
        # m[l] = m[l] + p(s[t+n], a*)(u - b)
        m.index_add_(0, l_offset_idx.view(-1), (prob_next * (u_idx.float() - b_idx)).view(-1))
        # m[u] = m[u] + p(s[t+n], a*)(b - l)
        m.index_add_(0, u_offset_idx.view(-1), (prob_next * (b_idx - l_idx.float())).view(-1))

        return m.view(batch_size, self.num_atoms)
class RainbowNet(NetworkType, nn.Module):
"""Rainbow networks combines dueling and categorical networks."""
def __init__(self, in_features: FeatureType, out_features: FeatureType, **kwargs):
"""
Parameters
in_features (tuple of ints): Shape of the input.
out_features (tuple of ints): Shape of the expected output.
Keyword arguments:
hidden_layers (tuple of ints): Shape of fully connected networks. Default: (200, 200).
num_atoms (int): Number of atoms used in estimating distribution. Default: 21.
v_min (float): Value distribution minimum (left most) value. Default -10.
v_max (float): Value distribution maximum (right most) value. Default 10.
noisy (bool): Whether to use Noisy version of FC networks.
pre_network_fn (func): A shared network that is used before *value* and *advantage* networks.
device (None, str or torch.device): Device where to cast the network. Can be assigned with strings, or
directly passing torch.device type. If `None` then it tries to use CUDA then CPU. Default: None.
"""
super(RainbowNet, self).__init__()
self.device = device = kwargs.get("device", None)
self.pre_network = None
if "pre_network_fn" in kwargs:
self.pre_network = kwargs.get("pre_network_fn")(in_features=in_features)
self.pre_netowrk_params = self.pre_network.parameters() # Registers pre_network's parameters to this module
pof = self.pre_network.out_features
in_features = (pof,) if isinstance(pof, int) else pof
self.v_min = float(kwargs.get("v_min", -10))
self.v_max = float(kwargs.get("v_max", 10))
self.num_atoms = num_atoms = int(kwargs.get("num_atoms", 21))
self.z_atoms = torch.linspace(self.v_min, self.v_max, self.num_atoms, device=self.device)
self.z_delta | |
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/assets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_benchmark_rates(self, **kwargs):  # noqa: E501
    """Returns a list of supported USD benchmark rates.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_benchmark_rates(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both paths return the *_with_http_info result directly: the payload
    # when synchronous, the request thread when async_req is truthy.
    kwargs['_return_http_data_only'] = True
    return self.list_benchmark_rates_with_http_info(**kwargs)  # noqa: E501
def list_benchmark_rates_with_http_info(self, **kwargs):  # noqa: E501
    """Returns a list of supported USD benchmark rates.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_benchmark_rates_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject anything that is not a recognized request option.
    allowed = ('async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_benchmark_rates" % key
            )
    options = dict(kwargs)

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v1/rates/benchmark', 'GET',
        {},     # path params
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Empty',  # noqa: E501
        auth_settings=['api_key'],  # Authentication setting
        async_req=options.get('async_req'),
        _return_http_data_only=options.get('_return_http_data_only'),  # noqa: E501
        _preload_content=options.get('_preload_content', True),
        _request_timeout=options.get('_request_timeout'),
        collection_formats={})
def list_exchange_markets(self, exchange, **kwargs):  # noqa: E501
    """Returns a list of markets for a specific exchange.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_exchange_markets(exchange, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str exchange: The 4-char exchange code (see /exchanges) (required)
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both paths return the *_with_http_info result directly: the payload
    # when synchronous, the request thread when async_req is truthy.
    kwargs['_return_http_data_only'] = True
    return self.list_exchange_markets_with_http_info(exchange, **kwargs)  # noqa: E501
def list_exchange_markets_with_http_info(self, exchange, **kwargs):  # noqa: E501
    """Returns a list of markets for a specific exchange.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_exchange_markets_with_http_info(exchange, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str exchange: The 4-char exchange code (see /exchanges) (required)
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject anything that is not a recognized request option.
    allowed = ('exchange', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_exchange_markets" % key
            )
    # verify the required parameter 'exchange' is set
    if exchange is None:
        raise ValueError("Missing the required parameter `exchange` when calling `list_exchange_markets`")  # noqa: E501
    options = dict(kwargs)

    path_params = {'exchange': exchange}  # noqa: E501

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v1/exchanges/{exchange}', 'GET',
        path_params,
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Empty',  # noqa: E501
        auth_settings=['api_key'],  # Authentication setting
        async_req=options.get('async_req'),
        _return_http_data_only=options.get('_return_http_data_only'),  # noqa: E501
        _preload_content=options.get('_preload_content', True),
        _request_timeout=options.get('_request_timeout'),
        collection_formats={})
def list_exchanges(self, **kwargs):  # noqa: E501
    """Returns a list of supported exchanges.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_exchanges(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both paths return the *_with_http_info result directly: the payload
    # when synchronous, the request thread when async_req is truthy.
    kwargs['_return_http_data_only'] = True
    return self.list_exchanges_with_http_info(**kwargs)  # noqa: E501
def list_exchanges_with_http_info(self, **kwargs):  # noqa: E501
    """Returns a list of supported exchanges.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_exchanges_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject anything that is not a recognized request option.
    allowed = ('async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_exchanges" % key
            )
    options = dict(kwargs)

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v1/exchanges', 'GET',
        {},     # path params
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Empty',  # noqa: E501
        auth_settings=['api_key'],  # Authentication setting
        async_req=options.get('async_req'),
        _return_http_data_only=options.get('_return_http_data_only'),  # noqa: E501
        _preload_content=options.get('_preload_content', True),
        _request_timeout=options.get('_request_timeout'),
        collection_formats={})
def list_markets(self, **kwargs):  # noqa: E501
    """Returns a list of supported markets.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_markets(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both paths return the *_with_http_info result directly: the payload
    # when synchronous, the request thread when async_req is truthy.
    kwargs['_return_http_data_only'] = True
    return self.list_markets_with_http_info(**kwargs)  # noqa: E501
def list_markets_with_http_info(self, **kwargs):  # noqa: E501
    """Returns a list of supported markets.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_markets_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject anything that is not a recognized request option.
    allowed = ('async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_markets" % key
            )
    options = dict(kwargs)

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v1/markets', 'GET',
        {},     # path params
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Empty',  # noqa: E501
        auth_settings=['api_key'],  # Authentication setting
        async_req=options.get('async_req'),
        _return_http_data_only=options.get('_return_http_data_only'),  # noqa: E501
        _preload_content=options.get('_preload_content', True),
        _request_timeout=options.get('_request_timeout'),
        collection_formats={})
def list_pair_markets(self, pair, **kwargs):  # noqa: E501
    """Returns a list of markets for a specific asset pair.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.list_pair_markets(pair, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pair: The asset pair (see /pairs) (required)
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both paths return the *_with_http_info result directly: the payload
    # when synchronous, the request thread when async_req is truthy.
    kwargs['_return_http_data_only'] = True
    return self.list_pair_markets_with_http_info(pair, **kwargs)  # noqa: E501
def list_pair_markets_with_http_info(self, pair, **kwargs): # noqa: E501
"""Returns a list of markets for a specific asset pair. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_pair_markets_with_http_info(pair, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pair: The asset pair (see /pairs) (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['pair'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_pair_markets" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the | |
import os
import subprocess
import pickle
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sc
import pathlib
import threading
import concurrent.futures as cf
from scipy.signal import medfilt
import csv
import tikzplotlib
import encoders_comparison_tool as enc
import video_info as vi
from bj_delta import bj_delta, bj_delta_akima
# Colors in terminal
class bcolors:
    """ANSI SGR escape sequences for coloring/formatting terminal output.

    Usage pattern: print(bcolors.WARNING + "msg" + bcolors.ENDC) — always
    terminate with ENDC to reset the terminal attributes.
    """

    HEADER = '\033[95m'  # bright magenta
    OKBLUE = '\033[94m'  # bright blue
    OKCYAN = '\033[96m'  # bright cyan
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'  # bold text
    UNDERLINE = '\033[4m'  # underlined text
useage_log_suffix = "_useage.log"
psnr_log_suffix = "-psnr_logfile.txt"
ssim_log_suffix = "-ssim_logfile.txt"
vmaf_log_suffix = "-vmaf_logfile.txt"
videofiles = []
codecs = ["av1", "svtav1", "vp9", "x264", "x265", "vvc"]
codecs_short = {"av1": "AV1", "svtav1": "SVT-AV1", "vp9": "VP9", "x264": "x264", "x265": "x265", "vvc": "VVenC",}
sequences = ["Netflix Aerial yuv420p10le 60fps",
"ShakeNDry yuv420p 30fps",
"SunBath yuv420p10le 50fps",
"Tree Shade yuv420p10le 30fps",
"Sintel2 yuv420p10le 24fps",
]
preset = ["preset"]
top_dir = "/run/media/ondra/video/test2/"
# top_dir = "/run/media/ondra/61597e72-9c9f-4edd-afab-110602521f55/test2/"
graphics_dir = "graphs/"
sequences_short = {
"Netflix Aerial yuv420p10le 60fps": "Aerial",
"ShakeNDry yuv420p 30fps": "ShakeNDry",
"SunBath yuv420p10le 50fps": "SunBath",
"Tree Shade yuv420p10le 30fps": "Tree Shade",
"Sintel2 yuv420p10le 24fps": "Sintel2",
}
series_labels = {
'av1-cpu-used_3-': "AV1 cpu-used 3",
'av1-cpu-used_4-': "AV1 cpu-used 4",
'av1-cpu-used_5-': "AV1 cpu-used 5",
'av1-cpu-used_6-': "AV1 cpu-used 6",
'svtav1-preset_3-': "SVT-AV1 preset 3",
'svtav1-preset_5-': "SVT-AV1 preset 5",
'svtav1-preset_7-': "SVT-AV1 preset 7",
'svtav1-preset_9-': "SVT-AV1 preset 9",
'svtav1-preset_11-': "SVT-AV1 preset 11",
'svtav1-preset_13-': "SVT-AV1 preset 13",
'vp9-rc_0-': "VP9 RC 0",
'vp9-cpu-used_0-': "VP9 cpu-used 0",
'vp9-cpu-used_2-': "VP9 cpu-used 2",
'vp9-cpu-used_4-': "VP9 cpu-used 4",
# 'x264-preset_ultrafast-': "x264 ultrafast",
'x264-preset_fast-': "x264 fast",
'x264-preset_medium-': "x264 medium",
'x264-preset_slow-': "x264 slow",
'x264-preset_veryslow-': "x264 veryslow",
'x264-preset_placebo-': "x264 placebo",
'x265-preset_ultrafast-': "x265 ultrafast",
'x265-preset_fast-': "x265 fast",
'x265-preset_medium-': "x265 medium",
'x265-preset_slow-': "x265 slow",
'x265-preset_veryslow-': "x265 veryslow",
'vvc-preset_faster-': "VVenC faster",
'vvc-preset_fast-': "VVenC fast",
'vvc-preset_medium-': "VVenC medium",
}
psnr_lim = {
"Netflix Aerial yuv420p10le 60fps": (33, 47),
"ShakeNDry yuv420p 30fps": (33, 44),
"Sintel2 yuv420p10le 24fps": (40, 60),
"SunBath yuv420p10le 50fps": (35, 55),
"Tree Shade yuv420p10le 30fps": (35, 45),
}
ssim_lim = {
"Netflix Aerial yuv420p10le 60fps": (0.9, 1),
"ShakeNDry yuv420p 30fps": (0.9, 0.98),
"Sintel2 yuv420p10le 24fps": (0.98, 1),
"SunBath yuv420p10le 50fps": (0.94, 1),
"Tree Shade yuv420p10le 30fps": (0.92, 0.99),
}
msssim_lim = {
"Netflix Aerial yuv420p10le 60fps": (0.9, 1),
"ShakeNDry yuv420p 30fps": (0.92, 1),
"Sintel2 yuv420p10le 24fps": (0.98, 1),
"SunBath yuv420p10le 50fps": (0.94, 1),
"Tree Shade yuv420p10le 30fps": (0.96, 1),
}
vmaf_lim = {
"Netflix Aerial yuv420p10le 60fps": (60, 100),
"ShakeNDry yuv420p 30fps": (70, 100),
"Sintel2 yuv420p10le 24fps": (70, 100),
"SunBath yuv420p10le 50fps": (70, 100),
"Tree Shade yuv420p10le 30fps": (80, 100),
}
bitrate_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 150),
"ShakeNDry yuv420p 30fps": (0, 200),
"Sintel2 yuv420p10le 24fps": (0, 45),
"SunBath yuv420p10le 50fps": (0, 150),
"Tree Shade yuv420p10le 30fps": (0, 200),
}
bitrate_lim_log = {
"Netflix Aerial yuv420p10le 60fps": (0.1, 1000),
"ShakeNDry yuv420p 30fps": (0.1, 1000),
"SunBath yuv420p10le 50fps": (0.1, 1000),
"Tree Shade yuv420p10le 30fps": (0.1, 1000),
"Sintel2 yuv420p10le 24fps": (0.1, 100),
}
processing_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 50000),
"ShakeNDry yuv420p 30fps": (0, 8000),
"SunBath yuv420p10le 50fps": (0, 5000),
"Tree Shade yuv420p10le 30fps": (0, 12000),
"Sintel2 yuv420p10le 24fps": (0, 12000),
}
processing_lim_log = {
"Netflix Aerial yuv420p10le 60fps": (1, 1000),
"ShakeNDry yuv420p 30fps": (1, 10000),
"SunBath yuv420p10le 50fps": (1, 1000),
"Tree Shade yuv420p10le 30fps": (1, 1000),
"Sintel2 yuv420p10le 24fps": (1, 1000),
}
cpu_time_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 200000),
"ShakeNDry yuv420p 30fps": (0, 60000),
"SunBath yuv420p10le 50fps": (0, 35000),
"Tree Shade yuv420p10le 30fps": (0, 70000),
"Sintel2 yuv420p10le 24fps": (0, 70000),
}
cpu_time_lim_log = {
"Netflix Aerial yuv420p10le 60fps": (0.1, 1000),
"ShakeNDry yuv420p 30fps": (0.1, 10000),
"SunBath yuv420p10le 50fps": (0.1, 1000),
"Tree Shade yuv420p10le 30fps": (0.1, 1000),
"Sintel2 yuv420p10le 24fps": (0.1, 1000),
}
cpu_fps_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, 200),
"ShakeNDry yuv420p 30fps": (0, 200),
"SunBath yuv420p10le 50fps": (0, 200),
"Tree Shade yuv420p10le 30fps": (0, 200),
"Sintel2 yuv420p10le 24fps": (0, 200),
}
decode_fps_lim = {
"Netflix Aerial yuv420p10le 60fps": (0, None),
"ShakeNDry yuv420p 30fps": (0, 60),
"SunBath yuv420p10le 50fps": (0, 60),
"Tree Shade yuv420p10le 30fps": (0, 60),
"Sintel2 yuv420p10le 24fps": (0, 60),
}
BJ1_serie = "x264-preset_placebo-"
BD_xname = "avg_bitrate_mb"
BD_ynames = ["psnr_avg", "ssim_avg", "msssim_avg", "vmaf_avg"]
BD_names = []
for n in BD_ynames:
# BD_names.append("bd_" + n)
BD_names.append("bd_rate_" + n)
encode_excluded_states = ["measuring decode"]
speeds_table = {
"placebo": 0,
"slow": 3,
"slower": 2,
"veryslow": 1,
"medium": 4,
"fast": 5,
"faster": 6,
"veryfast": 7,
"superfast": 8,
"ultrafast": 9,
}
binaries = {
"ffprobe": "/usr/bin/ffprobe",
"ffmpeg": "/usr/bin/ffmpeg"
}
vi.set_defaults(binaries)
def video_stream_size(videofile_path):
    """Return the size of the primary video stream of *videofile_path* in KiB.

    A raw ``.266`` (VVC) bitstream contains only the video stream, so its
    file size is returned directly.  For container formats the stream is
    remuxed with ffmpeg to the null muxer and the stream size is scraped
    from the final summary line of ffmpeg's stderr.  The probed value is
    cached in a "<videofile>.stream_size" side file and reused later.

    Raises ValueError (carrying ffmpeg's full stderr) when the summary
    line cannot be parsed.
    """
    if videofile_path.endswith(".266"):
        # The raw bitstream *is* the video stream; no remuxing necessary.
        # (The original rebuilt the same path via slicing: path[0:-4] + ".266".)
        return os.path.getsize(videofile_path) / 1024  # in KiB
    log = videofile_path + ".stream_size"
    if os.path.exists(log):
        with open(log, "r") as f:
            s = f.readline()
            print("stream size hit!")
            return float(s)
    result = subprocess.run(
        [
            "ffmpeg",
            "-hide_banner",
            "-i", videofile_path,
            "-map", "0:v:0",
            "-c", "copy",
            "-f", "null", "-"
        ],
        capture_output=True,
        text=True,
    )
    try:
        # Scrape the second-to-last stderr line ("video:<N>kB ..." totals):
        # first token of that line, digits between ':' and the trailing "kB".
        # NOTE(review): tied to ffmpeg's exact output format -- confirm
        # against the ffmpeg build in use.
        size = (result.stderr.rsplit("\n")[-2].rsplit(" ")[0].rsplit(":")[1][0:-2])
        s = float(size)  # in KiB
    except ValueError:
        # Parsing failed (e.g. ffmpeg errored out): surface its stderr.
        raise ValueError(result.stderr.rstrip("\n"))
    with open(log, "w") as f:
        f.write(str(s))
    return s
def video_stream_length(videofile_path):
    """Return the duration of the video stream in seconds, cached on disk.

    Raw .266 bitstreams are probed via their .mkv sibling container.  The
    probed value is cached in a "<videofile>.stream_length" side file and
    reused on subsequent calls.
    """
    # Probe the .mkv sibling for raw VVC bitstreams, the file itself otherwise.
    probe_target = videofile_path[:-4] + ".mkv" if videofile_path.endswith(".266") else videofile_path
    cache_path = probe_target + ".stream_length"
    if os.path.exists(cache_path):
        with open(cache_path, "r") as cache:
            cached = cache.readline()
            print("stream length hit!")
            return float(cached)
    length = vi.video_length_seconds(probe_target)
    with open(cache_path, "w") as cache:
        cache.write(str(length))
    return length
def video_stream_frames(videofile_path):
    """Return the number of frames in the video stream, cached on disk.

    Raw .266 bitstreams are probed via their .mkv sibling container.  The
    frame count is cached in a "<videofile>.stream_frames" side file and
    reused on subsequent calls.
    """
    # Probe the .mkv sibling for raw VVC bitstreams, the file itself otherwise.
    probe_target = videofile_path[:-4] + ".mkv" if videofile_path.endswith(".266") else videofile_path
    cache_path = probe_target + ".stream_frames"
    if os.path.exists(cache_path):
        with open(cache_path, "r") as cache:
            cached = cache.readline()
            print("stream framenum hit!")
            return int(cached)
    frame_count = vi.video_frames(probe_target)
    with open(cache_path, "w") as cache:
        cache.write(str(frame_count))
    return frame_count
def series_label(key, sequence=None):
    """Return the human-readable legend label for a measurement *key*.

    key: identifier that embeds one of the ``series_labels`` prefixes
        (e.g. "x264-preset_slow-...").
    sequence: optional sequence name; when given, *key* must contain it,
        otherwise None is returned (callers appear to use this as a
        filter -- TODO confirm).

    Raises KeyError carrying the offending key when no known series
    prefix is found (the original raised a bare, message-less KeyError).
    """
    if sequence is None or sequence in key:
        for prefix in series_labels:
            if prefix in key:
                return series_labels[prefix]
        raise KeyError(key)
    # NOTE(review): a non-matching sequence silently yields None -- looks
    # intentional (filtering), but verify against callers.
    return None
'''
def simple_plot(x, y, xlabel, ylabel, savefile, minxlim=True):
i1, ax1 = plt.subplots()
plt.plot(x, y)
ax1.set(xlabel=xlabel, ylabel=ylabel)
if minxlim:
ax1.set_xlim(left=min(x), right=max(x))
ax1.grid()
plt.savefig(f"{savefile}.svg")
plt.savefig(f"{savefile}.pgf")
tikzplotlib.save(f"{savefile}.tex")
plt.close(i1)
def composite_plot(mxy, mlegend, xlabel, ylabel, savefile, xlim=None, ylim=None):
i1, ax1 = plt.subplots()
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
plt.plot(x, y, label=mlegend[next(i)], marker="+")
ax1.set(xlabel=xlabel, ylabel=ylabel)
plt.legend()
if xlim is True:
ax1.set_xlim(left=min(x), right=max(x))
elif xlim is not None:
ax1.set_xlim(left=xlim[0], right=xlim[1])
if ylim is True:
ax1.set_ylim(bottom=min(y), top=max(y))
elif ylim is not None:
ax1.set_ylim(bottom=ylim[0], top=ylim[1])
ax1.grid()
p = os.path.split(savefile)
enc.create_dir(p[0] + '/svg/')
enc.create_dir(p[0] + '/png/')
enc.create_dir(p[0] + '/tex/')
plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.close(i1)
def composite_plot_smooth(mxy, mlegend, xlabel, ylabel, savefile, xlim=None, ylim=None):
i1, ax1 = plt.subplots()
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
c = plt.scatter(x, y, label=mlegend[next(i)], marker="+")
colr = c.get_facecolor()[0]
lx = np.log(x)
p = sc.interpolate.Akima1DInterpolator(lx, y)
x_smooth = np.linspace(min(x), max(x), 1000)
y_smooth = p(np.log(x_smooth))
plt.plot(x_smooth, y_smooth, color=colr)
ax1.set(xlabel=xlabel, ylabel=ylabel)
plt.legend()
if xlim is True:
ax1.set_xlim(left=x.min(), right=x.max())
elif xlim is not None:
ax1.set_xlim(left=xlim[0], right=xlim[1])
if ylim is True:
ax1.set_ylim(bottom=y.min(), top=y.max())
elif ylim is not None:
ax1.set_ylim(bottom=ylim[0], top=ylim[1])
ax1.grid()
p = os.path.split(savefile)
enc.create_dir(p[0] + '/svg/')
enc.create_dir(p[0] + '/png/')
enc.create_dir(p[0] + '/tex/')
plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.close(i1)
'''
def plot_graphs(data, sequence=None, codec=None):
    """Render quality-vs-bitrate and speed graphs into graphics_dir.

    data: measurement table handed to df_to_plot for each metric pair.
    sequence: full sequence name (key into sequences_short and the *_lim
        axis-limit dicts).
    codec: codec subdirectory name; together with sequence it only
        controls the output directory layout.

    NOTE(review): the *_lim lookups below index with ``sequence``
    directly, so sequence=None will raise KeyError despite being the
    default -- confirm all callers pass a sequence.
    """
    # Output directory: graphics_dir[/<sequence short name>][/<codec>]/.
    if sequence is None and codec is None:
        out = graphics_dir
    elif sequence is None:
        out = graphics_dir + codec + "/"
    elif codec is None:
        out = graphics_dir + sequences_short[sequence] + "/"
    else:
        out = graphics_dir + sequences_short[sequence] + "/" + codec + "/"
    # matplotlib legend location code 4 == "lower right".
    lower_right = 4
    # One linear and (where enabled) one log-x plot per quality metric.
    d = df_to_plot(data, "avg_bitrate_mb", "psnr_avg")
    composite_plot(d, "Bitrate [Mbit/s]", "PSNR (YUV) [dB]", out + "psnr", xlim=bitrate_lim[sequence], ylim=psnr_lim[sequence], legend_loc=lower_right)
    composite_plot(d, "Bitrate [Mbit/s]", "PSNR (YUV) [dB]", out + "psnr_log", ylim=psnr_lim[sequence], xlog=True, legend_loc=lower_right)
    d = df_to_plot(data, "avg_bitrate_mb", "ssim_avg")
    composite_plot(d, "Bitrate [Mbit/s]", "SSIM", out + "ssim", xlim=bitrate_lim[sequence], ylim=ssim_lim[sequence], legend_loc=lower_right)
    # composite_plot(d, "Bitrate [Mbit/s]", "SSIM", out + "ssim_log", ylim=ssim_lim[sequence], xlog=True, legend_loc=lower_right)
    d = df_to_plot(data, "avg_bitrate_mb", "msssim_avg")
    composite_plot(d, "Bitrate [Mbit/s]", "MS-SSIM", out + "msssim", xlim=bitrate_lim[sequence], ylim=msssim_lim[sequence], legend_loc=lower_right)
    # composite_plot(d, "Bitrate [Mbit/s]", "MS-SSIM", out + "msssim_log", ylim=msssim_lim[sequence], xlog=True, legend_loc=lower_right)
    d = df_to_plot(data, "avg_bitrate_mb", "vmaf_avg")
    composite_plot(d, "Bitrate [Mbit/s]", "VMAF", out + "vmaf", xlim=bitrate_lim[sequence], ylim=vmaf_lim[sequence], legend_loc=lower_right)
    # composite_plot(d, "Bitrate [Mbit/s]", "VMAF", out + "vmaf_log", ylim=vmaf_lim[sequence], xlog=True, legend_loc=lower_right)
    # Decode speed and processing time (axis labels are Czech: "decoding
    # speed [frame/s]" and "CPU time [s/frame]").
    d = df_to_plot(data, "avg_bitrate_mb", "decode_time_fps")
    composite_plot(d, "Bitrate [Mbit/s]", "Rychlost dekódování [frame/s]", out + "decode", ylim=(0, None), xlim=bitrate_lim_log[sequence], xlog=True)
    d = df_to_plot(data, "avg_bitrate_mb", "total_time_fps")
    composite_plot(d, "Bitrate [Mbit/s]", "Procesorový čas [s/frame]", out + "encode", ylim=(0.1, None), xlim=bitrate_lim_log[sequence], xlog=True, ylog=True)
def df_to_plot(data, x_name, y_name):
tables = [t[[x_name, y_name]].rename(columns={x_name: | |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import math
from enum import Enum, unique
from pathlib import Path
from typing import Any, Dict, List, Set, Tuple
import boto3
from botocore.exceptions import ClientError
from packaging import version
from packaging.version import Version
from intelliflow.core.platform.definitions.compute import ComputeFailedSessionStateType, ComputeSessionStateType
from intelliflow.core.signal_processing.definitions.compute_defs import Lang
logger = logging.getLogger(__name__)
JOB_ARN_FORMAT = "arn:aws:glue:{}:{}:job/{}"
# refer for this list
# https://docs.aws.amazon.com/glue/latest/dg/reduced-start-times-spark-etl-jobs.html#reduced-start-times-limitations
# RheocerOS module uses these as a prefix to avoid all of the related modules (ex: boto is enough to avoid boto3 and botocore).
# We mostly avoid critical components such as boto and the ones with 'native' (C, etc) dependencies (most of the popular
# data science libraries).
PYTHON_MODULES_TO_BE_AVOIDED_IN_GLUE_BUNDLE: Set[str] = {
"setuptools",
"subprocess32",
"ptvsd",
"pydevd",
"PyMySQL",
"docutils",
"jmespath",
"six",
"python_dateutil",
"urllib3",
"botocore",
"s3transfer",
"boto3",
"certifi",
"chardet",
"idna",
"requests",
"pyparsing",
"enum34",
"pytz",
"numpy",
"cycler",
"kiwisolver",
"scipy",
"pandas",
"pyarrow",
"matplotlib",
"pyhocon",
"mpmath",
"sympy",
"patsy",
"statsmodels",
"fsspec",
"s3fs",
"Cython",
"joblib",
"pmdarima",
"scikit-learn",
"tbats",
}
@unique
class GlueJobCommandType(str, Enum):
    """Glue job 'Command.Name' API values."""

    # Spark-based ETL job.
    BATCH = "glueetl"
    # Python shell job.
    NORMAL = "pythonshell"
@unique
class GlueJobLanguage(str, Enum):
    """Script language for a Glue job ('--job-language' argument values)."""

    PYTHON = "python"
    SCALA = "scala"

    @classmethod
    def from_slot_lang(cls, lang: Lang):
        """Map an IntelliFlow slot ``Lang`` onto a Glue job language.

        Both PYTHON and SPARK_SQL slots map to PYTHON; SCALA maps to
        SCALA.  Raises ValueError for any other language.
        """
        if lang in [Lang.PYTHON, Lang.SPARK_SQL]:
            return cls.PYTHON
        elif lang == Lang.SCALA:
            return cls.SCALA
        else:
            raise ValueError(f"Slot lang '{lang!r}' is not supported by AWS Glue!")
@unique
class GlueVersion(str, Enum):
    """AWS Glue runtime versions ('GlueVersion' API field values)."""

    # "auto" is not a Glue API value -- presumably resolved to a concrete
    # version elsewhere in the framework; confirm before relying on it.
    AUTO = "auto"
    VERSION_0_9 = "0.9"
    VERSION_1_0 = "1.0"
    VERSION_2_0 = "2.0"
    VERSION_3_0 = "3.0"
def glue_spark_version_map() -> Dict[GlueVersion, Version]:
    """Return the Spark version bundled with each concrete Glue version.

    GlueVersion.AUTO has no entry.

    Source: https://docs.aws.amazon.com/glue/latest/dg/release-notes.html
    """
    return {
        GlueVersion.VERSION_0_9: version.parse("2.2.1"),
        GlueVersion.VERSION_1_0: version.parse("2.4.3"),
        GlueVersion.VERSION_2_0: version.parse("2.4.3"),
        GlueVersion.VERSION_3_0: version.parse("3.1.1"),
    }
@unique
class GlueWorkerType(str, Enum):
    """Glue 'WorkerType' API values (worker hardware profiles)."""

    STANDARD = "Standard"
    G_1X = "G.1X"
    G_2X = "G.2X"
def _create_job_params(
    description,
    role,
    job_command_type: GlueJobCommandType,
    job_language: GlueJobLanguage,
    script_s3_location,
    max_concurrent_runs=20,
    max_capacity_in_DPU=None,
    # https://docs.aws.amazon.com/glue/latest/dg/add-job.html
    glue_version=None,
    working_set_s3_location=None,
    default_args: Dict[str, Any] = None,
) -> Dict[str, Any]:
    """Build the shared parameter map for Glue create_job/update_job calls.

    Capacity parameters depend on the Glue version: "1.0" uses MaxCapacity
    (DPUs), while "2.0"/"3.0" must use WorkerType/NumberOfWorkers.  For
    Python jobs the bundled working set is attached via '--extra-py-files'.

    Raises ValueError for any glue_version other than "1.0", "2.0", "3.0".
    """
    default_arguments = {"--enable-metrics": "", "--job-language": job_language.value}
    if default_args:
        default_arguments.update(default_args)
    capacity_params = dict()
    if glue_version == "1.0":
        if not max_capacity_in_DPU:
            # Defaults: 20 DPU for Spark ETL, minimal 0.0625 DPU for python shell.
            max_capacity_in_DPU = 20 if job_command_type == GlueJobCommandType.BATCH else 0.0625
        capacity_params.update({"MaxCapacity": max_capacity_in_DPU})
    elif glue_version in ["2.0", "3.0"]:
        # with 2.0 cannot even use MaxCapacity; worker-based capacity is required
        capacity_params.update({"WorkerType": GlueWorkerType.G_1X.value})
        capacity_params.update({"NumberOfWorkers": 100 if job_command_type == GlueJobCommandType.BATCH else 1})
    else:
        raise ValueError(f"Unsupported glue_version: {glue_version!r}.")
    if job_language == GlueJobLanguage.PYTHON:
        # Identical for every supported Glue version (previously duplicated
        # in both branches above).
        default_arguments.update({"--extra-py-files": working_set_s3_location})
    params = {
        "Description": description,
        "Role": role,
        "ExecutionProperty": {"MaxConcurrentRuns": max_concurrent_runs},
        "Command": {"Name": job_command_type.value, "ScriptLocation": script_s3_location, "PythonVersion": "3"},
        "DefaultArguments": default_arguments,
        # RheocerOS controls the retry logic, so implicit retries should be avoided!
        "MaxRetries": 0,
        "Timeout": 600,  # in minutes: 60 mins x 10 = 10 hours
        # NOTE(review): magic 123-minute notification delay -- confirm intent.
        "NotificationProperty": {"NotifyDelayAfter": 123},
        "GlueVersion": glue_version,
    }
    params.update(capacity_params)
    return params
def create_glue_job(
    glue_client,
    glue_job_name,
    description,
    role,
    job_command_type: GlueJobCommandType,
    job_language: GlueJobLanguage,
    script_s3_location,
    max_concurrent_runs=20,
    max_capacity_in_DPU=None,
    # https://docs.aws.amazon.com/glue/latest/dg/add-job.html
    glue_version=None,
    working_set_s3_location=None,
    default_args: Dict[str, Any] = None,
):
    """Create a new AWS Glue job and return its name.

    Parameter assembly is delegated to _create_job_params; the underlying
    botocore ClientError is logged and re-raised on failure.
    """
    create_params = _create_job_params(
        description,
        role,
        job_command_type,
        job_language,
        script_s3_location,
        max_concurrent_runs,
        max_capacity_in_DPU,
        glue_version,
        working_set_s3_location,
        default_args,
    )
    create_params["Name"] = glue_job_name
    try:
        return glue_client.create_job(**create_params)["Name"]
    except ClientError:
        logger.exception("Couldn't create glue job %s.", glue_job_name)
        raise
def delete_glue_job(glue_client, glue_job_name):
    """Delete the named Glue job; no exception if the job does not exist.

    Refer
    https://boto3.amazonaws.com/v1/documentation/api/1.9.42/reference/services/glue.html#Glue.Client.delete_job
    Any ClientError is logged and re-raised.
    """
    try:
        glue_client.delete_job(JobName=glue_job_name)
    except ClientError:
        logger.exception("Couldn't delete glue job %s.", glue_job_name)
        raise
def update_glue_job(
    glue_client,
    glue_job_name,
    description,
    role,
    job_command_type: GlueJobCommandType,
    job_language: GlueJobLanguage,
    script_s3_location,
    max_concurrent_runs=20,
    max_capacity_in_DPU=None,
    # https://docs.aws.amazon.com/glue/latest/dg/add-job.html
    glue_version=None,
    working_set_s3_location=None,
    default_args: Dict[str, Any] = None,
):
    """Update an existing AWS Glue job in place and return its name.

    Parameter assembly is shared with create_glue_job via
    _create_job_params.  The underlying botocore ClientError is logged and
    re-raised on failure.
    """
    job_update_params = _create_job_params(
        description,
        role,
        job_command_type,
        job_language,
        script_s3_location,
        max_concurrent_runs,
        max_capacity_in_DPU,
        glue_version,
        working_set_s3_location,
        default_args,
    )
    job_params = {"JobName": glue_job_name, "JobUpdate": job_update_params}
    try:
        response = glue_client.update_job(**job_params)
        job_name = response["JobName"]
    except ClientError:
        # BUG FIX: the original logged `job_name`, which is unbound when
        # update_job itself raises (NameError inside the handler); log the
        # requested job name instead.
        logger.exception("Couldn't update glue job %s.", glue_job_name)
        raise
    else:
        return job_name
def evaluate_execution_params(
    job_command: GlueJobCommandType,
    job_lang: GlueJobLanguage,
    org_params: Dict[str, Any],
    fail_on_misconfiguration=False,
    keep_unrecognized_params=True,
) -> Dict[str, Any]:
    """Validate/normalize Glue job-run parameters before start_job_run.

    Convenience method to catch invalid params early in development or to
    be more tolerant against type mismatches (by doing necessary
    conversions for compatible values before the actual API call, not
    relying on boto3's internal impl).

    This is generally not a good practice since these checks create a
    coupling with the Glue backend, so some checks and type conversions
    are deliberately ignored to avoid further coupling.  The ones
    addressed here exist for better customer/developer experience and due
    to the complicated/hidden parametrization of Glue jobs.

    Warning: these are job-run parameters, not the sub "Arguments" (which
    is also a job-run parameter of type Dict[str, str]).
    ref
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.start_job_run

    Raises ValueError on misconfiguration (always for unknown GlueVersion
    or invalid WorkerType; other checks only when fail_on_misconfiguration
    is set).
    """
    params = dict(org_params) if keep_unrecognized_params else dict()
    glue_version = org_params.get("GlueVersion")
    if glue_version == "1.0":
        max_capacity = None
        if "MaxCapacity" in org_params:
            if job_command == GlueJobCommandType.BATCH:
                max_capacity = int(org_params["MaxCapacity"])
                if fail_on_misconfiguration and (max_capacity < 2 or max_capacity > 100):
                    raise ValueError(f"MaxCapacity for {job_command} is not defined correctly! Valid range is between 2 and 100")
                # When not failing fast, silently clamp into Glue's valid DPU range.
                if max_capacity < 2:
                    max_capacity = 2
                if max_capacity > 100:
                    max_capacity = 100
            elif job_command == GlueJobCommandType.NORMAL:
                max_capacity = float(org_params["MaxCapacity"])
                # Snap float noise onto the only two capacities pythonshell supports.
                if math.isclose(max_capacity, 0.0625):
                    max_capacity = 0.0625
                elif math.isclose(max_capacity, 1.0):
                    max_capacity = 1.0
                else:
                    if fail_on_misconfiguration:
                        raise ValueError(f"MaxCapacity for {job_command} is not defined correctly! Valid values are either 0.0625 or 1.0")
        if max_capacity:
            params.update({"MaxCapacity": float(max_capacity)})
        if fail_on_misconfiguration and max_capacity and ("WorkerType" in org_params or "NumberOfWorkers" in org_params):
            raise ValueError("Do not set Max Capacity for an AWS Glue Job if using WorkerType and NumberOfWorkers.")
    elif glue_version in ["2.0", "3.0"]:
        if fail_on_misconfiguration and ("WorkerType" not in org_params or "NumberOfWorkers" not in org_params):
            raise ValueError(f"AWS Glue Version {glue_version} jobs require 'WorkerType' and 'NumberOfWorkers' to be defined.")
    else:
        raise ValueError(f"Unsupported glue_version: {glue_version!r}.")
    if "Timeout" in org_params:
        time_out = int(org_params["Timeout"])
        if fail_on_misconfiguration and time_out < 1:
            raise ValueError(f"Timeout value {time_out} for AWS Glue job is too low or not valid!")
        params.update({"Timeout": time_out})
    if "WorkerType" in org_params:
        worker_type = org_params["WorkerType"]
        # str-Enum members compare equal to their raw string values.
        if worker_type not in GlueWorkerType.__members__.values():
            raise ValueError(
                f"WorkerType value {worker_type!r} for AWS Glue job is not valid!" f" Valid values: {GlueWorkerType.__members__.values()}"
            )
        params.update({"WorkerType": worker_type})
    if "NumberOfWorkers" in org_params:
        number_of_workers = int(org_params["NumberOfWorkers"])
        if fail_on_misconfiguration:
            if number_of_workers < 1:
                # BUG FIX: original lacked the f prefix, so the placeholder
                # was never interpolated into the message.
                raise ValueError(f"NumberOfWorkers value '{number_of_workers}' is not valid for AWS Glue Job!")
            # NOTE(review): KeyError if NumberOfWorkers is set without
            # WorkerType here -- pre-existing behavior, kept as-is.
            if org_params["WorkerType"] == GlueWorkerType.G_1X.value and number_of_workers > 299:
                raise ValueError(
                    f"NumberOfWorkers value '{number_of_workers}' is out of bounds for AWS Glue Job. "
                    f"The maximum number of workers you can define are 299 for G.1X."
                )
            elif org_params["WorkerType"] == GlueWorkerType.G_2X.value and number_of_workers > 149:
                raise ValueError(
                    f"NumberOfWorkers value '{number_of_workers}' is out of bounds for AWS Glue Job. "
                    f"The maximum number of workers you can define are 149 for G.2X."
                )
        params.update({"NumberOfWorkers": number_of_workers})
    return params
def start_glue_job(
    glue_client,
    job_command: GlueJobCommandType,
    job_lang: GlueJobLanguage,
    job_name,
    args: Dict[str, str],
    extra_params: Dict[str, Any],
    prev_job_run_id: str = None,
):
    """Start (or retry) a Glue job run and return the new JobRunId.

    extra_params are sanitized via evaluate_execution_params (tolerant
    mode, unrecognized entries dropped) before being passed through.
    """
    run_request = {"JobName": job_name, "Arguments": args}
    if prev_job_run_id:
        # Supplying the previous run id makes Glue retry that run.
        run_request["JobRunId"] = prev_job_run_id
    run_request.update(evaluate_execution_params(job_command, job_lang, extra_params, False, False))
    try:
        return glue_client.start_job_run(**run_request)["JobRunId"]
    except ClientError:
        # NOTE(review): message says 'create' (copy/paste?) -- kept verbatim.
        logger.exception("Couldn't create glue job %s.", job_name)
        raise
def get_glue_job(glue_client, job_name):
    """Return the job's name if it exists in Glue, or None when not found.

    Any ClientError other than EntityNotFoundException is logged and
    re-raised.
    """
    try:
        job_descriptor = glue_client.get_job(JobName=job_name)
    except ClientError as err:
        # TODO
        if err.response["Error"]["Code"] == "EntityNotFoundException":
            return None
        logger.error("Couldn't check glue job '%s'! Error: %s", job_name, str(err))
        raise
    return job_descriptor["Job"]["Name"]
def get_glue_job_run(glue_client, job_name, job_run_id):
    """Fetch a single Glue job-run descriptor (predecessors excluded).

    Any ClientError is logged with the job/run ids and re-raised.
    """
    try:
        return glue_client.get_job_run(JobName=job_name, RunId=job_run_id, PredecessorsIncluded=False)["JobRun"]
    except ClientError:
        logger.exception("Couldn't get glue run job run [job_name: %s, run_id: %s].", job_name, job_run_id)
        raise
def get_glue_job_run_state_type(job_run) -> ComputeSessionStateType:
    """Map a Glue JobRun descriptor's 'JobRunState' onto a session state.

    Unrecognized states are logged as critical and mapped to UNKNOWN so a
    new Glue state cannot crash the orchestration.
    """
    state_map = {
        "STARTING": ComputeSessionStateType.PROCESSING,
        "RUNNING": ComputeSessionStateType.PROCESSING,
        "SUCCEEDED": ComputeSessionStateType.COMPLETED,
        "STOPPING": ComputeSessionStateType.FAILED,
        "STOPPED": ComputeSessionStateType.FAILED,
        "FAILED": ComputeSessionStateType.FAILED,
        "TIMEOUT": ComputeSessionStateType.FAILED,
    }
    run_state = job_run["JobRunState"]
    if run_state in state_map:
        return state_map[run_state]
    logger.critical(
        f"AWS Glue introduced a new state type {run_state}!"
        f" Marking it as {ComputeSessionStateType.UNKNOWN}. "
        f" This should be addressed ASAP by RheocerOS Core,"
        f" or your app should upgrade to a newer RheocerOS version."
    )
    return ComputeSessionStateType.UNKNOWN
def get_glue_job_run_failure_type(job_run) -> ComputeFailedSessionStateType:
run_state = job_run["JobRunState"]
if run_state in ["STOPPING", "STOPPED"]:
return ComputeFailedSessionStateType.STOPPED
elif run_state in ["TIMEOUT"]:
return ComputeFailedSessionStateType.TIMEOUT
elif run_state in ["FAILED"]:
error_message = job_run["ErrorMessage"]
# TODO AWS Glue should provide better support for concurrent capacity exceeding
# the account limit. Or we should find it if there is a better way.
if "Exceeded maximum concurrent compute capacity" in error_message or "Resource unavailable" in error_message:
return ComputeFailedSessionStateType.TRANSIENT
else:
return ComputeFailedSessionStateType.APP_INTERNAL
else:
logger.critical(
f"AWS Glue introduced a new state type {run_state}!"
f" Marking it as {ComputeFailedSessionStateType.UNKNOWN}. "
f" This should be | |
p13 = ttk.Entry(window3)
p13.grid(row = 13, column = 3)
p14 = ttk.Entry(window3)
p14.grid(row = 14, column = 3)
save = ttk.Button(window3, text = "SAVE + QUIT", width = 20, command = savedata14)
save.grid(row = 15, column = 1)
ttk.Label(window3, text = "Note: 1. When entering multiple topics").grid(row = 16, column = 0)
ttk.Label(window3, text = "seperate both with a space").grid(row = 17, column = 0)
ttk.Label(window3, text = "2. When entering the PDF baginning page").grid(row = 18, column = 0)
ttk.Label(window3, text = "Enter the page at the top, but subtract 1 from it").grid(row = 19, column = 0)
ttk.Label(window3, text = "So, if the page number at top is 2").grid(row = 20, column = 0)
ttk.Label(window3, text = "enter the beginning page as 1").grid(row = 21, column = 0)
#FN14 HAS NO ISSUES
def fn13():
    """Build the data-entry form for a 13-question paper.

    Creates 13 topic entries (globals e1..e13, grid column 1) and 13
    page-number entries (globals p1..p13, grid column 3) in window3, plus
    a SAVE + QUIT button wired to savedata13 and usage notes below it.
    """
    global window3
    global e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13
    global p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13
    question_count = 13
    for row in range(1, question_count + 1):
        # CONSISTENCY FIX: the original used bare Label here while every
        # sibling fnNN builder uses ttk.Label.
        ttk.Label(window3, text = "ENTER TOPIC OF Q" + str(row) + " ----->").grid(row = row, column = 0)
        ttk.Label(window3, text = "ENTER PAGE OF THE BEGINNING OF THE QUESTION ----->").grid(row = row, column = 2)
    # Topic entries are created before page entries so tab-traversal order
    # (which follows widget creation order) matches the original layout.
    topic_entries = []
    for row in range(1, question_count + 1):
        entry = ttk.Entry(window3)
        entry.grid(row = row, column = 1)
        topic_entries.append(entry)
    page_entries = []
    for row in range(1, question_count + 1):
        entry = ttk.Entry(window3)
        entry.grid(row = row, column = 3)
        page_entries.append(entry)
    e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13 = topic_entries
    p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13 = page_entries
    save = ttk.Button(window3, text = "SAVE + QUIT", width = 20, command = savedata13)
    save.grid(row = 14, column = 1)
    # Usage notes (UI strings kept verbatim, including original typos).
    ttk.Label(window3, text = "Note: 1. When entering multiple topics").grid(row = 15, column = 0)
    ttk.Label(window3, text = "seperate both with a space").grid(row = 16, column = 0)
    ttk.Label(window3, text = "2. When entering the PDF baginning page").grid(row = 17, column = 0)
    ttk.Label(window3, text = "Enter the page at the top, but subtract 1 from it").grid(row = 18, column = 0)
    ttk.Label(window3, text = "So, if the page number at top is 2").grid(row = 19, column = 0)
    ttk.Label(window3, text = "enter the beginning page as 1").grid(row = 20, column = 0)
#FN13 HAS NO ISSUES
def fn12():
    """Build the data-entry form for a 12-question paper.

    Creates 12 topic entries (globals e1..e12, grid column 1) and 12
    page-number entries (globals p1..p12, grid column 3) in window3, plus
    a SAVE + QUIT button wired to savedata12 and usage notes below it.
    """
    global e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12
    global p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12
    question_count = 12
    for row in range(1, question_count + 1):
        ttk.Label(window3, text = "ENTER TOPIC OF Q" + str(row) + " ----->").grid(row = row, column = 0)
        ttk.Label(window3, text = "ENTER PAGE OF THE BEGINNING OF THE QUESTION ----->").grid(row = row, column = 2)
    # Topic entries are created before page entries so tab-traversal order
    # (which follows widget creation order) matches the original layout.
    topic_entries = []
    for row in range(1, question_count + 1):
        entry = ttk.Entry(window3)
        entry.grid(row = row, column = 1)
        topic_entries.append(entry)
    page_entries = []
    for row in range(1, question_count + 1):
        entry = ttk.Entry(window3)
        entry.grid(row = row, column = 3)
        page_entries.append(entry)
    e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12 = topic_entries
    p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12 = page_entries
    save = ttk.Button(window3, text = "SAVE + QUIT", width = 20, command = savedata12)
    save.grid(row = 13, column = 1)
    # Usage notes (UI strings kept verbatim, including original typos).
    ttk.Label(window3, text = "Note: 1. When entering multiple topics").grid(row = 14, column = 0)
    ttk.Label(window3, text = "seperate both with a space").grid(row = 15, column = 0)
    ttk.Label(window3, text = "2. When entering the PDF baginning page").grid(row = 16, column = 0)
    ttk.Label(window3, text = "Enter the page at the top, but subtract 1 from it").grid(row = 17, column = 0)
    ttk.Label(window3, text = "So, if the page number at top is 2").grid(row = 18, column = 0)
    ttk.Label(window3, text = "enter the beginning page as 1").grid(row = 19, column = 0)
#FN12 HAS NO ISSUES
def fn11():
j = 11
i = 1
while i<=j:
ttk.Label(window3, text = "ENTER TOPIC OF Q" + str(i) + " ----->").grid(row = +i, column = 0)
ttk.Label(window3, text = "ENTER PAGE OF THE BEGINNING OF THE QUESTION ----->").grid(row = +i, column = 2)
i+=1
global e1
global e2
global e3
global e4
global e5
global e6
global e7
global e8
global e9
global e10
global e11
global p1
global p2
global p3
global p4
global p5
global p6
global p7
global p8
global p9
global p10
global p11
e1 = ttk.Entry(window3)
e1.grid(row = 1, column = 1)
e2 = ttk.Entry(window3)
e2.grid(row = 2, column = 1)
e3 = ttk.Entry(window3)
e3.grid(row = 3, column = 1)
e4 = ttk.Entry(window3)
e4.grid(row = 4, column = 1)
e5 = ttk.Entry(window3)
e5.grid(row = 5, column = 1)
e6 = ttk.Entry(window3)
e6.grid(row = 6, column = 1)
e7 = ttk.Entry(window3)
e7.grid(row = 7, column = 1)
e8 = ttk.Entry(window3)
e8.grid(row = 8, column = 1)
e9 = ttk.Entry(window3)
e9.grid(row = 9, column = 1)
e10 = ttk.Entry(window3)
e10.grid(row = 10, column = 1)
e11 = ttk.Entry(window3)
e11.grid(row = 11, column = 1)
p1 = ttk.Entry(window3)
p1.grid(row = 1, column = 3)
p2 = ttk.Entry(window3)
p2.grid(row = 2, column = 3)
p3 = ttk.Entry(window3)
p3.grid(row = 3, column = 3)
p4 = ttk.Entry(window3)
p4.grid(row = 4, column = 3)
p5 = ttk.Entry(window3)
p5.grid(row = 5, column = 3)
p6 = ttk.Entry(window3)
p6.grid(row = 6, column = 3)
p7 = ttk.Entry(window3)
p7.grid(row = 7, column = 3)
p8 = ttk.Entry(window3)
p8.grid(row = 8, column = 3)
p9 = ttk.Entry(window3)
p9.grid(row = 9, column = 3)
| |
"default": "1024m"}],
"APP_TIMELINE_SERVER":
[{"config-name": "yarn-env",
"property": "apptimelineserver_heapsize",
"default": "1024m"}],
"ZOOKEEPER_SERVER":
[{"config-name": "zookeeper-env",
"property": "zk_server_heapsize",
"default": "1024m"}],
"METRICS_COLLECTOR":
[{"config-name": "ams-hbase-env",
"property": "hbase_master_heapsize",
"default": "1024m"},
{"config-name": "ams-hbase-env",
"property": "hbase_regionserver_heapsize",
"default": "1024m"},
{"config-name": "ams-env",
"property": "metrics_collector_heapsize",
"default": "512m"}],
"ATLAS_SERVER":
[{"config-name": "atlas-env",
"property": "atlas_server_xmx",
"default": "2048m"}],
"LOGSEARCH_SERVER":
[{"config-name": "logsearch-env",
"property": "logsearch_app_max_memory",
"default": "1024m"}],
"LOGSEARCH_LOGFEEDER":
[{"config-name": "logfeeder-env",
"property": "logfeeder_max_mem",
"default": "512m"}],
"SPARK_JOBHISTORYSERVER":
[{"config-name": "spark-env",
"property": "spark_daemon_memory",
"default": "1024m"}],
"SPARK2_JOBHISTORYSERVER":
[{"config-name": "spark2-env",
"property": "spark_daemon_memory",
"default": "1024m"}]
}
try:
# Override any by reading from the Service Advisors
for service in services["services"]:
serviceName = service["StackServices"]["service_name"]
serviceAdvisor = self.getServiceAdvisor(serviceName)
# This seems confusing, but "self" may actually refer to the actual Service Advisor class that was loaded
# as opposed to this class.
advisor = serviceAdvisor if serviceAdvisor is not None else self
# TODO, switch this to a function instead of a property.
if hasattr(advisor, "heap_size_properties"):
# Override the values in "default" with those from the service advisor
default.update(advisor.heap_size_properties)
except Exception, e:
self.logger.exception()
return default
def createComponentLayoutRecommendations(self, services, hosts):
    """
    Recommend a component-to-host layout for the given cluster.

    Args:
      services: dict - stack metadata; iterates services["services"].
      hosts: dict - host inventory; reads hosts["items"].
    Returns:
      dict with "blueprint" (host_groups -> component lists) and
      "blueprint_cluster_binding" (host_groups -> host fqdns).
    """
    # Cached so helper methods invoked below can reach the request data.
    self.services = services
    recommendations = {
        "blueprint": {
            "host_groups": [ ]
        },
        "blueprint_cluster_binding": {
            "host_groups": [ ]
        }
    }
    # Only active hosts participate in the layout.
    hostsList = self.getActiveHosts([host["Hosts"] for host in hosts["items"]])
    # for fast lookup
    hostsSet = set(hostsList)
    # host name -> list of {"name": componentName} placed on that host
    hostsComponentsMap = {}
    for hostName in hostsList:
        if hostName not in hostsComponentsMap:
            hostsComponentsMap[hostName] = []
    # Sort the services so that the dependent services will be processed
    # before those that depend on them.
    sortedServices = self.getServicesSortedByDependencies(services)
    # Extend hostsComponentsMap with MASTER components first.
    for service in sortedServices:
        masterComponents = [component for component in service["components"] if self.isMasterComponent(component)]
        serviceName = service["StackServices"]["service_name"]
        serviceAdvisor = self.getServiceAdvisor(serviceName)
        for component in masterComponents:
            componentName = component["StackServiceComponents"]["component_name"]
            # A service-specific advisor (if any) overrides the generic placement.
            advisor = serviceAdvisor if serviceAdvisor is not None else self
            # Filter the hosts such that only hosts that meet the dependencies
            # are included (if possible).
            filteredHosts = self.getFilteredHostsBasedOnDependencies(services, component, hostsList, hostsComponentsMap)
            hostsForComponent = advisor.getHostsForMasterComponent(services, hosts, component, filteredHosts)
            # extend 'hostsComponentsMap' with 'hostsForComponent'
            for hostName in hostsForComponent:
                if hostName in hostsSet:
                    hostsComponentsMap[hostName].append( { "name":componentName } )
    # Extend 'hostsComponentsMap' with Slave and Client components.
    componentsListList = [service["components"] for service in services["services"]]
    componentsList = [item for sublist in componentsListList for item in sublist]
    # Hosts already holding a component that "counts" for utilization purposes.
    usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
    utilizedHosts = [item for sublist in usedHostsListList for item in sublist]
    freeHosts = [hostName for hostName in hostsList if hostName not in utilizedHosts]
    for service in sortedServices:
        slaveClientComponents = [component for component in service["components"]
                                 if self.isSlaveComponent(component) or self.isClientComponent(component)]
        serviceName = service["StackServices"]["service_name"]
        serviceAdvisor = self.getServiceAdvisor(serviceName)
        for component in slaveClientComponents:
            componentName = component["StackServiceComponents"]["component_name"]
            advisor = serviceAdvisor if serviceAdvisor is not None else self
            # Filter the hosts and free hosts such that only hosts that meet
            # the dependencies are included (if possible).
            filteredHosts = self.getFilteredHostsBasedOnDependencies(services, component, hostsList, hostsComponentsMap)
            filteredFreeHosts = self.filterList(freeHosts, filteredHosts)
            hostsForComponent = advisor.getHostsForSlaveComponent(services, hosts, component, filteredHosts, filteredFreeHosts)
            # extend 'hostsComponentsMap' with 'hostsForComponent'
            for hostName in hostsForComponent:
                if hostName not in hostsComponentsMap and hostName in hostsSet:
                    hostsComponentsMap[hostName] = []
                if hostName in hostsSet:
                    hostsComponentsMap[hostName].append( { "name": componentName } )
    # Let custom service advisors co-locate their components.
    for service in sortedServices:
        serviceName = service["StackServices"]["service_name"]
        serviceAdvisor = self.getServiceAdvisor(serviceName)
        if serviceAdvisor is not None:
            serviceComponents = [component for component in service["components"]]
            serviceAdvisor.colocateService(hostsComponentsMap, serviceComponents)
            serviceAdvisor.colocateServiceWithServicesInfo(hostsComponentsMap, serviceComponents, services)
    # Prepare 'host-group's from 'hostsComponentsMap': one group per host,
    # with a parallel binding entry mapping the group to its fqdn.
    host_groups = recommendations["blueprint"]["host_groups"]
    bindings = recommendations["blueprint_cluster_binding"]["host_groups"]
    index = 0
    for key in hostsComponentsMap.keys():
        index += 1
        host_group_name = "host-group-{0}".format(index)
        host_groups.append( { "name": host_group_name, "components": hostsComponentsMap[key] } )
        bindings.append( { "name": host_group_name, "hosts": [{ "fqdn": key }] } )
    return recommendations
def getHostsForMasterComponent(self, services, hosts, component, hostsList):
    """
    Choose the host(s) a master component should run on.

    Keeps an already-populated assignment as-is.  For multi-instance
    masters with a minimum count above one, greedily picks suitable
    hosts from |hostsList| until the count is met (or hosts run out).
    Otherwise delegates to getHostForComponent for a single host.
    """
    if self.isComponentHostsPopulated(component):
        return component["StackServiceComponents"]["hostnames"]
    multi_instance = (len(hostsList) > 1 and
                      self.isMasterComponentWithMultipleInstances(component))
    if multi_instance:
        wanted = self.getMinComponentCount(component, hosts)
        if wanted > 1:
            chosen = []
            for candidate in hostsList:
                if len(chosen) >= wanted:
                    break
                if self.isHostSuitableForComponent(candidate, component):
                    chosen.append(candidate)
            # May hold fewer than |wanted| hosts when too few are suitable.
            return chosen
    return [self.getHostForComponent(component, hostsList)]
def getHostsForSlaveComponent(self, services, hosts, component, hostsList, freeHosts):
    """
    Choose the host(s) a slave or client component should run on.

    Args:
      hostsList: candidate hosts (already dependency-filtered).
      freeHosts: subset of hostsList not yet holding valuable components.
    Returns:
      list of host names for the component.
    """
    # "ALL" cardinality means the component lands on every host.
    if component["StackServiceComponents"]["cardinality"] == "ALL":
        return hostsList
    # Respect an assignment that is already populated.
    if self.isComponentHostsPopulated(component):
        return component["StackServiceComponents"]["hostnames"]
    hostsForComponent = []
    componentName = component["StackServiceComponents"]["component_name"]
    if self.isSlaveComponent(component):
        cardinality = str(component["StackServiceComponents"]["cardinality"])
        hostsMin, hostsMax = self.parseCardinality(cardinality, len(hostsList))
        # Unbounded ends of the cardinality default to 0 / all hosts.
        hostsMin, hostsMax = (0 if hostsMin is None else hostsMin, len(hostsList) if hostsMax is None else hostsMax)
        if self.isComponentUsingCardinalityForLayout(componentName) and cardinality:
            # Cardinality-driven layout: take only enough free hosts to
            # satisfy the minimum.
            if hostsMin > len(hostsForComponent):
                hostsForComponent.extend(freeHosts[0:hostsMin-len(hostsForComponent)])
        else:
            # Default: spread over every free host.
            hostsForComponent.extend(freeHosts)
        if not hostsForComponent:  # hostsForComponent is empty
            # No free hosts at all: fall back to the last candidate host.
            hostsForComponent = hostsList[-1:]
        hostsForComponent = list(set(hostsForComponent))  # removing duplicates
        # Clamp the result into [hostsMin, hostsMax].
        # NOTE(review): list(set(...)) has no deterministic order, so which
        # hosts survive the truncation below is not stable across runs.
        if len(hostsForComponent) < hostsMin:
            hostsForComponent = list(set(hostsList))[0:hostsMin]
        elif len(hostsForComponent) > hostsMax:
            hostsForComponent = list(set(hostsList))[0:hostsMax]
    elif self.isClientComponent(component):
        # Clients only need one host; prefer a free one.
        hostsForComponent = freeHosts[0:1]
        if not hostsForComponent:  # hostsForComponent is empty
            hostsForComponent = hostsList[-1:]
    return hostsForComponent
def getServicesSortedByDependencies(self, services):
    """
    Return services ordered so that each service appears after the
    services it depends on (non-conditional, host-scope dependencies
    only).  Circular dependencies fall back to processing order.
    """
    visited = []
    ordered = []
    for svc in services["services"]:
        self.sortServicesByDependencies(services, svc, visited, ordered)
    return ordered
def sortServicesByDependencies(self, services, service, processedServices, sortedServices):
    """
    Depth-first topological insert of |service| into |sortedServices|.

    Only non-conditional, host-scope dependencies are followed; each
    required service is inserted (recursively) before |service| itself.
    |processedServices| guards against revisiting, so circular
    dependencies degrade to processing order.  Mutates both list
    arguments in place; returns nothing.
    """
    if service is None or service in processedServices:
        return
    processedServices.append(service)
    components = [] if "components" not in service else service["components"]
    for component in components:
        dependencies = [] if "dependencies" not in component else component['dependencies']
        for dependency in dependencies:
            # Accounts only for dependencies that are not conditional.
            conditionsPresent = "conditions" in dependency["Dependencies"] and dependency["Dependencies"]["conditions"]
            # Missing scope defaults to "cluster", which we ignore here.
            scope = "cluster" if "scope" not in dependency["Dependencies"] else dependency["Dependencies"]["scope"]
            if not conditionsPresent and scope == "host":
                # (Fix: dropped an unused local that shadowed the outer
                # component's name for no purpose.)
                requiredComponentName = dependency["Dependencies"]["component_name"]
                requiredService = self.getServiceForComponentName(services, requiredComponentName)
                self.sortServicesByDependencies(services, requiredService, processedServices, sortedServices)
    sortedServices.append(service)
def getFilteredHostsBasedOnDependencies(self, services, component, hostsList, hostsComponentsMap):
    """
    Return the hosts from |hostsList| that already hold every
    non-conditional, host-scope, non-CLIENT dependency of |component|.

    NOTE(review): the previous docstring claimed that an empty result
    falls back to the full host list, but no such fallback exists in the
    code - an empty list can be (and is) returned.  Documented the
    actual behavior; implementing the fallback would change layout
    results for existing callers.
    """
    # Hosts missing at least one required co-located component.
    removeHosts = []
    dependencies = [] if "dependencies" not in component else component['dependencies']
    for dependency in dependencies:
        # Accounts only for dependencies that are not conditional.
        conditionsPresent = "conditions" in dependency["Dependencies"] and dependency["Dependencies"]["conditions"]
        if not conditionsPresent:
            requiredComponentName = dependency["Dependencies"]["component_name"]
            requiredComponent = self.getRequiredComponent(services, requiredComponentName)
            # We only deal with "host" scope, and clients are not placement
            # constraints.
            if (requiredComponent is not None) and (requiredComponent["component_category"] != "CLIENT"):
                scope = "cluster" if "scope" not in dependency["Dependencies"] else dependency["Dependencies"]["scope"]
                if scope == "host":
                    for host, hostComponents in hostsComponentsMap.iteritems():
                        isRequiredIncluded = False
                        for hostComponent in hostComponents:
                            currentComponentName = None if "name" not in hostComponent else hostComponent["name"]
                            if requiredComponentName == currentComponentName:
                                isRequiredIncluded = True
                        if not isRequiredIncluded:
                            removeHosts.append(host)
    filteredHostsList = []
    for host in hostsList:
        if host not in removeHosts:
            filteredHostsList.append(host)
    return filteredHostsList
def filterList(self, list, filter):
    """
    Return the items of |list| that also appear in |filter| - i.e. the
    intersection of the two, preserving |list|'s order.

    (Fix: the previous docstring said "union", which contradicted the
    implementation; the code has always computed an intersection.)
    """
    # The builtin-shadowing parameter names are kept for backward
    # compatibility with callers that pass them as keywords.
    return [item for item in list if item in filter]
def getServiceForComponentName(self, services, componentName):
    """
    Find the service that declares the component named |componentName|.
    :type services dict
    :type componentName str
    Returns the service dict, or None when no service declares it.
    """
    matches = (svc
               for svc in services["services"]
               for comp in svc["components"]
               if self.getComponentName(comp) == componentName)
    return next(matches, None)
def isComponentUsingCardinalityForLayout(self, componentName):
    # Extension hook: stack/service advisors override this so that
    # getHostsForSlaveComponent honors the component's declared
    # cardinality when picking hosts.  Base implementation: never.
    return False
def createValidationResponse(self, services, validationItems):
    """
    Wrap |validationItems| in a validation response dict carrying the
    stack name and version read from |services|["Versions"].
    """
    versions = services["Versions"]
    return {
        "Versions": {
            "stack_name": versions["stack_name"],
            "stack_version": versions["stack_version"],
        },
        "items": validationItems,
    }
def validateComponentLayout(self, services, hosts):
    """Collect component-layout issues and wrap them in a validation response."""
    issues = self.getComponentLayoutValidations(services, hosts)
    return self.createValidationResponse(services, issues)
def validateConfigurations(self, services, hosts):
    """Collect configuration issues and wrap them in a validation response."""
    # Cached so helper methods invoked downstream can reach the request data.
    self.services = services
    issues = self.getConfigurationsValidationItems(services, hosts)
    return self.createValidationResponse(services, issues)
def getComponentLayoutValidations(self, services, | |
#! /usr/bin/python3
import regex as re
import sys
import xml.etree.ElementTree as et
import xml.dom.minidom
# Base tinctures (German blazon colour names) recognized in field
# descriptions.
COLORS_LIST = [
    "silber",
    "gold",
    "schwarz",
    "blau",
    "rot",
    "grün",
]
# Regex alternation over the base colours.
COLORS = "|".join(COLORS_LIST)
# Adjectival / compound colour forms mapped to their canonical tincture.
# NOTE: includes the empty string key (-> "natur"), so the alternation
# built below can match zero characters.
COLORS_ADJ = {
    "silbern" : "silber",
    "silberbekleidet" : "silber",
    "golden" : "gold",
    "goldbekleidet" : "gold",
    "schwarz" : "schwarz",
    "schwarzbekleidet" : "schwarz",
    "blau" : "blau",
    "blaubekleidet" : "blau",
    "rot" : "rot",
    "rotbekleidet" : "rot",
    "grün" : "grün",
    "grünbekleidet" : "grün",
    "naturfarben" : "natur",
    "" : "natur",
    "verwechselt" : "verwechselt",
}
# Regex alternation over all adjectival colour forms (dict insertion order).
COLORS_ADJ_OPTIONS = "|".join([key for key, value in COLORS_ADJ.items()])
# Two-field partitions (per pale, per fess, per bend, ...); the list is
# immediately collapsed into a regex alternation of the same name.
SEPARATIONS_1 = [
    "geteilt",
    "gespalten",
    "schräggeteilt",
    "erniedrigt schräggeteilt",
    "überdeckt schräggeteilt",
    "schräglinksgeteilt",
]
SEPARATIONS_1 = "|".join(SEPARATIONS_1)
# Four-field partitions (quarterly, ...); collapsed the same way.
SEPARATIONS_2 = [
    "geviert",
    "pfahlweise rechtsgerautet",
]
SEPARATIONS_2 = "|".join(SEPARATIONS_2)
# Arrangements of multiple figures (in fess, in pale, in bend, ...);
# collapsed into an alternation.
FORMATION_OPTIONS = [
    "balkenweise",
    "pfahlweise",
    "schrägbalkenweise",
    "schräglinksbalkenweise",
]
FORMATION_OPTIONS = "|".join(FORMATION_OPTIONS)
## FIELD IDENTIFICATION (FELD_IDENTIFIKATION)
# Each pattern below matches one German blazon sentence shape; the
# bracketed template in the comment names the capture groups.
# "in [Farbe] [Figuren]" - single-colour field with figures.
felder_1_ledig_1 = "in (?P<Farbe>{0}) (?P<Figur>.*?)(?P<Überdeckt>, überdeckt von .*?)?$".format(COLORS)
# "ledig [Farbe]" - plain single-colour field, no figures.
felder_1_ledig_2 = "ledig ({0})".format(COLORS)
# "in [Farbe1]-[Farbe2] [Teilung] Feld [Figur]" - two-colour split field
# with a figure.
felder_2_ledig_3 = "in (?P<Farbe1>{0})-(?P<Farbe2>{0}) (?P<Teilung>{1})em Feld (?P<Figur>.*?)$".format(COLORS, SEPARATIONS_1)
# "[Farbe1]-[Farbe2] [Teilung]" - plain two-colour split (positional groups).
felder_2_ledig_1 = "({0})-({1}) ({2})".format(COLORS, COLORS, SEPARATIONS_1)
# "[Teilung]: oben/rechts/vorne [Feld], unten/links/hinten [Feld]" -
# split field with a full sub-field description on each side.
felder_2_ledig_2 = "(?P<Teilung>{0}): (oben|rechts|vorne) (?P<Feld1>.*?), (unten|links|hinten) (?P<Feld2>.*?)$".format(SEPARATIONS_1)
# "[Anzahl]mal [Farbe1]-[Farbe2] [Teilung]" - N-fold repeated split.
felder_X_1 = "(?P<Anzahl>[1-9])mal (?P<Farbe1>{0})-(?P<Farbe2>{1}) (?P<Teilung>{2})(?P<Überdeckt>(,| und) überdeckt von .*?)?$".format(COLORS, COLORS, SEPARATIONS_1)
# "[Farbe1]-[Farbe2] geviert" - quarterly of two colours.
felder_4_1 = "(?P<Farbe1>{0})-(?P<Farbe2>{1}) (?P<Teilung>{2})".format(COLORS, COLORS, SEPARATIONS_2)
## BORDURE IDENTIFICATION (BORD_IDENTIFIKATION)
# "innerhalb 1 [Belegt]? [Farbe] Schildbords, [Feld]"
# NOTE: the "(?<Farbe>...)" group syntax (no "P") is only valid because
# this file imports the third-party `regex` module as `re`; stdlib `re`
# would reject it.
bord_1 = "innerhalb 1(?P<Belegt> mit .*? belegten)? (?<Farbe>{0})en Schildbords, (?P<Feld>.*?)$".format(COLORS_ADJ_OPTIONS)
## CHIEF IDENTIFICATION (HAUPT_IDENTIFIKATION)
# "unter 1 [Belegt]? [Farbe] Schildhaupt[, darin [Figuren],]? [Feld]"
# (same non-"P" named-group caveat as bord_1 above).
haupt_1 = "unter 1(?P<Belegt> mit .*? belegten)? (?<Farbe>{0})en Schildhaupt(, darin (?P<Figur>.*?),)? (?P<Feld>.*?)$".format(COLORS_ADJ_OPTIONS)
## FIGURE IDENTIFICATION (FIGUR_IDENTIFIKATION)
# Sub-clauses that may trail a figure: "besetzt mit" (surmounted),
# "belegt mit" (charged), "bewinkelt von" (cantoned), "begleitet von"
# (accompanied), "überdeckt von" (covered), holding, enclosing, and the
# upper/lower split form.  The trailing backslashes are line
# continuations *inside* one string literal.
sub_figur_pattern = "(\
(?P<ObenUnten> (der|die|das) (obere(n)?|rechte(n)?) (belegt|besetzt|bewinkelt|begleitet|überdeckt) (von|mit)( je)? \d, (der|die|das) (untere(n)?|linke(n)?) (mit|von) .*?)|\
(?P<Besetzt>.*? besetzt mit .*?)|\
(?P<Spezial2>.*?haltend(, dies(es|e|er) .*?)?)|\
(?P<Belegt>.*? belegt mit( je)? .*?)|\
(?P<Bewinkelt> bewinkelt von .*?)|(?P<Umschliesst>.*? umschliessend)|\
(?P<Begleitet>.*? begleitet von( je)? .*?)|\
(?P<ÜberdecktSingle> überdeckt von .*)\
)"
# Second copy of the sub-clause alternation with "...2"-suffixed group
# names, so one figure can carry two trailing sub-clauses without
# colliding group names in the combined pattern.
sub_figur_pattern2 = "(\
(?P<ObenUnten2> (der|die|das) (obere(n)?|rechte(n)?) (belegt|besetzt|bewinkelt|begleitet|überdeckt) (von|mit)( je)? \d, (der|die|das) (untere(n)?|linke(n)?) (mit|von) .*?)|\
(?P<Besetzt2>.*? besetzt mit .*?)|(?P<Spezial3>.*?haltend(, dies(es|e|er) .*?)?)|\
(?P<Belegt3>.*? belegt mit .*?)|\
(?P<Bewinkelt2> bewinkelt von .*?)|(?P<Umschliesst2>.*? umschliessend)|(?P<Begleitet2>.*?begleitet von( je)? .*?)|\
(?P<ÜberdecktSingle2> überdeckt von .*)\
)"
# Full figure clause: "[Anzahl] [Spezial]? [Farbe] [Figur] [Position]?
# [,]? [Spezial2]? ..." - count, modifiers, colour, capitalized figure
# noun, letters, formation, and trailing sub-clauses.
# NOTE: the format string only uses {0}-{3}; the fifth argument
# (FORMATION_OPTIONS again) is silently ignored by str.format.
figur_pattern = "(?P<Anzahl>[1-9])(?P<FormationSingle2> (\d:)+\d gestellt(en|e|er))?(?P<Spezial>.*?)(?P<Belegt2> mit .*? belegt(e|es|er|en))? ((?P<Farbe>{0})(er|e|es|en) )?\
((?P<Spezial4>[a-z]\w+)(es|e|er|en) )?(?P<Figur>[A-Z]\w+)\
(?P<BuchstabenSpezifikation> (\w, )*?\w( und \w)?)?\
(?P<VerwechselteTinkturen> in verwechselte(n|r) Tinktur(en)?)?\
(?P<Spezial5> mit .*?)?\
(?P<Position> an (der|die|das) \S+)?\
(?P<FormationSingle> ({1}))?(,( dies(es|e|er))?{2}( und{3})?)?$".format(COLORS_ADJ_OPTIONS, FORMATION_OPTIONS, sub_figur_pattern, sub_figur_pattern2, FORMATION_OPTIONS)
# Multiple figure groups: up to three comma/"und"-joined figure clauses
# plus an optional trailing formation.  Duplicated group names across
# the three embedded figur_pattern copies are accepted by the `regex`
# module (stdlib `re` would reject them).
fgroups_pattern = "((?P<Figur3>{2}, )?(?P<Figur2>{1} und ))?(?P<BasisFigur>{0})(?P<Formation>, ({3}|(\d:)+\d gestellt))?$".format(figur_pattern[:-1], figur_pattern[:-1], figur_pattern[:-1], FORMATION_OPTIONS)
# Variant of figur_pattern for figures without a leading count
# (same unused-fifth-argument caveat as figur_pattern).
uncountable_pattern = "(?P<FormationSingle2>(\d:)+\d gestellt(en|e|er))?(?P<Spezial>.*?)(?P<Belegt2>mit .*? belegt(e|es|er|en))?((?P<Farbe>{0})(er|e|es|en) )?\
((?P<Spezial4>[a-z]\w+)(es|e|er|en) )?\
(?P<Figur>[A-Z]\w+)\
(?P<BuchstabenSpezifikation> (\w, )*?\w( und \w)?)?\
(?P<VerwechselteTinkturen> in verwechselte(n|r) Tinktur(en)?)?\
(?P<Spezial5> mit .*?)?\
(?P<Position> an (der|die|das) \S+)?\
(?P<FormationSingle> ({1}))?(,( dies(es|e|er))?{2}( und{3})?)?$".format(COLORS_ADJ_OPTIONS, FORMATION_OPTIONS, sub_figur_pattern, sub_figur_pattern2, FORMATION_OPTIONS)
def convert(line):
    """
    Parse one blazon line into a <Schild> XML tree and return it as a
    pretty-printed XML string.

    Normalization before parsing: lower-case the leading character,
    lower-case the character two positions after each ":", force known
    colour words to lower case, and strip a trailing period.
    """
    tree = et.Element("Schild")
    field = line.strip()
    # truecase: the blazon starts capitalized, but patterns expect
    # lower case.  (Fix: guard against an empty line, which previously
    # raised IndexError on field[0].)
    if field:
        field = field[0].lower() + field[1:]
    to_lowercase_indices = []
    for n, char in enumerate(field):
        if char == ":":
            to_lowercase_indices.append(n+2)
    for index in to_lowercase_indices:
        # Fix: a ":" within the last two characters previously made
        # field[index] raise IndexError; skip such out-of-range indices.
        if index < len(field):
            field = field[:index] + field[index].lower() + field[index+1:]
    field = " ".join([word.lower() if word.lower() in COLORS_LIST else word for word in field.split()])
    field = field.rstrip(".")
    analyze_field(tree, field)
    return xml.dom.minidom.parseString(et.tostring(tree).decode("utf8")).toprettyxml()
def add_ledig_Feld(field, color):
    """Append a plain ('ledig') single-colour <Feld> element under *field*."""
    plain_field = et.SubElement(field, "Feld", layout="ledig")
    et.SubElement(plain_field, "Farbe", value=color)
def analyze_field(parent, field):
    """
    Dispatch *field* (one blazon field description) against the field /
    bordure / chief patterns and build the matching XML elements under
    *parent*.  Recurses for nested fields; prints a diagnostic when no
    pattern matches.  (Each branch matches its pattern twice: once in
    the elif test and once in the body.)
    """
    # Bordure: emit a <Bord>, then recurse on the inner field.
    if re.match(bord_1, field):
        match = re.match(bord_1, field)
        color = match.group("Farbe")
        feld = match.group("Feld")
        belegt = match.group("Belegt")
        bord = et.SubElement(parent, "Bord", layout="ledig")
        et.SubElement(bord, "Farbe", value=color)
        if belegt:
            add_belegt(bord, belegt)
        analyze_field(parent, feld)
    # Chief: emit a <Haupt> (with optional figures), then recurse.
    elif re.match(haupt_1, field):
        match = re.match(haupt_1, field)
        color = match.group("Farbe")
        content = match.group("Figur")
        feld = match.group("Feld")
        belegt = match.group("Belegt")
        head = et.SubElement(parent, "Haupt", design="", layout="ledig")
        et.SubElement(head, "Farbe", value=COLORS_ADJ[color])
        if belegt:
            add_belegt(head, belegt)
        if content:
            analyze_figure(head, content)
        analyze_field(parent, feld)
    # Plain two-colour split ("[Farbe1]-[Farbe2] [Teilung]"), positional groups.
    elif re.match(felder_2_ledig_1, field):
        match = re.match(felder_2_ledig_1, field)
        color1 = match.group(1)
        color2 = match.group(2)
        sep = match.group(3)
        feld = et.SubElement(parent, "Feld", layout=sep)
        male = et.SubElement(feld, "Male", value="1")
        add_ledig_Feld(feld, color1)
        add_ledig_Feld(feld, color2)
    # Single-colour field with figures ("in [Farbe] [Figuren]").
    elif re.match(felder_1_ledig_1, field):
        match = re.match(felder_1_ledig_1, field)
        color = match.group("Farbe")
        figur = match.group("Figur")
        cover = match.group("Überdeckt")
        new_field = et.SubElement(parent, "Feld", layout="ledig")
        et.SubElement(new_field, "Farbe", value=color)
        analyze_figure(new_field, figur)
        if cover:
            add_cover(new_field, cover)
    # Split with a full sub-field description per side; recurse on each.
    elif re.match(felder_2_ledig_2, field):
        match = re.match(felder_2_ledig_2, field)
        sep = match.group("Teilung")
        feld1 = match.group("Feld1")
        feld2 = match.group("Feld2")
        #~ covering = match.group("Überdeckt")
        new_field = et.SubElement(parent, "Feld", layout=sep)
        male = et.SubElement(new_field, "Male", value="1")
        analyze_field(new_field, feld1)
        analyze_field(new_field, feld2)
        #~ if covering:
        #~     add_cover(new_field, covering)
    # Two-colour split field carrying a figure.
    elif re.match(felder_2_ledig_3, field):
        match = re.match(felder_2_ledig_3, field)
        sep = match.group("Teilung")
        color1 = match.group("Farbe1")
        color2 = match.group("Farbe2")
        figur = match.group("Figur")
        new_field = et.SubElement(parent, "Feld", layout=sep)
        add_ledig_Feld(new_field, color1)
        add_ledig_Feld(new_field, color2)
        analyze_figure(new_field, figur)
    # Plain single-colour field, no figures ("ledig [Farbe]").
    elif re.match(felder_1_ledig_2, field):
        match = re.match(felder_1_ledig_2, field)
        color = match.group(1)
        new_field = et.SubElement(parent, "Feld", layout="ledig")
        et.SubElement(new_field, "Farbe", value=color)
    # N-fold repeated two-colour split ("[Anzahl]mal ...").
    elif re.match(felder_X_1, field):
        match = re.match(felder_X_1, field)
        color1 = match.group("Farbe1")
        color2 = match.group("Farbe2")
        sep = match.group("Teilung")
        number = match.group("Anzahl")
        covering = match.group("Überdeckt")
        new_field = et.SubElement(parent, "Feld", layout=sep)
        et.SubElement(new_field, "Male", value=number)
        add_ledig_Feld(new_field, color1)
        add_ledig_Feld(new_field, color2)
        if covering:
            add_cover(new_field, covering)
    # Quarterly of two colours: four alternating sub-fields.
    elif re.match(felder_4_1, field):
        match = re.match(felder_4_1, field)
        sep = match.group("Teilung")
        color1 = match.group("Farbe1")
        color2 = match.group("Farbe2")
        new_field = et.SubElement(parent, "Feld", layout=sep)
        add_ledig_Feld(new_field, color1)
        add_ledig_Feld(new_field, color2)
        add_ledig_Feld(new_field, color1)
        add_ledig_Feld(new_field, color2)
    else:
        # No field pattern matched: report and leave the tree untouched.
        print("FIELD INFO COULDNT BE READ!")
        print(field)
    # Check for figures
    # Check for further elements
def analyze_figure(parent, figur, je=False, customNum=None):
    """
    Split *figur* into up to three figure groups (joined by ", " and
    " und ") plus an optional trailing formation, and hand each part to
    analyze_single_figure.

    je: when True, counts are doubled downstream ("je" = "each").
    customNum: overrides the parsed count downstream.
    """
    #~ print(figur)
    # Before analyzing single figures, find out how many figure groups
    # the clause contains.
    match = re.match(fgroups_pattern, figur)
    if match:
        basic = match.group("BasisFigur")
        figur2 = match.group("Figur2")
        figur3 = match.group("Figur3")
        formation = match.group("Formation")
        #~ print(basic)
        #~ print(figur2)
        #~ print(figur3)
        #~ print(formation)
        # The formation group captures its leading ", "; strip it.
        if formation:
            formation = match.group("Formation").lstrip(", ")
        else:
            formation = ""
        if basic and figur2 and figur3:
            # Three groups: wrap all of them in one <FGruppe>.
            fgroup = et.SubElement(parent, "FGruppe", formation=formation, orientation="")
            analyze_single_figure(fgroup, figur3.rstrip(", "), je, customNum)
            analyze_single_figure(fgroup, re.sub(" und $", "", figur2), je, customNum)
            analyze_single_figure(fgroup, basic, je, customNum)
        elif basic and figur2:
            # Two groups: same wrapping, without the third.
            fgroup = et.SubElement(parent, "FGruppe", formation=formation, orientation="")
            analyze_single_figure(fgroup, re.sub(" und $", "", figur2), je, customNum)
            analyze_single_figure(fgroup, basic, je, customNum)
        elif basic:
            # Single group: attach directly, forwarding the formation.
            analyze_single_figure(parent, basic, je, customNum, formation)
        else:
            print("MULTIFIGURES ERROR")
    else:
        print("MULTIFIGURES COULDNT BE READ!")
        print(figur)
def analyze_single_figure(parent, figur, je, customNum, customFormation=None):
    """
    Parse one figure description and attach it below *parent*.

    Tries the countable pattern first, then falls back to the
    uncountable pattern with an implicit count of 1; prints a
    diagnostic when neither matches.
    """
    counted = re.match(figur_pattern, figur)
    if counted:
        process_figure(counted, parent, figur, je, customNum, customFormation)
        return
    uncounted = re.match(uncountable_pattern, figur)
    if uncounted:
        process_figure(uncounted, parent, figur, je, 1, customFormation)
        return
    print("FIGURE INFO COULDNT BE READ!")
    print(figur)
def process_figure(match, parent, figur, je, customNum, customFormation):
    """
    Materialize one matched figure clause as an <FGruppe> containing
    *number* <Figur> elements under *parent*, attaching sub-clause
    elements (besetzt/belegt/bewinkelt/umschliesst/begleitet/cover) to
    each figure.

    match: a regex match of figur_pattern or uncountable_pattern.
    je: double the count ("je" = "each side").
    customNum: overrides the parsed "Anzahl" count when truthy.
    customFormation: extra formation text merged into the group's
    formation attribute.
    """
    # Count: explicit override wins over the parsed "Anzahl" group.
    if customNum:
        number = customNum
    else:
        number = int(match.group("Anzahl"))
    if je:
        number *= 2
    # Colour: "verwechselt" (counterchanged) overrides any named colour.
    color = match.group("Farbe")
    if not color:
        color = ""
    verwechselt = match.group("VerwechselteTinkturen")
    if verwechselt:
        color = "verwechselt"
    # Collect every "Spezial*" modifier group into one comma-joined string.
    specials = []
    if match.group("Spezial") and len(match.group("Spezial")) > 0:
        special1 = match.group("Spezial").strip()
        specials.append(special1)
    if match.group("Spezial2") and len(match.group("Spezial2")) > 0:
        special2 = match.group("Spezial2").strip()
        specials.append(special2)
    if match.group("Spezial3") and len(match.group("Spezial3")) > 0:
        special3 = match.group("Spezial3").strip()
        specials.append(special3)
    if match.group("Spezial4") and len(match.group("Spezial4")) > 0:
        special4 = match.group("Spezial4").strip()
        specials.append(special4)
    if match.group("Spezial5") and len(match.group("Spezial5")) > 0:
        special5 = match.group("Spezial5").strip()
        specials.append(special5)
    special = ", ".join(specials)
    # Position clause ("an der/die/das ...") becomes the orientation.
    if match.group("Position") and len(match.group("Position")) > 0:
        orientation = match.group("Position").strip()
    else:
        orientation = ""
    figure = match.group("Figur")
    # Trailing sub-clauses (two possible copies each, "...2"/"...3"
    # suffixed, from the duplicated sub-pattern).
    besetzt = match.group("Besetzt")
    besetzt2 = match.group("Besetzt2")
    belegt = match.group("Belegt")
    belegt2 = match.group("Belegt2")
    belegt3 = match.group("Belegt3")
    bewinkelt = match.group("Bewinkelt")
    bewinkelt2 = match.group("Bewinkelt2")
    umschliesst = match.group("Umschliesst")
    umschliesst2 = match.group("Umschliesst2")
    begleitet = match.group("Begleitet")
    begleitet2 = match.group("Begleitet2")
    ueberdeckt = match.group("ÜberdecktSingle")
    ueberdeckt2 = match.group("ÜberdecktSingle2")
    letterSpecial = match.group("BuchstabenSpezifikation")
    formation = match.group("FormationSingle")
    formation2 = match.group("FormationSingle2")
    updown = match.group("ObenUnten")
    updown2 = match.group("ObenUnten2")
    # Merge parsed formations with the caller-provided one.
    formations = [x.strip() for x in [formation, formation2, customFormation] if x != None and x != ""]
    formation = ", ".join(formations)
    # Letter specification ("A, B und C"): one letter per figure; drop it
    # if the letter count disagrees with the figure count.
    if letterSpecial:
        letterSpecial = re.sub(",", "", letterSpecial)
        letterSpecial = re.sub("und ", "", letterSpecial)
        letterSpecial = letterSpecial.split()
        if len(letterSpecial) != int(number):
            print("NUM OF LETTERS NOT MATCHING COUNT")
            letterSpecial = None
    fgroup = et.SubElement(parent, "FGruppe", formation=formation, orientation="")
    # Emit one <Figur> per counted instance, each with its sub-elements.
    for i in range(int(number)):
        figure_element = et.SubElement(fgroup, "Figur", color=COLORS_ADJ[color], special=special, figure=figure, orientation=orientation)
        if besetzt:
            add_besetzt(figure_element, besetzt)
        if besetzt2:
            add_besetzt(figure_element, besetzt2)
        if belegt:
            add_belegt(figure_element, belegt)
        if belegt2:
            add_belegt(figure_element, belegt2)
        if belegt3:
            add_belegt(figure_element, belegt3)
        if bewinkelt:
            add_bewinkelt(figure_element, bewinkelt)
        if bewinkelt2:
            add_bewinkelt(figure_element, bewinkelt2)
        if umschliesst:
            add_umschliesst(figure_element, umschliesst)
        if umschliesst2:
            add_umschliesst(figure_element, umschliesst2)
        if begleitet:
            add_begleitet(figure_element, begleitet)
        if begleitet2:
            add_begleitet(figure_element, begleitet2)
        # Prepend this figure's letter to its special attribute.
        if letterSpecial:
            if len(figure_element.get("special")) > 0:
                figure_element.set("special", letterSpecial[i] + ", " + figure_element.get("special"))
            else:
                figure_element.set("special", letterSpecial[i])
    # Covers apply to the whole group, not to each figure.
    if ueberdeckt:
        add_cover(fgroup, ueberdeckt)
    if ueberdeckt2:
        add_cover(fgroup, ueberdeckt2)
    # prepare updown so they go the right elements
    # NOTE(review): the returned updown lists are never used afterwards -
    # presumably prepare_updown attaches elements as a side effect, or
    # this is unfinished; confirm before relying on it.
    if updown:
        updown_list = prepare_updown(fgroup, updown)
    if updown2:
        updown_list2 = prepare_updown(fgroup, updown2)
def prepare_updown(fgroup, content):
"""
In: FGruppe-Element which contains Figur-Elements
In: String with Info
Out: etree-Element to add to a figure
"""
#~ print(content)
match = re.match(" (der|die|das) (obere(?P<Plural1>n)?|rechte(?P<Plural1>n)?) (?P<Category>(belegt|besetzt|bewinkelt|begleitet|überdeckt)) (von|mit)(?P<Je> je)? (?P<CustomNum>\d), (der|die|das) (untere(?P<Plural2>n)?|linke(?P<Plural2>n)?) (mit|von)(?P<Je> je)? (?P<Figur>.*?)$", content)
if match:
plural1 | |
Fighters and the Venue. This includes the PCs,
the monster group, and the location of the fight. They're created in
initiative order (i.e., the order in which they act in a round of
fighting, according to the ruleset).
Saves that list in self.__fighters.
Returns: dict - (tuple of name, group) -> (tuple of init params)
'''
# Build the fighter list (even if the fight was saved since monsters
# or characters could have been added since the save happened).
# This is a parallel array to self._saved_fight['fighters'] but the
# contents are ThingsInFight (i.e., Fighters or a Venue)
self.__fighters = []
# Start with the PCs
for name in self.world.get_creature_details_list('PCs'):
fighter = self.world.get_creature(name, 'PCs')
if fighter is not None:
self.__fighters.append(fighter)
# Then add the monsters (save the Venue aside, if it exists)
the_fight_itself = None
if monster_group is not None:
for name in self.world.get_creature_details_list(monster_group):
details = self.world.get_creature_details(name,
monster_group)
if details is None:
continue
if name == ca_fighter.Venue.name:
the_fight_itself = details
else:
fighter = self.world.get_creature(name, monster_group)
self.__fighters.append(fighter)
# Put the creatures in order
# Build the 'init' dict: (name, group) -> (init tuple)...
# ...for all creatures but not for the room, itself. This contains
# the data on which the initiatives of the fighters are calculated.
init = {}
if fight_order is None:
# There's no previously established fight order (which would be
# the case if we're jumping into a fight that was saved) to
# maintain, just generate the initiative for all of the fighters.
for fighter in self.__fighters:
init[(fighter.name,
fighter.group)] = self.world.ruleset.initiative(
fighter,
self.__fighters)
else:
# We're assuming that every fighter in fight_order is in
# self.__fighters but not necessarily the other way around.
for fighter in fight_order:
if fighter['name'] == ca_fighter.Venue.name:
continue
# Generate an initiative if it's not already there. This
# deals with legacy fights that don't contain initiative.
if 'init' not in fighter:
for f in self.__fighters:
if (f.name == fighter['name'] and
f.group == fighter['group']):
fighter['init'] = self.world.ruleset.initiative(f)
break
# Now, build |init| from the fight order
init[(fighter['name'], fighter['group'])] = fighter['init']
# Finally, add initiative for any fighters that aren't represented
# in the fight order. This deals with fighters that were added
# after the fight started.
for fighter in self.__fighters:
if (fighter.name, fighter.group) not in init:
init[(fighter.name,
fighter.group)] = self.world.ruleset.initiative(
fighter)
# Now, sort based on the initiative we just built
self.__fighters.sort(key=lambda fighter:
init[(fighter.name, fighter.group)],
reverse=True)
# Assign an ordinal to insure the order for fights that are saved
# and, then, resorted on re-entry.
if fight_order is None:
for index, fighter in enumerate(self.__fighters):
name_group = (fighter.name, fighter.group)
if name_group in init:
init_tuple = init[name_group]
init_list = list(init_tuple)
ordinal = (-index if self.world.ruleset.sort_init_descending
else index)
init_list.append(ordinal)
init_tuple = tuple(init_list)
init[name_group] = init_tuple
# Put the fight info (if any) at the top of the list.
if the_fight_itself is not None:
fight = ca_fighter.Venue(monster_group,
the_fight_itself,
self.world.ruleset,
self._window_manager)
self.__fighters.insert(0, fight)
return init
def __build_saved_fight(self,
                        init  # dict - (name, group) -> (init params)
                        ):
    '''
    Populates self._saved_fight['fighters'] from self.__fighters,
    recording name/group for every entry and the initiative tuple for
    everything except the Venue.
    Returns: Nothing
    '''
    self._saved_fight['fighters'] = []
    for combatant in self.__fighters:
        record = {'name': combatant.name, 'group': combatant.group}
        if combatant.name != ca_fighter.Venue.name:
            # NOTE: an ongoing fight is saved so that re-running a
            # crashed fight will come-up in the middle of the fight --
            # only reset fighters for a genuinely new fight.
            if not self._saved_fight['saved']:
                combatant.start_fight()
            record['init'] = init[(combatant.name, combatant.group)]
        self._saved_fight['fighters'].append(record)
def __change_viewing_index(self,
                           adj  # integer adjustment to viewing index
                           ):
    '''
    Moves the "currently viewed" Fighter/Venue selection by |adj|,
    wrapping to the opposite end when it runs past either end of
    self._saved_fight['fighters'].  Some commands use the currently
    viewed entity as their default recipient.
    Returns: Nothing
    '''
    fighter_count = len(self._saved_fight['fighters'])
    new_index = self.__viewing_index + adj
    if new_index >= fighter_count:
        new_index = 0
    elif new_index < 0:
        new_index = fighter_count - 1
    self.__viewing_index = new_index
    def __damage_HP(self):
        '''
        Command ribbon method.
        Removes life levels (or 'hit points' -- HP) from the selected fighter
        or the current fighter's opponent.  Prompts the user for the amount,
        records a human-readable comment with the 'adjust-hp' action, and
        (when the change looks like an attack) optionally records an
        'attack' maneuver for the attacker as well.
        Returns: False to exit the current ScreenHandler, True to stay.
        '''
        # Figure out who loses the hit points
        attacker = None
        if self.__viewing_index is not None:
            # An explicitly viewed fighter takes the HP change directly.
            current_fighter = self.__fighters[self.__viewing_index]
            opponent = self.get_opponent_for(current_fighter)
            hp_recipient = current_fighter
            # Not going to do an 'attack' action when the HP was modified
            # through an index
            attacker = None
        else:
            # No explicit selection: default to damaging the current
            # fighter's opponent (treated as an attack); if there is no
            # opponent, the current fighter takes the change.
            current_fighter = self.get_current_fighter()
            opponent = self.get_opponent_for(current_fighter)
            if opponent is None:
                hp_recipient = current_fighter
                attacker = None
            else:
                hp_recipient = opponent
                attacker = current_fighter
        # Reduce the HP
        title = 'Reduce (%s\'s) HP By...' % hp_recipient.name
        height = 1
        width = len(title)
        adj = self._window_manager.input_box_number(height, width, title)
        if adj is None:
            return True
        if adj == 0:
            return True # Keep fighting
        adj = -adj # NOTE: SUBTRACTING the adjustment
        action = {'action-name': 'adjust-hp', 'adj': adj}
        # Record for posterity
        if hp_recipient is opponent:
            # adj < 0 means damage was dealt; adj > 0 means HP was regained.
            if adj < 0:
                action['comment'] = '(%s) did %d HP to (%s)' % (
                        current_fighter.name,
                        -adj,
                        opponent.name)
            else:
                action['comment'] = '(%s) regained %d HP' % (opponent.name,
                                                             adj)
            # Did attacker already attack
            # Only offer to record an 'attack' maneuver if the attacker has
            # not already made some kind of attack this turn.
            if attacker is None:
                ask_to_attack = False
            elif 'attack' in attacker.details['actions_this_turn']:
                ask_to_attack = False
            elif 'all-out-attack' in attacker.details['actions_this_turn']:
                ask_to_attack = False
            elif 'move-and-attack' in attacker.details['actions_this_turn']:
                ask_to_attack = False
            else:
                ask_to_attack = True
            if ask_to_attack:
                attack_menu = [('yes', True), ('no', False)]
                should_attack, ignore = self._window_manager.menu(
                        ('Should %s Attack?' % attacker.name),
                        attack_menu)
                if should_attack:
                    comment = '(%s) did (Attack) maneuver' % attacker.name
                    self.world.ruleset.do_action(
                            attacker,
                            {'action-name': 'attack',
                             'comment': comment},
                            self)
        else:
            # HP went to the current (or viewed) fighter rather than an
            # opponent -- no attack bookkeeping, just a comment.
            if adj < 0:
                action['comment'] = '%d HP was done to (%s)' % (
                        -adj,
                        current_fighter.name)
            else:
                action['comment'] = ' (%s) regained %d HP' % (
                        current_fighter.name,
                        adj)
        self.world.ruleset.do_action(hp_recipient, action, self)
        # Redraw so the HP change is visible immediately.
        self._window.show_fighters(current_fighter,
                                   opponent,
                                   self.__fighters,
                                   self._saved_fight['index'],
                                   self.__viewing_index)
        return True # Keep going
def __dead(self):
'''
Command ribbon method.
Allows the user to change the consciousness level of a creature. This
may cause it to become dead, reanimate back to life, or go
unconscious, for example.
Returns: False to exit the current ScreenHandler, True to stay.
'''
now_dead, current_fighter = self.__select_fighter('Who is Dead',
default_selection=1)
if now_dead is None:
return True # Keep fighting
state_menu = sorted(ca_fighter.Fighter.conscious_map.iteritems(),
key=lambda x: x[1])
new_state_number, ignore = self._window_manager.menu('New State',
state_menu)
if new_state_number is None:
return True # Keep fighting
dead_name = now_dead.name
self.world.ruleset.do_action(
now_dead,
{
'action-name': 'set-consciousness',
'level': new_state_number,
'comment': '(%s) is now (%s)' % (
dead_name,
ca_fighter.Fighter.get_name_from_state_number(
new_state_number))
},
self)
opponent = self.get_opponent_for(current_fighter)
self._window.show_fighters(current_fighter,
opponent,
self.__fighters,
self._saved_fight['index'],
self.__viewing_index)
return True # Keep going
    def __defend(self):
        '''
        Command ribbon method.
        Allows the user to pick a creature to defend itself. In some rulesets
        (GURPS, for example), that would cause the creature to lose aim.
        Returns: False to exit the current ScreenHandler, True to stay.
        '''
        current_fighter = self.get_current_fighter()
        opponent = self.get_opponent_for(current_fighter)
        # Defending is framed around the current turn, so drop any explicit
        # viewing selection and redraw before asking who defends.
        if self.__viewing_index != self._saved_fight['index']:
            self.__viewing_index = None
            self._window.show_fighters(current_fighter,
                                       opponent,
                                       self.__fighters,
                                       self._saved_fight['index'],
                                       self.__viewing_index)
        # Figure out who is defending
        if opponent is None:
            # Nobody to choose between -- the current fighter defends.
            defender = current_fighter
        else:
            defender_menu = [(current_fighter.name, current_fighter),
                             (opponent.name, opponent)]
            starting_index = 1 # assume the opponent
            defender, ignore = self._window_manager.menu('Who is defending',
                                                         defender_menu,
                                                         starting_index)
        if defender is None:
            return True # Keep fighting
        self.world.ruleset.do_action(
                defender,
                {
                    'action-name': 'defend',
                    'comment': '(%s) defended (and lost aim)' % defender.name
                },
                self)
        # Redraw so the defend action is reflected on screen.
        self._window.show_fighters(current_fighter,
                                   opponent,
                                   self.__fighters,
                                   self._saved_fight['index'],
                                   self.__viewing_index)
        return True # Keep going
    def _draw_screen(self):
        '''
        Draws the complete screen for the FightHandler: the round ribbon at
        the top, the fighter panes, the status ribbon, and the command
        ribbon.
        Returns: nothing.
        '''
        self._window.clear()
        # Show the explicitly viewed fighter if there is one; otherwise show
        # the fighter whose turn it currently is.
        if self.__viewing_index is not None:
            current_fighter = self.__fighters[self.__viewing_index]
        else:
            current_fighter = self.get_current_fighter()
        opponent = self.get_opponent_for(current_fighter)
        next_PC_name = self.__next_PC_name()
        self._window.round_ribbon(self._saved_fight['round'],
                                  next_PC_name,
                                  self.world.source_filename,
                                  ScreenHandler.maintain_game_file)
        self._window.show_fighters(current_fighter,
                                   opponent,
                                   self.__fighters,
                                   self._saved_fight['index'],
                                   self.__viewing_index)
        self._window.status_ribbon(self.world.source_filename,
                                   ScreenHandler.maintain_game_file)
        self._window.command_ribbon()
def __edit_attribute(self):
'''
Command ribbon method.
Allows the user to modify one or more of the current fighter's
attributes.
Returns: False to exit the current ScreenHandler, True to stay.
'''
if self.__viewing_index is not None:
fighter = self.__fighters[self.__viewing_index]
else:
fighter = self.get_current_fighter()
attribute_widget = AttributeWidget(self._window_manager,
self,
fighter)
attribute_widget.doit()
return True # keep fighting
def __full_notes(self):
'''
Command ribbon method.
Allows the user to modify the | |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import numbers
from enum import Enum
from typing import List, Any, Tuple, Optional
import numpy as np
from PIL import Image
import math
try:
import accimage
except ImportError:
accimage = None
import oneflow as flow
from oneflow.framework.tensor import Tensor
from . import functional_pil as F_pil
from . import functional_tensor as F_t
class InterpolationMode(Enum):
    r"""Interpolation modes

    Enumerates the resampling filters accepted by :func:`resize` and
    related transforms.  ``NEAREST``, ``BILINEAR`` and ``BICUBIC`` are
    supported for both PIL Images and tensors; the remaining members are
    accepted for PIL inputs only (see :func:`resize`).  Values are the
    lower-case filter names; the integer constants used by PIL are mapped
    via ``pil_modes_mapping`` / ``_interpolation_modes_from_int``.
    """
    NEAREST = "nearest"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    """Map a legacy PIL integer resampling constant to an InterpolationMode.

    The numbering follows PIL's constants: 0=NEAREST, 1=LANCZOS,
    2=BILINEAR, 3=BICUBIC, 4=BOX, 5=HAMMING.  An unknown integer raises
    ``KeyError``.
    """
    return {
        0: InterpolationMode.NEAREST,
        1: InterpolationMode.LANCZOS,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
    }[i]
# Maps each InterpolationMode member to the integer resampling constant
# understood by PIL (the inverse of _interpolation_modes_from_int).
pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}
def _get_image_size(img: Tensor) -> List[int]:
    """Return the size of ``img`` as ``[w, h]``.

    oneflow Tensors are handled by the tensor backend; anything else is
    dispatched to the PIL backend.
    """
    if not isinstance(img, flow.Tensor):
        return F_pil._get_image_size(img)
    return F_t._get_image_size(img)
def _get_image_num_channels(img: Tensor) -> int:
    """Return the number of channels in ``img``.

    Dispatches on type: oneflow Tensors go to the tensor backend,
    everything else to the PIL backend.
    """
    backend = F_t if isinstance(img, flow.Tensor) else F_pil
    return backend._get_image_num_channels(img)
def _is_pil_image(img: Any) -> bool:
    """Return True if ``img`` is a PIL Image (or an accimage Image, when
    the optional accimage backend is available)."""
    accepted = (Image.Image,) if accimage is None else (Image.Image,
                                                        accimage.Image)
    return isinstance(img, accepted)
def _is_numpy(img: Any) -> bool:
    # True for any numpy ndarray, regardless of dtype or rank.
    return isinstance(img, np.ndarray)
def _is_numpy_image(img: Any) -> bool:
    """Return True if ``img`` has an image-like rank (HxW or HxWxC)."""
    return 2 <= img.ndim <= 3
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    See :class:`~transforms.ToTensor` for more details.
    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
    Returns:
        Tensor: Converted image in CHW layout.  Integer-typed results are
        converted to the default float dtype and scaled by 1/255.
    Raises:
        TypeError: If ``pic`` is neither a PIL Image nor an ndarray.
        ValueError: If an ndarray input is not 2- or 3-dimensional.
    """
    if not (_is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError("pic should be PIL Image or ndarray. Got {}".format(type(pic)))
    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(
            "pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim)
        )
    # default_float_dtype = flow.get_default_dtype()
    default_float_dtype = flow.float32
    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            # promote HxW to HxWx1 so the HWC->CHW transpose below works
            pic = pic[:, :, None]
        img = flow.Tensor(pic.transpose((2, 0, 1)))
        # backward compatibility
        # NOTE(review): this only rescales when the constructed tensor is
        # flow.int; presumably flow.Tensor defaults to float32, in which
        # case uint8 ndarrays would NOT be scaled here -- confirm against
        # oneflow's Tensor construction semantics.
        if img.dtype == flow.int:
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img
    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage exposes copyto() into a pre-allocated CHW float buffer.
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return flow.Tensor(nppic).to(dtype=default_float_dtype)
    # handle PIL Image
    # Modes "I", "I;16" and "F" carry wide pixel types; everything else is
    # read as uint8 and marked for the integer (rescale) path below.
    mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
    if mode_to_nptype.get(pic.mode, np.uint8) == np.uint8:
        dtype = flow.int32
    else:
        dtype = flow.float32
    img = flow.Tensor(
        np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True), dtype=dtype,
    )
    if pic.mode == "1":
        # 1-bit images: map {0, 1} to {0, 255} so the common /255 scaling
        # below yields {0.0, 1.0}
        img = 255 * img
    # PIL's size is (w, h); reshape to HxWxC
    img = flow.reshape(img, shape=(pic.size[1], pic.size[0], len(pic.getbands())))
    # put it from HWC to CHW format
    res = img.permute(2, 0, 1)
    if img.dtype == flow.int:
        res = res.to(dtype=default_float_dtype).div(255)
    return res
def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.
    See :class:`~vision.transforms.PILToTensor` for more details.
    Unlike :func:`to_tensor`, no dtype conversion or value rescaling is
    performed; only the layout is changed to CHW.
    Args:
        pic (PIL Image): Image to be converted to tensor.
    Returns:
        Tensor: Converted image.
    Raises:
        TypeError: If ``pic`` is not a PIL Image.
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError("pic should be PIL Image. Got {}".format(type(pic)))
    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return flow.tensor(nppic)
    # handle PIL Image
    img = flow.tensor(np.asarray(pic))
    # PIL's size is (w, h); view as HxWxC
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img
def convert_image_dtype(
    image: flow.Tensor, dtype: flow.dtype = flow.float
) -> flow.Tensor:
    """Convert a tensor image to ``dtype``, rescaling values accordingly.
    This function does not support PIL Image.
    Args:
        image (flow.Tensor): Image to be converted
        dtype (flow.dtype): Desired data type of the output
    Returns:
        Tensor: Converted image
    .. note::
        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.
    Raises:
        RuntimeError: When trying to cast :class:`flow.float32` to :class:`flow.int32` or :class:`flow.int64` as
            well as for trying to cast :class:`flow.float64` to :class:`flow.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
        TypeError: If ``image`` is not a oneflow tensor.
    """
    # Guard clause: PIL Images (and anything else) are rejected up front.
    if not isinstance(image, flow.Tensor):
        raise TypeError("Input img should be Tensor Image")
    return F_t.convert_image_dtype(image, dtype)
def normalize(
    tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False
) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.
    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
    See :class:`~transforms.Normalize` for more details.
    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool,optional): Bool to make this operation inplace.
    Returns:
        Tensor: Normalized Tensor image.
    Raises:
        TypeError: If ``tensor`` is not a float oneflow tensor.
        ValueError: If ``tensor`` has fewer than 3 dimensions, or ``std``
            contains a zero.
    """
    if not isinstance(tensor, flow.Tensor):
        raise TypeError(
            "Input tensor should be a oneflow tensor. Got {}.".format(type(tensor))
        )
    if not tensor.dtype == flow.float:
        raise TypeError(
            "Input tensor should be a float tensor. Got {}.".format(tensor.dtype)
        )
    if tensor.ndim < 3:
        raise ValueError(
            "Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = "
            "{}.".format(tensor.size())
        )
    if not inplace:
        tensor = tensor.clone()
    dtype = tensor.dtype
    mean = flow.tensor(mean, dtype=dtype, device=tensor.device)
    std = flow.tensor(std, dtype=dtype, device=tensor.device)
    # TODO: use tensor.any()
    # if (std == 0).any():
    # Zero std would silently produce inf/nan below, so reject it early.
    if std.eq(0).sum().numpy() > 0:
        raise ValueError(
            "std evaluated to zero after conversion to {}, leading to division by zero.".format(
                dtype
            )
        )
    # Reshape per-channel statistics to (C, 1, 1) so they broadcast over
    # the spatial dimensions.
    if mean.ndim == 1:
        mean = mean.reshape(-1, 1, 1)
    if std.ndim == 1:
        std = std.reshape(-1, 1, 1)
    # NOTE(review): sub()/div() are out-of-place and only rebind the local
    # name, so even with inplace=True the caller's tensor appears not to be
    # modified (the commented-out sub_/div_ line suggests true in-place was
    # intended) -- confirm intended behavior.
    tensor = tensor.sub(mean).div(std)
    # tensor.sub_(mean).div_(std)
    return tensor
def resize(
    img: Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is oneflow Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e, if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`flow.utils.vision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
    Returns:
        PIL Image or Tensor: Resized image.
    """
    # Backward compatibility: accept legacy PIL integer constants, but warn.
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)
    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")
    # Dispatch: tensors go to the tensor backend, everything else to PIL.
    if isinstance(img, (flow.Tensor, flow._oneflow_internal.Tensor)):
        return F_t.resize(img, size=size, interpolation=interpolation.value)
    pil_interpolation = pil_modes_mapping[interpolation]
    return F_pil.resize(img, size=size, interpolation=pil_interpolation)
def scale(*args, **kwargs):
    """Deprecated alias for :func:`resize`; warns and forwards all
    arguments unchanged."""
    warnings.warn(
        "The use of the transforms.Scale transform is deprecated, "
        "please use transforms.Resize instead."
    )
    return resize(*args, **kwargs)
def pad(
img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant"
) -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
at most 3 leading dimensions for mode edge,
and an arbitrary number of leading dimensions for mode constant
Args:
img (PIL Image or Tensor): Image to be padded.
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
fill (number | |
# encoding: utf-8
import logging
from datetime import timedelta
from django.conf import settings
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from core.csv_export import CsvExportMixin
from core.utils import (
format_datetime,
NONUNIQUE_SLUG_FIELD_PARAMS,
slugify,
url,
)
logger = logging.getLogger('kompassi')
# Whether (and how) a programme may be recorded and published.
VIDEO_PERMISSION_CHOICES = [
    ('public', _('My programme may be recorded and published')),
    ('private', _('I forbid publishing my programme, but it may be recorded for archiving purposes')),
    ('forbidden', _('I forbid recording my programme altogether')),
]
# Shared label for start-time fields/columns.
START_TIME_LABEL = _('Starting time')
# Workflow states of a Programme, roughly in lifecycle order.
STATE_CHOICES = [
    ('idea', _('Internal programme idea')),
    ('asked', _('Asked from the host')),
    ('offered', _('Offer received')),
    ('accepted', _('Accepted')),
    ('published', _('Published')),
    ('cancelled', _('Cancelled')),
    ('rejected', _('Rejected')),
]
# Bootstrap label CSS class used when rendering each state.
STATE_CSS = dict(
    idea='label-default',
    asked='label-default',
    offered='label-default',
    accepted='label-primary',
    published='label-success',
    cancelled='label-danger',
    rejected='label-danger',
)
# What computer (if any) the host will use for the programme.
COMPUTER_CHOICES = [
    ('con', _('Laptop provided by the event')),
    ('pc', _('Own laptop – PC')),
    ('mac', _('Own laptop – Mac')),
    ('none', _('No computer required')),
]
# Generic yes/no/not-sure answer set.
TRISTATE_CHOICES = [
    ('yes', _('Yes')),
    ('no', _('No')),
    ('notsure', _('Not sure')),
]
# Common kwargs for CharFields using TRISTATE_CHOICES; max_length is sized
# to the longest choice key.
TRISTATE_FIELD_PARAMS = dict(
    choices=TRISTATE_CHOICES,
    max_length=max(len(key) for (key, label) in TRISTATE_CHOICES),
)
# Whether the programme contains copyright-encumbered audio/video.
ENCUMBERED_CONTENT_CHOICES = [
    ('yes', _('My programme contains copyright-encumbered audio or video')),
    ('no', _('My programme does not contain copyright-encumbered audio or video')),
    ('notsure', _('I\'m not sure whether my programme contains copyright-encumbered content or not')),
]
# Host's preference about official event photography.
PHOTOGRAPHY_CHOICES = [
    ('please', _('Please photograph my programme')),
    ('okay', _('It\'s OK to photograph my programme')),
    ('nope', _('Please do not photograph my programme')),
]
# Whether the programme is (or will be) presented at other conventions.
RERUN_CHOICES = [
    ('already', _('Yes. The programme has previously been presented in another convention.')),
    ('will', _('Yes. The programme will be presented in a convention that takes place before this one.')),
    ('might', _('Maybe. The programme might be presented in a convention that takes place before this one.')),
    ('original', _(
        'No. The programme is original to this convention and I promise not to present it elsewhere before.'
    )),
]
# Amount of physical play involved (originally for RPG programmes).
PHYSICAL_PLAY_CHOICES = [
    ('lots', _('Lots of it')),
    ('some', _('Some')),
    ('none', _('Not at all')),
]
# State keys partitioned into "still in play" vs. terminal states.
PROGRAMME_STATES_ACTIVE = ['idea', 'asked', 'offered', 'accepted', 'published']
PROGRAMME_STATES_INACTIVE = ['rejected', 'cancelled']
class Programme(models.Model, CsvExportMixin):
"""
Represents a scheduled programme in an event. Usually belongs to a Category and has a start and
end time. Also usually happens in a Room.
Note that this is a "dense sparse model" meaning the model covers multiple types of Programme
some of which have fields that are not used by the others. The fields used are specified by the
Form used. The default form fits lectures etc. and other types of programme are covered using
AlternativeProgrammeForms.
"""
category = models.ForeignKey('programme.Category',
verbose_name=_('category'),
help_text=_('Choose the category that fits your programme the best. We reserve the right to change this.'),
)
form_used = models.ForeignKey('programme.AlternativeProgrammeForm',
blank=True,
null=True,
verbose_name=_('form used'),
help_text=_('Which form was used to offer this Programme? If null, the default form was used.'),
)
slug = models.CharField(**NONUNIQUE_SLUG_FIELD_PARAMS)
title = models.CharField(
max_length=1023,
verbose_name=_('Title'),
help_text=_('Make up a concise title for your programme. We reserve the right to edit the title.'),
)
description = models.TextField(
blank=True,
default='',
verbose_name=_('Description'),
help_text=_(
'This description is published in the web schedule and the programme booklet. The purpose of this '
'description is to give the participant sufficient information to decide whether to take part or '
'not and to market your programme to the participants. We reserve the right to edit the '
'description.'
),
)
three_word_description = models.CharField(
max_length=1023,
blank=True,
default='',
verbose_name=_('Three-word description'),
help_text=_('Describe your game in three words: for example, genre, theme and attitude.'),
)
use_audio = models.CharField(
default='no',
verbose_name=_('Audio playback'),
help_text=_('Will you play audio in your programme?'),
**TRISTATE_FIELD_PARAMS
)
use_video = models.CharField(
default='no',
verbose_name=_('Video playback'),
help_text=_('Will you play video in your programme?'),
**TRISTATE_FIELD_PARAMS
)
number_of_microphones = models.IntegerField(
default=1,
verbose_name=_('Microphones'),
help_text=_('How many microphones do you require?'),
choices=[
(0, '0'),
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(99, _('More than five – Please elaborate on your needs in the "Other tech requirements" field.')),
],
)
computer = models.CharField(
default='con',
choices=COMPUTER_CHOICES,
max_length=max(len(key) for (key, label) in COMPUTER_CHOICES),
verbose_name=_('Computer use'),
help_text=_(
'What kind of a computer do you wish to use? The use of your own computer is only possible if '
'agreed in advance.'
),
)
tech_requirements = models.TextField(
blank=True,
verbose_name=_('Other tech requirements'),
help_text=_('Do you have tech requirements that are not covered by the previous questions?')
)
room_requirements = models.TextField(
blank=True,
verbose_name=_('Room requirements'),
help_text=_(
'How large an audience do you expect for your programme? What kind of a room do you wish for your '
'programme?'
),
)
requested_time_slot = models.TextField(
blank=True,
verbose_name=_('Requested time slot'),
help_text=_(
'At what time would you like to hold your programme? Are there other programme that you do not '
'wish to co-incide with?'
),
)
video_permission = models.CharField(
max_length=15,
choices=VIDEO_PERMISSION_CHOICES,
default=VIDEO_PERMISSION_CHOICES[0][0],
verbose_name=_('Recording permission'),
help_text=_('May your programme be recorded and published in the Internet?'),
)
encumbered_content = models.CharField(
default='no',
max_length=max(len(key) for (key, label) in ENCUMBERED_CONTENT_CHOICES),
choices=ENCUMBERED_CONTENT_CHOICES,
verbose_name=_('Encumbered content'),
help_text=_(
'Encumbered content cannot be displayed on our YouTube channel. Encumbered content will be edited '
'out of video recordings.'
),
)
photography = models.CharField(
default='okay',
max_length=max(len(key) for (key, label) in PHOTOGRAPHY_CHOICES),
choices=PHOTOGRAPHY_CHOICES,
verbose_name=_('Photography of your prorgmme'),
help_text=_(
'Our official photographers will try to cover all programmes whose hosts request their programmes '
'to be photographed.'
),
)
rerun = models.CharField(
default='original',
max_length=max(len(key) for (key, label) in RERUN_CHOICES),
choices=RERUN_CHOICES,
verbose_name=_('Is this a re-run?'),
help_text=_(
'Have you presented this same programme at another event before the event you are offering '
'it to now, or do you intend to present it in another event before this one? If you are unsure '
'about the re-run policy of this event, please consult the programme managers.'
),
)
notes_from_host = models.TextField(
blank=True,
verbose_name=_('Anything else?'),
help_text=_(
'If there is anything else you wish to say to the programme manager that is not covered by the '
'above questions, please enter it here.'
),
)
state = models.CharField(
max_length=15,
choices=STATE_CHOICES,
default='accepted',
verbose_name=_('State'),
help_text=_(
'The programmes in the state "Published" will be visible to the general public, if the schedule '
'has already been published.'
),
)
frozen = models.BooleanField(
default=False,
verbose_name=_('Frozen'),
help_text=_(
'When a programme is frozen, its details can no longer be edited by the programme host. The '
'programme manager may continue to edit these, however.'
),
)
start_time = models.DateTimeField(blank=True, null=True, verbose_name=START_TIME_LABEL)
# denormalized
end_time = models.DateTimeField(blank=True, null=True, verbose_name=_('Ending time'))
length = models.IntegerField(
blank=True,
null=True,
verbose_name=_('Length (minutes)'),
help_text=_(
'In order to be displayed in the schedule, the programme must have a start time and a length and '
'must be assigned into a room.'
),
)
# Originally hitpoint2017 rpg form fields
rpg_system = models.CharField(
max_length=512,
blank=True,
default='',
verbose_name=_('RPG system'),
help_text=_('Which rule system is your RPG using?'),
)
approximate_length = models.IntegerField(
blank=True,
null=True,
default=240,
verbose_name=_('approximate length (minutes)'),
help_text=_('Please give your best guess on how long you expect your game to take.'),
)
physical_play = models.CharField(
max_length=max(len(key) for (key, text) in PHYSICAL_PLAY_CHOICES),
default='some',
choices=PHYSICAL_PLAY_CHOICES,
verbose_name=_('Amount of physical play'),
help_text=_(
'In this context, physical play can mean, for example, using your whole body, acting the actions '
'of your character or moving around in the allocated space.'
),
)
is_english_ok = models.BooleanField(
verbose_name=_('English OK'),
help_text=_(
'Please tick this box if you are able, prepared and willing to host your programme in English if '
'necessary.'
),
default=False,
)
is_children_friendly = models.BooleanField(
verbose_name=_('children-friendly'),
help_text=_(
'Please tick this box if your game is suitable for younger players. Please give more details, if '
'necessary, in the last open field.'
),
default=False,
)
is_age_restricted = models.BooleanField(
verbose_name=_('restricted to people of age 18 and over'),
help_text=_(
'Please tick this box if your game contains themes that require it to be restricted to players of '
'18 years and older.'
),
default=False,
)
is_beginner_friendly = models.BooleanField(
verbose_name=_('beginner friendly'),
help_text=_('Please tick this box if your game can be enjoyed even without any prior role-playing experience.'),
default=False,
)
is_intended_for_experienced_participants = models.BooleanField(
verbose_name=_('experienced participants preferred'),
default=False,
)
min_players = models.PositiveIntegerField(
verbose_name=_('minimum number of players'),
help_text=_('How many players must there at least be for the game to take place?'),
default=1,
validators=[MinValueValidator(1), MaxValueValidator(99)],
)
max_players = models.PositiveIntegerField(
verbose_name=_('maximum number of players'),
help_text=_('What is the maximum number of players that can take | |
return
if user == 'ci':
if sa['name'] in ('ci-agent', 'admin'):
if DEFAULT_NAMESPACE == 'default': # real-ci needs access to all namespaces
return
if sa['namespace'] == BATCH_PODS_NAMESPACE:
return
if user == 'test':
if sa['name'] == 'test-batch-sa' and sa['namespace'] == BATCH_PODS_NAMESPACE:
return
raise web.HTTPBadRequest(reason=f'unauthorized service account {(sa["namespace"], sa["name"])} for user {user}')
@routes.post('/api/v1alpha/batches/{batch_id}/jobs/create')
@prom_async_time(REQUEST_TIME_POST_CREATE_JOBS)
@rest_authenticated_users_only
async def create_jobs(request, userdata):
app = request.app
db = app['db']
log_store = app['log_store']
worker_type = app['worker_type']
worker_cores = app['worker_cores']
batch_id = int(request.match_info['batch_id'])
user = userdata['username']
# restrict to what's necessary; in particular, drop the session
# which is sensitive
userdata = {
'username': user,
'gsa_key_secret_name': userdata['gsa_key_secret_name'],
'tokens_secret_name': userdata['tokens_secret_name']
}
async with LoggingTimer(f'batch {batch_id} create jobs') as timer:
async with timer.step('fetch batch'):
record = await db.select_and_fetchone(
'''
SELECT `state`, format_version FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''',
(user, batch_id))
if not record:
raise web.HTTPNotFound()
if record['state'] != 'open':
raise web.HTTPBadRequest(reason=f'batch {batch_id} is not open')
batch_format_version = BatchFormatVersion(record['format_version'])
async with timer.step('get request json'):
job_specs = await request.json()
async with timer.step('validate job_specs'):
try:
validate_jobs(job_specs)
except ValidationError as e:
raise web.HTTPBadRequest(reason=e.reason)
async with timer.step('build db args'):
spec_writer = SpecWriter(log_store, batch_id)
jobs_args = []
job_parents_args = []
job_attributes_args = []
n_ready_jobs = 0
ready_cores_mcpu = 0
n_ready_cancellable_jobs = 0
ready_cancellable_cores_mcpu = 0
prev_job_idx = None
start_job_id = None
for spec in job_specs:
job_id = spec['job_id']
parent_ids = spec.pop('parent_ids', [])
always_run = spec.pop('always_run', False)
if batch_format_version.has_full_spec_in_gcs():
attributes = spec.pop('attributes', None)
else:
attributes = spec.get('attributes')
id = (batch_id, job_id)
if start_job_id is None:
start_job_id = job_id
if batch_format_version.has_full_spec_in_gcs() and prev_job_idx:
if job_id != prev_job_idx + 1:
raise web.HTTPBadRequest(
reason=f'noncontiguous job ids found in the spec: {prev_job_idx} -> {job_id}')
prev_job_idx = job_id
resources = spec.get('resources')
if not resources:
resources = {}
spec['resources'] = resources
if 'cpu' not in resources:
resources['cpu'] = BATCH_JOB_DEFAULT_CPU
if 'memory' not in resources:
resources['memory'] = BATCH_JOB_DEFAULT_MEMORY
req_cores_mcpu = parse_cpu_in_mcpu(resources['cpu'])
req_memory_bytes = parse_memory_in_bytes(resources['memory'])
if req_cores_mcpu == 0:
raise web.HTTPBadRequest(
reason=f'bad resource request for job {id}: '
f'cpu cannot be 0')
cores_mcpu = adjust_cores_for_memory_request(req_cores_mcpu, req_memory_bytes, worker_type)
cores_mcpu = adjust_cores_for_packability(cores_mcpu)
if cores_mcpu > worker_cores * 1000:
total_memory_available = worker_memory_per_core_gb(worker_type) * worker_cores
raise web.HTTPBadRequest(
reason=f'resource requests for job {id} are unsatisfiable: '
f'requested: cpu={resources["cpu"]}, memory={resources["memory"]} '
f'maximum: cpu={worker_cores}, memory={total_memory_available}G')
secrets = spec.get('secrets')
if not secrets:
secrets = []
if len(secrets) != 0 and user != 'ci':
secrets = [(secret["namespace"], secret["name"]) for secret in secrets]
raise web.HTTPBadRequest(reason=f'unauthorized secret {secrets} for user {user}')
for secret in secrets:
if user != 'ci':
raise web.HTTPBadRequest(reason=f'unauthorized secret {(secret["namespace"], secret["name"])}')
spec['secrets'] = secrets
secrets.append({
'namespace': BATCH_PODS_NAMESPACE,
'name': userdata['gsa_key_secret_name'],
'mount_path': '/gsa-key',
'mount_in_copy': True
})
sa = spec.get('service_account')
check_service_account_permissions(user, sa)
env = spec.get('env')
if not env:
env = []
spec['env'] = env
if len(parent_ids) == 0:
state = 'Ready'
n_ready_jobs += 1
ready_cores_mcpu += cores_mcpu
if not always_run:
n_ready_cancellable_jobs += 1
ready_cancellable_cores_mcpu += cores_mcpu
else:
state = 'Pending'
spec_writer.add(json.dumps(spec))
db_spec = batch_format_version.db_spec(spec)
jobs_args.append(
(batch_id, job_id, state, json.dumps(db_spec),
always_run, cores_mcpu, len(parent_ids)))
for parent_id in parent_ids:
job_parents_args.append(
(batch_id, job_id, parent_id))
if attributes:
for k, v in attributes.items():
job_attributes_args.append(
(batch_id, job_id, k, v))
if batch_format_version.has_full_spec_in_gcs():
async with timer.step('write spec to gcs'):
await spec_writer.write()
rand_token = random.randint(0, app['n_tokens'] - 1)
n_jobs = len(job_specs)
async with timer.step('insert jobs'):
@transaction(db)
async def insert(tx):
    # Insert this bunch of jobs plus its parent/attribute rows and bump the
    # batch's staging counters, all in one transaction so a partially
    # inserted bunch is never visible.
    try:
        await tx.execute_many('''
INSERT INTO jobs (batch_id, job_id, state, spec, always_run, cores_mcpu, n_pending_parents)
VALUES (%s, %s, %s, %s, %s, %s, %s);
''',
                              jobs_args)
    except pymysql.err.IntegrityError as err:
        # 1062 ER_DUP_ENTRY https://dev.mysql.com/doc/refman/5.7/en/server-error-reference.html#error_er_dup_entry
        if err.args[0] == 1062:
            # a retried request already inserted this bunch; treat as success
            log.info(f'bunch containing job {(batch_id, jobs_args[0][1])} already inserted ({err})')
            return
        raise
    try:
        await tx.execute_many('''
INSERT INTO `job_parents` (batch_id, job_id, parent_id)
VALUES (%s, %s, %s);
''',
                              job_parents_args)
    except pymysql.err.IntegrityError as err:
        # 1062 ER_DUP_ENTRY https://dev.mysql.com/doc/refman/5.7/en/server-error-reference.html#error_er_dup_entry
        if err.args[0] == 1062:
            # unlike the jobs insert above, a duplicate (job, parent) pair is
            # a malformed request, not a retry
            raise web.HTTPBadRequest(
                text=f'bunch contains job with duplicated parents ({err})')
        raise
    await tx.execute_many('''
INSERT INTO `job_attributes` (batch_id, job_id, `key`, `value`)
VALUES (%s, %s, %s, %s);
''',
                          job_attributes_args)
    # accumulate staging counts for this token; ON DUPLICATE KEY adds to the
    # existing row when several bunches land on the same token
    await tx.execute_update('''
INSERT INTO batches_staging (batch_id, token, n_jobs, n_ready_jobs, ready_cores_mcpu)
VALUES (%s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
n_jobs = n_jobs + %s,
n_ready_jobs = n_ready_jobs + %s,
ready_cores_mcpu = ready_cores_mcpu + %s;
''',
                            (batch_id, rand_token,
                             n_jobs, n_ready_jobs, ready_cores_mcpu,
                             n_jobs, n_ready_jobs, ready_cores_mcpu))
    # track how much work could be cancelled, keyed the same way
    await tx.execute_update('''
INSERT INTO batch_cancellable_resources (batch_id, token, n_ready_cancellable_jobs, ready_cancellable_cores_mcpu)
VALUES (%s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
n_ready_cancellable_jobs = n_ready_cancellable_jobs + %s,
ready_cancellable_cores_mcpu = ready_cancellable_cores_mcpu + %s;
''',
                            (batch_id, rand_token,
                             n_ready_cancellable_jobs, ready_cancellable_cores_mcpu,
                             n_ready_cancellable_jobs, ready_cancellable_cores_mcpu))
    if batch_format_version.has_full_spec_in_gcs():
        # record where this bunch's specs start so they can be located in GCS
        await tx.execute_update('''
INSERT INTO batch_bunches (batch_id, token, start_job_id)
VALUES (%s, %s, %s);
''',
                                (batch_id, spec_writer.token, start_job_id))
try:
await insert() # pylint: disable=no-value-for-parameter
except aiohttp.web.HTTPException:
raise
except Exception as err:
raise ValueError(f'encountered exception while inserting a bunch'
f'jobs_args={json.dumps(jobs_args)}'
f'job_parents_args={json.dumps(job_parents_args)}') from err
return web.Response()
@routes.post('/api/v1alpha/batches/create')
@prom_async_time(REQUEST_TIME_POST_CREATE_BATCH)
@rest_authenticated_users_only
async def create_batch(request, userdata):
    """REST: create a batch row in state 'open' and return ``{'id': ...}``.

    Creation is idempotent on the client-supplied token: if a batch with the
    same (token, user) already exists, its id is returned instead of
    inserting a new row.
    """
    app = request.app
    db = app['db']

    batch_spec = await request.json()
    try:
        validate_batch(batch_spec)
    except ValidationError as e:
        raise web.HTTPBadRequest(reason=e.reason)

    user = userdata['username']

    # restrict to what's necessary; in particular, drop the session
    # which is sensitive
    userdata = {
        'username': user,
        'gsa_key_secret_name': userdata['gsa_key_secret_name'],
        'tokens_secret_name': userdata['tokens_secret_name']
    }

    billing_project = batch_spec['billing_project']
    token = batch_spec['token']
    attributes = batch_spec.get('attributes')

    @transaction(db)
    async def insert(tx):
        # the user must belong to the billing project; a shared lock keeps
        # membership stable for the rest of the transaction
        rows = tx.execute_and_fetchall(
            '''
SELECT * FROM billing_project_users
WHERE billing_project = %s AND user = %s
LOCK IN SHARE MODE;
''',
            (billing_project, user))
        rows = [row async for row in rows]
        if len(rows) != 1:
            assert len(rows) == 0
            raise web.HTTPForbidden(reason=f'unknown billing project {billing_project}')

        # idempotency check: FOR UPDATE serializes concurrent creates that
        # race on the same token
        maybe_batch = await tx.execute_and_fetchone(
            '''
SELECT * FROM batches
WHERE token = %s AND user = %s FOR UPDATE;
''',
            (token, user))
        if maybe_batch is not None:
            return maybe_batch['id']

        now = time_msecs()
        id = await tx.execute_insertone(
            '''
INSERT INTO batches (userdata, user, billing_project, attributes, callback, n_jobs, time_created, token, state, format_version)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
''',
            (json.dumps(userdata), user, billing_project, json.dumps(attributes),
             batch_spec.get('callback'), batch_spec['n_jobs'],
             now, token, 'open', BATCH_FORMAT_VERSION))

        if attributes:
            # also store attributes row-wise so they are queryable
            await tx.execute_many(
                '''
INSERT INTO `batch_attributes` (batch_id, `key`, `value`)
VALUES (%s, %s, %s)
''',
                [(id, k, v) for k, v in attributes.items()])
        return id
    id = await insert()  # pylint: disable=no-value-for-parameter
    return web.json_response({'id': id})
async def _get_batch(app, batch_id, user):
    """Fetch one non-deleted batch owned by `user`, with its aggregated cost.

    Returns the batch as a dict (via batch_record_to_dict); raises
    web.HTTPNotFound if no matching batch exists.
    """
    query = '''
SELECT batches.*, SUM(`usage` * rate) AS cost FROM batches
LEFT JOIN aggregated_batch_resources
ON batches.id = aggregated_batch_resources.batch_id
LEFT JOIN resources
ON aggregated_batch_resources.resource = resources.resource
WHERE user = %s AND id = %s AND NOT deleted
GROUP BY batches.id;
'''
    row = await app['db'].select_and_fetchone(query, (user, batch_id))
    if not row:
        raise web.HTTPNotFound()
    return batch_record_to_dict(row)
async def _cancel_batch(app, batch_id, user):
    """Cancel `batch_id` for `user` and signal the cancellation event.

    Raises web.HTTPNotFound if the batch does not exist (or is deleted) and
    web.HTTPBadRequest if the batch is still open.
    """
    db = app['db']
    state_query = '''
SELECT `state` FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
'''
    batch = await db.select_and_fetchone(state_query, (user, batch_id))
    if not batch:
        raise web.HTTPNotFound()
    # an open batch has not been closed yet, so there is nothing to cancel
    if batch['state'] == 'open':
        raise web.HTTPBadRequest(reason=f'cannot cancel open batch {batch_id}')
    await db.just_execute('CALL cancel_batch(%s);', (batch_id,))
    # notify listeners that cancellation state changed
    app['cancel_batch_state_changed'].set()
    return web.Response()
async def _delete_batch(app, batch_id, user):
    """Mark `batch_id` as deleted for `user`, cancelling it first.

    Raises web.HTTPNotFound if no matching, non-deleted batch exists.
    """
    db = app['db']
    state_query = '''
SELECT `state` FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
'''
    batch = await db.select_and_fetchone(state_query, (user, batch_id))
    if not batch:
        raise web.HTTPNotFound()
    # cancel before flagging the row as deleted
    await db.just_execute('CALL cancel_batch(%s);', (batch_id,))
    await db.execute_update(
        'UPDATE batches SET deleted = 1 WHERE id = %s;', (batch_id,))
    # only a running batch needs the deletion event signalled
    if batch['state'] == 'running':
        app['delete_batch_state_changed'].set()
@routes.get('/api/v1alpha/batches/{batch_id}')
@prom_async_time(REQUEST_TIME_POST_GET_BATCH)
@rest_authenticated_users_only
async def get_batch(request, userdata):
    """REST: return one batch (including cost) as JSON."""
    batch_id = int(request.match_info['batch_id'])
    batch = await _get_batch(request.app, batch_id, userdata['username'])
    return web.json_response(batch)
@routes.patch('/api/v1alpha/batches/{batch_id}/cancel')
@prom_async_time(REQUEST_TIME_PATCH_CANCEL_BATCH)
@rest_authenticated_users_only
async def cancel_batch(request, userdata):
    """REST: cancel a batch; responds with an empty 200 on success."""
    batch_id = int(request.match_info['batch_id'])
    await _cancel_batch(request.app, batch_id, userdata['username'])
    return web.Response()
@routes.patch('/api/v1alpha/batches/{batch_id}/close')
@prom_async_time(REQUEST_TIME_PATCH_CLOSE_BATCH)
@rest_authenticated_users_only
async def close_batch(request, userdata):
    """REST: close (finalize) an open batch.

    Runs the `close_batch` stored procedure, translating its job-count
    mismatch error into HTTP 400, then notifies the batch driver that the
    batch is closed.
    """
    batch_id = int(request.match_info['batch_id'])
    user = userdata['username']

    app = request.app
    db = app['db']

    record = await db.select_and_fetchone(
        '''
SELECT 1 FROM batches
WHERE user = %s AND id = %s AND NOT deleted;
''',
        (user, batch_id))
    if not record:
        raise web.HTTPNotFound()

    try:
        now = time_msecs()
        await check_call_procedure(
            db, 'CALL close_batch(%s, %s);', (batch_id, now))
    except CallError as e:
        # 2: wrong number of jobs
        if e.rv['rc'] == 2:
            expected_n_jobs = e.rv['expected_n_jobs']
            actual_n_jobs = e.rv['actual_n_jobs']
            raise web.HTTPBadRequest(
                reason=f'wrong number of jobs: expected {expected_n_jobs}, actual {actual_n_jobs}')
        # other return codes are unexpected; propagate
        raise

    # tell the driver this batch is closed (retrying transient failures)
    async with in_cluster_ssl_client_session(
            raise_for_status=True, timeout=aiohttp.ClientTimeout(total=60)) as session:
        await request_retry_transient_errors(
            session, 'PATCH',
            deploy_config.url('batch-driver', f'/api/v1alpha/batches/{user}/{batch_id}/close'),
            headers=app['driver_headers'])
    return web.Response()
@routes.delete('/api/v1alpha/batches/{batch_id}')
@prom_async_time(REQUEST_TIME_DELETE_BATCH)
@rest_authenticated_users_only
async def delete_batch(request, userdata):
    """REST: delete a batch; responds with an empty 200 on success."""
    batch_id = int(request.match_info['batch_id'])
    await _delete_batch(request.app, batch_id, userdata['username'])
    return web.Response()
@routes.get('/batches/{batch_id}')
@prom_async_time(REQUEST_TIME_GET_BATCH_UI)
@web_authenticated_users_only()
async def ui_batch(request, userdata):
    """UI: render the batch page with the batch and a page of its jobs."""
    batch_id = int(request.match_info['batch_id'])
    batch = await _get_batch(request.app, batch_id, userdata['username'])
    jobs, last_job_id = await _query_batch_jobs(request, batch_id)
    # replace raw durations with human-readable strings for display
    for job in jobs:
        job['duration'] = humanize_timedelta_msecs(job['duration'])
    batch['jobs'] = jobs
    page_context = {
        'batch': batch,
        'q': request.query.get('q'),
        'last_job_id': last_job_id
    }
    return await render_template('batch', request, userdata, 'batch.html', page_context)
@routes.post('/batches/{batch_id}/cancel')
@prom_async_time(REQUEST_TIME_POST_CANCEL_BATCH_UI)
@check_csrf_token
@web_authenticated_users_only(redirect=False)
async def ui_cancel_batch(request, userdata):
batch_id = int(request.match_info['batch_id'])
user = userdata['username']
await _cancel_batch(request.app, batch_id, user)
session = | |
-2.14805401e-05, 9.05879471e-06],
[1.15765605e-05, -2.20007656e-07, -1.00689171e-05, -7.85316340e-06],
[1.76295477e-06, -4.68035973e-07, 6.34634343e-06, -9.26903305e-06],
[9.56906212e-07, -2.83017535e-06, 1.68342294e-05,
-5.69798533e-06]]]]) * units('s^-1')
assert_array_almost_equal(div.data, truth, 12)
def test_shearing_deformation_4d(data_4d):
    """Test shearing_deformation on a 4D (time, pressure, y, x) grid."""
    shdef = shearing_deformation(data_4d.u, data_4d.v)
    # expected values pre-computed for the data_4d fixture, with the same
    # (time, pressure, y, x) shape; compared to 12 decimal places below
    truth = np.array([[[[-2.33792381e-05, 3.44534094e-06, 2.69410760e-05, 1.06867281e-05],
                        [-6.40972431e-05, 1.01579031e-05, 1.73678734e-05, -2.40319045e-05],
                        [7.70545354e-07, -1.87702202e-05, -1.39302341e-05, 3.73230852e-05],
                        [6.35849225e-05, -1.08009221e-04, -9.62510298e-05, 7.32297192e-05]],
                       [[-2.42502310e-05, -1.01193319e-05, 5.54828905e-05, -3.31928326e-07],
                        [-2.69305297e-06, 9.32833730e-06, 2.04600718e-05, 3.36248400e-05],
                        [-7.24755760e-06, 1.72909996e-05, -5.48615182e-06, -1.30784063e-05],
                        [-2.51475614e-05, 9.22553765e-06, -2.17297542e-06, -5.34977173e-05]],
                       [[-2.58416628e-05, 1.01393773e-05, 4.54141476e-05, 6.20366322e-07],
                        [-1.56077459e-05, 6.20125807e-06, 2.36797141e-05, 2.53616873e-05],
                        [-2.71240538e-06, 1.14475474e-05, 8.05450723e-06, 3.07240065e-05],
                        [1.16656764e-05, 2.71686080e-05, -1.88326452e-06, 1.03921795e-05]]],
                      [[[5.29600994e-06, 1.04331961e-05, -1.72892524e-05, 3.67655639e-05],
                        [-3.67904320e-05, 8.07030650e-06, 3.05173020e-06, -2.40356283e-05],
                        [3.03845109e-08, -2.56843275e-07, 1.17465234e-06, 3.08089412e-05],
                        [1.79034632e-05, -3.12752861e-05, -5.30138255e-05, 6.33453564e-05]],
                       [[-2.54496668e-05, -1.88685727e-05, 7.59573914e-06, 7.85469836e-06],
                        [-1.58734272e-05, 8.90875832e-06, 1.95355336e-05, 6.33953947e-06],
                        [2.90313838e-06, 1.03222777e-05, 1.50063775e-05, 1.13348820e-05],
                        [-6.20995986e-06, 5.06623932e-06, 3.72239179e-06, -4.41896630e-05]],
                       [[-1.97608457e-05, 7.98531569e-06, 1.94218554e-05, 1.18509048e-05],
                        [-1.81300845e-05, 7.12699895e-06, 1.59034980e-05, -7.08850441e-06],
                        [1.04965562e-05, 3.47535804e-06, 7.24254745e-06, 4.15824912e-05],
                        [1.29997134e-05, 7.21430847e-06, -1.45932750e-05, 5.00959463e-05]]],
                      [[[-8.37024044e-06, 2.79795154e-06, -2.39099649e-05, 1.76221280e-05],
                        [-1.88550094e-05, 3.33869412e-06, 1.34953970e-05, 1.25143854e-05],
                        [5.96277806e-07, 1.86196124e-05, 1.68723536e-05, 9.74312685e-06],
                        [6.20326426e-06, 2.93197852e-05, -1.42931965e-05, 2.19484546e-05]],
                       [[-1.00299098e-05, -4.57260229e-05, 8.56211376e-06, 3.45779631e-05],
                        [-1.65491061e-05, -4.63468810e-06, 6.71584791e-06, 1.76493950e-06],
                        [-4.22030685e-06, 1.50431608e-05, 1.81194219e-05, 5.45811766e-06],
                        [-2.07574370e-06, 1.80633930e-05, 4.39555860e-05, 5.90590854e-06]],
                       [[-2.08496392e-05, -3.02898043e-05, -3.80429538e-06, 2.71317584e-05],
                        [-4.80062637e-06, 1.25396267e-06, 6.85529455e-06, 5.70834171e-06],
                        [5.72435226e-06, 1.05827268e-05, 1.53717763e-05, 1.55950591e-05],
                        [1.23403264e-05, -1.98341401e-06, 1.56203357e-05,
                         3.90722041e-05]]]]) * units('s^-1')
    assert_array_almost_equal(shdef.data, truth, 12)
def test_stretching_deformation_4d(data_4d):
    """Test stretching_deformation on a 4D (time, pressure, y, x) grid."""
    stdef = stretching_deformation(data_4d.u, data_4d.v)
    # expected values pre-computed for the data_4d fixture, with the same
    # (time, pressure, y, x) shape; compared to 10 decimal places below
    truth = np.array([[[[3.47898088e-05, 2.24845986e-05, -5.97367530e-06, -2.81027927e-05],
                        [-1.00316265e-05, 2.43890252e-05, 5.13005043e-06, 3.02139765e-05],
                        [-5.95303373e-05, 4.11805509e-06, 3.94239079e-05, 5.53801191e-05],
                        [8.92024896e-05, 1.85881092e-05, 3.59490328e-05, -1.03321407e-04]],
                       [[3.00039817e-06, 1.37094723e-05, -4.34319088e-05, 1.79539749e-05],
                        [1.87324184e-05, 5.47148050e-06, -9.06983993e-06, 8.15734277e-06],
                        [1.21798873e-07, 1.26405968e-05, 2.72019585e-05, -3.63162743e-06],
                        [-1.36470926e-05, 1.87727600e-05, 5.84790724e-05, 5.03903728e-05]],
                       [[2.89291086e-05, 3.31866090e-05, 1.58458533e-05, 5.68409251e-06],
                        [1.68472637e-05, 1.52157851e-05, 5.27310978e-06, 1.21993291e-05],
                        [8.59225306e-06, 7.71174035e-06, -4.82506223e-06, -1.57536424e-05],
                        [-5.84283826e-06, 8.50599727e-06, -3.27143224e-07, -3.93117456e-05]]],
                      [[[3.69837694e-05, 1.86562509e-05, -2.79203000e-06, -3.51399535e-05],
                        [-6.42858314e-06, 2.70027422e-05, 6.97334875e-06, 5.92098244e-06],
                        [-4.01668004e-05, 5.04173347e-06, 4.75334876e-05, 6.25555261e-05],
                        [3.66252634e-05, 2.71352154e-06, 7.09783382e-05, -5.79312118e-05]],
                       [[-5.31921974e-06, -1.04758793e-06, 2.58686924e-05, 7.08365906e-06],
                        [1.26562011e-05, 1.35206063e-05, -2.74715944e-06, 4.32091552e-06],
                        [8.54170666e-06, 1.49581427e-05, 6.31110194e-06, 9.12961275e-06],
                        [2.67785986e-06, 5.37083849e-06, 5.47744998e-05, 4.07259321e-05]],
                       [[1.73537008e-05, 5.99605247e-06, 4.13461116e-05, -2.90256397e-05],
                        [4.24395934e-07, 1.02937398e-05, 5.17452359e-06, 7.09934306e-06],
                        [5.34248818e-06, 6.67495925e-06, -7.90440717e-06, 1.03908310e-05],
                        [1.46185421e-05, 1.65031056e-07, 4.47900388e-06, -4.46075180e-05]]],
                      [[[3.02321534e-05, 2.69257238e-05, -4.63180943e-06, 3.00627122e-06],
                        [-2.01256850e-06, 2.88914919e-05, 1.15236589e-05, -3.75586415e-06],
                        [-1.41791143e-05, 1.61351154e-05, 3.08316570e-05, 5.12686237e-05],
                        [-4.95427192e-06, 1.96269721e-05, 4.92464559e-05, 6.43446270e-05]],
                       [[-1.86155399e-05, -1.13423401e-05, 2.94399620e-05, -8.00532458e-06],
                        [1.63327091e-05, 8.39898448e-06, 7.11857042e-06, 7.32055442e-06],
                        [9.11199258e-06, 1.67214834e-05, 5.42904828e-06, 1.03069722e-05],
                        [2.62789752e-06, 5.48570575e-06, 1.29250179e-05, 3.39387353e-05]],
                       [[-4.08093319e-06, 1.03359478e-05, 2.30342884e-05, -2.51141968e-05],
                        [8.42904887e-06, 9.22253152e-06, 6.56793595e-06, -9.65174212e-06],
                        [6.70904325e-06, 9.42414527e-06, -1.74726096e-06, 4.66995059e-06],
                        [1.75937571e-05, 1.24577364e-05, -1.28423144e-05,
                         7.34171029e-06]]]]) * units('s^-1')
    assert_array_almost_equal(stdef.data, truth, 10)
def test_total_deformation_4d(data_4d):
    """Test total_deformation on a 4D (time, pressure, y, x) grid."""
    totdef = total_deformation(data_4d.u, data_4d.v)
    # expected values pre-computed for the data_4d fixture, with the same
    # (time, pressure, y, x) shape; compared to 12 decimal places below
    truth = np.array([[[[4.19156244e-05, 2.27470339e-05, 2.75954049e-05, 3.00661456e-05],
                        [6.48775008e-05, 2.64198324e-05, 1.81096782e-05, 3.86059168e-05],
                        [5.95353239e-05, 1.92166476e-05, 4.18126289e-05, 6.67830089e-05],
                        [1.09545089e-04, 1.09597033e-04, 1.02745286e-04, 1.26640850e-04]],
                       [[2.44351405e-05, 1.70396746e-05, 7.04604984e-05, 1.79570429e-05],
                        [1.89250108e-05, 1.08145724e-05, 2.23802711e-05, 3.46001750e-05],
                        [7.24858097e-06, 2.14187617e-05, 2.77496740e-05, 1.35732616e-05],
                        [2.86119377e-05, 2.09171476e-05, 5.85194303e-05, 7.34928257e-05]],
                       [[3.87902676e-05, 3.47009797e-05, 4.80992294e-05, 5.71784592e-06],
                        [2.29658884e-05, 1.64309378e-05, 2.42597310e-05, 2.81431841e-05],
                        [9.01021396e-06, 1.38027998e-05, 9.38915929e-06, 3.45274069e-05],
                        [1.30470980e-05, 2.84690226e-05, 1.91146748e-06, 4.06621536e-05]]],
                      [[[3.73610348e-05, 2.13753895e-05, 1.75132430e-05, 5.08578708e-05],
                        [3.73478589e-05, 2.81829369e-05, 7.61187559e-06, 2.47541807e-05],
                        [4.01668119e-05, 5.04827148e-06, 4.75479995e-05, 6.97308017e-05],
                        [4.07669464e-05, 3.13927813e-05, 8.85911406e-05, 8.58408963e-05]],
                       [[2.59996085e-05, 1.88976315e-05, 2.69607956e-05, 1.05770748e-05],
                        [2.03013575e-05, 1.61917500e-05, 1.97277459e-05, 7.67203178e-06],
                        [9.02158329e-06, 1.81740323e-05, 1.62794771e-05, 1.45543595e-05],
                        [6.76273132e-06, 7.38327075e-06, 5.49008382e-05, 6.00943247e-05]],
                       [[2.62990866e-05, 9.98588563e-06, 4.56805146e-05, 3.13517416e-05],
                        [1.81350510e-05, 1.25201914e-05, 1.67241425e-05, 1.00323261e-05],
                        [1.17779401e-05, 7.52550294e-06, 1.07207344e-05, 4.28610889e-05],
                        [1.95625745e-05, 7.21619581e-06, 1.52651613e-05, 6.70778242e-05]]],
                      [[[3.13694760e-05, 2.70707062e-05, 2.43544673e-05, 1.78767184e-05],
                        [1.89621152e-05, 2.90837615e-05, 1.77459983e-05, 1.30658470e-05],
                        [1.41916465e-05, 2.46380177e-05, 3.51463709e-05, 5.21862079e-05],
                        [7.93884738e-06, 3.52826847e-05, 5.12787372e-05, 6.79850401e-05]],
                       [[2.11456240e-05, 4.71117592e-05, 3.06597644e-05, 3.54925451e-05],
                        [2.32514580e-05, 9.59287621e-06, 9.78655496e-06, 7.53030733e-06],
                        [1.00418822e-05, 2.24923252e-05, 1.89152853e-05, 1.16629638e-05],
                        [3.34881431e-06, 1.88780066e-05, 4.58164777e-05, 3.44487664e-05]],
                       [[2.12452693e-05, 3.20047506e-05, 2.33463296e-05, 3.69710047e-05],
                        [9.70025146e-06, 9.30739007e-06, 9.49383200e-06, 1.12134424e-05],
                        [8.81926698e-06, 1.41706959e-05, 1.54707604e-05, 1.62792600e-05],
                        [2.14900894e-05, 1.26146394e-05, 2.02217686e-05,
                         3.97559787e-05]]]]) * units('s^-1')
    assert_array_almost_equal(totdef.data, truth, 12)
def test_frontogenesis_4d(data_4d):
    """Test frontogenesis on a 4D (time, pressure, y, x) grid."""
    thta = potential_temperature(data_4d.pressure, data_4d.temperature)
    # transpose back to the fixture's dimension order so the result lines up
    # element-for-element with `truth`
    frnt = frontogenesis(thta, data_4d.u, data_4d.v).transpose(
        'time1',
        'pressure',
        'latitude',
        'longitude'
    )
    # expected values pre-computed for the data_4d fixture, in K/m/s;
    # compared to 13 decimal places below
    truth = np.array([[[[4.23682388e-10, -6.60428594e-12, -2.16700227e-10, -3.80960666e-10],
                        [-5.28427593e-10, -7.11496293e-12, -4.77951513e-11, 2.94985981e-10],
                        [7.86953679e-10, 3.54196972e-10, 2.07842740e-11, -5.25487973e-10],
                        [-3.52111258e-10, 2.06421077e-10, 1.67986422e-09, -1.45950592e-09]],
                       [[-7.31728965e-11, 1.06892315e-10, -1.33453527e-10, 3.42647921e-10],
                        [-5.05805666e-11, 2.12238918e-11, -4.71306612e-11, 9.62250022e-11],
                        [4.76933273e-11, 6.94586917e-11, 3.53139630e-10, -7.14834221e-11],
                        [6.14587969e-10, 1.41091788e-10, 8.42714362e-10, 1.36031856e-09]],
                       [[2.05113794e-11, 3.21339794e-10, 5.56947831e-10, 1.43142115e-10],
                        [9.85782985e-11, 1.06721561e-10, 5.73106405e-11, -5.03368922e-12],
                        [-6.43122987e-11, -2.12772736e-11, -1.17352480e-11, 2.13297934e-10],
                        [-6.97155996e-11, -4.10739462e-11, -1.75156002e-10, -1.76167917e-10]]],
                      [[[1.74719456e-10, -1.35620544e-11, -5.23975776e-11, -3.77740716e-10],
                        [-1.89498320e-10, -2.40570704e-11, 1.09765802e-11, 3.26582884e-10],
                        [5.05760395e-10, 5.96930313e-11, 2.51806496e-10, 2.62326483e-10],
                        [8.55597272e-10, -1.03839677e-10, 1.36437001e-09, -2.55279252e-11]],
                       [[-4.68143046e-11, -4.29566800e-11, 1.37326379e-10, 2.00212822e-10],
                        [-7.60292021e-11, 3.13481943e-11, 2.02636812e-11, 7.07310188e-11],
                        [2.07073318e-11, 9.74536122e-11, 3.64495220e-11, 9.11599007e-11],
                        [1.07707226e-10, 4.27961436e-12, 7.17400120e-10, -4.07742791e-10]],
                       [[3.51033086e-11, 6.86914537e-12, 7.68630167e-10, 1.73824937e-10],
                        [8.63644951e-11, 6.43950959e-11, 6.01335884e-11, -3.49684748e-11],
                        [-8.06772168e-11, 3.34221310e-11, -6.70871076e-11, 2.13933933e-10],
                        [2.77857293e-12, -1.19419804e-10, -3.88340891e-11, 2.35051688e-10]]],
                      [[[-1.06920260e-10, 1.42163009e-10, -1.67670634e-10, 7.77738130e-12],
                        [-2.14431980e-11, -1.40383248e-11, 5.12326588e-11, 4.47136472e-11],
                        [9.29690678e-11, -1.91237280e-11, 5.11911088e-11, 3.57423744e-10],
                        [1.48172065e-09, -6.47936247e-11, -2.02021163e-10, 3.76309534e-10]],
                       [[1.40697485e-10, -3.68197137e-10, 2.35522920e-10, 1.53804948e-10],
                        [-2.61409796e-10, 3.88149869e-11, 9.17155132e-11, 3.56335985e-11],
                        [6.05095218e-12, 8.10937994e-11, 2.38586262e-11, 1.57114763e-10],
                        [5.98536934e-11, 1.42709122e-11, 2.20296991e-10, 6.13222348e-12]],
                       [[5.77582222e-11, 1.50846336e-10, 9.79419525e-11, 1.38512768e-10],
                        [-5.73091526e-11, 1.59416672e-11, 8.32303219e-11, 1.08035832e-10],
                        [-5.84859130e-11, 7.43545248e-13, 9.37957614e-12, 1.74102020e-10],
                        [-2.38469755e-11, 1.01414977e-10, 4.18826651e-12,
                         5.18914848e-10]]]]) * units('K/m/s')
    assert_array_almost_equal(frnt.data, truth, 13)
def test_geostrophic_wind_4d(data_4d):
    """Test geostrophic_wind on a 4D (time, pressure, y, x) grid."""
    u_g, v_g = geostrophic_wind(data_4d.height)
    # expected wind components pre-computed for the data_4d fixture, in m/s;
    # compared to 4 decimal places below
    u_g_truth = np.array([[[[4.4048682, 12.51692258, 20.6372888, 3.17769076],
                            [14.10194272, 17.12263389, 22.04954728, 28.25627227],
                            [24.44520364, 22.83658626, 31.70185292, 41.43474924],
                            [35.55078527, 29.81195711, 50.61167797, 41.34530902]],
                           [[7.35972965, 11.1508039, 15.35393025, 8.90224418],
                            [8.36112058, 12.51333565, 13.38382857, 14.31961908],
                            [10.36996705, 13.0359012, 16.55131816, 20.5818523, ],
                            [13.51358869, 12.61987535, 25.47981594, 27.81300202]],
                           [[5.75323442, 8.87025383, 12.11513202, 6.9569899],
                            [5.63036347, 9.22723021, 9.46050042, 9.6346362],
                            [5.15111673, 8.92136198, 10.13229278, 10.02026762],
                            [4.27093343, 7.87208428, 14.5287988, 7.84193975]]],
                          [[[2.56374289, 12.12175071, 18.88903041, 9.31429628],
                            [11.13363838, 16.0692652, 22.88529273, 23.22479772],
                            [21.17380408, 18.19154086, 27.4544941, 37.89230504],
                            [32.89749307, 18.27860521, 32.68137119, 53.46237373]],
                           [[5.88868673, 10.23886093, 13.99207011, 7.62863328],
                            [7.72562462, 12.48283865, 13.87130247, 12.9747224],
                            [9.38948486, 12.47560991, 15.29521325, 18.71570391],
                            [10.86569379, 9.94843902, 18.45258217, 24.92010393]],
                           [[5.37666159, 9.31750301, 9.01145261, 3.6887154],
                            [5.42142711, 8.93123924, 9.34560535, 9.00788023],
                            [4.9486882, 8.34297898, 9.29367604, 11.09021549],
                            [3.89472979, 7.52596773, 8.80903347, 9.55782342]]],
                          [[[4.07701203, 9.91100477, 14.63521206, 11.44931207],
                            [9.21849021, 15.39896866, 20.84826281, 20.3521286],
                            [17.27879226, 16.28474129, 23.22522698, 32.4339051],
                            [28.63614846, 12.02289896, 21.31740279, 48.11881204]],
                           [[4.67797906, 7.67496412, 7.67070558, 7.4354085],
                            [6.3676578, 10.5938839, 12.09551605, 11.52096098],
                            [7.77187678, 11.17427574, 14.91109545, 16.17177845],
                            [8.86174332, 9.13936002, 15.93605997, 21.47254661]],
                           [[4.06859757, 6.49637507, 4.98325985, 5.1109647],
                            [4.19923572, 6.75503352, 8.50297947, 8.50993959],
                            [3.85339539, 6.92959206, 9.81419868, 10.5154729],
                            [2.97279544, 7.01038155, 8.65854052, 10.9689316]]]]) * units('m/s')
    v_g_truth = np.array([[[[-2.34997753e+01, -1.94136235e+01, -7.45077637e+00,
                             1.23887662e+01],
                            [-2.05898579e+01, -1.59712848e+01, -7.24733971e+00,
                             5.58197747e+00],
                            [-2.13032949e+01, -1.50665793e+01, -1.26486198e+00,
                             2.01018571e+01],
                            [-2.83372497e+01, -1.22624731e+01, 2.75609237e+00,
                             1.67184466e+01]],
                           [[-2.12169685e+01, -1.57511747e+01, -7.18451047e+00,
                             4.48302414e+00],
                            [-1.85734872e+01, -1.39016674e+01, -7.25703167e+00,
                             1.36042011e+00],
                            [-1.48452478e+01, -1.30209105e+01, -6.21005126e+00,
                             5.58732988e+00],
                            [-1.64113345e+01, -1.07468232e+01, -3.26209862e+00,
                             6.04283912e+00]],
                           [[-1.84240576e+01, -1.51861981e+01, -8.32705150e+00,
                             2.15338222e+00],
                            [-1.60768326e+01, -1.37375247e+01, -8.54578152e+00,
                             -5.01603207e-01],
                            [-1.26137008e+01, -1.31196694e+01, -8.13994713e+00,
                             2.32546588e+00],
                            [-1.08239460e+01, -1.12327091e+01, -8.07473534e+00,
                             -1.35002468e+00]]],
                          [[[-2.47825558e+01, -2.06675642e+01, -7.55733001e+00,
                             1.45481469e+01],
                            [-2.05171683e+01, -1.66829347e+01, -6.96656838e+00,
                             8.63193062e+00],
                            [-2.04375067e+01, -1.42006723e+01, -3.59516781e+00,
                             1.13790069e+01],
                            [-3.07199620e+01, -1.35152096e+01, 3.64042638e+00,
                             2.07469460e+01]],
                           [[-2.20738890e+01, -1.61045805e+01, -6.81898954e+00,
                             5.78288395e+00],
                            [-1.89437910e+01, -1.40832144e+01, -7.12633797e+00,
                             1.92683830e+00],
                            [-1.49814792e+01, -1.27484476e+01, -6.57732385e+00,
                             3.53189205e+00],
                            [-1.57235558e+01, -1.10808922e+01, -3.83938054e+00,
                             6.00097928e+00]],
                           [[-1.89953281e+01, -1.49402619e+01, -8.35222723e+00,
                             7.68775922e-01],
                            [-1.58424970e+01, -1.38711585e+01, -9.15189832e+00,
                             -1.68471661e+00],
                            [-1.34349198e+01, -1.28199780e+01, -8.35009927e+00,
                             -2.52835808e-02],
                            [-1.10578184e+01, -1.17141722e+01, -7.79372570e+00,
                             7.03521108e-01]]],
                          [[[-2.88009221e+01, -2.08127679e+01, -7.41206720e+00,
                             1.14011801e+01],
                            [-2.51405873e+01, -1.76754149e+01, -6.50182713e+00,
                             8.38017608e+00],
                            [-2.16245136e+01, -1.44146994e+01, -4.68003089e+00,
                             7.57949195e+00],
                            [-3.09065921e+01, -1.47040769e+01, 2.18126927e+00,
                             1.97494465e+01]],
                           [[-2.14639093e+01, -1.55526942e+01, -7.21598014e+00,
                             3.54623269e+00],
                            [-1.86145303e+01, -1.43252474e+01, -7.12149199e+00,
                             2.99673603e+00],
                            [-1.53220281e+01, -1.24273773e+01, -6.73303389e+00,
                             1.76100214e+00],
                            [-1.53722451e+01, -1.06559370e+01, -4.50997751e+00,
                             3.06563326e+00]],
                           [[-1.62551769e+01, -1.41559875e+01, -9.23139816e+00,
                             -1.48140877e+00],
                            [-1.41654778e+01, -1.34257568e+01, -9.18676573e+00,
                             -1.44850466e+00],
                            [-1.30262107e+01, -1.18197548e+01, -8.29562748e+00,
                             -2.45382867e+00],
                            [-1.09261218e+01, -1.03837731e+01, -7.37319328e+00,
                             -1.89438246e+00]]]]) * units('m/s')
    assert_array_almost_equal(u_g.data, u_g_truth, 4)
    assert_array_almost_equal(v_g.data, v_g_truth, 4)
def test_inertial_advective_wind_4d(data_4d):
"""Test inertial_advective_wind on a 4D (time, pressure, y, x) grid."""
u_g, v_g = geostrophic_wind(data_4d.height)
u_i, v_i = inertial_advective_wind(u_g, v_g, u_g, v_g)
u_i_truth = np.array([[[[-4.77165787, -6.39928757, -7.24239774, -11.14139847],
[-1.8967587, -4.36028755, -6.86016435, -9.424228],
[2.31421679, -6.96263439, -14.11859275, -20.68976199],
[-0.92900951, -13.81722973, -17.96832023, -23.80435234]],
[[-2.62194257, -3.50676725, -3.63961746, -4.21059159],
[-3.38684408, -2.58995365, -2.67792148, -3.36122749],
[-0.56740802, -2.34244481, -4.39126012, -6.69284736],
[1.70715454, -3.60961021, -5.96780511, -7.53107716]],
[[-1.61558735, -2.31867093, -2.40316115, -2.60870259],
[-2.19984407, -1.48762908, -1.58089856, -2.2541336],
[-1.11136338, -1.25207315, -2.02918744, -3.32828099],
[-0.26028196, -1.62956357, -1.75756959, -1.22270124]]],
[[[-6.72938857, -6.77202159, -7.95073037, -12.50625533],
[-2.22377841, -5.0815521, -7.76259189, -11.23523285],
[2.67551814, -4.83617581, -9.58820051, -12.95106032],
[8.58739912, -7.72793742, -12.42304341, -10.25891257]],
[[-3.19431927, -3.55990592, -3.56474965, -4.31772693],
[-3.70858471, -2.86947801, -2.77907873, -3.331319],
[-1.17292465, -2.182095, -3.58631575, -5.27553824],
[1.4236791, -2.45544962, -4.65344893, -6.11853894]],
[[-3.24030343, -1.91423726, -1.1742268, -1.09439772],
[-2.03479751, -1.39015234, -1.40603089, -1.93610702],
[-1.31981448, -1.16318518, -1.73599486, -2.82161648],
[-0.96540565, -0.94432034, -1.53211138, -2.57328907]]],
[[[-5.13892702, -5.35990209, -5.96305829, -8.10039371],
[-5.28049715, -6.05189422, -7.09840362, -9.11834812],
[0.32358269, -4.40891596, -7.27141143, -8.89305721],
[11.86892255, -3.52631413, -8.21707342, -3.9149252]],
[[-2.95997348, -1.94436814, -1.79187921, -2.22918106],
[-2.98223302, -2.49621136, -2.66214712, -3.41052605],
[-1.43265094, -2.2408268, -3.02891598, -3.9658998],
[0.38112998, -2.11641585, -3.417963, -4.08044633]],
[[-1.85590971, -0.74052267, -0.62971895, -1.19099569],
[-0.91035149, -1.11111857, -1.44768616, -1.96172425],
[-0.97667565, -1.23489465, -1.48658447, -1.80074616],
[-1.30083552, -0.98479841, -1.25235639,
-1.96633294]]]]) * units('m/s')
v_i_truth = np.array([[[[1.03230312e+01, 5.87882109e+00, -3.24343027e+00, -1.88483470e+01],
[9.87647721e+00, 5.33706213e+00, 4.80929670e+00, 3.63063183e-02],
[6.37603821e+00, 6.45974507e+00, 8.14449487e+00, 4.38722620e+00],
[-1.31406689e+00, 1.00969188e+01, 4.19901525e+00,
-1.97739544e+01]],
[[1.10383561e+00, 2.30354462e+00, -1.82374723e+00, -3.54809094e+00],
[2.43631993e+00, 1.35723724e+00, 4.91193534e-01, -1.02997771e-02],
[2.33864366e+00, 1.03130947e+00, 3.27949769e+00, 4.52250225e-01],
[2.90865168e-01, 1.43496262e+00, 6.69604741e+00, -4.27768358e+00]],
[[4.77255548e-01, 1.14453826e+00, -1.82710412e+00, -1.96018490e+00],
[5.18797941e-01, 4.51757453e-01, -3.28462782e-01, 6.84789970e-02],
[2.50176678e-01, 1.41538500e-01, 1.08853845e+00, -9.62071225e-02],
[-3.39224824e-01, 2.45760327e-01, 2.41856776e+00,
-2.84808630e+00]]],
[[[9.01508187e+00, 6.74751069e+00, 5.47135566e-01, -1.25176087e+01],
[9.57125782e+00, 4.57776586e+00, 3.34524473e+00, -7.13601695e+00],
[5.46543202e+00, 2.13979774e+00, 7.51931363e+00, 2.43567533e+00],
[-5.48910344e+00, -6.52697336e-01, 1.34309575e+01,
1.61565561e+01]],
[[2.49548039e+00, 3.34982501e+00, -7.11777553e-01, -3.42687086e+00],
[2.70007988e+00, 1.64584666e+00, 2.90292095e-01, -1.12712093e+00],
[1.83356146e+00, 1.69401994e-01, 1.87788933e+00, 7.55385123e-01],
[-4.89203395e-01, -1.06751808e+00, 4.20107093e+00,
1.54893157e+00]],
[[1.05193589e+00, 2.35318468e-01, -4.37301952e-01, -9.41622628e-01],
[5.26337352e-01, 1.32572812e-01, 6.61575719e-02, 1.18009862e-01],
[9.40801497e-02, 3.45333939e-02, 2.13427873e-01, 6.10855423e-01],
[-2.44339907e-01, -6.01035575e-02, -3.78806842e-02,
2.28008249e-01]]],
[[[5.18811867e+00, | |
import datetime
import pickle
import time
import numpy as np
import tensorflow as tf
def stack_data(data, num_shifts, len_time):
    """Stack data from a 2D array into a 3D array.

    Arguments:
        data -- 2D data array to be reshaped (a 1D array is treated as a
            single-feature trajectory and promoted to a column vector)
        num_shifts -- number of shifts (time steps) that losses will use (maximum is len_time - 1)
        len_time -- number of time steps in each trajectory in data

    Returns:
        data_tensor -- data reshaped into 3D array, shape: num_shifts + 1, num_traj * (len_time - num_shifts), n

    Side effects:
        None
    """
    if data.ndim > 1:
        n = data.shape[1]
    else:
        # Promote a 1D series to an (m, 1) column vector. reshape replaces
        # the previous np.asmatrix(...).getT(), which relied on the
        # deprecated np.matrix class and leaked a matrix subclass into the
        # slicing below.
        data = np.asarray(data).reshape(-1, 1)
        n = 1
    num_traj = int(data.shape[0] / len_time)

    new_len_time = len_time - num_shifts

    data_tensor = np.zeros([num_shifts + 1, num_traj * new_len_time, n])

    # slice j is the whole data set shifted forward j steps in time, with
    # trajectories laid end-to-end along the second axis
    for j in np.arange(num_shifts + 1):
        for count in np.arange(num_traj):
            data_tensor_range = np.arange(count * new_len_time, new_len_time + count * new_len_time)
            data_tensor[j, data_tensor_range, :] = data[count * len_time + j: count * len_time + j + new_len_time, :]

    return data_tensor
def choose_optimizer(params, regularized_loss, trainable_var):
    """Choose which optimizer to use for the network training.

    Arguments:
        params -- dictionary of parameters for experiment
        regularized_loss -- loss, including regularization
        trainable_var -- list of trainable TensorFlow variables

    Returns:
        optimizer -- optimizer from TensorFlow Class optimizer

    Side effects:
        None

    Raises ValueError if params['opt_alg'] is not 'adam', 'adadelta', 'adagrad', 'adagradDA', 'ftrl', 'proximalGD',
    'proximalAdagrad', or 'RMS'
    """
    lr = params['learning_rate']

    def _adadelta():
        # defaults 0.001, 0.95
        if params['decay_rate'] > 0:
            return tf.train.AdadeltaOptimizer(lr, params['decay_rate'])
        return tf.train.AdadeltaOptimizer(lr)

    def _rms():
        # momentum, epsilon, centered (False/True); default decay_rate 0.9
        if params['decay_rate'] > 0:
            return tf.train.RMSPropOptimizer(lr, params['decay_rate'])
        return tf.train.RMSPropOptimizer(lr)

    # each entry defers optimizer construction until the algorithm is chosen
    factories = {
        'adam': lambda: tf.train.AdamOptimizer(lr),
        'adadelta': _adadelta,
        # also has initial_accumulator_value parameter
        'adagrad': lambda: tf.train.AdagradOptimizer(lr),
        # Be careful when using AdagradDA for deep networks as it will require careful initialization of the gradient
        # accumulators for it to train.
        'adagradDA': lambda: tf.train.AdagradDAOptimizer(lr, tf.get_global_step()),
        # lots of hyperparameters: learning_rate_power, initial_accumulator_value,
        # l1_regularization_strength, l2_regularization_strength
        'ftrl': lambda: tf.train.FtrlOptimizer(lr),
        # can have built-in reg.
        'proximalGD': lambda: tf.train.ProximalGradientDescentOptimizer(lr),
        # initial_accumulator_value, reg.
        'proximalAdagrad': lambda: tf.train.ProximalAdagradOptimizer(lr),
        'RMS': _rms,
    }

    if params['opt_alg'] not in factories:
        raise ValueError("chose invalid opt_alg %s in params dict" % params['opt_alg'])
    return factories[params['opt_alg']]().minimize(regularized_loss, var_list=trainable_var)
def check_progress(start, best_error, params):
    """Check on the progress of the network training and decide if it's time to stop.

    Arguments:
        start -- time that experiment started (seconds, as from time.time())
        best_error -- best error so far in training
        params -- dictionary of parameters for experiment

    Returns:
        finished -- 0 if should continue training, 1 if should stop training
        save_now -- 0 if don't need to save results, 1 if should save results

    Side effects:
        May update params dict: stop_condition, been5min, been20min, been40min,
        been1hr, been2hr, been3hr, been4hr, beenHalf
    """
    finished = 0
    save_now = 0
    elapsed = time.time() - start

    # One row per milestone, checked in chronological order:
    # (flag key, deadline in seconds, error-threshold key,
    #  failure print format, stop_condition string, success print format,
    #  whether passing this milestone should trigger a save).
    checkpoints = [
        ('been5min', 5 * 60, 'min_5min',
         "too slowly improving in first five minutes: err %.15f",
         'too slowly improving in first 5 min',
         "been 5 minutes, err = %.15f < %.15f", 0),
        ('been20min', 20 * 60, 'min_20min',
         "too slowly improving in first 20 minutes: err %.15f",
         'too slowly improving in first 20 min',
         "been 20 minutes, err = %.15f < %.15f", 0),
        ('been40min', 40 * 60, 'min_40min',
         "too slowly improving in first 40 minutes: err %.15f",
         'too slowly improving in first 40 min',
         "been 40 minutes, err = %.15f < %.15f", 0),
        ('been1hr', 60 * 60, 'min_1hr',
         "too slowly improving in first hour: err %.15f",
         'too slowly improving in first hour',
         "been 1 hour, err = %.15f < %.15f", 1),
        ('been2hr', 2 * 60 * 60, 'min_2hr',
         "too slowly improving in first two hours: err %.15f",
         'too slowly improving in first two hours',
         "been 2 hours, err = %.15f < %.15f", 1),
        ('been3hr', 3 * 60 * 60, 'min_3hr',
         "too slowly improving in first three hours: err %.15f",
         'too slowly improving in first three hours',
         "been 3 hours, err = %.15f < %.15f", 1),
        ('been4hr', 4 * 60 * 60, 'min_4hr',
         "too slowly improving in first four hours: err %.15f",
         'too slowly improving in first four hours',
         "been 4 hours, err = %.15f < %.15f", 1),
        ('beenHalf', params['max_time'] / 2, 'min_halfway',
         "too slowly improving 1/2 of way in: val err %.15f",
         'too slowly improving halfway in',
         "Halfway through time, err = %.15f < %.15f", 0),
    ]

    for flag, deadline, thresh_key, fail_fmt, stop_msg, ok_fmt, triggers_save in checkpoints:
        if params[flag]:
            # each milestone is only checked once (flag stores the error seen)
            continue
        if elapsed <= deadline:
            # milestone not reached yet
            continue
        if best_error > params[thresh_key]:
            print(fail_fmt % best_error)
            params['stop_condition'] = stop_msg
            finished = 1
            return finished, save_now
        print(ok_fmt % (best_error, params[thresh_key]))
        if triggers_save:
            save_now = 1
        params[flag] = best_error

    if elapsed > params['max_time']:
        params['stop_condition'] = 'past max time'
        finished = 1
        return finished, save_now

    return finished, save_now
def save_files(sess, csv_path, train_val_error, params, weights, biases):
    """Save error files, weights, biases, and parameters.

    Arguments:
        sess -- TensorFlow session
        csv_path -- string for path to save error file as csv
        train_val_error -- table of training and validation errors
        params -- dictionary of parameters for experiment
        weights -- dictionary of weights for all networks
        biases -- dictionary of biases for all networks

    Side effects:
        Saves train_val_error, each weight W, each bias b, and the params dict
        to file; records minTrain, minTest, minRegTrain, minRegTest in params.
    """
    np.savetxt(csv_path, train_val_error, delimiter=',')
    # Evaluate every weight and bias in the session and write each to its own
    # csv, named by replacing 'error' in csv_path with the variable's key.
    for var_dict in (weights, biases):
        for key, tensor in var_dict.items():
            np.savetxt(csv_path.replace('error', key),
                       np.asarray(sess.run(tensor)), delimiter=',')
    # Column layout of train_val_error: train, val, reg. train, reg. val.
    for column, result_key in enumerate(('minTrain', 'minTest',
                                         'minRegTrain', 'minRegTest')):
        params[result_key] = np.min(train_val_error[:, column])
    print("min train: %.12f, min val: %.12f, min reg. train: %.12f, min reg. val: %.12f" % (
        params['minTrain'], params['minTest'], params['minRegTrain'], params['minRegTest']))
    save_params(params)
def save_params(params):
    """Save parameter dictionary to file.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        None

    Side effects:
        Pickles the params dict next to the model checkpoint, with the
        'ckpt' suffix swapped for 'pkl'.
    """
    pkl_path = params['model_path'].replace('ckpt', 'pkl')
    with open(pkl_path, 'wb') as handle:
        pickle.dump(params, handle, pickle.HIGHEST_PROTOCOL)
def set_defaults(params):
"""Set defaults and make some checks in parameters dictionary.
Arguments:
params -- dictionary of parameters for experiment
Returns:
None (but side effect of updating params dict)
Side effects:
May update params dict
Raises KeyError if params is missing data_name, len_time, data_train_len, delta_t, widths, hidden_widths_omega,
num_evals, num_real, or num_complex_pairs
Raises ValueError if num_evals != 2 * num_complex_pairs + num_real
"""
# defaults related to dataset
if 'data_name' not in params:
raise KeyError("Error: must give data_name as input to main")
if 'len_time' not in params:
raise KeyError("Error, must give | |
"""
Layout components to lay out objects in a grid.
"""
import math
from collections import OrderedDict, namedtuple
from functools import partial
import numpy as np
import param
from bokeh.models import Box as BkBox, GridBox as BkGridBox
from ..io.model import hold
from .base import _col, _row, ListPanel, Panel
class GridBox(ListPanel):
    """
    List-like Grid which wraps depending on the specified number of
    rows or columns.
    """

    nrows = param.Integer(default=None, bounds=(0, None), doc="""
      Number of rows to reflow the layout into.""")

    ncols = param.Integer(default=None, bounds=(0, None), doc="""
      Number of columns to reflow the layout into.""")

    # Underlying bokeh model used to render this layout.
    _bokeh_model = BkGridBox

    # Maps the Panel parameter name onto the bokeh property name.
    _rename = {'objects': 'children'}

    # Parameters that have no direct JS-side transform.
    _source_transforms = {'scroll': None, 'objects': None,
                          'nrows': None, 'ncols': None}

    @classmethod
    def _flatten_grid(cls, layout, nrows=None, ncols=None):
        """
        Flatten a nested _row/_col layout tree into a flat list of
        (layout, row, col, row_span, col_span) tuples for a bokeh GridBox.
        """
        # Item: a leaf with its (r0, c0)-(r1, c1) cell bounds.
        Item = namedtuple("Item", ["layout", "r0", "c0", "r1", "c1"])
        # Grid: a subtree's overall size plus its positioned items.
        Grid = namedtuple("Grid", ["nrows", "ncols", "items"])

        def gcd(a, b):
            # Greatest common divisor (Euclid); helper for lcm below.
            a, b = abs(a), abs(b)
            while b != 0:
                a, b = b, a % b
            return a

        def lcm(a, *rest):
            # Least common multiple of one or more integers.
            for b in rest:
                a = (a*b) // gcd(a, b)
            return a

        # A child contributes to the grid only if it occupies any cells.
        nonempty = lambda child: child.nrows != 0 and child.ncols != 0

        def _flatten(layout, nrows=None, ncols=None):
            # Recursively convert a layout subtree into a Grid. Rows scale
            # their children's row counts to a common multiple (lcm) so the
            # children line up; columns do the same for column counts.
            _flatten_ = partial(_flatten, nrows=nrows, ncols=ncols)
            if isinstance(layout, _row):
                children = list(filter(nonempty, map(_flatten_, layout.children)))
                if not children:
                    return Grid(0, 0, [])
                nrows = lcm(*[ child.nrows for child in children ])
                if not ncols: # This differs from bokeh.layout.grid
                    ncols = sum([ child.ncols for child in children ])
                items = []
                offset = 0
                for child in children:
                    # Scale each child's rows up to the shared row count and
                    # shift its columns right by the width consumed so far.
                    factor = nrows//child.nrows
                    for (layout, r0, c0, r1, c1) in child.items:
                        items.append((layout, factor*r0, c0 + offset, factor*r1, c1 + offset))
                    offset += child.ncols
                return Grid(nrows, ncols, items)
            elif isinstance(layout, _col):
                children = list(filter(nonempty, map(_flatten_, layout.children)))
                if not children:
                    return Grid(0, 0, [])
                if not nrows: # This differs from bokeh.layout.grid
                    nrows = sum([ child.nrows for child in children ])
                ncols = lcm(*[ child.ncols for child in children ])
                items = []
                offset = 0
                for child in children:
                    # Mirror of the row case: scale columns, shift rows down.
                    factor = ncols//child.ncols
                    for (layout, r0, c0, r1, c1) in child.items:
                        items.append((layout, r0 + offset, factor*c0, r1 + offset, factor*c1))
                    offset += child.nrows
                return Grid(nrows, ncols, items)
            else:
                # Leaf: occupies a single 1x1 cell.
                return Grid(1, 1, [Item(layout, 0, 0, 1, 1)])

        grid = _flatten(layout, nrows, ncols)
        children = []
        for (layout, r0, c0, r1, c1) in grid.items:
            if layout is not None:
                # Convert absolute bounds into (row, col, row_span, col_span).
                children.append((layout, r0, c0, r1 - r0, c1 - c0))
        return children

    @classmethod
    def _get_children(cls, children, nrows=None, ncols=None):
        """
        This is a copy of parts of the bokeh.layouts.grid implementation
        to avoid distributing non-filled columns.
        """
        if nrows is not None or ncols is not None:
            # Reflow the flat list of children into rows of ncols items.
            N = len(children)
            if ncols is None:
                ncols = int(math.ceil(N/nrows))
            layout = _col([ _row(children[i:i+ncols]) for i in range(0, N, ncols) ])
        else:
            # No explicit shape: alternate col/row nesting by list depth.
            def traverse(children, level=0):
                if isinstance(children, list):
                    container = _col if level % 2 == 0 else _row
                    return container([ traverse(child, level+1) for child in children ])
                else:
                    return children
            layout = traverse(children)
        return cls._flatten_grid(layout, nrows, ncols)

    def _get_model(self, doc, root=None, parent=None, comm=None):
        """Build the bokeh GridBox model for this panel."""
        model = self._bokeh_model()
        if root is None:
            root = model
        objects = self._get_objects(model, [], doc, root, comm)
        model.children = self._get_children(objects, self.nrows, self.ncols)
        # nrows/ncols only shape the children; they are not bokeh properties.
        props = {k: v for k, v in self._init_properties().items()
                 if k not in ('nrows', 'ncols')}
        model.update(**self._process_param_change(props))
        self._models[root.ref['id']] = (model, parent)
        self._link_props(model, self._linked_props, doc, root, comm)
        return model

    def _update_model(self, events, msg, root, model, doc, comm=None):
        """Update the bokeh model after parameter changes, re-laying out
        the children whenever objects, nrows or ncols changed."""
        from ..io import state
        msg = dict(msg)
        if self._rename['objects'] in msg or 'ncols' in msg or 'nrows' in msg:
            if 'objects' in events:
                old = events['objects'].old
            else:
                old = self.objects
            objects = self._get_objects(model, old, doc, root, comm)
            children = self._get_children(objects, self.nrows, self.ncols)
            msg[self._rename['objects']] = children

        with hold(doc):
            msg = {k: v for k, v in msg.items() if k not in ('nrows', 'ncols')}
            # Batch nested updates so _preprocess runs once, at the outermost
            # update only.
            update = Panel._batch_update
            Panel._batch_update = True
            try:
                super(Panel, self)._update_model(events, msg, root, model, doc, comm)
                if update:
                    return
                ref = root.ref['id']
                if ref in state._views:
                    state._views[ref][0]._preprocess(root)
            finally:
                Panel._batch_update = update
class GridSpec(Panel):
objects = param.Dict(default={}, doc="""
The dictionary of child objects that make up the grid.""")
mode = param.ObjectSelector(default='warn', objects=['warn', 'error', 'override'], doc="""
Whether to warn, error or simply override on overlapping assignment.""")
ncols = param.Integer(default=None, bounds=(0, None), doc="""
Limits the number of columns that can be assigned.""")
nrows = param.Integer(default=None, bounds=(0, None), doc="""
Limits the number of rows that can be assigned.""")
width = param.Integer(default=600)
height = param.Integer(default=600)
_bokeh_model = BkGridBox
_source_transforms = {'objects': None, 'mode': None}
_rename = {'objects': 'children', 'mode': None, 'ncols': None, 'nrows': None}
    def __init__(self, **params):
        # Default to an ordered mapping so iteration over assigned
        # objects is deterministic.
        if 'objects' not in params:
            params['objects'] = OrderedDict()
        super(GridSpec, self).__init__(**params)
        # Guard flag so the nrows/ncols watchers below can distinguish our
        # own programmatic updates from user-fixed values.
        self._updating = False
        self._update_nrows()
        self._update_ncols()
        self._update_grid_size()
@param.depends('nrows', watch=True)
def _update_nrows(self):
if not self._updating:
self._rows_fixed = self.nrows is not None
@param.depends('ncols', watch=True)
def _update_ncols(self):
if not self._updating:
self._cols_fixed = self.ncols is not None
@param.depends('objects', watch=True)
def _update_grid_size(self):
self._updating = True
if not self._cols_fixed:
max_xidx = [x1 for (_, _, _, x1) in self.objects if x1 is not None]
self.ncols = max(max_xidx) if max_xidx else (1 if len(self.objects) else 0)
if not self._rows_fixed:
max_yidx = [y1 for (_, _, y1, _) in self.objects if y1 is not None]
self.nrows = max(max_yidx) if max_yidx else (1 if len(self.objects) else 0)
self._updating = False
def _init_properties(self):
properties = super(GridSpec, self)._init_properties()
if self.sizing_mode not in ['fixed', None]:
if 'min_width' not in properties and 'width' in properties:
properties['min_width'] = properties['width']
if 'min_height' not in properties and 'height' in properties:
properties['min_height'] = properties['height']
return properties
    def _get_objects(self, model, old_objects, doc, root, comm=None):
        """
        Render the assigned objects into bokeh models, sizing each one
        according to the grid cells it spans.
        """
        from ..pane.base import RerenderError
        # Per-cell pixel size derived from the overall width/height.
        if self.ncols:
            width = int(float(self.width)/self.ncols)
        else:
            width = 0
        if self.nrows:
            height = int(float(self.height)/self.nrows)
        else:
            height = 0
        current_objects = list(self.objects.values())
        if isinstance(old_objects, dict):
            old_objects = list(old_objects.values())
        # Clean up models for objects no longer present in the grid.
        for old in old_objects:
            if old not in current_objects:
                old._cleanup(root)
        children = []
        for i, ((y0, x0, y1, x1), obj) in enumerate(self.objects.items()):
            # None bounds mean "span to the edge of the grid".
            x0 = 0 if x0 is None else x0
            x1 = (self.ncols) if x1 is None else x1
            y0 = 0 if y0 is None else y0
            y1 = (self.nrows) if y1 is None else y1
            r, c, h, w = (y0, x0, y1-y0, x1-x0)
            if self.sizing_mode in ['fixed', None]:
                properties = {'width': w*width, 'height': h*height}
            else:
                # In responsive modes only the non-responsive dimension
                # receives an explicit pixel size.
                properties = {'sizing_mode': self.sizing_mode}
                if 'width' in self.sizing_mode:
                    properties['height'] = h*height
                elif 'height' in self.sizing_mode:
                    properties['width'] = w*width
            obj.param.set_param(**{k: v for k, v in properties.items()
                                   if not obj.param[k].readonly})
            if obj in old_objects:
                # Reuse the previously rendered model for unchanged objects.
                child, _ = obj._models[root.ref['id']]
            else:
                try:
                    child = obj._get_model(doc, root, model, comm)
                except RerenderError:
                    # A pane invalidated itself mid-render; retry treating the
                    # objects rendered so far as the old set.
                    return self._get_objects(model, current_objects[:i], doc, root, comm)
            if isinstance(child, BkBox) and len(child.children) == 1:
                child.children[0].update(**properties)
            else:
                child.update(**properties)
            children.append((child, r, c, h, w))
        return children
@property
def _xoffset(self):
min_xidx = [x0 for (_, x0, _, _) in self.objects if x0 is not None]
return min(min_xidx) if min_xidx and len(min_xidx) == len(self.objects) else 0
@property
def _yoffset(self):
min_yidx = [y0 for (y0, x0, _, _) in self.objects if y0 is not None]
return min(min_yidx) if min_yidx and len(min_yidx) == len(self.objects) else 0
@property
def _object_grid(self):
grid = np.full((self.nrows, self.ncols), None, dtype=object)
for i, ((y0, x0, y1, x1), obj) in enumerate(self.objects.items()):
l = 0 if x0 is None else x0
r = self.ncols if x1 is None else x1
t = 0 if y0 is None else y0
b = self.nrows if y1 is None else y1
for y in range(t, b):
for x in range(l, r):
grid[y, x] = {((y0, x0, y1, x1), obj)}
return grid
def _cleanup(self, root):
super(GridSpec, self)._cleanup(root)
for p in self.objects.values():
p._cleanup(root)
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
@property
def grid(self):
grid = np.zeros((self.nrows, self.ncols), dtype='uint8')
for (y0, x0, y1, x1) in self.objects:
grid[y0:y1, x0:x1] += 1
return grid
def clone(self, **params):
"""
Makes a copy of the GridSpec sharing the same parameters.
Arguments
---------
params: Keyword arguments override the parameters on the clone.
Returns
-------
Cloned GridSpec object
"""
p = dict(self.param.get_param_values(), **params)
if not self._cols_fixed:
del p['ncols']
if not self._rows_fixed:
del p['nrows']
return type(self)(**p)
def __iter__(self):
for obj in self.objects.values():
yield obj
def __delitem__(self, index):
if isinstance(index, tuple):
yidx, xidx = index
else:
yidx, xidx = index, slice(None)
subgrid = | |
= np.angle(self.curcomplex[self.curframe])
curangle2f = curanglef + (old_div(increment*np.pi,180))
currealf = curmagf * np.cos(curangle2f)
curimagf = curmagf * np.sin(curangle2f)
temp_complex = currealf + 1j*curimagf
self.curcomplex[self.curframe] = temp_complex
#Apply optimum phase shift to apodised whole spectrum
acurmagf = np.abs(self.acurcomplex[self.curframe])
acuranglef = np.angle(self.acurcomplex[self.curframe])
acurangle2f = acuranglef + (old_div(increment*np.pi,180))
acurrealf = acurmagf * np.cos(acurangle2f)
acurimagf = acurmagf * np.sin(acurangle2f)
atemp_complex = acurrealf + 1j*acurimagf
self.acurcomplex[self.curframe] = atemp_complex
#Modify storage of phasing angle for current frame
self.optphasearr[self.curframe] = self.optphasearr[self.curframe] + increment
self.addframes()
self.set_current_frame()
def Choinc(self, increment):
#Apply optimum phase shift to whole spectrum
temp_ppa = self.peakposarr[self.curframe]
temp_ppa[0] = temp_ppa[0] + increment
self.peakposarr[self.curframe] = temp_ppa
self.addframes()
self.set_current_frame()
def Crinc(self, increment):
#Apply optimum phase shift to whole spectrum
temp_ppa = self.peakposarr[self.curframe]
temp_ppa[1] = temp_ppa[1] + increment
self.peakposarr[self.curframe] = temp_ppa
self.addframes()
self.set_current_frame()
def framedown(self):
if self.curframe > 0:
self.curframe -= 1
self.set_current_frame()
def frameup(self):
if self.curframe < ((old_div(self.Frames,2)) - 1):
self.curframe += 1
self.set_current_frame()
def set_current_frame(self):
self.current_frame = self.acurcomplex[self.curframe]
#self.current_frame = self.Spectrumautoapod[self.curframe]
#print self.peakposarr[self.curframe]
#print self.shiftindex[self.curframe]
    def writeTarquin(self, outpath):
        """Export the processed spectrum to a DICOM file for TARQUIN.

        Interleaves real/imaginary parts of the k-space data into the
        dataset's (0x5600, 0x0020) element and saves it under
        outpath/Tarquin_files.
        """
        #Tarquindir = outpath + '\\' + 'Tarquin_files'
        outpath=Path(outpath)
        Tarquindir = outpath / 'Tarquin_files'
        # if os.path.isdir(Tarquindir) == False:
        #     os.chdir(outpath)
        #     os.mkdir('Tarquin_files')
        Tarquindir.resolve().mkdir(parents=True, exist_ok=True)
        # Strip the directory part and any '.' characters to build the stem.
        name = self.filename[(self.filename.rfind('\\')+1):].translate(str.maketrans('','', r'.'))
        #file_path = Tarquindir + '\\' + self.dirpass + '__' + name + 'proc_Tarquin'
        file_path = Path(Tarquindir , name + 'proc_Tarquin')
        # NOTE(review): Spec_temp aliases self.SpecData, so the loop below
        # mutates self.SpecData in place — confirm this is intended.
        Spec_temp = self.SpecData
        counter = 0
        #Need complex conj for proper display, hence -imag
        for b in range(0,old_div(self.Frames,2)):
            for a in range(0, self.Datapoints):
                Spec_temp[counter] = self.Kspacewrite[b][a].real
                counter = counter + 1
                Spec_temp[counter] = -self.Kspacewrite[b][a].imag
                counter = counter + 1
        self.ds[0x5600,0x0020].value = Spec_temp
        self.ds.save_as(str(file_path.resolve()))
def undophase(self):
self.curcomplex = self.Spectrum
self.acurcomplex = self.Spectrumapod
if self.Frames == 1:
frames = 1
else:
frames = old_div(self.Frames,2) #self.Frames / 2 because NWS data also stored in Dicom file
#Reset store of phasing angle for each from to zero
for cnt in range(0, frames):
self.optphasearr[cnt] = 0
self.addframes()
self.set_current_frame()
def undoshift(self):
if self.Frames == 1:
frames = 1
else:
frames = old_div(self.Frames,2) #self.Frames / 2 because NWS data also stored in Dicom file
temp_ppa = [1020, 1030]
for cnt in range(0, frames):
self.peakposarr[cnt] = temp_ppa
self.addframes()
self.set_current_frame()
def report_completed(self,report_path):
self.report_completed_msg=f'MRS Report saved in {report_path}'
def writelogfile(self, outpath, version):
outpath=Path(outpath)
#Logdir = outpath + '\\' + 'Log_files'
Logdir = outpath / 'Log_files'
# if os.path.isdir(Logdir) == False:
# os.chdir(outpath)
# os.mkdir('Log_files')
Logdir.resolve().mkdir(parents=True, exist_ok=True)
if self.Frames == 1:
frames = 1
else:
frames = old_div(self.Frames,2) #self.Frames / 2 because NWS data also stored in Dicom file
name = self.filename[(self.filename.rfind('\\')+1):].translate(str.maketrans('','', r'.'))
#file_path = Logdir + '\\' + self.dirpass + '__' + name + 'log_file.txt'
file_path = Path(Logdir , name + 'log_file.txt' )
#self.text_file = open(file_path, 'w')
self.text_file = open(str(file_path.resolve()), 'w')
# Write Log File
self.text_file.write('Tarquin Pre-processing Log file\n\n')
print('Filename: %s\n' % (file_path), file=self.text_file)
print('Version: %s\n' % (version), file=self.text_file)
for cnt in range(0, frames):
print('Frame: %i' % (cnt), file=self.text_file)
print('Include: %i' % (self.IncludeFrame[cnt]), file=self.text_file)
print('Phasing: %i' % (self.optphasearr[cnt]), file=self.text_file)
intostr = 'Peak positions: ' + str(self.peakposarr[cnt])
self.text_file.write(intostr + '\n\n')
self.text_file.close()
print('Log file written')
def fitTarquin(self, outpath):
nameinit = self.PatName
dialog = PatNameDialog(nameinit)
if dialog.exec_():
name = dialog.name.text()
try:
self.PatName = name
except:
self.PatName = nameinit
outpath=Path(outpath)
#Tarquindir = outpath + '\\' + 'Tarquin_files'
Tarquindir = outpath / 'Tarquin_files'
name = self.filename[(self.filename.rfind('\\')+1):].translate(str.maketrans('','', r'.'))
filename = name + 'proc_Tarquin'
#file_path = Tarquindir + '\\' + filename
file_path = str(Path(Tarquindir , filename).resolve())
#Tarquinfitdir = Tarquindir + '\\' + 'Tarquin_fit'
Tarquinfitdir = Tarquindir / 'Tarquin_fit'
# if os.path.isdir(Tarquinfitdir) == False:
# os.chdir(Tarquindir)
# os.mkdir('Tarquin_fit')
#
Tarquinfitdir.resolve().mkdir(parents=True, exist_ok=True)
# reportout = Tarquinfitdir + '\\' + self.PatName + '_Report.pdf'
# tempout = Tarquinfitdir + '\\' + filename + '_temp.pdf'
# pdfout = Tarquinfitdir + '\\' + filename + '_plot.pdf'
# dataout = Tarquinfitdir + '\\' + filename + '_data.csv'
# moddataout = Tarquinfitdir + '\\' + filename + '_data_with_ratios.csv'
# resout = Tarquinfitdir + '\\' + filename + '_results.csv'
# self.fitout = Tarquinfitdir + '\\' + filename + '_fit.txt'
# basis = 'S:\\Neonate_data\\Tarquin\\3_0T_basis_threonine_no_MM'
# tarquin = 'S:\\Neonate_data\\Tarquin\\TARQUIN_Windows_4.3.7\\tarquin\\tarquin'
reportout = str(Path(Tarquinfitdir , str(self.PatName) + '_Report.pdf').resolve())
#reportout = Path(Tarquinfitdir , self.PatName + '_Report.pdf')
tempout = str(Path(Tarquinfitdir ,filename + '_temp.pdf').resolve())
pdfout = str(Path(Tarquinfitdir , filename + '_plot.pdf').resolve())
dataout = str(Path(Tarquinfitdir , filename + '_data.csv').resolve())
moddataout = str(Path(Tarquinfitdir , filename + '_data_with_ratios.csv').resolve())
resout = str(Path(Tarquinfitdir , filename + '_results.csv').resolve())
self.fitout = str(Path(Tarquinfitdir , filename + '_fit.txt').resolve())
basis = str(Path(BASE_DIR ,'3_0T_basis_threonine_no_MM').resolve())
print(f'basis: {basis}')
if sys.platform == "darwin":
tarquin_path=Path(BASE_DIR ,'TARQUIN/mac/tarquingui.app/Contents/MacOS/tarquin')
elif sys.platform == "win32":
tarquin_path=Path(BASE_DIR ,'TARQUIN/win/TARQUIN_Windows_4.3.10/tarquin/tarquin.exe')
elif sys.platform == "linux":
tarquin_path=Path(BASE_DIR ,'TARQUIN/linux/tarquin')
if tarquin_path.exists():
tarquin = str(tarquin_path.resolve())
elif shutil.which("tarquin"):
tarquin = shutil.which("tarquin")
else:
error=f'\nTarquin not found. \nTo solve it please:\n a) copy the Tarquin app inside {BASE_DIR} folder, or\n b) add Tarquin to the Path. e.g. >> export PATH=$PATH:/Applications/tarquingui.app/Contents/MacOS\n'
print(error)
sys.exit(error)
command = (tarquin + ' --input ' + file_path + ' --output_pdf ' + pdfout +
' --output_csv ' + dataout + ' --output_fit ' + self.fitout +
' --basis_csv ' + basis)
# run the command
print('this the the command for tarquin: ',command)
os.system(command)
#Add in sode code to automatically calculate the Lac/Naa ratio
#Note that this will assume that the correct basis set is used
#csvfile = open(dataout, 'rb')
with open(dataout) as csvfile:
linereader = csv.reader(csvfile, delimiter = ',')
CSVstore = []
counter = 0
for row in linereader:
counter += 1
# print(row)
if counter == 2:
row.append('Lac+T/tNaa')
row.append('tNaa/tCho')
row.append('tNaa/Cr')
row.append('tCho/Cr')
row.append('Lac+T/tCho')
row.append('Lac+T/Cr')
if counter == 5:
row.append('Lac+T/tNaa')
row.append('tNaa/tCho')
row.append('tNaa/Cr')
row.append('tCho/Cr')
row.append('Lac+T/tCho')
row.append('Lac+T/Cr')
#Calc ratio
if counter == 3:
#dummy = str(row)
#dummy = dummy.translate(None, ''.join(["[", "'", "]"]))
#print('dummy is: ',dummy)
#fields = dummy.split(', ')
fields = row
# --- Metabolite ratio extraction -------------------------------------------
# NOTE(review): this span is the interior of a per-row loop; `counter`, `row`,
# `fields`, `CSVstore` and `resout` are defined upstream, outside this excerpt.
# Concentration row: pull metabolite values by column index.
# `np.float` was only an alias of the builtin `float` (deprecated in NumPy
# 1.20, removed in 1.24), and `old_div` equals true division when either
# operand is a float, so `float()` and `/` are exact drop-in replacements.
Lac = float(fields[14])
Naa = float(fields[15])
NaaG = float(fields[16])
Thre = float(fields[21])
Cr = float(fields[6])
tCho = float(fields[23])
# Ratios of interest: lactate+threonine vs. total NAA, total NAA vs. total
# choline / creatine, etc.
L_N = (Lac + Thre) / (Naa + NaaG)
N_Ch = (Naa + NaaG) / tCho
N_Cr = (Naa + NaaG) / Cr
Ch_Cr = tCho / Cr
L_Ch = (Lac + Thre) / tCho
L_Cr = (Lac + Thre) / Cr
row.append(str(L_N))
row.append(str(N_Ch))
row.append(str(N_Cr))
row.append(str(Ch_Cr))
row.append(str(L_Ch))
row.append(str(L_Cr))
# calc error: this row appears to carry the per-metabolite uncertainties
# (same column layout, names suffixed "e"); propagate them into the ratios
# by adding relative errors in quadrature.
if counter == 6:
    fields = row
    Lace = float(fields[14])
    Naae = float(fields[15])
    NaaGe = float(fields[16])
    Three = float(fields[21])
    Cre = float(fields[6])
    tChoe = float(fields[23])
    # Combined absolute errors of the summed numerators.
    Lerr = np.sqrt(np.power(Lace, 2) + np.power(Three, 2))
    Nerr = np.sqrt(np.power(Naae, 2) + np.power(NaaGe, 2))
    # sigma(ratio) = ratio * sqrt((sig_num/num)^2 + (sig_den/den)^2)
    L_Ne = np.sqrt(np.power(Lerr / (Lac + Thre), 2) + np.power(Nerr / (Naa + NaaG), 2)) * L_N
    N_Che = np.sqrt(np.power(Nerr / (Naa + NaaG), 2) + np.power(tChoe / tCho, 2)) * N_Ch
    N_Cre = np.sqrt(np.power(Nerr / (Naa + NaaG), 2) + np.power(Cre / Cr, 2)) * N_Cr
    Ch_Cre = np.sqrt(np.power(tChoe / tCho, 2) + np.power(Cre / Cr, 2)) * Ch_Cr
    L_Che = np.sqrt(np.power(Lerr / (Lac + Thre), 2) + np.power(tChoe / tCho, 2)) * L_Ch
    L_Cre = np.sqrt(np.power(Lerr / (Lac + Thre), 2) + np.power(Cre / Cr, 2)) * L_Cr
    row.append(str(L_Ne))
    row.append(str(N_Che))
    row.append(str(N_Cre))
    row.append(str(Ch_Cre))
    row.append(str(L_Che))
    row.append(str(L_Cre))
# get FWHM and SNR from the processing-diagnostics row.
if counter == 9:
    fields = row
    FWHM = float(fields[7])
    SNR = float(fields[9])
CSVstore.append(row)
# Emit the summary file: header, then one line per ratio (value, error);
# only the first data line also carries the FWHM/SNR quality metrics.
summary_lines = [
    'Ratio, Value, Error, Proc FWHM, Proc SNR',
    'L+T/tNaa,' + str(L_N) + ',' + str(L_Ne) + ',' + str(FWHM) + ',' + str(SNR),
    'tNaa/tCho,' + str(N_Ch) + ',' + str(N_Che),
    'tNaa/Cr,' + str(N_Cr) + ',' + str(N_Cre),
    'tCho/Cr,' + str(Ch_Cr) + ',' + str(Ch_Cre),
    'L+T/tCho,' + str(L_Ch) + ',' + str(L_Che),
    'L+T/Cr,' + str(L_Cr) + ',' + str(L_Cre),
]
# Context manager guarantees the handle is closed even if a write fails
# (matches the original output byte-for-byte: newline between lines, none
# after the last).
with open(resout, 'w') as resultsout:
    resultsout.write('\n'.join(summary_lines))
| |
b_, (((a_ *in_* Identity(A)) & (b_ *in_* Identity(A))) & (Left(a_) == Left(b_))) >> (a_ == b_))) @ (47, TAUTOLOGY, 45, 46)
(Function(Identity(A)) == (Relation(Identity(A)) & All(a_, b_, (((a_ *in_* Identity(A)) & (b_ *in_* Identity(A))) & (Left(a_) == Left(b_))) >> (a_ == b_)))) @ (48, BY_THEOREM, "function")
Function(Identity(A)) @ (49, TAUTOLOGY, 48, 47)
All(A_, Function(Identity(A_))) @ ("identity_is_function", CLOSING, 49)
# domain of identity
# Proof script in an embedded Python proof DSL: every statement is
# `<formula> @ (<label>, <RULE>, <premises...>)`; `with <hypothesis> @ n:`
# opens an assumption that a later DEDUCE discharges, and a step labelled
# with a string + CLOSING registers the named theorem.
# This section proves Domain(Identity(A)) == A ("domain_of_identity").
clear()
with (x *in_* Domain(Identity(A))) @ 0:
    ((x *in_* Domain(Identity(A))) == Exist(y_, ((y_ *in_* Identity(A)) & Arity2(y_)) & (Left(y_) == x))) @ (1, BY_THEOREM, "domain")
    Exist(y_, ((y_ *in_* Identity(A)) & Arity2(y_)) & (Left(y_) == x)) @ (2, TAUTOLOGY, 0, 1)
    (((y *in_* Identity(A)) & Arity2(y)) & (Left(y) == x)) @ (3, LET, y, 2)
    (y *in_* Identity(A)) @ (4, TAUTOLOGY, 3)
    ((y *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (y == OrderedPair(a_, a_)))) @ (5, BY_THEOREM, "identity")
    Exist(a_, (a_ *in_* A) & (y == OrderedPair(a_, a_))) @ (6, TAUTOLOGY, 4, 5)
    ((a *in_* A) & (y == OrderedPair(a, a))) @ (7, LET, a, 6)
    (a *in_* A) @ (8, TAUTOLOGY, 7)
    Set(a) @ (9, PUT_THEOREM, "set_condition", A, 8)
    (a == Left(OrderedPair(a, a))) @ (10, BY_THEOREM, "left_of_ordered_pair", 9)
    (y == OrderedPair(a, a)) @ (11, TAUTOLOGY, 7)
    (a == Left(y)) @ (12, REPLACE, 10, 11)
    (Left(y) == x) @ (13, TAUTOLOGY, 3)
    (a == x) @ (14, BY_EQUIVALENCE, 12, 13)
    (x *in_* A) @ (15, REPLACE, 8, 14)
((x *in_* Domain(Identity(A))) >> (x *in_* A)) @ (16, DEDUCE)
# Converse direction: x in A puts the pair (x, x) in Identity(A).
with (x *in_* A) @ 17:
    Set(x) @ (18, PUT_THEOREM, "set_condition", A, 17)
    (OrderedPair(x, x) == OrderedPair(x, x)) @ (19, BY_EQUIVALENCE)
    ((x *in_* A) & (OrderedPair(x, x) == OrderedPair(x, x))) @ (20, TAUTOLOGY, 19, 17)
    Exist(a_, (a_ *in_* A) & (OrderedPair(x, x) == OrderedPair(a_, a_))) @ (21, FOUND, x, 20)
    ((OrderedPair(x, x) *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (OrderedPair(x, x) == OrderedPair(a_, a_)))) @ (22, BY_THEOREM, "identity")
    (OrderedPair(x, x) *in_* Identity(A)) @ (23, TAUTOLOGY, 21, 22)
    (x *in_* Domain(Identity(A))) @ (24, PUT_THEOREM, "domain_condition", x, 23, 18)
((x *in_* A) >> (x *in_* Domain(Identity(A)))) @ (25, DEDUCE)
((x *in_* A) == (x *in_* Domain(Identity(A)))) @ (26, TAUTOLOGY, 16, 25)
All(x_, (x_ *in_* A) == (x_ *in_* Domain(Identity(A)))) @ (27, CLOSING, 26)
(A == Domain(Identity(A))) @ (28, BY_THEOREM, "extensionality", 27)
All(A_, A_ == Domain(Identity(A_))) @ ("domain_of_identity", CLOSING, 28)
# put condition
# Proves: for sets x, y and a function F with (x, y) in F, y == F(x)
# ("put_condition") -- i.e. a function yields the pair's right component.
clear()
with (Set(x) & Set(y)) @ 0:
    with (Function(F) & (OrderedPair(x, y) *in_* F)) @ 1:
        (OrderedPair(x, y) *in_* F) @ (2, TAUTOLOGY, 1)
        (x *in_* Domain(F)) @ (3, PUT_THEOREM, "domain_condition", y, 0, 2)
        (((FindPair(F, x) *in_* F) & Arity2(FindPair(F, x))) & (Left(FindPair(F, x)) == x)) @ (4, BY_THEOREM, "find_pair", 3, 1)
        (x == Left(OrderedPair(x, y))) @ (5, BY_THEOREM, "left_of_ordered_pair", 0)
        (Left(FindPair(F, x)) == x) @ (6, TAUTOLOGY, 4)
        (Left(FindPair(F, x)) == Left(OrderedPair(x, y))) @ (7, BY_EQUIVALENCE, 6, 5)
        (FindPair(F, x) *in_* F) @ (8, TAUTOLOGY, 4)
        (Function(F) == (Relation(F) & All(a_, b_, (((a_ *in_* F) & (b_ *in_* F)) & (Left(a_) == Left(b_))) >> (a_ == b_)))) @ (9, BY_THEOREM, "function")
        All(a_, b_, (((a_ *in_* F) & (b_ *in_* F)) & (Left(a_) == Left(b_))) >> (a_ == b_)) @ (10, TAUTOLOGY, 9, 1)
        # Single-valuedness of F identifies FindPair(F, x) with (x, y).
        (FindPair(F, x) == OrderedPair(x, y)) @ (11, BY_THEOREM, 10, 2, 8, 7)
        (y == Right(OrderedPair(x, y))) @ (12, BY_THEOREM, "right_of_ordered_pair", 0)
        (y == Right(FindPair(F, x))) @ (13, REPLACE, 12, 11)
        (F(x) == Right(FindPair(F, x))) @ (14, BY_THEOREM, "put")
        (y == F(x)) @ (15, REPLACE, 13, 14)
    ((Function(F) & (OrderedPair(x, y) *in_* F)) >> (y == F(x))) @ (16, DEDUCE)
((Set(x) & Set(y)) >> ((Function(F) & (OrderedPair(x, y) *in_* F)) >> (y == F(x)))) @ (17, DEDUCE)
(((Set(x) & Set(y)) & (Function(F) & (OrderedPair(x, y) *in_* F))) >> (y == F(x))) @ (18, TAUTOLOGY, 17)
All(x_, y_, F_, ((Set(x_) & Set(y_)) & (Function(F_) & (OrderedPair(x_, y_) *in_* F_))) >> (y_ == F_(x_))) @ ("put_condition", CLOSING, 18)
# identity output
# Proves: a in A implies Identity(A)(a) == a ("identity_output").
clear()
with (a *in_* A) @ 0:
    All(x_, (x_ *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (x_ == OrderedPair(a_, a_)))) @ (1, PUT, A, "identity")
    ((OrderedPair(a, a) *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (OrderedPair(a, a) == OrderedPair(a_, a_)))) @ (2, PUT, OrderedPair(a, a), 1)
    (OrderedPair(a, a) == OrderedPair(a, a)) @ (3, BY_EQUIVALENCE)
    ((a *in_* A) & (OrderedPair(a, a) == OrderedPair(a, a))) @ (6, TAUTOLOGY, 0, 3)
    Exist(a_, (a_ *in_* A) & (OrderedPair(a, a) == OrderedPair(a_, a_))) @ (4, FOUND, a, 6)
    (OrderedPair(a, a) *in_* Identity(A)) @ (5, TAUTOLOGY, 2, 4)
    # NOTE(review): label 6 is reused here (it already tags the conjunction
    # three steps up) -- confirm the proof checker tolerates duplicate labels
    # or renumber this step.
    Function(Identity(A)) @ (6, BY_THEOREM, "identity_is_function")
    Set(a) @ (7, PUT_THEOREM, "set_condition", A, 0)
    (a == Identity(A)(a)) @ (8, BY_THEOREM, "put_condition", 5, 7, 6)
((a *in_* A) >> (a == Identity(A)(a))) @ (9, DEDUCE)
All(a_, A_, (a_ *in_* A_) >> (a_ == Identity(A_)(a_))) @ ("identity_output", CLOSING, 9)
# image of identity
# Proves: Identity(A)[B] == A ∩ B ("image_of_identity").
clear()
with (x *in_* Identity(A)[B]) @ 0:  # book
    ((x *in_* Identity(A)[B]) == (Set(x) & Exist(a_, ((a_ *in_* B) & (a_ *in_* Domain(Identity(A)))) & (x == Identity(A)(a_))))) @ (1, BY_THEOREM, "image")
    Exist(a_, ((a_ *in_* B) & (a_ *in_* Domain(Identity(A)))) & (x == Identity(A)(a_))) @ (2, TAUTOLOGY, 0, 1)
    (((a *in_* B) & (a *in_* Domain(Identity(A)))) & (x == Identity(A)(a))) @ (3, LET, a, 2)
    (a *in_* B) @ (4, TAUTOLOGY, 3)
    (a *in_* Domain(Identity(A))) @ (5, TAUTOLOGY, 3)
    (x == Identity(A)(a)) @ (6, TAUTOLOGY, 3)
    (A == Domain(Identity(A))) @ (7, BY_THEOREM, "domain_of_identity")
    (a *in_* A) @ (8, REPLACE, 5, 7)
    (a == Identity(A)(a)) @ (9, BY_THEOREM, "identity_output", 8)
    (x == a) @ (10, REPLACE, 6, 9)
    (x *in_* B) @ (11, REPLACE, 4, 10)
    (x *in_* A) @ (12, REPLACE, 8, 10)
    ((x *in_* (A *cap* B)) == ((x *in_* A) & (x *in_* B))) @ (13, BY_THEOREM, "cap")
    (x *in_* (A *cap* B)) @ (14, TAUTOLOGY, 13, 11, 12)
((x *in_* Identity(A)[B]) >> (x *in_* (A *cap* B))) @ (15, DEDUCE)
with (x *in_* (A *cap* B)) @ 16:
    ((x *in_* (A *cap* B)) == ((x *in_* A) & (x *in_* B))) @ (17, BY_THEOREM, "cap")
    (x *in_* A) @ (18, TAUTOLOGY, 16, 17)
    (x *in_* B) @ (19, TAUTOLOGY, 16, 17)
    ((x *in_* Identity(A)[B]) == (Set(x) & Exist(a_, ((a_ *in_* B) & (a_ *in_* Domain(Identity(A)))) & (x == Identity(A)(a_))))) @ (20, BY_THEOREM, "image")
    Set(x) @ (40, PUT_THEOREM, "set_condition", A, 18)
    (A == Domain(Identity(A))) @ (21, BY_THEOREM, "domain_of_identity")
    (x *in_* Domain(Identity(A))) @ (22, REPLACE, 18, 21)
    (x == Identity(A)(x)) @ (23, BY_THEOREM, "identity_output", 18)
    (((x *in_* B) & (x *in_* Domain(Identity(A)))) & (x == Identity(A)(x))) @ (24, TAUTOLOGY, 19, 22, 23)
    Exist(a_, ((a_ *in_* B) & (a_ *in_* Domain(Identity(A)))) & (x == Identity(A)(a_))) @ (25, FOUND, x, 24)
    (Set(x) & Exist(a_, ((a_ *in_* B) & (a_ *in_* Domain(Identity(A)))) & (x == Identity(A)(a_)))) @ (41, TAUTOLOGY, 25, 40)
    (x *in_* Identity(A)[B]) @ (26, TAUTOLOGY, 41, 20)
((x *in_* (A *cap* B)) >> (x *in_* Identity(A)[B])) @ (27, DEDUCE)
((x *in_* Identity(A)[B]) == (x *in_* (A *cap* B))) @ (28, TAUTOLOGY, 27, 15)
All(x_, (x_ *in_* Identity(A)[B]) == (x_ *in_* (A *cap* B))) @ (29, CLOSING, 28)
(Identity(A)[B] == (A *cap* B)) @ (30, BY_THEOREM, "extensionality", 29)
All(A_, B_, Identity(A_)[B_] == (A_ *cap* B_)) @ ("image_of_identity", CLOSING, 30)
# element of subset
# Proves: x in A and A ⊆ B implies x in B ("element_of_subset").
clear()
with ((x *in_* A) & (A *inc* B)) @ 0:
    (x *in_* A) @ (1, TAUTOLOGY, 0)
    (A *inc* B) @ (2, TAUTOLOGY, 0)
    ((A *inc* B) == All(x_, (x_ *in_* A) >> (x_ *in_* B))) @ (3, BY_THEOREM, "inclusion")
    All(x_, (x_ *in_* A) >> (x_ *in_* B)) @ (4, TAUTOLOGY, 3, 2)
    ((x *in_* A) >> (x *in_* B)) @ (5, PUT, x, 4)
    (x *in_* B) @ (6, TAUTOLOGY, 5, 1)
(((x *in_* A) & (A *inc* B)) >> (x *in_* B)) @ (7, DEDUCE)
All(A_, B_, x_, ((x_ *in_* A_) & (A_ *inc* B_)) >> (x_ *in_* B_)) @ ("element_of_subset", CLOSING, 7)
# cap subset
clear()
with (A *inc* B) @ 0:
with (x *in_* (A *cap* B)) @ 1:
((x *in_* (A *cap* B)) == ((x *in_* A) & (x *in_* B))) @ (2, BY_THEOREM, "cap")
(x *in_* A) @ (3, TAUTOLOGY, 1, 2)
((x *in_* (A *cap* B)) >> (x | |
= {(0,0):C.GC_1874})
# ---------------------------------------------------------------------------
# Vertex definitions V_878 - V_927: scalar and fermion-scalar couplings of
# the su2/su3 squarks to Higgs/Goldstone bosons, sleptons, squarks and
# neutralinos/charginos.
# NOTE(review): this appears to be machine-generated UFO model output
# (FeynRules/MadGraph convention) -- presumably values should be regenerated
# from the model rather than edited by hand; confirm before modifying.
# ---------------------------------------------------------------------------
V_878 = Vertex(name = 'V_878',
               particles = [ P.h01, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_1873})
V_879 = Vertex(name = 'V_879',
               particles = [ P.h01, P.h01, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1012})
V_880 = Vertex(name = 'V_880',
               particles = [ P.h02, P.h02, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1013})
V_881 = Vertex(name = 'V_881',
               particles = [ P.A0, P.A0, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1140})
V_882 = Vertex(name = 'V_882',
               particles = [ P.G0, P.G0, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1142})
V_883 = Vertex(name = 'V_883',
               particles = [ P.G__minus__, P.G__plus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1139})
V_884 = Vertex(name = 'V_884',
               particles = [ P.H__minus__, P.H__plus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1141})
V_885 = Vertex(name = 'V_885',
               particles = [ P.sl1__plus__, P.sl1__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_654})
V_886 = Vertex(name = 'V_886',
               particles = [ P.sl2__plus__, P.sl2__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_655})
V_887 = Vertex(name = 'V_887',
               particles = [ P.sl3__plus__, P.sl3__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_656})
V_888 = Vertex(name = 'V_888',
               particles = [ P.sl4__plus__, P.sl4__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_631})
V_889 = Vertex(name = 'V_889',
               particles = [ P.sl5__plus__, P.sl5__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_632})
V_890 = Vertex(name = 'V_890',
               particles = [ P.sl6__plus__, P.sl6__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_633})
V_891 = Vertex(name = 'V_891',
               particles = [ P.h01, P.h02, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1906})
V_892 = Vertex(name = 'V_892',
               particles = [ P.A0, P.G0, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2176})
V_893 = Vertex(name = 'V_893',
               particles = [ P.G__plus__, P.H__minus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2177})
V_894 = Vertex(name = 'V_894',
               particles = [ P.G__minus__, P.H__plus__, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2177})
# Four-squark vertices below carry two colour structures (singlet and octet
# exchange), hence two couplings.
V_895 = Vertex(name = 'V_895',
               particles = [ P.su1__tilde__, P.su1, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_657,(1,0):C.GC_658})
V_896 = Vertex(name = 'V_896',
               particles = [ P.sd2__tilde__, P.sd2, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(1,0):C.GC_648,(0,0):C.GC_646,(2,0):C.GC_647})
V_897 = Vertex(name = 'V_897',
               particles = [ P.sd1__tilde__, P.sd1, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_644,(1,0):C.GC_645})
V_898 = Vertex(name = 'V_898',
               particles = [ P.sd3__tilde__, P.sd3, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_649,(1,0):C.GC_650})
V_899 = Vertex(name = 'V_899',
               particles = [ P.sd4__tilde__, P.sd4, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_625,(1,0):C.GC_651})
V_900 = Vertex(name = 'V_900',
               particles = [ P.sd5__tilde__, P.sd5, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_626,(1,0):C.GC_652})
V_901 = Vertex(name = 'V_901',
               particles = [ P.sd6__tilde__, P.sd6, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_627,(1,0):C.GC_653})
V_902 = Vertex(name = 'V_902',
               particles = [ P.su2__tilde__, P.su2__tilde__, P.su2, P.su2 ],
               color = [ 'Identity(1,3)*Identity(2,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,3,1)*T(-1,4,2)', 'T(-1,3,2)*T(-1,4,1)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(1,0):C.GC_659,(0,0):C.GC_659,(3,0):C.GC_660,(2,0):C.GC_660})
V_903 = Vertex(name = 'V_903',
               particles = [ P.n1, P.t, P.su3__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,0):C.GC_698,(0,1):C.GC_689})
V_904 = Vertex(name = 'V_904',
               particles = [ P.n2, P.t, P.su3__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,0):C.GC_699,(0,1):C.GC_690})
V_905 = Vertex(name = 'V_905',
               particles = [ P.n3, P.t, P.su3__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,0):C.GC_700,(0,1):C.GC_691})
V_906 = Vertex(name = 'V_906',
               particles = [ P.n4, P.t, P.su3__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,0):C.GC_701,(0,1):C.GC_692})
V_907 = Vertex(name = 'V_907',
               particles = [ P.a, P.su3__tilde__, P.su3 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.VSS1, L.VSS3 ],
               couplings = {(0,0):C.GC_662,(0,1):C.GC_663})
V_908 = Vertex(name = 'V_908',
               particles = [ P.G__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_2165})
V_909 = Vertex(name = 'V_909',
               particles = [ P.H__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_2159})
V_910 = Vertex(name = 'V_910',
               particles = [ P.G__plus__, P.h02, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2236})
V_911 = Vertex(name = 'V_911',
               particles = [ P.h01, P.H__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2232})
V_912 = Vertex(name = 'V_912',
               particles = [ P.G0, P.G__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2252})
V_913 = Vertex(name = 'V_913',
               particles = [ P.A0, P.H__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2242})
V_914 = Vertex(name = 'V_914',
               particles = [ P.sd3, P.sl1__plus__, P.sv1, P.su3__tilde__ ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_682})
V_915 = Vertex(name = 'V_915',
               particles = [ P.sd3, P.sl2__plus__, P.sv2, P.su3__tilde__ ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_683})
V_916 = Vertex(name = 'V_916',
               particles = [ P.sd3, P.sl3__plus__, P.sv3, P.su3__tilde__ ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_684})
V_917 = Vertex(name = 'V_917',
               particles = [ P.G__plus__, P.h01, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2295})
V_918 = Vertex(name = 'V_918',
               particles = [ P.h02, P.H__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2294})
V_919 = Vertex(name = 'V_919',
               particles = [ P.A0, P.G__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2184})
V_920 = Vertex(name = 'V_920',
               particles = [ P.G0, P.H__plus__, P.sd3, P.su3__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2184})
V_921 = Vertex(name = 'V_921',
               particles = [ P.sd1__tilde__, P.sd3, P.su1, P.su3__tilde__ ],
               color = [ 'Identity(1,3)*Identity(2,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_677})
V_922 = Vertex(name = 'V_922',
               particles = [ P.sd2__tilde__, P.sd3, P.su2, P.su3__tilde__ ],
               color = [ 'Identity(1,3)*Identity(2,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_678})
V_923 = Vertex(name = 'V_923',
               particles = [ P.t__tilde__, P.n1, P.su3 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,1):C.GC_967,(0,0):C.GC_108})
V_924 = Vertex(name = 'V_924',
               particles = [ P.t__tilde__, P.n2, P.su3 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,1):C.GC_968,(0,0):C.GC_131})
V_925 = Vertex(name = 'V_925',
               particles = [ P.t__tilde__, P.n3, P.su3 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,1):C.GC_969,(0,0):C.GC_154})
V_926 = Vertex(name = 'V_926',
               particles = [ P.t__tilde__, P.n4, P.su3 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,1):C.GC_970,(0,0):C.GC_177})
V_927 = Vertex(name = 'V_927',
               particles = [ P.x1__minus__, P.b__tilde__, P.su3 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3, L.FFS4 ],
               couplings = {(0,0):C.GC_900,(0,1):C.GC_927})
V_928 = Vertex(name = 'V_928',
particles = [ P.x2__minus__, P.b__tilde__, P.su3 ],
color = [ 'Identity(2,3)' ],
lorentz = [ L.FFS3, | |
__abs__(self):
return DFNumber(abs(self.value))
def __ceil__(self):
return DFNumber(math.ceil(self.value))
def __floor__(self):
return DFNumber(math.floor(self.value))
def __bool__(self):
return self.value != 0.0
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __and__(self, other):
return DFNumber(self.value & DFNumber._extract_val(other))
def __rand__(self, other):
return DFNumber(DFNumber._extract_val(other) & self.value)
def __or__(self, other):
return DFNumber(self.value | DFNumber._extract_val(other))
def __ror__(self, other):
return DFNumber(DFNumber._extract_val(other) | self.value)
def __xor__(self, other):
return DFNumber(self.value ^ DFNumber._extract_val(other))
def __rxor__(self, other):
return DFNumber(DFNumber._extract_val(other) ^ self.value)
def __invert__(self):
return DFNumber(~self.value)
class DFLocation(DFType):
    """Represents a DiamondFire Location.

    Parameters
    ----------\u200b
    x : Union[:class:`int`, :class:`float`]
        The value of the x position.
    y : Union[:class:`int`, :class:`float`]
        The value of the y position.
    z : Union[:class:`int`, :class:`float`]
        The value of the z position.
    pitch : Union[:class:`int`, :class:`float`]
        The pitch value (up/down rotation). Varies between ``-90.0`` and ``90.0`` (any higher/lower will be %'ed).
    yaw : Union[:class:`int`, :class:`float`]
        The yaw value (left/right rotation). Varies between ``-180.0`` and ``180.0`` (any higher/lower will be %ed).
    is_block : :class:`bool`
        Whether or not this location represents a solid (non-air) block. (:class:`bool`) Defaults to False.

    .. container:: comparisons

        .. describe:: a == b, a != b

            Equal if `a` and `b` have the same x,y,z,pitch,yaw; not equal if at least one is different.

        .. describe:: a > b, a < b

            True if at least one of the coordinates x,y,z of `a` is bigger (>)/smaller (<) than the
            respective coordinate's value in `b`; False otherwise.

        .. describe:: a >= b, a <= b

            Applies the given comparison between each coordinate x,y,z of `a` and `b`; if any is True, returns True.

    .. container:: operations

        .. describe:: a + b, a - b, a * b, a ** b, a / b, a // b

            Executes the given operation between the two locations' x, y, z; pitch (mod 90), yaw (mod 180).

            .. warning::
                They are all applied in-place with given values, not dynamically in DiamondFire! For a SetVar,
                see :class:`~.DFVariable`.

            .. note::
                If `b` is an **iterable** (tuple, list etc.), then the operation is done between the x,y,z;pitch,yaw
                of `a` and with the respective items 0-4 of the iterable.
                If, however, `b` is an :class:`int`/:class:`float`, then that value is used for the op. to each of
                x,y,z (pitch, yaw remain untouched).

        .. describe:: -a, abs(a)

            Applies the given operation to each of x,y,z,;pitch,yaw of `a`, returning a new DFLocation.

        .. describe:: +a

            Returns `a` (self).

        .. describe:: hash(a)

            A unique hash representing this location's x, y, z, pitch and yaw.

    Attributes\u200b
    -------------
    x : :class:`float`
        The value of the x position.
    y : :class:`float`
        The value of the y position.
    z : :class:`float`
        The value of the z position.
    pitch : :class:`float`
        The pitch value (up/down rotation). Varies between ``-90.0`` and ``90.0``
    yaw : :class:`float`
        The yaw value (left/right rotation). Varies between ``-180.0`` and ``180.0``
    is_block : :class:`bool`
        Whether or not this location represents a solid (non-air) block. Defaults to False.
    """
    # Slots keep instances small; the world_least/world_most fields were
    # retired (locations are now relative) but are kept here as a record.
    __slots__ = ("x", "y", "z", "pitch", "yaw", "is_block") # , "world_least", "world_most")

    # Instance attribute annotations (semantics documented in the class docstring).
    x: float
    y: float
    z: float
    pitch: float
    yaw: float
    is_block: bool
    # world_least: typing.Optional[:class:`int`]
    # world_most: typing.Optional[:class:`int`]
def __init__(
self, x: AnyNumber = 0.0, y: AnyNumber = 0.0, z: AnyNumber = 0.0, pitch: AnyNumber = 0.0,
yaw: AnyNumber = 0.0,
*, is_block: bool = False,
# world_least: typing.Optional[:class:`int`] = None, world_most: typing.Optional[:class:`int`] = None
# locs are now relative
):
"""
Init the location.
Parameters
----------
x : Union[:class:`int`, :class:`float`]
The value of the x position.
y : Union[:class:`int`, :class:`float`]
The value of the y position.
z : Union[:class:`int`, :class:`float`]
The value of the z position.
pitch : Union[:class:`int`, :class:`float`]
The pitch value (up/down rotation). Varies between ``-90.0`` and ``90.0`` (any higher/lower will be %'ed).
yaw : Union[:class:`int`, :class:`float`]
The yaw value (left/right rotation). Varies between ``-180.0`` and ``180.0`` (any higher/lower will be %ed).
is_block : :class:`bool`
Whether or not this location represents a solid (non-air) block. (:class:`bool`) Defaults to False.
"""
# :param world_least: A constant :class:`int` related to DF; this shouldn't need to be defined by the
# library user. None to let the library handle it.
# :param world_most: A constant :class:`int` related to DF; this shouldn't need to be defined by the
# library user. None to let the library handle it.
self.x = float(x)
self.y = float(y)
self.z = float(z)
fl_pitch = float(pitch)
fl_yaw = float(yaw)
self.pitch = math.copysign(abs(fl_pitch) % MAX_PITCH_DEGREES, fl_pitch)
self.yaw = math.copysign(abs(fl_yaw) % MAX_YAW_DEGREES, fl_yaw)
self.is_block = bool(is_block)
# self.world_least = None if world_least is None else :class:`int`(world_least)
# self.world_most = None if world_most is None else :class:`int`(world_most)
def set(
self, x: AnyNumber = DEFAULT_VAL, y: AnyNumber = DEFAULT_VAL, z: AnyNumber = DEFAULT_VAL,
pitch: AnyNumber = DEFAULT_VAL, yaw: AnyNumber = DEFAULT_VAL,
*, is_block: bool = DEFAULT_VAL,
# world_least: typing.Optional[:class:`int`] = DEFAULT_VAL, world_most: typing.Optional[:class:`int`] \
# = DEFAULT_VAL
) -> "DFLocation":
"""Set the location.
Parameters
----------
x : Union[:class:`int`, :class:`float`], optional
The value of the x position (:class:`float`).
y : Union[:class:`int`, :class:`float`], optional
The value of the y position (:class:`float`).
z : Union[:class:`int`, :class:`float`], optional
The value of the z position (:class:`float`).
pitch : Union[:class:`int`, :class:`float`], optional
The pitch value (:class:`float`).
yaw : Union[:class:`int`, :class:`float`], optional
The yaw value (:class:`float`).
is_block : :class:`bool`, optional
Whether or not this location represents a solid (non-air) block. (:class:`bool`) Defaults to False.
Returns
-------
:class:`DFLocation`
self to allow chaining
Note
----
All parameters are optional here, meaning that one can pass :const:`~py2df.constants.utility_consts.DEFAULT_VAL`
to omit a parameter - or, more simply, only use kwargs to choose which values to set.
Warnings
--------
Passing ``None`` will set the value to that! If your intention is to omit a parameter, use
:const:`~py2df.constants.utility_consts.DEFAULT_VAL` or simply use kwargs to choose which values to set.
"""
# :param world_least: A constant :class:`int` related to DF; this shouldn't need to be defined by
# the library user. None to let the library handle it.
# :param world_most: A constant :class:`int` related to DF; this shouldn't need to be defined by
# the library user. None to let the library handle it
self.x = self.x if x == DEFAULT_VAL else float(x)
self.y = self.y if y == DEFAULT_VAL else float(y)
self.z = self.z if z == DEFAULT_VAL else float(z)
self.pitch = self.pitch if pitch == DEFAULT_VAL else float(pitch)
self.yaw = self.yaw if yaw == DEFAULT_VAL else float(yaw)
self.is_block = self.is_block if is_block == DEFAULT_VAL else bool(is_block)
# self.world_least = self.world_least if world_least == DEFAULT_VAL else (
# None if world_least is None else :class:`int`(world_least)
# )
# self.world_least = self.world_most if world_most == DEFAULT_VAL else (
# None if world_most is None else :class:`int`(world_most)
# )
return self
def set_to_other(self, loc: "DFLocation") -> "DFLocation":
"""Imports another location's values into this one, making it identical.
Parameters
----------
loc : :class:`DFLocation`
Other location to set.
Returns
-------
:class:`DFLocation`
`self` to allow chaining
"""
return self.set(
loc.x, loc.y, loc.z, loc.pitch, loc.yaw, is_block=loc.is_block,
# world_least=loc.world_least, world_most=loc.world_most
)
def as_json_data(self) -> dict:
"""Obtain this location represented as a JSON object (:class:`dict`).
Returns
-------
:class:`dict`
"""
return dict(
id=constants.ITEM_ID_LOCATION,
data=dict(
isBlock=self.is_block,
loc=dict(
x=self.x,
y=self.y,
z=self.z,
pitch=self.pitch,
yaw=self.yaw
)
)
)
@classmethod
def from_json_data(cls, data: dict) -> "DFLocation":
"""Obtain variable from pre-existing parsed JSON data.
Parameters
----------
data : :class:`dict`
The parsed JSON :class:`dict`.
Returns
-------
:class:`DFLocation`
:class:`DFNumber` instance.
"""
required_attrs = ("x", "y", "z", "isBlock", "pitch", "yaw", "worldLeast", "worldMost")
if (
not isinstance(data, dict)
# or "id" not in data
or "data" not in data
or not isinstance(data["data"], dict)
or not all(attr in data["data"] for attr in required_attrs)
):
raise TypeError(
f"Malformed DFLocation parsed JSON data! Must be a dict with a 'data' dict including the \
following attributes: {', '.join(required_attrs)}."
)
d_dict = data["data"]
return cls(
d_dict.x, d_dict.y, d_dict.z, d_dict.pitch, d_dict.yaw,
is_block=d_dict.isBlock
)
def copy(self) -> "DFLocation":
"""Creates an identical copy of this location.
Returns
-------
:class:`DFLocation`
Copied location.
"""
| |
<reponame>caoxiaoyue/PyAutoGalaxy
import numpy as np
from typing import Callable, Dict, List, Union
import autoarray as aa
class OperateImage:
    """
    Packages methods which operate on the 2D image returned from the `image_2d_from` function of a light object
    (e.g. a `LightProfile`, `Galaxy`, `Plane`).

    The majority of methods apply data operators to the 2D image which perform tasks such as a 2D convolution or
    Fourier transform.

    The methods in `OperateImage` are inherited by light objects to provide a concise API. Subclasses must
    implement `image_2d_from`; the base-class version raises `NotImplementedError`.
    """
    def image_2d_from(self, grid: Union[aa.Grid2D, aa.Grid2DIterate]) -> aa.Array2D:
        """Return the light object's 2D image evaluated on `grid`.

        Abstract hook: concrete light objects must override this; the base
        implementation raises `NotImplementedError`.
        """
        raise NotImplementedError
def blurred_image_2d_via_psf_from(
self,
grid: Union[aa.Grid2D, aa.Grid2DIterate],
psf: aa.Kernel2D,
blurring_grid: Union[aa.Grid2D, aa.Grid2DIterate],
) -> aa.Array2D:
"""
Evaluate the light object's 2D image from a input 2D grid of coordinates and convolve it with a PSF.
The input 2D grid may be masked, in which case values outside but near the edge of the mask will convolve light
into the mask. A blurring grid is therefore required, which contains image pixels on the mask edge whose light
is blurred into the light object's image by the PSF.
The grid and blurring_grid must be a `Grid2D` objects so the evaluated image can be mapped to a uniform 2D
array and binned up for convolution. They therefore cannot be `Grid2DIrregular` objects.
Parameters
----------
grid
The 2D (y,x) coordinates of the (masked) grid, in its original geometric reference frame.
psf
The PSF the light object 2D image is convolved with.
blurring_grid
The 2D (y,x) coordinates neighboring the (masked) grid whose light is blurred into the image.
"""
image_2d = self.image_2d_from(grid=grid)
blurring_image_2d = self.image_2d_from(grid=blurring_grid)
return psf.convolved_array_with_mask_from(
array=image_2d.binned.native + blurring_image_2d.binned.native,
mask=grid.mask,
)
def blurred_image_2d_via_convolver_from(
self,
grid: Union[aa.Grid2D, aa.Grid2DIterate],
convolver: aa.Convolver,
blurring_grid: Union[aa.Grid2D, aa.Grid2DIterate],
) -> aa.Array2D:
"""
Evaluate the light object's 2D image from a input 2D grid of coordinates and convolve it with a PSF, using a
`autoarray.operators.convolver.Convolver` object. The `Convolver` object performs the 2D convolution operations
using 1D NumPy arrays without mapping them to 2D, which is more efficient.
The input 2D grid may be masked, in which case values outside but near the edge of the mask will convolve light
into the mask. A blurring grid is therefore required, which contains image pixels on the mask edge whose light
is blurred into the light object's image by the PSF.
The grid and blurring_grid must be a `Grid2D` objects so the evaluated image can be mapped to a uniform 2D
array and binned up for convolution. They therefore cannot be `Grid2DIrregular` objects.
Parameters
----------
grid
The 2D (y,x) coordinates of the (masked) grid, in its original geometric reference frame.
convolver
The convolver object used perform PSF convolution on 1D numpy arrays.
blurring_grid
The 2D (y,x) coordinates neighboring the (masked) grid whose light is blurred into the image.
"""
image_2d = self.image_2d_from(grid=grid)
blurring_image_2d = self.image_2d_from(grid=blurring_grid)
return convolver.convolve_image(
image=image_2d.binned, blurring_image=blurring_image_2d.binned
)
    def padded_image_2d_from(self, grid: Union[aa.Grid2D, aa.Grid2DIterate], psf_shape_2d) -> aa.Array2D:
        """
        Evaluate the light object's 2D image from a input 2D grid of padded coordinates, where this padding is
        sufficient to encapsulate all surrounding pixels that will blur light into the original image given the
        2D shape of the PSF's kernel.

        Convolving an unmasked 2D image with a PSF requires care, because at the edges of the 2D image the light
        profile values will not be evaluated beyond its edge, even though some of its light will be blurred into these
        edges.

        This function creates the padded image, such that the light profile is evaluated beyond the edge. The
        array will still require trimming to remove these additional pixels after convolution is performed.

        Parameters
        ----------
        grid
            The 2D (y,x) coordinates of the (masked) grid, in its original geometric reference frame.
        psf_shape_2d
            The 2D shape of the PSF the light object 2D image is convolved with.
        """
        # Grow the grid by the kernel's extent so edge flux is evaluated too.
        padded_grid = grid.padded_grid_from(kernel_shape_native=psf_shape_2d)
        return self.image_2d_from(grid=padded_grid)
def unmasked_blurred_image_2d_via_psf_from(self, grid, psf):
    """
    Evaluate the light object's 2D image on an unmasked grid and convolve it
    with a PSF.

    Because PSF blurring pulls in light from beyond the image edge, the grid is
    first padded so the profile is evaluated past the edge; after convolution
    the result is trimmed back to the original dimensions of the input grid.

    The grid must be a `Grid2D` object so the evaluated image can be mapped to
    a uniform 2D array and binned up for convolution; it therefore cannot be a
    `Grid2DIrregular` object.

    Parameters
    ----------
    grid
        The 2D (y,x) coordinates of the (masked) grid, in its original geometric reference frame.
    psf
        The PSF the light object 2D image is convolved with.
    """
    expanded_grid = grid.padded_grid_from(kernel_shape_native=psf.shape_native)
    expanded_image = self.image_2d_from(grid=expanded_grid)
    return expanded_grid.mask.unmasked_blurred_array_from(
        padded_array=expanded_image, psf=psf, image_shape=grid.mask.shape
    )
def visibilities_via_transformer_from(
    self, grid: Union[aa.Grid2D, aa.Grid2DIterate], transformer: aa.type.Transformer
) -> aa.Visibilities:
    """
    Evaluate the light object's 2D image on the input grid and Fourier
    transform it to uv-plane visibilities via a
    `autoarray.operators.transformer.Transformer` object.

    The input 2D grid may be masked, in which case values outside the mask are
    not evaluated; this does not impact the Fourier transform. The grid must be
    a `Grid2D` object for certain Fourier transforms to be valid, so it cannot
    be a `Grid2DIrregular` object.

    Parameters
    ----------
    grid
        The 2D (y,x) coordinates of the (masked) grid, in its original geometric reference frame.
    transformer
        The **PyAutoArray** `Transformer` object describing how the 2D image is Fourier transformed to visiblities
        in the uv-plane.
    """
    image_2d = self.image_2d_from(grid=grid)
    if np.any(image_2d):
        return transformer.visibilities_from(image=image_2d.binned)
    # The image is identically zero (e.g. a Galaxy with only mass profiles):
    # skip the expensive Fourier transform and return zero visibilities.
    return aa.Visibilities.zeros(
        shape_slim=(transformer.uv_wavelengths.shape[0],)
    )
class OperateImageList(OperateImage):
"""
Packages methods which operate on the list of 2D images returned from the `image_2d_list_from` function of a light
object which contains multiple light profiles (e.g. a `Galaxy`, `Plane`).
The majority of methods apply data operators to the list of 2D images which perform tasks such as a 2D convolution
of Fourier transform.
The methods in `OperateImageList` are inherited by light objects to provide a concise API.
"""
def image_2d_list_from(self, grid: Union[aa.Grid2D, aa.Grid2DIterate]):
    """
    Abstract hook: subclasses (e.g. a `Galaxy` or `Plane` with multiple light
    profiles) must return the list of 2D images evaluated on the input grid.
    """
    raise NotImplementedError
def blurred_image_2d_list_via_psf_from(
    self,
    grid: Union[aa.Grid2D, aa.Grid2DIterate],
    psf,
    blurring_grid: Union[aa.Grid2D, aa.Grid2DIterate],
) -> List[aa.Array2D]:
    """
    Evaluate the light object's list of 2D images on the input grid and
    convolve each image with a PSF.

    The input 2D grid may be masked; in that case pixels just outside the mask
    still blur light into it, so a blurring grid of those edge pixels is also
    required. Both grids must be `Grid2D` objects (not `Grid2DIrregular`) so
    each evaluated image can be mapped to a uniform 2D array and binned up for
    convolution.

    Parameters
    ----------
    grid
        The 2D (y,x) coordinates of the (masked) grid, in its original geometric reference frame.
    psf
        The PSF the light object 2D image is convolved with.
    blurring_grid
        The 2D (y,x) coordinates neighboring the (masked) grid whose light is blurred into the image.
    """
    images = self.image_2d_list_from(grid=grid)
    blurring_images = self.image_2d_list_from(grid=blurring_grid)
    blurred = []
    for img, blur_img in zip(images, blurring_images):
        # combine the in-mask image with its edge contribution, then convolve
        combined = img.binned.native + blur_img.binned.native
        blurred.append(
            psf.convolved_array_with_mask_from(array=combined, mask=grid.mask)
        )
    return blurred
def blurred_image_2d_list_via_convolver_from(
self,
grid: Union[aa.Grid2D, aa.Grid2DIterate],
convolver: aa.Convolver,
blurring_grid: Union[aa.Grid2D, aa.Grid2DIterate],
) -> List[aa.Array2D]:
"""
Evaluate the light object's list of 2D images from a input 2D grid of coordinates and convolve each image with
a PSF, using a `autoarray.operators.convolver.Convolver` object. | |
== 0 and scaled_matrix[1] == 0:
converted_h5ad.append((h5ad_file, 'X', assay))
else:
converted_h5ad.append((h5ad_file,'raw.X', assay))
return converted_h5ad
# Quality check final anndata created for cxg, sync up gene identifiers if necessary
def quality_check(adata):
    """
    Run sanity checks on the final cxg AnnData before writing it out.

    Fix: the original chained the checks with elif, so a NaN warning (or the
    presence of 'default_visualization' in uns) silently skipped the remaining
    checks. The checks are independent, so each now always runs. The gene-count
    check is also guarded against adata.raw being absent.
    """
    # any NaN in obs is suspicious but not fatal
    if adata.obs.isnull().values.any():
        print("WARNING: There is at least one 'NaN' value in the cxg anndata obs dataframe.")
    # the configured default visualization field must exist in obs
    if 'default_visualization' in adata.uns:
        if adata.uns['default_visualization'] not in adata.obs.values:
            sys.exit("The default_visualization field is not in the cxg anndata obs dataframe.")
    # the normalized layer should never contain more genes than raw
    if adata.raw is not None and len(adata.var.index.tolist()) > len(adata.raw.var.index.tolist()):
        sys.exit("There are more genes in normalized genes than in raw matrix.")
# Return value to be stored in disease field based on list of diseases from donor and sample
def clean_list(lst, exp_disease):
    """
    Collapse a comma-joined disease term string to a single ontology term id.

    Returns the experiment-level disease term id when it appears among the
    row's terms, otherwise the 'normal' term 'PATO:0000461'.
    """
    terms = lst.split(',')
    if exp_disease['term_id'] in terms:
        return exp_disease['term_id']
    return 'PATO:0000461'
# Determine reported disease as unique of sample and donor diseases, removing unreported value
def report_diseases(mxr_df, exp_disease):
    """
    Populate 'reported_diseases' and 'disease_ontology_term_id' on mxr_df, in place.

    'reported_diseases' becomes a stringified list of the unique sample+donor
    disease names per row (dropping the unreported placeholder);
    'disease_ontology_term_id' is the experiment-level term when present in the
    row's term ids, else 'PATO:0000461' (normal).
    NOTE(review): relies on the module-level constant `unreported_value` and on
    `clean_list`, both defined elsewhere in this file.
    """
    # per-row unique disease names across sample and donor columns, minus the unreported placeholder
    mxr_df['reported_diseases'] = mxr_df[['sample_diseases_term_name','donor_diseases_term_name']].stack().groupby(level=0).apply(lambda x: [i for i in x.unique() if i != unreported_value])
    # stringify the lists and strip quote characters for display
    mxr_df['reported_diseases'] = mxr_df['reported_diseases'].astype(dtype='string')
    mxr_df['reported_diseases'] = mxr_df['reported_diseases'].apply(lambda x: x.replace("'", ""))
    total_reported = mxr_df['reported_diseases'].unique()
    # normalize the "nothing reported anywhere" vs "mixed" cases
    if len(total_reported) == 1:
        if total_reported[0] == '[]':
            mxr_df['reported_diseases'] = '[]'
    elif '[]' in total_reported:
        mxr_df['reported_diseases'].replace({'[]':'none'}, inplace=True)
    # experiment-level disease decides the ontology term column
    if exp_disease == unreported_value:
        mxr_df['disease_ontology_term_id'] = ['PATO:0000461'] * len(mxr_df.index)
    else:
        mxr_df['disease_ontology_term_id'] = mxr_df['sample_diseases_term_id'] + ',' + mxr_df['donor_diseases_term_id']
        mxr_df['disease_ontology_term_id'] = mxr_df['disease_ontology_term_id'].apply(clean_list, exp_disease=exp_disease)
        # if every reported value is trivial or matches the experiment disease, clear the column
        exp_disease_aslist = '[{}]'.format(exp_disease['term_name'])
        if len([x for x in total_reported if x not in ['none', exp_disease_aslist,'[]']])==0:
            mxr_df['reported_diseases'] = '[]'
# Demultiplex experimental metadata by finding demultiplexed suspension
# Determine overlapping suspension, create library & demultiplexed suspension df
# get cell_metadata from that suspension, merge in library info
# merge with mxr_df on library
def demultiplex(lib_donor_df, library_susp, donor_susp, mfinal_obj):
    """
    Attach demultiplexed suspension metadata to a (library, author donor) dataframe.

    For each library/donor pair, find the suspension present in both the
    donor's and the library's suspension sets, fetch its related sample /
    suspension / donor metadata from Lattice, and merge it into lib_donor_df.

    Fixes: the error message referenced undefined names (donor, assoc_lib),
    raising NameError instead of reporting; DataFrame.append (removed in
    pandas 2.0) replaced with pd.concat.
    """
    susp_df = pd.DataFrame()
    lattice_donor = {}
    lattice_donor_col = []
    demult_susp_lst = []
    # map author donor labels to Lattice donor @ids
    for donor_map in mfinal_obj['donor_mappings']:
        lattice_donor[donor_map['label']] = donor_map['donor']
    for author_don in lib_donor_df['author_donor'].to_list():
        lattice_donor_col.append(lattice_donor[author_don])
    lib_donor_df['author_donor_@id'] = lattice_donor_col
    lib_donor_df['library_donor_@id'] = lib_donor_df['library_@id'] + "," + lib_donor_df['author_donor_@id']
    # for each (library, donor) pair find the suspension common to both
    for lib_donor_unique in lib_donor_df['library_donor_@id'].to_list():
        demult_susp = ''
        lib_uniq = lib_donor_unique.split(',')[0]
        donor_uniq = lib_donor_unique.split(',')[1]
        for susp in donor_susp[donor_uniq]:
            if susp in library_susp[lib_uniq]:
                demult_susp = susp
        if demult_susp == '':
            # fix: previously formatted with undefined names `donor`/`assoc_lib`
            print('ERROR: Could not find suspension for demultiplexed donor: {}, {}, {}'.format(donor_uniq, donor_susp[donor_uniq], library_susp[lib_uniq]))
        else:
            demult_susp_lst.append(demult_susp)
    lib_donor_df['suspension_@id'] = demult_susp_lst
    obj_type_subset = ['sample', 'suspension', 'donor']
    # pull sample/suspension/donor metadata once per unique suspension
    for susp in set(lib_donor_df['suspension_@id'].to_list()):
        values_to_add = {}
        susp_obj = lattice.get_object(susp, connection)
        relevant_objects = gather_objects(susp_obj, start_type='suspension')
        for obj_type in obj_type_subset:
            objs = relevant_objects.get(obj_type, [])
            if len(objs) == 1:
                gather_metdata(obj_type, cell_metadata[obj_type], values_to_add, objs)
            else:
                print('ERROR: Could not find suspension for demultiplexed donor: {}'.format(obj_type))
        row_to_add = pd.Series(values_to_add)
        # fix: DataFrame.append was removed in pandas 2.0; concat is the equivalent
        susp_df = pd.concat([susp_df, row_to_add.to_frame().T], ignore_index=True)
    lib_donor_df = lib_donor_df.merge(susp_df, left_on='suspension_@id', right_on='suspension_@id', how='left')
    return(lib_donor_df)
# For cell culture, tissue is not UBERON, use cell slims to get CL
def get_cell_slim(df_series, suffix):
    """
    Replace the cell-slims field with a tissue ontology term id, in place.

    Looks up the first quoted cell slim name against the OntologyTerm search
    endpoint and stores the resulting term id (plus `suffix`) in
    'tissue_ontology_term_id'. Exits on HTTP error or a non-unique match.
    NOTE(review): relies on module-level `server` and `connection`, defined
    elsewhere in this file.
    """
    # first quoted token of the slims string, URL-encoded for the query
    cell = df_series['sample_biosample_ontology_cell_slims'].split("'")[1].replace(" ", "+")
    df_series.drop(labels='sample_biosample_ontology_cell_slims', inplace=True)
    query_url = urljoin(server, 'search/?type=OntologyTerm&term_name=' + cell + '&format=json')
    r = requests.get(query_url, auth=connection.auth)
    try:
        r.raise_for_status()
    except requests.HTTPError:
        sys.exit("Error in getting cell slim as tissue ontology: {}".format(query_url))
    else:
        # require an unambiguous single match
        if r.json()['total']==1:
            df_series['tissue_ontology_term_id'] = r.json()['@graph'][0]['term_id'] + suffix
        else:
            # NOTE(review): message says "organ slim" though this path handles
            # cell slims — looks like a copy-paste; confirm intended wording
            sys.exit("Error in getting organ slim as tissue ontology: {}".format(query_url))
# Ontologize sex from donor.sex enum
def get_sex_ontology(donor_df):
    """
    Fill 'sex_ontology_term_id' on donor_df, in place, from the 'sex' column.

    'female'/'male' map to their PATO term ids; 'unknown' and 'mixed' map to
    the literal string 'unknown'; any other value aborts the run.
    """
    mapping = {
        'female': 'PATO:0000383',
        'male': 'PATO:0000384'
    }
    for value in donor_df['sex'].unique():
        rows = donor_df['sex'] == value
        if value in mapping:
            donor_df.loc[rows, 'sex_ontology_term_id'] = mapping[value]
        elif value in ('unknown', 'mixed'):
            donor_df.loc[rows, 'sex_ontology_term_id'] = 'unknown'
        else:
            sys.exit("Unexpected sex: {}".format(value))
# Make sure cxg_adata and cxg_adata_raw have same number of features
# If not, add implied zeros to csr, and add corresponding 'feature_is_filtered'
def add_zero(cxg_adata, cxg_adata_raw):
    """
    Pad the normalized AnnData with implicit-zero columns so it matches the
    raw AnnData's feature set, marking padded genes with feature_is_filtered.

    Returns a new AnnData when padding is needed, otherwise the input with
    feature_is_filtered set to False everywhere.
    """
    if cxg_adata_raw.shape[1] > cxg_adata.shape[1]:
        # genes present in raw but missing from the normalized matrix
        genes_add = [x for x in cxg_adata_raw.var.index.to_list() if x not in cxg_adata.var.index.to_list()]
        # widening a CSR to the raw shape implies zeros in the new columns
        new_matrix = sparse.csr_matrix((cxg_adata.X.data, cxg_adata.X.indices, cxg_adata.X.indptr), shape = cxg_adata_raw.shape)
        all_genes = cxg_adata.var.index.to_list()
        all_genes.extend(genes_add)
        # carry over raw var metadata for the full gene list
        new_var = pd.DataFrame(index=all_genes)
        new_var = pd.merge(new_var, cxg_adata_raw.var, left_index=True, right_index=True, how='left')
        new_var['feature_is_filtered'] = False
        new_var.loc[genes_add, 'feature_is_filtered'] = True
        new_adata = ad.AnnData(X=new_matrix, obs=cxg_adata.obs, var=new_var, uns=cxg_adata.uns, obsm=cxg_adata.obsm)
        # reorder columns to exactly match the raw matrix ordering
        new_adata = new_adata[:,cxg_adata_raw.var.index.to_list()]
        return(new_adata)
    else:
        cxg_adata.var['feature_is_filtered'] = False
        return(cxg_adata)
# Use cxg_adata_raw var to map ensembl IDs and use that as index, filter against ref_files[]
# Make sure the indices are the same order for both anndata objects & clean up var metadata
# WILL NEED TO ADD NEW BIOTYPE FOR CITE-SEQ
def set_ensembl(cxg_adata, cxg_adata_raw, redundant, feature_keys):
    """
    Re-index the normalized and raw AnnData objects on Ensembl gene IDs,
    filter features against the approved annotation references, and tag
    ERCC spike-ins with feature_biotype 'spike-in'.

    Fixes: removed leftover debug prints (print("HERE") and a full dump of
    cxg_adata.var); corrected 'genes_ids' typo in the warning message.
    """
    # normalize the biotype column name/values on the raw matrix
    if 'feature_types' in cxg_adata_raw.var.columns.to_list():
        cxg_adata_raw.var = cxg_adata_raw.var.rename(columns={'feature_types': 'feature_biotype'})
        cxg_adata_raw.var['feature_biotype'] = cxg_adata_raw.var['feature_biotype'].str.replace('Gene Expression', 'gene')
    else:
        cxg_adata_raw.var.insert(0, 'feature_biotype', 'gene')
    # drop all raw var metadata except the columns we keep
    keep = ['feature_biotype', 'gene_ids']
    remove = [x for x in cxg_adata_raw.var.columns.to_list() if x not in keep]
    for r in remove:
        cxg_adata_raw.var.drop(columns=r, inplace=True)
    if feature_keys == ['gene symbol']:
        if 'gene_ids' in cxg_adata_raw.var.columns.to_list():
            # Check for gene symbols that are redudant and have suffix
            # WILL NEED TO MAKE SURE SPLITTING ON '.' IS STILL APPROPRIATE FOR FUTURE DATASETS
            norm_index = set(cxg_adata.var.index.to_list())
            raw_index = set(cxg_adata_raw.var.index.to_list())
            drop_redundant_with_suffix = list(norm_index.difference(raw_index))
            for unmapped in drop_redundant_with_suffix:
                unmapped_split = unmapped.split(".")
                if unmapped_split[0] not in redundant:
                    logging.info('ERROR:\t{}\tnot redundant but unmapped'.format(unmapped))
            logging.info('drop_redundant_with_suffix\t{}\t{}'.format(len(drop_redundant_with_suffix), drop_redundant_with_suffix))
            cxg_adata = cxg_adata[:, [i for i in cxg_adata.var.index.to_list() if i not in drop_redundant_with_suffix]]
            cxg_adata_raw.var_names_make_unique()
            # map symbols to Ensembl IDs via the raw var, then re-index both on gene_ids
            cxg_adata.var = pd.merge(cxg_adata.var, cxg_adata_raw.var, left_index=True, right_index=True, how='left', copy = True)
            cxg_adata.var = cxg_adata.var.set_index('gene_ids', drop=True)
            cxg_adata_raw.var = cxg_adata_raw.var.set_index('gene_ids', drop=True)
            cxg_adata.var.index.name = None
            cxg_adata_raw.var.index.name = None
        else:
            # fix: message previously said nonexistent 'genes_ids' column
            print("WARNING: raw matrix does not have gene_ids column")
    elif feature_keys == ['Ensembl gene ID']:
        cxg_adata_raw.var_names_make_unique()
        cxg_adata_raw.var = cxg_adata_raw.var.set_index('gene_ids', drop=True)
        cxg_adata_raw.var.index.name = None
        cxg_adata.var.insert(0, 'feature_biotype', 'gene')
        unique_to_norm = set(cxg_adata.var.index.to_list()).difference(set(cxg_adata_raw.var.index.to_list()))
        if len(unique_to_norm) > 0:
            print("WARNING: normalized matrix contains Ensembl IDs not in raw: {}".format(unique_to_norm))
    # keep only features present in the approved annotation references
    compiled_annot = compile_annotations(ref_files)
    var_in_approved = cxg_adata.var.index[cxg_adata.var.index.isin(compiled_annot['feature_id'])]
    rawvar_in_approved = cxg_adata_raw.var.index[cxg_adata_raw.var.index.isin(compiled_annot['feature_id'])]
    cxg_adata = cxg_adata[:, var_in_approved]
    cxg_adata_raw = cxg_adata_raw[:, rawvar_in_approved]
    # ERCC spike-in features get their own biotype
    ercc_df = compile_annotations({'ercc':ref_files['ercc']})
    var_ercc = cxg_adata.var.index[cxg_adata.var.index.isin(ercc_df['feature_id'])]
    rawvar_ercc = cxg_adata_raw.var.index[cxg_adata_raw.var.index.isin(ercc_df['feature_id'])]
    cxg_adata.var.loc[var_ercc, 'feature_biotype'] = 'spike-in'
    cxg_adata_raw.var.loc[rawvar_ercc, 'feature_biotype'] = 'spike-in'
    return cxg_adata, cxg_adata_raw
# Reconcile genes if raw matrices annotated to multiple version by merging raw based on Ensembl ID
def reconcile_genes(mfinal_obj, cxg_adata_lst, mfinal_adata_genes):
    """
    Merge raw matrices annotated to different annotation versions on Ensembl ID,
    identifying redundant gene symbols and symbols that must be collapsed.

    Returns (merged raw AnnData indexed on gene symbol, dict of Ensembl ID ->
    symbols to collapse, list of redundant symbols).

    Fixes: the per-key debug CSV was written to a hard-coded personal absolute
    path (/Users/jenny/...) — now written to the working directory; the final
    logging.info format string had 4 placeholders for 5 arguments, silently
    dropping the last one.
    """
    redundant = []
    redundant_within_version = []
    chain_redundant_genes = []
    gene_multi_ensembl = []
    versions_checked = []
    genes_to_collapse_final = {}
    collapsed_need_to_only_switch_id = {}
    total_genes = []
    # bookkeeping buckets reported at the end
    stats = {}
    stats['redundant'] = []
    stats['redundant_within_version'] = []
    stats['chain_redundant_genes'] = []
    stats['gene_multi_ensembl'] = []
    stats['already_being_collapsed'] = []
    stats['not_collapsed_because_not_in_X'] = []
    stats['not_collapsed_because_ambiguous'] = []
    stats['collapsed_need_to_only_switch_id'] = []
    # Join raw matrices on ensembl, gene symbols stored as metadata
    for cxg_adata in cxg_adata_lst:
        cxg_adata.var['gene_symbols'] = cxg_adata.var.index
        cxg_adata.var = cxg_adata.var.set_index('gene_ids', drop=True)
    cxg_adata_raw_ensembl = cxg_adata_lst[0].concatenate(cxg_adata_lst[1:], index_unique=None, join='outer')
    # Join raw matrices on gene symbol, ensembl stored as metadata. Add suffix to make unique, using '.' as to match R default
    for cxg_adata in cxg_adata_lst:
        cxg_adata.var['gene_ids'] = cxg_adata.var.index
        cxg_adata.var = cxg_adata.var.set_index('gene_symbols', drop=True)
        cxg_adata.var_names_make_unique(join = '.')
    cxg_adata_raw_symbol = cxg_adata_lst[0].concatenate(cxg_adata_lst[1:], index_unique=None, join='outer')
    # Go through adata indexed on symbol to see which have > 1 Ensembl IDs
    gene_pd_symbol = cxg_adata_raw_symbol.var[[i for i in cxg_adata_raw_symbol.var.columns.values.tolist() if 'gene_ids' in i]]
    genes_to_drop_df = gene_pd_symbol[gene_pd_symbol.stack().groupby(level=0).apply(lambda x: len(x.unique())>1)==True]
    gene_multi_ensembl.extend(genes_to_drop_df.index.to_list())
    stats['gene_multi_ensembl'].extend(gene_multi_ensembl)
    redundant.extend(genes_to_drop_df.index.to_list())
    # Go through adata indexed on ensembl to see which have > 1 symbol
    gene_pd_ensembl = cxg_adata_raw_ensembl.var[[i for i in cxg_adata_raw_ensembl.var.columns.values.tolist() if 'gene_symbols' in i]]
    # Drop redundant genes symbols from normalized matrix within a single version
    gene_ensembl_columns_to_drop = []
    for i in range(len(gene_pd_ensembl.columns.to_list())):
        redundant_within_version.extend([item for item, count in collections.Counter(gene_pd_ensembl.iloc[:,i].dropna().to_list()).items() if count > 1])
    redundant_within_version = list(set(redundant_within_version))
    stats['redundant_within_version'].extend(redundant_within_version)
    redundant.extend(redundant_within_version)
    # Store potential collapses in dictionary
    genes_to_collapse_df = gene_pd_ensembl[gene_pd_ensembl.stack().groupby(level=0).apply(lambda x: len(x.unique())>1)==True]
    # fix: debug CSV previously went to a hard-coded personal absolute path
    genes_to_collapse_df.to_csv("collapse_df.csv", index=True, header=False)
    genes_to_collapse_dict = genes_to_collapse_df.to_dict(orient='index')
    # Clean up raw.var in outer join on ensembl and switch to gene symbol for index. Do not var_names_make_unique, or else may accidentally map redundant in normalized layer
    # Track total genes for those that are not being considered for collapse, so that we can evaluate cross version redundancy
    cxg_adata_raw_ensembl.var['gene_ids'] = cxg_adata_raw_ensembl.var.index
    cxg_adata_raw_ensembl.var['gene_symbols'] = gene_pd_ensembl.stack().groupby(level=0).apply(lambda x: x.unique()[0]).to_frame(name='gene_symbols')
    total_genes = cxg_adata_raw_ensembl.var.loc[[i for i in cxg_adata_raw_ensembl.var.index.to_list() if i not in genes_to_collapse_dict.keys()],:].index.to_list()
    cxg_adata_raw_ensembl.var = cxg_adata_raw_ensembl.var.set_index('gene_symbols', drop=True)
    cxg_adata_raw_ensembl.var.index.name = None
    # Go through possible collapse ensembl genes, and see which one need to actually be collapsed
    # Chain redundant genes need to be tracked for redundancy removal
    for gene in genes_to_collapse_dict.keys():
        total_genes.append(gene)
        genes_to_collapse_final[gene] = []
        chain_redundant_bool = False
        for symbol in genes_to_collapse_dict[gene].keys():
            total_genes.append(genes_to_collapse_dict[gene][symbol])
            if genes_to_collapse_dict[gene][symbol] not in genes_to_collapse_final[gene]:
                if genes_to_collapse_dict[gene][symbol] not in gene_multi_ensembl:
                    if genes_to_collapse_dict[gene][symbol] in mfinal_adata_genes:
                        genes_to_collapse_final[gene].append(genes_to_collapse_dict[gene][symbol])
                    else:
                        stats['not_collapsed_because_not_in_X'].append(genes_to_collapse_dict[gene][symbol])
                else:
                    stats['not_collapsed_because_ambiguous'].append(genes_to_collapse_dict[gene][symbol])
                    chain_redundant_bool = True
            else:
                stats['already_being_collapsed'].append(genes_to_collapse_dict[gene][symbol])
        # If gene not found in normalized layer, still need to make sure the gene symbol in raw and normalized are consistent
        # If only 1 gene symbol in the end remains, no longer need to collapse
        if len(genes_to_collapse_final[gene]) < 2:
            if len(genes_to_collapse_final[gene]) == 1:
                if genes_to_collapse_final[gene][0] not in cxg_adata_raw_ensembl.var.index.to_list():
                    for s in genes_to_collapse_dict[gene]:
                        if genes_to_collapse_dict[gene][s] in cxg_adata_raw_ensembl.var.index.to_list():
                            collapsed_need_to_only_switch_id[genes_to_collapse_dict[gene][s]] = genes_to_collapse_final[gene][0]
                            stats['collapsed_need_to_only_switch_id'].append(genes_to_collapse_dict[gene][s])
            genes_to_collapse_final.pop(gene, None)
        if chain_redundant_bool == True:
            for symbol in genes_to_collapse_dict[gene].keys():
                if genes_to_collapse_dict[gene][symbol] not in gene_multi_ensembl:
                    chain_redundant_genes.append(genes_to_collapse_dict[gene][symbol])
    cxg_adata_raw_ensembl.var.rename(index=collapsed_need_to_only_switch_id, inplace=True)
    stats['chain_redundant_genes'].extend(chain_redundant_genes)
    redundant.extend(chain_redundant_genes)
    stats['gene_multi_ensembl'] = gene_multi_ensembl
    stats['collapsed'] = list(genes_to_collapse_final.keys())
    stats['redundant'] = list(set(redundant))
    # report each bucket and its overlap with the normalized layer
    for key in stats:
        stats[key] = set(stats[key])
        overlap_norm = set(mfinal_adata_genes).intersection(stats[key])
        # fix: format string previously had only 4 slots for these 5 values
        logging.info("{}\t{}\t{}\t{}\t{}".format(key, len(stats[key]), len(overlap_norm), overlap_norm, stats[key]))
    return cxg_adata_raw_ensembl, genes_to_collapse_final, redundant
def main(mfinal_id):
mfinal_obj = lattice.get_object(mfinal_id, connection)
logging.basicConfig(filename='outfile_flattener.log', level=logging.INFO)
# confirm that the identifier you've provided corresponds to a ProcessedMatrixFile
mfinal_type = mfinal_obj['@type'][0]
summary_assay = ''
if mfinal_type != 'ProcessedMatrixFile':
sys.exit('{} is not a ProcessedMatrixFile, but a {}'.format(mfinal_id, mfinal_type))
if mfinal_obj['assays'] == ['snATAC-seq']:
summary_assay = 'ATAC'
elif mfinal_obj['assays'] == ['snRNA-seq'] or mfinal_obj['assays'] == ['scRNA-seq'] or\
mfinal_obj['assays'] == ['snRNA-seq', 'scRNA-seq'] or mfinal_obj['assays'] == ['spatial transcriptomics'] or\
mfinal_obj['assays'] == ['scRNA-seq', 'snRNA-seq']:
summary_assay = 'RNA'
else:
sys.exit("Unexpected assay types to generate cxg h5ad: {}".format(mfinal_obj['assays']))
# set the metadata keys based on defined metadata fields
headers = []
for obj_type in cell_metadata.keys():
for prop in cell_metadata[obj_type]:
latkey = (obj_type + '_' + prop).replace('.', '_')
key = prop_map.get(latkey, latkey)
headers.append(key)
# Dataframe that contains experimental metadata keyed off of raw matrix
df = pd.DataFrame()
results = {}
os.mkdir(tmp_dir)
download_file(mfinal_obj, tmp_dir)
# Get list of unique final cell identifiers
file_url = mfinal_obj['s3_uri']
file_ext = file_url.split('.')[-1]
mfinal_local_path = '{}/{}.{}'.format(tmp_dir, mfinal_obj['accession'], file_ext)
mfinal_adata = None
assays = []
converted_h5ad = []
for layer in mfinal_obj['layers']:
if 'assay' in layer:
assays.append(layer['assay'])
if mfinal_obj['file_format'] == 'hdf5' and re.search('h5ad$', mfinal_local_path):
mfinal_adata = sc.read_h5ad(mfinal_local_path)
elif mfinal_obj['file_format'] == 'rds':
converted_h5ad = convert_from_rds(mfinal_local_path, assays, tmp_dir, mfinal_obj['author_cell_type_column'])
mfinal_adata = sc.read_h5ad(converted_h5ad[0][0])
else:
sys.exit('Do not recognize file format or exention {} {}'.format(mfinal_obj['file_format'], mfinal_local_path))
mfinal_cell_identifiers = mfinal_adata.obs.index.to_list()
cxg_adata_lst = []
# get the list of matrix files that hold the raw counts corresponding to our Final Matrix
mxraws = gather_rawmatrices(mfinal_obj['derived_from'])
donor_susp | |
"""
Project: Asteroids
Author: <NAME>
Website: http://vu-tran.com/
Events:
- draw (DrawEvent)
- click (ClickEvent)
- keydown (KeyEvent)
- keyup (KeyEvent)
- count (TimerEvent)
"""
# import modules
import simplegui, math, random
# configurations
WINDOW_SIZE = (800, 600)  # game window (width, height) in pixels
NUM_LIVES = 3  # starting number of player lives
MAX_ROCK_COUNT = 6  # maximum rocks alive at once
# create cache stores for static assets
CACHE_STORE = {}  # url -> loaded image/sound, shared by Image and Sound
# define Event objects
class DrawEvent:
    """Event fired on every canvas redraw; carries the canvas and the Frame."""
    def __init__(self, canvas, frame):
        self.canvas = canvas
        self.frame = frame
class KeyEvent:
    """Event fired on key press/release; carries the key code."""
    def __init__(self, key):
        self.key = key
class ClickEvent:
    """Event fired on mouse click; carries the (x, y) click position."""
    def __init__(self, position):
        self.position = position
class TimerEvent:
    """Event fired on each timer tick; carries the elapsed time in ms."""
    def __init__(self, time):
        self.time = time
class GameEvent:
    """Event fired on game-state changes; carries the Score object."""
    def __init__(self, score):
        self.score = score
class Dispatcher:
    """
    Minimal publish/subscribe hub: handlers register under an event name and
    are invoked, in registration order, whenever that name is run.
    """
    def __init__(self):
        self.events = []
    def add(self, event_name, handler):
        """
        Registers a new event handler
        """
        self.events.append({"name": event_name, "handler": handler})
    def run(self, name, args):
        """
        Runs all events that matches the given name
        """
        for entry in self.events:
            # skip handlers registered under other event names
            if entry['name'] != name:
                continue
            entry['handler'](args)
class Frame:
    """
    Thin wrapper around a simplegui frame that also remembers the window
    size and forwards handler registration to the wrapped frame.
    """
    def __init__(self, frame, size):
        self.frame = frame
        self.size = size
    def get_size(self):
        """Return the (width, height) of the frame."""
        return self.size
    def get_width(self):
        """Return the frame width in pixels."""
        width, _height = self.size
        return width
    def get_height(self):
        """Return the frame height in pixels."""
        _width, height = self.size
        return height
    def get(self):
        """Return the wrapped simplegui frame object."""
        return self.frame
    def start(self):
        """Open the frame window."""
        self.get().start()
    def add_button(self, label, handler):
        """Add a control-panel button with the given label and handler."""
        self.get().add_button(label, handler)
    def set_draw_handler(self, handler):
        """Register the per-frame draw callback."""
        self.get().set_draw_handler(handler)
    def set_mouseclick_handler(self, handler):
        """Register the mouse-click callback."""
        self.get().set_mouseclick_handler(handler)
    def set_keydown_handler(self, handler):
        """Register the key-press callback."""
        self.get().set_keydown_handler(handler)
    def set_keyup_handler(self, handler):
        """Register the key-release callback."""
        self.get().set_keyup_handler(handler)
class Timer:
    """
    Wraps a simplegui timer and accumulates elapsed time, broadcasting a
    'count' TimerEvent through the global dispatcher on every tick.
    """
    def __init__(self, delay):
        # elapsed time (ms) and tick interval (ms)
        self.time = 0
        self.delay = delay
        # the underlying simplegui timer calls self.count each tick
        self.timer = simplegui.create_timer(delay, self.count)
    def get_timer(self):
        """Return the underlying simplegui timer."""
        return self.timer
    def get_time(self):
        """Return the total elapsed time in ms."""
        return self.time
    def count(self):
        """Tick handler: advance the clock and fire a 'count' event."""
        self.time = self.time + self.delay
        dispatcher.run('count', TimerEvent(self.get_time()))
    def start(self):
        """Start ticking."""
        self.get_timer().start()
class Image:
    """
    Wrapper around a simplegui image: caches loads by URL, tracks source
    size/center, and provides static and sprite-sheet draw helpers.

    Fix: replaced deprecated dict.has_key() (removed in Python 3) with the
    `in` operator, which works in both Python 2 and 3.
    """
    def __init__(self, url, size, center = None):
        # sets the url
        self.url = url
        # load the image, reusing a previously loaded copy when possible
        if url in CACHE_STORE:
            self.image = CACHE_STORE[url]
        else:
            self.image = simplegui.load_image(url)
            CACHE_STORE[url] = self.image
        # sets the dimensions of the image
        self.set_size(size)
        # default the source center to the middle of the image
        if center is None:
            self.set_center((size[0] / 2, size[1] / 2))
        else:
            self.set_center(center)
    def set_size(self, size):
        self.size = size
    def get_size(self):
        return self.size
    def set_center(self, center):
        self.center = center
    def get_center(self):
        return self.center
    def get_image(self):
        return self.image
    def draw_at(self, canvas, center_dest, size_dest, rotation = 0):
        """
        Draws the image into the canvas at the given
        center destination and size
        """
        canvas.draw_image(self.get_image(), self.get_center(), self.get_size(), center_dest, size_dest, rotation)
    def draw_animated_at(self, canvas, center_dest, size_dest, rotation = 0, age = 0):
        """
        Draws frame `age` of a horizontal sprite sheet at the given
        center destination and size.
        """
        center = self.get_center()
        size = self.get_size()
        # shift the source window right by one frame width per unit of age
        anim_center = (center[0] + (age * size[0]), center[1])
        canvas.draw_image(self.get_image(), anim_center, size, center_dest, size_dest, rotation)
class Sound:
    """
    Wrapper around a simplegui sound, caching loads by URL.

    Fix: replaced deprecated dict.has_key() (removed in Python 3) with the
    `in` operator, which works in both Python 2 and 3.
    """
    def __init__(self, url):
        self.url = url
        # reuse a previously loaded copy when possible
        if url in CACHE_STORE:
            self.sound = CACHE_STORE[url]
        else:
            self.sound = simplegui.load_sound(url)
            CACHE_STORE[url] = self.sound
    def get_sound(self):
        return self.sound
    def play(self):
        self.sound.play()
    def pause(self):
        self.sound.pause()
    def rewind(self):
        self.sound.rewind()
class Score:
    """
    Tracks remaining lives and the current score, and renders both in the
    top corners of the canvas.
    """
    def __init__(self, lives):
        self.initial_lives = lives
        self.lives = lives
        self.score = 0
    def reset(self):
        """Restore the starting lives and zero the score."""
        self.score = 0
        self.lives = self.initial_lives
    def decrease_lives(self):
        """Remove one life."""
        self.lives = self.lives - 1
    def get_lives(self):
        return self.lives
    def increment_score(self):
        """Add one point."""
        self.score = self.score + 1
    def get_score(self):
        return self.score
    def draw(self, draw_event):
        """Render lives (top-left) and score (top-right) on the canvas."""
        gutter_size = 24
        font_size = 24
        font_color = 'white'
        lives_text = 'Lives: ' + str(self.get_lives())
        score_text = 'Score: ' + str(self.get_score())
        # measure text so the score can be right-aligned
        lives_text_width = draw_event.frame.get().get_canvas_textwidth(lives_text, font_size)
        score_text_width = draw_event.frame.get().get_canvas_textwidth(score_text, font_size)
        baseline = gutter_size + (font_size / 2)
        draw_event.canvas.draw_text(lives_text, (gutter_size, baseline), font_size, font_color)
        draw_event.canvas.draw_text(score_text, (draw_event.frame.get_width() - score_text_width - gutter_size, baseline), font_size, font_color)
class Game:
    """
    Base game window.

    Owns a Frame (simplegui window wrapper) and a Timer (25ms tick), and
    forwards the frame's draw/mouse/key callbacks to the module-level
    `dispatcher` as DrawEvent/ClickEvent/KeyEvent objects, so game layers
    subscribe via dispatcher.add(...) rather than talking to simplegui.
    """
    def __init__(self, size):
        """
        Creates a new game window
        <tuple> size
        """
        # sets the window's size
        self.set_window_size(size)
        # creates the Frame
        self.frame = self.create_frame()
        # creates a Timer
        self.timer = self.create_timer()
    def set_window_size(self, size):
        """
        Sets the game window's size
        <tuple> size
        """
        self.window_size = size
    def get_window_size(self):
        """
        Gets the game window's size
        """
        return self.window_size
    def get_size(self):
        """
        Alias of self.get_window_size()
        """
        return self.get_window_size()
    def get_window_width(self):
        # width is the first component of (width, height)
        return self.get_window_size()[0]
    def get_window_height(self):
        # height is the second component of (width, height)
        return self.get_window_size()[1]
    def get_center(self):
        """
        Retrieves the center of the window
        """
        return (self.get_window_width() / 2, self.get_window_height() / 2)
    def create_frame(self):
        """
        Creates and returns a new Frame instance and set's the draw handler
        """
        sg_frame = simplegui.create_frame("Game", self.get_window_width(), self.get_window_height())
        # create a new Frame instance
        frame = Frame(sg_frame, self.get_window_size())
        # sets the draw handler
        frame.set_draw_handler(self.draw)
        # sets the mouse click handler
        frame.set_mouseclick_handler(self.onclick)
        # sets the keydown handler
        frame.set_keydown_handler(self.onkeydown)
        # sets the keyup handler
        frame.set_keyup_handler(self.onkeyup)
        # return the Frame instance
        return frame
    def get_frame(self):
        """
        Retrieve the Frame instance
        """
        return self.frame
    def create_timer(self):
        """
        Creates and returns a new Timer instance
        """
        # creates a timer (calls each 25ms)
        timer = Timer(25)
        return timer
    def get_timer(self):
        """
        Returns the Timer instance
        """
        return self.timer
    def start(self):
        """
        Starts the game (opens the game frame)
        """
        # starts timer
        self.get_timer().start()
        # starts frame
        self.get_frame().get().start()
    def draw(self, canvas):
        """
        Draw handler: wraps the canvas in a DrawEvent and broadcasts 'draw'.
        """
        # create a DrawEvent
        draw_event = DrawEvent(canvas, self.get_frame())
        dispatcher.run('draw', draw_event)
    def onclick(self, position):
        """
        Mouseclick handler: broadcasts a ClickEvent under 'click'.
        """
        click_event = ClickEvent(position)
        dispatcher.run('click', click_event)
    def onkeydown(self, key):
        """
        Keydown handler: broadcasts a KeyEvent under 'keydown'.
        """
        key_event = KeyEvent(key)
        dispatcher.run('keydown', key_event)
    def onkeyup(self, key):
        """
        Keyup handler: broadcasts a KeyEvent under 'keyup'.
        """
        key_event = KeyEvent(key)
        dispatcher.run('keyup', key_event)
class AsteroidsGame(Game):
def __init__(self, size, lives, max_rock_count):
    """
    Create the Asteroids game: background, player, rock and explosion
    groups, score display, splash screen, and event subscriptions.

    <tuple> size, <int> lives, <int> max_rock_count
    """
    # calls the parent constructor
    Game.__init__(self, size)
    # set the initial game flag
    self.started = False
    # set the initial lives
    self.lives = lives
    # create the Background layer
    self.create_background()
    # creates the Player layer
    self.create_player()
    # spawn rocks every second (1000ms)
    self.spawn_rocks_delay = 1000
    # create a new set to hold rocks (capped at max_rock_count)
    self.rocks = Group(set(), max_rock_count)
    # creates a new Score layer
    self.score = self.create_score(lives)
    # creates the splash screen
    self.create_splash()
    # create a new group to hold explosions
    self.explosions = Group(set())
    # register events: collisions and spawning run on every timer tick
    dispatcher.add('count', self.check_collisions)
    dispatcher.add('count', self.spawn_rocks)
    dispatcher.add('game_event', self.check_game)
def create_splash(self):
    """
    Build the splash screen centred in the window and hook game start
    to the mouse-click event.
    """
    splash_size = (400, 300)
    window = self.get_size()
    midpoint = (window[0] / 2, window[1] / 2)
    self.splash = SplashScreen(splash_size, midpoint)
    dispatcher.add('click', self.start_game)
def create_background(self):
    """Create the space backdrop and the animated debris layer."""
    window_size = self.get_size()
    self.space = Space(window_size)
    self.debris = Debris(window_size, self.get_timer())
def create_player(self):
    """Create the player's spaceship and wire keyboard events to it."""
    self.player = PlayerSpaceship((90, 90), self.get_center())
    dispatcher.add('keydown', self.handle_keydown)
    dispatcher.add('keyup', self.handle_keyup)
def spawn_rocks(self, timer_event):
    """
    Timer ('count') handler that periodically adds a rock.

    Parameters:
        timer_event: event exposing a ``time`` attribute (elapsed ms).
    """
    # do nothing until the splash screen has been dismissed
    if not self.started:
        return
    # BUG FIX: the original used ``is 0`` -- identity, not equality --
    # which only works by accident of CPython small-int caching and
    # emits a SyntaxWarning on Python >= 3.8; use ``== 0``.
    if timer_event.time % self.spawn_rocks_delay == 0:
        # the rocks Group enforces max_rock_count on add
        self.rocks.add(self.create_rock())
def create_rock(self):
    """Build one rock with randomised position, velocity, spin and drift."""
    # random spot anywhere in the window
    x = random.randrange(0, self.get_window_width())
    y = random.randrange(0, self.get_window_height())
    # random direction, speed 1-3 per axis
    vx = random.choice([-1, 1]) * random.randint(1, 3)
    vy = random.choice([-1, 1]) * random.randint(1, 3)
    # slow random spin in either direction
    spin = random.choice([-1, 1]) * random.random() * 0.05
    # small random acceleration (drawn before Rock() to keep the RNG
    # call order identical to the original)
    accel = random.random() / 5
    rock = Rock((90, 90), (x, y), (vx, vy), 0, spin)
    rock.set_acceleration(accel)
    return rock
def handle_keyup(self, key_event):
    """Forward key-release events to the player once the game is running."""
    # idiom fix: truth-test the flag instead of ``is True`` identity check
    if self.started:
        self.player.onkeyup(key_event)
def handle_keydown(self, key_event):
    """Forward key-press events to the player once the game is running."""
    # idiom fix: truth-test the flag instead of ``is True`` identity check
    if self.started:
        self.player.onkeydown(key_event)
def check_collisions(self, timer_event):
    """
    Timer ('count') handler that resolves rock collisions.

    Each rock is tested against the player's spaceship and against the
    player's missiles; a hit removes the rock, spawns an explosion and
    publishes a GameEvent with the updated score/lives.
    """
    # continue only if the game has started and rocks exist
    if not self.started:
        return
    if not self.rocks.exists():
        return
    # iterate over a snapshot so removing rocks while looping is safe
    for rock in list(self.rocks.get_all()):
        # collision with the player's spaceship costs a life
        if rock.collide(self.player):
            self.rocks.remove(rock)
            self.create_explosion(rock.get_position())
            self.score.decrease_lives()
            dispatcher.run('game_event', GameEvent(self.score))
            # BUG FIX: the rock is already removed -- without this
            # ``continue`` the original went on to test it against the
            # missiles and could remove it (and score it) a second time
            continue
        # collision with any missile scores a point
        collided_missle = rock.collide_group(self.player.get_missles())
        if collided_missle is not False:
            self.rocks.remove(rock)
            self.create_explosion(rock.get_position())
            self.player.get_missles().remove(collided_missle)
            self.score.increment_score()
            dispatcher.run('game_event', GameEvent(self.score))
def create_explosion(self, position):
    """Spawn a 128x128 explosion animation at ``position``."""
    self.explosions.add(Explosion((128, 128), position))
def create_score(self, lives):
| |
**ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer that 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
    """
    Report whether an operation supports pagination.

    ``operation_name`` is the snake_case client method name: for a method
    invoked as ``client.create_foo(**kwargs)`` pass ``create_foo``. When
    the operation can be paginated, ``client.get_paginator("create_foo")``
    may be used instead.

    :type operation_name: string
    :param operation_name: The operation name (same as the client method name).
    :return: ``True`` if the operation can be paginated,
        ``False`` otherwise.
    """
    pass
def create_document_classifier(self, DocumentClassifierName: str, DataAccessRoleArn: str, InputDataConfig: Dict, LanguageCode: str, Tags: List = None, OutputDataConfig: Dict = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Creates a new document classifier that you can use to categorize
    documents. To create a classifier, provide a set of training documents
    labeled with the categories you want to use; after training, the
    classifier can categorize new documents. See how-document-classification.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/CreateDocumentClassifier>`_

    **Response Syntax**
    ::
        {
            'DocumentClassifierArn': 'string'
        }

    **Response Structure**
    - *(dict) --*
      - **DocumentClassifierArn** *(string) --* The Amazon Resource Name
        (ARN) that identifies the document classifier.

    :type DocumentClassifierName: string
    :param DocumentClassifierName: **[REQUIRED]** The name of the document classifier.
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]** ARN of the IAM role that grants
        Amazon Comprehend read access to your input data.
    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]** Format and location of the input
        data; ``S3Uri`` (required) may point to a single file or a prefix
        covering a collection of input files in the same region.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]** Language of the input documents
        (``en``, ``es``, ``fr``, ``de``, ``it`` or ``pt``); all documents
        must be in the same language.
    :type Tags: list
    :param Tags: Key-value pairs attached as metadata to the classifier
        (``Key`` required, ``Value`` optional).
    :type OutputDataConfig: dict
    :param OutputDataConfig: Optional output configuration; ``S3Uri`` is the
        prefix where the confusion matrix (``output.tar.gz``) is written,
        ``KmsKeyId`` encrypts the output (key ID, key ARN, alias, or alias ARN).
    :type ClientRequestToken: string
    :param ClientRequestToken: Unique identifier for the request;
        autopopulated if not provided.
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId: KMS key (ID or ARN) used to encrypt data on the
        storage volume of the ML compute instance(s) processing the job.
    :rtype: dict
    :returns:
    """
    pass
def create_entity_recognizer(self, RecognizerName: str, DataAccessRoleArn: str, InputDataConfig: Dict, LanguageCode: str, Tags: List = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
"""
Creates an entity recognizer using submitted files. After your ``CreateEntityRecognizer`` request is submitted, you can check job status using the API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/CreateEntityRecognizer>`_
**Request Syntax**
::
response = client.create_entity_recognizer(
RecognizerName='string',
DataAccessRoleArn='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
InputDataConfig={
'EntityTypes': [
{
'Type': 'string'
},
],
'Documents': {
'S3Uri': 'string'
},
'Annotations': {
'S3Uri': 'string'
},
'EntityList': {
'S3Uri': 'string'
}
},
ClientRequestToken='string',
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',
VolumeKmsKeyId='string'
)
**Response Syntax**
::
{
'EntityRecognizerArn': 'string'
}
**Response Structure**
- *(dict) --*
- **EntityRecognizerArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the entity recognizer.
:type RecognizerName: string
:param RecognizerName: **[REQUIRED]**
The name given to the newly created recognizer. Recognizer names can be a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The name must be unique in the account/region.
:type DataAccessRoleArn: string
:param DataAccessRoleArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.
:type Tags: list
:param Tags:
Tags to be associated with the entity recognizer being created. A tag is a key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with \"Sales\" as the key might be added to a resource to indicate its use by the sales department.
- *(dict) --*
A key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with the key-value pair ‘Department’:’Sales’ might be added to a resource to indicate its use by a particular department.
- **Key** *(string) --* **[REQUIRED]**
The initial part of a | |
<reponame>bloomdt-uw/pyAFQ
from dipy.align import resample
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import (AveragePointwiseEuclideanMetric,
ResampleFeature)
from dipy.io.streamline import load_tractogram, load_trk
from dipy.data.fetcher import _make_fetcher
import dipy.data as dpd
from io import BytesIO
import gzip
import os
import os.path as op
import json
from glob import glob
import shutil
import boto3
import s3fs
import numpy as np
import pandas as pd
import logging
import time
from bids import BIDSLayout
import bids.config as bids_config
# Newer pybids versions require opting in to extensions that include the
# leading dot; older versions do not know the option and raise ValueError,
# which is safe to ignore.
try:
    bids_config.set_option('extension_initial_dot', True)
except ValueError:
    pass
from botocore import UNSIGNED
from botocore.client import Config
from dask import compute, delayed
from dask.diagnostics import ProgressBar
from pathlib import Path
from tqdm.auto import tqdm
import nibabel as nib
# capture templateflow resource warning and log:
# while importing templateflow, temporarily route Python warnings through
# the logging system so its ResourceWarning lands in the log (with a bare
# message format) instead of on stderr.
import warnings
default_warning_format = warnings.formatwarning
try:
    # strip the file/lineno boilerplate from captured warning messages
    warnings.formatwarning = lambda msg, *args, **kwargs: f'{msg}'
    logging.captureWarnings(True)
    pywarnings_logger = logging.getLogger('py.warnings')
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    pywarnings_logger.addHandler(console_handler)
    # make templateflow's ResourceWarnings visible (they are ignored by default)
    warnings.filterwarnings(
        "default", category=ResourceWarning,
        module="templateflow")
    from templateflow import api as tflow
finally:
    # restore normal warning handling whether or not the import succeeded
    logging.captureWarnings(False)
    warnings.formatwarning = default_warning_format
# Public API of this module.
__all__ = ["fetch_callosum_templates", "read_callosum_templates",
           "fetch_or_templates", "read_or_templates",
           "fetch_templates", "read_templates", "fetch_hcp",
           "fetch_stanford_hardi_tractography",
           "read_stanford_hardi_tractography",
           "organize_stanford_data"]

# Map from RecoBundles bundle abbreviations to the AFQ naming scheme.
BUNDLE_RECO_2_AFQ = \
    {
        "AF_L": "ARC_L", "AF_R": "ARC_R",
        "UF_L": "UNC_L", "UF_R": "UNC_R",
        "IFOF_L": "IFO_L", "IFOF_R": "IFO_R",
        "CST_L": "CST_L", "CST_R": "CST_R",
        "ILF_L": "ILF_L", "ILF_R": "ILF_R",
        "SLF_L": "SLF_L", "SLF_R": "SLF_R"
    }

# Map from Matlab-AFQ bundle names (both the spaced and the CamelCase
# spellings) to the pyAFQ abbreviations.
BUNDLE_MAT_2_PYTHON = \
    {'Right Corticospinal': 'CST_R', 'Left Corticospinal': 'CST_L',
     'RightCorticospinal': 'CST_R', 'LeftCorticospinal': 'CST_L',
     'Right Uncinate': 'UNC_R', 'Left Uncinate': 'UNC_L',
     'RightUncinate': 'UNC_R', 'LeftUncinate': 'UNC_L',
     'Left IFOF': 'IFO_L', 'Right IFOF': 'IFO_R',
     'LeftIFOF': 'IFO_L', 'RightIFOF': 'IFO_R',
     'Right Arcuate': 'ARC_R', 'Left Arcuate': 'ARC_L',
     'RightArcuate': 'ARC_R', 'LeftArcuate': 'ARC_L',
     'Right Thalamic Radiation': 'ATR_R', 'Left Thalamic Radiation': 'ATR_L',
     'RightThalamicRadiation': 'ATR_R', 'LeftThalamicRadiation': 'ATR_L',
     'Right Cingulum Cingulate': 'CGC_R', 'Left Cingulum Cingulate': 'CGC_L',
     'RightCingulumCingulate': 'CGC_R', 'LeftCingulumCingulate': 'CGC_L',
     'Right Cingulum Hippocampus': 'HCC_R',
     'Left Cingulum Hippocampus': 'HCC_L',
     'RightCingulumHippocampus': 'HCC_R',
     'LeftCingulumHippocampus': 'HCC_L',
     'Callosum Forceps Major': 'FP', 'Callosum Forceps Minor': 'FA',
     'CallosumForcepsMajor': 'FP', 'CallosumForcepsMinor': 'FA',
     'Right ILF': 'ILF_R', 'Left ILF': 'ILF_L',
     'RightILF': 'ILF_R', 'LeftILF': 'ILF_L',
     'Right SLF': 'SLF_R', 'Left SLF': 'SLF_L',
     'RightSLF': 'SLF_R', 'LeftSLF': 'SLF_L'}
# Local cache directory for all downloaded AFQ data.
afq_home = op.join(op.expanduser('~'), 'AFQ_data')
# All template files below are hosted on figshare; each remote name is a
# figshare file id appended to this base URL.
baseurl = "https://ndownloader.figshare.com/files/"

# Callosum templates: the midsagittal ROI plus left/right cortical targets.
# The three lists below are parallel (filename, figshare id, md5 checksum).
callosum_fnames = ["Callosum_midsag.nii.gz",
                   "L_AntFrontal.nii.gz",
                   "L_Motor.nii.gz",
                   "L_Occipital.nii.gz",
                   "L_Orbital.nii.gz",
                   "L_PostParietal.nii.gz",
                   "L_SupFrontal.nii.gz",
                   "L_SupParietal.nii.gz",
                   "L_Temporal.nii.gz",
                   "R_AntFrontal.nii.gz",
                   "R_Motor.nii.gz",
                   "R_Occipital.nii.gz",
                   "R_Orbital.nii.gz",
                   "R_PostParietal.nii.gz",
                   "R_SupFrontal.nii.gz",
                   "R_SupParietal.nii.gz",
                   "R_Temporal.nii.gz"]
callosum_remote_fnames = ["5273794", "5273797", "5273800", "5273803",
                          "5273806", "5273809", "5273812", "5273815",
                          "5273821", "5273818", "5273824", "5273827",
                          "5273830", "5273833", "5273836", "5273839",
                          "5273842"]
callosum_md5_hashes = ["709fa90baadeacd64f1d62b5049a4125",
                       "987c6169de807c4e93dc2cbd7a25d506",
                       "0da114123d0b0097b96fe450a459550b",
                       "6d845bd10504f67f1dc17f9000076d7e",
                       "e16c7873ef4b08d26b77ef746dab8237",
                       "47193fd4df1ea17367817466de798b90",
                       "7e78bf9671e6945f4b2f5e7c30595a3c",
                       "8adbb947377ff7b484c88d8c0ffc2125",
                       "0fd981a4d0847e0642ff96e84fe44e47",
                       "87c4855efa406d8fb004cffb8259180e",
                       "c7969bcf5f2343fd9ce9c49b336cf14c",
                       "bb4372b88991932150205ffb22aa6cb7",
                       "d198d4e7db18ddc7236cf143ecb8342e",
                       "d0f6edef64b0c710c92e634496085dda",
                       "85eaee44665f244db5adae2e259833f6",
                       "25f24eb22879a05d12bda007c81ea55a",
                       "2664e0b8c2d9c59f13649a89bfcce399"]
# Dipy fetcher: downloads (and checksums) the files above on first use.
fetch_callosum_templates = _make_fetcher("fetch_callosum_templates",
                                         op.join(afq_home,
                                                 'callosum_templates'),
                                         baseurl, callosum_remote_fnames,
                                         callosum_fnames,
                                         md5_list=callosum_md5_hashes,
                                         doc="Download AFQ callosum templates")
def read_callosum_templates(resample_to=False):
    """Load AFQ callosum templates from file.

    Parameters
    ----------
    resample_to : str or nibabel image class instance, optional
        If given, every template is resampled into this image's space
        (a string is treated as a path and loaded first).

    Returns
    -------
    dict with: keys: names of template ROIs and values: nibabel Nifti1Image
    objects from each of the ROI nifti files.
    """
    logger = logging.getLogger('AFQ.data')
    files, folder = fetch_callosum_templates()
    logger.debug('loading callosum templates')
    tic = time.perf_counter()
    template_dict = {}
    for fname in files:
        template_img = nib.load(op.join(folder, fname))
        if resample_to:
            if isinstance(resample_to, str):
                resample_to = nib.load(resample_to)
            resampled = resample(template_img.get_fdata(),
                                 resample_to,
                                 template_img.affine,
                                 resample_to.affine).get_fdata()
            template_img = nib.Nifti1Image(resampled, resample_to.affine)
        # key by the filename stem (text before the first '.')
        template_dict[fname.split('.')[0]] = template_img
    toc = time.perf_counter()
    logger.debug(f'callosum templates loaded in {toc - tic:0.4f} seconds')
    return template_dict
def read_resample_roi(roi, resample_to=None, threshold=False):
    """
    Read an ROI from a file-name/img and resample it to conform with
    another file-name/img.

    Parameters
    ----------
    roi : str or nibabel image class instance.
        Should contain a binary volume with 1s in the region of interest
        and 0s elsewhere.
    resample_to : str or nibabel image class instance, optional
        A template image to resample to. Typically, this should be the
        template to which individual-level data are registered. Defaults
        to the MNI template.
    threshold : bool or float
        If set to False (default), the resampled result is returned.
        Otherwise, the resampled result is thresholded at this value and
        binarized. Not applied if the input ROI is already in the space
        of the output.

    Returns
    -------
    nibabel image class instance containing the binary ROI resampled into
    the requested space.
    """
    if isinstance(roi, str):
        roi = nib.load(roi)
    if resample_to is None:
        resample_to = read_mni_template()
    if isinstance(resample_to, str):
        resample_to = nib.load(resample_to)
    # already in the target space: return the input untouched
    if np.allclose(resample_to.affine, roi.affine):
        return roi
    data = resample(
        roi.get_fdata(),
        resample_to,
        roi.affine,
        resample_to.affine).get_fdata()
    if threshold:
        data = (data > threshold).astype(int)
    return nib.Nifti1Image(data, resample_to.affine)
# Waypoint ROIs (roi1/roi2) and probability maps for each major bundle.
# The three lists below are parallel (filename, figshare id, md5 checksum).
template_fnames = ["ATR_roi1_L.nii.gz",
                   "ATR_roi1_R.nii.gz",
                   "ATR_roi2_L.nii.gz",
                   "ATR_roi2_R.nii.gz",
                   "ATR_L_prob_map.nii.gz",
                   "ATR_R_prob_map.nii.gz",
                   "CGC_roi1_L.nii.gz",
                   "CGC_roi1_R.nii.gz",
                   "CGC_roi2_L.nii.gz",
                   "CGC_roi2_R.nii.gz",
                   "CGC_L_prob_map.nii.gz",
                   "CGC_R_prob_map.nii.gz",
                   "CST_roi1_L.nii.gz",
                   "CST_roi1_R.nii.gz",
                   "CST_roi2_L.nii.gz",
                   "CST_roi2_R.nii.gz",
                   "CST_L_prob_map.nii.gz",
                   "CST_R_prob_map.nii.gz",
                   "FA_L.nii.gz",
                   "FA_R.nii.gz",
                   "FA_prob_map.nii.gz",
                   "FP_L.nii.gz",
                   "FP_R.nii.gz",
                   "FP_prob_map.nii.gz",
                   "HCC_roi1_L.nii.gz",
                   "HCC_roi1_R.nii.gz",
                   "HCC_roi2_L.nii.gz",
                   "HCC_roi2_R.nii.gz",
                   "HCC_L_prob_map.nii.gz",
                   "HCC_R_prob_map.nii.gz",
                   "IFO_roi1_L.nii.gz",
                   "IFO_roi1_R.nii.gz",
                   "IFO_roi2_L.nii.gz",
                   "IFO_roi2_R.nii.gz",
                   "IFO_L_prob_map.nii.gz",
                   "IFO_R_prob_map.nii.gz",
                   "ILF_roi1_L.nii.gz",
                   "ILF_roi1_R.nii.gz",
                   "ILF_roi2_L.nii.gz",
                   "ILF_roi2_R.nii.gz",
                   "ILF_L_prob_map.nii.gz",
                   "ILF_R_prob_map.nii.gz",
                   "SLF_roi1_L.nii.gz",
                   "SLF_roi1_R.nii.gz",
                   "SLF_roi2_L.nii.gz",
                   "SLF_roi2_R.nii.gz",
                   "SLFt_roi2_L.nii.gz",
                   "SLFt_roi2_R.nii.gz",
                   "SLF_L_prob_map.nii.gz",
                   "SLF_R_prob_map.nii.gz",
                   "UNC_roi1_L.nii.gz",
                   "UNC_roi1_R.nii.gz",
                   "UNC_roi2_L.nii.gz",
                   "UNC_roi2_R.nii.gz",
                   "UNC_L_prob_map.nii.gz",
                   "UNC_R_prob_map.nii.gz",
                   "ARC_L_prob_map.nii.gz",
                   "ARC_R_prob_map.nii.gz"]
template_remote_fnames = ["5273680", "5273683", "5273686", "5273689",
                          "11458274", "11458277",
                          "5273695", "5273692", "5273698", "5273701",
                          "11458268", "11458271",
                          "5273704", "5273707", "5273710", "5273713",
                          "11458262", "11458265",
                          "5273716", "5273719",
                          "11458220",
                          "5273722", "5273725",
                          "11458226",
                          "5273728", "5273731", "5273734", "5273746",
                          "11458259", "11458256",
                          "5273737", "5273740", "5273743", "5273749",
                          "11458250", "11458253",
                          "5273752", "5273755", "5273758", "5273761",
                          "11458244", "11458247",
                          "5273764", "5273767", "5273770", "5273773",
                          "5273776", "5273791",
                          "11458238", "11458241",
                          "5273779", "5273782", "5273785", "5273788",
                          "11458223", "11458229",
                          "11458232", "11458235"]
# NOTE(review): several entries below are literal "<KEY>" placeholders
# (redacted checksums); md5 validation will fail for those files until the
# real hashes are restored.
template_md5_hashes = ["6b7aaed1a2982fd0ea436a223133908b",
                       "fd60d46d4e3cbd906c86e4c9e4fd6e2a",
                       "3aba60b169a35c38640de4ec29d362c8",
                       "12716a5688a1809fbaed1d58d2e68b59",
                       "c5637f471df861d9bbb45604db34770b",
                       "850cc4c04d7241747063fe3cd440b2ce",
                       "8e8973bc7838c8744914d402f52d91ca",
                       "c5fa4e6e685e695c006823b6784d2407",
                       "e1fab77f21d5303ed52285f015e24f0b",
                       "5f89defec3753fd75cd688c7bfb20a36",
                       "a4f3cd65b06fb25f63d5dab7592f00f2",
                       "7e73ab02db30a3ad6bd9e82148c2486e",
                       "<KEY>",
                       "73941510c798c1ed1b03e2bd481cd5c7",
                       "660cdc031ee0716d60159c7d933119ea",
                       "660cdc031ee0716d60159c7d933119ea",
                       "fd012bc89f6bed7bd54530195496bac4",
                       "<KEY>",
                       "<KEY>",
                       "<KEY>",
                       "627d7bb2e6d55f8243da815a36d9ff1a",
                       "55adbe9b8279185eedbe342149e1ff90",
                       "<KEY>",
                       "<KEY>",
                       "ba453196ff179b0e31172806e313b52c",
                       "<KEY>",
                       "<KEY>",
                       "9806e82c250e4604534b96917f87b7e8",
                       "<KEY>",
                       "<KEY>",
                       "0e68a9feaaddcc9b4d667c2f15903368",
                       "d45020a87ee4bb496edd350631d91f6a",
                       "<KEY>",
                       "55d616ea9e0c646adc1aafa0f5fbe625",
                       "<KEY>",
                       "a13eef7059c98568adfefbab660e434e",
                       "<KEY>",
                       "<KEY>",
                       "<KEY>",
                       "<KEY>",
                       "7bdf5111265107091c7a2fca9215de30",
                       "<KEY>",
                       "af2bcedf47e193686af329b9a8e259da",
                       "9a1122943579d11ba169d3ad87a75625",
                       "<KEY>",
                       "<KEY>",
                       "<KEY>",
                       "<KEY>",
                       "d3e068997ebc60407bd6e9576e47dede",
                       "<KEY>",
                       "fa141bb2d951bec486916acda3652d95",
                       "d391d073e86e28588be9a6d01b2e7a82",
                       "<KEY>",
                       "d65c67910807504735e034f7ea92d590",
                       "93cb24a9128db1a6c34a09eaf79fe7f0",
                       "<KEY>",
                       "<KEY>",
                       "<KEY>",
                       "53c277be990d00f7de04f2ea35e74d73"]
# Dipy fetcher: downloads (and checksums) the files above on first use.
fetch_templates = _make_fetcher("fetch_templates",
                                op.join(afq_home, 'templates'),
                                baseurl, template_remote_fnames,
                                template_fnames, md5_list=template_md5_hashes,
                                doc="Download AFQ templates")
def read_templates(resample_to=False):
    """Load AFQ templates from file.

    Parameters
    ----------
    resample_to : str or nibabel image class instance, optional
        If given, every template is resampled into this image's space
        (a string is treated as a path and loaded first).

    Returns
    -------
    dict with: keys: names of template ROIs and values: nibabel Nifti1Image
    objects from each of the ROI nifti files.
    """
    logger = logging.getLogger('AFQ.data')
    files, folder = fetch_templates()
    logger.debug('loading AFQ templates')
    tic = time.perf_counter()
    template_dict = {}
    for fname in files:
        template_img = nib.load(op.join(folder, fname))
        if resample_to:
            if isinstance(resample_to, str):
                resample_to = nib.load(resample_to)
            resampled = resample(
                template_img.get_fdata(),
                resample_to,
                template_img.affine,
                resample_to.affine).get_fdata()
            template_img = nib.Nifti1Image(resampled, resample_to.affine)
        # key by the filename stem (text before the first '.')
        template_dict[fname.split('.')[0]] = template_img
    toc = time.perf_counter()
    logger.debug(f'AFQ templates loaded in {toc - tic:0.4f} seconds')
    return template_dict
# Optic radiation (OR) templates: thalamic/V1 endpoints plus waypoint ROIs.
# The three lists below are parallel (filename, figshare id, md5 checksum).
or_fnames = [
    "left_thal_MNI.nii.gz",
    "left_V1_MNI.nii.gz",
    "right_thal_MNI.nii.gz",
    "right_V1_MNI.nii.gz",
    "left_OP_MNI.nii.gz",
    "left_OR_1.nii.gz",
    "left_OR_2.nii.gz",
    "left_pos_thal_MNI.nii.gz",
    "left_TP_MNI.nii.gz",
    "right_OP_MNI.nii.gz",
    "right_OR_1.nii.gz",
    "right_OR_2.nii.gz",
    "right_pos_thal_MNI.nii.gz",
    "right_TP_MNI.nii.gz",
]
or_remote_fnames = [
    "26831630",
    "26831633",
    "26831636",
    "26831639",
    "26831642",
    "26831645",
    "26831648",
    "26831651",
    "26831654",
    "26831657",
    "26831660",
    "26831663",
    "26831666",
    "26831669",
]
or_md5_hashes = [
    "c18f3f82c26f334dc26b96d21f026dd1",
    "ad996c67bf5cc59fc3a7b60255873b67",
    "786fb4ba915599f746950acd980e5b03",
    "cc88fb4671311404eb9dfa8fa11a59e0",
    "9cff03af586d9dd880750cef3e0bf63f",
    "ff728ba3ffa5d1600bcd19fdef8182c4",
    "4f1978e418a3169609375c28b3eba0fd",
    "fd163893081b520f4594171aeea04f39",
    "bf795d197912b5e074d248d2763c6930",
    "13efde1efe0de52683cbf352ecba457e",
    "75c7bd2092950578e599a2dcb218909f",
    "8f3890fa8c26a568503226757f7e7d6c",
    "f239aa3140809152da8884ff879dde1b",
    "60a748567e4dd81b40ad8967a14cb09e",
]
# Dipy fetcher: downloads (and checksums) the files above on first use.
fetch_or_templates = _make_fetcher("fetch_or_templates",
                                   op.join(afq_home,
                                           'or_templates'),
                                   baseurl, or_remote_fnames,
                                   or_fnames,
                                   md5_list=or_md5_hashes,
                                   doc="Download AFQ or templates")
def read_or_templates(resample_to=False):
    """Load AFQ OR templates from file.

    Parameters
    ----------
    resample_to : str or nibabel image class instance, optional
        If given, every template is resampled into this image's space
        (a string is treated as a path and loaded first).

    Returns
    -------
    dict with: keys: names of template ROIs and values: nibabel Nifti1Image
    objects from each of the ROI nifti files.
    """
    logger = logging.getLogger('AFQ.data')
    files, folder = fetch_or_templates()
    logger.debug('loading or templates')
    tic = time.perf_counter()
    template_dict = {}
    for f in files:
        img = nib.load(op.join(folder, f))
        if resample_to:
            if isinstance(resample_to, str):
                resample_to = nib.load(resample_to)
            # BUG FIX: this previously called ``reg.resample`` but no ``reg``
            # name is imported in this module (NameError at runtime); use the
            # ``resample`` imported from dipy.align, and take ``.get_fdata()``
            # so a plain array is passed to Nifti1Image, matching
            # read_callosum_templates / read_templates.
            img = nib.Nifti1Image(resample(img.get_fdata(),
                                           resample_to,
                                           img.affine,
                                           resample_to.affine).get_fdata(),
                                  resample_to.affine)
        template_dict[f.split('.')[0]] = img
    toc = time.perf_counter()
    logger.debug(f'or templates loaded in {toc - tic:0.4f} seconds')
    return template_dict
# +----------------------------------------------------+
# | Begin S3BIDSStudy classes and supporting functions |
# +----------------------------------------------------+
def get_s3_client(anon=True):
    """Return a boto3 s3 client.

    Global boto clients are not thread safe, so this function returns an
    independent session client for each caller/thread.

    Parameters
    ----------
    anon : bool
        Whether to use an anonymous connection (public buckets only).
        If False, uses the key/secret given, or boto's credential
        resolver (client_kwargs, environment, variables, config files,
        EC2 IAM server, in that order). Default: True

    Returns
    -------
    s3_client : boto3.client('s3')
    """
    session = boto3.session.Session()
    if anon:
        # unsigned requests: no credentials attached
        return session.client('s3', config=Config(signature_version=UNSIGNED))
    return session.client('s3')
def _ls_s3fs(s3_prefix, anon=True):
    """Return a dict of lists of files under an S3 prefix, using s3fs.

    The files are split into subject directories/files and non-subject
    directories/files, based on whether a BIDS 'subject' entity can be
    parsed from the path.

    Parameters
    ----------
    s3_prefix : str
        AWS S3 key for the study or site "directory" that contains all
        of the subjects.
    anon : bool
        Whether to use an anonymous connection (public buckets only).
        If False, uses the key/secret given, or boto's credential
        resolver (client_kwargs, environment, variables, config files,
        EC2 IAM server, in that order). Default: True

    Returns
    -------
    files : dict
        {'subjects': [...], 'other': [...]}
    """
    fs = s3fs.S3FileSystem(anon=anon)
    site_files = fs.ls(s3_prefix, detail=False)
    # BIDSLayout is only needed for its `parse_file_entities` method,
    # so it can be pointed at /dev/null instead of a real dataset
    layout = BIDSLayout(os.devnull, validate=False)
    subject_files = []
    other_files = []
    for path in site_files:
        entities = layout.parse_file_entities(path)
        if entities.get('subject') is not None:
            subject_files.append(path)
        else:
            other_files.append(path)
    return {'subjects': subject_files, 'other': other_files}
def _get_matching_s3_keys(bucket, prefix='', suffix='', anon=True):
"""Generate all the matching keys in an S3 bucket.
Parameters
----------
bucket : str
Name | |
import numpy as np
from scipy.spatial import Voronoi
import matplotlib as mpl
import matplotlib.pyplot as plt
import pickle
import time
import shutil
def dirP(obj, nmax=10):
    """
    Print every public attribute of *obj* together with its value.

    Lists and numpy arrays are printed with their size; at most *nmax*
    elements of a list are shown. Attributes whose string form starts
    with '<' (bound methods, plain objects) are skipped.

    Parameters
    ----------
    obj : object
        The object you want to explore.
    nmax : int, optional
        Maximum number of elements printed for list attributes.

    Returns
    -------
    None
    """
    for name in dir(obj):
        if name[0] != '_':
            ok = 0
            # idiom/safety fix: use getattr instead of eval('obj.' + name);
            # same attribute lookup without evaluating a built string
            val = getattr(obj, name)
            if type(val) is int:
                ok = 1
            else:
                try:
                    if str(val)[0] != '<':
                        ok = 1
                except BaseException:
                    # str() itself failed -- report and move on
                    print(name, " Methode locale")
            if ok:
                if type(val) is list:
                    print(name, (20 - len(str(name))) * ' ',
                          'is a list of size :', len(val), end=' ')
                    if len(val) < nmax:
                        print(val)
                    else:
                        print(',', val[0:nmax], '\n(', nmax, ' first elements)')
                elif type(val) is np.ndarray:
                    print(name, (20 - len(str(name))) * ' ',
                          'is a list of size :', val.shape, end=' ')
                    if np.prod(val.shape) < nmax:
                        print(val)
                    else:
                        print(',')
                else:
                    print(name, (20 - len(str(name))) * ' ', val)
#########################
def seg_intersect(a1, a2, b1, b2):
    """
    Intersection point of the infinite lines through segments a1->a2 and
    b1->b2, returned as a 2-vector.

    NOTE(review): parallel lines make the denominator zero -- the caller
    must guarantee the lines intersect.
    """
    dir_a = a2 - a1
    dir_b = b2 - b1
    offset = a1 - b1
    normal_a = perp(dir_a)
    denom = np.dot(normal_a, dir_b)
    num = np.dot(normal_a, offset)
    return (num / denom.astype(float)) * dir_b + b1
def perp(a):
    """Return *a* rotated 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    rotated = np.empty_like(a)
    rotated[0], rotated[1] = -a[1], a[0]
    return rotated
def ccw(A, B, C):
    """True if A, B, C are in counter-clockwise order (positive z cross product)."""
    lhs = (C[1] - A[1]) * (B[0] - A[0])
    rhs = (B[1] - A[1]) * (C[0] - A[0])
    return lhs > rhs
def intersect(A, B, C, D):
    """True if segments AB and CD properly cross each other."""
    ab_separates_cd = ccw(A, C, D) != ccw(B, C, D)
    cd_separates_ab = ccw(A, B, C) != ccw(A, B, D)
    return ab_separates_cd and cd_separates_ab
def length(XY):
    """Edge length from each vertex of the closed polygon XY to the next
    (the last vertex wraps around to the first)."""
    dx = np.roll(XY[:, 0], -1) - XY[:, 0]
    dy = np.roll(XY[:, 1], -1) - XY[:, 1]
    return np.sqrt(dx ** 2 + dy ** 2)
def linemaison(coord1, coord2):
    """
    Rasterise the segment coord1 -> coord2 into integer pixel coordinates.

    Parameters
    ----------
    coord1, coord2 : (x, y) pairs (any numeric type).

    Returns
    -------
    X, Y : int32 arrays of length max(|dx|, |dy|) + 1, the rounded
        coordinates of evenly spaced points along the segment.
    """
    npoints = int(max(abs(coord1[0] - coord2[0]),
                      abs(coord1[1] - coord2[1]))) + 1
    X = np.zeros(npoints, dtype=np.int32)
    Y = np.zeros(npoints, dtype=np.int32)
    # BUG FIX: when the endpoints coincide, npoints == 1 and the original
    # divided by (npoints - 1) == 0; return the single rounded point.
    if npoints == 1:
        X[0] = round(coord1[0])
        Y[0] = round(coord1[1])
        return X, Y
    for i in range(npoints):
        t = i / (npoints - 1)
        X[i] = round(coord1[0] + t * (coord2[0] - coord1[0]))
        Y[i] = round(coord1[1] + t * (coord2[1] - coord1[1]))
    return X, Y
def curvature(XY):
    """
    Discrete curvature along the closed polyline XY: the turning angle at
    each vertex (degrees, from ``angle``) divided by the length of the
    edge leaving that vertex (from ``length``).
    """
    turning = angle(XY[:, 0], XY[:, 1])
    edges = length(XY)
    return turning / edges
def angle(x, y):
    """
    Turning angle in degrees at each vertex of the closed polyline (x, y):
    the change between consecutive edge headings, wrapped into (-180, 180].
    """
    # heading of the edge leaving each vertex (wraps around at the end)
    heading = np.arctan2(np.roll(y, -1) - y, np.roll(x, -1) - x)
    # difference with the heading of the incoming edge
    turn = heading - np.roll(heading, 1)
    # wrap into the principal interval
    turn[turn < -np.pi] += 2 * np.pi
    turn[turn > np.pi] -= 2 * np.pi
    return (180 / np.pi) * turn
def dist(XY):
    """Total length of the open polyline XY (sum of consecutive point
    distances; the curve is NOT closed)."""
    steps = np.diff(XY, axis=0)
    return np.sum(np.sqrt(np.sum(steps ** 2, axis=1)))
#########################
################################################################################
################## IMAGE OPERATIONS ############################################
def reduceIMGsize(IMG):
    '''
    depreciated, too many bugs

    Intended behaviour: scan the 2-D array IMG for its bounding box of
    non-zero rows/columns and return (xmin, xmax, ymin, ymax) indices.

    NOTE(review): kept byte-for-byte because callers may depend on the
    current (buggy) values. Known issues: the four edges are computed
    inconsistently (ymax uses ``index - bordersize`` but xmax uses
    ``index + 1``), and an all-zero IMG makes the while loops run off the
    array (IndexError).
    '''
    bordersize = 0
    # first non-empty row from the top
    index = 0
    while sum(IMG[index, :]) == 0:
        index += 1
    ymin = max(0, index - bordersize)
    # first non-empty row from the bottom
    index = len(IMG[:, 0]) - 1
    while sum(IMG[index, :]) == 0:
        index -= 1
    ymax = min(index - bordersize, len(IMG[:, 0]) - 1)
    # first non-empty column from the left
    index = 0
    while sum(IMG[:, index]) == 0:
        index += 1
    xmin = max(0, index - bordersize)
    # first non-empty column from the right
    index = len(IMG[0, :]) - 1
    while sum(IMG[:, index]) == 0:
        index -= 1
    xmax = min(len(IMG[0, :]) - bordersize, index + 1)
    return (xmin, xmax, ymin, ymax)
################################################################################
################### ARC CREATION ###############################################
################################################################################
def Voronoi_Clean(XY,IMG,p):
"""
DESCRIPTION :
_Takes all the points from every contour, and generate every segments of the voronoi
diagram. It create points at mid-range between two contours, but also some useless one.
_The program set all the generated points outside of the figure to [0,0]
_It then suppress all segments which are linked to a point not in the figure (thanks to IMG)
_Then it suppress all segments not linked to 2 different contours
_Then it reorganise information
INPUT :
* XY : All the coordinates of the contour points, with their label
* IMG : Matrix of values =0 if not on the gorgonian !=O if on gorgonian
OUTPUT :
* Vertices all usefull points positions generated by the Voronoi diagram
* Segments all association of points we keep
* Nconect : number of points having the connectivity of Nconect label.
WHAT NEED TO BE UPGRADED :
* There is no real needs, although the is badly coded. It works.
Coded by <NAME>. Last update 03/03/2017
"""
t=time.time()
print('Completing Voronoi Calculation...',end='')
vorSkel=Voronoi(XY[:,0:2])
#plt.figure('Hop')
#plt.imshow(IMG.binary.T,cmap='binary')
#plt.plot(vorSkel.vertices[:,0],vorSkel.Vertices[:,1],'*',c='k')
#voronoi_plot_2d(vorSkel)
#plt.show()
print('Done, t=',time.time()-t)
# vorSkel.vertices All the points created
# vorSkel.ridge_vertices All the segments created of vertices
# vorSkel.ridge_points Link between label of original points in voronoi and real one
print('Introduced', len(vorSkel.ridge_vertices),'Ridges')
t=time.time()
print('Removing infinity/out of structure Ridges...',end='')
#Vertices Cleaning : detect all potentially good points
supprpts={} # Dictionnary : if the point is associated to true, it will be suppressed.
supprpts[-1]=True # Suppress label for all points at infinity
for i in range(len(vorSkel.vertices[:,0])): # We check every pointd
if (0<vorSkel.vertices[i,0]<IMG.X and 0<vorSkel.vertices[i,1]<IMG.Y):
if IMG.binary[int(vorSkel.vertices[i,0]),int(vorSkel.vertices[i,1])]==0:# If they are not on the structure
supprpts[i]=True # We will delete them
else:
supprpts[i]=True
# Creation of segments with only points on the structure
nsegok=0 # Number of segment "OK" I.E without values from supprpts
for i, item in enumerate(vorSkel.ridge_vertices): # For every segment
if not (supprpts.get(item[0],False) or supprpts.get(item[1],False)) : # If the first point will not be suppr
nsegok+=1 # We count this segment as kept
Segments=np.zeros((nsegok,4)) # On fill Segments of : [IndexVertice1|IndexVertice2|IndexPoint1|IndexPoint2]
index=0 # Counter
for i, item in enumerate(vorSkel.ridge_vertices): # We now add every segments, checking same conditions
if not supprpts.get(item[0],False):
if not supprpts.get(item[1],False):
Segments[index,0:2]=item # The Vertices of the segment
Segments[index,2:4]=vorSkel.ridge_points[i] # The points (Contour) linked to the segment
index+=1
print('Done, t=',time.time()-t)
print('kept', nsegok,'Ridges')
t=time.time()
print('Removing Ridges only linked to one contours...',end='')
# Segments cleaning : only the one created with two different labels contours
nsegok=0 # Number of segments associated to 2 different contours
for i in range(len(Segments[:,0])): # For Every segments
if XY[np.int(Segments[i,2]),3] != XY[np.int(Segments[i,3]),3]: #if the segment is created by 2 different contours
nsegok+=1 # We count this segment as kept
Keptseg=np.zeros((nsegok,4)) # We will only keep these segments
item=0 # Position in keptseg
for i in range(len(Segments[:,0])): # For every segments
if XY[np.int(Segments[i,2]),3] != XY[np.int(Segments[i,3]),3]: # If it satisfy our previous condition
Keptseg[item,0:2]=Segments[i,0:2] #We add it to the segment
Keptseg[item,2 ]=XY[np.int(Segments[i,2]),2]
Keptseg[item,3 ]=XY[np.int(Segments[i,3]),2]
item+=1
#Vertices cleaning
Vertices2=np.zeros((len(vorSkel.vertices),4)) # |X|Y|Connectivity for each point | Label
Vertices2[:,0:2]=vorSkel.vertices # We copy X and Y
for i in range(len(Keptseg)): # for each segment
Vertices2[np.int(Keptseg[i,0]),2]+=1 # we add 1 in connectivity for every point taht compose him
Vertices2[np.int(Keptseg[i,1]),2]+=1 # Same
print('Done. t=',time.time()-t )
t=time.time()
print('Reattribution of labels...',end='')
#We associate a label for every kept Vertice
label=0
for i in range(len(Vertices2)):
if Vertices2[i,2]>=1:
Vertices2[i,3]=label
label+=1
#We calculate the number of point having a certain connectivity
i=0
while (len(Vertices2[Vertices2[:,2]==i]) or i<3 ): # double condition so we don't skip points if there is no dead end
Nconect=np.zeros(i) # We now know the maximal connectivity
i+=1
Nconect=np.zeros(i)
i=0
while (len(Vertices2[Vertices2[:,2]==i]) or i<3 ):
Nconect[i]=len(Vertices2[Vertices2[:,2]==i]) # We count the number of points with this connectivity
i+=1
VerticesKept=np.zeros((np.int(np.sum(Nconect[1::])),5)) # We recreate a vector with only the pooints we keep
index=0
for i in range(len(Vertices2)): # We look at every points
if Vertices2[i,2]>0: # if we can keep it
VerticesKept[index,0:4]=Vertices2[i,:] # We add it
index+=1
# Reattribution des labels
Segments=1.0*Keptseg # We change all the lagbels
for i in range(len(Segments[:,0])): # For every segments
Segments[i,0]=Vertices2[int(Keptseg[i,0]),3] # We give them the new label
Segments[i,1]=Vertices2[int(Keptseg[i,1]),3]
Vertices=np.zeros(( len(VerticesKept) , 4 )) #On renomme Vertices
Vertices[:,0:3]=VerticesKept[:,0:3]
Vertices[:,3 ]=VerticesKept[:,4]
print('Done. t=',time.time()-t)
#### TERMINAL PRINT ###################################################
print("Finished with :",len(Vertices),'points et',len(Segments) ,'segments')
print("number of point :")
for i in range(1,len(Nconect)):
print(int(Nconect[i]), 'of connectivity', i)
"""
DESCRIPTION :
*This function creates arcs (associations of segments), by looking at all the association of segments
between 2 points of connectivity different of 2
INPUT :
* Vertices : array [X|Y] of every points
* Segments : array All the 2 associations of points
* Nconect : list
* Kmin : float
* IMG_DIST : 2D-Array with the distance to the closest border
OUTPUT :
* Arc : List of list of Vertices
WHAT NEED TO BE UPGRADED :
* Dictionnary could help a lot
* The system not to do the same arc twice also
Coded by <NAME>. Last update 03/03/2017
"""
######### Get parameters values ##################
Kmin=p.get('Kmin', 1.1)
##################################################
print('Arc Creation...',end='')
t=time.time()
Noeuds = np.zeros((len(Vertices[Vertices[:,2]>2]),2+len(Nconect))) # Useless, but needed to generate Ptsdepart (badly coded)
Arcs = [] # All the arcs
PtsAssoc=np.zeros((len(Vertices[:,0]),len(Nconect)-1))-1 # All the points connected to the point
Ptsdepart=0*Noeuds[:,0:len(Nconect)] #Ensemble des points ayant une connectivité != reste 4 2, desquels on va reconstruire les arcs
#At this time, I didn't know dictionnary
for i in range(len(Segments[:,0])): # For every segments
for j in range(2): # We look at both points
ok=0
index=0
while ok==0: # Yes the structure is weird but work
if PtsAssoc[int(Segments[i,j]),index]==-1: # if the case is free
PtsAssoc[int(Segments[i,j]),index]=Segments[i,1-j] #It place the coordinate of the other points
ok=1
else: index+=1 # if the case is full we go to the next one
#All the starting points
wherepts=np.zeros((len(PtsAssoc[:,0]),2))
index=0
sumpts=0
for i in range(len(PtsAssoc[:,0])): #We look at every points
if PtsAssoc[i,2]!=-1: #if their connectivity is different of 2
Ptsdepart[index,0]=i #We add its label
Ptsdepart[index,1::]=PtsAssoc[i,:] #We | |
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_multinomial_outcome_treatment(
    *, X, y, var_list, outcome_name, cols_to_copy, params, imputation_map
):
    """Fit a vtreat variable treatment plan for a multinomial (categorical) outcome.

    Numeric candidate columns get a clean (imputed) copy; categorical columns get
    a one-vs-rest impact code per outcome level plus prevalence/indicator codes.

    :param X: pandas.DataFrame of explanatory values
    :param y: categorical outcome values (two or more levels)
    :param var_list: candidate explanatory columns (None/empty means all of X)
    :param outcome_name: name of the dependent variable
    :param cols_to_copy: columns to pass through untreated
    :param params: treatment-parameter dictionary ("coders", indicator options, ...)
    :param imputation_map: per-column imputation overrides for clean_copy
    :return: dict with keys "outcome_name", "cols_to_copy", "xforms"
    :raises ValueError: if no usable variables remain or no transforms are created
    """
    if (var_list is None) or (len(var_list) <= 0):
        var_list = [co for co in X.columns]
    copy_set = set(cols_to_copy)
    var_list = [co for co in var_list if (not (co in copy_set))]
    v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
    # bug fix: keep var_list as a list (was a set comprehension) so the order of
    # derived transforms/columns is deterministic across runs
    var_list = [v for v in var_list if v_counts[v] > 1]
    if len(var_list) <= 0:
        raise ValueError("no variables")
    xforms = []
    n = X.shape[0]
    all_bad = []
    for vi in var_list:
        n_bad = sum(vtreat.util.is_bad(X[vi]))
        if n_bad >= n:
            all_bad = all_bad + [vi]
        if (n_bad > 0) and (n_bad < n):
            # partially-missing column: optionally add an is-missing indicator
            if "missing_indicator" in params["coders"]:
                # noinspection PyTypeChecker
                xforms = xforms + [
                    IndicateMissingTransform(
                        incoming_column_name=vi, derived_column_name=vi + "_is_bad"
                    )
                ]
    outcomes = [oi for oi in set(y)]
    var_list = [co for co in var_list if (not (co in set(all_bad)))]
    num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
    cat_list = [co for co in var_list if co not in set(num_list)]
    # columns with one unique value per row carry no generalizable signal
    id_like = [co for co in cat_list if v_counts[co] >= n]
    if len(id_like) > 0:
        warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
        # bug fix: filter cat_list (was var_list, which re-admitted numeric
        # columns into the categorical coders whenever id-like columns existed)
        cat_list = [co for co in cat_list if co not in set(id_like)]
    if "clean_copy" in params["coders"]:
        for vi in num_list:
            xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
            if xform is not None:
                # noinspection PyTypeChecker
                xforms = xforms + [xform]
    for vi in cat_list:
        # one impact code per outcome level (one-vs-rest)
        for outcome in outcomes:
            if "impact_code" in params["coders"]:
                extra_args = {
                    "outcome_target": outcome,
                    "var_suffix": ("_" + str(outcome)),
                }
                # noinspection PyTypeChecker
                xforms = xforms + [
                    fit_binomial_impact_code(
                        incoming_column_name=vi,
                        x=numpy.asarray(X[vi]),
                        y=y,
                        extra_args=extra_args,
                        params=params,
                    )
                ]
        # prevalence/indicator codes do not depend on the outcome level,
        # so they are fit once per variable, outside the outcomes loop
        if "prevalence_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
            ]
        if "indicator_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_indicator_code(
                    incoming_column_name=vi,
                    x=numpy.asarray(X[vi]),
                    min_fraction=params["indicator_min_fraction"],
                    sparse_indicators=params["sparse_indicators"],
                )
            ]
    xforms = [xf for xf in xforms if xf is not None]
    if len(xforms) <= 0:
        raise ValueError("no variables created")
    for stp in params["user_transforms"]:
        stp.fit(X=X[var_list], y=y)
    return {
        "outcome_name": outcome_name,
        "cols_to_copy": cols_to_copy,
        "xforms": xforms,
    }
# noinspection PyPep8Naming
def fit_unsupervised_treatment(*, X, var_list, outcome_name, cols_to_copy, params, imputation_map):
    """Fit a vtreat variable treatment plan with no outcome (unsupervised).

    Numeric candidate columns get a clean (imputed) copy; categorical columns get
    prevalence and indicator codes (no y-aware coders are possible here).

    :param X: pandas.DataFrame of explanatory values
    :param var_list: candidate explanatory columns (None/empty means all of X)
    :param outcome_name: name of the dependent variable (carried through only)
    :param cols_to_copy: columns to pass through untreated
    :param params: treatment-parameter dictionary ("coders", indicator options, ...)
    :param imputation_map: per-column imputation overrides for clean_copy
    :return: dict with keys "outcome_name", "cols_to_copy", "xforms"
    :raises ValueError: if no usable variables remain
    """
    if (var_list is None) or (len(var_list) <= 0):
        var_list = [co for co in X.columns]
    copy_set = set(cols_to_copy)
    var_list = [co for co in var_list if (not (co in copy_set))]
    v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
    # bug fix: keep var_list as a list (was a set comprehension) so the order of
    # derived transforms/columns is deterministic across runs
    var_list = [v for v in var_list if v_counts[v] > 1]
    if len(var_list) <= 0:
        raise ValueError("no variables")
    xforms = []
    n = X.shape[0]
    all_bad = []
    for vi in var_list:
        n_bad = sum(vtreat.util.is_bad(X[vi]))
        if n_bad >= n:
            all_bad = all_bad + [vi]
        if (n_bad > 0) and (n_bad < n):
            # partially-missing column: optionally add an is-missing indicator
            if "missing_indicator" in params["coders"]:
                # noinspection PyTypeChecker
                xforms = xforms + [
                    IndicateMissingTransform(
                        incoming_column_name=vi, derived_column_name=vi + "_is_bad"
                    )
                ]
    var_list = [co for co in var_list if (not (co in set(all_bad)))]
    num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
    cat_list = [co for co in var_list if co not in set(num_list)]
    # columns with one unique value per row carry no generalizable signal
    id_like = [co for co in cat_list if v_counts[co] >= n]
    if len(id_like) > 0:
        warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
        # bug fix: filter cat_list (was var_list, which re-admitted numeric
        # columns into the categorical coders whenever id-like columns existed)
        cat_list = [co for co in cat_list if co not in set(id_like)]
    if "clean_copy" in params["coders"]:
        for vi in num_list:
            xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
            if xform is not None:
                # noinspection PyTypeChecker
                xforms = xforms + [xform]
    for vi in cat_list:
        if "prevalence_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
            ]
        if "indicator_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_indicator_code(
                    incoming_column_name=vi,
                    x=numpy.asarray(X[vi]),
                    min_fraction=params["indicator_min_fraction"],
                    sparse_indicators=params["sparse_indicators"],
                )
            ]
    xforms = [xf for xf in xforms if xf is not None]
    for stp in params["user_transforms"]:
        stp.fit(X=X[var_list], y=None)
    return {
        "outcome_name": outcome_name,
        "cols_to_copy": cols_to_copy,
        "xforms": xforms,
    }
def pre_prep_frame(x, *, col_list, cols_to_copy):
    """Create a copy of pandas.DataFrame x restricted to col_list union cols_to_copy.

    Columns outside cols_to_copy are coerced to numeric (when possible) or string
    dtype, and bad/missing entries are re-marked as numpy.nan. The returned frame
    has trivial (0..n-1) indexing. An empty/None col_list means all columns.

    :param x: pandas.DataFrame to copy and prepare
    :param col_list: columns to keep (None/empty means every column of x)
    :param cols_to_copy: columns copied through untouched (e.g. outcomes/ids)
    :return: new prepared pandas.DataFrame
    :raises KeyError: if a requested column is not present in x
    :raises ValueError: if no columns remain
    """
    if cols_to_copy is None:
        cols_to_copy = []
    if not col_list:
        col_list = list(x.columns)
    available = set(x.columns)
    keep = list(col_list)
    keep_set = set(keep)
    # append any copy columns that exist in x but were not already requested
    for ci in cols_to_copy:
        if (ci in available) and (ci not in keep_set):
            keep.append(ci)
            keep_set.add(ci)
    missing_cols = keep_set - available
    if missing_cols:
        raise KeyError("referred to not-present columns " + str(missing_cols))
    passthrough = set(cols_to_copy)
    if not keep:
        raise ValueError("no variables")
    frame = x.loc[:, keep]
    frame = frame.reset_index(inplace=False, drop=True)
    for c in frame.columns:
        if c in passthrough:
            continue
        bad_ind = vtreat.util.is_bad(frame[c])
        if vtreat.util.can_convert_v_to_numeric(frame[c]):
            frame[c] = vtreat.util.safe_to_numeric_array(frame[c])
        else:
            # force a uniform string dtype for non-numeric columns
            # https://stackoverflow.com/questions/22231592/pandas-change-data-type-of-series-to-string
            frame[c] = numpy.asarray(frame[c].apply(str), dtype=str)
        frame.loc[bad_ind, c] = numpy.nan
    return frame
def perform_transform(*, x, transform, params):
    """Apply a fit treatment plan to data frame x and return the derived frame.

    Runs every planned transform step (optionally restricted to
    transform.result_restriction), prepends any pass-through columns, and
    concatenates the results column-wise with a fresh index.

    :param x: pandas.DataFrame to transform
    :param transform: fit treatment object carrying plan_ and result_restriction
    :param params: parameter dictionary supplying "user_transforms"
    :return: pandas.DataFrame of derived (and copied) columns
    :raises ValueError: if required input columns are missing or nothing transforms
    """
    plan = transform.plan_
    steps = list(plan["xforms"])
    custom_steps = list(params["user_transforms"])
    restriction = transform.result_restriction
    if (restriction is not None) and (len(restriction) > 0):
        # keep only steps that produce at least one requested result column
        steps = [s for s in steps
                 if set(s.derived_column_names_).intersection(restriction)]
        custom_steps = [s for s in custom_steps
                        if set(s.derived_vars_).intersection(restriction)]
    # verify every input column the surviving steps rely on is present
    required = set()
    for s in steps:
        if s.incoming_column_name_ is not None:
            required.add(s.incoming_column_name_)
    for s in custom_steps:
        if s.incoming_vars_ is not None:
            required.update(s.incoming_vars_)
    absent = required - set(x.columns)
    if len(absent) > 0:
        raise ValueError("missing required input columns " + str(absent))
    # run the steps, discarding empty/None results
    frames = []
    for s in steps + custom_steps:
        produced = s.transform(x)
        if (produced is not None) and (produced.shape[1] > 0):
            frames.append(produced)
    # pass-through columns go first, in x's column order
    copy_set = set(plan["cols_to_copy"])
    to_copy = [ci for ci in x.columns if ci in copy_set]
    if len(to_copy) > 0:
        frames.insert(0, x.loc[:, to_copy].copy())
    if len(frames) <= 0:
        raise ValueError("no columns transformed")
    res = pandas.concat(frames, axis=1, sort=False)
    res.reset_index(inplace=True, drop=True)
    return res
def limit_to_appropriate_columns(*, res, transform):
    """Restrict a transformed frame to usable derived columns plus copied columns.

    Usable columns are those marked has_range in the transform's score frame,
    optionally intersected with transform.result_restriction.

    :param res: pandas.DataFrame produced by perform_transform
    :param transform: fit treatment object carrying plan_ and score_frame_
    :return: restricted copy of res with a fresh index
    :raises ValueError: if nothing would be retained
    """
    plan = transform.plan_
    copy_cols = set(plan["cols_to_copy"])
    score_frame = transform.score_frame_
    usable = set(score_frame["variable"][score_frame["has_range"]])
    restriction = transform.result_restriction
    if (restriction is not None) and (len(restriction) > 0):
        usable = usable.intersection(restriction)
    keep = [ci for ci in res.columns if (ci in copy_cols) or (ci in usable)]
    if len(keep) <= 0:
        raise ValueError("no columns retained")
    out = res[keep].copy()
    out.reset_index(inplace=True, drop=True)
    return out
def mean_of_single_column_pandas_list(val_list):
    """Mean of the first column across a list of single-column pandas frames.

    Bad/missing entries (per vtreat.util.is_bad) are excluded. Returns
    numpy.nan when the list is empty/None or no good values remain.

    :param val_list: list of single-column pandas.DataFrame objects
    :return: float mean, or numpy.nan
    """
    if not val_list:
        return numpy.nan
    combined = pandas.concat(val_list, axis=0, sort=False)
    col = combined.columns[0]
    good_rows = numpy.logical_not(vtreat.util.is_bad(combined[col]))
    good = combined.loc[good_rows, [col]]
    if good.shape[0] < 1:
        return numpy.nan
    return numpy.mean(good[col])
# assumes each y-aware variable produces one derived column
# also clears out refitter_ values to None
def cross_patch_refit_y_aware_cols(*, x, y, res, plan, cross_plan):
if cross_plan is None or len(cross_plan) <= 1:
for xf in plan["xforms"]:
xf.refitter_ = None
return res
incoming_colset = set(x.columns)
derived_colset = set(res.columns)
for xf in plan["xforms"]:
if not xf.need_cross_treatment_:
continue
incoming_column_name = xf.incoming_column_name_
derived_column_name = xf.derived_column_names_[0]
if derived_column_name not in derived_colset:
continue
if incoming_column_name not in incoming_colset:
raise KeyError("missing required column " + incoming_column_name)
if xf.refitter_ is None:
raise ValueError(
"refitter is None: "
+ incoming_column_name
+ " -> "
+ derived_column_name
)
# noinspection PyPep8Naming
def maybe_transform(*, fit, X):
if fit is None:
return None
return fit.transform(X)
patches = [
maybe_transform(
fit=xf.refitter_(
incoming_column_name=incoming_column_name,
x=x[incoming_column_name][cp["train"]],
y=y[cp["train"]],
extra_args=xf.extra_args_,
params=xf.params_,
),
X=x.loc[cp["app"], [incoming_column_name]],
)
for cp in cross_plan
]
# replace any missing sections with global average (slight data leak potential)
avg = mean_of_single_column_pandas_list(
[pi for pi in patches if pi is not None]
)
if numpy.isnan(avg):
avg | |
# repository: tdcoa/usage
import subprocess, platform, os, copy #, yaml
import sys
from datetime import datetime
from tkinter import *
from tkinter.ttk import *
from PIL import Image
from PIL import ImageTk
from .tdcoa import tdcoa
import tdcsm
class coa():
version = "0.4.1.6"
debug = False
entryvars = {}
defaults = {}
#appsize = '800x500' # width x height
appwidth = 750
appheight = 550
sampleTx2 = {'systems_left':['one','two','three'],
'systems_right':['four','five','six'],
'filesets_left':['one','two','three'],
'filesets_right':['four','five','six']}
approot = ''
secrets = ''
motd = False
skip_git = False
show_hidden_filesets = False
fontsize = 10
font = 'Open Sans'
def __init__(self, approot='', secrets='', **kwargs):
print('GUI for TDCOA started')
#self.version = str(datetime.now()).replace('-','').replace(':','').split('.')[0].replace(' ','.')
if approot != '': self.defaults['approot'] = approot
if secrets != '': self.defaults['secrets'] = secrets
if platform.system()[:3]=='Win':
self.localos='Win'
self.appwidth = int(self.appwidth * 1.2)
self.appheight = int(self.appheight * 1.2)
else:
self.localos='Mac'
self.appsize = str(int(self.appwidth)) + 'x' + str(int(self.appheight))
self.images = {'banner':{'file':'pic_TDCOA_Banner.gif', 'X':700, 'Y':27, 'scale':(self.appwidth - 20) / 700, 'object':None, 'alttext':''}
,'logo' :{'file':'pic_TDCOAdot.gif', 'X':330, 'Y':55, 'scale':0.5, 'object':None, 'alttext':'Teradata'}
,'logo' :{'file':'pic_TDCOAdot2.gif', 'X':330, 'Y':55, 'scale':0.5, 'object':None, 'alttext':'Teradata'}}
if 'versionprefix' in kwargs:
self.versionprefix = kwargs['versionprefix']
self.version = self.versionprefix + '.' + self.version
else:
self.versionprefix = ''
self.run_gui()
def set_defaults(self, **kwargs):
ev = self.entryvars
for name, default in kwargs.items():
self.defaults[name] = default
if 'approot' not in self.defaults: self.defaults['approot'] = os.getcwd()
self.approot = self.defaults['approot']
if 'config' not in self.defaults: self.defaults['config'] = 'config.yaml'
if 'systems' not in self.defaults: self.defaults['systems'] = 'source_systems.yaml'
if 'filesets' not in self.defaults: self.defaults['filesets'] = '%s%sfilesets.yaml' %('1_download', os.sep)
if 'secrets' not in self.defaults: self.defaults['secrets'] = self.first_file_that_exists(
os.path.join('..','!secrets.yaml')
,os.path.join('..', 'secrets.yaml')
,os.path.join('secrets.yaml'))
self.defaults['secrets'] = self.defaults['secrets'].replace(self.defaults['approot']+os.sep,'')
for name, default in self.defaults.items():
if name not in self.entryvars: self.entryvars[name] = StringVar()
self.entryvars[name].set(default)
# =================== BEGIN: MAKE NEW GUI OBJECT COLLECTIONS ==============================
    def define_styles(self, app=None):
        """Register all named ttk styles used by the app.

        Configures the base (unprefixed) widget styles, then for each section
        name in ``colors`` a family of prefixed variants such as
        "<name>-normal.TFrame", "<name>-normal.TButton", "<name>-bold.TLabel".

        :param app: the Tk root window (or None for the default root)
        """
        Style(app).theme_use('clam') #clam, alt, default, classic, aqua
        # section name -> background color for that section's widgets
        colors = { 'config'     :'#FFE0A4',
                   'normalrun'  :'#C6E7E7',
                   'assistedrun':'#C6E7E7',
                   'execute'    :'#C6E7E7',
                   'upload'     :'#C6E7E7',
                   'help'       :'#BFBFBF'}
        font = self.font
        fontsize = self.fontsize
        # base styles shared by all sections
        Style(app).configure("TButton" ,foreground="#ffffff", background="#404040", font=(font, str(fontsize)) )
        Style(app).configure("TFrame"  ,foreground="#ffffff", background="#394951", font=(font, str(fontsize)) )
        Style(app).configure("TNotebook" ,foreground="#ffffff", background="#394951", font=(font, str(fontsize)) )
        Style(app).configure("TLabel"  ,foreground="#ffffff", background="#394951", font=(font, str(fontsize)) )
        Style(app).configure("title.TLabel",foreground="#ffffff", background="#394951", font=(font,str(fontsize*2), 'bold') )
        # per-section variants (note: `hex` shadows the builtin but is otherwise unused)
        for name, hex in colors.items():
            Style(app).configure("%s-normal.TFrame" %name, foreground="#394951", background=colors[name], font=(font, str(fontsize)) )
            Style(app).configure("%s-normal.TButton" %name, foreground="#394951", background=self.shade(colors[name]), font=(font, str(fontsize)), padding=(1,1,1,1) )
            Style(app).map(      "%s-normal.TButton" %name, background=[("disabled",self.shade(colors[name],0.4))])
            Style(app).configure("%s-normal.TCheckbutton" %name, foreground="#394951", background=colors[name])
            # NOTE(review): this registers "%s-separator.TFrame" while separator()
            # requests "%s-sep.TFrame" -- confirm which suffix is intended.
            Style(app).configure("%s-separator.TFrame" %name, foreground="#394951", background=colors[name], font=(font, str(fontsize)) )
            Style(app).configure("%s-normal.TLabel" %name, foreground="#394951", background=colors[name], font=(font, str(fontsize)) )
            Style(app).configure("%s-bold.TLabel" %name, foreground="#394951", background=colors[name], font=(font, str(fontsize), 'bold') )
            Style(app).configure("%s-header.TLabel" %name, foreground="#394951", background=colors[name], font=(font, str(fontsize*2), 'bold') )
            Style(app).configure("%s-normal.Treeview" %name, foreground="#394951", background=self.tint(colors[name],0.8), font=(font, str(fontsize)) )
            # NOTE(review): fontsize*10 below looks like a typo for fontsize -- confirm
            Style(app).configure("%s-normal.TEntry" %name, foreground="#394951", fieldbackground=self.tint(colors[name],0.7), font=(font, str(fontsize*10)), padding=(1,1,1,1) )
def newframe_LEB(self, parent, labeltext='not set', btntext='not set', btncommand='test', style = 'default', lbl_width=12, btn_width=6):
if btncommand not in self.entryvars: self.entryvars[btncommand] = StringVar()
f = Frame(parent, padding=1, style=str('%s.TFrame' %style))
l = Label(f, text=labeltext, width = lbl_width, anchor=E, style=str('%s.TLabel' %style))
e = Entry(f, textvariable=self.entryvars[btncommand], style=str('%s.TEntry' %style), font=(self.font, self.fontsize))
b = Button(f,text=btntext, command=lambda:self.button_click(btncommand, entrytext=self.entryvars[btncommand].get()), width=btn_width, style=str('%s.TButton' %style))
l.pack(side=LEFT, fill=BOTH, expand=False, padx=0, pady=0, ipady=1)
e.pack(side=LEFT, fill=BOTH, expand=True , padx=0, pady=0, ipady=1)
b.pack(side=LEFT, fill=BOTH, expand=False, padx=0, pady=0, ipady=1)
if btntext == '': b.state(["disabled"])
if self.debug: print('created LEB: %s' %labeltext)
return f
def newframe_CLB(self, parent, labeltext='', btntext = 'not set', btncommand='test', checkcommand=print('check command'), style = 'default', show_chkbox=True):
f = Frame(parent, padding=1, style=str('%s.TFrame' %style))
if btncommand not in self.entryvars: self.entryvars[btncommand] = IntVar(value=0)
if show_chkbox:
c = Checkbutton(f, variable=self.entryvars[btncommand], command=checkcommand, style=str('%s.TCheckbutton' %style))
else:
c = Label(f, text=" ", anchor=W, style=str('%s.TLabel' %style)) # widt of missing checkbox
if labeltext != '': l = Label(f, text=labeltext, anchor=E, style=str('%s.TLabel' %style))
b = Button(f,text=btntext, command=lambda:self.button_click(btncommand, state=self.entryvars[btncommand].get()), style=str('%s.TButton' %style))
c.pack(side=LEFT, expand=False)
if labeltext != '': l.pack(side=LEFT, expand=False)
b.pack(side=RIGHT, fill=BOTH, expand=True)
if self.debug: print('created CB: %s' %btntext)
return f
def newframe_CLBB(self, parent, labeltext='', btntext = 'not set', btncommand='test', btnwidth=20, btntext2='not set', btncommand2='test2', checkcommand=print('check command'), style = 'default', show_chkbox=True):
f = Frame(parent, padding=1, style=str('%s.TFrame' %style))
if btncommand not in self.entryvars: self.entryvars[btncommand] = IntVar(value=0)
if btncommand2 not in self.entryvars: self.entryvars[btncommand2] = IntVar(value=0)
if show_chkbox:
c = Checkbutton(f, variable=self.entryvars[btncommand], command=checkcommand, style=str('%s.TCheckbutton' %style))
else:
c = Label(f, text=" ", anchor=W, style=str('%s.TLabel' %style)) # widt of missing checkbox
if labeltext != '': l = Label(f, text=labeltext, anchor=E, style=str('%s.TLabel' %style))
b = Button(f,text=btntext, command=lambda:self.button_click(btncommand, state=self.entryvars[btncommand].get()), width=btnwidth, style=str('%s.TButton' %style))
b2 = Button(f,text=btntext2, command=lambda:self.button_click(btncommand2, state=self.entryvars[btncommand2].get()), style=str('%s.TButton' %style))
c.pack(side=LEFT, expand=False)
if labeltext != '': l.pack(side=LEFT, expand=False)
b.pack(side=LEFT, fill=BOTH, expand=True)
b2.pack(side=RIGHT, fill=BOTH, expand=False, ipadx=0, padx=0)
if self.debug: print('created CB: %s' %btntext)
return f
def newframe_LC(self, parent, labeltext='', checkcommand='test', style = 'default'):
f = Frame(parent, padding=1, style=str('%s.TFrame' %style))
if checkcommand not in self.entryvars: self.entryvars[checkcommand] = IntVar(value=0)
c = Checkbutton(f, variable=self.entryvars[checkcommand], command=lambda:self.button_click(checkcommand, state=self.entryvars[checkcommand].get()), style=str('%s.TCheckbutton' %style))
if labeltext != '': l = Label(f, text=labeltext, anchor=E, style=str('%s.TLabel' %style))
c.pack(side=RIGHT, expand=False)
if labeltext != '': l.pack(side=LEFT, expand=False)
if self.debug: print('created CB: %s' %btntext)
return f
def newbutton(self, parent, btntext = 'not set', btncommand='test', btnwidth=15, style = 'default', side=RIGHT):
if btncommand not in self.entryvars: self.entryvars[btncommand] = IntVar(value=0)
b = Button(parent,text=btntext, command=lambda:self.button_click(btncommand), width=btnwidth, style=str('%s.TButton' %style))
b.pack(side=side)
return b
def newframe_Tx2(self, parent, treetext='not set', treelabel_left='left tree', treelabel_right='right tree', width=10, treeheight=5, style = 'default'):
f = Frame(parent, padding=6, style=str('%s.TFrame' %style))
Label(f, padding=0, text=treetext, anchor=S, style=str('%s.TLabel' %style)).pack(side=TOP, expand=False)
leftname = 'tv_%s_left' %treetext.replace(' ','_').replace('(','').replace(')','').lower().strip()
rightname = 'tv_%s_right' %treetext.replace(' ','_').replace('(','').replace(')','').lower().strip()
tL = Treeview(f, height=treeheight, style=str('%s.Treeview' %style))
tR = Treeview(f, height=treeheight, style=str('%s.Treeview' %style))
tL.column("#0", width=width) #, minwidth=int(width*0.9))
tR.column("#0", width=width) #, minwidth=int(width*0.9))
tL.heading("#0",text=treelabel_left, anchor=W)
tR.heading("#0",text=treelabel_right, anchor=W)
tL.bind("<<TreeviewSelect>>", lambda event: self.button_click(name=leftname , selected=tL.item(tL.focus())['text'] ))
tR.bind("<<TreeviewSelect>>", lambda event: self.button_click(name=rightname, selected=tR.item(tR.focus())['text'] ))
self.entryvars[leftname] = tL
self.entryvars[rightname] = tR
tL.pack(side=LEFT, fill=BOTH, expand=True)
tR.pack(side=RIGHT, fill=BOTH, expand=True)
if self.debug: print('created Tx2: %s' %treetext)
return f
def separator(self, parent, style='default', orient='h', width=3):
o = HORIZONTAL
if orient[:1].strip().lower() == 'v': o = VERTICAL
#s = Separator(parent, orient=o, style='%s.TSeparator' %style)
s = Frame(parent, borderwidth=width, style='%s-sep.TFrame' %style)
s.pack(fill=X, expand=True)
def newImage(self, parent, image_name='', format=True):
i = self.images[image_name]
x = int(i['X']*i['scale'])
y = int(i['Y']*i['scale'])
c = Canvas(parent, width=x+20, height=y+20, bg='#394951', bd=0, highlightthickness=0)
pix = os.path.join(os.path.dirname(tdcsm.__file__), i['file'])
try:
if format:
img = Image.open(pix).resize((x,y), Image.ANTIALIAS)
i['object'] = ImageTk.PhotoImage(img)
else:
img = PhotoImage(file=pix)
i['object'] = img
c.create_image(10,10, anchor=NW, image=i['object'])
print('created Image: %s' %image_name)
except:
print('Image Load Failed:', pix)
Label(c, text=i['alttext'], anchor=CENTER).pack(side=TOP, fill=BOTH, expand=True)
return c
# =================== END: MAKE NEW GUI OBJECT COLLECTIONS ==============================
# =================== START: HELPER FUNCTIONS =============================
# TODO: Move all of these to the tdcsm.utils, and import back here.
def first_file_that_exists(self, *args):
for file in args:
if os.path.isfile(os.path.join(self.approot, file)): return file
return ''
def split_dict(self, dict2split={}, delim_key='active', use_subdict='', default='<missing>', addifmissing=[]):
"""splits a supplied dictionary into multiple dictionaries based on some child value,
then adds the split dictionaries under new parents for return, per delim_key found.
For example:
pets = {}
pets['spot'] = {'active':'True', 'type':'dog'}
pets['jane'] = {'active':'True', 'type':'cat'}
pets['lucky'] = {'active':'False', 'type':'cat', 'legs':'3'
,'fleas':{
'bobby': {'active':'False'},
'susie': {'active':'False'},
'bitey': {'active':'True'} }}
print('By active key')
print( split_dict(pets, delim_key='active'))
{'True': { 'spot': {'active': 'True', 'type': 'dog'},
'jane': {'active': 'True', 'type': 'cat'}},
'False': { 'lucky': {'active': 'False', 'type': 'cat', 'legs': '3'
,'fleas':{'bobby': {'active':'False'},
'susie': {'active':'False'},
'bitey': {'active':'True'} }}}}
print('By type key')
print( split_dict(pets, delim_key='type'))
{'dog': { 'spot': {'active': 'True', 'type': 'dog'}},
'cat': { 'jane': {'active': 'True', 'type': 'cat'},
'lucky': {'active': 'False', 'type': 'cat', 'legs': '3'
,'fleas':{'bobby': {'active':'False'},
'susie': {'active':'False'},
'bitey': {'active':'True'} }}}}
# can also add default values, if delim_key is not found:
print('By leg count, with default')
print( split_dict(pets, delim_key='legs', default='4' ))
{'4': { 'spot': {'active': 'True', 'type': 'dog'}
'jane': {'active': 'True', 'type': 'cat'}},
'3': { 'lucky': {'active': 'False', 'type': 'cat', 'legs': '3'
,'fleas':{'bobby': {'active':'False'},
'susie': {'active':'False'},
'bitey': {'active':'True'} }}}}
# can also use child dictionaries, instead of supplied dictionary:
print('For fleas sub-dictionary, if found')
print( split_dict(pets, delim_key='active', use_subdict='fleas', default='False'))
{'False': { 'bobby': {'active': 'False'},
'susie': {'active': 'False'}},
'True': { 'bitey': {'active': 'True'}}}
# can also gaurantee keys in the return set, even if there is no data:
print('ensure you always have 4 pet types, even if empty')
print( split_dict(pets, delim_key='type', addifmissing=['cat','dog','bird','gerbil']))
{'dog': { 'spot': {'active': 'True', 'type': 'dog'}},
'cat': { 'jane': {'active': 'True', 'type': 'cat'},
'lucky': {'active': 'False', 'type': 'cat', 'legs': '3'
,'fleas':{'bobby': {'active':'False'},
'susie': {'active':'False'},
'bitey': {'active':'True'} }}}
'bird': {}
'gerbil': {} }
"""
rtn = {}
# build working dict
workdict = {}
if (use_subdict == ''):
| |
# repository: djfkahn/MemberHubDirectoryTools
import unittest
from unittest.mock import patch
import os
import family
import hub_map_tools
import roster
import person
# Shared fixtures: resolve the hub-map csv that lives next to these tests and
# build one hub map plus one Roster reused by every test class below.
data_file_path = os.path.abspath("./family_tests/")
hub_file_name = data_file_path + "/hub_map.csv"
common_hub_map = hub_map_tools.ReadHubMapFromFile(hub_file_name)
# Roster() prompts on stdin during construction; patch input so the prompt is
# answered 'y' automatically.
with patch('builtins.input', side_effect=['y']):
    common_RosterC = roster.Roster()
class UT_01_AddAdultsFromCombinedField(unittest.TestCase):
    """Tests for RosterFamily.AddAdultsFromCombinedField parsing of the
    combined adults-name field ("A and B C" vs "A C")."""

    def _assert_adult(self, adult, first, last, hubs, relation):
        """Check one parsed adult's name, hub assignment, and family relation."""
        self.assertEqual(first, adult.first_name)
        self.assertEqual(last, adult.last_name)
        self.assertEqual(hubs, adult.hubs)
        self.assertEqual(relation, adult.family_relation)

    def test_01_two_parents(self):
        # "A and B C" splits into two adults sharing the last name
        result = family.RosterFamily(adults_raw_name='A and B C')
        result.AddAdultsFromCombinedField('<NAME>', 'A and B C', common_hub_map, common_RosterC)
        self.assertEqual(2, len(result.adults))
        self._assert_adult(result.adults[0], 'A', 'C', ['0000'], 'Adult')
        self._assert_adult(result.adults[1], 'B', 'C', ['0000'], 'Adult2')
        self.assertEqual(0, len(result.children))

    def test_02_one_parent(self):
        # a single "A C" yields exactly one adult
        result = family.RosterFamily(adults_raw_name='A C')
        result.AddAdultsFromCombinedField('<NAME>', 'A C', common_hub_map, common_RosterC)
        self.assertEqual(1, len(result.adults))
        self._assert_adult(result.adults[0], 'A', 'C', ['1111'], 'Adult')
        self.assertEqual(0, len(result.children))
class UT_02_Roster_AddToFamily(unittest.TestCase):
def test_01_two_parents(self):
result = family.RosterFamily(adults_raw_name='A and B C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A and B C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(2, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['0000'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual('B', result.adults[1].first_name)
self.assertEqual('C', result.adults[1].last_name)
self.assertEqual(['0000'],result.adults[1].hubs)
self.assertEqual('Adult2',result.adults[1].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['0000'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_02_one_parent(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['1111'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['1111'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_03_6th_grader(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '6',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['6666'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['6666'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_04_8th_grader(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '8',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['8888'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['8888'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_05_9th_grader(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '9',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(0, len(result.adults))
self.assertEqual(0, len(result.children))
def test_06_Unknown_Teacher(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '5',
adult_names = 'A C',
teacher_name = 'Unknown Teacher',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(0, len(result.adults))
self.assertEqual(0, len(result.children))
class UT_03_Directory_AddToFamily(unittest.TestCase):
def test_01_adult_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual('1234', result.adults[0].person_id)
self.assertEqual('5678', result.adults[0].family_id)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual('email',result.adults[0].email)
self.assertEqual(['0000'],result.adults[0].hubs)
self.assertEqual(0, len(result.children))
def test_02_child_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(0, len(result.adults))
self.assertEqual(1, len(result.children))
def test_03_adult_lower_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(1, len(result.adults))
self.assertEqual(0, len(result.children))
def test_04_child_lower_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(0, len(result.adults))
self.assertEqual(1, len(result.children))
def test_05_other_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Other',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(0, len(result.adults))
self.assertEqual(0, len(result.children))
class UT_04_IsSameFamily(unittest.TestCase):
def test_01_same_family(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
def test_02_same_adult_different_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = '<NAME>',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
def test_03_directory_orphan(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.IsSameFamily(that))
def test_04_roster_orphan(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name=' ')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = ' ',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.IsSameFamily(that))
def test_05_different_adult_same_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='E C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'E C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.IsSameFamily(that))
def test_06_more_adults_in_directory(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1236',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = '<NAME>',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
def test_07_more_adults_in_roster(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
| |
# GitHub stars: 0
# encoding=utf8
"""
Module containing front-end function with several
algorithm implementations of QR decomposition defined as methods in
the QR class.
"""
from functools import reduce
import numpy as np
from scipy.linalg import block_diag
from mathpy.linalgebra.norm import norm
from mathpy._lib import _create_array
def qr(x, method=None):
r"""
Interface to QR class for performing QR decomposition.
Several methods are available for computing the QR decomposition,
including Householder reflections and multiple implementations of
Gram-Schmidt. Please see the QR class for more on each QR method.
Parameters
----------
x : array_like
Accepts a list, nested list, dictionary, pandas DataFrame or
pandas Series. The private function _create_array is called
to create a copy of x as a numpy array.
method : {'householder', 'mgs', 'gramschmidt'}, optional
Default method for performing QR decomposition is Householder reflections.
Please refer to the QR class for implementation of these methods.
Returns
-------
qr : tuple
Returns a tuple containing the orthogonal matrix Q and the upper-triangular
matrix R resulting from QR decomposition.
Notes
-----
QR decomposition plays an important role in many statistical techniques such as least
squares estimation. Also called QR factorization, the method is a procedure to decompose
a matrix :math:`A` into a product :math:`A = QR` where :math:`Q` is an orthogonal
:math:`m \times n` matrix and :math:`R` is an upper triangular :math:`n \times n` matrix.
There are several methods to computing the QR decomposition of a matrix, of which the most
common is utilizing Householder reflections due to its relative speed and numerical stability.
Other methods include the Modified Gram-Schmidt Orthogonalization process and Givens rotation.
Examples
--------
>>> import pandas as pd
>>> a = pd.DataFrame({0: [2,2,1], 1: [-2,1,2], 2: [18,0,0]})
>>> q, r = qr(a)
>>> q
array([[-0.66666667, 0.66666667, -0.33333333],
[-0.66666667, -0.33333333, 0.66666667],
[-0.33333333, -0.66666667, -0.66666667]])
>>> r
array([[ -3.00000000e+00, 2.22044605e-16, -1.20000000e+01],
[ -1.16573418e-16, -3.00000000e+00, 1.20000000e+01],
[ 1.55431223e-16, 5.32907052e-17, -6.00000000e+00]])
>>> b = [[1,0,1], [0,1,0]]
>>> q_b, r_b = qr(b)
>>> q_b
array([[-0.70710678, 0. ],
[ 0. , -1. ],
[-0.70710678, 0. ]])
>>> r_b
array([[-1.41421356, 0. ],
[ 0. , -1. ]])
"""
x = _QR(x)
if method is None:
f = getattr(x, x.method, None)
else:
try:
f = getattr(x, method, x.method)
except ValueError:
print('no attribute with name ' + str(method))
raise
return f()
class _QR(object):
r"""
Class containing several implementations for performing QR decomposition.
These methods include Householder reflections and Modified and Classical
Gram-Schmidt.
Parameters
----------
x : array_like
Accepts a list, nested list, dictionary, pandas DataFrame or
pandas Series. The private function _create_array is called
to create a copy of x as a numpy array.
Methods
-------
householder()
Performs Householder reflection approach to QR decomposition.
mgs()
Implements the Modified Gram-Schmidt procedure for performing
QR decomposition
gramschmidt()
Implementation of the Classical Gram-Schmidt procedure
Notes
-----
See specific methods in class for more details on implementations.
"""
def __init__(self, x):
self.x = _create_array(x)[0]
self.m = self.x.shape[0]
self.n = self.x.shape[1]
self.r = np.zeros((self.n, self.n))
self.q = np.zeros((self.m, self.n))
self.method = 'householder'
def householder(self):
r"""
Implementation of Householder reflections method to performing QR
decomposition.
Returns
-------
qr : tuple
Returns a tuple containing the orthogonal matrix Q and the upper-triangular
matrix R resulting from QR decomposition.
Notes
-----
The Householder reflection approach to QR decomposition is the more common approach
due to its numerical stability compared to Gram-Schmidt and its relative speed to
Givens rotations. The orthogonal matrix :math:`Q` is defined as successive Householder
matrices :math:`H_1 \cdots H_n` while :math:`R` is upper triangular, defined as
:math:`R = Q^T A`.
Householder matrices :math:`H` are defined as:
.. math::
H = I - 2vv^T
References
----------
<NAME>., & <NAME>. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
Householder transformation. (2017, March 19). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Householder_transformation&oldid=771169379
<NAME>., & <NAME>. (1997). Numerical linear algebra (1st ed.). Philadelphia: SIAM.
"""
h = []
r = self.x.copy()
if self.m > self.n:
c = self.n
else:
c = self.m
for j in np.arange(c):
hj = _householder_mat(r[j:self.m, j])
if j > 0:
hj = block_diag(np.eye(j), hj)
r = np.dot(hj, r)
h.append(hj)
self.q = reduce(np.dot, reversed(h))[0:self.n].T
r = np.array(r)[0:self.n]
qr = (self.q, r)
return qr
def mgs(self):
r"""
Implementation of the Modified Gram-Schmidt procedure for computing the QR decomposition.
The modified procedure is more numerically stable than the classic Gram-Schmidt process, but
is still less stable than the Householder reflection approach.
Returns
-------
qr : tuple
Returns a tuple containing the orthogonal matrix Q and the upper-triangular
matrix R resulting from QR decomposition.
Notes
-----
The Modified Gram-Schmidt algorithm for deomposing a matrix into a product of an
orthogonal matrix :math:`Q` and an upper-triangular matrix :math:`R` is essentially
a rearrangement of the Classical Gram-Schmidt algorithm that has much more stable
numerical properties (the resulting matrix :math:`Q` is actually orthogonal).
References
----------
Gram–Schmidt process. (2017, April 4). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Gram%E2%80%93Schmidt_process&oldid=773752446
<NAME>., & <NAME>. (2013). Matrix computations (1st ed.). Baltimore (MD): Johns Hopkins U.P.
<NAME>., & <NAME>. (1997). Numerical linear algebra (1st ed.). Philadelphia: SIAM.
"""
for j in np.arange(self.n):
v = self.x[:, j]
for i in np.arange(j):
self.r[i, j] = np.dot(np.transpose(self.q[:, i]), v)
v = v - self.r[i, j] * self.q[:, i]
self.r[j, j] = norm(v)
self.q[:, j] = v / self.r[j, j]
qr = (self.q, self.r)
return qr
def gramschmidt(self):
r"""
Implementation of the classic Gram-Schmidt algortihm of QR decomposition. Returns
the 'thin' QR matrix.
Returns
-------
qr : tuple
Returns a tuple containing the orthogonal matrix Q and the upper-triangular
matrix R resulting from QR decomposition.
Notes
-----
The Classical Gram-Schmidt algorithm is another method for computing the :math:`QR`
decomposition. The classical method has very poor numerical properties which often
result in a non-orthogonal :math:`q` matrix. Thus, it is not recommended to employ
the classical method in practice, but is presented here for completeness. For a
full rank matrix :math:`A`, the :math:`QR` decomposition can be directly computed
by solving for :math:`q_k`:
.. math::
q_k = (a_k - \sum^{k-1}_{i=1} r_{ik}q_i) / r_{kk}
Where :math:`z_k` is considered a unit 2-norm vector in the direction of :math:`z_k`.
.. math::
z_k = a_k - \sum^{k-1}_{i=1} r_{ik} q_i
References
----------
Gram–Schmidt process. (2017, April 4). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Gram%E2%80%93Schmidt_process&oldid=773752446
<NAME>., & <NAME>. (2013). Matrix computations (1st ed.). Baltimore (MD): Johns Hopkins U.P.
"""
a = self.x.copy()
self.r[0,0] = norm(a[:, 0])
self.q[:, 0] = a[:, 0] / float(self.r[0,0])
for k in np.arange(1, self.n):
self.r[:k-1, k] = np.dot(np.transpose(self.q[:self.m, :k-1]), a[:self.m, k])
z = a[:self.m, k] - np.dot(self.q[:self.m, :k-1], self.r[:k -1, k])
self.r[k,k] = norm(z)
self.q[:self.m, k] = z / float(self.r[k,k])
qr = (self.q, self.r)
return qr
# TODO: Complete Reorthogonalization methods for QR decomposition
# def reorthomgs(self):
# for j in np.arange(self.n):
# tt = 0
# t = norm(self.q[j])
# reorth = 1
#
# while reorth:
# if j > 1:
# for i in np.arange(j):
# v = np.dot(np.transpose(self.q[i]), self.q[j])
# if tt == 0:
# self.r[i, j] = v
# self.q[j] = self.q[j] - v * self.q[i]
#
# tt = norm(self.q[j])
# reorth = 0
# if tt < t / 10:
# t = tt
# reorth = 1
#
# self.r[j, j] = tt
# self.q[j] = self.q[j] / self.r[j, j]
#
# qr = (self.q, self.r)
#
# return qr
#
# def reorthomgs2(self):
# z = []
# for j in np.arange(self.n):
# t = norm(self.q[j])
# nach = 1
# u = 0
# while nach:
# u += 1
# for i in np.arange(j):
# s = np.dot(np.transpose(self.q[i]), self.q[j])
# self.r[i, j] = self.r[i, j] + s
# self.q[j] = self.q[j] - s * self.q[i]
#
# tt = norm(self.q[j])
# if tt > 10 * np.finfo(float).eps * t & tt < t / 10:
# nach = 1
# t = tt
# else:
# nach = 0
# if tt < 10 * np.finfo(float).eps * t:
# tt = 0
#
# z.append(u)
#
# self.r[j, j] = tt
#
# if tt * np.finfo(float).eps == 0:
# tt = 1 / tt
# else:
# tt = 0
#
# | |
if fc != 3:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('field_not_nullable', 'not_null')
f.SetGeomFieldDirectly('geomfield_not_nullable', ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
# Not Nullable geometry field
lyr = ds.CreateLayer('test4', geom_type = ogr.wkbPoint, options = ['GEOMETRY_NULLABLE=NO'] )
if lyr.GetLayerDefn().GetGeomFieldDefn(0).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
ds = ogr.Open('/vsimem/ogr_gpkg_23.gpkg')
lyr = ds.GetLayerByName('test')
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_not_nullable')).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_nullable')).IsNullable() != 1:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetGeomFieldDefn(lyr.GetLayerDefn().GetGeomFieldIndex('geomfield_not_nullable')).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayerByName('test2')
if lyr.GetLayerDefn().GetGeomFieldDefn(0).IsNullable() != 1:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayerByName('test3')
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_not_nullable')).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_nullable')).IsNullable() != 1:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetGeomFieldDefn(lyr.GetLayerDefn().GetGeomFieldIndex('geomfield_not_nullable')).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayerByName('test4')
if lyr.GetLayerDefn().GetGeomFieldDefn(0).IsNullable() != 0:
gdaltest.post_reason('fail')
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_gpkg_23.gpkg')
return 'success'
###############################################################################
# Test default values
def ogr_gpkg_24():
if gdaltest.gpkg_dr is None:
return 'skip'
ds = gdaltest.gpkg_dr.CreateDataSource('/vsimem/ogr_gpkg_24.gpkg')
lyr = ds.CreateLayer('test', geom_type = ogr.wkbNone)
field_defn = ogr.FieldDefn( 'field_string', ogr.OFTString )
field_defn.SetDefault("'a''b'")
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_int', ogr.OFTInteger )
field_defn.SetDefault('123')
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_real', ogr.OFTReal )
field_defn.SetDefault('1.23')
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_nodefault', ogr.OFTInteger )
lyr.CreateField(field_defn)
# This will be translated as "(strftime('%Y-%m-%dT%H:%M:%fZ','now'))"
field_defn = ogr.FieldDefn( 'field_datetime', ogr.OFTDateTime )
field_defn.SetDefault("CURRENT_TIMESTAMP")
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_datetime2', ogr.OFTDateTime )
field_defn.SetDefault("'2015/06/30 12:34:56'")
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_datetime3', ogr.OFTDateTime )
field_defn.SetDefault("(strftime('%Y-%m-%dT%H:%M:%fZ','now'))")
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_datetime4', ogr.OFTDateTime )
field_defn.SetDefault("'2015/06/30 12:34:56.123'")
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn( 'field_date', ogr.OFTDate )
field_defn.SetDefault("CURRENT_DATE")
lyr.CreateField(field_defn)
#field_defn = ogr.FieldDefn( 'field_time', ogr.OFTTime )
#field_defn.SetDefault("CURRENT_TIME")
#lyr.CreateField(field_defn)
f = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(f)
f = None
# Doesn't work currently. Would require rewriting the whole table
#field_defn = ogr.FieldDefn( 'field_datetime4', ogr.OFTDateTime )
#field_defn.SetDefault("CURRENT_TIMESTAMP")
#lyr.CreateField(field_defn)
ds = None
ds = ogr.Open('/vsimem/ogr_gpkg_24.gpkg', update = 1)
lyr = ds.GetLayerByName('test')
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_string')).GetDefault() != "'a''b'":
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_int')).GetDefault() != '123':
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_real')).GetDefault() != '1.23':
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_nodefault')).GetDefault() is not None:
gdaltest.post_reason('fail')
return 'fail'
# Translated from "(strftime('%Y-%m-%dT%H:%M:%fZ','now'))" to CURRENT_TIMESTAMP
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime')).GetDefault() != 'CURRENT_TIMESTAMP':
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime2')).GetDefault() != "'2015/06/30 12:34:56'":
gdaltest.post_reason('fail')
print(lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime2')).GetDefault())
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime3')).GetDefault() != "CURRENT_TIMESTAMP":
gdaltest.post_reason('fail')
print(lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime3')).GetDefault())
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime4')).GetDefault() != "'2015/06/30 12:34:56.123'":
gdaltest.post_reason('fail')
print(lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_datetime4')).GetDefault())
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_date')).GetDefault() != "CURRENT_DATE":
gdaltest.post_reason('fail')
return 'fail'
#if lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_time')).GetDefault() != "CURRENT_TIME":
# gdaltest.post_reason('fail')
# return 'fail'
f = lyr.GetNextFeature()
if f.GetField('field_string') != 'a\'b' or f.GetField('field_int') != 123 or \
f.GetField('field_real') != 1.23 or \
f.IsFieldSet('field_nodefault') or not f.IsFieldSet('field_datetime') or \
f.GetField('field_datetime2') != '2015/06/30 12:34:56+00' or \
f.GetField('field_datetime4') != '2015/06/30 12:34:56.123+00' or \
not f.IsFieldSet('field_datetime3') or \
not f.IsFieldSet('field_date'):
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_gpkg_24.gpkg')
return 'success'
###############################################################################
# Test creating a field with the fid name
def ogr_gpkg_25():
if gdaltest.gpkg_dr is None:
return 'skip'
ds = gdaltest.gpkg_dr.CreateDataSource('/vsimem/ogr_gpkg_25.gpkg')
lyr = ds.CreateLayer('test', geom_type = ogr.wkbNone, options = ['FID=myfid'])
lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
gdal.PushErrorHandler()
ret = lyr.CreateField(ogr.FieldDefn('myfid', ogr.OFTString))
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
ret = lyr.CreateField(ogr.FieldDefn('myfid', ogr.OFTInteger))
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
lyr.CreateField(ogr.FieldDefn('str2', ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('str', 'first string')
feat.SetField('myfid', 10)
feat.SetField('str2', 'second string')
ret = lyr.CreateFeature(feat)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
if feat.GetFID() != 10:
gdaltest.post_reason('fail')
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('str2', 'second string')
ret = lyr.CreateFeature(feat)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
if feat.GetFID() < 0:
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
if feat.GetField('myfid') != feat.GetFID():
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
feat.SetField('str', 'foo')
ret = lyr.SetFeature(feat)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetFID(1)
feat.SetField('myfid', 10)
gdal.PushErrorHandler()
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetFeature(feat)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
feat.UnsetField('myfid')
gdal.PushErrorHandler()
ret = lyr.SetFeature(feat)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
lyr.ResetReading()
f = lyr.GetNextFeature()
if f.GetFID() != 10 or f.GetField('str') != 'first string' or f.GetField('str2') != 'second string' or f.GetField('myfid') != 10:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetFeature(f.GetFID())
if f.GetFID() != 10 or f.GetField('str') != 'first string' or f.GetField('str2') != 'second string' or f.GetField('myfid') != 10:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = None
ds = None
gdaltest.gpkg_dr.DeleteDataSource('/vsimem/ogr_gpkg_25.sqlite')
return 'success'
###############################################################################
# Test dataset transactions
def ogr_gpkg_26():
    # Test dataset transactions on a GeoPackage datasource: rollback and
    # commit of both schema changes (layer/field creation) and feature
    # writes, plus error reporting for unbalanced transaction calls.
    if gdaltest.gpkg_dr is None:
        return 'skip'

    ds = gdaltest.gpkg_dr.CreateDataSource('/vsimem/ogr_gpkg_26.gpkg')
    # The driver must advertise dataset-level transaction support.
    if ds.TestCapability(ogr.ODsCTransactions) != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    ret = ds.StartTransaction()
    if ret != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # A second StartTransaction() while one is already active must fail.
    gdal.PushErrorHandler()
    ret = ds.StartTransaction()
    gdal.PopErrorHandler()
    if ret == 0:
        gdaltest.post_reason('fail')
        return 'fail'

    # Create a layer inside the transaction, then roll everything back.
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
    ret = ds.RollbackTransaction()
    if ret != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # Rolling back when no transaction is active must fail.
    gdal.PushErrorHandler()
    ret = ds.RollbackTransaction()
    gdal.PopErrorHandler()
    if ret == 0:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None

    # Reopen: after the rollback the layer must not exist.
    ds = ogr.Open('/vsimem/ogr_gpkg_26.gpkg', update = 1)
    if ds.GetLayerCount() != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    ret = ds.StartTransaction()
    if ret != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # Nested StartTransaction() must fail here as well.
    gdal.PushErrorHandler()
    ret = ds.StartTransaction()
    gdal.PopErrorHandler()
    if ret == 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # This time create the layer and commit it.
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
    ret = ds.CommitTransaction()
    if ret != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # Committing when no transaction is active must fail.
    gdal.PushErrorHandler()
    ret = ds.CommitTransaction()
    gdal.PopErrorHandler()
    if ret == 0:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None

    # Reopen: the committed layer must persist.
    ds = ogr.Open('/vsimem/ogr_gpkg_26.gpkg', update = 1)
    if ds.GetLayerCount() != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    lyr = ds.GetLayerByName('test')

    # Feature-level rollback: the inserted feature must disappear.
    ds.StartTransaction()
    lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
    lyr.ResetReading()
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if lyr.GetFeatureCount() != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    ds.RollbackTransaction()
    if lyr.GetFeatureCount() != 0:
        gdaltest.post_reason('fail')
        return 'fail'

    # Feature-level commit; also checks that an open read cursor survives
    # the commit.
    ds.StartTransaction()
    lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
    lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
    lyr.ResetReading()
    f = lyr.GetNextFeature()
    if f is None or f.GetFID() != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    ds.CommitTransaction()
    # the cursor is still valid after CommitTransaction(), which isn't the case for other backends such as PG !
    f = lyr.GetNextFeature()
    if f is None or f.GetFID() != 2:
        gdaltest.post_reason('fail')
        return 'fail'
    if lyr.GetFeatureCount() != 2:
        gdaltest.post_reason('fail')
        return 'fail'

    # Layer creation plus feature insertion in a single transaction.
    ds.StartTransaction()
    lyr = ds.CreateLayer('test2', geom_type = ogr.wkbPoint)
    lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
    ret = lyr.CreateFeature(f)
    ds.CommitTransaction()
    if ret != 0:
        gdaltest.post_reason('fail')
        return 'fail'

    # Insert into the already-created layer within another transaction.
    ds.StartTransaction()
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
    ret = lyr.CreateFeature(f)
    ds.CommitTransaction()
    if ret != 0:
        gdaltest.post_reason('fail')
        return 'fail'

    # Disabled scenario (kept for reference): mixing layer creation,
    # ExecuteSQL() and commits used to misbehave on old SQLite versions.
    if False:
        ds.StartTransaction()
        lyr = ds.CreateLayer('test3', geom_type = ogr.wkbPoint)
        lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
        ret = lyr.CreateFeature(f)
        #ds.CommitTransaction()
        ds.ReleaseResultSet(ds.ExecuteSQL('SELECT 1'))
        #ds = None
        #ds = ogr.Open('/vsimem/ogr_gpkg_26.gpkg', update = 1)
        #lyr = ds.GetLayerByName('test3')
        #ds.StartTransaction()
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
        ret = lyr.CreateFeature(f)
        ds.CommitTransaction()
        # For some reason fails with SQLite 3.6.X with 'failed to execute insert : callback requested query abort'
        # but not with later versions...
        if ret != 0:
            gdaltest.post_reason('fail')
            return 'fail'

    ds = None
    gdaltest.gpkg_dr.DeleteDataSource('/vsimem/ogr_gpkg_26.gpkg')
    return 'success'
###############################################################################
# Test interface with Spatialite
def ogr_gpkg_27():
    # Test interface with Spatialite: GeomFromGPB() must turn a GeoPackage
    # geometry blob into a geometry that round-trips back intact.
    if gdaltest.gpkg_dr is None:
        return 'skip'

    ds = gdaltest.gpkg_dr.CreateDataSource('/vsimem/ogr_gpkg_27.gpkg')

    # Probe for Spatialite support: GeomFromGPB() is only registered when
    # the driver was built against Spatialite.
    gdal.PushErrorHandler()
    probe_lyr = ds.ExecuteSQL("SELECT GeomFromGPB(null)")
    gdal.PopErrorHandler()
    if probe_lyr is None:
        # No Spatialite available: clean up and skip.
        ds = None
        gdaltest.gpkg_dr.DeleteDataSource('/vsimem/ogr_gpkg_27.gpkg')
        return 'skip'
    ds.ReleaseResultSet(probe_lyr)

    # Write a single point feature.
    layer = ds.CreateLayer('test')
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
    layer.CreateFeature(feature)

    # Read it back through GeomFromGPB() and compare the WKT.
    result_lyr = ds.ExecuteSQL('SELECT GeomFromGPB(geom) FROM test')
    feature = result_lyr.GetNextFeature()
    if feature.GetGeometryRef().ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        feature.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(result_lyr)

    ds = None
    gdaltest.gpkg_dr.DeleteDataSource('/vsimem/ogr_gpkg_27.gpkg')
    return 'success'
###############################################################################
# Test ogr2ogr -a_srs (the geopackage driver doesn't clone the passed SRS but
# increments/decrements its reference count, which can exhibit issues in
# GDALVectorTranslate())
def ogr_gpkg_28():
    # Test ogr2ogr -a_srs behaviour: the GeoPackage driver doesn't clone
    # the passed SRS but increments/decrements its reference count, which
    # can exhibit issues in GDALVectorTranslate(). Reprojecting to
    # EPSG:4326 must leave the output layer with a WGS 84 ('1984') SRS.
    if gdaltest.gpkg_dr is None:
        return 'skip'

    srcDS = gdal.OpenEx('../ogr/data/poly.shp')
    ds = gdal.VectorTranslate('/vsimem/ogr_gpkg_28.gpkg', srcDS, format = 'GPKG', dstSRS='EPSG:4326')
    if str(ds.GetLayer(0).GetSpatialRef()).find('1984') == -1:
        # Fix: record the failure reason before returning, consistently with
        # every other ogr_gpkg_* test in this file (was a bare return).
        gdaltest.post_reason('fail')
        return 'fail'

    ds = None
    gdaltest.gpkg_dr.DeleteDataSource('/vsimem/ogr_gpkg_28.gpkg')
    return 'success'
###############################################################################
# Test XYM / XYZM support
def ogr_gpkg_29():
if gdaltest.gpkg_dr is None:
return 'skip'
ds = gdaltest.gpkg_dr.CreateDataSource('/vsimem/ogr_gpkg_29.gpkg')
if ds.TestCapability(ogr.ODsCMeasuredGeometries) != 1:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.CreateLayer('pointm', geom_type = ogr.wkbPointM)
if lyr.TestCapability(ogr.OLCMeasuredGeometries) != 1:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT M (1 2 3)'))
lyr.CreateFeature(f)
lyr = ds.CreateLayer('pointzm', geom_type = ogr.wkbPointZM)
if lyr.TestCapability(ogr.OLCMeasuredGeometries) != 1:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT ZM (1 2 3 4)'))
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_gpkg_29.gpkg', update = 1)
lyr = ds.GetLayerByName('pointm')
if lyr.GetGeomType() != ogr.wkbPointM:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT M (1 2 3)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Generate a XYM envelope
ds.ExecuteSQL("UPDATE pointm SET geom = x'4750000700000000000000000000F03F000000000000F03F000000000000004000000000000000400000000000000840000000000000084001D1070000000000000000F03F00000000000000400000000000000840'")
lyr = ds.GetLayerByName('pointzm')
if lyr.GetGeomType() != ogr.wkbPointZM:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT ZM (1 2 3 4)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Generate a XYZM envelope
ds.ExecuteSQL("UPDATE pointzm SET geom = x'4750000900000000000000000000F03F000000000000F03F00000000000000400000000000000040000000000000084000000000000008400000000000001040000000000000104001B90B0000000000000000F03F000000000000004000000000000008400000000000001040'")
ds = None
# Check again
ds = | |
import json
import logging
import subprocess
import mdtraj.version
import netCDF4 as nc
import numpy as np
import parmed
import simtk.openmm.version
import yaml
from mdtraj.formats.hdf5 import HDF5TrajectoryFile
from mdtraj.utils import ensure_type, in_units_of
from parmed.amber.netcdffiles import NetCDFTraj
from blues import reporters
######################
# REPORTER FORMATS #
######################
class LoggerFormatter(logging.Formatter):
"""
Formats the output of the `logger.Logger` object. Allows customization
for customized logging levels. This will add a custom level 'REPORT'
to all custom BLUES reporters from the `blues.reporters` module.
Examples
--------
Below we add a custom level 'REPORT' and have the logger module stream the
message to `sys.stdout` without any additional information to our custom
reporters from the `blues.reporters` module
>>> from blues import reporters
>>> from blues.formats import LoggerFormatter
>>> import logging, sys
>>> logger = logging.getLogger(__name__)
>>> reporters.addLoggingLevel('REPORT', logging.WARNING - 5)
>>> fmt = LoggerFormatter(fmt="%(message)s")
>>> stdout_handler = logging.StreamHandler(stream=sys.stdout)
>>> stdout_handler.setFormatter(fmt)
>>> logger.addHandler(stdout_handler)
>>> logger.report('This is a REPORT call')
This is a REPORT call
>>> logger.info('This is an INFO call')
INFO: This is an INFO call
"""
dbg_fmt = "%(levelname)s: [%(module)s.%(funcName)s] %(message)s"
info_fmt = "%(levelname)s: %(message)s"
rep_fmt = "%(message)s"
def __init__(self):
super().__init__(fmt="%(levelname)s: %(msg)s", datefmt="%H:%M:%S", style='%')
reporters.addLoggingLevel('REPORT', logging.WARNING - 5)
def format(self, record):
# Save the original format configured by the user
# when the logger formatter was instantiated
format_orig = self._style._fmt
# Replace the original format with one customized by logging level
if record.levelno == logging.DEBUG:
self._style._fmt = LoggerFormatter.dbg_fmt
elif record.levelno == logging.INFO:
self._style._fmt = LoggerFormatter.info_fmt
elif record.levelno == logging.WARNING:
self._style._fmt = LoggerFormatter.info_fmt
elif record.levelno == logging.ERROR:
self._style._fmt = LoggerFormatter.dbg_fmt
elif record.levelno == logging.REPORT:
self._style._fmt = LoggerFormatter.rep_fmt
# Call the original formatter class to do the grunt work
result = logging.Formatter.format(self, record)
# Restore the original format configured by the user
self._style._fmt = format_orig
return result
class BLUESHDF5TrajectoryFile(HDF5TrajectoryFile):
"""
Extension of the `mdtraj.formats.hdf5.HDF5TrajectoryFile` class which
handles the writing of the trajectory data to the HDF5 file format.
Additional features include writing NCMC related data to the HDF5 file.
Parameters
----------
filename : str
The filename for the HDF5 file.
mode : str, default='r'
The mode to open the HDF5 file in.
force_overwrite : bool, default=True
If True, overwrite the file if it already exists
compression : str, default='zlib'
Valid choices are ['zlib', 'lzo', 'bzip2', 'blosc']
"""
    def __init__(self, filename, mode='r', force_overwrite=True, compression='zlib'):
        # Thin wrapper: everything is delegated to mdtraj's
        # HDF5TrajectoryFile constructor (parameters are documented in the
        # class docstring).
        super(BLUESHDF5TrajectoryFile, self).__init__(filename, mode, force_overwrite, compression)
def write(self,
coordinates,
parameters=None,
environment=None,
time=None,
cell_lengths=None,
cell_angles=None,
velocities=None,
kineticEnergy=None,
potentialEnergy=None,
temperature=None,
alchemicalLambda=None,
protocolWork=None,
title=None):
"""Write one or more frames of data to the file
This method saves data that is associated with one or more simulation
frames. Note that all of the arguments can either be raw numpy arrays
or unitted arrays (with simtk.unit.Quantity). If the arrays are unittted,
a unit conversion will be automatically done from the supplied units
into the proper units for saving on disk. You won't have to worry about
it.
Furthermore, if you wish to save a single frame of simulation data, you
can do so naturally, for instance by supplying a 2d array for the
coordinates and a single float for the time. This "shape deficiency"
will be recognized, and handled appropriately.
Parameters
----------
coordinates : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms to write. By convention, the
lengths should be in units of nanometers.
time : np.ndarray, shape=(n_frames,), optional
You may optionally specify the simulation time, in picoseconds
corresponding to each frame.
cell_lengths : np.ndarray, shape=(n_frames, 3), dtype=float32, optional
You may optionally specify the unitcell lengths.
The length of the periodic box in each frame, in each direction,
`a`, `b`, `c`. By convention the lengths should be in units
of angstroms.
cell_angles : np.ndarray, shape=(n_frames, 3), dtype=float32, optional
You may optionally specify the unitcell angles in each frame.
Organized analogously to cell_lengths. Gives the alpha, beta and
gamma angles respectively. By convention, the angles should be
in units of degrees.
velocities : np.ndarray, shape=(n_frames, n_atoms, 3), optional
You may optionally specify the cartesian components of the velocity
for each atom in each frame. By convention, the velocities
should be in units of nanometers / picosecond.
kineticEnergy : np.ndarray, shape=(n_frames,), optional
You may optionally specify the kinetic energy in each frame. By
convention the kinetic energies should b in units of kilojoules per
mole.
potentialEnergy : np.ndarray, shape=(n_frames,), optional
You may optionally specify the potential energy in each frame. By
convention the kinetic energies should b in units of kilojoules per
mole.
temperature : np.ndarray, shape=(n_frames,), optional
You may optionally specify the temperature in each frame. By
convention the temperatures should b in units of Kelvin.
alchemicalLambda : np.ndarray, shape=(n_frames,), optional
You may optionally specify the alchemicalLambda in each frame. These
have no units, but are generally between zero and one.
protocolWork : np.ndarray, shape=(n_frames,), optional
You may optionally specify the protocolWork in each frame. These
are in reduced units of kT but are stored dimensionless
title : str
Title of the HDF5 trajectory file
"""
_check_mode(self.mode, ('w', 'a'))
# these must be either both present or both absent. since
# we're going to throw an error if one is present w/o the other,
# lets do it now.
if cell_lengths is None and cell_angles is not None:
raise ValueError('cell_lengths were given, but no cell_angles')
if cell_lengths is not None and cell_angles is None:
raise ValueError('cell_angles were given, but no cell_lengths')
# if the input arrays are simtk.unit.Quantities, convert them
# into md units. Note that this acts as a no-op if the user doesn't
# have simtk.unit installed (e.g. they didn't install OpenMM)
coordinates = in_units_of(coordinates, None, 'nanometers')
time = in_units_of(time, None, 'picoseconds')
cell_lengths = in_units_of(cell_lengths, None, 'nanometers')
cell_angles = in_units_of(cell_angles, None, 'degrees')
velocities = in_units_of(velocities, None, 'nanometers/picosecond')
kineticEnergy = in_units_of(kineticEnergy, None, 'kilojoules_per_mole')
potentialEnergy = in_units_of(potentialEnergy, None, 'kilojoules_per_mole')
temperature = in_units_of(temperature, None, 'kelvin')
alchemicalLambda = in_units_of(alchemicalLambda, None, 'dimensionless')
protocolWork = in_units_of(protocolWork, None, 'kT')
# do typechecking and shapechecking on the arrays
# this ensure_type method has a lot of options, but basically it lets
# us validate most aspects of the array. Also, we can upconvert
# on defficent ndim, which means that if the user sends in a single
# frame of data (i.e. coordinates is shape=(n_atoms, 3)), we can
# realize that. obviously the default mode is that they want to
# write multiple frames at a time, so the coordinate shape is
# (n_frames, n_atoms, 3)
coordinates = ensure_type(
coordinates,
dtype=np.float32,
ndim=3,
name='coordinates',
shape=(None, None, 3),
can_be_none=False,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
n_frames, n_atoms, = coordinates.shape[0:2]
time = ensure_type(
time,
dtype=np.float32,
ndim=1,
name='time',
shape=(n_frames, ),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_lengths = ensure_type(
cell_lengths,
dtype=np.float32,
ndim=2,
name='cell_lengths',
shape=(n_frames, 3),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(
cell_angles,
dtype=np.float32,
ndim=2,
name='cell_angles',
shape=(n_frames, 3),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
velocities = ensure_type(
velocities,
dtype=np.float32,
ndim=3,
name='velocoties',
shape=(n_frames, n_atoms, 3),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
kineticEnergy = ensure_type(
kineticEnergy,
dtype=np.float32,
ndim=1,
name='kineticEnergy',
shape=(n_frames, ),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
potentialEnergy = ensure_type(
potentialEnergy,
dtype=np.float32,
ndim=1,
name='potentialEnergy',
shape=(n_frames, ),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
temperature = ensure_type(
temperature,
dtype=np.float32,
ndim=1,
name='temperature',
shape=(n_frames, ),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
alchemicalLambda = ensure_type(
alchemicalLambda,
dtype=np.float32,
ndim=1,
name='alchemicalLambda',
shape=(n_frames, ),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
protocolWork = ensure_type(
protocolWork,
dtype=np.float32,
ndim=1,
name='protocolWork',
shape=(n_frames, ),
can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
# if this is our first call to write(), we need to create the headers
# and the arrays in the underlying HDF5 file
if self._needs_initialization:
self._initialize_headers(
n_atoms=n_atoms,
title=title,
parameters=parameters,
set_environment=(environment is not None),
set_coordinates=True,
set_time=(time is not None),
set_cell=(cell_lengths is not None or cell_angles is not None),
set_velocities=(velocities is not None),
set_kineticEnergy=(kineticEnergy is not None),
set_potentialEnergy=(potentialEnergy is not None),
set_temperature=(temperature is not None),
set_alchemicalLambda=(alchemicalLambda is not None),
set_protocolWork=(protocolWork is not None))
self._needs_initialization = False
# we need to check that that the entries that the user is trying
# to save are actually fields in OUR file
try:
# try to get the nodes for all of the fields that we have
# which are not None
for name in [
'coordinates', 'time', 'cell_angles', 'cell_lengths', 'velocities', 'kineticEnergy',
'potentialEnergy', 'temperature', 'protocolWork', 'alchemicalLambda'
]:
contents | |
"develop"
self.network_id = 5777
self.keyring_backend = "test"
self.ganache_db_path = self.cmd.get_user_home(".ganachedb")
self.sifnoded_path = self.cmd.get_user_home(".sifnoded")
# From ui/chains/credentials.sh
self.shadowfiend_name = "shadowfiend"
self.shadowfiend_mnemonic = ["race", "draft", "rival", "universe", "maid", "cheese", "steel", "logic", "crowd",
"fork", "comic", "easy", "truth", "drift", "tomorrow", "eye", "buddy", "head", "time", "cash", "swing",
"swift", "midnight", "borrow"]
self.akasha_name = "akasha"
self.akasha_mnemonic = ["hand", "inmate", "canvas", "head", "lunar", "naive", "increase", "recycle", "dog",
"ecology", "inhale", "december", "wide", "bubble", "hockey", "dice", "worth", "gravity", "ketchup", "feed",
"balance", "parent", "secret", "orchard"]
self.juniper_name = "juniper"
self.juniper_mnemonic = ["clump", "genre", "baby", "drum", "canvas", "uncover", "firm", "liberty", "verb",
"moment", "access", "draft", "erupt", "fog", "alter", "gadget", "elder", "elephant", "divide", "biology",
"choice", "sentence", "oppose", "avoid"]
self.ethereum_root_mnemonic = ["candy", "maple", "cake", "sugar", "pudding", "cream", "honey", "rich", "smooth",
"crumble", "sweet", "treat"]
    def stack_save_snapshot(self):
        """Rebuild the UI stack from scratch, launch it, and save snapshots.

        Python port of ``yarn stack --save-snapshot``: builds sifnoded,
        starts ganache-cli and a test sifnoded chain seeded with
        deterministic accounts and liquidity pools, deploys the smart
        contracts, starts ebrelayer, then kills everything and archives the
        chain state as tarballs under ``ui/chains/snapshots``.

        Side effects only (processes, filesystem, network); no return value.
        """
        # ui-stack.yml
        # cd .; go get -v -t -d ./...
        # cd ui; yarn install --frozen-lockfile --silent
        # Compile smart contracts:
        # cd ui; yarn build
        # yarn stack --save-snapshot -> ui/scripts/stack.sh -> ui/scripts/stack-save-snapshot.sh
        # rm ui/node_modules/.migrate-complete
        # yarn stack --save-snapshot -> ui/scripts/stack.sh -> ui/scripts/stack-save-snapshot.sh => ui/scripts/stack-launch.sh
        # ui/scripts/stack-launch.sh -> ui/scripts/_sif-build.sh -> ui/chains/sif/build.sh
        # killall sifnoded
        # rm $(which sifnoded)
        self.cmd.rmdir(self.sifnoded_path)
        self.cmd.execst(["make", "install"], cwd=project_dir(), pipe=False)

        # ui/scripts/stack-launch.sh -> ui/scripts/_eth.sh -> ui/chains/etc/launch.sh
        self.cmd.rmdir(self.ganache_db_path)
        self.cmd.yarn([], cwd=project_dir("ui/chains/eth"))  # Installs ui/chains/eth/node_modules
        # Note that this runs ganache-cli from $PATH whereas scripts start it with yarn in ui/chains/eth
        ganache_proc = self.cmd.start_ganache_cli(mnemonic=self.ethereum_root_mnemonic, db=self.ganache_db_path,
            port=7545, network_id=self.network_id, gas_price=20000000000, gas_limit=6721975, host="0.0.0.0")

        # ui/scripts/stack-launch.sh -> ui/scripts/_sif.sh -> ui/chains/sif/launch.sh
        self.cmd.sifnoded_init("test", self.chain_id)
        self.cmd.copy_file(project_dir("ui/chains/sif/app.toml"), os.path.join(self.sifnoded_path, "config/app.toml"))
        # Create the three deterministic accounts from their fixed mnemonics.
        log.info(f"Generating deterministic account - {self.shadowfiend_name}...")
        shadowfiend_account = self.cmd.sifnoded_generate_deterministic_account(self.shadowfiend_name, self.shadowfiend_mnemonic)
        log.info(f"Generating deterministic account - {self.akasha_name}...")
        akasha_account = self.cmd.sifnoded_generate_deterministic_account(self.akasha_name, self.akasha_mnemonic)
        log.info(f"Generating deterministic account - {self.juniper_name}...")
        juniper_account = self.cmd.sifnoded_generate_deterministic_account(self.juniper_name, self.juniper_mnemonic)
        shadowfiend_address = shadowfiend_account["address"]
        akasha_address = akasha_account["address"]
        juniper_address = juniper_account["address"]
        # Sanity check: the keyring must agree with the generated addresses.
        assert shadowfiend_address == self.cmd.sifnoded_keys_show(self.shadowfiend_name)[0]["address"]
        assert akasha_address == self.cmd.sifnoded_keys_show(self.akasha_name)[0]["address"]
        assert juniper_address == self.cmd.sifnoded_keys_show(self.juniper_name)[0]["address"]

        # Genesis balances as [amount, denom] pairs.
        tokens_shadowfiend = [[10**29, "rowan"], [10**29, "catk"], [10**29, "cbtk"], [10**29, "ceth"], [10**29, "cusdc"], [10**29, "clink"], [10**26, "stake"]]
        tokens_akasha = [[10**29, "rowan"], [10**29, "catk"], [10**29, "cbtk"], [10**29, "ceth"], [10**29, "cusdc"], [10**29, "clink"], [10**26, "stake"]]
        tokens_juniper = [[10**22, "rowan"], [10**22, "cusdc"], [10**20, "clink"], [10**20, "ceth"]]
        self.cmd.sifnoded_add_genesis_account(shadowfiend_address, tokens_shadowfiend)
        self.cmd.sifnoded_add_genesis_account(akasha_address, tokens_akasha)
        self.cmd.sifnoded_add_genesis_account(juniper_address, tokens_juniper)

        # shadowfiend is the sole genesis validator.
        shadowfiend_address_bech_val = self.cmd.sifnoded_keys_show(self.shadowfiend_name, bech="val")[0]["address"]
        self.cmd.sifnoded_add_genesis_validators(shadowfiend_address_bech_val)
        amount = sif_format_amount(10**24, "stake")
        self.cmd.execst(["sifnoded", "gentx", self.shadowfiend_name, amount, f"--chain-id={self.chain_id}",
            f"--keyring-backend={self.keyring_backend}"])
        log.info("Collecting genesis txs...")
        self.cmd.execst(["sifnoded", "collect-gentxs"])
        log.info("Validating genesis file...")
        self.cmd.execst(["sifnoded", "validate-genesis"])

        log.info("Starting test chain...")
        sifnoded_proc = self.cmd.sifnoded_launch(minimum_gas_prices=[0.5, "rowan"])
        # sifnoded must be up before continuing
        self.cmd.sif_wait_up("localhost", 1317)

        # ui/scripts/_migrate.sh -> ui/chains/peggy/migrate.sh
        self.cmd.deploy_smart_contracts_for_ui_stack()
        # ui/scripts/_migrate.sh -> ui/chains/eth/migrate.sh
        # send through atk and btk tokens to eth chain
        self.cmd.yarn(["migrate"], cwd=project_dir("ui/chains/eth"))

        # ui/scripts/_migrate.sh -> ui/chains/sif/migrate.sh
        # Original scripts say "if we don't sleep there are issues"
        time.sleep(10)
        log.info("Creating liquidity pool from catk:rowan...")
        self.cmd.sifnoded_tx_clp_create_pool(self.chain_id, self.keyring_backend, "akasha", "catk", [10**5, "rowan"], 10**25, 10**25)
        time.sleep(5)
        log.info("Creating liquidity pool from cbtk:rowan...")
        self.cmd.sifnoded_tx_clp_create_pool(self.chain_id, self.keyring_backend, "akasha", "cbtk", [10**5, "rowan"], 10**25, 10**25)
        # should now be able to swap from catk:cbtk
        time.sleep(5)
        log.info("Creating liquidity pool from ceth:rowan...")
        self.cmd.sifnoded_tx_clp_create_pool(self.chain_id, self.keyring_backend, "akasha", "ceth", [10**5, "rowan"], 10**25, 83*10**20)
        # should now be able to swap from x:ceth
        time.sleep(5)
        log.info("Creating liquidity pool from cusdc:rowan...")
        self.cmd.sifnoded_tx_clp_create_pool(self.chain_id, self.keyring_backend, "akasha", "cusdc", [10**5, "rowan"], 10**25, 10**25)
        time.sleep(5)
        log.info("Creating liquidity pool from clink:rowan...")
        self.cmd.sifnoded_tx_clp_create_pool(self.chain_id, self.keyring_backend, "akasha", "clink", [10**5, "rowan"], 10**25, 588235*10**18)
        time.sleep(5)
        log.info("Creating liquidity pool from ctest:rowan...")
        self.cmd.sifnoded_tx_clp_create_pool(self.chain_id, self.keyring_backend, "akasha", "ctest", [10**5, "rowan"], 10**25, 10**13)

        # ui/scripts/_migrate.sh -> ui/chains/post_migrate.sh
        # Read deployed token contract addresses from the truffle build output.
        atk_address, btk_address, usdc_address, link_address = [
            self.cmd.get_smart_contract_address(project_dir(f"ui/chains/eth/build/contracts/{x}.json"), self.network_id)
            for x in ["AliceToken", "BobToken", "UsdCoin", "LinkCoin"]
        ]
        bridge_token_address, bridge_registry_address, bridge_bank = self.cmd.get_bridge_smart_contract_addresses(self.network_id)

        # From smart-contracts/.env.ui.example
        smart_contracts_env_ui_example_vars = {
            "ETHEREUM_PRIVATE_KEY": "c87509a1c067bbde78beb793e6fa76530b6382a4c0241e5e4a9ec0a0f44dc0d3",
            "INFURA_PROJECT_ID": "JFSH7439sjsdtqTM23Dz",
            "LOCAL_PROVIDER": "http://localhost:7545",
        }

        # NOTE: this probably doesn't work anymore since setTokenLockBurnLimit.js was replaced
        burn_limits = [
            [NULL_ADDRESS, 31 * 10 ** 18],
            [bridge_token_address, 10 ** 25],
            [atk_address, 10 ** 25],
            [btk_address, 10 ** 25],
            [usdc_address, 10 ** 25],
            [link_address, 10 ** 25],
        ]
        for address, amount in burn_limits:
            self.cmd.set_token_lock_burn_limit(
                address,
                amount,
                smart_contracts_env_ui_example_vars["ETHEREUM_PRIVATE_KEY"],
                smart_contracts_env_ui_example_vars["INFURA_PROJECT_ID"],
                smart_contracts_env_ui_example_vars["LOCAL_PROVIDER"]
            )

        # signal migrate-complete

        # Whitelist test tokens
        for addr in [atk_address, btk_address, usdc_address, link_address]:
            self.cmd.yarn(["peggy:whiteList", addr, "true"], cwd=self.cmd.smart_contracts_dir)

        # ui/scripts/stack-launch.sh -> ui/scripts/_peggy.sh -> ui/chains/peggy/launch.sh
        # rm -rf ui/chains/peggy/relayerdb
        # ebrelayer is in $GOBIN, gets installed by "make install"
        ethereum_private_key = smart_contracts_env_ui_example_vars["ETHEREUM_PRIVATE_KEY"]
        ebrelayer_proc = self.cmd.ebrelayer_init(ethereum_private_key, "tcp://localhost:26657", "ws://localhost:7545/",
            bridge_registry_address, self.shadowfiend_name, self.shadowfiend_mnemonic, self.chain_id, gas=5*10**12,
            gas_prices=[0.5, "rowan"])

        # At this point we have 3 running processes - ganache_proc, sifnoded_proc and ebrelayer_proc
        # await sif-node-up and migrate-complete
        time.sleep(30)

        # ui/scripts/_snapshot.sh

        # ui/scripts/stack-pause.sh:
        # killall sifnoded sifnoded ebrelayer ganache-cli
        sifnoded_proc.kill()
        ebrelayer_proc.kill()
        ganache_proc.kill()
        time.sleep(10)

        # Archive the chain state for later restore.
        snapshots_dir = project_dir("ui/chains/snapshots")
        self.cmd.mkdir(snapshots_dir)  # TODO self.cmd.rmdir(snapshots_dir)
        # ui/chains/peggy/snapshot.sh:
        # mkdir -p ui/chains/peggy/relayerdb
        self.cmd.tar_create(project_dir("ui/chains/peggy/relayerdb"), os.path.join(snapshots_dir, "peggy.tar.gz"))
        # mkdir -p smart-contracts/build
        self.cmd.tar_create(project_dir("smart-contracts/build"), os.path.join(snapshots_dir, "peggy_build.tar.gz"))
        # ui/chains/sif/snapshot.sh:
        self.cmd.tar_create(self.sifnoded_path, os.path.join(snapshots_dir, "sif.tar.gz"))
        # ui/chains/etc/snapshot.sh:
        self.cmd.tar_create(self.ganache_db_path, os.path.join(snapshots_dir, "eth.tar.gz"))
    def stack_push(self):
        """Build the ui-stack Docker image and, when running on CI, tag and
        push it to the GitHub container registry.

        Python port of ``ui/scripts/stack-push.sh``. The image is tagged
        with the current git commit hash; on CI an additional "stable" tag
        derived from the branch name is pushed as well.
        """
        # ui/scripts/stack-push.sh
        # $PWD=ui

        # User must be logged in to Docker hub:
        # ~/.docker/config.json must exist and .auths['ghcr.io'].auth != null
        log.info("Github Registry Login found.")

        commit = exactly_one(stdout_lines(self.cmd.execst(["git", "rev-parse", "HEAD"], cwd=project_dir())))
        branch = exactly_one(stdout_lines(self.cmd.execst(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=project_dir())))

        image_root = "ghcr.io/sifchain/sifnode/ui-stack"
        image_name = "{}:{}".format(image_root, commit)
        # Branch-based tag; '/' is not allowed in Docker tags.
        stable_tag = "{}:{}".format(image_root, branch.replace("/", "__"))

        running_in_ci = bool(os.environ.get("CI"))

        if running_in_ci:
            # NOTE(review): `res` is computed but never checked — the shell
            # original (commented out below) aborted on a dirty workspace.
            # Confirm whether this check was intentionally disabled.
            res = self.cmd.execst(["git", "status", "--porcelain", "--untracked-files=no"], cwd=project_dir())
            # # reverse grep for go.mod because on CI this can be altered by installing go dependencies
            # if [[ -z "$CI" && ! -z "$(git status --porcelain --untracked-files=no)" ]]; then
            #     echo "Git workspace must be clean to save git commit hash"
            #     exit 1
            # fi
            pass

        log.info("Building new container...")
        log.info(f"New image name: {image_name}")

        self.cmd.execst(["docker", "build", "-f", project_dir("ui/scripts/stack.Dockerfile"), "-t", image_name, "."],
            cwd=project_dir(), env={"DOCKER_BUILDKIT": "1"}, pipe=False)

        if running_in_ci:
            log.info(f"Tagging image as {stable_tag}...")
            self.cmd.execst(["docker", "tag", image_name, stable_tag])
            self.cmd.execst(["docker", "push", image_name])
            self.cmd.execst(["docker", "push", stable_tag])
class IntegrationTestsPlaybook:
def __init__(self, cmd):
self.cmd = cmd
# Fixed, set in start-integration-env.sh
self.ethereum_private_key = "c87509a1c067bbde78beb793e6fa76530b6382a4c0241e5e4a9ec0a0f44dc0d3"
self.owner = "0x627306090abaB3A6e1400e9345bC60c78a8BEf57"
# we may eventually switch things so PAUSER and OWNER aren't the same account, but for now they're the same
self.pauser = self.owner
# set_persistant_env_var BASEDIR $(fullpath $BASEDIR) $envexportfile
# set_persistant_env_var SIFCHAIN_BIN $BASEDIR/cmd $envexportfile
# set_persistant_env_var envexportfile $(fullpath $envexportfile) $envexportfile
# set_persistant_env_var TEST_INTEGRATION_DIR ${BASEDIR}/test/integration $envexportfile
# set_persistant_env_var TEST_INTEGRATION_PY_DIR ${BASEDIR}/test/integration/src/py $envexportfile
# set_persistant_env_var SMART_CONTRACTS_DIR ${BASEDIR}/smart-contracts $envexportfile
# set_persistant_env_var datadir ${TEST_INTEGRATION_DIR}/vagrant/data $envexportfile
# set_persistant_env_var CONTAINER_NAME integration_sifnode1_1 $envexportfile
# set_persistant_env_var NETWORKDIR $BASEDIR/deploy/networks $envexportfile
# set_persistant_env_var GANACHE_DB_DIR $(mktemp -d /tmp/ganachedb.XXXX) $envexportfile
# set_persistant_env_var ETHEREUM_WEBSOCKET_ADDRESS ws://localhost:7545/ $envexportfile
# set_persistant_env_var CHAINNET localnet $envexportfile
self.network_name = "develop"
self.network_id = 5777
self.using_ganache_gui = False
self.peruser_storage_dir = self.cmd.get_user_home(".sifnode-integration")
self.state_vars = {}
self.test_integration_dir = project_dir("test/integration")
self.data_dir = project_dir("test/integration/vagrant/data")
self.chainnet = "localnet"
self.tcp_url = "tcp://0.0.0.0:26657"
self.ethereum_websocket_address = "ws://localhost:7545/"
self.ganache_mnemonic = ["candy", "maple", "cake", "sugar", "pudding", "cream", "honey", "rich", "smooth",
"crumble", "sweet", "treat"]
def make_go_binaries(self):
# make go binaries (TODO Makefile needs to be trimmed down, especially "find")
self.cmd.execst(["make"], cwd=self.test_integration_dir, env={"BASEDIR": project_dir()})
def run(self):
self.cmd.mkdir(self.data_dir)
self.make_go_binaries()
self.cmd.install_smart_contracts_dependencies()
if self.using_ganache_gui:
ebrelayer_ethereum_addr = "0x8e2bE12daDbCcbf7c98DBb59f98f22DFF0eF3F2c"
ebrelayer_ethereum_private_key = "<KEY>"
ganache_db_path = None
ganache_proc = None
else:
# test/integration/ganache-start.sh:
# 1. pkill -9 -f ganache-cli || true
# 2. while nc -z localhost 7545; do sleep 1; done
# 3. nohup tmux new-session -d -s my_session "ganache-cli ${block_delay} -h 0.0.0.0 --mnemonic \
# 'candy maple cake sugar pudding cream honey rich smooth crumble sweet treat' \
# --networkId '5777' --port '7545' --db ${GANACHE_DB_DIR} --account_keys_path $GANACHE_KEYS_JSON \
# > $GANACHE_LOG 2>&1"
# 4. sleep 5
# 5. while ! nc -z localhost 4545; do sleep 5; done
# GANACHE_LOG=ui/test/integration/vagrant/data/logs/ganache.$(filenamedate).txt
block_time = None # TODO
account_keys_path = os.path.join(self.data_dir, "ganachekeys.json")
ganache_db_path = self.cmd.mktempdir()
ganache_proc = self.cmd.start_ganache_cli(block_time=block_time, host="0.0.0.0",
mnemonic=self.ganache_mnemonic, network_id=self.network_id, port=7545, db=ganache_db_path,
account_keys_path=account_keys_path)
self.cmd.wait_for_file(account_keys_path) # Created by ganache-cli
time.sleep(2)
ganache_keys = json.loads(self.cmd.read_text_file(account_keys_path))
ebrelayer_ethereum_addr = list(ganache_keys["private_keys"].keys())[9]
ebrelayer_ethereum_private_key = ganache_keys["private_keys"][ebrelayer_ethereum_addr]
# TODO Check for possible non-determinism of dict().keys() ordering (c.f. test/integration/vagrantenv.sh)
# TODO ebrelayer_ethereum_private_key is NOT the same as in test/integration/.env.ciExample
assert ebrelayer_ethereum_addr == "0x5aeda56215b167893e80b4fe645ba6d5bab767de"
assert ebrelayer_ethereum_private_key == "<KEY>"
env_file = project_dir("test/integration/.env.ciExample")
env_vars = self.cmd.primitive_parse_env_file(env_file)
self.cmd.deploy_smart_contracts_for_integration_tests(self.network_name, owner=self.owner, pauser=self.pauser,
operator=env_vars["OPERATOR"], consensus_threshold=int(env_vars["CONSENSUS_THRESHOLD"]),
initial_validator_powers=[int(x) for x in env_vars["INITIAL_VALIDATOR_POWERS"].split(",")],
initial_validator_addresses=[ebrelayer_ethereum_addr], env_file=env_file)
bridge_token_sc_addr, bridge_registry_sc_addr, bridge_bank_sc_addr = \
self.cmd.get_bridge_smart_contract_addresses(self.network_id)
# # TODO This should be last (after return from setup_sifchain.sh)
# burn_limits = [
# [NULL_ADDRESS, 31*10**18],
# [bridge_token_sc_addr, 10**25],
# ]
# env_file_vars = self.cmd.primitive_parse_env_file(env_file)
# for address, amount in burn_limits:
# self.cmd.set_token_lock_burn_limit(
# address,
# amount,
# env_file_vars["ETHEREUM_PRIVATE_KEY"], # != ebrelayer_ethereum_private_key
# env_file_vars["INFURA_PROJECT_ID"],
# env_file_vars["LOCAL_PROVIDER"], # for web3.js to connect to ganache
# | |
# dnaplotlib/datatype.py
"""
New DNAplotlib data type for designs (extendable for hierachy)
"""
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'MIT'
__version__ = '2.0'
###############################################################################
# New Data Type
###############################################################################
class Part:
    """A single genetic part that belongs to a module."""

    def __init__(self, parent_module, name, type, orientation='+', frame=None):
        """Create a new Part.

        Parameters
        ----------
        parent_module : Module
            Module that owns this part.
        name : string
            Name of the part.
        type : string
            Type of the part (parameter name kept for interface
            compatibility even though it shadows the builtin).
        orientation : string (default: '+')
            Strand orientation, either '+' or '-'.
        frame : (width=float, height=float, origin=float) (default: None)
            Bounding frame, usually filled in during rendering.
        """
        self.parent_module = parent_module
        self.name = name
        self.type = type
        self.orientation = orientation
        self.frame = frame
        # Per-part options used to tailor the rendering process
        self.options = {}
class PartList:
    def __init__(self, position=None, backbone='DNA'):
        """Create a new PartList.

        Holds a list of parts that should be rendered as a single unit with
        a shared backbone. Note that a PartList does not hold all parts in
        a module.

        Parameters
        ----------
        position : [float, float] (default: None)
            [x, y] position of the baseline start for the part. Often
            updated during the rendering process.
        backbone : string (default: 'DNA')
            Currently only DNA rendering is supported; an RNA backbone
            could be added later.
        """
        self.position = position
        self.backbone = backbone
        self.parts = []  # List of parts making up the segment
        self.options = {}  # Rendering options (currently unused)

    def add_part(self, part):
        """Append a single Part, or extend with a list of Parts."""
        # isinstance is the idiomatic (and subclass-friendly) type test
        if isinstance(part, list):
            self.parts.extend(part)
        else:
            self.parts.append(part)
class Interaction:
    """An interaction (arrow) between two parts.

    Parameters
    ----------
    interaction_type : string
        Options include: control, degradation, inhibition, process,
        stimulation.
    part_start : Part
        The part the interaction originates from.
    part_end : Part (default: None)
        The part the interaction points at; may be absent.
    path : (default: None)
        Accepted for interface compatibility; not currently stored.

    Attributes
    ----------
    coordinates : [[float, float]]
        Coordinates of the interaction arrow, filled during rendering.
    options : dict
        Options to tailor the rendering process.
    """

    def __init__(self, interaction_type, part_start, part_end=None, path=None):
        self.part_start = part_start
        self.part_end = part_end
        self.coordinates = []
        self.type = interaction_type
        self.options = {}
class Module:
    """A module in the design hierarchy.

    Parameters
    ----------
    design : Design
        The design this module belongs to.
    name : String
        Name of the module.
    parent : Module (default: None)
        Parent module, or None for a top-level module.

    Attributes
    ----------
    level : Int [0, 2]
        Module hierarchy level / updated during rendering.
    frame : (width=float, height=float, origin=float) (default: None)
        Often updated during the rendering process.
    children : [Module]
        Submodules contained within this module.
    part_list : PartList
        Parts contained on the DNA backbone (None until one is added).
    other_parts : [Part]
        Parts not contained on the DNA backbone.
    """
    def __init__(self, design, name, parent=None):
        self.design = design
        self.name = name
        # Bug fix: `parent` was accepted but silently discarded; store it so
        # the hierarchy can also be walked upward.
        self.parent = parent
        self.level = 0
        self.frame = None
        self.children = []
        self.part_list = None   # parts on strand
        self.other_parts = []   # parts off strand

    def add_module(self, name):
        """Create a child module named *name*, attach it and return it."""
        child = Module(self.design, name, parent=self)
        self.children.append(child)
        return child

    def add_modules(self, modules):
        """Attach a list of already-constructed child modules."""
        self.children += modules

    def add_strand_part(self, part):
        """Add a part (or list of parts) to the on-strand PartList,
        creating the PartList lazily."""
        if self.part_list is None:
            self.part_list = PartList()
        self.part_list.add_part(part)

    def add_non_strand_part(self, part):
        """Add a part (or list of parts) that sits off the DNA backbone."""
        if isinstance(part, list):
            self.other_parts += part
        else:
            self.other_parts.append(part)
class Design:
    """Top-level container for a complete design.

    Parameters
    ----------
    name : String
        name of the design

    Attributes
    ----------
    modules : [Module]
        list of modules contained in the design (only level 0)
    interactions : [Interaction]
        list of interactions within design
    """
    def __init__(self, name):
        self.name = name
        self.modules = []
        self.interactions = []

    def rename_design(self, name):
        """Replace the design's name."""
        self.name = name

    def add_module(self, module):
        """Add a module, or a list of modules, to the design.

        Bug fix: a list argument used to be dropped silently, although
        callers (e.g. create_test_design2) pass lists of modules.
        """
        if isinstance(module, list):
            self.modules += module
        else:
            self.modules.append(module)

    def add_interaction(self, interaction):
        """Add an interaction, or a list of interactions, to the design."""
        if isinstance(interaction, list):
            self.interactions += interaction
        else:
            self.interactions.append(interaction)

    def __print_part_list(self, part_list, indent=''):
        # Print the names of on-strand parts, if there are any.
        if part_list is None or len(part_list.parts) == 0:
            return
        names = [part.name for part in part_list.parts]
        print(indent + ' Parts: ' + ','.join(names))

    def __print_other_parts(self, other_part_list, indent=''):
        # Print the names of off-strand parts, if there are any.
        if len(other_part_list) == 0:
            return
        names = [op.name for op in other_part_list]
        print(indent + ' Other parts: ' + ','.join(names))

    def __print_module_tree(self, starting_module, indent=''):
        # Recursive method to print tree details
        print(indent + 'Module:', starting_module.name)
        self.__print_part_list(starting_module.part_list, indent)
        self.__print_other_parts(starting_module.other_parts, indent)
        for node in starting_module.children:
            self.__print_module_tree(node, indent + ' ')

    def print_design(self):
        """Print a human-readable version of the data type."""
        print('Design:', self.name)
        for module in self.modules:
            self.__print_module_tree(module, indent=' ')
        for interaction in self.interactions:
            if interaction.part_end is not None:
                print('Interaction from part:', interaction.part_start.name,
                      'to part:', interaction.part_end.name,
                      'of type:', interaction.type)
            else:
                print('Interaction from part:', interaction.part_start.name,
                      'of type:', interaction.type)
###############################################################################
# Testing
###############################################################################
# The basic data type at the moment works by having a Design object that holds
# lists of the modules, interactions, and other parts making up the design. At
# the moment only the modules list is used. The other aspects will be added
# later. The add_module method is called with a Module object to add and it will
# be appended to the list. There are also a couple private functions that the
# print_design method uses to print out the tree making up the design, drilling
# down into each module. An example of how to generate a design is shown below.
# The key detail in the datatype is that Modules can have Modules added to them
def create_test_design ():
    """Build a small two-module test design with nested sub-modules and
    three interactions, and return it.

    Bug fix: part '1c' was created with parent_module=module1a although it
    is added to module1c; its parent is now module1c.
    """
    # You first create a design and need to give it a name
    design = Design('design1')
    # Create DNA module 1 (containing sub-modules)
    module1 = Module(design, 'module1')
    module1a = module1.add_module('module1a')
    part_1aCDS = Part(module1a, '1a', 'Promoter')
    module1a.add_strand_part(part_1aCDS)
    module1a.add_strand_part(Part(module1a, '1aT', 'Promoter'))
    module1b = module1.add_module('module1b')
    module1b.add_strand_part(Part(module1b, '1b', 'Promoter'))
    module1c = module1.add_module('module1c')
    part_1cCDS = Part(module1c, '1c', 'Promoter')
    module1c.add_strand_part(part_1cCDS)
    # Create DNA module 2
    module2 = Module(design, 'module2')
    part_2CDS = Part(module2, '2', 'Promoter')
    module2.add_strand_part(part_2CDS)
    # Attach the different DNA segments to design
    design.add_module(module1)
    design.add_module(module2)
    # Add some other parts (e.g. molecules like a repressor)
    other_part_1Rep = Part(module2, 'R1', 'Unspecified')
    module2.add_non_strand_part(other_part_1Rep)
    # Add some basic interactions
    design.add_interaction(Interaction('inhibition', part_1cCDS, part_1aCDS))
    design.add_interaction(Interaction('process', part_1cCDS, other_part_1Rep))
    design.add_interaction(Interaction('stimulation', other_part_1Rep, part_2CDS))
    return design
# for rendering degradation
def create_test_design1_1 ():
    """Build a minimal one-part design used to exercise degradation
    rendering, and return it.

    Bug fixes: the module name was passed as an undefined variable
    (NameError), the nonexistent ``Module.add_part`` was called instead of
    ``add_strand_part``, and the design was never returned.
    """
    design = Design('design1_1')
    module = Module(design, 'module')
    part1_p = Part(module, '1p', 'Promoter')
    module.add_strand_part(part1_p)
    design.add_module(module)
    return design
def create_test_design2 ():
    """Build a larger flat test design with eight modules and five
    interactions, and return it.

    Bug fix: part 'p6t' was created with parent_module=module1 although it
    belongs to module6; its parent is now module6.
    """
    # You first create a design and need to give it a name
    design = Design('design2')
    # Create DNA module 1
    module1 = Module(design, 'module1')
    part_1_pro = Part(module1, 'p1a', 'Promoter')
    part_1_res = Part(module1, 'p1r', 'RibosomeEntrySite')
    part_1_cds = Part(module1, 'p1c', 'CDS')
    part_1_ter = Part(module1, 'p1t', 'Terminator')
    module1.add_strand_part([part_1_pro, part_1_res, part_1_cds, part_1_ter])
    # Create DNA module 2
    module2 = Module(design, 'module2')
    part_2_pro = Part(module2, 'p2p', 'Promoter')
    part_2_cds = Part(module2, 'p2c', 'CDS')
    part_2_ter = Part(module2, 'p2t', 'Terminator')
    module2.add_strand_part([part_2_pro, part_2_cds, part_2_ter])
    # module 3
    module3 = Module(design, 'module3')
    part_3_pro = Part(module3, 'p3p', 'Promoter')
    part_3_ins = Part(module3, 'p3i', 'Insulator')
    part_3_ter = Part(module3, 'p3t', 'Terminator')
    module3.add_strand_part([part_3_pro, part_3_ins, part_3_ter])
    # module 4
    module4 = Module(design, 'module4')
    part_4_pro = Part(module4, 'p4p', 'Promoter')
    part_4_ori = Part(module4, 'p4o', 'OriginOfReplication')
    part_4_ter = Part(module4, 'p4t', 'Terminator')
    module4.add_strand_part([part_4_pro, part_4_ori, part_4_ter])
    # module 5
    module5 = Module(design, 'module5')
    part_5_pro = Part(module5, 'p5p', 'Promoter')
    part_5_ter = Part(module5, 'p5t', 'Terminator')
    module5.add_strand_part([part_5_pro, part_5_ter])
    # module 6
    module6 = Module(design, 'module6')
    part_6_pro = Part(module6, 'p6a', 'Promoter')
    part_6_apt = Part(module6, 'p6apt', 'Aptamer')
    part_6_res = Part(module6, 'p6r', 'RibosomeEntrySite')
    part_6_ter = Part(module6, 'p6t', 'Terminator')
    module6.add_strand_part([part_6_pro, part_6_apt, part_6_res, part_6_ter])
    # module 7
    module7 = Module(design, 'module7')
    part_7_pro = Part(module7, 'p7p', 'Promoter')
    part_7_res = Part(module7, 'p7r', 'RibosomeEntrySite')
    part_7_ter = Part(module7, 'p7t', 'Terminator')
    module7.add_strand_part([part_7_pro, part_7_res, part_7_ter])
    # module 8
    module8 = Module(design, 'module8')
    part_8_pro = Part(module8, 'p8p', 'Promoter')
    part_8_res = Part(module8, 'p8r', 'RibosomeEntrySite')
    part_8_ter = Part(module8, 'p8t', 'Terminator')
    module8.add_strand_part([part_8_pro, part_8_res, part_8_ter])
    # Attach the different DNA segments to design
    design.add_module([module1, module2, module3, module4, module5, module6, module7, module8])
    # Add some basic interactions
    interaction1 = Interaction('control', part_1_cds, part_4_pro)
    int2 = Interaction('degradation', part_1_pro, part_3_pro)
    int3 = Interaction('process', part_2_cds, part_4_ori)
    int4 = Interaction('inhibition', part_5_pro, part_2_pro)
    int5 = Interaction('stimulation', part_7_pro, part_8_res)
    design.add_interaction([interaction1, int2, int3, int4, int5])
    return design
# hierarchical | |
None:
self.stdout_redirector.start()
if self.stderr_redirector is not None:
self.stderr_redirector.start()
return 1
def _create_redirectors(self):
if self.stdout_stream:
if self.stdout_redirector is not None:
self.stdout_redirector.stop()
self.stdout_redirector = get_pipe_redirector(
self.stdout_stream, loop=self.loop)
else:
self.stdout_redirector = None
if self.stderr_stream:
if self.stderr_redirector is not None:
self.stderr_redirector.stop()
self.stderr_redirector = get_pipe_redirector(
self.stderr_stream, loop=self.loop)
else:
self.stderr_redirector = None
def _resolve_hook(self, name, callable_or_name, ignore_failure):
if is_callable(callable_or_name):
self.hooks[name] = callable_or_name
else:
# will raise ImportError on failure
self.hooks[name] = resolve_name(callable_or_name)
if ignore_failure:
self.ignore_hook_failure.append(name)
def _resolve_hooks(self, hooks):
"""Check the supplied hooks argument to make sure we can find
callables"""
if hooks is None:
return
for name, (callable_or_name, ignore_failure) in hooks.items():
self._resolve_hook(name, callable_or_name, ignore_failure)
@classmethod
def load_from_config(cls, config):
if 'env' in config:
config['env'] = parse_env_dict(config['env'])
cfg = config.copy()
w = cls(name=config.pop('name'), cmd=config.pop('cmd'), **config)
w._cfg = cfg
return w
@util.debuglog
def initialize(self, evpub_socket, sockets, arbiter):
self.evpub_socket = evpub_socket
self.sockets = sockets
self.arbiter = arbiter
    def __len__(self):
        """Number of currently tracked processes."""
        return len(self.processes)
def notify_event(self, topic, msg):
"""Publish a message on the event publisher channel"""
name = bytestring(self.res_name)
multipart_msg = [b("watcher.%s.%s" % (name, topic)), json.dumps(msg)]
if self.evpub_socket is not None and not self.evpub_socket.closed:
self.evpub_socket.send_multipart(multipart_msg)
@util.debuglog
def reap_process(self, pid, status=None):
"""ensure that the process is killed (and not a zombie)"""
if pid not in self.processes:
return
process = self.processes.pop(pid)
if not status:
while True:
try:
_, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno == errno.EAGAIN:
time.sleep(0.001)
continue
elif e.errno == errno.ECHILD:
# nothing to do here, we do not have any child
# process running
# but we still need to send the "reap" signal.
logger.debug('reaping already dead process %s [%s]',
pid, self.name)
self.notify_event(
"reap",
{"process_pid": pid, "time": time.time()})
process.stop()
return
else:
raise
# get return code
if os.WIFSIGNALED(status):
os.WTERMSIG(status)
# process exited using exit(2) system call; return the
# integer exit(2) system call has been called with
elif os.WIFEXITED(status):
os.WEXITSTATUS(status)
else:
# should never happen
raise RuntimeError("Unknown process exit status")
# if the process is dead or a zombie try to definitely stop it.
if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
process.stop()
logger.debug('reaping process %s [%s]', pid, self.name)
self.notify_event("reap", {"process_pid": pid, "time": time.time()})
@util.debuglog
def reap_processes(self):
"""Reap all the processes for this watcher.
"""
if self.is_stopped():
logger.debug('do not reap processes as the watcher is stopped')
return
# reap_process changes our dict, look through the copy of keys
for pid in list(self.processes.keys()):
self.reap_process(pid)
    @gen.coroutine
    @util.debuglog
    def manage_processes(self):
        """Manage processes.

        Drops dead/zombie entries, expires old processes, respawns missing
        ones and trims any surplus down to ``numprocesses``.
        """
        if self.is_stopped():
            return
        # remove dead or zombie processes first
        for process in list(self.processes.values()):
            if process.status == DEAD_OR_ZOMBIE:
                self.processes.pop(process.pid)
        if self.max_age:
            yield self.remove_expired_processes()
        # adding fresh processes
        if (self.respawn and len(self.processes) < self.numprocesses
                and not self.is_stopping()):
            yield self.spawn_processes()
        # removing extra processes
        if len(self.processes) > self.numprocesses:
            processes_to_kill = []
            # sorted newest-first: everything past numprocesses is surplus
            for process in sorted(self.processes.values(),
                                  key=lambda process: process.started,
                                  reverse=True)[self.numprocesses:]:
                if process.status == DEAD_OR_ZOMBIE:
                    self.processes.pop(process.pid)
                else:
                    processes_to_kill.append(process)
            # kill concurrently; drop only those that were actually killed
            removes = yield [self.kill_process(process)
                             for process in processes_to_kill]
            for i, process in enumerate(processes_to_kill):
                if removes[i]:
                    self.processes.pop(process.pid)
@gen.coroutine
@util.debuglog
def remove_expired_processes(self):
max_age = self.max_age + randint(0, self.max_age_variance)
expired_processes = [p for p in self.processes.values()
if p.age() > max_age]
removes = yield [self.kill_process(x) for x in expired_processes]
for i, process in enumerate(expired_processes):
if removes[i]:
self.processes.pop(process.pid)
@gen.coroutine
@util.debuglog
def reap_and_manage_processes(self):
"""Reap & manage processes."""
if self.is_stopped():
return
self.reap_processes()
yield self.manage_processes()
    @gen.coroutine
    @util.debuglog
    def spawn_processes(self):
        """Spawn processes.

        Spawns up to ``numprocesses`` missing processes, pausing
        ``warmup_delay`` seconds between spawns; stops the watcher when a
        spawn definitively fails.
        """
        # when an on_demand process dies, do not restart it until
        # the next event
        if self.on_demand and not self.arbiter.socket_event:
            self._status = "stopped"
            return
        for i in range(self.numprocesses - len(self.processes)):
            res = self.spawn_process()
            if res is False:
                # spawn_process exhausted its retries: stop the watcher
                yield self._stop()
                break
            yield tornado_sleep(self.warmup_delay)
def _get_sockets_fds(self):
# XXX should be cached
if self.sockets is None:
return {}
fds = {}
for name, sock in self.sockets.items():
fds[name] = sock.fileno()
return fds
    def spawn_process(self):
        """Spawn process.
        Return True if ok, False if the watcher must be stopped

        Retries up to ``max_retry`` times (forever when max_retry == -1),
        honoring the before_spawn hook and wiring the new process into the
        stdout/stderr redirectors.
        """
        if self.is_stopped():
            return True
        if not self.call_hook('before_spawn'):
            return False
        cmd = util.replace_gnu_args(self.cmd, env=self.env)
        nb_tries = 0
        while nb_tries < self.max_retry or self.max_retry == -1:
            process = None
            pipe_stdout = self.stdout_redirector is not None
            pipe_stderr = self.stderr_redirector is not None
            try:
                process = Process(self._nextwid, cmd,
                                  args=self.args, working_dir=self.working_dir,
                                  shell=self.shell, uid=self.uid, gid=self.gid,
                                  env=self.env, rlimits=self.rlimits,
                                  executable=self.executable,
                                  use_fds=self.use_sockets, watcher=self,
                                  pipe_stdout=pipe_stdout,
                                  pipe_stderr=pipe_stderr,
                                  close_child_stdout=self.close_child_stdout,
                                  close_child_stderr=self.close_child_stderr)
                # stream stderr/stdout if configured
                if pipe_stdout and self.stdout_redirector is not None:
                    self.stdout_redirector.add_redirection('stdout',
                                                           process,
                                                           process.stdout)
                if pipe_stderr and self.stderr_redirector is not None:
                    self.stderr_redirector.add_redirection('stderr',
                                                           process,
                                                           process.stderr)
                self.processes[process.pid] = process
                logger.debug('running %s process [pid %d]', self.name,
                             process.pid)
            except OSError as e:
                # spawn failed; log and fall through to the retry logic
                logger.warning('error in %r: %s', self.name, str(e))
            if process is None:
                nb_tries += 1
                continue
            else:
                self.notify_event("spawn", {"process_pid": process.pid,
                                            "time": time.time()})
                return True
        return False
    @util.debuglog
    def send_signal_process(self, process, signum):
        """Send the signum signal to the process
        The signal is sent to the process itself then to all the children

        Best-effort: a process or child that is already gone is skipped.
        """
        try:
            # sending the signal to the process itself
            self.send_signal(process.pid, signum)
            self.notify_event("kill", {"process_pid": process.pid,
                                       "time": time.time()})
        except NoSuchProcess:
            # already dead !
            pass
        # now sending the same signal to all the children
        for child_pid in process.children():
            try:
                process.send_signal_child(child_pid, signum)
                self.notify_event("kill", {"process_pid": child_pid,
                                           "time": time.time()})
            except NoSuchProcess:
                # already dead !
                pass
def _process_remove_redirections(self, process):
"""Remove process redirections
"""
if self.stdout_redirector is not None and process.stdout is not None:
self.stdout_redirector.remove_redirection(process.stdout)
if self.stderr_redirector is not None and process.stderr is not None:
self.stderr_redirector.remove_redirection(process.stderr)
    @gen.coroutine
    @util.debuglog
    def kill_process(self, process):
        """Kill process (stop_signal, graceful_timeout then SIGKILL)

        Returns True when the process was stopped, False when a stop was
        already in progress for it.
        """
        if process.stopping:
            raise gen.Return(False)
        logger.debug("%s: kill process %s", self.name, process.pid)
        if self.stop_children:
            # signal the process and all of its children
            self.send_signal_process(process, self.stop_signal)
        else:
            self.send_signal(process.pid, self.stop_signal)
            self.notify_event("kill", {"process_pid": process.pid,
                                       "time": time.time()})
        process.stopping = True
        # give the process graceful_timeout seconds to exit on its own
        waited = 0
        while waited < self.graceful_timeout:
            yield tornado_sleep(1)
            waited += 1
            if not process.is_alive():
                break
        if waited >= self.graceful_timeout:
            # We are not smart anymore
            self.send_signal_process(process, signal.SIGKILL)
        self._process_remove_redirections(process)
        process.stopping = False
        process.stop()
        raise gen.Return(True)
@gen.coroutine
@util.debuglog
def kill_processes(self):
"""Kill all processes (stop_signal, graceful_timeout then SIGKILL)
"""
active_processes = self.get_active_processes()
try:
yield [self.kill_process(process) for process in active_processes]
except OSError as e:
if e.errno != errno.ESRCH:
raise
    @util.debuglog
    def send_signal(self, pid, signum):
        """Send *signum* to the managed process *pid*, honoring the
        before_signal/after_signal hooks (SIGKILL cannot be vetoed)."""
        if pid in self.processes:
            process = self.processes[pid]
            # before_signal hook may veto any signal except SIGKILL
            hook_result = self.call_hook("before_signal",
                                         pid=pid, signum=signum)
            if signum != signal.SIGKILL and not hook_result:
                logger.debug("before_signal hook didn't return True "
                             "=> signal %i is not sent to %i" % (signum, pid))
            else:
                process.send_signal(signum)
            self.call_hook("after_signal", pid=pid, signum=signum)
        else:
            logger.debug('process %s does not exist' % pid)
@util.debuglog
def send_signal_child(self, pid, child_id, signum):
"""Send signal to a child.
"""
process = self.processes[pid]
try:
process.send_signal_child(int(child_id), signum)
except OSError as e:
if e.errno != errno.ESRCH:
raise
@util.debuglog
def send_signal_children(self, pid, signum):
"""Send signal to all children.
"""
process = self.processes[int(pid)]
process.send_signal_children(signum)
    @util.debuglog
    def status(self):
        """Return the current watcher status string (elsewhere in this
        class it is set to e.g. "stopping" or "stopped")."""
        return self._status
@util.debuglog
def process_info(self, pid):
process = self.processes[int(pid)]
return process.info()
@util.debuglog
def info(self):
return dict([(proc.pid, proc.info())
for proc in self.processes.values()])
    @util.synchronized("watcher_stop")
    @gen.coroutine
    def stop(self):
        """Synchronized public entry point for stopping the watcher."""
        yield self._stop()
    @util.debuglog
    @gen.coroutine
    def _stop(self):
        """Internal stop sequence: hooks, process kill, redirector teardown
        and event notification. No-op when already stopped."""
        if self.is_stopped():
            return
        self._status = "stopping"
        logger.debug('stopping the %s watcher' % self.name)
        logger.debug('gracefully stopping processes [%s] for %ss' % (
            self.name, self.graceful_timeout))
        # We ignore the hook result
        self.call_hook('before_stop')
        yield self.kill_processes()
        # stop redirectors
        if self.stdout_redirector is not None:
            self.stdout_redirector.stop()
            self.stdout_redirector = None
        if self.stderr_redirector is not None:
            self.stderr_redirector.stop()
            self.stderr_redirector = None
        # notify about the stop
        if self.evpub_socket is not None:
            self.notify_event("stop", {"time": time.time()})
        self._status = "stopped"
        # We ignore the hook result
        self.call_hook('after_stop')
        logger.info('%s stopped', self.name)
def get_active_processes(self):
"""return a list of pids of active processes (not already stopped)"""
return [p for p in self.processes.values()
if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)]
def get_active_pids(self):
"""return a list of pids of active processes (not already stopped)"""
return [p.pid for p in self.processes.values()
if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)]
@property
def pids(self):
"""Returns a list of PIDs"""
return [process.pid for process in self.processes]
@property
def _nextwid(self):
used_wids = sorted([p.wid for p in self.processes.values()])
all_wids = range(1, self.numprocesses + 1)
for slot, wid in izip_longest(all_wids, used_wids, fillvalue=None):
if slot is None:
# should never happen
raise RuntimeError("Process count > numproceses")
elif wid is None:
return slot
def call_hook(self, hook_name, **kwargs):
"""Call a hook function"""
hook_kwargs = {'watcher': self, 'arbiter': self.arbiter,
'hook_name': hook_name}
hook_kwargs.update(kwargs)
if hook_name in self.hooks:
try:
result = self.hooks[hook_name](**hook_kwargs)
self.notify_event("hook_success",
{"name": hook_name, "time": time.time()})
except Exception as error:
logger.exception('Hook %r | |
Thrust = segment.conditions.frames.body.thrust_force_vector[:,0]
axes = fig.add_subplot(3,1,1)
axes.plot( time , CLift , 'bo-' )
axes.set_xlabel('Time (min)')
axes.set_ylabel('CL')
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.grid(True, which = 'both')
axes = fig.add_subplot(3,1,2)
axes.plot( time , CDrag , 'bo-' )
axes.set_xlabel('Time (min)')
axes.set_ylabel('CD')
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.grid(True, which = 'both')
axes = fig.add_subplot(3,1,3)
axes.plot( time , Drag , 'bo-' )
axes.plot( time , Thrust , 'ro-' )
axes.set_xlabel('Time (min)')
axes.set_ylabel('Drag and Thrust (N)')
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.grid(True, which = 'both')
# ------------------------------------------------------------------
# Aerodynamic Efficiency
# ------------------------------------------------------------------
fig = plt.figure("LD")
axes = plt.gca()
for i, segment in enumerate(results.segments.values()):
time = segment.conditions.frames.inertial.time[:,0] / Units.min
CLift = segment.conditions.aerodynamics.lift_coefficient[:,0]
CDrag = segment.conditions.aerodynamics.drag_coefficient[:,0]
LD = CLift/CDrag
axes.plot(time, LD, 'o-', color = colors[0], linewidth = thickness)
plot_format(fig, axes, xlabel = 'Time [mins]', xlabel_size = xlabel_size,\
ylabel = 'L/D [-]', ylabel_size = ylabel_size,\
title = 'Aerodynamic Efficiency', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# ------------------------------------------------------------------
# Aerodynamics 2
# ------------------------------------------------------------------
fig = plt.figure("Drag Components")
axes = plt.gca()
for i, segment in enumerate(results.segments.values()):
time = segment.conditions.frames.inertial.time[:,0] / Units.min
drag_breakdown = segment.conditions.aerodynamics.drag_breakdown
cdp = drag_breakdown.parasite.total[:,0]
cdi = drag_breakdown.induced.total[:,0]
cd = drag_breakdown.total[:,0]
axes.plot( time , cdp , 'ko-', label='CD_P' )
axes.plot( time , cdi , 'bo-', label='CD_I' )
axes.plot( time , cd , 'ro-', label='CD' )
if i == 0:
axes.legend(loc='upper center')
axes.set_xlabel('Time (min)')
axes.set_ylabel('CD')
axes.grid(True)
# ------------------------------------------------------------------
# Mass
# ------------------------------------------------------------------
plt.figure("Mass")
axes = plt.gca()
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
energy = results.segments[i].conditions.weights.total_mass[:,0] / Units['kg']
axes.plot(time, energy, 'o-', color = colors[0], linewidth = thickness)
plot_format(fig, axes, xlabel = 'Time [mins]', xlabel_size = xlabel_size,\
ylabel = 'Mass [kg]', ylabel_size = ylabel_size,\
title = 'Aircraft Mass', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# ------------------------------------------------------------------
# Throttle
# ------------------------------------------------------------------
fig = plt.figure("Throttle History")
axes = fig.gca()
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
eta = results.segments[i].conditions.propulsion.throttle[:,0]
axes.plot(time, eta, 'o-', color = colors[0], linewidth = thickness)
axes.set_xlabel('Time (mins)')
axes.set_ylabel('Throttle')
axes.set_title('Power Throttle', fontsize = 16)
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
plt.ylim((0,1.05))
plot_format(fig, axes, xlabel = 'Time [mins]', xlabel_size = xlabel_size,\
ylabel = 'Throttle [-]', ylabel_size = ylabel_size,\
title = 'Power Throttle', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# ------------------------------------------------------------------
# SFC
# ------------------------------------------------------------------
fig = plt.figure("SFC History")
axes = fig.gca()
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
sfc = results.segments[i].conditions.propulsion.sfc[:,0]
axes.plot(time, sfc/Units['lb/hp/h'], 'o-', color = colors[0], linewidth = thickness)
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
# plt.ylim((0,1.05))
plot_format(fig, axes, xlabel = 'Time [mins]', xlabel_size = xlabel_size,\
ylabel = 'SFC [lb/hp/h]', ylabel_size = ylabel_size,\
title = 'Specific Power Consumption', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# ------------------------------------------------------------------
# Fuel FLow
# ------------------------------------------------------------------
fig = plt.figure("FF History")
axes = fig.gca()
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
ff = results.segments[i].conditions.propulsion.fuel_flow[:,0]
axes.plot(time, ff/Units['lb/h'], 'o-', color = colors[0], linewidth = thickness)
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
# plt.ylim((0,1.05))
plot_format(fig, axes, xlabel = 'Time [mins]', xlabel_size = xlabel_size,\
ylabel = 'Fuel FLow [lb/h]', ylabel_size = ylabel_size,\
title = 'Fuel Flow', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# ------------------------------------------------------------------
# Propeller RPM
# ------------------------------------------------------------------
# plt.figure("Propeller Data")
# axes = plt.gca()
# for i in range(len(results.segments)):
# time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
# rpm = results.segments[i].conditions.propulsion.propeller_omega[:,0] / Units.rpm
# axes.plot(time, rpm, 'o-', color = colors[0], linewidth = thickness)
# axes.tick_params(axis='y', labelcolor=colors[0])
# axes.set_xlabel('Time [mins]', fontsize = xlabel_size)
# axes.set_ylabel('RPM', color = colors[0], fontsize = ylabel_size)
# axes.tick_params(axis='y', labelcolor=colors[0], size = tick_size)
# axes.set_title('Propeller Data', fontsize = title_size)
# axes.get_yaxis().get_major_formatter().set_scientific(False)
# axes.get_yaxis().get_major_formatter().set_useOffset(False)
# plt.minorticks_on()
# axes.grid(True, which = 'both')
#
# axes2 = axes.twinx()
# for i in range(len(results.segments)):
# time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
# Q = results.segments[i].conditions.propulsion.propeller_torque[:,0] / Units['N*m']
# axes2.plot(time, Q, 'o-', color = colors[1], linewidth = thickness)
# axes2.set_ylabel('Torque [Nm]', color = colors[1], fontsize = ylabel_size)
# axes2.tick_params(axis='y', labelcolor= colors[1], size = tick_size)
# ------------------------------------------------------------------
# Mission Profile
# ------------------------------------------------------------------
fig = plt.figure("Mission Profile")
for segment in results.segments.values():
time = segment.conditions.frames.inertial.time[:,0] / Units.min
speed = segment.conditions.freestream.velocity[:,0] / Units['kt']
altitude = segment.conditions.freestream.altitude[:,0] / Units.ft
axes = fig.add_subplot(2,1,1)
axes.plot( time , altitude , 'o-', color = colors[0], linewidth = thickness )
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
plot_format(fig, axes, xlabel = '', xlabel_size = xlabel_size,\
ylabel = 'Altitude [ft]', ylabel_size = ylabel_size,\
title = '', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# axes.set_ylim(0,300)
# axes.set_ylim(0,5000)
axes = fig.add_subplot(2,1,2)
axes.plot( time , speed, 'o-', color = colors[0], linewidth = thickness )
axes.get_yaxis().get_major_formatter().set_scientific(False)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
plot_format(fig, axes, xlabel = 'Time [mins]', xlabel_size = xlabel_size,\
ylabel = 'Speed [KTAS]', ylabel_size = ylabel_size,\
title = '', title_size = title_size,\
tick_size = tick_size, tick_rotation = tick_rotation,\
grid = grid, minor_ticks = minor_ticks)
# axes.set_ylim(0,220)
fig.suptitle('Mission Profile', fontsize = title_size)
return
def polar(Cl, a, b):
    """Two-term drag polar model: CD = a * CL**2 + b.

    Signature order (independent variable first, then the fit
    coefficients) matches what scipy.optimize.curve_fit expects, so it
    must not change.
    """
    return b + a * Cl ** 2
if __name__ == '__main__':
# Define plot formatting
title_size = 20
xlabel_size = 16
ylabel_size = 16
tick_size = 14
tick_rotation = 90
minor_ticks = True
grid = True
# colors = ['mediumblue','darkolivegreen','sienna','purple','orange','crimson']
colors = ['royalblue','indianred','mediumorchid','yellowgreen','orange','orangered']
# colors = ['b','r','m','g','y','k']
# colors = ['deepskyblue','orangered','deeppink','limegreen','coral','darkgray']
thickness = 3
# Run the analysis
breakdown, results, vehicle = main()
# Find Average Propeller Efficiency
segment_time = np.array(range(len(results.segments.values())), float)
segment_etap = np.array(range(len(results.segments.values())), float)
for i in range(len(results.segments.values())):
time = results.segments[i].conditions.frames.inertial.time[:,0]
etap = results.segments[i].conditions.propulsion.etap[:,0]
segment_time[i] = time[-1] - time[0]
segment_etap[i] = np.average(etap)
avgetap = np.average(segment_etap, weights = segment_time)
# DRAG POLAR AND DRAG BREAKDOWN
Cl = np.empty(0)
Cd = np.empty(0)
Cdi = np.empty(0)
Cdp = np.empty(0)
Cd_wing = np.empty(0)
Cd_flap = np.empty(0)
Cd_hstab = np.empty(0)
Cd_vstab = np.empty(0)
Cd_fus = np.empty(0)
Cd_mlg = np.empty(0)
Cd_nlg = np.empty(0)
Cd_exc = np.empty(0)
Cd_para = np.empty(0)
# Put every drag component into a single array for all segments
for i in range(len(results.segments.values())):
coeffs = results.segments[i].conditions.aerodynamics
Cl = np.append(Cl,coeffs.lift_coefficient)
Cdi = np.append(Cdi,coeffs.drag_breakdown.induced.total)
Cdp = np.append(Cdp,coeffs.drag_breakdown.parasite.total)
Cd = np.append(Cd,coeffs.drag_breakdown.total)
Cd_wing = np.append(Cd_wing, coeffs.drag_breakdown.parasite.main_wing.parasite_drag_coefficient)
Cd_flap = np.append(Cd_flap, coeffs.drag_breakdown.parasite.main_wing.flap_drag)
Cd_hstab = np.append(Cd_hstab, coeffs.drag_breakdown.parasite.horizontal_stabilizer.parasite_drag_coefficient)
Cd_vstab= np.append(Cd_vstab, coeffs.drag_breakdown.parasite.vertical_stabilizer.parasite_drag_coefficient)
Cd_fus = np.append(Cd_fus, coeffs.drag_breakdown.parasite.fuselage.parasite_drag_coefficient)
Cd_mlg = np.append(Cd_mlg, coeffs.drag_breakdown.parasite.main_ldg.parasite_drag_coefficient)
Cd_nlg = np.append(Cd_nlg, coeffs.drag_breakdown.parasite.nose_ldg.parasite_drag_coefficient)
Cd_exc = np.append(Cd_exc, coeffs.drag_breakdown.miscellaneous.total)
Cd_para = np.append(Cd_para, coeffs.drag_breakdown.parasite.total)
# Get the average for each component
Cd_wing = np.mean(Cd_wing)
Cd_flap = np.mean(Cd_flap)
Cd_hstab = np.mean(Cd_hstab)
Cd_vstab = np.mean(Cd_vstab)
Cd_fus = np.mean(Cd_fus)
Cd_mlg = np.mean(Cd_mlg)
Cd_nlg = np.mean(Cd_nlg)
Cd_exc = np.mean(Cd_exc)
Cd_para = np.mean(Cd_para)
# get bkd for component ref area
parasite_bkd = results.segments[0].conditions.aerodynamics.drag_breakdown.parasite
induced_bkd = results.segments[0].conditions.aerodynamics.drag_breakdown.induced
## Cruise Induced Drag
try:
Cdi_cz = np.mean(results.segments['cruise1'].conditions.aerodynamics.drag_breakdown.induced.total)
Cdp_cz = np.mean(results.segments['cruise1'].conditions.aerodynamics.drag_breakdown.parasite.total)
except:
Cdi_cz = 0.
print "Cruise Induced Drag not calculated"
# Fit for a 2 term polar
coeffs, dummy = curve_fit(polar,Cl,Cd)
polar_a = coeffs[0]
polar_b = coeffs[1]
orig_cd0 = 0.03689
orig_k = 0.04606
sorted_Cl = np.sort(Cl)
orig_polar = orig_cd0 + orig_k*sorted_Cl**2
fig = plt.figure('Drag Polar2')
axes = fig.gca()
# axes.plot(Cl,Cdi, ls = '', marker = 'o', color = 'g', label = r'$C_{D_i}$')
# axes.plot(Cl,Cdp, ls = '', marker = 'o', color = 'b', label = r'$C_{D_p}$')
# axes.plot(Cl,Cd, ls = '', marker = 'o', color = 'k', label = r'$C_{D}$')
axes.plot(Cl,Cdi, ls = '', marker = 'o', color = 'g', label = 'Induced Drag Coeff.')
axes.plot(Cl,Cdp, ls = '', marker = 'o', color = 'b', label = 'Parasite Drag Coeff.')
axes.plot(Cl,Cd, ls = '', marker = 'o', color = 'k', label = 'Total Drag Coeff.')
axes.plot(sorted_Cl, orig_polar, ls = '-', marker = '', color = 'r', label = 'Original Polar')
plot_format(fig, axes, xlabel = r'$C_L$', ylabel = r'$C_D$', title = 'Induced and Parasite Drag Polars')
axes.legend(fontsize = 'large')
##FUEL COMSUMPTION
try:
takeoff_fuel = results.segments.takeoff1.conditions.weights.total_mass[0,0] - results.segments.takeoff3.conditions.weights.total_mass[-1,0]
except:
print 'Takeoff Fuel not calculated'
takeoff_fuel = 0.
try:
climb_fuel = results.segments.climb1.conditions.weights.total_mass[0,0] - results.segments.climb5.conditions.weights.total_mass[-1,0]
except:
print 'Climb Fuel not calculated'
climb_fuel = 0.
try:
cruise_ff = np.mean(results.segments.cruise1.conditions.propulsion.fuel_flow)
cruise_fuel | |
# pygbrowse/datasources.py
import os
import numpy
import pandas
import pysam
from scipy.signal import convolve
from . import utilities
from .utilities import log_print
DEFAULT_TAG_COUNT_NORMALIZATION_TARGET = 10000000
DEFAULT_FEATURE_SOURCES = ('ensembl', 'havana', 'ensembl_havana')
DEFAULT_GENE_TYPES = (
'gene', 'RNA', 'mt_gene', 'lincRNA_gene', 'miRNA_gene', 'ncRNA_gene', 'rRNA_gene', 'snRNA_gene', 'snoRNA_gene',
'processed_transcript')
DEFAULT_TRANSCRIPT_TYPES = ('mRNA', 'transcript', 'lincRNA', 'lnc_RNA', 'miRNA', 'ncRNA', 'snRNA', 'snoRNA')
DEFAULT_COMPONENT_TYPES = ('CDS', 'three_prime_UTR', 'five_prime_UTR')
# DEFAULT_MAXIMUM_TRANSCRIPT_SUPPORT = 5
# ToDo: For each class, allow option of loading into memory or leaving on disk (where applicable)
# ToDo: Add indexing of on-disk csv-like files
# ToDo: Refactor vector data sources to transparently interpolate sparse vectors. Probably will have to drop dict-of-series interface.
class _ChromWrapper:
def __init__(self, chrom, parent_data_source):
self.chrom = chrom
self.parent_data_source = parent_data_source
def __getitem__(self, key):
# print(key)
# ToDo: Add support for step argument
try: # See if key is a slice
query_start = key.start
query_end = key.stop
except TypeError: # if not, treat as a scalar index
query_start = key
query_end = key + 1
return self.parent_data_source.query(query_chrom=self.chrom, query_start=query_start, query_end=query_end)
class _DataVector:
    """Pandas-like facade for one chromosome of a data source.

    Exposes a ``.loc`` attribute so callers can use the familiar
    ``source[chrom].loc[start:end]`` syntax.
    """

    def __init__(self, chrom, parent_data_source):
        self.loc = _ChromWrapper(chrom, parent_data_source)
class _VectorDataSource:
# ToDo: Add methods for arithmetic and such, as done for old Pileups class
def __init__(self, transform=None, smoothing_bandwidth=0):
self.transform = transform
if smoothing_bandwidth:
self.convolution_kernel = utilities.gaussian_kernel(smoothing_bandwidth)
else:
self.convolution_kernel = None
def _query(self, query_chrom, query_start, query_end):
print('Stub method -- must be overridden by inheritors')
def query(self, query_chrom, query_start, query_end):
query_result = self._query(query_chrom=query_chrom, query_start=query_start, query_end=query_end)
if self.convolution_kernel is not None:
query_result = pandas.Series(convolve(query_result, self.convolution_kernel, mode='same'), index=query_result.index)
if self.transform:
query_result = self.transform(query_result)
return query_result
def __getitem__(self, key):
return _DataVector(chrom=key, parent_data_source=self)
class SparseVectors(_VectorDataSource):
    """Vector data source backed by an in-memory dict of sparse pandas
    Series, keyed by chromosome name and indexed by genomic position."""

    def __init__(self, series_dict, transform=None, convolution_kernel=None):
        # NOTE: deliberately does not call super().__init__ -- the kernel is
        # supplied pre-built here rather than derived from a bandwidth.
        self.data = series_dict
        self.transform = transform
        self.convolution_kernel = convolution_kernel

    def _query(self, query_chrom, query_start, query_end):
        this_chrom_vector = self.data[query_chrom]
        # Include the last stored point at or before query_start. Clamp at 0:
        # previously a query starting before the first point produced -1,
        # and iloc[-1:...] silently returned an empty/wrong slice.
        start_ipos = max(numpy.searchsorted(this_chrom_vector.keys(), query_start) - 1, 0)
        end_ipos = numpy.searchsorted(this_chrom_vector.keys(), query_end) + 1
        return this_chrom_vector.iloc[start_ipos:end_ipos]
class TagDirectory(_VectorDataSource):
    """Per-base read-depth vectors computed on the fly from a HOMER tag
    directory (one ``<chrom>.tags.tsv`` file per chromosome)."""

    # HOMER encodes strand as 0/1; translate to '+'/'-'.
    tag_strand_translator = {0: '+', 1: '-'}

    def __init__(self, tag_directory_path, normalize_to=DEFAULT_TAG_COUNT_NORMALIZATION_TARGET, transform=None,
                 smoothing_bandwidth=0):
        super(TagDirectory, self).__init__(transform=transform, smoothing_bandwidth=smoothing_bandwidth)
        self.tag_directory_path = tag_directory_path
        if normalize_to:
            # extract total tag count from tagInfo.txt
            tag_info_fname = os.path.join(tag_directory_path, 'tagInfo.txt')
            with open(tag_info_fname, 'rt') as tag_info_file:
                sizeline = tag_info_file.readlines()[1].strip().split('\t')
            num_tags = int(float(sizeline[2]))
            self.normalization_factor = normalize_to / num_tags
        else:
            # Bug fix: previously left unset when normalization was disabled,
            # so _query crashed with AttributeError. 1.0 means "no scaling".
            self.normalization_factor = 1.0

    def _query(self, query_chrom, query_start, query_end, read_handling='reads'):
        # ToDo: Add argument validation to all functions and methods with string parameters
        # ToDo: Add verbosity-based logging output
        # ToDo; Compare performance with memory-mapped pandas DataFrames
        query_result = pandas.Series(numpy.zeros(query_end - query_start), index=numpy.arange(query_start, query_end))
        tag_filename = os.path.join(self.tag_directory_path, '{}.tags.tsv'.format(query_chrom))
        start_offset = utilities.binary_search_tag_file(tag_filename=tag_filename, search_target=query_start + 1)
        done = False
        with open(tag_filename, 'rt') as tag_file:
            tag_file.seek(start_offset)
            # print(start_offset)
            while not done:
                line_fields = tag_file.readline().strip().split('\t')
                # print(line_fields)
                if len(line_fields) > 1:
                    # chrom = line_fields[0]
                    read_start = int(line_fields[1]) - 1
                    # strand = self.tag_strand_translator[int(line_fields[2])]
                    depth = float(line_fields[3])
                    if read_handling == 'starts':
                        assert read_start > query_start
                        if read_start < query_end:
                            query_result.loc[read_start] += depth
                        else:
                            done = True
                    elif read_handling == 'reads':
                        # ToDo: Hard to do this in a streaming fashion because we don't know how far upstream to seek to capture left-overhanging reads.
                        read_len = int(line_fields[4])
                        if query_start < read_start <= query_end or query_start < read_start + read_len <= query_end:
                            # print(max(read_start, query_start), min(read_start + read_len,
                            #                                         query_end))
                            query_result.loc[max(read_start, query_start):min(read_start + read_len,
                                                                              query_end)] += depth  # trim to visible vector
                else:
                    # Short/empty line: end of file reached -- stop scanning.
                    done = True
        query_result *= self.normalization_factor
        return query_result
class IntervalData:
    """Standardized container for genomic interval (peak/region) data."""
    # HOMER_PEAKFILE_HEADER_ROW = 39
    # HOMER_PEAKFILE_COLUMN_RENAMER = {'chr': 'chrom', 'start': 'chromStart', 'end': 'chromEnd'}
    HOMER_PEAKFILE_NAMES = ('chrom', 'chromStart', 'chromEnd', 'strand', 'normed_tag_count')
    HOMER_ANNOTATEDPEAKS_COLUMN_RENAMER = {'Chr': 'chrom', 'Start': 'chromStart', 'End': 'chromEnd', 'Strand': 'strand'}

    def __init__(self, interval_data, format='bed'):
        """
        Loads genomic interval information in various formats and stores them in a standardized form as a
        pandas.DataFrame in self.data.

        :param:`interval_data` should be a pandas.DataFrame representing BED-formatted genomic data, or,
        alternatively, a filename pointing to one of the following file formats:

        * A BED file
        * A HOMER peak file
        * A HOMER annotated peak file.

        If a filename is passed instead of a DataFrame, :param:`format` should be specified. Allowed values are:
        'bed', 'homer', 'homer_annotated'

        :param interval_data:
        :param format:
        """
        # Sniff the input: a labeled DataFrame succeeds here, an unlabeled
        # DataFrame raises KeyError, and a filename string raises
        # AttributeError (strings have no .loc).
        try:
            _ = interval_data.loc[:, ['chrom', 'chromStart', 'chromEnd', 'strand']]
        except KeyError:  # maybe it's a BED DataFrame without column names?
            log_print('Guessing this is a BED-style DataFrame without column names')
            assert interval_data.shape[1] >= 3, 'Not enough columns (got {})!'.format(interval_data.shape[1])
            if interval_data.shape[1] >= 6:  # assume name is still separate column
                self.data = interval_data.copy()
                self.data.columns = ['chrom', 'chromStart', 'chromEnd', 'name',
                                     'score', 'strand'] + list(self.data.columns)[6:]
                self.data.index = self.data['name']
            elif interval_data.shape[1] == 5:  # assume name has been made the index and deleted from the columns
                self.data = interval_data.copy()
                self.data.columns = ['chrom', 'chromStart', 'chromEnd', 'score',
                                     'strand']
            else:
                # 3 or 4 columns: the negative slice trims the name list to
                # the available column count (e.g. shape 4 -> [:-1]).
                self.data = interval_data.copy()
                self.data.columns = ['chrom', 'chromStart', 'chromEnd', 'score',
                                     'strand'][:interval_data.shape[1] - 5]
            self.data.index.name = 'IntervalID'
        except (AttributeError,):  # guessing it's a filename string
            log_print('Guessing {} is a filename'.format(interval_data))
            # if format == 'auto':
            #     extension = filename.split('.')[-1]
            #     if extension.lower() == 'bed':
            #         format = 'bed'
            #     elif extension.lower() == 'homer':
            #         # ToDo: Add more sophisticated methods of detecting formats since, e.g. .txt can refer to many.
            #         format = 'homer'
            if format == 'bed':
                self.data = pandas.read_csv(interval_data, sep='\t', index_col=3, comment='#', header=None,
                                            names=['chrom', 'chromStart', 'chromEnd', 'score', 'strand'])
            elif format == 'homer':
                self.data = pandas.read_csv(interval_data, sep='\t', index_col=0, comment='#', header=None)
                self.data.columns = list(self.HOMER_PEAKFILE_NAMES) + list(self.data.columns)[len(self.HOMER_PEAKFILE_NAMES):]
                self.data.index.name = 'peak_id'
                # self.data = self.data.rename(columns=self.HOMER_PEAKFILE_COLUMN_RENAMER)
            elif format == 'homer_annotated':
                self.data = pandas.read_csv(interval_data, index_col=0, sep='\t')
                # The annotated-peaks index header embeds extra text after a
                # space; keep only the first token as the index name.
                self.data.index.name = self.data.index.name.split(' ')[0]
                self.data = self.data.rename(columns=self.HOMER_ANNOTATEDPEAKS_COLUMN_RENAMER)
        else:  # seems to be a properly-formatted DataFrame so just store it
            self.data = interval_data
        # Canonical ordering for all downstream consumers.
        self.data = self.data.sort_values(['chrom', 'chromStart'])
class _GeneModels():
def __init__(self):
"""
Superclass for data sources that describe gene models (gene boundaries, transcript
boundaries, exons, introns, UTRs, etc.).
"""
pass
def _query(self, query_chromosome, query_start, query_end):
print('Must be overridden by inheritors!')
def query(self, chromosome, start, end):
return self._query(query_chromosome=chromosome, query_start=start, query_end=end)
from pygbrowse.datasources import _GeneModels
class Gff3Annotations(_GeneModels):
    """Gene-model source backed by a tabix-indexed GFF3 file.

    Chromosome names are converted on the way in/out with the supplied
    converter callables so mixed naming dialects (e.g. '1' vs 'chr1')
    interoperate.
    """

    def __init__(self,
                 gff3_filename,
                 incoming_chromosome_name_converter=lambda x: utilities.convert_chromosome_name(x, dialect='ensembl'),
                 outgoing_chromosome_name_converter=lambda x: utilities.convert_chromosome_name(x, dialect='ucsc'),
                 feature_sources=DEFAULT_FEATURE_SOURCES,
                 gene_types=DEFAULT_GENE_TYPES,
                 transcript_types=DEFAULT_TRANSCRIPT_TYPES,
                 component_types=DEFAULT_COMPONENT_TYPES,
                 # maximum_transcript_support=DEFAULT_MAXIMUM_TRANSCRIPT_SUPPORT
                 ):
        super(Gff3Annotations, self).__init__()
        # Requires a .tbi index alongside the GFF3 file.
        self.tabix_file = pysam.TabixFile(gff3_filename)
        self.incoming_chromosome_name_converter = incoming_chromosome_name_converter
        self.outgoing_chromosome_name_converter = outgoing_chromosome_name_converter
        self.feature_sources = feature_sources
        self.gene_types = gene_types
        self.transcript_types = transcript_types
        self.component_types = component_types
        # self.maximum_transcript_support = maximum_transcript_support

    def _query(self, query_chromosome, query_start, query_end):
        """Parse all overlapping features into linked dictionaries.

        Returns a 4-tuple ``(genes, transcripts, components,
        gene_names_to_ensembl_ids)`` where genes hold lists of their
        transcript IDs and transcripts hold lists of their component IDs.
        """
        gene_names_to_ensembl_ids = {}
        genes = {}
        transcripts = {}
        components = {}
        component_num = 0  # serial index for components without IDs
        query_rows = self.tabix_file.fetch(self.incoming_chromosome_name_converter(query_chromosome), query_start,
                                           query_end)
        for line in query_rows:
            split_line = line.strip('\n').split('\t')
            source, feature_type = split_line[1], split_line[2]
            if source in self.feature_sources:
                contig = split_line[0]
                start = int(split_line[3])
                end = int(split_line[4])
                strand = split_line[6]
                # Column 9 is 'key=value;key=value;...' attribute pairs.
                fields = dict(field_value_pair.split('=') for field_value_pair in split_line[8].split(';'))
                # print(line)
                if feature_type in self.gene_types:
                    ensembl_id = fields['ID']
                    gene_name = fields['Name']
                    # assert ensembl_id not in genes, 'Duplicate entry for gene {} on line {}'.format(ensembl_id,
                    #                                                                                 line_num)
                    genes[ensembl_id] = {'contig': contig,
                                         'start': start - 1,  # convert 1-based to 0-based
                                         'end': end,
                                         'strand': strand,
                                         'transcripts': []}
                    genes[ensembl_id].update(fields)
                    if gene_name not in gene_names_to_ensembl_ids:
                        gene_names_to_ensembl_ids[gene_name] = []
                    gene_names_to_ensembl_ids[gene_name].append(ensembl_id)
                    # print('\t added gene {}'.format(ensembl_id))
                elif feature_type in self.transcript_types:
                    # print('\ttranscript has gene parent {}. {}'.format(parent, parent in genes))
                    # try:
                    #     transcript_support_level = int(fields['transcript_support_level'].split(' ')[0])
                    # except ValueError:
                    #     passed_support_filter = False
                    # else:
                    #     passed_support_filter = transcript_support_level < self.maximum_transcript_support
                    ensembl_id = fields['ID']
                    transcripts[ensembl_id] = {'contig': contig,
                                               'start': start - 1,  # convert 1-based to 0-based
                                               'end': end,
                                               'strand': strand,
                                               'components': []}
                    transcripts[ensembl_id].update(fields)
                    # print('\t added transcript {} with parent {}'.format(ensembl_id, parent))
                elif feature_type in self.component_types:
                    # print('\tcomponent has transcript parent {}. {}'.format(parent, parent in transcripts))
                    if 'exon_id' in fields:
                        ensembl_id = fields['exon_id']
                    else:
                        # UTRs etc. carry no stable ID; use a serial number.
                        ensembl_id = str(component_num)
                        component_num += 1
                    components[ensembl_id] = {'contig': contig,
                                              'start': start - 1,  # convert 1-based to 0-based
                                              'end': end,
                                              'strand': strand,
                                              'type': feature_type}
                    components[ensembl_id].update(fields)
        # Second pass: link children to parents via the GFF3 'Parent' field.
        for transcript_id, transcript_data in transcripts.items():
            parent = transcript_data['Parent']
            if parent in genes:
                genes[parent]['transcripts'].append(transcript_id)
            else:
                print('orphan transcript {} with missing parent {}!'.format(transcript_id, parent))
        for component_id, component_data in components.items():
            parent = component_data['Parent']
            if parent in transcripts:
                transcripts[parent]['components'].append(component_id)
            else:
                print('orphan component {} with missing parent {}!'.format(component_id, parent))
        return genes, transcripts, components, gene_names_to_ensembl_ids
class _MatrixData:
def __init__(self):
pass
def _query(self):
print('Must be overridden by inheritors')
def query(self, chrom, start, end):
return self._query(query_chrom=chrom, query_start=start, query_end=end)
class HicDataDir(_MatrixData):
    def __init__(self,
                 fname_template='/home/dskola/projects/coupled_peaks/hic/c57_hic_corrs_{}.tsv',
                 binsize=10000):
        """Hi-C matrix source reading per-chromosome TSV files.

        :param fname_template: path template containing a ``{}`` placeholder
            for the chromosome name. NOTE(review): the default is a
            user-specific absolute path -- callers should supply their own.
        :param binsize: genomic bin width in bp -- presumably matching how
            the stored matrices were generated (confirm against the data).
        """
        self.fname_template = fname_template
        self.binsize = binsize
def _query(self, | |
BackLink(_('my elections'), reverse('contest_list')),
cls='main-container'),
Div(cls='side-container'),
action_section,
sub_section,
cls='flex-container'
)
)
class CandidateDetail(Div):
    """Card-style display of a single candidate (picture, name, subtext,
    description), optionally rendered as a link to the edit view."""

    def __init__(self, candidate, editable=False, **kwargs):
        if editable:
            # When editable, the extra kwargs turn the edit button into an
            # anchor pointing at the candidate update view.
            kwargs['tag'] = 'a'
            kwargs['href'] = reverse('contest_candidate_update', args=[candidate.id])
            kwargs['style'] = 'margin-left: auto; margin-top: 12px;'
        extra_style = 'align-items: baseline;'
        content = []
        if candidate.picture:
            # With a picture the baseline alignment is dropped.
            extra_style = ''
            content.append(
                Div(
                    Image(
                        loading='eager',
                        src=candidate.picture.url,
                        style='width: 100%;'
                              'display: block;'
                    ),
                    style='width: 150px; padding: 12px;'
                )
            )
        subcontent = Div(
            H5(
                candidate.name,
                style='margin-top: 6px; margin-bottom: 6px; word-break: break-all;'
            ),
            I(
                candidate.subtext,
                style=dict(
                    font_size='small',
                    font_weight='initial',
                    word_break='break-all',
                )
            ),
            style='flex: 1 1 65%; padding: 12px;'
        )
        if candidate.description:
            # Escape user input first, then allow only the <br> tags we insert.
            description = mark_safe(escape(candidate.description).replace('\n', '<br>'))
            subcontent.addchild(
                Div(
                    description,
                    style='margin-top: 24px; word-break: break-all;'
                )
            )
        content.append(subcontent)
        # Edit button placement depends on whether a description exists.
        if editable and not candidate.description:
            content.append(
                MDCButtonOutlined(_('edit'), False, 'edit', **kwargs)
            )
        elif editable:
            subcontent.addchild(
                MDCButtonOutlined(_('edit'), False, 'edit', **kwargs)
            )
        if 'style' not in kwargs:
            kwargs['style'] = ''
        super().__init__(
            *content,
            style='padding: 12px;'
                  'display: flex;'
                  'flex-flow: row wrap;'
                  'justify-content: center;'
                  + kwargs.pop('style')
                  + extra_style,
            cls='candidate-detail',
        )
class CandidateAccordionItem(MDCAccordionSection):
    """Accordion section for one candidate: the label is the candidate's
    name, the body is the full (optionally editable) detail view."""
    tag = 'candidate-list-item'

    def __init__(self, candidate, editable=False):
        detail = CandidateDetail(candidate, editable)
        super().__init__(detail, label=candidate.name)
class CandidateAccordion(MDCAccordion):
    """Accordion listing every candidate of a contest, or a placeholder
    message when the contest has no candidates yet."""
    tag = 'candidate-accordion'

    def __init__(self, contest, editable=False):
        if contest.candidate_set.count():
            children = [
                CandidateAccordionItem(candidate, editable)
                for candidate in contest.candidate_set.all()
            ]
        else:
            children = [_('No candidate yet.')]
        super().__init__(*children)
class CandidateListComp(MDCList):
    """MDC list of a contest's candidates; each row becomes a link to the
    candidate update view when the viewer may edit."""
    tag = 'candidate-list'

    def __init__(self, contest, editable=False):
        qs = contest.candidate_set.all()[:]

        def link_attrs(candidate):
            # Editable rows are rendered as anchors to the update view.
            if not editable:
                return dict()
            return dict(
                tag='a',
                href=reverse('contest_candidate_update', args=[candidate.id]),
            )

        if qs.count():
            items = [MDCListItem(c, **link_attrs(c)) for c in qs]
        else:
            items = [_('No candidate yet.')]
        super().__init__(*items)
class VoterList(Ul):
    """Plain list of the contest's registered voter email addresses."""

    def __init__(self, contest):
        emails = contest.voters_emails.split('\n')
        # An empty voters_emails field splits to [''] -- treat as no voters.
        has_emails = emails[0] != ''
        children = (
            [MDCListItem(voter) for voter in emails]
            if has_emails
            else _('No voter yet.')
        )
        super().__init__(*children, cls='mdc-list voters-list')
class ClipboardCopy(MDCTextButton):
    """Button that copies the value of the immediately preceding <input>
    element to the clipboard."""

    # Transpiled to JavaScript client-side (py2js); runs in the browser,
    # not on the server.
    def onclick(target):
        target.previousElementSibling.select()
        document.execCommand('copy')
@template('djelectionguard/candidate_list.html', Document, Card)
class CandidateList(Div):
    """Candidate list page for a contest."""

    def to_html(self, *content, view, **context):
        contest = view.get_object()
        self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
        return super().to_html(
            H4(_('Candidates'), cls='center-text'),
            CandidateAccordion(
                contest,
                # Editable only by the mediator, and only before the
                # election has actually started.
                view.request.user == contest.mediator and not contest.actual_start
            )
        )
@template('djelectionguard/contest_voters_detail.html', Document)
class VotersDetailCard(Div):
    """Mediator-facing table of a contest's voters: email, notification
    timestamps, vote status, and a per-voter OTP login link."""
    style = dict(cls='card')

    def to_html(self, *content, view, **context):
        contest = view.object
        self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
        voters = contest.voter_set.select_related('user')
        table_head_row = Tr(cls='mdc-data-table__header-row')
        for th in ('email', 'vote email sent', 'voted', 'tally email sent'):
            table_head_row.addchild(
                Th(
                    th,
                    role='columnheader',
                    scope='col',
                    cls='mdc-data-table__header-cell overline',
                    style='' if th == 'email' else 'text-align: center;'
                )
            )
        table_head_row.addchild(Th('OTP'))
        table_content = Tbody(cls='mdc-data-table__content')
        cls = 'mdc-data-table__cell'
        for voter in voters:
            otp_link = None
            # The OTP link should land the voter on the vote page until they
            # have cast a ballot, then on the contest detail page.
            if not voter.casted:
                redirect = reverse('contest_vote', args=[contest.pk])
            else:
                redirect = reverse('contest_detail', args=[contest.pk])
            # Reuse an existing unexpired, unused token if one matches.
            token = voter.user.token_set.filter(
                redirect=redirect,
                used=None,
                expiry__gt=timezone.now(),
            ).first()
            if token:
                # Hidden input holds the URL so ClipboardCopy can select it.
                otp_link = CList(
                    Input(
                        value=token.url,
                        style='opacity: 0; position: absolute',
                    ),
                    ClipboardCopy(_('Copy link'), icon='content_copy'),
                )
            else:
                otp_link = MDCTextButton(
                    'Request OTP',
                    href=''.join([
                        reverse('otp_send'),
                        '?email=',
                        voter.user.email,
                        '&redirect=',
                        redirect,
                        '&next=',
                        view.request.path_info,
                    ]),
                    tag='a',
                    icon='shield',
                )
            activated = voter.user and voter.user.is_active
            open_email_sent = (
                voter.open_email_sent.strftime("%d/%m/%Y %H:%M")
                if voter.open_email_sent else ''
            )
            close_email_sent = (
                voter.close_email_sent.strftime("%d/%m/%Y %H:%M")
                if voter.close_email_sent else ''
            )
            table_content.addchild(Tr(
                Td(voter.user.email, cls=cls),
                Td(
                    open_email_sent,
                    cls=cls + ' center',
                ),
                Td(CheckedIcon() if voter.casted else 'No', cls=cls + ' center'),
                Td(
                    close_email_sent,
                    cls=cls + ' center',
                ),
                Td(
                    otp_link,
                    cls=cls + ' center',
                ),
                cls='mdc-data-table__row',
                # Dim rows of voters who never activated their account.
                style='opacity: 0.5;' if not activated else ''
            ))
        table = Table(
            Thead(table_head_row),
            table_content,
            **{
                'class': 'mdc-data-table__table',
                'aria-label': 'Voters'
            }
        )
        edit_btn = MDCButtonOutlined(
            _('edit voters'),
            False,
            'edit',
            tag='a',
            href=reverse('contest_voters_update', args=[contest.id]))
        email_btn = MDCButtonOutlined(
            _('invite new voters'),
            False,
            'email',
            tag='a',
            href=reverse('email_voters', args=[contest.id]))
        # No editing or inviting once the election has ended; no inviting
        # when every voter has already received the opening email.
        if contest.actual_end:
            edit_btn = ''
            email_btn = ''
        if not voters.filter(open_email_sent=None).count():
            email_btn = ''
        return super().to_html(
            H4(
                _('%(count)s Voters', n=voters.count(), count=voters.count()),
                cls='center-text'
            ),
            Div(edit_btn, email_btn, cls='center-button'),
            Div(
                table,
                cls='table-container'),
        )
class ContestCandidateForm(Div):
    """Candidate form wrapper that adds a live 0-300 character counter to
    the description field."""

    def __init__(self, form):
        self.form = form
        # Seed the counter with the current description length, if any.
        self.count = 0
        if form.instance and form.instance.description:
            self.count = len(form.instance.description)
        super().__init__(form)

    # The three functions below are transpiled to JavaScript (py2js) and
    # run in the browser, not on the server.
    def init_counter(form_id, count):
        form = getElementByUuid(form_id)
        counter = form.querySelector('.mdc-text-field-character-counter')
        counter.innerHTML = count + '/300'

    def update_counter(event):
        field = event.currentTarget
        current_count = field.value.length
        # Hard-truncate client-side at 300 characters.
        if current_count > 300:
            field.value = field.value.substr(0, 300)
            current_count = 300
        parent = field.parentElement.parentElement.parentElement
        counter = parent.querySelector('.mdc-text-field-character-counter')
        counter.innerHTML = current_count + '/300'

    def py2js(self):
        self.init_counter(self.id, self.count)
        field = document.getElementById('id_description')
        field.addEventListener('keyup', self.update_counter)
@template('djelectionguard/candidate_form.html', Document, Card)
class ContestCandidateCreateCard(Div):
    """Candidate management page: current candidate accordion plus an
    'add candidate' form when the viewer may still edit."""

    def to_html(self, *content, view, form, **context):
        contest = view.get_object()
        # Only the mediator may edit, and only before the election starts.
        editable = (view.request.user == contest.mediator
                    and not contest.actual_start)
        self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
        form_component = ''
        if editable:
            form_component = Form(
                ContestCandidateForm(form),
                CSRFInput(view.request),
                MDCButton(_('Add candidate'), icon='person_add_alt_1'),
                method='POST',
                cls='form')
        count = contest.candidate_set.count()
        return super().to_html(
            H4(
                _('%(count)s Candidates', n=count, count=count),
                cls='center-text'
            ),
            CandidateAccordion(contest, editable),
            H5(_('Add a candidate'), cls='center-text'),
            form_component,
            cls='card'
        )
@template('djelectionguard/candidate_update.html', Document, Card)
class ContestCandidateUpdateCard(Div):
    """Edit page for one candidate, with save and delete actions."""

    def to_html(self, *content, view, form, **context):
        candidate = view.get_object()
        contest = candidate.contest
        self.backlink = BackLink(
            _('back'),
            reverse('contest_candidate_create', args=[contest.id]))
        delete_btn = MDCTextButton(
            _('delete'),
            'delete',
            tag='a',
            href=reverse('contest_candidate_delete', args=[candidate.id]))
        return super().to_html(
            H4(
                _('Edit candidate'),
                style='text-align: center;'
            ),
            Form(
                CSRFInput(view.request),
                ContestCandidateForm(form),
                Div(
                    Div(delete_btn, cls='red-button-container'),
                    MDCButton(_('Save'), True),
                    style='display: flex; justify-content: space-between'),
                method='POST',
                cls='form'),
            cls='card'
        )
@template('djelectionguard/voters_update.html', Document, Card)
class ContestVotersUpdateCard(Div):
    """Page for editing the contest's allowed-voter email list."""

    def to_html(self, *content, view, form, **context):
        contest = view.get_object()
        self.backlink = BackLink(
            _('back'),
            reverse('contest_detail', args=[contest.id]))
        voters = contest.voter_set.all()
        count = voters.count()
        return super().to_html(
            H4(_('%(count)s Voters', n=count, count=count), style='text-align: center;'),
            Div(_('The list of allowed voters with one email per line (sparated by Enter/Return ⏎)'), cls='body-2', style='margin-bottom: 24px;text-align: center;'),
            Form(
                CSRFInput(view.request),
                form,
                MDCButton(_('Save')),
                method='POST',
                cls='form'
            ),
            cls='card'
        )
@template('djelectionguard/guardian_form.html', Document, Card)
class GuardianVerifyCard(Div):
    """Guardian key-verification page: upload the .pkl private key file to
    prove possession; submit stays disabled until a file is chosen."""

    def to_html(self, *content, view, form, **context):
        guardian = view.get_object()
        contest = guardian.contest
        self.backlink = BackLink(
            _('back'),
            reverse('contest_detail', args=[contest.id]))
        self.submit_btn = MDCButton(_('confirm'), True, disabled=True)
        # Stashed so the client-side handler below can find the button.
        self.submit_btn_id = self.submit_btn.id
        # NOTE(review): translatable strings below contain typos ('posses',
        # 'temepered', 'privacy key') -- fix in the source strings and
        # translation catalogs together, not here.
        return super().to_html(
            H4(_('Confirm possession of an uncompromised private key'), cls='center-text'),
            Div(_('You need to upload your private key to confirm that you posses a valid key that hasn’t been temepered with.'), cls='center-text'),
            Form(
                MDCFileField(
                    Input(id='file_input', type='file', name='pkl_file'),
                    label=_('Choose file')),
                Span(_("Your privacy key is a file with '.pkl' extension."), cls='body-2'),
                self.submit_btn,
                CSRFInput(view.request),
                enctype='multipart/form-data',
                method='POST',
                cls='form',
            ),
            cls='card'
        )

    # Transpiled to JavaScript (py2js): enables submit once a file is picked.
    def enable_post(event):
        file_input = document.querySelector('#file_input')
        file_name = file_input.value
        btn = getElementByUuid(file_input.submit_btn_id)
        btn.disabled = file_name == ''

    def py2js(self):
        file_input = document.querySelector('#file_input')
        file_input.submit_btn_id = self.submit_btn_id
        file_input.addEventListener('change', self.enable_post)
@template('djelectionguard/contest_pubkey.html', Document, Card)
class ContestPubKeyCard(Div):
    """Confirmation page for creating the joint public key ('locking the
    ballot box'), which purges guardians' private keys from the server."""

    def to_html(self, *content, view, form, **context):
        contest = view.get_object()
        self.backlink = BackLink(
            _('back'),
            reverse('contest_detail', args=[contest.id]))
        return super().to_html(
            H4(_('Lock the ballot box'), cls='center-text'),
            Div(
                P(_('This will remove all guardians’ private keys from the server memory.')),
                P(_('When the voting is over the ballot box can only be opened when all guardians upload their private keys.')),
                P(_('This is what makes the governing of the election decentralised.'))
            ),
            Form(
                CSRFInput(view.request),
                Div(
                    MDCButton(_('create')),
                    style='width: fit-content; margin: 0 auto;'
                ),
                method='POST',
                cls='form',
            ),
            cls='card'
        )
@template('email_voters', Document, Card)
class ContestEmailVoters(Div):
    """Page for composing the invitation email sent to newly added voters."""

    def to_html(self, *content, view, **context):
        contest = view.get_object()
        self.backlink = BackLink(
            _('back'),
            reverse('contest_voters_detail', args=[contest.id]))
        return super().to_html(
            H4(_('Send an invite to the newly added voters'), cls='center-text'),
            Form(
                context['form']['email_title'],
                context['form']['email_message'],
                CSRFInput(view.request),
                MDCButton(context['form'].submit_label),
                method='POST',
                cls='form'
            ),
            cls='card'
        )
@template('contest_open', Document, Card)
class ContestOpenCard(Div):
    """Confirmation page for opening an election for voting, with an
    optional opening-email composer and an opt-out checkbox."""

    def to_html(self, *content, view, **context):
        contest = view.get_object()
        self.backlink = BackLink(
            _('back'),
            reverse('contest_detail', args=[contest.id]))
        return super().to_html(
            H4(_('Open the election for voting'), cls='center-text'),
            Div(
                P(_('Once you open the election for voting you can’t make changes to it.')),
                cls='center-text'
            ),
            Form(
                context['form']['email_title'],
                context['form']['email_message'],
                # Single opt-out checkbox: when ticked, no email is sent.
                MDCMultipleChoicesCheckbox(
                    'send_email',
                    ((0, B(_('Do not alert voters by email')), 'true'),),
                    n=1
                ),
                CSRFInput(view.request),
                MDCButton(_('open')),
                method='POST',
                cls='form'
            ),
            cls='card'
        )
class DialogConfirmForm(Form):
def __init__(self, *content, selections=[], max_selections=1, **attrs):
def hidden_selections():
for s in selections:
candidate = CandidateDetail(s)
candidate.style.display = 'none'
candidate.attrs['data-candidate-id'] = s.id
yield candidate
self.max_selections = max_selections
actions = MDCDialogActions(
MDCDialogCloseButtonOutlined(_('modify')),
MDCDialogAcceptButton(
_('confirm'),
addcls='mdc-button--raised black-button',
),
style={
'display': 'flex',
'justify-content': 'space-around'
}
)
self.remaining_text_start = str(_('If you want it, you have'))
self.remaining_text_end = str(_('choice left'))
self.remaining_text_end_plural = str(_('choices left'))
super().__init__(
*content,
MDCDialog(
_('Confirm your selection'),
Div(
_('Be careful, once confirmed,'
' your choice is definitive and cannot be changed'),
*hidden_selections(),
),
actions=Div(
actions,
Div(
Span(id='remaining'),
style=dict(
background='aliceblue',
text_align='center',
padding='12px',
margin='24px',
margin_top='0'
),
),
)
),
**attrs
)
def ondialogclosed(event):
candidates = event.currentTarget.querySelectorAll('[data-candidate-id]')
for candidate in candidates:
candidate.style.display = 'none'
def ondialogclosing(event):
if event.detail.action == 'accept':
form.submit()
def handle_submit(event):
event.preventDefault()
this.dialog = this.querySelector('mdc-dialog')
selections = new.FormData(this).getAll('selections')
for selection in selections:
candidate = this.dialog.querySelector(
'[data-candidate-id="' + selection + '"]'
)
candidate.style.display = 'flex'
remaining = this.max_selections - len(selections)
self.update_remaining(this, remaining)
this.dialog.onclosing = self.ondialogclosing
this.dialog.onclosed = self.ondialogclosed
this.dialog.open()
def update_remaining(form, remaining):
elem = document.querySelector('#remaining')
remaining_text = (
form.remaining_text_start + ' ' + remaining + ' '
)
if remaining > 1:
remaining_text += form.remaining_text_end_plural
else:
remaining_text += form.remaining_text_end
if remaining == 0:
elem.parentElement.style.display | |
<filename>banner_ops.py
import os
import json
import copy
import decimal
decimal.getcontext()
Dec = decimal.Decimal
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder with support for ``decimal.Decimal`` values.

    A Decimal is serialized as ``{'__Decimal__': '<string form>'}`` so it
    can be restored losslessly by the matching ``as_decimal`` object hook.
    """

    def default(self, o):
        # Only Decimal gets special treatment; defer everything else to the
        # base class, which raises TypeError for unserializable objects.
        if not isinstance(o, decimal.Decimal):
            return json.JSONEncoder.default(self, o)
        return {'__Decimal__': str(o)}
def as_decimal(dct):
    """Object hook for ``json.loads`` that restores Decimal values.

    Dictionaries shaped like ``{'__Decimal__': '<digits>'}`` (as produced
    by ``DecimalEncoder``) are converted back to ``decimal.Decimal``;
    any other dictionary is returned untouched.
    """
    if '__Decimal__' not in dct:
        return dct
    return decimal.Decimal(dct['__Decimal__'])
def delete(banner_name):
    """Delete the stored banner file named *banner_name*.

    The banner is looked up in the ``banner_storage`` directory under the
    current working directory.  Prints a message instead of raising when
    the banner does not exist.
    """
    # os.path.join is portable; the previous '\\'-concatenation only
    # produced valid paths on Windows.
    path = os.path.join(os.getcwd(), 'banner_storage', banner_name)
    try:
        os.remove(path)
    except FileNotFoundError:
        print('That banner does not exist.')
def checkquit(s):
    """Exit the program when *s* is the sentinel string ``'exit'``.

    Called after every prompt so the user can abort at any point.
    """
    if s == 'exit':
        # Raise SystemExit directly: the exit() builtin is injected by the
        # site module for interactive use and is not guaranteed to exist
        # in every interpreter (e.g. when run with -S or frozen).
        raise SystemExit()
def checkname(charname):
    """Map a user-typed unit name to its stored spelling.

    Only two names need correcting; everything else passes through.
    """
    corrections = {
        "Poli'ahu": 'Poliʻahu',
        'Juggernaut': 'Juggernaut (Dragon)',
    }
    return corrections.get(charname, charname)
class Banner:
"""Defines and saves a banner that you can pull on.
Pulls information from pools.json and accepts user input to
create and store a banner for later calculations. There are
a few known issues, such as the fact that you can add many
pools twice with the inclusion of 'All'. This is intended
to be fixed in a later update.
Attributes
----------
available : {dict}
Nested dictionary with four key values, denoting:
{class : {pool : {rarity : {information}}}}
class : str
'adventurer' or 'dragon'
pool : str
'All', 'Collab', 'Dragonyule', 'Gala',
'Halloween', 'Permanent', 'Seasonal',
"Valentine's", or 'Zodiac'
rarity : str
'3', '4', or '5'
information : str
'contents' or 'size'
contents : [str]
List of unit names.
size : int
Denotes the size of the pool.
template : {dict}
Nested dictionary containing information on pool
probabilities and ratios. See 'banner_templates.py'.
pools : {dict}
Nested dictionary that stores information on the pools of
the banner under construction. Information is similar to
that of available (see above), but the depth and order of
keys is different.
{'rarity' : {'class' : {'information'}}}
rarity : str
'3', '4', or '5'
class : str
'adventurer' or 'dragon'
information : str
'size' or 'contents'
size : int
Denotes the size of the pool.
contents : [str]
List of unit names.
banner : {dict}
The banner under construction. The first key denotes
what information needs to be retrieved, but the
dictionaries associated with those keys all have
wildly different structures, so it is difficult to
give a summary of them here. The first set of keys,
and the information the associated dictionaries
contain is as follows, though:
{'banner rates', 'focus', 'max pity', 'pool'}
banner rates :
Contains the breakdown of probabilities for
each unit designation
focus :
Contains the names and designations of each
of the focus units.
max pity :
Denotes the number of tenpulls needed to reach
maximum pity. Implicitly reveals whether the
banner is a normal banner or a gala banner.
pool :
Contains the contents and size of the pool
of available units. If a unit is not in this
pool, it cannot be pulled on this banner.
p_adds : [str]
A list of pool names denoting those which have already
been added.
Parameters
----------
template : {dict}
Nested dictionary of a particular format.
Expected to be one of the templates listed in
'banner_templates.py' though in theory you could
use an external one, as long as it fit the format.
"""
def __init__(self, template):
with open("pools.json", "r") as f:
self.available = json.load(f)
self.template = template
blank = {
'dragon' : {
'size' : 0,
'contents' : []
},
'adventurer' : {
'size' : 0,
'contents' : []
}
}
self.pools = {
'5' : copy.deepcopy(blank),
'4' : copy.deepcopy(blank),
'3' : copy.deepcopy(blank)
}
self.banner = {}
self.p_adds = []
self.input_pools()
self.input_focus()
self.set_rates()
commit = input('Proceed to save banner? [y/n]: ')
if commit.lower() in ['y', 'ye', 'yes', 'yeah']:
self.store_banner()
def add_pools(self, pool):
"""Attempts to add a pool to the current banner.
Parameters
----------
pool : str
Name of the pool attempting to be added
"""
if pool not in self.p_adds:
self.p_adds += [pool]
for rarity in ['5', '4', '3']:
for classification in ['adventurer', 'dragon']:
for category in ['size', 'contents']:
self.pools[rarity][classification][category] += self.available[classification][pool][rarity][category]
else:
print('That pool has already been added.')
def input_pools(self):
"""Prompts user input of pools to the current banner.
Prompts the user to input the pools relevant to the current
banner, and checks to see that entry is valid. Also checks
to see that the permanent pool is included, however it does
ensure that 'All' and 'Permanent' are mutually exclusive.
This can result in mis-sized pools, and is intended to be
addressed in a future update.
"""
breaker = 0
while breaker == 0:
new_pool = input("Please add a pool to the banner [Permanent, Gala, Seasonal, Dragonyule, Halloween, Valentine's, Zodiac, Collab]: ")
checkquit(new_pool)
try:
self.add_pools(new_pool)
except KeyError:
print('That was not a valid pool')
again = input('Add another pool? [y/n]: ')
checkquit(again)
if again.lower() in ['y', 'yes', 'ye', 'yeah']:
continue
else:
if 'Permanent' not in self.p_adds and 'All' not in self.p_adds:
no_perm = input('The permanent pool has not been included. Continue anyway? [y/n]: ')
checkquit(no_perm)
if no_perm.lower() not in ['y', 'yes', 'ye', 'yeah']:
continue
breaker = 1
def add_focus(self, unit, rarity, classification):
"""Attempts to add a focus unit to the current banner.
Parameters
----------
unit : str
The name of the unit attempting to be added.
rarity : str
The rarity of the unit attempting to be added.
classification : str
Denotes whether the unit is a dragon or an adventurer.
Returns
-------
bool
Indicates whether or not the attempted addition was
successful. Unintuitively, 'False' indicates success.
"""
if unit not in self.banner.keys():
self.banner[unit] = {}
self.banner[unit]['rarity'] = rarity
self.banner[unit]['classification'] = classification
return False
else:
print(f'{unit} is already on the banner.')
return True
    def input_focus(self):
        """Prompts user input of focus units to the current banner.
        Prompts the user for input of focus units, checks whether
        or not the requested input is valid, and if so adds the
        unit to the list of focuses. If a unit is not found to
        exist, the user is asked if they would like to define
        that unit. This feature has not currently been thoroughly
        tested, so there may be some issues with detection further
        down the line.
        """
        breaker = 0
        while breaker == 0:
            new_focus = input('Please add a unit to the banner [NOTE - very picky]: ')
            checkquit(new_focus)
            # Normalize to the stored spelling before searching the pools.
            new_focus = checkname(new_focus)
            not_found = True
            for rarity in ['5', '4', '3']:
                for classification in ['adventurer', 'dragon']:
                    if new_focus in self.pools[rarity][classification]['contents']:
                        # The two `continue`s below skip only the current
                        # classification, not the whole prompt loop.
                        # NOTE(review): when a unit is rejected by these
                        # template checks, not_found stays True, so the
                        # "unit was not found" prompt below still fires.
                        if self.template[rarity]['Focus']['base'] == 0:
                            print('This banner template does not have focus units of that rarity.')
                            continue
                        if self.template[rarity]['Focus'][classification] == 0:
                            print(f'This banner template does not have focus {classification}s of that rarity.')
                            continue
                        not_found = False
                        # add_focus returns False on success (inverted flag);
                        # a focus unit is removed from the generic pool count.
                        flag = self.add_focus(new_focus, rarity, classification)
                        if not flag:
                            self.pools[rarity][classification]['size'] -= 1
            if not_found:
                # Unit not in any pool: optionally let the user define it.
                go_ahead = input('This unit was not found in the pool. Add them anyway? [y/n]: ')
                checkquit(go_ahead)
                if go_ahead.lower() in ['y', 'yes', 'ye', 'yeah']:
                    rare_repeat = True
                    class_repeat = True
                    while rare_repeat:
                        unit_rarity = input('What is the rarity of this unit? [5, 4, 3]: ')
                        checkquit(unit_rarity)
                        if unit_rarity in ['5', '4', '3']:
                            rare_repeat = False
                        else:
                            print('That is not a valid rarity.')
                    while class_repeat:
                        unit_classification = input('What type of unit is this? [dragon, adventurer]: ')
                        checkquit(unit_classification)
                        if unit_classification.lower() in ['dragon', 'd', 'drag']:
                            unit_classification = 'dragon'
                            class_repeat = False
                        elif unit_classification.lower() in ['adventurer', 'adv', 'a']:
                            unit_classification = 'adventurer'
                            class_repeat = False
                        else:
                            print('That is not a valid classification.')
                    # User-defined units are appended to the pool contents but,
                    # unlike units found above, the pool size is not decremented.
                    self.pools[unit_rarity][unit_classification]['contents'] += [new_focus]
                    self.add_focus(new_focus, unit_rarity, unit_classification)
            again = input('Add another unit? [y/n]: ')
            checkquit(again)
            if again.lower() in ['y', 'yes', 'ye', 'yeah']:
                continue
            else:
                breaker = 1
def set_rates(self):
"""Sets the rates for the current banner."""
f_counts = {}
for rarity in ['5', '4', '3']:
f_counts[rarity] = {}
for classification in ['dragon', 'adventurer']:
f_counts[rarity][classification] = 0
for unit in self.banner.values():
if (unit['rarity'], unit['classification']) == (rarity, classification):
f_counts[rarity][classification] += 1
#note: very messy^
banner_rates = {}
for rarity in ['5', '4', '3']:
banner_rates[rarity] = {}
for unit_type in ['Focus', 'Non Focus']:
banner_rates[rarity][unit_type] = {}
for classification in ['dragon', 'adventurer']:
banner_rates[rarity][unit_type][classification] = self.rate_handler(f_counts, rarity, unit_type, classification)
self.banner_rates = banner_rates
def rate_handler(self, counts, | |
to global map
if not globalCompleteDsMap.has_key(tmpEleName):
globalCompleteDsMap[tmpEleName] = []
globalCompleteDsMap[tmpEleName].append(tmpEleLoc)
# use incomplete locations if no complete replica at online sites
if includeIncomplete or not tmpFoundFlag:
for tmpEleLoc in tmpEleLocs[0]:
# don't use TAPE
if isTapeSite(tmpEleLoc):
if not resTapeSites.has_key(tmpEleLoc):
resTapeSites[tmpEleLoc] = []
if not tmpEleName in resTapeSites[tmpEleLoc]:
resTapeSites[tmpEleLoc].append(tmpEleName)
continue
# append
if not outTmp.has_key(tmpEleLoc):
outTmp[tmpEleLoc] = [{'found':0,'useddatasets':[]}]
# increment
outTmp[tmpEleLoc][0]['found'] += 1
# append list
if not tmpEleName in outTmp[tmpEleLoc][0]['useddatasets']:
outTmp[tmpEleLoc][0]['useddatasets'].append(tmpEleName)
else:
# check completeness
tmpIncompList = []
tmpFoundFlag = False
for tmpOutKey,tmpOutVar in out.iteritems():
# don't use TAPE
if isTapeSite(tmpOutKey):
if not resTapeSites.has_key(tmpOutKey):
resTapeSites[tmpOutKey] = []
if not tmpName in resTapeSites[tmpOutKey]:
resTapeSites[tmpOutKey].append(tmpName)
continue
# protection against unchecked
tmpNfound = tmpOutVar[0]['found']
# complete or not
if isinstance(tmpNfound,types.IntType) and tmpNfound == tmpOutVar[0]['total']:
outTmp[tmpOutKey] = [{'found':1,'useddatasets':[tmpName]}]
# found online site
if isOnlineSite(tmpOutKey):
tmpFoundFlag = True
# add to global map
if not globalCompleteDsMap.has_key(tmpName):
globalCompleteDsMap[tmpName] = []
globalCompleteDsMap[tmpName].append(tmpOutKey)
else:
# keep just in case
if not tmpOutKey in tmpIncompList:
tmpIncompList.append(tmpOutKey)
# use incomplete replicas when no complete at online sites
if includeIncomplete or not tmpFoundFlag:
for tmpOutKey in tmpIncompList:
outTmp[tmpOutKey] = [{'found':1,'useddatasets':[tmpName]}]
# replace
out = outTmp
# sum
for tmpOutKey,tmpOutVar in out.iteritems():
if not allOut.has_key(tmpOutKey):
allOut[tmpOutKey] = [{'found':0,'useddatasets':[]}]
allOut[tmpOutKey][0]['found'] += tmpOutVar[0]['found']
allOut[tmpOutKey][0]['useddatasets'] += tmpOutVar[0]['useddatasets']
# replace
out = allOut
if verbose:
print out
# choose sites where most files are available
if not woFileCheck:
tmpMaxFiles = -1
for origTmpSite,origTmpInfo in out.iteritems():
# get PandaID
tmpPandaSite = convertDQ2toPandaID(origTmpSite)
# check status
if PandaSites.has_key(tmpPandaSite) and (notSiteStatusCheck or PandaSites[tmpPandaSite]['status'] == 'online'):
# don't use TAPE
if isTapeSite(origTmpSite):
if not resTapeSites.has_key(origTmpSite):
if origTmpInfo[0].has_key('useddatasets'):
resTapeSites[origTmpSite] = origTmpInfo[0]['useddatasets']
else:
resTapeSites[origTmpSite] = names
continue
# check the number of available files
if tmpMaxFiles < origTmpInfo[0]['found']:
tmpMaxFiles = origTmpInfo[0]['found']
# remove sites
for origTmpSite in out.keys():
if out[origTmpSite][0]['found'] < tmpMaxFiles:
# use sites where most files are avaialble if output container is not used
if not useOutContainer:
del out[origTmpSite]
if verbose:
print out
tmpFirstDump = True
for origTmpSite,origTmpInfo in out.iteritems():
# don't use TAPE
if isTapeSite(origTmpSite):
if not resTapeSites.has_key(origTmpSite):
resTapeSites[origTmpSite] = origTmpInfo[0]['useddatasets']
continue
# collect DQ2 IDs
if not origTmpSite in retDQ2IDs:
retDQ2IDs.append(origTmpSite)
for tmpUDS in origTmpInfo[0]['useddatasets']:
if not retDQ2IDmap.has_key(tmpUDS):
retDQ2IDmap[tmpUDS] = []
if not origTmpSite in retDQ2IDmap[tmpUDS]:
retDQ2IDmap[tmpUDS].append(origTmpSite)
# patch for SRM v2
tmpSite = convSrmV2ID(origTmpSite)
# if candidates are limited
if locCandidates != None and (not tmpSite in locCandidatesSrmV2):
continue
if verbose:
tmpLog.debug('%s : %s->%s' % (tmpName,origTmpSite,tmpSite))
# check cloud, DQ2 ID and status
tmpSiteBeforeLoop = tmpSite
for tmpID,tmpSpec in PandaSites.iteritems():
# reset
tmpSite = tmpSiteBeforeLoop
# get list of DQ2 IDs
srmv2ddmList = []
for tmpDdmID in tmpSpec['setokens'].values():
srmv2ddmList.append(convSrmV2ID(tmpDdmID))
# dump
if tmpFirstDump:
if verbose:
pass
if tmpSite in srmv2ddmList or convSrmV2ID(tmpSpec['ddm']).startswith(tmpSite) \
or (useCVMFS and tmpSpec['iscvmfs'] == True):
# overwrite tmpSite for srmv1
tmpSite = convSrmV2ID(tmpSpec['ddm'])
# exclude long,xrootd,local queues
if isExcudedSite(tmpID):
continue
if not tmpSite in retSites:
retSites.append(tmpSite)
# just collect locations when file check is disabled
if woFileCheck:
break
# append site
if tmpSpec['status'] == 'online' or notSiteStatusCheck:
# return sites in a cloud when it is specified or all sites
if tmpSpec['cloud'] == cloud or (not expCloud):
appendMap = retSiteMap
else:
appendMap = resRetSiteMap
# mapping between location and Panda siteID
if not appendMap.has_key(tmpSite):
appendMap[tmpSite] = []
if not tmpID in appendMap[tmpSite]:
appendMap[tmpSite].append(tmpID)
if origTmpInfo[0].has_key('useddatasets'):
if not tmpID in resUsedDsMap:
resUsedDsMap[tmpID] = []
resUsedDsMap[tmpID] += origTmpInfo[0]['useddatasets']
else:
# not interested in another cloud
if tmpSpec['cloud'] != cloud and expCloud:
continue
# keep bad status sites for info
if not resBadStSites.has_key(tmpSpec['status']):
resBadStSites[tmpSpec['status']] = []
if not tmpID in resBadStSites[tmpSpec['status']]:
resBadStSites[tmpSpec['status']].append(tmpID)
tmpFirstDump = False
# retrun DQ2 IDs
if getDQ2IDs:
if includeIncomplete:
return retDQ2IDmap,resUsedDsMap
return retDQ2IDs
# return list when file check is not required
if woFileCheck:
return retSites
# use reserved map when the cloud doesn't hold the dataset
if retSiteMap == {} and (not expCloud) and (not getReserved):
retSiteMap = resRetSiteMap
# reset reserved map for expCloud
if getReserved and expCloud:
resRetSiteMap = {}
# return map
if verbose:
if not getReserved:
tmpLog.debug("getLocations -> %s" % retSiteMap)
else:
tmpLog.debug("getLocations pri -> %s" % retSiteMap)
tmpLog.debug("getLocations sec -> %s" % resRetSiteMap)
# print bad status sites for info
if retSiteMap == {} and resRetSiteMap == {} and resBadStSites != {}:
msgFirstFlag = True
for tmpStatus,tmpSites in resBadStSites.iteritems():
# ignore panda secific site
if tmpStatus.startswith('panda_'):
continue
if msgFirstFlag:
tmpLog.warning("the following sites hold %s but they are not online" % name)
msgFirstFlag = False
print " status=%s : %s" % (tmpStatus,tmpSites)
if not getReserved:
return retSiteMap
elif not getTapeSites:
return retSiteMap,resRetSiteMap
elif not removeDS:
return retSiteMap,resRetSiteMap,resTapeSites
else:
return retSiteMap,resRetSiteMap,resTapeSites,resUsedDsMap
except:
print status,out
if errStr != '':
print errStr
else:
type, value, traceBack = sys.exc_info()
print "ERROR : invalid DQ2 response - %s %s" % (type,value)
sys.exit(EC_Failed)
#@ Returns number of events per file in a given dataset
#SP 2006
#
def nEvents(name, verbose=False, askServer=True, fileList = {}, scanDir = '.', askUser=True):
    """Return the number of events per file for dataset *name*.

    Tries, in order: the PANDAMON metadata server (askServer=True), a ROOT
    scan of the first local file (askServer=False), and finally an
    interactive prompt (askUser=True).  Exits with EC_Failed when all
    sources fail and prompting is disallowed.

    NOTE(review): ``fileList = {}`` is a mutable default yet is indexed as
    ``fileList[0]`` below — callers appear to pass a list; confirm.
    NOTE(review): the locals ``str`` and ``type`` shadow builtins.
    """
    # @ These declarations can be moved to the configuration section at the very beginning
    # Here just for code clarity
    #
    # Parts of the query
    str1="/?dset="
    str2="&get=evperfile"
    # Form full query string
    m_query = baseURLMON+str1+name+str2
    # manualEnter stays True unless one of the automatic sources succeeds.
    manualEnter = True
    # Send query get number of events per file
    if askServer:
        nEvents=urllib.urlopen(m_query).read()
        if verbose:
            print m_query
            print nEvents
        # On error the monitor returns a full HTML page (or '-1'); only a
        # bare number is accepted.
        if re.search('HTML',nEvents) == None and nEvents != '-1':
            manualEnter = False
    else:
        # use ROOT to get # of events from the first file in fileList
        try:
            import ROOT
            rootFile = ROOT.TFile("%s/%s" % (scanDir,fileList[0]))
            tree = ROOT.gDirectory.Get( 'CollectionTree' )
            nEvents = tree.GetEntriesFast()
            # disable manual entry only when ROOT saw at least one event
            if nEvents > 0:
                manualEnter = False
        except:
            if verbose:
                type, value, traceBack = sys.exc_info()
                print "ERROR : could not get nEvents with ROOT - %s %s" % (type,value)
    # In case of error PANDAMON server returns full HTML page
    # Normally return an integer
    if manualEnter:
        if askUser:
            if askServer:
                print "Could not get the # of events from MetaDB for %s " % name
            # Keep prompting until the user types a valid integer.
            while True:
                str = raw_input("Enter the number of events per file (or set --nEventsPerFile) : ")
                try:
                    nEvents = int(str)
                    break
                except:
                    pass
        else:
            print "ERROR : Could not get the # of events from MetaDB for %s " % name
            sys.exit(EC_Failed)
    if verbose:
        # NOTE(review): 'evetns' typo is user-visible runtime text; left
        # unchanged because doc-only edits must not alter output strings.
        print "Dataset %s has %s evetns per file" % (name,nEvents)
    return int(nEvents)
# get PFN from LRC
def _getPFNsLRC(lfns,dq2url,verbose):
    """Resolve LFNs to PFNs by querying an LRC's PoolFileCatalog service.

    *lfns* maps LFN -> metadata dict (must contain 'guid'); *dq2url* is the
    base URL of the LRC.  Queries are batched 40 entries at a time.
    Returns a dict mapping LFN -> PFN; unresolvable entries are skipped.
    Exits with EC_Failed when the catalog XML cannot be parsed.

    NOTE(review): the except clause rebinds the builtins ``type`` and
    ``str`` names at module scope conventions — shadowing to confirm/clean.
    """
    pfnMap = {}
    # instantiate curl
    curl = _Curl()
    curl.verbose = verbose
    # get PoolFileCatalog
    iLFN = 0
    strLFNs = ''
    url = dq2url + 'lrc/PoolFileCatalog'
    firstError = True
    # check if GUID lookup is supported: this probe returns a fixed error
    # string on servers that only accept LFNs.
    useGUID = True
    status,out = curl.get(url,{'guids':'test'})
    if status ==0 and out == 'Must GET or POST a list of LFNs!':
        useGUID = False
    for lfn,vals in lfns.iteritems():
        iLFN += 1
        # make argument: accumulate a space-separated batch of GUIDs/LFNs
        if useGUID:
            strLFNs += '%s ' % vals['guid']
        else:
            strLFNs += '%s ' % lfn
        # Flush every 40 entries, and once more at the end of the input.
        if iLFN % 40 == 0 or iLFN == len(lfns):
            # get PoolFileCatalog
            strLFNs = strLFNs.rstrip()
            if useGUID:
                data = {'guids':strLFNs}
            else:
                data = {'lfns':strLFNs}
            # avoid too long argument
            strLFNs = ''
            # execute (throttled to be gentle on the catalog service)
            status,out = curl.get(url,data)
            time.sleep(2)
            if out.startswith('Error'):
                # LFN not found
                continue
            if status != 0 or (not out.startswith('<?xml')):
                # Report an invalid response only once per call.
                if firstError:
                    print status,out
                    print "ERROR : LRC %s returned invalid response" % dq2url
                    firstError = False
                continue
            # parse the PoolFileCatalog XML payload
            try:
                root = xml.dom.minidom.parseString(out)
                files = root.getElementsByTagName('File')
                for file in files:
                    # get PFN and LFN nodes
                    physical = file.getElementsByTagName('physical')[0]
                    pfnNode = physical.getElementsByTagName('pfn')[0]
                    logical = file.getElementsByTagName('logical')[0]
                    lfnNode = logical.getElementsByTagName('lfn')[0]
                    # convert UTF8 to Raw
                    pfn = str(pfnNode.getAttribute('name'))
                    lfn = str(lfnNode.getAttribute('name'))
                    # remove /srm/managerv1?SFN=
                    pfn = re.sub('/srm/managerv1\?SFN=','',pfn)
                    # append
                    pfnMap[lfn] = pfn
            except:
                print status,out
                type, value, traceBack = sys.exc_info()
                print "ERROR : could not parse XML - %s %s" % (type, value)
                sys.exit(EC_Failed)
    # return
    return pfnMap
# get list of missing LFNs from LRC
def getMissLFNsFromLRC(files,url,verbose=False,nFiles=0):
# get PFNs
pfnMap = _getPFNsLRC(files,url,verbose)
# check Files
missFiles | |
<gh_stars>0
# Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
#
# SPDX-License-Identifier: BSD-3-Clause
import datetime
import logging
import string
from typing import List
import ujson
from craid.eddb.States import States
from craid.eddb.base import GameConstants as gconst
from craid.eddb.faction.Faction import Faction
from craid.eddb.system.InhabitedSystem import InhabitedSystem
from craid.eddb.util.PassThroughDict import PassThroughDict
class FactionInstance(Faction):
# getters/setters for id & name in superclass
    def __init__(self, par: Faction, _mySystem: InhabitedSystem, inf: float,
                 activeStates: States, recoveringStates: States, pendingStates: States):
        """One faction's presence in one particular inhabited system.

        *par* is the parent Faction this instance derives from; *inf* is
        the faction's influence in *_mySystem*; the three States bundles
        capture its active, recovering and pending BGS states.
        """
        super().__init__(par, True)
        self.mySystem: InhabitedSystem = _mySystem
        self.influence: float = inf
        self.active_states: States = activeStates
        self.recovering_states: States = recoveringStates
        self.pending_states: States = pendingStates
    def getSystem(self):
        """The InhabitedSystem this faction instance belongs to."""
        return self.mySystem
    def getFactionID(self):
        """Id of the underlying faction (alias of get_id())."""
        return self.get_id()
    def get_happiness_id(self):
        # NOTE(review): happiness_id is only set via set_happiness_id(),
        # not in __init__ — calling the getter first raises AttributeError.
        return self.happiness_id
    def set_happiness_id(self, hid):
        self.happiness_id = hid
    def getPopulation(self):
        """Population of the host system."""
        return self.mySystem.getPopulation()
    def getSystemID(self):
        """Id of the host system."""
        return self.mySystem.get_id()
    def getSystemName(self):
        """Name of the host system."""
        return self.mySystem.get_name()
    def getSystemNameById(self, _id):
        # Pure delegation to the superclass lookup.
        return super().getSystemNameById(_id)
    def get_government_id(self):
        """Government id of the host system (not of this faction)."""
        return self.mySystem.get_government_id()
    def get_allegiance_id(self):
        """Allegiance id of the host system (not of this faction)."""
        return self.mySystem.get_allegiance_id()
    def get_security_id(self):
        """Security id of the host system."""
        return self.mySystem.get_security_id()
    def get_primary_economy_id(self):
        """Primary-economy id of the host system."""
        return self.mySystem.get_primary_economy_id()
    def get_power_state_id(self):
        """Power-state id of the host system."""
        return self.mySystem.get_power_state_id()
# def getUpdated(self):
# return self.mySystem.getUpdated()
# <1, A single player can easily retreat
# 1-100, A single player can retreatA
# >50, Recommended for small groups
# >100, requires significant team
# excel formula = (D10/15000)*(E10^2)/ 1000
# d=pop, e = inf
def getDifficulty(self) -> float:
theMax = 999999.0
if self.mySystem.getNumberOfFactionsInSystem()<4:
return 400000.0
if not self.canRetreat():
return theMax
e10 = self.getInfluence()
if e10 == 0.0:
return theMax
e10 = max(0.0, e10 - 2.5) # only need to get them to 2.5% to trigger retreat
d10 = self.getPopulation()
ret = (d10 / 15000.0) * (e10 ** 2.0) / 1000.0
if ret < 0.0:
ret = 0.0
if ret > theMax:
ret = theMax
return round(ret, 1) # TODO: trimmed to 3 decimals since no format support
def getDifficultyString(self) -> str:
val = self.getDifficulty()
# "Forcing a retreat from this system would "
if val < .5: return "be extremely easy for one commander"
if val < 1: return "be very easy for one commander"
if val < 10: return "be easy for one commander"
if val < 25: return "be easy"
if val < 50: return "take some work for one commander"
if val < 100: return "be possible for one commander"
if val < 1000: return "require group effort"
if val < 10000: return "require a gargantuan effort"
if val < 100000: return "be well-nigh impossible"
return "seem an impossibility"
def canRetreat(self) -> bool:
if self.isHomeSystem(): return False
fn = self.get_name()
sn = self.getSystemName()
#if fn is not None and sn is not None:
#
# Hardcoded factions that can't be retreated
#
# if fn =="Aegis Research" and sn =="HIP 17044":
# return False
# if fn =="Aegis Research" and sn =="Pleiades Sector HR-W d1-57":
# return False
# FIXME: can't call getDifficulty here
# if self.getDifficulty() == 999999: return False
return True
    def getUpdatedDateTime(self) -> datetime:
        """Last-updated timestamp of the host system."""
        return self.mySystem.getUpdatedDateTime()
    def getX(self):
        """Galactic x coordinate of the host system."""
        return self.mySystem.getX()
    def getY(self):
        """Galactic y coordinate of the host system."""
        return self.mySystem.getY()
    def getZ(self):
        """Galactic z coordinate of the host system."""
        return self.mySystem.getZ()
    def getInfluence(self):
        """This faction's influence in the host system."""
        return self.influence
    def getPowerState(self):
        """Powerplay state of the host system."""
        return self.mySystem.getPowerState()
    def getPower(self):
        """Controlling power of the host system."""
        return self.mySystem.getPower()
def getVulnerableString(self):
assert self.active_states is not None, 'null vulnerable'
retval: str = self.active_states.getShortString()
retval2: str = self.pending_states.getShortString(pending=True)
if len(retval) > 0 and len(retval2) > 0:
retval = retval + "," + retval2
else:
retval += retval2
retval3: str = self.recovering_states.getShortString(recovering=True)
if len(retval) > 0 and len(retval3) > 0:
retval = retval + "," + retval3
else:
retval += retval3
assert retval is not None, 'null vulnerable 2'
return retval
    def getUpdatedString(self):
        """Last-updated timestamp formatted as 'dd-Mon-YYYY HH:MM'."""
        date = self.mySystem.getUpdatedDateTime()
        ds = date.strftime("%d-%b-%Y %H:%M")
        return ds
    def isHomeSystem(self) -> bool:
        """True when the host system is this faction's home system."""
        factionHomeSystemId: int = self.get_homesystem_id()
        systemId = self.getSystemID()
        return systemId == factionHomeSystemId
    def controlsSystem(self) -> bool:
        """True when this faction controls the host system."""
        cid = self.mySystem.getControllingFactionId()
        mid: int = int(self.get_id())
        return cid == mid
    def getControllingFactionName(self) -> str:
        """Name of whichever faction controls the host system."""
        cn = self.mySystem.getControllingFactionName()
        return cn
def template(self, msg: str) -> str:
myDict: PassThroughDict[str, str] = PassThroughDict()
myDict['home_system'] = self.get_homesystem_name()
myDict['allegiance'] = str(self.get_allegiance())
myDict['government'] = str(self.get_government())
# myDict['inara_link'] = self.getInaraFactionUrl()
myDict['faction_name'] = self.get_name2()
template = string.Template(msg)
output = template.substitute(myDict)
return output
    def hasState(self, state: int):
        """True when *state* is among this faction's active states."""
        return self.active_states.hasState(state)
    # shared code for trade/exploration
def _ss(self) -> [float, List[str]]:
sco: float = 10.0
bonuses: List[str] = []
#
# Stage 1: Is the _system_ good for smuggling
#
from craid.eddb.Station import Station
sta: Station = self.mySystem.getBestTradeStation()
if sta is None:
return 0, ["no suitable station"]
if sta.isOrbital():
bonuses.append("station is orbital")
sco = sco * 2
if sta.hasLargePads():
bonuses.append("station has large pads")
sco = sco * 2
#
# Stage 2: Can the opposing faction benefit from smuggling
#
if sta.hasState(gconst.STATE_WAR) or sta.hasState(gconst.STATE_CIVIL_WAR):
return 0.0, "station's controlling faction is at war"
if sta.hasState(gconst.STATE_ELECTION):
bonuses.append("station is in an election state")
sco = sco * 2
if sta.hasState(gconst.STATE_INVESTMENT):
bonuses.append("station is in investment state")
sco = sco * 2
if sta.hasState(gconst.STATE_EXPANSION):
bonuses.append("station is in expansion state")
sco = sco * 2
#
# Stage 3: Can the club faction be damaged by bounty hunting
#
if self.hasState(gconst.STATE_LOCKDOWN):
return 0.0, "the Club faction is in lockdown"
if self.hasState(gconst.STATE_WAR) or self.hasState(gconst.STATE_CIVIL_WAR):
return 0.0, "the club faction is at war"
if self.hasState(gconst.STATE_ELECTION):
sco = sco * 2.0
bonuses.append("the Club faction being in elections")
return sco, bonuses
def salesScore(self) -> [float, str]:
sco: float
bonuses: List[str]
sco, bonuses = self._ss()
if sco <= 0.0:
my_string = ','.join(bonuses)
return sco, my_string
from craid.eddb.Station import Station
sta: Station = self.mySystem.getBestTradeStation()
if sta.hasState(gconst.STATE_FAMINE):
bonuses.append("food trade will help end the famine")
sco = sco * 2
if sta.hasState(gconst.STATE_OUTBREAK):
bonuses.append("medicine trade will help end the outbreak")
sco = sco * 2
my_string = ','.join(bonuses)
return sco, my_string
def explorationScore(self):
sco: float
bonuses: List[str]
sco, bonuses = self._ss()
# sco: float, bonuses: List[str] = self._ss()
# sco, bonuses = self._ss()
my_string = ','.join(bonuses)
return sco, my_string
# def explorationScore(self):
# return self.mySystem.explorationScore()
#
# def salesScore(self):
# return self.mySystem.salesScore()
# def bountyHuntingScore(self) -> float:
# return self.mySystem.bountyHuntingScore()
    def smugglingScore(self) -> [float, str]:
        """Score this faction as a smuggling target.

        Returns (score, reasons); unlike _ss(), the second element here is
        always a plain string on every path.
        """
        score: float = 50.0
        bonuses: List[str] = []
        #
        # Stage 1: Is the _system_ good for smuggling
        #
        from craid.eddb.Station import Station
        sta: Station = self.mySystem.getBestSmugglingStation()
        if sta is None:
            return 0.0, "there is no suitable station"
        #
        # Stage 2: Can the opposing faction benefit from smuggling
        #
        # in this case, the opposer is either the system's controlling faction or the non-club faction with the highest influence
        opposer = self.mySystem.getControllingFactionInstance()
        if opposer is None:
            return 0.0, "opposing faction is unknown"
        if opposer.isClub():
            opposer = self.mySystem.getHighestInfluenceNonClubFactionInstance()
        if not opposer:
            return 0.0, "there is no suitable opposition"
        oppName = opposer.get_name2()
        # Wars and lockdowns suppress the benefit entirely.
        if opposer.hasState(gconst.STATE_WAR) or opposer.hasState(gconst.STATE_CIVIL_WAR):
            return 0.0, f"the opposing faction {oppName} is at war"
        if opposer.hasState(gconst.STATE_LOCKDOWN):
            return 0.0, "the opposing faction is in lockdown"
        if opposer.hasState(gconst.STATE_BOOM):
            score = score * 2
            bonuses.append(f"the opposing faction, {oppName}, being in a boom state")
        if opposer.hasState(gconst.STATE_ELECTION):
            score = score * 2.0
            bonuses.append("the opposing faction being in elections")
        #
        # Stage 3: Can the club faction be damaged by smuggling
        #
        if self.hasState(gconst.STATE_LOCKDOWN):
            return 0.0, "the Club faction is in lockdown"
        if self.hasState(gconst.STATE_WAR) or self.hasState(gconst.STATE_CIVIL_WAR):
            return 0.0, "the club faction is at war"
        if self.hasState(gconst.STATE_ELECTION):
            score = score * 2.0
            bonuses.append("the Club faction being in elections")
        my_string = ','.join(bonuses)
        return round(score, 0), my_string
    def getSystemEdbgsLink(self):
        """Link for this faction on the host system's BGS page."""
        return self.mySystem.getEdbgsLink(self.get_name2())
# much taken from https://forums.frontier.co.uk/threads/dev-update-07-01-2016.221826/
def bountyHuntingScore(self) -> [float, str]:
score: float = 50.0
bonuses: List[str] = []
#
# Doing bounty hunting is to _benefit_ a non-club faction in control of a system
# and a non-club faction in control of a station. They may not be the same.
# If one of those factions is in lockdown, that part of the effect is lost.
#
#
# Stage 1: Is the _system_ good for bounty hunting
#
hasRings = self.mySystem.hasRings()
if not hasRings:
return 0.0, "the system has no ringed bodies"
bonuses.append("having ringed planets")
if self.mySystem.hasAnarchyFaction():
score = score * 1.1
bonuses.append("having a local pirate faction")
econ = self.mySystem.getPrimaryEconomy()
if econ.startswith('Extract') or econ.startswith('Refine'):
score = score * 1.1
bonuses.append("having a mining economy")
from craid.eddb.Station import Station
sta: Station = self.mySystem.getBestTradeStation()
if sta is None:
return 0.0, | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" tests for supporting multiple NIC's in advanced zone with security groups in cloudstack 192.168.3.11
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.sshClient import SshClient
from marvin.lib.utils import (validateList,
cleanup_resources,
get_host_credentials,
get_process_status,
execute_command_in_host,
random_gen)
from marvin.lib.base import (PhysicalNetwork,
Account,
Host,
TrafficType,
Domain,
Network,
NetworkOffering,
VirtualMachine,
ServiceOffering,
Zone,
NIC,
SecurityGroup)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_virtual_machines,
list_routers,
list_hosts,
get_free_vlan)
from marvin.codes import (PASS, FAILED)
import logging
import random
import time
class TestMulipleNicSupport(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestMulipleNicSupport,
cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.services = cls.testClient.getParsedTestDataConfig()
zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.zone = Zone(zone.__dict__)
cls._cleanup = []
cls.skip = False
if str(cls.zone.securitygroupsenabled) != "True":
cls.skip = True
return
cls.logger = logging.getLogger("TestMulipleNicSupport")
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
# Get Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.services['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, hypervisor="KVM")
if cls.template == FAILED:
cls.skip = True
return
# Create new domain, account, network and VM
cls.user_domain = Domain.create(
cls.apiclient,
services=cls.testdata["acl"]["domain2"],
parentdomainid=cls.domain.id)
# Create account
cls.account1 = Account.create(
cls.apiclient,
cls.testdata["acl"]["accountD2"],
admin=True,
domainid=cls.user_domain.id
)
# Create small service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offerings"]["small"]
)
cls._cleanup.append(cls.service_offering)
cls.services["network"]["zoneid"] = cls.zone.id
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["network_offering"],
)
# Enable Network offering
cls.network_offering.update(cls.apiclient, state='Enabled')
cls._cleanup.append(cls.network_offering)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.testdata["virtual_machine"]["template"] = cls.template.id
if cls.zone.securitygroupsenabled:
# Enable networking for reaching to VM thorugh SSH
security_group = SecurityGroup.create(
cls.apiclient,
cls.testdata["security_group"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
# Authorize Security group to SSH to VM
ingress_rule = security_group.authorize(
cls.apiclient,
cls.testdata["ingress_rule"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
# Authorize Security group to SSH to VM
ingress_rule2 = security_group.authorize(
cls.apiclient,
cls.testdata["ingress_rule_ICMP"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
cls.testdata["shared_network_offering_sg"]["specifyVlan"] = 'True'
cls.testdata["shared_network_offering_sg"]["specifyIpRanges"] = 'True'
cls.shared_network_offering = NetworkOffering.create(
cls.apiclient,
cls.testdata["shared_network_offering_sg"],
conservemode=False
)
NetworkOffering.update(
cls.shared_network_offering,
cls.apiclient,
id=cls.shared_network_offering.id,
state="enabled"
)
physical_network, vlan = get_free_vlan(cls.apiclient, cls.zone.id)
cls.testdata["shared_network_sg"]["physicalnetworkid"] = physical_network.id
random_subnet_number = random.randrange(90, 99)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network1 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
random_subnet_number = random.randrange(100, 110)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network2 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
random_subnet_number = random.randrange(111, 120)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network3 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
try:
cls.virtual_machine1 = VirtualMachine.create(
cls.apiclient,
cls.testdata["virtual_machine"],
accountid=cls.account1.name,
domainid=cls.account1.domainid,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id,
securitygroupids=[security_group.id],
networkids=cls.network1.id
)
for nic in cls.virtual_machine1.nic:
if nic.isdefault:
cls.virtual_machine1.ssh_ip = nic.ipaddress
cls.virtual_machine1.default_network_id = nic.networkid
break
except Exception as e:
cls.fail("Exception while deploying virtual machine: %s" % {e})
try:
cls.virtual_machine2 = VirtualMachine.create(
cls.apiclient,
cls.testdata["virtual_machine"],
accountid=cls.account1.name,
domainid=cls.account1.domainid,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id,
securitygroupids=[security_group.id],
networkids=[str(cls.network1.id), str(cls.network2.id)]
)
for nic in cls.virtual_machine2.nic:
if nic.isdefault:
cls.virtual_machine2.ssh_ip = nic.ipaddress
cls.virtual_machine2.default_network_id = nic.networkid
break
except Exception as e:
cls.fail("Exception while deploying virtual machine: %s" % {e})
cls._cleanup.append(cls.virtual_machine1)
cls._cleanup.append(cls.virtual_machine2)
cls._cleanup.append(cls.network1)
cls._cleanup.append(cls.network2)
cls._cleanup.append(cls.network3)
cls._cleanup.append(cls.shared_network_offering)
if cls.zone.securitygroupsenabled:
cls._cleanup.append(security_group)
cls._cleanup.append(cls.account1)
cls._cleanup.append(cls.user_domain)
@classmethod
def tearDownClass(self):
try:
cleanup_resources(self.apiclient, self._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
if self.skip:
self.skipTest("Test can be run only on advanced zone and KVM hypervisor")
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def verify_network_rules(self, vm_id):
virtual_machine = VirtualMachine.list(
self.apiclient,
id=vm_id
)
vm = virtual_machine[0]
hosts = list_hosts(
self.apiclient,
id=vm.hostid
)
host = hosts[0]
if host.hypervisor.lower() not in "kvm":
return
host.user, host.password = get_host_credentials(self.config, host.ipaddress)
for nic in vm.nic:
secips = ""
if len(nic.secondaryip) > 0:
for secip in nic.secondaryip:
secips += secip.ipaddress + ";"
command="/usr/share/cloudstack-common/scripts/vm/network/security_group.py verify_network_rules --vmname %s --vmip %s --vmmac %s --nicsecips '%s'" % (vm.instancename, nic.ipaddress, nic.macaddress, secips)
self.logger.debug("Executing command '%s' in host %s" % (command, host.ipaddress))
result=execute_command_in_host(host.ipaddress, 22,
host.user,
host.password,
command)
if len(result) > 0:
self.fail("The iptables/ebtables rules for nic %s on vm %s on host %s are not correct" %(nic.ipaddress, vm.instancename, host.name))
@attr(tags=["advancedsg"], required_hardware="false")
def test_01_create_vm_with_multiple_nics(self):
"""Create Vm with multiple NIC's
Steps:
# 1. Create more than 1 isolated or shared network
# 2. Create a vm and select more than 1 network while deploying
# 3. Vm is deployed successfully with 1 nic from each network
# 4. All the vm's should be pingable
:return:
"""
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id
)
self.assertEqual(
len(virtual_machine), 1,
"Virtual Machine create with 2 NIC's failed")
nicIdInVm = virtual_machine[0].nic[0]
self.assertIsNotNone(nicIdInVm, "NIC 1 not found in Virtual Machine")
nicIdInVm = virtual_machine[0].nic[1]
self.assertIsNotNone(nicIdInVm, "NIC 2 not found in Virtual Machine")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_02_add_nic_to_vm(self):
"""Create VM with single NIC and then add additional NIC
Steps:
# 1. Create a VM by selecting one default NIC
# 2. Create few more isolated or shared networks
# 3. Add extra NIC's to the vm from the newly created networks
# 4. The deployed VM should have extra nic's added in the above
# step without any fail
# 5. The IP's of the extra NIC's should be pingable
:return:
"""
self.virtual_machine1.add_nic(self.apiclient, self.network2.id)
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
nicIdInVm = virtual_machine[0].nic[1]
self.assertIsNotNone(nicIdInVm, "Second NIC not found")
self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_03_add_ip_to_default_nic(self):
""" Add secondary IP's to the VM
Steps:
# 1. Create a VM with more than 1 NIC
# 2) Navigate to Instances->NIC->Edit Secondary IP's
# ->Aquire new Secondary IP"
# 3) Add as many secondary Ip as possible to the VM
# 4) Configure the secondary IP's by referring to "Configure
# the secondary IP's" in the "Action Item" section
:return:
"""
ipaddress = NIC.addIp(
self.apiclient,
id=self.virtual_machine2.nic[0].id
)
self.assertIsNotNone(
ipaddress,
"Unable to add secondary IP to the default NIC")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_04_add_ip_to_remaining_nics(self):
""" Add secondary IP's to remaining NIC's
Steps:
# 1) Create a VM with more than 1 NIC
# 2)Navigate to Instances-NIC's->Edit Secondary IP's
# ->Acquire new Secondary IP
# 3) Add secondary IP to all the NIC's of the VM
# 4) Confiugre the secondary IP's by referring to "Configure the
# secondary IP's" in the "Action Item" section
:return:
"""
self.virtual_machine1.add_nic(self.apiclient, self.network3.id)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
self.assertIsNotNone(
vms[0].nic[2],
"Third NIC is not added successfully to the VM")
vms1_nic1_id = vms[0].nic[1]['id']
vms1_nic2_id = vms[0].nic[2]['id']
ipaddress21 = NIC.addIp(
self.apiclient,
id=vms1_nic1_id
)
ipaddress22 = NIC.addIp(
self.apiclient,
id=vms1_nic1_id
)
self.assertIsNotNone(
ipaddress21,
"Unable to add first secondary IP to the second nic")
self.assertIsNotNone(
ipaddress22,
"Unable to add second secondary IP to second NIC")
ipaddress31 = NIC.addIp(
self.apiclient,
id=vms1_nic2_id
)
ipaddress32 = NIC.addIp(
self.apiclient,
id=vms1_nic2_id
)
self.assertIsNotNone(
ipaddress31,
"Unable to add first secondary IP to third NIC")
self.assertIsNotNone(
ipaddress32,
"Unable to add second secondary IP to third NIC")
self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_05_stop_start_vm_with_multiple_nic(self):
""" Stop and Start a VM with Multple NIC
Steps:
# 1) Create a Vm with multiple NIC's
# 2) Configure secondary IP's on the VM
# 3) Try to stop/start the VM
# 4) Ping the IP's of the vm
# 5) Remove Secondary IP from one of the NIC
:return:
"""
| |
None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'hatch' in list(opts.keys()):
hatch = opts['hatch']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Check direction add start padding
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
# Draw the CDS symbol
p1 = Polygon([(start_bp, y_extent+y_offset),
(start_bp, -y_extent+y_offset),
(end_bp-dir_fac*scale, -y_extent+y_offset),
(end_bp-dir_fac*scale, y_extent+y_offset)],
edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
hatch=hatch, zorder=15+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # This is a work around for matplotlib < 1.4.0)
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
def trace_cds (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
    """ Built-in trace-based coding sequence renderer.

    Draws an arrow-shaped CDS glyph between start_bp and end_bp and returns
    the (left, right) extent of the drawn part.
    """
    # Defaults; any of these may be overridden through the opts dict
    o = opts if opts is not None else {}
    zorder_add = o.get('zorder_add', 0.0)
    color = o.get('color', (0.7,0.7,0.7))
    hatch = o.get('hatch', '')
    y_offset = o.get('y_offset', 0.0)
    y_extent = o.get('y_extent', 1.5)
    arrowhead_height = o.get('arrowhead_height', 1.0)
    arrowhead_length = o.get('arrowhead_length', 30.0)
    linewidth = o.get('linewidth', linewidth)
    scale = o.get('scale', scale)
    # Direction factor: -1 for reverse-strand parts (start after end)
    dir_fac = -1.0 if start_bp > end_bp else 1.0
    # Arrow polygon: body rectangle plus arrowhead pointing at end_bp
    head_x = end_bp-dir_fac*arrowhead_length*scale
    p1 = Polygon([(start_bp, y_extent+y_offset),
                  (start_bp, -y_extent+y_offset),
                  (head_x, -y_extent+y_offset),
                  (head_x, -y_extent-arrowhead_height+y_offset),
                  (end_bp, 0+y_offset),
                  (head_x, y_extent+arrowhead_height+y_offset),
                  (head_x, y_extent+y_offset)],
                 edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
                 hatch=hatch, zorder=15+zorder_add,
                 path_effects=[Stroke(joinstyle="miter")]) # work-around for matplotlib < 1.4.0
    ax.add_patch(p1)
    # Optional centred label
    if 'label' in o:
        label_x = min(start_bp, end_bp) + abs(end_bp - start_bp)/2.0
        write_label(ax, o['label'], label_x, opts=opts)
    # Always report extent as (left, right)
    if start_bp > end_bp:
        return end_bp, start_bp
    return start_bp, end_bp
def trace_terminator (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
    """ Built-in trace-based terminator renderer.

    Draws a "T"-shaped glyph at start_bp plus a shaded highlight spanning the
    terminator region, returning the (left, right) extent of the drawn part.
    """
    # Defaults; any of these may be overridden through the opts dict
    o = opts if opts is not None else {}
    zorder_add = o.get('zorder_add', 0.0)
    color = o.get('color', (1.0,0.0,0.0))
    y_offset = o.get('y_offset', 0.0)
    y_extent = o.get('y_extent', 3.5)
    x_extent = o.get('x_extent', 10.0)
    highlight_y_extent = o.get('highlight_y_extent', 0.8)
    linewidth = o.get('linewidth', linewidth)
    scale = o.get('scale', scale)
    # Direction factor: -1 for reverse-strand parts (start after end)
    dir_fac = -1.0 if start_bp > end_bp else 1.0
    # Vertical stem and horizontal cap of the "T"
    stem = Line2D([start_bp,start_bp],[0+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth, color=color, zorder=8+zorder_add)
    cap = Line2D([start_bp-(x_extent*scale),start_bp+(x_extent*scale)],[dir_fac*y_extent+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth, color=color, zorder=14+zorder_add)
    ax.add_line(stem)
    ax.add_line(cap)
    # Shade the terminator area (normally smaller than symbol extent)
    shading = Polygon([(start_bp, -highlight_y_extent+y_offset),
                       (start_bp, highlight_y_extent+y_offset),
                       (end_bp, highlight_y_extent+y_offset),
                       (end_bp, -highlight_y_extent+y_offset)], facecolor=color, edgecolor=color, linewidth=linewidth, zorder=13,
                      path_effects=[Stroke(joinstyle="miter")]) # work-around for matplotlib < 1.4.0
    ax.add_patch(shading)
    # Optional centred label
    if 'label' in o:
        label_x = min(start_bp, end_bp) + abs(end_bp - start_bp)/2.0
        write_label(ax, o['label'], label_x, opts=opts)
    # Always report extent as (left, right)
    if start_bp > end_bp:
        return end_bp, start_bp
    return start_bp, end_bp
###############################################################################
# The DNA renderer
###############################################################################
class DNARenderer:
    """ Class defining the DNA rendering functionality.

    Stores the global drawing settings (scale, line widths, backbone padding)
    and exposes dictionaries mapping part/regulation type names to the
    renderer functions used to draw them.
    """
    # Standard part types recognised by the built-in renderers
    STD_PART_TYPES = ['Promoter',
                      'CDS',
                      'Terminator',
                      'RBS',
                      'Scar',
                      'Spacer',
                      'EmptySpace',
                      'Ribozyme',
                      'Ribonuclease',
                      'Protease',
                      'DNACleavageSite',
                      'RNACleavageSite',
                      'ProteinCleavageSite',
                      'DNALocation',
                      'RNALocation',
                      'ProteinLocation',
                      'DNAStability',
                      'RNAStability',
                      'ProteinStability',
                      'StemTop',
                      'Operator',
                      'Origin',
                      'Insulator',
                      '5Overhang',
                      '3Overhang',
                      'RestrictionSite',
                      'BluntRestrictionSite',
                      'PrimerBindingSite',
                      '5StickyRestrictionSite',
                      '3StickyRestrictionSite',
                      'UserDefined',
                      'Signature']
    # Standard regulatory (arc) types recognised by the built-in renderers
    STD_REG_TYPES = ['Repression',
                     'Activation',
                     'Connection']
    def __init__(self, scale=1.0, linewidth=1.0, linecolor=(0,0,0),
                 backbone_pad_left=0.0, backbone_pad_right=0.0):
        """ Constructor to generate an empty DNARenderer.

        Parameters
        ----------
        scale : float (default=1.0)
            A scaling factor for the plot. Only used if rendering traces.
        linewidth : float (default=1.0)
            The default linewidth for all part drawing.
        linecolor : tuple (default=(0,0,0))
            The default RGB line colour used when drawing.
        backbone_pad_left : float (default=0.0)
            Padding to add to the left side of the backbone.
        backbone_pad_right : float (default=0.0)
            Padding to add to the right side of the backbone.
        """
        self.scale = scale
        self.linewidth = linewidth
        self.linecolor = linecolor
        self.backbone_pad_left = backbone_pad_left
        self.backbone_pad_right = backbone_pad_right
        # Vertical clearance used when drawing regulation arcs
        self.reg_height = 15
def SBOL_part_renderers (self):
""" Return dictionary of all standard built-in SBOL part renderers.
"""
return {
'Promoter' :sbol_promoter,
'CDS' :sbol_cds,
'Terminator' :sbol_terminator,
'RBS' :sbol_rbs,
'Scar' :sbol_scar,
'Spacer' :sbol_spacer,
'EmptySpace' :sbol_empty_space,
'Ribozyme' :sbol_ribozyme,
'Ribonuclease' :sbol_stem_top,
'Protease' :sbol_stem_top,
'DNACleavageSite' :sbol_stem_top,
'RNACleavageSite' :sbol_stem_top,
'ProteinCleavageSite':sbol_stem_top,
'DNALocation' :sbol_stem_top,
'RNALocation' :sbol_stem_top,
'ProteinLocation' :sbol_stem_top,
'DNAStability' :sbol_stem_top,
'RNAStability' :sbol_stem_top,
'ProteinStability' :sbol_stem_top,
'StemTop' :sbol_stem_top,
'Operator' :sbol_operator,
'Origin' :sbol_origin,
'Insulator' :sbol_insulator,
'5Overhang' :sbol_5_overhang,
'3Overhang' :sbol_3_overhang,
'RestrictionSite' :sbol_restriction_site,
'BluntRestrictionSite' :sbol_blunt_restriction_site,
'PrimerBindingSite' :sbol_primer_binding_site,
'5StickyRestrictionSite' :sbol_5_sticky_restriction_site,
'3StickyRestrictionSite' :sbol_3_sticky_restriction_site,
'UserDefined' :sbol_user_defined,
'Signature' :sbol_signature}
def trace_part_renderers (self):
""" Return dictionary of all standard built-in trace part renderers.
"""
return {
'Promoter' :trace_promoter,
'CDS' :trace_cds,
'Terminator' :trace_terminator,
'RBS' :trace_rbs,
'UserDefined' :trace_user_defined}
def std_reg_renderers (self):
""" Return dictionary of all standard built-in regulation renderers.
"""
return {
'Repression' :repress,
'Activation' :induce,
'Connection' :connect}
def renderDNA (self, ax, parts, part_renderers, regs=None, reg_renderers=None, plot_backbone=True):
""" Render the parts on the DNA and regulation.
Parameters
----------
ax : matplotlib.axes
Axes to draw the design to.
parts : list(dict)
The design to draw. This is a list of dicts, where each dict relates to
a part and must contain the following keys:
- name (string)
- type (string)
- fwd (bool)
- start (float, optional)
- end (float, optional)
These will then be drawn in accordance with the renders selected
part_renderers : dict(functions)
Dict of functions where the key in the part type and the dictionary returns
the function to be used to draw that part type.
regs : list(dict) (default=None)
Regulation present in the design. This is a list of dicts, where each dict
relates to a single regulation arc and must contain the following keys:
- type (string)
- from_part (part object dict)
- to_part (part object dict)
These will then be drawn in accordance with the renders selected.
reg_renderers : dict(functions) (default=None)
Dict of functions where the key in the regulation type and the dictionary
returns the function to be used to draw that regulation type.
Returns
-------
start : float
The x-point in the axis space that drawing begins.
end : float
The x-point in the axis space that drawing ends.
"""
# Update the matplotlib rendering default for drawing the parts (we want mitered edges)
matplotlib.rcParams['lines.dash_joinstyle'] = 'miter'
matplotlib.rcParams['lines.dash_capstyle'] = 'butt'
matplotlib.rcParams['lines.solid_joinstyle'] = 'miter'
matplotlib.rcParams['lines.solid_capstyle'] = 'projecting'
# Make text editable in Adobe Illustrator
matplotlib.rcParams['pdf.fonttype'] = 42
# Plot the parts to the axis
part_num = 0
prev_end = 0
first_start = 0
first_part = True
for part in parts:
keys = list(part.keys())
# Check the part has minimal details required
if 'type' in keys:
if 'fwd' not in keys:
part['fwd'] = True
elif part['fwd'] == False and 'start' in keys and 'end' in keys:
start = part['start']
end = part['end']
part['end'] = start
part['start'] = end
if 'start' not in keys:
if part['fwd'] == True:
part['start'] = part_num
else:
part['start'] = part_num+1
if 'end' not in keys:
if part['fwd'] == True:
part['end'] = part_num+1
else:
part['end'] = part_num
# Extract custom part options (if available)
part_opts = None
if 'opts' in list(part.keys()):
part_opts = part['opts']
# Use the correct renderer
if 'renderer' in list(part.keys()):
# Use custom renderer
prev_start, prev_end = part['renderer'](ax, part['type'], part_num,
part['start'], part['end'], prev_end,
self.scale, self.linewidth,
opts=part_opts)
#update start,end for regulation
#part['start'] = prev_start
#part['end'] = prev_end
if first_part == True:
first_start = prev_start
first_part = False
else:
# Use standard renderer, if one exists
if part['type'] in list(part_renderers.keys()):
prev_start, prev_end = part_renderers[part['type']](ax,
part['type'], part_num,
part['start'], part['end'],
prev_end, self.scale,
self.linewidth, opts=part_opts)
#update | |
import aiohttp
import asyncio
from bs4 import BeautifulSoup as BS
from . import error, content, utils
import random
import re
import json
import mimetypes
import time
import json
import re
import functools
#==================================================================================================================================================
# Desktop-browser user-agent strings; one is used per session so requests
# look like ordinary browser traffic.
USER_AGENTS = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/601.1.10 (KHTML, like Gecko) Version/8.0.5 Safari/601.1.10",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; ; NCT50_AAP285C84A1328) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6"
)
# Prefer the faster lxml backend for BeautifulSoup when it is installed,
# falling back to the stdlib html parser otherwise.
try:
    import lxml
except ImportError:
    PARSER = "html.parser"
else:
    PARSER = "lxml"
_DIGITS = "0123456789abcdefghijklmnopqrstuvwxyz"
def str_base(number, base=36):
if isinstance(number, int):
if number == 0:
return "0"
elif number < 0:
value = -number
sign = "-"
else:
value = number
sign = ""
ret = ""
while value > 0:
value, remainder = divmod(value, base)
ret = _DIGITS[remainder] + ret
return sign + ret
else:
raise TypeError("Input number must be int.")
def now():
    """Current Unix time in whole milliseconds."""
    return int(time.time()*1000)
def generate_offline_threading_id():
    """Generate a Facebook offline threading id.

    Layout (matching fbchat's generateOfflineThreadingID): the current time
    in milliseconds shifted left by 22 bits, with 22 random low bits mixed in.
    """
    t = now()
    v = random.randrange(0xffffffff)
    # bug fix: the mask must keep 22 bits to fill the shift; the previous
    # 0x7ffff only kept 19 bits
    return (t << 22) + (v & 0x3fffff)
def strip_to_json(s):
    """Return *s* from the first '[' or '{' onward, or None when neither occurs.

    Facebook prefixes JSON payloads with junk such as "for(;;);"; this strips it.
    """
    for index, char in enumerate(s):
        if char in "[{":
            return s[index:]
    return None
def load_broken_json(b):
    """Decode junk-prefixed JSON payload bytes into Python objects."""
    text = b.decode("utf-8")
    return json.loads(strip_to_json(text))
def get_jsmods_require(d, index, default=None):
    """Pull item *index* (first element) out of a response's first jsmods
    require entry, returning *default* when the path is absent.
    """
    try:
        first_require = d["jsmods"]["require"][0]
        return first_require[index][0]
    except (KeyError, IndexError):
        return default
def get_between(text, start, end):
    """Return the substring of *text* between the first *start* token and the
    following *end* token.

    Raises IndexError when either token cannot be found. Note: some text must
    follow the *end* token (this mirrors the original truthiness checks).
    """
    after_start = text.partition(start)[2]
    if not after_start:
        raise IndexError("Cannot find start token.")
    between, _, trailing = after_start.partition(end)
    if not trailing:
        raise IndexError("Cannot find end token.")
    return between
def flatten(data, prefix):
    """Recursively flatten nested dicts/lists/tuples into a single dict whose
    keys use bracketed form notation, e.g. ``prefix[a][0]``.

    Falsy leaf values are normalised to "" (form-encoding behaviour).
    """
    if isinstance(data, dict):
        pairs = data.items()
    elif isinstance(data, (list, tuple)):
        pairs = enumerate(data)
    else:
        # Leaf node: normalise falsy values to the empty string
        return {prefix: data or ""}
    flat = {}
    for key, value in pairs:
        flat.update(flatten(value, "{}[{}]".format(prefix, key)))
    return flat
#==================================================================================================================================================
# Matches runs of whitespace separating concatenated JSON documents.
_WHITESPACE = re.compile(r"\s*")
class ConcatJSONDecoder(json.JSONDecoder):
    """JSON decoder accepting several documents concatenated in one string.

    decode() returns every document found, in order, as a list.
    """
    def decode(self, s, _w=_WHITESPACE.match):
        s = s.strip()
        length = len(s)
        documents = []
        position = 0
        while position != length:
            # Skip separating whitespace, then decode the next document
            obj, position = self.raw_decode(s, idx=_w(s, position).end())
            documents.append(obj)
        return documents
load_concat_json = functools.partial(json.loads, cls=ConcatJSONDecoder)
#==================================================================================================================================================
#straight up stolen from fbchat
class GraphQL:
    """Builder for Facebook GraphQL batch request payloads.

    The constructed payload dict is stored in ``value``; the FRAGMENT_* and
    SEARCH_* constants are pre-written query templates (adapted from fbchat).
    """
    FRAGMENT_USER = """
    QueryFragment User: User {
        id,
        name,
        first_name,
        last_name,
        profile_picture.width(<pic_size>).height(<pic_size>) {
            uri
        },
        is_viewer_friend,
        url,
        gender,
        viewer_affinity
    }
    """
    FRAGMENT_GROUP = """
    QueryFragment Group: MessageThread {
        name,
        thread_key {
            thread_fbid
        },
        image {
            uri
        },
        is_group_thread,
        all_participants {
            nodes {
                messaging_actor {
                    id
                }
            }
        },
        customization_info {
            participant_customizations {
                participant_id,
                nickname
            },
            outgoing_bubble_color,
            emoji
        }
    }
    """
    FRAGMENT_PAGE = """
    QueryFragment Page: Page {
        id,
        name,
        profile_picture.width(32).height(32) {
            uri
        },
        url,
        category_type,
        city {
            name
        }
    }
    """
    SEARCH_USER = """
    Query SearchUser(<search> = '', <limit> = 1) {
        entities_named(<search>) {
            search_results.of_type(user).first(<limit>) as users {
                nodes {
                    @User
                }
            }
        }
    }
    """ + FRAGMENT_USER
    SEARCH_GROUP = """
    Query SearchGroup(<search> = '', <limit> = 1, <pic_size> = 32) {
        viewer() {
            message_threads.with_thread_name(<search>).last(<limit>) as groups {
                nodes {
                    @Group
                }
            }
        }
    }
    """ + FRAGMENT_GROUP
    SEARCH_PAGE = """
    Query SearchPage(<search> = '', <limit> = 1) {
        entities_named(<search>) {
            search_results.of_type(page).first(<limit>) as pages {
                nodes {
                    @Page
                }
            }
        }
    }
    """ + FRAGMENT_PAGE
    SEARCH_THREAD = """
    Query SearchThread(<search> = '', <limit> = 1) {
        entities_named(<search>) {
            search_results.first(<limit>) as threads {
                nodes {
                    __typename,
                    @User,
                    @Group,
                    @Page
                }
            }
        }
    }
    """ + FRAGMENT_USER + FRAGMENT_GROUP + FRAGMENT_PAGE
    def __init__(self, *, query=None, params=None, doc_id=None):
        """Build the request payload from either a raw *query* string or a
        persisted *doc_id*; *params* supplies the query parameters.

        Raises ValueError when neither query nor doc_id is given.

        Note: the previous mutable default arguments (``query={}, params={}``)
        are replaced by None sentinels to avoid shared-state pitfalls.
        """
        if params is None:
            params = {}
        if query:
            self.value = {
                "priority": 0,
                "q": query,
                "query_params": params
            }
        elif doc_id:
            self.value = {
                "doc_id": doc_id,
                "query_params": params
            }
        else:
            raise ValueError("Need either query or doc_id.")
    @classmethod
    def fetch_thread_info(cls, thread_id):
        """Payload for fetching basic info about a single thread by id."""
        return cls(
            doc_id="1386147188135407",
            params={
                "id": thread_id,
                "message_limit": 0,
                "load_messages": False,
                "load_read_receipts": False,
                "before": None
            }
        )
#==================================================================================================================================================
#partially stolen from fbchat, and converted to aiohttp
class HTTPRequest:
    """Facebook endpoint catalogue plus a thin aiohttp request wrapper with
    retry support (partially adapted from fbchat, converted to aiohttp).
    """
    # --- Search / login / messaging endpoints ---
    SEARCH = "https://www.facebook.com/ajax/typeahead/search.php"
    LOGIN = "https://m.facebook.com/login.php?login_attempt=1"
    SEND = "https://www.facebook.com/messaging/send/"
    # --- Thread listing and read-state endpoints ---
    UNREAD_THREADS = "https://www.facebook.com/ajax/mercury/unread_threads.php"
    UNSEEN_THREADS = "https://www.facebook.com/mercury/unseen_thread_ids/"
    THREADS = "https://www.facebook.com/ajax/mercury/threadlist_info.php"
    MESSAGES = "https://www.facebook.com/ajax/mercury/thread_info.php"
    READ_STATUS = "https://www.facebook.com/ajax/mercury/change_read_status.php"
    DELIVERED = "https://www.facebook.com/ajax/mercury/delivery_receipts.php"
    MARK_SEEN = "https://www.facebook.com/ajax/mercury/mark_seen.php"
    # --- Base hosts; STICKY/PING are formatted with the pull channel number ---
    BASE = "https://www.facebook.com"
    MOBILE = "https://m.facebook.com/"
    STICKY = "https://{}-edge-chat.facebook.com/pull"
    PING = "https://{}-edge-chat.facebook.com/active_ping"
    UPLOAD = "https://upload.facebook.com/ajax/mercury/upload.php"
    # --- User / contact management endpoints ---
    USER_INFO = "https://www.facebook.com/chat/user_info/"
    CONNECT = "https://www.facebook.com/ajax/add_friend/action.php?dpr=1"
    REMOVE_USER = "https://www.facebook.com/chat/remove_participants/"
    LOGOUT = "https://www.facebook.com/logout.php"
    ALL_USERS = "https://www.facebook.com/chat/user_info_all"
    SAVE_DEVICE = "https://m.facebook.com/login/save-device/cancel/"
    CHECKPOINT = "https://m.facebook.com/login/checkpoint/"
    # --- Thread customisation endpoints ---
    THREAD_COLOR = "https://www.facebook.com/messaging/save_thread_color/?source=thread_settings&dpr=1"
    THREAD_NICKNAME = "https://www.facebook.com/messaging/save_thread_nickname/?source=thread_settings&dpr=1"
    THREAD_EMOJI = "https://www.facebook.com/messaging/save_thread_emoji/?source=thread_settings&dpr=1"
    THREAD_IMAGE = "https://www.facebook.com/messaging/set_thread_image/?dpr=1"
    THREAD_NAME = "https://www.facebook.com/messaging/set_thread_name/?dpr=1"
    # --- GraphQL and miscellaneous endpoints ---
    WEBGRAPHQL = "https://www.facebook.com/webgraphql/query/"
    MESSAGE_REACTION = "https://www.facebook.com/webgraphql/mutation"
    TYPING = "https://www.facebook.com/ajax/messaging/typ.php"
    GRAPHQL = "https://www.facebook.com/api/graphqlbatch/"
    ATTACHMENT_PHOTO = "https://www.facebook.com/mercury/attachments/photo/"
    EVENT_REMINDER = "https://www.facebook.com/ajax/eventreminder/create"
    MODERN_SETTINGS_MENU = "https://www.facebook.com/bluebar/modern_settings_menu/"
    REMOVE_FRIEND = "https://m.facebook.com/a/removefriend.php"
    EMBED_LINK = "https://www.facebook.com/message_share_attachment/fromURI/"
    MARK_FOLDER_AS_READ = "https://www.facebook.com/ajax/mercury/mark_folder_as_read.php?dpr=1"
def __init__(self, *, loop=None, user_agent=None, cookie_jar=None):
    """Initialise request state and open a fresh aiohttp session.

    loop/user_agent/cookie_jar default to the running event loop, the
    first entry of USER_AGENTS, and no jar, respectively.
    """
    self.loop = loop or asyncio.get_event_loop()
    self.pull_channel = 0
    self.client = "mercury"
    self.cookie_jar = cookie_jar
    default_headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Referer": self.BASE,
        "Origin": self.BASE,
        "User-Agent": user_agent or USER_AGENTS[0],
        "Connection": "keep-alive",
    }
    self.headers = default_headers
    # clear() creates the aiohttp session and resets per-login counters.
    self.clear()
def change_pull_channel(self):
    """Rotate to the next of Facebook's six edge-chat pull servers."""
    next_channel = self.pull_channel + 1
    self.pull_channel = next_channel % 6
def clear(self):
    """Reset per-login request state and start a new client session."""
    # Counters reset first; the fresh session reuses the configured jar.
    self.params = {}
    self.request_counter = 1
    self.seq = "0"
    self.session = aiohttp.ClientSession(loop=self.loop, cookie_jar=self.cookie_jar)
async def close(self):
    """Close the underlying aiohttp session (releases its connections)."""
    await self.session.close()
def update_params(self, extra=None):
    """Build the parameter dict for the next request.

    Copies the session-wide ``self.params``, stamps the per-session
    request counter (``__req``, base-36 via ``str_base``) and sequence
    number, merges ``extra`` on top, and bumps the counter.

    Fixed: ``extra`` previously defaulted to a shared mutable ``{}``
    (the classic mutable-default-argument pitfall); it now defaults to
    ``None``, which is backward compatible for every caller.
    """
    params = self.params.copy()
    params["__req"] = str_base(self.request_counter)
    params["seq"] = self.seq
    if extra:
        params.update(extra)
    self.request_counter += 1
    return params
def retries_wrap(times, *, verbose=True):
    # Decorator factory: retry the wrapped coroutine up to `times` times.
    # Defined at class-body level (no `self`) so it can decorate the
    # request methods below while the class is being created.
    def wrapped(func):
        @functools.wraps(func)
        async def new_func(self, *args, **kwargs):
            for i in range(times):
                try:
                    return await func(self, *args, **kwargs)
                except (asyncio.TimeoutError, KeyboardInterrupt, RuntimeError, asyncio.CancelledError):
                    # Cancellation/interrupt conditions are never retried.
                    raise
                except error.HTTPRequestFailure as e:
                    status = e.response.status
                    if status in (502, 503):
                        # Gateway trouble: rotate to another edge-chat server.
                        self.change_pull_channel()
                    elif status == 1357004:
                        # Facebook session-expired code: refresh login state.
                        await self.save_login_state()
                    continue
                except Exception as e:
                    if verbose:
                        print("Ignored {}, retrying... ({}/{})".format(type(e), i+1, times))
            else:
                # for/else: reached only when every attempt failed without
                # returning. NOTE(review): indentation reconstructed — confirm
                # this `else` pairs with the `for`, not with `if verbose`.
                raise error.HTTPException("Cannot send HTTP request.")
        return new_func
    return wrapped
@retries_wrap(3)
async def get(self, url, *, headers=None, params=None, timeout=30, as_json=False, json_decoder=load_broken_json, **kwargs):
    """Issue a GET; return the raw bytes, or the decoded payload when
    ``as_json`` is true. Raises error.HTTPRequestFailure on non-200."""
    request_headers = headers or self.headers
    merged_params = self.update_params(params or {})
    async with self.session.get(url, headers=request_headers, params=merged_params, timeout=timeout, **kwargs) as response:
        if response.status != 200:
            raise error.HTTPRequestFailure(response)
        payload = await response.read()
    return json_decoder(payload) if as_json else payload
@retries_wrap(3)
async def post(self, url, *, headers=None, data=None, timeout=30, as_json=False, json_decoder=load_broken_json, **kwargs):
    """Issue a POST with the session params merged into ``data``; return
    raw bytes, or the decoded payload when ``as_json`` is true.
    Raises error.HTTPRequestFailure on non-200."""
    request_headers = headers or self.headers
    form_data = self.update_params(data or {})
    async with self.session.post(url, headers=request_headers, data=form_data, timeout=timeout, **kwargs) as response:
        if response.status != 200:
            raise error.HTTPRequestFailure(response)
        payload = await response.read()
    return json_decoder(payload) if as_json else payload
async def login(self, username, password):
    """Log in through the mobile site and save the resulting session state.

    Scrapes the hidden form fields from the mobile login page, submits
    the credentials, follows the save-device interstitial, and calls
    save_login_state() on success.

    Raises error.LoginError for empty credentials or a failed login.

    Fixed: the failure branch called ``.formet(...)`` (typo), so a failed
    login raised AttributeError instead of the intended LoginError.
    """
    if not (username and password):
        raise error.LoginError("Username and password must be non-empty.")
    bytes_ = await self.get(self.MOBILE)
    soup = BS(bytes_.decode("utf-8"), PARSER)
    # Collect every hidden input the login form expects to be echoed back.
    data = {tag["name"]: tag["value"] for tag in soup.find_all("input") if "name" in tag.attrs and "value" in tag.attrs}
    data["email"] = username
    data["pass"] = password
    data["login"] = "Log In"
    self.request_counter += 1
    resp = await self.session.post(self.LOGIN, headers=self.headers, data=data)
    if "checkpoint" in resp.url.human_repr():
        bytes_ = await resp.read()
        # resp = await self.handle_2FA(bytes_)
        # 2FA handling is currently disabled ("I don't think this does
        # anything anymore" per the original author).
    if "save-device" in resp.url.human_repr():
        resp = await self.session.get(self.SAVE_DEVICE, headers=self.headers)
    if "home" in resp.url.human_repr():
        return await self.save_login_state()
    else:
        raise error.LoginError("Login failed, got directed to {}".format(resp.url.human_repr()))
async def handle_2FA(self, bytes_):
    """Walk Facebook's checkpoint (two-factor) flow interactively.

    Prompts on stdin for the 2FA code, then repeatedly re-posts the
    checkpoint form with progressively fewer/different fields — each
    step mirrors one screen of the checkpoint wizard — returning as
    soon as a response lands on a "home" URL.
    """
    soup = BS(bytes_.decode("utf-8"), PARSER)
    code = input("Input 2FA code here: ")
    # Step 1: submit the approval code with the form's CSRF-ish tokens.
    data = {
        "approvals_code": code,
        "fb_dtsg": soup.find("input", attrs={"name": "fb_dtsg"})["value"],
        "nh": soup.find("input", attrs={"name": "nh"})["value"],
        "submit[Submit Code]": "Submit Code",
        "codes_submitted": 0
    }
    resp = await self.session.post(self.CHECKPOINT, headers=self.headers, data=data)
    if "home" in resp.url.human_repr():
        return resp
    # Step 2: code accepted — answer the "remember this device" screen.
    data.pop("approvals_code")
    data.pop("submit[Submit Code]")
    data.pop("codes_submitted")
    data["name_action_selected"] = "save_device"
    data["submit[Continue]"] = "Continue"
    resp = await self.session.post(self.CHECKPOINT, headers=self.headers, data=data)
    if "home" in resp.url.human_repr():
        return resp
    # Step 3: plain "Continue" screen (no device choice).
    data.pop("name_action_selected")
    resp = await self.session.post(self.CHECKPOINT, headers=self.headers, data=data)
    if "home" in resp.url.human_repr():
        return resp
    # Step 4: the "review recent login" screen — confirm it was us.
    data.pop("submit[Continue]")
    data["submit[This was me]"] = "This Was Me"
    resp = await self.session.post(self.CHECKPOINT, headers=self.headers, data=data)
    if "home" in resp.url.human_repr():
        return resp
    # Step 5: final continue + save-device; return whatever comes back.
    data.pop("submit[This was me]")
    data["submit[Continue]"] = "Continue"
    data["name_action_selected"] = "save_device"
    return await self.session.post(self.CHECKPOINT, headers=self.headers, data=data)
async def save_login_state(self):
self.params.clear()
self.client_id = "{:x}".format(random.randrange(0x80000000))
self.start_time = now()
for cookie in self.session.cookie_jar:
if cookie.key == "c_user":
self.user_id = str(cookie.value)
break
else:
raise error.LoginError("Cannot find c_user cookie.")
self.user_channel = "p_" + self.user_id
self.ttstamp = ""
bytes_ = await self.get(self.BASE)
html = bytes_.decode("utf-8")
soup = BS(html, PARSER)
fb_dtsg = soup.find("input", attrs={"name": "fb_dtsg"})
if fb_dtsg:
self.fb_dtsg = fb_dtsg["value"]
else:
m = re.search(r"name=\"fb_dtsg\"\svalue=\"(.?*)\"", html)
self.fb_dtsg = m.group(1)
jazoest = soup.find("input", attrs={"name": "jazoest"})
if jazoest:
self.jazoest = jazoest["value"]
else:
m = re.search(r"name=\"jazoest\"\svalue=\"(.?*)\"", html)
self.jazoest = m.group(1)
h = soup.find("input", attrs={"name": "h"})
if h:
self.h = h["value"]
t = "".join((str(ord(c)) for c in | |
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
from datetime import date
from math import e
def calc_rate(data1, data2):
    """Growth factor of data1 relative to data2 (the previous value).

    Returns data1 unchanged when data2 is 0; returns the ratio
    data1/data2 when growing, and the negated inverse ratio when
    shrinking.
    """
    if data2 == 0:
        return data1
    if data1 < data2:
        # NOTE(review): raises ZeroDivisionError when data1 == 0 and
        # data2 > 0 — confirm inputs can never shrink to zero.
        return -(data2 / data1)
    return data1 / data2
def calc_mort_rate(data1, data2):
    """Mortality rate data1/data2, or 0 when data2 is 0."""
    return 0 if data2 == 0 else data1 / data2
def compute_data(parsed_data):
    """Derive daily statistics from the cumulative series.

    ``parsed_data`` holds six parallel sequences: [0] dates, [1] cases,
    [2] deaths, [3] tests, [4] recovered, [5] hospitalized (cumulative).
    Appends thirteen float64 arrays in-place (indices 6-18): day number,
    new/growth-factor pairs for cases, deaths, recovered, hospitalized
    and tests, then mortality rate and active cases. Returns the
    extended ``parsed_data`` list.

    Rewritten to accumulate in Python lists and convert once at the end:
    ``np.append`` inside the loop copied the whole array every iteration
    (O(n^2)). The duplicated day-0 branch is also folded into the loop.
    """
    cases, deaths, tests = parsed_data[1], parsed_data[2], parsed_data[3]
    recovered, hospitalized = parsed_data[4], parsed_data[5]
    days, mortality_rate, active_cases = [], [], []
    # (cumulative series, its daily-new list, its growth-factor list)
    tracked = [
        (cases, [], []),
        (deaths, [], []),
        (tests, [], []),
        (recovered, [], []),
        (hospitalized, [], []),
    ]
    for i in range(len(parsed_data[0])):
        days.append(i)
        mortality_rate.append(calc_mort_rate(deaths[i], cases[i]))
        active_cases.append(cases[i] - recovered[i] - deaths[i])
        for series, new_vals, growth in tracked:
            if i == 0:
                # Day 0: "new" equals the cumulative value, no growth yet.
                new_vals.append(series[0])
                growth.append(0)
            else:
                new_vals.append(series[i] - series[i - 1])
                growth.append(calc_rate(series[i], series[i - 1]))
    (_, new_cases, cases_gf), (_, new_deaths, deaths_gf), (_, new_tests, tests_gf), \
        (_, new_recovered, recovered_gf), (_, new_hospitalized, hospitalized_gf) = tracked
    # dtype=float preserves the original behaviour (np.append onto an
    # empty float array always produced float64), and the append order
    # below fixes the downstream indices 6-18.
    for values in (days, new_cases, cases_gf, new_deaths, deaths_gf,
                   new_recovered, recovered_gf, new_hospitalized, hospitalized_gf,
                   new_tests, tests_gf, mortality_rate, active_cases):
        parsed_data.append(np.array(values, dtype=float))
    return parsed_data
def logistic_fn(population):
    """Evaluate a logistic growth curve for days 1..59.

    Uses carrying capacity ``population``, one initial case, and a fixed
    growth constant of 0.38. Prints each value as it is computed and
    returns (days, logistic) as float arrays.
    """
    days, curve = [], []
    initial_cases = 1
    for day in range(1, 60):
        value = population / (1 + ((population / initial_cases) - 1) * e ** (-0.38 * day))
        print(value)
        days.append(day)
        curve.append(value)
    return (np.array(days, dtype=float), np.array(curve, dtype=float))
def difference(parsed_data, day1, day2):
    """Print the change in every tracked series between two day indices."""
    print("Data difference between:", parsed_data[0][day1], 'and', parsed_data[0][day2])
    labelled_columns = (
        ("\u0394Days:\t", 6),
        ("\u0394Cases:\t", 1),
        ("\u0394Deaths: ", 2),
        ("\u0394Recov.: ", 4),
        ("\u0394Hospi.: ", 5),
        ("\u0394Tests:\t", 3),
    )
    for label, column in labelled_columns:
        print(label, parsed_data[column][day2] - parsed_data[column][day1])
def projection(next_days, days_passed, parsed_data):
    """Print naive compound-growth projections for the next ``next_days``
    days, using each series' growth factor averaged over the last
    ``days_passed`` days.

    Fixed: ``total_tests`` previously read
    ``parsed_data[3][len(parsed_data[4])-1]`` — the index came from the
    *recovered* series (copy-paste bug); it now uses the tests series'
    own last element.

    NOTE(review): the "active cases growth factor" averages
    ``parsed_data[18]``, which compute_data fills with raw active-case
    *counts*, not growth factors — preserved as-is, but confirm intent.
    """
    total_cases = float(parsed_data[1][-1])
    total_deaths = float(parsed_data[2][-1])
    total_tests = float(parsed_data[3][-1])
    total_recovered = float(parsed_data[4][-1])
    total_hospitalized = float(parsed_data[5][-1])
    total_active = float(parsed_data[18][-1])
    # Average each growth-factor series over its last `days_passed` entries.
    avg_cases_gf = sum(parsed_data[8][-days_passed:]) / days_passed
    avg_deaths_gf = sum(parsed_data[10][-days_passed:]) / days_passed
    avg_tests_gf = sum(parsed_data[16][-days_passed:]) / days_passed
    avg_recovered_gf = sum(parsed_data[12][-days_passed:]) / days_passed
    avg_hospitalized_gf = sum(parsed_data[14][-days_passed:]) / days_passed
    avg_active_gf = sum(parsed_data[18][-days_passed:]) / days_passed
    print('Avg Cases Growth Factor (past', days_passed, 'days):', round(avg_cases_gf, 5))
    print('Avg Deaths Growth Factor (past', days_passed, 'days):', round(avg_deaths_gf, 5))
    print('Avg Tests Growth Factor (past', days_passed, 'days):', round(avg_tests_gf, 5))
    print('Avg Recovered Growth Factor (past', days_passed, 'days):', round(avg_recovered_gf, 5))
    print('Avg Hospitalized Growth Factor (past', days_passed, 'days):', round(avg_hospitalized_gf, 5))
    print('Avg Active Cases Growth Factor (past', days_passed, 'days):', round(avg_active_gf, 5))
    # Compound each total forward one day at a time.
    for _ in range(next_days):
        total_cases *= avg_cases_gf
        total_deaths *= avg_deaths_gf
        total_tests *= avg_tests_gf
        total_recovered *= avg_recovered_gf
        total_hospitalized *= avg_hospitalized_gf
        total_active *= avg_active_gf
    print("Projections for the next", next_days, "days:")
    print("Cases:", round(total_cases))
    print("Active:", round(total_active))
    print("Deaths:", round(total_deaths))
    print("Tests:", round(total_tests))
    print("Recovered:", round(total_recovered))
    print("Hospitalized:", round(total_hospitalized))
def linear_regression(x, y):
    """Ordinary least-squares fit of y against the indices 0..len(x)-1.

    x is used only for its length (its entries may be strings, e.g.
    dates). Returns [slope, y_intercept, fitted_y].
    """
    n = len(x)
    indices = list(range(n))
    sum_x = sum(indices)
    sum_y = sum(y)
    sum_x_sq = sum(i * i for i in indices)
    sum_xy = sum(i * yi for i, yi in zip(indices, y))
    denominator = n * sum_x_sq - sum_x ** 2
    slope = (n * sum_xy - sum_x * sum_y) / denominator
    y_intercept = (sum_y * sum_x_sq - sum_x * sum_xy) / denominator
    fitted = [slope * i + y_intercept for i in indices]
    return [slope, y_intercept, fitted]
def plot_graph(x, y, color, x_label, y_label, chart_title, file_name='', save=False, log_view=False, trend=False):
    """Plot y against x, optionally on a log scale and with a linear
    trend line, then save to ../export/graphs/<file_name> or show."""
    plt.figure(figsize=(14, 10))
    plt.ticklabel_format(style='plain')
    plt.title(chart_title, fontdict={'fontsize': 25})
    if log_view:
        plt.yscale('log')
    if trend:
        slope, intercept, fitted = linear_regression(x, y)
        # Positive intercepts need an explicit '+'; negatives carry '-'.
        sign = '+' if intercept >= 0 else ''
        equation = str(slope)[:10] + 'X ' + sign + str(intercept)[:10]
        plt.plot(x, fitted, color + '--', label=equation)
        plt.legend(loc='upper left')
    # Black markers plus a colored line for the data itself.
    plt.plot(x, y, 'ko', x, y, color)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.grid()
    if save:
        warnings.filterwarnings('ignore')
        plt.savefig('../export/graphs/' + file_name)
    else:
        plt.show()
def plot_graph_all(parsed_data, chart_title, from_day, to_day, file_name='', save=False):
    """Plot cases, deaths, recovered and active cases on one figure.

    Bug fixed: the save branch called ``plt.plotplt.savefig(...)`` —
    an AttributeError whenever save=True; it now calls ``plt.savefig``.

    NOTE(review): the x values come from parsed_data[4] (the recovered
    series), not parsed_data[6] (day numbers); that looks suspicious but
    is preserved as-is — confirm the intended x-axis.
    """
    plt.figure(figsize=(14, 10))
    plt.ticklabel_format(style='plain')
    plt.title(chart_title, fontdict={'fontsize': 25})
    x = parsed_data[4][from_day:to_day]
    series = (
        (parsed_data[1][from_day:to_day], 'b', "Cases"),
        (parsed_data[2][from_day:to_day], 'r', "Deaths"),
        (parsed_data[4][from_day:to_day], 'g', "Recovered"),
        (parsed_data[18][from_day:to_day], 'k', "Active Cases"),
    )
    for values, color, label in series:
        plt.plot(x, values, 'ko')
        plt.plot(x, values, color, label=label)
    plt.legend(loc="upper left")
    plt.xlabel("Days")
    plt.grid()
    if save:
        warnings.filterwarnings('ignore')
        plt.savefig('../export/graphs/' + file_name)
    else:
        plt.show()
def print_cases(header, data):
    """Print the per-day cases table (cases, new cases, growth, active)."""
    np.set_printoptions(precision=3)
    print('%10s%9s%13s%13s%13s%13s' % (header[0], header[1], header[2],
                                       header[3], header[4], header[18]))
    for i in range(len(data[0])):
        row = ('%10s' % data[0][i], '%8s' % data[6][i], '%12s' % data[1][i],
               '%12s' % data[7][i], '%12s' % str(data[8][i])[:8],
               '%12s' % data[18][i])
        print(*row)
def print_deaths(header, data):
    """Print the per-day deaths table (deaths, new deaths, growth,
    mortality rate).

    Fixed: the last column prints ``data[17]`` (mortality rate), but its
    header was ``header[5]`` ("Deaths") repeated; per the header/data
    mapping documented in print_gf_data (17 -> Mortality Rate), it is
    now ``header[17]``.
    """
    np.set_printoptions(precision=3)
    print('%10s'%(header[0]), end = '')
    print('%9s'%(header[1]), end = '')
    print('%13s'%(header[5]), end = '')
    print('%13s'%(header[6]), end = '')
    print('%13s'%(header[7]), end = '')
    print('%13s'%(header[17]))
    for i in range(len(data[0])):
        print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[2][i]), '%12s'%(data[9][i]), '%12s'%(data[10][i]), '%12s'%(data[17][i]))
def print_tests(header, data):
    """Print the per-day testing table (tests, new tests, growth)."""
    np.set_printoptions(precision=3)
    print('%10s%9s%13s%13s%13s' % (header[0], header[1], header[14],
                                   header[15], header[16]))
    for i in range(len(data[0])):
        row = ('%10s' % data[0][i], '%8s' % data[6][i], '%12s' % data[3][i],
               '%12s' % data[15][i], '%12s' % data[16][i])
        print(*row)
def print_recovered(header, data):
    """Print the per-day recoveries table (recovered, new, growth)."""
    np.set_printoptions(precision=3)
    print('%10s%9s%13s%13s%13s' % (header[0], header[1], header[8],
                                   header[9], header[10]))
    for i in range(len(data[0])):
        row = ('%10s' % data[0][i], '%8s' % data[6][i], '%12s' % data[4][i],
               '%12s' % data[11][i], '%12s' % data[12][i])
        print(*row)
def print_hospitalized(header, data):
    """Print the per-day hospitalizations table (total, new, growth)."""
    np.set_printoptions(precision=3)
    print('%10s%9s%13s%13s%13s' % (header[0], header[1], header[11],
                                   header[12], header[13]))
    for i in range(len(data[0])):
        row = ('%10s' % data[0][i], '%8s' % data[6][i], '%12s' % data[5][i],
               '%12s' % data[13][i], '%12s' % data[14][i])
        print(*row)
def print_diff_data(header, data):
    """Print the combined daily-difference table.

    Column layout (header index -> data index):
      0->0 Date, 1->6 Day, 2->1 Cases, 3->7 New Cases, 18->18 Active
      Cases, 5->2 Deaths, 6->9 New Deaths, 8->4 Recovered, 9->11 New
      Recovered, 11->5 Hospitalized, 12->13 New Hospitalized,
      14->3 Tests, 15->15 New Tests
    """
    np.set_printoptions(precision=3)
    middle_header_cols = (2, 3, 18, 5, 6, 8, 9, 11, 12, 14)
    print('%10s' % header[0], end='')
    print('%9s' % header[1], end='')
    for idx in middle_header_cols:
        print('%13s' % header[idx], end='')
    print('%13s' % header[15])
    row_cols = (1, 7, 18, 2, 9, 4, 11, 5, 13, 3, 15)
    for i in range(len(data[0])):
        cells = ['%10s' % data[0][i], '%8s' % data[6][i]]
        cells.extend('%12s' % data[idx][i] for idx in row_cols)
        print(*cells)
def print_gf_data(header, data):
    """Print the growth-factor table.

    Column layout (header index -> data index):
      0->0 Date, 1->6 Day, 2->1 Cases, 4->8 % Cases, 5->2 Deaths,
      7->10 % Deaths, 17->17 Mortality, 8->4 Recovered,
      10->12 % Recovered, 11->5 Hospitalized, 13->14 % Hospitalized,
      14->3 Tests, 16->16 % Tests
    """
    np.set_printoptions(precision=3)
    middle_header_cols = (2, 4, 5, 7, 17, 8, 10, 11, 13, 14)
    print('%10s' % header[0], end='')
    print('%9s' % header[1], end='')
    for idx in middle_header_cols:
        print('%13s' % header[idx], end='')
    print('%13s' % header[16])
    # (data index, characters to keep; None = print the value unclipped)
    row_spec = ((1, None), (8, 8), (2, None), (10, 8), (17, 5),
                (4, None), (12, 8), (5, None), (14, 8), (3, None), (16, 8))
    for i in range(len(data[0])):
        cells = ['%10s' % data[0][i], '%8s' % data[6][i]]
        for idx, clip in row_spec:
            value = data[idx][i]
            cells.append('%12s' % (value if clip is None else str(value)[:clip]))
        print(*cells)
def print_new_data(new_data):
print('INDEX', '%14s'%('DATE'), '%11s'%('CASES'), '%11s'%('DEATHS'), '%11s'%('TESTS'), '%11s'%('RECOVERED'), '%11s'%('HOSPITAL'))
for i, entry in | |
# Repository: WeiChengTseng/DL_final_project
import time
import matplotlib
import time
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from a2c.models import AtariCNN, A2C, A2CLarge
from a2c.envs import make_env, RenderSubprocVecEnv
from a2c.train_multi import train
from ppo.PPO import PPO
from maac.attention_sac import AttentionSAC
from maac_double.attention_sac import AttentionSACDouble
from env_exp import SocTwoEnv
def parse_double(obs):
    """Split two 16-agent observation arrays into four 8-agent groups.

    obs[0]/obs[1] each cover 16 agents; the result stacks
    [obs0[:8], obs1[:8], obs0[8:], obs1[8:]] into one array.
    """
    first, second = obs[0], obs[1]
    groups = [first[:8], second[:8], first[8:], second[8:]]
    return np.array(groups)
def eval_with_random_agent(net_striker, net_goalie, env, device, eval_epsoid=40):
    """Evaluate trained striker/goalie nets against uniform-random agents.

    Agents 0-7 act randomly, agents 8-15 follow the trained policies.
    Steps the env until `eval_epsoid` episodes finish, then returns None.
    """
    obs_striker, obs_goalie = env.reset('team')
    # time.sleep(5)
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        policies_striker, values_striker = net_striker(obs_striker)
        policies_goalie, values_goalie = net_goalie(obs_goalie)
        probs_striker = F.softmax(policies_striker, dim=-1)
        probs_goalie = F.softmax(policies_goalie, dim=-1)
        # Sample one action per agent from the policy distributions.
        actions_striker = probs_striker.multinomial(1).data
        actions_goalie = probs_goalie.multinomial(1).data
        # Replace the first 8 agents' actions with uniform random ones
        # (7 striker / 5 goalie choices — assumed action-space sizes,
        # TODO confirm against the env definition).
        actions_striker = torch.cat([
            torch.LongTensor(np.random.randint(0, 7, (8, 1))),
            actions_striker[8:],
        ], dim=0)
        actions_goalie = torch.cat([
            torch.LongTensor(np.random.randint(0, 5, (8, 1))),
            actions_goalie[8:],
        ], dim=0)
        # (A commented-out variant in the original randomized agents 8-15
        # instead, keeping trained actions for 0-7.)
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie, 'team')
        obs_striker, obs_goalie = obs
        # The converted reward tensors are currently unused.
        rewards_striker = torch.from_numpy(rewards[0]).float().unsqueeze(1).to(device)
        rewards_goalie = torch.from_numpy(rewards[1]).float().unsqueeze(1).to(device)
        # One finished episode per set striker done-flag.
        for i in np.argwhere(dones[0]).flatten():
            epsoid += 1
    return
def eval_self_complete(net_striker, net_goalie, env, device, order='team', eval_epsoid=40):
    """Run self-play evaluation: both teams use the same trained nets.

    Steps the env until `eval_epsoid` episodes finish; returns None.
    """
    obs_striker, obs_goalie = env.reset(order)
    finished = 0
    while finished < eval_epsoid:
        striker_input = Variable(torch.from_numpy(obs_striker).float()).to(device)
        goalie_input = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        striker_logits, striker_values = net_striker(striker_input)
        goalie_logits, goalie_values = net_goalie(goalie_input)
        # Sample one action per agent from the softmaxed policy heads.
        striker_actions = F.softmax(striker_logits, dim=-1).multinomial(1).data
        goalie_actions = F.softmax(goalie_logits, dim=-1).multinomial(1).data
        obs, rewards, dones, _ = env.step(striker_actions, goalie_actions, order)
        obs_striker, obs_goalie = obs
        # The converted reward tensors are currently unused.
        striker_rewards = torch.from_numpy(rewards[0]).float().unsqueeze(1).to(device)
        goalie_rewards = torch.from_numpy(rewards[1]).float().unsqueeze(1).to(device)
        # One finished episode per set striker done-flag.
        finished += len(np.argwhere(dones[0]).flatten())
    return
def eval_self_striker_goalie(net_striker, net_goalie, env, device, order='team', eval_epsoid=40):
    """Evaluate trained strikers (agents 0-7) paired with random goalies
    against random strikers paired with trained goalies (agents 8-15).

    Steps the env until `eval_epsoid` episodes finish; returns None.
    """
    obs_striker, obs_goalie = env.reset(order)
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        policies_striker, values_striker = net_striker(obs_striker)
        policies_goalie, values_goalie = net_goalie(obs_goalie)
        probs_striker = F.softmax(policies_striker, dim=-1)
        probs_goalie = F.softmax(policies_goalie, dim=-1)
        actions_striker = probs_striker.multinomial(1).data
        actions_goalie = probs_goalie.multinomial(1).data
        # Keep trained actions for strikers 0-7; randomize strikers 8-15
        # (7 = assumed striker action-space size — TODO confirm).
        actions_striker = torch.cat([
            actions_striker[:8],
            torch.LongTensor(np.random.randint(0, 7, (8, 1)))
        ], dim=0)
        # Randomize goalies 0-7; keep trained actions for goalies 8-15
        # (5 = assumed goalie action-space size — TODO confirm).
        actions_goalie = torch.cat([
            torch.LongTensor(np.random.randint(0, 5, (8, 1))), actions_goalie[8:]
        ], dim=0)
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie, order)
        obs_striker, obs_goalie = obs
        # The converted reward tensors are currently unused.
        rewards_striker = torch.from_numpy(rewards[0]).float().unsqueeze(1).to(device)
        rewards_goalie = torch.from_numpy(rewards[1]).float().unsqueeze(1).to(device)
        # One finished episode per set striker done-flag.
        for i in np.argwhere(dones[0]).flatten():
            epsoid += 1
    return
def eval_agents_compete(strikers, goalies, env, device, order='team', eval_epsoid=40):
    """Pit two trained (striker, goalie) pairs against each other.

    `strikers`/`goalies` are 2-element sequences of policy nets; pair 0
    controls agents 0-7, pair 1 controls agents 8-15. Tallies results in
    `records` = [pair-0 wins, pair-1 wins, draws] and prints the running
    tally. Returns None.
    """
    obs_striker, obs_goalie = env.reset(order)
    policies_striker = [None, None]
    policies_goalie = [None, None]
    # time.sleep(5)
    records = [0] * 3
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        # Each pair only sees its own half of the agents.
        policies_striker[0], _ = strikers[0](obs_striker[:8])
        policies_goalie[0], _ = goalies[0](obs_goalie[:8])
        policies_striker[1], _ = strikers[1](obs_striker[8:])
        policies_goalie[1], _ = goalies[1](obs_goalie[8:])
        policy_strikers = torch.cat(policies_striker, dim=0)
        policy_goalies = torch.cat(policies_goalie, dim=0)
        probs_striker = F.softmax(policy_strikers, dim=-1)
        probs_goalie = F.softmax(policy_goalies, dim=-1)
        actions_striker = probs_striker.multinomial(1).data
        actions_goalie = probs_goalie.multinomial(1).data
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie, order)
        obs_striker, obs_goalie = obs
        # The converted reward tensors are currently unused.
        rewards_striker = torch.from_numpy(rewards[0]).float().unsqueeze(1).to(device)
        rewards_goalie = torch.from_numpy(rewards[1]).float().unsqueeze(1).to(device)
        for i in np.argwhere(dones[0][:8]).flatten():
            epsoid += 1
            # Presumably a negative goalie reward means that team conceded
            # — TODO confirm the reward sign convention against the env.
            if rewards[1][i + 8] < 0:
                records[0] += 1
            elif rewards[0][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
        # NOTE(review): indentation reconstructed — this print is placed
        # inside the while loop (running tally); confirm.
        print(records)
    return
def eval_compete_acppo(strikers, goalies, env, device, order='team', eval_epsoid=40):
    """Pit an actor-critic pair (agents 0-7) against a PPO pair (8-15).

    Index 0 of `strikers`/`goalies` holds A2C-style nets (called for
    logits); index 1 holds PPO agents (called via `.act(...)`). Tallies
    results in `records` = [AC wins, PPO wins, draws] and prints the
    running tally. Returns None.
    """
    # env.train()
    obs_striker, obs_goalie = env.reset(order)
    policies_striker = [None, None]
    policies_goalie = [None, None]
    # time.sleep(5)
    records = [0] * 3
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        # A2C team: logits -> softmax -> sampled actions.
        policies_striker[0], _ = strikers[0](obs_striker[:8])
        policies_goalie[0], _ = goalies[0](obs_goalie[:8])
        # PPO team: the agent object returns actions directly.
        action_ppo_striker = strikers[1].act(obs_striker[8:])
        action_ppo_goalie = goalies[1].act(obs_goalie[8:])
        policy_strikers = policies_striker[0]
        policy_goalies = policies_goalie[0]
        probs_striker = F.softmax(policy_strikers, dim=-1)
        probs_goalie = F.softmax(policy_goalies, dim=-1)
        actions_striker = probs_striker.multinomial(1).data
        actions_goalie = probs_goalie.multinomial(1).data
        actions_striker = torch.cat((actions_striker, action_ppo_striker), dim=0)
        actions_goalie = torch.cat((actions_goalie, action_ppo_goalie), dim=0)
        # (A commented-out variant in the original replaced the A2C team
        # with uniform-random actions.)
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie, order)
        obs_striker, obs_goalie = obs
        # The converted reward tensors are currently unused.
        rewards_striker = torch.from_numpy(rewards[0]).float().unsqueeze(1).to(device)
        rewards_goalie = torch.from_numpy(rewards[1]).float().unsqueeze(1).to(device)
        for i in np.argwhere(dones[0][:8]).flatten():
            epsoid += 1
            if rewards[1][i + 8] < 0:
                records[0] += 1
            # NOTE(review): this reads rewards[1][i] where the sibling
            # eval_agents_compete reads rewards[0][i] — confirm which
            # index is intended for the second-place tally.
            elif rewards[1][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
        # NOTE(review): indentation reconstructed — this print is placed
        # inside the while loop (running tally); confirm.
        print(records)
    return
def eval_agents_compete_(strikers, goalies, env, device, order='team', eval_epsoid=40):
    """Pit two agent pairs whose nets return actions directly (no
    sampling step). Pair 0 controls agents 0-7, pair 1 agents 8-15;
    tallies [pair-0 wins, pair-1 wins, draws] in `records`. Returns None.
    """
    obs_striker, obs_goalie = env.reset(order)
    records = [0, 0, 0]
    finished = 0
    while finished < eval_epsoid:
        striker_input = Variable(torch.from_numpy(obs_striker).float()).to(device)
        goalie_input = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        # Each pair acts on its own half of the agents.
        striker_act_a, _ = strikers[0](striker_input[:8])
        goalie_act_a, _ = goalies[0](goalie_input[:8])
        striker_act_b, _ = strikers[1](striker_input[8:])
        goalie_act_b, _ = goalies[1](goalie_input[8:])
        all_striker_actions = torch.cat([striker_act_a, striker_act_b], 0)
        all_goalie_actions = torch.cat([goalie_act_a, goalie_act_b], 0)
        obs, rewards, dones, _ = env.step(all_striker_actions, all_goalie_actions, order)
        obs_striker, obs_goalie = obs
        for i in np.argwhere(dones[0]).flatten():
            finished += 1
            if rewards[1][i] < 0:
                records[0] += 1
            elif rewards[0][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
    return
def eval_maac_with_random(model_path, env, order='team', eval_epsoid=40):
    """Evaluate a saved MAAC model (agents 0-7) against random agents
    (8-15), tallying [MAAC wins, random wins, draws] in `records`.

    NOTE(review): `device` is used below but is not a parameter — this
    relies on a module-level global being defined; confirm it exists.
    """
    maac = AttentionSAC.init_from_save(model_path)
    obs_striker, obs_goalie = env.reset(order)
    actions_strikers = [None, None]
    actions_goalies = [None, None]
    records = [0, 0, 0]
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        action_maac = maac.step((obs_striker, obs_goalie), explore=True)
        # Greedy (argmax) actions from the MAAC outputs for agents 0-7.
        actions_strikers[0] = torch.argmax(action_maac[0][:8], dim=-1)
        actions_goalies[0] = torch.argmax(action_maac[1][:8], dim=-1)
        # Uniform random actions for agents 8-15 (7/5 = assumed
        # action-space sizes — TODO confirm).
        actions_strikers[1] = torch.randint(7, size=(8, ))
        actions_goalies[1] = torch.randint(5, size=(8, ))
        actions_striker = torch.cat(actions_strikers, 0)
        actions_goalie = torch.cat(actions_goalies, 0)
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie, order)
        obs_striker, obs_goalie = obs
        for i in np.argwhere(dones[0]).flatten():
            epsoid += 1
            # Presumed reward-sign convention: negative goalie reward
            # means that side conceded — TODO confirm.
            if rewards[1][i] < 0:
                records[0] += 1
            elif rewards[0][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
    return
def eval_maac_self_compete(model_path, env, order='team', eval_epsoid=40):
    """Self-play evaluation of a saved MAAC model controlling all agents.

    Tallies [side-0 wins, side-1 wins, draws] in `records`; returns None.

    NOTE(review): `device` is used below but is not a parameter — this
    relies on a module-level global being defined; confirm it exists.
    """
    maac = AttentionSAC.init_from_save(model_path)
    obs_striker, obs_goalie = env.reset(order)
    actions_strikers = [None, None]
    actions_goalies = [None, None]
    records = [0, 0, 0]
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        action_maac = maac.step((obs_striker, obs_goalie), explore=True)
        # Greedy (argmax) actions over the full agent set.
        actions_strikers[0] = torch.argmax(action_maac[0], dim=-1)
        actions_goalies[0] = torch.argmax(action_maac[1], dim=-1)
        obs, rewards, dones, _ = env.step(actions_strikers[0], actions_goalies[0], order)
        obs_striker, obs_goalie = obs
        for i in np.argwhere(dones[0]).flatten():
            epsoid += 1
            if rewards[1][i] < 0:
                records[0] += 1
            elif rewards[0][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
    return
def eval_maacac_compete(model_path, strikers, goalies, env, order='team', eval_epsoid=200):
    """Pit a saved MAAC model (agents 0-7) against actor-critic nets
    (agents 8-15), tallying [MAAC wins, AC wins, draws] in `records`.

    NOTE(review): `device` is used below but is not a parameter — this
    relies on a module-level global being defined; confirm it exists.
    """
    maac = AttentionSAC.init_from_save(model_path)
    obs_striker, obs_goalie = env.reset(order)
    actions_strikers = [None, None]
    actions_goalies = [None, None]
    records = [0, 0, 0]
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = Variable(torch.from_numpy(obs_striker).float()).to(device)
        obs_goalie = Variable(torch.from_numpy(obs_goalie).float()).to(device)
        action_maac = maac.step((obs_striker, obs_goalie), explore=True)
        # Greedy (argmax) MAAC actions for agents 0-7.
        actions_strikers[0] = torch.argmax(action_maac[0][:8], dim=-1)
        actions_goalies[0] = torch.argmax(action_maac[1][:8], dim=-1)
        # Sampled actor-critic actions for agents 8-15.
        policy_strikers, _ = strikers(obs_striker[8:])
        policy_goalies, _ = goalies(obs_goalie[8:])
        probs_striker = F.softmax(policy_strikers, dim=-1)
        probs_goalie = F.softmax(policy_goalies, dim=-1)
        actions_strikers[1] = probs_striker.multinomial(1).data.flatten()
        actions_goalies[1] = probs_goalie.multinomial(1).data.flatten()
        actions_striker = torch.cat(actions_strikers, 0)
        actions_goalie = torch.cat(actions_goalies, 0)
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie, order)
        obs_striker, obs_goalie = obs
        for i in np.argwhere(dones[0]).flatten():
            epsoid += 1
            if rewards[1][i] < 0:
                records[0] += 1
            elif rewards[0][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
    return
def eval_maacdoubleac_compete(model_path,
                              strikers,
                              goalies,
                              env,
                              order='team',
                              eval_epsoid=200):
    """
    Evaluate a saved AttentionSACDouble model against actor-critic opponents.

    Observations are split into two agent groups via ``parse_double``; the
    double-MAAC model controls the first group while the ``strikers`` /
    ``goalies`` policy networks control the second.

    Parameters
    ----------
    model_path : str
        Path to the saved AttentionSACDouble checkpoint.
    strikers, goalies : callables
        Opponent policy networks returning ``(logits, value)``.
    env : environment
        Exposes ``reset(order)`` and ``step(...)`` returning
        ``(obs, rewards, dones, info)``.
    order : str
        Agent grouping passed through to the environment.
    eval_epsoid : int
        Number of finished episodes to evaluate.

    Returns
    -------
    list of int
        ``[side0_wins, side1_wins, draws]``.
    """
    maac = AttentionSACDouble.init_from_save(model_path)
    obs_striker, obs_goalie, obs_striker2, obs_goalie2 = parse_double(
        env.reset(order))
    actions_strikers = [None, None]
    actions_goalies = [None, None]
    records = [0, 0, 0]
    epsoid = 0
    while epsoid < eval_epsoid:
        obs_striker = torch.from_numpy(obs_striker).float().to(device)
        obs_goalie = torch.from_numpy(obs_goalie).float().to(device)
        obs_striker2 = torch.from_numpy(obs_striker2).float().to(device)
        obs_goalie2 = torch.from_numpy(obs_goalie2).float().to(device)
        # NOTE(review): explore=True samples stochastic actions during
        # evaluation — confirm this is intended rather than greedy eval.
        action_maac = maac.step(
            (obs_striker, obs_goalie, obs_striker2, obs_goalie2), explore=True)
        actions_strikers[0] = torch.argmax(action_maac[0], dim=-1)
        actions_goalies[0] = torch.argmax(action_maac[1], dim=-1)
        # The opponent policies act on the second observation group.
        policy_strikers, _ = strikers(obs_striker2[:])
        policy_goalies, _ = goalies(obs_goalie2[:])
        probs_striker = F.softmax(policy_strikers, dim=-1)
        probs_goalie = F.softmax(policy_goalies, dim=-1)
        actions_strikers[1] = probs_striker.multinomial(1).data.flatten()
        actions_goalies[1] = probs_goalie.multinomial(1).data.flatten()
        actions_striker = torch.cat(actions_strikers, 0)
        actions_goalie = torch.cat(actions_goalies, 0)
        obs, rewards, dones, _ = env.step(actions_striker, actions_goalie,
                                          order)
        obs_striker, obs_goalie, obs_striker2, obs_goalie2 = parse_double(obs)
        for i in np.argwhere(dones[0]).flatten():
            epsoid += 1
            # A negative reward marks the losing side of the finished episode.
            if rewards[1][i] < 0:
                records[0] += 1
            elif rewards[0][i] < 0:
                records[1] += 1
            else:
                records[2] += 1
    # Bug fix: the tallies were computed but never returned (bare `return`).
    return records
def eval_maacdoubleppo_compete(model_path,
strikers,
goalies,
env,
order='team',
eval_epsoid=200):
maac = AttentionSACDouble.init_from_save(model_path)
# obs_striker, obs_goalie = env.reset(order)
obs_striker, obs_goalie, obs_striker2, obs_goalie2 = parse_double(
env.reset(order))
actions_strikers = [None, None]
actions_goalies = [None, None]
records = [0, 0, 0]
epsoid = 0
| |
# Source: islandora_workbench repository, tests/islandora_tests.py
"""unittest tests that require a live Drupal. In most cases, the URL, credentials, etc.
are in a configuration file referenced in the test.
"""
import sys
import os
from ruamel.yaml import YAML
import tempfile
import subprocess
import argparse
import requests
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import workbench_utils
class TestCheckFunction(unittest.TestCase):
    """Unit tests for workbench_utils.compare_strings()."""

    def test_strings_match(self):
        # compare_strings() should treat trailing whitespace, letter case,
        # and punctuation as insignificant.
        matching_pairs = [
            ('foo', 'foo '),
            ('foo', 'Foo'),
            ('foo', 'Foo#~^.'),
            ('foo bar baz', 'foo bar baz'),
        ]
        for first, second in matching_pairs:
            self.assertTrue(workbench_utils.compare_strings(first, second))

    def test_strings_do_not_match(self):
        # Differing alphanumeric content must not compare as equal.
        self.assertFalse(workbench_utils.compare_strings('foo', 'foot'))
class TestCheckCreate(unittest.TestCase):
    """Run "workbench --check" with create.yml against a live Drupal."""

    def setUp(self):
        cmd = ["./workbench", "--config", "create.yml", "--check"]
        output = subprocess.check_output(cmd)
        self.output = output.decode().strip()

    def test_create_check(self):
        # --check should report that both configuration and input are valid.
        # (Removed the unused `lines` local and the empty msg argument.)
        self.assertRegex(self.output, 'Configuration and input data appear to be valid')
class TestCheckCreateFromGoogleSpreadsheet(unittest.TestCase):
    """Run "workbench --check" with input sourced from a Google Spreadsheet."""

    def setUp(self):
        cmd = ["./workbench", "--config", "google_spreadsheet.yml", "--check"]
        output = subprocess.check_output(cmd)
        self.output = output.decode().strip()

    def test_create_check(self):
        # Workbench should download the sheet, then validate it.
        self.assertRegex(self.output, 'Saving data from https://docs.google.com')
        self.assertRegex(self.output, 'Configuration and input data appear to be valid')
class TestCheckUpdate(unittest.TestCase):
    """Run "workbench --check" with update.yml against a live Drupal."""

    def setUp(self):
        cmd = ["./workbench", "--config", "update.yml", "--check"]
        output = subprocess.check_output(cmd)
        self.output = output.decode().strip()

    def test_create_check(self):
        self.assertRegex(self.output, 'Configuration and input data appear to be valid')
class TestCheckDelete(unittest.TestCase):
    """Run "workbench --check" with delete.yml against a live Drupal."""

    def setUp(self):
        cmd = ["./workbench", "--config", "delete.yml", "--check"]
        output = subprocess.check_output(cmd)
        self.output = output.decode().strip()

    def test_create_check(self):
        self.assertRegex(self.output, 'Configuration and input data appear to be valid')
class TestCheckAddMedia(unittest.TestCase):
    """Run "workbench --check" with add_media.yml against a live Drupal."""

    def setUp(self):
        cmd = ["./workbench", "--config", "add_media.yml", "--check"]
        output = subprocess.check_output(cmd)
        self.output = output.decode().strip()

    def test_create_check(self):
        self.assertRegex(self.output, 'Configuration and input data appear to be valid')
class TestTypedRelationCheck(unittest.TestCase):
    """Verify that --check rejects malformed typed relation values."""

    def test_create_check_fail(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        config_file_path = os.path.join(current_dir, 'assets', 'typed_relation_test', 'bad_typed_relation.yml')
        cmd = ["./workbench", "--config", config_file_path, "--check"]
        try:
            output = subprocess.check_output(cmd)
            output = output.decode().strip()
        except subprocess.CalledProcessError as err:
            # Bug fix: the original swallowed this exception, so the test
            # passed vacuously whenever workbench exited non-zero. --check
            # is expected to fail here; the message we want is in the
            # captured output of the failed process.
            output = err.output.decode().strip()
        self.assertRegex(output, 'does not use the pattern required for typed relation fields')
class TestHeaderColumnMismatch(unittest.TestCase):
    """Verify that --check detects CSV rows whose column count differs from the header."""

    def test_header_column_mismatch_fail(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        config_file_path = os.path.join(current_dir, 'assets', 'header_column_mismatch_test', 'create.yml')
        cmd = ["./workbench", "--config", config_file_path, "--check"]
        try:
            output = subprocess.check_output(cmd)
            output = output.decode().strip()
        except subprocess.CalledProcessError as err:
            # Bug fix: the original swallowed this exception, so the test
            # passed vacuously whenever workbench exited non-zero. Assert on
            # the failed process's captured output instead.
            output = err.output.decode().strip()
        self.assertRegex(output, 'Row 2 of your CSV file does not')
class TestExecuteBootstrapScript(unittest.TestCase):
    """Exercise workbench_utils.execute_bootstrap_script() with a trivial script."""

    def setUp(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        assets_dir = os.path.join(dir_path, 'assets', 'execute_bootstrap_script_test')
        self.script_path = os.path.join(assets_dir, 'script.py')
        self.config_file_path = os.path.join(assets_dir, 'config.yml')

    def test_python_script(self):
        # The fixture script prints "Hello"; the helper returns raw bytes.
        output, return_code = workbench_utils.execute_bootstrap_script(
            self.script_path, self.config_file_path)
        self.assertEqual(output.strip(), b'Hello')
class TestExecutePreprocessorScript(unittest.TestCase):
    """Exercise workbench_utils.preprocess_field_data() with an uppercasing script."""

    def setUp(self):
        # Removed an unused `yaml = YAML()` local that was never referenced.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.script_path = os.path.join(dir_path, 'assets', 'preprocess_field_data', 'script.py')

    def test_preprocessor_script_single_field_value(self):
        output, return_code = workbench_utils.preprocess_field_data('|', 'hello', self.script_path)
        self.assertEqual(output.strip(), b'HELLO')

    def test_preprocessor_script_multiple_field_value(self):
        # Multivalued fields are joined with the '|' subdelimiter.
        output, return_code = workbench_utils.preprocess_field_data('|', 'hello|there', self.script_path)
        self.assertEqual(output.strip(), b'HELLO|THERE')
class TestCreate(unittest.TestCase):
    """Create nodes from assets/createtest/create.yml, then delete them in tearDown()."""

    def setUp(self):
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        create_config_file_path = os.path.join(self.current_dir, 'assets', 'createtest', 'create.yml')
        self.create_cmd = ["./workbench", "--config", create_config_file_path]
        self.temp_dir = tempfile.gettempdir()
        # Node IDs created by the test are persisted here so delete.yml can
        # remove them in tearDown().
        self.nid_file = os.path.join(self.temp_dir, 'workbenchcreatetestnids.txt')

    def test_create_check(self):
        nids = list()
        create_output = subprocess.check_output(self.create_cmd)
        create_output = create_output.decode().strip()
        create_lines = create_output.splitlines()
        # Open with "w" rather than "a" so a leftover file from an aborted
        # previous run cannot contribute stale node IDs or a duplicate header.
        with open(self.nid_file, "w") as fh:
            fh.write("node_id\n")
            for line in create_lines:
                if 'created at' in line:
                    # The node URL ends the line; its last path segment
                    # (minus the trailing period) is the node ID.
                    nid = line.rsplit('/', 1)[-1]
                    nid = nid.strip('.')
                    nids.append(nid)
                    fh.write(nid + "\n")
        self.assertEqual(len(nids), 5)

    def tearDown(self):
        delete_config_file_path = os.path.join(self.current_dir, 'assets', 'createtest', 'delete.yml')
        delete_cmd = ["./workbench", "--config", delete_config_file_path]
        # Output is not inspected; check_output still raises on failure.
        subprocess.check_output(delete_cmd)
        os.remove(self.nid_file)
class TestCreateFromFiles(unittest.TestCase):
    """Create nodes directly from files (no input CSV), then delete them."""

    def setUp(self):
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        create_config_file_path = os.path.join(self.current_dir, 'assets', 'create_from_files_test', 'create.yml')
        self.create_cmd = ["./workbench", "--config", create_config_file_path]
        self.temp_dir = tempfile.gettempdir()
        self.nid_file = os.path.join(self.temp_dir, 'workbenchcreatefromfilestestnids.txt')
        # Workbench writes a rollback.csv next to the input files; remove any
        # leftover copy so it does not pollute this run.
        self.rollback_file_path = os.path.join(self.current_dir, 'assets', 'create_from_files_test', 'files', 'rollback.csv')
        if os.path.exists(self.rollback_file_path):
            os.remove(self.rollback_file_path)

    def test_create_check(self):
        nids = list()
        create_output = subprocess.check_output(self.create_cmd)
        create_output = create_output.decode().strip()
        create_lines = create_output.splitlines()
        # "w" rather than "a" prevents stale IDs from an aborted earlier run.
        with open(self.nid_file, "w") as fh:
            fh.write("node_id\n")
            for line in create_lines:
                if 'created at' in line:
                    nid = line.rsplit('/', 1)[-1]
                    nid = nid.strip('.')
                    nids.append(nid)
                    fh.write(nid + "\n")
        self.assertEqual(len(nids), 2)

    def tearDown(self):
        delete_config_file_path = os.path.join(self.current_dir, 'assets', 'create_from_files_test', 'delete.yml')
        delete_cmd = ["./workbench", "--config", delete_config_file_path]
        subprocess.check_output(delete_cmd)
        os.remove(self.nid_file)
        if os.path.exists(self.rollback_file_path):
            os.remove(self.rollback_file_path)
class TestDelete(unittest.TestCase):
    """Create nodes in setUp(), then verify that delete.yml removes all of them."""

    def setUp(self):
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        create_config_file_path = os.path.join(self.current_dir, 'assets', 'deletetest', 'create.yml')
        self.create_cmd = ["./workbench", "--config", create_config_file_path]
        self.temp_dir = tempfile.gettempdir()
        self.nid_file = os.path.join(self.temp_dir, 'workbenchdeletetesttnids.txt')
        create_output = subprocess.check_output(self.create_cmd)
        create_output = create_output.decode().strip()
        create_lines = create_output.splitlines()
        # "w" rather than "a" prevents stale IDs from an aborted earlier run.
        # (Removed an unused `nids` list that was appended to but never read.)
        with open(self.nid_file, "w") as fh:
            fh.write("node_id\n")
            for line in create_lines:
                if 'created at' in line:
                    nid = line.rsplit('/', 1)[-1]
                    nid = nid.strip('.')
                    fh.write(nid + "\n")

    def test_delete_check(self):
        delete_config_file_path = os.path.join(self.current_dir, 'assets', 'deletetest', 'delete.yml')
        delete_cmd = ["./workbench", "--config", delete_config_file_path]
        delete_output = subprocess.check_output(delete_cmd)
        delete_output = delete_output.decode().strip()
        delete_lines = delete_output.splitlines()
        # One output line per deleted node.
        self.assertEqual(len(delete_lines), 5)

    def tearDown(self):
        os.remove(self.nid_file)
class TestCreatePagedContent(unittest.TestCase):
    """Create paged content and verify parent/child linkage via the REST API."""

    def setUp(self):
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        create_config_file_path = os.path.join(self.current_dir, 'assets', 'create_paged_content_test', 'create.yml')
        yaml = YAML()
        with open(create_config_file_path, 'r') as f:
            # dict() replaces the manual key-by-key copy loop.
            config = dict(yaml.load(f.read()))
        self.islandora_host = config['host']
        self.create_cmd = ["./workbench", "--config", create_config_file_path]
        self.temp_dir = tempfile.gettempdir()
        self.nid_file = os.path.join(self.temp_dir, 'workbenchcreatepagedcontenttestnids.txt')

    def test_create_paged_content(self):
        nids = list()
        create_output = subprocess.check_output(self.create_cmd)
        create_output = create_output.decode().strip()
        # Write a file to the system's temp directory containing the node IDs
        # of the nodes created during this test so they can be deleted in
        # tearDown(). "w" avoids stale IDs from an aborted earlier run.
        create_lines = create_output.splitlines()
        with open(self.nid_file, "w") as fh:
            fh.write("node_id\n")
            for line in create_lines:
                if 'created at' in line:
                    nid = line.rsplit('/', 1)[-1]
                    nid = nid.strip('.')
                    nids.append(nid)
                    fh.write(nid + "\n")
        self.assertEqual(len(nids), 6)
        # Test a page object's 'field_member_of' value to see if it matches
        # its parent's node ID. In this test, the last paged content object's
        # node ID will be the fourth node ID in nids (the previous three were
        # for the first paged content object plus its two pages). Note: the
        # metadata.csv file used to create the paged content and page objects
        # uses hard-coded term IDs from the Islandora Models taxonomy as used
        # in the Islandora Playbook. If they change or are different in the
        # Islandora this test is running against, this test will fail.
        parent_node_id_to_test = nids[3]
        # The last node to be created was a page.
        child_node_id_to_test = nids[5]
        node_url = self.islandora_host + '/node/' + child_node_id_to_test + '?_format=json'
        response = requests.get(node_url)
        # Fail loudly on an HTTP error instead of with a confusing JSON or
        # KeyError below.
        response.raise_for_status()
        node_json = json.loads(response.text)
        field_member_of = node_json['field_member_of'][0]['target_id']
        self.assertEqual(int(parent_node_id_to_test), field_member_of)

    def tearDown(self):
        delete_config_file_path = os.path.join(self.current_dir, 'assets', 'create_paged_content_test', 'delete.yml')
        delete_cmd = ["./workbench", "--config", delete_config_file_path]
        subprocess.check_output(delete_cmd)
        os.remove(self.nid_file)
class TestCreatePagedContentFromDirectories(unittest.TestCase):
    """Create paged content from page directories; verify linkage and weights."""

    def setUp(self):
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        create_config_file_path = os.path.join(self.current_dir, 'assets', 'create_paged_content_from_directories_test', 'books.yml')
        yaml = YAML()
        with open(create_config_file_path, 'r') as f:
            # dict() replaces the manual key-by-key copy loop.
            config = dict(yaml.load(f.read()))
        self.islandora_host = config['host']
        self.create_cmd = ["./workbench", "--config", create_config_file_path]
        self.temp_dir = tempfile.gettempdir()
        self.nid_file = os.path.join(self.temp_dir, 'workbenchcreatepagedcontentfromdirectoriestestnids.txt')

    def test_create_paged_content_from_directories(self):
        nids = list()
        create_output = subprocess.check_output(self.create_cmd)
        create_output = create_output.decode().strip()
        # Write a file to the system's temp directory containing the node IDs
        # of the nodes created during this test so they can be deleted in
        # tearDown(). "w" avoids stale IDs from an aborted earlier run.
        create_lines = create_output.splitlines()
        with open(self.nid_file, "w") as fh:
            fh.write("node_id\n")
            for line in create_lines:
                if 'created at' in line:
                    nid = line.rsplit('/', 1)[-1]
                    nid = nid.strip('.')
                    nids.append(nid)
                    fh.write(nid + "\n")
        self.assertEqual(len(nids), 4)
        # Test a page object's 'field_member_of' value to see if it matches its
        # parent's node ID. In this test, we'll test the second page. Note: the
        # metadata CSV file used to create the paged content and page objects
        # uses hard-coded term IDs from the Islandora Models taxonomy as used
        # in the Islandora Playbook. If they change or are different in the
        # Islandora this test is running against, this test will fail. Also note
        # that this test creates media and does not delete them.
        parent_node_id_to_test = nids[0]
        child_node_id_to_test = nids[2]
        node_url = self.islandora_host + '/node/' + child_node_id_to_test + '?_format=json'
        response = requests.get(node_url)
        # Fail loudly on an HTTP error instead of with a confusing JSON or
        # KeyError below.
        response.raise_for_status()
        node_json = json.loads(response.text)
        field_member_of = node_json['field_member_of'][0]['target_id']
        self.assertEqual(int(parent_node_id_to_test), field_member_of)
        # Test that the 'field_weight' value of the second node is 2.
        self.assertEqual(2, node_json['field_weight'][0]['value'])

    def tearDown(self):
        delete_config_file_path = os.path.join(self.current_dir, 'assets', 'create_paged_content_from_directories_test', 'delete.yml')
        delete_cmd = ["./workbench", "--config", delete_config_file_path]
        subprocess.check_output(delete_cmd)
        os.remove(self.nid_file)
class TestTaxonomies (unittest.TestCase):
def setUp(self):
self.current_dir = os.path.dirname(os.path.abspath(__file__))
taxonomies_config_file_path = os.path.join(self.current_dir, 'assets', 'taxonomies_test', 'create.yml')
yaml = YAML()
with open(taxonomies_config_file_path, 'r') as f:
config_file_contents = f.read()
config_data = yaml.load(config_file_contents)
config = {}
for k, v in config_data.items():
config[k] = v
self.islandora_host = config['host']
self.islandora_username = config['username']
self.islandora_password = config['password']
self.create_cmd = ["./workbench", "--config", taxonomies_config_file_path]
self.temp_dir = tempfile.gettempdir()
self.nid_file = os.path.join(self.temp_dir, 'workbenchtaxonomiestestnids.txt')
nids = list()
create_output = subprocess.check_output(self.create_cmd)
create_output = create_output.decode().strip()
# Write a file to | |
r"""
Kyoto Path Model for Affine Highest Weight Crystals
"""
#*****************************************************************************
# Copyright (C) 2013 <NAME> <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#****************************************************************************
from sage.structure.parent import Parent
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.categories.highest_weight_crystals import HighestWeightCrystals
from sage.combinat.crystals.tensor_product import TensorProductOfCrystals, \
TensorProductOfRegularCrystalsElement
from sage.combinat.root_system.root_system import RootSystem
class KyotoPathModel(TensorProductOfCrystals):
r"""
The Kyoto path model for an affine highest weight crystal.
.. NOTE::
Here we are using anti-Kashiwara notation and might differ from
some of the literature.
Consider a Kac--Moody algebra `\mathfrak{g}` of affine Cartan type `X`,
and we want to model the `U_q(\mathfrak{g})`-crystal `B(\lambda)`.
First we consider the set of fundamental weights `\{\Lambda_i\}_{i \in I}`
of `\mathfrak{g}` and let `\{\overline{\Lambda}_i\}_{i \in I_0}` be the
corresponding fundamental weights of the corresponding classical Lie
algebra `\mathfrak{g}_0`. To model `B(\lambda)`, we start with a sequence
of perfect `U_q^{\prime}(\mathfrak{g})`-crystals `(B^{(i)})_i` of level
`l` such that
.. MATH::
\lambda \in \overline{P}_l^+ = \left\{ \mu \in \overline{P}^+ \mid
\langle c, \mu \rangle = l \right\}
where `c` is the canonical central element of `U_q(\mathfrak{g})`
and `\overline{P}^+` is the nonnegative weight lattice spanned by
`\{ \overline{\Lambda}_i \mid i \in I \}`.
Next we consider the crystal isomorphism `\Phi_0 : B(\lambda_0) \to B^{(0)}
\otimes B(\lambda_1)` defined by `u_{\lambda_0} \mapsto b^{(0)}_{\lambda_0}
\otimes u_{\lambda_1}` where `b^{(0)}_{\lambda_0}` is the unique element in
`B^{(0)}` such that `\varphi\left( b^{(0)}_{\lambda_0} \right) = \lambda_0`
and `\lambda_1 = \varepsilon\left( b^{(0)}_{\lambda_0} \right)` and
`u_{\mu}` is the highest weight element in `B(\mu)`. Iterating this, we
obtain the following isomorphism:
.. MATH::
\Phi_n : B(\lambda) \to B^{(0)} \otimes B^{(1)} \otimes \cdots
\otimes B^{(N)} \otimes B(\lambda_{N+1}).
We note by Lemma 10.6.2 in [HK02]_ that for any `b \in B(\lambda)` there
exists a finite `N` such that
.. MATH::
\Phi_N(b) = \left( \bigotimes_{k=0}^{N-1} b^{(k)} \right)
\otimes u_{\lambda_N}.
Therefore we can model elements `b \in B(\lambda)` as a
`U_q^{\prime}(\mathfrak{g})`-crystal by considering an infinite list of
elements `b^{(k)} \in B^{(k)}` and defining the crystal structure by:
.. MATH::
\begin{aligned}
\overline{\mathrm{wt}}(b) & = \lambda_N + \sum_{k=0}^{N-1}
\overline{\mathrm{wt}}\left( b^{(k)} \right)
\\ e_i(b) & = e_i\left( b^{\prime} \otimes b^{(N)} \right) \otimes
u_{\lambda_N},
\\ f_i(b) & = f_i\left( b^{\prime} \otimes b^{(N)} \right) \otimes
u_{\lambda_N},
\\ \varepsilon_i(b) & = \max\bigl( \varepsilon_i(b^{\prime}) -
\varphi_i\left( b^{(N)} \right), 0 \bigr),
\\ \varphi_i(b) & = \varphi_i(b^{\prime}) + \max\left(
\varphi_i\left( b^{(N)} \right) - \varepsilon_i(b^{\prime}), 0 \right),
\end{aligned}
where `b^{\prime} = b^{(0)} \otimes \cdots \otimes b^{(N-1)}`. To
translate this into a finite list, we consider a finite sequence
`b^{(0)} \otimes \cdots \otimes b^{(N-1)} \otimes b^{(N)}_{\lambda_N}`
and if
.. MATH::
f_i\left( b^{(0)} \otimes \cdots b^{(N-1)} \otimes
b^{(N)}_{\lambda_N} \right) = b_0 \otimes \cdots \otimes b^{(N-1)}
\otimes f_i\left( b^{(N)}_{\lambda_N} \right),
then we take the image as `b^{(0)} \otimes \cdots \otimes f_i\left(
b^{(N)}_{\lambda_N}\right) \otimes b^{(N+1)}_{\lambda_{N+1}}`. Similarly
we remove `b^{(N)}_{\lambda_{N}}` if we have `b_0 \otimes \cdots
\otimes b^{(N-1)} \otimes b^{(N-1)}_{\lambda_{N-1}} \otimes
b^{(N)}_{\lambda_N}`. Additionally if
.. MATH::
e_i\left( b^{(0)} \otimes \cdots \otimes b^{(N-1)} \otimes
b^{(N)}_{\lambda_N} \right) = b^{(0)} \otimes \cdots \otimes
b^{(N-1)} \otimes e_i\left( b^{(N)}_{\lambda_N} \right),
then we consider this to be `0`.
REFERENCES:
.. [HK02] *Introduction to Quantum Groups and Crystal Bases.*
<NAME> and <NAME>. 2002. Volume 42.
Graduate Studies in Mathematics. American Mathematical Society.
INPUT:
- ``B`` -- A single or list of `U_q^{\prime}` perfect crystal(s) of
level `l`
- ``weight`` -- A weight in `\overline{P}_l^+`
EXAMPLES::
sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: mg = C.module_generators[0]; mg
[[[3]]]
sage: mg.f_string([0,1,2,2])
[[[3]], [[3]], [[1]]]
An example of type `A_5^{(2)}`::
sage: B = crystals.KirillovReshetikhin(['A',5,2], 1,1)
sage: L = RootSystem(['A',5,2]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: mg = C.module_generators[0]; mg
[[[-1]]]
sage: mg.f_string([0,2,1,3])
[[[-3]], [[2]], [[-1]]]
sage: mg.f_string([0,2,3,1])
[[[-3]], [[2]], [[-1]]]
An example of type `D_3^{(2)}`::
sage: B = crystals.KirillovReshetikhin(['D',3,2], 1,1)
sage: L = RootSystem(['D',3,2]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: mg = C.module_generators[0]; mg
[[]]
sage: mg.f_string([0,1,2,0])
[[[0]], [[1]], []]
An example using multiple crystals of the same level::
sage: B1 = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: B2 = crystals.KirillovReshetikhin(['A',2,1], 2,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: C = crystals.KyotoPathModel([B1, B2, B1], L.fundamental_weight(0))
sage: mg = C.module_generators[0]; mg
[[[3]]]
sage: mg.f_string([0,1,2,2])
[[[3]], [[1], [3]], [[3]]]
sage: mg.f_string([0,1,2,2,2])
sage: mg.f_string([0,1,2,2,1,0])
[[[3]], [[2], [3]], [[1]], [[2]]]
sage: mg.f_string([0,1,2,2,1,0,0,2])
[[[3]], [[1], [2]], [[1]], [[3]], [[1], [3]]]
"""
@staticmethod
def __classcall_private__(cls, crystals, weight):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: C2 = crystals.KyotoPathModel((B,), L.fundamental_weight(0))
sage: C3 = crystals.KyotoPathModel([B], L.fundamental_weight(0))
sage: C is C2 and C2 is C3
True
"""
if isinstance(crystals, list):
crystals = tuple(crystals)
elif not isinstance(crystals, tuple):
crystals = (crystals,)
if any(not B.is_perfect() for B in crystals):
raise ValueError("all crystals must be perfect")
level = crystals[0].level()
if any(B.level() != level for B in crystals[1:]):
raise ValueError("all crystals must have the same level")
ct = crystals[0].cartan_type()
if sum( ct.dual().c()[i] * weight.scalar(h) for i,h in
enumerate(RootSystem(ct).weight_space().simple_coroots()) ) != level:
raise ValueError( "%s is not a level %s weight"%(weight, level) )
return super(KyotoPathModel, cls).__classcall__(cls, crystals, weight)
    def __init__(self, crystals, weight):
        """
        Initialize ``self``.

        INPUT:

        - ``crystals`` -- tuple of perfect crystals (already normalized and
          validated by ``__classcall_private__``)
        - ``weight`` -- a dominant weight of level equal to the crystals'
          common level

        EXAMPLES::

            sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
            sage: L = RootSystem(['A',2,1]).weight_space()
            sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
            sage: TestSuite(C).run() # long time
        """
        Parent.__init__(self, category=(HighestWeightCrystals(), InfiniteEnumeratedSets()))
        self._cartan_type = crystals[0].cartan_type()
        self.crystals = crystals # public for TensorProductOfCrystals
        self._weight = weight
        # Lookup tables per factor: map each element's Epsilon() / Phi()
        # weight to that element, used to extend/contract paths in e()/f().
        self._epsilon_dicts = [{b.Epsilon():b for b in B} for B in crystals]
        self._phi_dicts = [{b.Phi():b for b in B} for B in crystals]
        # The generator is the path consisting of the single element of the
        # first factor whose Phi() equals ``weight``.
        self.module_generators = (self.element_class(self, [self._phi_dicts[0][weight]]),)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: crystals.KyotoPathModel(B, L.fundamental_weight(0))
Kyoto path realization of B(Lambda[0]) using [Kirillov-Reshetikhin crystal of type ['A', 2, 1] with (r,s)=(1,1)]
"""
return "Kyoto path realization of B(%s) using %s"%(self._weight, list(self.crystals))
class Element(TensorProductOfRegularCrystalsElement):
"""
An element in the Kyoto path model.
"""
# For simplicity (and safety), we use the regular crystals implementation
def epsilon(self, i):
r"""
Return `\varepsilon_i` of ``self``.
EXAMPLES::
sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: mg = C.module_generators[0]
sage: [mg.epsilon(i) for i in C.index_set()]
[0, 0, 0]
sage: elt = mg.f(0)
sage: [elt.epsilon(i) for i in C.index_set()]
[1, 0, 0]
sage: elt = mg.f_string([0,1,2])
sage: [elt.epsilon(i) for i in C.index_set()]
[0, 0, 1]
sage: elt = mg.f_string([0,1,2,2])
sage: [elt.epsilon(i) for i in C.index_set()]
[0, 0, 2]
"""
x = self.e(i)
eps = 0
while x is not None:
x = x.e(i)
eps = eps + 1
return eps
def phi(self, i):
r"""
Return `\varphi_i` of ``self``.
EXAMPLES::
sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: mg = C.module_generators[0]
sage: [mg.phi(i) for i in C.index_set()]
[1, 0, 0]
sage: elt = mg.f(0)
sage: [elt.phi(i) for i in C.index_set()]
[0, 1, 1]
sage: elt = mg.f_string([0,1])
sage: [elt.phi(i) for i in C.index_set()]
[0, 0, 2]
"""
x = self.f(i)
phi = 0
while x is not None:
x = x.f(i)
phi = phi + 1
return phi
        def e(self, i):
            """
            Return the action of `e_i` on ``self``, or ``None`` if the
            result is `0`.

            EXAMPLES::

                sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
                sage: L = RootSystem(['A',2,1]).weight_space()
                sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
                sage: mg = C.module_generators[0]
                sage: all(mg.e(i) is None for i in C.index_set())
                True
                sage: mg.f(0).e(0) == mg
                True
            """
            # Signature rule: e_i acts on the leftmost unmatched '+'.
            position = self.positions_of_unmatched_plus(i)
            if position == []:
                return None
            k = position[0]
            # Acting on the final (ground-state) factor would take the path
            # out of the model, so the result is 0.
            if k == len(self)-1:
                return None
            crystal = self[k].e(i)
            # If e_i acted on the second-to-last factor and the result now
            # matches the trailing factor (its Epsilon equals that factor's
            # Phi), drop the trailing factor to keep the path reduced.
            if k == len(self)-2 and crystal.Epsilon() == self._list[-1].Phi():
                l = self._list[:-1]
                l[-1] = crystal
                return self.__class__(self.parent(), l)
            # Otherwise simply replace the k-th factor in the path.
            return self.set_index(k, crystal)
def f(self, i):
"""
Return the action of `f_i` on ``self``.
EXAMPLES::
sage: B = crystals.KirillovReshetikhin(['A',2,1], 1,1)
sage: L = RootSystem(['A',2,1]).weight_space()
sage: C = crystals.KyotoPathModel(B, L.fundamental_weight(0))
sage: | |
import dataclasses
import keyword
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import ClassVar, Dict, List, Optional, Set, Tuple, Union, cast
from uuid import UUID, uuid4
import humps # type: ignore
import marshmallow # type: ignore
from marshmallow import ( # type: ignore
ValidationError,
fields,
post_load,
pre_dump,
validates,
)
from more_itertools import partition # type: ignore
from core import types as t
from core.json import CamelCaseSchema, Serializable
from core.util import is_datetime, normalize_datetime
from .. import errors as err
from . import datatypes as dt
from .datatypes import into_uuid
from .util import (
RESERVED_PREFIX,
is_reserved_model_name,
is_reserved_property_name,
strip_reserved_prefix,
)
def get_organization_id(thing: object) -> t.OrganizationId:
    """
    Extract an organization ID from an input value.

    Parameters
    ----------
    thing : object
        An integer ID, or any value convertible to ``int`` (e.g. a numeric
        string).

    Raises
    ------
    InvalidOrganizationError
        If ``thing`` cannot be interpreted as an integer ID.
    """
    if isinstance(thing, int):
        return t.OrganizationId(thing)
    try:
        int_id = int(thing)  # type: ignore
        return t.OrganizationId(int_id)
    except (TypeError, ValueError):
        # int() raises TypeError for None/lists/etc. and ValueError for
        # non-numeric strings; both mean the input is not a valid ID, so
        # surface the domain error instead of leaking the builtin one.
        raise err.InvalidOrganizationError(id=str(thing))
def get_dataset_id(thing: object) -> t.DatasetId:
    """
    Extract a dataset ID from an input value.

    Parameters
    ----------
    thing : object
        An integer ID, or any value convertible to ``int`` (e.g. a numeric
        string).

    Raises
    ------
    InvalidDatasetError
        If ``thing`` cannot be interpreted as an integer ID.
    """
    if isinstance(thing, int):
        return t.DatasetId(thing)
    try:
        int_id = int(thing)  # type: ignore
        return t.DatasetId(int_id)
    except (TypeError, ValueError):
        # int() raises TypeError for None/lists/etc. and ValueError for
        # non-numeric strings; both mean the input is not a valid ID, so
        # surface the domain error instead of leaking the builtin one.
        raise err.InvalidDatasetError(id=str(thing))
def get_model_id(thing: Union["Model", t.ModelId, UUID, str]) -> t.ModelId:
    """
    Given a `Model` or a model ID (UUID or string), return a model ID.
    """
    if isinstance(thing, Model):
        return thing.id
    if isinstance(thing, UUID):
        return t.ModelId(thing)
    # Assume a UUID string otherwise.
    return t.ModelId(UUID(thing))


# Model IDs are plain UUIDs; reuse the generic UUID predicate.
is_model_id = dt.is_uuid
def get_model_property_id(
    thing: Union["ModelProperty", t.ModelPropertyId, UUID, str]
) -> t.ModelPropertyId:
    """
    Given a `ModelProperty` or a model property ID (UUID or string), return
    a model property ID.
    """
    if isinstance(thing, ModelProperty):
        return thing.id
    if isinstance(thing, UUID):
        return t.ModelPropertyId(thing)
    # Assume a UUID string otherwise.
    return t.ModelPropertyId(UUID(thing))


# Model property IDs are plain UUIDs; reuse the generic UUID predicate.
is_model_property_id = dt.is_uuid
def get_record_id(thing: Union["Record", t.RecordId, UUID, str]) -> t.RecordId:
    """
    Given a `Record` or a record ID (UUID or string), return a record ID.
    """
    if isinstance(thing, Record):
        return thing.id
    if isinstance(thing, UUID):
        return t.RecordId(thing)
    # Assume a UUID string otherwise.
    return t.RecordId(UUID(thing))


# Record IDs are plain UUIDs; reuse the generic UUID predicate.
is_record_id = dt.is_uuid
def get_model_relationship_id(
    thing: Union["ModelRelationship", t.ModelRelationshipId, UUID, str]
) -> t.ModelRelationshipId:
    """
    Given a `ModelRelationship` or a relationship ID (UUID or string),
    return a relationship ID.
    """
    if isinstance(thing, ModelRelationship):
        return thing.id
    if isinstance(thing, UUID):
        return t.ModelRelationshipId(thing)
    # Assume a UUID string otherwise.
    return t.ModelRelationshipId(UUID(thing))


# Model relationship IDs are plain UUIDs; reuse the generic UUID predicate.
is_model_relationship_id = dt.is_uuid
# UUIDs have 36 characters
UUID_LENGTH = 36


def normalize_relationship_type(relationship_name: str) -> str:
    """
    Normalizes a relationship name to upper-snake-case.

    If the relationship name has a UUID suffix added by the Python client
    or frontend, it is removed.

    This also helps dealing with the Neo4j relationship type limitation of max
    of 65K unique names.

    Examples
    --------
    - "belongs_to_478e215d-04ec-4cdf-ac8b-d5289601c9f7" -> "BELONGS_TO"
    """
    # Imported locally to avoid a circular import with .validate.
    from .validate import validate_relationship_name

    # Reject names that do not conform before normalizing.
    validate_relationship_name(relationship_name)
    # Strip a trailing "_<uuid>" suffix, if present: the character before the
    # last 36 characters must be "_" and those 36 characters must be a UUID.
    if (
        len(relationship_name) > UUID_LENGTH + 1
        and relationship_name[-(UUID_LENGTH + 1)] == "_"
        and dt.is_uuid(relationship_name[-UUID_LENGTH:])
    ):
        relationship_name = relationship_name[: -(UUID_LENGTH + 1)]
    # Replace path-like separators with underscores and upper-case the result.
    return relationship_name.replace("/", "_").replace(".", "_").upper().strip()
def get_relationship_type(
    r: Union["ModelRelationship", t.RelationshipType, t.RelationshipName, str]
) -> t.RelationshipType:
    """
    Transform and format a string into a relationship type.

    A relationship type is the canonical representation of a relationship
    in Neo4j: a typeful, an upper-snake-cased name.

    Examples
    --------
    - "foo" -> "FOO"
    - "DoctorVisit" -> "DOCTOR_VISIT"
    - "tHiS_IsATesT" -> "THIS_IS_A_TEST"
    """
    if isinstance(r, ModelRelationship):
        raw_type = r.type
    else:
        raw_type = r
    return t.RelationshipType(normalize_relationship_type(raw_type))
def get_record_relationship_id(
    thing: Union["RecordRelationship", t.RecordRelationshipId, UUID, str]
) -> t.RecordRelationshipId:
    """
    Coerce a `RecordRelationship`, a UUID, or a UUID string into a typed
    `RecordRelationshipId`.
    """
    if isinstance(thing, UUID):
        # Already a UUID: just brand it with the typed ID.
        return t.RecordRelationshipId(thing)
    if isinstance(thing, RecordRelationship):
        # Full relationship object: use its stored ID directly.
        return thing.id
    # Otherwise assume a UUID string and parse it.
    return t.RecordRelationshipId(UUID(thing))


# Record relationship IDs are plain UUIDs, so UUID validation suffices.
is_record_relationship_id = dt.is_uuid
def get_package_proxy_id(
    r: Union["PackageProxy", t.PackageProxyId, UUID, str]
) -> t.PackageProxyId:
    """
    Coerce a `PackageProxy`, a UUID, or a UUID string into a typed
    `PackageProxyId`.
    """
    if isinstance(r, UUID):
        return t.PackageProxyId(r)
    if isinstance(r, PackageProxy):
        # Note: unlike the other ID helpers, the stored ID is re-wrapped here.
        return t.PackageProxyId(r.id)
    # Otherwise assume a UUID string and parse it.
    return t.PackageProxyId(UUID(r))


# Package proxy IDs are plain UUIDs, so UUID validation suffices.
is_package_proxy_id = dt.is_uuid
###############################################################################
class OrderDirection(str, Enum):
    """Sort direction for an ``ORDER BY`` clause."""

    ASC = "asc"
    DESC = "desc"

    @classmethod
    def parse(cls, s: str) -> "OrderDirection":
        """Parse a direction string, ignoring case and surrounding whitespace."""
        normalized = s.strip().lower()
        return cls(normalized)
# Order by
@dataclass(frozen=True)
class OrderBy:
    """Base ORDER BY clause; construct via the `field`/`relationship` factories."""

    @classmethod
    def field(cls, name: str, ascending: bool = True) -> "OrderBy":
        """Order results by a record field (property)."""
        return OrderByField(name=name, ascending=ascending)

    @classmethod
    def relationship(cls, type: str, ascending: bool = True) -> "OrderBy":
        """Order results by a relationship type."""
        return OrderByRelationship(type=type, ascending=ascending)

    # Subclasses override exactly one of these to return True:
    @property
    def is_field(self) -> bool:
        return False

    @property
    def is_relationship(self) -> bool:
        return False
@dataclass(frozen=True)
class OrderByField(OrderBy):
    """ORDER BY a record field, with special handling for the created-at and
    updated-at pseudo-fields."""

    # Every accepted spelling of the creation-timestamp pseudo-field:
    CREATED_AT_FIELDS: ClassVar[Set[str]] = {
        "~created_at",
        "created_at",
        "createdAt",
        "$created_at",
        "$createdAt",
        RESERVED_PREFIX + "created_at",
        RESERVED_PREFIX + "createdAt",
    }
    # Every accepted spelling of the update-timestamp pseudo-field:
    UPDATED_AT_FIELDS: ClassVar[Set[str]] = {
        "~updated_at",
        "updated_at",
        "updatedAt",
        "$updated_at",
        "$updatedAt",
        RESERVED_PREFIX + "updated_at",
        RESERVED_PREFIX + "updatedAt",
    }

    name: str
    ascending: bool = field(default=True)

    @property
    def is_field(self) -> bool:
        return True

    @property
    def is_created_at(self) -> bool:
        stripped = self.name.strip()
        return (
            stripped in self.CREATED_AT_FIELDS
            or stripped.lower() in self.CREATED_AT_FIELDS
        )

    @property
    def is_updated_at(self) -> bool:
        stripped = self.name.strip()
        return (
            stripped in self.UPDATED_AT_FIELDS
            or stripped.lower() in self.UPDATED_AT_FIELDS
        )

    @property
    def direction(self) -> OrderDirection:
        """The sort direction as an `OrderDirection` enum member."""
        return OrderDirection.ASC if self.ascending else OrderDirection.DESC
@dataclass(frozen=True)
class OrderByRelationship(OrderBy):
    """ORDER BY a relationship; only sorting on the relationship label/type
    is supported."""

    # Every accepted spelling of the relationship label/type pseudo-field:
    SUPPORTED_LABELS: ClassVar[Set[str]] = set(
        [
            "~label",
            "label",
            "$label",
            RESERVED_PREFIX + "label",
            "type",
            "$type",
            RESERVED_PREFIX + "type",
        ]
    )

    type: str
    ascending: bool = field(default=True)

    @property
    def is_relationship(self) -> bool:
        return True

    @property
    def is_supported_type(self) -> bool:
        """
        Only relationship labels (types) are supported for sorting.
        """
        # FIX: the local variable was named `t`, shadowing the module-level
        # `t` types alias used throughout this file; renamed for clarity.
        label = self.type.strip()
        return label in self.SUPPORTED_LABELS or label.lower() in self.SUPPORTED_LABELS
###############################################################################
class FromNodeMixin:
    """Mixin that constructs a dataclass instance from a Neo4j node's
    properties, decamelizing keys and stripping the reserved prefix."""

    @classmethod
    def _is_reserved(cls, t: Tuple[str, t.GraphValue]) -> bool:
        # True if the (key, value) pair's key is a reserved property name.
        k, _ = t
        return is_reserved_property_name(k)

    @classmethod
    def from_node(cls, **data) -> object:
        defined_properties = set([f.name for f in dataclasses.fields(cls)])
        # Partition all reserved properties (those whose name begin with the
        # RESERVED_PREFIX character), and user-settable properties:
        user_props, reserved_props = partition(cls._is_reserved, data.items())
        props = {humps.decamelize(k): v for k, v in user_props}
        # Reserved properties are only kept if the dataclass declares them:
        for k, v in reserved_props:
            kk = strip_reserved_prefix(humps.decamelize(k))
            if kk in defined_properties:
                props[kk] = v
        # Append '_' to any kwargs that are a reserved word.
        # BUG FIX: iterate over a snapshot of the keys -- popping and
        # re-inserting entries while iterating the live dict raises
        # "RuntimeError: dictionary changed size during iteration".
        for k in list(props):
            if keyword.iskeyword(k):
                props[k + "_"] = props.pop(k)
        return cls(**props)  # type: ignore
class DatasetSchema(CamelCaseSchema):
    """Marshmallow (de)serialization schema for `Dataset`."""

    id = fields.Integer()
    node_id = fields.String(allow_none=True)

    @post_load
    def make(self, data, **kwargs):
        # BUG FIX: this schema is declared as `Dataset.__schema__` and its
        # fields (id, node_id) match `Dataset`; it previously constructed a
        # `PropertyValue`, which looks like a copy-paste error.
        return Dataset(**data)
@dataclass(frozen=True)
class Dataset(Serializable):
    """A dataset, identified by an integer ID and an optional node ID."""

    __schema__: ClassVar[DatasetSchema] = DatasetSchema(unknown=marshmallow.EXCLUDE)

    # Fields exposed via public serialization:
    PUBLIC: ClassVar[Set[str]] = {"id", "node_id"}

    id: t.DatasetId
    node_id: Optional[str] = field(default=None)

    @classmethod
    def from_node(cls, data) -> "Dataset":
        """Build a `Dataset` from a raw node property mapping."""
        return cls(id=t.DatasetId(data["id"]), node_id=data.get("node_id"))
@dataclass(frozen=True)
class Package(Serializable):
    """A package reference: an integer ID plus its node ID string."""

    # Integer package ID -- presumably the platform-assigned ID; confirm.
    id: int
    # Node ID string for the package.
    node_id: str
class ModelSchema(CamelCaseSchema):
    """Marshmallow (de)serialization schema for `Model`."""

    id = fields.UUID()
    name = fields.String()
    display_name = fields.String()
    description = fields.String()
    count = fields.Integer(default=0)
    # Timestamps are serialized in ISO-8601 format:
    created_at = fields.DateTime(format="iso")
    updated_at = fields.DateTime(format="iso")
    created_by = fields.String()
    updated_by = fields.String()
    template_id = fields.UUID(required=False, allow_none=True)

    @post_load
    def make(self, data, **kwargs):
        # Deserialize directly into the frozen dataclass.
        return Model(**data)
@dataclass(frozen=True)
class Model(FromNodeMixin, Serializable):
    """A model (concept) definition: naming, description, record count,
    audit metadata, and an optional template reference."""

    __schema__: ClassVar[ModelSchema] = ModelSchema(unknown=marshmallow.EXCLUDE)

    # Fields exposed via public serialization:
    PUBLIC: ClassVar[Set[str]] = set(
        ["id", "name", "display_name", "description", "template_id"]
    )

    id: t.ModelId
    name: str
    display_name: str
    description: str
    count: int
    created_at: datetime
    updated_at: datetime
    created_by: t.UserNodeId
    updated_by: t.UserNodeId
    template_id: Optional[UUID] = field(default=None)

    @validates("name")
    def validate_name(self, name):
        """Reject invalid model names, re-raising as a marshmallow ValidationError."""
        # HACK: this validation is defined as a method to work around a
        # circular import between `models.validation` and `models.types`
        from .validate import validate_model_name
        try:
            validate_model_name(name)
        except err.ModelNameValidationError as e:
            raise ValidationError from e

    def __post_init__(self):
        # Needed since neotime.DateTime does not work with Python copy.deepcopy
        # HACK: This is required to mutate frozen dataclasses
        object.__setattr__(self, "created_at", normalize_datetime(self.created_at))
        object.__setattr__(self, "updated_at", normalize_datetime(self.updated_at))
class ModelPropertySchema(CamelCaseSchema):
name = fields.String(required=True)
display_name = fields.String(required=True)
data_type = fields.Function(
required=True,
serialize=lambda o: o.data_type.to_dict(),
deserialize=dt.deserialize,
)
description = fields.String(required=False)
index = fields.Integer(required=False, default=0)
locked = fields.Boolean(required=False, default=False)
required = fields.Boolean(required=False, default=False)
model_title = fields.Boolean(required=False, default=False)
# If True, show this property as a column in tables of records
default = fields.Boolean(required=False, default=True)
default_value = fields.Raw(required=False, allow_none=True)
created_at = fields.DateTime(required=False, format="iso")
updated_at = fields.DateTime(required=False, format="iso")
id = fields.UUID(required=False, allow_none=True)
@post_load
def make(self, data, **kwargs):
return ModelProperty(**data)
@dataclass(frozen=True)
class ModelProperty(FromNodeMixin, Serializable):
"""
A property on a model represented using a node and modelled as
(m:Model)--[r:MODEL_RELATIONSHIP_TYPE]->(p:ModelProperty)
"""
IMMUTABLE: ClassVar[Set[str]] = set(["name", "data_type"])
PUBLIC: ClassVar[Set[str]] = set(
[
"id",
"name",
"display_name",
"data_type",
"description",
"index",
"locked",
"required",
"model_title",
"default_value",
"default",
]
)
__schema__: ClassVar[ModelPropertySchema] = ModelPropertySchema(
unknown=marshmallow.EXCLUDE
)
name: str
display_name: str
data_type: dt.DataType
description: str = field(default="")
index: int = field(default=0)
locked: bool = field(default=False)
required: bool = field(default=False)
model_title: bool = field(default=False)
default: bool = field(default=True)
default_value: Optional[t.GraphValue] = field(default=None)
created_at: datetime = field(default_factory=datetime.now)
updated_at: datetime = field(default_factory=datetime.now)
created_by: str = field(default="")
updated_by: str = field(default="")
id: t.ModelPropertyId = field(default_factory=lambda: t.ModelPropertyId(uuid4()))
@validates("name")
def validate_name(self, name):
    """Reject invalid property names, re-raising as a marshmallow ValidationError."""
    # HACK: this validation is defined as a method to work around a
    # circular import between `models.validation` and `models.types`
    from .validate import validate_property_name
    try:
        validate_property_name(name)
    except err.PropertyNameValidationError as e:
        raise ValidationError from e
def to_dict_with_string_datatype(self, camel_case: bool = False):
    """
    Serialize this property with the data type rendered as a serialized
    JSON string instead of a nested dict.
    """
    d = self.to_dict(camel_case=camel_case)
    # Overwrite the data-type entry under whichever key convention was used:
    key = "dataType" if camel_case else "data_type"
    d[key] = dt.serialize(self.data_type)
    return d
def __post_init__(self):
if isinstance(self.data_type, str):
# HACK: | |
<gh_stars>0
import logging
import time
import numpy as np
import torch
import multiprocessing as mp
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from transformer import TransformerEncoderLayer, TransformerDecoderLayer
from utils import *
from position import *
# from numba import jit
PRECISION = 5
POS_DIM_ALTER = 100
class MergeLayer(torch.nn.Module):
    """
    Merge two inputs either through a two-layer MLP (non_linear=True) or, for
    motif explainability, through a single linear walk-scoring layer
    (non_linear=False).
    """

    def __init__(self, dim1, dim2, dim3, dim4, non_linear=True):
        super().__init__()
        # self.layer_norm = torch.nn.LayerNorm(dim1 + dim2)
        self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
        self.fc2 = torch.nn.Linear(dim3, dim4)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc1.weight)
        torch.nn.init.xavier_normal_(self.fc2.weight)

        # special linear layer for motif explainability
        self.non_linear = non_linear
        if not non_linear:
            assert(dim1 == dim2)
            self.fc = nn.Linear(dim1, 1)
            # BUG FIX: initialize the newly created scoring layer `self.fc`;
            # previously this line re-initialized `self.fc1` a second time,
            # leaving `self.fc` with PyTorch's default initialization.
            torch.nn.init.xavier_normal_(self.fc.weight)

    def forward(self, x1, x2):
        """Return (z, z_walk): the merged score and per-walk scores (or None)."""
        z_walk = None
        if self.non_linear:
            x = torch.cat([x1, x2], dim=-1)
            # x = self.layer_norm(x)
            h = self.act(self.fc1(x))
            z = self.fc2(h)
        else:  # for explainability
            # x1, x2 shape: [B, M, F]
            x = torch.cat([x1, x2], dim=-2)  # x shape: [B, 2M, F]
            z_walk = self.fc(x).squeeze(-1)  # z_walk shape: [B, 2M]
            z = z_walk.sum(dim=-1, keepdim=True)  # z shape [B, 1]
        return z, z_walk
class finalClassifier_time_prediction(torch.nn.Module):
    """
    Mixture-density head for inter-event time prediction.

    N: number of Gaussian mixture components
    Input: goes through a two-layer MLP
    Output: vector 3*N (Weight [1*N] => Softmax; Mean [1*N]; Var [1*N])
    Loss: negative log-likelihood of log(t) under the mixture, plus an MSE
    diagnostic between the expected time and the ground truth.
    """

    def __init__(self, dim1, dim2, dim3, dim4, dim5, N=3):
        super().__init__()
        self.fc2 = torch.nn.Linear(dim4, dim5)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc2.weight)
        self.MLP_two_nodes = torch.nn.Linear(dim1 + dim3, dim4)
        self.N = N
        self.fc_mu = nn.Sequential(nn.Linear(dim4, dim4), nn.ReLU(), nn.Linear(dim4, N))
        # log-variance head: exponentiated later so variance stays positive
        self.fc_log_var = nn.Sequential(nn.Linear(dim4, dim4), nn.ReLU(), nn.Linear(dim4, N))
        # mixture weights; dim=-1 made explicit (implicit-dim nn.Softmax() is
        # deprecated and resolves to the same axis for the 2-D activations here)
        self.fc_weight = nn.Sequential(
            nn.Linear(dim4, dim4), nn.ReLU(), nn.Linear(dim4, N), nn.Softmax(dim=-1)
        )
        # BUG FIX: the original iterated over a list of `.modules()` generators,
        # so `isinstance(m, nn.Linear)` never matched and no head layer was
        # ever xavier-initialized. Walk each submodule instead.
        for seq in (self.fc_mu, self.fc_log_var, self.fc_weight):
            for m in seq.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_normal_(m.weight)
                    nn.init.constant_(m.bias, 0)
        self.sum_data = True
        self.log_norm_constant = -0.5 * np.log(2 * np.pi)

    def forward(self, x1, x2, x3, t):
        # BUG FIX: `t += 1` mutated the caller's tensor in place; use an
        # out-of-place add. (+1 guards the log below, e.g. t = 1 => log t = 0.)
        t = t + 1
        f = self.act(self.MLP_two_nodes(torch.cat([x1 + x2, x3], dim=-1)))
        mu = self.fc_mu(f).view(-1, self.N)
        t = torch.log(t.view(-1, 1))
        var = torch.exp(self.fc_log_var(f)).view(-1, self.N)  # var
        prec = torch.sqrt(var * 2 * np.pi)
        weight = self.fc_weight(f).view(-1, self.N)
        # Gaussian mixture likelihood of log-time under each component:
        log_p = torch.exp(- (mu * mu + t * t - 2 * t * mu) / (var * 2)) / prec * weight  # TODO: no torch.pi and pi should be on the device prec=>(prec * 2 * \pi)
        result = - torch.log(torch.sum(log_p, dim=1))
        time_predicted = torch.sum(mu * weight, dim=1)
        time_gt = t.view(-1)
        MSE = torch.norm(time_predicted - time_gt, 2)
        if self.sum_data:
            return MSE, torch.sum(result), [time_predicted, time_gt]
        else:
            return MSE, result, [time_predicted, time_gt]
class finalClassifier_inter(torch.nn.Module):
    """Linear readout that averages three embeddings when all are given."""

    def __init__(self, dim1, dim2, dim3, dim4, dim5):
        super().__init__()
        self.fc2 = torch.nn.Linear(dim4, dim5)
        torch.nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, x1, x2, x3):
        """If x2 is None, score x1 alone; otherwise score the mean of x1, x2, x3."""
        merged = x1 if x2 is None else (x1 + x2 + x3) / 3
        return self.fc2(merged)
class finalClassifier(torch.nn.Module):
    """MLP head whose score is symmetric under swapping the first two inputs."""

    def __init__(self, dim1, dim2, dim3, dim4, dim5):
        super().__init__()
        self.fc2 = torch.nn.Linear(dim4, dim5)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc2.weight)
        self.MLP_two_nodes = torch.nn.Linear(dim1 + dim2 + dim3, dim4)
        self.MLP_last = torch.nn.Linear(dim4, dim4)
        torch.nn.init.xavier_normal_(self.MLP_two_nodes.weight)
        torch.nn.init.xavier_normal_(self.MLP_last.weight)

    def forward(self, x1, x2, x3):
        # Score both orderings of (x1, x2) and sum, making the head symmetric
        # in its first two arguments.
        hidden = sum(
            self.act(self.MLP_two_nodes(torch.cat(order, dim=-1)))
            for order in ([x1, x2, x3], [x2, x1, x3])
        )
        return self.fc2(self.act(self.MLP_last(hidden)))
class ScaledDotProductAttention(torch.nn.Module):
    """Scaled dot-product attention with optional masking and dropout."""

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = torch.nn.Dropout(attn_dropout)
        self.softmax = torch.nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        """
        q: [B*N_src*n_head, 1, d_k]; k: [B*N_src*n_head, num_neighbors, d_k];
        v: [B*N_src*n_head, num_neighbors, d_v];
        mask: [B*N_src*n_head, 1, num_neighbors], True marks masked entries.
        """
        # Temperature-scaled similarity scores per neighbor:
        scores = torch.bmm(q, k.transpose(-1, -2)) / self.temperature
        if mask is not None:
            scores = scores.masked_fill(mask, -1e10)
        attn = self.dropout(self.softmax(scores))
        output = torch.bmm(attn, v)  # [B*N_src*n_head, 1, d_v]
        return output, attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module over each source node's sampled neighbors '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5),
                                                   attn_dropout=dropout)
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """
        q: [B, N_src, d_model]; k, v: [B, N_ngh, d_model], where N_ngh is a
        multiple of N_src. mask (optional): shaped so it can be viewed as
        [B*N_src, 1, num_neighbors]; True marks masked entries.
        """
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        B, N_src, _ = q.size()  # [B, N_src, model_dim]
        B, N_ngh, _ = k.size()  # [B, N_ngh, model_dim]
        B, N_ngh, _ = v.size()  # [B, N_ngh, model_dim]
        assert(N_ngh % N_src == 0)
        num_neighbors = int(N_ngh / N_src)
        residual = q

        q = self.w_qs(q).view(B, N_src, 1, n_head, d_k)
        k = self.w_ks(k).view(B, N_src, num_neighbors, n_head, d_k)
        v = self.w_vs(v).view(B, N_src, num_neighbors, n_head, d_v)

        q = q.transpose(2, 3).contiguous().view(B*N_src*n_head, 1, d_k)
        k = k.transpose(2, 3).contiguous().view(B*N_src*n_head, num_neighbors, d_k)
        v = v.transpose(2, 3).contiguous().view(B*N_src*n_head, num_neighbors, d_v)
        # BUG FIX: `mask` defaults to None but was reshaped unconditionally,
        # raising AttributeError. Only reshape when a mask is provided;
        # ScaledDotProductAttention already handles mask=None.
        if mask is not None:
            mask = mask.view(B*N_src, 1, num_neighbors).repeat(n_head, 1, 1)
        output, attn_map = self.attention(q, k, v, mask=mask)

        output = output.view(B, N_src, n_head*d_v)  # [B, N_src, n_head*d_v]
        output = self.dropout(self.fc(output))  # [B, N_src, model_dim]
        output = self.layer_norm(output + residual)  # [B, N_src, model_dim]
        attn_map = attn_map.view(B, N_src, n_head, num_neighbors)
        return output, attn_map
class MapBasedMultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module scoring (query, key) pairs with a learned
    linear map instead of a dot product '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.wq_node_transform = nn.Linear(d_model, n_head * d_k, bias=False)
        self.wk_node_transform = nn.Linear(d_model, n_head * d_k, bias=False)
        # BUG FIX: the value projection must produce n_head * d_v features;
        # it previously used d_k, which breaks whenever d_k != d_v.
        self.wv_node_transform = nn.Linear(d_model, n_head * d_v, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        self.act = nn.LeakyReLU(negative_slope=0.2)
        self.weight_map = nn.Linear(2 * d_k, 1, bias=False)
        nn.init.xavier_normal_(self.fc.weight)
        self.softmax = torch.nn.Softmax(dim=2)
        # (A redundant duplicate `self.dropout` assignment was removed.)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        residual = q

        q = self.wq_node_transform(q).view(sz_b, len_q, n_head, d_k)
        k = self.wk_node_transform(k).view(sz_b, len_k, n_head, d_k)
        v = self.wv_node_transform(v).view(sz_b, len_v, n_head, d_v)

        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k)  # (n*b) x lq x dk
        q = torch.unsqueeze(q, dim=2)  # [(n*b), lq, 1, dk]
        q = q.expand(q.shape[0], q.shape[1], len_k, q.shape[3])  # [(n*b), lq, lk, dk]
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k)  # (n*b) x lk x dk
        k = torch.unsqueeze(k, dim=1)  # [(n*b), 1, lk, dk]
        k = k.expand(k.shape[0], len_q, k.shape[2], k.shape[3])  # [(n*b), lq, lk, dk]
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v)  # (n*b) x lv x dv
        # BUG FIX: `mask` defaults to None but was expanded unconditionally,
        # raising AttributeError. Only expand when a mask is provided.
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)  # (n*b) x lq x lk

        # Map based attention: score each (query, key) pair with a linear map.
        q_k = torch.cat([q, k], dim=3)  # [(n*b), lq, lk, dk * 2]
        attn = self.weight_map(q_k).squeeze(dim=3)  # [(n*b), lq, lk]
        if mask is not None:
            attn = attn.masked_fill(mask, -1e10)
        attn = self.softmax(attn)  # [n * b, l_q, l_k]
        attn = self.dropout(attn)  # [n * b, l_q, l_k]
        # [n * b, l_q, l_k] * [n * b, l_v, d_v] >> [n * b, l_q, d_v]
        output = torch.bmm(attn, v)
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)  # b x lq x (n*dv)
        output = self.dropout(self.act(self.fc(output)))
        output = self.layer_norm(output + residual)
        return output, attn
def expand_last_dim(x, num):
    """Append a trailing axis of size `num` that broadcasts (views) x's
    values without copying."""
    return x.unsqueeze(-1).expand(*x.size(), num)
class TimeEncode(torch.nn.Module):
    """Harmonic time encoding: t -> cos(t * w + phi) with learnable
    frequencies and phases."""

    def __init__(self, expand_dim, factor=5):
        super(TimeEncode, self).__init__()
        self.time_dim = expand_dim
        self.factor = factor
        # Frequencies 10^0 .. 10^-9, log-spaced across the encoding dims.
        init_freq = 1 / 10 ** np.linspace(0, 9, self.time_dim)
        self.basis_freq = torch.nn.Parameter(torch.from_numpy(init_freq).float())
        self.phase = torch.nn.Parameter(torch.zeros(self.time_dim).float())

    def forward(self, ts):
        """ts: [N, L] -> harmonic features [N, L, time_dim]."""
        n, length = ts.size(0), ts.size(1)
        scaled = ts.view(n, length, 1) * self.basis_freq.view(1, 1, -1)
        return torch.cos(scaled + self.phase.view(1, 1, -1))
class PosEncode(torch.nn.Module):
def __init__(self, expand_dim, seq_len):
super().__init__()
self.pos_embeddings = nn.Embedding(num_embeddings=seq_len, embedding_dim=expand_dim)
def forward(self, ts):
# ts: [N, L]
order = ts.argsort()
ts_emb = | |
fit
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
**kwargs : dict
Additional keyword arguments passed to
`sklearn.linear_model.ElasticNet`.
Returns
-------
`.LightCurve`
Corrected light curve, with noise removed. In units of electrons / second
Examples
--------
The following example will perform the ElasticNet correction using the
SingleScale and Spike basis vectors with a strong regualrization alpha
term of 1.0 and an L1 ratio of 0.9 which means predominantly a Lasso
regularization but with a slight amount of Ridge Regression.
>>> cbv_type = ['SingleScale', 'Spike']
>>> cbv_indices = [np.arange(1,9), 'ALL']
>>> corrected_lc = cbvCorrector.correct_elasticnet(cbv_type=cbv_type, # doctest: +SKIP
>>> cbv_indices=cbv_indices, alpha=1.0, l1_ratio=0.9) # doctest: +SKIP
"""
# Perform all the preparatory stuff common to all correct methods
self._correct_initialization(cbv_type=cbv_type,
cbv_indices=cbv_indices, ext_dm=ext_dm)
# Default cadence mask
if cadence_mask is None:
cadence_mask = np.ones(len(self.lc.flux), bool)
# Use Scikit-learn ElasticNet
self.regressor = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False, **kwargs)
X = self.design_matrix_collection.values
y = self.lc.flux
# Set mask
# note: ElasticNet has no internal way to do this so we have to just
# remove the cadences from X and y
XMasked = X.copy()
yMasked = y.copy()
XMasked = XMasked[cadence_mask,:]
yMasked = yMasked[cadence_mask]
# Perform the ElasticNet fit
self.regressor.fit(XMasked, yMasked)
# Finishing work
# When creating the model do not include the constant
model_flux = np.dot(X[:,0:-1], self.regressor.coef_[0:-1])
model_flux -= np.median(model_flux)
# TODO: Propagation of uncertainties. They really do not change much.
model_err = np.zeros(len(model_flux))
self.coefficients = self.regressor.coef_
self.model_lc = LightCurve(time=self.lc.time,
flux=model_flux*self.lc.flux.unit,
flux_err=model_err*self.lc.flux_err.unit)
self.corrected_lc = self.lc.copy()
self.corrected_lc.flux = self.lc.flux - self.model_lc.flux
self.corrected_lc.flux_err = (self.lc.flux_err**2 + model_err**2)**0.5
self.diagnostic_lightcurves = self._create_diagnostic_lightcurves()
self.cadence_mask = cadence_mask
self.alpha = alpha
return self.corrected_lc
def correct(self, cbv_type=None,
            cbv_indices=None,
            ext_dm=None, cadence_mask=None, alpha_bounds=None,
            target_over_score=0.5, target_under_score=0.5, max_iter=100):
    """ Optimizes the correction by adjusting the L2-Norm (Ridge Regression)
    regularization penalty term, alpha, based on the introduced noise
    (over-fitting) and residual correlation (under-fitting) goodness
    metrics. The numerical optimization is performed using the
    scipy.optimize.minimize_scalar Brent's method.

    The optimizer attempts to maximize the over- and under-fitting goodness
    metrics. However, once the target_over_score or target_under_score is
    reached, a "Leaky ReLU" is used so that the optimization "pressure"
    concentrates on the other metric until both metrics rise above their
    respective target scores, instead of driving a single metric to near
    1.0.

    The optimization parameters used are stored in self.optimization_params
    as a record of how the optimization was performed.

    The optimized correction is performed using LightKurve's
    RegressionCorrector methods. See correct_gaussian_prior for details.

    Parameters
    ----------
    cbv_type : str list
        List of CBV types to use in correction {'ALL' => Use all}
        Defaults to ['SingleScale'].
    cbv_indices : list of lists
        List of CBV vectors to use in each of cbv_type passed. {'ALL' => Use all}
        NOTE: 1-Based indexing! Defaults to [np.arange(1, 9)].
    ext_dm : `.DesignMatrix` or `.DesignMatrixCollection`
        Optionally pass an extra design matrix to also be used in the fit
    cadence_mask : np.ndarray of bools (optional)
        Mask, where True indicates a cadence that should be used.
    alpha_bounds : float list(len=2)
        upper and lower bounds for alpha. Defaults to [1e-4, 1e4].
    target_over_score : float
        Target Over-fitting metric score
    target_under_score : float
        Target under-fitting metric score
    max_iter : int
        Maximum number of iterations to optimize goodness metrics

    Returns
    -------
    `.LightCurve`
        Corrected light curve, with noise removed. In units of electrons / second

    Examples
    --------
    The following example will perform the correction using the
    SingleScale and Spike basis vectors. It will use alpha bounds of
    [1.0,1e3]. The target over-fitting score is 0.5 and the target
    under-fitting score is 0.8.

    >>> cbv_type = ['SingleScale', 'Spike']
    >>> cbv_indices = [np.arange(1,9), 'ALL']
    >>> cbvCorrector.correct(cbv_type=cbv_type, cbv_indices=cbv_indices, # doctest: +SKIP
    >>>     alpha_bounds=[1.0,1e3], # doctest: +SKIP
    >>>     target_over_score=0.5, target_under_score=0.8) # doctest: +SKIP
    """
    # BUG FIX: mutable objects ([...], np.arange(...)) were used as default
    # argument values; defaults are evaluated once at definition time and
    # shared across all calls. Use None sentinels and build them fresh here.
    if cbv_type is None:
        cbv_type = ['SingleScale']
    if cbv_indices is None:
        cbv_indices = [np.arange(1, 9)]
    if alpha_bounds is None:
        alpha_bounds = [1e-4, 1e4]

    # Perform all the preparatory stuff common to all correct methods
    self._correct_initialization(cbv_type=cbv_type,
                                 cbv_indices=cbv_indices, ext_dm=ext_dm)

    # Create a dictionary for optimization parameters to easily pass to the
    # objective function, and also to save for posterity
    self.optimization_params = {'alpha_bounds': alpha_bounds,
                                'target_over_score': target_over_score,
                                'target_under_score': target_under_score,
                                'max_iter': max_iter,
                                'cadence_mask': cadence_mask,
                                'over_metric_nSamples': 1}

    #***
    # Use scipy.optimize.minimize_scalar
    # Minimize the introduced metric
    minimize_result = minimize_scalar(self._goodness_metric_obj_fun, method='Bounded',
                                      bounds=alpha_bounds,
                                      options={'maxiter': max_iter, 'disp': False})

    # Re-fit with final alpha value
    # (scipy.optimize.minimize_scalar does not exit with the final fit!)
    self._goodness_metric_obj_fun(minimize_result.x)

    # Only display over- or under-fitting scores if requested to optimize
    # for each
    if (self.optimization_params['target_over_score'] > 0):
        self.over_fitting_score = self.over_fitting_metric(n_samples=10)
        print('Optimized Over-fitting metric: {}'.format(self.over_fitting_score))
    else:
        self.over_fitting_score = -1.0
    if (self.optimization_params['target_under_score'] > 0):
        self.under_fitting_score = self.under_fitting_metric()
        print('Optimized Under-fitting metric: {}'.format(self.under_fitting_score))
    else:
        self.under_fitting_score = -1.0
    self.alpha = minimize_result.x
    print('Optimized Alpha: {0:2.3e}'.format(self.alpha))

    return self.corrected_lc
def correct_regressioncorrector(self, design_matrix_collection, **kwargs):
    """ Pass-through method to gain access to the superclass
    RegressionCorrector.correct() method.

    All arguments are forwarded unchanged; see RegressionCorrector.correct
    for their meaning and the returned corrected light curve.
    """
    # All this does is call the superclass 'correct' method and pass the
    # input arguments.
    return super(CBVCorrector, self).correct(design_matrix_collection, **kwargs)
def over_fitting_metric(self,
                        n_samples: int = 10):
    """ Computes the over-fitting metric using
    metrics.overfit_metric_lombscargle

    See that function for a description of the algorithm.

    Parameters
    ----------
    n_samples : int
        Number of times to compute and average the metric; averaging
        stabilizes the value. Default = 10.

    Returns
    -------
    over_fitting_metric : float
        A float in the range [0,1] where 0 => Bad, 1 => Good
    """
    # A corrected light curve must exist before the metric can be computed.
    if self.corrected_lc is None:
        log.warning('A corrected light curve does not exist, please run '
                    'correct first')
        return None

    # Restrict both light curves to the unmasked cadences.
    orig_lc = self.lc.copy()[self.cadence_mask]
    corrected_lc = self.corrected_lc.copy()[self.cadence_mask]

    return overfit_metric_lombscargle(orig_lc, corrected_lc, n_samples=n_samples)
def under_fitting_metric(self,
radius: float = None,
min_targets: int = 30,
max_targets: int = 50):
""" Computes the under-fitting metric using
metrics.underfit_metric_neighbors
See that function for a description of the algorithm.
For TESS, the default radius is 5000 arcseconds.
For Kepler/K2, the default radius is 1000 arcseconds
This function will begin with the given radius in arcseconds and
finds all neighboring targets. If not enough were found (< min_targets)
the radius is increased until a minimum number are found.
The downloaded neighboring targets will be "aligned" to the
corrected_lc, meaning the cadence numbers are used to align the targets
to the corrected_lc. However, if the CBVCorrector object was
instantiated with interpolated_cbvs=True then the targets will be
interpolated to the corrected_lc cadence times.
Parameters
----------
radius : float
Search radius to find neighboring targets in arcseconds
min_targets : float
Minimum number of targets to use in correlation metric
Using too few can cause unreliable results. Default = 30
max_targets : float
Maximum number of targets to use in correlation metric
Using too many can slow down the metric due to large data
download. Default = 50
Returns
-------
under_fitting_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
# Check if corrected_lc is present
if (self.corrected_lc is None):
raise Exception('A corrected light curve does not exist, please run '
'correct first')
return None
# Set default radius if one is not provided.
if (radius is None):
if (self.lc.mission == 'TESS'):
radius = 5000
else:
radius = 1000
if self.interpolated_cbvs:
interpolate = True
else:
interpolate = False
# Make a copy of radius because it changes locally
dynamic_search_radius = radius
# Max search radius is the diagonal distance along a CCD in arcseconds
# 1 pixel in TESS is 21.09 arcseconds
# 1 pixel in Kepler/K2 is 3.98 arcseconds
if (self.lc.mission == 'TESS'):
# 24 degrees of a TESS CCD array (2 CCD's wide) is 86,400 arcseconds
max_search_radius = np.sqrt(2) * (86400/2.0)
elif (self.lc.mission == 'Kepler' or self.lc.mission == 'Kepler'):
# One Kepler CCD spans 4,096 arcseconds
max_search_radius = np.sqrt(2) * 4096
else:
raise Exception('Unknown mission')
# Ignore masked cadences
corrected_lc = self.corrected_lc.copy()
corrected_lc = corrected_lc[self.cadence_mask]
# Dynamically increase radius until min_targets reached.
continue_searching = True
while (continue_searching):
try:
metric = underfit_metric_neighbors (corrected_lc,
dynamic_search_radius, min_targets, max_targets,
interpolate)
except MinTargetsError:
# Too few targets found, try increasing search radius
if (dynamic_search_radius > max_search_radius):
# Hit the edge of the CCD, we | |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Useful subroutines, particular to aquilon, that don't fit anywhere else.
"""
from __future__ import print_function
import errno
import gzip
import json
import logging
import os
import re
import signal
import time
from itertools import islice
from tempfile import mkstemp
from uuid import UUID
from ipaddress import IPv6Address, ip_address
import jsonschema
from six.moves import cStringIO as StringIO # pylint: disable=F0401
from six import text_type
from aquilon.exceptions_ import (ArgumentError, AquilonError,
AuthorizationException)
from aquilon.config import Config
from aquilon.aqdb.types.mac_address import MACAddress
LOGGER = logging.getLogger(__name__)
yes_re = re.compile(r"^(true|yes|y|1|on|enabled)$", re.I)
no_re = re.compile(r"^(false|no|n|0|off|disabled)$", re.I)
_hex_re = re.compile(r'[0-9a-f]+$')
# Regexp used to check if a value is suitable to be used as an nlist key,
# without escaping.
nlist_key_re = re.compile('^[a-zA-Z_][a-zA-Z0-9_.-]*$')
# Regexp used to check if a value is suitable to be used as a template name
template_name_re = re.compile(r'^[a-zA-Z0-9_.-]+$')
def kill_from_pid_file(pid_file):  # pragma: no cover
    """Send SIGQUIT to the process whose PID is stored in pid_file, if any.

    Does nothing when the file does not exist; failures to signal the
    process are reported but not raised.
    """
    if not os.path.isfile(pid_file):
        return
    # FIX: use a context manager so the file handle is closed even if
    # read() raises (the original open/read/close leaked on error).
    with open(pid_file) as f:
        pid = int(f.read())
    print('Killing pid %s' % pid)
    try:
        os.kill(pid, signal.SIGQUIT)
    except OSError as err:
        print('Failed to kill %s: %s' % (pid, err.strerror))
def monkeypatch(cls):
    """
    Decorator factory that installs the decorated function as an attribute
    (typically a method) on ``cls``, returning the function unchanged.
    """
    def _install(func):
        setattr(cls, func.__name__, func)
        return func
    return _install
def validate_nlist_key(label, value):
    """Raise ArgumentError unless `value` can be used, unescaped, as an nlist key."""
    if nlist_key_re.match(value):
        return
    raise ArgumentError("'%s' is not a valid value for %s." %
                        (value, label))
def validate_template_name(label, value):
    """Raise ArgumentError unless `value` is a valid template name."""
    if template_name_re.match(value):
        return
    raise ArgumentError("'%s' is not a valid value for %s." %
                        (value, label))
def force_ip(label, value):
    """Convert *value* to an IP address object, wrapping parse errors.

    Returns None when *value* is None.  IPv6-mapped IPv4 addresses are
    rejected: auto-converting them to plain IPv4 would make e.g. audit
    log lookups more difficult.
    """
    if value is None:
        return None
    try:
        parsed = ip_address(text_type(value))
    except ValueError as e:
        raise ArgumentError("Expected an IP address for %s: %s." % (label, e))
    is_mapped = isinstance(parsed, IPv6Address) and parsed.ipv4_mapped
    if is_mapped:
        raise ArgumentError("IPv6-mapped IPv4 addresses are not supported.")
    return parsed
def force_int(label, value):
    """Coerce *value* to int, translating conversion failures to ArgumentError."""
    if value is None:
        return None
    try:
        return int(value)
    except ValueError:
        raise ArgumentError("Expected an integer for %s." % label)
def force_float(label, value):
    """Utility method to force incoming values to float and wrap errors.

    Returns None when *value* is None; raises ArgumentError when the
    value cannot be parsed as a floating point number.
    """
    if value is None:
        return None
    try:
        result = float(value)
    except ValueError:
        # Fixed grammar in the error message ("a", not "an").
        raise ArgumentError("Expected a floating point number for %s." % label)
    return result
def force_boolean(label, value):
    """Utility method to force incoming values to boolean and wrap errors.

    Accepts the yes/no style strings matched by yes_re / no_re.
    """
    if value is None:
        return None
    for pattern, result in ((yes_re, True), (no_re, False)):
        if pattern.match(value):
            return result
    raise ArgumentError("Expected a boolean value for %s." % label)
def force_mac(label, value):
    """Coerce *value* to a MACAddress, wrapping parse errors.

    None is passed through, consistent with the behavior of force_ip.
    """
    if value is None:
        return None
    try:
        mac = MACAddress(value)
    except ValueError as err:
        raise ArgumentError("Expected a MAC address for %s: %s" % (label, err))
    return mac
def force_wwn(label, value):
    """Normalize and validate a World Wide Name (WWN).

    See http://standards.ieee.org/develop/regauth/tut/fibre.pdf for a more
    detailed description of the WWN format. We do not want to be very
    strict here, to accomodate possibly broken devices out there.
    Returns the lower-case hex digits without separators, or None for
    empty input.
    """
    if not value:
        return None
    # Strip separators if present.  str.replace() is used instead of the
    # old two-argument str.translate(None, ':-'), which only exists on
    # Python 2 and raises TypeError on Python 3.
    value = str(value).strip().lower().replace(':', '').replace('-', '')
    if not _hex_re.match(value):
        raise ArgumentError("The value of %s may contain hexadecimal "
                            "characters only." % label)
    if len(value) != 16 and len(value) != 32:
        raise ArgumentError("The value of %s must contain either 16 or 32 "
                            "hexadecimal digits." % label)
    return value
def force_ascii(label, value):
    """Ensure *value* contains only ASCII characters.

    Returns the original text object rather than the encoded bytes: the
    previous implementation returned bytes on Python 3, which broke
    str-only operations downstream (e.g. force_list's startswith("#")).
    """
    if value is None:
        return None
    try:
        value.encode('ascii')
    except UnicodeEncodeError:
        raise ArgumentError("Only ASCII characters are allowed for %s." % label)
    return value
def force_list(label, value):
    """
    Convert a value containing embedded newlines to a list.

    Empty lines and lines starting with '#' are dropped.  Duplicate
    entries are rejected because they typically indicate an error and
    would cause an audit-log PK violation.
    """
    if value is None:
        return None
    cleaned = (force_ascii(label, line.strip()) for line in value.splitlines())
    seen = set()
    items = []  # preserves the original ordering
    for entry in cleaned:
        if not entry or entry.startswith("#"):
            continue
        if entry in seen:
            raise ArgumentError("Provided list contains duplicate "
                                "entry: {0:s}".format(entry))
        seen.add(entry)
        items.append(entry)
    return items
def force_json(label, value):
    """Parse *value* as JSON, wrapping decode errors in ArgumentError."""
    if value is None:
        return None
    try:
        return json.loads(value)
    except ValueError as e:
        raise ArgumentError("Expected a JSON-encoded value for %s: %s" %
                            (label, e))
def force_uuid(label, value):
    """Utility method to force incoming values to UUID and wrap errors."""
    if value is None:
        return None
    try:
        value = UUID(value)
    except ValueError:
        raise ArgumentError("Expected an UUID for %s." % label)
    return value
def first_of(iterable, function):
    """
    Return the first element of *iterable* satisfying *function*, or None.

    This function is useful if you already know there is at most one
    matching element.
    """
    return next((item for item in iterable if function(item)), None)
def chunk(iterable, size):
    """
    Yield tuples of at most *size* consecutive items from *iterable*.
    """
    iterator = iter(iterable)
    piece = tuple(islice(iterator, size))
    while piece:
        yield piece
        piece = tuple(islice(iterator, size))
def remove_dir(dir, logger=LOGGER):
    """Remove a directory. Could have been implemented as a call to rm -rf.

    The tree is walked bottom-up, removing files first and then the (by
    then empty) directories.  Failures are logged at info level and do
    not abort the walk, so as much as possible gets cleaned up.
    """
    for root, dirs, files in os.walk(dir, topdown=False):
        for name in files:
            try:
                thisfile = os.path.join(root, name)
                os.remove(thisfile)
            except OSError as e:
                logger.info("Failed to remove '%s': %s", thisfile, e)
        for name in dirs:
            try:
                thisdir = os.path.join(root, name)
                os.rmdir(thisdir)
            except OSError as e:
                # If this 'directory' is a symlink, the rmdir command
                # will fail. Try to remove it as a file. If this
                # fails, report the original error.
                try:
                    os.remove(thisdir)
                except OSError:
                    logger.info("Failed to remove '%s': %s", thisdir, e)
    # Finally remove the (now empty) top-level directory itself.
    try:
        os.rmdir(dir)
    except OSError as e:
        logger.info("Failed to remove '%s': %s", dir, e)
    return
def write_file(filename, content, mode=None, compress=None,
               create_directory=False, logger=LOGGER):
    """Atomically write content into the specified filename.

    The content is written into a temp file in the same directory as
    filename, and then swapped into place with rename. This assumes
    that both the file and the directory can be written to by the
    broker. The same directory was used instead of a temporary
    directory because atomic swaps are generally only available when
    the source and the target are on the same filesystem.

    If mode is set, change permissions on the file (newly created or
    pre-existing) to the new mode. If unset and the file exists, the
    current permissions will be kept. If unset and the file is new,
    the default is 0644.

    This method may raise OSError if any of the OS-related methods
    (creating the temp file, writing to it, correcting permissions,
    swapping into place) fail. The method will attempt to remove
    the temp file if it had been created.

    If the compress keyword is passed, the content is compressed in
    memory before writing. The only compression currently supported
    is gzip.
    """
    if compress == 'gzip':
        config = Config()
        buffer = StringIO()
        # Replace the 'gzip' marker with the numeric compression level.
        compress = config.getint('broker', 'gzip_level')
        # NOTE(review): GzipFile writes bytes; a StringIO text buffer works
        # with Python 2's cStringIO but would fail under Python 3 -- confirm
        # a BytesIO is used if this path ever runs on Python 3.
        zipper = gzip.GzipFile(filename, 'wb', compress, buffer)
        zipper.write(content)
        zipper.close()
        content = buffer.getvalue()
    if mode is None:
        # Preserve the permissions of a pre-existing file; fall back to
        # 0644 for new files.
        try:
            old_mode = os.stat(filename).st_mode
        except OSError:
            old_mode = 0o644
    dirname, basename = os.path.split(filename)
    if not os.path.exists(dirname) and create_directory:
        os.makedirs(dirname)
    fd, fpath = mkstemp(prefix=basename, dir=dirname)
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(content)
        if mode is None:
            os.chmod(fpath, old_mode)
        else:
            os.chmod(fpath, mode)
        # Atomic swap; requires source and target on the same filesystem.
        os.rename(fpath, filename)
    finally:
        # After a successful rename the temp path no longer exists, so
        # this only cleans up on failure.
        if os.path.exists(fpath):
            os.remove(fpath)
def remove_file(filename, cleanup_directory=False, logger=LOGGER):
    """Remove a file, optionally pruning now-empty parent directories.

    Returns True if the file was removed, False if it did not exist or
    could not be removed.  A missing file is not treated as an error.
    """
    try:
        os.remove(filename)
    except OSError as e:
        if e.errno != errno.ENOENT:
            logger.info("Could not remove file '%s': %s", filename, e)
        # Bug fix: the old code fell off the end and returned None for
        # non-ENOENT failures; always report failure as False.
        return False
    if cleanup_directory:
        try:
            os.removedirs(os.path.dirname(filename))
        except OSError:
            # Parent directory not empty -- leave it in place.
            pass
    return True
class ProgressReport(object):
def __init__(self, logger, total, item_name, interval=10.0):
self.logger = logger
self.total = total
self.item_name = item_name
self.interval = interval
self.count = 0
self.last_report = time.time()
def step(self):
self.count += 1
now = time.time()
if now - self.last_report >= self.interval: # pragma: no cover
self.last_report = now
self.logger.client_info("Processing %s %d of %d..." %
| |
transition_matrix.transpose()
for i in range(1, sequence_length):
prev_p = state_probabilities[i - 1, :] + transpose
best_previous_nodes = np.argmax(prev_p, axis=1)
state_probabilities[i] = np.max(prev_p, axis=1)
state_probabilities[i] += potentials[i, :]
best_paths[i, :] = best_previous_nodes
best_path[-1] = np.argmax(state_probabilities[-1, :])
for i in reversed(range(1, sequence_length)):
best_path[i - 1] = best_paths[i, best_path[i]]
return best_path
class NeuralLogDataset(ABC):
    """
    Represents a NeuralLog dataset to train a NeuralLog network.
    """

    program: NeuralLogProgram
    "The NeuralLog program"

    def __init__(self, program, inverse_relations=True):
        """
        Creates a NeuralLogNetwork.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        self.program = program
        self.inverse_relations = inverse_relations
        # Populated by subclasses with a list of (Predicate, inverted) pairs.
        self._target_predicates = None

    @property
    def target_predicates(self):
        """
        Gets the target predicates.

        :return: the target predicates
        :rtype: List[Tuple[Predicate, bool]]
        """
        return self._target_predicates

    @target_predicates.setter
    def target_predicates(self, value):
        """
        Sets the target predicates.

        :param value: the target predicates
        :type value: List[Tuple[Predicate, bool]]
        """
        self._target_predicates = value

    @abstractmethod
    def has_example_key(self, key):
        """
        Checks if the dataset contains the example key.

        :param key: the example key
        :type key: Any
        :return: if the dataset contains the atom example
        :rtype: bool
        """
        pass

    @abstractmethod
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """
        Gets the data set for the example set.

        :param example_set: the name of the example set
        :type example_set: str
        :param batch_size: the batch size
        :type batch_size: int
        :param shuffle: if `True`, shuffles the dataset.
        :type shuffle: bool
        :return: the dataset
        :rtype: tf.data.Dataset
        """
        pass

    @abstractmethod
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        pass

    def get_target_predicates(self):
        """
        Gets a list of tuples containing the target predicates and whether it
        is inverted or not.

        :return: the list of target predicates
        :rtype: list[tuple[Predicate, bool]]
        """
        return self._target_predicates

    @abstractmethod
    def print_predictions(self, model, program, dataset, writer=sys.stdout,
                          dataset_name=None, print_batch_header=False):
        """
        Prints the predictions of `model` to `writer`.

        :param model: the model
        :type model: NeuralLogNetwork
        :param program: the neural program
        :type program: NeuralLogProgram
        :param dataset: the dataset
        :type dataset: tf.data.Dataset
        :param writer: the writer. Default is to write to the standard output
        :type writer: Any
        :param dataset_name: the name of the dataset
        :type dataset_name: str
        :param print_batch_header: if `True`, prints a commented line before
        each batch
        :type print_batch_header: bool
        """
        pass
@neural_log_dataset("default_dataset")
class DefaultDataset(NeuralLogDataset):
"""
The default NeuralLog dataset.
"""
    def __init__(self, program, inverse_relations=True):
        """
        Creates a DefaultDataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        super(DefaultDataset, self).__init__(program, inverse_relations)
        # Targets are derived from the examples found in the program;
        # inverse variants of binary predicates are appended when enabled.
        self._target_predicates = self._compute_target_predicates()
        # Set of all example keys, used for fast membership tests.
        self.example_keys = self._load_example_keys()
def _load_example_keys(self):
example_keys = set()
for example_set in self.program.examples.values():
for examples_by_predicate in example_set.values():
for keys in examples_by_predicate.keys():
example_keys.add(keys)
return example_keys
def _compute_target_predicates(self):
target_predicates = []
predicates = set()
for example_set in self.program.examples.values():
for predicate in example_set:
if predicate in predicates:
continue
predicates.add(predicate)
target_predicates.append((predicate, False))
if self.inverse_relations and predicate.arity == 2:
target_predicates.append((predicate, True))
return target_predicates
    # noinspection PyMissingOrEmptyDocstring
    def has_example_key(self, key):
        # Membership test against the pre-computed set of example keys.
        return key in self.example_keys
    # noinspection PyUnusedLocal,DuplicatedCode
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.

        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                # One-hot encode each input index against the constant
                # vocabulary of the corresponding predicate term.
                feature = tf.one_hot(
                    features[count],
                    self.program.get_constant_size(predicate, index))
                dense_features.append(feature)
                count += 1
        # Labels are stored sparse; densify them for training.
        labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            # A single feature tensor is returned unwrapped.
            dense_features = dense_features[0]
        return dense_features, labels

    # Make the dataset instance itself callable (used by Dataset.map).
    __call__ = call
    # noinspection PyMissingOrEmptyDocstring
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        features, labels = self.build(example_set=example_set)
        # noinspection PyTypeChecker
        if not features:
            return None
        if self.are_features_empty(features):
            return None
        dataset_size = len(features[0])
        dataset = tf.data.Dataset.from_tensor_slices((features, labels))
        if shuffle:
            dataset = dataset.shuffle(dataset_size)
        dataset = dataset.batch(batch_size)
        # `self` is callable (see `call`): maps sparse batches to dense.
        dataset = dataset.map(self)
        logger.debug("Dataset %s created with %d example(s)", example_set,
                     dataset_size)
        return dataset
    def are_features_empty(self, features):
        """
        Checks if the features are empty.

        A single row consisting solely of out-of-vocabulary indices is
        also treated as empty.

        :param features: the features
        :type features: List[List[int]] or Tuple[List[int]]
        :return: `True`, if the features are empty
        :rtype: bool
        """
        size = len(features[0])
        if not size:
            return True
        if size > 1:
            # More than one row: at least one real example exists.
            return False
        index = 0
        for i in range(len(self._target_predicates)):
            in_indices, out_index = \
                get_predicate_indices(*self._target_predicates[i])
            for j in range(len(in_indices)):
                empty_value = self._get_out_of_vocabulary_index(
                    self._target_predicates[i][0], in_indices[j])
                if empty_value != features[index][0]:
                    # Found a known constant; the features are not empty.
                    return False
                index += 1
        return True
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor]) or
        (list[tuple[tf.SparseTensor]], tuple[tf.SparseTensor])
        """
        # Fall back to an empty example set when the name is unknown.
        examples = self.program.examples.get(example_set, OrderedDict())
        return self._build(examples)
    def ground_atom(self, example):
        """
        Grounds the example by replacing the value of the variables for each
        possible value found in the program.

        :param example: the example
        :type example: Atom
        :return: the grounded atoms
        :rtype: collections.Iterable[Atom]
        """
        if example.is_grounded():
            # Nothing to expand; return a 1-tuple holding the example itself.
            return example,
        current_atoms = deque([example])
        predicate = example.predicate
        term_types: Tuple[TermType] = self.program.predicates[predicate]
        for i in range(example.arity()):
            if example.terms[i].is_constant():
                continue
            # Expand every partially-grounded atom on the current variable
            # term, producing the cross product over possible values.
            next_atoms = deque()
            for atom in current_atoms:
                if term_types[i].number:
                    # Numeric terms are grounded with a neutral 0.0 value.
                    terms = list(atom.terms)
                    terms[i] = 0.0
                    next_atoms.append(
                        Atom(predicate, *terms, weight=example.weight))
                else:
                    possible_terms = \
                        self.program.iterable_constants_per_term[(predicate, i)]
                    for constant in possible_terms.values():
                        terms = list(atom.terms)
                        terms[i] = constant
                        next_atoms.append(
                            Atom(predicate, *terms, weight=example.weight))
            current_atoms = next_atoms
        return current_atoms
    def _build(self, examples):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param examples: the set of examples
        :type examples: Examples
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        # First pass: group the outputs of every grounded example by its
        # input term(s), preserving first-seen input order.
        output_by_term = OrderedDict()
        input_terms = []
        for predicate, inverted in self._target_predicates:
            facts = examples.get(predicate, dict())
            facts = facts.values()
            for example in facts:
                for fact in self.ground_atom(example):
                    if predicate.arity < 3:
                        # Unary/binary: the input is a single term; for an
                        # inverted binary relation it is the last term.
                        input_term = (fact.terms[-1 if inverted else 0],)
                    else:
                        input_term = tuple(fact.terms[0:predicate.arity - 1])
                    if input_term not in output_by_term:
                        output = dict()
                        output_by_term[input_term] = output
                        input_terms.append(input_term)
                    else:
                        output = output_by_term[input_term]
                    if predicate.arity == 1:
                        # Unary facts carry a single weight as the label.
                        output[(predicate, inverted)] = fact.weight
                    else:
                        output_term = fact.terms[0 if inverted else -1]
                        # noinspection PyTypeChecker
                        output.setdefault((predicate, inverted), []).append(
                            (output_term, fact.weight))
        # Second pass: emit, per target predicate, the input index features
        # and a sparse label tensor over all collected input terms.
        all_features = []
        all_labels = []
        for predicate, inverted in self._target_predicates:
            features = [[] for _ in range(max(1, predicate.arity - 1))]
            label_values = []
            label_indices = []
            in_indices, out_index = get_predicate_indices(predicate, inverted)
            for i in range(len(input_terms)):
                outputs = output_by_term[input_terms[i]].get(
                    (predicate, inverted), None)
                constant_index = 0
                for input_index in in_indices:
                    index = None
                    if outputs is not None:
                        index = self.program.get_index_of_constant(
                            predicate, input_index,
                            input_terms[i][constant_index])
                    if index is None:
                        # Unknown constant: fall back to the OOV index.
                        index = self._get_out_of_vocabulary_index(
                            predicate, input_index)
                    features[constant_index].append(index)
                    constant_index += 1
                if outputs is not None:
                    if predicate.arity == 1:
                        label_indices.append([i, 0])
                        label_values.append(outputs)
                    else:
                        # noinspection PyTypeChecker
                        for output_term, output_value in outputs:
                            output_term_index = \
                                self.program.get_index_of_constant(
                                    predicate, out_index, output_term)
                            label_indices.append([i, output_term_index])
                            label_values.append(output_value)
            all_features += features
            if predicate.arity == 1:
                dense_shape = [len(input_terms), 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    len(input_terms),
                    self.program.get_constant_size(predicate, out_index)]
                empty_index = [[0, 0]]
            if len(label_values) == 0:
                # No labels for this predicate: emit a single zero entry so
                # the sparse tensor is still well-formed.
                sparse_tensor = tf.SparseTensor(indices=empty_index,
                                                values=[0.0],
                                                dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(indices=label_indices,
                                                values=label_values,
                                                dense_shape=dense_shape)
                sparse_tensor = tf.sparse.reorder(sparse_tensor)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)
def _get_out_of_vocabulary_index(self, predicate, term_index):
"""
Returns the index of the entity to replace the not found entity.
:param predicate: the predicate
:type predicate: Predicate
:param term_index: the index of the term
:type term_index: int
:return: the index of entity to replace | |
<reponame>eneelo/qats<filename>qats/stats/gumbelmin.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:class:`GumbelMin` class and functions related to Gumbel (minima) distribution.
"""
import numpy as np
from scipy.special import zetac
from scipy.optimize import leastsq, fsolve
from matplotlib.pyplot import figure, ylabel, yticks, plot, legend, grid, show, xlabel, ylim, savefig
from .empirical import empirical_cdf
from .gumbel import _euler_masceroni as em
# todo: move fit methods e.g. _msm from class to standalone functions (importable)
# todo: check fit procedures (read up once more and check implementation)
# todo: create unit tests
class GumbelMin(object):
"""
The Gumbel minima distribution.
The cumulative distribution function is defined as::
F(x) = 1 - exp{-exp[(x-a)/b]}
where `a` is location parameter and `b` is the scale parameter.
Parameters
----------
loc : float
Gumbel location parameter.
scale : float
Gumbel scale parameter.
data : array_like, optional
Sample data, used to establish empirical cdf and is included in plots.
To fit the Gumbel distribution to the sample data, use :meth:`GumbelMin.fit`.
Attributes
----------
loc : float
Gumbel location parameter.
scale : float
Gumbel scale parameter.
data : array_like
Sample data.
Examples
--------
To initiate an instance based on parameters, use:
>>> from qats.stats.gumbelmin import GumbelMin
>>> gumb = GumbelMin(loc, scale)
If you need to establish a Gumbel instance based on a sample data set, use:
>>> gumb = GumbelMin.fit(data, method='msm')
References
----------
1. <NAME>. (1975) Statistical models in applied science. Wiley, New York
2. <NAME>. (2007), "Bruk av asymptotiske ekstremverdifordelinger"
3. `Plotting positions <http://en.wikipedia.org/wiki/Q%E2%80%93Q_plot>`_, About plotting positions
4. `Usable estimators for parameters in Gumbel distribution
<http://stats.stackexchange.com/questions/71197/usable-estimators-for-parameters-in-gumbel-distribution>`_
5. `Bootstrapping statistics <https://en.wikipedia.org/wiki/Bootstrapping_(statistics)>`_
"""
def __init__(self, loc=None, scale=None, data=None):
self.location = loc
self.scale = scale
if data is not None:
self.data = np.array(data)
else:
self.data = None
    @property
    def cov(self):
        """
        Distribution coefficient of variation (C.O.V.)

        Returns
        -------
        c : float
            distribution c.o.v.
        """
        # Ratio of the distribution standard deviation to its mean.
        return self.std / self.mean
@property
def ecdf(self):
"""
Median rank empirical cumulative distribution function associated with the sample
Notes
-----
Gumbel recommended the following mean rank quantile formulation Pi = i/(n+1).
This formulation produces a symmetrical CDF in the sense that the
same plotting positions will result from the data regardless of
whether they are assembled in ascending or descending order.
A more sophisticated median rank formulation Pi = (i-0.3)/(n+0.4) approximates the
median of the distribution free estimate of the sample variate to about
0.1% and, even for small values of n, produces parameter estimations
comparable to the result obtained by maximum likelihood estimations (Bury, 1999, p43)
A median rank method, pi=(i-0.3)/(n+0.4), is chosen to approximate the mean of the distribution [2]
The empirical cdf is also used as plotting positions when plotting the sample
on probability paper.
"""
x = np.array(self.data)
try:
#p = (np.arange(x.size) + 1. - 0.3) / (x.size + 0.4)
p = empirical_cdf(self.data.size, kind='median')
return p
except TypeError:
print("The sample is not defined.")
@property
def kurt(self):
"""
Distribution kurtosis
Returns
-------
k : float
distribution kurtosis
"""
try:
k = 12. / 5.
return k
except TypeError:
print("Distribution parameters are not defined.")
    @property
    def mean(self):
        """
        Distribution mean value

        Returns
        -------
        m : float
            distribution mean value
        """
        try:
            # location - scale * Euler-Mascheroni constant: the minima
            # distribution mirrors the maxima one (location + scale*gamma).
            m = self.location - self.scale * em()
            return m
        except TypeError:
            # Raised when location/scale are still None.
            print("Distribution parameters are not defined.")
@property
def median(self):
"""
Distribution median value
Returns
-------
m : float
distribution median value
"""
try:
m = self.location + self.scale * np.log(np.log(2.))
return m
except TypeError:
print("Distribution parameters are not defined.")
@property
def mode(self):
"""
Distribution mode value
Returns
-------
m : float
distribution mode value
"""
try:
m = self.location
return m
except TypeError:
print("Distribution parameters are not defined.")
@property
def std(self):
"""
Distribution standard deviation
Returns
-------
s : float
distribution standard deviation
"""
try:
s = np.pi * self.scale / np.sqrt(6)
return s
except TypeError:
print("Distribution parameters are not defined.")
    @property
    def skew(self):
        """
        Distribution skewness

        Returns
        -------
        s : float
            distribution skewness
        """
        try:
            # zetac is the complementary Riemann zeta function (zeta function minus 1)
            # http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.zetac.html
            # The minima distribution mirrors the maxima one, hence the
            # negative sign: -12*sqrt(6)*zeta(3)/pi**3.
            s = -12. * np.sqrt(6.) * (1. + zetac(3)) / np.pi ** 3
            return s
        except TypeError:
            print("Distribution parameters are not defined.")
    def bootstrap(self, size=None, method='msm', N=100):
        """
        Parametric bootstrapping of source distribution

        Parameters
        ----------
        size : int
            bootstrap sample size. default equal to source sample size
        method : {'msm','lse','mle'}
            method of fit, optional

            'msm' = method of sample moments
            'lse' = least-square estimation
            'mle' = maximum likelihood estimation
        N : int
            number of bootstrap samples, default 100

        Returns
        -------
        array-like
            m - mean distribution parameters
        array_like
            cv - coefficient of variation of distribution parameter

        Notes
        -----
        In statistics, bootstrapping is a method for assigning measures of
        accuracy to sample estimates (variance, quantiles).  A parametric
        model is fitted to the data, samples of the same size as the
        original data are drawn from the fitted model, and the estimate of
        interest is recomputed for each.  Increasing the number of samples
        cannot increase the amount of information in the original data; it
        can only reduce the effects of random sampling errors arising from
        the bootstrap procedure itself.  See [5] about bootstrapping.
        """
        # NOTE(review): this method dispatches to self._msm/_lse/_mle while
        # fit() uses the module-level msm/lse/mle functions -- confirm the
        # class methods still exist (see the module-level todo about moving
        # them out of the class).
        options = {'msm': self._msm, 'lse': self._lse, 'mle': self._mle}
        assert method.lower() in options.keys(), "Method must be either %s" % (' or '.join(options.keys()))
        if size is None:
            assert self.data is not None, "Either size has to be specified or a sample has to be specified."
            size = np.size(self.data)
        i = 0
        par = np.zeros((N, 2))
        # Draw N synthetic samples and refit the parameters to each.
        while (i < N):
            x = self.rnd(size=size)
            par[i, :] = options[method](x)
            i += 1
        m = par.mean(axis=0)
        cv = par.std(axis=0, ddof=1) / m
        return m, cv
def cdf(self, x=None):
"""
Cumulative distribution function (cumulative probability) for specified values x
Parameters
----------
x : array_like
values
Returns
-------
cdf : array
cumulative probabilities for specified values x
Notes
-----
A range of x values [location, location+3*std] are applied if x is not specified.
"""
try:
if x is None:
x = np.linspace(self.loc, self.loc - 3. * self.std, 100)
else:
x = np.array(x)
assert self.scale > 0., "The scale parameter must be larger than 0."
z = (x - self.location) / self.scale
p = 1. - np.exp(-np.exp(z))
return p
except TypeError:
print("Distribution parameters are not defined")
    def fit(self, data=None, method='msm', verbose=False):
        """
        Determine distribution parameters by fit to sample.

        Parameters
        ----------
        data : array_like
            sample, optional
        method : {'msm','lse','mle'}
            method of fit, optional

            'msm' = method of sample moments
            'lse' = least-square estimation
            'mle' = maximum likelihood estimation
        verbose : bool
            turn on output of fitted parameters

        Notes
        -----
        If data is not input any data stored in object (self.data) will be used.
        """
        # NOTE(review): msm/lse/mle are assumed to be module-level fit
        # functions defined elsewhere in this module (unlike bootstrap(),
        # which uses self._msm/_lse/_mle) -- confirm both sets exist.
        options = {'msm': msm, 'lse': lse, 'mle': mle}
        assert method.lower() in options.keys(), "Method must be either %s" % (' or '.join(options.keys()))
        if data is not None:
            # update sample data
            self.data = np.array(data).reshape(np.shape(data))  # make vector shaped
        try:
            self.location, self.scale = options[method](self.data)
            if verbose:
                print("Fitted parameters:\nlocation = %(location)5.3g\nscale = %(scale)5.3g" % self.__dict__)
        except TypeError:
            # Raised when no sample data is available to fit against.
            print("The sample data is not defined.")
def fit_from_weibull_parameters(self, wa, wb, wc, n, verbose=False):
"""
Calculate Gumbel distribution parameters from n independent Weibull distributed variables.
Parameters
----------
wa : float
Weibull location parameter
wb : float
Weibull scale parameter
wc : float
Weibull shape parameter
n : int
Number independently distributed variables
verbose : bool
print fitted parameters
Notes
-----
A warning is issued if Weibull shape parameter less than 1. In this case,
the convergence towards asymptotic extreme value distribution is slow
, and the asymptotic distribution will be non-conservative relative
to the exact | |
<reponame>mcnowinski/various-and-sundry
# this program requires the 32 bit version of Python!!
import os
import glob
import math
import subprocess
import re
import sys
import string
from decimal import Decimal
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from scipy.ndimage import median_filter
from pyds9 import DS9
import argparse
import pandas as pd
import ch # custom callHorizons library
import dateutil
from datetime import datetime
from datetime import timedelta
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import pandas as pd
from astropy.time import Time
import struct
import logging
# logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(name)s - %(funcName)s - %(message)s',
handlers=[
logging.FileHandler("sexerizer.log"),
logging.StreamHandler()
])
logger = logging.getLogger('sexerizer')
#
# START SETTINGS
# MODIFY THESE FIELDS AS NEEDED!
#
# input path *with* ending forward slash
input_path = './'
# output path *with* ending forward slash
sex_output_path = './sexerizer/'
# suffix for output files, if any...
sex_output_suffix = '.sex'
# path to sextractor executable and config files (incl. the filenames!)
sextractor_bin_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sextractor.exe'
sextractor_cfg_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.sex'
sextractor_param_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.param'
sextractor_filter_fname = os.path.dirname(
os.path.realpath(__file__)) + '\\' + 'sexcurve.conv'
# tolerance for object matching
dRa = 0.00062
dDec = 0.00062
# target/comp list
comps_fname = './comps.in.txt'
targets_out_fname = './targets.out.csv'
counts_out_fname = './counts.out.csv'
# mask file that identifies bad pixels
bad_pixels_fname = './bad_pixels.txt'
cleaned_output_path = './cor/'
# observatory code
obs_code = 'G52'
# panstarrs
# panstarrs ref magnitude
pso_ref_mag = 'rPSFMag'
# panstarrs max magnitude
pso_max_mag = 15
# panstarrs min magnitude
pso_min_mag = 0
# figure count
figure_count = 1
#
# END SETTINGS
#
# run external process
def runSubprocess(command_array):
    """Run an external command, discarding its console output.

    command_array is a list with the command and all required parameters.
    Returns (stdout, stderr, pid) from the finished process; both streams
    are None because output is redirected to os.devnull.  On failure a
    ('', '', 0) tuple is returned and the error is logged.
    """
    sp = None
    try:
        with open(os.devnull, 'w') as fp:
            sp = subprocess.Popen(command_array, stderr=fp, stdout=fp)
            # logger.info('Running subprocess ("%s" %s)...'%(' '.join(command_array), sp.pid))
            sp.wait()
            output, error = sp.communicate()
        return (output, error, sp.pid)
    except Exception:
        # Bug fix: the old handler referenced sp.pid even when Popen
        # itself failed, raising NameError inside the except block.
        pid = sp.pid if sp is not None else 0
        logger.info('Error. Subprocess ("%s" %d) failed.' %
                    (' '.join(command_array), pid))
        return ('', '', 0)
def jdToYYMMDD_HHMMSS(jd):
    # Convert an epoch to a human-readable UTC date-time string.
    # NOTE(review): despite the name, the input is interpreted as a
    # *modified* Julian date (format='mjd') and the output is astropy's
    # ISO format, not YYMMDD_HHMMSS -- confirm callers expect this.
    t = Time(jd, format='mjd', scale='utc')
    return t.iso
def onclick(event):
    # Matplotlib click handler: log the data-space coordinates of the click.
    logger.info('xdata=%f, ydata=%f' % (event.xdata, event.ydata))
def onclick_mag_vs_JD(event):
    """Matplotlib click handler for mag-vs-JD plots: log timestamp and magnitude."""
    when = jdToYYMMDD_HHMMSS(event.xdata)
    logger.info('JD=%s, mag=%f' % (when, event.ydata))
# set up the command line argument parser
parser = argparse.ArgumentParser(
    description='Perform lightcurve photometry using sextractor.')
parser.add_argument('--plot_apd', action='store_true',
                    help='Plot average object magnitude vs. aperture diameter for all images.')
parser.add_argument('--plot_ds9', action='store_true',
                    help='Plot apertures for each image using DS9.')
parser.add_argument('--labels', action='store_true',
                    help='Add labels to magnitude plot(s).')
parser.add_argument("--apd", help="Perform analysis for one or more apertures (csv).",
                    type=str)
args = parser.parse_args()
# make sure input files and folder exist
inputs = [input_path, sextractor_bin_fname, sextractor_cfg_fname,
          sextractor_param_fname, sextractor_filter_fname, comps_fname]
# BUG FIX: the loop previously tested os.path.exists(input_path) for every
# entry, so missing config/executable files were never detected.  The loop
# variable is also renamed so it no longer shadows the builtin input().
for required_path in inputs:
    if not os.path.exists(required_path):
        raise Exception('Error. The file or path (%s) does not exist.' % required_path)
# do output directories exist? If not, create them...
outputs = [sex_output_path, cleaned_output_path]
for output_dir in outputs:
    try:
        os.mkdir(output_dir)
    # narrowed from a bare except: only filesystem errors (e.g. directory
    # already exists) are expected and deliberately ignored here
    except OSError:
        pass
# grab aperture settings from sextractor config file, hopefully it will match the magnitude array ;)
# Scan the config for a PHOT_APERTURES line; if several match, the last wins.
apertures = []
apertures_string = ''
with open(sextractor_cfg_fname) as cfg:
    for line in cfg:
        match = re.match(r'^PHOT_APERTURES([\s\.0-9\,]+)', line)
        if not match:
            continue
        apertures_string = match.group(1).strip()
        apertures = np.array(
            [int(diameter) for diameter in apertures_string.split(',')])
logger.info('Photometry to be performed for %d aperture diameters: %s.' %
            (len(apertures), apertures_string))
# Run SExtractor over every FITS image in the input directory, collecting
# per-image metadata (time, airmass, pointing, field radius) as we go.
image_data = []
# get a list of all FITS files in the input directory
fits_files = glob.glob(input_path+'*.fits')+glob.glob(input_path+'*.fit')
# loop through all qualifying files and perform sextraction
for fits_file in sorted(fits_files):
    fits_data = fits.open(fits_file)
    header = fits_data[0].header
    wcs = WCS(header)
    try:
        airmass = header['AIRMASS']
        dt_obs = dateutil.parser.parse(header['DATE-OBS'])
        naxis1 = header['NAXIS1']
        naxis2 = header['NAXIS2']
        ra = header['CRVAL1']
        dec = header['CRVAL2']
        # NOTE(review): despite the name, JD holds the MJD-OBS value; the
        # rest of the script (Time(..., format='mjd')) treats it as MJD
        JD = header['MJD-OBS']
    except KeyError:
        raise Exception('Error. Invalid FITS header in %s.' % fits_file)
    # calculate image corners in ra/dec
    ra1, dec1 = wcs.all_pix2world(0, 0, 0)
    ra2, dec2 = wcs.all_pix2world(naxis1, naxis2, 0)
    # calculate search radius in degrees from the center!
    c1 = SkyCoord(ra1, dec1, unit="deg")
    c2 = SkyCoord(ra2, dec2, unit="deg")
    # estimate radius of FOV in arcmin (half the corner-to-corner separation)
    r_arcmin = '%f' % (c1.separation(c2).deg*60/2)
    logger.info("Sextracting %s" % (fits_file))
    # catalog filename: <sex_output_path><image basename><suffix>.txt
    output_file = sex_output_path + \
        fits_file.replace('\\', '/').rsplit('/', 1)[1]
    output_file = '%s%s.txt' % (output_file, sex_output_suffix)
    # add input filename, output filename, airmass, and jd to sex_file list
    image_data.append(
        {'image': fits_file, 'sex': output_file, 'jd': JD, 'airmass': airmass, 'ra': ra, 'dec': dec, 'dt_obs': dt_obs, 'r_arcmin': r_arcmin, 'found_target': True})
    # sextract this file
    (output, error, id) = runSubprocess([sextractor_bin_fname, fits_file, '-c', sextractor_cfg_fname, '-catalog_name',
                                         output_file, '-parameters_name', sextractor_param_fname, '-filter_name', sextractor_filter_fname])
    if error:
        raise Exception('Error. Sextractor failed: %s' % output)
logger.info('Sextracted %d files.' % len(image_data))
# build list of comparison stars in comps_fname using
# PanSTARRS Stack Object Catalog Search
# (field center and radius are taken from the first image's metadata)
logger.info('Searching for comparison stars in the PANSTARRS catalog (ra=%s deg, dec=%s deg, radius=%s min)...' %
            (image_data[0]['ra'], image_data[0]['dec'], image_data[0]['r_arcmin']))
pso_url_base = 'http://archive.stsci.edu/panstarrs/stackobject/search.php'
# %% escapes yield literal %2C (comma) separators in the column list
pso_url_parms = '?resolver=Resolve&radius=%s&ra=%s&dec=%s&equinox=J2000&nDetections=&selectedColumnsCsv=objname%%2Cobjid%%2Cramean%%2Cdecmean%%2Cgpsfmag%%2Crpsfmag%%2Cipsfmag' + \
    '&coordformat=dec&outputformat=CSV_file&skipformat=on' + \
    '&max_records=50001&action=Search'
url = pso_url_base + \
    pso_url_parms % (image_data[0]['r_arcmin'], image_data[0]['ra'], image_data[0]
                     ['dec'])
# get the results of the REST query (pandas fetches the URL directly)
comps = pd.read_csv(url)
if len(comps) <= 0:
    raise Exception('Error. No comparison stars found!')
# remove dupes, keep first
comps.drop_duplicates(subset=['objName'], keep='first', inplace=True)
# make sure magnitudes are treated as floats (bad values become NaN)
comps[pso_ref_mag] = pd.to_numeric(comps[pso_ref_mag], errors='coerce')
# remove spaces from obj names
comps['objName'] = comps['objName'].str.replace('PSO ', '')
# filter based on ref (r?) magnitude!
comps = comps.query("%s > %f & %s < %f" %
                    (pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
if len(comps) <= 0:
    logger.info('Error. No comparison stars meet the criteria (%s > %f & %s < %f)!' %
                (pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
    exit()
logger.info('A total of %d comparison star(s) met the criteria (%s > %f & %s < %f)!' %
            (len(comps), pso_ref_mag, pso_min_mag, pso_ref_mag, pso_max_mag))
# output objects to comps_fname in sextract input format
# (space-separated: ra dec name g r i, no header)
comps_for_sex = comps[['raMean', 'decMean',
                       'objName', 'gPSFMag', 'rPSFMag', 'iPSFMag']]
comps_for_sex.to_csv(comps_fname, sep=' ', index=False, header=False)
# read ra/dec from target/comp stars list
# this is legacy and duplicative, but we will go with it
object_data = []
# FIX: use open() in a with-block instead of the Python-2-only file()
# builtin, which does not exist on Python 3
with open(comps_fname, 'rt') as sfile:
    # keep non-trivial, non-comment lines
    lines = [s for s in sfile if len(s) > 2 and s[0] != '#']
count = 0
target_index = -1
for index, l in enumerate(lines):
    # each line: ra dec name G R I (written by comps_for_sex.to_csv above)
    spl = l.split()
    ra = float(spl[0])
    dec = float(spl[1])
    name = spl[2]
    G = float(spl[3])
    R = float(spl[4])
    I = float(spl[5])
    object_data.append(
        {'index': index, 'ra': ra, 'dec': dec, 'object_name': name, 'found': True, 'G': G, 'R': R, 'I': I})
logger.info('Searching for %d objects in sextracted data.' % len(object_data))
# look for target/comp matches in sextracted files
# FIX: file() replaced with open() in with-blocks (Python-3 compatible and
# guarantees the handles are closed); the pointless `(x for x in object_data)`
# generator and the dead `found` flag are removed.
sex_data = []
with open(counts_out_fname, 'wt') as ofile:
    for image in image_data:
        num_found = 0
        with open(image['sex'], 'rt') as sexfile:
            lines = [s for s in sexfile if len(s) > 2]
        # NOTE(review): the original comment said "stop looking for it if it
        # was not found in one of the images", but no such early-out is
        # implemented -- every object is searched in every image.
        for s in object_data:
            for l in lines:
                spl = l.split()
                ra = float(spl[0])
                dec = float(spl[1])
                # positional match within the configured RA/Dec tolerance
                if abs(ra-s['ra']) < dRa and abs(dec-s['dec']) < dDec:
                    sex_data_element = {'object_index': s['index'], 'object_name': s['object_name'], 'object_ra': s['ra'], 'object_dec': s[
                        'dec'], 'jd': image['jd'], 'airmass': image['airmass'], 'image': image['image'], 'sex': image['sex']}
                    sex_data_element['ra'] = spl[0]
                    sex_data_element['dec'] = spl[1]
                    sex_data_element['x'] = spl[2]
                    sex_data_element['y'] = spl[3]
                    sex_data_element['num_apertures'] = len(apertures)
                    # catalog columns: ra dec x y mag*N magerr*N
                    for i in range(0, len(apertures)):
                        sex_data_element['mag%02d' % apertures[i]] = spl[4+i]
                        sex_data_element['magerr%02d' %
                                         apertures[i]] = spl[4+len(apertures)+i]
                    sex_data.append(sex_data_element)
                    num_found += 1
                    break
        ofile.write('%s,%d\n' % (image['sex'], num_found))
logger.info('Found %d observations of %d objects in %d sextracted files.' %
            (len(sex_data), len(object_data), len(image_data)))
# save compiled sex data to a new file
# FIX: file() replaced with a with-open block (Python-3 compatible, handle
# always closed)
with open(targets_out_fname, 'wt') as ofile:
    # header: fixed columns, then one mag and one magerr column per aperture
    line = 'index,name,airmass,jd'
    jd_index = 3
    mag_start_index = len(line.split(','))
    for i in range(0, len(apertures)):
        line += ',mag%02d' % apertures[i]
    for i in range(0, len(apertures)):
        line += ',magerr%02d' % apertures[i]
    ofile.write(line+'\n')
    # sort by star desig, then JD
    sex_data = sorted(sex_data, key=lambda x: (x['object_name'], x['jd']))
    for o in sex_data:
        line = '%d,%s,%f,%f' % (
            o['object_index'], o['object_name'], o['airmass'], o['jd'])
        for i in range(0, len(apertures)):
            line += ',%s' % o['mag%02d' % apertures[i]]
        for i in range(0, len(apertures)):
            line += ',%s' % o['magerr%02d' % apertures[i]]
        ofile.write(line+'\n')
# plot average mag vs aperture diameter if requested
if args.plot_apd:
    # FIX: np.genfromtxt accepts a filename directly, removing the
    # Python-2-only file() call
    data = np.genfromtxt(targets_out_fname, delimiter=',', skip_header=1)
    for index, s in enumerate(object_data):
        # FIX: np.array(filter(...)) breaks on Python 3 where filter()
        # returns an iterator; a list comprehension works on both versions
        filtered_array = np.array([row for row in data if row[0] == index])
        # ensure this object was detected!
        if len(filtered_array) == 0:
            continue
        fig = plt.figure(figure_count)
        cid = fig.canvas.mpl_connect('button_press_event', onclick)
        figure_count += 1
        # mean magnitude per aperture over all observations of this object
        magnitudes = np.mean(filtered_array, axis=0)[
            mag_start_index:mag_start_index+len(apertures)]
        # magnitude_stdevs = np.std(filtered_array, axis=0)[
        #     mag_start_index:mag_start_index+len(apertures)]
        # error of the mean mag is the quadrature of the original errors divided by the number of
        magnitude_errors = np.sum(filtered_array*filtered_array,
                                  axis=0)[mag_start_index+len(apertures):mag_start_index+2*len(apertures)]
        magnitude_errors = np.sqrt(magnitude_errors) / \
            np.ma.size(filtered_array, axis=0)
        plt.errorbar(apertures, magnitudes, yerr=magnitude_errors, marker='o',
                     color='black', elinewidth=0.5, linestyle='None', markersize=3)
        # brighter magnitudes are numerically smaller, so flip the y axis
        plt.gca().invert_yaxis()
        plt.xlabel('Aperture Diameter, D (pixels)')
        plt.ylabel('Ave. Instrumental Magnitude, m')
        plt.title(s['object_name'])
# plot target and comp stars in ds9
if args.plot_ds9:
ds = DS9()
ds.set('frame clear #all')
for image in image_data:
fits_file = image['image']
fname = os.path.abspath(fits_file).replace('\\', | |
# Script metadata: output file stem, results directory, and report title.
File = "HRSA_FIPS_FL_v1.3_un"
path = "fp/VBHC/ADJ/delta/"
# FIX: corrected "FLorida" typo in the report title
title = "FractureProof Final Payment Adjustments from CMS with HRSA County Data in Florida"
author = "DrewC!"
### Import FractureProof Libraries
import os # Operating system navigation
import pandas as pd # Widely used data manipulation library with R/Excel like tables named 'data frames'
import numpy as np # Widely used matrix library for numerical processes
import statsmodels.api as sm # Statistics package best for regression models for statistical tests
from sklearn.preprocessing import StandardScaler # Standard scaling for easier use of machine learning algorithms
from sklearn.impute import SimpleImputer # Univariate imputation for missing data
from sklearn.cluster import KMeans # clusters data by trying to separate samples in n groups of equal variance
from sklearn.decomposition import PCA # Principal compnents analysis from sklearn
from sklearn.ensemble import RandomForestRegressor # Random Forest regression component
from sklearn.ensemble import RandomForestClassifier # Random Forest classification component
from sklearn.feature_selection import RFECV # Recursive Feature elimination with cross validation
from sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome
from sklearn.linear_model import LogisticRegression # Used for machine learning with quantitative outcome
from sklearn.metrics import roc_curve # Reciever operator curve
from sklearn.metrics import auc # Area under the curve
from keras import Sequential # Sequential neural network modeling
from keras.layers import Dense # Used for creating layers within neural network
### Set Directory
# NOTE(review): machine-specific absolute path -- this will fail on any other
# machine; consider deriving the repository root from __file__ instead.
os.chdir("C:/Users/drewc/GitHub/allocativ") # Set wd to project repository
# Identify Clusters at County Level from HRSA Area Health Resource File
## Process Data: Subset HRSA for Florida
### Import HRSA and FIPS key then join
df_hrsa = pd.read_csv("hnb/HRSA/AHRF/AHRF_2018_2019_SAS/AHRF_full.csv", encoding = "ISO-8859-1") # Import dataset saved as csv in _data folder
df_hrsa = df_hrsa.set_index("FIPS") # Set column as index
df_hrsa = df_hrsa.loc[:, df_hrsa.columns.str.contains('2018')] # Keep only the 2018 measure columns (column labels embed the year)
df_hrsa = df_hrsa.reset_index(level = ["FIPS"]) # Reset Index
df_key = pd.read_csv("hnb/FIPS/FIPS_ZCTA_key.csv", encoding = "ISO-8859-1") # Import FIPS -> state abbreviation key
df_key = df_key.filter(["FIPS", "ST"]) # Keep only selected columns
df_key = df_key.drop_duplicates(keep = "first", inplace = False) # Drop all duplicated values
df_hrsa = pd.merge(df_hrsa, df_key, on = "FIPS", how = "inner") # Inner join keeps only counties present in both tables
df_hrsa.info() # Get class, memory, and column info: names, data types, obs.
### Join with key, subset for FL and tidy
df_fl = df_hrsa[df_hrsa["ST"] == "FL"] # Subset rows to Florida counties
df_fl = df_fl.drop(columns = ["ST"]) # Drop Unwanted Columns
first = df_fl.pop("FIPS") # 'pop' column from df
df_fl.insert(0, "FIPS", first) # reinsert as the first column
df_fl.info() # Get class, memory, and column info: names, data types, obs.
## Process Data for Unsupervised Algorithms
### Process Data for KMeans: drop sparse features, impute, and standardize
df_NA = df_fl.dropna(subset = ["FIPS"]) # Drop rows without a county FIPS code
df_NA = df_NA.reset_index() # Reset Index
df_NA = df_NA.drop(columns = ["index"]) # Drop Unwanted Columns
df_ZCTA = df_NA.filter(["FIPS"]) # Stash FIPS so it can be re-attached after scaling
df_NA = df_NA.drop(columns = ["FIPS"]) # Drop Unwanted Columns
df_NA = df_NA.dropna(axis = 1, thresh = 0.75*len(df_NA)) # Drop features less than 75% non-NA count for all columns -- NOTE(review): newer pandas requires an integer thresh; confirm the installed version accepts a float
df_NA = pd.DataFrame(SimpleImputer(strategy = "median").fit_transform(df_NA), columns = df_NA.columns) # Impute missing data with the column median
df_NA = pd.DataFrame(StandardScaler().fit_transform(df_NA.values), columns = df_NA.columns) # Standard scale values by converting the normalized features into a tabular format with the help of DataFrame.
df_NA["FIPS"] = df_ZCTA["FIPS"] # Re-attach the county identifier
df_NA = df_NA.dropna() # Drop all rows with NA values (should be none, this is just to confirm)
df_NA.info() # Get class, memory, and column info: names, data types, obs.
### PCA to determine cluster count
df_comp = df_NA.drop(columns = ["FIPS"]) # Drop Unwanted Columns
degree = len(df_comp.index) - 2 # Number of observations minus 2 -- NOTE(review): this counts ROWS, while the original comment claimed "features - 1"; confirm which was intended
pca = PCA(n_components = degree) # Pass the number of components to make PCA model based on degrees of freedom
pca.fit(df_comp) # Fit initial PCA model
df_pca = pd.DataFrame(pca.explained_variance_, columns = ["Eigenvalues"]) # Explained variance (eigenvalues) of the components
df_pca = df_pca.sort_values(by = ["Eigenvalues"], ascending = False) # Sort Columns by Value
print(df_pca) # Print value, select "elbow" to determine number of components
# K-Means Unsupervised Clustering
kmeans = KMeans(n_clusters = 3, random_state = 0) # Setup Kmeans model; the cluster count is pre-selected from the PCA elbow above
kmeans.fit(df_comp) # Fit Kmeans
km = kmeans.labels_ # Cluster label assigned to each county (0-based)
l_km = list(zip(df_NA["FIPS"], km)) # Pair each FIPS code with its cluster label
df_km = pd.DataFrame(l_km, columns = ["FIPS", "Cluster"]) # Create data frame of cluster assignments
df_km["Cluster"] = df_km["Cluster"] + 1 # Add 1 to cluster array since numpy array starts at zero
df_km = pd.merge(df_km, df_NA, on = "FIPS", how = "inner") # Join cluster labels back onto the scaled features
df_km.info() # Get class, memory, and column info: names, data types, obs.
### Import CMS VBHC Outcomes
# BUG FIX: low_memory was passed the STRING 'false', which is truthy and so
# did not disable pandas' low-memory chunked parsing; pass the boolean False.
df_cms = pd.read_csv("hnb/CMS/CMS_2018_FIPS_full.csv", low_memory = False) # Import dataset saved as csv in _data folder
df_cms = df_cms.rename(columns = {"2020 VBP Adjustment Factor": "quant"}) # Rename quantitative outcome
df_cms["test"] = np.where(df_cms["quant"] < 1, 1, 0) # Binary target: 1 when the payment adjustment factor is below 1
df_cms = df_cms.filter(["FIPS", "quant", "test"]) # Keep only selected columns
df_cms = pd.merge(df_cms, df_km, on = "FIPS", how = "right") # Right join keeps every clustered county
df_cms = df_cms.dropna() # Drop all rows with NA values
df_cms.info() # Get class, memory, and column info: names, data types, obs.
### Prep Clusters
# BUG FIX: the original filtered df_cms IN PLACE three times in a row, so the
# "Cluster == 2" and "Cluster == 3" subsets were taken from the already
# cluster-1-only frame and were always empty (leaving X and Y empty too).
# Each cluster is now subset from the full joined frame. As before, only the
# LAST assignment of X/Y (cluster 3) feeds the model sections below; rerun
# with the desired cluster selected last to analyze the others.
df_clustered = df_cms # Keep the full joined frame for repeated subsetting
df_cms = df_clustered[df_clustered["Cluster"] == 1] # Subset cluster 1
Y = df_cms["quant"] # Isolate Outcome variable
X = df_cms.drop(columns = ["Cluster", "FIPS", "quant", "test"]) # Drop outcomes and targets
df_cms.info() # Get class, memory, and column info: names, data types, obs.
df_cms = df_clustered[df_clustered["Cluster"] == 2] # Subset cluster 2
Y = df_cms["quant"] # Isolate Outcome variable
X = df_cms.drop(columns = ["Cluster", "FIPS", "quant", "test"]) # Drop outcomes and targets
df_cms.info() # Get class, memory, and column info: names, data types, obs.
df_cms = df_clustered[df_clustered["Cluster"] == 3] # Subset cluster 3
Y = df_cms["quant"] # Isolate Outcome variable
X = df_cms.drop(columns = ["Cluster", "FIPS", "quant", "test"]) # Drop outcomes and targets
df_cms.info() # Get class, memory, and column info: names, data types, obs.
### Run Random Forest
forest = RandomForestRegressor(n_estimators = 1000, max_depth = 10) # Use default values except for number of trees and max depth. For a further explanation see readme included in repository.
forest.fit(X, Y) # Fit Forest model, This will take time
rf = forest.feature_importances_ # Output importances of features
l_rf = list(zip(X, rf)) # Create list of variables alongside importance scores
df_rf = pd.DataFrame(l_rf, columns = ["Features", "Gini"]) # Create data frame of importances with variables and gini column names
df_rf = df_rf[(df_rf["Gini"] > df_rf["Gini"].quantile(0.9))] # Keep only features above the 90th percentile of Gini importance (the original comment incorrectly said "higher than mean")
df_rf.info() # Get class, memory, and column info: names, data types, obs.
### Recursive Feature Elimination
features = df_rf["Features"].tolist() # Save features kept by the random forest step
X = df_cms[features] # Subset the frame to only the forest-selected features
recursive = RFECV(estimator = LinearRegression(), min_features_to_select = 5, cv = 5) # define selection parameters, in this case all features are selected. See Readme for more info
recursive.fit(X, Y) # This will take time
rfe = recursive.support_ # Boolean mask of the features RFECV retained
l_rfe = list(zip(X, rfe)) # Create list of variables alongside RFE value
df_rfe = pd.DataFrame(l_rfe, columns = ["Features", "RFE"]) # Create data frame of features with their RFE keep/drop flag
df_rfe = df_rfe[df_rfe.RFE == True] # Select Variables that were True
df_rfe.info() # Get class, memory, and column info: names, data types, obs.
### Multiple Regression
features = df_rfe["Features"].tolist() # Save chosen features as list
X = df_cms.filter(features) # Keep only selected columns from rfe
regression = LinearRegression() # Linear Regression in scikit learn
regression.fit(X, Y) # Fit model
coef = regression.coef_ # Coefficients as a numpy array (one per feature)
l_reg = list(zip(X, coef)) # Create list of variables alongside coefficient
df_reg = pd.DataFrame(l_reg, columns = ["Features", "Coefficients"]) # Create data frame of features with their regression coefficients
df_reg = df_reg.sort_values(by = ["Coefficients"], ascending = False) # Sort Columns by Value
### Examing Clusters
# NOTE(review): F1, F2 and F3 all alias the SAME final df_reg, so the three
# print calls show identical output. The script appears designed to be rerun
# interactively once per cluster, capturing df_reg after each run -- confirm.
F1 = df_reg # Rename df
print(F1) # Print value, select "elbow" to determine number of components
F2 = df_reg # Rename df
print(F2) # Print value, select "elbow" to determine number of components
F3 = df_reg # Rename df
print(F3) # Print value, select "elbow" to determine number of components
### Export Results to Text File
# NOTE(review): no matching close() is visible at this point; confirm the
# handle is closed (or a with-block is used) after the writes that follow.
text_file = open(path + File + "_results.txt", "w") # Open
# lib/pwiki/DocPagePresenter.py
## import hotshot
## _prof = hotshot.Profile("hotshot.prf")
import traceback
import wx
import wx.xrc as xrc
from WikiExceptions import *
from wxHelper import getAccelPairFromKeyDown, copyTextToClipboard, GUI_ID
from .MiscEvent import ProxyMiscEvent # , KeyFunctionSink
from .WikiHtmlView import createWikiHtmlView
from . import DocPages
from . import SystemInfo
from .StringOps import uniToGui, escapeForIni, unescapeForIni
from .WindowLayout import LayeredControlPresenter, LayerSizer, StorablePerspective
from .PageHistory import PageHistory
from . import pygauge as PG
class BasicDocPagePresenter(LayeredControlPresenter):
    """
    Controls the group of all widgets (subcontrols) used to present/edit
    a particular doc page, currently only the WikiTxtCtrl (subcontrol name
    "textedit") and WikiHtmlView or WikiHtmlViewIE (name "preview").
    This version isn't itself a wx panel and is mainly thought for
    controlling e.g. a notebook which has the actual subcontrols as
    children.
    """
    def __init__(self, mainControl):
        # mainControl is the owning frame-like object; it supplies the wiki
        # document, status bar and the global misc event hub used below.
        LayeredControlPresenter.__init__(self)
        self.mainControl = mainControl
        self.docPage = None
        # Proxy that resends the events of whatever DocPage is currently shown
        self.currentDocPageProxyEvent = ProxyMiscEvent(self)
        self.currentDocPageProxyEvent.addListener(self)
        # Connect page history
        self.pageHistory = PageHistory(self.getMainControl(), self)
        self.getMainControl().getMiscEvent().addListener(self)
    def getMainControl(self):
        """Return the main application control owning this presenter."""
        return self.mainControl
    def getConfig(self):
        """Return the global configuration object."""
        return self.getMainControl().getConfig()
    def getDefaultFontFaces(self):
        """Return the default font faces of the presentation extension."""
        return self.getMainControl().presentationExt.faces
    def getWikiDocument(self):
        """Return the currently loaded wiki document."""
        return self.getMainControl().getWikiDocument()
    def getPageHistory(self):
        """Return the PageHistory tracking pages visited by this presenter."""
        return self.pageHistory
    def getActiveEditor(self):
        """
        For compatibility with older scripts.
        """
        return self.getSubControl("textedit")
    def SetStatusText(self, text, field):
        """Write text into the given field of the main status bar."""
        self.getStatusBar().SetStatusText(uniToGui(text), field)
    def showStatusMessage(self, msg, duration=0, key=None):
        """Show a (possibly timed) status message; delegates to main control."""
        self.getMainControl().showStatusMessage(msg, duration, key)
    def isCurrent(self):
        """Return True iff this presenter is the currently active one."""
        return self.getMainControl().getCurrentDocPagePresenter() is self
    def makeCurrent(self):
        """Ask the main area panel to make this presenter the current one."""
        self.mainControl.getMainAreaPanel().prepareCurrentPresenter(self)
    def close(self):
        """Detach listeners and release the shown page before closing."""
        LayeredControlPresenter.close(self)
        self.getMainControl().getMiscEvent().removeListener(self)
        self.pageHistory.close()
        self.setDocPage(None) # TODO: Was commented out?
    def getDocPage(self):
        """Return the DocPage currently shown (may be None)."""
        return self.docPage
    def setDocPage(self, dp):
        """Set the shown DocPage and repoint the proxy event at it."""
        self.docPage = dp
        self.currentDocPageProxyEvent.setWatchedSource(dp)
    def getCurrentDocPageProxyEvent(self):
        """
        This ProxyMiscEvent resends any messages from the currently
        active DocPage
        """
        return self.currentDocPageProxyEvent
    def getWikiWord(self):
        """Return the wiki word of the current page, or None if the shown
        page is missing or not a wiki page."""
        docPage = self.getDocPage()
        if docPage is None or not isinstance(docPage,
                (DocPages.WikiPage, DocPages.AliasWikiPage)):
            return None
        return docPage.getWikiWord()
    def getUnifiedPageName(self):
        """Return the unified page name of the current page, or None."""
        docPage = self.getDocPage()
        if docPage is None:
            return None
        return docPage.getUnifiedPageName()
    def getLiveText(self):
        """Return the current (possibly unsaved) text of the page, or None."""
        docPage = self.getDocPage()
        if docPage is None:
            return None
        return docPage.getLiveText()
    def informEditorTextChanged(self, changer):
        """
        Called by the txt editor control
        """
        if self.getDocPage() is not None:
            self.getDocPage().informEditorTextChanged(changer)
        # notify listeners (e.g. preview, tree) that live text changed
        self.fireMiscEventProps({"changed editor text": True,
                "changed live text": True, "changer": changer})
    def miscEventHappened(self, miscevt):
        """
        Handle misc events
        """
        if miscevt.getSource() is self.getMainControl():
            # TODO? Check if mainControl's current presenter is this one
            self.fireMiscEventProps(miscevt.getProps())
        elif miscevt.getSource() is self.docPage:
            # if miscevt.has_key("changed editor text"):
            # self.fireMiscEventProps(miscevt.getProps())
            # elif miscevt.has_key("deleted page"):
            # self.pageHistory.goAfterDeletion()
            if miscevt.has_key("renamed wiki page"):
                # reload under the new name after a rename
                # oldWord = self.docPage.getWikiWord()
                newWord = miscevt.get("newWord")
                self.getSubControl("textedit").loadWikiPage(None)
                self.openWikiPage(newWord, forceTreeSyncFromRoot=False)
    def getStatusBar(self):
        """Return the main frame's status bar widget."""
        return self.getMainControl().GetStatusBar()
    def openDocPage(self, unifiedPageName, *args, **kwargs):
        """
        Open a doc page identified by its unified page name
        ("wikipage/<word>" opens a wiki page, anything else a func. page)
        """
        if len(unifiedPageName) == 0:
            return
        if unifiedPageName.startswith(u"wikipage/"):
            self.openWikiPage(unifiedPageName[9:], *args, **kwargs)
        else:
            self.openFuncPage(unifiedPageName, *args, **kwargs)
    def openFuncPage(self, funcTag, addToHistory=True, **evtprops):
        """Open a functional page identified by funcTag in the editor."""
        if not self.getMainControl().requireReadAccess():
            return
        oldPage = self.getDocPage()
        evtprops["addToHistory"] = addToHistory
        try:
            page = self.getMainControl().getWikiDocument().getFuncPage(funcTag)
            self.getSubControl("textedit").loadFuncPage(page, evtprops)
        except (IOError, OSError, DbAccessError), e:
            self.getMainControl().lostAccess(e)
            raise
        self.switchSubControl("textedit")
        p2 = evtprops.copy()
        p2.update({"loaded current doc page": True,
                "loaded current functional page": True,
                "docPage": page,
                "oldDocPage": oldPage})
        # p2.update({"loaded current page": True})
        self.fireMiscEventProps(p2)
        page.informVisited()
    def openWikiPage(self, wikiWord, addToHistory=True,
            forceTreeSyncFromRoot=False, forceReopen=False,
            suggNewPageTitle=None, **evtprops):
        """
        Opens a wiki page in the editor of this presenter
        """
        if not self.getMainControl().requireReadAccess():
            return
        # oldPage = self.getDocPage()
        evtprops["addToHistory"] = addToHistory
        evtprops["forceTreeSyncFromRoot"] = forceTreeSyncFromRoot
        langHelper = wx.GetApp().createWikiLanguageHelper(
                self.getWikiDocument().getWikiDefaultWikiLanguage())
        errMsg = None
        # The "if" ensures that existing pages can be opened even
        # if the syntax is (or became) incompatible
        if not self.getWikiDocument().isDefinedWikiPageName(wikiWord):
            errMsg = langHelper.checkForInvalidWikiWord(wikiWord,
                    self.getWikiDocument())
        if errMsg is not None:
            self.getMainControl().displayErrorMessage(
                    _(u"'%s' is an invalid wiki word. %s.") % (wikiWord, errMsg))
            return
        try:
            # don't reopen the currently open page, only send an event
            if (wikiWord == self.getWikiWord()) and not forceReopen:
                p2 = evtprops.copy()
                p2.update({"reloaded current doc page": True,
                        "reloaded current wiki page": True})
                self.fireMiscEventProps(p2)
                if forceTreeSyncFromRoot:
                    self.getMainControl().findCurrentWordInTree()
                return
            # trigger hook
            self.getMainControl().hooks.openWikiWord(self, wikiWord)
            # check if this is an alias
            wikiDoc = self.getMainControl().getWikiDocument()
            wikiWord = wikiDoc.getWikiPageNameForLinkTermOrAsIs(wikiWord)
            # fetch the page info from the database
            try:
                page = wikiDoc.getWikiPage(wikiWord)
                # self.getStatusBar().SetStatusText(uniToGui(_(u"Opened wiki word '%s'") %
                # wikiWord), 0)
            except (WikiWordNotFoundException, WikiFileNotFoundException), e:
                # page doesn't exist yet -- create it on the fly
                page = wikiDoc.createWikiPage(wikiWord,
                        suggNewPageTitle=suggNewPageTitle)
                # trigger hooks
                self.getMainControl().hooks.newWikiWord(self, wikiWord)
                self.showStatusMessage(
                        uniToGui(_(u"Wiki page not found, a new "
                        u"page will be created")))
                # self.getStatusBar().SetStatusText(uniToGui(u""), 1)
            self.loadWikiPage(page, **evtprops)
            page.informVisited()
            # sync the tree
            if forceTreeSyncFromRoot:
                self.getMainControl().findCurrentWordInTree() # TODO ?
        except (IOError, OSError, DbAccessError), e:
            self.getMainControl().lostAccess(e)
            raise
        # trigger hook
        self.getMainControl().hooks.openedWikiWord(self, wikiWord)
    def loadWikiPage(self, page, **evtprops):
        """Load the given wiki page into the editor and fire events."""
        oldPage = self.getDocPage() # TODO Test if too late to retrieve old page here
        self.getSubControl("textedit").loadWikiPage(page, evtprops)
        self.getMainControl().refreshPageStatus() # page)
        p2 = evtprops.copy()
        p2.update({"loaded current doc page": True,
                "loaded current wiki page": True,
                "docPage": page,
                "oldDocPage": oldPage})
        self.fireMiscEventProps(p2)
        self.getMainControl().getMainAreaPanel().updateConfig()
        # Should the page by default be presented in editor or preview mode?
        pv = page.getAttributeOrGlobal(u"view_pane")
        if pv is not None:
            pv = pv.lower()
            if pv == u"preview":
                self.switchSubControl("preview")
            elif pv == u"editor":
                self.switchSubControl("textedit")
            # else: do nothing (pv == u"off")
    def saveCurrentDocPage(self, force = False):
        """Save the shown page if dirty (or if force) and write access exists."""
## _prof.start()
        if (force or self.getDocPage().getDirty()[0]) and \
                self.getMainControl().requireWriteAccess():
            # Reset error flag here, it can be set true again by saveDocPage
#             self.getWikiDocument().setNoAutoSaveFlag(False)
            try:
                # this calls in turn saveDocPage() in PersonalWikiFrame
                self.getSubControl("textedit").saveLoadedDocPage()
            except (IOError, OSError, DbAccessError), e:
                self.getMainControl().lostAccess(e)
                raise
            self.getMainControl().refreshPageStatus()
## _prof.stop()
    def stdDialog(self, dlgtype, title, message, additional=None):
        """
        Show message dialogs, used for scripts.
        Calls same function from PersonalWikiFrame.
        """
        return self.mainControl.stdDialog(dlgtype, title, message, additional)
    def displayMessage(self, title, str):
        """pops up a dialog box,
        used by scripts only
        """
        self.mainControl.displayMessage(title, str)
    def displayErrorMessage(self, errorStr, e=u""):
        """Show an error dialog; delegates to the main control."""
        self.mainControl.displayErrorMessage(errorStr, e)
class DocPagePresenter(wx.Panel, BasicDocPagePresenter, StorablePerspective):
"""
Controls the group of all widgets (subcontrols) used to present/edit
a particular doc page, currently only WikiTxtCtrl and WikiHtmlView.
This version is a panel and contains the children itself.
"""
def __init__(self, parent, mainControl, id=-1):
wx.Panel.__init__(self, parent, id, style=wx.WANTS_CHARS)
BasicDocPagePresenter.__init__(self, mainControl)
self.SetSizer(LayerSizer())
res = xrc.XmlResource.Get()
self.tabContextMenu = res.LoadMenu("MenuDocPagePresenterTabPopup")
self.mainTreePositionHint = None # The tree ctrl uses this to remember
# which element was selected if same page appears multiple
# times in tree. DocPagePresenter class itself does not modify it.
self.tabProgressBar = None
self.tabProgressCount = {}
wx.GetApp().getMiscEvent().addListener(self)
wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_LIST,
lambda evt: self.viewPageHistory())
wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_LIST_UP,
lambda evt: self.viewPageHistory(-1))
wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_LIST_DOWN,
lambda evt: self.viewPageHistory(1))
wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_GO_BACK,
lambda evt: self.pageHistory.goInHistory(-1))
wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_GO_FORWARD,
lambda evt: self.pageHistory.goInHistory(1))
wx.EVT_MENU(self, GUI_ID.CMD_PAGE_GO_UPWARD_FROM_SUBPAGE,
lambda evt: self.goUpwardFromSubpage())
def close(self):
wx.GetApp().getMiscEvent().removeListener(self)
BasicDocPagePresenter.close(self)
def setSubControl(self, scName, sc):
oldSc = self.getSubControl(scName)
if oldSc is not None:
self.GetSizer().Detach(oldSc)
oldSc.close()
BasicDocPagePresenter.setSubControl(self, scName, sc)
if sc is not None:
self.GetSizer().Add(sc)
self.Layout()
def switchSubControl(self, scName, gainFocus=False):
"""
Make the chosen subcontrol visible, all other invisible
"""
try:
subControl = self.subControls[scName]
except KeyError:
traceback.print_exc()
return
# First show subControl scName, then hide the others
# to avoid flicker
if self.visible and self.lastVisibleCtrlName != scName:
subControl.setLayerVisible(True)
subControl.Show(True)
if gainFocus:
subControl.SetFocus()
for n, c in self.subControls.iteritems():
# if n != scName:
if c is not subControl:
if self.visible:
c.setLayerVisible(False)
c.Show(False)
self.lastVisibleCtrlName = scName
self.setTitle(self.shortTitle) #?
if SystemInfo.isLinux():
def SetFocus(self):
try:
ctrl = self.subControls[self.lastVisibleCtrlName]
wx.CallAfter(ctrl.SetFocus)
except KeyError:
wx.Panel.SetFocus(self)
else:
def SetFocus(self):
try:
self.subControls[self.lastVisibleCtrlName].SetFocus()
except KeyError:
wx.Panel.SetFocus(self)
def viewPageHistory(self, posDelta=0):
if not self.getMainControl().requireReadAccess():
return
try:
hist = self.pageHistory.getHrHistoryList()
histpos = self.pageHistory.getPosition()
except (IOError, OSError, DbAccessError), e:
self.getMainControl().lostAccess(e)
raise
historyLen = len(hist)
dlg = wx.SingleChoiceDialog(self,
_(u"History"),
_(u"History"),
hist,
wx.CHOICEDLG_STYLE | wx.OK | wx.CANCEL)
if historyLen > 0:
position = histpos + posDelta - 1
if (position < 0):
position = 0
elif (position >= historyLen):
position = historyLen-1
dlg.SetSelection(position)
if dlg.ShowModal() == wx.ID_OK and dlg.GetSelection() > -1:
self.pageHistory.goInHistory(dlg.GetSelection() - (histpos - 1))
dlg.Destroy()
def goUpwardFromSubpage(self):
wikiWord = self.getWikiWord()
if wikiWord is None:
return
langHelper = wx.GetApp().createWikiLanguageHelper(
self.getWikiDocument().getWikiDefaultWikiLanguage())
wikiPath = langHelper.createWikiLinkPathObject(pageName=wikiWord)
wikiPath.join(langHelper.createWikiLinkPathObject(upwardCount=1))
upwardPageName = wikiPath.resolveWikiWord(None)
if not upwardPageName or wikiWord == upwardPageName:
# No way upward
# TODO: Maybe alternative reaction?
return
# motion type "parent" isn't exactly right but a good guess
self.openWikiPage(upwardPageName, motionType="parent")
def getTabContextMenu(self):
return self.tabContextMenu
def setTitle(self, shortTitle):
LayeredControlPresenter.setTitle(self, shortTitle)
# Shorten title if too long
maxLen = self.getConfig().getint("main", "tabs_maxCharacters", 0)
if maxLen > 0 and len(shortTitle) > maxLen:
shortTitle = shortTitle[:(maxLen//2)] + u"..." + \
shortTitle[-((maxLen+1)//2):]
self.fireMiscEventProps({"changed presenter title": True,
"title": shortTitle})
    def miscEventHappened(self, miscevt):
        """
        React to misc events.  Events originating from the application
        object are handled here (options may have changed, so the title
        is recalculated); all others are delegated to the base class.
        """
        if miscevt.getSource() is wx.GetApp():
            # The option "tabs_maxCharacters" may be changed, so set title again
            if miscevt.has_key("options changed"):
                self.setTitle(self.shortTitle)
            return
        return BasicDocPagePresenter.miscEventHappened(self, miscevt)
def fillDefaultSubControls(self):
self.setLayerVisible(False)
self.Hide()
editor = self.getMainControl().createWindow({"name": "txteditor1",
"presenter": self}, self)
editor.setLayerVisible(False, "textedit")
self.setSubControl("textedit", editor)
htmlView | |
import copy
import json
import logging
import os
import sys
import tempfile
import time
import traceback
import h5py
import numpy as np
import tables
import tensorflow as tf
from opentamp.src.policy_hooks.vae.vae_networks import *
'''
Random things to remember:
- End with no-op task (since we go obs + task -> next_obs, we want last obs + task -> last obs for code simplicity)
- Or cut last timestep?
- Policy gets a reward for finding bad encode/decode paths?
- Constrain conditional encoding (i.e. latent output) against prior?
'''
# Dimensionality of the VAE latent space
LATENT_DIM = 16
# Convolutional encoder: three conv layers followed by a fully connected
# projection down to the latent dimension
ENCODER_CONFIG = {
    'n_channels': [16, 32, 32],
    'filter_sizes': [5, 5, 5],
    'strides': [3, 3, 3],
    'fc_dims': [LATENT_DIM] # [2 * 3 * 32]
    # 'out_act': 'tanh',
}
# Decoder mirroring the encoder; sigmoid output keeps reconstructed
# values in [0, 1] (inputs are scaled by 1/255 before encoding)
DECODER_CONFIG = {
    'conv_init_shape': [2, 3, 32],
    'n_channels': [32, 16, 3],
    'filter_sizes': [5, 5, 5],
    'strides': [3, 3, 3],
    'fc_dims': None,
    'out_act': 'sigmoid',
}
# Fully connected latent dynamics network over (latent, task) inputs
LATENT_DYNAMICS_CONFIG = {
    'fc_dims': [LATENT_DIM, LATENT_DIM],
}
class VAE(object):
    def __init__(self, hyperparams):
        """
        Build the VAE tensorflow graph, open (or create) the hdf5 replay
        buffer and start a session, restoring previous weights when a
        matching checkpoint exists.

        Parameters
        ----------
        hyperparams : dict
            Required keys: 'rollout_len', 'task_dims', 'weight_dir'.
            All other keys are optional and read with defaults via
            the .get(...) calls below.
        """
        self.config = hyperparams
        # Fresh graph plus fixed seed for reproducibility
        tf.reset_default_graph()
        tf.set_random_seed(self.config.get('random_seed', 1234))
        self.tf_iter = 0
        self.batch_size = self.config.get('batch_size', 64)
        self.train_iters = self.config.get('train_iters', 100)
        # NOTE(review): rollouts are shortened by 2 timesteps here --
        # presumably trimming boundary steps; confirm against the data
        # generation code.
        self.T = self.config['rollout_len'] - 2
        self.rollout_len = self.config['rollout_len'] - 2
        self.obs_dims = [80, 107, 3] # list(hyperparams['obs_dims'])
        self.task_dim = hyperparams['task_dims']
        # The following hyperparameters also describe where the weights are saved
        self.weight_dir = hyperparams['weight_dir']
        # if self.load_step < 0:
        #     is_rnn = 'rnn' if self.use_recurrent_dynamics else 'fc'
        #     overshoot = 'overshoot' if self.use_overshooting else 'onestep'
        #     self.ckpt_name = self.weight_dir+'/vae_{0}_{1}_{2}.ckpt'.format(self.train_mode, is_rnn, overshoot)
        # else:
        #     self.ckpt_name = self.weight_dir+'/vae_{0}_{1}_{2}.ckpt'.format(self.train_mode, is_rnn, overshoot, load_step)
        # Replay buffer: append mode by default, read-only (SWMR) as an
        # alternative.  NOTE(review): when both 'load_data' and
        # 'data_read_only' are falsy, self.obs_data / self.task_data are
        # never assigned and the slicing further below will raise.
        if hyperparams.get('load_data', True):
            f_mode = 'a'
            self.data_file = self.weight_dir+'/vae_buffer.hdf5'
            self.data = h5py.File(self.data_file, f_mode)
            try:
                self.obs_data = self.data['obs_data']
                self.task_data = self.data['task_data']
                # Truncate stored task vectors to the configured width
                self.task_data = self.task_data[:, :, :self.task_dim]
                self.task_dim = self.task_data.shape[-1]
            except:
                # NOTE(review): bare except -- presumably guarding against
                # missing datasets (KeyError) on a fresh buffer file.
                # Create empty datasets, resizable along every axis.
                obs_data = np.zeros([0, self.rollout_len]+list(self.obs_dims))
                task_data = np.zeros((0, self.rollout_len, self.task_dim))
                self.obs_data = self.data.create_dataset('obs_data', data=obs_data, maxshape=(None, None, None, None, None), dtype='uint8')
                self.task_data = self.data.create_dataset('task_data', data=task_data, maxshape=(None, None, None), dtype='uint8')
            # self.data.swmr_mode=True
        elif hyperparams.get('data_read_only', False):
            f_mode = 'r'
            self.data_file = self.weight_dir+'/vae_buffer.hdf5'
            self.data = h5py.File(self.data_file, f_mode, swmr=True)
            self.obs_data = self.data['obs_data']
            self.task_data = self.data['task_data']
        # while not os.path.isfile(self.weight_dir+'/vae_buffer.hdf5'):
        #     time.sleep(1)
        self.train_mode = hyperparams.get('train_mode', 'online')
        assert self.train_mode in ['online', 'conditional', 'unconditional']
        self.use_recurrent_dynamics = hyperparams.get('use_recurrent_dynamics', False)
        self.use_overshooting = hyperparams.get('use_overshooting', False)
        self.use_prior = hyperparams.get('use_prior', True)
        self.load_step = hyperparams.get('load_step', 0)
        # self.beta = hyperparams.get('beta', 10)
        # self.beta_d = hyperparams.get('overshoot_beta', 1./self.T)
        # KL weight (beta-VAE style) and overshooting KL weight
        self.beta = 0.2 # hyperparams.get('beta', 0.5)
        self.beta_d = hyperparams.get('overshoot_beta', 0.1)
        # Optionally cap how much of the buffered data is used
        self.data_limit = hyperparams.get('data_limit', None)
        self.data_limit = self.data_limit if self.data_limit is not None else len(self.obs_data)
        self.obs_data = self.obs_data[:self.data_limit]
        self.task_data = self.task_data[:self.data_limit]
        self.dist_constraint = hyperparams.get('dist_constraint', False)
        self.ckpt_name = self.get_weight_file()
        # self.data_file = self.weight_dir+'/vae_buffer.npz'
        # try:
        #     data = np.load(self.data_file, mmap_mode='w+')
        # except:
        #     pass
        # self.obs_data = np.zeros((0, self.dT, self.dO))
        # self.task_data = np.zeros((0, self.dT, self.dU))
        self.max_buffer = hyperparams.get('max_buffer', 1e6)
        # NOTE(review): this overwrites the 'dist_constraint' value read
        # above with the differently named 'distance_constraint' key --
        # confirm which key is the intended one.
        self.dist_constraint = hyperparams.get('distance_constraint', False)
        self.cur_lr = 1e-3
        # Build network and optimizer inside a dedicated variable scope
        with tf.variable_scope('vae', reuse=False):
            self.init_network()
            self.init_solver()
        self.scope = 'vae'
        self.gpu_fraction = self.config['gpu_fraction'] if 'gpu_fraction' in self.config else 0.95
        if 'allow_growth' in self.config and not self.config['allow_growth']:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.gpu_fraction)
        else:
            gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
        init_op = tf.initialize_all_variables()
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
        self.saver = tf.train.Saver(variables)
        if self.use_recurrent_dynamics:
            # Cache the LSTM zero state for single-step latent rollouts
            zero_state = self.latent_dynamics.lstm_cell.zero_state(batch_size=1, dtype=tf.float32)
            self.zero_state = tuple(self.sess.run(zero_state))
        # Restore previous weights when possible, otherwise initialize
        try:
            self.saver.restore(self.sess, self.ckpt_name)
        except Exception as e:
            self.sess.run(init_op)
            print(('\n\nCould not load previous weights for {0} from {1}\n\n'.format(self.scope, self.weight_dir)))
        self.update_count = 0
        self.n_updates = 0
        self.update_size = self.config.get('update_size', 1)
def get_weight_file(self, addendum=None):
is_rnn = 'rnn' if self.use_recurrent_dynamics else 'fc'
overshoot = 'overshoot' if self.use_overshooting else 'onestep'
step = self.load_step
mode = self.train_mode
prior = 'prior' if self.use_prior else 'noprior'
beta = 'beta'+str(self.beta)
overshoot_beta = 'beta_d'+str(self.beta_d)
limit = self.data_limit if self.data_limit is not None else len(self.obs_data)
limit = str(limit)+'nsamples'
dist = 'distconstr' if self.dist_constraint else 'nodistconstr'
if addendum is None:
ext = "vae_{0}_{1}_{2}_{3}_{4}_{5}_{6}.ckpt".format(mode, is_rnn, overshoot, prior, beta, dist, limit)
else:
ext = "vae_{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}.ckpt".format(mode, is_rnn, overshoot, prior, beta, dist, limit, addendum)
file_name = self.weight_dir + ext
return file_name
def serialize_weights(self):
print('Serializing vae weights')
var_to_val = {}
variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae')
for v in variables:
var_to_val[v.name] = self.sess.run(v).tolist()
return json.dumps(var_to_val)
def deserialize_weights(self, json_wts, save=True):
var_to_val = json.loads(json_wts)
# print 'Deserializing', scopes
variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae')
for var in variables:
var.load(var_to_val[var.name], session=self.sess)
if save: self.store_scope_weights()
# print 'Weights for {0} successfully deserialized and stored.'.format(scopes)
# def update_weights(self, weight_dir=None):
# if weight_dir is None:
# weight_dir = self.weight_dir
# self.saver.restore(self.sess, weight_dir+'/vae_{0}.ckpt'.format(self.train_mode))
def store_scope_weights(self, weight_dir=None, addendum=None):
if weight_dir is None:
weight_dir = self.weight_dir
try:
variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae')
saver = tf.train.Saver(variables)
saver.save(self.sess, self.get_weight_file(addendum))
print(('Saved vae weights for', self.train_mode, 'in', self.weight_dir))
except:
print('Saving variables encountered an issue but it will not crash:')
traceback.print_exception(*sys.exc_info())
def store_weights(self, weight_dir=None):
self.store_scope_weights(weight_dir)
    def store(self, obs, task_list):
        """
        Append one rollout (observations plus per-timestep task vectors)
        to the hdf5 replay buffer and bump the update counter.

        obs : per-timestep observation array (stored as uint8).
        task_list : per-timestep task array, same length as obs
            (stored as uint8).

        Returns True when enough rollouts accumulated to warrant a VAE
        update (buffer flush / checkpoint happen periodically then),
        else False.
        """
        print(('Storing data for', self.scope))
        assert len(obs) == len(task_list)
        # self.T = len(obs)
        # self.obs_data = np.r_[self.obs_data, obs]
        # self.task_data = np.r_[self.task_data, task_list]
        # obs = obs[:self.T]
        # task_list = task_list[:self.T]
        # Add a leading axis so each rollout occupies one buffer row
        obs = obs.reshape((1,)+obs.shape)
        task_list = task_list.reshape((1,)+task_list.shape)
        # Grow the resizable datasets by one row and write in place
        self.obs_data.resize((len(self.obs_data)+1,) + obs.shape[1:])
        self.obs_data[-1] = obs.astype(np.uint8)
        self.task_data.resize((len(self.task_data)+1,) + task_list.shape[1:])
        self.task_data[-1] = task_list.astype(np.uint8)
        # if len(self.obs_data) > self.max_buffer:
        #     self.obs_data = self.obs_data[-self.max_buffer:]
        #     self.task_data = self.task_data[-self.max_buffer:]
        self.update_count += 1
        # Only signal an update once a minimal amount of data exists
        if self.update_count > self.update_size and len(self.obs_data) > 10:
            print('Updating vae')
            # self.update()
            self.n_updates += 1
            self.update_count = 0
            # Flush buffers every 5th update; checkpoint after 10 updates
            if not self.n_updates % 5:
                self.save_buffers()
            if self.n_updates > 10:
                self.store_scope_weights()
                self.n_updates = 0
            return True
        return False
    def save_buffers(self):
        """Flush the hdf5-backed replay buffer file to disk."""
        # np.savez(self.data_file, task_data=self.task_data, obs_data=self.obs_data)
        self.data.flush()
def init_network(self):
import tensorflow as tf
self.x_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims))
self.latent_in = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, LATENT_DIM])
self.task_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+[self.task_dim])
self.latent_task_in = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, self.task_dim])
self.offset_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims))
self.before_offset_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims))
self.training = tf.compat.v1.placeholder(tf.bool)
if len(self.obs_dims) == 1:
pass
else:
pass
self.fc_in = None # tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim])
self.offset_fc_in = None #tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim])
self.far_offset_fc_in = None # tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim])
# mask = tf.ones((self.batch_size, self.T))
# mask[:,-1] = 0
# self.far_offset_loss_mask = tf.constant(mask.reshape([self.batch_size*self.T]))
self.encoder = Encoder()
self.encode_mu, self.encode_logvar = self.encoder.get_net(self.x_in / 255., self.training, fc_in=self.fc_in, config=ENCODER_CONFIG)
self.encode_posterior = tf.distributions.Normal(self.encode_mu, tf.sqrt(tf.exp(self.encode_logvar)))
# self.offset_encode_mu, self.offset_encode_logvar = self.encoder.get_net(self.offset_in, self.training, fc_in=self.offset_fc_in, reuse=True, config=ENCODER_CONFIG)
# self.far_offset_encode_mu, self.far_offset_encode_logvar = self.encoder.get_net(self.far_offset_in, self.training, fc_in=self.far_offset_fc_in, reuse=True, config=ENCODER_CONFIG)
self.decoder_in = self.encode_mu + tf.sqrt(tf.exp(self.encode_logvar)) * tf.random_normal(tf.shape(self.encode_mu), 0, 1)
self.decoder = Decoder()
self.decode_mu, self.decode_logvar = self.decoder.get_net(self.decoder_in, self.training, config=DECODER_CONFIG)
self.decode_posterior = tf.distributions.Normal(self.decode_mu, tf.sqrt(tf.exp(self.decode_logvar)))
# self.sample_decode_mu, self.sample_decode_logvar = self.decoder.get_net(self.decoder_in, self.training, config=DECODER_CONFIG, reuse=reuse)
# self.sample_decode_posterior = tf.distributions.Normal(self.sample_decode_mu, tf.sqrt(tf.exp(self.sample_decode_logvar)))
if 'unconditional' not in self.train_mode:
if self.use_recurrent_dynamics:
self.latent_dynamics = RecurrentLatentDynamics()
in_shape = tf.shape(self.decoder_in)
z_in = tf.reshape(self.decoder_in, (self.batch_size, self.T, LATENT_DIM))
task_in = tf.reshape(self.task_in, (self.batch_size, self.T, self.task_dim))
mu, logvar, self.rnn_initial_state, self.rnn_final_state = self.latent_dynamics.get_net(z_in, task_in, self.T, self.training, config=LATENT_DYNAMICS_CONFIG)
self.conditional_encode_mu = tf.reshape(mu, in_shape)
self.conditional_encode_logvar = tf.reshape(logvar, in_shape)
self.conditional_encode_posterior = tf.distributions.Normal(self.conditional_encode_mu, tf.sqrt(tf.exp(self.conditional_encode_logvar)))
trans_mu, trans_logvar, self.trans_rnn_initial_state, self.trans_rnn_final_state = self.latent_dynamics.get_net(self.latent_in, self.latent_task_in, 1, self.training, config=LATENT_DYNAMICS_CONFIG, reuse=True)
self.latent_trans_mu = tf.reshape(trans_mu, [1, 1, LATENT_DIM])
self.latent_trans_logvar = tf.reshape(trans_logvar, [1, 1, LATENT_DIM])
self.latent_trans_posterior = tf.distributions.Normal(self.latent_trans_mu, tf.sqrt(tf.exp(self.latent_trans_logvar)))
else:
self.latent_dynamics = LatentDynamics()
self.conditional_encode_mu, self.conditional_encode_logvar = self.latent_dynamics.get_net(self.decoder_in, self.task_in, self.training, config=LATENT_DYNAMICS_CONFIG)
self.conditional_encode_posterior = tf.distributions.Normal(self.conditional_encode_mu, tf.sqrt(tf.exp(self.conditional_encode_logvar)))
self.latent_trans_mu, self.latent_trans_logvar = self.latent_dynamics.get_net(tf.reshape(self.latent_in, (1, LATENT_DIM)), tf.reshape(self.latent_task_in, (1, self.task_dim)), self.training, config=LATENT_DYNAMICS_CONFIG, reuse=True)
self.latent_trans_posterior = tf.distributions.Normal(self.latent_trans_mu, tf.sqrt(tf.exp(self.latent_trans_logvar)))
self.conditional_decoder_in = self.conditional_encode_mu + tf.sqrt(tf.exp(self.conditional_encode_logvar)) * tf.random_normal(tf.shape(self.conditional_encode_mu), 0, 1)
self.conditional_decode_mu, self.conditional_decode_logvar = self.decoder.get_net(self.conditional_decoder_in, self.training, config=DECODER_CONFIG, reuse=True)
self.conditional_decode_posterior = tf.distributions.Normal(self.conditional_decode_mu, tf.sqrt(tf.exp(self.conditional_decode_logvar)))
self.offset_encode_mu, self.offset_encode_logvar = self.encoder.get_net(self.offset_in / 255., self.training, fc_in=self.offset_fc_in, config=ENCODER_CONFIG, reuse=True)
self.offset_encode_posterior = tf.distributions.Normal(self.offset_encode_mu, tf.sqrt(tf.exp(self.offset_encode_logvar)))
if self.dist_constraint:
self.before_offset_encode_mu, self.before_offset_encode_logvar = self.Encoder.get_net(self.before_offset_in/255., self.training, fc_in=self.fc_in, config=ENCODER_CONFIG, reuse=True)
self.before_offset_encode_posterior = tf.distributions.Normal(self.before_offset_encode_mu, tf.sqrt(tf.exp(self.before_offset_encode_logvar)))
self.latent_prior = tf.distributions.Normal(tf.zeros_initializer()(tf.shape(self.encode_mu)), 1.)
self.fitted_prior = tf.distributions.Normal(tf.zeros_initializer()(LATENT_DIM), 1.)
def overshoot_latents(self, d=-1):
if d < 0:
d = self.T
if self.use_recurrent_dynamics:
latent_in = tf.reshape(self.decoder_in, [self.batch_size, self.T, LATENT_DIM])
task_in = tf.reshape(self.task_in, [self.batch_size, self.T, self.task_dim])
z_in = tf.concat([latent_in, task_in], axis=-1)
latent_mu = tf.reshape(self.conditional_encode_mu, [self.batch_size, self.T, LATENT_DIM])
latent_logvar= tf.reshape(self.conditional_encode_logvar, [self.batch_size, self.T, LATENT_DIM])
cell = self.latent_dynamics.lstm_cell
w = self.latent_dynamics.weights
b = self.latent_dynamics.bias
init_state = self.latent_dynamics.initial_state
last_state = self.latent_dynamics.last_state
zero_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)
outs = {i: [] for i in range(self.T)}
cur_state = zero_state
for i in range(self.T):
cur_out = z_in[:, i, :]
for j in range(i+1, np.minimum(self.T, i+d+1)):
cur_out, cur_state = cell(cur_out, cur_state)
if j == i+1:
next_state = cur_state
cur_out = tf.nn.bias_add(tf.matmul(cur_out, w), b)
outs[j].append(cur_out)
cur_out = tf.split(cur_out, 2, -1)[0]
cur_out = tf.concat([cur_out, task_in[:, j, :]], axis=-1)
cur_state = next_state
else:
latent_in = tf.reshape(self.decoder_in, [self.batch_size, self.T, LATENT_DIM])
task_in = tf.reshape(self.task_in, [self.batch_size, self.T, self.task_dim])
z_in = tf.concat([latent_in, task_in], axis=-1)
latent_mu = tf.reshape(self.conditional_encode_mu, [self.batch_size, self.T, LATENT_DIM])
latent_logvar= tf.reshape(self.conditional_encode_logvar, [self.batch_size, self.T, LATENT_DIM])
outs = {i: [] for i in range(self.T)}
for i in range(self.T):
cur_out = z_in[:, i, :]
for j in | |
# simbench/converter/format_information.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, T<NAME>, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
from numpy import pi
from pandas import DataFrame
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
__author__ = 'smeinecke'
def sb2pp_base(variable="power"):
    """ converting factor from simbench data structure to pandapower:
        power: factor 1 (simbench and pandapower both in MVA)
        current: simbench in A, pandapower in kA (factor 1e-3)

        Raises ValueError for any other variable name.
    """
    if variable == "power":
        return 1
    elif variable == "current":
        return 1e-3
    else:
        # Bug fix: the original message contained a %s placeholder but
        # never interpolated the offending variable name.
        raise ValueError("The variable %s is unknown to sb2pp_base()." % variable)
def csv_tablenames(which):
    """
    Return the simbench csv format table names for the requested groups.
    `which` may be one of the strings 'elements', 'profiles', 'types',
    'cases', 'res_elements' or a list of these; group order (not the
    order within `which`) determines the output order.
    """
    if isinstance(which, str):
        which = [which]
    groups = (
        ('elements', ["ExternalNet", "Line", "Load", "Shunt", "Node", "Measurement",
                      "PowerPlant", "RES", "Storage", "Substation", "Switch", "Transformer",
                      "Transformer3W", "Coordinates"]),
        ('profiles', ["LoadProfile", "PowerPlantProfile", "RESProfile", "StorageProfile"]),
        ('types', ["LineType", "DCLineType", "TransformerType", "Transformer3WType"]),
        ('cases', ["StudyCases"]),
        ('res_elements', ["NodePFResult"]),
    )
    names = []
    for group, tables in groups:
        if group in which:
            names.extend(tables)
    return names
def _csv_table_pp_dataframe_correspondings(type_):
    """
    Return the correspondence between simbench csv table names and
    pandapower dataframe names in a representation chosen by type_:
    the `list` type -> (csv_names, pp_names); `str` -> list of
    "csv*pp" strings; `tuple` -> list of (csv, pp) pairs;
    `DataFrame` -> DataFrame with columns ["csv", "pp"]; a string
    instance -> the counterpart name(s) of that csv or pp name.
    """
    csv_names = csv_tablenames(['elements', 'types', 'res_elements'])
    # corresponding pandapower dataframe names
    pp_names = ['ext_grid', 'line', 'load', 'shunt', 'bus', 'measurement', 'gen', 'sgen',
                'storage', 'substation', 'switch', 'trafo', 'trafo3w', 'bus_geodata',
                'std_types|line', 'dcline', 'std_types|trafo', 'std_types|trafo3w',
                "res_bus"]
    # append table name lists by combinations of generating elements
    csv_names += ['ExternalNet', 'ExternalNet', 'PowerPlant', 'PowerPlant', 'RES', 'RES',
                  'ExternalNet', 'ExternalNet', 'Line']
    pp_names += ['gen', 'sgen', 'ext_grid', 'sgen', 'ext_grid', 'gen', 'ward', 'xward', 'dcline']
    assert len(csv_names) == len(pp_names)
    pairs = list(zip(csv_names, pp_names))
    if type_ is list:
        return csv_names, pp_names
    if type_ is str:
        return ["%s*%s" % pair for pair in pairs]
    if type_ is tuple:
        return pairs
    if type_ is DataFrame:
        return DataFrame(pairs, columns=["csv", "pp"])
    if isinstance(type_, str):
        # Look up the counterpart(s) of a single csv or pp name
        if type_ in csv_names:
            matches = [pp for csv, pp in pairs if csv == type_]
        else:
            matches = [csv for csv, pp in pairs if pp == type_]
        return matches[0] if len(matches) == 1 else matches
    raise NotImplementedError("_csv_table_pp_dataframe_correspondings() is not implemented " +
                              "for %s as input" % str(type_))
def all_dtypes():
    """ This function returns a dict of all simbench csv file column dtypes.

    The dtype lists are positional and correspond one-to-one to the
    column name lists returned by all_columns().  Profile tables use a
    dtype list sized for the expected number of profile columns.
    """
    dtypes = {
        "Coordinates": [object, float, float, object, int],
        "ExternalNet": [object]*3 + [float]*8 + [object, int],
        "Line": [object]*4 + [float, float, object, int],
        "LineType": [object] + [float]*4 + [object],
        "Load": [object]*3 + [float]*3 + [object, int],
        "LoadProfile": [object] + [float]*len(load_profiles_list(pq_both=True)),
        "Shunt": [object, object, float, float, float, int, object, int],
        "Node": [object]*2 + [float]*5 + [object]*3 + [int],
        "Measurement": [object]*5 + [int],
        "PowerPlant": [object]*5 + [float]*8 + [object, int],
        "PowerPlantProfile": [object],
        "RES": [object]*5 + [float]*3 + [object, int],
        "RESProfile": [object] + [float]*21,
        "Storage": [object]*4 + [float]*11 + [object, int],
        "StorageProfile": [object] + [float]*2,
        "Substation": [object, object, int],
        "Switch": [object]*4 + [int] + [object]*2 + [int],
        "Transformer": [object]*4 + [int, int, object, float, object, object, int],
        "TransformerType": [object] + [float]*8 + [int, object, float, float, int, int, int],
        "Transformer3W": [object]*5 + [float]*3 + [int, object, float, object, object, int],
        "Transformer3WType": [object] + [float]*16 + [int, object] + [float]*6 + [int]*9,
        "DCLineType": [object] + [float]*8,
        "StudyCases": [object] + [float]*6,
        "NodePFResult": [object, float, float, object, object, int]}
    return dtypes
def all_columns():
    """ This function returns a dict of all simbench csv file column names.

    The column name lists are positional and correspond one-to-one to
    the dtype lists returned by all_dtypes().  Profile table columns
    depend on the dataset and may be incomplete (see get_columns()).
    """
    tablenames = {
        "Coordinates": ['id', 'x', 'y', 'subnet', 'voltLvl'],
        "ExternalNet": ['id', 'node', 'calc_type', 'dspf', 'pExtNet', 'qExtNet',
                        'pWardShunt', 'qWardShunt', 'rXWard',
                        'xXWard', 'vmXWard', 'subnet', 'voltLvl'],
        "Line": ['id', 'nodeA', 'nodeB', 'type', 'length', 'loadingMax', 'subnet', 'voltLvl'],
        "LineType": ['id', 'r', 'x', 'b', 'iMax', 'type'],
        "Load": ['id', 'node', 'profile', 'pLoad', 'qLoad', 'sR', 'subnet', 'voltLvl'],
        "LoadProfile": ['time'] + load_profiles_list(pq_both=True),
        "Shunt": ['id', 'node', 'p0', 'q0', 'vmR', 'step', 'subnet', 'voltLvl'],
        "Node": ['id', 'type', 'vmSetp', 'vaSetp', 'vmR', 'vmMin', 'vmMax', 'substation',
                 'coordID', 'subnet', 'voltLvl'],
        "Measurement": ['id', 'element1', 'element2', 'variable', 'subnet', 'voltLvl'],
        "PowerPlant": ['id', 'node', 'type', 'profile', 'calc_type', 'dspf', 'pPP', 'qPP', 'sR',
                       'pMin', 'pMax', 'qMin', 'qMax', 'subnet', 'voltLvl'],
        "PowerPlantProfile": ['id'],
        "RES": ['id', 'node', 'type', 'profile', 'calc_type', 'pRES', 'qRES', 'sR',
                'subnet', 'voltLvl'],
        "RESProfile": ['time'] + ["%s%i" % (b, x) for b in ["PV", "WP", "BM"] for x in
                                  range(1, 6)] + ['Hydro1', 'Hydro2', 'Waste1', 'Waste2',
                                                  'Gas1', 'Gas2'],
        "Storage": ['id', 'node', 'type', 'profile', 'pStor', 'qStor', 'chargeLevel', 'sR',
                    'eStore', 'etaStore', 'sdStore',
                    'pMin', 'pMax', 'qMin', 'qMax', 'subnet', 'voltLvl'],
        "StorageProfile": ['time', 'PV_Battery', 'E_Mobility'],
        "Substation": ['id', 'subnet', 'voltLvl'],
        "Switch": ['id', 'nodeA', 'nodeB', 'type', 'cond', 'substation', 'subnet',
                   'voltLvl'],
        "Transformer": ['id', 'nodeHV', 'nodeLV', 'type', 'tappos',
                        'autoTap', 'autoTapSide', 'loadingMax', 'substation', 'subnet', 'voltLvl'],
        "TransformerType": ['id', 'sR', 'vmHV', 'vmLV', 'va0', 'vmImp', 'pCu', 'pFe',
                            'iNoLoad', 'tapable', 'tapside', 'dVm', 'dVa', 'tapNeutr',
                            'tapMin', 'tapMax'],
        "Transformer3W": ['id', 'nodeHV', 'nodeMV', 'nodeLV', 'type',
                          'tapposHV', 'tapposMV', 'tapposLV', 'autoTap', 'autoTapSide',
                          'loadingMax', 'substation', 'subnet', 'voltLvl'],
        "Transformer3WType": ['id', 'sRHV', 'sRMV', 'sRLV', 'vmHV', 'vmMV', 'vmLV',
                              'vaHVMV', 'vaHVLV', 'vmImpHVMV', 'vmImpHVLV', 'vmImpMVLV',
                              'pCuHV', 'pCuMV', 'pCuLV', 'pFe', 'iNoLoad', 'tapable',
                              'tapside', 'dVmHV', 'dVmMV', 'dVmLV', 'dVaHV', 'dVaMV',
                              'dVaLV', 'tapNeutrHV', 'tapNeutrMV', 'tapNeutrLV',
                              'tapMinHV', 'tapMinMV', 'tapMinLV', 'tapMaxHV', 'tapMaxMV',
                              'tapMaxLV'],
        "DCLineType": ['id', 'pDCLine', 'relPLosses', 'fixPLosses', 'pMax',
                       'qMinA', 'qMinB', 'qMaxA', 'qMaxB'],
        "StudyCases": ['Study Cases', 'pload', 'qload', 'Wind_p', 'PV_p', 'RES_p', 'Slack_vm'],
        "NodePFResult": ['node', 'vm', 'va', 'substation', 'subnet', 'voltLvl']
    }
    return tablenames
def get_dtypes(tablename):
    """ This function returns simbench csv file column dtypes for a given table name. """
    alldtypes = all_dtypes()
    if tablename not in alldtypes:
        raise ValueError('The tablename %s is unknown.' % tablename)
    if "Profile" in tablename:
        logger.debug("The returned dtypes list of %s is for 1000 profiles columns." % tablename)
    return alldtypes[tablename]
def get_columns(tablename):
    """ This function returns simbench csv file column names for a given table name. """
    allcolumns = all_columns()
    if tablename not in allcolumns:
        raise ValueError('The tablename %s is unknown.' % tablename)
    if "Profile" in tablename:
        logger.debug("The returned column list of %s is given for simbench "
                     "dataset and may be incomplete" % tablename)
    return allcolumns[tablename]
def _csv_pp_column_correspondings(tablename):
""" Returns a list of tuples giving corresponding parameter names. The tuples are structured as:
(column name in csv_table,
column name in pp_df,
factor to multiply csv column value to receive pp column value) """
tuples = [
# Node and node names
("node", "bus", None), ("nodeA", "from_bus", None), ("nodeB", "to_bus", None),
("nodeHV", "hv_bus", None), ("nodeMV", "mv_bus", None), ("nodeLV", "lv_bus", None),
("sR", "sn_mva", sb2pp_base()), ("vmR", "vn_kv", None), ("nodeA", "bus", None),
("nodeB", "element", None), ("vmMin", "min_vm_pu", None), ("vmMax", "max_vm_pu", None),
# Line, LineType and DCLineType
("length", "length_km", None), ("pDCLine", "p_mw", sb2pp_base()),
("relPLosses", "loss_percent", None), ("fixPLosses", "loss_mw", sb2pp_base()),
("qMaxA", "max_q_from_mvar", sb2pp_base()), ("qMinA", "min_q_from_mvar", sb2pp_base()),
("qMaxB", "max_q_to_mvar", sb2pp_base()), ("qMinB", "min_q_to_mvar", sb2pp_base()),
("r", "r_ohm_per_km", None), ("x", "x_ohm_per_km", None),
("b", "c_nf_per_km", 1e3 / (2*pi*50)), ("iMax", "max_i_ka", sb2pp_base("current")),
("loadingMax", "max_loading_percent", None),
# Ward and xWard
("pExtNet", "ps_mw", sb2pp_base()),
("qExtNet", "qs_mvar", sb2pp_base()), ("cond", "closed", None),
("pWardShunt", "pz_mw", sb2pp_base()),
("qWardShunt", "qz_mvar", sb2pp_base()), ("rXWard", "r_ohm", None),
("xXWard", "x_ohm", None), ("vmXWard", "vm_pu", None),
# Measurement
("variable", "type", None),
# Storage
("eStore", "max_e_mwh", sb2pp_base()), ("etaStore", "efficiency_percent", None),
("sdStore", "self-discharge_percent_per_day", sb2pp_base()),
("chargeLevel", "soc_percent", 100),
# NodePFResult
("vm", "vm_pu", None), ("va", "va_degree", None),
# TransformerType
("vmHV", "vn_hv_kv", None), ("vmMV", "vn_mv_kv", None), ("vmLV", "vn_lv_kv", None),
("pFe", "pfe_kw", None), ("iNoLoad", "i0_percent", None),
("tappos", "tap_pos", None), ("tapside", "tap_side", None),
("vmImp", "vk_percent", None), ("dVm", "tap_step_percent", None),
("va0", "shift_degree", None), ("tapNeutr", "tap_neutral", None),
("tapMin", "tap_min", None), ("tapMax", "tap_max", None),
# Transformer3WType
("sRHV", "sn_hv_mva", sb2pp_base()), ("sRMV", "sn_mv_mva", sb2pp_base()),
("sRLV", "sn_lv_mva", sb2pp_base()), ("vaHVMV", "shift_mv_degree", None),
("vaHVLV", "shift_lv_degree", None), ("vmImpHVMV", "vk_hv_percent", None),
("vmImpHVLV", "vk_mv_percent", None), ("vmImpMVLV", "vk_lv_percent", None),
("dVmHV", "tap_step_percent", None), ("tapNeutrHV", "tap_neutral", None),
# ("dVmMV", "xxxxxxxx", None), ("dVmLV", "xxxxxxxx", None),
# ("dVaHV", "xxxxxxxx", None), ("dVaMV", "xxxxxxxx", None),
# ("dVaLV", "xxxxxxxx", None),
# ("tapNeutrMV", "xxxxxxxx", None), ("tapNeutrLV", "xxxxxxxx", None),
("tapMinHV", "tap_min", None), ("tapMaxHV", "tap_max", None)
# ("tapMinMV", "xxxxxxxx", None), ("tapMinLV", "xxxxxxxx", None),
# ("tapMaxMV", "xxxxxxxx", None), ("tapMaxLV", "xxxxxxxx", None)
# cosidered by _add_vm_va_setpoints_to_buses() and _add_phys_type_and_vm_va_setpoints_to_generation_element_tables():
# ("vmSetp", "vm_pu", None), ("vaSetp", "va:degree", None),
]
# --- add "pLoad", "qLoad" respectively "pPP", "qPP" or others, according to tablename
shortcuts = {"PowerPlant": "PP", "ExternalNet": "ExtNet", "Storage": "Stor", "Shunt": "0"}
if | |
TargetAdd('p3showbase_showBase.obj', opts=OPTS, input='showBase.cxx')
if GetTarget() == 'darwin':
TargetAdd('p3showbase_showBase_assist.obj', opts=OPTS, input='showBase_assist.mm')
OPTS=['DIR:direct/src/showbase']
IGATEFILES=GetDirectoryContents('direct/src/showbase', ["*.h", "showBase.cxx"])
TargetAdd('libp3showbase.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3showbase.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3showbase', 'SRCDIR:direct/src/showbase'])
#
# DIRECTORY: direct/src/motiontrail/
#

if (PkgSkip("DIRECT")==0):
    # Compile the motiontrail C++ sources, then feed the headers through
    # interrogate to produce the .in binding file for panda3d.direct.
    OPTS=['DIR:direct/src/motiontrail', 'BUILDING:DIRECT']
    TargetAdd('p3motiontrail_cMotionTrail.obj', opts=OPTS, input='cMotionTrail.cxx')
    TargetAdd('p3motiontrail_config_motiontrail.obj', opts=OPTS, input='config_motiontrail.cxx')
    OPTS=['DIR:direct/src/motiontrail']
    IGATEFILES=GetDirectoryContents('direct/src/motiontrail', ["*.h", "cMotionTrail.cxx"])
    TargetAdd('libp3motiontrail.in', opts=OPTS, input=IGATEFILES)
    TargetAdd('libp3motiontrail.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3motiontrail', 'SRCDIR:direct/src/motiontrail'])

#
# DIRECTORY: direct/metalibs/direct/
#

if (PkgSkip("DIRECT")==0):
    # Link the C++ metalib libp3direct.dll from the per-directory objects.
    TargetAdd('libp3direct.dll', input='p3directbase_directbase.obj')
    TargetAdd('libp3direct.dll', input='p3showbase_showBase.obj')
    if GetTarget() == 'darwin':
        # Objective-C++ helper only exists on macOS.
        TargetAdd('libp3direct.dll', input='p3showbase_showBase_assist.obj')
    TargetAdd('libp3direct.dll', input='p3deadrec_composite1.obj')
    TargetAdd('libp3direct.dll', input='p3interval_composite1.obj')
    TargetAdd('libp3direct.dll', input='p3motiontrail_config_motiontrail.obj')
    TargetAdd('libp3direct.dll', input='p3motiontrail_cMotionTrail.obj')
    TargetAdd('libp3direct.dll', input=COMMON_PANDA_LIBS)
    TargetAdd('libp3direct.dll', opts=['ADVAPI', 'OPENSSL', 'WINUSER', 'WINGDI'])

    # Assemble the Python extension module from the interrogate .in files...
    PyTargetAdd('direct_module.obj', input='libp3dcparser.in')
    PyTargetAdd('direct_module.obj', input='libp3showbase.in')
    PyTargetAdd('direct_module.obj', input='libp3deadrec.in')
    PyTargetAdd('direct_module.obj', input='libp3interval.in')
    PyTargetAdd('direct_module.obj', input='libp3distributed.in')
    PyTargetAdd('direct_module.obj', input='libp3motiontrail.in')
    PyTargetAdd('direct_module.obj', opts=['IMOD:panda3d.direct', 'ILIB:direct', 'IMPORT:panda3d.core'])

    # ...and link direct.pyd from the generated _igate objects.
    PyTargetAdd('direct.pyd', input='libp3dcparser_igate.obj')
    PyTargetAdd('direct.pyd', input='libp3showbase_igate.obj')
    PyTargetAdd('direct.pyd', input='libp3deadrec_igate.obj')
    PyTargetAdd('direct.pyd', input='libp3interval_igate.obj')
    PyTargetAdd('direct.pyd', input='libp3distributed_igate.obj')
    PyTargetAdd('direct.pyd', input='libp3motiontrail_igate.obj')

    # These are part of direct.pyd, not libp3direct.dll, because they rely on
    # the Python libraries. If a C++ user needs these modules, we can move them
    # back and filter out the Python-specific code.
    PyTargetAdd('direct.pyd', input='p3dcparser_composite1.obj')
    PyTargetAdd('direct.pyd', input='p3dcparser_composite2.obj')
    PyTargetAdd('direct.pyd', input='p3dcparser_dcParser.obj')
    PyTargetAdd('direct.pyd', input='p3dcparser_dcLexer.obj')
    PyTargetAdd('direct.pyd', input='p3distributed_config_distributed.obj')
    PyTargetAdd('direct.pyd', input='p3distributed_cConnectionRepository.obj')
    PyTargetAdd('direct.pyd', input='p3distributed_cDistributedSmoothNodeBase.obj')

    PyTargetAdd('direct.pyd', input='direct_module.obj')
    PyTargetAdd('direct.pyd', input='libp3direct.dll')
    PyTargetAdd('direct.pyd', input='libp3interrogatedb.dll')
    PyTargetAdd('direct.pyd', input=COMMON_PANDA_LIBS)
    PyTargetAdd('direct.pyd', opts=['OPENSSL', 'WINUSER', 'WINGDI', 'WINSOCK2'])

#
# DIRECTORY: direct/src/dcparse/
#

if (PkgSkip("PYTHON")==0 and PkgSkip("DIRECT")==0 and not RTDIST and not RUNTIME):
    # Standalone dcparse tool; links the dcparser objects directly.
    OPTS=['DIR:direct/src/dcparse', 'DIR:direct/src/dcparser', 'WITHINPANDA', 'ADVAPI']
    PyTargetAdd('dcparse_dcparse.obj', opts=OPTS, input='dcparse.cxx')
    PyTargetAdd('p3dcparse.exe', input='p3dcparser_composite1.obj')
    PyTargetAdd('p3dcparse.exe', input='p3dcparser_composite2.obj')
    PyTargetAdd('p3dcparse.exe', input='p3dcparser_dcParser.obj')
    PyTargetAdd('p3dcparse.exe', input='p3dcparser_dcLexer.obj')
    PyTargetAdd('p3dcparse.exe', input='dcparse_dcparse.obj')
    PyTargetAdd('p3dcparse.exe', input='libp3direct.dll')
    PyTargetAdd('p3dcparse.exe', input=COMMON_PANDA_LIBS)
    PyTargetAdd('p3dcparse.exe', input='libp3pystub.lib')
    PyTargetAdd('p3dcparse.exe', opts=['ADVAPI'])
#
# DIRECTORY: direct/src/plugin/
#

if (RTDIST or RUNTIME):
    # Explicitly define this as we don't include dtool_config.h here.
    if GetTarget() not in ('windows', 'darwin'):
        DefSymbol("RUNTIME", "HAVE_X11", "1")
    # Compile the plugin sources shared by the runtime and the rtdist builds.
    OPTS=['DIR:direct/src/plugin', 'BUILDING:P3D_PLUGIN', 'RUNTIME', 'OPENSSL']
    TargetAdd('plugin_common.obj', opts=OPTS, input='plugin_common_composite1.cxx')

    OPTS += ['ZLIB', 'MSIMG']
    TargetAdd('plugin_plugin.obj', opts=OPTS, input='p3d_plugin_composite1.cxx')
    TargetAdd('plugin_mkdir_complete.obj', opts=OPTS, input='mkdir_complete.cxx')
    TargetAdd('plugin_wstring_encode.obj', opts=OPTS, input='wstring_encode.cxx')
    TargetAdd('plugin_parse_color.obj', opts=OPTS, input='parse_color.cxx')
    TargetAdd('plugin_get_twirl_data.obj', opts=OPTS, input='get_twirl_data.cxx')
    TargetAdd('plugin_find_root_dir.obj', opts=OPTS, input='find_root_dir.cxx')
    if GetTarget() == 'darwin':
        # Objective-C++ helper only exists on macOS.
        TargetAdd('plugin_find_root_dir_assist.obj', opts=OPTS, input='find_root_dir_assist.mm')
    TargetAdd('plugin_binaryXml.obj', opts=OPTS, input='binaryXml.cxx')
    TargetAdd('plugin_fileSpec.obj', opts=OPTS, input='fileSpec.cxx')
    TargetAdd('plugin_handleStream.obj', opts=OPTS, input='handleStream.cxx')
    TargetAdd('plugin_handleStreamBuf.obj', opts=OPTS, input='handleStreamBuf.cxx')

    if (RTDIST):
        # Build both the dynamic plugin and the static archive from the same
        # set of objects.
        for fname in ("p3d_plugin.dll", "libp3d_plugin_static.ilb"):
            TargetAdd(fname, input='plugin_plugin.obj')
            TargetAdd(fname, input='plugin_mkdir_complete.obj')
            TargetAdd(fname, input='plugin_wstring_encode.obj')
            TargetAdd(fname, input='plugin_parse_color.obj')
            TargetAdd(fname, input='plugin_find_root_dir.obj')
            if GetTarget() == 'darwin':
                TargetAdd(fname, input='plugin_find_root_dir_assist.obj')
            TargetAdd(fname, input='plugin_fileSpec.obj')
            TargetAdd(fname, input='plugin_binaryXml.obj')
            TargetAdd(fname, input='plugin_handleStream.obj')
            TargetAdd(fname, input='plugin_handleStreamBuf.obj')
            TargetAdd(fname, input='libp3tinyxml.ilb')
            if GetTarget() == 'darwin':
                TargetAdd(fname, input='libp3subprocbuffer.ilb')
            TargetAdd(fname, opts=['OPENSSL', 'ZLIB', 'X11', 'ADVAPI', 'WINUSER', 'WINGDI', 'WINSHELL', 'WINCOMCTL', 'WINOLE', 'MSIMG'])
        # Only the static archive additionally gets the twirl-data object.
        TargetAdd("libp3d_plugin_static.ilb", input='plugin_get_twirl_data.obj')

    if (PkgSkip("PYTHON")==0 and RTDIST):
        # Freeze VFSImporter and its dependency modules into p3dpython.
        # Mark panda3d.core as a dependency to make sure to build that first.
        TargetAdd('p3dpython_frozen.obj', input='VFSImporter.py', opts=['DIR:direct/src/showbase', 'FREEZE_STARTUP'])
        TargetAdd('p3dpython_frozen.obj', dep='core.pyd')

        OPTS += ['PYTHON']
        TargetAdd('p3dpython_p3dpython_composite1.obj', opts=OPTS, input='p3dpython_composite1.cxx')
        TargetAdd('p3dpython_p3dPythonMain.obj', opts=OPTS, input='p3dPythonMain.cxx')
        TargetAdd('p3dpython.exe', input='p3dpython_p3dpython_composite1.obj')
        TargetAdd('p3dpython.exe', input='p3dpython_p3dPythonMain.obj')
        TargetAdd('p3dpython.exe', input='p3dpython_frozen.obj')
        TargetAdd('p3dpython.exe', input=COMMON_PANDA_LIBS)
        TargetAdd('p3dpython.exe', input='libp3tinyxml.ilb')
        TargetAdd('p3dpython.exe', input='libp3interrogatedb.dll')
        TargetAdd('p3dpython.exe', opts=['PYTHON', 'WINUSER'])

        TargetAdd('libp3dpython.dll', input='p3dpython_p3dpython_composite1.obj')
        TargetAdd('libp3dpython.dll', input='p3dpython_frozen.obj')
        TargetAdd('libp3dpython.dll', input=COMMON_PANDA_LIBS)
        TargetAdd('libp3dpython.dll', input='libp3tinyxml.ilb')
        TargetAdd('libp3dpython.dll', input='libp3interrogatedb.dll')
        TargetAdd('libp3dpython.dll', opts=['PYTHON', 'WINUSER'])

        if GetTarget() == 'windows':
            # Windows also gets a non-console (GUI-subsystem) variant.
            DefSymbol("NON_CONSOLE", "NON_CONSOLE", "")
            OPTS.append("NON_CONSOLE")
            TargetAdd('p3dpythonw_p3dpython_composite1.obj', opts=OPTS, input='p3dpython_composite1.cxx')
            TargetAdd('p3dpythonw_p3dPythonMain.obj', opts=OPTS, input='p3dPythonMain.cxx')
            TargetAdd('p3dpythonw.exe', input='p3dpythonw_p3dpython_composite1.obj')
            TargetAdd('p3dpythonw.exe', input='p3dpythonw_p3dPythonMain.obj')
            TargetAdd('p3dpythonw.exe', input='p3dpython_frozen.obj')
            TargetAdd('p3dpythonw.exe', input=COMMON_PANDA_LIBS)
            TargetAdd('p3dpythonw.exe', input='libp3tinyxml.ilb')
            TargetAdd('p3dpythonw.exe', input='libp3interrogatedb.dll')
            TargetAdd('p3dpythonw.exe', opts=['SUBSYSTEM:WINDOWS', 'WINUSER'])

    # NOTE(review): the "and False" disables this p3dcert branch entirely —
    # presumably deliberate; confirm before removing.
    if (PkgSkip("OPENSSL")==0 and RTDIST and False):
        OPTS=['DIR:direct/src/plugin', 'DIR:panda/src/express', 'OPENSSL']
        if GetTarget() == 'darwin':
            OPTS += ['OPT:2']
        if (PkgSkip("FLTK")==0):
            OPTS.append("FLTK")
            TargetAdd('plugin_p3dCert.obj', opts=OPTS, input='p3dCert.cxx')
            TargetAdd('plugin_p3dCert_strings.obj', opts=OPTS, input='p3dCert_strings.cxx')
            TargetAdd('p3dcert.exe', input='plugin_mkdir_complete.obj')
            TargetAdd('p3dcert.exe', input='plugin_wstring_encode.obj')
            TargetAdd('p3dcert.exe', input='plugin_p3dCert.obj')
            TargetAdd('p3dcert.exe', input='plugin_p3dCert_strings.obj')
            OPTS=['SUBSYSTEM:WINDOWS', 'OPENSSL', 'FLTK', 'X11', 'WINCOMCTL', 'WINSOCK', 'WINGDI', 'WINUSER', 'ADVAPI', 'WINOLE', 'WINSHELL', 'SUBSYSTEM:WINDOWS']
            if GetTarget() == 'darwin':
                OPTS += ['OPT:2']
            TargetAdd('p3dcert.exe', opts=OPTS)
        elif (PkgSkip("WX")==0):
            # Fallback UI toolkit: wxWidgets instead of FLTK.
            OPTS += ["WX", "RTTI"]
            TargetAdd('plugin_p3dCert.obj', opts=OPTS, input='p3dCert_wx.cxx')
            TargetAdd('p3dcert.exe', input='plugin_mkdir_complete.obj')
            TargetAdd('p3dcert.exe', input='plugin_wstring_encode.obj')
            TargetAdd('p3dcert.exe', input='plugin_p3dCert.obj')
            OPTS=['SUBSYSTEM:WINDOWS', 'OPENSSL', 'WX', 'CARBON', 'WINOLE', 'WINOLEAUT', 'WINUSER', 'ADVAPI', 'WINSHELL', 'WINCOMCTL', 'WINGDI', 'WINCOMDLG']
            if GetTarget() == "darwin":
                OPTS += ['GL', 'OPT:2']
            TargetAdd('p3dcert.exe', opts=OPTS)
#
# DIRECTORY: direct/src/plugin_npapi/
#

if RUNTIME:
    # Browser (NPAPI) plugin build.
    OPTS=['DIR:direct/src/plugin_npapi', 'RUNTIME', 'GTK2']
    if GetTarget() == 'windows':
        # Version-resource metadata embedded into the Windows DLL.
        nppanda3d_rc = {"name" : "Panda3D Game Engine Plug-in",
                        "version" : VERSION,
                        "description" : "Runs 3-D games and interactive applets",
                        "filename" : "nppanda3d.dll",
                        "mimetype" : "application/x-panda3d",
                        "extension" : "p3d",
                        "filedesc" : "Panda3D applet"}
        TargetAdd('nppanda3d.res', opts=OPTS, winrc=nppanda3d_rc)
    elif GetTarget() == 'darwin':
        TargetAdd('nppanda3d.rsrc', opts=OPTS, input='nppanda3d.r')

    OPTS += ['GTK2']
    TargetAdd('plugin_npapi_nppanda3d_composite1.obj', opts=OPTS, input='nppanda3d_composite1.cxx')

    TargetAdd('nppanda3d.plugin', input='plugin_common.obj')
    TargetAdd('nppanda3d.plugin', input='plugin_parse_color.obj')
    TargetAdd('nppanda3d.plugin', input='plugin_get_twirl_data.obj')
    TargetAdd('nppanda3d.plugin', input='plugin_wstring_encode.obj')
    TargetAdd('nppanda3d.plugin', input='plugin_npapi_nppanda3d_composite1.obj')
    if GetTarget() == 'windows':
        TargetAdd('nppanda3d.plugin', input='nppanda3d.res')
        TargetAdd('nppanda3d.plugin', input='nppanda3d.def', ipath=OPTS)
    elif GetTarget() == 'darwin':
        TargetAdd('nppanda3d.plugin', input='nppanda3d.rsrc')
        TargetAdd('nppanda3d.plugin', input='nppanda3d.plist', ipath=OPTS)
        TargetAdd('nppanda3d.plugin', input='plugin_find_root_dir_assist.obj')
    TargetAdd('nppanda3d.plugin', input='libp3tinyxml.ilb')
    TargetAdd('nppanda3d.plugin', opts=['OPENSSL', 'WINGDI', 'WINUSER', 'WINSHELL', 'WINOLE', 'CARBON'])

#
# DIRECTORY: direct/src/plugin_activex/
#

if (RUNTIME and GetTarget() == 'windows' and PkgSkip("MFC")==0):
    # ActiveX (Internet Explorer) plugin; Windows + MFC only.
    OPTS=['DIR:direct/src/plugin_activex', 'RUNTIME', 'ACTIVEX', 'MFC']
    DefSymbol('ACTIVEX', '_USRDLL', '')
    DefSymbol('ACTIVEX', '_WINDLL', '')
    DefSymbol('ACTIVEX', '_AFXDLL', '')
    DefSymbol('ACTIVEX', '_MBCS', '')
    TargetAdd('P3DActiveX.tlb', opts=OPTS, input='P3DActiveX.idl')
    TargetAdd('P3DActiveX.res', opts=OPTS, input='P3DActiveX.rc')
    TargetAdd('plugin_activex_p3dactivex_composite1.obj', opts=OPTS, input='p3dactivex_composite1.cxx')
    TargetAdd('p3dactivex.ocx', input='plugin_common.obj')
    TargetAdd('p3dactivex.ocx', input='plugin_parse_color.obj')
    TargetAdd('p3dactivex.ocx', input='plugin_get_twirl_data.obj')
    TargetAdd('p3dactivex.ocx', input='plugin_wstring_encode.obj')
    TargetAdd('p3dactivex.ocx', input='plugin_activex_p3dactivex_composite1.obj')
    TargetAdd('p3dactivex.ocx', input='P3DActiveX.res')
    TargetAdd('p3dactivex.ocx', input='P3DActiveX.def', ipath=OPTS)
    TargetAdd('p3dactivex.ocx', input='libp3tinyxml.ilb')
    TargetAdd('p3dactivex.ocx', opts=['MFC', 'WINSOCK2', 'OPENSSL', 'WINGDI', 'WINUSER'])
#
# DIRECTORY: direct/src/plugin_standalone/
#

if (RUNTIME):
    # Standalone "panda3d" launcher executable.
    OPTS=['DIR:direct/src/plugin_standalone', 'RUNTIME', 'OPENSSL']
    TargetAdd('plugin_standalone_panda3d.obj', opts=OPTS, input='panda3d.cxx')
    TargetAdd('plugin_standalone_panda3dBase.obj', opts=OPTS, input='panda3dBase.cxx')

    if GetTarget() == 'windows':
        # Version-resource metadata embedded into panda3d.exe.
        panda3d_rc = {"name" : "Panda3D Game Engine Plug-in",
                      "version" : VERSION,
                      "description" : "Runs 3-D games and interactive applets",
                      "filename" : "panda3d.exe",
                      "mimetype" : "application/x-panda3d",
                      "extension" : "p3d",
                      "filedesc" : "Panda3D applet",
                      "icon" : "panda3d.ico"}
        TargetAdd('panda3d.res', opts=OPTS, winrc=panda3d_rc)

    TargetAdd('plugin_standalone_panda3dMain.obj', opts=OPTS, input='panda3dMain.cxx')
    TargetAdd('panda3d.exe', input='plugin_standalone_panda3d.obj')
    TargetAdd('panda3d.exe', input='plugin_standalone_panda3dMain.obj')
    TargetAdd('panda3d.exe', input='plugin_standalone_panda3dBase.obj')
    TargetAdd('panda3d.exe', input='plugin_common.obj')
    TargetAdd('panda3d.exe', input='plugin_wstring_encode.obj')
    if GetTarget() == 'darwin':
        TargetAdd('panda3d.exe', input='plugin_find_root_dir_assist.obj')
    elif GetTarget() == 'windows':
        TargetAdd('panda3d.exe', input='panda3d.res')
    TargetAdd('panda3d.exe', input='libpandaexpress.dll')
    TargetAdd('panda3d.exe', input='libp3dtoolconfig.dll')
    TargetAdd('panda3d.exe', input='libp3dtool.dll')
    #TargetAdd('panda3d.exe', input='libp3pystub.lib')
    TargetAdd('panda3d.exe', input='libp3tinyxml.ilb')
    TargetAdd('panda3d.exe', opts=['NOICON', 'OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'WINSHELL', 'ADVAPI', 'WINSOCK2', 'WINOLE', 'CARBON'])

    if (GetTarget() == 'darwin'):
        # macOS bundle variant of the launcher.
        TargetAdd('plugin_standalone_panda3dMac.obj', opts=OPTS, input='panda3dMac.cxx')
        TargetAdd('Panda3D.app', input='plugin_standalone_panda3d.obj')
        TargetAdd('Panda3D.app', input='plugin_standalone_panda3dMac.obj')
        TargetAdd('Panda3D.app', input='plugin_standalone_panda3dBase.obj')
        TargetAdd('Panda3D.app', input='plugin_common.obj')
        TargetAdd('Panda3D.app', input='plugin_find_root_dir_assist.obj')
        TargetAdd('Panda3D.app', input='libpandaexpress.dll')
        TargetAdd('Panda3D.app', input='libp3dtoolconfig.dll')
        TargetAdd('Panda3D.app', input='libp3dtool.dll')
        #TargetAdd('Panda3D.app', input='libp3pystub.lib')
        TargetAdd('Panda3D.app', input='libp3tinyxml.ilb')
        TargetAdd('Panda3D.app', input='panda3d_mac.plist', ipath=OPTS)
        TargetAdd('Panda3D.app', input='models/plugin_images/panda3d.icns')
        TargetAdd('Panda3D.app', opts=['OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'WINSHELL', 'ADVAPI', 'WINSOCK2', 'WINOLE', 'CARBON'])
    elif (GetTarget() == 'windows'):
        # Windows GUI-subsystem variant (no console window).
        TargetAdd('plugin_standalone_panda3dWinMain.obj', opts=OPTS, input='panda3dWinMain.cxx')
        TargetAdd('panda3dw.exe', input='plugin_standalone_panda3d.obj')
        TargetAdd('panda3dw.exe', input='plugin_standalone_panda3dWinMain.obj')
        TargetAdd('panda3dw.exe', input='plugin_standalone_panda3dBase.obj')
        TargetAdd('panda3dw.exe', input='plugin_wstring_encode.obj')
        TargetAdd('panda3dw.exe', input='plugin_common.obj')
        TargetAdd('panda3dw.exe', input='libpandaexpress.dll')
        TargetAdd('panda3dw.exe', input='libp3dtoolconfig.dll')
        TargetAdd('panda3dw.exe', input='libp3dtool.dll')
        #TargetAdd('panda3dw.exe', input='libp3pystub.lib')
        TargetAdd('panda3dw.exe', input='libp3tinyxml.ilb')
        TargetAdd('panda3dw.exe', opts=['SUBSYSTEM:WINDOWS', 'OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'WINSHELL', 'ADVAPI', 'WINSOCK2', 'WINOLE', 'CARBON'])

if (RTDIST):
    # p3dembed: statically self-contained embedder; recompiles the needed
    # dtool/panda sources directly into the executable.
    OPTS=['BUILDING:P3D_PLUGIN', 'DIR:direct/src/plugin_standalone', 'DIR:direct/src/plugin', 'DIR:dtool/src/dtoolbase', 'DIR:dtool/src/dtoolutil', 'DIR:dtool/src/pystub', 'DIR:dtool/src/prc', 'DIR:dtool/src/dconfig', 'DIR:panda/src/express', 'DIR:panda/src/downloader', 'RUNTIME', 'P3DEMBED', 'OPENSSL', 'ZLIB']
    # This is arguably a big fat ugly hack, but doing it otherwise would complicate the build process considerably.
    DefSymbol("P3DEMBED", "LINK_ALL_STATIC", "")
    TargetAdd('plugin_standalone_panda3dBase.obj', opts=OPTS, input='panda3dBase.cxx')
    TargetAdd('plugin_standalone_p3dEmbedMain.obj', opts=OPTS, input='p3dEmbedMain.cxx')
    TargetAdd('plugin_standalone_p3dEmbed.obj', opts=OPTS, input='p3dEmbed.cxx')
    #TargetAdd('plugin_standalone_pystub.obj', opts=OPTS, input='pystub.cxx')
    TargetAdd('plugin_standalone_dtoolbase_composite1.obj', opts=OPTS, input='p3dtoolbase_composite1.cxx')
    TargetAdd('plugin_standalone_dtoolbase_composite2.obj', opts=OPTS, input='p3dtoolbase_composite2.cxx')
    TargetAdd('plugin_standalone_lookup3.obj', opts=OPTS, input='lookup3.c')
    TargetAdd('plugin_standalone_indent.obj', opts=OPTS, input='indent.cxx')
    TargetAdd('plugin_standalone_dtoolutil_composite1.obj', opts=OPTS, input='p3dtoolutil_composite1.cxx')
    TargetAdd('plugin_standalone_dtoolutil_composite2.obj', opts=OPTS, input='p3dtoolutil_composite2.cxx')
    if (GetTarget() == 'darwin'):
        TargetAdd('plugin_standalone_dtoolutil_filename_assist.obj', opts=OPTS, input='filename_assist.mm')
    TargetAdd('plugin_standalone_prc_composite1.obj', opts=OPTS, input='p3prc_composite1.cxx')
    TargetAdd('plugin_standalone_prc_composite2.obj', opts=OPTS, input='p3prc_composite2.cxx')
    TargetAdd('plugin_standalone_express_composite1.obj', opts=OPTS, input='p3express_composite1.cxx')
    TargetAdd('plugin_standalone_express_composite2.obj', opts=OPTS, input='p3express_composite2.cxx')
    TargetAdd('plugin_standalone_downloader_composite1.obj', opts=OPTS, input='p3downloader_composite1.cxx')
    TargetAdd('plugin_standalone_downloader_composite2.obj', opts=OPTS, input='p3downloader_composite2.cxx')
    TargetAdd('p3dembed.exe', input='plugin_standalone_panda3dBase.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_p3dEmbedMain.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_p3dEmbed.obj')
    #TargetAdd('p3dembed.exe', input='plugin_standalone_pystub.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolbase_composite1.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolbase_composite2.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_lookup3.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_indent.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolutil_composite1.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolutil_composite2.obj')
    if GetTarget() == 'darwin':
        TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolutil_filename_assist.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_prc_composite1.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_prc_composite2.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_express_composite1.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_express_composite2.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_downloader_composite1.obj')
    TargetAdd('p3dembed.exe', input='plugin_standalone_downloader_composite2.obj')
    TargetAdd('p3dembed.exe', input='plugin_common.obj')
    if GetTarget() == 'darwin':
        TargetAdd('p3dembed.exe', input='plugin_find_root_dir_assist.obj')
        TargetAdd('p3dembed.exe', input='libp3subprocbuffer.ilb')
    TargetAdd('p3dembed.exe', input='libp3tinyxml.ilb')
    TargetAdd('p3dembed.exe', input='libp3d_plugin_static.ilb')
    TargetAdd('p3dembed.exe', opts=['NOICON', 'WINGDI', 'WINSOCK2', 'ZLIB', 'WINUSER', 'OPENSSL', 'WINOLE', 'CARBON', 'MSIMG', 'WINCOMCTL', 'ADVAPI', 'WINSHELL', 'X11'])

    if GetTarget() == 'windows':
        # Windows GUI-subsystem variant, built from the same sources with
        # the P3DEMBEDW symbol defined.
        OPTS.append("P3DEMBEDW")
        DefSymbol("P3DEMBEDW", "P3DEMBEDW", "")
        TargetAdd('plugin_standalone_p3dEmbedWinMain.obj', opts=OPTS, input='p3dEmbedMain.cxx')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_panda3dBase.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_p3dEmbedWinMain.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_p3dEmbed.obj')
        #TargetAdd('p3dembedw.exe', input='plugin_standalone_pystub.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolbase_composite1.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolbase_composite2.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_lookup3.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_indent.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolutil_composite1.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolutil_composite2.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_prc_composite1.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_prc_composite2.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_express_composite1.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_express_composite2.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_downloader_composite1.obj')
        TargetAdd('p3dembedw.exe', input='plugin_standalone_downloader_composite2.obj')
        TargetAdd('p3dembedw.exe', input='plugin_common.obj')
        TargetAdd('p3dembedw.exe', input='libp3tinyxml.ilb')
        TargetAdd('p3dembedw.exe', input='libp3d_plugin_static.ilb')
        TargetAdd('p3dembedw.exe', opts=['SUBSYSTEM:WINDOWS', 'NOICON', 'WINGDI', 'WINSOCK2', 'ZLIB', 'WINUSER', 'OPENSSL', 'WINOLE', 'MSIMG', 'WINCOMCTL', 'ADVAPI', 'WINSHELL'])
#
# DIRECTORY: pandatool/src/pandatoolbase/
#

if (PkgSkip("PANDATOOL")==0):
    OPTS=['DIR:pandatool/src/pandatoolbase']
    TargetAdd('p3pandatoolbase_composite1.obj', opts=OPTS, input='p3pandatoolbase_composite1.cxx')
    TargetAdd('libp3pandatoolbase.lib', input='p3pandatoolbase_composite1.obj')

#
# DIRECTORY: pandatool/src/converter/
#

if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
    OPTS=['DIR:pandatool/src/converter']
    TargetAdd('p3converter_somethingToEggConverter.obj', opts=OPTS, input='somethingToEggConverter.cxx')
    TargetAdd('p3converter_eggToSomethingConverter.obj', opts=OPTS, input='eggToSomethingConverter.cxx')
    TargetAdd('libp3converter.lib', input='p3converter_somethingToEggConverter.obj')
    TargetAdd('libp3converter.lib', input='p3converter_eggToSomethingConverter.obj')

#
# DIRECTORY: pandatool/src/progbase/
#

if not PkgSkip("PANDATOOL"):
    OPTS=['DIR:pandatool/src/progbase', 'ZLIB']
    TargetAdd('p3progbase_composite1.obj', opts=OPTS, input='p3progbase_composite1.cxx')
    TargetAdd('libp3progbase.lib', input='p3progbase_composite1.obj')

#
# DIRECTORY: pandatool/src/eggbase/
#

if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
    OPTS=['DIR:pandatool/src/eggbase']
    TargetAdd('p3eggbase_composite1.obj', opts=OPTS, input='p3eggbase_composite1.cxx')
    TargetAdd('libp3eggbase.lib', input='p3eggbase_composite1.obj')

#
# DIRECTORY: pandatool/src/bam/
#

if not PkgSkip("PANDATOOL"):
    OPTS=['DIR:pandatool/src/bam']
    TargetAdd('bam-info_bamInfo.obj', opts=OPTS, input='bamInfo.cxx')
    TargetAdd('bam-info.exe', input='bam-info_bamInfo.obj')
    TargetAdd('bam-info.exe', input='libp3progbase.lib')
    TargetAdd('bam-info.exe', input='libp3pandatoolbase.lib')
    TargetAdd('bam-info.exe', input=COMMON_PANDA_LIBS)
    TargetAdd('bam-info.exe', opts=['ADVAPI', 'FFTW'])

    # The egg converters additionally require the EGG package.
    if not PkgSkip("EGG"):
        TargetAdd('bam2egg_bamToEgg.obj', opts=OPTS, input='bamToEgg.cxx')
        TargetAdd('bam2egg.exe', input='bam2egg_bamToEgg.obj')
        TargetAdd('bam2egg.exe', input=COMMON_EGG2X_LIBS)
        TargetAdd('bam2egg.exe', opts=['ADVAPI', 'FFTW'])

        TargetAdd('egg2bam_eggToBam.obj', opts=OPTS, input='eggToBam.cxx')
        TargetAdd('egg2bam.exe', input='egg2bam_eggToBam.obj')
        TargetAdd('egg2bam.exe', input=COMMON_EGG2X_LIBS)
        TargetAdd('egg2bam.exe', opts=['ADVAPI', 'FFTW'])

#
# DIRECTORY: pandatool/src/cvscopy/
#

if not PkgSkip("PANDATOOL"):
    OPTS=['DIR:pandatool/src/cvscopy']
    TargetAdd('p3cvscopy_composite1.obj', opts=OPTS, input='p3cvscopy_composite1.cxx')
    TargetAdd('libp3cvscopy.lib', input='p3cvscopy_composite1.obj')

#
# DIRECTORY: pandatool/src/daeegg/
#

if not PkgSkip("PANDATOOL") and not PkgSkip("FCOLLADA") and not PkgSkip("EGG"):
    OPTS=['DIR:pandatool/src/daeegg', 'FCOLLADA']
    TargetAdd('p3daeegg_composite1.obj', opts=OPTS, input='p3daeegg_composite1.cxx')
    TargetAdd('libp3daeegg.lib', input='p3daeegg_composite1.obj')
    TargetAdd('libp3daeegg.lib', opts=['FCOLLADA', 'CARBON'])

#
# DIRECTORY: pandatool/src/assimp
#

if not PkgSkip("PANDATOOL") and not PkgSkip("ASSIMP"):
    OPTS=['DIR:pandatool/src/assimp', 'BUILDING:ASSIMP', 'ASSIMP', 'MODULE']
    TargetAdd('p3assimp_composite1.obj', opts=OPTS, input='p3assimp_composite1.cxx')
    TargetAdd('libp3assimp.dll', input='p3assimp_composite1.obj')
    TargetAdd('libp3assimp.dll', input=COMMON_PANDA_LIBS)
    TargetAdd('libp3assimp.dll', opts=OPTS+['ZLIB'])

#
# DIRECTORY: pandatool/src/daeprogs/
#

if not PkgSkip("PANDATOOL") and not PkgSkip("FCOLLADA") and not PkgSkip("EGG"):
    OPTS=['DIR:pandatool/src/daeprogs', 'FCOLLADA']
    TargetAdd('dae2egg_daeToEgg.obj', opts=OPTS, input='daeToEgg.cxx')
    TargetAdd('dae2egg.exe', input='dae2egg_daeToEgg.obj')
    TargetAdd('dae2egg.exe', input='libp3daeegg.lib')
    TargetAdd('dae2egg.exe', input=COMMON_EGG2X_LIBS)
    TargetAdd('dae2egg.exe', opts=['WINUSER', 'FCOLLADA', 'CARBON'])

#
# DIRECTORY: pandatool/src/dxf/
#

if not PkgSkip("PANDATOOL"):
    OPTS=['DIR:pandatool/src/dxf']
    TargetAdd('p3dxf_composite1.obj', opts=OPTS, input='p3dxf_composite1.cxx')
    TargetAdd('libp3dxf.lib', input='p3dxf_composite1.obj')

#
# DIRECTORY: pandatool/src/dxfegg/
#

if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
    OPTS=['DIR:pandatool/src/dxfegg']
    TargetAdd('p3dxfegg_dxfToEggConverter.obj', opts=OPTS, input='dxfToEggConverter.cxx')
    TargetAdd('p3dxfegg_dxfToEggLayer.obj', opts=OPTS, input='dxfToEggLayer.cxx')
    TargetAdd('libp3dxfegg.lib', input='p3dxfegg_dxfToEggConverter.obj')
    TargetAdd('libp3dxfegg.lib', input='p3dxfegg_dxfToEggLayer.obj')
#
# DIRECTORY: pandatool/src/dxfprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/dxfprogs']
TargetAdd('dxf-points_dxfPoints.obj', opts=OPTS, input='dxfPoints.cxx')
TargetAdd('dxf-points.exe', input='dxf-points_dxfPoints.obj')
TargetAdd('dxf-points.exe', input='libp3progbase.lib')
TargetAdd('dxf-points.exe', input='libp3dxf.lib')
TargetAdd('dxf-points.exe', input='libp3pandatoolbase.lib')
TargetAdd('dxf-points.exe', input=COMMON_PANDA_LIBS)
TargetAdd('dxf-points.exe', opts=['ADVAPI', | |
Type.\n"
"It simply changes the Type set on the account to the new Type.\n"
"You should carefully review your data afterwards and revert\n"
"to a backup if you are not happy with the results....\n"
"\n",
lCancelButton=True,
OKButtonText="I AGREE - PROCEED",
lAlertLevel=2)
if not ask.go():
statusLabel.setText(("User did not say yes to FORCE change an Account's type - no changes made").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
del ask
accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(19))
accounts = sorted(accounts, key=lambda sort_x: (sort_x.getAccountType(), sort_x.getFullAccountName().upper()))
newAccounts = []
for acct in accounts:
newAccounts.append(StoreAccountList(acct))
selectedAccount = JOptionPane.showInputDialog(toolbox_frame_,
"Select the Account to FORCE change its Type",
"FORCE CHANGE ACCOUNT's TYPE",
JOptionPane.WARNING_MESSAGE,
None,
newAccounts,
None) # type: StoreAccountList
if not selectedAccount:
statusLabel.setText(("User did not Select an Account to FORCE change its Type - no changes made").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
selectedAccount = selectedAccount.obj # type: Account
if selectedAccount.getAccountType() == Account.AccountType.ROOT:
if not myPopupAskQuestion(toolbox_frame_,"FORCE CHANGE ACCOUNT TYPE","THIS ACCOUNT IS ROOT (SPECIAL). DO YOU REALLY WANT TO CHANGE IT'S TYPE (Normally a bad idea!) ?", theMessageType=JOptionPane.ERROR_MESSAGE):
statusLabel.setText(("User Aborted change of Root's Account Type (phew!) - no changes made").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
possTypes = Account.AccountType.values()
possTypes.remove(Account.AccountType.ROOT)
possTypes.remove(Account.AccountType.SECURITY)
if selectedAccount.getAccountType() in possTypes:
possTypes.remove(selectedAccount.getAccountType())
selectedType = JOptionPane.showInputDialog(toolbox_frame_,
"Select the new Account Type",
"FORCE CHANGE ACCOUNT's TYPE",
JOptionPane.WARNING_MESSAGE,
None,
possTypes,
None) # type: Account.AccountType
if not selectedType:
statusLabel.setText(("User did not Select a new Account Type - no changes made").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
if selectedType == Account.AccountType.ROOT:
if not myPopupAskQuestion(toolbox_frame_,"FORCE CHANGE ACCOUNT TYPE","DO YOU REALLY WANT TO CHANGE TO ROOT (Normally a bad idea!)?", theMessageType=JOptionPane.ERROR_MESSAGE):
statusLabel.setText(("User Aborted change Account to type Root (phew!) - no changes made").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
ask=MyPopUpDialogBox(toolbox_frame_,
theStatus="Are you sure you want to FORCE change this Account's Type?",
theTitle="FORCE CHANGE TYPE",
theMessage="Account: %s\n"
"Old Type: %s\n"
"New Type: %s\n"
%(selectedAccount.getFullAccountName(), selectedAccount.getAccountType(),selectedType), # noqa
lCancelButton=True,
OKButtonText="I AGREE - PROCEED",
lAlertLevel=2)
if not ask.go():
statusLabel.setText(("User aborted the FORCE change to an Account's type - no changes made").ljust(800, " "))
statusLabel.setForeground(Color.RED)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
if not confirm_backup_confirm_disclaimer(toolbox_frame_, statusLabel, "FORCE CHANGE TYPE", "FORCE CHANGE ACCOUNT %s TYPE to %s" %(selectedAccount.getFullAccountName(),selectedType)): # noqa
return
myPrint("B","@@ User requested to Force Change the Type of Account: %s from: %s to %s - APPLYING UPDATE NOW...."
%(selectedAccount.getFullAccountName(),selectedAccount.getAccountType(),selectedType)) # noqa
moneydance_data.setRecalcBalances(False)
moneydance_ui.setSuspendRefresh(True)
selectedAccount.setAccountType(selectedType) # noqa
selectedAccount.syncItem() # noqa
moneydance_ui.getMain().saveCurrentAccount()
moneydance_data.setRecalcBalances(True)
moneydance_ui.setSuspendRefresh(False) # This does this too: book.notifyAccountModified(root)
root = moneydance.getRootAccount()
moneydance_data.notifyAccountModified(root)
statusLabel.setText(("The Account: %s has been changed to Type: %s- PLEASE REVIEW"
%(selectedAccount.getAccountName(),selectedAccount.getAccountType())).ljust(800, " ")) # noqa
statusLabel.setForeground(Color.RED)
play_the_money_sound()
myPopupInformationBox(toolbox_frame_,"The Account: %s has been changed to Type: %s - PLEASE RESTART MD & REVIEW"
%(selectedAccount.getAccountName(),selectedAccount.getAccountType()),theMessageType=JOptionPane.ERROR_MESSAGE) # noqa
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
# noinspection PyUnresolvedReferences
def force_change_all_accounts_currencies(statusLabel):
    """Force-set one user-chosen currency onto every matching account.

    Skips the ROOT account and SECURITY sub-accounts, and does not touch
    transactions or fx rates - it only changes each account's currency.
    Outcome is reported via statusLabel (setText/setForeground) and popup
    dialogs. Returns None.
    """
    global toolbox_frame_, debug
    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
    # force_change_all_currencies.py
    # First gate: an explicit warning dialog; abort unless the user agrees.
    ask=MyPopUpDialogBox(toolbox_frame_,
                         theStatus="Are you sure you want to FORCE change ALL Account's Currencies?",
                         theTitle="FORCE CHANGE ALL ACCOUNTS' CURRENCIES",
                         theMessage="This is normally a BAD idea, unless you know you want to do it....!\n"
                                    "The typical scenario is where you have a missing currency, or need to change them all\n"
                                    "This fix will not touch the ROOT account nor Security sub-accounts (which are stocks/shares)\n"
                                    "This fix will NOT attempt to correct any transactions or fx rates etc... It simply changes the currency\n"
                                    "set on all accounts to the new currency. You should carefully review your data afterwards and revert\n"
                                    "to a backup if you are not happy with the results....\n"
                                    "\n",
                         lCancelButton=True,
                         OKButtonText="I AGREE - PROCEED",
                         lAlertLevel=2)
    if not ask.go():
        statusLabel.setText(("User did not say yes to FORCE change ALL Account's currencies - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return
    del ask
    # Candidate accounts (selected by MyAcctFilter(19)), sorted by type then full name.
    accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(19))
    accounts = sorted(accounts, key=lambda sort_x: (sort_x.getAccountType(), sort_x.getFullAccountName().upper()))
    currencies=[]
    book = moneydance.getCurrentAccountBook()
    allCurrencies = book.getCurrencies().getAllCurrencies()
    # Keep only real currency records (excludes securities etc.), sorted by name.
    for c in allCurrencies:
        if c.getCurrencyType() == CurrencyType.Type.CURRENCY:  # noqa
            currencies.append(c)
    currencies = sorted(currencies, key=lambda sort_x: (sort_x.getName().upper()))
    if len(currencies) < 1:
        # No usable currency exists in the book: create a placeholder record
        # ("AAA", 2dp) that the user is told to edit later.
        myPrint("B", "FORCE CHANGE ALL ACCOUNTS' CURRENCIES - Creating new currency record!")
        selectedCurrency = CurrencyType(book.getCurrencies())  # Creates a null:null CT record
        selectedCurrency.setName("NEW CURRENCY - PLEASE EDIT ME LATER")
        selectedCurrency.setIDString("AAA")
        selectedCurrency.setDecimalPlaces(2)
        selectedCurrency.syncItem()
        myPrint("B", "FORCE CHANGE ALL ACCOUNTS' CURRENCIES - Creating new currency: %s" %(selectedCurrency))
        myPopupInformationBox(toolbox_frame_,"FYI - I have created a new Currency %s for you (Edit me later)" %(selectedCurrency),
                              "FORCE CHANGE ALL ACCOUNTS' CURRENCIES")
    else:
        # Let the user pick the target currency from the sorted list.
        selectedCurrency = JOptionPane.showInputDialog(toolbox_frame_,
                                                       "Select a currency to assign to *ALL* accounts",
                                                       "FORCE CHANGE ALL ACCOUNT's CURRENCIES",
                                                       JOptionPane.ERROR_MESSAGE,
                                                       None,
                                                       currencies,
                                                       None)  # type: CurrencyType
        if not selectedCurrency:
            statusLabel.setText(("User did not Select a new currency for FORCE change ALL Accounts' Currencies - no changes made").ljust(800, " "))
            statusLabel.setForeground(Color.BLUE)
            myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
            return
    # Second gate: backup confirmation + disclaimer.
    if not confirm_backup_confirm_disclaimer(toolbox_frame_, statusLabel, "FORCE CHANGE ALL ACCOUNTS' CURRENCIES", "FORCE CHANGE ALL %s ACCOUNT's CURRENCIES TO %s?" %(len(accounts),selectedCurrency)):  # noqa
        return
    myPrint("B","@@ User requested to Force Change the Currency of ALL %s Accounts to %s - APPLYING UPDATE NOW...."
            %(len(accounts),selectedCurrency))  # noqa
    # Suspend balance recalculation and UI refresh for the duration of the bulk update.
    moneydance_data.setRecalcBalances(False)
    moneydance_ui.setSuspendRefresh(True)
    accountsChanged = 0
    for account in accounts:
        # Never touch ROOT or SECURITY (stock/share) accounts.
        if account.getAccountType() == Account.AccountType.ROOT:
            continue
        if account.getAccountType() == Account.AccountType.SECURITY:
            continue
        # Skip accounts already on the target currency.
        if account.getCurrencyType() == selectedCurrency:
            continue
        myPrint("B","Setting account %s to currency %s" %(account, selectedCurrency))
        account.setCurrencyType(selectedCurrency)
        account.syncItem()
        accountsChanged += 1
    # Re-enable recalcs/refresh and notify listeners that accounts changed.
    moneydance_ui.getMain().saveCurrentAccount()
    moneydance_data.setRecalcBalances(True)
    moneydance_ui.setSuspendRefresh(False)  # This does this too: book.notifyAccountModified(root)
    root = moneydance.getRootAccount()
    moneydance_data.notifyAccountModified(root)
    statusLabel.setText(("FORCE CHANGE ALL ACCOUNTS' CURRENCIES: %s Accounts changed to currency: %s - PLEASE RESTART MD & REVIEW"
                         %(accountsChanged,selectedCurrency)).ljust(800, " "))  # noqa
    statusLabel.setForeground(Color.RED)
    myPrint("B", "FORCE CHANGE ALL ACCOUNTS' CURRENCIES: %s Accounts changed to currency: %s - PLEASE RESTART MD & REVIEW"
            %(accountsChanged,selectedCurrency))
    play_the_money_sound()
    myPopupInformationBox(toolbox_frame_,"%s Accounts changed to currency: %s - PLEASE RESTART MD & REVIEW"
                          %(accountsChanged,selectedCurrency),theMessageType=JOptionPane.ERROR_MESSAGE)  # noqa
    myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
    return
def fix_invalid_relative_currency_rates(statusLabel):
    """Reset invalid currency relative rates (<= 0 or > 9999999999) to 1.0.

    Two passes: a read-only scan that builds a report, then - after user
    confirmation and a backup/disclaimer gate - a write pass that resets
    each offending rate. Outcome is shown in QuickJFrame reports, popups
    and statusLabel (setText/setForeground). Returns None.
    """
    global toolbox_frame_, debug
    myPrint(u"D", u"In ", inspect.currentframe().f_code.co_name, u"()")
    if moneydance_data is None: return
    book = moneydance.getCurrentAccountBook()
    currencies = book.getCurrencies().getAllCurrencies()
    currencies = sorted(currencies, key=lambda sort_x: (sort_x.getCurrencyType(),sort_x.getName().upper()))
    output=u"FIX INVALID RELATIVE CURRENCIES\n" \
           u" ==============================\n\n"
    upperLimit = 9999999999
    iErrors = 0
    # Pass 1 (read-only): count and list the out-of-range relative rates.
    for curr in currencies:
        if curr.getRelativeRate() <= 0 or curr.getRelativeRate() > upperLimit:
            iErrors += 1
            output += u"Invalid - Type: %s Name: %s Relative Rate: %s\n" %(curr.getCurrencyType(),pad(curr.getName(),25),rpad(curr.getRelativeRate(),20))
    if iErrors < 1:
        # Nothing to fix: report and bail out without changes.
        statusLabel.setText((u"FIX INVALID REL CURR RATES: You have no relative rates <0 or >%s to fix - NO CHANGES MADE" %upperLimit).ljust(800, u" "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,u"You have no relative rates <0 or >%s to fix - NO CHANGES MADE" %upperLimit,u"FIX INVALID REL CURR RATES")
        return
    # Show the scan report, then ask for explicit confirmation.
    jif=QuickJFrame(u"FIX INVALID RELATIVE CURRENCIES",output).show_the_frame()
    # force_change_account_currency.py
    ask=MyPopUpDialogBox(jif,
                         theStatus=u"Are you sure you want to FIX these %s INVALID RELATIVE CURRENCIES?" %iErrors,
                         theTitle=u"FIX INVALID RELATIVE CURRENCIES",
                         theMessage=u"Do not proceed unless you know you want to do this....!\n"
                                    u"This fix will NOT attempt to correct any transactions or fx rates etc... It simply changes the relative rate(s)\n"
                                    u"You should carefully review your data afterwards and revert to a backup if you are not happy with the results....\n",
                         lCancelButton=True,
                         OKButtonText=u"I AGREE - PROCEED",
                         lAlertLevel=2)
    if not ask.go():
        statusLabel.setText((u"User did not say yes to fix invalid relative currencies - no changes made").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,u"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return
    del ask
    # Second gate: backup confirmation + disclaimer.
    if not confirm_backup_confirm_disclaimer(jif, statusLabel, u"FIX INVALID RELATIVE CURR RATES", u"FIX %s INVALID RELATIVE CURRENCY RATES" %(iErrors)):
        return
    jif.dispose()
    myPrint(u"B",u"@@ User requested to fix %s invalid relative currency rates - APPLYING UPDATE NOW...." %(iErrors) )
    output += u"\n\n APPLYING FIXES\n" \
              u" ==============\n\n"
    # Pass 2 (write): reset each invalid rate to 1.0 with balance
    # recalculation and UI refresh suspended.
    moneydance_data.setRecalcBalances(False)
    moneydance_ui.setSuspendRefresh(True)
    for curr in currencies:
        if curr.getRelativeRate() <= 0 or curr.getRelativeRate() > upperLimit:
            output += u"FIXING >> Invalid - Type: %s Name: %s Relative Rate: %s - RESET TO 1.0\n" %(curr.getCurrencyType(),pad(curr.getName(),25),rpad(curr.getRelativeRate(),20))
            myPrint(u"B", u"FIXING >> Invalid - Type: %s Name: %s Relative Rate: %s - RESET TO 1.0" %(curr.getCurrencyType(),pad(curr.getName(),25),rpad(curr.getRelativeRate(),20)))
            curr.setRelativeRate(1.0)
            curr.syncItem()
    myPrint(u"P", output)
    moneydance_ui.getMain().saveCurrentAccount()
    moneydance_data.setRecalcBalances(True)
    moneydance_ui.setSuspendRefresh(False)  # This does this too: book.notifyAccountModified(root)
    # Re-show the report, now including the applied fixes.
    jif=QuickJFrame(u"FIX INVALID RELATIVE CURRENCIES",output).show_the_frame()
    statusLabel.setText((u"FIX INVALID RELATIVE CURRENCIES: %s Invalid Currency relative rates have been reset to 1.0 - PLEASE REVIEW" %(iErrors)).ljust(800, u" "))  # noqa
    statusLabel.setForeground(Color.RED)
    play_the_money_sound()
    myPopupInformationBox(jif,u"%s Invalid Currency relative rates have been reset to 1.0 - PLEASE RESTART MD & REVIEW" %(iErrors),
                          u"FIX INVALID RELATIVE CURRENCIES",
                          theMessageType=JOptionPane.ERROR_MESSAGE)  # noqa
    myPrint(u"D", u"Exiting ", inspect.currentframe().f_code.co_name, u"()")
    return
def force_change_account_currency(statusLabel):
global toolbox_frame_, | |
# [Target 3] [Optimal input pars] [-0.94248, 1.0472 , 0.34907, 1.09956, 1.72788,-1.0472 ,-0.23271, 1.06465, 0.58333]
# [Target 4] [Optimal input pars] [-1.36136, 0. ,-0.34907, 1.41372, 1.72788, 0. , 0. , 1.41372, 0.95 ]
# [Target 5] [Optimal input pars] [-0.94248,-0.11636,-0.34907, 1.41372, 1.72788,-0.34907, 0. , 1.09956, 0.58333]
# =================================================================================================== #
# =============================== Position Controller Result ================================== #
# =================================================================================================== #
# For tolerance 0.1 and nTol 15
# [REF] /Users/mosesnah/Documents/projects/whip-project-targeting/MATLAB/myData/optimization_process_3_new
# =================================================================================================== #
# [Target 1] [Optimal value] [0.03446] [idx] [318]
# [Target 1] [Optimal input pars] [-1.51650, 0, 0, 0.15708, 1.7279, 0, 0, 0.15708, 0.82778]
# [Target 2] [Optimal value] [0.10437] [idx] [440]
# [Target 2] [Optimal input pars] [-1.26830, 1.0472, -0.81449, 0.12217, 1.7279, -1.0472, -0.34907, 0.36652, 0.95000]
# [Target 3] [Optimal value] [0.14324] [idx] [376]
# [Target 3] [Optimal input pars] [-1.03560, 0.69813, -1.1636, 0.05236, 1.7279, -1.0472, -0.11636, 0.95993, 0.58333]
# [Target 4] [Optimal input pars] [-1.36136, 0. ,-0.34907, 1.41372, 1.72788, 0. , 0. , 1.41372, 0.95 ]
# [Target 5] [Optimal input pars] [-0.94248, 0. ,-1.0472 , 1.41372, 1.72788,-1.0472 , 0. , 0.47124, 0.95 ]
# [Target 6] [Optimal input pars] [-0.94248, 1.0472 , 0. , 1.41372, 1.72788,-1.0472 , 0. , 1.41372, 0.58333]
# [Target 7] [Optimal input pars] [0.135, 0, 0, 0.29, 1.88496,0. ,0. ,0.86103,0.7 ]
# [optimalOutput]: 0.04980
# =================================================================================================== #
# =============================== Gravity Compensation OFf ================================== #
# =================================================================================================== #
# [Target 1]
# [optimalInput ]: [-1.5165 , 0. ,-1.5514 , 0.57596, 2.32129, 0. , 1.20234, 0.01745, 0.89115]
# [optimalOutput]: 0.04307
# ================================================= #
# =============== For optimization ================ #
# ================================================= #
# =================================================================================================== #
# =============================== WITH WRIST ================================== #
# =================================================================================================== #
# [Target 1] [Optimal value] [0.05758] [idx] [232]
# [Target 1] [Optimal input pars] [-1.0821, 0, -0.34907, 1.4137, 1.7279, 0, -0.34907, 0.33161, 0.95]
# [Target 2] [Optimal value] [0.05071] [idx] [232]
# [Target 2] [Optimal input pars] [-1.501, 0, -0.2715, 1.4137, 1.7279, 0, 0, 0.36652, 0.95]
# =================================================================================================== #
# =============================== For Tapered Whip Model ================================== #
# =================================================================================================== #
# [Target 1] [Optimal value] [0.05638] [idx] [334]
# [Target 1] [Optimal input pars] [-0.84939, -0.03879, 0.46542, 0.05236, 1.4137, 0, -0.46542, 0.15708, 1.1537]
# When considering the whole 600 iterations
# [Target 1] [-0.8649, 0.0, 0.5042, 0.0175, 1.4021, 0.0, -0.5042,0.1920, 1.1356]
# lb = np.array( [ -0.5 * np.pi, -0.5 * np.pi, -0.5 * np.pi, 0, 0.1 * np.pi, -0.5 * np.pi, -0.5 * np.pi, 0.0, 0.4 ] ) # Defining the bound. with np array.
# ub = np.array( [ -0.1 * np.pi, 0.5 * np.pi, 0.5 * np.pi, 0.9 * np.pi, 1.0 * np.pi, 0.5 * np.pi, 0.5 * np.pi, 0.9 * np.pi, 1.5 ] ) # Defining the bound. with np array.
# n_opt = 9
# Upper/Lower bound for the simulation
lb = np.array( [ 0.1 * np.pi, -0.5 * np.pi, -0.5 * np.pi, 0.0, 0.4 ] ) # Defining the bound. with np array.
ub = np.array( [ 1.0 * np.pi, 0.5 * np.pi, 0.5 * np.pi, 0.9 * np.pi, 1.5 ] ) # Defining the bound. with np array.
n_opt = 5
# [TODO] This is for fixing the initial condition of the system
# lb = np.array( [ -np.pi/2, 0, 0, 0, 0.4 ] )
# ub = np.array( [ 0, np.pi, np.pi, np.pi, 1.2 ] )
# n_opt = 5
elif "2" == args.model_name[ 0 ]:
ctrl = JointSlidingController( mySim.mjModel, mySim.mjData, args )
ctrl.set_ctrl_par( Kl = 20 * np.identity( ctrl.n_act ) , Kd = 7 * np.identity( ctrl.n_act ) )
mov_pars = np.array( [1.72788, 0. , 0. , 1.41372,2.87979,-1.39626, 1.51262, 2.67035, 0.42037])
ctrl.traj = MinJerkTrajectory( { "pi" : mov_pars[ 0 : 4 ], "pf" : mov_pars[ 4 : 8 ], "D" : mov_pars[ -1 ] } ) # Setting the trajectory of the controller, for this case, traj = x0
obj1 = DistFromTip2Target( mySim.mjModel, mySim.mjData, args ) if "_w_" in args.model_name else None
init_cond = { 'qpos': np.array( [ 1.71907, 0., 0., 1.40283, 0.,-1.86358, 0., 0.0069 , 0., 0.00867, 0., 0.00746, 0. , 0.00527, 0. , 0.00348, 0. , 0.00286, 0. , 0.00367, 0. , 0.00582, 0. , 0.00902, 0. , 0.01283, 0. , 0.0168 , 0. , 0.02056, 0. , 0.02383, 0. , 0.02648, 0. , 0.02845, 0. , 0.02955, 0. , 0.02945, 0. , 0.02767, 0. , 0.02385, 0. , 0.01806, 0. , 0.01106, 0. , 0.00433, 0. ,-0.00027, 0. ,-0.00146] ),
'qvel': np.array( [-0.07107, 0., 0.,-0.0762 , 0.,-2.92087, 0.,-0.05708, 0.,-0.10891, 0.,-0.11822, 0.,-0.0725 , 0. , 0.02682, 0. , 0.17135, 0. , 0.34963, 0. , 0.54902, 0. , 0.75647, 0. , 0.95885, 0. , 1.14317, 0. , 1.29701, 0. , 1.40942, 0. , 1.47229, 0. , 1.48203, 0. , 1.44063, 0. , 1.35522, 0. , 1.2356 , 0. , 1.09041, 0. , 0.92418, 0. , 0.73758, 0. , 0.53229, 0. , 0.31926, 0. , 0.12636] ) }
objective = lambda : obj1.output_calc( )
# ================================================= #
# =============== For optimization ================ #
# ================================================= #
# For 3D case (9 movement parameters)
lb = np.array( [ -0.5 * np.pi, -0.5 * np.pi, -0.5 * np.pi, 0, 0.1 * np.pi, -0.5 * np.pi, -0.5 * np.pi, 0.0, 0.4 ] ) # Defining the bound. with np array.
ub = np.array( [ -0.1 * np.pi, 0.5 * np.pi, 0.5 * np.pi, 0.9 * np.pi, 1.0 * np.pi, 0.5 * np.pi, 0.5 * np.pi, 0.9 * np.pi, 1.5 ] ) # Defining the bound. with np array.
n_opt = 9 # The number of parameters that are aimed to be optimized
# [TEMP] [TODO] Setting the Initial condition for the optimization
# It might be great to have a separete function to set the controller.
# The initial condition is extracted from [REF] /Users/mosesnah/Documents/projects/whip-project-targeting/MuJoCo/results/Modularity Tasks/primitive1/data_log.txt
# [BACKUP] [<NAME>] [2021.08.03]
# For upper/lower bounds for a 2D robot (5 movement parameters)
# [TEMP] [2021.07.22]
# lb = np.array( [ -0.5 * np.pi, -0.5 * np.pi, -0.5 * np.pi, 0.0, 0.4 ] ) # Defining the bound. with np array.
# ub = np.array( [ 1.0 * np.pi, 0.5 * np.pi, 0.5 * np.pi, 0.9 * np.pi, 1.5 ] ) # Defining the bound. with np array.
elif "cart" and "pole" in args.model_name:
ctrl1 = JointImpedanceController( mySim.mjModel, mySim.mjData, args )
ctrl1.set_ctrl_par( K = 10, B = 5 )
mov_pars = np.array( [ 0.0, 0.57407, 0.64815 ] )
ctrl1.traj = MinJerkTrajectory( { "pi" : mov_pars[ 0 ], "pf" : mov_pars[ 1 ], "D" : mov_pars[ 2 ] } )
ctrl2 = JointImpedanceController( mySim.mjModel, mySim.mjData, args )
ctrl2.set_ctrl_par( K = 10, B = 5 )
mov_pars = np.array( [ 0.57407, -1.32937, 0.72403] )
ctrl2.traj = MinJerkTrajectory( { "pi" : mov_pars[ 0 ], "pf" : mov_pars[ 1 ], "D" : mov_pars[ 2 ] } )
ctrl = ControllerBinder( mySim.mjModel, mySim.mjData, args )
ctrl.add_ctrl( ctrl1 )
ctrl.add_ctrl( ctrl2, t_offset = 0.31185, width = 0.2 )
obj1 = TargetState( mySim.mjModel, mySim.mjData, args )
# Boundary needs to be defined as follows
# [1] Which variable, is it the qpos or the qvel?
# [2] For the variable defined in [1], which specific index?
# [3] target value for that value
# [4] And tolerance for that, +- [3]'s value
target1 = { "which_var" : "qpos", "idx" : 1, "target_val": np.pi, "tolerance": 0.01 }
target2 = { "which_var" : "qvel", "idx" : 1, "target_val": 0 , "tolerance": 0.01 }
obj1.set_target( target1 )
obj1.set_target( target2 )
objective = lambda : obj1.output_calc( )
init_cond = { 'qpos': np.array( [ 0.0, 0.0 ] ),
'qvel': np.array( [ 0.0, 0.0 ] ) }
# [BACKUP][-1.83887, 0.94443]
# [qPos ]: [ 0.0, 0.0 ]
# [qVel ]: [ 0.0, 0.0 ]
# target1 = { "which_var" : "qpos", "idx" : 1, "target_val": -0.25*np.pi, "tolerance": 0.01 }
# target2 = { "which_var" : "qvel", "idx" : 1, "target_val": | |
# Eve W-Space
# Copyright 2014 <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import csv
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect, HttpResponse, JsonResponse
from django.template.response import TemplateResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import Permission
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from django.views.decorators.cache import cache_page
from django.views.decorators.csrf import csrf_exempt
from Map.models import *
from Map import utils, signals
from core.utils import get_config
from core.models import ConfigEntry
# Decorator to check map permissions. Takes request and map_id
# Permissions are 0 = None, 1 = View, 2 = Change
# When used without a permission=x specification, requires Change access
def require_map_permission(permission=2):
    """
    Decorator factory that enforces map access levels on views taking
    (request, map_id, ...). Permission levels: 0 = None, 1 = View,
    2 = Change (the default). The wrapped view runs only when the user's
    permission on the map is >= the required level; otherwise
    PermissionDenied is raised. Unknown map_id yields a 404.
    """
    import functools

    def _dec(view_func):
        # functools.wraps replaces the previous manual copying of
        # __name__, __doc__ and __dict__ (and also preserves __module__).
        @functools.wraps(view_func)
        def _view(request, map_id, *args, **kwargs):
            current_map = get_object_or_404(Map, pk=map_id)
            if current_map.get_permission(request.user) < permission:
                raise PermissionDenied
            return view_func(request, map_id, *args, **kwargs)
        return _view
    return _dec
@login_required
@require_map_permission(permission=1)
def get_map(request, map_id):
    """Render the map page for map_id.

    Returns a TemplateResponse for 'map.html'. A missing map yields 404;
    insufficient access raises PermissionDenied (via the decorator).
    """
    requested_map = get_object_or_404(Map, pk=map_id)
    return TemplateResponse(request, 'map.html', {
        'map': requested_map,
        'access': requested_map.get_permission(request.user),
    })
@login_required
@require_map_permission(permission=1)
def map_checkin(request, map_id):
    """AJAX check-in: return recent map logs (and, for trusted IGB clients,
    possibly a system-add dialog) as JSON."""
    current_map = get_object_or_404(Map, pk=map_id)
    # The client POSTs the JSON datetime it last loaded ('loadtime'); we
    # return only logs newer than that instant.
    if 'loadtime' not in request.POST:
        return HttpResponse(json.dumps({'error': "No loadtime"}),
                            content_type="application/json")
    load_time = datetime.strptime(request.POST['loadtime'],
                                  "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=pytz.utc)
    payload = {}
    if request.is_igb_trusted:
        dialog_html = _checkin_igb_trusted(request, current_map)
        if dialog_html is not None:
            payload['dialogHTML'] = dialog_html
    recent_logs = MapLog.objects.filter(timestamp__gt=load_time,
                                        visible=True,
                                        map=current_map)
    payload['logs'] = render_to_string('log_div.html', {'logs': recent_logs})
    return HttpResponse(json.dumps(payload), content_type="application/json")
@login_required
@require_map_permission(permission=1)
def map_refresh(request, map_id):
    """
    Return a JSON pair [server timestamp, systemJSON] for an asynchronous
    map refresh. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    current_map = get_object_or_404(Map, pk=map_id)
    timestamp = datetime.now(pytz.utc).strftime("%Y-%m-%d %H:%M:%S.%f")
    systems_json = utils.MapJSONGenerator(current_map,
                                          request.user).get_systems_json()
    return HttpResponse(json.dumps([timestamp, systems_json]))
def _checkin_igb_trusted(request, current_map):
    """
    Runs the specific code for the case that the request came from an igb that
    trusts us. Returns None if no further action is required, the html for a
    system add dialog if a new system needs to be added, or the string
    'silent' when the system was added automatically (silent mode).
    """
    can_edit = current_map.get_permission(request.user) == 2
    current_location = (request.eve_systemid, request.eve_charname,
                        request.eve_shipname, request.eve_shiptypename)
    char_cache_key = 'char_%s_location' % request.eve_charid
    old_location = cache.get(char_cache_key)
    result = None
    current_system = get_object_or_404(System, pk=current_location[0])
    silent_map = request.POST.get('silent', 'false') == 'true'
    kspace_map = request.POST.get('kspace', 'false') == 'true'
    if old_location != current_location:
        if old_location:
            old_system = get_object_or_404(System, pk=old_location[0])
            old_system.remove_active_pilot(request.eve_charid)
        request.user.update_location(
            current_system.pk,
            request.eve_charid, request.eve_charname,
            request.eve_shipname, request.eve_shiptypename)
        cache.set(char_cache_key, current_location, 60 * 5)
        # Conditions for the system to be automagically added to the map.
        if (can_edit and
                old_location and
                old_system in current_map and
                current_system not in current_map and
                not _is_moving_from_kspace_to_kspace(
                    old_system, current_system, kspace_map)):
            context = {
                'oldsystem':
                    current_map.systems.filter(system=old_system).all()[0],
                'newsystem': current_system,
                'wormholes': utils.get_possible_wh_types(old_system,
                                                         current_system),
            }
            # BUG FIX: previously this re-parsed request.POST.get('silent')
            # instead of using the silent_map flag computed above (which was
            # otherwise unused). Behavior is identical; intent is now explicit.
            if not silent_map:
                result = render_to_string(
                    'igb_system_add_dialog.html', context,
                    context_instance=RequestContext(request))
            else:
                # Silent mode: add the system and connect it via K162
                # without prompting the user.
                new_ms = current_map.add_system(
                    request.user, current_system, '', context['oldsystem'])
                k162_type = WormholeType.objects.get(name="K162")
                new_ms.connect_to(context['oldsystem'], k162_type, k162_type)
                result = 'silent'
    else:
        cache.set(char_cache_key, current_location, 60 * 5)
        # Use add_active_pilot to refresh the user's record in the global
        # location cache
        current_system.add_active_pilot(
            request.user.username, request.eve_charid, request.eve_charname,
            request.eve_shipname, request.eve_shiptypename
        )
    return result
def _is_moving_from_kspace_to_kspace(old_system, current_system, kspace_map):
    """
    Return True when the jump is k-space -> k-space and k-space mapping is
    disabled (such moves should not be auto-mapped).

    :param old_system: system the pilot left
    :param current_system: system the pilot is now in
    :param kspace_map: when True, k-space mapping is enabled and the check
        always passes (returns False)
    :return: bool
    """
    if kspace_map:
        # K-space mapping enabled, pass the check.
        return False
    return old_system.is_kspace() and current_system.is_kspace()
def get_system_context(ms_id, user):
    """Assemble the template context shared by the system details and menu
    views for the MapSystem with pk ms_id."""
    map_system = get_object_or_404(MapSystem, pk=ms_id)
    can_edit = map_system.map.get_permission(user) == 2
    # K-space systems use the KSystem subclass record, w-space the WSystem one.
    if map_system.system.is_kspace():
        system = map_system.system.ksystem
    else:
        system = map_system.system.wsystem
    scan_threshold = datetime.now(pytz.utc) - timedelta(
        hours=int(get_config("MAP_SCAN_WARNING", None).value)
    )
    interest_offset = int(get_config("MAP_INTEREST_TIME", None).value)
    interest_threshold = (datetime.now(pytz.utc) -
                          timedelta(minutes=interest_offset))
    scan_warning = system.lastscanned < scan_threshold
    if interest_offset > 0:
        interest = (map_system.interesttime and
                    map_system.interesttime > interest_threshold)
    else:
        interest = map_system.interesttime
    # Include any SiteTracker fleets that are active.
    st_fleets = map_system.system.stfleets.filter(ended=None).all()
    locations = cache.get('sys_%s_locations' % map_system.system.pk) or {}
    return {
        'system': system,
        'mapsys': map_system,
        'scanwarning': scan_warning,
        'isinterest': interest,
        'stfleets': st_fleets,
        'locations': locations,
        'can_edit': can_edit,
        'has_siblings': map_system.has_siblings(),
    }
@login_required
@require_map_permission(permission=2)
def add_system(request, map_id):
    """
    AJAX view to add a system to a current_map. Requires POST containing:
    topMsID: map_system ID of the parent map_system
    bottomSystem: Name of the new system
    topType: WormholeType name of the parent side
    bottomType: WormholeType name of the new side
    timeStatus: Wormhole time status integer value
    massStatus: Wormhole mass status integer value
    topBubbled: 1 if Parent side bubbled
    bottomBubbled: 1 if new side bubbled
    friendlyName: Friendly name for the new map_system

    Returns 200 on success, 400 on missing/invalid POST data.
    """
    if not request.is_ajax():
        raise PermissionDenied
    try:
        # Prepare data
        current_map = Map.objects.get(pk=map_id)
        top_ms = MapSystem.objects.get(pk=request.POST.get('topMsID'))
        if request.POST.get('bottomSystem') == "Unknown":
            # Placeholder system (pk 99999999, sysclass 99) for unscouted
            # connections; created on first use.
            bottom_sys, _ = System.objects.get_or_create(
                id=99999999,
                defaults={
                    "name": "Unknown",
                    "constellation_id": 10000001,
                    "region_id": 20000001,
                    "x": 0.0,
                    "y": 0.0,
                    "z": 0.0,
                    "security": 0.0,
                    "sysclass": 99,
                }
            )
        else:
            bottom_sys = System.objects.get(
                name=request.POST.get('bottomSystem')
            )
        top_type = WormholeType.objects.get(
            name=request.POST.get('topType')
        )
        bottom_type = WormholeType.objects.get(
            name=request.POST.get('bottomType')
        )
        time_status = int(request.POST.get('timeStatus'))
        mass_status = int(request.POST.get('massStatus'))
        # Any value other than "0" counts as bubbled.
        top_bubbled = request.POST.get('topBubbled', '0') != "0"
        bottom_bubbled = request.POST.get('bottomBubbled', '0') != "0"
        # Add System
        bottom_ms = current_map.add_system(
            request.user, bottom_sys,
            request.POST.get('friendlyName'), top_ms
        )
        # Add Wormhole
        bottom_ms.connect_to(top_ms, top_type, bottom_type, top_bubbled,
                             bottom_bubbled, time_status, mass_status)
        # Delete old signatures if the user has auto-delete enabled.
        if int(get_config("MAP_AUTODELETE_SIGS", request.user).value) == 1:
            bottom_ms.delete_old_sigs(request.user)
        current_map.clear_caches()
        return HttpResponse()
    except (ObjectDoesNotExist, ValueError, TypeError):
        # ValueError/TypeError cover missing or malformed numeric POST
        # fields (e.g. int(None)); previously these bubbled up as a 500
        # instead of the intended 400 bad-request response.
        return HttpResponse(status=400)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def remove_system(request, map_id, ms_id):
    """
    Remove the supplied map_system from the map.
    """
    map_sys = get_object_or_404(MapSystem, pk=ms_id)
    map_sys.remove_system(request.user)
    return HttpResponse()
@login_required
@require_map_permission(permission=2)
def promote_system(request, map_id, ms_id):
    """
    Promote the MapSystem to map root and truncate other chains.
    Only allowed when the map's truncate_allowed flag is set.
    """
    map_obj = get_object_or_404(Map, pk=map_id)
    if not map_obj.truncate_allowed:
        raise PermissionDenied
    map_sys = get_object_or_404(MapSystem, pk=ms_id)
    map_sys.promote_system(request.user)
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_details(request, map_id, ms_id):
    """
    Returns a html div representing details of the System given by ms_id in
    map map_id. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    system = get_object_or_404(MapSystem, pk=ms_id)
    # sysclass 99 marks the "Unknown" placeholder system: show the edit
    # dialog for its incoming wormhole instead of the normal details pane.
    # NOTE: a dead lookup of the pilot's current in-game system was removed
    # here - it was computed but never used, and could raise (500) for an
    # unrecognized system name on trusted IGB requests.
    if system.system.sysclass == 99:
        wormhole = get_object_or_404(Wormhole, bottom=ms_id)
        return render(request, 'edit_unknown_system.html',
                      {'ms_id': ms_id, 'system': system, 'wormhole': wormhole})
    return render(request, 'system_details.html',
                  get_system_context(ms_id, request.user))
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_menu(request, map_id, ms_id):
    """
    Render the context menu for a map system. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    context = get_system_context(ms_id, request.user)
    return render(request, 'system_menu.html', context)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_tooltips(request, map_id):
    """
    Return the rendered system tooltips for map_id, cached for 60 seconds.
    AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    cache_key = 'map_%s_sys_tooltip' % map_id
    tips = cache.get(cache_key)
    if tips:
        return HttpResponse(tips)
    map_systems = (MapSystem.objects.filter(map_id=map_id)
                   .select_related('parent_wormhole', 'system__region')
                   .iterator())
    tips = render_to_string('system_tooltip.html',
                            {'map_systems': map_systems},
                            RequestContext(request))
    cache.set(cache_key, tips, 60)
    return HttpResponse(tips)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def wormhole_tooltips(request, map_id):
    """
    Render (and cache for 60 seconds) the wormhole tooltips for map_id.
    AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    cache_key = 'map_%s_wh_tooltip' % map_id
    tips = cache.get(cache_key)
    if tips:
        return HttpResponse(tips)
    cur_map = get_object_or_404(Map, pk=map_id)
    map_systems = MapSystem.objects.filter(map=cur_map).all()
    wormholes = Wormhole.objects.filter(top__in=map_systems).all()
    tips = render_to_string('wormhole_tooltip.html',
                            {'wormholes': wormholes},
                            RequestContext(request))
    cache.set(cache_key, tips, 60)
    return HttpResponse(tips)
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def collapse_system(request, map_id, ms_id):
    """
    Set the collapsed flag on the system's parent wormhole. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    wormhole = get_object_or_404(MapSystem, pk=ms_id).parent_wormhole
    wormhole.collapsed = True
    wormhole.save()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def resurrect_system(request, map_id, ms_id):
    """
    Clear the collapsed flag on the system's parent wormhole. AJAX-only.
    """
    if not request.is_ajax():
        raise PermissionDenied
    wormhole = get_object_or_404(MapSystem, pk=ms_id).parent_wormhole
    wormhole.collapsed = False
    wormhole.save()
    return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def mark_scanned(request, map_id, ms_id):
"""Takes a POST request from AJAX | |
kw.pop('no_move_handler', False)
# TODO: Also do '>' and '<' indent/unindent operators.
# TODO: Also "gq": text formatting
# See: :help motion.txt
def decorator(func):
    """Register the motion function as move, transform, yank, change and
    delete handlers for the captured key sequence."""
    if not no_move_handler:
        @handle(*keys, in_mode=InputMode.VI_NAVIGATION)
        @handle(*keys, in_mode=InputMode.SELECTION)
        def move(event):
            """ Create move handler. """
            region = func(event)
            line.cursor_position += region.start

    def create_transform_handler(transform_func, *a):
        @handle(*(a + keys), in_mode=InputMode.VI_NAVIGATION)
        def _(event):
            """ Apply transformation (uppercase, lowercase, rot13, swap case). """
            region = func(event)
            start, end = region.sorted()

            # Transform.
            line.transform_region(
                line.cursor_position + start,
                line.cursor_position + end,
                transform_func)

            # Move cursor
            line.cursor_position += (region.end or region.start)

    for k, f in vi_transform_functions:
        create_transform_handler(f, *k)

    @handle('y', *keys, in_mode=InputMode.VI_NAVIGATION)
    def yank_handler(event):
        """ Create yank handler. """
        region = func(event)
        start, end = region.sorted()
        substring = line.text[line.cursor_position + start: line.cursor_position + end]
        if substring:
            line.set_clipboard(ClipboardData(substring))

    def create(delete_only):
        """ Create delete and change handlers. """
        # BUG FIX: this @handle decorator line appeared twice, registering
        # the same key binding twice; it is now registered exactly once.
        @handle('cd'[delete_only], *keys, in_mode=InputMode.VI_NAVIGATION)
        def _(event):
            region = func(event)
            deleted = ''
            if region:
                start, end = region.sorted()

                # Move to the start of the region.
                line.cursor_position += start

                # Delete until end of region.
                deleted = line.delete(count=end-start)

            # Set deleted/changed text to clipboard.
            if deleted:
                line.set_clipboard(ClipboardData(''.join(deleted)))

            # Only go back to insert mode in case of 'change'.
            if not delete_only:
                event.input_processor.input_mode = InputMode.INSERT

    create(True)
    create(False)
    return func
return decorator
@change_delete_move_yank_handler('b')  # Move one word or token left.
@change_delete_move_yank_handler('B')  # Move one non-blank word left ((# TODO: difference between 'b' and 'B')
def key_b(event):
    """ 'b', 'cb', 'db', 'yb': move to the start of the previous word. """
    # `or 0` keeps the cursor in place when there is no previous word.
    return CursorRegion(line.document.find_start_of_previous_word(count=event.arg) or 0)

@change_delete_move_yank_handler('$')
def key_dollar(event):
    """ 'c$', 'd$' and '$': Delete/change/move until end of line. """
    return CursorRegion(line.document.get_end_of_line_position())

@change_delete_move_yank_handler('w')  # TODO: difference between 'w' and 'W'
def key_w(event):
    """ 'cw', 'de', 'w': Delete/change/move one word. """
    # `or 0`: stay put when there is no next word beginning.
    return CursorRegion(line.document.find_next_word_beginning(count=event.arg) or 0)

@change_delete_move_yank_handler('e')  # TODO: difference between 'e' and 'E'
def key_e(event):
    """ 'ce', 'de', 'e' """
    end = line.document.find_next_word_ending(count=event.arg)
    # -1: land on the last character of the word rather than just past it.
    return CursorRegion(end - 1 if end else 0)

@change_delete_move_yank_handler('i', 'w', no_move_handler=True)
def key_iw(event):
    """ ciw and diw """
    # Change inner word: change word under cursor.
    start, end = line.document.find_boundaries_of_current_word()
    return CursorRegion(start, end)

@change_delete_move_yank_handler('^')
def key_circumflex(event):
    """ 'c^', 'd^' and '^': Soft start of line, after whitespace. """
    return CursorRegion(line.document.get_start_of_line_position(after_whitespace=True))

@change_delete_move_yank_handler('0', no_move_handler=True)
def key_zero(event):
    """
    'c0', 'd0': Hard start of line, before whitespace.
    (The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.)
    """
    return CursorRegion(line.document.get_start_of_line_position(after_whitespace=False))
def create_ci_ca_handles(ci_start, ci_end, inner):
    # TODO: 'dab', 'dib', (brackets or block) 'daB', 'diB', Braces.
    # TODO: 'dat', 'dit', (tags (like xml)
    """
    Delete/Change string between this start and stop character. But keep these characters.
    This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations.
    """
    @change_delete_move_yank_handler('ai'[inner], ci_start, no_move_handler=True)
    @change_delete_move_yank_handler('ai'[inner], ci_end, no_move_handler=True)
    def _(event):
        # find_backwards yields a negative offset to ci_start; find yields a
        # positive offset to ci_end (both restricted to the current line).
        start = line.document.find_backwards(ci_start, in_current_line=True)
        end = line.document.find(ci_end, in_current_line=True)

        if start is not None and end is not None:
            # 'inner' (offset 0) excludes the delimiters; 'around' (offset 1)
            # widens the region by one character on each side to include them.
            offset = 0 if inner else 1
            return CursorRegion(start + 1 - offset, end + offset)
        # NOTE: implicitly returns None when either delimiter is missing.

# Register both the 'inner' (i) and 'around' (a) variants for every
# supported delimiter pair.
for inner in (False, True):
    for ci_start, ci_end in [('"', '"'), ("'", "'"), ("`", "`"),
                             ('[', ']'), ('<', '>'), ('{', '}'), ('(', ')')]:
        create_ci_ca_handles(ci_start, ci_end, inner)
@change_delete_move_yank_handler('{')  # TODO: implement 'arg'
def _(event):
    """
    Move to previous blank-line separated section.
    Implements '{', 'c{', 'd{', 'y{'
    """
    blank_line_offset = line.document.find_previous_matching_line(
        lambda text: not text or text.isspace())
    cursor_offset = 0
    if blank_line_offset:
        cursor_offset = line.document.get_cursor_up_position(count=-blank_line_offset)
    return CursorRegion(cursor_offset)
@change_delete_move_yank_handler('}')  # TODO: implement 'arg'
def _(event):
    """
    Move to next blank-line separated section.
    Implements '}', 'c}', 'd}', 'y}'
    """
    blank_line_offset = line.document.find_next_matching_line(
        lambda text: not text or text.isspace())
    cursor_offset = 0
    if blank_line_offset:
        cursor_offset = line.document.get_cursor_down_position(count=blank_line_offset)
    return CursorRegion(cursor_offset)
@change_delete_move_yank_handler('f', Keys.Any)
def _(event):
    """
    Go to next occurrence of character. Typing 'fx' will move the
    cursor to the next occurrence of character 'x'.
    """
    # Remember (char, backwards) so ';' and ',' can repeat this find.
    _last_character_find[0] = (event.data, False)
    match = line.document.find(event.data, in_current_line=True, count=event.arg)
    return CursorRegion(match or 0)

@change_delete_move_yank_handler('F', Keys.Any)
def _(event):
    """
    Go to previous occurrence of character. Typing 'Fx' will move the
    cursor to the previous occurrence of character 'x'.
    """
    _last_character_find[0] = (event.data, True)
    return CursorRegion(line.document.find_backwards(event.data, in_current_line=True, count=event.arg) or 0)

@change_delete_move_yank_handler('t', Keys.Any)
def _(event):
    """
    Move right to the next occurrence of c, then one char backward.
    """
    _last_character_find[0] = (event.data, False)
    match = line.document.find(event.data, in_current_line=True, count=event.arg)
    return CursorRegion(match - 1 if match else 0)

@change_delete_move_yank_handler('T', Keys.Any)
def _(event):
    """
    Move left to the previous occurrence of c, then one char forward.
    """
    _last_character_find[0] = (event.data, True)
    match = line.document.find_backwards(event.data, in_current_line=True, count=event.arg)
    return CursorRegion(match + 1 if match else 0)
def repeat(reverse):
    """
    Create ',' and ';' commands.
    """
    @change_delete_move_yank_handler(',' if reverse else ';')
    def _(event):
        # Repeat the last 'f'/'F'/'t'/'T' command.
        pos = 0
        if _last_character_find[0]:
            char, backwards = _last_character_find[0]
            if reverse:
                backwards = not backwards

            if backwards:
                pos = line.document.find_backwards(char, in_current_line=True, count=event.arg)
            else:
                pos = line.document.find(char, in_current_line=True, count=event.arg)
        # `or 0`: no match (or no previous find) leaves the cursor in place.
        return CursorRegion(pos or 0)

# ';' repeats in the same direction, ',' in the opposite direction.
repeat(True)
repeat(False)
@change_delete_move_yank_handler('h')
@change_delete_move_yank_handler(Keys.Left)
def _(event):
    """ Implements 'ch', 'dh', 'h': Cursor left. """
    return CursorRegion(line.document.get_cursor_left_position(count=event.arg))

@change_delete_move_yank_handler('j')
def _(event):
    """ Implements 'cj', 'dj', 'j', ... Cursor down. """
    return CursorRegion(line.document.get_cursor_down_position(count=event.arg))

@change_delete_move_yank_handler('k')
def _(event):
    """ Implements 'ck', 'dk', 'k', ... Cursor up. """
    return CursorRegion(line.document.get_cursor_up_position(count=event.arg))

@change_delete_move_yank_handler('l')
@change_delete_move_yank_handler(' ')
@change_delete_move_yank_handler(Keys.Right)
def _(event):
    """ Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right. """
    return CursorRegion(line.document.get_cursor_right_position(count=event.arg))

@change_delete_move_yank_handler('H')
def _(event):
    """ Implements 'cH', 'dH', 'H'. """
    # Vi moves to the start of the visible region.
    # cursor position 0 is okay for us.
    return CursorRegion(-len(line.document.text_before_cursor))

@change_delete_move_yank_handler('L')
def _(event):
    """ Implements 'cL', 'dL', 'L': move to the end of the input. """
    # Vi moves to the end of the visible region.
    # cursor position 0 is okay for us.
    return CursorRegion(len(line.document.text_after_cursor))
@change_delete_move_yank_handler('%')
def _(event):
    """
    Implements 'c%', 'd%', '%, 'y%' (Move to corresponding bracket.)
    If an 'arg' has been given, go to this % position in the file.
    """
    if event._arg:
        # If 'arg' has been given, the meaning of % is to go to the 'x%'
        # row in the file.
        if 0 < event.arg <= 100:
            absolute_index = line.document.translate_row_col_to_index(
                int(event.arg * line.document.line_count / 100), 0)
            # Region is relative to the current cursor position.
            return CursorRegion(absolute_index - line.document.cursor_position)
        else:
            return CursorRegion(0)  # Do nothing.
    else:
        # Move to the corresponding opening/closing bracket (()'s, []'s and {}'s).
        return CursorRegion(line.document.matching_bracket_position)

@change_delete_move_yank_handler('|')
def _(event):
    # Move to the n-th column (you may specify the argument n by typing
    # it on number keys, for example, 20|).
    return CursorRegion(line.document.get_column_cursor_position(event.arg))

@change_delete_move_yank_handler('g', 'g')
def _(event):
    """
    Implements 'gg', 'cgg', 'ygg'
    """
    # Move to the top of the input.
    return CursorRegion(line.document.home_position)
@handle('!', in_mode=InputMode.VI_NAVIGATION)
def _(event):
    """
    '!' opens the system prompt.
    """
    event.input_processor.push_input_mode(InputMode.SYSTEM)

@handle(Keys.Any, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.Any, in_mode=InputMode.SELECTION)
def _(event):
    """
    Always handle numerics in navigation mode as arg.
    """
    if event.data in '123456789' or (event._arg and event.data == '0'):
        event.append_to_arg_count(event.data)
    elif event.data == '0':
        # A leading '0' (no arg in progress) is the hard-start-of-line motion.
        line.cursor_position += line.document.get_start_of_line_position(after_whitespace=False)
@handle(Keys.Any, in_mode=InputMode.VI_REPLACE)
def _(event):
    """
    Insert data at cursor position.
    """
    # overwrite=True: replace mode ('R') overtypes the existing character.
    line.insert_text(event.data, overwrite=True)

@handle(Keys.Any, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Insert text after the / or ? prompt.
    """
    search_line.insert_text(event.data)
    # Keep the incremental search in sync with the prompt contents.
    line.set_search_text(search_line.text)
@handle(Keys.ControlJ, in_mode=InputMode.VI_SEARCH)
@handle(Keys.ControlM, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Enter at the / or ? prompt.
    """
    # Add query to history of search line.
    search_line.add_to_history()
    search_line.reset()

    # Go back to navigation mode.
    event.input_processor.pop_input_mode()

@handle(Keys.Backspace, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Backspace at the vi-search prompt.
    """
    if search_line.text:
        search_line.delete_before_cursor()
        line.set_search_text(search_line.text)
    else:
        # If no text after the prompt, cancel search.
        line.exit_isearch(restore_original_line=True)
        search_line.reset()
        event.input_processor.pop_input_mode()
@handle(Keys.Up, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Go to the previous history item at the search prompt.
    """
    search_line.auto_up()
    line.set_search_text(search_line.text)

@handle(Keys.Down, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Go to the next history item at the search prompt.
    """
    search_line.auto_down()
    # Jump to the end of the recalled history entry.
    search_line.cursor_position = len(search_line.text)
    line.set_search_text(search_line.text)

@handle(Keys.Left, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Arrow left at the search prompt.
    """
    search_line.cursor_left()

@handle(Keys.Right, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Arrow right at the search prompt.
    """
    search_line.cursor_right()

@handle(Keys.ControlC, in_mode=InputMode.VI_SEARCH)
def _(event):
    """
    Cancel search.
    """
    line.exit_isearch(restore_original_line=True)
    search_line.reset()
    event.input_processor.pop_input_mode()
def create_selection_transform_handler(keys, transform_func):
    """
    Register a SELECTION-mode handler that applies `transform_func`
    (uppercase, lowercase, rot13, swap case) to the selected region.
    """
    @handle(*keys, in_mode=InputMode.SELECTION)
    def _(event):
        # Renamed local from `range` to avoid shadowing the builtin.
        selection = line.document.selection_range()
        if selection:
            line.transform_region(selection[0], selection[1], transform_func)
        # Leave selection mode whether or not a region was transformed.
        event.input_processor.pop_input_mode()

for k, f in vi_transform_functions:
    create_selection_transform_handler(k, f)
@handle(Keys.ControlX, Keys.ControlL, in_mode=InputMode.INSERT)
def _(event):
"""
Pressing the ControlX - ControlL sequence in Vi mode does line
completion based | |
from serial import Serial,SerialException
from serial.tools.list_ports import comports
from threading import Lock
from queue import Queue
import time,random,struct
from base64 import b64encode, b64decode
from binascii import Error as BAError
from mirage.libs.ble_utils.constants import *
from mirage.libs.ble_utils.scapy_sniffle_layers import *
from mirage.libs import io,utils,wireless
class SniffleDevice(wireless.Device):
'''
This device allows to communicate with a Sniffle Device in order to sniff Bluetooth Low Energy protocol.
The corresponding interfaces are : ``sniffleX`` (e.g. "sniffle0")
The following capabilities are actually supported :
+-------------------------------------------+----------------+
| Capability | Available ? |
+===========================================+================+
| SCANNING | yes |
+-------------------------------------------+----------------+
| ADVERTISING | yes |
+-------------------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | yes |
+-------------------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | yes |
+-------------------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| JAMMING_CONNECTIONS | no |
+-------------------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | no |
+-------------------------------------------+----------------+
| INJECTING | no |
+-------------------------------------------+----------------+
| MITMING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| HIJACKING_MASTER | no |
+-------------------------------------------+----------------+
| HIJACKING_SLAVE | no |
+-------------------------------------------+----------------+
| INITIATING_CONNECTION | yes |
+-------------------------------------------+----------------+
| RECEIVING_CONNECTION | no |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_MASTER | yes |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | no |
+-------------------------------------------+----------------+
| HCI_MONITORING | no |
+-------------------------------------------+----------------+
'''
# Methods exposed to the corresponding Emitters / Receivers ("shared methods").
sharedMethods = [
    "getFirmwareVersion",
    "getDeviceIndex",
    "setCRCChecking",
    "setChannel",
    "getChannel",
    "getConnections",
    "switchConnection",
    "getCurrentConnection",
    "getCurrentHandle",
    "isConnected",
    "updateConnectionParameters",
    "setAddress",
    "getAddress",
    "setAdvertising",
    "setAdvertisingParameters",
    "setScanningParameters",
    "sniffNewConnections",
    "sniffAdvertisements",
    "setSweepingMode",
    "setScan",
    "setScanInterval",
    "isSynchronized",
    "getAccessAddress",
    "getCrcInit",
    "getChannelMap",
    "getHopInterval",
    "getHopIncrement",
]
@classmethod
def findSniffleSniffers(cls, index=None):
    '''
    This class method allows to find a specific Sniffle device, by providing the device's index.
    If no index is provided, it returns a list of every devices found.
    If no device has been found, None is returned.

    :param index: device's index
    :type index: int
    :return: string indicating the device
    :rtype: str

    :Example:

    >>> SniffleDevice.findSniffleSniffers(0)
    '/dev/ttyACM0'
    >>> SniffleDevice.findSniffleSniffers()
    ['/dev/ttyACM0','/dev/ttyACM1']
    '''
    # A Sniffle dongle enumerates with USB VID:PID 0451:BEF3.  `comports()`
    # may yield plain tuples (old pyserial) or ListPortInfo objects, so both
    # shapes are checked.  Bug fix: the tuple branch referenced an undefined
    # name `port` (the loop variable is `i`), raising NameError.
    sniffleList = sorted([i[0] for i in comports() if
        (isinstance(i, tuple) and "VID:PID=0451:BEF3" in i[-1]) or
        (i.vid == 0x0451 and i.pid == 0xBEF3)
    ])
    if index is None:
        return sniffleList
    # EAFP: out-of-range index means "no such device".
    try:
        return sniffleList[index]
    except IndexError:
        return None
def isUp(self):
    """ Report whether the serial handle exists and the device flagged itself ready. """
    if self.sniffle is None:
        return False
    return self.ready
# Internal setters/getters caching the link-layer parameters chosen when
# initiating a connection (see _sendConnectionRequest); the public
# getAccessAddress()/getCrcInit()/... shared methods read these fields.
def _setAccessAddress(self,accessAddress=None):
    self.accessAddress = accessAddress

def _setCrcInit(self,crcInit=None):
    self.crcInit = crcInit

def _setChannelMap(self,channelMap=None):
    self.channelMap = channelMap

def _setHopInterval(self,hopInterval=None):
    self.hopInterval = hopInterval

def _getHopInterval(self):
    return self.hopInterval

def _setHopIncrement(self,hopIncrement):
    self.hopIncrement = hopIncrement

def _getHopIncrement(self):
    return self.hopIncrement

def _getChannelMap(self):
    return self.channelMap

def _getAccessAddress(self):
    return self.accessAddress

def _getCrcInit(self):
    return self.crcInit
def _sendCommand(self, command):
    '''
    Serialize `command` as a Sniffle UART message and write it to the port.

    Wire format: base64( [size byte] + raw command ) followed by CRLF, where
    the size byte is (len(raw) + 3) // 3 as expected by the firmware framing.
    '''
    cmd = SniffleCommand() / command
    size = (len(bytes(cmd)) + 3) // 3
    uartCommand = b64encode(bytes([size]) + bytes(cmd))
    # Hold the lock while writing so concurrent commands cannot interleave.
    # `with` guarantees the lock is released even if write() raises; the
    # original acquire()/release() pair could leave the lock held on error.
    with self.lock:
        self.sniffle.write(uartCommand + b"\r\n")
def _setPauseWhenDone(self, enabled=False):
    # Tell the firmware whether to pause sniffing once its current task completes.
    command = SnifflePauseWhenDoneCommand(pause_when_done=1 if enabled else 0)
    self._sendCommand(command)

def _initCommand(self):
    # Marker sequence written raw (not framed) to resynchronize the UART stream.
    self.sniffle.write(b'@@@@@@@@\r\n')

def _setConfiguration(self,channel = 37, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555):
    # Defaults: advertising channel 37 with the fixed advertising access
    # address (0x8E89BED6) and advertising CRC init (0x555555).
    self.channel = channel
    command = SniffleSetConfigurationCommand(channel=channel, access_address=accessAddress,phy_mode=phyMode, crc_init=crcInit)
    self._sendCommand(command)

def _setMACFilter(self,mac=None):
    # None or the broadcast address disables MAC filtering entirely.
    if mac is None or mac.upper() == "FF:FF:FF:FF:FF:FF":
        pkt = SniffleDisableMACFilterCommand()
    else:
        pkt = SniffleEnableMACFilterCommand(address=mac)
    self._sendCommand(pkt)

def _enableHop(self):
    # Ask the firmware to hop across the advertising channels.
    command = SniffleEnableAdvertisementsHoppingCommand()
    self._sendCommand(command)

def _reset(self):
    command = SniffleResetCommand()
    self._sendCommand(command)

def _setAddress(self,address,addressType='public'):
    # NOTE(review): callers pass addressType both as a string ('public') and
    # as an int (see _initiateConnection) — confirm what the scapy layer expects.
    command = SniffleSetAddressCommand(address=address, address_type=addressType)
    self._sendCommand(command)

def _setAdvertisingInterval(self,interval=200):
    command = SniffleAdvertiseIntervalCommand(interval=interval)
    self._sendCommand(command)

def _advertise(self,advertisingData=b"",scanRspData=b""):
    command = SniffleAdvertiseCommand(adv_data=advertisingData,scan_resp_data=scanRspData)
    self._sendCommand(command)

def _setFilter(self,advertisementsOnly=False):
    command = SniffleFollowCommand(follow="advertisements_only" if advertisementsOnly else "all")
    self._sendCommand(command)
def _sendConnectionRequest(self, address="00:00:00:00:00:00", addressType="public"):
    # Build and send a CONNECT_REQ.  As initiator we choose the link-layer
    # parameters ourselves: a random access address and CRC init, all data
    # channels enabled, and fixed hop increment/interval.
    accessAddress = random.randint(0,(2**32)-1)
    crcInit = random.randint(0,(2**24)-1)
    channelMap = 0x1fffffffff  # all 37 data channels
    hopIncrement = 5
    hopInterval = 24
    command = SniffleConnectCommand(
        address_type=0x00 if addressType == "public" else 0x01,
        address=address,
        AA=accessAddress,
        crc_init=crcInit,
        win_size=3,
        win_offset=random.randint(5,15),
        interval=hopInterval,
        latency=1,
        timeout=50,
        chM=channelMap,
        SCA=0,
        hop=hopIncrement
    )
    # Mirror the chosen parameters locally so the shared getters
    # (getAccessAddress, getCrcInit, ...) report the live connection values.
    self._setAccessAddress(accessAddress)
    self._setCrcInit(crcInit)
    self._setChannelMap(channelMap)
    self._setHopInterval(hopInterval)
    self._setHopIncrement(hopIncrement)
    self._sendCommand(command)
def _initiateConnection(self, address="00:00:00:00:00:00", addressType="public"):
    # Reset and reconfigure the sniffer for channel 37 advertising traffic,
    # then send a CONNECT_REQ to `address`.
    self._reset()
    self._setConfiguration(channel = 37, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
    self._setPauseWhenDone(True)
    self._setFilter(advertisementsOnly=True)
    self._setMACFilter(mac=None)
    # NOTE(review): addressType here is an int (0/1) while _setAddress's
    # default is the string 'public' — verify the intended encoding.
    self._setAddress(address=self.address,addressType=0x01 if self.addressType == "random" else 0x00)
    self._sendConnectionRequest(address,addressType)
def _flush(self):
    '''
    Flush the underlying serial port while holding the device lock.
    '''
    # `with` guarantees the lock is released even if flush() raises; the
    # original acquire()/release() pair could leave the lock held on error.
    with self.lock:
        self.sniffle.flush()
def _transmit(self,pkt):
    # Forward the BLE payload (everything from the BTLE_DATA layer on) to the firmware.
    command = SniffleTransmitCommand(ble_payload=pkt[BTLE_DATA:])
    self._sendCommand(command)

# Simple flag used by the receiver loop; note these accessors perform no
# locking of their own.
def _enterListening(self):
    self.isListening = True

def _exitListening(self):
    self.isListening = False

def _isListening(self):
    return self.isListening
def isConnected(self) -> bool:
    '''
    This method returns a boolean indicating if the device is connected.

    :return: boolean indicating if the device is connected
    :rtype: bool

    :Example:

    >>> device.isConnected()
    True

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.connected

def getAccessAddress(self) -> int:
    '''
    This method returns the access address actually in use.

    :return: access address
    :rtype: int

    :Example:

    >>> hex(device.getAccessAddress())
    '0xe5e296e9'

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.accessAddress

def getCrcInit(self) -> int:
    '''
    This method returns the CRCInit actually in use.

    :return: CRCInit
    :rtype: int

    :Example:

    >>> hex(device.getCrcInit())
    '0x0bd54a'

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.crcInit

def getChannelMap(self) -> int:
    '''
    This method returns the Channel Map actually in use.

    :return: Channel Map
    :rtype: int

    :Example:

    >>> hex(device.getChannelMap())
    '0x1fffffffff'

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.channelMap

def getHopInterval(self) -> int:
    '''
    This method returns the Hop Interval actually in use.

    :return: Hop Interval
    :rtype: int

    :Example:

    >>> device.getHopInterval()
    36

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.hopInterval

def getHopIncrement(self) -> int:
    '''
    This method returns the Hop Increment actually in use.

    :return: Hop Increment
    :rtype: int

    :Example:

    >>> device.getHopIncrement()
    11

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.hopIncrement

def isSynchronized(self) -> bool:
    '''
    This method indicates if the sniffer is actually synchronized with a connection.

    :return: boolean indicating if the sniffer is synchronized
    :rtype: bool

    :Example:

    >>> device.isSynchronized()
    True

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.synchronized

def getDeviceIndex(self) -> int:
    '''
    This method returns the index of the current Sniffle device.

    :return: device's index
    :rtype: int

    :Example:

    >>> device.getDeviceIndex()
    0

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return self.index
def getFirmwareVersion(self):
    '''
    This method returns the firmware version of the current Sniffle device.

    :return: firmware version as a (major, minor) tuple
    :rtype: tuple of int

    :Example:

    >>> device.getFirmwareVersion()
    (1,5)

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    # NOTE(review): the version is hard-coded rather than queried from the
    # firmware — confirm this matches the deployed Sniffle release.
    version = (1,5)
    return version

def setCRCChecking(self,enable=True):
    '''
    This method enables CRC Checking.

    :param enable: boolean indicating if CRC Checking must be enabled
    :type enable: bool

    :Example:

    >>> device.setCRCChecking(enable=True) # CRC Checking enabled
    >>> device.setCRCChecking(enable=False) # CRC Checking disabled

    .. warning::
        Sniffle calculates the CRC directly in the firmware, so this command is ignored. It is present in order to provide a similar API to Ubertooth.

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    self.crcEnabled = enable

def setScanInterval(self,seconds=1):
    '''
    This method allows to provide the scan interval (in second).

    :param seconds: number of seconds to wait between two channels
    :type seconds: float

    :Example:

    >>> device.setScanInterval(seconds=1)

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    self.scanInterval = seconds
def _scanThread(self):
    """ One pass of the scan loop: dwell on each advertising channel in turn. """
    for advertisingChannel in (37, 38, 39):
        self.setChannel(advertisingChannel)
        utils.wait(seconds=self.scanInterval)
def setScan(self,enable=True):
    '''
    This method enables or disables the scanning mode. It allows to change the channel according to the scan interval parameter.

    :param enable: boolean indicating if the scanning mode must be enabled
    :type enable: bool

    :Example:

    >>> device.setScan(enable=True) # scanning mode enabled
    >>> device.setScan(enable=False) # scanning mode disabled

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    if enable:
        self.sniffAdvertisements()
        if self.scanThreadInstance is None:
            self.scanThreadInstance = wireless.StoppableThread(target=self._scanThread)
            self.scanThreadInstance.start()
    else:
        # Robustness fix: disabling when no scan thread was running used to
        # raise AttributeError (None.stop()); now it is a harmless no-op.
        if self.scanThreadInstance is not None:
            self.scanThreadInstance.stop()
            self.scanThreadInstance = None
def getCurrentHandle(self) -> int:
    '''
    This method returns the connection Handle actually in use.
    If no connection is established, its value is equal to -1.

    :return: connection Handle
    :rtype: int

    .. warning::
        This method always returns 1, it allows to provides the same API as the HCI Device.

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return 1

def getConnections(self) -> list:
    '''
    This method returns a list of couple (connection handle / address) representing the connections actually established.
    A connection is described by a dictionary containing an handle and an access address : ``{"handle":1, "address":"0x12345678"}``

    :return: list of connections established
    :rtype: list of dict

    :Example:

    >>> device.getConnections()
    [{'handle':1, 'address':'0x12345678'}]

    .. warning::
        The connection handle is always 1, it allows to provides the same API as the HCI Device.

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return [{"address":"0x{:08x}".format(self.accessAddress),"handle":1}]

def getCurrentConnection(self) -> str:
    '''
    This method returns the access address associated to the current connection. If no connection is established, it returns None.

    :return: access address of the current connection
    :rtype: str

    :Example:

    >>> device.getCurrentConnection()
    '0x12345678'

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    return "0x{:08x}".format(self.accessAddress)

def switchConnection(self,address):
    '''
    This method is provided in order to provide the same API as an HCI Device, it actually has no effects.

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    # Sniffle follows a single connection at a time, so switching is rejected.
    io.fail("Switching connection not allowed with Sniffle Device !")
def close(self):
    '''
    Close the serial link to the Sniffle device and drop the handle.
    '''
    # `with` releases the lock even if close() raises (the original
    # acquire()/release() pair could leave the lock held on error), and the
    # finally clause drops the handle so isUp() reports down either way.
    with self.lock:
        try:
            self.sniffle.close()
        finally:
            self.sniffle = None
def setAdvertisingParameters(self,type = "ADV_IND",destAddr = "00:00:00:00:00:00",data = b"",intervalMin = 200, intervalMax = 210, daType='public', oaType='public'):
    '''
    This method sets advertising parameters according to the data provided.
    It will mainly be used by *ADV_IND-like* packets.

    :param type: type of advertisement (*available values :* "ADV_IND", "ADV_DIRECT_IND", "ADV_SCAN_IND", "ADV_NONCONN_IND", "ADV_DIRECT_IND_LOW")
    :type type: str
    :param destAddr: destination address (it will be used if needed)
    :type destAddr: str
    :param data: data included in the payload
    :type data: bytes
    :param intervalMin: minimal interval
    :type intervalMin: int
    :param intervalMax: maximal interval
    :type intervalMax: int
    :param daType: string indicating the destination address type ("public" or "random")
    :type daType: str
    :param oaType: string indicating the origin address type ("public" or "random")

    .. note::
        This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
    '''
    # Map the textual advertisement type onto its protocol constant,
    # falling back to ADV_IND for unknown values.
    if type == "ADV_IND":
        self.advType = ADV_IND
    elif type == "ADV_DIRECT_IND":
        self.advType = ADV_DIRECT_IND
    elif type == "ADV_SCAN_IND":
        self.advType = ADV_SCAN_IND
    elif type == "ADV_NONCONN_IND":
        self.advType = ADV_NONCONN_IND
    elif type == "ADV_DIRECT_IND_LOW":
        self.advType = ADV_DIRECT_IND_LOW
    else:
        io.fail("Advertisements type not recognized, using ADV_IND.")
        self.advType = ADV_IND
    # The all-zero address means "no specific destination".
    self.destAddress = None if destAddr == "00:00:00:00:00:00" else destAddr
    advData = data
    # NOTE(review): advDataLength is computed before the list->bytes
    # conversion below, so for a list input it counts elements, not bytes —
    # verify this is intended.
    self.advDataLength = len(data) if len(data) <= 31 else 31
    if isinstance(data,list):
        # A list of scapy layers / byte chunks is flattened into raw bytes.
        advData = b""
        for i in data:
            advData += bytes(i)
        data = advData
    if isinstance(data,bytes):
        # Truncate to the 31-byte advertising payload limit, or zero-pad up to it.
        advData = b""
        if len(data) > 31:
            advData = data[:31]
        else:
            advData = data+(31 - len(data))*b"\x00"
    self.advData = advData
    self.destAddressType = daType
    self.addressType = oaType
    self.intervalMin = intervalMin
    self.intervalMax = intervalMax
def setScanningParameters(self, data=b""):
'''
This method sets scanning parameters according to the data provided.
It will mainly be used by *SCAN_RESP* packets.
:param data: data to use in *SCAN_RESP*
:type data: bytes
.. note::
This method is a **shared method** and can be called from | |
<filename>darts/utils/data/encoder_base.py
"""
Encoder Base Classes
--------------------
"""
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
from enum import Enum, auto
from typing import Union, Optional, Tuple, Sequence, List
from darts import TimeSeries
from darts.logging import get_logger
from darts.utils.timeseries_generation import _generate_index
from darts.dataprocessing.transformers import FittableDataTransformer
SupportedIndex = Union[pd.DatetimeIndex, pd.Int64Index, pd.RangeIndex]
EncoderOutputType = Optional[Union[Sequence[TimeSeries], List[TimeSeries]]]
logger = get_logger(__name__)
class ReferenceIndexType(Enum):
    # Where the saved reference index is anchored: at the prediction point
    # (end of the target), at the step before the start of the target, or
    # nowhere (no reference index is saved).
    PREDICTION = auto()
    START = auto()
    NONE = auto()
class CovariateIndexGenerator(ABC):
    """Base class for generating the covariate time index required by encoders
    at training and inference time."""

    def __init__(self,
                 input_chunk_length: int,
                 output_chunk_length: int,
                 reference_index_type: ReferenceIndexType = ReferenceIndexType.NONE):
        """
        Parameters
        ----------
        input_chunk_length
            The length of the emitted past series.
        output_chunk_length
            The length of the emitted future series.
        reference_index_type
            If a reference index should be saved, set `reference_index_type` to one of
            `(ReferenceIndexType.PREDICTION, ReferenceIndexType.START)`
        """
        self.input_chunk_length = input_chunk_length
        self.output_chunk_length = output_chunk_length
        self.reference_index_type = reference_index_type
        # (position, time) pair; lazily filled in by subclasses on the first
        # training series when a reference index type is configured.
        self.reference_index: Optional[Tuple[int, Union[pd.Timestamp, int]]] = None

    @abstractmethod
    def generate_train_series(self,
                              target: TimeSeries,
                              covariate: Optional[TimeSeries] = None) -> SupportedIndex:
        """
        Implement a method that extracts the required covariate index for training.

        Parameters
        ----------
        target
            The target TimeSeries used during training
        covariate
            Optionally, the future covariates used for training
        """
        pass

    @abstractmethod
    def generate_inference_series(self,
                                  n: int,
                                  target: TimeSeries,
                                  covariate: Optional[TimeSeries] = None) -> SupportedIndex:
        """
        Implement a method that extracts the required covariate index for prediction.

        Parameters
        ----------
        n
            The forecast horizon
        target
            The target TimeSeries used during training or passed to prediction as `series`
        covariate
            Optionally, the future covariates used for prediction
        """
        pass
class PastCovariateIndexGenerator(CovariateIndexGenerator):
    """Generates index for past covariates on train and inference datasets"""

    def generate_train_series(self,
                              target: TimeSeries,
                              covariate: Optional[TimeSeries] = None) -> SupportedIndex:
        super().generate_train_series(target, covariate)

        # Remember a reference point the first time a target series is seen.
        must_save_reference = (self.reference_index_type is not ReferenceIndexType.NONE
                               and self.reference_index is None)
        if must_save_reference:
            if self.reference_index_type is ReferenceIndexType.PREDICTION:
                self.reference_index = (len(target) - 1, target.end_time())
            else:
                # the time step right before the start of the target series
                self.reference_index = (-1, target.start_time() - target.freq)

        if covariate is None:
            return target.time_index
        return covariate.time_index

    def generate_inference_series(self,
                                  n: int,
                                  target: TimeSeries,
                                  covariate: Optional[TimeSeries] = None) -> SupportedIndex:
        """For prediction (`n` is given) with past covariates we have to distinguish between two cases:
        1)  If past covariates are given, we can use them as reference
        2)  If past covariates are missing, we need to generate a time index that starts `input_chunk_length`
            before the end of `target` and ends `max(0, n - output_chunk_length)` after the end of `target`
        """
        super().generate_inference_series(n, target, covariate)

        if covariate is None:
            window_start = target.end_time() - target.freq * (self.input_chunk_length - 1)
            overflow = max(0, n - self.output_chunk_length)
            return _generate_index(start=window_start,
                                   length=self.input_chunk_length + overflow,
                                   freq=target.freq)
        return covariate.time_index
class FutureCovariateIndexGenerator(CovariateIndexGenerator):
    """Generates index for future covariates on train and inference datasets."""

    def generate_train_series(self,
                              target: TimeSeries,
                              covariate: Optional[TimeSeries] = None) -> SupportedIndex:
        """For training (when `n` is `None`) we can simply use the future covariates (if available) or target as
        reference to extract the time index.
        """
        super().generate_train_series(target, covariate)

        # Remember a reference point the first time a target series is seen.
        must_save_reference = (self.reference_index_type is not ReferenceIndexType.NONE
                               and self.reference_index is None)
        if must_save_reference:
            if self.reference_index_type is ReferenceIndexType.PREDICTION:
                self.reference_index = (len(target) - 1, target.end_time())
            else:
                # the time step right before the start of the target series
                self.reference_index = (-1, target.start_time() - target.freq)

        if covariate is None:
            return target.time_index
        return covariate.time_index

    def generate_inference_series(self,
                                  n: int,
                                  target: TimeSeries,
                                  covariate: Optional[TimeSeries] = None) -> SupportedIndex:
        """For prediction (`n` is given) with future covariates we have to distinguish between two cases:
        1)  If future covariates are given, we can use them as reference
        2)  If future covariates are missing, we need to generate a time index that starts `input_chunk_length`
            before the end of `target` and ends `max(n, output_chunk_length)` after the end of `target`
        """
        super().generate_inference_series(n, target, covariate)

        if covariate is None:
            window_start = target.end_time() - target.freq * (self.input_chunk_length - 1)
            horizon = max(n, self.output_chunk_length)
            return _generate_index(start=window_start,
                                   length=self.input_chunk_length + horizon,
                                   freq=target.freq)
        return covariate.time_index
class Encoder(ABC):
    """Abstract base class that all encoders inherit from."""

    @abstractmethod
    def __init__(self):
        self.attribute = None
        self.dtype = np.float64

    @abstractmethod
    def encode_train(self,
                     target: TimeSeries,
                     covariate: Optional[TimeSeries] = None,
                     merge_covariate: bool = True,
                     **kwargs) -> TimeSeries:
        """Encode the covariate index for training; implemented by each subclass.

        Parameters
        ----------
        target
            The target TimeSeries used during training or passed to prediction as `series`
        covariate
            Optionally, the future covariates used for prediction
        merge_covariate
            Whether or not to merge the encoded TimeSeries with `covariate`.
        """
        pass

    @abstractmethod
    def encode_inference(self,
                         n: int,
                         target: TimeSeries,
                         covariate: Optional[TimeSeries] = None,
                         merge_covariate: bool = True,
                         **kwargs) -> TimeSeries:
        """Encode the covariate index for prediction; implemented by each subclass.

        Parameters
        ----------
        n
            The forecast horizon
        target
            The target TimeSeries used during training or passed to prediction as `series`
        covariate
            Optionally, the future covariates used for prediction
        merge_covariate
            Whether or not to merge the encoded TimeSeries with `covariate`.
        """
        pass

    @staticmethod
    def _merge_covariate(encoded: TimeSeries, covariate: Optional[TimeSeries] = None) -> TimeSeries:
        """Stack the encoded index onto the (actual) covariates, when any were given.

        Parameters
        ----------
        encoded
            The encoded TimeSeries either from `encode_train()` or `encode_inference()`
        covariate
            Optionally, the future covariates used for prediction
        """
        if covariate is None:
            return encoded
        return covariate.stack(encoded)
class SingleEncoder(Encoder, ABC):
    """Abstract base for single index encoders.

    Single encoders can be used to implement new encoding techniques.
    Each single encoder must implement an `_encode()` method carrying the encoding
    logic: given an index, it produces a single encoded `TimeSeries`.
    """

    def __init__(self, index_generator: CovariateIndexGenerator):
        """Single encoders take an `index_generator` to generate the required index for encoding past and
        future covariates.

        See darts.utils.data.covariate_index_generators.py for the `CovariateIndexGenerator` subclasses.
        For past covariate encoders, use a `PastCovariateIndexGenerator`.
        For future covariate encoders use a `FutureCovariateIndexGenerator`.

        Parameters
        ----------
        index_generator
            An instance of `CovariateIndexGenerator` with methods `generate_train_series()` and
            `generate_inference_series()`. Used to generate the index for encoders.
        """
        super(SingleEncoder, self).__init__()
        self.index_generator = index_generator

    @abstractmethod
    def _encode(self, index: SupportedIndex, dtype: np.dtype) -> TimeSeries:
        """Encode `index` into a TimeSeries; implemented by each subclass.

        Parameters
        ----------
        index
            The index generated from `self.index_generator` for either the train or inference dataset.
        dtype
            The dtype of the encoded index
        """
        pass

    def encode_train(self,
                     target: TimeSeries,
                     covariate: Optional[TimeSeries] = None,
                     merge_covariate: bool = True,
                     **kwargs) -> TimeSeries:
        """Return the encoded index for training.

        Parameters
        ----------
        target
            The target TimeSeries used during training or passed to prediction as `series`
        covariate
            Optionally, the covariate used for training: past covariate if `self.index_generator` is instance of
            `PastCovariateIndexGenerator`, future covariate if `self.index_generator` is instance of
            `FutureCovariateIndexGenerator`
        merge_covariate
            Whether or not to merge the encoded TimeSeries with `covariate`.
        """
        train_index = self.index_generator.generate_train_series(target, covariate)
        encoded = self._encode(train_index, target.dtype)
        if not merge_covariate:
            return encoded
        return self._merge_covariate(encoded, covariate=covariate)

    def encode_inference(self,
                         n: int,
                         target: TimeSeries,
                         covariate: Optional[TimeSeries] = None,
                         merge_covariate: bool = True,
                         **kwargs) -> TimeSeries:
        """Return the encoded index for inference/prediction.

        Parameters
        ----------
        n
            The forecast horizon
        target
            The target TimeSeries used during training or passed to prediction as `series`
        covariate
            Optionally, the covariate used for prediction: past covariate if `self.index_generator` is instance of
            `PastCovariateIndexGenerator`, future covariate if `self.index_generator` is instance of
            `FutureCovariateIndexGenerator`
        merge_covariate
            Whether or not to merge the encoded TimeSeries with `covariate`.
        """
        inference_index = self.index_generator.generate_inference_series(n, target, covariate)
        encoded = self._encode(inference_index, target.dtype)
        if not merge_covariate:
            return encoded
        return self._merge_covariate(encoded, covariate=covariate)

    @property
    @abstractmethod
    def accept_transformer(self) -> List[bool]:
        """Whether or not the SingleEncoder sub class accepts to be transformed."""
        pass
class SequentialEncoderTransformer:
"""`SequentialEncoderTransformer` applies transformation to the non-transformed encoded covariate output of
`SequentialEncoder.encode_train()` and `SequentialEncoder.encode_inference()`. The transformer is fitted
when `transform()` is called for the first time. This ensures proper transformation of train, validation and
inference dataset covariates. User-supplied covariates are not transformed."""
def __init__(self, transformer: FittableDataTransformer, transform_mask: List[bool]):
"""
Parameters
----------
transformer
A `FittableDataTransformer` object with a `fit_transform()` and `transform()` method.
transform_mask
A boolean 1-D mask specifying which of the input covariates to :meth:`transform()
<SequentialEncoderTransformer.transform()>` must be transformed.
"""
self.transformer: FittableDataTransformer = transformer
self.transform_mask: np.ndarray = np.array(transform_mask)
self._fit_called: bool = False
def transform(self, covariate: List[TimeSeries]) -> List[TimeSeries]:
"""This method applies transformation to the non-transformed encoded covariate output of
`SequentialEncoder._encode_sequence()` after being merged with user-defined covariates. The transformer is
fitted when `transform()` is called for the first time. This ensures proper transformation of train, validation
and inference dataset covariates. The masks | |
# filename: arc/reactionTest.py
#!/usr/bin/env python3
# encoding: utf-8
"""
This module contains unit tests of the arc.reaction module
"""
import unittest
from rmgpy.reaction import Reaction
from rmgpy.species import Species
import arc.rmgdb as rmgdb
from arc.exceptions import ReactionError
from arc.imports import settings
from arc.reaction import ARCReaction
from arc.species import ARCSpecies
# Default ts_methods from ARC's settings; the tests below compare reaction
# dictionaries against the lower-cased form of this list.
default_ts_methods = settings['default_ts_methods']
class TestARCReaction(unittest.TestCase):
"""
Contains unit tests for the ARCSpecies class
"""
    @classmethod
    def setUpClass(cls):
        """
        A method that is run before all unit tests in this class.

        Loads the RMG database with its reaction families (expensive, hence
        done once per class) and builds representative ARCReaction fixtures
        together with their corresponding RMG ``Reaction`` objects.
        """
        cls.maxDiff = None
        # Loading the RMG database and its families is slow; share them across all tests.
        cls.rmgdb = rmgdb.make_rmg_database_object()
        rmgdb.load_families_only(cls.rmgdb)
        # CH4 + OH <=> CH3 + H2O (H abstraction, see test_rxn_family)
        cls.rxn1 = ARCReaction(reactants=['CH4', 'OH'], products=['CH3', 'H2O'])
        cls.rxn1.rmg_reaction = Reaction(reactants=[Species().from_smiles('C'), Species().from_smiles('[OH]')],
                                         products=[Species().from_smiles('[CH3]'), Species().from_smiles('O')])
        # C2H5 + OH <=> C2H4 + H2O (disproportionation, see test_rxn_family)
        cls.rxn2 = ARCReaction(reactants=['C2H5', 'OH'], products=['C2H4', 'H2O'])
        cls.rxn2.rmg_reaction = Reaction(reactants=[Species().from_smiles('C[CH2]'),
                                                    Species().from_smiles('[OH]')],
                                         products=[Species().from_smiles('C=C'), Species().from_smiles('O')])
        # CH3CH2NH <=> CH2CH2NH2 (intramolecular H migration, see test_rxn_family)
        cls.rxn3 = ARCReaction(reactants=['CH3CH2NH'], products=['CH2CH2NH2'])
        cls.rxn3.rmg_reaction = Reaction(reactants=[Species().from_smiles('CC[NH]')],
                                         products=[Species().from_smiles('[CH2]CN')])
        # nitrogen-radical reaction used for the multiplicity test
        cls.rxn4 = ARCReaction(reactants=['[NH2]', 'N[NH]'], products=['N', 'N[N]'])
        cls.rxn4.rmg_reaction = Reaction(reactants=[Species().from_smiles('[NH2]'), Species().from_smiles('N[NH]')],
                                         products=[Species().from_smiles('N'), Species().from_smiles('N[N]')])
def test_str(self):
"""Test the string representation of the object"""
str_representation = str(self.rxn1)
expected_representation = 'ARCReaction(label="CH4 + OH <=> CH3 + H2O", ' \
'rmg_reaction="C + [OH] <=> [CH3] + O", ' \
'multiplicity=2, charge=0)'
self.assertEqual(str_representation, expected_representation)
    def test_as_dict(self):
        """Test ARCReaction.as_dict()"""
        rxn_dict = self.rxn1.as_dict()
        # full expected dictionary for a freshly constructed (undetermined) reaction
        expected_dict = {'charge': 0,
                         'multiplicity': None,
                         'family': None,
                         'family_own_reverse': 0,
                         'label': 'CH4 + OH <=> CH3 + H2O',
                         'long_kinetic_description': u'',
                         'index': None,
                         'p_species': [],
                         'products': ['CH3', 'H2O'],
                         'r_species': [],
                         'reactants': ['CH4', 'OH'],
                         'ts_label': None,
                         'ts_xyz_guess': [],
                         'ts_methods': [tsm.lower() for tsm in default_ts_methods]}
        self.assertEqual(rxn_dict, expected_dict)
    def test_from_dict(self):
        """Test ARCReaction instantiation from a dictionary (round trip via as_dict)"""
        rxn_dict = self.rxn1.as_dict()
        rxn = ARCReaction(reaction_dict=rxn_dict)
        self.assertEqual(rxn.label, 'CH4 + OH <=> CH3 + H2O')
        self.assertEqual(rxn.ts_methods, [tsm.lower() for tsm in default_ts_methods])
def test_rmg_reaction_to_str(self):
"""Test the rmg_reaction_to_str() method and the reaction label generated"""
spc1 = Species().from_smiles('CON=O')
spc1.label = 'CONO'
spc2 = Species().from_smiles('C[N+](=O)[O-]')
spc2.label = 'CNO2'
rmg_reaction = Reaction(reactants=[spc1], products=[spc2])
rxn = ARCReaction(rmg_reaction=rmg_reaction)
rxn_str = rxn.rmg_reaction_to_str()
self.assertEqual(rxn_str, 'CON=O <=> [O-][N+](=O)C')
self.assertEqual(rxn.label, 'CONO <=> CNO2')
def test_rxn_family(self):
"""Test that ARC gets the correct RMG family for different reactions"""
self.rxn1.determine_family(rmg_database=self.rmgdb)
self.assertEqual(self.rxn1.family.label, 'H_Abstraction')
self.assertTrue(self.rxn1.family_own_reverse)
self.rxn2.determine_family(rmg_database=self.rmgdb)
self.assertEqual(self.rxn2.family.label, 'Disproportionation')
self.assertFalse(self.rxn2.family_own_reverse)
self.rxn3.determine_family(rmg_database=self.rmgdb)
self.assertEqual(self.rxn3.family.label, 'intra_H_migration')
self.assertTrue(self.rxn3.family_own_reverse)
def test_determine_charge(self):
"""Test determine charge"""
self.rxn1.determine_rxn_charge()
self.assertEqual(self.rxn1.charge, 0)
def test_determine_multiplicity(self):
"""Test determine multiplicity"""
self.rxn1.determine_rxn_multiplicity()
self.assertEqual(self.rxn1.multiplicity, 2)
self.rxn2.arc_species_from_rmg_reaction()
self.rxn2.determine_rxn_multiplicity()
self.assertEqual(self.rxn2.multiplicity, 1)
self.rxn3.determine_rxn_multiplicity()
self.assertEqual(self.rxn3.multiplicity, 2)
self.rxn4.determine_rxn_multiplicity()
self.assertEqual(self.rxn4.multiplicity, 3)
def test_check_atom_balance(self):
"""Test the Reaction check_atom_balance method"""
# A normal reaction
rxn1 = ARCReaction(reactants=['CH4', 'OH'], products=['CH3', 'H2O'])
rxn1.r_species = [ARCSpecies(label='CH4', smiles='C'),
ARCSpecies(label='OH', smiles='[OH]')]
rxn1.p_species = [ARCSpecies(label='CH3', smiles='[CH3]'),
ARCSpecies(label='H2O', smiles='O')]
self.assertTrue(rxn1.check_atom_balance())
# A non-balanced reaction
rxn2 = ARCReaction(reactants=['CH4', 'OH'], products=['CH4', 'H2O'])
rxn2.r_species = [ARCSpecies(label='CH4', smiles='C'),
ARCSpecies(label='OH', smiles='[OH]')]
rxn2.p_species = [ARCSpecies(label='CH4', smiles='C'),
ARCSpecies(label='H2O', smiles='O')]
self.assertFalse(rxn2.check_atom_balance(raise_error=False))
with self.assertRaises(ReactionError):
rxn2.check_atom_balance()
# A reaction with the same species twice on one side
rxn3 = ARCReaction(reactants=['CH4', 'OH', 'H2O'], products=['CH3', 'H2O', 'H2O'])
rxn3.r_species = [ARCSpecies(label='CH4', smiles='C'),
ARCSpecies(label='OH', smiles='[OH]'),
ARCSpecies(label='H2O', smiles='O')]
rxn3.p_species = [ARCSpecies(label='CH3', smiles='[CH3]'),
ARCSpecies(label='H2O', smiles='O')]
self.assertTrue(rxn3.check_atom_balance())
def test_get_species_count(self):
"""Test the get_species_count() method"""
rxn1 = ARCReaction(reactants=['CH4', 'OH', 'H2O'], products=['CH3', 'H2O', 'H2O'])
spc1 = ARCSpecies(label='OH', smiles='[OH]')
spc2 = ARCSpecies(label='H2O', smiles='O')
self.assertEqual(rxn1.get_species_count(species=spc1, well=0), 1)
self.assertEqual(rxn1.get_species_count(species=spc1, well=1), 0)
self.assertEqual(rxn1.get_species_count(species=spc2, well=0), 1)
self.assertEqual(rxn1.get_species_count(species=spc2, well=1), 2)
def test_get_atom_map(self):
"""Test getting an atom map for a reaction"""
# 1. trivial unimolecular: H2O <=> H2O
h2o_xyz_1 = {'symbols': ('O', 'H', 'H'),
'isotopes': (16, 1, 1),
'coords': ((-0.0003283189391273643, 0.39781490416473486, 0.0),
(-0.7633034507689803, -0.19953755103743254, 0.0),
(0.7636317697081081, -0.19827735312730177, 0.0))}
r_1 = ARCSpecies(label='H2O', smiles='O', xyz=h2o_xyz_1)
p_1 = ARCSpecies(label='H2O', smiles='O', xyz=h2o_xyz_1)
rxn_1 = ARCReaction(reactants=['H2O'], products=['H2O'])
rxn_1.r_species = [r_1]
rxn_1.p_species = [p_1]
self.assertEqual(rxn_1.atom_map, [0, 1, 2])
self.assertTrue(check_atom_map(rxn_1))
# 2. trivial unimolecular with an intentional mixed atom order: H2O <=> H2O
h2o_xyz_2 = {'symbols': ('H', 'H', 'O'),
'isotopes': (1, 1, 16),
'coords': ((0.39781, 0.0, -0.00032),
(-0.19953, 0.0, -0.76330),
(-0.19827, 0.0, 0.76363))}
p_1 = ARCSpecies(label='H2O', smiles='O', xyz=h2o_xyz_2)
rxn_2 = ARCReaction(reactants=['H2O'], products=['H2O'])
rxn_2.r_species = [r_1]
rxn_2.p_species = [p_1]
self.assertEqual(rxn_2.atom_map, [2, 0, 1])
self.assertTrue(check_atom_map(rxn_2))
# 3. trivial bimolecular: H + CH3NH2 <=> H2 + CH2NH2
ch3nh2_xyz = {'coords': ((-0.5734111454228507, 0.0203516083213337, 0.03088703933770556),
(0.8105595891860601, 0.00017446498908627427, -0.4077728757313545),
(-1.1234549667791063, -0.8123899006368857, -0.41607711106038836),
(-0.6332220120842996, -0.06381791823047896, 1.1196983583774054),
(-1.053200912106195, 0.9539501896695028, -0.27567270246542575),
(1.3186422395164141, 0.7623906284020254, 0.038976118645639976),
(1.2540872076899663, -0.8606590725145833, -0.09003882710357966)),
'isotopes': (12, 14, 1, 1, 1, 1, 1),
'symbols': ('C', 'N', 'H', 'H', 'H', 'H', 'H')}
ch2nh2_xyz = {'coords': ((0.6919493009211066, 0.054389375309083846, 0.02065422596281878),
(1.3094508022837807, -0.830934909576592, 0.14456347719459348),
(1.1649142139806816, 1.030396183273415, 0.08526955368597328),
(-0.7278194451655412, -0.06628299353512612, -0.30657582460750543),
(-1.2832757211903472, 0.7307667658607352, 0.00177732009031573),
(-1.155219150829674, -0.9183344213315149, 0.05431124767380799)),
'isotopes': (12, 1, 1, 14, 1, 1),
'symbols': ('C', 'H', 'H', 'N', 'H', 'H')}
h2_xyz = {'coords': ((0, 0, 0.3736550),
(0, 0, -0.3736550)),
'isotopes': (1, 1),
'symbols': ('H', 'H')}
r_1 = ARCSpecies(label='H', smiles='[H]', xyz={'coords': ((0, 0, 0),), 'isotopes': (1,),'symbols': ('H',)})
r_2 = ARCSpecies(label='CH3NH2', smiles='CN', xyz=ch3nh2_xyz)
p_1 = ARCSpecies(label='H2', smiles='[H][H]', xyz=h2_xyz)
p_2 = ARCSpecies(label='CH2NH2', smiles='[CH2]N', xyz=ch2nh2_xyz)
rxn_3 = ARCReaction(reactants=['H', 'CH3NH2'], products=['H2', 'CH2NH2'])
rxn_3.r_species = [r_1, r_2]
rxn_3.p_species = [p_1, p_2]
self.assertEqual(rxn_3.atom_map, [0, 2, 5, 6, 1, 7, 3, 4])
self.assertTrue(check_atom_map(rxn_3))
# 4. trivial bimolecular in reverse order: H + CH3NH2 <=> CH2NH2 + H2
ch3nh2_xyz = {'coords': ((-0.5734111454228507, 0.0203516083213337, 0.03088703933770556),
(0.8105595891860601, 0.00017446498908627427, -0.4077728757313545),
(-1.1234549667791063, -0.8123899006368857, -0.41607711106038836),
(-0.6332220120842996, -0.06381791823047896, 1.1196983583774054),
(-1.053200912106195, 0.9539501896695028, -0.27567270246542575),
(1.3186422395164141, 0.7623906284020254, 0.038976118645639976),
(1.2540872076899663, -0.8606590725145833, -0.09003882710357966)),
'isotopes': (12, 14, 1, 1, 1, 1, 1),
'symbols': ('C', 'N', 'H', 'H', 'H', 'H', 'H')}
ch2nh2_xyz = {'coords': ((0.6919493009211066, 0.054389375309083846, 0.02065422596281878),
(1.3094508022837807, -0.830934909576592, 0.14456347719459348),
(1.1649142139806816, 1.030396183273415, 0.08526955368597328),
(-0.7278194451655412, -0.06628299353512612, -0.30657582460750543),
(-1.2832757211903472, 0.7307667658607352, 0.00177732009031573),
(-1.155219150829674, -0.9183344213315149, 0.05431124767380799)),
'isotopes': (12, 1, 1, 14, 1, 1),
'symbols': ('C', 'H', 'H', 'N', 'H', 'H')}
h2_xyz = {'coords': ((0, 0, 0.3736550),
(0, 0, -0.3736550)),
'isotopes': (1, 1),
'symbols': ('H', 'H')}
r_1 = ARCSpecies(label='H', smiles='[H]', xyz={'coords': ((0, 0, 0),), 'isotopes': (1,),'symbols': ('H',)})
r_2 = ARCSpecies(label='CH3NH2', smiles='CN', xyz=ch3nh2_xyz)
p_1 = ARCSpecies(label='H2', smiles='[H][H]', xyz=h2_xyz)
p_2 = ARCSpecies(label='CH2NH2', smiles='[CH2]N', xyz=ch2nh2_xyz)
rxn_4 = ARCReaction(reactants=['H', 'CH3NH2'], products=['CH2NH2', 'H2'])
rxn_4.r_species = [r_1, r_2]
rxn_4.p_species = [p_2, p_1]
self.assertEqual(rxn_4.atom_map, [6, 0, 3, 4, 7, 5, 1, 2])
self.assertTrue(check_atom_map(rxn_4))
# 5. representative reactions from RMG families
# 1+2_Cycloaddition: CH2 + C2H4 <=> C3H6
ch2_xyz = {'coords': ((-1.3519460059345912e-10, -5.04203763365717e-10, 0.0),
(-1.064874800478917, -0.016329711355091817, 0.0),
(1.0648748006141107, 0.016329711859301474, 0.0)),
'isotopes': (12, 1, 1),
'symbols': ('C', 'H', 'H')}
c2h4_xyz = {'coords': ((0.6664040429179742, 0.044298334171779405, -0.0050238049104911735),
(-0.6664040438461246, -0.04429833352898575, 0.00502380522486473),
(1.1686968388986039, 0.8743086488169786, -0.4919298928897832),
(1.2813853343929593, -0.7114426553520238, 0.4734595111827543),
(-1.2813853352424778, 0.7114426574294024, -0.4734595076873365),
(-1.1686968371212578, -0.8743086515369692, 0.49192988907998186)),
'isotopes': (12, 12, 1, 1, 1, 1),
'symbols': ('C', 'C', 'H', 'H', 'H', 'H')}
c_c3h6_xyz = {'coords': ((0.7868661913782324, -0.3644249639827158, -0.016337299842911886),
(-0.07793785747147405, 0.8603229755261934, 0.07746513362297117),
(-0.708928275400647, -0.4958980792223481, -0.06112784358024908),
(1.339749295484817, -0.5278616711993785, -0.9341881111902739),
(1.3001119953298585, -0.6947493102195698, 0.8793780279658545),
(-0.15055582331881673, 1.3597070015370083, 1.0367271647162946),
(-0.11091839380255, 1.5265948517709569, -0.7768389650606503),
(-1.1693748373792934, -0.7484015319217499, -1.0093221066790388),
(-1.2090122948201234, -0.9152892722884018, 0.8042440000480116)),
'isotopes': (12, 12, 12, 1, 1, 1, 1, 1, 1),
'symbols': ('C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H')}
r_1 = ARCSpecies(label='CH2', adjlist="""1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}""", xyz=ch2_xyz)
r_2 = ARCSpecies(label='C2H4', smiles='C=C', xyz=c2h4_xyz)
p_1 = ARCSpecies(label='cC3H6', smiles='C1CC1', xyz=c_c3h6_xyz)
rxn = ARCReaction(reactants=['CH2', 'C2H4'], products=['cC3H6'])
rxn.r_species = [r_1, r_2]
rxn.p_species = [p_1]
self.assertEqual(rxn.atom_map, [0, 7, 6, 1, 2, 5, 4, 8, 3])
self.assertTrue(check_atom_map(rxn))
# 1,2-Birad_to_alkene: SO2(T) => SO2(S)
so2_t_xyz = {'coords': ((0.02724478716956233, 0.6093829407458188, 0.0),
(-1.3946381818031768, -0.24294788636871906, 0.0),
(1.3673933946336125, -0.36643505437710233, 0.0)),
'isotopes': (32, 16, 16),
'symbols': ('S', 'O', 'O')}
so2_s_xyz = {'coords': ((-1.3554230894998571, -0.4084942756329785, 0.0),
(-0.04605352293144468, 0.6082507106551855, 0.0),
(1.4014766124312934, -0.19975643502220325, 0.0)),
'isotopes': (16, 32, 16),
'symbols': ('O', 'S', 'O')}
r_1 = ARCSpecies(label='SO2(T)', smiles='O=[S][O]', multiplicity=3, xyz=so2_t_xyz)
p_1 = ARCSpecies(label='SO2(S)', smiles='O=S=O', multiplicity=1, xyz=so2_s_xyz)
rxn = ARCReaction(reactants=['SO2(T)'], products=['SO2(S)'])
rxn.r_species = [r_1]
rxn.p_species = [p_1]
self.assertEqual(rxn.atom_map, [1, 0, 2])
self.assertTrue(check_atom_map(rxn))
# 1,2_Insertion_CO: C4H10 + CO <=> C5H10O
c4h10_xyz = {'coords': ((-0.5828455298013108, 1.3281531294599287, -0.04960015063595639),
(0.20452033859928953, 0.05503751610159247, -0.351590668388836),
(1.2187217734495472, -0.22435034939324036, 0.7553438935018645),
(-0.7402757883531311, -1.131897259046642, -0.526270047908048),
(-1.149632334529979, 1.2345299096044358, 0.8830543278319224),
(-1.2910247691071444, 1.5474495198220646, -0.8556442099189145),
(0.08958996004802251, 2.187049294072444, 0.047578963870699015),
(0.7510696374695547, 0.20211678856476709, -1.2911649516059494),
(1.9161788635733445, 0.6129834282608764, 0.8637033961259424),
(0.723393227383255, -0.37955365746174813, 1.7199258030015812),
(1.8052293751859985, -1.1207509229675587, 0.5277678765569422),
(-1.4506401201091412, -0.9467671747910582, -1.3389353480864132),
(-1.31330819789714, -1.3230974306153704, 0.3874767468986707),
(-0.18097643591114793, -2.04090279161046, -0.7716456312435797)),
'isotopes': (12, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
'symbols': ('C', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H')}
co_xyz = {'coords': ((0, 0, -0.6748240),
(0, 0, 0.5061180)),
'isotopes': (12, 16),
'symbols': ('C', 'O')}
c5h10o_xyz = {'coords': ((1.4311352287218408, -0.1713595727440808, -0.4215888483848517),
(-0.007186117613478591, 0.06984820110647515, 0.04712543561838732),
(-0.9581449869575146, 0.0768516496023853, -1.153820345745391),
(-0.42459441572492335, -1.0196556708425513, 1.0398144790596706),
(-0.06395445555126768, 1.4459669990683603, 0.6988370467311186),
(-0.39842691952831133, 1.6544415349370807, 1.860895997103657),
(1.7538538399565853, 0.5988164487250668, -1.1317944597170102),
(1.5308761668570723, -1.1450780226312873, -0.9137377478255552),
(2.130467943093651, -0.145756780679422, 0.4221764324976206),
(-1.9882342251557934, 0.2821166362714845, -0.8400630940054319),
(-0.6807867076715277, 0.8517398665867646, -1.8779276281234922),
(-0.9490513003000888, -0.8874499123119038, -1.6737493906621435),
(0.23329847490706446, -1.0315570674753483, 1.9164599735169805),
(-0.3863240121264062, -2.0126378831961222, 0.578337115559457),
(-1.4463966539332702, -0.8570614833514035, 1.4016914821743647),
(0.22346814102625032, 2.2907750569345855, 0.04734355220249537)),
'isotopes': (12, 12, 12, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
'symbols': ('C', 'C', 'C', 'C', 'C', 'O', 'H', 'H', | |
from dataclasses import dataclass
from typing import List, Optional, Union
from fedot.core.dag.graph_node import GraphNode
from fedot.core.data.data import InputData, OutputData
from fedot.core.data.merge.data_merger import DataMerger
from fedot.core.log import Log, default_log
from fedot.core.operations.factory import OperationFactory
from fedot.core.operations.operation import Operation
from fedot.core.optimisers.timer import Timer
from fedot.core.repository.default_params_repository import DefaultOperationParamsRepository
from fedot.core.repository.operation_types_repository import OperationTypesRepository
from fedot.core.utils import DEFAULT_PARAMS_STUB
@dataclass
class NodeMetadata:
    """Auxiliary per-node information."""
    # Quality metric value associated with the node; None until evaluated.
    metric: Optional[float] = None
class Node(GraphNode):
    """
    Base class for Node definition in Pipeline structure

    :param nodes_from: parent nodes which information comes from
    :param operation_type: str type of the operation defined in operation repository
        the custom prefix can be added after / (to highlight the specific node)
        The prefix will be ignored at Implementation stage
    :param log: Log object to record messages
    """

    def __init__(self, nodes_from: Optional[List['Node']],
                 operation_type: Optional[Union[str, 'Operation']] = None,
                 log: Optional[Log] = None, **kwargs):
        passed_content = kwargs.get('content')
        if passed_content:
            # Define operation, based on content dictionary
            operation = self._process_content_init(passed_content)
            default_params = get_default_params(operation.operation_type)
            # Keep the repository defaults only when the stub was passed AND such
            # defaults exist; otherwise store the explicitly passed parameters.
            # (Fixes a redundant second get_default_params() call in the old code.)
            if passed_content['params'] != DEFAULT_PARAMS_STUB or default_params is None:
                default_params = passed_content['params']
            self.metadata = passed_content.get('metadata', NodeMetadata())
        else:
            # There is no content for node - resolve the operation directly
            operation = self._process_direct_init(operation_type)
            # Define operation with default parameters
            default_params = get_default_params(operation.operation_type)
            self.metadata = NodeMetadata()
        if not default_params:
            # no defaults known for this operation - fall back to the stub marker
            default_params = DEFAULT_PARAMS_STUB
        self.fit_time_in_seconds = 0
        self.inference_time_in_seconds = 0
        # Create Node with default content
        super().__init__(content={'name': operation,
                                  'params': default_params}, nodes_from=nodes_from)
        self.log = log or default_log(__name__)
        self._fitted_operation = None
        self.rating = None

    def _process_content_init(self, passed_content: dict) -> Operation:
        """ Updating content in the node: resolve and store the operation object """
        if isinstance(passed_content['name'], str):
            # Need to convert name of operation into operation class object
            operation_factory = OperationFactory(operation_name=passed_content['name'])
            operation = operation_factory.get_operation()
            passed_content.update({'name': operation})
        else:
            operation = passed_content['name']
        self.content = passed_content
        return operation

    @staticmethod
    def _process_direct_init(operation_type) -> Operation:
        """ Define operation based on direct operation_type without defining content in the node """
        if not operation_type:
            raise ValueError('Operation is not defined in the node')
        if not isinstance(operation_type, str):
            # a non-string operation_type is treated as a ready Operation (e.g. AtomizedModel)
            operation = operation_type
        else:
            # Define appropriate operation or data operation by its string name
            operation_factory = OperationFactory(operation_name=operation_type)
            operation = operation_factory.get_operation()
        return operation

    def _filter_params(self, returned_params: Union[dict, tuple]) -> dict:
        """
        Filters out the desired parameter values from what Implementation returns

        :param returned_params: either a plain dict (nothing changed during fitting)
            or a (params_dict, changed_param_names) tuple
        :return: dictionary with the parameters relevant for this node
        """
        if not isinstance(returned_params, tuple):
            # Parameters were not changed during the Implementation fitting
            return self.custom_params
        params_dict, changed_param_names = returned_params
        # Take only changed parameters from returned ones
        changed_params = {key: params_dict[key] for key in changed_param_names}
        if self.custom_params != DEFAULT_PARAMS_STUB:
            # Copy before popping so the dict stored in self.content is not mutated in place
            current_params = dict(self.custom_params)
            for changed_param in changed_param_names:
                current_params.pop(changed_param, None)
            return {**current_params, **changed_params}
        # Default parameters were changed
        return changed_params

    def update_params(self):
        """ Pull (possibly corrected) parameters from the fitted operation into the node """
        new_params = self.fitted_operation.get_params()
        # Filter parameters
        filtered_params = self._filter_params(new_params)
        if filtered_params != DEFAULT_PARAMS_STUB:
            self.custom_params = filtered_params

    # wrappers for 'operation' field from GraphNode class
    @property
    def operation(self):
        return self.content['name']

    @operation.setter
    def operation(self, value):
        self.content.update({'name': value})

    @property
    def fitted_operation(self):
        # hasattr-based access: unfit() removes the attribute entirely
        if hasattr(self, '_fitted_operation'):
            return self._fitted_operation
        else:
            return None

    @fitted_operation.setter
    def fitted_operation(self, value):
        if value is None:
            if hasattr(self, '_fitted_operation'):
                del self._fitted_operation
        else:
            self._fitted_operation = value

    def unfit(self):
        """ Drop the fitted operation so the node is re-fitted on the next fit() call """
        self.fitted_operation = None

    def fit(self, input_data: InputData) -> OutputData:
        """
        Run training process in the node

        :param input_data: data used for operation training
        """
        if self.fitted_operation is None:
            with Timer(log=self.log) as t:
                self.fitted_operation, operation_predict = self.operation.fit(params=self.content['params'],
                                                                              data=input_data,
                                                                              is_fit_pipeline_stage=True)
            self.fit_time_in_seconds = round(t.seconds_from_start, 3)
        else:
            # already fitted: only produce the fit-stage prediction
            operation_predict = self.operation.predict(fitted_operation=self.fitted_operation,
                                                       data=input_data,
                                                       is_fit_pipeline_stage=True,
                                                       params=self.content['params'])
        # Update parameters after operation fitting (they can be corrected)
        not_atomized_operation = 'atomized' not in self.operation.operation_type
        if not_atomized_operation and 'correct_params' in self.operation.metadata.tags:
            self.update_params()
        return operation_predict

    def predict(self, input_data: InputData, output_mode: str = 'default') -> OutputData:
        """
        Run prediction process in the node

        :param input_data: data used for prediction
        :param output_mode: desired output for operations (e.g. labels, probs, full_probs)
        """
        with Timer(log=self.log) as t:
            operation_predict = self.operation.predict(fitted_operation=self.fitted_operation,
                                                       params=self.content['params'],
                                                       data=input_data,
                                                       output_mode=output_mode,
                                                       is_fit_pipeline_stage=False)
        self.inference_time_in_seconds = round(t.seconds_from_start, 3)
        return operation_predict

    @property
    def custom_params(self) -> dict:
        return self.content.get('params')

    @custom_params.setter
    def custom_params(self, params):
        if params:
            if params != DEFAULT_PARAMS_STUB:
                # Complete the dictionary if it is incomplete
                default_params = get_default_params(self.operation.operation_type)
                if default_params is not None:
                    params = {**default_params, **params}
                # take nested composer params if they appeared
                if 'nested_space' in params:
                    params = params['nested_space']
            self.content.update({'params': params})

    def __str__(self):
        return str(self.operation.operation_type)

    @property
    def tags(self):
        """ Return tags of operation in the node. """
        if 'atomized' in self.operation.operation_type:
            # There are no tags for atomized operation
            return []
        info = OperationTypesRepository(operation_type='all').operation_info_by_id(self.operation.operation_type)
        if info is not None:
            return info.tags
        # unknown operation: keep the historical behavior of returning None (not [])
        return None
class PrimaryNode(Node):
    """
    The class defines the interface of Primary nodes where initial task data is located

    :param operation_type: str type of the operation defined in operation repository
    :param node_data: dictionary with InputData for fit and predict stage
    :param kwargs: optional arguments (i.e. logger)
    """
    def __init__(self, operation_type: Optional[Union[str, 'Operation']] = None, node_data: dict = None, **kwargs):
        # A primary node never has parents; drop any 'nodes_from' passed through by generic callers.
        if 'nodes_from' in kwargs:
            del kwargs['nodes_from']
        super().__init__(nodes_from=None, operation_type=operation_type, **kwargs)
        if node_data is None:
            self._node_data = {}
            self.direct_set = False
        else:
            self._node_data = node_data
            # Was the data passed directly to the node or not
            self.direct_set = True
    def fit(self, input_data: InputData, **kwargs) -> OutputData:
        """
        Fit the operation located in the primary node

        :param input_data: data used for operation training
        """
        self.log.ext_debug(f'Trying to fit primary node with operation: {self.operation}')
        # directly-set data takes precedence over the passed argument; otherwise cache the argument
        if self.direct_set:
            input_data = self.node_data
        else:
            self.node_data = input_data
        return super().fit(input_data)
    def unfit(self):
        # Drop both the fitted operation and any cached data.
        self.fitted_operation = None
        # NOTE(review): 'node_data' is a class-level property, so hasattr() is always
        # True here; '_node_data' may have been intended - confirm before changing.
        if hasattr(self, 'node_data'):
            self.node_data = None
    def predict(self, input_data: InputData,
                output_mode: str = 'default') -> OutputData:
        """
        Predict using the operation located in the primary node

        :param input_data: data used for prediction
        :param output_mode: desired output for operations (e.g. labels, probs, full_probs)
        """
        self.log.ext_debug(f'Predict in primary node by operation: {self.operation}')
        # same precedence rule as in fit(): directly-set data wins
        if self.direct_set:
            input_data = self.node_data
        else:
            self.node_data = input_data
        return super().predict(input_data, output_mode)
    def get_data_from_node(self):
        """ Method returns data if the data was set to the nodes directly """
        return self.node_data
    @property
    def node_data(self):
        # Falls back to an empty dict once the backing attribute was deleted by the setter.
        if hasattr(self, '_node_data'):
            return self._node_data
        else:
            return {}
    @node_data.setter
    def node_data(self, value):
        # Assigning None deletes the backing attribute instead of storing None.
        if value is None:
            if hasattr(self, '_node_data'):
                del self._node_data
        else:
            self._node_data = value
class SecondaryNode(Node):
    """
    The class defines the interface of Secondary nodes modifying the data flow in Pipeline

    :param operation_type: str type of the operation defined in operation repository
    :param nodes_from: parent nodes where data comes from
    :param kwargs: optional arguments (i.e. logger)
    """

    def __init__(self, operation_type: Optional[Union[str, 'Operation']] = None,
                 nodes_from: Optional[List['Node']] = None, **kwargs):
        super().__init__(nodes_from=nodes_from if nodes_from is not None else [],
                         operation_type=operation_type, **kwargs)

    def fit(self, input_data: InputData, **kwargs) -> OutputData:
        """
        Fit the operation located in the secondary node

        :param input_data: data used for operation training
        """
        self.log.ext_debug(f'Trying to fit secondary node with operation: {self.operation}')

        secondary_input = self._input_from_parents(input_data=input_data, parent_operation='fit')
        return super().fit(input_data=secondary_input)

    def predict(self, input_data: InputData, output_mode: str = 'default') -> OutputData:
        """
        Predict using the operation located in the secondary node

        :param input_data: data used for prediction
        :param output_mode: desired output for operations (e.g. labels, probs, full_probs)
        """
        self.log.ext_debug(f'Obtain prediction in secondary node with operation: {self.operation}')

        secondary_input = self._input_from_parents(input_data=input_data,
                                                   parent_operation='predict')
        return super().predict(input_data=secondary_input, output_mode=output_mode)

    def _input_from_parents(self, input_data: InputData,
                            parent_operation: str) -> InputData:
        """Collect parent outputs and merge them into a single InputData."""
        if len(self.nodes_from) == 0:
            raise ValueError('No parent nodes found')

        self.log.ext_debug(f'Fit all parent nodes in secondary node with operation: {self.operation}')

        ordered_parents = self._nodes_from_with_fixed_order()
        parent_results, target = _combine_parents(ordered_parents, input_data,
                                                  parent_operation)
        secondary_input = DataMerger.get(parent_results, log=self.log).merge()

        # Remember which operations produced the merged data
        secondary_input.supplementary_data.previous_operations = \
            [node.operation.operation_type for node in ordered_parents]
        return secondary_input

    def _nodes_from_with_fixed_order(self):
        """Return parents in a deterministic order (sorted by descriptive id)."""
        if self.nodes_from is None:
            return None
        return sorted(self.nodes_from, key=lambda parent: parent.descriptive_id)
def _combine_parents(parent_nodes: List[Node],
input_data: InputData,
parent_operation: str):
"""
Method for combining predictions from parent node or nodes
:param parent_nodes: list of parent nodes, from which predictions will
be combined
:param input_data: input data from pipeline abstraction (source input data)
:param parent_operation: name of parent operation (fit or predict)
:return parent_results: list with OutputData from parent nodes
:return target: target for final pipeline prediction
"""
if input_data is not None:
# InputData was set to pipeline
target = input_data.target
parent_results = []
for parent in parent_nodes:
if parent_operation == 'predict':
prediction = | |
<reponame>BogdanNovikov/Youtube-notifier<filename>venv/Lib/site-packages/googleapiclient/discovery.py
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs.
A client library for Google's discovery based APIs.
"""
from __future__ import absolute_import
import six
from six.moves import zip
__author__ = "<EMAIL> (<NAME>)"
__all__ = ["build", "build_from_document", "fix_method_name", "key2param"]
from six import BytesIO
from six.moves import http_client
from six.moves.urllib.parse import urlencode, urlparse, urljoin, urlunparse, parse_qsl
# Standard library imports
import copy
from collections import OrderedDict
try:
from email.generator import BytesGenerator
except ImportError:
from email.generator import Generator as BytesGenerator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
import json
import keyword
import logging
import mimetypes
import os
import re
# Third-party imports
import httplib2
import uritemplate
import google.api_core.client_options
from google.auth.transport import mtls
from google.auth.exceptions import MutualTLSChannelError
try:
import google_auth_httplib2
except ImportError: # pragma: NO COVER
google_auth_httplib2 = None
# Local imports
from googleapiclient import _auth
from googleapiclient import mimeparse
from googleapiclient.errors import HttpError
from googleapiclient.errors import InvalidJsonError
from googleapiclient.errors import MediaUploadSizeError
from googleapiclient.errors import UnacceptableMimeTypeError
from googleapiclient.errors import UnknownApiNameOrVersion
from googleapiclient.errors import UnknownFileType
from googleapiclient.http import build_http
from googleapiclient.http import BatchHttpRequest
from googleapiclient.http import HttpMock
from googleapiclient.http import HttpMockSequence
from googleapiclient.http import HttpRequest
from googleapiclient.http import MediaFileUpload
from googleapiclient.http import MediaUpload
from googleapiclient.model import JsonModel
from googleapiclient.model import MediaModel
from googleapiclient.model import RawModel
from googleapiclient.schema import Schemas
from googleapiclient._helpers import _add_query_parameter
from googleapiclient._helpers import positional
# The client library requires a version of httplib2 that supports RETRIES.
httplib2.RETRIES = 1
logger = logging.getLogger(__name__)
URITEMPLATE = re.compile("{[^}]*}")
VARNAME = re.compile("[a-zA-Z0-9_-]+")
DISCOVERY_URI = (
"https://www.googleapis.com/discovery/v1/apis/" "{api}/{apiVersion}/rest"
)
V1_DISCOVERY_URI = DISCOVERY_URI
V2_DISCOVERY_URI = (
"https://{api}.googleapis.com/$discovery/rest?" "version={apiVersion}"
)
DEFAULT_METHOD_DOC = "A description of how to use this function"
HTTP_PAYLOAD_METHODS = frozenset(["PUT", "POST", "PATCH"])
_MEDIA_SIZE_BIT_SHIFTS = {"KB": 10, "MB": 20, "GB": 30, "TB": 40}
BODY_PARAMETER_DEFAULT_VALUE = {"description": "The request body.", "type": "object"}
MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
"description": (
"The filename of the media request body, or an instance "
"of a MediaUpload object."
),
"type": "string",
"required": False,
}
MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE = {
"description": (
"The MIME type of the media request body, or an instance "
"of a MediaUpload object."
),
"type": "string",
"required": False,
}
_PAGE_TOKEN_NAMES = ("pageToken", "nextPageToken")
# Parameters controlling mTLS behavior. See https://google.aip.dev/auth/4114.
GOOGLE_API_USE_CLIENT_CERTIFICATE = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
GOOGLE_API_USE_MTLS_ENDPOINT = "GOOGLE_API_USE_MTLS_ENDPOINT"
# Parameters accepted by the stack, but not visible via discovery.
# TODO(dhermes): Remove 'userip' in 'v2'.
STACK_QUERY_PARAMETERS = frozenset(["trace", "pp", "userip", "strict"])
STACK_QUERY_PARAMETER_DEFAULT_VALUE = {"type": "string", "location": "query"}
# Library-specific reserved words beyond Python keywords.
RESERVED_WORDS = frozenset(["body"])
# patch _write_lines to avoid munging '\r' into '\n'
# ( https://bugs.python.org/issue18886 https://bugs.python.org/issue19003 )
class _BytesGenerator(BytesGenerator):
    # Route _write_lines through write() so '\r' is not munged into '\n'
    # (https://bugs.python.org/issue18886, https://bugs.python.org/issue19003).
    _write_lines = BytesGenerator.write
def fix_method_name(name):
    """Fix method names to avoid '$' characters and reserved word conflicts.

    Args:
      name: string, method name.

    Returns:
      The name with '_' appended if the name is a reserved word and '$' and '-'
      replaced with '_'.
    """
    sanitized = name.replace("$", "_").replace("-", "_")
    # Reserved identifiers get a trailing underscore so they stay callable.
    needs_suffix = keyword.iskeyword(sanitized) or sanitized in RESERVED_WORDS
    return sanitized + "_" if needs_suffix else sanitized
def key2param(key):
    """Converts key names into parameter names.

    For example, converting "max-results" -> "max_results"

    Args:
      key: string, the method key name.

    Returns:
      A safe method name based on the key name.
    """
    # Identifiers must not start with a digit; prefix an 'x' in that case.
    prefix = "" if key[0].isalpha() else "x"
    return prefix + "".join(c if c.isalnum() else "_" for c in key)
@positional(2)
def build(
    serviceName,
    version,
    http=None,
    discoveryServiceUrl=DISCOVERY_URI,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest,
    credentials=None,
    cache_discovery=True,
    cache=None,
    client_options=None,
    adc_cert_path=None,
    adc_key_path=None,
    num_retries=1,
):
    """Construct a Resource for interacting with an API.

    Construct a Resource object for interacting with an API. The serviceName and
    version are the names from the Discovery service.

    Args:
      serviceName: string, name of the service.
      version: string, the version of the service.
      http: httplib2.Http, An instance of httplib2.Http or something that acts
        like it that HTTP requests will be made through.
      discoveryServiceUrl: string, a URI Template that points to the location of
        the discovery service. It should have two parameters {api} and
        {apiVersion} that when filled in produce an absolute URI to the discovery
        document for that service.
      developerKey: string, key obtained from
        https://code.google.com/apis/console.
      model: googleapiclient.Model, converts to and from the wire format.
      requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP
        request.
      credentials: oauth2client.Credentials or
        google.auth.credentials.Credentials, credentials to be used for
        authentication.
      cache_discovery: Boolean, whether or not to cache the discovery doc.
      cache: googleapiclient.discovery_cache.base.CacheBase, an optional
        cache object for the discovery documents.
      client_options: Mapping object or google.api_core.client_options, client
        options to set user options on the client.
        (1) The API endpoint should be set through client_options. If API endpoint
        is not set, `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable can be used
        to control which endpoint to use.
        (2) client_cert_source is not supported, client cert should be provided using
        client_encrypted_cert_source instead. In order to use the provided client
        cert, `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be
        set to `true`.
        More details on the environment variables are here:
        https://google.aip.dev/auth/4114
      adc_cert_path: str, client certificate file path to save the application
        default client certificate for mTLS. This field is required if you want to
        use the default client certificate. `GOOGLE_API_USE_CLIENT_CERTIFICATE`
        environment variable must be set to `true` in order to use this field,
        otherwise this field does nothing.
        More details on the environment variables are here:
        https://google.aip.dev/auth/4114
      adc_key_path: str, client encrypted private key file path to save the
        application default client encrypted private key for mTLS. This field is
        required if you want to use the default client certificate.
        `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be set to
        `true` in order to use this field, otherwise this field does nothing.
        More details on the environment variables are here:
        https://google.aip.dev/auth/4114
      num_retries: Integer, number of times to retry discovery with
        randomized exponential backoff in case of intermittent/connection issues.

    Returns:
      A Resource object with methods for interacting with the service.

    Raises:
      google.auth.exceptions.MutualTLSChannelError: if there are any problems
        setting up mutual TLS channel.
    """
    params = {"api": serviceName, "apiVersion": version}
    if http is None:
        discovery_http = build_http()
    else:
        discovery_http = http
    # Try each candidate discovery URI in turn; the first one that yields a
    # document wins.
    for discovery_url in \
        _discovery_service_uri_options(discoveryServiceUrl, version):
        requested_url = uritemplate.expand(discovery_url, params)
        try:
            content = _retrieve_discovery_doc(
                requested_url, discovery_http, cache_discovery, cache,
                developerKey, num_retries=num_retries
            )
            return build_from_document(
                content,
                base=discovery_url,
                http=http,
                developerKey=developerKey,
                model=model,
                requestBuilder=requestBuilder,
                credentials=credentials,
                client_options=client_options,
                adc_cert_path=adc_cert_path,
                adc_key_path=adc_key_path,
            )
        except HttpError as e:
            if e.resp.status == http_client.NOT_FOUND:
                # Not found at this URI; fall through to the next candidate.
                continue
            else:
                raise e
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName, version))
def _discovery_service_uri_options(discoveryServiceUrl, version):
    """
    Returns Discovery URIs to be used for attempting to build the API Resource.

    Args:
      discoveryServiceUrl:
        string, the Original Discovery Service URL preferred by the customer.
      version:
        string, API Version requested

    Returns:
      A list of URIs to be tried for the Service Discovery, in order.
    """
    candidate_urls = [discoveryServiceUrl, V2_DISCOVERY_URI]
    # V1 Discovery won't work if the requested version is None
    if version is None and discoveryServiceUrl == V1_DISCOVERY_URI:
        logger.warning(
            "Discovery V1 does not support empty versions. Defaulting to V2...")
        candidate_urls.pop(0)
    # Deduplicate while preserving order.
    return list(OrderedDict.fromkeys(candidate_urls))
def _retrieve_discovery_doc(url, http, cache_discovery,
cache=None, developerKey=None, num_retries=1):
"""Retrieves the discovery_doc from cache or the internet.
Args:
url: string, the URL of the discovery document.
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it through which HTTP requests will be made.
cache_discovery: Boolean, whether or not to cache the discovery doc.
cache: googleapiclient.discovery_cache.base.Cache, an optional cache
object for the discovery documents.
developerKey: string, Key for controlling API usage, generated
from the API Console.
num_retries: Integer, number of times to retry discovery with
randomized exponential backoff in case of intermittent/connection issues.
Returns:
A unicode string representation of the discovery document.
"""
if cache_discovery:
from . import discovery_cache
if cache is None:
cache = discovery_cache.autodetect()
if cache:
content = cache.get(url)
if content:
return content
actual_url = url
# REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
# variable that contains the network address of the client sending the
# request. If it exists then add that to the request for the discovery
# document to avoid exceeding the quota on discovery requests.
if "REMOTE_ADDR" in os.environ:
actual_url = _add_query_parameter(url, "userIp", os.environ["REMOTE_ADDR"])
if developerKey:
actual_url = _add_query_parameter(url, "key", developerKey)
logger.debug("URL being requested: GET %s", actual_url)
# Execute this request with retries build into HttpRequest
| |
<filename>src/experiments/model.py
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision
from torchvision import datasets, models, transforms
class DoubleConv(nn.Module):
    """Two 3x3 conv -> batch-norm -> ReLU stages; padding preserves H and W."""

    def __init__(self, input_channels, output_channels):
        super(DoubleConv, self).__init__()
        layers = [
            nn.Conv2d(input_channels, output_channels, 3, padding=1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(True),
            nn.Conv2d(output_channels, output_channels, 3, padding=1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(True),
        ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.double_conv(x)
class SingleConv(nn.Module):
    """One 3x3 conv -> batch-norm -> ReLU stage; padding preserves H and W."""

    def __init__(self, input_channels, output_channels):
        super(SingleConv, self).__init__()
        layers = [
            nn.Conv2d(input_channels, output_channels, 3, padding=1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(True),
        ]
        self.single_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.single_conv(x)
class DoubleConv2(nn.Module):
    """Like DoubleConv but with an independent middle channel width."""

    def __init__(self, input_channels, middle_channels, output_channels):
        super(DoubleConv2, self).__init__()
        layers = [
            nn.Conv2d(input_channels, middle_channels, 3, padding=1),
            nn.BatchNorm2d(middle_channels),
            nn.ReLU(True),
            nn.Conv2d(middle_channels, output_channels, 3, padding=1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(True),
        ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.double_conv(x)
class DownConv(nn.Module):
    """Halve spatial resolution with 2x2 max-pooling, then apply DoubleConv."""

    def __init__(self, input_channels, output_channels):
        super(DownConv, self).__init__()
        self.down_conv = nn.Sequential(
            nn.MaxPool2d(2, 2),
            DoubleConv(input_channels, output_channels),
        )

    def forward(self, x):
        return self.down_conv(x)
class DownConv2(nn.Module):
    """Halve spatial resolution with 2x2 max-pooling, then apply DoubleConv2.

    :param input_channels: channels of the incoming feature map
    :param middle_channels: channels after the first conv of DoubleConv2
    :param output_channels: channels of the produced feature map
    """

    def __init__(self, input_channels, middle_channels, output_channels):
        # BUG FIX: the original called super(DownConv, self).__init__(), which
        # raises TypeError because DownConv2 is not a subclass of DownConv.
        super(DownConv2, self).__init__()
        self.down_conv = nn.Sequential(
            nn.MaxPool2d(2, 2),
            DoubleConv2(input_channels, middle_channels, output_channels)
        )

    def forward(self, x):
        x = self.down_conv(x)
        return x
class UpConv(nn.Module):
    """Upsample via transposed conv, concat the skip connection, then DoubleConv."""

    def __init__(self, input_channels, output_channels):
        super(UpConv, self).__init__()
        self.up_conv = nn.ConvTranspose2d(input_channels, input_channels // 2, 2, stride=2)
        self.conv = DoubleConv(input_channels, output_channels)

    def forward(self, x, x_prev):
        upsampled = self.up_conv(x)
        merged = torch.cat((upsampled, x_prev), 1)
        return self.conv(merged)
class SimpleUpConv(nn.Module):
    # Upsamples x with a transposed conv, then applies a single conv stage.
    def __init__(self, input_channels,output_channels):
        super(SimpleUpConv,self).__init__()
        self.up_conv = nn.ConvTranspose2d(input_channels,input_channels//2, 2, stride =2)
        self.conv = SingleConv(input_channels, output_channels)
    def forward(self,x, x_prev):
        # NOTE(review): x_prev is accepted but never used, and after up_conv the
        # tensor has input_channels//2 channels while self.conv expects
        # input_channels - a concatenation with x_prev (as in UpConv.forward)
        # looks missing here. Confirm intent before relying on this module.
        x = self.up_conv(x)
        x = self.conv(x)
        return x
class DownSpatial(nn.Module):
    """Max-pool, convolve, apply a spatial transformer, then convolve again."""

    def __init__(self, input_channels, output_channels, H_in, W_in, depth, dropout=0.5):
        super(DownSpatial, self).__init__()
        self.down_spatial = nn.Sequential(
            nn.MaxPool2d(2, 2),
            DoubleConv(input_channels, output_channels),
            SpatialTransformer(output_channels, H_in, W_in, depth, dropout=dropout),
            DoubleConv(output_channels, output_channels),
        )

    def forward(self, x):
        return self.down_spatial(x)
class UpSpatial(nn.Module):
    """Upsample x, spatially transform the skip tensor, concat, then DoubleConv."""

    def __init__(self, input_channels, output_channels, H_in, W_in, depth, dropout=0.5):
        super(UpSpatial, self).__init__()
        self.up_conv = nn.ConvTranspose2d(input_channels, input_channels // 2, 2, stride=2)
        self.conv = DoubleConv(input_channels, output_channels)
        self.spatial = SpatialTransformer(input_channels // 2, H_in, W_in, depth, dropout=dropout)

    def forward(self, x, x_prev):
        upsampled = self.up_conv(x)
        skip = self.spatial(x_prev)
        return self.conv(torch.cat((upsampled, skip), 1))
class ConvPool(nn.Module):
    """DoubleConv followed by 2x2 max-pooling (halves H and W)."""

    def __init__(self, input_channels, output_channels):
        super(ConvPool, self).__init__()
        self.conv_pool = nn.Sequential(
            DoubleConv(input_channels, output_channels),
            nn.MaxPool2d(2, stride=2),
        )

    def forward(self, x):
        return self.conv_pool(x)
class LocalisationNetwork(nn.Module):
    """Regress a 2x3 affine matrix from a feature map.

    A stack of `depth` ConvPool stages halves the channel count and the
    spatial size at every step; a small fully-connected head then predicts
    the six affine parameters, initialised to the identity transform.
    """

    def __init__(self, channels_in, H_in, W_in, depth, dropout=0.5):
        super(LocalisationNetwork, self).__init__()
        # Spatial transformer localisation network: depth ConvPool stages,
        # each halving channels, height and width.
        stages = []
        channels, height, width = channels_in, H_in, W_in
        for _ in range(depth):
            stages.append(ConvPool(channels, channels // 2))
            channels //= 2
            height //= 2
            width //= 2
        self.localization = nn.Sequential(*stages)

        # Regressor for the 3 * 2 affine matrix
        self.fc_loc = nn.Sequential(
            nn.Linear(channels * height * width, 64),
            nn.ReLU(True),
            nn.Dropout(p=dropout),
            nn.Linear(64, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )
        self.channels = channels
        self.H_out = height
        self.W_out = width

        # Initialise the last layer so the predicted transform starts as identity.
        self.fc_loc[-1].weight.data.zero_()
        self.fc_loc[-1].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    def forward(self, x):
        features = self.localization(x)
        features = features.view(-1, self.channels * self.H_out * self.W_out)
        return self.fc_loc(features).view(-1, 2, 3)
class SpatialTransformer(nn.Module):
    """Warp the input with an affine grid predicted by a localisation network."""

    def __init__(self, channels_in, H_in, W_in, depth, dropout=0.5):
        super(SpatialTransformer, self).__init__()
        self.localisation_network = LocalisationNetwork(channels_in, H_in, W_in, depth, dropout=dropout)

    def forward(self, x):
        theta = self.localisation_network(x)
        sampling_grid = F.affine_grid(theta, x.size())
        return F.grid_sample(x, sampling_grid)
class AffineTransformer(nn.Module):
    """Warp an input tensor with an externally supplied batch of 2x3 affine matrices."""

    def __init__(self):
        super(AffineTransformer, self).__init__()

    def forward(self, x, theta):
        sampling_grid = F.affine_grid(theta, x.size())
        return F.grid_sample(x, sampling_grid)
class UpAffine(nn.Module):
    """Upsample x, affine-warp the skip tensor, concat (up + warped + raw skip), conv."""

    def __init__(self, input_channels, middle_channels, output_channels):
        super(UpAffine, self).__init__()
        self.up_conv = nn.ConvTranspose2d(input_channels, input_channels // 2, 2, stride=2)
        self.conv = DoubleConv2(int(3 * input_channels // 2), middle_channels, output_channels)
        self.spatial = AffineTransformer()

    def forward(self, x, x_prev, theta):
        upsampled = self.up_conv(x)
        warped_skip = self.spatial(x_prev, theta)
        return self.conv(torch.cat((upsampled, warped_skip, x_prev), 1))
class UpAffineBaseline(nn.Module):
    """Like UpAffine but returns the concatenation without a final conv."""

    def __init__(self, input_channels):
        super(UpAffineBaseline, self).__init__()
        self.up_conv = nn.ConvTranspose2d(input_channels, input_channels // 2, 2, stride=2)
        self.spatial = AffineTransformer()

    def forward(self, x, x_prev, theta):
        upsampled = self.up_conv(x)
        warped_skip = self.spatial(x_prev, theta)
        return torch.cat((upsampled, warped_skip, x_prev), 1)
class UpAffineWithRelu(nn.Module):
    """UpAffine variant whose transposed conv is followed by batch-norm and ReLU."""

    def __init__(self, input_channels, middle_channels, output_channels):
        super(UpAffineWithRelu, self).__init__()
        self.up_conv = nn.Sequential(
            nn.ConvTranspose2d(input_channels, input_channels // 2, 2, stride=2),
            nn.BatchNorm2d(input_channels // 2),
            nn.ReLU(True),
        )
        self.conv = DoubleConv2(int(3 * input_channels // 2), middle_channels, output_channels)
        self.spatial = AffineTransformer()

    def forward(self, x, x_prev, theta):
        upsampled = self.up_conv(x)
        warped_skip = self.spatial(x_prev, theta)
        return self.conv(torch.cat((upsampled, warped_skip, x_prev), 1))
class UpAffineBaselineWithRelu(nn.Module):
    """UpAffineBaseline variant with batch-norm and ReLU after the transposed conv."""

    def __init__(self, input_channels):
        super(UpAffineBaselineWithRelu, self).__init__()
        self.up_conv = nn.Sequential(
            nn.ConvTranspose2d(input_channels, input_channels // 2, 2, stride=2),
            nn.BatchNorm2d(input_channels // 2),
            nn.ReLU(True),
        )
        self.spatial = AffineTransformer()

    def forward(self, x, x_prev, theta):
        upsampled = self.up_conv(x)
        warped_skip = self.spatial(x_prev, theta)
        return torch.cat((upsampled, warped_skip, x_prev), 1)
class SpatialConv(nn.Module):
    """Predict an affine warp of the input, concat warped and raw maps, convolve.

    Returns both the convolved features and the predicted theta so the same
    warp can be reused elsewhere (e.g. by a decoder).
    """

    def __init__(self, input_channels, H_in, W_in, depth, dropout=0.5):
        super(SpatialConv, self).__init__()
        self.local = LocalisationNetwork(input_channels, H_in, W_in, depth, dropout=dropout)
        self.affine = AffineTransformer()
        self.conv = DoubleConv(int(input_channels * 2), input_channels)

    def forward(self, x):
        theta = self.local(x)
        warped = self.affine(x, theta)
        features = self.conv(torch.cat((warped, x), 1))
        return features, theta
# Incompatible with binary cross-entropy loss: forward() returns raw logits
# (no sigmoid), so pair it with a logits-aware loss or use eval_forward().
class SpatialUnet2(nn.Module):
    """U-Net variant with a single affine spatial transformer in the bottleneck.

    The theta predicted at the bottleneck is reused (optionally rescaled by
    per-stage multipliers) to warp the skip connections in every decoder stage.
    """
    def __init__(self,initial_channels, initial_h = 128, initial_w = 256, dropout = 0.5, wrap_input_mask = True, starting_channels = 64, extra_relu = False, adjust_transform =False):
        super(SpatialUnet2, self).__init__()
        self.wrap_input_mask = wrap_input_mask
        self.adjust_transform = adjust_transform
        # Encoder (shape comments assume 128x256 input and 64 starting channels)
        self.initial_conv = DoubleConv(initial_channels,starting_channels) #128*256*64
        self.down1 = DownConv(starting_channels,starting_channels*2) #64*128*128
        self.down2 = DownConv(starting_channels*2,starting_channels*4) #32*64*256
        self.down3 = DownConv(starting_channels*4,starting_channels*8) #16*32*512
        self.bottle_neck_spatial = SpatialConv(starting_channels*8,initial_h//8,initial_w//8, 3, dropout) #16*32*1024
        if(adjust_transform):
            # NOTE(review): these are plain tensors, not nn.Parameter and not
            # registered on the module, so they are excluded from
            # parameters()/state_dict() and will not follow .to(device) or be
            # updated by an optimizer - confirm whether that is intended.
            self.multiplier1 = torch.ones((1,2,3),requires_grad = True)
            self.multiplier2 = torch.ones((1,2,3),requires_grad = True)
            self.multiplier3 = torch.ones((1,2,3),requires_grad = True)
        if(extra_relu):
            self.up2 = UpAffineWithRelu(starting_channels*8,starting_channels*8, starting_channels*4) #32*64*256
            self.up3 = UpAffineWithRelu( starting_channels*4, starting_channels*4, starting_channels*2) #64*128 *128
        else:
            self.up2 = UpAffine(starting_channels*8,starting_channels*8, starting_channels*4) #32*64*256
            self.up3 = UpAffine( starting_channels*4, starting_channels*4, starting_channels*2) #64*128 *128
        if(wrap_input_mask):
            if(extra_relu):
                self.up4 = UpAffineBaselineWithRelu(starting_channels*2) #128*256 *192
            else:
                self.up4 = UpAffineBaseline(starting_channels*2) #128*256 *192
            self.mask_transformer = AffineTransformer()
            self.double_conv = DoubleConv2(starting_channels*3+1,starting_channels*2,starting_channels)
            self.single_conv = SingleConv(starting_channels,starting_channels//2)
        else:
            if(extra_relu):
                self.up4 = UpAffineWithRelu(starting_channels*2,starting_channels*2,starting_channels)
            else:
                self.up4 = UpAffine(starting_channels*2,starting_channels*2,starting_channels)
            self.double_conv = SingleConv(starting_channels,starting_channels//2)
        self.final_conv = nn.Conv2d(starting_channels//2, 1, 1)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        x1 = self.initial_conv(x) #128*256*64
        x2 = self.down1(x1) #64*128*128
        x3 = self.down2(x2) #32*64*256
        x4 = self.down3(x3) #16*32*512
        x5, theta = self.bottle_neck_spatial(x4) #16*32*1024
        if(self.adjust_transform):
            # Rescale the bottleneck theta independently for each decoder stage
            theta1 = torch.mul(theta,self.multiplier1)
            theta2 = torch.mul(theta,self.multiplier2)
            theta3 = torch.mul(theta,self.multiplier3)
            x6 = self.up2(x5,x3, theta1) #32*64*512
            x6 = self.up3(x6,x2,theta2) # 32*128*256
            x6 = self.up4(x6,x1, theta3) #64*128*256 (or 128)
        else:
            x6 = self.up2(x5,x3, theta) #32*64*512
            x6 = self.up3(x6,x2,theta) # 32*128*256
            x6 = self.up4(x6,x1, theta) #64*128*256 (or 128)
        if(self.wrap_input_mask):
            # assumes input channel 9 carries the mask - TODO confirm against the
            # data pipeline (ResnetFeatureExtractor also slices masks from 9 on)
            mask = x[:,9,:,:]
            mask = mask.unsqueeze(1)
            mask_translated = self.mask_transformer(mask,theta)
            x = torch.cat((x6,mask_translated),1) #64*128*257
            x = self.double_conv(x)
            x = self.single_conv(x)
        else:
            x = self.double_conv(x6)
        x = self.final_conv(x)
        if(self.wrap_input_mask):
            return x, mask_translated
        else:
            return x
    def eval_forward(self,x):
        # Sigmoid-activated forward pass; also returns the warped mask when
        # wrap_input_mask is set.
        if(self.wrap_input_mask):
            x,mask_translated = self.forward(x)
            return self.sigmoid(x), mask_translated
        else:
            return self.sigmoid(self.forward(x))
    def forward_mask(self,x):
        # Binary mask: sigmoid output thresholded at 0.5.
        if(self.wrap_input_mask):
            x,_ = self.eval_forward(x)
        else:
            x = self.eval_forward(x)
        return torch.ge(x,0.5)
class SpatialUnet(nn.Module):
    """U-Net with spatial-transformer blocks in the bottleneck and every decoder stage.

    forward() returns raw single-channel logits; eval_forward() applies a
    sigmoid and forward_mask() thresholds at 0.5.
    """

    def __init__(self, initial_channels, initial_h=128, initial_w=256, dropout=0.5):
        super(SpatialUnet, self).__init__()
        # Localisation depth shrinks for inputs smaller than 128 pixels tall
        depth = int(4 - (np.log2(128) - np.log2(initial_h)))
        self.initial_conv = DoubleConv(initial_channels, 64)  # 128*256*64
        self.down1 = DownConv(64, 128)   # 64*128*128
        self.down2 = DownConv(128, 256)  # 32*64*256
        self.down3 = DownSpatial(256, 512, initial_h // 8, initial_w // 8, depth - 1, dropout)  # 16*32*512
        self.up2 = UpSpatial(512, 256, initial_h // 4, initial_w // 4, depth=depth, dropout=dropout)  # 32*64*256
        self.up3 = UpSpatial(256, 128, initial_h // 2, initial_w // 2, depth=depth, dropout=dropout)  # 64*128*128
        self.up4 = UpSpatial(128, 64, initial_h, initial_w, depth=depth, dropout=dropout)  # 128*256*64
        self.final_conv = nn.Conv2d(64, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        enc1 = self.initial_conv(x)
        enc2 = self.down1(enc1)
        enc3 = self.down2(enc2)
        bottleneck = self.down3(enc3)
        dec = self.up2(bottleneck, enc3)
        dec = self.up3(dec, enc2)
        dec = self.up4(dec, enc1)
        return self.final_conv(dec)

    def eval_forward(self, x):
        """Forward pass followed by a sigmoid, for evaluation."""
        return self.sigmoid(self.forward(x))

    def forward_mask(self, x):
        """Binary mask obtained by thresholding the sigmoid output at 0.5."""
        return torch.ge(self.eval_forward(x), 0.5)
class SingleConvAvgPool(nn.Module):
    """3x3 conv, 2x2 average-pool, then batch-norm and ReLU (halves H and W)."""

    def __init__(self, input_channels, output_channels):
        super(SingleConvAvgPool, self).__init__()
        layers = [
            nn.Conv2d(input_channels, output_channels, 3, padding=1),
            nn.AvgPool2d(2, stride=2),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(True),
        ]
        self.single_conv_pool = nn.Sequential(*layers)

    def forward(self, x):
        return self.single_conv_pool(x)
class SingleConvAvgPool2(nn.Module):
    """Like SingleConvAvgPool, but pooling happens after batch-norm and ReLU."""

    def __init__(self, input_channels, output_channels):
        super(SingleConvAvgPool2, self).__init__()
        layers = [
            nn.Conv2d(input_channels, output_channels, 3, padding=1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(True),
            nn.AvgPool2d(2, stride=2),
        ]
        self.single_conv_pool = nn.Sequential(*layers)

    def forward(self, x):
        return self.single_conv_pool(x)
class ResnetFeatureExtractor(nn.Module):
    """Frozen ResNet-18 features for three stacked RGB frames plus a mask branch.

    Input layout: channels 0-2, 3-5 and 6-8 are three RGB frames; channels 9+
    are mask channels (presumably one per frame or a single mask - TODO confirm
    against the data loader). Each frame goes through the first ResNet-18
    stages (frozen), the masks through a small trainable down-sampling stack;
    all outputs are concatenated along the channel axis.
    """
    def __init__(self, mask_channels, correction = False):
        super(ResnetFeatureExtractor, self).__init__()
        # pretrained=True downloads ImageNet weights on first use
        resnet = models.resnet18(pretrained=True)
        # Keep only conv1..layer2 and freeze them (no gradient updates)
        self.resnet_fixed = nn.Sequential(resnet.conv1,resnet.bn1,resnet.relu, resnet.maxpool, resnet.layer1, resnet.layer2)
        for param in self.resnet_fixed.parameters():
            param.requires_grad = False
        # 'correction' selects the variant that pools after BN+ReLU
        if(correction):
            self.triple_down_sample = nn.Sequential(
                SingleConvAvgPool2(mask_channels,16),
                SingleConvAvgPool2(16,32),
                SingleConvAvgPool2(32,64)
            )
        else:
            self.triple_down_sample = nn.Sequential(
                SingleConvAvgPool(mask_channels,16),
                SingleConvAvgPool(16,32),
                SingleConvAvgPool(32,64)
            )
    def forward(self, x): #TODO: Check that I'm getting the images correctly
        # Split the stacked input into three RGB frames and the mask channels
        rgb1 = x[:,0:3,:,:]
        rgb2 = x[:,3:6,:,:]
        rgb3 = x[:,6:9,:,:]
        masks = x[:,9:,:,:]
        # Guard for non-4D mask slices (for 4-D input this slice is already 4-D)
        if(len(masks.size())<4):
            masks = masks.unsqueeze(1)
        rgb1 = self.resnet_fixed(rgb1)
        rgb2 = self.resnet_fixed(rgb2)
        rgb3 = self.resnet_fixed(rgb3)
        masks = self.triple_down_sample(masks)
        images = torch.cat((rgb1, rgb2,rgb3, masks), 1)
        return images
# Uncompatible with binary cross-entropy loss
class SpatialUnet2SM(nn.Module):
def __init__(self,initial_channels, initial_h = 32, initial_w = 64, dropout = 0.5, starting_channels = 64, final_upscaling = 4):
super(SpatialUnet2SM, self).__init__()
self.initial_conv = DoubleConv2(initial_channels,initial_channels//2, starting_channels) #64*128*32
self.down1 = DownConv(starting_channels,starting_channels*2) #64*128*64
self.down2 = DownConv(starting_channels*2,starting_channels*4) #32*64*128
self.bottle_neck_spatial = SpatialConv(starting_channels*4,initial_h//4,initial_w//4, 2, dropout) #16*32*128
self.up2 = UpAffine(starting_channels*4,starting_channels*4, starting_channels*2) #32*64*64
self.up3 = UpAffine( starting_channels*2, starting_channels*2, starting_channels) #64*128 *32
self.double_conv = SingleConv(starting_channels,starting_channels//2)
self.final_conv = nn.Conv2d(starting_channels//2, 1, 1)
self.upsample = nn.Upsample(scale_factor=final_upscaling,mode = 'bilinear')
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x1 = self.initial_conv(x) #128*256*32
x2 = self.down1(x1) #64*128*64
| |
item named "Bone Lake Route, Scandia, MN"
# already in the db...
if route_name.count(',') == 0:
addr_example = '123 %s, City, MN' % (route_name,)
elif route_name.count(',') == 1:
addr_example = '123 %s, MN' % (route_name,)
else:
addr_example = '123 %s' % (route_name,)
addyp = streetaddress.parse(addr_example)
if not addyp:
addyp = streetaddress.parse(route_name)
if addyp:
try:
if ((int(addyp['number']) != 123)
and (addyp['city'] != 'MPLS')
and (addyp['state'] != 'MN')):
log.debug('parse_streetaddy: parsed odd/1: %s' % (route_name,))
addyp = None
# else, addr parsed our fake data correctly.
except KeyError:
log.debug('parse_streetaddy: parsed odd/2: %s' % (route_name,))
addyp = None
if addyp is not None:
unprepared = False
for component in ('unit',
'unit_prefix',
'postal_code',
'postal_code_ext',):
try:
if addyp[component]:
unprepared = True
break
except KeyError:
pass
if unprepared:
log.debug('parse_streetaddy: parsed odd/3: %s' % (route_name,))
addyp = None
if addyp is not None:
for component in ('street',
'street_type',):
if addyp[component]:
#addyp[component] = ' '.join(
# [x.capitalize() if x not in ('US',) else x
# for x in addyp[component].split() if x])
# Capitalize some words, lower case others, and upper others.
new_val = []
for word in addyp[component].split():
if ( (word == 'US')
or (word in addressconf.States.FIPS_STATES)
or (word.startswith('UT-'))):
new_val.append(word)
elif word in ('OF',):
new_val.append(word.lower())
else:
new_val.append(word.capitalize())
addyp[component] = ' '.join(new_val)
return addyp
#
   def record_match_results(self, suspects):
      """Record match verdicts for every feature in the 'Prepared' Shapefiles.

      ``suspects`` is a sequence of match-result dicts, one per tested pair
      of old/new line segments.  This routine indexes them by the new
      feature's stack ID, collects SPLIT_INTO donatees, then replays every
      source Shapefile and calls record_match_result_feat on each feature
      to write the results out.
      """
      unique_matches = set() # For DEV logging.
      matches_by_new = {} # To find row_hm by stack ID.
      # Maps an old (to-be-deleted) stack ID to the rows donating into it.
      split_into_donatees = {}
      for row_hm in suspects:
         # Use a dict list b/c each line seg may have been many times suspect.
         misc.dict_list_append(
            matches_by_new, row_hm['gf_new_stack_id'], row_hm)
         unique_matches.add(row_hm['gf_new_stack_id'])
         # Figure out what items are marked duplicates of others, so we can
         # mark the others' CCP_FROMS_ values. This fixes a problem with
         # SPLIT_INTO, e.g., a user made a really long line for USTH 55
         # but in Statewide MN we got hundreds of line segments for that
         # road. Since Shapefile has a 254-char limit on text length,
         # we cannot store the list of stack IDs in the really long
         # line being deleted, but we need to store that stack ID in
         # all of the small replacement lines.
         if ((row_hm['verdict'].startswith('copy'))
             and (row_hm['gf_new_match_cmd'] == 'donate')):
            misc.dict_list_append(
               split_into_donatees, row_hm['gf_old_stack_id'], row_hm)
      log.info(
         'record_match_results: no. new w/ 1+ match: %d / no. tested: %d'
         % (len(unique_matches), len(suspects),))
      source_files = self.get_source_files('Prepared')
      for source_shp in source_files:
         with fiona.open(source_shp, 'r') as source_data:
            log.info('record_match_results: Processing %d features from %s...'
               % (len(source_data), os.path.basename(source_shp),))
            prog_log = Debug_Progress_Logger(copy_this=debug_prog_log_match)
            # NOTE(review): setup() is passed prog_log as its own first
            # argument as well as being called on it -- looks like an
            # unbound-style call; confirm against Debug_Progress_Logger.
            prog_log.setup(prog_log, 10000, len(source_data))
            for shpfeat in source_data:
               # NOTE(review): 'row_hm' below is the stale loop variable
               # left over from the 'for row_hm in suspects' loop above
               # (and is unbound if 'suspects' is empty).  This DEV-only
               # filter probably meant to test the stack IDs parsed from
               # 'shpfeat' instead -- confirm before relying on it.
               if (debug_filter_sids
                   and (row_hm['gf_new_stack_id'] not in debug_filter_sids)
                   and (row_hm['gf_old_stack_id'] not in debug_filter_sids)):
                  continue
               self.record_match_result_feat(shpfeat, matches_by_new,
                                             split_into_donatees)
               # loops_inc() returns True when the DEV loop cap is reached.
               if prog_log.loops_inc():
                  break
               # Optionally flush accumulated features to the Shapefile
               # layers periodically instead of all at once at the end.
               if debug_group_shp_writes:
                  if prog_log.progress % prog_log.log_freq == 0:
                     log.debug('Writing features to Shapefiles...')
                     for lyr_name in self.slayers.keys():
                        feat_list = self.intermed_feats[lyr_name]
                        if feat_list:
                           self.slayers[lyr_name].writerecords(feat_list)
                           self.everylayer.writerecords(feat_list)
                           self.intermed_feats[lyr_name] = []
                        # No: self.intermed_feats = {}
            prog_log.loops_fin()
#
def record_match_result_feat(self, shpfeat, matches_by_new,
split_into_donatees):
guidance, ccp_stack_id, ccp_ref_sids = (
self.parse_guidance_and_stk_ids(shpfeat))
try:
rows_hm = matches_by_new[ccp_stack_id]
except KeyError:
# This means there were no matches for this line segment,
# either because the segment is already okay, or because
# it's not near any other okay lines.
rows_hm = None
# Hrm. Who created Shape_Length? It was truncated to "Shape_Leng".
# And while I [lb] assumed it was standard to record the geometry
# length for each feature, it doesn't seem to be so.
try:
del shpfeat['properties']['Shape_Leng']
except KeyError:
pass
try:
# If another item was marked SPLIT_INTO/donate, it was looking
# for a match to consume it -- this is useful for importing
# segmentized lines and then getting rid of really long lines,
# so that you don't have to manually match or segmentize the
# long line to get the road better connected to the network.
# E.g., a user draws 60 miles of highway as one line, and
# then you import the segmentized road network from the DOT.
donation_rows = split_into_donatees[ccp_stack_id]
except KeyError:
donation_rows = []
# FIXME: Tabulate results.
# Look for 'couplet', cul-de-sac, extended dead end, etc.
if rows_hm is not None:
verdicts = {}
verdicts['keep'] = []
verdicts['copy attrs'] = []
verdicts['copy geom'] = []
verdicts['unsure'] = []
# Not needed: verdicts['unsure']
flags = []
for row_hm in rows_hm:
resfeat = copy.deepcopy(shpfeat)
resfeat['properties']['old_stk_id'] = (
row_hm['gf_old_stack_id'])
resfeat['properties']['old_name'] = row_hm['gf_old_name']
for fldn, rown in Hausdorff_Import.match_fields.iteritems():
# At least set the property to None, else Fiona complains:
# "Record does not match collection schema: [..] != [..]"
if Hausdorff_Import.fragment_lookup[fldn] == 'float:19.11':
resfeat['properties'].setdefault(fldn, 0.0)
elif Hausdorff_Import.fragment_lookup[fldn] == 'int:9':
resfeat['properties'].setdefault(fldn, 0)
elif Hausdorff_Import.fragment_lookup[fldn] == 'str':
resfeat['properties'].setdefault(fldn, '')
else:
resfeat['properties'].setdefault(fldn, None)
try:
resfeat['properties'][fldn] = row_hm[rown]
except KeyError:
pass
if self.cli_opts.show_conflations:
self.record_match_result_target(resfeat)
# Could do: if resfeat['properties']['GUIDANCE']
#verdicts[row_hm['verdict']].append((resfeat, row_hm,))
verdicts[row_hm['verdict']].append(row_hm)
#verdicts[row_hm['verdict']].append(row_hm['gf_old_stack_id'])
flags.append('%s: %s' % (row_hm['verdict'], row_hm['reasoning'],))
# Make a feature for the segmentized line fragment.
if ((self.cli_opts.show_conflations)
and (self.cli_opts.show_fragments)):
for prefix in ('gf_old', 'gf_new',):
try:
frag = copy.deepcopy(resfeat)
frag['geometry']['coordinates'
] = row_hm['%s_frag_xys' % (prefix,)]
frag['properties']['new_length'
] = row_hm['%s_frag_len' % (prefix,)]
frag['properties']['verdict'
] = ('FRAG_%s' % (prefix,))
self.mstrength_init(frag)
self.record_match_result_target(frag)
except KeyError:
pass # Didn't compute HD.
# end: for row_hm in rows_hm
shpfeat['properties']['matches'] = (
'Of %d: %d Data / %d Geom / %d Keep / %d Unsure / %s'
% (len(rows_hm),
len(verdicts['copy attrs']),
len(verdicts['copy geom']),
len(verdicts['keep']),
len(verdicts['unsure']),
' | '.join(set(flags)),))
prefix = ''
ref_sids = []
duplicates = verdicts['copy attrs'] + verdicts['copy geom']
duplicate_sids = set([x['gf_old_stack_id'] for x in duplicates])
donator_sids = set([x['gf_new_stack_id'] for x in donation_rows])
#matched_sids = set([x['gf_old_stack_id'] for x in duplicates])
#duplicate_sids = matched_sids.difference(donator_sids)
duplicate_sids = set(duplicate_sids).difference(donator_sids)
if donator_sids:
# MAYBE: Take the best match rather than a bunch?
# ref_sids += donator_sids
best_ref = None
for donat_hm in donation_rows:
if ((best_ref is None)
#or (best_ref['hausdorff_dist'] < donat_hm['hausdorff_dist'])
#or (best_ref['frag_haus'] < donat_hm['frag_haus'])
or (best_ref['frag_norm'] > donat_hm['frag_norm'])
):
best_ref = donat_hm
if len(duplicates) > 1:
log.warning('chose best donator (of %d) / %s / %s'
% (len(duplicates), ccp_stack_id,
#best_ref['gf_old_stack_id'],))
best_ref['gf_new_stack_id'],))
#ref_sids.append(best_ref['gf_old_stack_id'])
#ref_sids.append(best_ref['gf_new_stack_id'])
#prefix += 'ACCEPT-FROM-'
#prefix += 'ACCEPT-FROM-' + str(best_ref['gf_old_stack_id'])
prefix += 'ACCEPT-FROM-' + str(best_ref['gf_new_stack_id'])
#
shpfeat['properties']['CCP_FROMS_'] = str(
best_ref['gf_new_stack_id'])
#shpfeat['properties']['OPERATION'] =
shpfeat['properties']['GUIDANCE'] = 'update'
#
if guidance == 'donate':
log.warning(
'donator becomes the donatee! was donate: %s / matched: %s'
#% (ccp_stack_id, best_ref['gf_old_stack_id'],))
% (ccp_stack_id, best_ref['gf_new_stack_id'],))
if duplicate_sids:
ref_sids += duplicate_sids
if guidance == 'donate':
prefix += 'DONATE-ATTRS-TO-'
shpfeat['properties']['CCP_FROMS_'] = ','.join(
[str(x) for x in ref_sids])
shpfeat['properties']['GUIDANCE'] = 'delete'
elif len(duplicates) == 1:
if verdicts['copy attrs']:
prefix += 'COPY-ATTR-TO-'
else:
g.assurt(verdicts['copy geom'])
prefix += 'COPY-GEOM-TO-'
else:
# NOTE: MNTH MnDOT says Highway, but we say Freeway, so we don't
# match... see: 2874006, which says couplet against
# two small frags...
prefix += 'COUPLETS?-'
elif not donator_sids:
if verdicts['keep']:
prefix += 'KEEPER'
if not verdicts['unsure']:
prefix += 'KEEPER'
# No: Conflation isn't working well.
# We want to re-investigate these...
# shpfeat['properties']['GUIDANCE'] = 'update'
# CHECK: 1100289/2801362
else:
prefix += 'KEEPER?'
elif verdicts['unsure']:
prefix += 'UNSURE'
else:
g.assurt(False)
#shpfeat['properties']['CCP_FROMS_'] = row_hm['gf_old_stack_id']
#shpfeat['properties']['OPERATION'] =
#shpfeat['properties']['GUIDANCE'] =
#guidance=
shpfeat['properties']['verdict'] = prefix + ','.join(
[str(x) for x in ref_sids])
# SPLIT_INTO is coming here- wow, does that save a lot of busy work!
# You'll get "Warning 1: Value 'COUPLETS-3854183,3853879,...' if
# field verdict has been truncated to 254 characters. This should be
# a problem so long as it's just a donatee which is being deleted,
# since only then is CCP_FROMS_ not used.
if ((len(shpfeat['properties']['verdict']) > 254)
and (guidance not in ('delete', 'donate',))):
log.warning('long verdict: %s' % (shpfeat,))
self.record_match_result_target(shpfeat)
else:
# rows_hm is None, so either this item wasn't part of matching, or we
# didn't find any matches.
for fldn in Hausdorff_Import.match_fields.iterkeys():
if Hausdorff_Import.fragment_lookup[fldn] == 'float:19.11':
shpfeat['properties'].setdefault(fldn, 0.0)
elif Hausdorff_Import.fragment_lookup[fldn] == 'int:9':
shpfeat['properties'].setdefault(fldn, 0)
else:
shpfeat['properties'].setdefault(fldn, None)
self.mstrength_init(shpfeat)
#if ((self.cli_opts.first_suspect)
# and (ccp_stack_id >= self.cli_opts.first_suspect)
# and (ccp_stack_id <= self.cli_opts.final_suspect)
# and (not guidance)):
if ccp_stack_id in self.analyzed_sids:
if not guidance:
#shpfeat['properties']['OPERATION'] = 'U'
#shpfeat['properties']['GUIDANCE'] = 'no_match'
shpfeat['properties']['verdict'] = 'keep'
shpfeat['properties']['reasoning'] = 'no Cyclopath match'
else:
#shpfeat['properties']['OPERATION'] = 'U'
# Skip: shpfeat['properties']['GUIDANCE'] = 'update'
shpfeat['properties']['verdict'] | |
#/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
from ctypes import Structure, POINTER, c_void_p, c_size_t, c_char_p, c_char, c_int, cast, byref, CFUNCTYPE, cdll, c_ulong, c_longlong, c_double, c_bool
import cPickle as pickle
import binascii
from framework import awa_enums
from framework.awa_enums import AwaChangeType
from framework.awa_enums import AwaResourceType
from framework.awa_enums import SessionType
from framework.path import pathToIDs
def AwaOpaqueToBase64(opaqueValue):
    """Encode the raw bytes of an AwaOpaque value as base64.

    opaqueValue -- any object with ctypes-style attributes ``Data`` (void
        pointer to the buffer) and ``Size`` (byte count), e.g. AwaOpaque.
    Returns the base64 encoding, with the trailing newline produced by
    binascii.b2a_base64.
    """
    # Cast the pointer once, outside the loop -- the original re-ran the
    # cast on every iteration.  Joining the per-byte reads also works on
    # Python 3, where the original's ``bytearray[i] = <1-byte value>``
    # assignment would raise TypeError.
    char_ptr = cast(opaqueValue.Data, POINTER(c_char))
    raw = b''.join(char_ptr[i] for i in range(opaqueValue.Size))
    return binascii.b2a_base64(raw)
class AwaOpaque(Structure):
    # ctypes mirror of the C AwaOpaque struct (an arbitrary byte buffer):
    #   Data -- void* to the raw bytes
    #   Size -- length of the buffer, in bytes
    # Field order and types must match the C layout exactly.
    _fields_ = [("Data", c_void_p),
                ("Size", c_size_t)]
class AwaCommonAPIWrapper(object):
def __init__(self, sessionType):
#self.running = True
self.sessionType = sessionType
# create our ctypes "change" callback for change subscriptions and observations.
C_CHANGE_FUNC = CFUNCTYPE(c_void_p, c_void_p)
callback = C_CHANGE_FUNC(self.ChangeCallback)
self.changeCallbackMemory = cast(callback, c_void_p)
self.ClearNotifyData()
    def loadAwaLibrary(self, path):
        """Load the native libawa shared library from ``path`` via ctypes.

        Must be called before any of the Awa* wrapper methods below, which
        all dispatch through ``self._lib``.
        """
        # link libawa
        self._lib = cdll.LoadLibrary(path)
    # --- Array constructors -------------------------------------------------
    # Thin ctypes wrappers around the libawa Awa*Array_New functions.  Each
    # returns the new array's opaque handle (a c_void_p value); release it
    # with the matching Awa*Array_Free wrapper.
    def AwaStringArray_New(self):
        """Create a new AwaStringArray; returns an opaque handle."""
        self._lib.AwaStringArray_New.restype = c_void_p
        return self._lib.AwaStringArray_New()
    def AwaIntegerArray_New(self):
        """Create a new AwaIntegerArray; returns an opaque handle."""
        self._lib.AwaIntegerArray_New.restype = c_void_p
        return self._lib.AwaIntegerArray_New()
    def AwaFloatArray_New(self):
        """Create a new AwaFloatArray; returns an opaque handle."""
        self._lib.AwaFloatArray_New.restype = c_void_p
        return self._lib.AwaFloatArray_New()
    def AwaBooleanArray_New(self):
        """Create a new AwaBooleanArray; returns an opaque handle."""
        self._lib.AwaBooleanArray_New.restype = c_void_p
        return self._lib.AwaBooleanArray_New()
    def AwaOpaqueArray_New(self):
        """Create a new AwaOpaqueArray; returns an opaque handle."""
        self._lib.AwaOpaqueArray_New.restype = c_void_p
        return self._lib.AwaOpaqueArray_New()
    def AwaTimeArray_New(self):
        """Create a new AwaTimeArray; returns an opaque handle."""
        self._lib.AwaTimeArray_New.restype = c_void_p
        return self._lib.AwaTimeArray_New()
    def AwaObjectLinkArray_New(self):
        """Create a new AwaObjectLinkArray; returns an opaque handle."""
        self._lib.AwaObjectLinkArray_New.restype = c_void_p
        return self._lib.AwaObjectLinkArray_New()
    # * @}  (doxygen group-close marker carried over from the C headers)
    # --- Array destructors --------------------------------------------------
    # Each Free wrapper passes the *address* of a pointer (byref of a
    # POINTER(c_void_p)) -- consistent with a C signature of the form
    # Awa*Array_Free(Awa*Array **) that lets the library clear the caller's
    # handle.  TODO(review): confirm against the libawa headers.
    def AwaStringArray_Free(self, array):
        """Free an AwaStringArray returned by AwaStringArray_New."""
        self._lib.AwaStringArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaStringArray_Free(byref(mem))
    def AwaIntegerArray_Free(self, array):
        """Free an AwaIntegerArray returned by AwaIntegerArray_New."""
        self._lib.AwaIntegerArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaIntegerArray_Free(byref(mem))
    def AwaFloatArray_Free(self, array):
        """Free an AwaFloatArray returned by AwaFloatArray_New."""
        self._lib.AwaFloatArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaFloatArray_Free(byref(mem))
    def AwaBooleanArray_Free(self, array):
        """Free an AwaBooleanArray returned by AwaBooleanArray_New."""
        self._lib.AwaBooleanArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaBooleanArray_Free(byref(mem))
    def AwaOpaqueArray_Free(self, array):
        """Free an AwaOpaqueArray returned by AwaOpaqueArray_New."""
        self._lib.AwaOpaqueArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaOpaqueArray_Free(byref(mem))
    def AwaTimeArray_Free(self, array):
        """Free an AwaTimeArray returned by AwaTimeArray_New."""
        self._lib.AwaTimeArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaTimeArray_Free(byref(mem))
    def AwaObjectLinkArray_Free(self, array):
        """Free an AwaObjectLinkArray returned by AwaObjectLinkArray_New."""
        self._lib.AwaObjectLinkArray_Free.restype = None
        mem = cast(array, POINTER(c_void_p))
        return self._lib.AwaObjectLinkArray_Free(byref(mem))
    # * @}
    # --- Element setters ----------------------------------------------------
    # Set array[index] = value, with argtypes declared per element type.
    # NOTE(review): restype is declared c_void_p on all of these; if the
    # underlying C functions return void, the Python return value is
    # meaningless -- confirm against the libawa headers.
    def AwaStringArray_SetValueAsCString(self, array, index, value):
        """Set array[index] to the given C string."""
        self._lib.AwaStringArray_SetValueAsCString.restype = c_void_p
        self._lib.AwaStringArray_SetValueAsCString.argtypes = [c_void_p, c_ulong, c_char_p]
        return self._lib.AwaStringArray_SetValueAsCString(array, index, value)
    def AwaIntegerArray_SetValue(self, array, index, value):
        """Set array[index] to the given 64-bit integer."""
        self._lib.AwaIntegerArray_SetValue.restype = c_void_p
        self._lib.AwaIntegerArray_SetValue.argtypes = [c_void_p, c_ulong, c_longlong]
        return self._lib.AwaIntegerArray_SetValue(array, index, value)
    def AwaFloatArray_SetValue(self, array, index, value):
        """Set array[index] to the given double."""
        self._lib.AwaFloatArray_SetValue.restype = c_void_p
        self._lib.AwaFloatArray_SetValue.argtypes = [c_void_p, c_ulong, c_double]
        return self._lib.AwaFloatArray_SetValue(array, index, value)
    def AwaBooleanArray_SetValue(self, array, index, value):
        """Set array[index] to the given boolean."""
        self._lib.AwaBooleanArray_SetValue.restype = c_void_p
        self._lib.AwaBooleanArray_SetValue.argtypes = [c_void_p, c_ulong, c_bool]
        return self._lib.AwaBooleanArray_SetValue(array, index, value)
    def AwaOpaqueArray_SetValue(self, array, index, value):
        """Set array[index] to the given opaque value (passed as void*)."""
        self._lib.AwaOpaqueArray_SetValue.restype = c_void_p
        self._lib.AwaOpaqueArray_SetValue.argtypes = [c_void_p, c_ulong, c_void_p]
        return self._lib.AwaOpaqueArray_SetValue(array, index, value)
    def AwaTimeArray_SetValue(self, array, index, value):
        """Set array[index] to the given time value (64-bit integer)."""
        self._lib.AwaTimeArray_SetValue.restype = c_void_p
        self._lib.AwaTimeArray_SetValue.argtypes = [c_void_p, c_ulong, c_longlong]
        return self._lib.AwaTimeArray_SetValue(array, index, value)
    def AwaObjectLinkArray_SetValue(self, array, index, value):
        """Set array[index] to the given object link (passed as void*)."""
        self._lib.AwaObjectLinkArray_SetValue.restype = c_void_p
        self._lib.AwaObjectLinkArray_SetValue.argtypes = [c_void_p, c_ulong, c_void_p]
        return self._lib.AwaObjectLinkArray_SetValue(array, index, value)
    # * @}
    # --- Element removal ----------------------------------------------------
    # Delete the entry at array[index] for each element type.
    # NOTE(review): restype is declared c_void_p on all of these; if the
    # underlying C functions return void, the Python return value is
    # meaningless -- confirm against the libawa headers.
    def AwaStringArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaStringArray."""
        self._lib.AwaStringArray_DeleteValue.restype = c_void_p
        self._lib.AwaStringArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaStringArray_DeleteValue(array, index)
    def AwaIntegerArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaIntegerArray."""
        self._lib.AwaIntegerArray_DeleteValue.restype = c_void_p
        self._lib.AwaIntegerArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaIntegerArray_DeleteValue(array, index)
    def AwaFloatArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaFloatArray."""
        self._lib.AwaFloatArray_DeleteValue.restype = c_void_p
        self._lib.AwaFloatArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaFloatArray_DeleteValue(array, index)
    def AwaBooleanArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaBooleanArray."""
        self._lib.AwaBooleanArray_DeleteValue.restype = c_void_p
        self._lib.AwaBooleanArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaBooleanArray_DeleteValue(array, index)
    def AwaOpaqueArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaOpaqueArray."""
        self._lib.AwaOpaqueArray_DeleteValue.restype = c_void_p
        self._lib.AwaOpaqueArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaOpaqueArray_DeleteValue(array, index)
    def AwaTimeArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaTimeArray."""
        self._lib.AwaTimeArray_DeleteValue.restype = c_void_p
        self._lib.AwaTimeArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaTimeArray_DeleteValue(array, index)
    def AwaObjectLinkArray_DeleteValue(self, array, index):
        """Delete the entry at array[index] in an AwaObjectLinkArray."""
        self._lib.AwaObjectLinkArray_DeleteValue.restype = c_void_p
        self._lib.AwaObjectLinkArray_DeleteValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaObjectLinkArray_DeleteValue(array, index)
    # * @}
    # --- Element getters ----------------------------------------------------
    # Return the value stored at array[index], with restype declared per
    # the element's C type.
    def AwaStringArray_GetValueAsCString(self, array, index):
        """Return array[index] as a C string."""
        self._lib.AwaStringArray_GetValueAsCString.restype = c_char_p
        self._lib.AwaStringArray_GetValueAsCString.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaStringArray_GetValueAsCString(array, index)
    def AwaIntegerArray_GetValue(self, array, index):
        """Return array[index] as a 64-bit integer."""
        self._lib.AwaIntegerArray_GetValue.restype = c_longlong
        self._lib.AwaIntegerArray_GetValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaIntegerArray_GetValue(array, index)
    def AwaFloatArray_GetValue(self, array, index):
        """Return array[index] as a double."""
        self._lib.AwaFloatArray_GetValue.restype = c_double
        self._lib.AwaFloatArray_GetValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaFloatArray_GetValue(array, index)
    def AwaBooleanArray_GetValue(self, array, index):
        """Return array[index] as a boolean."""
        self._lib.AwaBooleanArray_GetValue.restype = c_bool
        self._lib.AwaBooleanArray_GetValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaBooleanArray_GetValue(array, index)
    def AwaOpaqueArray_GetValue(self, array, index):
        """Return array[index] as an opaque pointer value."""
        self._lib.AwaOpaqueArray_GetValue.restype = c_void_p
        self._lib.AwaOpaqueArray_GetValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaOpaqueArray_GetValue(array, index)
    def AwaTimeArray_GetValue(self, array, index):
        """Return array[index] as a time value (64-bit integer)."""
        self._lib.AwaTimeArray_GetValue.restype = c_longlong
        self._lib.AwaTimeArray_GetValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaTimeArray_GetValue(array, index)
    def AwaObjectLinkArray_GetValue(self, array, index):
        """Return array[index] as an object-link pointer value."""
        self._lib.AwaObjectLinkArray_GetValue.restype = c_void_p
        self._lib.AwaObjectLinkArray_GetValue.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaObjectLinkArray_GetValue(array, index)
    # * @}
    # --- Value counts -------------------------------------------------------
    # Return the number of values the C library reports for the array.
    def AwaStringArray_GetValueCount(self, array):
        """Return the number of values in an AwaStringArray."""
        self._lib.AwaStringArray_GetValueCount.restype = c_ulong
        self._lib.AwaStringArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaStringArray_GetValueCount(array)
    def AwaIntegerArray_GetValueCount(self, array):
        """Return the number of values in an AwaIntegerArray."""
        self._lib.AwaIntegerArray_GetValueCount.restype = c_ulong
        self._lib.AwaIntegerArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaIntegerArray_GetValueCount(array)
    def AwaFloatArray_GetValueCount(self, array):
        """Return the number of values in an AwaFloatArray."""
        self._lib.AwaFloatArray_GetValueCount.restype = c_ulong
        self._lib.AwaFloatArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaFloatArray_GetValueCount(array)
    def AwaBooleanArray_GetValueCount(self, array):
        """Return the number of values in an AwaBooleanArray."""
        self._lib.AwaBooleanArray_GetValueCount.restype = c_ulong
        self._lib.AwaBooleanArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaBooleanArray_GetValueCount(array)
    def AwaOpaqueArray_GetValueCount(self, array):
        """Return the number of values in an AwaOpaqueArray."""
        self._lib.AwaOpaqueArray_GetValueCount.restype = c_ulong
        self._lib.AwaOpaqueArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaOpaqueArray_GetValueCount(array)
    def AwaTimeArray_GetValueCount(self, array):
        """Return the number of values in an AwaTimeArray."""
        self._lib.AwaTimeArray_GetValueCount.restype = c_ulong
        self._lib.AwaTimeArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaTimeArray_GetValueCount(array)
    def AwaObjectLinkArray_GetValueCount(self, array):
        """Return the number of values in an AwaObjectLinkArray."""
        self._lib.AwaObjectLinkArray_GetValueCount.restype = c_ulong
        self._lib.AwaObjectLinkArray_GetValueCount.argtypes = [c_void_p]
        return self._lib.AwaObjectLinkArray_GetValueCount(array)
    # * @}
    # --- Iterator constructors ----------------------------------------------
    # Create a typed iterator over an array; each returns an opaque
    # iterator handle usable with the Awa*ArrayIterator_* wrappers below.
    def AwaStringArray_NewCStringArrayIterator(self, array):
        """Create an iterator over an AwaStringArray; returns a handle."""
        self._lib.AwaStringArray_NewCStringArrayIterator.restype = c_void_p
        self._lib.AwaStringArray_NewCStringArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaStringArray_NewCStringArrayIterator(array)
    def AwaIntegerArray_NewIntegerArrayIterator(self, array):
        """Create an iterator over an AwaIntegerArray; returns a handle."""
        self._lib.AwaIntegerArray_NewIntegerArrayIterator.restype = c_void_p
        self._lib.AwaIntegerArray_NewIntegerArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaIntegerArray_NewIntegerArrayIterator(array)
    def AwaFloatArray_NewFloatArrayIterator(self, array):
        """Create an iterator over an AwaFloatArray; returns a handle."""
        self._lib.AwaFloatArray_NewFloatArrayIterator.restype = c_void_p
        self._lib.AwaFloatArray_NewFloatArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaFloatArray_NewFloatArrayIterator(array)
    def AwaBooleanArray_NewBooleanArrayIterator(self, array):
        """Create an iterator over an AwaBooleanArray; returns a handle."""
        self._lib.AwaBooleanArray_NewBooleanArrayIterator.restype = c_void_p
        self._lib.AwaBooleanArray_NewBooleanArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaBooleanArray_NewBooleanArrayIterator(array)
    def AwaOpaqueArray_NewOpaqueArrayIterator(self, array):
        """Create an iterator over an AwaOpaqueArray; returns a handle."""
        self._lib.AwaOpaqueArray_NewOpaqueArrayIterator.restype = c_void_p
        self._lib.AwaOpaqueArray_NewOpaqueArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaOpaqueArray_NewOpaqueArrayIterator(array)
    def AwaTimeArray_NewTimeArrayIterator(self, array):
        """Create an iterator over an AwaTimeArray; returns a handle."""
        self._lib.AwaTimeArray_NewTimeArrayIterator.restype = c_void_p
        self._lib.AwaTimeArray_NewTimeArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaTimeArray_NewTimeArrayIterator(array)
    def AwaObjectLinkArray_NewObjectLinkArrayIterator(self, array):
        """Create an iterator over an AwaObjectLinkArray; returns a handle."""
        self._lib.AwaObjectLinkArray_NewObjectLinkArrayIterator.restype = c_void_p
        self._lib.AwaObjectLinkArray_NewObjectLinkArrayIterator.argtypes = [c_void_p]
        return self._lib.AwaObjectLinkArray_NewObjectLinkArrayIterator(array)
    # * @}
    # --- Index validity checks ----------------------------------------------
    # Per the C API naming, these report whether array[index] holds a
    # value -- confirm exact semantics against the libawa headers.
    def AwaStringArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaStringArray."""
        self._lib.AwaStringArray_IsValid.restype = c_bool
        self._lib.AwaStringArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaStringArray_IsValid(array, index)
    def AwaIntegerArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaIntegerArray."""
        self._lib.AwaIntegerArray_IsValid.restype = c_bool
        self._lib.AwaIntegerArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaIntegerArray_IsValid(array, index)
    def AwaFloatArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaFloatArray."""
        self._lib.AwaFloatArray_IsValid.restype = c_bool
        self._lib.AwaFloatArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaFloatArray_IsValid(array, index)
    def AwaBooleanArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaBooleanArray."""
        self._lib.AwaBooleanArray_IsValid.restype = c_bool
        self._lib.AwaBooleanArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaBooleanArray_IsValid(array, index)
    def AwaOpaqueArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaOpaqueArray."""
        self._lib.AwaOpaqueArray_IsValid.restype = c_bool
        self._lib.AwaOpaqueArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaOpaqueArray_IsValid(array, index)
    def AwaTimeArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaTimeArray."""
        self._lib.AwaTimeArray_IsValid.restype = c_bool
        self._lib.AwaTimeArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaTimeArray_IsValid(array, index)
    def AwaObjectLinkArray_IsValid(self, array, index):
        """Return True if index is valid in an AwaObjectLinkArray."""
        self._lib.AwaObjectLinkArray_IsValid.restype = c_bool
        self._lib.AwaObjectLinkArray_IsValid.argtypes = [c_void_p, c_ulong]
        return self._lib.AwaObjectLinkArray_IsValid(array, index)
    # * @}
    # --- Iterator advancement -----------------------------------------------
    # Per the C API naming, Next advances the iterator and returns True
    # while a value is available -- confirm exact contract (e.g. whether
    # Next must be called before the first Get) against the libawa headers.
    def AwaCStringArrayIterator_Next(self, iterator):
        """Advance a CString iterator; True if a value is available."""
        self._lib.AwaCStringArrayIterator_Next.restype = c_bool
        self._lib.AwaCStringArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaCStringArrayIterator_Next(iterator)
    def AwaIntegerArrayIterator_Next(self, iterator):
        """Advance an Integer iterator; True if a value is available."""
        self._lib.AwaIntegerArrayIterator_Next.restype = c_bool
        self._lib.AwaIntegerArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaIntegerArrayIterator_Next(iterator)
    def AwaFloatArrayIterator_Next(self, iterator):
        """Advance a Float iterator; True if a value is available."""
        self._lib.AwaFloatArrayIterator_Next.restype = c_bool
        self._lib.AwaFloatArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaFloatArrayIterator_Next(iterator)
    def AwaBooleanArrayIterator_Next(self, iterator):
        """Advance a Boolean iterator; True if a value is available."""
        self._lib.AwaBooleanArrayIterator_Next.restype = c_bool
        self._lib.AwaBooleanArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaBooleanArrayIterator_Next(iterator)
    def AwaOpaqueArrayIterator_Next(self, iterator):
        """Advance an Opaque iterator; True if a value is available."""
        self._lib.AwaOpaqueArrayIterator_Next.restype = c_bool
        self._lib.AwaOpaqueArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaOpaqueArrayIterator_Next(iterator)
    def AwaTimeArrayIterator_Next(self, iterator):
        """Advance a Time iterator; True if a value is available."""
        self._lib.AwaTimeArrayIterator_Next.restype = c_bool
        self._lib.AwaTimeArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaTimeArrayIterator_Next(iterator)
    def AwaObjectLinkArrayIterator_Next(self, iterator):
        """Advance an ObjectLink iterator; True if a value is available."""
        self._lib.AwaObjectLinkArrayIterator_Next.restype = c_bool
        self._lib.AwaObjectLinkArrayIterator_Next.argtypes = [c_void_p]
        return self._lib.AwaObjectLinkArrayIterator_Next(iterator)
    # * @}
    # --- Iterator index access ----------------------------------------------
    # Return the array index at the iterator's current position.
    def AwaCStringArrayIterator_GetIndex(self, iterator):
        """Return the current index of a CString iterator."""
        self._lib.AwaCStringArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaCStringArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaCStringArrayIterator_GetIndex(iterator)
    def AwaIntegerArrayIterator_GetIndex(self, iterator):
        """Return the current index of an Integer iterator."""
        self._lib.AwaIntegerArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaIntegerArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaIntegerArrayIterator_GetIndex(iterator)
    def AwaFloatArrayIterator_GetIndex(self, iterator):
        """Return the current index of a Float iterator."""
        self._lib.AwaFloatArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaFloatArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaFloatArrayIterator_GetIndex(iterator)
    def AwaBooleanArrayIterator_GetIndex(self, iterator):
        """Return the current index of a Boolean iterator."""
        self._lib.AwaBooleanArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaBooleanArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaBooleanArrayIterator_GetIndex(iterator)
    def AwaOpaqueArrayIterator_GetIndex(self, iterator):
        """Return the current index of an Opaque iterator."""
        self._lib.AwaOpaqueArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaOpaqueArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaOpaqueArrayIterator_GetIndex(iterator)
    def AwaTimeArrayIterator_GetIndex(self, iterator):
        """Return the current index of a Time iterator."""
        self._lib.AwaTimeArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaTimeArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaTimeArrayIterator_GetIndex(iterator)
    def AwaObjectLinkArrayIterator_GetIndex(self, iterator):
        """Return the current index of an ObjectLink iterator."""
        self._lib.AwaObjectLinkArrayIterator_GetIndex.restype = c_ulong
        self._lib.AwaObjectLinkArrayIterator_GetIndex.argtypes = [c_void_p]
        return self._lib.AwaObjectLinkArrayIterator_GetIndex(iterator)
    # * @}
    # --- Iterator value access ----------------------------------------------
    # Return the value at the iterator's current position, with restype
    # declared per the element's C type.
    def AwaCStringArrayIterator_GetValueAsCString(self, iterator):
        """Return the current value of a CString iterator as a C string."""
        self._lib.AwaCStringArrayIterator_GetValueAsCString.restype = c_char_p
        self._lib.AwaCStringArrayIterator_GetValueAsCString.argtypes = [c_void_p]
        return self._lib.AwaCStringArrayIterator_GetValueAsCString(iterator)
    def AwaIntegerArrayIterator_GetValue(self, iterator):
        """Return the current value of an Integer iterator (64-bit int)."""
        self._lib.AwaIntegerArrayIterator_GetValue.restype = c_longlong
        self._lib.AwaIntegerArrayIterator_GetValue.argtypes = [c_void_p]
        return self._lib.AwaIntegerArrayIterator_GetValue(iterator)
    def AwaFloatArrayIterator_GetValue(self, iterator):
        """Return the current value of a Float iterator (double)."""
        self._lib.AwaFloatArrayIterator_GetValue.restype = c_double
        self._lib.AwaFloatArrayIterator_GetValue.argtypes = [c_void_p]
        return self._lib.AwaFloatArrayIterator_GetValue(iterator)
    def AwaBooleanArrayIterator_GetValue(self, iterator):
        """Return the current value of a Boolean iterator (bool)."""
        self._lib.AwaBooleanArrayIterator_GetValue.restype = c_bool
        self._lib.AwaBooleanArrayIterator_GetValue.argtypes = [c_void_p]
        return self._lib.AwaBooleanArrayIterator_GetValue(iterator)
    def AwaOpaqueArrayIterator_GetValue(self, iterator):
        """Return the current value of an Opaque iterator (pointer value)."""
        self._lib.AwaOpaqueArrayIterator_GetValue.restype = c_void_p
        self._lib.AwaOpaqueArrayIterator_GetValue.argtypes = [c_void_p]
        return self._lib.AwaOpaqueArrayIterator_GetValue(iterator)
    def AwaTimeArrayIterator_GetValue(self, iterator):
        """Return the current value of a Time iterator (64-bit int)."""
        self._lib.AwaTimeArrayIterator_GetValue.restype = c_longlong
        self._lib.AwaTimeArrayIterator_GetValue.argtypes = [c_void_p]
        return self._lib.AwaTimeArrayIterator_GetValue(iterator)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.