repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
flav-io/flavio | flavio/physics/bdecays/bll.py | 1 | 10206 | r"""Functions for the branching ratios and effective lifetimes of the leptonic
decays $B_q \to \ell^+\ell^-$, where $q=d$ or $s$ and $\ell=e$, $\mu$. or
$\tau$."""
from math import pi,sqrt
from flavio.physics import ckm
from flavio.physics.running import running
from flavio.physics.bdecays.common import meson_quark, lambda_K
from flavio.classes import Observable, Prediction
from flavio.config import config
from flavio.physics.bdecays.wilsoncoefficients import wctot_dict
def br_lifetime_corr(y, ADeltaGamma):
    r"""Correction factor relating the experimentally measured branching ratio
    (time-integrated) to the theoretical one (instantaneous), see e.g. eq. (8)
    of arXiv:1204.1735.

    Parameters
    ----------
    - `y`: relative decay rate difference, $y_q = \tau_{B_q} \Delta\Gamma_q /2$
    - `ADeltaGamma`: $A_{\Delta\Gamma_q}$ as defined, e.g., in arXiv:1204.1735

    Returns
    -------
    $\frac{1-y_q^2}{1+A_{\Delta\Gamma_q} y_q}$
    """
    numerator = 1 - y * y
    denominator = 1 + ADeltaGamma * y
    return numerator / denominator
def amplitudes(par, wc, B, l1, l2):
    r"""Amplitudes P and S entering the $B_q\to\ell_1^+\ell_2^-$ observables.

    Parameters
    ----------
    - `par`: parameter dictionary
    - `wc`: dictionary of Wilson coefficients
    - `B`: should be `'Bs'` or `'B0'`
    - `l1` and `l2`: should be `'e'`, `'mu'`, or `'tau'`

    Returns
    -------
    `(P, S)` where for the special case `l1 == l2` one has
    - $P = \frac{2m_\ell}{m_{B_q}} (C_{10}-C_{10}') + m_{B_q} (C_P-C_P')$
    - $S = m_{B_q} (C_S-C_S')$

    Raises
    ------
    `ValueError` if `B` is neither `'Bs'` nor `'B0'`.
    """
    scale = config['renormalization scale']['bll']
    # masses
    ml1 = par['m_'+l1]
    ml2 = par['m_'+l2]
    mB = par['m_'+B]
    mb = running.get_mb(par, scale, nf_out=5)
    # get the mass of the spectator quark
    if B == 'Bs':
        mspec = running.get_ms(par, scale, nf_out=5)
    elif B == 'B0':
        mspec = running.get_md(par, scale, nf_out=5)
    else:
        # fail early with a clear message instead of raising a NameError on
        # `mspec` further below
        raise ValueError("B should be 'Bs' or 'B0', not '{}'".format(B))
    # Wilson coefficients
    qqll = meson_quark[B] + l1 + l2
    # For LFV expressions see arXiv:1602.00881 eq. (5)
    C9m = wc['C9_'+qqll] - wc['C9p_'+qqll]  # only relevant for l1 != l2!
    C10m = wc['C10_'+qqll] - wc['C10p_'+qqll]
    CPm = wc['CP_'+qqll] - wc['CPp_'+qqll]
    CSm = wc['CS_'+qqll] - wc['CSp_'+qqll]
    # kinematic factors; beta_p vanishes at threshold mB = ml1 + ml2
    beta_m = sqrt(1 - (ml1 - ml2)**2/mB**2)
    beta_p = sqrt(1 - (ml1 + ml2)**2/mB**2)
    P = beta_m * ((ml2 + ml1)/mB * C10m + mB * mb/(mb + mspec) * CPm)
    S = beta_p * ((ml2 - ml1)/mB * C9m + mB * mb/(mb + mspec) * CSm)
    return P, S
def ADeltaGamma(par, wc, B, lep):
    r"""Mass-eigenstate rate asymmetry $A_{\Delta\Gamma}$ of
    $B_q\to\ell^+\ell^-$; cf. eq. (17) of arXiv:1204.1737."""
    P, S = amplitudes(par, wc, B, lep, lep)
    numerator = (P**2).real - (S**2).real
    denominator = abs(P)**2 + abs(S)**2
    return numerator / denominator
def br_inst(par, wc, B, l1, l2):
    r"""Branching ratio of $B_q\to\ell_1^+\ell_2^-$ in the absence of mixing.

    Parameters
    ----------
    - `par`: parameter dictionary
    - `wc`: dictionary of Wilson coefficients
    - `B`: should be `'Bs'` or `'B0'`
    - `l1` and `l2`: should be `'e'`, `'mu'`, or `'tau'`
    """
    # parameters
    GF = par['GF']
    alphaem = running.get_alpha(par, 4.8)['alpha_e']
    ml1 = par['m_'+l1]
    ml2 = par['m_'+l2]
    mB = par['m_'+B]
    tauB = par['tau_'+B]
    fB = par['f_'+B]
    # appropriate CKM elements
    if B == 'Bs':
        xi_t = ckm.xi('t', 'bs')(par)
    elif B == 'B0':
        xi_t = ckm.xi('t', 'bd')(par)
    # normalization of the effective Hamiltonian
    N = xi_t * 4*GF/sqrt(2) * alphaem/(4*pi)
    # phase-space factor from the Källén function
    beta = sqrt(lambda_K(mB**2, ml1**2, ml2**2))/mB**2
    prefactor = abs(N)**2 / 32. / pi * mB**3 * tauB * beta * fB**2
    P, S = amplitudes(par, wc, B, l1, l2)
    return prefactor * (abs(P)**2 + abs(S)**2)
def br_timeint(par, wc, B, l1, l2):
    r"""Time-integrated branching ratio of $B_q\to\ell^+\ell^-$."""
    if l1 != l2:
        raise ValueError("Time-integrated branching ratio only defined for equal lepton flavours")
    lep = l1
    # width-difference correction to the instantaneous branching ratio
    y_q = par['DeltaGamma/Gamma_'+B] / 2.
    asym = ADeltaGamma(par, wc, B, lep)
    correction = br_lifetime_corr(y_q, asym)
    return br_inst(par, wc, B, lep, lep) / correction
def bqll_obs(function, wc_obj, par, B, l1, l2):
    r"""Evaluate `function` with the Wilson coefficient dictionary appropriate
    for the $B_q\to\ell_1^+\ell_2^-$ transition."""
    scale = config['renormalization scale']['bll']
    label = meson_quark[B] + l1 + l2
    # lepton-flavour conserving decays include the SM contribution,
    # LFV decays are purely new physics
    wc = (wctot_dict(wc_obj, label, scale, par) if l1 == l2
          else wc_obj.get_wc(label, scale, par))
    return function(par, wc, B, l1, l2)
def bqll_obs_lsum(function, wc_obj, par, B, l1, l2):
    r"""Sum of `function` evaluated for both lepton orderings of an LFV
    decay, i.e. $\ell_1^+\ell_2^-$ plus $\ell_2^+\ell_1^-$."""
    if l1 == l2:
        raise ValueError("This function is defined only for LFV decays")
    scale = config['renormalization scale']['bll']
    total = 0
    for la, lb in ((l1, l2), (l2, l1)):
        wc = wc_obj.get_wc(meson_quark[B] + la + lb, scale, par)
        total = total + function(par, wc, B, la, lb)
    return total
def bqll_obs_function(function, B, l1, l2):
    r"""Return a prediction function `(wc_obj, par) -> value` with the decay
    mode fixed."""
    def fct(wc_obj, par):
        return bqll_obs(function, wc_obj, par, B, l1, l2)
    return fct
def bqll_obs_function_lsum(function, B, l1, l2):
    r"""Return a prediction function `(wc_obj, par) -> value` summing both
    lepton orderings of an LFV decay."""
    def fct(wc_obj, par):
        return bqll_obs_lsum(function, wc_obj, par, B, l1, l2)
    return fct
# Bs -> l+l- effective lifetime
def tau_ll(wc, par, B, lep):
    r"""Effective B->l+l- lifetime as defined in eq. (26) of arXiv:1204.1737 .
    This formula one either gets by integrating eq. (21) or by inverting eq. (27) of arXiv:1204.1737.

    Parameters
    ----------
    - `wc` : dict of Wilson coefficients
    - `par` : parameter dictionary
    - `B` : should be `'Bs'` or `'B0'`
    - `lep` : lepton: 'e', 'mu' or 'tau'

    Returns
    -------
    $-\frac{\tau_{B_s} \left(y_s^2+2 A_{\Delta\Gamma_q} ys+1\right)}{\left(ys^2-1\right) (A_{\Delta\Gamma_q} ys+1)}$
    """
    ADG = ADeltaGamma(par, wc, B, lep)
    y = 0.5 * par['DeltaGamma/Gamma_'+B]
    tauB = par['tau_'+B]
    # same as -tauB*(1 + y**2 + 2*y*ADG)/((y**2 - 1)*(1 + y*ADG)),
    # written with a manifestly positive denominator for |y| < 1
    return tauB * (1 + 2*ADG*y + y**2) / ((1 - y**2) * (1 + ADG*y))
def tau_ll_func(wc_obj, par, B, lep):
    r"""Effective lifetime computed from a Wilson coefficient object."""
    scale = config['renormalization scale']['bll']
    wc = wctot_dict(wc_obj, meson_quark[B] + lep + lep, scale, par)
    return tau_ll(wc, par, B, lep)
def ADG_func(wc_obj, par, B, lep):
    r"""$A_{\Delta\Gamma}$ computed from a Wilson coefficient object."""
    scale = config['renormalization scale']['bll']
    wc = wctot_dict(wc_obj, meson_quark[B] + lep + lep, scale, par)
    return ADeltaGamma(par, wc, B, lep)
def ADeltaGamma_func(B, lep):
    r"""Return a prediction function `(wc_obj, par) -> A_DeltaGamma` with the
    decay mode fixed."""
    def ADG_func(wc_obj, par):
        scale = config['renormalization scale']['bll']
        label = meson_quark[B] + lep + lep
        wc = wctot_dict(wc_obj, label, scale, par)
        return ADeltaGamma(par, wc, B, lep)
    return ADG_func
# Observable and Prediction instances
# use raw strings throughout: '\mu' contains the invalid escape sequence \m
_tex = {'e': 'e', 'mu': r'\mu', 'tau': r'\tau'}
for l in ['e', 'mu', 'tau']:
    _process_taxonomy = r'Process :: $b$ hadron decays :: FCNC decays :: $B\to\ell^+\ell^-$ :: $'
    # For the Bs decay, we take the time-integrated branching ratio
    _obs_name = "BR(Bs->"+l+l+")"
    _obs = Observable(_obs_name)
    _process_tex = r"B_s\to "+_tex[l]+r"^+"+_tex[l]+r"^-"
    _obs.set_description(r"Time-integrated branching ratio of $" + _process_tex + r"$.")
    _obs.tex = r"$\overline{\text{BR}}(" + _process_tex + r")$"
    _obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
    Prediction(_obs_name, bqll_obs_function(br_timeint, 'Bs', l, l))
    # Add the effective lifetimes for Bs
    _obs_name = 'tau_'+l+l
    _obs = Observable(_obs_name)
    _obs.set_description(r"Effective lifetime for $" + _process_tex + r"$.")
    _obs.tex = r"$\tau_{B_s \to " + _tex[l] + _tex[l] + "}$"
    _obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
    # bind the current value of `l` as a default argument to avoid the
    # late-binding closure pitfall (a bare lambda would always see l='tau')
    Prediction(_obs_name, lambda wc_obj, par, l=l: tau_ll_func(wc_obj, par, 'Bs', l))
    # mass-eigenstate rate asymmetry
    _obs_name = 'ADeltaGamma(Bs->'+l+l+')'
    _obs = Observable(_obs_name)
    _process_tex = r"B_s\to "+_tex[l]+r"^+"+_tex[l]+r"^-"
    _obs.set_description(r"Mass-eigenstate rate asymmetry in $" + _process_tex + r"$.")
    _obs.tex = r"$A_{\Delta\Gamma}(" + _process_tex + r")$"
    _obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
    Prediction(_obs_name, ADeltaGamma_func('Bs', l))
    # For the B^0 decay, we take the prompt branching ratio since DeltaGamma is negligible
    _obs_name = "BR(B0->"+l+l+")"
    _obs = Observable(_obs_name)
    _process_tex = r"B^0\to "+_tex[l]+r"^+"+_tex[l]+r"^-"
    _obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
    _obs.tex = r"$\text{BR}(" + _process_tex + r")$"
    _obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
    Prediction(_obs_name, bqll_obs_function(br_inst, 'B0', l, l))
_tex_l = {'e': 'e', 'mu': r'\mu', 'tau': r'\tau'}
_tex_B = {'B0': r'\bar B^0', 'Bs': r'\bar B_s'}
_tex_lfv = {'emu': r'e^+\mu^-', 'mue': r'\mu^+e^-',
            'taue': r'\tau^+e^-', 'etau': r'e^+\tau^-',
            'taumu': r'\tau^+\mu^-', 'mutau': r'\mu^+\tau^-'}
for ll_1 in [('e','mu'), ('e','tau'), ('mu','tau'),]:
    for B in ['Bs', 'B0']:
        ll_2 = ll_1[::-1] # now if ll_1 is (e, mu), ll_2 is (mu, e)
        for ll in [ll_1, ll_2]:
            # the individual BRs
            _obs_name = "BR("+B+"->"+''.join(ll)+")"
            _obs = Observable(_obs_name)
            _process_tex = _tex_B[B]+r"\to "+_tex_lfv[''.join(ll)]
            _obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
            _obs.tex = r"$\text{BR}(" + _process_tex + r")$"
            _obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to\ell^+\ell^-$ :: $' + _process_tex + r'$')
            Prediction(_obs_name, bqll_obs_function(br_inst, B, ll[0], ll[1]))
        # the individual BR where ll' and l'l are added
        _obs_name = "BR("+B+"->"+''.join(ll_1)+","+''.join(ll_2)+")"
        _obs = Observable(_obs_name)
        # add the taxonomy for BOTH charge orderings: iterating over
        # [ll_1, ll_2] here (not ll_1 twice) so the reversed ordering is
        # registered as well
        for ll in [ll_1, ll_2]:
            _process_tex = _tex_B[B]+r"\to "+_tex_lfv[''.join(ll)]
            _obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to\ell^+\ell^-$ :: $' + _process_tex + r'$')
        _process_tex = _tex_B[B]+r"\to "+_tex_l[ll_1[0]]+r"^\pm "+_tex_l[ll_1[1]]+r"^\mp"
        _obs.tex = r"$\text{BR}(" + _process_tex + r")$"
        _obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
        Prediction(_obs_name, bqll_obs_function_lsum(br_inst, B, ll_1[0], ll_1[1]))
| mit | f702a8ce021f96690773c9731560968b | 37.954198 | 127 | 0.552714 | 2.524988 | false | false | false | false |
flav-io/flavio | flavio/plots/plotfunctions.py | 1 | 39693 | from collections import OrderedDict
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import flavio
from flavio.statistics.functions import delta_chi2, confidence_level
import scipy.optimize
import scipy.interpolate
import scipy.stats
from numbers import Number
from math import sqrt
import warnings
import inspect
from multiprocessing import Pool
from pickle import PicklingError
from flavio.plots.colors import lighten_color, get_color
def error_budget_pie(err_dict, other_cutoff=0.03):
    """Pie chart of an observable's error budget.

    Parameters:

    - `err_dict`: Dictionary as return from `flavio.sm_error_budget`
    - `other_cutoff`: If an individual error contribution divided by the total
      error is smaller than this number, it is lumped under "other". Defaults
      to 0.03.

    Note that for uncorrelated parameters, the total uncertainty is the squared
    sum of the individual uncertainties, so the relative size of the wedges does
    not correspond to the relative contribution to the total uncertainty.

    If the uncertainties of individual parameters are correlated, the total
    uncertainty can be larger or smaller than the squared sum of the individual
    uncertainties, so the representation can be misleading.
    """
    err_tot = sum(err_dict.values())  # linear sum of individual errors
    labels = []
    fracs = []
    small_frac = []
    # iterate over the contributions from largest to smallest
    for key, value in sorted(err_dict.items(), key=lambda t: -t[1]):
        frac = value / err_tot
        if frac <= other_cutoff:
            small_frac.append(frac)
            continue
        if isinstance(key, str):
            try:
                labels.append(flavio.Parameter[key].tex)
            except KeyError:
                # 'key' is not actually a parameter (e.g. manually set by the user)
                labels.append(key)
        elif isinstance(key, tuple):
            labels.append(', '.join(flavio.Parameter[k].tex for k in key))
        fracs.append(frac)
    if small_frac:
        labels.append('other')
        # the "other" wedge combines the small errors in quadrature
        fracs.append(np.sqrt(np.sum(np.array(small_frac)**2)))
    # the quadratic combination above breaks the normalization of the
    # fractions; rescale them so they sum to one again
    norm = sum(fracs)
    fracs = [f / norm for f in fracs]

    def my_autopct(pct):
        # label each wedge with its absolute contribution to the error
        return r'{p:.2g}\%'.format(p=pct * err_tot)

    plt.axis('equal')
    return plt.pie(fracs,
                   labels=labels,
                   autopct=my_autopct,
                   wedgeprops={'linewidth': 0.5},
                   colors=[lighten_color('C{}'.format(i), 0.5) for i in range(10)])
def diff_plot_th(obs_name, x_min, x_max, wc=None, steps=100, scale_factor=1, **kwargs):
    r"""Plot the central theory prediction of an observable depending on
    a continuous parameter, e.g. $q^2$.

    Parameters:

    - `x_min`, `x_max`: minimum and maximum values of the parameter
    - `wc` (optional): `WilsonCoefficient` instance to define beyond-the-SM
      Wilson coefficients; if omitted, the SM prediction is plotted
    - `steps` (optional): number of steps in x. Defaults to 100. Less is
      faster but less precise.
    - `scale_factor` (optional): factor by which all values will be multiplied.
      Defaults to 1.

    Additional keyword arguments are passed to the matplotlib plot function,
    e.g. 'c' for colour.
    """
    obs = flavio.classes.Observable[obs_name]
    if not obs.arguments or len(obs.arguments) != 1:
        raise ValueError(r"Only observables that depend on a single parameter are allowed")
    x_arr = np.arange(x_min, x_max, (x_max - x_min) / (steps - 1))
    if wc is None:
        y_values = [flavio.sm_prediction(obs_name, x) for x in x_arr]
    else:
        y_values = [flavio.np_prediction(obs_name, wc, x) for x in x_arr]
    # default to a black line unless a colour was requested
    if 'c' not in kwargs and 'color' not in kwargs:
        kwargs['c'] = 'k'
    plt.gca().plot(x_arr, scale_factor * np.asarray(y_values), **kwargs)
def diff_plot_th_err(obs_name, x_min, x_max, wc=None, steps=100,
                     steps_err=5, N=100, threads=1, label=None,
                     plot_args=None, fill_args=None,
                     scale_factor=1):
    r"""Plot the theory prediction of an observable dependending on
    a continuous parameter, e.g. $q^2$,
    with uncertainties as a function of this parameter.

    Parameters:

    - `x_min`, `x_max`: minimum and maximum values of the parameter
    - `wc` (optional): `WilsonCoefficient` instance to define beyond-the-SM
      Wilson coefficients
    - `steps` (optional): number of steps for the computation of the
      central value. Defaults to 100. Less is faster but less precise.
    - `steps_err` (optional): number of steps for the computation of the
      uncertainty. Defaults to 5 and should be at least 3. Larger is slower
      but more precise. See caveat below.
    - `N` (optional): number of random evaluations to determine the uncertainty.
      Defaults to 100. Less is faster but less precise.
    - `threads` (optional): if bigger than 1, number of threads to use for
      parallel computation of uncertainties
    - `label` (optional): label passed to the matplotlib plot function
    - `plot_args` (optional): dictionary with keyword arguments to be passed
      to the matplotlib plot function, e.g. 'c' for colour.
    - `fill_args` (optional): dictionary with keyword arguments to be passed
      to the matplotlib fill_between function, e.g. 'facecolor'
    - `scale_factor` (optional): factor by which all values will be multiplied.
      Defaults to 1.

    A word of caution regarding the `steps_err` option. By default, the
    uncertainty is only computed at `steps_err` points and is interpolated in
    between. This can be enough if the uncertainty does not vary strongly
    with the parameter. However, when the starting point or end point of the
    plot range is outside the physical phase space, the uncertainty will
    vanish at that point and the interpolation might be inaccurate.
    """
    obs = flavio.classes.Observable[obs_name]
    if not obs.arguments or len(obs.arguments) != 1:
        raise ValueError(r"Only observables that depend on a single parameter are allowed")
    step = (x_max - x_min) / (steps - 1)
    x_arr = np.arange(x_min, x_max + step, step)
    step = (x_max - x_min) / (steps_err - 1)
    x_err_arr = np.arange(x_min, x_max + step, step)
    # fix to avoid bounds_error in interp1d due to lack of numerical precision
    x_err_arr[-1] = x_arr[-1]
    if wc is None:
        wc = flavio.physics.eft._wc_sm  # SM Wilson coefficients
        # pass N so the requested number of random evaluations is actually
        # used (previously N was accepted but silently ignored)
        obs_err_arr = [flavio.sm_uncertainty(obs_name, x, N=N, threads=threads) for x in x_err_arr]
        obs_arr = [flavio.sm_prediction(obs_name, x) for x in x_arr]
    else:
        obs_err_arr = [flavio.np_uncertainty(obs_name, wc, x, N=N, threads=threads) for x in x_err_arr]
        obs_arr = [flavio.np_prediction(obs_name, wc, x) for x in x_arr]
    ax = plt.gca()
    plot_args = plot_args or {}
    fill_args = fill_args or {}
    if label is not None:
        plot_args['label'] = label
    if 'alpha' not in fill_args:
        fill_args['alpha'] = 0.5
    ax.plot(x_arr, scale_factor * np.asarray(obs_arr), **plot_args)
    # interpolate the uncertainty from the coarse grid to the fine grid
    interp_err = scipy.interpolate.interp1d(x_err_arr, obs_err_arr,
                                            kind='quadratic')
    obs_err_arr_int = interp_err(x_arr)
    ax.fill_between(x_arr,
                    scale_factor * np.asarray(obs_arr - obs_err_arr_int),
                    scale_factor * np.asarray(obs_arr + obs_err_arr_int),
                    **fill_args)
def bin_plot_th(obs_name, bin_list, wc=None, divide_binwidth=False, N=50, threads=1, **kwargs):
    r"""Plot the binned theory prediction with uncertainties of an observable
    dependending on a continuous parameter, e.g. $q^2$ (in the form of coloured
    boxes).

    Parameters:

    - `bin_list`: a list of tuples containing bin boundaries
    - `wc` (optional): `WilsonCoefficient` instance to define beyond-the-SM
      Wilson coefficients
    - `divide_binwidth` (optional): this should be set to True when comparing
      integrated branching ratios from experiments with different bin widths
      or to theory predictions for a differential branching ratio. It will
      divide all values and uncertainties by the bin width (i.e. dimensionless
      integrated BRs will be converted to $q^2$-integrated differential BRs with
      dimensions of GeV$^{-2}$). Defaults to False.
    - `N` (optional): number of random draws to determine the uncertainty.
      Defaults to 50. Larger is slower but more precise. The relative
      error of the theory uncertainty scales as $1/\sqrt{2N}$.

    Additional keyword arguments are passed to the matplotlib add_patch function,
    e.g. 'fc' for face colour.
    """
    obs = flavio.classes.Observable[obs_name]
    if not obs.arguments or len(obs.arguments) != 2:
        raise ValueError(r"Only observables that depend on the two bin boundaries (and nothing else) are allowed")
    if wc is None:
        wc = flavio.physics.eft._wc_sm  # SM Wilson coefficients
        central = {b: flavio.sm_prediction(obs_name, *b) for b in bin_list}
        uncert = {b: flavio.sm_uncertainty(obs_name, *b, N=N, threads=threads) for b in bin_list}
    else:
        central = {b: flavio.np_prediction(obs_name, wc, *b) for b in bin_list}
        uncert = {b: flavio.np_uncertainty(obs_name, wc, *b, N=N, threads=threads) for b in bin_list}
    ax = plt.gca()
    for i, bin_ in enumerate(central):
        xmin, xmax = bin_
        value = central[bin_]
        err = uncert[bin_]
        if divide_binwidth:
            width = xmax - xmin
            value = value / width
            err = err / width
        # default style: coloured boxes without an outline
        if 'fc' not in kwargs and 'facecolor' not in kwargs:
            kwargs['fc'] = 'C6'
        if 'linewidth' not in kwargs and 'lw' not in kwargs:
            kwargs['lw'] = 0
        if i > 0:
            # the label should only be attached to the first box, otherwise
            # it would appear multiple times in the legend
            kwargs.pop('label', None)
        ax.add_patch(patches.Rectangle((xmin, value - err), xmax - xmin, 2 * err, **kwargs))
def bin_plot_exp(obs_name, col_dict=None, divide_binwidth=False, include_measurements=None,
                 include_bins=None, exclude_bins=None,
                 scale_factor=1,
                 **kwargs):
    r"""Plot all existing binned experimental measurements of an observable
    dependending on a continuous parameter, e.g. $q^2$ (in the form of
    coloured crosses).

    Parameters:

    - `col_dict` (optional): a dictionary to assign colours to specific
      experiments, e.g. `{'BaBar': 'b', 'Belle': 'r'}`
    - `divide_binwidth` (optional): this should be set to True when comparing
      integrated branching ratios from experiments with different bin widths
      or to theory predictions for a differential branching ratio. It will
      divide all values and uncertainties by the bin width (i.e. dimensionless
      integrated BRs will be converted to $q^2$-integrated differential BRs with
      dimensions of GeV$^{-2}$). Defaults to False.
    - `include_measurements` (optional): a list of strings with measurement
      names (see measurements.yml) to include in the plot. By default, all
      existing measurements will be included.
    - `include_bins` (optional): a list of bins (as tuples of the bin
      boundaries) to include in the plot. By default, all measured bins
      will be included. Should not be specified simultaneously with
      `exclude_bins`.
    - `exclude_bins` (optional): a list of bins (as tuples of the bin
      boundaries) not to include in the plot. By default, all measured bins
      will be included. Should not be specified simultaneously with
      `include_bins`.
    - `scale_factor` (optional): factor by which all values will be multiplied.
      Defaults to 1.

    Additional keyword arguments are passed to the matplotlib errorbar function,
    e.g. 'c' for colour.

    Returns `(y, bins)`, where `bins` collects all plotted bins and `y` holds
    the (scaled) central values of the measurement processed last.
    NOTE(review): if no matching measurement contributes a data point, `y` is
    never assigned and the return raises a NameError -- confirm whether
    callers guarantee at least one matching measurement.
    """
    obs = flavio.classes.Observable[obs_name]
    if not obs.arguments or len(obs.arguments) != 2:
        raise ValueError(r"Only observables that depend on the two bin boundaries (and nothing else) are allowed")
    _experiment_labels = [] # list of experiments appearing in the plot legend
    bins = []
    for m_name, m_obj in flavio.Measurement.instances.items():
        if include_measurements is not None and m_name not in include_measurements:
            continue
        obs_name_list = m_obj.all_parameters
        # keep only binned instances (obs_name, xmin, xmax) of this observable
        obs_name_list_binned = [o for o in obs_name_list if isinstance(o, tuple) and o[0]==obs_name]
        if not obs_name_list_binned:
            continue
        central = m_obj.get_central_all()
        err = m_obj.get_1d_errors_rightleft()
        # per-measurement accumulators for the errorbar call below
        x = []
        y = []
        dx = []
        dy_lower = []
        dy_upper = []
        for _, xmin, xmax in obs_name_list_binned:
            if include_bins is not None:
                if exclude_bins is not None:
                    raise ValueError("Please only specify include_bins or exclude_bins, not both")
                elif (xmin, xmax) not in include_bins:
                    continue
            elif exclude_bins is not None:
                if (xmin, xmax) in exclude_bins:
                    continue
            bins.append((xmin, xmax))
            c = central[(obs_name, xmin, xmax)]
            e_right, e_left = err[(obs_name, xmin, xmax)]
            if divide_binwidth:
                c = c/(xmax-xmin)
                e_left = e_left/(xmax-xmin)
                e_right = e_right/(xmax-xmin)
            ax=plt.gca()
            # plot the point at the bin centre, half the bin width as x error
            x.append((xmax+xmin)/2.)
            dx.append((xmax-xmin)/2)
            y.append(c)
            dy_lower.append(e_left)
            dy_upper.append(e_right)
        kwargs_m = kwargs.copy() # copy valid for this measurement only
        if x or y: # only if a data point exists
            if col_dict is not None:
                if m_obj.experiment in col_dict:
                    col = col_dict[m_obj.experiment]
                    kwargs_m['c'] = col
            if 'label' not in kwargs_m:
                if m_obj.experiment not in _experiment_labels:
                    # if there is no plot legend entry for the experiment yet,
                    # add it and add the experiment to the list keeping track
                    # of existing labels (we don't want an experiment to appear
                    # twice in the legend)
                    kwargs_m['label'] = m_obj.experiment
                    _experiment_labels.append(m_obj.experiment)
            y = scale_factor * np.array(y)
            dy_lower = scale_factor * np.array(dy_lower)
            dy_upper = scale_factor * np.array(dy_upper)
            ax.errorbar(x, y, yerr=[dy_lower, dy_upper], xerr=dx, fmt='.', **kwargs_m)
    return y, bins
def diff_plot_exp(obs_name, col_dict=None, include_measurements=None,
                  include_x=None, exclude_x=None,
                  scale_factor=1,
                  **kwargs):
    r"""Plot all existing experimental measurements of an observable
    dependending on a continuous parameter, e.g. $q^2$ (in the form of
    coloured error bars).

    Parameters:

    - `col_dict` (optional): a dictionary to assign colours to specific
      experiments, e.g. `{'BaBar': 'b', 'Belle': 'r'}`
    - `include_measurements` (optional): a list of strings with measurement
      names (see measurements.yml) to include in the plot. By default, all
      existing measurements will be included.
    - `include_x` (optional): a list of values
      to include in the plot. By default, all measured values
      will be included. Should not be specified simultaneously with
      `exclude_x`.
    - `exclude_x` (optional): a list of values
      not to include in the plot. By default, all measured values
      will be included. Should not be specified simultaneously with
      `include_x`.
    - `scale_factor` (optional): factor by which all values will be multiplied.
      Defaults to 1.

    Additional keyword arguments are passed to the matplotlib errorbar function,
    e.g. 'c' for colour.

    Returns `(y, xs)`, where `xs` collects all plotted x values and `y` holds
    the (scaled) central values of the measurement processed last.
    NOTE(review): if no matching measurement contributes a data point, `y` is
    never assigned and the return raises a NameError -- confirm whether
    callers guarantee at least one matching measurement.
    """
    obs = flavio.classes.Observable[obs_name]
    if not obs.arguments or len(obs.arguments) != 1:
        raise ValueError(r"Only observables that depend on a single variable are allowed")
    _experiment_labels = [] # list of experiments appearing in the plot legend
    xs = []
    for m_name, m_obj in flavio.Measurement.instances.items():
        if include_measurements is not None and m_name not in include_measurements:
            continue
        obs_name_list = m_obj.all_parameters
        # keep only instances (obs_name, X) of this observable
        obs_name_list_x = [o for o in obs_name_list if isinstance(o, tuple) and o[0]==obs_name]
        if not obs_name_list_x:
            continue
        central = m_obj.get_central_all()
        err = m_obj.get_1d_errors_rightleft()
        # per-measurement accumulators for the errorbar call below
        x = []
        y = []
        dy_lower = []
        dy_upper = []
        for _, X in obs_name_list_x:
            if include_x is not None:
                if exclude_x is not None:
                    raise ValueError("Please only specify include_x or exclude_x, not both")
                elif X not in include_x:
                    continue
            elif exclude_x is not None:
                if X in exclude_x:
                    continue
            xs.append(X)
            c = central[(obs_name, X)]
            e_right, e_left = err[(obs_name, X)]
            ax=plt.gca()
            x.append(X)
            y.append(c)
            dy_lower.append(e_left)
            dy_upper.append(e_right)
        kwargs_m = kwargs.copy() # copy valid for this measurement only
        if x or y: # only if a data point exists
            if col_dict is not None:
                if m_obj.experiment in col_dict:
                    col = col_dict[m_obj.experiment]
                    kwargs_m['c'] = col
            if 'label' not in kwargs_m:
                if m_obj.experiment not in _experiment_labels:
                    # if there is no plot legend entry for the experiment yet,
                    # add it and add the experiment to the list keeping track
                    # of existing labels (we don't want an experiment to appear
                    # twice in the legend)
                    kwargs_m['label'] = m_obj.experiment
                    _experiment_labels.append(m_obj.experiment)
            y = scale_factor * np.array(y)
            dy_lower = scale_factor * np.array(dy_lower)
            dy_upper = scale_factor * np.array(dy_upper)
            ax.errorbar(x, y, yerr=[dy_lower, dy_upper], fmt='.', **kwargs_m)
    return y, xs
def density_contour_data(x, y, covariance_factor=None, n_bins=None, n_sigma=(1, 2)):
    r"""Generate the data for a plot with confidence contours of the density
    of points (useful for MCMC analyses).

    Parameters:

    - `x`, `y`: lists or numpy arrays with the x and y coordinates of the points
    - `covariance_factor`: optional, numerical factor to tweak the smoothness
      of the contours. If not specified, estimated using Scott's/Silverman's rule.
      The factor should be between 0 and 1; larger values means more smoothing is
      applied.
    - `n_bins`: number of bins in the histogram created as an intermediate step.
      this usually does not have to be changed.
    - `n_sigma`: integer or iterable of integers specifying the contours
      corresponding to the number of sigmas to be drawn. For instance, the
      default (1, 2) draws the contours containing approximately 68 and 95%
      of the points, respectively.

    Returns a dictionary with keys 'x', 'y', 'z' (meshgrid data) and 'levels'.
    """
    # import the submodule explicitly: `scipy.signal` is not guaranteed to be
    # reachable as an attribute of the `scipy` namespace through the other
    # scipy imports at the top of this file
    import scipy.signal
    if n_bins is None:
        n_bins = min(10*int(sqrt(len(x))), 200)
    # 2D histogram of the points as a raw density estimate
    f_binned, x_edges, y_edges = np.histogram2d(x, y, density=True, bins=n_bins)
    x_centers = (x_edges[:-1] + x_edges[1:])/2.
    y_centers = (y_edges[:-1] + y_edges[1:])/2.
    x_mean = np.mean(x_centers)
    y_mean = np.mean(y_centers)
    dataset = np.vstack([x, y])
    if covariance_factor is None:
        # Scott's/Silverman's rule
        n = len(x) # no. of data points
        _covariance_factor = n**(-1/6.)
    else:
        _covariance_factor = covariance_factor
    cov = np.cov(dataset) * _covariance_factor**2
    gaussian_kernel = scipy.stats.multivariate_normal(mean=[x_mean, y_mean], cov=cov)
    x_grid, y_grid = np.meshgrid(x_centers, y_centers)
    xy_grid = np.vstack([x_grid.ravel(), y_grid.ravel()])
    f_gauss = gaussian_kernel.pdf(xy_grid.T)
    f_gauss = np.reshape(f_gauss, (len(x_centers), len(y_centers))).T
    # smooth the histogram by convolving it with the Gaussian kernel
    f = scipy.signal.fftconvolve(f_binned, f_gauss, mode='same').T
    f = f/f.sum()
    def find_confidence_interval(x, pdf, confidence_level):
        return pdf[pdf > x].sum() - confidence_level
    def get_level(n):
        # density value whose superlevel set contains the desired probability
        return scipy.optimize.brentq(find_confidence_interval, 0., 1.,
                                     args=(f.T, confidence_level(n)))
    if isinstance(n_sigma, Number):
        levels = [get_level(n_sigma)]
    else:
        levels = [get_level(m) for m in sorted(n_sigma)]
    # replace negative or zero values by a tiny number before taking the log
    f[f <= 0] = 1e-32
    # convert probability to -2*log(probability), i.e. a chi^2
    f = -2*np.log(f)
    # convert levels to chi^2 and make the mode equal chi^2=0
    levels = list(-2*np.log(levels) - np.min(f))
    f = f - np.min(f)
    return {'x': x_grid, 'y': y_grid, 'z': f, 'levels': levels}
def density_contour(x, y, covariance_factor=None, n_bins=None, n_sigma=(1, 2),
                    **kwargs):
    r"""A plot with confidence contours of the density of points
    (useful for MCMC analyses).

    Parameters:

    - `x`, `y`: lists or numpy arrays with the x and y coordinates of the points
    - `covariance_factor`: optional, numerical factor to tweak the smoothness
      of the contours. If not specified, estimated using Scott's/Silverman's rule.
      The factor should be between 0 and 1; larger values means more smoothing is
      applied.
    - `n_bins`: number of bins in the histogram created as an intermediate step.
      this usually does not have to be changed.
    - `n_sigma`: integer or iterable of integers specifying the contours
      corresponding to the number of sigmas to be drawn. For instance, the
      default (1, 2) draws the contours containing approximately 68 and 95%
      of the points, respectively.

    All remaining keyword arguments are passed to the `contour` function
    and allow to control the presentation of the plot (see docstring of
    `flavio.plots.plotfunctions.contour`).
    """
    contour_kwargs = dict(density_contour_data(x=x, y=y,
                                               covariance_factor=covariance_factor,
                                               n_bins=n_bins, n_sigma=n_sigma))
    contour_kwargs['z_min'] = np.min(contour_kwargs['z'])  # set minimum to prevent warning
    # presentation options override/extend the computed data
    contour_kwargs.update(kwargs)
    return contour(**contour_kwargs)
def likelihood_contour_data(log_likelihood, x_min, x_max, y_min, y_max,
                            n_sigma=1, steps=20, threads=1, pool=None):
    r"""Generate data required to plot coloured confidence contours (or bands)
    given a log likelihood function.

    Parameters:

    - `log_likelihood`: function returning the logarithm of the likelihood.
      Can e.g. be the method of the same name of a FastFit instance.
    - `x_min`, `x_max`, `y_min`, `y_max`: data boundaries
    - `n_sigma`: plot confidence level corresponding to this number of standard
      deviations. Either a number (defaults to 1) or a tuple to plot several
      contours.
    - `steps`: number of grid steps in each dimension (total computing time is
      this number squared times the computing time of one `log_likelihood` call!)
    - `threads`: number of threads, defaults to 1. If greater than one,
      computation of z values will be done in parallel.
    - `pool`: an instance of `multiprocessing.Pool` (or a compatible
      implementation, e.g. from `multiprocess` or `schwimmbad`). Overrides the
      `threads` argument.

    Returns a dictionary with keys 'x', 'y', 'z' (meshgrid data) and 'levels'.
    NOTE(review): a user-supplied `pool` is closed and joined by this function
    -- confirm that callers do not intend to reuse it afterwards.
    """
    _x = np.linspace(x_min, x_max, steps)
    _y = np.linspace(y_min, y_max, steps)
    x, y = np.meshgrid(_x, _y)
    if threads == 1:
        @np.vectorize
        def chi2_vect(x, y): # needed for evaluation on meshgrid
            # chi^2 = -2 log L
            return -2*log_likelihood([x,y])
        z = chi2_vect(x, y)
    else:
        # flatten the grid to a list of (x, y) points suitable for pool.map
        xy = np.array([x, y]).reshape(2, steps**2).T
        pool = pool or Pool(threads)
        try:
            z = -2*np.array(pool.map(log_likelihood, xy )).reshape((steps, steps))
        except PicklingError:
            pool.close()
            raise PicklingError("When using more than 1 thread, the "
                                "log_likelihood function must be picklable; "
                                "in particular, you cannot use lambda expressions.")
        pool.close()
        pool.join()
    # get the correct values for 2D confidence/credibility contours for n sigma
    if isinstance(n_sigma, Number):
        levels = [delta_chi2(n_sigma, dof=2)]
    else:
        levels = [delta_chi2(n, dof=2) for n in n_sigma]
    return {'x': x, 'y': y, 'z': z, 'levels': levels}
def likelihood_contour(log_likelihood, x_min, x_max, y_min, y_max,
n_sigma=1, steps=20, threads=1,
**kwargs):
r"""Plot coloured confidence contours (or bands) given a log likelihood
function.
Parameters:
- `log_likelihood`: function returning the logarithm of the likelihood.
Can e.g. be the method of the same name of a FastFit instance.
- `x_min`, `x_max`, `y_min`, `y_max`: data boundaries
- `n_sigma`: plot confidence level corresponding to this number of standard
deviations. Either a number (defaults to 1) or a tuple to plot several
contours.
- `steps`: number of grid steps in each dimension (total computing time is
this number squared times the computing time of one `log_likelihood` call!)
All remaining keyword arguments are passed to the `contour` function
and allow to control the presentation of the plot (see docstring of
`flavio.plots.plotfunctions.contour`).
"""
data = likelihood_contour_data(log_likelihood=log_likelihood,
x_min=x_min, x_max=x_max,
y_min=y_min, y_max=y_max,
n_sigma=n_sigma, steps=steps, threads=threads)
data.update(kwargs) # since we cannot do **data, **kwargs in Python <3.5
return contour(**data)
# alias for backward compatibility
def band_plot(log_likelihood, x_min, x_max, y_min, y_max,
n_sigma=1, steps=20, **kwargs):
r"""This is an alias for `likelihood_contour` which is present for
backward compatibility."""
warnings.warn("The `band_plot` function has been replaced "
"by `likelihood_contour` (or "
"`likelihood_contour_data` in conjunction with `contour`) "
"and might be removed in the future. "
"Please update your code.", FutureWarning)
valid_args = inspect.signature(likelihood_contour_data).parameters.keys()
data_kwargs = {k:v for k,v in kwargs.items() if k in valid_args}
if 'pre_calculated_z' not in kwargs:
contour_kwargs = likelihood_contour_data(log_likelihood,
x_min, x_max, y_min, y_max,
n_sigma, steps, **data_kwargs)
else:
contour_kwargs = {}
nx, ny = kwargs['pre_calculated_z'].shape
_x = np.linspace(x_min, x_max, nx)
_y = np.linspace(y_min, y_max, ny)
x, y = np.meshgrid(_x, _y)
contour_kwargs['x'] = x
contour_kwargs['y'] = y
contour_kwargs['z'] = kwargs['pre_calculated_z']
if isinstance(n_sigma, Number):
contour_kwargs['levels'] = [delta_chi2(n_sigma, dof=2)]
else:
contour_kwargs['levels'] = [delta_chi2(n, dof=2) for n in n_sigma]
valid_args = inspect.signature(contour).parameters.keys()
contour_kwargs.update({k:v for k,v in kwargs.items() if k in valid_args})
contour(**contour_kwargs)
return contour_kwargs['x'], contour_kwargs['y'], contour_kwargs['z']
def contour(x, y, z, levels, *, z_min=None,
interpolation_factor=1,
interpolation_order=2,
col=None, color=None, label=None,
filled=True,
contour_args={}, contourf_args={},
**kwargs):
r"""Plot coloured confidence contours (or bands) given numerical input
arrays.
Parameters:
- `x`, `y`: 2D arrays containg x and y values as returned by numpy.meshgrid
- `z` value of the function to plot. 2D array in the same shape as `x` and
`y`.
- levels: list of function values where to draw the contours. They should
be positive and in ascending order.
- `z_min` (optional): lowest value of the function to plot (i.e. value at
the best fit point). If not provided, the smallest value on the grid is
used.
- `interpolation factor` (optional): in between the points on the grid,
the functioncan be interpolated to get smoother contours.
This parameter sets the number of subdivisions (default: 1, i.e. no
interpolation). It should be larger than 1.
- `col` (optional): number between 0 and 9 to choose the color of the plot
from a predefined palette
- `label` (optional): label that will be added to a legend created with
maplotlib.pyplot.legend()
- `filled` (optional): if False, contours will be drawn without shading
- `contour_args`: dictionary of additional options that will be passed
to matplotlib.pyplot.contour() (that draws the contour lines)
- `contourf_args`: dictionary of additional options that will be passed
to matplotlib.pyplot.contourf() (that paints the contour filling).
Ignored if `filled` is false.
"""
if z_min is None:
warnings.warn("The smallest `z` value on the grid will be used as the "
"minimum of the function to plot. This can lead to "
"undesired results if the actual minimum is considerably "
"different from the minimum on the grid. For better "
"precision, the actual minimum should be provided in the "
"`z_min` argument.")
z_min = np.min(z) # use minmum on the grid
elif np.min(z) < z_min:
raise ValueError("The provided minimum `z_min` has to be smaller than "
"the smallest `z` value on the grid.")
z = z - z_min # subtract z minimum to make value of new z minimum 0
if interpolation_factor > 1:
x = scipy.ndimage.zoom(x, zoom=interpolation_factor, order=1)
y = scipy.ndimage.zoom(y, zoom=interpolation_factor, order=1)
z = scipy.ndimage.zoom(z, zoom=interpolation_factor, order=interpolation_order)
_contour_args = {}
_contourf_args = {}
color = get_color(col=col, color=color)
_contour_args['colors'] = color
if filled:
_contour_args['linewidths'] = 0.6
else:
_contour_args['linewidths'] = 0.8
N = len(levels)
_contourf_args['colors'] = [lighten_color(color, 0.5)# RGB
+ (max(1-n/N, 0),) # alpha, decreasing for contours
for n in range(N)]
_contour_args['linestyles'] = 'solid'
_contour_args.update(contour_args)
_contourf_args.update(contourf_args)
# for the filling, need to add zero contour
zero_contour = min(np.min(z),np.min(levels)*(1-1e-16))
levelsf = [zero_contour] + list(levels)
ax = plt.gca()
if filled:
ax.contourf(x, y, z, levels=levelsf, **_contourf_args)
CS = ax.contour(x, y, z, levels=levels, **_contour_args)
if label is not None:
handle = CS.legend_elements()[0][0]
handle.set_label(label)
ax.add_container(handle)
return CS
def flavio_branding(x=0.8, y=0.94, version=True):
"""Displays a little box containing 'flavio'"""
props = dict(facecolor='white', alpha=0.4, lw=0)
ax = plt.gca()
text = r'\textsf{\textbf{flavio}}'
if version:
text += r'\textsf{\scriptsize{ v' + flavio.__version__ + '}}'
ax.text(x, y, text, transform=ax.transAxes, fontsize=12, verticalalignment='top', bbox=props, alpha=0.4)
def flavio_box(x_min, x_max, y_min, y_max):
ax = plt.gca()
ax.add_patch(patches.Rectangle((x_min, y_min), x_max-x_min, y_max-y_min, facecolor='#ffffff', edgecolor='#666666', alpha=0.5, ls=':', lw=0.7))
def smooth_histogram(data, bandwidth=None, **kwargs):
"""A smooth histogram based on a Gaussian kernel density estimate.
Parameters:
- `data`: input array
- `bandwidth`: (optional) smoothing bandwidth for the Gaussian kernel
The remaining parameters will be passed to `pdf_plot`.
"""
kde = flavio.statistics.probability.GaussianKDE(data, bandwidth=bandwidth)
pdf_plot(kde, **kwargs)
def pdf_plot(dist, x_min=None, x_max=None, fill=True, steps=500, normed=True, **kwargs):
"""Plot of a 1D probability density function.
Parameters:
- `dist`: an instance of ProbabilityDistribution
- `x_min`, `x_max`: plot boundaries
- `steps`: optional, number of points (default: 200)
The remaining parameters will be passed to `likelihood_plot`.
"""
_x_min = x_min or dist.support[0]
_x_max = x_max or dist.support[1]
x = np.linspace(_x_min, _x_max, steps)
try:
y = dist.pdf(x)
except:
y = np.exp(dist.logpdf(x))
if normed == 'max':
y = y/np.max(y)
if fill:
fill_left = dist.central_value - dist.get_error_left(method='hpd')
fill_right = dist.central_value + dist.get_error_right(method='hpd')
fill_x=[fill_left, fill_right]
else:
fill_x=None
likelihood_plot(x, y, fill_x=fill_x, **kwargs)
def likelihood_plot(x, y, fill_x=None, col=None, color=None, label=None, plotargs={}, fillargs={},
flipped=False):
"""Plot of a 1D probability density function.
Parameters:
- `x`: x values
- `y`: y values
- `fill_x`: 2-tuple of x-values in between which the curve will be filled
- `col`: (optional) integer to select one of the colours from the default
palette
- `plotargs`: keyword arguments passed to the `plot` function
- `fillargs`: keyword arguments passed to the `fill_between` function
- `flipped`: exchange x and y axes (needed for `density_contour_joint`)
"""
ax = plt.gca()
_plotargs = {}
_fillargs = {}
# default values
_plotargs['linewidth'] = 0.6
if label is not None:
_plotargs['label'] = label
color = get_color(col=col, color=color)
_plotargs['color'] = color
_fillargs['facecolor'] = lighten_color(color, 0.5)
_fillargs.update(fillargs)
_plotargs.update(plotargs)
if not flipped:
ax.plot(x, y, **_plotargs)
if fill_x is not None:
ax.fill_between(x, 0, y,
where=np.logical_and(fill_x[0] < x, x < fill_x[1]),
**_fillargs)
else:
ax.plot(y, x, **_plotargs)
if fill_x is not None:
ax.fill_betweenx(x, 0, y,
where=np.logical_and(fill_x[0] < x, x < fill_x[1]),
**_fillargs)
def pvalue_plot(x, y, fill_y=None, col=None, color=None, label=None,
plotargs={}, fillargs={}):
"""Plot of a 1D confidence level distribution, where the y axis is 1-CL.
Parameters:
- `x`: x values
- `y`: y values
- `fill_y`: for x-values where y is larger than this number, the area
between the x-axis and the curve will be filled
- `col`: (optional) integer to select one of the colours from the default
palette
- `plotargs`: keyword arguments passed to the `plot` function
- `fillargs`: keyword arguments passed to the `fill_between` function
"""
ax = plt.gca()
_plotargs = {}
_fillargs = {}
# default values
_plotargs['linewidth'] = 0.6
if label is not None:
_plotargs['label'] = label
color = get_color(col=col, color=color)
_plotargs['color'] = color
_fillargs['facecolor'] = lighten_color(color, 0.5)
_fillargs.update(fillargs)
_plotargs.update(plotargs)
ax.plot(x, y, **_plotargs)
if fill_y is not None:
x_zoom = scipy.ndimage.zoom(x, zoom=10, order=1)
y_zoom = scipy.ndimage.zoom(y, zoom=10, order=1)
ax.fill_between(x_zoom, 0, y_zoom,
where=y_zoom > fill_y,
**_fillargs)
ax.set_ylim([0, 1])
def density_contour_joint(x, y,
col=None, color=None,
bandwidth_x=None, bandwidth_y=None,
hist_args=None,
ax_2d=None, ax_x=None, ax_y=None,
**kwargs):
r"""A density contour plot that additionally has the 1D marginals for
the x and y dsitribution plotted as smooth histograms along the axes.
Parameters:
- `x`, `y`: lists or numpy arrays with the x and y coordinates of the points
- `covariance_factor`: optional, numerical factor to tweak the smoothness
of the 2D contours (see `density_contour_data`)
- `col`: optional, integer specifying the colour, will be applied to both
contour plot and marginals
- `bandwidth_x`: optional, smoothing bandwidth for the Gaussian kernel of the
x marginal distribution
- `bandwidth_y`: optional, smoothing bandwidth for the Gaussian kernel of the
y marginal distribution
Additional options can be passed as follows:
- `hist_args`: dictionary with keyword arguments passed to the 1D
`smooth_histogram` for both the x and y distribution
- Additional keyword arguments will be passed to `density_contour`
To plot multiple distributions in one figure, the function returns a
dictionary with the three axis instances that can then be passed into
another call of the function, e.g.
```
axes1 = density_contour_joint(x1, y1, col=0)
axes2 = density_contour_joint(x2, y2, col=1, **axes1)
```
"""
# define the plot grid
gs = matplotlib.gridspec.GridSpec(2, 2, width_ratios=[3, 1], height_ratios = [1, 4])
gs.update(hspace=0, wspace=0)
# set up axes (unless they are already given)
if ax_2d is None:
ax_2d = plt.subplot(gs[1, 0])
# make the 2D density contour plot
density_contour(x, y, col=col, color=color, **kwargs)
# x axis histogram
if hist_args is None:
hist_args = {}
if ax_x is None:
ax_x = plt.subplot(gs[0, 0], sharex=ax_2d, yticks=[], frameon=False)
plt.sca(ax_x)
smooth_histogram(x, bandwidth=bandwidth_x, col=col, color=color, **hist_args)
# y axis histogram
if ax_y is None:
ax_y = plt.subplot(gs[1, 1], sharey=ax_2d, xticks=[], frameon=False)
plt.sca(ax_y)
smooth_histogram(y, flipped=True, bandwidth=bandwidth_y, col=col, color=color, **hist_args)
# remove x and y histogram tick labels
plt.setp(ax_x.get_xticklabels(), visible=False);
plt.setp(ax_y.get_yticklabels(), visible=False);
# set 2D plot as active axis for adding legends etc.
plt.sca(ax_2d)
plt.tight_layout()
return {'ax_2d': ax_2d, 'ax_x': ax_x, 'ax_y': ax_y}
| mit | 43482d98cdbfaee869f2ed7ccb8b3e04 | 43.103333 | 146 | 0.622553 | 3.65161 | false | false | false | false |
flav-io/flavio | flavio/physics/bdecays/formfactors/b_p/bcl.py | 1 | 3568 | from math import sqrt
import numpy as np
import flavio
from flavio.physics.bdecays.formfactors.common import z
from flavio.physics.bdecays.formfactors.b_p.isgurwise import isgur_wise
def pole(ff, mres, q2):
if mres == 0 or mres is None:
return 1
return 1/(1-q2/mres**2)
# the following dict maps transitions to mesons. Note that it doesn't really
# matter whether the charged or neutral B/K/pi are used here. We don't
# distinguish between charged and neutral form factors anyway.
process_dict = {}
process_dict['B->K'] = {'B': 'B0', 'P': 'K0'}
process_dict['Bs->K'] = {'B': 'Bs', 'P': 'K+'}
process_dict['B->D'] = {'B': 'B+', 'P': 'D0'}
process_dict['B->pi'] = {'B': 'B+', 'P': 'pi0'}
def param_fplusT(mB, mP, a_i, q2, t0=None):
Z = z(mB, mP, q2, t0)
n = len(a_i)
k = np.arange(n)
return ( a_i * (Z**k - (-1.)**(k - n) * k/n * Z**n) ).sum()
def param_f0(mB, mP, a_i, q2, t0=None):
Z = z(mB, mP, q2, t0)
k = np.arange(len(a_i))
return ( a_i * Z**k ).sum()
def ff(process, q2, par, n=3, t0=None):
r"""Central value of $B\to P$ form factors in the standard convention
and BCL parametrization (arXiv:0807.2722).
The standard convention defines the form factors $f_+$, $f_0$, and $f_T$.
"""
flavio.citations.register("Bourrely:2008za")
pd = process_dict[process]
mpl = par[process + ' BCL m+']
m0 = par[process + ' BCL m0']
mB = par['m_'+pd['B']]
mP = par['m_'+pd['P']]
ff = {}
a={}
for i in ['f+', 'fT']:
a[i] = [ par[process + ' BCL' + ' a' + str(j) + '_' + i] for j in range(n) ]
# only the first n-1 parameters for f0 are taken from par
# the nth one is chosen to fulfill the kinematic constraint f+(0)=f0(0)
a['f0'] = [ par[process + ' BCL' + ' a' + str(j) + '_f0'] for j in range(n-1) ]
fplus_q20 = pole('f+', mpl, 0) * param_fplusT(mB, mP, a['f+'], 0, t0)
f0_q20 = pole('f0', m0, 0) * param_f0(mB, mP, a['f0'], 0, t0)
an_f0 = (f0_q20-fplus_q20)/z(mB, mP, 0, t0)**(n-1)
a['f0'].append(an_f0)
# evaluate FFs
ff['f+'] = pole('f+', mpl, q2) * param_fplusT(mB, mP, a['f+'], q2, t0)
ff['fT'] = pole('fT', mpl, q2) * param_fplusT(mB, mP, a['fT'], q2, t0)
ff['f0'] = pole('f0', m0, q2) * param_f0(mB, mP, a['f0'], q2, t0)
return ff
def ff_isgurwise(process, q2, par, scale, n=3, t0=None):
r"""Central value of $B\to P$ form factors in the standard convention
and BCL parametrization (arXiv:0807.2722) for $f_0$ and $f_+$, but using
an improved Isgur-Wise relation in the heavy quark limit for $f_T$.
"""
flavio.citations.register("Bourrely:2008za")
pd = process_dict[process]
mpl = par[process + ' BCL m+']
m0 = par[process + ' BCL m0']
mB = par['m_'+pd['B']]
mP = par['m_'+pd['P']]
ff = {}
a={}
a['f+'] = [ par[process + ' BCL' + ' a' + str(j) + '_f+'] for j in range(n) ]
# only the first n-1 parameters for f0 are taken from par
# the nth one is chosen to fulfill the kinematic constraint f+(0)=f0(0)
a['f0'] = [ par[process + ' BCL' + ' a' + str(j) + '_f0'] for j in range(n-1) ]
fplus_q20 = pole('f+', mpl, 0) * param_fplusT(mB, mP, a['f+'], 0, t0)
f0_q20 = pole('f0', m0, 0) * param_f0(mB, mP, a['f0'], 0, t0)
an_f0 = (fplus_q20-f0_q20)/z(mB, mP, 0, t0)**(n-1)
a['f0'].append(an_f0)
# evaluate FFs
ff['f+'] = pole('f+', mpl, q2) * param_fplusT(mB, mP, a['f+'], q2, t0)
ff['f0'] = pole('f0', m0, q2) * param_f0(mB, mP, a['f0'], q2, t0)
ff = isgur_wise(process, q2, ff, par, scale=scale)
return ff
| mit | e496204b2464168e699fc9aa5f157593 | 40.011494 | 84 | 0.554372 | 2.328982 | false | false | false | false |
flav-io/flavio | flavio/physics/higgs/width.py | 1 | 1589 | r"""Functions for the total Higgs width."""
from . import decay
from math import pi
# SM Higgs BRs, taken from Higgs XSWG for m_h=125.10 GeV
BR_SM = {
'bb': 0.5807,
'WW': 0.2154,
'ZZ': 0.02643,
'gg': 0.08179,
'tautau': 0.06256,
}
# SM total width over Higgs mass
Gamma_rel_SM = 4.101e-3 / 125.10
def Gamma_rel_ff(y_ff, Nc):
"""Higgs partial width to massless fermion pair divided by Higgs mass
as function of the effective Yukawa coupling and "colour" multiplicity."""
return abs(y_ff)**2 * Nc / (8 * pi)
def Gamma_h(par, C):
"""Higgs total width, normalized to its SM value.
For the 5 most frequent SM decay modes, only the interference terms
of SM and NP are taken into account.
Additionally, squared contributions are included for contributions from
modified couplings to the four lightest quarks, that become relevant
for very nonstandard light quark Yukawas.
"""
R_bb = (decay.h_bb(C) - 1) * BR_SM['bb']
R_tautau = (decay.h_tautau(C) - 1) * BR_SM['tautau']
R_WW = (decay.h_ww(C) - 1) * BR_SM['WW']
R_ZZ = (decay.h_zz(C) - 1) * BR_SM['ZZ']
R_gg = (decay.h_gg(C) - 1) * BR_SM['gg']
R_Gamma_SM = 1
R_Gamma_linear = R_bb + R_tautau + R_WW + R_ZZ + R_gg
R_Gamma_quadratic = 0
for q in ['u', 'd']:
for i in [1, 2]:
# here the shift in G_F is neglected
y_eff = 1 / par['GF'] / 2 * C['{}phi_{}{}'.format(q, i, i)]
R_Gamma_quadratic += Gamma_rel_ff(y_eff, Nc=3) / Gamma_rel_SM
return R_Gamma_SM + R_Gamma_linear + R_Gamma_quadratic
| mit | a53858bb3577898590d090dbb76ed94e | 30.78 | 78 | 0.596602 | 2.679595 | false | false | false | false |
bugsnag/bugsnag-python | example/django31/todo/settings.py | 1 | 3237 | # flake8: noqa
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from os.path import dirname, abspath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$#yb)ri-v$^#8o1r0x%5$0yy&aqu2yefwo10zm+(=7!t+d^6ft'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
BUGSNAG = {
'project_root': BASE_DIR,
}
# Application definition
INSTALLED_APPS = [
'notes.apps.NotesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'bugsnag.django.middleware.BugsnagMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| mit | 8e27196f688fec5da83fe12c51eac9bd | 24.690476 | 91 | 0.684584 | 3.476907 | false | false | false | false |
flav-io/flavio | flavio/physics/edms/paraedm.py | 1 | 2479 | """Functions for EDMs of paramagnetic atoms and molecules."""
import flavio
from flavio.physics.edms.common import edm_f
from flavio.physics.edms.slcouplings import CS
from math import pi, sqrt
atoms = {
'Tl': {
'Z': 3, 'N': 4, 'tex': r'\text{Tl}', 'name': 'Thallium'
},
}
molecules = {
'YbF': {
'Z': 1, 'N': 1, 'tex': r'\text{HfF}', 'name': 'Ytterbium fluoride'
},
'HfF': {
'Z': 1, 'N': 1, 'tex': r'\text{HfF}', 'name': 'Hafnium fluoride'
},
'ThO': {
'Z': 1, 'N': 1, 'tex': r'\text{HfF}', 'name': 'Thorium monoxide'
},
}
def de(wc, par, scale):
return edm_f(f='e', par=par, wc=wc, scale=scale, eft='WET-3')
def omega_para(wc_obj, par, molecule):
wc = wc_obj.get_wc('dF=0', scale=2, par=par, eft='WET-3', basis='flavio')
a_de = par['alpha_de({})'.format(molecule)]
a_CS = par['alpha_CS({})'.format(molecule)]
Z = molecules[molecule]['Z']
N = molecules[molecule]['N']
return a_de * de(wc, par, scale=2) + a_CS * CS(wc, par, scale=2, Z=Z, N=N)
def d_para(wc_obj, par, atom):
wc = wc_obj.get_wc('dF=0', scale=2, par=par, eft='WET-3', basis='flavio')
a_de = par['alpha_de({})'.format(atom)]
a_CS = par['alpha_CS({})'.format(atom)]
Z = atoms[atom]['Z']
N = atoms[atom]['N']
return a_de * de(wc, par, scale=2) + a_CS * CS(wc, par, scale=2, Z=Z, N=N)
# Observable and Prediction instances
def make_obs_d(symbol, texsymbol, name):
_obs_name = "d_{}".format(symbol)
_obs = flavio.classes.Observable(name=_obs_name)
_obs.set_description(r"Electric dipole moment of {}".format(name))
_obs.tex = r"$d_{}$".format(texsymbol)
_obs.add_taxonomy(r'Process :: Dipole moments :: Atomic electric dipole moments :: $d_{}$'.format(texsymbol))
flavio.classes.Prediction(_obs_name, lambda wc_obj, par: d_para(wc_obj, par, symbol))
def make_obs_omega(symbol, texsymbol, name):
_obs_name = "omega_{}".format(symbol)
_obs = flavio.classes.Observable(name=_obs_name)
_obs.set_description(r"P- and T-violating energy shift in {}".format(name))
_obs.tex = r"$\omega_{}$".format(texsymbol)
_obs.add_taxonomy(r'Process :: Dipole moments :: Molecular energy shifts :: $d_{}$'.format(texsymbol))
flavio.classes.Prediction(_obs_name, lambda wc_obj, par: omega_para(wc_obj, par, symbol))
for k, v in molecules.items():
make_obs_omega(k, v['tex'], v['name'])
for k, v in atoms.items():
make_obs_d(k, v['tex'], v['name'])
| mit | 8a3d0af490874179a7a9819960f9e2fb | 31.618421 | 113 | 0.594595 | 2.620507 | false | false | false | false |
pythonindia/wye | tests/functional/test_forgot_password_flow.py | 4 | 1705 | import re
from .. import factories as f
def test_forgot_password_flow(base_url, browser, outbox):
f.create_usertype(slug='tutor', display_name='tutor')
user = f.UserFactory()
# Forgot password link must be present on login page
url = base_url + '/accounts/login/'
browser.visit(url)
forgot_pass_link = browser.find_by_text('Forgot Password?')[0]
assert forgot_pass_link
# When clicking on the link it should open a page and prompt for email
forgot_pass_link.click()
assert 'Password Reset' in browser.title
browser.fill('email', 'no-existent-email@email.com')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present(
'The e-mail address is not assigned to any user account')
assert len(outbox) == 0
# Now, enter a valid email
browser.fill('email', user.email)
browser.find_by_css('button[type=submit]')[0].click()
assert browser.is_text_present('We have sent you an e-mail.')
assert len(outbox) == 1
mail = outbox[0]
reset_link = re.findall(r'http.*/reset/.*/', mail.body)
assert reset_link
browser.visit(reset_link[0])
assert "Change Password" in browser.title
assert browser.is_text_present('Change Password')
browser.fill('password1', 'mynewpassword')
browser.fill('password2', 'mynewpassword_wrong')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present(
'You must type the same password each time.')
browser.fill('password1', 'mynewpassword')
browser.fill('password2', 'mynewpassword')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('Your password is now changed.')
| mit | d7dee1bcd23d31f5d26f9982cddce32b | 37.75 | 74 | 0.676833 | 3.479592 | false | false | false | false |
bugsnag/bugsnag-python | bugsnag/legacy.py | 1 | 4636 | from typing import Dict, Any, Tuple, Type
import types
import sys
from bugsnag.breadcrumbs import BreadcrumbType, OnBreadcrumbCallback
from bugsnag.configuration import RequestConfiguration
from bugsnag.client import Client
default_client = Client()
configuration = default_client.configuration
logger = configuration.logger
ExcInfoType = Tuple[Type, Exception, types.TracebackType]
__all__ = ('configure', 'configure_request', 'add_metadata_tab',
'clear_request_config', 'notify', 'start_session', 'auto_notify',
'auto_notify_exc_info', 'before_notify', 'leave_breadcrumb')
def configure(**options):
"""
Configure the Bugsnag notifier application-wide settings.
"""
return configuration.configure(**options)
def configure_request(**options):
"""
Configure the Bugsnag notifier per-request settings.
"""
RequestConfiguration.get_instance().configure(**options)
def add_metadata_tab(tab_name: str, data: Dict[str, Any]):
"""
Add metaData to the tab
bugsnag.add_metadata_tab("user", {"id": "1", "name": "Conrad"})
"""
metadata = RequestConfiguration.get_instance().metadata
if tab_name not in metadata:
metadata[tab_name] = {}
metadata[tab_name].update(data)
def clear_request_config():
"""
Clears the per-request settings.
"""
RequestConfiguration.clear()
def notify(exception: BaseException, **options):
"""
Notify bugsnag of an exception.
"""
if 'severity' in options:
options['severity_reason'] = {'type': 'userSpecifiedSeverity'}
else:
options['severity_reason'] = {'type': 'handledException'}
if (isinstance(exception, (list, tuple)) and len(exception) == 3 and
isinstance(exception[2], types.TracebackType)):
default_client.notify_exc_info(*exception, **options)
else:
if not isinstance(exception, BaseException):
try:
value = repr(exception)
except Exception:
value = '[BADENCODING]'
default_client.configuration.logger.warning(
'Coercing invalid notify() value to RuntimeError: %s' % value
)
exception = RuntimeError(value)
default_client.notify(exception, **options)
def start_session():
"""
Creates a new session
"""
default_client.session_tracker.start_session()
def auto_notify(exception: BaseException, **options):
"""
Notify bugsnag of an exception if auto_notify is enabled.
"""
if configuration.auto_notify:
default_client.notify(
exception,
unhandled=options.pop('unhandled', True),
severity=options.pop('severity', 'error'),
severity_reason=options.pop('severity_reason', {
'type': 'unhandledException'
}),
**options
)
def auto_notify_exc_info(exc_info: ExcInfoType = None, **options):
"""
Notify bugsnag of a exc_info tuple if auto_notify is enabled
"""
if configuration.auto_notify:
info = exc_info or sys.exc_info()
if info is not None:
exc_type, value, tb = info
default_client.notify_exc_info(
exc_type, value, tb,
unhandled=options.pop('unhandled', True),
severity=options.pop('severity', 'error'),
severity_reason=options.pop('severity_reason', {
'type': 'unhandledException'
}),
**options
)
def before_notify(callback):
"""
Add a callback to be called before bugsnag is notified
This can be used to alter the event before sending it to Bugsnag.
"""
configuration.middleware.before_notify(callback)
def leave_breadcrumb(
message: str,
metadata: Dict[str, Any] = {},
type: BreadcrumbType = BreadcrumbType.MANUAL
) -> None:
default_client.leave_breadcrumb(message, metadata, type)
def add_on_breadcrumb(on_breadcrumb: OnBreadcrumbCallback) -> None:
"""
Add a callback to be called each time a breadcrumb is left
This can be used to alter the breadcrumb, or discard it by returning False
"""
default_client.add_on_breadcrumb(on_breadcrumb)
def remove_on_breadcrumb(on_breadcrumb: OnBreadcrumbCallback) -> None:
"""
Remove an existing on_breadcrumb callback
"""
default_client.remove_on_breadcrumb(on_breadcrumb)
def _auto_leave_breadcrumb(
message: str,
metadata: Dict[str, Any],
type: BreadcrumbType
) -> None:
default_client._auto_leave_breadcrumb(message, metadata, type)
| mit | 1bb50ad2a91050eba18add116d73be54 | 27.617284 | 78 | 0.636324 | 4 | false | true | false | false |
flav-io/flavio | flavio/statistics/probability.py | 1 | 79875 | """Probability distributions and auxiliary functions to deal with them."""
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d, RegularGridInterpolator
import scipy.signal
import math
from flavio.math.functions import normal_logpdf, normal_pdf
from flavio.statistics.functions import confidence_level
import warnings
import inspect
from collections import OrderedDict
import yaml
import re
def _camel_to_underscore(s):
    """Convert a CamelCase string to its underscore_separated form."""
    # Two passes: first split "xYz" word boundaries followed by a
    # lowercase run, then any remaining lower/digit-to-upper boundaries.
    boundary_substitutions = [
        ('(.)([A-Z][a-z]+)', r'\1_\2'),
        ('([a-z0-9])([A-Z])', r'\1_\2'),
    ]
    for pattern, replacement in boundary_substitutions:
        s = re.sub(pattern, replacement, s)
    return s.lower()
def string_to_class(string):
    """Resolve a ProbabilityDistribution subclass from a string.

    `string` may either be the literal class name (e.g.
    'UniformDistribution') or the underscore-format name produced by
    `class_to_string` (e.g. 'uniform'). Raises NameError if neither
    lookup succeeds."""
    try:
        # fast path: the string is a name visible in this module's scope
        return eval(string)
    except NameError:
        pass
    # slow path: compare against the underscore names of all known
    # distribution subclasses
    match = next((subclass
                  for subclass in ProbabilityDistribution.get_subclasses()
                  if subclass.class_to_string() == string), None)
    if match is None:
        raise NameError("Distribution " + string + " not found.")
    return match
class ProbabilityDistribution(object):
    """Common base class for all probability distributions.

    Stores the central value and the support of the distribution.
    Subclasses are expected to implement `logpdf`, `get_random`,
    `get_error_left`, and `get_error_right`.
    """

    def __init__(self, central_value, support):
        """Initialize the distribution.

        Parameters:

        - `central_value`: the distribution's central value
        - `support`: range outside of which the PDF vanishes (or is
          negligible)
        """
        self.central_value = central_value
        self.support = support

    @classmethod
    def get_subclasses(cls):
        """Return all subclasses (including subclasses of subclasses)."""
        for subclass in cls.__subclasses__():
            yield from subclass.get_subclasses()
            yield subclass

    def get_central(self):
        """Return the central value of the distribution."""
        return self.central_value

    @property
    def error_left(self):
        """Return the lower error"""
        # delegates to get_error_left, which subclasses must define
        return self.get_error_left()

    @property
    def error_right(self):
        """Return the upper error"""
        # delegates to get_error_right, which subclasses must define
        return self.get_error_right()

    @classmethod
    def class_to_string(cls):
        """Get a string name for a given ProbabilityDistribution subclass.

        This converts camel case to underscore and removes the word
        'distribution'.

        Example: class_to_string(AsymmetricNormalDistribution) returns
        'asymmetric_normal'.
        """
        name = _camel_to_underscore(cls.__name__)
        return name.replace('_distribution', '')

    def get_dict(self, distribution=False, iterate=False, arraytolist=False):
        """Get an ordered dictionary with arguments and values needed to
        instantiate the distribution.

        Optional arguments (default to False):

        - `distribution`: add a 'distribution' key to the dictionary with the
          value being the string representation of the distribution's name
          (e.g. 'asymmetric_normal').
        - `iterate`: If ProbabilityDistribution instances are among the
          arguments (e.g. for KernelDensityEstimate), return the instance's
          get_dict instead of the instance as value.
        - `arraytolist`: convert numpy arrays to lists
        """
        # the constructor's parameter names determine which attributes
        # are needed to re-instantiate the distribution
        args = inspect.signature(self.__class__).parameters.keys()
        d = self.__dict__
        od = OrderedDict()
        if distribution:
            od['distribution'] = self.class_to_string()
        od.update(OrderedDict((a, d[a]) for a in args))
        if iterate:
            for k in od:
                if isinstance(od[k], ProbabilityDistribution):
                    od[k] = od[k].get_dict(distribution=True)
        if arraytolist:
            for k in od:
                if isinstance(od[k], np.ndarray):
                    od[k] = od[k].tolist()
                if isinstance(od[k], list):
                    for i, x in enumerate(od[k]):
                        if isinstance(x, np.ndarray):
                            od[k][i] = od[k][i].tolist()
        for k in od:
            # Convert numpy scalar types to plain Python types so the
            # dictionary can be serialized (e.g. by get_yaml).
            # Note: the former checks against `np.int`/`np.float` relied on
            # deprecated aliases of the builtins that were removed in
            # NumPy 1.24 (and never matched numpy scalars like np.int64);
            # the abstract numpy base classes are used alongside the
            # builtins instead.
            if isinstance(od[k], (int, np.integer)):
                od[k] = int(od[k])
            elif isinstance(od[k], (float, np.floating)):
                od[k] = float(od[k])
            if isinstance(od[k], list):
                for i, x in enumerate(od[k]):
                    if isinstance(x, (float, np.floating)):
                        od[k][i] = float(od[k][i])
                    elif isinstance(x, (int, np.integer)):
                        od[k][i] = int(od[k][i])
        return od

    def get_yaml(self, *args, **kwargs):
        """Get a YAML string representing the dictionary returned by the
        get_dict method.

        Arguments will be passed to `yaml.dump`."""
        od = self.get_dict(distribution=True, iterate=True, arraytolist=True)
        return yaml.dump(od, *args, **kwargs)

    def delta_logpdf(self, x, **kwargs):
        """Return the log PDF at `x` minus the log PDF at the central value.

        An optional `exclude` keyword argument (an iterable of indices) can
        be given for multivariate distributions; the corresponding entries
        are removed from the central value before evaluating the PDF.
        All keyword arguments are passed on to `logpdf`."""
        exclude = kwargs.get('exclude', None)
        if exclude is not None:
            d = len(self.central_value)
            cv = [self.central_value[i] for i in range(d) if i not in exclude]
        else:
            cv = self.central_value
        return self.logpdf(x, **kwargs) - self.logpdf(cv, **kwargs)
class UniformDistribution(ProbabilityDistribution):
    """Distribution with constant PDF in a range and zero otherwise."""

    def __init__(self, central_value, half_range):
        """Initialize the distribution.

        Parameters:

        - central_value: arithmetic mean of the upper and lower range boundaries
        - half_range: half the difference of upper and lower range boundaries

        Example:

        central_value = 5 and half_range = 3 leads to the range [2, 8].
        """
        self.half_range = half_range
        self.range = (central_value - half_range, central_value + half_range)
        super().__init__(central_value, support=self.range)

    def __repr__(self):
        return ('flavio.statistics.probability.UniformDistribution'
                '({}, {})'.format(self.central_value, self.half_range))

    def get_random(self, size=None):
        lo, hi = self.range
        return np.random.uniform(lo, hi, size)

    def _logpdf(self, x):
        # zero probability outside the range (upper boundary excluded)
        if x < self.range[0] or x >= self.range[1]:
            return -np.inf
        # constant PDF of 1/(full range width) inside
        return -math.log(2 * self.half_range)

    def logpdf(self, x):
        return np.vectorize(self._logpdf)(x)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        return confidence_level(nsigma) * self.half_range

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        return confidence_level(nsigma) * self.half_range
class DeltaDistribution(ProbabilityDistribution):
    """Delta distribution that is non-vanishing only at a single point."""

    def __init__(self, central_value):
        """Initialize the distribution.

        Parameters:

        - central_value: point where the PDF does not vanish.
        """
        super().__init__(central_value, support=(central_value, central_value))

    def __repr__(self):
        return ('flavio.statistics.probability.DeltaDistribution'
                '({})'.format(self.central_value))

    def get_random(self, size=None):
        if size is None:
            return self.central_value
        return self.central_value * np.ones(size)

    def logpdf(self, x):
        if np.ndim(x) == 0:
            # scalar input: 0 at the central value, -inf everywhere else
            return 0. if x == self.central_value else -np.inf
        # array input: elementwise version of the same rule
        return np.where(np.asarray(x) == self.central_value, 0., -np.inf)

    def get_error_left(self, *args, **kwargs):
        return 0

    def get_error_right(self, *args, **kwargs):
        return 0
class NormalDistribution(ProbabilityDistribution):
    """Univariate normal or Gaussian distribution."""

    def __init__(self, central_value, standard_deviation):
        """Initialize the distribution.

        Parameters:

        - central_value: location (mode and mean)
        - standard_deviation: standard deviation
        """
        # restrict the support to +- 6 sigma around the mean
        super().__init__(central_value,
                         support=(central_value - 6 * standard_deviation,
                                  central_value + 6 * standard_deviation))
        if standard_deviation <= 0:
            raise ValueError("Standard deviation must be positive number")
        self.standard_deviation = standard_deviation

    def __repr__(self):
        return ('flavio.statistics.probability.NormalDistribution'
                '({}, {})'.format(self.central_value, self.standard_deviation))

    def get_random(self, size=None):
        mu, sigma = self.central_value, self.standard_deviation
        return np.random.normal(mu, sigma, size)

    def logpdf(self, x):
        return normal_logpdf(x, self.central_value, self.standard_deviation)

    def pdf(self, x):
        return normal_pdf(x, self.central_value, self.standard_deviation)

    def cdf(self, x):
        mu, sigma = self.central_value, self.standard_deviation
        return scipy.stats.norm.cdf(x, mu, sigma)

    def ppf(self, x):
        mu, sigma = self.central_value, self.standard_deviation
        return scipy.stats.norm.ppf(x, mu, sigma)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        return nsigma * self.standard_deviation

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        return nsigma * self.standard_deviation
class LogNormalDistribution(ProbabilityDistribution):
    """Univariate log-normal distribution."""

    def __init__(self, central_value, factor):
        r"""Initialize the distribution.

        Parameters:

        - central_value: median of the distribution (neither mode nor mean!).
          Can be positive or negative, but must be nonzero.
        - factor: must be larger than 1. 68% of the probability will be between
          `central_value * factor` and `central_value / factor`.

        The mean and standard deviation of the underlying normal distribution
        correspond to `log(abs(central_value))` and `log(factor)`, respectively.

        Example:

        `LogNormalDistribution(central_value=3, factor=2)`
        corresponds to the distribution of the exponential of a normally
        distributed variable with mean ln(3) and standard deviation ln(2).
        68% of the probability is within 6=3*2 and 1.5=3/2.
        """
        if central_value == 0:
            raise ValueError("Central value must not be zero")
        if factor <= 1:
            raise ValueError("Factor must be bigger than 1")
        self.factor = factor
        self.log_standard_deviation = np.log(factor)
        self.log_central_value = math.log(abs(central_value))
        # support out to "6 sigma" in the underlying normal distribution:
        # |x| reaches up to exp(log|central_value| + 6 sigma)
        slim = math.exp(math.log(abs(central_value))
                        + 6 * self.log_standard_deviation)
        if central_value < 0:
            self.central_sign = -1
            # Bug fix: for a negative central value the distribution lives on
            # the negative axis, so the support must extend from -slim up to
            # 0. Previously the exponent sign was flipped and the (positive)
            # value used as lower boundary, giving a reversed, positive range.
            super().__init__(central_value,
                             support=(-slim, 0))
        else:
            self.central_sign = +1
            super().__init__(central_value,
                             support=(0, slim))

    def __repr__(self):
        return 'flavio.statistics.probability.LogNormalDistribution' + \
               '({}, {})'.format(self.central_value, self.factor)

    def get_random(self, size=None):
        # mirror the draw for negative central values
        s = self.central_sign
        return s * np.random.lognormal(self.log_central_value, self.log_standard_deviation, size)

    def logpdf(self, x):
        s = self.central_sign
        return scipy.stats.lognorm.logpdf(s * x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)

    def pdf(self, x):
        s = self.central_sign
        return scipy.stats.lognorm.pdf(s * x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)

    def cdf(self, x):
        # for a negative central value the CDF is the mirrored survival function
        if self.central_sign == -1:
            return 1 - scipy.stats.lognorm.cdf(-x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
        else:
            return scipy.stats.lognorm.cdf(x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)

    def ppf(self, x):
        if self.central_sign == -1:
            return -scipy.stats.lognorm.ppf(1 - x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
        else:
            return scipy.stats.lognorm.ppf(x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        cl = confidence_level(nsigma)
        return self.central_value - self.ppf(0.5 - cl/2.)

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        cl = confidence_level(nsigma)
        return self.ppf(0.5 + cl/2.) - self.central_value
class AsymmetricNormalDistribution(ProbabilityDistribution):
    """An asymmetric normal distribution obtained by gluing together two
    half-Gaussians and demanding the PDF to be continuous."""

    def __init__(self, central_value, right_deviation, left_deviation):
        """Initialize the distribution.

        Parameters:

        - central_value: mode of the distribution (not equal to its mean!)
        - right_deviation: standard deviation of the upper half-Gaussian
        - left_deviation: standard deviation of the lower half-Gaussian
        """
        super().__init__(central_value,
                         support=(central_value - 6 * left_deviation,
                                  central_value + 6 * right_deviation))
        if right_deviation <= 0 or left_deviation <= 0:
            raise ValueError(
                "Left and right standard deviations must be positive numbers")
        self.right_deviation = right_deviation
        self.left_deviation = left_deviation
        # peak heights of the two Gaussians at the mode, used below to
        # rescale the halves such that the glued PDF is continuous
        self.p_right = normal_pdf(
            self.central_value, self.central_value, self.right_deviation)
        self.p_left = normal_pdf(
            self.central_value, self.central_value, self.left_deviation)

    def __repr__(self):
        return ('flavio.statistics.probability.AsymmetricNormalDistribution'
                '({}, {}, {})'.format(self.central_value,
                                      self.right_deviation,
                                      self.left_deviation))

    def get_random(self, size=None):
        if size is None:
            return self._get_random()
        return np.array([self._get_random() for _ in range(size)])

    def _get_random(self):
        r = np.random.uniform()
        # probability mass contained in the lower half-Gaussian
        p_lower = abs(self.left_deviation /
                      (self.right_deviation + self.left_deviation))
        if r > p_lower:
            return self.central_value + abs(np.random.normal(0, self.right_deviation))
        return self.central_value - abs(np.random.normal(0, self.left_deviation))

    def _logpdf(self, x):
        if x < self.central_value:
            # lower half: scale factor making the PDF continuous at the mode
            scale = 2 * self.p_right / (self.p_left + self.p_right)
            return math.log(scale) + normal_logpdf(x, self.central_value, self.left_deviation)
        # upper half: analogous scale factor
        scale = 2 * self.p_left / (self.p_left + self.p_right)
        return math.log(scale) + normal_logpdf(x, self.central_value, self.right_deviation)

    def logpdf(self, x):
        return np.vectorize(self._logpdf)(x)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        return nsigma * self.left_deviation

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        return nsigma * self.right_deviation
class HalfNormalDistribution(ProbabilityDistribution):
    """Half-normal distribution with zero PDF above or below the mode."""

    def __init__(self, central_value, standard_deviation):
        """Initialize the distribution.

        Parameters:

        - central_value: mode of the distribution.
        - standard_deviation:
          If positive, the PDF is zero below central_value and (twice) that of
          a Gaussian with this standard deviation above.
          If negative, the PDF is zero above central_value and (twice) that of
          a Gaussian with standard deviation equal to abs(standard_deviation)
          below.
        """
        super().__init__(central_value,
                         support=sorted((central_value,
                                         central_value + 6 * standard_deviation)))
        if standard_deviation == 0:
            raise ValueError("Standard deviation must be non-zero number")
        self.standard_deviation = standard_deviation

    def __repr__(self):
        return ('flavio.statistics.probability.HalfNormalDistribution'
                '({}, {})'.format(self.central_value, self.standard_deviation))

    def get_random(self, size=None):
        sd = self.standard_deviation
        # the sign of sd decides on which side of the mode the draws lie
        return self.central_value + np.sign(sd) * abs(np.random.normal(0, abs(sd), size))

    def _logpdf(self, x):
        # zero probability on the "wrong" side of the mode
        if np.sign(self.standard_deviation) * (x - self.central_value) < 0:
            return -np.inf
        # twice the Gaussian PDF on the allowed side
        return math.log(2) + normal_logpdf(x, self.central_value,
                                           abs(self.standard_deviation))

    def logpdf(self, x):
        return np.vectorize(self._logpdf)(x)

    def cdf(self, x):
        if np.sign(self.standard_deviation) == -1:
            # mirror the distribution at the mode
            return 1 - scipy.stats.halfnorm.cdf(-x,
                                                loc=-self.central_value,
                                                scale=-self.standard_deviation)
        return scipy.stats.halfnorm.cdf(x,
                                        loc=self.central_value,
                                        scale=self.standard_deviation)

    def ppf(self, x):
        if np.sign(self.standard_deviation) == -1:
            return -scipy.stats.halfnorm.ppf(1 - x,
                                             loc=-self.central_value,
                                             scale=-self.standard_deviation)
        return scipy.stats.halfnorm.ppf(x,
                                        loc=self.central_value,
                                        scale=self.standard_deviation)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        if self.standard_deviation >= 0:
            return 0
        return nsigma * (-self.standard_deviation)  # return a positive value!

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        if self.standard_deviation <= 0:
            return 0
        return nsigma * self.standard_deviation
class GaussianUpperLimit(HalfNormalDistribution):
    """Upper limit defined as a half-normal distribution."""

    def __init__(self, limit, confidence_level):
        """Initialize the distribution.

        Parameters:

        - limit: value of the upper limit
        - confidence_level: confidence_level of the upper limit. Float between
          0 and 1.
        """
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError("Confidence level should be between 0 und 1")
        if limit <= 0:
            raise ValueError("The upper limit should be a positive number")
        sd = self.get_standard_deviation(limit, confidence_level)
        super().__init__(central_value=0, standard_deviation=sd)
        self.limit = limit
        self.confidence_level = confidence_level

    def __repr__(self):
        return ('flavio.statistics.probability.GaussianUpperLimit'
                '({}, {})'.format(self.limit, self.confidence_level))

    def get_standard_deviation(self, limit, confidence_level):
        """Convert the confidence level into a Gaussian standard deviation"""
        # the limit sits at the point where the half-normal CDF equals the
        # confidence level, i.e. the (0.5 + CL/2) quantile of a full Gaussian
        return limit / scipy.stats.norm.ppf(0.5 + confidence_level / 2.)
class GammaDistribution(ProbabilityDistribution):
    r"""A Gamma distribution defined like the `gamma` distribution in
    `scipy.stats` (with parameters `a`, `loc`, `scale`).

    The `central_value` attribute returns the location of the mode.
    """

    def __init__(self, a, loc, scale):
        if loc > 0:
            raise ValueError("loc must be negative or zero")
        # "frozen" scipy distribution object
        self.scipy_dist = scipy.stats.gamma(a=a, loc=loc, scale=scale)
        # mode of the gamma distribution
        mode = loc + (a - 1) * scale
        # support extends until the CDF is roughly "6 sigma"
        support_limit = self.scipy_dist.ppf(1 - 2e-9)
        super().__init__(central_value=mode,  # the mode
                         support=(loc, support_limit))
        self.a = a
        self.loc = loc
        self.scale = scale

    def __repr__(self):
        return ('flavio.statistics.probability.GammaDistribution'
                '({}, {}, {})'.format(self.a, self.loc, self.scale))

    def get_random(self, size):
        return self.scipy_dist.rvs(size=size)

    def cdf(self, x):
        return self.scipy_dist.cdf(x)

    def ppf(self, x):
        return self.scipy_dist.ppf(x)

    def logpdf(self, x):
        return self.scipy_dist.logpdf(x)

    def _find_error_cdf(self, confidence_level):
        # Find the CDF value at the left boundary of the CL range by
        # demanding that the PDF takes the same value at both of its
        # boundaries (highest posterior density interval).
        def diff_logpdf(a):
            return (self.logpdf(self.ppf(a))
                    - self.logpdf(self.ppf(a + confidence_level)))
        return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level - 1e-6)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        cl = confidence_level(nsigma)
        return self.central_value - self.ppf(self._find_error_cdf(cl))

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        cl = confidence_level(nsigma)
        return self.ppf(self._find_error_cdf(cl) + cl) - self.central_value
class GammaDistributionPositive(ProbabilityDistribution):
    r"""A Gamma distribution defined like the `gamma` distribution in
    `scipy.stats` (with parameters `a`, `loc`, `scale`), but restricted to
    positive values for x and correspondingly rescaled PDF.

    The `central_value` attribute returns the location of the mode.
    """

    def __init__(self, a, loc, scale):
        """Initialize the distribution.

        Parameters:

        - `a`, `loc`, `scale`: shape, location, and scale parameters as in
          `scipy.stats.gamma`; `loc` must be negative or zero.
        """
        if loc > 0:
            raise ValueError("loc must be negative or zero")
        # "frozen" scipy distribution object (without restricting x>0!)
        self.scipy_dist = scipy.stats.gamma(a=a, loc=loc, scale=scale)
        mode = loc + (a-1)*scale
        if mode < 0:
            # unrestricted mode is negative: the restricted mode is at x=0
            mode = 0
        # support extends until the CDF is roughly "6 sigma", assuming x>0
        support_limit = self.scipy_dist.ppf(1-2e-9*(1-self.scipy_dist.cdf(0)))
        super().__init__(central_value=mode,  # the mode
                         support=(0, support_limit))
        self.a = a
        self.loc = loc
        self.scale = scale
        # scale factor for PDF to account for x>0
        self._pdf_scale = 1/(1 - self.scipy_dist.cdf(0))

    def __repr__(self):
        return 'flavio.statistics.probability.GammaDistributionPositive' + \
               '({}, {}, {})'.format(self.a, self.loc, self.scale)

    def get_random(self, size=None):
        """Draw a random number from the distribution.

        If size is not None but an integer N, return an array of N numbers."""
        if size is None:
            # Bug fix: previously this called `_get_random(size=None)`, but
            # scipy's `rvs(size=None)` returns a scalar that cannot be
            # boolean-indexed in `_get_random`. Instead, draw size-1 arrays,
            # retrying while the draw was rejected as negative, and return
            # a scalar.
            r = self._get_random(size=1)
            while len(r) == 0:
                r = self._get_random(size=1)
            return r[0]
        else:
            # some iteration necessary as discarding negative values
            # might lead to too small size
            r = np.array([], dtype=float)
            while len(r) < size:
                r = np.concatenate((r, self._get_random(size=2*size)))
            return r[:size]

    def _get_random(self, size):
        # rejection sampling: draw from the unrestricted gamma distribution
        # and discard negative values
        r = self.scipy_dist.rvs(size=size)
        return r[(r >= 0)]

    def cdf(self, x):
        # rescale the unrestricted CDF such that cdf(0)=0 and cdf(inf)=1
        cdf0 = self.scipy_dist.cdf(0)
        cdf = (self.scipy_dist.cdf(x) - cdf0)/(1-cdf0)
        return np.piecewise(
            np.asarray(x, dtype=float),
            [x<0, x>=0],
            [0., cdf])  # return 0 for negative x

    def ppf(self, x):
        # inverse of the rescaled CDF
        cdf0 = self.scipy_dist.cdf(0)
        return self.scipy_dist.ppf((1-cdf0)*x + cdf0)

    def logpdf(self, x):
        # return -inf for negative x values
        inf0 = np.piecewise(np.asarray(x, dtype=float), [x<0, x>=0], [-np.inf, 0.])
        return inf0 + self.scipy_dist.logpdf(x) + np.log(self._pdf_scale)

    def _find_error_cdf(self, confidence_level):
        # find the value of the CDF at the position of the left boundary
        # of the `confidence_level`% CL range by demanding that the value
        # of the PDF is the same at the two boundaries
        def x_left(a):
            return self.ppf(a)
        def x_right(a):
            return self.ppf(a + confidence_level)
        def diff_logpdf(a):
            logpdf_x_left = self.logpdf(x_left(a))
            logpdf_x_right = self.logpdf(x_right(a))
            return logpdf_x_left - logpdf_x_right
        return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)

    def get_error_left(self, nsigma=1, **kwargs):
        """Return the lower error"""
        if self.logpdf(0) > self.logpdf(self.ppf(confidence_level(nsigma))):
            # look at a one-sided 1 sigma range. If the PDF at 0
            # is smaller than the PDF at the boundary of this range, it means
            # that the left-hand error is not meaningful to define.
            return self.central_value
        else:
            a = self._find_error_cdf(confidence_level(nsigma))
            return self.central_value - self.ppf(a)

    def get_error_right(self, nsigma=1, **kwargs):
        """Return the upper error"""
        one_sided_error = self.ppf(confidence_level(nsigma))
        if self.logpdf(0) > self.logpdf(one_sided_error):
            # look at a one-sided 1 sigma range. If the PDF at 0
            # is smaller than the PDF at the boundary of this range, return the
            # boundary of the range as the right-hand error
            return one_sided_error
        else:
            a = self._find_error_cdf(confidence_level(nsigma))
            return self.ppf(a + confidence_level(nsigma)) - self.central_value
class GammaUpperLimit(GammaDistributionPositive):
    r"""Gamma distribution with x restricted to be positive appropriate for
    a positive quantitity obtained from a low-statistics counting experiment,
    e.g. a rare decay rate, given an upper limit on x."""

    def __init__(self, counts_total, counts_background, limit, confidence_level):
        r"""Initialize the distribution.

        Parameters:

        - counts_total: observed total number (signal and background) of counts.
        - counts_background: number of expected background counts, assumed to be
          known.
        - limit: upper limit on x, which is proportional (with a positive
          proportionality factor) to the number of signal events.
        - confidence_level: confidence level of the upper limit, i.e. the value
          of the CDF at the limit. Float between 0 and 1. Frequently used values
          are 0.90 and 0.95.
        """
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError("Confidence level should be between 0 und 1")
        if limit <= 0:
            raise ValueError("The upper limit should be a positive number")
        if counts_total < 0:
            raise ValueError("counts_total should be a positive number or zero")
        if counts_background < 0:
            raise ValueError("counts_background should be a positive number or zero")
        self.limit = limit
        self.confidence_level = confidence_level
        self.counts_total = counts_total
        self.counts_background = counts_background
        shape, location, scale_factor = self._get_a_loc_scale()
        super().__init__(a=shape, loc=location, scale=scale_factor)

    def __repr__(self):
        return ('flavio.statistics.probability.GammaUpperLimit'
                '({}, {}, {}, {})'.format(self.counts_total,
                                          self.counts_background,
                                          self.limit,
                                          self.confidence_level))

    def _get_a_loc_scale(self):
        """Convert the counts and limit to the input parameters needed for
        GammaDistributionPositive"""
        # distribution in terms of raw counts (scale 1)
        a = self.counts_total + 1
        dist_unscaled = GammaDistributionPositive(a=a,
                                                  loc=-self.counts_background,
                                                  scale=1)
        limit_unscaled = dist_unscaled.ppf(self.confidence_level)
        # rescale such that the requested limit sits at the requested CL
        scale = self.limit / limit_unscaled
        return a, -self.counts_background * scale, scale
class NumericalDistribution(ProbabilityDistribution):
    """Univariate distribution defined in terms of numerical values for the
    PDF."""
    def __init__(self, x, y, central_value=None):
        """Initialize a 1D numerical distribution.
        Parameters:
        - `x`: x-axis values. Must be a 1D array of real values in strictly
        ascending order (but not necessarily evenly spaced)
        - `y`: PDF values. Must be a 1D array of real positive values with the
        same length as `x`
        - central_value: if None (default), will be set to the mode of the
        distribution, i.e. the x-value where y is largest (by looking up
        the input arrays, i.e. without interpolation!)
        """
        self.x = x
        self.y = y
        if central_value is not None:
            if x[0] <= central_value <= x[-1]:
                super().__init__(central_value=central_value,
                                 support=(x[0], x[-1]))
            else:
                raise ValueError("Central value must be within range provided")
        else:
            # default: take the mode from the tabulated values
            mode = x[np.argmax(y)]
            super().__init__(central_value=mode, support=(x[0], x[-1]))
        self.y_norm = y / np.trapz(y, x=x)  # normalize PDF to 1
        # clip negative PDF values (can arise e.g. from interpolation noise)
        self.y_norm[self.y_norm < 0] = 0
        # PDF evaluates to 0 outside the tabulated range
        self.pdf_interp = interp1d(x, self.y_norm,
                                   fill_value=0, bounds_error=False)
        # approximate the CDF by a cumulative left Riemann sum of the PDF
        _cdf = np.zeros(len(x))
        _cdf[1:] = np.cumsum(self.y_norm[:-1] * np.diff(x))
        _cdf = _cdf/_cdf[-1] # normalize CDF to 1
        # the PPF (inverse CDF) is obtained by swapping the interpolation axes
        self.ppf_interp = interp1d(_cdf, x)
        self.cdf_interp = interp1d(x, _cdf)
    def __repr__(self):
        return 'flavio.statistics.probability.NumericalDistribution' + \
               '({}, {})'.format(self.x, self.y)
    def get_random(self, size=None):
        """Draw a random number from the distribution.
        If size is not None but an integer N, return an array of N numbers."""
        # inverse transform sampling: push uniform draws through the PPF
        r = np.random.uniform(size=size)
        return self.ppf_interp(r)
    def ppf(self, x):
        return self.ppf_interp(x)
    def cdf(self, x):
        return self.cdf_interp(x)
    def pdf(self, x):
        return self.pdf_interp(x)
    def logpdf(self, x):
        # ignore warning from log(0)=-np.inf
        with np.errstate(divide='ignore', invalid='ignore'):
            return np.log(self.pdf_interp(x))
    def _find_error_cdf(self, confidence_level):
        # find the value of the CDF at the position of the left boundary
        # of the `confidence_level`% CL range by demanding that the value
        # of the PDF is the same at the two boundaries
        def x_left(a):
            return self.ppf(a)
        def x_right(a):
            return self.ppf(a + confidence_level)
        def diff_logpdf(a):
            logpdf_x_left = self.logpdf(x_left(a))
            logpdf_x_right = self.logpdf(x_right(a))
            return logpdf_x_left - logpdf_x_right
        return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)
    def get_error_left(self, nsigma=1, method='central'):
        """Return the lower error.
        'method' should be one of:
        - 'central' for a central interval (same probability on both sides of
        the central value)
        - 'hpd' for highest posterior density, i.e. probability is larger inside
        the interval than outside
        - 'limit' for a one-sided error, i.e. a lower limit"""
        if method == 'limit':
            return self.central_value - self.ppf(1 - confidence_level(nsigma))
        cdf_central = self.cdf(self.central_value)
        err_left = self.central_value - self.ppf(cdf_central * (1 - confidence_level(nsigma)))
        if method == 'central':
            return err_left
        elif method == 'hpd':
            # if the central interval is already an HPD interval (equal PDF
            # values at both boundaries), reuse it
            if self.pdf(self.central_value + self.get_error_right(method='central')) == self.pdf(self.central_value - err_left):
                return err_left
            try:
                a = self._find_error_cdf(confidence_level(nsigma))
            except ValueError:
                # root finding failed: no HPD interval could be determined
                return np.nan
            return self.central_value - self.ppf(a)
        else:
            raise ValueError("Method " + str(method) + " unknown")
    def get_error_right(self, nsigma=1, method='central'):
        """Return the upper error
        'method' should be one of:
        - 'central' for a central interval (same probability on both sides of
        the central value)
        - 'hpd' for highest posterior density, i.e. probability is larger inside
        the interval than outside
        - 'limit' for a one-sided error, i.e. an upper limit"""
        if method == 'limit':
            return self.ppf(confidence_level(nsigma)) - self.central_value
        cdf_central = self.cdf(self.central_value)
        err_right = self.ppf(cdf_central + (1 - cdf_central) * confidence_level(nsigma)) - self.central_value
        if method == 'central':
            return err_right
        elif method == 'hpd':
            # see get_error_left: reuse the central interval if it is HPD
            if self.pdf(self.central_value - self.get_error_left(method='central')) == self.pdf(self.central_value + err_right):
                return err_right
            try:
                a = self._find_error_cdf(confidence_level(nsigma))
            except ValueError:
                # root finding failed: no HPD interval could be determined
                return np.nan
            return self.ppf(a + confidence_level(nsigma)) - self.central_value
        else:
            raise ValueError("Method " + str(method) + " unknown")
    @classmethod
    def from_pd(cls, pd, nsteps=1000):
        # Construct a NumericalDistribution from an arbitrary
        # ProbabilityDistribution by sampling its PDF on a uniform grid
        # covering its support.
        if isinstance(pd, NumericalDistribution):
            return pd
        _x = np.linspace(pd.support[0], pd.support[-1], nsteps)
        _y = np.exp(pd.logpdf(_x))
        return cls(central_value=pd.central_value, x=_x, y=_y)
class GeneralGammaDistributionPositive(NumericalDistribution):
    r"""Distribution appropriate for
    a positive quantitity obtained from a low-statistics counting experiment,
    e.g. a rare decay rate.

    The difference to `GammaUpperLimit` is that this class also allows to
    specify an uncertainty on the number of background events. The result
    is a numerical distribution obtained from the convolution of a normal
    distribution (for the background uncertainty) and a gamma distribution,
    restricted to positive values.

    In contrast to `GammaUpperLimit`, the scale factor (the relational between
    the observable of interest and the raw number of counts) is not determined
    from a limit and a confidence level, but specified explicitly.
    For the case of a limit, see `GeneralGammaUpperLimit`.
    """

    def __init__(self,
                 scale_factor=1,
                 counts_total=None,
                 counts_background=None,
                 counts_signal=None,
                 background_variance=0):
        r"""Initialize the distribution.

        Parameters:

        - `scale_factor`: scale factor by which the number of counts is multiplied
          to get the observable of interest.
        - `counts_total`: observed total number (signal and background) of counts.
        - `counts_background`: expected mean number of expected background counts
        - `counts_signal`: mean obseved number of signal events
        - `background_variance`: standard deviation of the expected number of
          background events

        Of the three parameters `counts_total`, `counts_background`, and
        `counts_signal`, only two must be specified. The third one will
        be determined from the relation

        `counts_total = counts_signal + counts_background`

        Note that if `background_variance=0`, it makes more sense to use
        `GammaUpperLimit`, which is equivalent but analytical rather than
        numerical.
        """
        if scale_factor <= 0:
            raise ValueError("Scale factor should be positive")
        self.scale_factor = scale_factor
        if counts_total is not None and counts_total < 0:
            raise ValueError("counts_total should be a positive number, zero, or None")
        if counts_background is not None and counts_background <= 0:
            raise ValueError("counts_background should be a positive number or None")
        if background_variance < 0:
            raise ValueError("background_variance should be a positive number")
        if [counts_total, counts_signal, counts_background].count(None) == 0:
            # if all three are specified, check the relation holds!
            if abs((counts_total - counts_background - counts_signal)/(counts_total if counts_total != 0 else 1)) > 1e-15:
                raise ValueError("The relation `counts_total = counts_signal + counts_background` is not satisfied")
        # fill in whichever of the three count numbers was left as None
        if counts_background is None:
            self.counts_background = counts_total - counts_signal
        else:
            self.counts_background = counts_background
        if counts_signal is None:
            self.counts_signal = counts_total - counts_background
        else:
            self.counts_signal = counts_signal
        if counts_total is None:
            self.counts_total = counts_signal + counts_background
        else:
            self.counts_total = counts_total
        self.background_variance = background_variance
        x, y = self._get_xy()
        if self.counts_total != 0 and self.background_variance/self.counts_total <= 1/100.:
            warnings.warn("For vanishing or very small background variance, "
                          "it is safer to use GammaUpperLimit instead of "
                          "GeneralGammaUpperLimit to avoid numerical "
                          "instability.")
        # NumericalDistribution takes care of normalizing the PDF
        super().__init__(x=x, y=y)

    def __repr__(self):
        return ('flavio.statistics.probability.GeneralGammaDistributionPositive'
                '({}, counts_total={}, counts_signal={}, '
                'background_variance={})').format(self.scale_factor,
                                                  self.counts_total,
                                                  self.counts_signal,
                                                  self.background_variance)

    def _get_xy(self):
        """Tabulate the PDF of the (rescaled) distribution restricted to
        nonnegative values and return it as a pair of arrays `(x, y)`."""
        if self.background_variance == 0:
            # this is a bit pointless as in this case it makes more
            # sense to use GammaUpperLimit itself
            gamma_unscaled = GammaDistributionPositive(a = self.counts_total + 1,
                                                       loc = -self.counts_background,
                                                       scale = 1)
            num_unscaled = NumericalDistribution.from_pd(gamma_unscaled)
        else:
            # define a gamma distribution (with x>loc, not x>0!) and convolve
            # it with a Gaussian
            gamma_unscaled = GammaDistribution(a = self.counts_total + 1,
                                               loc = -self.counts_background,
                                               scale = 1)
            norm_bg = NormalDistribution(0, self.background_variance)
            num_unscaled = convolve_distributions([gamma_unscaled, norm_bg], central_values='sum')
        # now that we have convolved, cut off anything below x=0
        x = num_unscaled.x
        y = num_unscaled.y_norm
        y = y[np.where(x >= 0)]
        x = x[np.where(x >= 0)]
        if x[0] != 0:  # make sure the PDF at 0 exists
            x = np.insert(x, 0, 0.)  # add 0 as first element
            y = np.insert(y, 0, y[0])  # copy first element
        # (Dead code removed here: a bare no-op `y[0]` expression and an
        # unused `NumericalDistribution(x, y)` instantiation.)
        x = x * self.scale_factor
        return x, y
class GeneralGammaUpperLimit(GeneralGammaDistributionPositive):
    r"""Distribution appropriate for
    a positive quantitity obtained from a low-statistics counting experiment,
    e.g. a rare decay rate, given an upper limit on x.

    The difference to `GammaUpperLimit` is that this class also allows to
    specify an uncertainty on the number of background events. The result
    is a numerical distribution obtained from the convolution of a normal
    distribution (for the background uncertainty) and a gamma distribution,
    restricted to positive values.

    The only difference to `GeneralGammaDistributionPositive` is that the scale
    factor is determined from the limit and confidence level.
    """

    def __init__(self,
                 limit, confidence_level,
                 counts_total=None,
                 counts_background=None,
                 counts_signal=None,
                 background_variance=0):
        r"""Initialize the distribution.

        Parameters:

        - `limit`: upper limit on x, which is proportional (with a positive
          proportionality factor) to the number of signal events.
        - `confidence_level`: confidence level of the upper limit, i.e. the value
          of the CDF at the limit. Float between 0 and 1. Frequently used values
          are 0.90 and 0.95.
        - `counts_total`: observed total number (signal and background) of counts.
        - `counts_background`: expected mean number of expected background counts
        - `counts_signal`: mean obseved number of signal events
        - `background_variance`: standard deviation of the expected number of
          background events

        Of the three parameters `counts_total`, `counts_background`, and
        `counts_signal`, only two must be specified. The third one will
        be determined from the relation

        `counts_total = counts_signal + counts_background`

        Note that if `background_variance=0`, it makes more sense to use
        `GammaUpperLimit`, which is equivalent but analytical rather than
        numerical.
        """
        self.limit = limit
        self.confidence_level = confidence_level
        # first construct the distribution in terms of raw counts ...
        unscaled = GeneralGammaDistributionPositive(
            scale_factor=1,
            counts_total=counts_total,
            counts_background=counts_background,
            counts_signal=counts_signal,
            background_variance=background_variance)
        # ... then fix the scale factor such that the CDF at `limit`
        # equals `confidence_level`
        limit_unscaled = unscaled.ppf(self.confidence_level)
        super().__init__(
            scale_factor=self.limit / limit_unscaled,
            counts_total=counts_total,
            counts_background=counts_background,
            counts_signal=counts_signal,
            background_variance=background_variance)

    def __repr__(self):
        return ('flavio.statistics.probability.GeneralGammaUpperLimit'
                '({}, {}, counts_total={}, counts_signal={}, '
                'background_variance={})').format(self.limit,
                                                  self.confidence_level,
                                                  self.counts_total,
                                                  self.counts_signal,
                                                  self.background_variance)
class KernelDensityEstimate(NumericalDistribution):
    """Univariate kernel density estimate.

    Parameters:

    - `data`: 1D array
    - `kernel`: instance of `ProbabilityDistribution` used as smoothing kernel
    - `n_bins` (optional): number of bins used in the intermediate step. This
      normally does not have to be changed.
    """

    def __init__(self, data, kernel, n_bins=None):
        self.data = data
        assert kernel.central_value == 0, "Kernel density must have zero central value"
        self.kernel = kernel
        self.n = len(data)
        self.n_bins = min(1000, self.n) if n_bins is None else n_bins
        # histogram the data, then smooth it by convolving with the kernel
        y, x_edges = np.histogram(data, bins=self.n_bins, density=True)
        x = (x_edges[:-1] + x_edges[1:]) / 2.
        self.y_raw = y
        self.raw_dist = NumericalDistribution(x, y)
        cdist = convolve_distributions([self.raw_dist, self.kernel], 'sum')
        super().__init__(cdist.x, cdist.y)

    def __repr__(self):
        return ('flavio.statistics.probability.KernelDensityEstimate'
                '({}, {}, {})'.format(self.data, repr(self.kernel), self.n_bins))
class GaussianKDE(KernelDensityEstimate):
    """Univariate Gaussian kernel density estimate.

    Parameters:

    - `data`: 1D array
    - `bandwidth` (optional): standard deviation of the Gaussian smoothing
      kernel. If not provided, Scott's rule is used to estimate it.
    - `n_bins` (optional): number of bins used in the intermediate step. This
      normally does not have to be changed.
    """

    def __init__(self, data, bandwidth=None, n_bins=None):
        if bandwidth is None:
            # Scott's rule of thumb: n^(-1/5) times the sample standard deviation
            bandwidth = len(data) ** (-1 / 5.) * np.std(data)
        self.bandwidth = bandwidth
        super().__init__(data=data,
                         kernel=NormalDistribution(0, self.bandwidth),
                         n_bins=n_bins)

    def __repr__(self):
        return 'flavio.statistics.probability.GaussianKDE' + \
            '({}, {}, {})'.format(self.data, self.bandwidth, self.n_bins)
class MultivariateNormalDistribution(ProbabilityDistribution):
    """A multivariate normal distribution.

    Parameters:

    - central_value: the location vector
    - covariance: the covariance matrix
    - standard_deviation: the square root of the variance vector
    - correlation: the correlation matrix

    If the covariance matrix is not specified, standard_deviation and the
    correlation matrix have to be specified.

    Methods:

    - get_random(size=None): get `size` random numbers (default: a single one)
    - logpdf(x, exclude=None): get the logarithm of the probability density
      function. If an iterable of integers is given for `exclude`, the parameters
      at these positions will be removed from the covariance before evaluating
      the PDF, effectively ignoring certain dimensions.

    Properties:

    - error_left, error_right: both return the vector of standard deviations
    """

    def __init__(self, central_value, covariance=None,
                 standard_deviation=None, correlation=None):
        """Initialize PDF instance.

        Parameters:

        - central_value: vector of means, shape (n)
        - covariance: covariance matrix, shape (n,n)
        - standard_deviation: vector of standard deviations, shape (n);
          only used when covariance is None
        - correlation: correlation matrix, shape (n,n), or a single number
          that is used as the uniform off-diagonal correlation
        """
        if covariance is not None:
            self.covariance = covariance
            self.standard_deviation = np.sqrt(np.diag(self.covariance))
            self.correlation = self.covariance/np.outer(self.standard_deviation,
                                                        self.standard_deviation)
            np.fill_diagonal(self.correlation, 1.)
        else:
            if standard_deviation is None:
                raise ValueError("You must specify either covariance or standard_deviation")
            self.standard_deviation = np.array(standard_deviation)
            if correlation is None:
                self.correlation = np.eye(len(self.standard_deviation))
            else:
                if isinstance(correlation, (int, float)):
                    # if it's a number, construct delta_ij + (1-delta_ij)*x
                    n_dim = len(central_value)
                    self.correlation = np.eye(n_dim) + (np.ones((n_dim, n_dim))-np.eye(n_dim))*float(correlation)
                else:
                    self.correlation = np.array(correlation)
            self.covariance = np.outer(self.standard_deviation,
                                       self.standard_deviation)*self.correlation
        # support is taken as +-6 standard deviations around the mean
        super().__init__(central_value, support=np.array([
            np.asarray(central_value) - 6*self.standard_deviation,
            np.asarray(central_value) + 6*self.standard_deviation
        ]))
        # to avoid ill-conditioned covariance matrices, all data are rescaled
        # by the inverse variances
        self.err = np.sqrt(np.diag(self.covariance))
        self.scaled_covariance = self.covariance / np.outer(self.err, self.err)
        assert np.all(np.linalg.eigvals(self.scaled_covariance) >
                      0), "The covariance matrix is not positive definite!" + str(covariance)

    def __repr__(self):
        return 'flavio.statistics.probability.MultivariateNormalDistribution' + \
            '({}, {})'.format(self.central_value, self.covariance)

    def get_random(self, size=None):
        """Get `size` random numbers (default: a single one)"""
        return np.random.multivariate_normal(self.central_value, self.covariance, size)

    def reduce_dimension(self, exclude=None):
        """Return a different instance where certain dimensions, specified by
        the iterable of integers `exclude`, are removed from the covariance.

        If `exclude` contains all indices but one, an instance of
        `NormalDistribution` will be returned.
        """
        if not exclude:
            return self
        # if parameters are to be excluded, construct a
        # distribution with reduced mean vector and covariance matrix
        _cent_ex = np.delete(self.central_value, exclude)
        _cov_ex = np.delete(
            np.delete(self.covariance, exclude, axis=0), exclude, axis=1)
        if len(_cent_ex) == 1:
            # if only 1 dimension remains, can use a univariate Gaussian
            _dist_ex = NormalDistribution(
                central_value=_cent_ex[0], standard_deviation=np.sqrt(_cov_ex[0, 0]))
        else:
            # if more than 1 dimension remains, use a (smaller)
            # multivariate Gaussian
            _dist_ex = MultivariateNormalDistribution(
                central_value=_cent_ex, covariance=_cov_ex)
        return _dist_ex

    def logpdf(self, x, exclude=None):
        """Get the logarithm of the probability density function.

        Parameters:

        - x: vector; position at which PDF should be evaluated
        - exclude: optional; if an iterable of integers is given, the parameters
          at these positions will be removed from the covariance before
          evaluating the PDF, effectively ignoring certain dimensions.
        """
        if exclude is not None:
            # if parameters are to be excluded, construct a temporary
            # distribution with reduced mean vector and covariance matrix
            # and call its logpdf method
            _dist_ex = self.reduce_dimension(exclude=exclude)
            return _dist_ex.logpdf(x)
        # evaluate in the rescaled coordinates (see __init__)
        pdf_scaled = scipy.stats.multivariate_normal.logpdf(
            x / self.err, self.central_value / self.err, self.scaled_covariance)
        # undo the rescaling: the log-Jacobian is half the difference of the
        # log-determinants of the scaled and unscaled covariance matrices.
        # (Fixed: each determinant is now computed only once; previously an
        # unused slogdet call preceded a line recomputing both.)
        _, logdet_scaled = np.linalg.slogdet(self.scaled_covariance)
        _, logdet = np.linalg.slogdet(self.covariance)
        return pdf_scaled + (logdet_scaled - logdet) / 2.

    def get_error_left(self, nsigma=1):
        """Return the lower errors"""
        return nsigma * self.err

    def get_error_right(self, nsigma=1):
        """Return the upper errors"""
        return nsigma * self.err
class MultivariateNumericalDistribution(ProbabilityDistribution):
    """A multivariate distribution with PDF specified numerically."""

    def __init__(self, xi, y, central_value=None):
        """Initialize a multivariate numerical distribution.

        Parameters:

        - `xi`: for an N-dimensional distribution, a list of N 1D arrays
          specifiying the grid in N dimensions. The 1D arrays must contain
          real, evenly spaced values in strictly ascending order (but the
          spacing can be different for different dimensions). Any of the 1D
          arrays can also be given alternatively as a list of two numbers, which
          will be assumed to be the upper and lower boundaries, while the
          spacing will be determined from the shape of `y`.
        - `y`: PDF values on the grid defined by the `xi`. If the N `xi` have
          length M1, ..., MN, `y` has dimension (M1, ..., MN). This is the same
          shape as the grid obtained from `numpy.meshgrid(*xi, indexing='ij')`.
        - central_value: if None (default), will be set to the mode of the
          distribution, i.e. the N-dimensional xi-vector where y is largest
          (by looking up the input arrays, i.e. without interpolation!)
        """
        for x in xi:
            # check that grid spacings are even up to per mille precision
            d = np.diff(x)
            if abs(np.min(d)/np.max(d)-1) > 1e-3:
                raise ValueError("Grid must be evenly spaced per dimension")
        self.xi = [np.asarray(x) for x in xi]
        self.y = np.asarray(y)
        for i, x in enumerate(xi):
            if len(x) == 2:
                # two numbers are interpreted as boundaries; infer the number
                # of grid points from the corresponding axis of y
                self.xi[i] = np.linspace(x[0], x[1], self.y.shape[i])
        if central_value is not None:
            super().__init__(central_value=central_value,
                             support=(np.asarray(self.xi).T[0], np.asarray(self.xi).T[-1]))
        else:
            # if no central value is specified, set it to the mode
            mode_index = (slice(None),) + np.unravel_index(self.y.argmax(), self.y.shape)
            mode = np.asarray(np.meshgrid(*self.xi, indexing='ij'))[mode_index]
            super().__init__(central_value=mode, support=None)
        _bin_volume = np.prod([x[1] - x[0] for x in self.xi])
        self.y_norm = self.y / np.sum(self.y) / _bin_volume  # normalize PDF to 1
        # ignore warning from log(0)=-np.inf
        with np.errstate(divide='ignore', invalid='ignore'):
            logy = np.log(self.y_norm)
            # replace -inf by a huge negative number so the interpolator
            # stays finite on the grid
            logy[np.isneginf(logy)] = -1e100
        self.logpdf_interp = RegularGridInterpolator(self.xi, logy,
                                                     fill_value=-np.inf, bounds_error=False)
        # the following is needed for get_random: initialize to None
        self._y_flat = None
        self._cdf_flat = None

    def __repr__(self):
        return 'flavio.statistics.probability.MultivariateNumericalDistribution' + \
            '({}, {}, {})'.format([x.tolist() for x in self.xi], self.y.tolist(), list(self.central_value))

    def get_random(self, size=None):
        """Draw a random number from the distribution.

        If size is not None but an integer N, return an array of N numbers.

        For the MultivariateNumericalDistribution, the PDF from which the
        random numbers are drawn is approximated to be piecewise constant in
        hypercubes around the points of the lattice spanned by the `xi`. A finer
        lattice spacing will lead to a smoother distribution of random numbers
        (but will also be slower).
        """
        if size is None:
            return self._get_random()
        else:
            return np.array([self._get_random() for i in range(size)])

    def _get_random(self):
        """Draw a single random vector via inverse transform sampling on the
        flattened, discretized PDF."""
        # if these have not been initialized, do it (once)
        if self._y_flat is None:
            # get a flattened array of the PDF
            self._y_flat = self.y.flatten()
        if self._cdf_flat is None:
            # get the (discrete) 1D CDF
            _cdf_flat = np.cumsum(self._y_flat)
            # normalize to 1
            self._cdf_flat = _cdf_flat/_cdf_flat[-1]
        # draw a number between 0 and 1
        r = np.random.uniform()
        # find the index of the CDF-value closest to r
        i_r = np.argmin(np.abs(self._cdf_flat-r))
        # locate all grid points with that PDF value and pick one at random
        # (handles the case of degenerate PDF values)
        indices = np.where(self.y == self._y_flat[i_r])
        i_bla = np.random.choice(len(indices[0]))
        index = tuple([a[i_bla] for a in indices])
        xi_r = [ self.xi[i][index[i]] for i in range(len(self.xi)) ]
        xi_diff = np.array([ X[1]-X[0] for X in self.xi ])
        # smear uniformly within the hypercube around the chosen grid point
        return xi_r + np.random.uniform(low=-0.5, high=0.5, size=len(self.xi)) * xi_diff

    def reduce_dimension(self, exclude=None):
        """Return a different instance where certain dimensions, specified by
        the iterable of integers `exclude`, are removed from the covariance.

        If `exclude` contains all indices but one, an instance of
        `NumericalDistribution` will be returned.
        """
        if not exclude:
            return self
        # if parameters are to be excluded, construct a
        # distribution with reduced mean vector and covariance matrix
        try:
            exclude = tuple(exclude)
        except TypeError:
            # a single integer was passed
            exclude = (exclude,)
        xi = np.delete(self.xi, tuple(exclude), axis=0)
        # excluded dimensions are profiled out by maximizing the PDF along them
        y = np.amax(self.y_norm, axis=tuple(exclude))
        cv = np.delete(self.central_value, tuple(exclude))
        if len(xi) == 1:
            # if there is just 1 dimension left, use univariate
            dist = NumericalDistribution(xi[0], y, cv)
        else:
            dist = MultivariateNumericalDistribution(xi, y, cv)
        return dist

    def logpdf(self, x, exclude=None):
        """Get the logarithm of the probability density function.

        Parameters:

        - x: vector; position at which PDF should be evaluated
        - exclude: optional; if an iterable of integers is given, the parameters
          at these positions will be ignored by maximizing the likelihood
          along the remaining directions, i.e., they will be "profiled out".
        """
        if exclude is not None:
            # if parameters are to be excluded, construct a temporary
            # distribution with reduced mean vector and covariance matrix
            # and call its logpdf method
            dist = self.reduce_dimension(exclude=exclude)
            return dist.logpdf(x)
        if np.asarray(x).shape == (len(self.central_value),):
            # return a scalar
            return self.logpdf_interp(x)[0]
        else:
            return self.logpdf_interp(x)

    def get_error_left(self, *args, **kwargs):
        raise NotImplementedError(
            "1D errors not implemented for multivariate numerical distributions")

    def get_error_right(self, *args, **kwargs):
        raise NotImplementedError(
            "1D errors not implemented for multivariate numerical distributions")

    @classmethod
    def from_pd(cls, pd, nsteps=100):
        """Construct an instance from any multivariate distribution `pd` by
        evaluating its PDF on a regular grid of `nsteps` points per dimension
        spanning its support."""
        if isinstance(pd, cls):
            # nothing to do
            return pd
        _xi = np.array([np.linspace(pd.support[0][i], pd.support[-1][i], nsteps)
                        for i in range(len(pd.central_value))])
        ndim = len(_xi)
        _xlist = np.array(np.meshgrid(*_xi, indexing='ij')).reshape(ndim, nsteps**ndim).T
        _ylist = np.exp(pd.logpdf(_xlist))
        _y = _ylist.reshape(tuple(nsteps for i in range(ndim)))
        return cls(central_value=pd.central_value, xi=_xi, y=_y)
# Auxiliary functions
def convolve_distributions(probability_distributions, central_values='same'):
    """Combine a set of probability distributions by convoluting the PDFs.

    This function can be used in two different ways:

    - for `central_values='same'`, it can be used to combine uncertainties on a
      single parameter/observable expressed in terms of probability distributions
      with the same central value.
    - for `central_values='sum'`, it can be used to determine the probability
      distribution of a sum of random variables.

    The only difference between the two cases is a shift: for 'same', the
    central value of the convolution is the same as the original central value,
    for 'sum', it is the sum of the individual central values.

    `probability_distributions` must be a list of instances of descendants of
    `ProbabilityDistribution`.
    """
    if central_values not in ['same', 'sum']:
        raise ValueError("central_values must be either 'same' or 'sum'")

    def dim(x):
        # 1 for scalars, length for array-like central values.
        # Fixed: the bare `except:` was narrowed to the exceptions float()
        # actually raises for non-scalar input.
        try:
            float(x)
        except (TypeError, ValueError):
            return len(x)
        else:
            return 1

    dims = [dim(p.central_value) for p in probability_distributions]
    assert all([d == dims[0] for d in dims]), "All distributions must have the same number of dimensions"
    if dims[0] == 1:
        return _convolve_distributions_univariate(probability_distributions, central_values)
    else:
        return _convolve_distributions_multivariate(probability_distributions, central_values)
def _convolve_distributions_univariate(probability_distributions, central_values='same'):
    """Combine a set of univariate probability distributions by convolution.

    See `convolve_distributions` for the meaning of `central_values`.
    """
    # if there's just one: return it immediately
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    if central_values == 'same':
        central_value = probability_distributions[0].central_value
        assert all(p.central_value == central_value for p in probability_distributions), \
            "Distributions must all have the same central value"
    # all delta dists
    deltas = [p for p in probability_distributions if isinstance(
        p, DeltaDistribution)]
    if central_values == 'sum' and deltas:
        raise NotImplementedError("Convolution of DeltaDistributions only implemented for equal central values")
    # for central_values == 'same', delta distributions can simply be ignored
    # all normal dists
    gaussians = [p for p in probability_distributions if isinstance(
        p, NormalDistribution)]
    # all other univariate dists
    others = [p for p in probability_distributions
              if not isinstance(p, NormalDistribution)
              and not isinstance(p, DeltaDistribution)]
    if not others and not gaussians:
        # only delta distributions are present
        if central_values == 'same':
            return deltas[0]
        elif central_values == 'sum':
            # Fixed: this branch previously re-tested 'same' (a copy-paste
            # error) and was unreachable. Note it still cannot trigger at
            # present due to the NotImplementedError raised above.
            return DeltaDistribution(sum([p.central_value for p in deltas]))
    # let's combine the normal distributions into 1
    if gaussians:
        gaussian = _convolve_gaussians(gaussians, central_values=central_values)
    if gaussians and not others:
        # if there are only the gaussians, we are done.
        return gaussian
    else:
        # otherwise, we need to combine the (combined) gaussian with the others
        if gaussians:
            to_be_combined = others + [gaussian]
        else:
            to_be_combined = others
        # turn all distributions into numerical distributions!
        numerical = [NumericalDistribution.from_pd(p) for p in to_be_combined]
        return _convolve_numerical(numerical, central_values=central_values)
def _convolve_distributions_multivariate(probability_distributions, central_values='same'):
    """Combine a set of multivariate probability distributions by convolution.

    Only multivariate normal and multivariate numerical distributions are
    supported, and at most one numerical distribution may be present.
    """
    # if there's just one: return it immediately
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    if central_values == 'same':
        central_value = probability_distributions[0].central_value
        assert all(p.central_value[i] == central_value[i] for p in probability_distributions for i in range(len(central_value))), \
            "Distributions must all have the same central value"
    for p in probability_distributions:
        if not ( isinstance(p, MultivariateNormalDistribution)
                 or isinstance(p, MultivariateNumericalDistribution) ):
            raise ValueError("Multivariate convolution only implemented "
                             "for normal and numerical distributions")
    # all normal dists
    gaussians = [p for p in probability_distributions if isinstance(
        p, MultivariateNormalDistribution)]
    # all numerical dists
    others = [p for p in probability_distributions if isinstance(
        p, MultivariateNumericalDistribution)]
    # let's combine the normal distributions into 1
    if gaussians:
        gaussian = _convolve_multivariate_gaussians(gaussians,
                                                    central_values=central_values)
    if gaussians and not others:
        # if there are only the gaussians, we are done.
        return gaussian
    else:
        # otherwise, we need to combine the (combined) gaussian with the others
        if len(others) > 1:
            # Fixed: the exception was previously constructed but never
            # raised, so execution silently fell through.
            raise NotImplementedError("Combining multivariate numerical distributions not implemented")
        else:
            num = _convolve_multivariate_gaussian_numerical(gaussian, others[0],
                                                            central_values=central_values)
            return num
def _convolve_gaussians(probability_distributions, central_values='same'):
    """Convolve univariate normal distributions analytically.

    The result is a `NormalDistribution` whose variance is the sum of the
    individual variances.
    """
    # if there's just one: return it immediately
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    assert all(isinstance(p, NormalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of NormalDistribution"
    if central_values == 'same':
        central_value = probability_distributions[0].central_value  # central value of the first dist
        # fixed misspelled assertion message ("Distrubtions")
        assert all(p.central_value == central_value for p in probability_distributions), \
            "Distributions must all have the same central value"
    elif central_values == 'sum':
        central_value = sum([p.central_value for p in probability_distributions])
    sigmas = np.array(
        [p.standard_deviation for p in probability_distributions])
    # variances add under convolution of Gaussians
    sigma = math.sqrt(np.sum(sigmas**2))
    return NormalDistribution(central_value=central_value, standard_deviation=sigma)
def _convolve_multivariate_gaussians(probability_distributions, central_values='same'):
    """Convolve multivariate normal distributions analytically.

    The result is a `MultivariateNormalDistribution` whose covariance is the
    sum of the individual covariance matrices.
    """
    # if there's just one: return it immediately
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    assert all(isinstance(p, MultivariateNormalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of MultivariateNormalDistribution"
    if central_values == 'same':
        central_value = probability_distributions[0].central_value  # central value of the first dist
        # fixed misspelled assertion message ("Distrubtions")
        assert all(p.central_value == central_value for p in probability_distributions), \
            "Distributions must all have the same central value"
    elif central_values == 'sum':
        central_value = np.sum([p.central_value for p in probability_distributions], axis=0)
    # covariances add under convolution of Gaussians
    cov = np.sum([p.covariance for p in probability_distributions], axis=0)
    return MultivariateNormalDistribution(central_value=central_value, covariance=cov)
def _convolve_numerical(probability_distributions, nsteps=10000, central_values='same'):
    """Convolve univariate `NumericalDistribution`s via FFT.

    The PDFs are evaluated on a common equidistant grid of `nsteps` points
    and convolved pairwise with `scipy.signal.fftconvolve`.
    """
    # if there's just one: return it immediately
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    assert all(isinstance(p, NumericalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of NumericalDistribution"
    if central_values == 'same':
        central_value = probability_distributions[0].central_value  # central value of the first dist
        assert all(p.central_value == central_value for p in probability_distributions), \
            "Distrubtions must all have the same central value"
    elif central_values == 'sum':
        central_value = sum([p.central_value for p in probability_distributions])
    # differences of individual central values from combined central value
    central_diffs = [central_value - p.central_value for p in probability_distributions]
    # individual supports, shifted appropriately
    supports = (np.array([p.support for p in probability_distributions]).T + central_diffs).T
    # support of the convolution: distances of the individual support
    # boundaries from the central value add up
    support = (central_value - (central_value - supports[:, 0]).sum(),
               central_value - (central_value - supports[:, 1]).sum())
    delta = (support[1] - support[0]) / (nsteps - 1)
    x = np.linspace(support[0], support[1], nsteps)
    # position of the central value
    n_x_central = math.floor((central_value - support[0]) / delta)
    y = None
    for i, pd in enumerate(probability_distributions):
        # multiply by the grid spacing so the discrete convolution
        # approximates the continuous one
        y1 = np.exp(pd.logpdf(x - central_diffs[i])) * delta
        if y is None:
            # first step
            y = y1
        else:
            # convolution
            y = scipy.signal.fftconvolve(y, y1, 'full')
            # cut out the convolved signal at the right place
            y = y[n_x_central:nsteps + n_x_central]
    return NumericalDistribution(central_value=central_value, x=x, y=y)
def _convolve_multivariate_gaussian_numerical(mvgaussian,
                                              mvnumerical,
                                              central_values='same'):
    """Convolve a multivariate Gaussian with a multivariate numerical
    distribution by evaluating both on a common enlarged grid and using an
    FFT convolution.
    """
    assert isinstance(mvgaussian, MultivariateNormalDistribution), \
        "mvgaussian must be a single instance of MultivariateNormalDistribution"
    # Fixed: the message previously said "mvgaussian" although this assert
    # checks mvnumerical (copy-paste error).
    assert isinstance(mvnumerical, MultivariateNumericalDistribution), \
        "mvnumerical must be a single instance of MultivariateNumericalDistribution"
    nsteps = max(200, *[len(x) for x in mvnumerical.xi])
    xi = np.zeros((len(mvnumerical.xi), nsteps))
    for i, x in enumerate(mvnumerical.xi):
        # enlarge the support so the convolution does not get clipped
        cvn = mvnumerical.central_value[i]
        cvg = mvgaussian.central_value[i]
        supp = [s[i] for s in mvgaussian.support]
        x_max = cvn + (x[-1] - cvn) + (supp[-1] - cvn) + np.mean(x) - cvg
        x_min = cvn + (x[0] - cvn) + (supp[0] - cvn) + np.mean(x) - cvg
        xi[i] = np.linspace(x_min, x_max, nsteps)
    xi_grid = np.array(np.meshgrid(*xi, indexing='ij'))
    # this will transpose from shape (0, 1, 2, ...) to (1, 2, ..., 0)
    xi_grid = np.transpose(xi_grid, tuple(range(1, xi_grid.ndim)) + (0,))
    y_num = np.exp(mvnumerical.logpdf(xi_grid))
    # shift Gaussian to the mean of the support
    xi_grid = xi_grid - np.array([np.mean(x) for x in xi]) + np.array(mvgaussian.central_value)
    y_gauss = np.exp(mvgaussian.logpdf(xi_grid))
    f = scipy.signal.fftconvolve(y_num, y_gauss, mode='same')
    # clip small negative FFT artefacts and normalize
    f[f < 0] = 0
    f = f/f.sum()
    if central_values == 'sum':
        # shift back
        xi = (xi.T + np.array(mvgaussian.central_value)).T
    return MultivariateNumericalDistribution(xi, f)
def combine_distributions(probability_distributions):
    """Combine a set of probability distributions by multiplying the PDFs.

    `probability_distributions` must be a list of instances of descendants of
    `ProbabilityDistribution`.
    """
    def dim(x):
        # 1 for scalars, length for array-like central values.
        # Fixed: the bare `except:` was narrowed to the exceptions float()
        # actually raises for non-scalar input.
        try:
            float(x)
        except (TypeError, ValueError):
            return len(x)
        else:
            return 1

    dims = [dim(p.central_value) for p in probability_distributions]
    assert all([d == dims[0] for d in dims]), "All distributions must have the same number of dimensions"
    if dims[0] == 1:
        return _combine_distributions_univariate(probability_distributions)
    else:
        return _combine_distributions_multivariate(probability_distributions)
def _combine_distributions_univariate(probability_distributions):
    """Combine univariate distributions by multiplying their PDFs."""
    # a single distribution needs no combination
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    # delta distributions dominate the product: if any are present, the
    # result is a delta distribution (or the product vanishes)
    deltas = [p for p in probability_distributions
              if isinstance(p, DeltaDistribution)]
    if deltas:
        if len({p.central_value for p in deltas}) > 1:
            raise ValueError("Combining multiple delta distributions with different central values yields zero PDF")
        return deltas[0]
    # split the remaining distributions into Gaussians and everything else
    gaussians = []
    others = []
    for p in probability_distributions:
        if isinstance(p, NormalDistribution):
            gaussians.append(p)
        else:
            others.append(p)
    if gaussians:
        # Gaussians can be combined analytically
        gaussian = _combine_gaussians(gaussians)
        if not others:
            return gaussian
        to_be_combined = others + [gaussian]
    else:
        to_be_combined = others
    # fall back to a numerical product of PDFs
    numerical = [NumericalDistribution.from_pd(p) for p in to_be_combined]
    return _combine_numerical(numerical)
def weighted_average(central_values, standard_deviations):
    """Return the central value and uncertainty of the weighted average of a
    set of normal distributions given by lists of central values and standard
    deviations.

    Weights are the inverse variances, as appropriate for combining
    independent Gaussian measurements.
    """
    weights = 1 / np.asarray(standard_deviations)**2
    mean = np.average(central_values, weights=weights)
    uncertainty = np.sqrt(1 / weights.sum())
    return mean, uncertainty
def _combine_gaussians(probability_distributions):
    """Combine univariate normal distributions by inverse-variance weighting."""
    if len(probability_distributions) == 1:
        # nothing to combine
        return probability_distributions[0]
    assert all(isinstance(p, NormalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of NormalDistribution"
    cvs = [p.central_value for p in probability_distributions]
    sds = [p.standard_deviation for p in probability_distributions]
    mean, unc = weighted_average(cvs, sds)
    return NormalDistribution(central_value=mean, standard_deviation=unc)
def _combine_numerical(probability_distributions, nsteps=1000):
    """Combine a set of `NumericalDistribution`s by multiplying their PDFs.

    The combined PDF is evaluated on `nsteps` points spanning the overlap of
    the individual supports. Raises `ValueError` if the supports do not
    overlap.
    """
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    assert all(isinstance(p, NumericalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of NumericalDistribution"
    # the combined support is the intersection of the individual supports
    supports = np.array([p.support for p in probability_distributions])
    support = (np.max(supports[:, 0]), np.min(supports[:, 1]))
    # fixed the garbled error message ("to not have" -> "do not have")
    if support[1] <= support[0]:
        raise ValueError("Numerical distributions do not have overlapping support")
    x = np.linspace(support[0], support[1], nsteps)
    # multiply the PDFs by summing the log-PDFs
    y = np.exp(np.sum([pd.logpdf(x) for pd in probability_distributions], axis=0))
    return NumericalDistribution(x=x, y=y)
def _combine_distributions_multivariate(probability_distributions):
    """Combine multivariate distributions by multiplying their PDFs."""
    # a single distribution needs no combination
    if len(probability_distributions) == 1:
        return probability_distributions[0]
    # split into Gaussians and everything else
    gaussians = []
    others = []
    for p in probability_distributions:
        if isinstance(p, MultivariateNormalDistribution):
            gaussians.append(p)
        else:
            others.append(p)
    if gaussians:
        # Gaussians can be combined analytically
        gaussian = _combine_multivariate_gaussians(gaussians)
        if not others:
            return gaussian
        to_be_combined = others + [gaussian]
    else:
        to_be_combined = others
    # fall back to a numerical product of PDFs
    numerical = [MultivariateNumericalDistribution.from_pd(p) for p in to_be_combined]
    return _combine_multivariate_numerical(numerical)
def _combine_multivariate_gaussians(probability_distributions):
    """Combine multivariate normal distributions by inverse-covariance
    (precision) weighting.

    With weights W_i = Sigma_i^(-1), the combined covariance is
    Sigma = (sum_i W_i)^(-1) and the combined mean is
    x = Sigma . (sum_i W_i . x_i).
    """
    assert all(isinstance(p, MultivariateNormalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of MultivariateNormalDistribution"
    precisions = [np.linalg.inv(d.covariance) for d in probability_distributions]
    combined_cov = np.linalg.inv(np.sum(precisions, axis=0))
    weighted_sum = np.sum([np.dot(w, d.central_value)
                           for w, d in zip(precisions, probability_distributions)],
                          axis=0)
    combined_mean = np.dot(combined_cov, weighted_sum)
    return MultivariateNormalDistribution(combined_mean,
                                          covariance=combined_cov)
def _combine_multivariate_numerical(probability_distributions, nsteps=200):
    """Combine multivariate numerical distributions by multiplying their PDFs
    on a common grid spanning the intersection of their supports."""
    assert all(isinstance(p, MultivariateNumericalDistribution) for p in probability_distributions), \
        "Distributions should all be instances of MultivariateNumericalDistribution"
    # the common support is the intersection of the individual supports
    supports = np.array([d.support for d in probability_distributions])
    xi_min = np.max(supports[:, 0], axis=0)
    xi_max = np.min(supports[:, 1], axis=0)
    assert np.all(xi_min < xi_max), \
        """Support of the multivariate distributions vanishes."""
    ndim = len(probability_distributions[0].central_value)
    _xi = np.array([np.linspace(xi_min[i], xi_max[i], nsteps)
                    for i in range(ndim)])
    # flatten the grid into a list of points for PDF evaluation
    grid_points = np.array(np.meshgrid(*_xi, indexing='ij')).reshape(ndim, nsteps**ndim).T
    # multiply the individual PDFs evaluated on the common grid
    pdf_product = np.prod([np.exp(d.logpdf(grid_points))
                           for d in probability_distributions], axis=0)
    _y = pdf_product.reshape(tuple(nsteps for _ in range(ndim)))
    return MultivariateNumericalDistribution(_xi, _y)
def dict2dist(constraint_dict):
    r"""Get a list of probability distributions from a list of dictionaries
    (or a single dictionary) specifying the distributions.

    Arguments:

    - constraint_dict: dictionary or list of several dictionaries of the
      form `{'distribution': 'distribution_name', 'arg1': val1, ...}`, where
      'distribution_name' is a string name associated to each probability
      distribution (see `class_from_string`)
      and `'arg1'`, `val1` are argument/value pairs of the arguments of
      the distribution class's constructor (e.g.`central_value`,
      `standard_deviation` for a normal distribution).
    """
    if isinstance(constraint_dict, dict):
        dict_list = [constraint_dict]
    else:
        dict_list = constraint_dict

    def convertv(v):
        # convert v to float if possible, otherwise leave it unchanged.
        # Fixed: the bare `except:` was narrowed to the exceptions float()
        # actually raises (it previously also swallowed e.g. KeyboardInterrupt)
        try:
            return float(v)
        except (TypeError, ValueError):
            return v

    pds = []
    for d in dict_list:
        dist = class_from_string[d['distribution']]
        pds.append(dist(**{k: convertv(v) for k, v in d.items() if k != 'distribution'}))
    return pds
# this dictionary is used for parsing low-level distribution definitions
# in YAML files. A string name is associated to every (relevant) distribution,
# mapping the name returned by `class_to_string` to the class itself.
class_from_string = { c.class_to_string(): c
                      for c in ProbabilityDistribution.get_subclasses() }
| mit | ab438957d979a76439404a7bb83d079e | 41.759636 | 131 | 0.615937 | 4.093215 | false | false | false | false |
flav-io/flavio | flavio/physics/kdecays/kpipi.py | 1 | 7624 | r"""Functions for observables in $K\to\pi\pi$ decays, in particular
$\epsilon'/\epsilon$."""
import flavio
from flavio.classes import Prediction, Observable
from flavio.config import config
from flavio.physics import ckm
from flavio.physics.kdecays.wilsoncoefficients import wilsoncoefficients_sm_fourquark
from math import sqrt
def Kpipi_matrixelements_SM(par, scale):
    r"""Return the $K\to\pi\pi$ matrix elements of the SM operators in the
    traditional 10-operator basis.

    Returns a dictionary with keys 0 and 2 corresponding to the $\Delta I=1/2$
    and 3/2 matrix elements, respectively, and values being again dictionaries
    with the keys being the operator index and the value being the matrix
    element in units of GeV³, using the same normalization as in
    arXiv:1502.00263."""
    # NOTE: `scale` is currently unused; the matrix elements are taken as
    # constant parameters from `par`.
    M0 = {str(i): par['Kpipi M0 {}'.format(i)] for i in (3, 4, 5, 6, 7, 8, 9)}
    M2 = {str(i): par['Kpipi M2 {}'.format(i)] for i in (7, 8, 9)}
    # exact relations: the I=2 matrix elements of Q3-Q6 vanish
    M2.update({str(i): 0 for i in (3, 4, 5, 6)})
    # exact relations among the remaining operators
    M0['1'] = 1 / 3 * (M0['3'] + 2 * M0['9'])
    M0['2'] = 1 / 3 * (-2 * M0['3'] + 3 * M0['4'] + 2 * M0['9'])
    M0['10'] = -M0['3'] + M0['4'] + M0['9']
    M2['1'] = 2 / 3 * M2['9']
    M2['2'] = M2['1']
    M2['10'] = M2['9']
    return {0: M0, 2: M2}
def Kpipi_matrixelements_NP(par, scale):
    r"""Return the $K\to\pi\pi$ matrix elements of all $s\to d$ operators in the
    flavio basis.
    Returns a dictionary with keys 0 and 2 corresponding to the $\Delta I=1/2$
    and 3/2 matrix elements, respectively, and values being again dictionaries
    with the keys being the operator name and the value being the matrix
    element in units of GeV³, using the same normalization as in
    arXiv:1502.00263."""
    # SM-basis matrix elements are the building blocks for the flavio basis.
    MSM = Kpipi_matrixelements_SM(par, scale)
    M = {0: {}, 2: {}}
    # strange quark MS-bar mass at `scale` in the 3-flavour theory; used for
    # the chromomagnetic matrix element below
    ms = flavio.physics.running.running.get_ms(par, scale, nf_out=3)
    # follows appendix A4 of Aebischer/Buras/Gerard arXiv:1807.01709
    for i in (0, 2):
        # vector operators: linear combinations of the SM-basis elements
        M[i]['CVLL_sduu'] = MSM[i]['1'] / 4
        M[i]['CVLLt_sduu'] = MSM[i]['2'] / 4
        M[i]['CVLR_sduu'] = (MSM[i]['5'] / 3 + 2 * MSM[i]['7'] / 3) / 4
        M[i]['CVLRt_sduu'] = (MSM[i]['6'] / 3 + 2 * MSM[i]['8'] / 3) / 4
        M[i]['CVLL_sddd'] = (2 * MSM[i]['3'] / 3 - 2 * MSM[i]['9'] / 3) / 4
        M[i]['CVLR_sddd'] = (2 * MSM[i]['5'] / 3 - 2 * MSM[i]['7'] / 3) / 4
        M[i]['CSRL_sddd'] = (MSM[i]['6'] / 3 - MSM[i]['8'] / 3) / 4
        # scalar/tensor down-quark elements come directly from parameters
        M[i]['CSRR_sddd'] = par['Kpipi M{} SLL2_d'.format(i)]
        M[i]['CTRR_sddd'] = -8 * par['Kpipi M{} SLL1_d'.format(i)] -4 * par['Kpipi M{} SLL2_d'.format(i)]
    for i in (2, ):
        # isospin relations valid for I=2 amplitude
        M[i]['CSRL_sduu'] = -M[i]['CSRL_sddd']
        M[i]['CSRR_sduu'] = -M[i]['CSRR_sddd']
        M[i]['CTRR_sduu'] = -M[i]['CTRR_sddd']
        M[i]['CSRLt_sduu'] = 1 / 2 * M[i]['CVLR_sddd']  # -1 from isospin, -1/2 from Fierz
        M[i]['CSRRt_sduu'] = -(-1 / 2 * M[i]['CSRR_sddd'] - 1 / 8 * M[i]['CTRR_sddd'])
        M[i]['CTRRt_sduu'] = -(-6 * M[i]['CSRR_sddd'] + 1 / 2 * M[i]['CTRR_sddd'])
        # chromomagnetic operator does not contribute to the I=2 amplitude
        M[i]['C8_sd'] = 0
    for i in (0, ):
        # I=0 up-quark scalar/tensor elements are independent parameters
        M[i]['CSRL_sduu'] = par['Kpipi M{} SLR2_u'.format(i)]
        M[i]['CSRLt_sduu'] = par['Kpipi M{} SLR1_u'.format(i)]
        M[i]['CSRR_sduu'] = par['Kpipi M{} SLL2_u'.format(i)]
        M[i]['CSRRt_sduu'] = par['Kpipi M{} SLL1_u'.format(i)]
        M[i]['CTRR_sduu'] = -par['Kpipi M{} SLL4_u'.format(i)]
        M[i]['CTRRt_sduu'] = -par['Kpipi M{} SLL3_u'.format(i)]
        # chromomagnetic matrix element, proportional to m_s
        M[i]['C8_sd'] = -ms * par['Kpipi M0 g-'] / 2
    for i in (0, 2):
        # opposite-chirality ("mirror") operators: related by a sign flip
        M[i]['CVRR_sduu'] = -M[i]['CVLL_sduu']
        M[i]['CVRRt_sduu'] = -M[i]['CVLLt_sduu']
        M[i]['CVRR_sddd'] = -M[i]['CVLL_sddd']
        M[i]['CVRL_sduu'] = -M[i]['CVLR_sduu']
        M[i]['CVRLt_sduu'] = -M[i]['CVLRt_sduu']
        M[i]['CVRL_sddd'] = -M[i]['CVLR_sddd']
        M[i]['CSLR_sduu'] = -M[i]['CSRL_sduu']
        M[i]['CSLRt_sduu'] = -M[i]['CSRLt_sduu']
        M[i]['CSLR_sddd'] = -M[i]['CSRL_sddd']
        M[i]['CSLL_sduu'] = -M[i]['CSRR_sduu']
        M[i]['CSLLt_sduu'] = -M[i]['CSRRt_sduu']
        M[i]['CSLL_sddd'] = -M[i]['CSRR_sddd']
        M[i]['CTLL_sduu'] = -M[i]['CTRR_sduu']
        M[i]['CTLLt_sduu'] = -M[i]['CTRRt_sduu']
        M[i]['CTLL_sddd'] = -M[i]['CTRR_sddd']
        M[i]['C8p_sd'] = -M[i]['C8_sd']
    return M
def Kpipi_amplitudes_SM(par,
                        include_VmA=True, include_VpA=True,
                        scale_ImA0EW=False):
    r"""Compute the SM contribution to the two isospin amplitudes of
    the $K\to\pi\pi$ transition.

    Parameters:
    - `par`: parameter dictionary
    - `include_VmA`: include the (V-A)x(V-A) operators 3, 4, 9, 10
    - `include_VpA`: include the (V+A) operators 5, 6, 7, 8
    - `scale_ImA0EW`: if True, rescale the imaginary part of the
      electroweak-operator contributions to the I=0 amplitude

    Returns a dictionary with keys 0 and 2 for the two isospin amplitudes.
    """
    scale = config['renormalization scale']['kpipi']
    pref = par['GF'] / sqrt(2) * ckm.xi('u', 'ds')(par)  # GF/sqrt(2) Vus* Vud
    me = Kpipi_matrixelements_SM(par, scale)
    # Wilson coefficients
    wc = wilsoncoefficients_sm_fourquark(par, scale)
    # ratio of CKM factors multiplying the top ("y") coefficients
    tau = -ckm.xi('t', 'ds')(par) / ckm.xi('u', 'ds')(par)
    # operators 1, 2 always contribute; others depend on the flags
    k = [1, 2]
    if include_VmA:
        k = k + [3, 4, 9, 10]
    if include_VpA:
        k = k + [5, 6, 7, 8]
    A = {0: 0, 2: 0}
    for i in [0, 2]:
        for j in k:
            m = me[i][str(j)]
            yj = wc.get('y{}'.format(j), 0)
            zj = wc.get('z{}'.format(j), 0)
            dA = pref * m * (zj + tau * yj)
            if scale_ImA0EW and i == 0 and j in [7, 8, 9, 10]:
                # rescale only the imaginary part of the EW contributions
                b = 1 / par['epsp a'] / (1 - par['Omegahat_eff'])
                dA = dA.real + 1j * b * dA.imag
            A[i] += dA
    return A
def Kpipi_amplitudes_NP(wc_obj, par):
    r"""Compute the new physics contribution to the two isospin amplitudes
    of the $K\to\pi\pi$ transition.

    Returns a dictionary with keys 0 and 2 for the two isospin amplitudes.
    """
    scale = config['renormalization scale']['kpipi']
    pref = 4 * par['GF'] / sqrt(2) * ckm.xi('t', 'ds')(par)  # 4GF/sqrt(2) Vts* Vtd
    me = Kpipi_matrixelements_NP(par, scale)
    # Wilson coefficients of the s->d sector in the 3-flavour WET
    wc = wc_obj.get_wc(sector='sd', scale=scale, par=par, eft='WET-3')
    A = {0: 0, 2: 0}
    for i in [0, 2]:
        for j, m in me[i].items():
            # conjugation needed to match the amplitude convention used here
            A[i] += -pref * m * complex(wc[j]).conjugate()  # conjugate!
    return A
def epsprime_SM(par):
    r"""Compute the SM contribution to $\epsilon'/\epsilon$, including
    isospin breaking corrections."""
    a = par['epsp a']
    A = Kpipi_amplitudes_SM(par)
    # imaginary parts are computed; real parts are taken from parameters
    ImA0 = A[0].imag
    ImA2 = A[2].imag
    ReA0 = par['ReA0(K->pipi)']
    ReA2 = par['ReA2(K->pipi)']
    # eq. (19) of arXiv:1507.06345
    flavio.citations.register("Buras:2015yba")
    return (-par['omega+'] / (sqrt(2) * par['eps_K'])
            * (ImA0 / ReA0 * (1 - par['Omegahat_eff'])
               - 1 / a * ImA2 / ReA2).real)
def epsprime_NP(wc_obj, par):
    r"""Compute the NP contribution to $\epsilon'/\epsilon$."""
    # Neglecting isospin breaking corrections!
    A = Kpipi_amplitudes_NP(wc_obj, par)
    ImA0 = A[0].imag
    ImA2 = A[2].imag
    ReA0 = par['ReA0(K->pipi)']
    ReA2 = par['ReA2(K->pipi)']
    a = par['epsp a']  # eq. (16)
    # dividing by a to remove the isospin brk corr in omega+, cf. (16) in 1507.06345
    flavio.citations.register("Buras:2015yba")
    return (-par['omega+'] / a / (sqrt(2) * par['eps_K'])
            * (ImA0 / ReA0 - ImA2 / ReA2).real)
def epsprime(wc_obj, par):
    r"""Compute $\epsilon'/\epsilon$, parametrizing direct CPV in
    $K\to\pi\pi$, as the sum of the SM and new-physics contributions."""
    sm_contribution = epsprime_SM(par)
    np_contribution = epsprime_NP(wc_obj, par)
    return sm_contribution + np_contribution
# Observable and Prediction instances
# Register eps'/eps with the function defined above as its prediction.
o = Observable('epsp/eps')
o.tex = r"$\epsilon^\prime/\epsilon$"
Prediction('epsp/eps', epsprime)
o.set_description(r"Direct CP violation parameter")
o.add_taxonomy(r'Process :: $s$ hadron decays :: Non-leptonic decays :: $K\to \pi\pi$')
| mit | 90ef45424b0bfd7cbb203a09099a973f | 40.423913 | 106 | 0.534112 | 2.44687 | false | false | false | false |
flav-io/flavio | flavio/physics/zdecays/gammaz.py | 1 | 7028 | r"""$Z$ pole observables beyond the SM."""
import flavio
from flavio.physics.zdecays import gammazsm, smeftew
from math import sqrt, pi
def GammaZ_NP(par, Nc, gV_SM, d_gV, gA_SM, d_gA):
    """New-physics shift of a Z partial width from shifted effective couplings.

    `gV_SM`/`gA_SM` are the SM effective vector/axial couplings and
    `d_gV`/`d_gA` their (possibly complex) new-physics corrections; `Nc` is
    the colour factor of the final-state fermion.
    """
    prefactor = sqrt(2) * par['GF'] * par['m_Z']**3 / (3 * pi) * Nc
    # interference of SM and NP couplings plus the purely-NP quadratic piece
    coupling_shift = (
        2 * (gV_SM * d_gV).real + 2 * (gA_SM * d_gA).real
        + abs(d_gV)**2 + abs(d_gA)**2
    )
    return prefactor * coupling_shift
def GammaZ(wc_obj, par, f1, f2):
    """Return the partial width for $Z\to f_1 \bar f_2$, SM plus new physics.

    For flavour-diagonal channels (f1 == f2) the SM width and SM effective
    couplings are included; off-diagonal (flavour-violating) channels have
    no SM contribution and are purely new physics.
    """
    scale = flavio.config['renormalization scale']['zdecays']
    # SMEFT Wilson coefficients in the Warsaw basis at the Z scale
    wc_dict = wc_obj.get_wcxf(sector='all', scale=scale, par=par,
                              eft='SMEFT', basis='Warsaw')
    Nc = smeftew._QN[f1]['Nc']
    if f1 == f2:
        gV_SM = smeftew.gV_SM(f1, par)
        gA_SM = smeftew.gA_SM(f1, par)
        GSM = gammazsm.GammaZ_SM(par, f1)
    else:
        # flavour-violating: no SM coupling, no SM width
        gV_SM = 0
        gA_SM = 0
        GSM = 0
    d_gV = smeftew.d_gV(f1, f2, par, wc_dict)
    d_gA = smeftew.d_gA(f1, f2, par, wc_dict)
    GNP = GammaZ_NP(par, Nc, gV_SM, d_gV, gA_SM, d_gA)
    return GSM + GNP
def GammaZ_fct(f1, f2):
    """Return a prediction function for the partial width of Z -> f1 f2bar."""
    def _predict(wc_obj, par):
        return GammaZ(wc_obj, par, f1, f2)
    return _predict
def BRZ_fct(f1, f2):
    """Return a prediction function for the branching ratio of Z -> f1 f2bar."""
    def _predict(wc_obj, par):
        return par['tau_Z'] * GammaZ(wc_obj, par, f1, f2)
    return _predict
def BRZ_fct_av(f1, f2):
    """Return a prediction function for BR(Z -> f1 f2bar) + BR(Z -> f2 f1bar)."""
    def _predict(wc_obj, par):
        both_orderings = GammaZ(wc_obj, par, f1, f2) + GammaZ(wc_obj, par, f2, f1)
        return par['tau_Z'] * both_orderings
    return _predict
def GammaZnu(wc_obj, par):
    """Z width to neutrinos, averaged over flavours.

    All 9 flavour combinations are summed (they are experimentally
    indistinguishable) and the sum is divided by 3.
    """
    flavours = ['nu' + lep for lep in ('e', 'mu', 'tau')]
    total = sum(GammaZ(wc_obj, par, f1, f2) for f1 in flavours for f2 in flavours)
    return total / 3.
def Gammal(wc_obj, par):
    """Z width to a charged-lepton pair, averaged over the three flavours.

    Only the flavour-diagonal channels enter, since experiments distinguish
    the charged-lepton flavours.
    """
    diagonal = [GammaZ(wc_obj, par, lep, lep) for lep in ('e', 'mu', 'tau')]
    return sum(diagonal) / 3.
def Gammahad(wc_obj, par):
    """Hadronic Z width: all up-type and down-type quark pair combinations.

    All 9 (4 up-type + ... wait, 4 + 9) flavour combinations per sector are
    summed since they are experimentally not distinguished.
    """
    up_sector = sum(GammaZ(wc_obj, par, qa, qb) for qa in 'uc' for qb in 'uc')
    down_sector = sum(GammaZ(wc_obj, par, qa, qb) for qa in 'dsb' for qb in 'dsb')
    return up_sector + down_sector
def sigmahad(wc_obj, par):
    """Hadronic pole cross-section for e+ e- -> Z -> hadrons."""
    gamma_ee = GammaZ(wc_obj, par, 'e', 'e')
    gamma_total = Gammatot(wc_obj, par)
    return 12 * pi / par['m_Z']**2 / gamma_total**2 * gamma_ee * Gammahad(wc_obj, par)
def Gammatot(wc_obj, par):
    """Total Z width: hadrons + 3 charged lepton flavours + 3 neutrino flavours."""
    charged_leptons = Gammal(wc_obj, par) * 3
    neutrinos = GammaZnu(wc_obj, par) * 3
    return Gammahad(wc_obj, par) + charged_leptons + neutrinos
def Rl(wc_obj, par):
    """Ratio of the hadronic Z width to the flavour-averaged leptonic width."""
    hadronic = Gammahad(wc_obj, par)
    return hadronic / Gammal(wc_obj, par)
def Rq(f):
    """Return a function computing Gamma(Z -> f fbar) / Gamma(Z -> hadrons)."""
    def _ratio(wc_obj, par):
        return GammaZ(wc_obj, par, f, f) / Gammahad(wc_obj, par)
    return _ratio
def Rq1q2(f1, f2):
    """Return a function computing the average of R_f1 and R_f2."""
    def _ratio(wc_obj, par):
        widths = GammaZ(wc_obj, par, f1, f1) + GammaZ(wc_obj, par, f2, f2)
        return widths / (2 * Gammahad(wc_obj, par))
    return _ratio
def Remutau(f):
    """Return a function computing Gamma(Z -> hadrons) / Gamma(Z -> f fbar)."""
    def _ratio(wc_obj, par):
        return Gammahad(wc_obj, par) / GammaZ(wc_obj, par, f, f)
    return _ratio
_leptons = {'e': ' e', 'mu': r'\mu', 'tau': r'\tau'}
_uquarks = {'u': ' u', 'c': ' c', 'c': ' c'}
_dquarks = {'d': ' d', 's': ' s', 'b': ' b'}
# Register partial-width and ratio observables for all flavour-conserving
# Z decay channels.
for _f in (_leptons, _uquarks, _dquarks):
    for f, tex in _f.items():
        if f in _leptons:
            _process_tex = r"Z^0\to {}^+{}^-".format(tex, tex)
        else:
            _process_tex = r"Z^0\to{}\bar{}".format(tex, tex)
        # partial width Gamma(Z->ff)
        _obs_name = "Gamma(Z->{})".format(2 * f)
        _obs = flavio.classes.Observable(_obs_name)
        _obs.tex = r"$\Gamma(" + _process_tex + r")$"
        _obs.set_description(r"Partial width of $" + _process_tex + r"$")
        _obs.add_taxonomy(r'Process :: $Z^0$ decays :: Flavour conserving decays :: $' + _process_tex + r"$")
        flavio.classes.Prediction(_obs_name, GammaZ_fct(f, f))
        # ratio observable R_f; the ratio is defined differently for leptons
        # (hadrons / leptons) and quarks (quarks / hadrons)
        _obs_name = "R_{}".format(f)
        _obs = flavio.classes.Observable(_obs_name)
        _obs.tex = r"$R_{}^0$".format(tex)
        _obs.add_taxonomy(r'Process :: $Z^0$ decays :: Flavour conserving decays :: $' + _process_tex + r"$")
        if f in _leptons:
            _obs.set_description(r"Ratio of $Z^0$ partial widths to hadrons vs. ${}$ pairs".format(tex))
            flavio.classes.Prediction(_obs_name, Remutau(f))
        else:
            _obs.set_description(r"Ratio of $Z^0$ partial widths to ${}$ pairs vs. all hadrons".format(tex))
            flavio.classes.Prediction(_obs_name, Rq(f))
# averaged up-type quark ratio R_uc
_obs_name = "R_uc"
_obs = flavio.classes.Observable(_obs_name)
_obs.tex = r"$R_{uc}^0$"
for q in ('u', 'c'):
    _obs.add_taxonomy(r"Process :: $Z^0$ decays :: Flavour conserving decays :: $Z^0\to {}\bar {}$".format(q, q))
_obs.set_description(r"Average ratio of $Z^0$ partial widths to $u$ or $c$ pairs vs. all hadrons")
flavio.classes.Prediction(_obs_name, Rq1q2('u', 'c'))
# LFV Z decays
# Branching ratios for the lepton-flavour-violating channels; both charge
# orderings are summed via BRZ_fct_av.
for (f1, f2) in [('e', 'mu'), ('e', 'tau'), ('mu', 'tau'), ]:
    tex1 = _leptons[f1]
    tex2 = _leptons[f2]
    _obs_name = "BR(Z->{}{})".format(f1, f2)
    _obs = flavio.classes.Observable(_obs_name)
    _process_tex = r"Z^0\to {}^\pm{}^\mp".format(tex1, tex2)
    _obs.tex = r"$\text{BR}(" + _process_tex + r")$"
    _obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
    _obs.add_taxonomy(r'Process :: $Z^0$ decays :: FCNC decays :: $' + _process_tex + r"$")
    flavio.classes.Prediction(_obs_name, BRZ_fct_av(f1, f2))
# total width
_obs_name = "GammaZ"
_obs = flavio.classes.Observable(_obs_name)
_obs.tex = r"$\Gamma_Z$"
_obs.set_description(r"Total width of the $Z^0$ boson")
for _f in (_leptons, _uquarks, _dquarks):
    for f, tex in _f.items():
        if f in _leptons:
            _process_tex = r"Z^0\to {}^+{}^-".format(tex, tex)
        else:
            _process_tex = r"Z^0\to{}\bar{}".format(tex, tex)
        _obs.add_taxonomy(r'Process :: $Z^0$ decays :: Flavour conserving decays :: $' + _process_tex + r"$")
flavio.classes.Prediction(_obs_name, Gammatot)
# hadronic pole cross-section
_obs_name = "sigma_had"
_obs = flavio.classes.Observable(_obs_name)
_obs.tex = r"$\sigma_\text{had}^0$"
_obs.set_description(r"$e^+e^-\to Z^0$ hadronic pole cross-section")
for _f in (_uquarks, _dquarks):
    for f, tex in _f.items():
        _process_tex = r"Z^0\to{}\bar{}".format(tex, tex)
        _obs.add_taxonomy(r'Process :: $Z^0$ decays :: Flavour conserving decays :: $' + _process_tex + r"$")
flavio.classes.Prediction(_obs_name, sigmahad)
# flavour-averaged invisible width
_obs_name = "Gamma(Z->nunu)"
_obs = flavio.classes.Observable(_obs_name)
_process_tex = r"Z^0\to\nu\bar\nu"
_obs.tex = r"$\Gamma(" + _process_tex + r")$"
_obs.set_description(r"Partial width of $" + _process_tex + r"$, averaged over neutrino flavours")
_obs.add_taxonomy(r'Process :: $Z^0$ decays :: Flavour conserving decays :: $' + _process_tex + r"$")
flavio.classes.Prediction(_obs_name, GammaZnu)
# flavour-averaged leptonic ratio
_obs_name = "R_l"
_obs = flavio.classes.Observable(_obs_name)
_obs.tex = r"$R_l^0$"
_obs.set_description(r"Ratio of $Z^0$ partial widths to hadrons vs. leptons, averaged over lepton flavours")
for l in [' e', r'\mu', r'\tau']:
    _obs.add_taxonomy(r"Process :: $Z^0$ decays :: Flavour conserving decays :: $Z^0\to {}^+{}^-$".format(l, l))
flavio.classes.Prediction(_obs_name, Rl)
| mit | dde0f76c1e91f5876fb37c63e566e3b6 | 35.041026 | 117 | 0.574986 | 2.412633 | false | false | false | false |
bugsnag/bugsnag-python | bugsnag/utils.py | 1 | 14034 | from functools import wraps, partial
import inspect
from json import JSONEncoder
from threading import local as threadlocal
from typing import AnyStr, Tuple, Optional
import warnings
import copy
import logging
from datetime import datetime, timedelta
from urllib.parse import urlparse, urlunsplit, parse_qs
MAX_PAYLOAD_LENGTH = 128 * 1024
MAX_STRING_LENGTH = 1024
__all__ = [] # type: ignore
class SanitizingJSONEncoder(JSONEncoder):
    """
    A JSON encoder which handles filtering and conversion from JSON-
    incompatible types to strings.
    >>> import logging
    >>> from json import loads
    >>> logger = logging.getLogger(__name__)
    >>> encoder = SanitizingJSONEncoder(logger, keyword_filters=['bananas'])
    >>> items = loads(encoder.encode(FilterDict({'carrots': 4, 'bananas': 5})))
    >>> items['bananas']
    '[FILTERED]'
    >>> items['carrots']
    4
    """
    # Placeholder strings substituted for redacted, recursive, and
    # unencodable values respectively.
    filtered_value = '[FILTERED]'
    recursive_value = '[RECURSIVE]'
    unencodeable_value = '[BADENCODING]'
    def __init__(self, logger: logging.Logger, keyword_filters=None, **kwargs):
        self.logger = logger
        # Filters are matched case-insensitively; keep a bytes copy so that
        # bytes dictionary keys can be matched too.
        self.filters = list(map(str.lower, keyword_filters or []))
        self.bytes_filters = [x.encode('utf-8') for x in self.filters]
        super(SanitizingJSONEncoder, self).__init__(**kwargs)
    def encode(self, obj):
        # First pass without string trimming; if the payload is too large,
        # sanitize again with strings trimmed to MAX_STRING_LENGTH.
        safe_obj = self._sanitize(obj, False)
        payload = super(SanitizingJSONEncoder, self).encode(safe_obj)
        if len(payload) > MAX_PAYLOAD_LENGTH:
            safe_obj = self._sanitize(safe_obj, True)
            return super(SanitizingJSONEncoder, self).encode(safe_obj)
        else:
            return payload
    def filter_string_values(self, obj, ignored=None, seen=None):
        """
        Remove any value from the dictionary which match the key filters
        """
        if not ignored:
            ignored = set()
        # Keep track of nested objects to avoid having references garbage
        # collected (which would cause id reuse and false positive recursion)
        if seen is None:
            seen = []
        if type(ignored) is list:
            ignored = set(ignored)
        if id(obj) in ignored:
            return self.recursive_value
        if isinstance(obj, dict):
            ignored.add(id(obj))
            seen.append(obj)
            clean_dict = {}
            for key, value in obj.items():
                if self._should_filter(key):
                    clean_dict[key] = self.filtered_value
                else:
                    clean_dict[key] = self.filter_string_values(
                        value, ignored, seen)
            return clean_dict
        return obj
    def default(self, obj):
        """
        Coerce values to strings if possible, otherwise replace with
        '[BADENCODING]'
        """
        try:
            if isinstance(obj, bytes):
                return str(obj, encoding='utf-8', errors='replace')
            else:
                return str(obj)
        except Exception:
            self.logger.exception('Could not add object to payload')
            return self.unencodeable_value
    def _sanitize(self, obj, trim_strings, ignored=None, seen=None):
        """
        Replace recursive values and trim strings longer than
        MAX_STRING_LENGTH
        """
        if not ignored:
            ignored = set()
        # Keep track of nested objects to avoid having references garbage
        # collected (which would cause id reuse and false positive recursion)
        if seen is None:
            seen = []
        if type(ignored) is list:
            ignored = set(ignored)
        if id(obj) in ignored:
            return self.recursive_value
        elif isinstance(obj, dict):
            ignored.add(id(obj))
            seen.append(obj)
            return self._sanitize_dict(obj, trim_strings, ignored, seen)
        elif isinstance(obj, (set, tuple, list)):
            # All sequence-like containers are sanitized into plain lists.
            ignored.add(id(obj))
            seen.append(obj)
            items = []
            for value in obj:
                items.append(
                    self._sanitize(value, trim_strings, ignored, seen))
            return items
        elif trim_strings and isinstance(obj, str):
            return obj[:MAX_STRING_LENGTH]
        else:
            return obj
    def _sanitize_dict_key_value(self, clean_dict, key, clean_value):
        """
        Safely sets the provided key on the dictionary by coercing the key
        to a string
        """
        if isinstance(key, bytes):
            try:
                key = str(key, encoding='utf-8', errors='replace')
                # NOTE(review): when decoding succeeds, `key` is rebound to a
                # str, so the isinstance(key, str) branch below assigns the
                # same value a second time — redundant but harmless.
                clean_dict[key] = clean_value
            except Exception:
                self.logger.exception(
                    'Could not add sanitize key for dictionary, '
                    'dropping value.')
        if isinstance(key, str):
            clean_dict[key] = clean_value
        else:
            # Fall back to the key's string representation.
            try:
                clean_dict[str(key)] = clean_value
            except Exception:
                self.logger.exception(
                    'Could not add sanitize key for dictionary, '
                    'dropping value.')
    def _sanitize_dict(self, obj, trim_strings, ignored, seen):
        """
        Trim individual values in an object, applying filtering if the object
        is a FilterDict
        """
        if isinstance(obj, FilterDict):
            obj = self.filter_string_values(obj)
        clean_dict = {}
        for key, value in obj.items():
            clean_value = self._sanitize(value, trim_strings, ignored, seen)
            self._sanitize_dict_key_value(clean_dict, key, clean_value)
        return clean_dict
    def _should_filter(self, key):
        # A key is filtered when any configured filter appears as a
        # (case-insensitive) substring of it.
        if isinstance(key, str):
            key_lower = key.lower()
            return any(f in key_lower for f in self.filters)
        if isinstance(key, bytes):
            key_lower = key.lower()
            return any(f in key_lower for f in self.bytes_filters)
        return False
class FilterDict(dict):
    """
    Object which will be filtered when encoded
    """
    # Marker subclass: SanitizingJSONEncoder applies keyword filtering only
    # to dictionaries of this type (see _sanitize_dict).
    pass
ContentType = Tuple[str, Optional[str], Optional[str], Optional[str]]
def parse_content_type(value: str) -> ContentType:
    """
    Split a media type into a (type, subtype, suffix, parameters) tuple,
    based on RFC 6838
    >>> parse_content_type("text/plain")
    ('text', 'plain', None, None)
    >>> parse_content_type("application/hal+json")
    ('application', 'hal', 'json', None)
    >>> parse_content_type("application/json;schema=\\"ftp://example.com/a\\"")
    ('application', 'json', None, 'schema="ftp://example.com/a"')
    """
    head, semicolon, raw_params = value.partition(';')
    parameters = raw_params if semicolon else None
    maintype, slash, remainder = head.partition('/')
    if not slash:
        # No subtype present at all
        return (head, None, None, parameters)
    subtype, plus, suffix = remainder.partition('+')
    if plus:
        return (maintype, subtype, suffix, parameters)
    return (maintype, subtype, None, parameters)
def is_json_content_type(value: str) -> bool:
    """
    Check if a content type is JSON-parseable
    >>> is_json_content_type('text/plain')
    False
    >>> is_json_content_type('application/schema+json')
    True
    >>> is_json_content_type('application/json')
    True
    """
    maintype, subtype, suffix, _ = parse_content_type(value.lower())
    return maintype == 'application' and 'json' in (subtype, suffix)
def fully_qualified_class_name(obj):
module = inspect.getmodule(obj)
if module is not None and module.__name__ != "__main__":
return module.__name__ + "." + obj.__class__.__name__
else:
return obj.__class__.__name__
def package_version(package_name):
    """Return the installed version string of *package_name*, or None when
    pkg_resources is unavailable or the package is not installed."""
    try:
        import pkg_resources
    except ImportError:
        return None
    try:
        return pkg_resources.get_distribution(package_name).version
    except pkg_resources.DistributionNotFound:
        return None
def _validate_setter(types, func, should_error=False):
"""
Check that the first argument of a function is of a provided set of types
before calling the body of the wrapped function, printing a runtime warning
(or raising a TypeError) if the validation fails.
"""
@wraps(func)
def wrapper(obj, value):
option_name = func.__name__
if value is None or isinstance(value, types):
func(obj, value)
else:
error_format = '{0} should be {1}, got {2}'
actual = type(value).__name__
requirement = ' or '.join([t.__name__ for t in types])
message = error_format.format(option_name, requirement, actual)
if should_error:
raise TypeError(message)
else:
warnings.warn(message, RuntimeWarning)
return wrapper
# Ready-made setter validators for common types. Each warns on a wrong-typed
# value; the "required" variant raises a TypeError instead.
validate_str_setter = partial(_validate_setter, (str,))
validate_required_str_setter = partial(_validate_setter, (str,),
                                       should_error=True)
validate_bool_setter = partial(_validate_setter, (bool,))
validate_iterable_setter = partial(_validate_setter, (list, tuple))
validate_int_setter = partial(_validate_setter, (int,))
class ThreadContextVar:
    """
    A wrapper around thread-local variables to mimic the API of contextvars
    """
    # Shared thread-local storage, created lazily on first use.
    LOCALS = None
    @classmethod
    def local_context(cls):
        if ThreadContextVar.LOCALS is None:
            ThreadContextVar.LOCALS = threadlocal()
        return ThreadContextVar.LOCALS
    def __init__(self, name, **kwargs):
        self.name = name
        # Mimic ContextVar: only store a default when one was explicitly
        # given, so that get() can raise LookupError otherwise. A deep copy
        # gives this thread its own fresh default value.
        if 'default' in kwargs:
            self.default = kwargs['default']
            self.set(copy.deepcopy(self.default))
    def get(self):
        store = ThreadContextVar.local_context()
        if hasattr(store, self.name):
            return getattr(store, self.name)
        if not hasattr(self, 'default'):
            raise LookupError("No value for '{}'".format(self.name))
        # First access from this thread: seed it with a fresh copy of the
        # default so threads never share mutable state.
        fresh = copy.deepcopy(self.default)
        self.set(fresh)
        return fresh
    def set(self, new_value):
        setattr(ThreadContextVar.local_context(), self.name, new_value)
def sanitize_url(url: AnyStr, config) -> Optional[str]:
    """Redact query-string parameters of *url* that match the configured
    param filters, returning the redacted URL as a str, or None when the URL
    cannot be parsed."""
    try:
        if isinstance(url, str):
            url_str = url
        else:
            url_str = str(url, encoding='utf-8', errors='replace')
        parsed = urlparse(url_str)
        # if there's no query string there's nothing to redact
        if not parsed.query:
            return url_str
        url_without_query = urlunsplit(
            # urlunsplit always requires 5 elements in this tuple
            (parsed.scheme, parsed.netloc, parsed.path, None, None)
        ).strip()
        query_parameters = parse_qs(parsed.query)
    except Exception:
        # if we can't parse the url or query string then we can't know if
        # there's anything to redact, so have to omit the URL entirely
        return None
    # Reuse the encoder's key-filtering logic to redact matching parameters.
    encoder = SanitizingJSONEncoder(config.logger, config.params_filters)
    redacted_parameter_dict = encoder.filter_string_values(query_parameters)
    filtered_value = SanitizingJSONEncoder.filtered_value
    redacted_parameters = []
    for key, values in redacted_parameter_dict.items():
        # if "values" has been redacted it's a string, otherwise it's a list
        if values == filtered_value:
            redacted_parameters.append(key + "=" + values)
        else:
            for value in values:
                redacted_parameters.append(key + "=" + value)
    return url_without_query + "?" + "&".join(redacted_parameters)
def remove_query_from_url(url: AnyStr) -> Optional[AnyStr]:
    """Strip the query string and fragment from *url*, keeping only scheme,
    network location and path. Returns None if the URL cannot be parsed or
    contains none of those components."""
    try:
        pieces = urlparse(url)
        stripped = urlunsplit(
            # urlunsplit always requires 5 elements in this tuple
            (pieces.scheme, pieces.netloc, pieces.path, None, None)
        ).strip()
    except Exception:
        return None
    # An empty result means none of the interesting components were present.
    return stripped or None
# to_rfc3339: format a datetime instance to match to_rfc3339/iso8601 with
# milliseconds precision
# Python can do this natively from version 3.6, but we need to include a
# fallback implementation for Python 3.5
try:
    # this will raise if 'timespec' isn't supported
    datetime.utcnow().isoformat(timespec='milliseconds')  # type: ignore
    def to_rfc3339(dt: datetime) -> str:
        # Native path (Python >= 3.6)
        return dt.isoformat(timespec='milliseconds')  # type: ignore
except Exception:
    def _get_timezone_offset(dt: datetime) -> str:
        """Render the UTC offset of *dt* as '+HH:MM'/'-HH:MM', or '' when
        the datetime is naive or has no usable offset."""
        if dt.tzinfo is None:
            return ''
        utc_offset = dt.tzinfo.utcoffset(dt)
        if utc_offset is None:
            return ''
        sign = '+'
        if utc_offset.days < 0:
            sign = '-'
            utc_offset = -utc_offset
        hours_offset, minutes = divmod(utc_offset, timedelta(hours=1))
        minutes_offset, seconds = divmod(minutes, timedelta(minutes=1))
        return '{:s}{:02d}:{:02d}'.format(sign, hours_offset, minutes_offset)
    def to_rfc3339(dt: datetime) -> str:
        # Manual fallback formatting (Python 3.5); microseconds are
        # truncated to milliseconds to match the native path.
        return '{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}.{:03d}{:s}'.format(
            dt.year,
            dt.month,
            dt.day,
            dt.hour,
            dt.minute,
            dt.second,
            int(dt.microsecond / 1000),
            _get_timezone_offset(dt)
        )
| mit | 926b75c9897f81e0be8413b3a78cc452 | 31.188073 | 79 | 0.591706 | 4.199282 | false | false | false | false |
flav-io/flavio | flavio/physics/kdecays/wilsoncoefficients.py | 1 | 2016 | r"""Functions for SM Wilson coefficients in kaon decays."""
import scipy.interpolate
import flavio
from flavio.physics import ckm
# Table 1 of 1507.06345: Wilson coefficients at 1.3 GeV
# for alpha_s = [0.1179, 0.1185, 0.1191]
# Each row of _yz is one coefficient evaluated at the three alpha_s values;
# the row order is given by _yz_rows below.
_yz = [[-0.4036, -0.4092, -0.4150],
       [1.2084, 1.2120, 1.2157],
       [0.0275, 0.0280, 0.0285],
       [-0.0555, -0.0563, -0.0571],
       [0.0054, 0.0052, 0.0050],
       [-0.0849, -0.0867, -0.0887],
       [-0.0404, -0.0403, -0.0402],
       [0.1207, 0.1234, 0.1261],
       [-1.3936, -1.3981, -1.4027],
       [0.4997, 0.5071, 0.5146]]
_yz_rows = ["z1", "z2", "y3", "y4", "y5", "y6", "y7/al", "y8/al", "y9/al", "y10/al",]
# inter- & extrapolating alpha_s dependence
# wcsm(alpha_s) returns the 10 coefficients, ordered as in _yz_rows.
wcsm = scipy.interpolate.interp1d([0.1179, 0.1185, 0.1191], _yz, fill_value="extrapolate")
def wilsoncoefficients_sm_fourquark(par, scale):
    r"""Return the $\Delta S=1$ Wilson coefficients of four-quark operators
    in the SM at the scale `scale`.

    Currently only implemented for `scale=1.3`.

    Raises a ValueError for any other scale. The returned dictionary maps
    coefficient names ("z1", "z2", "y3", ..., "y10") to their values.
    """
    if scale != 1.3:
        raise ValueError("Wilson coefficients only implemented for scale=1.3")
    flavio.citations.register("Buras:2015yba")
    wcarr = wcsm(par['alpha_s'])
    # Reuse the row labels of the interpolation table instead of repeating
    # the list here, so the two cannot drift apart.
    wc_dict = dict(zip(_yz_rows, wcarr))
    # The "y7/al"..."y10/al" entries are tabulated divided by alpha_em;
    # the /128 presumably restores them using alpha_em ~ 1/128 -- TODO confirm
    # against arXiv:1507.06345.
    for k in ['y7', 'y8', 'y9', 'y10']:
        wc_dict[k] = wc_dict.pop('{}/al'.format(k)) / 128
    return wc_dict
def wilsoncoefficients_sm_sl(par, scale):
    r"""Return the $\Delta S=1$ Wilson coefficients of semi-leptonic operators
    in the SM at the scale `scale`.
    Currently only $C_{10}$ (top and charm contributions) is implemented."""
    # fold in approximate m_t-dependence of C_10 (see eq. 4 of arXiv:1311.0903)
    flavio.citations.register("Bobeth:2013uxa")
    c10_top = -4.10 * (par['m_t'] / 173.1)**1.53
    vus = abs(ckm.get_ckm(par)[0, 1])
    charm_coefficient = 0.115  # P_c +- 0.011, arXiv:hep-ph/0605203
    flavio.citations.register("Gorbahn:2006bm")
    c10_charm = -charm_coefficient / par['s2w'] * vus**4
    return {'C10_t': c10_top, 'C10_c': c10_charm}
| mit | f774029d5596dd1308f6bef2ebcb4873 | 36.333333 | 90 | 0.612599 | 2.391459 | false | false | false | false |
catalyst-cooperative/pudl | src/pudl/output/epacems.py | 1 | 6538 | """Routines that provide user-friendly access to the partitioned EPA CEMS dataset."""
from itertools import product
from pathlib import Path
from typing import Iterable, List, Optional, Sequence, Tuple, Union
import dask.dataframe as dd
import pandas as pd
import pudl
from pudl.settings import EpaCemsSettings
# TODO: hardcoded data version doesn't belong here, but will defer fixing it until
# the crosswalk is formally integrated into PUDL. See Issue #1123
EPA_CROSSWALK_RELEASE = (
"https://github.com/USEPA/camd-eia-crosswalk/releases/download/v0.2.1/"
)
def epa_crosswalk() -> pd.DataFrame:
    # TODO: formally integrate this into PUDL. See Issue #1123
    """Read EPA/EIA crosswalk from EPA github repo.
    See https://github.com/USEPA/camd-eia-crosswalk for details and data dictionary
    Returns:
        pd.Dataframe: EPA/EIA crosswalk
    """
    # Downloads the CSV over the network on every call; no local caching.
    return pd.read_csv(EPA_CROSSWALK_RELEASE + "epa_eia_crosswalk.csv")
def year_state_filter(
    years: Iterable[int] = None, states: Iterable[str] = None
) -> List[List[Tuple[Union[str, int]]]]:
    """Create filters to read given years and states from partitioned parquet dataset.

    Builds a DNF (disjunctive normal form) filter expression suitable for the
    ``filters`` argument of the ``read_parquet()`` methods of pandas and dask
    dataframes. See
    https://blog.datasyndrome.com/python-and-parquet-performance-e71da65269ce
    for background on the DNF filter format.

    The filter selects every combination of the requested years and states:
    e.g. ``years=(2018, 2019)`` and ``states=("CA", "CO")`` selects 2018 and
    2019 data for both CA and CO.

    Args:
        years: 4-digit integers indicating the years of data to read.
            By default all available years are included.
        states: 2-letter state abbreviations indicating which states to
            include. By default all available states are included.

    Returns:
        A list of lists of tuples (DNF), or None when neither years nor
        states were specified.
    """
    if years is not None:
        year_terms = [("year", "=", y) for y in years]
    if states is not None:
        state_terms = [("state", "=", s.upper()) for s in states]
    # Branch on which of the two dimensions were actually requested.
    if states and not years:
        return [[term] for term in state_terms]
    if years and not states:
        return [[term] for term in year_terms]
    if years and states:
        return [[y_term, s_term] for y_term, s_term in product(year_terms, state_terms)]
    return None
def get_plant_states(plant_ids, pudl_out):
    """Determine what set of states a given set of EIA plant IDs are within.

    Useful for identifying which partitions of the EPA CEMS Parquet dataset
    need to be searched for a particular set of power plants.

    Args:
        plant_ids (iterable): A collection of integers representing valid
            plant_id_eia values within the PUDL DB.
        pudl_out (pudl.output.pudltabl.PudlTabl): A PudlTabl output object to
            use to access the PUDL DB.

    Returns:
        list: 2-letter state abbreviations for every state found in
        association with one or more of the plant_ids.
    """
    plants = pudl_out.plants_eia860()
    matching = plants[plants.plant_id_eia.isin(plant_ids)]
    return list(matching.state.unique())
def get_plant_years(plant_ids, pudl_out):
    """Determine which years a given set of EIA plant IDs appear in.

    Useful for identifying which partitions of the EPA CEMS Parquet dataset
    need to be searched for a particular set of power plants.

    NOTE: the EIA-860 and EIA-923 data used here don't cover as many years as
    the EPA CEMS, so this is of limited utility -- you may want to simply
    include all years, or manually specify the years of interest instead.

    Args:
        plant_ids (iterable): A collection of integers representing valid
            plant_id_eia values within the PUDL DB.
        pudl_out (pudl.output.pudltabl.PudlTabl): A PudlTabl output object to
            use to access the PUDL DB.

    Returns:
        list: 4-digit integer years found in association with one or more of
        the plant_ids.
    """
    plants = pudl_out.plants_eia860()
    matching = plants[plants.plant_id_eia.isin(plant_ids)]
    return list(matching.report_date.dt.year.unique())
def epacems(
    states: Optional[Sequence[str]] = None,
    years: Optional[Sequence[int]] = None,
    columns: Optional[Sequence[str]] = None,
    epacems_path: Optional[Path] = None,
) -> dd.DataFrame:
    """Load EPA CEMS data from PUDL with optional subsetting.
    Args:
        states: subset by state abbreviation. Defaults to None (which gets all states).
        years: subset by year. Defaults to None (which gets all years).
        columns: subset by column. Defaults to None (which gets all columns).
        epacems_path: path to parquet dir. By default it automatically loads the path
            from :mod:`pudl.workspace`
    Returns:
        The requested epacems data
    """
    # Validates the requested states/years against the known-available ones.
    epacems_settings = EpaCemsSettings(states=states, years=years)
    # columns=None is handled by dd.read_parquet; gives all columns
    if columns is not None:
        # nonexistent columns are handled by dd.read_parquet; raises ValueError
        columns = list(columns)
    if epacems_path is None:
        pudl_settings = pudl.workspace.setup.get_defaults()
        epacems_path = Path(pudl_settings["parquet_dir"]) / "epacems"
    # Lazily read only the partitions matching the year/state DNF filter.
    epacems = dd.read_parquet(
        epacems_path,
        use_nullable_dtypes=True,
        columns=columns,
        engine="pyarrow",
        index=False,
        split_row_groups=True,
        filters=year_state_filter(
            states=epacems_settings.states,
            years=epacems_settings.years,
        ),
    )
    return epacems
| mit | 8fcdd3df6bd610ac7d391b1606d8179a | 36.147727 | 88 | 0.683695 | 3.763961 | false | false | false | false |
catalyst-cooperative/pudl | src/pudl/metadata/enums.py | 1 | 5641 | """Enumerations of valid field values."""
from typing import Dict, List
US_STATES: Dict[str, str] = {
    "AK": "Alaska",
    "AL": "Alabama",
    "AR": "Arkansas",
    "AZ": "Arizona",
    "CA": "California",
    "CO": "Colorado",
    "CT": "Connecticut",
    "DE": "Delaware",
    "FL": "Florida",
    "GA": "Georgia",
    "HI": "Hawaii",
    "IA": "Iowa",
    "ID": "Idaho",
    "IL": "Illinois",
    "IN": "Indiana",
    "KS": "Kansas",
    "KY": "Kentucky",
    "LA": "Louisiana",
    "MA": "Massachusetts",
    "MD": "Maryland",
    "ME": "Maine",
    "MI": "Michigan",
    "MN": "Minnesota",
    "MO": "Missouri",
    "MS": "Mississippi",
    "MT": "Montana",
    "NC": "North Carolina",
    "ND": "North Dakota",
    "NE": "Nebraska",
    "NH": "New Hampshire",
    "NJ": "New Jersey",
    "NM": "New Mexico",
    "NV": "Nevada",
    "NY": "New York",
    "OH": "Ohio",
    "OK": "Oklahoma",
    "OR": "Oregon",
    "PA": "Pennsylvania",
    "RI": "Rhode Island",
    "SC": "South Carolina",
    "SD": "South Dakota",
    "TN": "Tennessee",
    "TX": "Texas",
    "UT": "Utah",
    "VA": "Virginia",
    "VT": "Vermont",
    "WA": "Washington",
    "WI": "Wisconsin",
    "WV": "West Virginia",
    "WY": "Wyoming",
}
"""Mapping of US state abbreviations to their full names."""
US_TERRITORIES: Dict[str, str] = {
    "AS": "American Samoa",
    # DC is technically a federal district, but it is grouped with the territories here.
    "DC": "District of Columbia",
    "GU": "Guam",
    "MP": "Northern Mariana Islands",
    "PR": "Puerto Rico",
    "VI": "Virgin Islands",
}
"""Mapping of US territory abbreviations to their full names."""
# Combined lookup covering both the states and the territories defined above.
US_STATES_TERRITORIES: Dict[str, str] = {**US_STATES, **US_TERRITORIES}
EPACEMS_STATES: List[str] = [
    state
    for state in US_STATES_TERRITORIES
    # AK and PR have data but only a few years, and that breaks the Datastore.
    # See https://github.com/catalyst-cooperative/pudl/issues/1264
    # NOTE(review): HI and the remaining territories are also excluded below --
    # presumably because they have no usable CEMS data; confirm upstream.
    if state not in {"AK", "AS", "GU", "HI", "MP", "PR", "VI"}
]
"""The US states and territories that are present in the EPA CEMS dataset."""
CANADA_PROVINCES_TERRITORIES: Dict[str, str] = {
    "AB": "Alberta",
    "BC": "British Columbia",
    # NOTE(review): "CN" is not a province code; presumably a catch-all used by
    # some data sources when only the country is known -- confirm upstream.
    "CN": "Canada",
    "MB": "Manitoba",
    "NB": "New Brunswick",
    "NS": "Nova Scotia",
    "NL": "Newfoundland and Labrador",
    "NT": "Northwest Territories",
    "NU": "Nunavut",
    "ON": "Ontario",
    # Fixed typo: was "Prince Edwards Island".
    "PE": "Prince Edward Island",
    "QC": "Quebec",
    "SK": "Saskatchewan",
    "YT": "Yukon Territory",
}
"""Mapping of Canadian province and territory abbreviations to their full names"""
# A mix of current regional entities, retired/renamed regions, WECC assessment
# areas, ISO/RTO names, combined codes (e.g. "RFC_SERC"), non-NERC geographies
# (VI, GU, PR, AS) and the placeholder "UNK" -- as found in the raw data.
NERC_REGIONS: List[str] = [
    "BASN",  # ASSESSMENT AREA Basin (WECC)
    "CALN",  # ASSESSMENT AREA California (WECC)
    "CALS",  # ASSESSMENT AREA California (WECC)
    "DSW",  # ASSESSMENT AREA Desert Southwest (WECC)
    "ASCC",  # Alaska
    "ISONE",  # ISO New England (NPCC)
    "ERCOT",  # lumped under TRE in 2017 Form instructions
    "NORW",  # ASSESSMENT AREA Northwest (WECC)
    "NYISO",  # ISO (NPCC)
    "PJM",  # RTO
    "ROCK",  # ASSESSMENT AREA Rockies (WECC)
    "ECAR",  # OLD RE Now part of RFC and SERC
    "FRCC",  # included in 2017 Form instructions, recently joined with SERC
    "HICC",  # Hawaii
    "MAAC",  # OLD RE Now part of RFC
    "MAIN",  # OLD RE Now part of SERC, RFC, MRO
    "MAPP",  # OLD/NEW RE Became part of MRO, resurfaced in 2010
    "MRO",  # RE included in 2017 Form instructions
    "NPCC",  # RE included in 2017 Form instructions
    "RFC",  # RE included in 2017 Form instructions
    "SERC",  # RE included in 2017 Form instructions
    "SPP",  # RE included in 2017 Form instructions
    "TRE",  # RE included in 2017 Form instructions (included ERCOT)
    "WECC",  # RE included in 2017 Form instructions
    "WSCC",  # OLD RE pre-2002 version of WECC
    "MISO",  # ISO unclear whether technically a regional entity, but lots of entries
    "ECAR_MAAC",
    "MAPP_WECC",
    "RFC_SERC",
    "SPP_WECC",
    "MRO_WECC",
    "ERCOT_SPP",
    "SPP_TRE",
    "ERCOT_TRE",
    "MISO_TRE",
    "VI",  # Virgin Islands
    "GU",  # Guam
    "PR",  # Puerto Rico
    "AS",  # American Samoa
    "UNK",
]
"""
North American Reliability Corporation (NERC) regions.
See https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3.
"""
# Customer class categories (plus a "total" roll-up).
CUSTOMER_CLASSES: List[str] = [
    "commercial",
    "industrial",
    "direct_connection",
    "other",
    "residential",
    "total",
    "transportation",
]
# Technology class categories; see the inline notes for quirks in the source data.
TECH_CLASSES: List[str] = [
    "backup",  # WHERE Is this used? because removed from DG table b/c not a real component
    "chp_cogen",
    "combustion_turbine",
    "fuel_cell",
    "hydro",
    "internal_combustion",
    "other",
    "pv",
    "steam",
    "storage_pv",
    "all_storage",  # need 'all' as prefix so as not to confuse with other storage category
    "total",
    "virtual_pv",
    "wind",
]
# Revenue class categories (plus a "total" roll-up).
REVENUE_CLASSES: List[str] = [
    "credits_or_adjustments",
    "delivery_customers",
    "other",
    "retail_sales",
    "sales_for_resale",
    "total",
    "transmission",
    "unbundled",
]
# Types of reliability standard a respondent may report following.
RELIABILITY_STANDARDS: List[str] = ["ieee_standard", "other_standard"]
# Broad fuel class categories.
FUEL_CLASSES: List[str] = [
    "gas",
    "oil",
    "other",
    "renewable",
    "water",
    "wind",
    "wood",
]
# Regional transmission organization / ISO categories (plus a catch-all "other").
RTO_CLASSES: List[str] = [
    "caiso",
    "ercot",
    "isone",
    "miso",
    "nyiso",
    "other",
    "pjm",
    "spp",
]
EPACEMS_MEASUREMENT_CODES: List[str] = [
    "Calculated",
    "LME",
    "Measured",
    "Measured and Substitute",
    "Other",  # Should be replaced with NA?
    "Substitute",
    "Undetermined",  # Should be replaced with NA
    "Unknown Code",  # Should be replaced with NA
]
"""Valid emissions measurement codes for the EPA CEMS hourly data."""
| mit | 6e8b3e8fbb7f51778af563df697a9799 | 25.111111 | 91 | 0.578369 | 2.712843 | false | false | false | false |
catalyst-cooperative/pudl | src/pudl/analysis/epa_crosswalk.py | 1 | 10658 | """Use the EPA crosswalk to connect EPA units to EIA generators and other data.
A major use case for this dataset is to identify subplants within plant_ids,
which are the smallest coherent units for aggregation.
Despite the name, plant_id refers to a legal entity that often contains
multiple distinct power plants, even of different technology or fuel types.
EPA CEMS data combines information from several parts of a power plant:
* emissions from smokestacks
* fuel use from combustors
* electricty production from generators
But smokestacks, combustors, and generators can be connected in
complex, many-to-many relationships. This complexity makes attribution difficult for,
as an example, allocating pollution to energy producers.
Furthermore, heterogeneity within plant_ids make aggregation
to the parent entity difficult or inappropriate.
But by analyzing the relationships between combustors and generators,
as provided in the EPA/EIA crosswalk, we can identify distinct power plants.
These are the smallest coherent units of aggregation.
In graph analysis terminology, the crosswalk is a list of edges between nodes
(combustors and generators) in a bipartite graph. The networkx python package provides
functions to analyze this edge list and extract disjoint subgraphs (groups of combustors
and generators that are connected to each other). These are the distinct power plants.
To avoid a name collision with plant_id, we term these collections 'subplants', and
identify them with a subplant_id that is unique within each plant_id. Subplants are thus
identified with the composite key (plant_id, subplant_id).
Through this analysis, we found that 56% of plant_ids contain multiple distinct
subplants, and 11% contain subplants with different technology types, such as a gas
boiler and gas turbine (not in a combined cycle).
Usage Example:
epacems = pudl.output.epacems.epacems(states=['ID']) # small subset for quick test
epa_crosswalk_df = pudl.output.epacems.epa_crosswalk()
filtered_crosswalk = filter_crosswalk(epa_crosswalk_df, epacems)
crosswalk_with_subplant_ids = make_subplant_ids(filtered_crosswalk)
"""
from typing import Union
import dask.dataframe as dd
import networkx as nx
import pandas as pd
def _get_unique_keys(epacems: Union[pd.DataFrame, dd.DataFrame]) -> pd.DataFrame:
    """Get unique unit IDs from CEMS data.

    Args:
        epacems (Union[pd.DataFrame, dd.DataFrame]): epacems dataset from pudl.output.epacems.epacems

    Returns:
        pd.DataFrame: unique keys from the epacems dataset
    """
    key_cols = ["plant_id_eia", "unitid", "unit_id_epa"]
    unique_ids = epacems[key_cols].drop_duplicates()
    # Dask frames are lazy until computed; pandas frames pass straight through,
    # which resolves the ambiguity between the two input types.
    if isinstance(epacems, dd.DataFrame):
        unique_ids = unique_ids.compute()
    return unique_ids
def filter_crosswalk_by_epacems(
    crosswalk: pd.DataFrame, epacems: Union[pd.DataFrame, dd.DataFrame]
) -> pd.DataFrame:
    """Inner join unique CEMS units with the EPA crosswalk.

    This is essentially an empirical filter on EPA units. Instead of filtering by
    construction/retirement dates in the crosswalk (thus assuming they are accurate),
    use the presence/absence of CEMS data to filter the units.

    Args:
        crosswalk: the EPA crosswalk, as from pudl.output.epacems.epa_crosswalk()
        epacems (Union[pd.DataFrame, dd.DataFrame]): CEMS data used to derive the
            set of observed unit keys.

    Returns:
        The inner join of the EPA crosswalk and unique epacems units. Adds
        the global ID column unit_id_epa.
    """
    observed_units = _get_unique_keys(epacems)
    return observed_units.merge(
        crosswalk,
        left_on=["plant_id_eia", "unitid"],
        right_on=["CAMD_PLANT_ID", "CAMD_UNIT_ID"],
        how="inner",
    )
def filter_out_unmatched(crosswalk: pd.DataFrame) -> pd.DataFrame:
    """Remove unmatched or excluded (non-exporting) units.

    Unmatched rows are limitations of the completeness of the EPA crosswalk itself, not of PUDL.

    Args:
        crosswalk (pd.DataFrame): the EPA crosswalk, as from pudl.output.epacems.epa_crosswalk()

    Returns:
        pd.DataFrame: the EPA crosswalk with unmatched units removed
    """
    unmatched_codes = {"CAMD Unmatched", "Manual CAMD Excluded"}
    keep_mask = ~crosswalk["MATCH_TYPE_GEN"].isin(unmatched_codes)
    return crosswalk.loc[keep_mask].copy()
def filter_out_boiler_rows(crosswalk: pd.DataFrame) -> pd.DataFrame:
    """Remove rows that represent graph edges between generators and boilers.

    A (plant, unit, generator) triple can repeat when the unit maps to several
    boilers; only the first such row is kept.

    Args:
        crosswalk (pd.DataFrame): the EPA crosswalk, as from pudl.output.epacems.epa_crosswalk()

    Returns:
        pd.DataFrame: the EPA crosswalk with boiler rows (many/one-to-many) removed
    """
    dedup_keys = ["CAMD_PLANT_ID", "CAMD_UNIT_ID", "EIA_GENERATOR_ID"]
    return crosswalk.drop_duplicates(subset=dedup_keys)
def _prep_for_networkx(crosswalk: pd.DataFrame) -> pd.DataFrame:
"""Make surrogate keys for combustors and generators.
Args:
crosswalk (pd.DataFrame): EPA crosswalk, as from pudl.output.epacems.epa_crosswalk()
Returns:
pd.DataFrame: copy of EPA crosswalk with new surrogate ID columns 'combustor_id' and 'generator_id'
"""
prepped = crosswalk.copy()
# networkx can't handle composite keys, so make surrogates
prepped["combustor_id"] = prepped.groupby(
by=["CAMD_PLANT_ID", "CAMD_UNIT_ID"]
).ngroup()
# node IDs can't overlap so add (max + 1)
prepped["generator_id"] = (
prepped.groupby(by=["CAMD_PLANT_ID", "EIA_GENERATOR_ID"]).ngroup()
+ prepped["combustor_id"].max()
+ 1
)
return prepped
def _subplant_ids_from_prepped_crosswalk(prepped: pd.DataFrame) -> pd.DataFrame:
    """Use networkx graph analysis to create global subplant IDs from a preprocessed crosswalk edge list.

    Args:
        prepped (pd.DataFrame): an EPA crosswalk that has passed through _prep_for_networkx()

    Returns:
        pd.DataFrame: copy of EPA crosswalk plus new column 'global_subplant_id'
    """
    full_graph = nx.from_pandas_edgelist(
        prepped,
        source="combustor_id",
        target="generator_id",
        edge_attr=True,
    )
    # Each connected component is one distinct subplant; tag its edges with a
    # sequential ID. Setting attributes on the subgraph view writes through to
    # the parent graph.
    for subplant_num, component in enumerate(nx.connected_components(full_graph)):
        subgraph = full_graph.subgraph(component)
        assert nx.algorithms.bipartite.is_bipartite(
            subgraph
        ), f"non-bipartite: i={subplant_num}, node_set={component}"
        nx.set_edge_attributes(
            subgraph, name="global_subplant_id", values=subplant_num
        )
    return nx.to_pandas_edgelist(full_graph)
def _convert_global_id_to_composite_id(
    crosswalk_with_ids: pd.DataFrame,
) -> pd.DataFrame:
    """Convert global_subplant_id to an equivalent composite key (CAMD_PLANT_ID, subplant_id).
    The composite key will be much more stable (though not fully stable!) in time.
    The global ID changes if ANY unit or generator changes, whereas the
    compound key only changes if units/generators change within that specific plant.
    A global ID could also tempt users into using it as a crutch, even though it isn't stable.
    A compound key should discourage that behavior.
    Args:
        crosswalk_with_ids (pd.DataFrame): crosswalk with global_subplant_id, as from _subplant_ids_from_prepped_crosswalk()
    Raises:
        ValueError: if crosswalk_with_ids has a MultiIndex
    Returns:
        pd.DataFrame: copy of crosswalk_with_ids with an added column: 'subplant_id'
    """
    # The index gymnastics below only work with a single-level index.
    if isinstance(crosswalk_with_ids.index, pd.MultiIndex):
        raise ValueError(
            f"Input crosswalk must have single level index. Given levels: {crosswalk_with_ids.index.names}"
        )
    reindexed = crosswalk_with_ids.reset_index()  # copy
    idx_name = crosswalk_with_ids.index.name
    if idx_name is None:
        # Indices with no name (None) are set to a pandas default name ('index'), which
        # could (though probably won't) change.
        # The new column from reset_index() is whatever column reindexed has
        # that the input did not.
        idx_col = reindexed.columns.symmetric_difference(crosswalk_with_ids.columns)[
            0
        ]  # get index name
    else:
        idx_col = idx_name
    # Within each plant, renumber that plant's global subplant IDs from zero via
    # ngroup(). groupby().apply() adds an outer index level (the group), which
    # is dropped again just below.
    composite_key: pd.Series = reindexed.groupby("CAMD_PLANT_ID", as_index=False).apply(
        lambda x: x.groupby("global_subplant_id").ngroup()
    )
    # Recombine. Could use index join but I chose to reindex, sort and assign.
    # Errors like mismatched length will raise exceptions, which is good.
    # drop the outer group, leave the reindexed row index
    composite_key.reset_index(level=0, drop=True, inplace=True)
    composite_key.sort_index(inplace=True)  # put back in same order as reindexed
    reindexed["subplant_id"] = composite_key
    # restore original index
    reindexed.set_index(idx_col, inplace=True)  # restore values
    reindexed.index.rename(idx_name, inplace=True)  # restore original name
    return reindexed
def filter_crosswalk(
    crosswalk: pd.DataFrame, epacems: Union[pd.DataFrame, dd.DataFrame]
) -> pd.DataFrame:
    """Remove crosswalk rows that do not correspond to an EIA facility or are duplicated due to many-to-many boiler relationships.

    Args:
        crosswalk (pd.DataFrame): The EPA/EIA crosswalk, as from pudl.output.epacems.epa_crosswalk()
        epacems (Union[pd.DataFrame, dd.DataFrame]): Emissions data. Must contain columns named ["plant_id_eia", "unitid", "unit_id_epa"]

    Returns:
        pd.DataFrame: A filtered copy of EPA crosswalk
    """
    # Pipeline: drop unmatched units, collapse boiler many-to-many rows, then
    # keep only units actually observed in the CEMS data.
    matched = filter_out_unmatched(crosswalk)
    deduped = filter_out_boiler_rows(matched)
    return filter_crosswalk_by_epacems(deduped, epacems)
def make_subplant_ids(crosswalk: pd.DataFrame) -> pd.DataFrame:
    """Identify sub-plants in the EPA/EIA crosswalk graph. Any row filtering should be done before this step.

    Usage Example:

    epacems = pudl.output.epacems.epacems(states=['ID'])  # small subset for quick test
    epa_crosswalk_df = pudl.output.epacems.epa_crosswalk()
    filtered_crosswalk = filter_crosswalk(epa_crosswalk_df, epacems)
    crosswalk_with_subplant_ids = make_subplant_ids(filtered_crosswalk)

    Args:
        crosswalk (pd.DataFrame): The EPA/EIA crosswalk, as from pudl.output.epacems.epa_crosswalk()

    Returns:
        pd.DataFrame: An edge list connecting EPA units to EIA generators, with connected pieces issued a subplant_id
    """
    edges = _prep_for_networkx(crosswalk)
    edges = _subplant_ids_from_prepped_crosswalk(edges)
    edges = _convert_global_id_to_composite_id(edges)
    # Restore original column order; this also drops the intermediate
    # global_subplant_id and surrogate node ID columns.
    return edges[["subplant_id"] + list(crosswalk.columns)]
| mit | 799a28a64b53c210d36c60ff593e5148 | 39.524715 | 137 | 0.711766 | 3.542041 | false | false | false | false |
catalyst-cooperative/pudl | src/pudl/transform/ferc1.py | 1 | 112928 | """Routines for transforming FERC Form 1 data before loading into the PUDL DB.
This module provides a variety of functions that are used in cleaning up the FERC Form 1
data prior to loading into our database. This includes adopting standardized units and
column names, standardizing the formatting of some string values, and correcting data
entry errors which we can infer based on the existing data. It may also include removing
bad data, or replacing it with the appropriate NA values.
"""
import importlib.resources
import logging
import re
from difflib import SequenceMatcher
from typing import Dict, List
# NetworkX is used to knit incomplete ferc plant time series together.
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
# These modules are required for the FERC Form 1 Plant ID & Time Series
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, Normalizer, OneHotEncoder
import pudl
from pudl.helpers import convert_cols_dtypes
from pudl.metadata.classes import DataSource
from pudl.metadata.dfs import FERC_DEPRECIATION_LINES
from pudl.settings import Ferc1Settings
logger = logging.getLogger(__name__)
##############################################################################
# Dicts for categorizing freeform strings ####################################
##############################################################################
# NOTE: exact duplicate entries within a single list have been removed (they
# were redundant: each list is used to map freeform strings to its category).
FUEL_STRINGS: Dict[str, List[str]] = {
    "coal": [
        "coal",
        "coal-subbit",
        "lignite",
        "coal(sb)",
        "coal (sb)",
        "coal-lignite",
        "coke",
        "coa",
        "lignite/coal",
        "coal - subbit",
        "coal-subb",
        "coal-sub",
        "coal-lig",
        "coal-sub bit",
        "coals",
        "ciak",
        "petcoke",
        "coal.oil",
        "coal/gas",
        "bit coal",
        "coal-unit #3",
        "coal-subbitum",
        "coal tons",
        "coal mcf",
        "coal unit #3",
        "pet. coke",
        "coal-u3",
        "coal&coke",
        "tons",
    ],
    "oil": [
        "oil",
        "#6 oil",
        "#2 oil",
        "fuel oil",
        "jet",
        "no. 2 oil",
        "no.2 oil",
        "no.6& used",
        "used oil",
        "oil-2",
        "oil (#2)",
        "diesel oil",
        "residual oil",
        "# 2 oil",
        "resid. oil",
        "tall oil",
        "oil/gas",
        "no.6 oil",
        "oil-fuel",
        "oil-diesel",
        "oil / gas",
        "oil bbls",
        "oil bls",
        "no. 6 oil",
        "#1 kerosene",
        "diesel",
        "no. 2 oils",
        "blend oil",
        "#2oil diesel",
        "#2 oil-diesel",
        "light oil",
        "heavy oil",
        "gas.oil",
        "#2",
        "2",
        "6",
        "bbl",
        "no 2 oil",
        "no 6 oil",
        "#1 oil",
        "#6",
        "oil-kero",
        "oil bbl",
        "biofuel",
        "no 2",
        "kero",
        "#1 fuel oil",
        "blended oil",
        "no 2. oil",
        "# 6 oil",
        "nno. 2 oil",
        "#2 fuel",
        "oill",
        "oils",
        "gas/oil",
        "no.2 oil gas",
        "#2 fuel oil",
        "oli",
        "oil (#6)",
        "oil/diesel",
        "2 oil",
        "#6 hvy oil",
        "jet fuel",
        "diesel/compos",
        "oil-8",
        "oil {6}",  # noqa: FS003
        "oil-unit #1",
        "bbl.",
        "oil.",
        "oil #6",
        "oil (6)",
        "oil(#2)",
        "oil-unit1&2",
        "oil-6",
        "#2 fue oil",
        "dielel oil",
        "dielsel oil",
        "#6 & used",
        "barrels",
        "oil un 1 & 2",
        "jet oil",
        "oil-u1&2",
        "oiul",
        "pil",
        "oil - 2",
        "oial",
        "diesel fuel",
        "diesel/compo",
        "oil (used)",
    ],
    "gas": [
        "gas",
        "gass",
        "methane",
        "natural gas",
        "blast gas",
        "gas mcf",
        "propane",
        "prop",
        "nat.gas",
        "nat gas",
        "nat. gas",
        "natl gas",
        "ga",
        "gas`",
        "syngas",
        "ng",
        "mcf",
        "blast gaa",
        "gac",
        "syngass",
        "prop.",
        "natural",
        "coal.gas",
        "n. gas",
        "lp gas",
        "natuaral gas",
        "coke gas",
        "gas #2016",
        "propane**",
        "* propane",
        "propane **",
        "gas expander",
        "gas ct",
        "# 6 gas",
        "#6 gas",
        "coke oven gas",
    ],
    "solar": [],
    "wind": [],
    "hydro": [],
    "nuclear": [
        "nuclear",
        "grams of uran",
        "grams of",
        "grams of ura",
        "grams",
        "nucleur",
        "nulear",
        "nucl",
        "nucleart",
        "nucelar",
        "gr.uranium",
        "grams of urm",
        "nuclear (9)",
        "nulcear",
        "nuc",
        "gr. uranium",
        "nuclear mw da",
        "nucvlear",
    ],
    "waste": [
        "tires",
        "tire",
        "refuse",
        "switchgrass",
        "wood waste",
        "woodchips",
        "biomass",
        "wood",
        "wood chips",
        "rdf",
        "tires/refuse",
        "tire refuse",
        "waste oil",
        "waste",
        "woodships",
        "tire chips",
        "tdf",
    ],
    "other": [
        "steam",
        "purch steam",
        "all",
        "n/a",
        "purch. steam",
        "other",
        "composite",
        "composit",
        "mbtus",
        "total",
        "avg",
        "avg.",
        "blo",
        "all fuel",
        "comb.",
        "alt. fuels",
        "na",
        "comb",
        "/#=2\x80â\x91?",
        "kã\xadgv¸\x9d?",
        "mbtu's",
        "gas, oil",
        "rrm",
        "3\x9c",
        "average",
        "furfural",
        "0",
        "watson bng",
        "toal",
        "bng",
        "# 6 & used",
        "combined",
        "blo bls",
        "compsite",
        "*",
        "compos.",
        "gas / oil",
        "mw days",
        "g",
        "c",
        "lime",
        "all fuels",
        "at right",
        "20",
        "1",
        "comp oil/gas",
        "all fuels to",
        "the right are",
        "c omposite",
        "all fuels are",
        "total pr crk",
        "all fuels =",
        "total pc",
        "comp",
        "alternative",
        "alt. fuel",
        "bio fuel",
        "total prairie",
        "",
        "kã\xadgv¸?",
        "m",
        "waste heat",
        "/#=2â?",
        "3",
    ],
}
"""
A mapping a canonical fuel name to a list of strings which are used to represent that
fuel in the FERC Form 1 Reporting. Case is ignored, as all fuel strings are converted to
a lower case in the data set.
"""
# NOTE: fixed a missing comma in "mwdth" that had fused two entries into the
# garbage string "mw day/thermlmw days/therm"; the two intended strings are now
# separate entries, and their stray copies were removed from "unknown" so they
# are no longer shadowed. Exact within-list duplicates have also been removed.
FUEL_UNIT_STRINGS: Dict[str, List[str]] = {
    "ton": [
        "toms",
        "taons",
        "tones",
        "col-tons",
        "toncoaleq",
        "coal",
        "tons coal eq",
        "coal-tons",
        "ton",
        "tons",
        "tons coal",
        "coal-ton",
        "tires-tons",
        "coal tons -2 ",
        "oil-tons",
        "coal tons 200",
        "ton-2000",
        "coal tons",
        "coal tons -2",
        "coal-tone",
        "tire-ton",
        "tire-tons",
        "ton coal eqv",
        "tos",
        "coal tons - 2",
        "c. t.",
        "c.t.",
        "toncoalequiv",
    ],
    "mcf": [
        "mcf",
        "mcf's",
        "mcfs",
        "mcf.",
        "gas mcf",
        '"gas" mcf',
        "gas-mcf",
        "mfc",
        "mct",
        " mcf",
        "msfs",
        "mlf",
        "mscf",
        "mci",
        "mcl",
        "mcg",
        "m.cu.ft.",
        "kcf",
        "(mcf)",
        "mcf *(4)",
        "mcf00",
        "m.cu.ft..",
        "1000 c.f",
    ],
    "bbl": [
        "barrel",
        "bbls",
        "bbl",
        "barrels",
        "bbrl",
        "bbl.",
        "bbls.",
        "oil 42 gal",
        "oil-barrels",
        "barrrels",
        "bbl-42 gal",
        "oil-barrel",
        "bb.",
        "barrells",
        "bar",
        "bbld",
        "oil- barrel",
        "barrels .",
        "bbl .",
        "barels",
        "barrell",
        "berrels",
        "bb",
        "bbl.s",
        "oil-bbl",
        "bls",
        "bbl:",
        "barrles",
        "blb",
        "propane-bbl",
        "barriel",
        "berriel",
        "barrile",
        "(bbl.)",
        "barrel *(4)",
        "(4) barrel",
        "bbf",
        "blb.",
        "(bbl)",
        "bb1",
        "bbsl",
        "barrrel",
        "barrels 100%",
        "bsrrels",
        "bbl's",
        "*barrels",
        "oil - barrels",
        "oil 42 gal ba",
        "bll",
        "boiler barrel",
        "gas barrel",
        '"boiler" barr',
        '"gas" barrel',
        '"boiler"barre',
        '"boiler barre',
        "bariel",
        "brrels",
        "oil barrel",
        "barreks",
        "oil-bbls",
        "oil-bbs",
    ],
    "gal": ["gallons", "gal.", "gals", "gals.", "gallon", "gal", "galllons"],
    "kgal": [
        "oil(1000 gal)",
        "oil(1000)",
        "oil (1000)",
        "oil(1000",
        "oil(1000ga)",
        "1000 gals",
        "1000 gal",
    ],
    "gramsU": [
        "gram",
        "grams",
        "gm u",
        "grams u235",
        "grams u-235",
        "grams of uran",
        "grams: u-235",
        "grams:u-235",
        "grams:u235",
        "grams u308",
        "grams: u235",
        "grams of",
        "grams - n/a",
        "gms uran",
        "s e uo2 grams",
        "gms uranium",
        "grams of urm",
        "gms. of uran",
        "grams (100%)",
        "grams v-235",
        "se uo2 grams",
        "grams u",
    ],
    "kgU": [
        "kg of uranium",
        "kg uranium",
        "kilg. u-235",
        "kg u-235",
        "kilograms-u23",
        "kg",
        "kilograms u-2",
        "kilograms",
        "kg of",
        "kg-u-235",
        "kilgrams",
        "kilogr. u235",
        "uranium kg",
        "kg uranium25",
        "kilogr. u-235",
        "kg uranium 25",
        "kilgr. u-235",
        "kguranium 25",
        "kg-u235",
        "kgm",
    ],
    "klbs": [
        "k lbs.",
        "k lbs",
        "1000 / lbs",
        "1000 lbs",
    ],
    "mmbtu": [
        "mmbtu",
        "mmbtus",
        "mbtus",
        "(mmbtu)",
        "mmbtu's",
        "nuclear-mmbtu",
        "nuclear-mmbt",
        "mmbtul",
    ],
    "btu": [
        "btus",
        "btu",
    ],
    "mwdth": [
        "mwd therman",
        "mw days-therm",
        "mwd thrml",
        "mwd thermal",
        "mwd/mtu",
        "mw days",
        "mwdth",
        "mwd",
        "mw day",
        "dth",
        "mwdaysthermal",
        "mw day therml",
        "mw days thrml",
        "nuclear mwd",
        "mmwd",
        "mw day/therml",
        "mw days/therm",
        "mw days (th",
        "ermal)",
    ],
    "mwhth": [
        "mwh them",
        "mwh threm",
        "nwh therm",
        "mwhth",
        "mwh therm",
        "mwh",
        "mwh therms.",
        "mwh term.uts",
        "mwh thermal",
        "mwh thermals",
        "mw hr therm",
        "mwh therma",
        "mwh therm.uts",
    ],
    "unknown": [
        "",
        "1265",
        "mwh units",
        "composite",
        "therms",
        "n/a",
        "mbtu/kg",
        "uranium 235",
        "oil",
        "ccf",
        "2261",
        "uo2",
        "(7)",
        "oil #2",
        "oil #6",
        '\x99å\x83\x90?"',
        "dekatherm",
        "0",
        "nuclear",
        "gas",
        "62,679",
        "na",
        "uranium",
        "oil/gas",
        "thermal",
        "(thermal)",
        "se uo2",
        "181679",
        "83",
        "3070",
        "248",
        "273976",
        "747",
        "-",
        "are total",
        "pr. creek",
        "decatherms",
        "uramium",
        ".",
        "total pr crk",
        ">>>>>>>>",
        "all",
        "total",
        "alternative-t",
        "oil-mcf",
        "3303671",
        "929",
        "7182175",
        "319",
        "1490442",
        "10881",
        "1363663",
        "7171",
        "1726497",
        "4783",
        "7800",
        "12559",
        "2398",
        "creek fuels",
        "propane-barre",
        "509",
        "barrels/mcf",
        "propane-bar",
        "4853325",
        "4069628",
        "1431536",
        "708903",
        "mcf/oil (1000",
        "344",
        'å?"',
        "mcf / gallen",
    ],
}
"""
A dictionary linking fuel units (keys) to lists of various strings representing those
fuel units (values)
"""
# NOTE: four entries were accidental implicit string concatenations (missing
# commas), producing garbage strings like "40% share steamsteam (2)" that could
# never match real data. Each component string already appears elsewhere in the
# dict (e.g. "diesel turbine" under combustion_turbine), so the fused entries
# have simply been removed. Exact within-list duplicates were also removed.
PLANT_KIND_STRINGS: Dict[str, List[str]] = {
    "steam": [
        "coal",
        "steam",
        "steam units 1 2 3",
        "steam units 4 5",
        "steam fossil",
        "steam turbine",
        "steam a",
        "steam 100",
        "steams",
        "steam 1",
        "steam retired 2013",
        "stream",
        "steam units 1,2,3",
        "steam units 4&5",
        "steam units 4&6",
        "steam conventional",
        "unit total-steam",
        "unit total steam",
        "*resp. share steam",
        "resp. share steam",
        "steam (see note 1,",
        "steam (see note 3)",
        "mpc 50%share steam",
        "steam (3)",
        "steam (4)",
        "steam (5)",
        "steam (6)",
        "steam (7)",
        "steam (8)",
        "steam units 1 and 2",
        "steam units 3 and 4",
        "steam (note 1)",
        "steam (retired)",
        "steam (leased)",
        "coal-fired steam",
        "oil-fired steam",
        "steam/fossil",
        "steam (a,b)",
        "steam (a)",
        "stean",
        "steam-internal comb",
        "steam (see notes)",
        "steam units 4 & 6",
        "resp share stm note3",
        "mpc50% share steam",
        "mpc40%share steam",
        "steam - 64%",
        "steam - 100%",
        "steam (1) & (2)",
        "resp share st note3",
        "mpc 50% shares steam",
        "steam-64%",
        "steam-100%",
        "steam (see note 1)",
        "mpc 50% share steam",
        "steam units 1, 2, 3",
        "steam units 4, 5",
        "steam (2)",
        "steam (1)",
        "steam 4, 5",
        "steam - 72%",
        "steam (incl i.c.)",
        "steam- 72%",
        "steam;retired - 2013",
        "respondent's sh.-st.",
        "respondent's sh-st",
        "40% share steam",
        "resp share st note 3",
        "\x02steam (1)",
        "coal fired steam tur",
        "steam- 64%",
    ],
    "combustion_turbine": [
        "combustion turbine",
        "gt",
        "gas turbine",
        "gas turbine # 1",
        "gas turbine (note 1)",
        "gas turbines",
        "simple cycle",
        "comb.turb.peak.units",
        "com turbine peaking",
        "gas turbine peaking",
        "comb turb peaking",
        "combustine turbine",
        "comb. turine",
        "conbustion turbine",
        "gas turbine (leased)",
        "combustion tubine",
        "gas turb",
        "gas turbine peaker",
        "gtg/gas",
        "simple cycle turbine",
        "gas-turbine",
        "gas turbine-simple",
        "gas turbine - note 1",
        "gas turbine #1",
        "gasturbine",
        "combustionturbine",
        "gas turbine (2)",
        "comb turb peak units",
        "jet engine",
        "jet powered turbine",
        "*gas turbine",
        "gas turb.(see note5)",
        "gas turb. (see note",
        "combutsion turbine",
        "combustion turbin",
        "gas turbine-unit 2",
        "gas - turbine",
        "comb turbine peaking",
        "gas expander turbine",
        "jet turbine",
        "gas turbin (lease",
        "gas turbine (leased",
        "gas turbine/int. cm",
        "comb.turb-gas oper.",
        "comb.turb.gas/oil op",
        "comb.turb.oil oper.",
        "jet",
        "comb. turbine (a)",
        "gas turb.(see notes)",
        "gas turb(see notes)",
        "comb. turb-gas oper",
        "comb.turb.oil oper",
        "gas turbin (leasd)",
        "gas turbne/int comb",
        "gas turbine (note1)",
        "combution turbin",
        "* gas turbine",
        "add to gas turbine",
        "gas turbine (a)",
        "gas turbinint comb",
        "gas turbine (note 3)",
        "resp share gas note3",
        "gas trubine",
        "*gas turbine(note3)",
        "gas turbine note 3,6",
        "gas turbine note 4,6",
        "gas turbine peakload",
        "combusition turbine",
        "gas turbine (lease)",
        "comb. turb-gas oper.",
        "combution turbine",
        "combusion turbine",
        "comb. turb. oil oper",
        "combustion burbine",
        "combustion and gas",
        "comb. turb.",
        "gas turbine (lease",
        "gas turbine (leasd)",
        "gas turbine/int comb",
        "*gas turbine(note 3)",
        "gas turbine (see nos",
        "i.c.e./gas turbine",
        "gas turbine/intcomb",
        "cumbustion turbine",
        "gas turb, int. comb.",
        "gas turb, diesel",
        "gas turb, int. comb",
        "i.c.e/gas turbine",
        "diesel turbine",
        "comubstion turbine",
        "i.c.e. /gas turbine",
        "i.c.e/ gas turbine",
        "i.c.e./gas tubine",
        "gas turbine; retired",
    ],
    "combined_cycle": [
        "Combined cycle",
        "combined cycle",
        "combined",
        "gas & steam turbine",
        "gas turb. & heat rec",
        "com. cyc",
        "com. cycle",
        "gas turb-combined cy",
        "combined cycle ctg",
        "combined cycle - 40%",
        "com cycle gas turb",
        "combined cycle oper",
        "gas turb/comb. cyc",
        "combine cycle",
        "cc",
        "comb. cycle",
        "steam and cc",
        "steam cc",
        "gas steam",
        "ctg steam gas",
        "steam comb cycle",
        "gas/steam comb. cycl",
        "steam & gas turbine",
        "gas trb & heat rec",
        "steam & combined ce",
        "st/gas turb comb cyc",
        "gas tur & comb cycl",
        "combined cycle (a,b)",
        "gas turbine/ steam",
        "steam/gas turb.",
        "steam & comb cycle",
        "gas/steam comb cycle",
        "comb cycle (a,b)",
        "igcc",
        "steam/gas turbine",
        "gas turbine / steam",
        "gas tur & comb cyc",
        "comb cyc (a) (b)",
        "comb cycle",
        "comb cyc",
        "combined turbine",
        "combine cycle oper",
        "comb cycle/steam tur",
        "cc / gas turb",
        "steam (comb. cycle)",
        "steam & cc",
        "gas turbine/steam",
        "gas turb/cumbus cycl",
        "gas turb/comb cycle",
        "gasturb/comb cycle",
        "gas turb/cumb. cyc",
        "igcc/gas turbine",
        "gas / steam",
        "ctg/steam-gas",
        "ctg/steam -gas",
        "gas fired cc turbine",
        "combinedcycle",
        "comb cycle gas turb",
        "combined cycle opern",
        "comb. cycle gas turb",
        "ngcc",
    ],
    "nuclear": [
        "nuclear",
        "nuclear (3)",
        "steam(nuclear)",
        "nuclear turbine",
        "nuclear - steam",
        "nuclear (a)(b)(c)",
        "nuclear (b)(c)",
        "* nuclear",
        "nuclear (b) (c)",
        "nuclear (see notes)",
        "steam (nuclear)",
        "* nuclear (note 2)",
        "nuclear (note 2)",
        "nuclear (see note 2)",
        "nuclear(see note4)",
        "nuclear steam",
        "nuclear(see notes)",
        "nuclear-steam",
        "nuclear (see note 3)",
    ],
    "geothermal": ["steam - geothermal", "steam_geothermal", "geothermal"],
    "internal_combustion": [
        "ic",
        "internal combustion",
        "internal comb.",
        "int combust (note 1)",
        "int. combust (note1)",
        "int.combustine",
        "comb. cyc",
        "internal comb",
        "diesel",
        "diesel engine",
        "int combust - note 1",
        "int. combust - note1",
        "internal comb recip",
        "reciprocating engine",
        "comb. turbine",
        "internal combust.",
        "int. combustion (1)",
        "*int combustion (1)",
        "*internal combust'n",
        "internal",
        "steam internal comb",
        "combustion",
        "int. combustion",
        "int combust (note1)",
        "int. combustine",
        "internl combustion",
        "*int. combustion (1)",
        "internal conbustion",
    ],
    "wind": [
        "wind",
        "wind energy",
        "wind turbine",
        "wind - turbine",
        "wind generation",
        "wind turbin",
    ],
    "photovoltaic": ["solar photovoltaic", "photovoltaic", "solar", "solar project"],
    "solar_thermal": ["solar thermal"],
    "unknown": [
        "",
        "n/a",
        "see pgs 402.1-402.3",
        "see pgs 403.1-403.9",
        "respondent's share",
        "--",
        "(see note 7)",
        "other",
        "not applicable",
        "peach bottom",
        "none.",
        "fuel facilities",
        "0",
        "not in service",
        "none",
        "common expenses",
        "expenses common to",
        "retired in 1981",
        "retired in 1978",
        "na",
        "unit total (note3)",
        "unit total (note2)",
        "resp. share (note2)",
        "resp. share (note8)",
        "resp. share (note 9)",
        "resp. share (note11)",
        "resp. share (note4)",
        "resp. share (note6)",
        "conventional",
        "expenses commom to",
        "not in service in",
        "unit total (note 3)",
        "unit total (note 2)",
        "resp. share (note 8)",
        "resp. share (note 3)",
        "resp. share note 11",
        "resp. share (note 4)",
        "resp. share (note 6)",
        "(see note 5)",
        "resp. share (note 2)",
        "package",
        "(left blank)",
        "common",
        "0.0000",
        "other generation",
        "resp share (note 11)",
        "retired",
        "storage/pipelines",
        "sold april 16, 1999",
        "sold may 07, 1999",
        "plants sold in 1999",
        "gas",
        "not applicable.",
        "resp. share - note 2",
        "resp. share - note 8",
        "resp. share - note 9",
        "resp share - note 11",
        "resp. share - note 4",
        "resp. share - note 6",
        "plant retired- 2013",
        "retired - 2013",
        "resp share - note 5",
        "resp. share - note 7",
        "non-applicable",
        "other generation plt",
        "combined heat/power",
        "oil",
    ],
}
"""
A mapping from canonical plant kinds (keys) to the associated freeform strings (values)
identified as being associated with that kind of plant in the FERC Form 1 raw data.
There are many strings that weren't categorized, Solar and Solar Project were not
classified as these do not indicate if they are solar thermal or photovoltaic. Variants
on Steam (e.g. "steam 72" and "steam and gas") were classified based on additional
research of the plants on the Internet.
"""
# Canonical construction types mapped to the raw free-form strings observed in
# the FERC Form 1 data. Within-list duplicates are harmless (the cleaning step
# builds a string -> category mapping), but a string must not appear under two
# different categories.
CONSTRUCTION_TYPE_STRINGS: Dict[str, List[str]] = {
    "outdoor": [
        "outdoor",
        "outdoor boiler",
        "full outdoor",
        "outdoor boiler",
        "outdoor boilers",
        "outboilers",
        "fuel outdoor",
        "full outdoor",
        "outdoors",
        "outdoor",
        "boiler outdoor& full",
        "boiler outdoor&full",
        "outdoor boiler& full",
        "full -outdoor",
        "outdoor steam",
        "outdoor boiler",
        "ob",
        "outdoor automatic",
        "outdoor repower",
        "full outdoor boiler",
        "fo",
        "outdoor boiler & ful",
        "full-outdoor",
        "fuel outdoor",
        "outoor",
        "outdoor",
        "outdoor boiler&full",
        "boiler outdoor &full",
        "outdoor boiler &full",
        "boiler outdoor & ful",
        "outdoor-boiler",
        "outdoor - boiler",
        "outdoor const.",
        "4 outdoor boilers",
        "3 outdoor boilers",
        "full outdoor",
        "full outdoors",
        "full oudoors",
        "outdoor (auto oper)",
        "outside boiler",
        "outdoor boiler&full",
        "outdoor hrsg",
        "outdoor hrsg",
        "outdoor-steel encl.",
        "boiler-outdr & full",
        "con.& full outdoor",
        "partial outdoor",
        "outdoor (auto. oper)",
        "outdoor (auto.oper)",
        "outdoor construction",
        "1 outdoor boiler",
        "2 outdoor boilers",
        "outdoor enclosure",
        "2 outoor boilers",
        "boiler outdr.& full",
        "boiler outdr. & full",
        "ful outdoor",
        "outdoor-steel enclos",
        "outdoor (auto oper.)",
        "con. & full outdoor",
        "outdore",
        "boiler & full outdor",
        "full & outdr boilers",
        "outodoor (auto oper)",
        "outdoor steel encl.",
        "full outoor",
        "boiler & outdoor ful",
        "otdr. blr. & f. otdr",
        "f.otdr & otdr.blr.",
        "oudoor (auto oper)",
        "outdoor constructin",
        "f. otdr. & otdr. blr",
        "outdoor boiler & fue",
        "outdoor boiler &fuel",
    ],
    "semioutdoor": [
        "more than 50% outdoo",
        "more than 50% outdos",
        "over 50% outdoor",
        "over 50% outdoors",
        "semi-outdoor",
        "semi - outdoor",
        "semi outdoor",
        "semi-enclosed",
        "semi-outdoor boiler",
        "semi outdoor boiler",
        "semi- outdoor",
        "semi - outdoors",
        # BUG FIX: these two entries were fused into a single unmatched string
        # ("semi -outdoorconven & semi-outdr") by implicit string literal
        # concatenation -- a missing comma. They are now separate entries.
        "semi -outdoor",
        "conven & semi-outdr",
        "conv & semi-outdoor",
        "conv & semi- outdoor",
        "convent. semi-outdr",
        "conv. semi outdoor",
        "conv(u1)/semiod(u2)",
        "conv u1/semi-od u2",
        "conv-one blr-semi-od",
        "convent semioutdoor",
        "conv. u1/semi-od u2",
        "conv - 1 blr semi od",
        "conv. ui/semi-od u2",
        "conv-1 blr semi-od",
        "conven. semi-outdoor",
        "conv semi-outdoor",
        "u1-conv./u2-semi-od",
        "u1-conv./u2-semi -od",
        "convent. semi-outdoo",
        "u1-conv. / u2-semi",
        "conven & semi-outdr",
        "semi -outdoor",
        "outdr & conventnl",
        "conven. full outdoor",
        "conv. & outdoor blr",
        "conv. & outdoor blr.",
        "conv. & outdoor boil",
        "conv. & outdr boiler",
        "conv. & out. boiler",
        "convntl,outdoor blr",
        "outdoor & conv.",
        "2 conv., 1 out. boil",
        "outdoor/conventional",
        "conv. boiler outdoor",
        "conv-one boiler-outd",
        "conventional outdoor",
        "conventional outdor",
        "conv. outdoor boiler",
        "conv.outdoor boiler",
        "conventional outdr.",
        "conven,outdoorboiler",
        "conven full outdoor",
        "conven,full outdoor",
        "1 out boil, 2 conv",
        "conv. & full outdoor",
        "conv. & outdr. boilr",
        "conv outdoor boiler",
        "convention. outdoor",
        "conv. sem. outdoor",
        "convntl, outdoor blr",
        "conv & outdoor boil",
        "conv & outdoor boil.",
        "outdoor & conv",
        "conv. broiler outdor",
        "1 out boilr, 2 conv",
        "conv.& outdoor boil.",
        "conven,outdr.boiler",
        "conven,outdr boiler",
        "outdoor & conventil",
        "1 out boilr 2 conv",
        "conv & outdr. boilr",
        "conven, full outdoor",
        "conven full outdr.",
        "conven, full outdr.",
        "conv/outdoor boiler",
        "convnt'l outdr boilr",
        "1 out boil 2 conv",
        "conv full outdoor",
        "conven, outdr boiler",
        "conventional/outdoor",
        "conv&outdoor boiler",
        "outdoor & convention",
        "conv & outdoor boilr",
        "conv & full outdoor",
        "convntl. outdoor blr",
        "conv - ob",
        "1conv'l/2odboilers",
        "2conv'l/1odboiler",
        "conv-ob",
        "conv.-ob",
        "1 conv/ 2odboilers",
        "2 conv /1 odboilers",
        "conv- ob",
        "conv -ob",
        "con sem outdoor",
        "cnvntl, outdr, boilr",
        "less than 50% outdoo",
        "under 50% outdoor",
        "under 50% outdoors",
        "1cnvntnl/2odboilers",
        "2cnvntnl1/1odboiler",
        "con & ob",
        "combination (b)",
        "indoor & outdoor",
        "conven. blr. & full",
        "conv. & otdr. blr.",
        "combination",
        "indoor and outdoor",
        "conven boiler & full",
        "2conv'l/10dboiler",
        "4 indor/outdr boiler",
        "4 indr/outdr boilerr",
        "4 indr/outdr boiler",
        "indoor & outdoof",
    ],
    "conventional": [
        "conventional",
        "conventional",
        "conventional boiler",
        "conv-b",
        "conventionall",
        "convention",
        "conventional",
        "coventional",
        "conven full boiler",
        "c0nventional",
        "conventtional",
        # BUG FIX: "convential" was fused with "underground" by a missing
        # comma, producing the unmatched string "conventialunderground".
        # "underground" is already categorized under "unknown" below (with
        # "conv. underground" and "conven. underground"), so only
        # "convential" is kept here to avoid a cross-category conflict.
        "convential",
        "conventional bulb",
        "conventrional",
        "*conventional",
        "convential",
        "convetional",
        "conventioanl",
        "conventioinal",
        "conventaional",
        "indoor construction",
        "convenional",
        "conventional steam",
        "conventinal",
        "convntional",
        "conventionl",
        "conventionsl",
        "conventiional",
        "convntl steam plants",
        "indoor const.",
        "full indoor",
        "indoor",
        "indoor automatic",
        "indoor boiler",
        "(peak load) indoor",
        "conventionl,indoor",
        "conventionl, indoor",
        "conventional, indoor",
        "conventional;outdoor",
        "conven./outdoor",
        "conventional;semi-ou",
        "comb. cycle indoor",
        "3 indoor boiler",
        "2 indoor boilers",
        "1 indoor boiler",
        "2 indoor boiler",
        "3 indoor boilers",
        "fully contained",
        "conv - b",
        "conventional/boiler",
        "cnventional",
        "comb. cycle indooor",
        "sonventional",
        "ind enclosures",
        "conentional",
        "conventional - boilr",
        "indoor boiler and st",
    ],
    "unknown": [
        "",
        "automatic operation",
        "comb. turb. installn",
        "comb. turb. instaln",
        "com. turb. installn",
        "n/a",
        "for detailed info.",
        "for detailed info",
        "combined cycle",
        "na",
        "not applicable",
        "gas",
        "heated individually",
        "metal enclosure",
        "pressurized water",
        "nuclear",
        "jet engine",
        "gas turbine",
        "storage/pipelines",
        "0",
        "during 1994",
        "peaking - automatic",
        "gas turbine/int. cm",
        "2 oil/gas turbines",
        "wind",
        "package",
        "mobile",
        "auto-operated",
        "steam plants",
        "other production",
        "all nuclear plants",
        "other power gen.",
        "automatically operad",
        "automatically operd",
        "circ fluidized bed",
        "jet turbine",
        "gas turbne/int comb",
        "automatically oper.",
        "retired 1/1/95",
        "during 1995",
        "1996. plant sold",
        "reactivated 7/1/96",
        "gas turbine/int comb",
        "portable",
        "head individually",
        "automatic opertion",
        "peaking-automatic",
        "cycle",
        "full order",
        "circ. fluidized bed",
        "gas turbine/intcomb",
        "0.0000",
        "none",
        "2 oil / gas",
        "block & steel",
        "and 2000",
        "comb.turb. instaln",
        "automatic oper.",
        "pakage",
        "---",
        "n/a (ct)",
        "comb turb instain",
        "ind encloures",
        "2 oil /gas turbines",
        "combustion turbine",
        "1970",
        "gas/oil turbines",
        "combined cycle steam",
        "pwr",
        "2 oil/ gas",
        "2 oil / gas turbines",
        "gas / oil turbines",
        "no boiler",
        "internal combustion",
        "gasturbine no boiler",
        "boiler",
        "tower -10 unit facy",
        "gas trubine",
        "4 gas/oil trubines",
        "2 oil/ 4 gas/oil tur",
        "5 gas/oil turbines",
        "tower 16",
        "2 on 1 gas turbine",
        "tower 23",
        "tower -10 unit",
        "tower - 101 unit",
        "3 on 1 gas turbine",
        "tower - 10 units",
        "tower - 165 units",
        "wind turbine",
        "fixed tilt pv",
        "tracking pv",
        "o",
        "wind trubine",
        "subcritical",
        "sucritical",
        "simple cycle",
        "simple & reciprocat",
        "solar",
        "pre-fab power plant",
        "prefab power plant",
        "prefab. power plant",
        "pump storage",
        "underground",
        "see page 402",
        "conv. underground",
        "conven. underground",
        "conventional (a)",
        "non-applicable",
    ],
}
"""
A dictionary of construction types (keys) and lists of construction type strings
associated with each type (values) from FERC Form 1.
There are many strings that weren't categorized, including crosses between conventional
and outdoor, PV, wind, combined cycle, and internal combustion. The lists are broken out
into the two types specified in Form 1: conventional and outdoor. These lists are
inclusive so that variants of conventional (e.g. "conventional full") and outdoor (e.g.
"outdoor full" and "outdoor hrsg") are included.
"""
##############################################################################
# FERC TRANSFORM HELPER FUNCTIONS ############################################
##############################################################################
def unpack_table(ferc1_df, table_name, data_cols, data_rows):
    """Normalize a row-and-column based FERC Form 1 table.
    Pulls the named database table from the FERC Form 1 DB and uses the corresponding
    ferc1_row_map to unpack the row_number coded data.
    Args:
        ferc1_df (pandas.DataFrame): Raw FERC Form 1 DataFrame from the DB.
        table_name (str): Original name of the FERC Form 1 DB table.
        data_cols (list): List of strings corresponding to the original FERC Form 1
            database table column labels -- these are the columns of data that we are
            extracting (it can be a subset of the columns which are present in the
            original database).
        data_rows (list): List of row_names to extract, as defined in the FERC 1 row
            maps. Set to slice(None) if you want all rows.
    Returns:
        pandas.DataFrame: one column per (data_col, row_name) combination, indexed
        by respondent, report year/period, and supplement number.
    """
    # Read in the corresponding row map, transposed so years are the index:
    row_map = (
        pd.read_csv(
            importlib.resources.open_text(
                "pudl.package_data.ferc1.row_maps", f"{table_name}.csv"
            ),
            index_col=0,
            comment="#",
        )
        .copy()
        .transpose()
        .rename_axis(index="year_index", columns=None)
    )
    row_map.index = row_map.index.astype(int)
    # For each year, rename row numbers to variable names based on row_map.
    # Collect the per-year frames in a list and concatenate them once at the
    # end -- repeatedly concatenating inside the loop was quadratic.
    frames = []
    for year in row_map.index:
        rename_dict = {v: k for k, v in dict(row_map.loc[year, :]).items()}
        # Drop the -1 key (presumably a "no row" placeholder in the row maps --
        # TODO confirm) so those row numbers are left un-renamed.
        _ = rename_dict.pop(-1, None)
        df = ferc1_df.loc[ferc1_df.report_year == year].copy()
        df.loc[:, "row_name"] = df.loc[:, "row_number"].replace(rename_dict)
        frames.append(df)
    # Then concatenate all the years together:
    out_df = pd.concat(frames, axis="index") if frames else pd.DataFrame()
    # Is this list of index columns universal? Or should they be an argument?
    idx_cols = ["respondent_id", "report_year", "report_prd", "spplmnt_num", "row_name"]
    logger.info(
        f"{len(out_df[out_df.duplicated(idx_cols)])/len(out_df):.4%} "
        f"of unpacked records were duplicates, and discarded."
    )
    # Index the dataframe based on the list of index_cols
    # Unstack the dataframe based on variable names
    out_df = (
        out_df.loc[:, idx_cols + data_cols]
        # These lost records should be minimal. If not, something's wrong.
        .drop_duplicates(subset=idx_cols)
        .set_index(idx_cols)
        .unstack("row_name")
        .loc[:, (slice(None), data_rows)]
    )
    return out_df
def cols_to_cats(df, cat_name, col_cats):
    """Turn top-level MultiIndex columns into a categorical column.
    In some cases FERC Form 1 data comes with many different types of related values
    interleaved in the same table -- e.g. current year and previous year income -- this
    can result in DataFrames that are hundreds of columns wide, which is unwieldy. This
    function takes those top level MultiIndex labels and turns them into categories in a
    single column, which can be used to select a particular type of report.
    Args:
        df (pandas.DataFrame): the dataframe to be simplified.
        cat_name (str): the label of the column to be created indicating what
            MultiIndex label the values came from.
        col_cats (dict): a dictionary with top level MultiIndex labels as keys,
            and the category to which they should be mapped as values.
    Returns:
        pandas.DataFrame: A re-shaped/re-labeled dataframe with one fewer levels of
        MultiIndex in the columns, and an additional column containing the assigned
        labels.
    """
    # Collect the per-category frames and concatenate once at the end --
    # repeatedly concatenating inside the loop was quadratic.
    frames = []
    for col, cat in col_cats.items():
        logger.info(f"Col: {col}, Cat: {cat}")
        # Select this top-level label's columns, dropping all-NA rows:
        tmp_df = df.loc[:, col].copy().dropna(how="all")
        tmp_df.loc[:, cat_name] = cat
        frames.append(tmp_df)
    out_df = pd.concat(frames) if frames else pd.DataFrame()
    return out_df.reset_index()
def _clean_cols(df, table_name):
"""Adds a FERC record ID and drop FERC columns not to be loaded into PUDL.
It is often useful to be able to tell exactly which record in the FERC Form 1
database a given record within the PUDL database came from. Within each FERC Form 1
table, each record is supposed to be uniquely identified by the combination of:
report_year, report_prd, respondent_id, spplmnt_num, row_number.
So this function takes a dataframe, checks to make sure it contains each of those
columns and that none of them are NULL, and adds a new column to the dataframe
containing a string of the format:
{table_name}_{report_year}_{report_prd}_{respondent_id}_{spplmnt_num}_{row_number}
In some PUDL FERC Form 1 tables (e.g. plant_in_service_ferc1) a single row is
re-organized into several new records in order to normalize the data and ensure it
is stored in a "tidy" format. In such cases each of the resulting PUDL records will
have the same ``record_id``. Otherwise, the ``record_id`` is expected to be unique
within each FERC Form 1 table. However there are a handful of cases in which this
uniqueness constraint is violated due to data reporting issues in FERC Form 1.
In addition to those primary key columns, there are some columns which are not
meaningful or useful in the context of PUDL, but which show up in virtually every
FERC table, and this function drops them if they are present. These columns include:
row_prvlg, row_seq, item, record_number (a temporary column used in plants_small)
and all the footnote columns, which end in "_f".
Args:
df (pandas.DataFrame): The DataFrame in which the function looks for columns
for the unique identification of FERC records, and ensures that those
columns are not NULL.
table_name (str): The name of the table that we are cleaning.
Returns:
pandas.DataFrame: The same DataFrame with a column appended containing a string
of the format
{table_name}_{report_year}_{report_prd}_{respondent_id}_{spplmnt_num}_{row_number}
Raises:
AssertionError: If the table input contains NULL columns
"""
# Make sure that *all* of these columns exist in the proffered table:
for field in [
"report_year",
"report_prd",
"respondent_id",
"spplmnt_num",
"row_number",
]:
if field in df.columns:
if df[field].isnull().any():
raise AssertionError(
f"Null field {field} found in ferc1 table {table_name}."
)
# Create a unique inter-year FERC table record ID:
df["record_id"] = (
table_name
+ "_"
+ df.report_year.astype(str)
+ "_"
+ df.report_prd.astype(str)
+ "_"
+ df.respondent_id.astype(str)
+ "_"
+ df.spplmnt_num.astype(str)
)
# Because of the way we are re-organizing columns and rows to create well
# normalized tables, there may or may not be a row number available.
if "row_number" in df.columns:
df["record_id"] = df["record_id"] + "_" + df.row_number.astype(str)
# Check to make sure that the generated record_id is unique... since
# that's kind of the whole point. There are couple of genuine bad
# records here that are taken care of in the transform step, so just
# print a warning.
n_dupes = df.record_id.duplicated().values.sum()
if n_dupes:
dupe_ids = df.record_id[df.record_id.duplicated()].values
logger.warning(
f"{n_dupes} duplicate record_id values found "
f"in pre-transform table {table_name}: {dupe_ids}."
)
# Drop any _f columns... since we're not using the FERC Footnotes...
# Drop columns and don't complain about it if they don't exist:
no_f = [c for c in df.columns if not re.match(".*_f$", c)]
df = (
df.loc[:, no_f]
.drop(
[
"spplmnt_num",
"row_number",
"row_prvlg",
"row_seq",
"report_prd",
"item",
"record_number",
],
errors="ignore",
axis="columns",
)
.rename(columns={"respondent_id": "utility_id_ferc1"})
)
return df
def _multiplicative_error_correction(tofix, mask, minval, maxval, mults):
"""Corrects data entry errors where data being multiplied by a factor.
In many cases we know that a particular column in the database should have a value
in a particular rage (e.g. the heat content of a ton of coal is a well defined
physical quantity -- it can be 15 mmBTU/ton or 22 mmBTU/ton, but it can't be 1
mmBTU/ton or 100 mmBTU/ton). Sometimes these fields are reported in the wrong units
(e.g. kWh of electricity generated rather than MWh) resulting in several
distributions that have a similar shape showing up at different ranges of value
within the data. This function takes a one dimensional data series, a description
of a valid range for the values, and a list of factors by which we expect to see
some of the data multiplied due to unit errors. Data found in these "ghost"
distributions are multiplied by the appropriate factor to bring them into the
expected range.
Data values which are not found in one of the acceptable multiplicative ranges are
set to NA.
Args:
tofix (pandas.Series): A 1-dimensional data series containing the values to be
fixed.
mask (pandas.Series): A 1-dimensional masking array of True/False values, which
will be used to select a subset of the tofix series onto which we will apply
the multiplicative fixes.
min (float): the minimum realistic value for the data series.
max (float): the maximum realistic value for the data series.
mults (list of floats): values by which "real" data may have been multiplied
due to common data entry errors. These values both show us where to look in
the full data series to find recoverable data, and also tell us by what
factor those values need to be multiplied to bring them back into the
reasonable range.
Returns:
fixed (pandas.Series): a data series of the same length as the input, but with
the transformed values.
"""
# Grab the subset of the input series we are going to work on:
records_to_fix = tofix[mask]
# Drop those records from our output series
fixed = tofix.drop(records_to_fix.index)
# Iterate over the multipliers, applying fixes to outlying populations
for mult in mults:
records_to_fix = records_to_fix.apply(
lambda x: x * mult if x > minval / mult and x < maxval / mult else x
)
# Set any record that wasn't inside one of our identified populations to
# NA -- we are saying that these are true outliers, which can't be part
# of the population of values we are examining.
records_to_fix = records_to_fix.apply(
lambda x: np.nan if x < minval or x > maxval else x
)
# Add our fixed records back to the complete data series and return it
fixed = pd.concat([fixed, records_to_fix])
return fixed
##############################################################################
# DATABASE TABLE SPECIFIC PROCEDURES ##########################################
##############################################################################
def plants_steam(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transform FERC Form 1 plant_steam data for loading into the PUDL Database.
    Cleans the raw large steam plant records (including unit conversions to MW
    and MWh and standardization of the plant kind / construction type strings),
    assigns multi-year FERC plant IDs, validates those IDs, and stores the
    result under the "plants_steam_ferc1" key.
    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.
    Returns:
        dict: of transformed dataframes, including the newly transformed
        plants_steam_ferc1 dataframe.
    """
    raw_steam = ferc1_raw_dfs["plants_steam_ferc1"]
    cleaned_steam = _plants_steam_clean(raw_steam)
    steam_with_ids = _plants_steam_assign_plant_ids(
        cleaned_steam, ferc1_transformed_dfs["fuel_ferc1"]
    )
    plants_steam_validate_ids(steam_with_ids)
    # "unknown" is only a catch-all category -- represent it as missing data.
    steam_with_ids = steam_with_ids.replace(
        {"construction_type": "unknown", "plant_type": "unknown"}, pd.NA
    )
    ferc1_transformed_dfs["plants_steam_ferc1"] = steam_with_ids
    return ferc1_transformed_dfs
def _plants_steam_clean(ferc1_steam_df):
    """Rename, standardize, and sanity-check raw FERC Form 1 steam plant records.
    Renames the raw FERC columns to their PUDL equivalents, adds record IDs,
    simplifies the plant name strings, maps the free-form construction type and
    plant kind strings to canonical categories, nulls out implausible
    construction/installation years, and converts the per-kW, per-kWh, and kWh
    columns into per-MW, per-MWh, and MWh equivalents.
    Raises:
        AssertionError: if any construction_type or plant_type string failed to
            map to a canonical category.
    """
    ferc1_to_pudl_cols = {
        "plant_name": "plant_name_ferc1",
        "yr_const": "construction_year",
        "plant_kind": "plant_type",
        "type_const": "construction_type",
        "asset_retire_cost": "asset_retirement_cost",
        "yr_installed": "installation_year",
        "tot_capacity": "capacity_mw",
        "peak_demand": "peak_demand_mw",
        "plant_hours": "plant_hours_connected_while_generating",
        "plnt_capability": "plant_capability_mw",
        "when_limited": "water_limited_capacity_mw",
        "when_not_limited": "not_water_limited_capacity_mw",
        "avg_num_of_emp": "avg_num_employees",
        "net_generation": "net_generation_kwh",
        "cost_land": "capex_land",
        "cost_structure": "capex_structures",
        "cost_equipment": "capex_equipment",
        "cost_of_plant_to": "capex_total",
        "cost_per_kw": "capex_per_kw",
        "expns_operations": "opex_operations",
        "expns_fuel": "opex_fuel",
        "expns_coolants": "opex_coolants",
        "expns_steam": "opex_steam",
        "expns_steam_othr": "opex_steam_other",
        "expns_transfer": "opex_transfer",
        "expns_electric": "opex_electric",
        "expns_misc_power": "opex_misc_power",
        "expns_rents": "opex_rents",
        "expns_allowances": "opex_allowances",
        "expns_engnr": "opex_engineering",
        "expns_structures": "opex_structures",
        "expns_boiler": "opex_boiler",
        "expns_plants": "opex_plants",
        "expns_misc_steam": "opex_misc_steam",
        "tot_prdctn_expns": "opex_production_total",
        "expns_kwh": "opex_per_kwh",
    }
    steam_df = ferc1_steam_df.rename(columns=ferc1_to_pudl_cols)
    # Add record IDs and strip the FERC bookkeeping/footnote columns:
    steam_df = _clean_cols(steam_df, "f1_steam")
    # Normalize plant name strings (they participate in many foreign keys):
    steam_df = pudl.helpers.simplify_strings(steam_df, ["plant_name_ferc1"])
    # Map the free-form type strings onto canonical categories:
    steam_df = pudl.helpers.cleanstrings(
        steam_df,
        ["construction_type", "plant_type"],
        [CONSTRUCTION_TYPE_STRINGS, PLANT_KIND_STRINGS],
        unmapped=pd.NA,
    )
    # Null out years that fall outside the plausible reporting range:
    steam_df = pudl.helpers.oob_to_nan(
        steam_df,
        cols=["construction_year", "installation_year"],
        lb=1850,
        ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1,
    )
    # Convert kW/kWh based columns into our preferred MW/MWh units:
    steam_df = steam_df.assign(
        capex_per_mw=lambda x: 1000.0 * x.capex_per_kw,
        opex_per_mwh=lambda x: 1000.0 * x.opex_per_kwh,
        net_generation_mwh=lambda x: x.net_generation_kwh / 1000.0,
    ).drop(columns=["capex_per_kw", "opex_per_kwh", "net_generation_kwh"])
    # Every type string should now be categorized; NA means the dictionaries
    # above are missing an entry.
    for col in ["construction_type", "plant_type"]:
        if steam_df[col].isnull().any():
            raise AssertionError(
                f"NA values found in {col} column during FERC 1 steam clean, add string to dictionary for this column"
            )
    return steam_df
def _plants_steam_assign_plant_ids(ferc1_steam_df, ferc1_fuel_df):
    """Assign IDs to the large steam plants.
    FERC does not assign stable multi-year plant IDs, so similar records are
    grouped across years with a classifier (using plant attributes plus fuel
    consumption fractions as features), the resulting record associations are
    turned into a graph, and each connected component is assigned one
    plant_id_ferc1.
    Args:
        ferc1_steam_df (pandas.DataFrame): cleaned FERC Form 1 steam records.
        ferc1_fuel_df (pandas.DataFrame): transformed FERC Form 1 fuel records,
            used to build the fuel fraction features.
    Returns:
        pandas.DataFrame: the steam records with a plant_id_ferc1 column added.
    Raises:
        AssertionError: if any steam record fails to receive a plant ID.
    """
    ###########################################################################
    # FERC PLANT ID ASSIGNMENT
    ###########################################################################
    # Now we need to assign IDs to the large steam plants, since FERC doesn't
    # do this for us.
    logger.info("Identifying distinct large FERC plants for ID assignment.")
    # scikit-learn still doesn't deal well with NA values (this will be fixed
    # eventually) We need to massage the type and missing data for the
    # Classifier to work.
    ferc1_steam_df = pudl.helpers.fix_int_na(
        ferc1_steam_df, columns=["construction_year"]
    )
    # Grab fuel consumption proportions for use in assigning plant IDs:
    fuel_fractions = fuel_by_plant_ferc1(ferc1_fuel_df)
    ffc = list(fuel_fractions.filter(regex=".*_fraction_mmbtu$").columns)
    ferc1_steam_df = ferc1_steam_df.merge(
        fuel_fractions[["utility_id_ferc1", "plant_name_ferc1", "report_year"] + ffc],
        on=["utility_id_ferc1", "plant_name_ferc1", "report_year"],
        how="left",
    )
    # We need to fill the null values for these numerical feature vectors with
    # zeros. not ideal, but the model requires dealing with nulls
    null_to_zero = ffc + ["capacity_mw"]
    ferc1_steam_df[null_to_zero] = ferc1_steam_df[null_to_zero].fillna(value=0.0)
    # Train the classifier using DEFAULT weights, parameters not listed here.
    ferc1_clf = pudl.transform.ferc1.make_ferc1_clf(ferc1_steam_df)
    ferc1_clf = ferc1_clf.fit_transform(ferc1_steam_df)
    # Use the classifier to generate groupings of similar records:
    record_groups = ferc1_clf.predict(ferc1_steam_df.record_id)
    n_tot = len(ferc1_steam_df)
    n_grp = len(record_groups)
    pct_grp = n_grp / n_tot
    logger.info(
        f"Successfully associated {n_grp} of {n_tot} ({pct_grp:.2%}) "
        f"FERC Form 1 plant records with multi-year plant entities."
    )
    record_groups.columns = record_groups.columns.astype(str)
    cols = record_groups.columns
    record_groups = record_groups.reset_index()
    # Now we are going to create a graph (network) that describes all of the
    # binary relationships between a seed_id and the record_ids that it has
    # been associated with in any other year. Each connected component of that
    # graph is a ferc plant time series / plant_id
    logger.info("Assigning IDs to multi-year FERC plant entities.")
    edges_df = pd.DataFrame(columns=["source", "target"])
    for col in cols:
        new_edges = record_groups[["seed_id", col]]
        new_edges = new_edges.rename({"seed_id": "source", col: "target"}, axis=1)
        edges_df = pd.concat([edges_df, new_edges], sort=True)
    # Drop any records where there's no target ID (no match in a year)
    edges_df = edges_df[edges_df.target != ""]
    # We still have to deal with the orphaned records -- any record which
    # wasn't placed in a time series but is still valid should be included as
    # its own independent "plant" for completeness, and use in aggregate
    # analysis.
    orphan_record_ids = np.setdiff1d(
        ferc1_steam_df.record_id.unique(), record_groups.values.flatten()
    )
    logger.info(
        f"Identified {len(orphan_record_ids)} orphaned FERC plant records. "
        f"Adding orphans to list of plant entities."
    )
    orphan_df = pd.DataFrame({"source": orphan_record_ids, "target": orphan_record_ids})
    edges_df = pd.concat([edges_df, orphan_df], sort=True)
    # Use the data frame we've compiled to create a graph
    G = nx.from_pandas_edgelist(  # noqa: N806
        edges_df, source="source", target="target"
    )
    # Find the connected components of the graph
    ferc1_plants = (G.subgraph(c) for c in nx.connected_components(G))
    # Now we'll iterate through the connected components and assign each of
    # them a FERC Plant ID, and pull the results back out into a dataframe:
    plants_w_ids = []
    for plant_id_ferc1, plant in enumerate(ferc1_plants):
        nx.set_edge_attributes(plant, plant_id_ferc1 + 1, name="plant_id_ferc1")
        new_plant_df = nx.to_pandas_edgelist(plant)
        plants_w_ids.append(new_plant_df)
    plants_w_ids = pd.concat(plants_w_ids)
    logger.info(
        f"Successfully Identified {plant_id_ferc1+1-len(orphan_record_ids)} "
        f"multi-year plant entities."
    )
    # Set the construction year back to numeric because it is.
    ferc1_steam_df["construction_year"] = pd.to_numeric(
        ferc1_steam_df["construction_year"], errors="coerce"
    )
    # We don't actually want to save the fuel fractions in this table... they
    # were only here to help us match up the plants.
    ferc1_steam_df = ferc1_steam_df.drop(ffc, axis=1)
    # Now we need a list of all the record IDs, with their associated
    # FERC 1 plant IDs. However, the source-target listing isn't
    # guaranteed to list every one of the nodes in either list, so we
    # need to compile them together to ensure that we get every single one.
    sources = (
        plants_w_ids.drop("target", axis=1)
        .drop_duplicates()
        .rename({"source": "record_id"}, axis=1)
    )
    targets = (
        plants_w_ids.drop("source", axis=1)
        .drop_duplicates()
        .rename({"target": "record_id"}, axis=1)
    )
    plants_w_ids = (
        pd.concat([sources, targets])
        .drop_duplicates()
        .sort_values(["plant_id_ferc1", "record_id"])
    )
    steam_rids = ferc1_steam_df.record_id.values
    # Use a set for membership testing: `rid not in <numpy array>` inside the
    # comprehension below was an O(n^2) linear scan per record.
    pwids_rids = set(plants_w_ids.record_id)
    missing_ids = [rid for rid in steam_rids if rid not in pwids_rids]
    if missing_ids:
        # Report len(missing_ids) -- the actual number of lost record IDs.
        # The previous abs(len(steam_rids) - len(pwids_rids)) could
        # under-count when duplicates offset the omissions.
        raise AssertionError(
            f"Uh oh, we lost {len(missing_ids)} FERC "
            f"steam plant record IDs: {missing_ids}"
        )
    ferc1_steam_df = pd.merge(ferc1_steam_df, plants_w_ids, on="record_id")
    return ferc1_steam_df
def plants_steam_validate_ids(ferc1_steam_df):
    """Check that each plant_id_ferc1 time series has one record per year.
    Logs an error for every (plant_id_ferc1, report_year) combination that
    appears more than once, and an all-clear message if none do.
    Args:
        ferc1_steam_df (pandas.DataFrame): A DataFrame of the data from the FERC 1
            Steam table.
    Returns:
        None
    """
    ##########################################################################
    # FERC PLANT ID ERROR CHECKING STUFF
    ##########################################################################
    # Count how many records share each (plant_id_ferc1, report_year) pair,
    # keeping only the combinations that occur more than once.
    year_dupes = (
        ferc1_steam_df.groupby(["plant_id_ferc1", "report_year"])["utility_id_ferc1"]
        .count()
        .reset_index()
        .rename(columns={"utility_id_ferc1": "year_dupes"})
        .query("year_dupes>1")
    )
    if year_dupes.empty:
        logger.info("No duplicate years found in any plant_id_ferc1. Hooray!")
        return
    # Warn the user about each offending time series (which... we do find,
    # as of writing).
    for dupe in year_dupes.itertuples():
        logger.error(
            f"Found report_year={dupe.report_year} "
            f"{dupe.year_dupes} times in "
            f"plant_id_ferc1={dupe.plant_id_ferc1}"
        )
def fuel(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 fuel data for loading into PUDL Database.
    This process includes converting some columns to be in terms of our preferred units,
    like MWh and mmbtu instead of kWh and btu. Plant names are also standardized
    (stripped & lower). Fuel and fuel unit strings are also standardized using our
    cleanstrings() function and string cleaning dictionaries found above (FUEL_STRINGS,
    etc.). Known unit-related data entry errors are corrected, and records that are
    missing data are dropped.
    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.
    Returns:
        dict: The dictionary of transformed dataframes, with the transformed fuel
        data stored under the "fuel_ferc1" key.
    Raises:
        AssertionError: If any fuel_unit string failed to map to a category.
    """
    # grab table from dictionary of dfs, clean it up a bit
    fuel_ferc1_df = (
        _clean_cols(ferc1_raw_dfs["fuel_ferc1"], "f1_fuel")
        # Standardize plant_name capitalization and remove leading/trailing
        # white space -- necessary b/c plant_name is part of many foreign keys.
        .pipe(pudl.helpers.simplify_strings, ["plant_name"])
        # Take the messy free-form fuel & fuel_unit fields, and do our best to
        # map them to some canonical categories... this is necessarily
        # imperfect (unmapped strings become NA):
        .pipe(
            pudl.helpers.cleanstrings,
            ["fuel", "fuel_unit"],
            [FUEL_STRINGS, FUEL_UNIT_STRINGS],
            unmapped=pd.NA,
        )
        # Fuel cost per kWh is a per-unit value that doesn't make sense to
        # report for a single fuel that may be only a small part of the fuel
        # consumed. "fuel generaton" (sic -- the raw FERC column name) is heat
        # rate, but as it's based only on the heat content of a given fuel
        # which may only be a small portion of the overall fuel consumption,
        # it doesn't make any sense here. Drop it.
        .drop(["fuel_cost_kwh", "fuel_generaton"], axis=1)
        # Convert from BTU/unit of fuel to 1e6 BTU/unit.
        .assign(fuel_avg_mmbtu_per_unit=lambda x: x.fuel_avg_heat / 1e6).drop(
            "fuel_avg_heat", axis=1
        )
        # Rename the columns to match our DB definitions
        .rename(
            columns={
                # FERC 1 DB Name      PUDL DB Name
                "plant_name": "plant_name_ferc1",
                "fuel": "fuel_type_code_pudl",
                "fuel_unit": "fuel_units",
                "fuel_avg_mmbtu_per_unit": "fuel_mmbtu_per_unit",
                "fuel_quantity": "fuel_consumed_units",
                "fuel_cost_burned": "fuel_cost_per_unit_burned",
                "fuel_cost_delvd": "fuel_cost_per_unit_delivered",
                "fuel_cost_btu": "fuel_cost_per_mmbtu",
            }
        )
    )
    # Every fuel_units string should now map to a category; NA means the
    # FUEL_UNIT_STRINGS dictionary is missing an entry.
    if fuel_ferc1_df["fuel_units"].isnull().any():
        raise AssertionError(
            "NA values found in fuel_units column during FERC 1 fuel clean, add string to dictionary"
        )
    #########################################################################
    # CORRECT DATA ENTRY ERRORS #############################################
    #########################################################################
    coal_mask = fuel_ferc1_df["fuel_type_code_pudl"] == "coal"
    gas_mask = fuel_ferc1_df["fuel_type_code_pudl"] == "gas"
    oil_mask = fuel_ferc1_df["fuel_type_code_pudl"] == "oil"
    # Each correction is [column, fuel-type mask, minval, maxval, multipliers]
    # as consumed by _multiplicative_error_correction() below.
    corrections = [
        # mult = 2000: reported in units of lbs instead of short tons
        # mult = 1e6: reported BTUs instead of mmBTUs
        # minval and maxval of 10 and 29 mmBTUs are the range of values
        # specified by EIA 923 instructions at:
        # https://www.eia.gov/survey/form/eia_923/instructions.pdf
        ["fuel_mmbtu_per_unit", coal_mask, 10.0, 29.0, (2e3, 1e6)],
        # mult = 1e-2: reported cents/mmBTU instead of USD/mmBTU
        # minval and maxval of .5 and 7.5 dollars per mmBTUs are the
        # end points of the primary distribution of EIA 923 fuel receipts
        # and cost per mmBTU data weighted by quantity delivered
        ["fuel_cost_per_mmbtu", coal_mask, 0.5, 7.5, (1e-2,)],
        # mult = 1e3: reported fuel quantity in cubic feet, not mcf
        # mult = 1e6: reported fuel quantity in BTU, not mmBTU
        # minval and maxval of .8 and 1.2 mmBTUs are the range of values
        # specified by EIA 923 instructions
        ["fuel_mmbtu_per_unit", gas_mask, 0.8, 1.2, (1e3, 1e6)],
        # mult = 1e-2: reported in cents/mmBTU instead of USD/mmBTU
        # minval and maxval of 1 and 35 dollars per mmBTUs are the
        # end points of the primary distribution of EIA 923 fuel receipts
        # and cost per mmBTU data weighted by quantity delivered
        ["fuel_cost_per_mmbtu", gas_mask, 1, 35, (1e-2,)],
        # mult = 42: reported fuel quantity in gallons, not barrels
        # mult = 1e6: reported fuel quantity in BTU, not mmBTU
        # minval and maxval of 3 and 6.9 mmBTUs are the range of values
        # specified by EIA 923 instructions
        ["fuel_mmbtu_per_unit", oil_mask, 3, 6.9, (42,)],
        # mult = 1e-2: reported in cents/mmBTU instead of USD/mmBTU
        # minval and maxval of 5 and 33 dollars per mmBTUs are the
        # end points of the primary distribution of EIA 923 fuel receipts
        # and cost per mmBTU data weighted by quantity delivered
        ["fuel_cost_per_mmbtu", oil_mask, 5, 33, (1e-2,)],
    ]
    for (coltofix, mask, minval, maxval, mults) in corrections:
        fuel_ferc1_df[coltofix] = _multiplicative_error_correction(
            fuel_ferc1_df[coltofix], mask, minval, maxval, mults
        )
    #########################################################################
    # REMOVE BAD DATA #######################################################
    #########################################################################
    # Drop any records that are missing data. This is a blunt instrument, to
    # be sure. In some cases we lose data here, because some utilities have
    # (for example) a "Total" line w/ only fuel_mmbtu_per_kwh on it. Grr.
    fuel_ferc1_df.dropna(inplace=True)
    # Replace "unknown" fuel unit with NAs - this comes after we drop missing
    # data, so these placeholder values survive the dropna() above.
    fuel_ferc1_df = fuel_ferc1_df.replace({"fuel_units": "unknown"}, pd.NA)
    ferc1_transformed_dfs["fuel_ferc1"] = fuel_ferc1_df
    return ferc1_transformed_dfs
def plants_small(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 plant_small data for loading into PUDL Database.

    This FERC Form 1 table contains information about a large number of small plants,
    including many small hydroelectric and other renewable generation facilities.
    Unfortunately the data is not well standardized, and so the plants have been
    categorized manually, with the results of that categorization stored in an Excel
    spreadsheet. This function reads in the plant type data from the spreadsheet and
    merges it with the rest of the information from the FERC DB based on record number,
    FERC respondent ID, and report year. When possible the FERC license number for small
    hydro plants is also manually extracted from the data.

    This categorization will need to be renewed with each additional year of FERC data
    we pull in. As of v0.1 the small plants have been categorized for 2004-2015.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.

    Returns:
        dict: The dictionary of transformed dataframes.
    """
    # grab table from dictionary of dfs
    ferc1_small_df = ferc1_raw_dfs["plants_small_ferc1"]
    # Standardize plant_name capitalization and remove leading/trailing
    # white space -- necessary b/c plant_name is part of many foreign keys.
    ferc1_small_df = pudl.helpers.simplify_strings(
        ferc1_small_df, ["plant_name", "kind_of_fuel"]
    )
    # Force the construction and installation years to be numeric values, and
    # set them to NA if they can't be converted. (table has some junk values)
    ferc1_small_df = pudl.helpers.oob_to_nan(
        ferc1_small_df,
        cols=["yr_constructed"],
        lb=1850,
        ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1,
    )
    # Convert from cents per mmbtu to dollars per mmbtu to be consistent
    # with the f1_fuel table data. Also, let's use a clearer name.
    ferc1_small_df["fuel_cost_per_mmbtu"] = ferc1_small_df["fuel_cost"] / 100.0
    ferc1_small_df.drop("fuel_cost", axis=1, inplace=True)
    # Create a single "record number" for the individual lines in the FERC
    # Form 1 that report different small plants, so that we can more easily
    # tell whether they are adjacent to each other in the reporting.
    ferc1_small_df["record_number"] = (
        46 * ferc1_small_df["spplmnt_num"] + ferc1_small_df["row_number"]
    )
    # Unfortunately the plant types were not able to be parsed automatically
    # in this table. It's been done manually for 2004-2015, and the results
    # get merged in in the following section.
    small_types_file = importlib.resources.open_binary(
        "pudl.package_data.ferc1", "small_plants_2004-2016.xlsx"
    )
    small_types_df = pd.read_excel(small_types_file)
    # Only rows with plant_type set will give us novel information.
    small_types_df.dropna(
        subset=[
            "plant_type",
        ],
        inplace=True,
    )
    # We only need this small subset of the columns to extract the plant type.
    small_types_df = small_types_df[
        [
            "report_year",
            "respondent_id",
            "record_number",
            "plant_name_clean",
            "plant_type",
            "ferc_license",
        ]
    ]
    # Munge the two dataframes together, keeping everything from the
    # frame we pulled out of the FERC1 DB, and supplementing it with the
    # plant_name, plant_type, and ferc_license fields from our hand
    # made file.
    ferc1_small_df = pd.merge(
        ferc1_small_df,
        small_types_df,
        how="left",
        on=["report_year", "respondent_id", "record_number"],
    )
    # Remove extraneous columns and add a record ID
    ferc1_small_df = _clean_cols(ferc1_small_df, "f1_gnrt_plant")
    # Standardize plant_name_clean capitalization and remove leading/trailing
    # white space, so that it matches the formatting of plant_name.
    ferc1_small_df = pudl.helpers.simplify_strings(ferc1_small_df, ["plant_name_clean"])
    # In order to create one complete column of plant names, use the cleaned
    # plant names when available and fall back to the original plant names
    # otherwise. Vectorized replacement for the old row-wise apply(axis=1):
    # both NA and empty-string cleaned names count as "missing".
    ferc1_small_df["plant_name_clean"] = ferc1_small_df["plant_name_clean"].fillna("")
    ferc1_small_df["plant_name_clean"] = ferc1_small_df["plant_name_clean"].mask(
        ferc1_small_df["plant_name_clean"] == "",
        ferc1_small_df["plant_name"],
    )
    ferc1_small_df.rename(
        columns={
            # FERC 1 DB Name      PUDL DB Name
            "plant_name": "plant_name_ferc1",
            "ferc_license": "ferc_license_id",
            "yr_constructed": "construction_year",
            "capacity_rating": "capacity_mw",
            "net_demand": "peak_demand_mw",
            "net_generation": "net_generation_mwh",
            "plant_cost": "total_cost_of_plant",
            "plant_cost_mw": "capex_per_mw",
            "operation": "opex_total",
            "expns_fuel": "opex_fuel",
            "expns_maint": "opex_maintenance",
            "kind_of_fuel": "fuel_type",
            # NOTE: the old '"fuel_cost": "fuel_cost_per_mmbtu"' mapping was
            # removed -- fuel_cost is converted and dropped above, so that
            # rename entry could never match anything.
        },
        inplace=True,
    )
    ferc1_transformed_dfs["plants_small_ferc1"] = ferc1_small_df
    return ferc1_transformed_dfs
def plants_hydro(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 plant_hydro data for loading into PUDL Database.

    Standardizes plant names (stripping whitespace and using Title Case). Also converts
    into our preferred units of MW and MWh.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.

    Returns:
        dict: The dictionary of transformed dataframes.

    Raises:
        AssertionError: If any construction_type value failed to map via
            CONSTRUCTION_TYPE_STRINGS (i.e. NA values remain after cleaning).
    """
    # grab table from dictionary of dfs
    ferc1_hydro_df = (
        _clean_cols(ferc1_raw_dfs["plants_hydro_ferc1"], "f1_hydro")
        # Standardize plant_name capitalization and remove leading/trailing
        # white space -- necessary b/c plant_name is part of many foreign keys.
        .pipe(pudl.helpers.simplify_strings, ["plant_name"])
        .pipe(
            pudl.helpers.cleanstrings,
            ["plant_const"],
            [CONSTRUCTION_TYPE_STRINGS],
            unmapped=pd.NA,
        )
        .assign(
            # Converting kWh to MWh
            net_generation_mwh=lambda x: x.net_generation / 1000.0,
            # Converting cost per kW installed to cost per MW installed:
            cost_per_mw=lambda x: x.cost_per_kw * 1000.0,
            # Converting kWh to MWh
            expns_per_mwh=lambda x: x.expns_kwh * 1000.0,
        )
        .pipe(
            pudl.helpers.oob_to_nan,
            cols=["yr_const", "yr_installed"],
            lb=1850,
            ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1,
        )
        .drop(columns=["net_generation", "cost_per_kw", "expns_kwh"])
        .rename(
            columns={
                # FERC1 DB          PUDL DB
                "plant_name": "plant_name_ferc1",
                "project_no": "project_num",
                "yr_const": "construction_year",
                "plant_kind": "plant_type",
                "plant_const": "construction_type",
                "yr_installed": "installation_year",
                "tot_capacity": "capacity_mw",
                "peak_demand": "peak_demand_mw",
                "plant_hours": "plant_hours_connected_while_generating",
                "favorable_cond": "net_capacity_favorable_conditions_mw",
                "adverse_cond": "net_capacity_adverse_conditions_mw",
                "avg_num_of_emp": "avg_num_employees",
                "cost_of_land": "capex_land",
                "cost_structure": "capex_structures",
                "cost_facilities": "capex_facilities",
                "cost_equipment": "capex_equipment",
                "cost_roads": "capex_roads",
                "cost_plant_total": "capex_total",
                "cost_per_mw": "capex_per_mw",
                "expns_operations": "opex_operations",
                "expns_water_pwr": "opex_water_for_power",
                "expns_hydraulic": "opex_hydraulic",
                "expns_electric": "opex_electric",
                "expns_generation": "opex_generation_misc",
                "expns_rents": "opex_rents",
                # NOTE(review): both "expns_engineering" and "expns_engnr"
                # map to opex_engineering below. If both source columns ever
                # coexist in the raw table this would yield a duplicate
                # column name -- presumably only one appears per vintage;
                # confirm against the raw f1_hydro schema.
                "expns_engineering": "opex_engineering",
                "expns_structures": "opex_structures",
                "expns_dams": "opex_dams",
                "expns_plant": "opex_plant",
                "expns_misc_plant": "opex_misc_plant",
                "expns_per_mwh": "opex_per_mwh",
                "expns_engnr": "opex_engineering",
                "expns_total": "opex_total",
                "asset_retire_cost": "asset_retirement_cost",
                # Removed the junk '"": ""' entry that was here -- an
                # empty-string-to-empty-string rename can never match a
                # real column and was dead weight.
            }
        )
        .drop_duplicates(
            subset=[
                "report_year",
                "utility_id_ferc1",
                "plant_name_ferc1",
                "capacity_mw",
            ],
            keep=False,
        )
    )
    if ferc1_hydro_df["construction_type"].isnull().any():
        raise AssertionError(
            "NA values found in construction_type column during FERC1 hydro clean, add string to CONSTRUCTION_TYPE_STRINGS"
        )
    ferc1_hydro_df = ferc1_hydro_df.replace({"construction_type": "unknown"}, pd.NA)
    ferc1_transformed_dfs["plants_hydro_ferc1"] = ferc1_hydro_df
    return ferc1_transformed_dfs
def plants_pumped_storage(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 pumped storage data for loading into PUDL.

    Standardizes plant names (stripping whitespace and Using Title Case). Also converts
    into our preferred units of MW and MWh.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.

    Returns:
        dict: The dictionary of transformed dataframes.

    Raises:
        AssertionError: If any construction_type value failed to map via
            CONSTRUCTION_TYPE_STRINGS (i.e. NA values remain after cleaning).
    """
    # grab table from dictionary of dfs
    ferc1_pump_df = (
        _clean_cols(ferc1_raw_dfs["plants_pumped_storage_ferc1"], "f1_pumped_storage")
        # Standardize plant_name capitalization and remove leading/trailing
        # white space -- necessary b/c plant_name is part of many foreign keys.
        .pipe(pudl.helpers.simplify_strings, ["plant_name"])
        # Clean up the messy plant construction type column:
        .pipe(
            pudl.helpers.cleanstrings,
            ["plant_kind"],
            [CONSTRUCTION_TYPE_STRINGS],
            unmapped=pd.NA,
        )
        .assign(
            # Converting from kW/kWh to MW/MWh
            net_generation_mwh=lambda x: x.net_generation / 1000.0,
            energy_used_for_pumping_mwh=lambda x: x.energy_used / 1000.0,
            net_load_mwh=lambda x: x.net_load / 1000.0,
            # Cost per kW installed -> cost per MW installed:
            cost_per_mw=lambda x: x.cost_per_kw * 1000.0,
            expns_per_mwh=lambda x: x.expns_kwh * 1000.0,
        )
        # Construction/installation years outside [1850, latest working year]
        # are junk; convert them to NA.
        .pipe(
            pudl.helpers.oob_to_nan,
            cols=["yr_const", "yr_installed"],
            lb=1850,
            ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1,
        )
        # Drop the original kW/kWh columns now that MW/MWh versions exist.
        .drop(
            columns=[
                "net_generation",
                "energy_used",
                "net_load",
                "cost_per_kw",
                "expns_kwh",
            ]
        )
        .rename(
            columns={
                # FERC1 DB          PUDL DB
                "plant_name": "plant_name_ferc1",
                # NOTE(review): both "project_number" and "project_no" map to
                # project_num, and both "cost_wheels" and
                # "cost_wheels_turbines_generators" map to
                # capex_wheels_turbines_generators. If both members of either
                # pair existed in the raw table simultaneously, the rename
                # would produce duplicate column names -- presumably only one
                # appears per table vintage; confirm against the raw
                # f1_pumped_storage schema.
                "project_number": "project_num",
                "tot_capacity": "capacity_mw",
                "project_no": "project_num",
                "plant_kind": "construction_type",
                "peak_demand": "peak_demand_mw",
                "yr_const": "construction_year",
                "yr_installed": "installation_year",
                "plant_hours": "plant_hours_connected_while_generating",
                "plant_capability": "plant_capability_mw",
                "avg_num_of_emp": "avg_num_employees",
                "cost_wheels": "capex_wheels_turbines_generators",
                "cost_land": "capex_land",
                "cost_structures": "capex_structures",
                "cost_facilties": "capex_facilities",
                "cost_wheels_turbines_generators": "capex_wheels_turbines_generators",
                "cost_electric": "capex_equipment_electric",
                "cost_misc_eqpmnt": "capex_equipment_misc",
                "cost_roads": "capex_roads",
                "asset_retire_cost": "asset_retirement_cost",
                "cost_of_plant": "capex_total",
                "cost_per_mw": "capex_per_mw",
                "expns_operations": "opex_operations",
                "expns_water_pwr": "opex_water_for_power",
                "expns_pump_strg": "opex_pumped_storage",
                "expns_electric": "opex_electric",
                "expns_misc_power": "opex_generation_misc",
                "expns_rents": "opex_rents",
                "expns_engneering": "opex_engineering",
                "expns_structures": "opex_structures",
                "expns_dams": "opex_dams",
                "expns_plant": "opex_plant",
                "expns_misc_plnt": "opex_misc_plant",
                "expns_producton": "opex_production_before_pumping",
                "pumping_expenses": "opex_pumping",
                "tot_prdctn_exns": "opex_total",
                "expns_per_mwh": "opex_per_mwh",
            }
        )
        # Drop every member of any group of records that shares the same
        # year/utility/plant/capacity (keep=False) -- these can't be told apart.
        .drop_duplicates(
            subset=[
                "report_year",
                "utility_id_ferc1",
                "plant_name_ferc1",
                "capacity_mw",
            ],
            keep=False,
        )
    )
    # Any remaining NA construction types mean CONSTRUCTION_TYPE_STRINGS is
    # missing a mapping -- fail loudly rather than load bad data.
    if ferc1_pump_df["construction_type"].isnull().any():
        raise AssertionError(
            "NA values found in construction_type column during FERC 1 pumped storage clean, add string to CONSTRUCTION_TYPE_STRINGS"
        )
    ferc1_pump_df = ferc1_pump_df.replace({"construction_type": "unknown"}, pd.NA)
    ferc1_transformed_dfs["plants_pumped_storage_ferc1"] = ferc1_pump_df
    return ferc1_transformed_dfs
def plant_in_service(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 Plant in Service data for loading into PUDL.

    Re-organizes the original FERC Form 1 Plant in Service data by unpacking the rows as
    needed on a year by year basis, to organize them into columns. The "columns" in the
    original FERC Form 1 denote starting balancing, ending balance, additions,
    retirements, adjustments, and transfers -- these categories are turned into labels
    in a column called "amount_type". Because each row in the transformed table is
    composed of many individual records (rows) from the original table, row_number can't
    be part of the record_id, which means they are no longer unique. To infer exactly
    what record a given piece of data came from, the record_id and the row_map (found in
    the PUDL package_data directory) can be used.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.

    Returns:
        dict: The dictionary of the transformed DataFrames.
    """
    pis_df = (
        # Unpack the raw rows into one column per data category.
        unpack_table(
            ferc1_df=ferc1_raw_dfs["plant_in_service_ferc1"],
            table_name="f1_plant_in_srvce",
            data_rows=slice(None),  # Gotta catch 'em all!
            data_cols=[
                "begin_yr_bal",
                "addition",
                "retirements",
                "adjustments",
                "transfers",
                "yr_end_bal",
            ],
        )
        .pipe(  # Convert top level of column index into a categorical column:
            cols_to_cats,
            cat_name="amount_type",
            col_cats={
                "begin_yr_bal": "starting_balance",
                "addition": "additions",
                "retirements": "retirements",
                "adjustments": "adjustments",
                "transfers": "transfers",
                "yr_end_bal": "ending_balance",
            },
        )
        .rename_axis(columns=None)
        .pipe(_clean_cols, "f1_plant_in_srvce")
        # NOTE(review): set_index() immediately followed by reset_index()
        # appears to be used only to move these key columns to the front of
        # the dataframe -- confirm that's the intent before simplifying.
        .set_index(["utility_id_ferc1", "report_year", "amount_type", "record_id"])
        .reset_index()
    )
    # Get rid of the columns corresponding to "header" rows in the FERC
    # form, which should *never* contain data... but in about 2 dozen cases,
    # they do. See this issue on Github for more information:
    # https://github.com/catalyst-cooperative/pudl/issues/471
    pis_df = pis_df.drop(columns=pis_df.filter(regex=".*_head$").columns)
    ferc1_transformed_dfs["plant_in_service_ferc1"] = pis_df
    return ferc1_transformed_dfs
def purchased_power(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 pumped storage data for loading into PUDL.

    This table has data about inter-utility power purchases into the PUDL DB. This
    includes how much electricty was purchased, how much it cost, and who it was
    purchased from. Unfortunately the field describing which other utility the power was
    being bought from is poorly standardized, making it difficult to correlate with
    other data. It will need to be categorized by hand or with some fuzzy matching
    eventually.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.

    Returns:
        dict: The dictionary of the transformed DataFrames.
    """
    # Map cryptic FERC 1 DB column names to our PUDL DB names.
    column_map = {
        "athrty_co_name": "seller_name",
        "sttstcl_clssfctn": "purchase_type_code",
        "rtsched_trffnbr": "tariff",
        "avgmth_bill_dmnd": "billing_demand_mw",
        "avgmth_ncp_dmnd": "non_coincident_peak_demand_mw",
        "avgmth_cp_dmnd": "coincident_peak_demand_mw",
        "mwh_purchased": "purchased_mwh",
        "mwh_recv": "received_mwh",
        "mwh_delvd": "delivered_mwh",
        "dmnd_charges": "demand_charges",
        "erg_charges": "energy_charges",
        "othr_charges": "other_charges",
        "settlement_tot": "total_settlement",
    }
    # These demand columns must be numeric; junk values become NaN.
    numeric_cols = [
        "billing_demand_mw",
        "non_coincident_peak_demand_mw",
        "coincident_peak_demand_mw",
    ]
    # Data columns where a blank means "nothing happened" -- fill with 0.0.
    data_cols = [
        "purchased_mwh",
        "received_mwh",
        "delivered_mwh",
        "demand_charges",
        "energy_charges",
        "other_charges",
        "total_settlement",
    ]
    df = _clean_cols(ferc1_raw_dfs["purchased_power_ferc1"], "f1_purchased_pwr").rename(
        columns=column_map
    )
    for col in numeric_cols:
        df[col] = pd.to_numeric(df[col], errors="coerce")
    df = df.fillna(dict.fromkeys(data_cols, 0.0))
    # Reencode the power purchase types:
    df = (
        pudl.metadata.classes.Package.from_resource_ids()
        .get_resource("purchased_power_ferc1")
        .encode(df)
    )
    # Drop completely duplicate records (there are 6 in 1998 for utility 238
    # for some reason...), then drop records in which every data column is
    # zero, since they contain no useful information.
    df = df.drop_duplicates()
    all_zero = (df[data_cols] == 0).all(axis=1)
    df = df.loc[~all_zero]
    ferc1_transformed_dfs["purchased_power_ferc1"] = df
    return ferc1_transformed_dfs
def accumulated_depreciation(ferc1_raw_dfs, ferc1_transformed_dfs):
    """Transforms FERC Form 1 depreciation data for loading into PUDL.

    This information is organized by FERC account, with each line of the FERC Form 1
    having a different descriptive identifier like 'balance_end_of_year' or
    'transmission'.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database.
        ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed.

    Returns:
        dict: The dictionary of the transformed DataFrames.
    """
    # Build a row_number -> FERC account mapping from the hardcoded line map,
    # dropping the human-readable description and any unmapped rows.
    acct_map = (
        FERC_DEPRECIATION_LINES.drop(columns=["ferc_account_description"])
        .dropna()
        .astype({"row_number": int})
    )
    # Attach the account labels to the raw records, then clean & rename.
    ferc1_accumdepr_prvsn_df = (
        ferc1_raw_dfs["accumulated_depreciation_ferc1"]
        .merge(acct_map, how="left", on="row_number")
        .pipe(_clean_cols, "f1_accumdepr_prvsn")
        .rename(
            columns={
                # FERC1 DB   PUDL DB
                "total_cde": "total"
            }
        )
    )
    ferc1_transformed_dfs["accumulated_depreciation_ferc1"] = ferc1_accumdepr_prvsn_df
    return ferc1_transformed_dfs
def transform(ferc1_raw_dfs, ferc1_settings: Ferc1Settings = Ferc1Settings()):
    """Transforms FERC 1.

    Args:
        ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a table from the FERC Form 1 DBC database
        ferc1_settings: Validated ETL parameters required by
            this data source.

    Returns:
        dict: A dictionary of the transformed DataFrames.
    """
    # Map each output table name to its transform function. Note that fuel
    # must be transformed before steam, because the fuel proportions are used
    # to help assign plant IDs.
    ferc1_tfr_funcs = {
        "fuel_ferc1": fuel,
        "plants_steam_ferc1": plants_steam,
        "plants_small_ferc1": plants_small,
        "plants_hydro_ferc1": plants_hydro,
        "plants_pumped_storage_ferc1": plants_pumped_storage,
        "plant_in_service_ferc1": plant_in_service,
        "purchased_power_ferc1": purchased_power,
        "accumulated_depreciation_ferc1": accumulated_depreciation,
    }
    # Each transform function fills in one entry of this dictionary.
    ferc1_tfr_dfs = {}
    for table, tfr_func in ferc1_tfr_funcs.items():
        # Only transform the tables requested by the ETL settings.
        if table not in ferc1_settings.tables:
            continue
        logger.info(f"Transforming raw FERC Form 1 dataframe for loading into {table}")
        tfr_func(ferc1_raw_dfs, ferc1_tfr_dfs)
    # convert types and return:
    return {
        name: convert_cols_dtypes(df, data_source="ferc1")
        for name, df in ferc1_tfr_dfs.items()
    }
###############################################################################
# Identifying FERC Plants
###############################################################################
# Sadly FERC doesn't provide any kind of real IDs for the plants that report to
# them -- all we have is their names (a freeform string) and the data that is
# reported alongside them. This is often enough information to be able to
# recognize which records ought to be associated with each other year to year
# to create a continuous time series. However, we want to do that
# programmatically, which means using some clustering / categorization tools
# from scikit-learn
class FERCPlantClassifier(BaseEstimator, ClassifierMixin):
    """A classifier for identifying FERC plant time series in FERC Form 1 data.

    We want to be able to give the classifier a FERC plant record, and get back the
    group of records(or the ID of the group of records) that it ought to be part of.

    There are hundreds of different groups of records, and we can only know what they
    are by looking at the whole dataset ahead of time. This is the "fitting" step, in
    which the groups of records resulting from a particular set of model parameters(e.g.
    the weights that are attributes of the class) are generated.

    Once we have that set of record categories, we can test how well the classifier
    performs, by checking it against test / training data which we have already
    classified by hand. The test / training set is a list of lists of unique FERC plant
    record IDs(each record ID is the concatenation of: report year, respondent id,
    supplement number, and row number). It could also be stored as a dataframe where
    each column is associated with a year of data(some of which could be empty). Not
    sure what the best structure would be.

    If it's useful, we can assign each group a unique ID that is the time ordered
    concatenation of each of the constituent record IDs. Need to understand what the
    process for checking the classification of an input record looks like.

    To score a given classifier, we can look at what proportion of the records in the
    test dataset are assigned to the same group as in our manual classification of those
    records. There are much more complicated ways to do the scoring too... but for now
    let's just keep it as simple as possible.
    """

    def __init__(self, min_sim=0.75, plants_df=None):
        """Initialize the classifier.

        Args:
            min_sim : Number between 0.0 and 1.0, indicating the minimum value of
                cosine similarity that we are willing to accept as indicating two
                records are part of the same plant record time series. All entries in
                the pairwise similarity matrix below this value will be zeroed out.
            plants_df : The entire FERC Form 1 plant table as a dataframe. Needed in
                order to calculate the distance metrics between all of the records so we
                can group the plants in the fit() step, so we can check how well they
                are categorized later...

        Todo:
            Zane revisit plants_df
        """
        self.min_sim = min_sim
        self.plants_df = plants_df
        # NOTE(review): plants_df is effectively required -- the next line
        # raises AttributeError if the default None is used. Presumably the
        # None default exists for sklearn get_params()/clone() compatibility;
        # confirm before tightening.
        self._years = self.plants_df.report_year.unique()

    def fit(self, X, y=None):  # noqa: N803 Canonical capital letter...
        """Use weighted FERC plant features to group records into time series.

        The fit method takes the vectorized, normalized, weighted FERC plant
        features (X) as input, calculates the pairwise cosine similarity matrix
        between all records, and groups the records in their best time series.
        The similarity matrix and best time series are stored as data members
        in the object for later use in scoring & predicting.

        This isn't quite the way a fit method would normally work.

        Args:
            X (): a sparse matrix of size n_samples x n_features.
            y (): unused, present only for sklearn API compatibility.

        Returns:
            FERCPlantClassifier: self, with _cossim_df and _best_of populated.

        TODO:
            Zane revisit args and returns
        """
        self._cossim_df = pd.DataFrame(cosine_similarity(X))
        self._best_of = self._best_by_year()
        # Make the best match indices integers rather than floats w/ NA values.
        # -1 serves as the "no sufficiently good match" sentinel throughout.
        self._best_of[self._years] = self._best_of[self._years].fillna(-1).astype(int)
        return self

    def transform(self, X, y=None):  # noqa: N803
        """Passthrough transform method -- just returns self."""
        return self

    def predict(self, X, y=None):  # noqa: N803
        """Identify time series of similar records to input record_ids.

        Given a one-dimensional dataframe X, containing FERC record IDs, return
        a dataframe in which each row corresponds to one of the input record_id
        values (ordered as the input was ordered), with each column
        corresponding to one of the years worth of data. Values in the returned
        dataframe are the FERC record_ids of the record most similar to the
        input record within that year. Some of them may be null, if there was
        no sufficiently good match.

        Row index is the seed record IDs. Column index is years.

        Todo:
            * This method is hideously inefficient. It should be vectorized.
            * There's a line that throws a FutureWarning that needs to be fixed.
        """
        # Guard: fit() must have been called so that _cossim_df/_best_of exist.
        # NOTE(review): the runtime message misspells "classifier" as
        # "classifer" -- fixing it changes a runtime string, so it is only
        # flagged here.
        try:
            getattr(self, "_cossim_df")
        except AttributeError:
            raise RuntimeError("You must train classifer before predicting data!")
        # Append a sentinel row at index -1 with an empty record_id so that
        # lookups of the -1 "no match" sentinel don't KeyError.
        tmp_best = pd.concat(
            [
                self._best_of.loc[:, ["record_id"] + list(self._years)],
                pd.DataFrame(data=[""], index=[-1], columns=["record_id"]),
            ]
        )
        out_dfs = []
        # For each record_id we've been given:
        for x in X:
            # Find the index associated with the record ID we are predicting
            # a grouping for:
            idx = tmp_best[tmp_best.record_id == x].index.values[0]
            # Mask the best_of dataframe, keeping only those entries where
            # the index of the chosen record_id appears -- this results in a
            # huge dataframe almost full of NaN values.
            w_m = (
                tmp_best[self._years][tmp_best[self._years] == idx]
                # Grab the index values of the rows in the masked dataframe which
                # are NOT all NaN -- these are the indices of the *other* records
                # which found the record x to be one of their best matches.
                .dropna(how="all").index.values
            )
            # Now look up the indices of the records which were found to be
            # best matches to the record x.
            b_m = tmp_best.loc[idx, self._years].astype(int)
            # Here we require that there is no conflict between the two sets
            # of indices -- that every time a record shows up in a grouping,
            # that grouping is either the same, or a subset of the other
            # groupings that it appears in. When no sufficiently good match
            # is found the "index" in the _best_of array is set to -1, so
            # requiring that the b_m value be >=0 screens out those no-match
            # cases. This is okay -- we're just trying to require that the
            # groupings be internally self-consistent, not that they are
            # completely identical. Being flexible on this dramatically
            # increases the number of records that get assigned a plant ID.
            if np.array_equiv(w_m, b_m[b_m >= 0].values):
                # This line is causing a warning. In cases where there are
                # some years no sufficiently good match exists, and so b_m
                # doesn't contain an index. Instead, it has a -1 sentinel
                # value, which isn't a label for which a record exists, which
                # upsets .loc. Need to find some way around this... but for
                # now it does what we want. We could use .iloc instead, but
                # then the -1 sentinel value maps to the last entry in the
                # dataframe, which also isn't what we want. Blargh.
                new_grp = tmp_best.loc[b_m, "record_id"]
                # Stack the new list of record_ids on our output DataFrame:
                out_dfs.append(
                    pd.DataFrame(
                        data=new_grp.values.reshape(1, len(self._years)),
                        index=pd.Index(
                            [tmp_best.loc[idx, "record_id"]], name="seed_id"
                        ),
                        columns=self._years,
                    )
                )
        # NOTE(review): inconsistent seed groupings are silently skipped, so
        # the output can have fewer rows than X -- confirm callers expect that.
        return pd.concat(out_dfs)

    def score(self, X, y=None):  # noqa: N803
        """Scores a collection of FERC plant categorizations.

        For every record ID in X, predict its record group and calculate
        a metric of similarity between the prediction and the "ground
        truth" group that was passed in for that value of X.

        Args:
            X (pandas.DataFrame): an n_samples x 1 pandas dataframe of FERC
                Form 1 record IDs.
            y (pandas.DataFrame): a dataframe of "ground truth" FERC Form 1
                record groups, corresponding to the list record IDs in X

        Returns:
            numpy.ndarray: The average of all the similarity metrics as the
            score.
        """
        scores = []
        for true_group in y:
            # Ground-truth groups arrive as comma-separated record-ID strings;
            # split and drop empty entries.
            true_group = str.split(true_group, sep=",")
            true_group = [s for s in true_group if s != ""]
            predicted_groups = self.predict(pd.DataFrame(true_group))
            for rec_id in true_group:
                # SequenceMatcher ratio between true and predicted groupings
                # serves as the per-record similarity metric.
                sm = SequenceMatcher(None, true_group, predicted_groups.loc[rec_id])
                scores = scores + [sm.ratio()]
        return np.mean(scores)

    def _best_by_year(self):
        """Finds the best match for each plant record in each other year."""
        # only keep similarity matrix entries above our minimum threshold:
        out_df = self.plants_df.copy()
        sim_df = self._cossim_df[self._cossim_df >= self.min_sim]
        # Add a column for each of the years, in which we will store indices
        # of the records which best match the record in question:
        for yr in self._years:
            newcol = yr
            out_df[newcol] = -1
        # seed_yr is the year we are matching *from* -- we do the entire
        # matching process from each year, since it may not be symmetric:
        for seed_yr in self._years:
            seed_idx = self.plants_df.index[self.plants_df.report_year == seed_yr]
            # match_yr is all the other years, in which we are finding the best
            # match
            for match_yr in self._years:
                best_of_yr = match_yr
                match_idx = self.plants_df.index[self.plants_df.report_year == match_yr]
                # For each record specified by seed_idx, obtain the index of
                # the record within match_idx that that is the most similar.
                best_idx = sim_df.iloc[seed_idx, match_idx].idxmax(axis=1)
                out_df.iloc[seed_idx, out_df.columns.get_loc(best_of_yr)] = best_idx
        return out_df
def make_ferc1_clf(
    plants_df,
    ngram_min=2,
    ngram_max=10,
    min_sim=0.75,
    plant_name_ferc1_wt=2.0,
    plant_type_wt=2.0,
    construction_type_wt=1.0,
    capacity_mw_wt=1.0,
    construction_year_wt=1.0,
    utility_id_ferc1_wt=1.0,
    fuel_fraction_wt=1.0,
):
    """Create a FERC Plant Classifier using several weighted features.

    Given a FERC steam plants dataframe plants_df, which also includes fuel
    consumption information, transform a selection of useful columns into
    features suitable for use in calculating inter-record cosine similarities.
    Individual features are weighted according to the keyword arguments.

    Features include:

    * plant_name (via TF-IDF, with ngram_min and ngram_max as parameters)
    * plant_type (OneHot encoded categorical feature)
    * construction_type (OneHot encoded categorical feature)
    * capacity_mw (MinMax scaled numerical feature)
    * construction year (OneHot encoded categorical feature)
    * utility_id_ferc1 (OneHot encoded categorical feature)
    * fuel_fraction_mmbtu (several MinMax scaled numerical columns, which are
      normalized and treated as a single feature.)

    This feature matrix is then used to instantiate a FERCPlantClassifier.
    The combination of the ColumnTransformer and FERCPlantClassifier are
    combined in a sklearn Pipeline, which is returned by the function.

    Arguments:
        plants_df (pandas.DataFrame): the FERC steam plant records (including
            fuel fraction columns) that the classifier will group.
        ngram_min (int): the minimum n-gram length to consider in the
            vectorization of the plant_name feature.
        ngram_max (int): the maximum n-gram length to consider in the
            vectorization of the plant_name feature.
        min_sim (float): the minimum cosine similarity between two records
            that can be considered a "match" (a number between 0.0 and 1.0).
        plant_name_ferc1_wt (float): relative weight of the plant name feature.
        plant_type_wt (float): relative weight of the plant type feature.
        construction_type_wt (float): relative weight of the construction type
            feature.
        capacity_mw_wt (float): relative weight of the plant capacity feature.
        construction_year_wt (float): relative weight of the construction year
            feature.
        utility_id_ferc1_wt (float): relative weight of the utility ID feature.
        fuel_fraction_wt (float): relative weight of the fuel fraction feature.

        Each weight scales its feature before the feature vectors are
        normalized, determining that feature's relative importance in the
        cosine similarity calculation.

    Returns:
        sklearn.pipeline.Pipeline: an sklearn Pipeline that performs
        preprocessing and classification with a FERCPlantClassifier object.
    """
    # All of the fuel fraction columns are scaled and normalized together,
    # so they act as a single feature.
    fuel_cols = list(plants_df.filter(regex=".*_fraction_mmbtu$").columns)

    def onehot(col):
        """Build a ColumnTransformer entry that one-hot encodes *col*."""
        return (col, OneHotEncoder(categories="auto"), [col])

    preprocessor = ColumnTransformer(
        transformers=[
            (
                "plant_name_ferc1",
                TfidfVectorizer(analyzer="char", ngram_range=(ngram_min, ngram_max)),
                "plant_name_ferc1",
            ),
            onehot("plant_type"),
            onehot("construction_type"),
            ("capacity_mw", MinMaxScaler(), ["capacity_mw"]),
            onehot("construction_year"),
            onehot("utility_id_ferc1"),
            (
                "fuel_fraction_mmbtu",
                Pipeline([("scaler", MinMaxScaler()), ("norm", Normalizer())]),
                fuel_cols,
            ),
        ],
        transformer_weights={
            "plant_name_ferc1": plant_name_ferc1_wt,
            "plant_type": plant_type_wt,
            "construction_type": construction_type_wt,
            "capacity_mw": capacity_mw_wt,
            "construction_year": construction_year_wt,
            "utility_id_ferc1": utility_id_ferc1_wt,
            "fuel_fraction_mmbtu": fuel_fraction_wt,
        },
    )
    classifier = pudl.transform.ferc1.FERCPlantClassifier(
        min_sim=min_sim, plants_df=plants_df
    )
    return Pipeline([("preprocessor", preprocessor), ("classifier", classifier)])
def fuel_by_plant_ferc1(fuel_df, thresh=0.5):
    """Calculates useful FERC Form 1 fuel metrics on a per plant-year basis.

    Each record in the FERC Form 1 corresponds to a particular type of fuel. Many plants
    -- especially coal plants -- use more than one fuel, with gas and/or diesel serving
    as startup fuels. In order to be able to classify the type of plant based on
    relative proportions of fuel consumed or fuel costs it is useful to aggregate these
    per-fuel records into a single record for each plant.

    Fuel cost (in nominal dollars) and fuel heat content (in mmBTU) are calculated for
    each fuel based on the cost and heat content per unit, and the number of units
    consumed, and then summed by fuel type (there can be more than one record for a
    given type of fuel in each plant because we are simplifying the fuel categories).
    The per-fuel records are then pivoted to create one column per fuel type. The total
    is summed and stored separately, and the individual fuel costs & heat contents are
    divided by that total, to yield fuel proportions. Based on those proportions and a
    minimum threshold that's passed in, a "primary" fuel type is then assigned to the
    plant-year record and given a string label.

    Args:
        fuel_df (pandas.DataFrame): Pandas DataFrame resembling the post-transform
            result for the fuel_ferc1 table.
        thresh (float): A value between 0.5 and 1.0 indicating the minimum fraction of
            overall heat content that must have been provided by a fuel in a plant-year
            for it to be considered the "primary" fuel for the plant in that year.
            Default value: 0.5.

    Returns:
        pandas.DataFrame: A DataFrame with a single record for each plant-year,
        including the columns required to merge it with the plants_steam_ferc1
        table/DataFrame (report_year, utility_id_ferc1, and plant_name) as well as
        totals for fuel mmbtu consumed in that plant-year, and the cost of fuel in that
        year, the proportions of heat content and fuel costs for each fuel in that year,
        and a column that labels the plant's primary fuel for that year.

    Raises:
        AssertionError: If the DataFrame input does not have the columns required to
            run the function.
    """
    keep_cols = [
        "report_year",  # key
        "utility_id_ferc1",  # key
        "plant_name_ferc1",  # key
        "fuel_type_code_pudl",  # pivot
        "fuel_consumed_units",  # value
        "fuel_mmbtu_per_unit",  # value
        "fuel_cost_per_unit_burned",  # value
    ]
    # Ensure that the dataframe we've gotten has all the information we need:
    for col in keep_cols:
        if col not in fuel_df.columns:
            raise AssertionError(f"Required column {col} not found in input fuel_df.")
    # Calculate per-fuel derived values and add them to the DataFrame
    df = (
        # Really there should *not* be any duplicates here but... there's a
        # bug somewhere that introduces them into the fuel_ferc1 table.
        fuel_df[keep_cols]
        .drop_duplicates()
        # Calculate totals for each record based on per-unit values:
        .assign(fuel_mmbtu=lambda x: x.fuel_consumed_units * x.fuel_mmbtu_per_unit)
        .assign(fuel_cost=lambda x: x.fuel_consumed_units * x.fuel_cost_per_unit_burned)
        # Drop the ratios and heterogeneous fuel "units"
        .drop(
            ["fuel_mmbtu_per_unit", "fuel_cost_per_unit_burned", "fuel_consumed_units"],
            axis=1,
        )
        # Group by the keys and fuel type, and sum:
        .groupby(
            [
                "utility_id_ferc1",
                "plant_name_ferc1",
                "report_year",
                "fuel_type_code_pudl",
            ]
        )
        # Pass the aggregation by name: identical result to the builtin ``sum``,
        # but avoids the pandas FutureWarning about builtin aggregation callables.
        .agg("sum")
        .reset_index()
        # Set the index to the keys, and pivot to get per-fuel columns:
        .set_index(["utility_id_ferc1", "plant_name_ferc1", "report_year"])
        .pivot(columns="fuel_type_code_pudl")
        .fillna(0.0)
    )
    # Undo pivot. Could refactor this old function
    plant_year_totals = df.stack("fuel_type_code_pudl").groupby(level=[0, 1, 2]).sum()
    # Calculate total heat content burned for each plant, and divide it out
    mmbtu_group = (
        pd.merge(
            # Sum up all the fuel heat content, and divide the individual fuel
            # heat contents by it (they are all contained in single higher
            # level group of columns labeled fuel_mmbtu)
            df.loc[:, "fuel_mmbtu"].div(
                df.loc[:, "fuel_mmbtu"].sum(axis=1), axis="rows"
            ),
            # Merge that same total into the dataframe separately as well.
            plant_year_totals.loc[:, "fuel_mmbtu"],
            right_index=True,
            left_index=True,
        )
        # Append the _fraction_mmbtu suffix to every column, then undo it on the
        # totals column (which ends up doubly-suffixed by the first rename):
        .rename(columns=lambda x: re.sub(r"$", "_fraction_mmbtu", x))
        .rename(columns=lambda x: re.sub(r"_mmbtu_fraction_mmbtu$", "_mmbtu", x))
    )
    # Calculate total fuel cost for each plant, and divide it out
    cost_group = (
        pd.merge(
            # Sum up all the fuel costs, and divide the individual fuel
            # costs by it (they are all contained in single higher
            # level group of columns labeled fuel_cost)
            df.loc[:, "fuel_cost"].div(df.loc[:, "fuel_cost"].sum(axis=1), axis="rows"),
            # Merge that same total into the dataframe separately as well.
            plant_year_totals.loc[:, "fuel_cost"],
            right_index=True,
            left_index=True,
        )
        .rename(columns=lambda x: re.sub(r"$", "_fraction_cost", x))
        .rename(columns=lambda x: re.sub(r"_cost_fraction_cost$", "_cost", x))
    )
    # Re-unify the cost and heat content information:
    df = pd.merge(
        mmbtu_group, cost_group, left_index=True, right_index=True
    ).reset_index()
    # Label each plant-year record by primary fuel:
    for fuel_str in FUEL_STRINGS:
        # A fuel type may be absent from this dataset entirely, in which case
        # the fraction column doesn't exist -- skip it.
        try:
            mmbtu_mask = df[f"{fuel_str}_fraction_mmbtu"] > thresh
            df.loc[mmbtu_mask, "primary_fuel_by_mmbtu"] = fuel_str
        except KeyError:
            pass
        try:
            cost_mask = df[f"{fuel_str}_fraction_cost"] > thresh
            df.loc[cost_mask, "primary_fuel_by_cost"] = fuel_str
        except KeyError:
            pass
    df[["primary_fuel_by_cost", "primary_fuel_by_mmbtu"]] = df[
        ["primary_fuel_by_cost", "primary_fuel_by_mmbtu"]
    ].fillna("")
    return df
| mit | 306ab25fe2adadd7e2ced0fe65b323da | 34.509434 | 133 | 0.55279 | 3.603638 | false | false | false | false |
frigg/frigg-hq | tests/helpers/test_github.py | 1 | 1420 | from django.test import TestCase
from frigg.builds.models import Build, BuildResult, Project
from frigg.helpers.github import _get_status_from_build, get_pull_request_url
class GithubHelpersTestCase(TestCase):
    """Exercises the GitHub status/URL helper functions."""

    def test__get_status_from_build(self):
        the_build = Build.objects.create(build_number=1, branch='master', sha='sha')
        BuildResult.objects.create(build=the_build, result_log='result', succeeded=True)
        some_error = RuntimeError()

        # pending flag wins over everything else
        self.assertEqual(_get_status_from_build(the_build, True, None)[0], 'pending')
        # successful result, no error
        self.assertEqual(_get_status_from_build(the_build, False, None)[0], 'success')
        # an exception object maps to the 'error' status
        self.assertEqual(_get_status_from_build(the_build, False, some_error)[0], 'error')

        # a failed result maps to 'failure'
        the_build.result.succeeded = False
        the_build.result.save()
        self.assertEqual(_get_status_from_build(the_build, False, None)[0], 'failure')

    def test_get_pull_request_url(self):
        project = Project.objects.create(owner='frigg', name='frigg-worker')

        plain_build = Build(project=project, branch='master')
        self.assertEqual(get_pull_request_url(plain_build),
                         'https://github.com/frigg/frigg-worker')

        pr_build = Build(project=project, branch='master', pull_request_id=1)
        self.assertEqual(get_pull_request_url(pr_build),
                         'https://github.com/frigg/frigg-worker/pull/1')
frigg/frigg-hq | frigg/deployments/models.py | 1 | 2932 | import json
from datetime import timedelta
import redis
from django.conf import settings
from django.db import models
from django.utils.timezone import now
from frigg.helpers import github
from .managers import PRDeploymentManager
class PRDeployment(models.Model):
    """A preview deployment (docker container) spawned for a pull-request build."""

    # One deployment per build; reachable from the build as ``build.deployment``.
    build = models.OneToOneField('builds.Build', related_name='deployment', unique=True)
    # Host port the preview container is exposed on (used to build the public URL).
    port = models.IntegerField()
    # Docker image to run; falls back to the globally configured preview image.
    image = models.CharField(max_length=255, default=settings.FRIGG_PREVIEW_IMAGE)
    # JSON-encoded list of task results reported by the worker (see handle_report()).
    log = models.TextField(blank=True)
    # None while pending, then True/False once the worker has reported an outcome.
    succeeded = models.NullBooleanField()
    # Container id assigned by docker, reported back by the worker.
    docker_id = models.CharField(max_length=150, blank=True)
    # When the deployment was enqueued for the worker (None until start() is called).
    start_time = models.DateTimeField(blank=True, null=True)

    objects = PRDeploymentManager()

    class Meta:
        verbose_name = 'Pull request deployment'
        verbose_name_plural = 'Pull request deployments'

    def __str__(self):
        return 'Deployment: {}'.format(self.build)

    def get_deployment_url(self):
        """Return the public URL where this preview deployment is served."""
        return 'http://{port}.pr.frigg.io'.format(port=self.port)

    @property
    def ttl(self):
        """Time-to-live of the preview container, in seconds."""
        if self.build.project.owner == 'frigg':
            return 86400
        # This value should be calculated based on the owner
        return 1800

    @property
    def is_alive(self):
        """Whether the deployment succeeded and its TTL has not yet expired."""
        if self.start_time is None:
            return False
        return bool(self.succeeded) and self.start_time + timedelta(seconds=self.ttl) > now()

    @property
    def is_pending(self):
        """True while the worker has not yet reported a final outcome."""
        return self.succeeded is None

    @property
    def queue_object(self):
        """Dict payload pushed onto the worker queue: the build's payload plus deployment fields."""
        obj = self.build.queue_object
        obj.update({
            'id': self.pk,
            'port': self.port,
            'image': self.image,
            'ttl': self.ttl
        })
        return obj

    @property
    def tasks(self):
        """Task results decoded from the stored log; empty list when nothing was reported."""
        return json.loads(self.log or '[]')

    def start(self):
        """Enqueue this deployment and mark the build as pending on GitHub."""
        r = redis.Redis(**settings.REDIS_SETTINGS)
        # NOTE(review): presumably consumed by the frigg deployment worker --
        # confirm the queue name against that codebase.
        r.lpush('frigg:queue:pr-deployments', json.dumps(self.queue_object))
        github.set_commit_status(self.build, pending=True, context='frigg-preview')
        self.start_time = now()
        self.save()

    def stop(self):
        """Ask the worker to stop the running preview container."""
        r = redis.Redis(**settings.REDIS_SETTINGS)
        r.lpush('frigg:queue:pr-deployments', json.dumps({
            'stop': True,
            'docker_id': self.docker_id
        }))

    def handle_report(self, payload):
        """Store a worker report and derive the overall outcome.

        Marks the deployment failed on the first failing task, resets it to
        pending on the first pending task, and successful otherwise. Once a
        final (non-pending) outcome is known, updates the GitHub commit status.
        """
        self.log = json.dumps(payload['results'])
        self.succeeded = True
        if 'docker_id' in payload:
            self.docker_id = payload['docker_id']
        for result in payload['results']:
            # a pending task puts the whole deployment back into the pending state
            if 'pending' in result and result['pending']:
                self.succeeded = None
                break
            if result['succeeded'] is False:
                self.succeeded = False
                break
        self.save()
        if self.succeeded is True or self.succeeded is False:
            github.set_commit_status(self.build, context='frigg-preview')
| mit | 3ab6e444c7f8f11a55023c3ff890a890 | 28.32 | 93 | 0.614939 | 4.027473 | false | false | false | false |
frigg/frigg-hq | frigg/projects/views.py | 1 | 1336 | from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from .models import Project
@never_cache
@csrf_exempt
def build_badge(request, owner, name, branch='master'):
    """Serve the SVG build-status badge for one branch of a project."""
    found_project = get_object_or_404(Project, owner=owner, name=name)
    svg = found_project.get_badge(branch)
    return HttpResponse(content=svg, content_type='image/svg+xml')
@never_cache
@csrf_exempt
def coverage_badge(request, owner, name, branch='master'):
    """Serve the SVG coverage badge for one branch of a project."""
    found_project = get_object_or_404(Project, owner=owner, name=name)
    svg = found_project.get_coverage_badge(branch)
    return HttpResponse(content=svg, content_type='image/svg+xml')
def approve_projects(request, project_id=None):
    """Superuser-only view: lists unapproved projects and approves one on POST."""
    if not request.user.is_superuser:
        raise Http404
    approval_submitted = (
        request.method == 'POST' and request.POST.get('approve') == "yes"
    )
    if project_id and approval_submitted:
        pending_project = Project.objects.get(id=project_id)
        pending_project.approved = True
        pending_project.save()
        # kick off the most recent build, if the project has any
        if pending_project.builds.all():
            pending_project.builds.last().start()
        return redirect('approve_projects_overview')
    return render(request, "builds/approve_projects.html", {
        'projects': Project.objects.filter(approved=False)
    })
| mit | 89f5782cc6177b42b5d8e8bad8135274 | 32.4 | 88 | 0.710329 | 3.731844 | false | false | false | false |
csparpa/pyowm | tests/unit/utils/test_measurables.py | 1 | 4964 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pyowm.utils import measurables
class TestMeasurablesUtils(unittest.TestCase):
    """Unit tests for the unit-conversion helpers in ``pyowm.utils.measurables``."""

    def test_kelvin_dict_to(self):
        kelvin_dict = {'a': 301.0, 'b': 280}
        celsius_dict = {'a': 27.85, 'b': 6.85}
        fahrenheit_dict = {'a': 82.13, 'b': 44.33}
        self.assertEqual(celsius_dict,
                         measurables.kelvin_dict_to(kelvin_dict, "celsius"))
        self.assertEqual(fahrenheit_dict,
                         measurables.kelvin_dict_to(kelvin_dict, "fahrenheit"))
        self.assertEqual(kelvin_dict,
                         measurables.kelvin_dict_to(kelvin_dict, "kelvin"))

    def test_kelvin_dict_to_fails_with_unknown_temperature_units(self):
        self.assertRaises(ValueError, measurables.kelvin_dict_to, {}, "xyz")

    def test_kelvin_to_celsius(self):
        kelvin = 301.0
        expected = 27.85
        result = measurables.kelvin_to_celsius(kelvin)
        self.assertEqual(expected, result)

    def test_kelvin_to_celsius_fails_with_negative_values(self):
        self.assertRaises(ValueError, measurables.kelvin_to_celsius, -137.0)

    def test_kelvin_to_fahrenheit(self):
        kelvin = 301.0
        expected = 82.13
        result = measurables.kelvin_to_fahrenheit(kelvin)
        self.assertEqual(expected, result)

    def test_kelvin_to_fahrenheit_fails_with_negative_values(self):
        self.assertRaises(ValueError, measurables.kelvin_to_fahrenheit, -137.0)

    def test_metric_wind_dict_to_imperial(self):
        # renamed local from 'input' to avoid shadowing the builtin
        metric_wind = {
            'speed': 2,
            'gust': 3,
            'deg': 7.89
        }
        expected = {
            'speed': 4.47388,
            'gust': 6.71082,
            'deg': 7.89
        }
        result = measurables.metric_wind_dict_to_imperial(metric_wind)
        self.assertEqual(expected, result)

    def test_metric_wind_dict_to_km_h(self):
        metric_wind = {
            'speed': 2,
            'gust': 3,
            'deg': 7.89
        }
        expected = {
            'speed': 7.2,
            'gust': 10.8,
            'deg': 7.89
        }
        result = measurables.metric_wind_dict_to_km_h(metric_wind)
        self.assertEqual(expected, result)

    def test_metric_wind_dict_to_knots(self):
        metric_wind = {
            'speed': 2,
            'gust': 3,
            'deg': 7.89
        }
        expected = {'speed': 3.88768, 'gust': 5.83152, 'deg': 7.89}
        result = measurables.metric_wind_dict_to_knots(metric_wind)
        self.assertEqual(expected, result)

    def test_metric_wind_dict_to_beaufort(self):
        # each entry is a wind speed at (or just past) a Beaufort scale boundary
        corner_values = {
            'lower': 0.01,
            'a': 0.2,
            'b': 1.5,
            'c': 3.3,
            'd': 5.4,
            'e': 7.9,
            'f': 10.7,
            'g': 13.8,
            'h': 17.1,
            'i': 20.7,
            'j': 24.4,
            'k': 28.4,
            'l': 32.6,
            'upper': 345,
            'deg': 7.89
        }
        expected_corner_values_beaufort = {
            'lower': 0,
            'a': 0,
            'b': 1,
            'c': 2,
            'd': 3,
            'e': 4,
            'f': 5,
            'g': 6,
            'h': 7,
            'i': 8,
            'j': 9,
            'k': 10,
            'l': 11,
            'upper': 12,
            'deg': 7.89
        }
        result_corner_values = measurables.metric_wind_dict_to_beaufort(corner_values)
        self.assertEqual(result_corner_values, expected_corner_values_beaufort)

        metric_wind = {
            'speed': 17.9,
            'gust': 2.89,
            'deg': 7.89
        }
        expected = {'speed': 8, 'gust': 2, 'deg': 7.89}
        result = measurables.metric_wind_dict_to_beaufort(metric_wind)
        self.assertEqual(expected, result)

    def test_metric_pressure_dict_to_inhg(self):
        # keys with None values are dropped by the conversion
        pressures = {'press': 1000, 'sea_level': 1, 'grnd_level': None}
        expected = {'press': 29.53, 'sea_level': .03}
        result = measurables.metric_pressure_dict_to_inhg(pressures)
        # removed a stray debug print(result) that polluted the test output
        self.assertEqual(expected, result)

    def test_visibility_distance_to(self):
        distances = (100, 200, None)
        cmp_kms = (.1, .2, None)
        cmp_miles = (.06, .12, None)
        case_one, case_two = list(), list()
        for distance in distances:
            case_one.append(measurables.visibility_distance_to(distance))
            case_two.append(measurables.visibility_distance_to(distance, 'miles'))
        self.assertTrue(tuple(case_one) == cmp_kms and tuple(case_two) == cmp_miles)

    def test_visibility_distance_to_fails_with_invalid_unit(self):
        self.assertRaises(ValueError, measurables.visibility_distance_to, 10, 'xyz')
csparpa/pyowm | pyowm/utils/geo.py | 1 | 13168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import geojson
import json
import math
EARTH_RADIUS_KM = 6378.1
# utilities
def assert_is_lat(val):
    """Validate that the given value is a feasible decimal latitude.

    :param val: value to be checked
    :type val: int or float
    :returns: `None`
    :raises: *ValueError* if the value falls outside the latitude range,
        *AssertionError* if the type is wrong
    """
    assert type(val) in (float, int), "Value must be a number"
    out_of_bounds = val < -90.0 or val > 90.0
    if out_of_bounds:
        raise ValueError("Latitude value must be between -90 and 90")
def assert_is_lon(val):
    """Validate that the given value is a feasible decimal longitude.

    :param val: value to be checked
    :type val: int or float
    :returns: `None`
    :raises: *ValueError* if the value falls outside the longitude range,
        *AssertionError* if the type is wrong
    """
    assert type(val) in (float, int), "Value must be a number"
    out_of_bounds = val < -180.0 or val > 180.0
    if out_of_bounds:
        raise ValueError("Longitude value must be between -180 and 180")
# classes
class Geometry:
    """Abstract base class shared by all geotypes.

    Concrete subclasses must implement both serialization hooks below.
    """

    def geojson(self):
        """Return the RFC 7946 (https://tools.ietf.org/html/rfc7946) GeoJSON
        string representation of this geotype.

        :return: str
        """
        raise NotImplementedError()

    def to_dict(self):
        """Return the dict representation of this geotype.

        :return: dict
        """
        raise NotImplementedError()
class Point(Geometry):
    """
    A Point geotype. Represents a single geographic point

    :param lon: decimal longitude for the geopoint
    :type lon: int or float
    :param lat: decimal latitude for the geopoint
    :type lat: int or float
    :returns: a *Point* instance
    :raises: *ValueError* when out-of-range coordinate values are provided
    """

    def __init__(self, lon, lat):
        assert_is_lon(lon)
        assert_is_lat(lat)
        # GeoJSON stores coordinates in (lon, lat) order
        self._geom = geojson.Point((lon, lat))

    @property
    def lon(self):
        # decimal longitude: first element of the GeoJSON coordinate pair
        return self._geom['coordinates'][0]

    @property
    def lat(self):
        # decimal latitude: second element of the GeoJSON coordinate pair
        return self._geom['coordinates'][1]

    def bounding_square_polygon(self, inscribed_circle_radius_km=10.0):
        """
        Returns a square polygon (bounding box) that circumscribes the circle having this geopoint as centre and
        having the specified radius in kilometers.
        The polygon's points calculation is based on theory exposed by: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
        by Jan Philip Matuschek, owner of the intellectual property of such material.
        In short:
        - locally to the geopoint, the Earth's surface is approximated to a sphere with radius = Earth's radius
        - the calculation works fine also when the bounding box contains the Earth's poles and the 180 deg meridian

        :param inscribed_circle_radius_km: the radius of the inscribed circle, defaults to 10 kms
        :type inscribed_circle_radius_km: int or float
        :return: a `pyowm.utils.geo.Polygon` instance
        """
        assert isinstance(inscribed_circle_radius_km, (int, float))
        assert inscribed_circle_radius_km > 0., 'Radius must be greater than zero'
        # turn metric distance to radians on the approximated local sphere
        rad_distance = float(inscribed_circle_radius_km) / EARTH_RADIUS_KM
        # calculating min/max lat for bounding box
        # NOTE: despite the *_deg suffix, these four bb_* variables hold RADIAN
        # values until the conversion back to decimal degrees at the end
        bb_min_lat_deg = self.lat * math.pi/180. - rad_distance
        bb_max_lat_deg = self.lat * math.pi/180. + rad_distance
        # now checking for poles...
        if bb_min_lat_deg > math.radians(-90) and bb_max_lat_deg < math.radians(90):  # no poles in the bounding box
            delta_lon = math.asin(math.sin(rad_distance) / math.cos(math.radians(self.lat)))
            bb_min_lon_deg = math.radians(self.lon) - delta_lon
            # wrap longitudes crossing the 180 deg meridian back into range
            if bb_min_lon_deg < math.radians(-180):
                bb_min_lon_deg += 2 * math.pi
            bb_max_lon_deg = math.radians(self.lon) + delta_lon
            if bb_max_lon_deg > math.radians(180):
                bb_max_lon_deg -= 2 * math.pi
        else:  # a pole is contained in the bounding box
            bb_min_lat_deg = max(bb_min_lat_deg, math.radians(-90))
            bb_max_lat_deg = min(bb_max_lat_deg, math.radians(90))
            bb_min_lon_deg = math.radians(-180)
            bb_max_lon_deg = math.radians(180)
        # turn back from radians to decimal
        bb_min_lat = bb_min_lat_deg * 180./math.pi
        bb_max_lat = bb_max_lat_deg * 180./math.pi
        bb_min_lon = bb_min_lon_deg * 180./math.pi
        bb_max_lon = bb_max_lon_deg * 180./math.pi
        # the ring is closed: first and last vertex coincide, as Polygon requires
        return Polygon([[
            [bb_min_lon, bb_max_lat],
            [bb_max_lon, bb_max_lat],
            [bb_max_lon, bb_min_lat],
            [bb_min_lon, bb_min_lat],
            [bb_min_lon, bb_max_lat]
        ]])

    def geojson(self):
        return geojson.dumps(self._geom)

    def to_dict(self):
        return json.loads(self.geojson())

    @classmethod
    def from_dict(cls, the_dict):
        """
        Builds a Point instance out of a geoJSON compliant dict

        :param the_dict: the geoJSON dict
        :return: `pyowm.utils.geo.Point` instance
        """
        geom = geojson.loads(json.dumps(the_dict))
        # placeholder instance: the parsed geometry replaces its coordinates below
        result = Point(0, 0)
        result._geom = geom
        return result

    def __repr__(self):
        return "<%s.%s - lon=%s, lat=%s>" % (__name__, self.__class__.__name__, self.lon, self.lat)
class MultiPoint(Geometry):
    """
    A MultiPoint geotype: a collection of geographic points.

    :param list_of_tuples: list of tuples, each one being the decimal (lon, lat) coordinates of a geopoint
    :type list_of_tuples: list
    :returns: a *MultiPoint* instance
    """

    def __init__(self, list_of_tuples):
        if not list_of_tuples:
            raise ValueError("A MultiPoint cannot be empty")
        for coords in list_of_tuples:
            assert_is_lon(coords[0])
            assert_is_lat(coords[1])
        self._geom = geojson.MultiPoint(list_of_tuples)

    @classmethod
    def from_points(cls, iterable_of_points):
        """
        Creates a MultiPoint from an iterable collection of `pyowm.utils.geo.Point` instances

        :param iterable_of_points: iterable whose items are `pyowm.utils.geo.Point` instances
        :type iterable_of_points: iterable
        :return: a *MultiPoint* instance
        """
        coordinate_pairs = [(point.lon, point.lat) for point in iterable_of_points]
        return MultiPoint(coordinate_pairs)

    @property
    def longitudes(self):
        """
        List of the decimal longitudes of this MultiPoint instance

        :return: list of float
        """
        return [pair[0] for pair in self._geom['coordinates']]

    @property
    def latitudes(self):
        """
        List of the decimal latitudes of this MultiPoint instance

        :return: list of float
        """
        return [pair[1] for pair in self._geom['coordinates']]

    def geojson(self):
        return geojson.dumps(self._geom)

    def to_dict(self):
        return json.loads(self.geojson())

    @classmethod
    def from_dict(cls, the_dict):
        """
        Builds a MultiPoint instance out of a geoJSON compliant dict

        :param the_dict: the geoJSON dict
        :return: `pyowm.utils.geo.MultiPoint` instance
        """
        parsed_geom = geojson.loads(json.dumps(the_dict))
        # placeholder instance whose geometry is replaced by the parsed one
        instance = MultiPoint([(0, 0), (0, 0)])
        instance._geom = parsed_geom
        return instance
class Polygon(Geometry):
    """
    A Polygon geotype. A Polygon is made up of one or more lines: each line is a set
    of connected geopoints, given as a list of (lon, lat) tuples whose last element
    must coincide with the first one. Multiple lines are allowed, so Polygons with
    "holes" can be represented.

    :param list_of_lists: list of lists, each sublist being a line and being composed by tuples - each one being the
        decimal (lon, lat) couple of a geopoint. The last point specified MUST coincide with the first one specified
    :type list_of_lists: list
    :returns: a *Polygon* instance
    :raises: *ValueError* when last point and fist point do not coincide or when no points are specified at all
    """

    def __init__(self, list_of_lists):
        # validate every coordinate pair of every line
        for line in list_of_lists:
            for coords in line:
                assert_is_lon(coords[0])
                assert_is_lat(coords[1])
        if not list_of_lists:
            raise ValueError("A Polygon cannot be empty")
        outer_ring = list_of_lists[0]
        if outer_ring[0] != outer_ring[-1]:
            raise ValueError("The start and end point of Polygon must coincide")
        self._geom = geojson.Polygon(list_of_lists)

    def geojson(self):
        return geojson.dumps(self._geom)

    def to_dict(self):
        return json.loads(self.geojson())

    @property
    def points(self):
        """
        Returns the list of *Point* instances representing the points of the polygon

        :return: list of *Point* objects
        """
        wrapped = geojson.Feature(geometry=self._geom)
        return [Point(pair[0], pair[1])
                for pair in geojson.utils.coords(wrapped)]

    @classmethod
    def from_dict(cls, the_dict):
        """
        Builds a Polygon instance out of a geoJSON compliant dict

        :param the_dict: the geoJSON dict
        :return: `pyowm.utils.geo.Polygon` instance
        """
        parsed_geom = geojson.loads(json.dumps(the_dict))
        # placeholder instance whose geometry is replaced by the parsed one
        instance = Polygon([[[0, 0], [0, 0]]])
        instance._geom = parsed_geom
        return instance

    @classmethod
    def from_points(cls, list_of_lists):
        """
        Creates a *Polygon* instance out of a list of lists, each sublist being populated with
        `pyowm.utils.geo.Point` instances

        :param list_of_lists: list
        :type: list_of_lists: list
        :returns: a *Polygon* instance
        """
        lines = [[(point.lon, point.lat) for point in line]
                 for line in list_of_lists]
        return Polygon(lines)
class MultiPolygon(Geometry):
    """
    A MultiPolygon geotype: a set of (possibly disjoint) Polygons. It is built from
    an iterable whose elements are the list-of-lists coordinate structures that
    define a Polygon geotype (see `pyowm.utils.geo.Polygon` for details).

    :param iterable_of_list_of_lists: iterable whose elements are list of lists of tuples
    :type iterable_of_list_of_lists: iterable
    :returns: a *MultiPolygon* instance
    :raises: *ValueError* when last point and fist point do not coincide or when no points are specified at all
    """

    def __init__(self, iterable_of_list_of_lists):
        if not iterable_of_list_of_lists:
            raise ValueError("A MultiPolygon cannot be empty")
        # validation only: constructing each Polygon raises on malformed input
        for polygon_coords in iterable_of_list_of_lists:
            Polygon(polygon_coords)
        self._geom = geojson.MultiPolygon(iterable_of_list_of_lists)

    def geojson(self):
        return geojson.dumps(self._geom)

    def to_dict(self):
        return json.loads(self.geojson())

    @classmethod
    def from_dict(cls, the_dict):
        """
        Builds a MultiPolygon instance out of a geoJSON compliant dict

        :param the_dict: the geoJSON dict
        :return: `pyowm.utils.geo.MultiPolygon` instance
        """
        parsed_geom = geojson.loads(json.dumps(the_dict))
        # placeholder instance whose geometry is replaced by the parsed one
        instance = MultiPolygon([
            [[[0, 0], [0, 0]]],
            [[[1, 1], [1, 1]]]
        ])
        instance._geom = parsed_geom
        return instance

    @classmethod
    def from_polygons(cls, iterable_of_polygons):
        """
        Creates a *MultiPolygon* instance out of an iterable of Polygon geotypes

        :param iterable_of_polygons: iterable of `pyowm.utils.geo.Polygon` instances
        :type iterable_of_polygons: iterable
        :returns: a *MultiPolygon* instance
        """
        coordinate_sets = [polygon.to_dict()['coordinates']
                           for polygon in iterable_of_polygons]
        return MultiPolygon(coordinate_sets)
class GeometryBuilder:
    """Factory that turns geoJSON dicts into `pyowm.utils.geo.Geometry` subtypes."""

    @classmethod
    def build(cls, the_dict):
        """
        Builds a `pyowm.utils.geo.Geometry` subtype based on the geoJSON geometry type
        specified in the input dictionary

        :param the_dict: a geoJSON compliant dict
        :return: a `pyowm.utils.geo.Geometry` subtype instance
        :raises: `ValueError` if the geometry type cannot be recognized
        """
        assert isinstance(the_dict, dict), 'Geometry must be a dict'
        factories = {
            'Point': Point.from_dict,
            'MultiPoint': MultiPoint.from_dict,
            'Polygon': Polygon.from_dict,
            'MultiPolygon': MultiPolygon.from_dict,
        }
        factory = factories.get(the_dict.get('type', None))
        if factory is None:
            raise ValueError('Unable to build a GeoType object: unrecognized geometry type')
        return factory(the_dict)
| mit | d77db0c88fde53af9af5387174a918d3 | 34.302949 | 132 | 0.620216 | 3.795907 | false | false | false | false |
csparpa/pyowm | pyowm/commons/http_client.py | 1 | 14255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from pyowm.commons import exceptions
from pyowm.commons.enums import ImageTypeEnum
class HttpRequestBuilder:
    """
    A stateful builder for HTTP URLs, query parameters and headers, exposing a
    fluent interface.
    """

    URL_TEMPLATE_WITH_SUBDOMAINS = '{}://{}.{}/{}'
    URL_TEMPLATE_WITHOUT_SUBDOMAINS = '{}://{}/{}'

    def __init__(self, root_uri_token, api_key, config, has_subdomains=True):
        assert isinstance(root_uri_token, str)
        self.root = root_uri_token
        assert isinstance(api_key, str)
        self.api_key = api_key
        assert isinstance(config, dict)
        self.config = config
        assert isinstance(has_subdomains, bool)
        self.has_subdomains = has_subdomains
        # mutable request state, filled in by the with_* methods
        self.schema = None
        self.subdomain = None
        self.proxies = None
        self.path = None
        self.params = {}
        self.headers = {}
        self._set_schema()
        self._set_subdomain()
        self._set_proxies()

    def _set_schema(self):
        # pick the URL scheme according to the SSL setting
        self.schema = 'https' if self.config['connection']['use_ssl'] else 'http'

    def _set_subdomain(self):
        if not self.has_subdomains:
            return
        # the subscription type determines which API subdomain to hit
        self.subdomain = self.config['subscription_type'].subdomain

    def _set_proxies(self):
        use_proxy = self.config['connection']['use_proxy']
        self.proxies = self.config['proxies'] if use_proxy else {}

    def with_path(self, path_uri_token):
        assert isinstance(path_uri_token, str)
        self.path = path_uri_token
        return self

    def with_headers(self, headers):
        assert isinstance(headers, dict)
        self.headers.update(headers)
        return self

    def with_header(self, key, value):
        assert isinstance(key, str)
        # reject values that cannot be serialized to JSON
        try:
            json.dumps(value)
        except TypeError:
            raise ValueError('Header value is not JSON serializable')
        self.headers[key] = value
        return self

    def with_query_params(self, query_params):
        assert isinstance(query_params, dict)
        self.params.update(query_params)
        return self

    def with_api_key(self):
        self.params['APPID'] = self.api_key
        return self

    def with_language(self):
        self.params['lang'] = self.config['language']
        return self

    def build(self):
        """Return the (url, params, headers, proxies) tuple for the request."""
        if self.has_subdomains:
            url = self.URL_TEMPLATE_WITH_SUBDOMAINS.format(
                self.schema, self.subdomain, self.root, self.path)
        else:
            url = self.URL_TEMPLATE_WITHOUT_SUBDOMAINS.format(
                self.schema, self.root, self.path)
        return url, self.params, self.headers, self.proxies

    def __repr__(self):
        return "<%s.%s>" % (__name__, self.__class__.__name__)
class HttpClient:
"""
An HTTP client encapsulating some config data and abstarcting away data raw retrieval
:param api_key: the OWM API key
:type api_key: str
:param config: the configuration dictionary (if not provided, a default one will be used)
:type config: dict
:param root_uri: the root URI of the API endpoint
:type root_uri: str
:param admits_subdomains: if the root URI of the API endpoint admits subdomains based on the subcription type (default: True)
:type admits_subdomains: bool
"""
    def __init__(self, api_key, config, root_uri, admits_subdomains=True):
        """Store client settings and pick a (possibly retry-enabled) HTTP backend."""
        assert isinstance(api_key, str)
        self.api_key = api_key
        assert isinstance(config, dict)
        self.config = config
        assert isinstance(root_uri, str)
        self.root_uri = root_uri
        assert isinstance(admits_subdomains, bool)
        self.admits_subdomains = admits_subdomains
        if self.config['connection']['max_retries'] is not None:
            # this adapter tells how to perform retries
            # NOTE(review): 'method_whitelist' was renamed to 'allowed_methods'
            # in urllib3 >= 1.26 -- confirm the pinned urllib3 version supports it
            self.session_adapter = HTTPAdapter(
                max_retries=Retry(
                    total=self.config['connection']['max_retries'],
                    status_forcelist=[429, 500, 502, 503, 504],
                    method_whitelist=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
                )
            )
            # this is the adapted requests client
            self.http = requests.Session()
            self.http.mount("https://", self.session_adapter)
            self.http.mount("http://", self.session_adapter)
        else:
            # no retries configured: use the plain requests module directly
            self.http = requests
def get_json(self, path, params=None, headers=None):
builder = HttpRequestBuilder(self.root_uri, self.api_key, self.config, has_subdomains=self.admits_subdomains)\
.with_path(path)\
.with_api_key()\
.with_language()\
.with_query_params(params if params is not None else dict())\
.with_headers(headers if headers is not None else dict())
url, params, headers, proxies = builder.build()
try:
resp = self.http.get(url, params=params, headers=headers, proxies=proxies,
timeout=self.config['connection']['timeout_secs'],
verify=self.config['connection']['verify_ssl_certs'])
except requests.exceptions.SSLError as e:
raise exceptions.InvalidSSLCertificateError(str(e))
except requests.exceptions.ConnectionError as e:
raise exceptions.InvalidSSLCertificateError(str(e))
except requests.exceptions.Timeout:
raise exceptions.TimeoutError('API call timeouted')
HttpClient.check_status_code(resp.status_code, resp.text)
try:
return resp.status_code, resp.json()
except:
raise exceptions.ParseAPIResponseError('Impossible to parse API response data')
def get_png(self, path, params=None, headers=None):
# check URL fromt the metaimage: if it looks like a complete URL, use that one (I know, it's a hack...)
try:
partial_path = path.split(self.root_uri)[1].lstrip('/')
except:
partial_path = '' # fallback so that a 404 is issued
builder = HttpRequestBuilder(self.root_uri, self.api_key, self.config, has_subdomains=self.admits_subdomains)\
.with_path(partial_path)\
.with_api_key()\
.with_language()\
.with_query_params(params if params is not None else dict())\
.with_headers(headers if headers is not None else dict())\
.with_header('Accept', ImageTypeEnum.PNG.mime_type)
url, params, headers, proxies = builder.build()
try:
resp = self.http.get(url, stream=True, params=params, headers=headers, proxies=proxies,
timeout=self.config['connection']['timeout_secs'],
verify=self.config['connection']['verify_ssl_certs'])
except requests.exceptions.SSLError as e:
raise exceptions.InvalidSSLCertificateError(str(e))
except requests.exceptions.ConnectionError as e:
raise exceptions.InvalidSSLCertificateError(str(e))
except requests.exceptions.Timeout:
raise exceptions.TimeoutError('API call timeouted')
HttpClient.check_status_code(resp.status_code, resp.text)
try:
return resp.status_code, resp.content
except:
raise exceptions.ParseAPIResponseError('Impossible to parse'
'API response data')
def get_geotiff(self, path, params=None, headers=None):
    """GET `path` as a GeoTIFF image and return ``(status_code, raw bytes)``.

    Identical flow to :meth:`get_png` but requests the GeoTIFF MIME type.
    """
    # Check URL from the metaimage: if it looks like a complete URL, use
    # that one (I know, it's a hack...)
    try:
        partial_path = path.split(self.root_uri)[1].lstrip('/')
    except Exception:  # was a bare `except:`; keep the broad fallback behaviour
        partial_path = ''  # fallback so that a 404 is issued
    builder = HttpRequestBuilder(self.root_uri, self.api_key, self.config, has_subdomains=self.admits_subdomains)\
        .with_path(partial_path)\
        .with_api_key()\
        .with_language()\
        .with_query_params(params if params is not None else dict())\
        .with_headers(headers if headers is not None else dict())\
        .with_header('Accept', ImageTypeEnum.GEOTIFF.mime_type)
    url, params, headers, proxies = builder.build()
    try:
        resp = self.http.get(url, stream=True, params=params, headers=headers, proxies=proxies,
                             timeout=self.config['connection']['timeout_secs'],
                             verify=self.config['connection']['verify_ssl_certs'])
    except requests.exceptions.SSLError as e:
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.ConnectionError as e:
        # NOTE(review): ConnectionError mapped to InvalidSSLCertificateError —
        # confirm intended.
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.Timeout:
        raise exceptions.TimeoutError('API call timeouted')
    HttpClient.check_status_code(resp.status_code, resp.text)
    try:
        return resp.status_code, resp.content
    except Exception:  # was a bare `except:`
        # BUGFIX: implicit string concatenation was missing a space.
        raise exceptions.ParseAPIResponseError('Impossible to parse '
                                               'API response data')
def post(self, path, params=None, data=None, headers=None):
    """POST JSON `data` to `path` and return ``(status_code, parsed JSON)``.

    Returns an empty dict when the response body is empty or not JSON.
    """
    builder = HttpRequestBuilder(self.root_uri, self.api_key, self.config, has_subdomains=self.admits_subdomains)\
        .with_path(path)\
        .with_api_key()\
        .with_language()\
        .with_query_params(params if params is not None else dict())\
        .with_headers(headers if headers is not None else dict())
    url, params, headers, proxies = builder.build()
    try:
        resp = self.http.post(url, params=params, json=data, headers=headers, proxies=proxies,
                              timeout=self.config['connection']['timeout_secs'],
                              verify=self.config['connection']['verify_ssl_certs'])
    except requests.exceptions.SSLError as e:
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.ConnectionError as e:
        # NOTE(review): ConnectionError mapped to InvalidSSLCertificateError —
        # confirm intended.
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.Timeout:
        raise exceptions.TimeoutError('API call timeouted')
    HttpClient.check_status_code(resp.status_code, resp.text)
    # this is a defense against OWM API responses containing an empty body!
    try:
        json_data = resp.json()
    except Exception:  # was a bare `except:`
        json_data = {}
    return resp.status_code, json_data
def put(self, path, params=None, data=None, headers=None):
    """PUT JSON `data` to `path` and return ``(status_code, parsed JSON)``.

    Returns an empty dict when the response body is empty or not JSON.
    """
    builder = HttpRequestBuilder(self.root_uri, self.api_key, self.config, has_subdomains=self.admits_subdomains)\
        .with_path(path)\
        .with_api_key()\
        .with_language()\
        .with_query_params(params if params is not None else dict())\
        .with_headers(headers if headers is not None else dict())
    url, params, headers, proxies = builder.build()
    try:
        resp = self.http.put(url, params=params, json=data, headers=headers, proxies=proxies,
                             timeout=self.config['connection']['timeout_secs'],
                             verify=self.config['connection']['verify_ssl_certs'])
    except requests.exceptions.SSLError as e:
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.ConnectionError as e:
        # NOTE(review): ConnectionError mapped to InvalidSSLCertificateError —
        # confirm intended.
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.Timeout:
        raise exceptions.TimeoutError('API call timeouted')
    HttpClient.check_status_code(resp.status_code, resp.text)
    # this is a defense against OWM API responses containing an empty body!
    try:
        json_data = resp.json()
    except Exception:  # was a bare `except:`
        json_data = {}
    return resp.status_code, json_data
def delete(self, path, params=None, data=None, headers=None):
    """DELETE `path` and return ``(status_code, parsed JSON or None)``.

    Unlike :meth:`post`/:meth:`put` this returns ``None`` (not ``{}``) when
    the body is empty or unparsable.
    """
    builder = HttpRequestBuilder(self.root_uri, self.api_key, self.config, has_subdomains=self.admits_subdomains)\
        .with_path(path)\
        .with_api_key()\
        .with_language()\
        .with_query_params(params if params is not None else dict())\
        .with_headers(headers if headers is not None else dict())
    url, params, headers, proxies = builder.build()
    try:
        resp = self.http.delete(url, params=params, json=data, headers=headers, proxies=proxies,
                                timeout=self.config['connection']['timeout_secs'],
                                verify=self.config['connection']['verify_ssl_certs'])
    except requests.exceptions.SSLError as e:
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.ConnectionError as e:
        # NOTE(review): ConnectionError mapped to InvalidSSLCertificateError —
        # confirm intended.
        raise exceptions.InvalidSSLCertificateError(str(e))
    except requests.exceptions.Timeout:
        raise exceptions.TimeoutError('API call timeouted')
    HttpClient.check_status_code(resp.status_code, resp.text)
    # this is a defense against OWM API responses containing an empty body!
    try:
        json_data = resp.json()
    except Exception:  # was a bare `except:`
        # NOTE(review): post/put fall back to {} here but delete returns None —
        # kept as-is for backward compatibility; confirm the asymmetry is wanted.
        json_data = None
    return resp.status_code, json_data
@classmethod
def check_status_code(cls, status_code, payload):
    """Raise the exception matching an HTTP error status; no-op below 400.

    401 -> UnauthorizedError, 404 -> NotFoundError, 502 -> BadGatewayError;
    400 and any other >= 400 status raise APIRequestError with the payload.
    """
    if status_code < 400:
        return
    if status_code == 401:
        raise exceptions.UnauthorizedError('Invalid API Key provided')
    if status_code == 404:
        raise exceptions.NotFoundError('Unable to find the resource')
    if status_code == 502:
        raise exceptions.BadGatewayError('Unable to contact the upstream server')
    # 400 and every other unexpected error status
    raise exceptions.APIRequestError(payload)
def __repr__(self):
    """Debug representation: module, class name and the API root URI."""
    return "<{}.{} - root: {}>".format(__name__, self.__class__.__name__, self.root_uri)
| mit | 76d06f7bdb74fd1b8b11bbc2eff93928 | 43.546875 | 129 | 0.607226 | 4.252685 | false | true | false | false |
peterhinch/micropython-async | v3/as_drivers/nec_ir/art.py | 1 | 1557 | # art.py Test program for IR remote control decoder aremote.py
# Supports Pyboard and ESP8266
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
# Run this to characterise a remote.
# import as_drivers.nec_ir.art
from sys import platform
import uasyncio as asyncio
# Pick the platform-appropriate Pin class at import time.
ESP32 = platform == 'esp32' or platform == 'esp32_LoBo'

if platform == 'pyboard':
    from pyb import Pin
elif platform == 'esp8266' or ESP32:
    from machine import Pin, freq
else:
    print('Unsupported platform', platform)

from .aremote import *
# Map the NEC_IR error codes (negative `data` values) to readable messages.
errors = {BADSTART : 'Invalid start pulse', BADBLOCK : 'Error: bad block',
          BADREP : 'Error: repeat', OVERRUN : 'Error: overrun',
          BADDATA : 'Error: invalid data', BADADDR : 'Error: invalid address'}

def cb(data, addr):
    # NEC_IR callback: data >= 0 is a keycode, REPEAT marks a held key,
    # any other negative value is an error code looked up in `errors`.
    if data == REPEAT:
        print('Repeat')
    elif data >= 0:
        print(hex(data), hex(addr))
    else:
        print('{} Address: {}'.format(errors[data], hex(addr)))
def test():
    """Print decoded NEC IR codes from the configured pin until ctrl-c."""
    print('Test for IR receiver. Assumes NEC protocol.')
    print('ctrl-c to stop.')
    # Input pin is board-specific.
    if platform == 'pyboard':
        p = Pin('X3', Pin.IN)
    elif platform == 'esp8266':
        freq(160000000)  # raise CPU clock — presumably for decode timing; confirm
        p = Pin(13, Pin.IN)
    elif ESP32:
        p = Pin(23, Pin.IN)
    ir = NEC_IR(p, cb, True)  # Assume r/c uses extended addressing
    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        print('Interrupted')
    finally:
        asyncio.new_event_loop()  # Still need ctrl-d because of interrupt vector

test()
| mit | 2c5578040be00bcd3014f3f5f1f5ed71 | 27.833333 | 81 | 0.637765 | 3.414474 | false | true | false | false |
luispedro/jug | jug/subcommands/check.py | 1 | 2974 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2019, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from .. import task
from ..jug import init
from . import SubCommand
__all__ = [
'check',
'sleep_until',
]
class CheckCommand(SubCommand):
    '''Returns 0 if all tasks are finished. 1 otherwise.

    check(store, options)

    Executes check subcommand

    Parameters
    ----------
    store : jug.backend
        backend to use
    options : jug options
    '''
    name = "check"

    def run(self, store, options, *args, **kwargs):
        # Result is communicated through the exit status only:
        # 0 == every task can be loaded from the store.
        sys.exit(_check_or_sleep_until(store, False))
class SleepUntilCommand(SubCommand):
    '''Wait until all tasks are done, then exit.

    sleep_until(store, options)

    Execute sleep-until subcommand

    Parameters
    ----------
    store : jug.backend
        backend to use
    options : jug options
        ignored
    '''
    name = "sleep-until"

    def run(self, options, store, jugspace, *args, **kwargs):
        while True:
            # Poll until every currently-known task is loadable.
            _check_or_sleep_until(store, True)
            # '__jug__hasbarrier__' is presumably set when the jugfile stopped
            # at a barrier; re-running init() discovers the tasks beyond it.
            # TODO(review): confirm against the jug execution machinery.
            hasbarrier = jugspace.get('__jug__hasbarrier__', False)
            if not hasbarrier:
                sys.exit(0)
            store, jugspace = init(options.jugfile, options.jugdir, store=store)
def _check_or_sleep_until(store, sleep_until):
    """Return 0 if every task can be loaded from `store`, else 1.

    When `sleep_until` is true, instead of returning 1 this polls every
    12 seconds until the task becomes loadable (never returns 1).
    Tasks whose results are implied by an already-loadable dependent are
    skipped (removed from `active`).
    """
    from time import sleep  # hoisted: was re-imported on every poll iteration
    tasks = task.alltasks
    active = set(tasks)
    for t in reversed(tasks):
        if t not in active:
            continue
        while not t.can_load(store):
            if sleep_until:
                sleep(12)
            else:
                return 1
        for dep in task.recursive_dependencies(t):
            # discard() replaces the old try/except KeyError: dep may already
            # have been removed via another dependent.
            active.discard(dep)
    return 0
# Module-level singletons; SubCommand presumably registers them by `name`.
check = CheckCommand()
sleep_until = SleepUntilCommand()
| mit | ded2ceee4974a6bbe7d572e1d9d6700f | 28.445545 | 80 | 0.646604 | 4.002692 | false | false | false | false |
peterhinch/micropython-async | v2/sock_nonblock.py | 1 | 4430 | # sock_nonblock.py Illustration of the type of code required to use nonblocking
# sockets. It is not a working demo and probably has silly errors.
# It is intended as an outline of requirements and also to illustrate some of the
# nasty hacks required on current builds of ESP32 firmware. Platform detection is
# done at runtime.
# If running on ESP8266 these hacks can be eliminated.
# Working implementations may be found in the asynchronous MQTT library.
# https://github.com/peterhinch/micropython-mqtt
# Author: Peter Hinch
# Copyright Peter Hinch 2018 Released under the MIT license
import usocket as socket
import network
import machine
import sys
from micropython import const
from uerrno import EINPROGRESS, ETIMEDOUT
from utime import ticks_ms, ticks_diff, sleep_ms
ESP32 = sys.platform == 'esp32'

BUSY_ERRORS = [EINPROGRESS, ETIMEDOUT]

# ESP32. It is not enough to regularly yield to RTOS with machine.idle(). There are
# two cases where an explicit sleep() is required. Where data has been written to the
# socket and a response is awaited, a timeout may occur without a >= 20ms sleep.
# Secondly during WiFi connection sleeps are required to prevent hangs.
if ESP32:
    # https://forum.micropython.org/viewtopic.php?f=16&t=3608&p=20942#p20942
    BUSY_ERRORS += [118, 119]  # Add in weird ESP32 errors
    # 20ms seems about the minimum before we miss data read from a socket.
    def esp32_pause():  # https://github.com/micropython/micropython-esp32/issues/167
        sleep_ms(20)  # This is horrible.
else:
    # Do nothing on sane platforms. PEP 8 (E731): a def instead of the
    # original `esp32_pause = lambda *_ : None` assignment; same behaviour.
    def esp32_pause(*_):
        return None
# How long to delay between polls. Too long affects throughput, too short can
# starve other coroutines.
_SOCKET_POLL_DELAY = const(5)  # ms

# Maximum time without progress on a read/write before raising OSError.
_RESPONSE_TIME = const(30000)  # ms. max server latency before timeout
class FOO:
    """Illustrative nonblocking-socket client (the file header says this is
    an outline, not a working demo).

    Opens a nonblocking TCP connection to ``server:port`` and provides
    asynchronous read/write helpers with timeout and WiFi-drop detection.
    """

    def __init__(self, server, port):
        # On ESP32 need to submit WiFi credentials
        self._sta_if = network.WLAN(network.STA_IF)
        self._sta_if.active(True)
        # Note that the following blocks, potentially for seconds, owing to DNS lookup
        self._addr = socket.getaddrinfo(server, port)[0][-1]
        self._sock = socket.socket()
        self._sock.setblocking(False)
        try:
            # BUGFIX: original called self._sock.connect(addr) with an
            # undefined name `addr`; the resolved address is self._addr.
            self._sock.connect(self._addr)
        except OSError as e:
            # EINPROGRESS etc. are expected for a nonblocking connect.
            if e.args[0] not in BUSY_ERRORS:
                raise
        if ESP32:  # Revolting kludge :-(
            loop = asyncio.get_event_loop()
            loop.create_task(self._idle_task())

    def _timeout(self, t):
        # True when more than _RESPONSE_TIME ms elapsed since tick count t.
        return ticks_diff(ticks_ms(), t) > _RESPONSE_TIME

    # Read and return n bytes. Raise OSError on timeout ( caught by superclass).
    async def _as_read(self, n):
        sock = self._sock
        data = b''
        t = ticks_ms()
        while len(data) < n:
            esp32_pause()  # Necessary on ESP32 or we can time out.
            if self._timeout(t) or not self._sta_if.isconnected():
                raise OSError(-1)
            try:
                msg = sock.read(n - len(data))
            except OSError as e:  # ESP32 issues weird 119 errors here
                msg = None
                if e.args[0] not in BUSY_ERRORS:
                    raise
            if msg == b'':  # Connection closed by host (?)
                raise OSError(-1)
            if msg is not None:  # data received
                data = b''.join((data, msg))
                t = ticks_ms()  # reset timeout
            await asyncio.sleep_ms(_SOCKET_POLL_DELAY)
        return data

    # Write a buffer
    async def _as_write(self, bytes_wr):
        sock = self._sock
        t = ticks_ms()
        while bytes_wr:
            if self._timeout(t) or not self._sta_if.isconnected():
                raise OSError(-1)
            try:
                n = sock.write(bytes_wr)
            except OSError as e:  # ESP32 issues weird 119 errors here
                n = 0
                if e.args[0] not in BUSY_ERRORS:
                    raise
            if n:  # Bytes still to write
                t = ticks_ms()  # Something was written: reset t/o
                bytes_wr = bytes_wr[n:]
            esp32_pause()  # Precaution. How to prove whether it's necessary?
            await asyncio.sleep_ms(_SOCKET_POLL_DELAY)

    # ESP32 kludge :-(
    async def _idle_task(self):
        while True:
            await asyncio.sleep_ms(10)
            machine.idle()  # Yield to underlying RTOS
| mit | 2962d408ed996e038c9e0b8d31f5e9bd | 39.272727 | 86 | 0.61693 | 3.728956 | false | false | false | false |
peterhinch/micropython-async | v3/as_drivers/as_GPS/as_GPS_utils.py | 1 | 1725 | # as_GPS_utils.py Extra functionality for as_GPS.py
# Put in separate file to minimise size of as_GPS.py for resource constrained
# systems.
# Copyright (c) 2018 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
from .as_GPS import MDY, DMY, LONG
# 16-point compass rose, clockwise from North in 22.5 degree sectors.
_DIRECTIONS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW',
               'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')


def compass_direction(gps):
    """Return the current course of `gps` as a 16-point compass string."""
    course = gps.course
    # Shift by half a sector (11.25 degrees) so each 22.5 degree band maps
    # directly onto an index into _DIRECTIONS; courses near 360 wrap to 'N'.
    if course >= 348.75:
        sector = 360 - course
    else:
        sector = course + 11.25
    return _DIRECTIONS[int(sector // 22.5)]
# Month names for LONG formatting; index is month number minus one.
_MONTHS = ('January', 'February', 'March', 'April', 'May',
           'June', 'July', 'August', 'September', 'October',
           'November', 'December')


def date_string(gps, formatting=MDY):
    """Return the GPS date formatted per `formatting` (MDY, DMY or LONG)."""
    day, month, year = gps.date
    if formatting == LONG:
        # Long format, e.g. "January  1st, 2014"
        ordinal = {1: 'st', 21: 'st', 31: 'st',
                   2: 'nd', 22: 'nd',
                   3: 'rd', 23: 'rd'}
        suffix = ordinal.get(day, 'th')
        return '{:s} {:2d}{:s}, 20{:2d}'.format(_MONTHS[month - 1], day, suffix, year)
    numeric = '{:02d}/{:02d}/{:02d}'
    if formatting == DMY:
        return numeric.format(day, month, year)
    if formatting == MDY:  # Default date format
        return numeric.format(month, day, year)
    raise ValueError('Unknown date format.')
| mit | ce03208096f0d55b72ec5a9e6dac6c15 | 34.916667 | 77 | 0.580046 | 3.25283 | false | false | false | false |
peterhinch/micropython-async | v2/i2c/asi2c_i.py | 1 | 5636 | # asi2c_i.py A communications link using I2C slave mode on Pyboard.
# Initiator class
# The MIT License (MIT)
#
# Copyright (c) 2018 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import uasyncio as asyncio
import machine
import utime
import gc
from asi2c import Channel
# The initiator is an I2C slave. It runs on a Pyboard. I2C uses pyb for slave
# mode, but pins are instantiated using machine.
# reset (if provided) is a means of resetting Responder in case of error: it
# is (pin, active_level, ms)
class Initiator(Channel):
    """I2C slave-mode end of the link (runs on a Pyboard).

    Polls the Responder every `t_poll` ms, exchanging a 2-byte length header
    followed by an optional payload in each direction. If a `reset` tuple
    (pin, active_level, ms) is supplied, a failed Responder is power-cycled
    and the link restarted.
    """
    t_poll = 100  # ms between Initiator polling Responder
    rxbufsize = 200  # receive buffer size in bytes

    def __init__(self, i2c, pin, pinack, reset=None, verbose=True,
                 cr_go=False, go_args=(), cr_fail=False, f_args=()):
        # cr_go: optional coroutine started once the link is up;
        # cr_fail: optional coroutine awaited after a Responder failure.
        super().__init__(i2c, pin, pinack, verbose, self.rxbufsize)
        self.reset = reset
        self.cr_go = cr_go
        self.go_args = go_args
        self.cr_fail = cr_fail
        self.f_args = f_args
        if reset is not None:
            # Drive the reset pin to its inactive level initially.
            reset[0].init(mode=machine.Pin.OUT, value=not (reset[1]))
        # Self measurement
        self.nboots = 0  # No. of reboots of Responder
        self.block_max = 0  # Blocking times: max
        self.block_sum = 0  # Total
        self.block_cnt = 0  # Count
        self.loop = asyncio.get_event_loop()
        self.loop.create_task(self._run())

    def waitfor(self, val):  # Wait for response for 1 sec
        # Busy-waits on rem() (inherited from Channel — presumably reads the
        # Responder's handshake pin; confirm in asi2c.py). OSError on timeout.
        tim = utime.ticks_ms()
        while not self.rem() == val:
            if utime.ticks_diff(utime.ticks_ms(), tim) > 1000:
                raise OSError

    async def reboot(self):
        """Close the channel and, if a reset line exists, pulse it."""
        self.close()  # Leave own pin high
        if self.reset is not None:
            rspin, rsval, rstim = self.reset
            self.verbose and print('Resetting target.')
            rspin(rsval)  # Pulse reset line
            await asyncio.sleep_ms(rstim)
            rspin(not rsval)

    async def _run(self):
        # Outer loop: (re)boot the Responder, sync, then poll until failure.
        while True:
            # If hardware link exists reboot Responder
            await self.reboot()
            self.txbyt = b''
            self.rxbyt = b''
            await self._sync()
            await asyncio.sleep(1)  # Ensure Responder is ready
            if self.cr_go:
                self.loop.create_task(self.cr_go(*self.go_args))
            while True:
                gc.collect()
                try:
                    tstart = utime.ticks_us()
                    self._sendrx()
                    t = utime.ticks_diff(utime.ticks_us(), tstart)
                except OSError:  # Responder stopped responding
                    break
                await asyncio.sleep_ms(Initiator.t_poll)
                self.block_max = max(self.block_max, t)  # self measurement
                self.block_cnt += 1
                self.block_sum += t
            self.nboots += 1
            if self.cr_fail:
                await self.cr_fail(*self.f_args)
            if self.reset is None:  # No means of recovery
                raise OSError('Responder fail.')

    # Send payload length (may be 0) then payload (if any)
    def _sendrx(self, sn=bytearray(2), txnull=bytearray(2)):
        # NOTE: the mutable defaults are deliberate preallocated buffers
        # shared across calls (avoids per-call allocation on MicroPython).
        siz = self.txsiz if self.cantx else txnull
        if self.rxbyt:
            siz[1] |= 0x80  # Hold off further received data
        else:
            siz[1] &= 0x7f
        # CRITICAL TIMING. Trigger interrupt on responder immediately before
        # send. Send must start before RX begins. Fast responders may need to
        # do a short blocking wait to guarantee this.
        self.own(1)  # Trigger interrupt.
        self.i2c.send(siz)  # Blocks until RX complete.
        self.waitfor(1)
        self.own(0)
        self.waitfor(0)
        if self.txbyt and self.cantx:
            self.own(1)
            self.i2c.send(self.txbyt)
            self.waitfor(1)
            self.own(0)
            self.waitfor(0)
            self._txdone()  # Invalidate source
        # Send complete
        self.waitfor(1)  # Wait for responder to request send
        self.own(1)  # Acknowledge
        self.i2c.recv(sn)
        self.waitfor(0)
        self.own(0)
        # Length header: low byte + 7 bits of high byte; top bit is the
        # remote's flow-control flag (hold off our transmissions).
        n = sn[0] + ((sn[1] & 0x7f) << 8)  # no of bytes to receive
        if n > self.rxbufsize:
            raise ValueError('Receive data too large for buffer.')
        self.cantx = not bool(sn[1] & 0x80)
        if n:
            self.waitfor(1)  # Wait for responder to request send
            # print('setting up receive', n,' bytes')
            self.own(1)  # Acknowledge
            mv = memoryview(self.rx_mv[0: n])
            self.i2c.recv(mv)
            self.waitfor(0)
            self.own(0)
            self._handle_rxd(mv)
| mit | 5d8e569c6cfcc5a11d64082bfc90c1a7 | 38.690141 | 79 | 0.596345 | 3.727513 | false | false | false | false |
peterhinch/micropython-async | v3/as_drivers/syncom/syncom.py | 2 | 9641 | # syncom.py Synchronous communication channel between two MicroPython
# platforms. 4 June 2017
# Uses uasyncio.
# The MIT License (MIT)
#
# Copyright (c) 2017-2021 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Timing: was 4.5mS per char between Pyboard and ESP8266 i.e. ~1.55Kbps. But
# this version didn't yield on every bit, invalidating t/o detection.
# New asyncio version yields on every bit.
# Instantaneous bit rate running ESP8266 at 160MHz: 1.6Kbps
# Mean throughput running test programs 8.8ms per char (800bps).
from utime import ticks_diff, ticks_ms
import uasyncio as asyncio
from micropython import const
import ujson
_BITS_PER_CH = const(7)
_BITS_SYN = const(8)
_SYN = const(0x9d)
_RX_BUFLEN = const(100)
class SynComError(Exception):
    # Raised internally on link timeout or peer failure (see _synchronise,
    # _get_bit and the JSON-decode failure path); caught in SynCom._run.
    pass
class SynCom:
    """Bit-banged, clock-synchronised full-duplex channel between two boards.

    One end is instantiated with passive=True, the other with passive=False
    (the initiator drives the first clock edge). Objects are queued with
    send(), serialised as JSON (unless string_mode) and shifted out 7 bits
    per character, interleaved with incoming bits on each clock transition.
    """
    def __init__(self, passive, ckin, ckout, din, dout, pin_reset=None,
                 timeout=0, string_mode=False, verbose=True):  # Signal unsupported on rp2
        self.passive = passive
        self.string_mode = string_mode
        self._running = False  # _run coro is down
        self._synchronised = False
        self.verbose = verbose
        self.idstr = 'passive' if self.passive else 'initiator'
        self.ckin = ckin  # Interface pins
        self.ckout = ckout
        self.din = din
        self.dout = dout
        self.pin_reset = pin_reset
        self._timeout = timeout  # In ms. 0 == No timeout.
        self.lsttx = []  # Queue of strings to send
        self.lstrx = []  # Queue of received strings

    # Start interface and initiate an optional user task. If a timeout and reset
    # signal are specified and the target times out, the target is reset and the
    # interface restarted. If a user task is provided, this must return if a
    # timeout occurs (i.e. not running() or await_obj returns None).
    # If it returns for other (error) reasons, a timeout event is forced.
    async def start(self, user_task=None, awaitable=None):
        while True:
            if not self._running:  # Restarting
                self.lstrx = []  # Clear down queues
                self.lsttx = []
                self._synchronised = False
                asyncio.create_task(self._run())  # Reset target (if possible)
                while not self._synchronised:  # Wait for sync
                    await asyncio.sleep_ms(100)
            if user_task is None:
                while self._running:
                    await asyncio.sleep_ms(100)
            else:
                await user_task(self)  # User task must quit on timeout
                # If it quit for other reasons force a t/o exception
                self.stop()
            await asyncio.sleep_ms(0)
            if awaitable is not None:
                await awaitable()  # Optional user coro

    # Can be used to force a failure
    def stop(self):
        """Mark the link down and drive both output lines low."""
        self._running = False
        self.dout(0)
        self.ckout(0)

    # Queue an object for tx. Convert to string NOW: snapshot of current
    # object state
    def send(self, obj):
        if self.string_mode:
            self.lsttx.append(obj)  # strings are immutable
        else:
            self.lsttx.append(ujson.dumps(obj))

    # Number of queued objects (None on timeout)
    def any(self):
        if self._running:
            return len(self.lstrx)

    # Wait for an object. Return None on timeout.
    # If in string mode returns a string (or None on t/o)
    async def await_obj(self, t_ms=10):
        while self._running:
            await asyncio.sleep_ms(t_ms)
            if len(self.lstrx):
                return self.lstrx.pop(0)

    # running() is False if the target has timed out.
    def running(self):
        return self._running

    # Private methods
    async def _run(self):
        # Set up initial line states, optionally hard-reset the peer, then
        # exchange SYN bytes until synchronised; afterwards shuttle bytes.
        self.indata = 0  # Current data bits
        self.inbits = 0
        self.odata = _SYN
        self.phase = 0  # Interface initial conditions
        if self.passive:
            self.dout(0)
            self.ckout(0)
        else:
            self.dout(self.odata & 1)
            self.ckout(1)
            self.odata >>= 1  # we've sent that bit
            self.phase = 1
        if self.pin_reset is not None:
            self.verbose and print(self.idstr, ' resetting target...')
            self.pin_reset(0)
            await asyncio.sleep_ms(100)
            self.pin_reset(1)
            await asyncio.sleep(1)  # let target settle down

        self.verbose and print(self.idstr, ' awaiting sync...')
        try:
            self._running = True  # False on failure: can be cleared by other tasks
            while self.indata != _SYN:  # Don't hog CPU while waiting for start
                await self._synchronise()
            self._synchronised = True
            self.verbose and print(self.idstr, ' synchronised.')

            sendstr = ''  # string for transmission
            send_idx = None  # character index. None: no current string
            getstr = ''  # receive string
            rxbuf = bytearray(_RX_BUFLEN)
            rxidx = 0
            while True:
                # Pick the next outgoing character (0 == idle filler /
                # end-of-string marker).
                if send_idx is None:
                    if len(self.lsttx):
                        sendstr = self.lsttx.pop(0)  # oldest first
                        send_idx = 0
                if send_idx is not None:
                    if send_idx < len(sendstr):
                        self.odata = ord(sendstr[send_idx])
                        send_idx += 1
                    else:
                        send_idx = None
                if send_idx is None:  # send zeros when nothing to send
                    self.odata = 0
                if self.passive:
                    await self._get_byte_passive()
                else:
                    await self._get_byte_active()
                if self.indata:  # Optimisation: buffer reduces allocations.
                    if rxidx >= _RX_BUFLEN:  # Buffer full: append to string.
                        getstr = ''.join((getstr, bytes(rxbuf).decode()))
                        rxidx = 0
                    rxbuf[rxidx] = self.indata
                    rxidx += 1
                elif rxidx or len(getstr):  # Got 0 but have data so string is complete.
                    # Append buffer.
                    getstr = ''.join((getstr, bytes(rxbuf[:rxidx]).decode()))
                    if self.string_mode:
                        self.lstrx.append(getstr)
                    else:
                        try:
                            self.lstrx.append(ujson.loads(getstr))
                        except:  # ujson fail means target has crashed
                            raise SynComError
                    getstr = ''  # Reset for next string
                    rxidx = 0

        except SynComError:
            if self._running:
                self.verbose and print('SynCom Timeout.')
            else:
                self.verbose and print('SynCom was stopped.')
        finally:
            self.stop()

    async def _get_byte_active(self):
        # Initiator side: exchange a full character, one bit per clock edge.
        inbits = 0
        for _ in range(_BITS_PER_CH):
            inbits = await self._get_bit(inbits)  # LSB first
        self.indata = inbits

    async def _get_byte_passive(self):
        # Passive side: one bit of the previous character is still pending.
        self.indata = await self._get_bit(self.inbits)  # MSB is outstanding
        inbits = 0
        for _ in range(_BITS_PER_CH - 1):
            inbits = await self._get_bit(inbits)
        self.inbits = inbits

    async def _synchronise(self):  # wait for clock
        # Used during the SYN handshake: shifts 8-bit frames (_BITS_SYN).
        t = ticks_ms()
        while self.ckin() == self.phase ^ self.passive ^ 1:
            # Other tasks can clear self._running by calling stop()
            if (self._timeout and ticks_diff(ticks_ms(), t) > self._timeout) or not self._running:
                raise SynComError
            await asyncio.sleep_ms(0)
        self.indata = (self.indata | (self.din() << _BITS_SYN)) >> 1
        odata = self.odata
        self.dout(odata & 1)
        self.odata = odata >> 1
        self.phase ^= 1
        self.ckout(self.phase)  # set clock

    async def _get_bit(self, dest):
        # Wait for the peer's clock edge, then sample din and emit the next
        # outgoing bit; returns dest with the sampled bit shifted in.
        t = ticks_ms()
        while self.ckin() == self.phase ^ self.passive ^ 1:
            if (self._timeout and ticks_diff(ticks_ms(), t) > self._timeout) or not self._running:
                raise SynComError
            await asyncio.sleep_ms(0)
        dest = (dest | (self.din() << _BITS_PER_CH)) >> 1
        obyte = self.odata
        self.dout(obyte & 1)
        self.odata = obyte >> 1
        self.phase ^= 1
        self.ckout(self.phase)
        return dest
| mit | 59bd78cad46de45a35cae32aae39de5e | 39.338912 | 98 | 0.563946 | 4.052543 | false | false | false | false |
luispedro/jug | jug/backends/redis_store.py | 1 | 6093 | #-*- coding: utf-8 -*-
# Copyright (C) 2009-2022, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
redis_store: store based on a redis backend
'''
import re
import logging
from base64 import b64encode, b64decode
from jug.backends.encode import encode, decode
from .base import base_store, base_lock
try:
import redis
redis_functional = True
except ImportError:
redis_functional = False
def _resultname(name):
if type(name) == str:
name = name.encode('utf-8')
return b'result:' + name
def _lockname(name):
if type(name) == str:
name = name.encode('utf-8')
return b'lock:' + name
# Lock-value sentinels stored under the lock key (see redis_lock below).
_LOCKED = b'L'
_FAILED = b'F'

# Matches e.g. redis://host, redis://host:6379, redis://host:6379/
_redis_urlpat = re.compile(r'redis://(?P<host>[A-Za-z0-9\.\-]+)(\:(?P<port>[0-9]+))?/?')
class redis_store(base_store):
    def __init__(self, url):
        '''
        Connect to the redis server encoded in `url` (redis://host[:port]).
        '''
        if not redis_functional:
            raise IOError('jug.redis_store: redis module is not found!')
        redis_params = {}
        match = _redis_urlpat.match(url)
        if match:
            redis_params = match.groupdict()
            if redis_params['port'] is None:  # idiom fix: was `== None`
                del redis_params['port']
            else:
                redis_params['port'] = int(redis_params['port'])
        logging.info('connecting to %s' % redis_params)
        self.redis = redis.Redis(**redis_params)

    def dump(self, object, name):
        '''
        dump(object, name)

        Store `object` under `name`, b64-encoded.
        '''
        s = encode(object)
        if s:
            s = b64encode(s)
        self.redis.set(_resultname(name), s)

    def can_load(self, name):
        '''
        can = can_load(name)
        '''
        return self.redis.exists(_resultname(name))

    def load(self, name):
        '''
        obj = load(name)

        Loads the object identified by `name`.
        '''
        s = self.redis.get(_resultname(name))
        if s:
            s = b64decode(s)
        return decode(s)

    def remove(self, name):
        '''
        was_removed = remove(name)

        Remove the entry associated with name.

        Returns whether any entry was actually removed.
        '''
        return self.redis.delete(_resultname(name))

    def cleanup(self, active, keeplocks=False):
        '''
        cleanup()

        Implement 'cleanup' command: remove results not in `active` and,
        unless `keeplocks`, all locks. Returns number of keys removed.
        '''
        existing = set(self.list())
        existing -= set(act.hash() for act in active)
        cleaned = len(existing)
        for superflous in existing:
            self.redis.delete(_resultname(superflous))
        if not keeplocks:
            cleaned += self.remove_locks()
        return cleaned

    def remove_locks(self):
        # Delete every lock key; returns how many were removed.
        locks = self.redis.keys('lock:*')
        for lk in locks:
            self.redis.delete(lk)
        return len(locks)

    def list(self):
        # Yield result names with the 'result:' prefix stripped.
        existing = self.redis.keys('result:*')
        for ex in existing:
            yield ex[len('result:'):]

    def listlocks(self):
        # Yield lock names with the 'lock:' prefix stripped.
        locks = self.redis.keys('lock:*')
        for lk in locks:
            yield lk[len('lock:'):]

    def getlock(self, name):
        return redis_lock(self.redis, name)

    def close(self):
        # It seems some versions of the protocol are implemented differently
        # and do not have the ``disconnect`` method
        try:
            self.redis.disconnect()
        except Exception:  # was a bare `except:`; keep the best-effort close
            pass
class redis_lock(base_lock):
'''
redis_lock
Functions:
----------
- get(): acquire the lock
- release(): release the lock
- is_locked(): check lock state
'''
def __init__(self, redis, name):
self.name = _lockname(name)
self.redis = redis
def get(self):
'''
lock.get()
'''
# We need getset to be race-free
previous = self.redis.getset(self.name, _LOCKED)
if previous == _FAILED:
self.redis.set(self.name, previous)
return (previous is None)
def release(self):
'''
lock.release()
Removes lock
'''
self.redis.delete(self.name)
def is_locked(self):
'''
locked = lock.is_locked()
'''
status = self.redis.get(self.name)
return status is not None and status in (_LOCKED, _FAILED)
def fail(self):
'''
lock.fail()
Mark a task as failed.
Has no effect if the task isn't locked
Since we have to check the state of the lock before failing this
call is not atomic nor race-free.
'''
status = self.redis.get(self.name)
if status == _LOCKED:
self.redis.set(self.name, _FAILED)
return True
elif status == _FAILED:
return True
else:
return False
def is_failed(self):
'''
failed = lock.is_failed()
Returns whether this task is marked as failed.
'''
status = self.redis.get(self.name)
return status is not None and status == _FAILED
| mit | 9aa50bac68725441ecbd4a48f649d9d4 | 24.282158 | 88 | 0.581815 | 3.977154 | false | false | false | false |
luispedro/jug | jug/tests/test_lock.py | 1 | 3139 | from jug.backends.file_store import file_store, file_based_lock, file_keepalive_based_lock
from jug.backends.dict_store import dict_store
from jug.tests.utils import tmp_file_store
from .task_reset import task_reset_at_exit, task_reset
from jug.backends import memoize_store
from jug import Task
from time import sleep
def test_twice(tmpdir):
    # Acquiring twice only succeeds the first time; releasing resets
    # the lock so the cycle repeats identically.
    lk = file_based_lock(str(tmpdir), 'foo')
    for _ in range(2):
        assert lk.get()
        assert not lk.get()
        lk.release()
def test_twolocks(tmpdir):
    # Locks with different names are independent of each other.
    first = file_based_lock(str(tmpdir), 'foo')
    second = file_based_lock(str(tmpdir), 'bar')
    assert first.get()
    assert second.get()
    assert not first.get()
    assert not second.get()
    first.release()
    second.release()
def test_fail_and_lock(tmpdir):
    lock = file_based_lock(str(tmpdir), 'foo')

    def state():
        # (failed, locked) snapshot; calls in the same order as before.
        return (lock.is_failed(), lock.is_locked())

    assert state() == (False, False)
    assert not lock.fail()           # cannot fail an unlocked task
    assert state() == (False, False)
    assert lock.get()
    assert state() == (False, True)
    assert lock.fail()
    assert state() == (True, True)
    assert lock.fail()               # failing twice is idempotent
    assert state() == (True, True)
    assert not lock.get()            # failed lock is still held
    assert state() == (True, True)
    lock.release()
    assert state() == (False, False)
def double(x):
    # Trivial task body used by the memoization test below.
    return 2 * x
@task_reset
def test_memoize_lock(tmp_file_store):
    task = Task(double, 2)
    assert task.lock()
    # Wrapping the store in a memoizing layer must not hide lock state.
    Task.store = memoize_store(Task.store, list_base=True)
    assert task.is_locked()
    duplicate = Task(double, 2)
    assert duplicate.is_locked()
def test_lock_bytes(tmp_file_store):
    # str and bytes names must resolve to the same lock file.
    store = tmp_file_store
    by_str = store.getlock('foo')
    by_bytes = store.getlock(b'foo')
    assert by_str.fullname == by_bytes.fullname
def test_lock_bytes2():
    # Acquiring under a str name must be visible under the bytes name.
    store = dict_store()
    by_str = store.getlock('foo')
    by_bytes = store.getlock(b'foo')
    by_str.get()
    assert by_bytes.is_locked()
def _wait_exitcode(proc, timeout=5.0, interval=0.2):
    # Poll ``proc`` until it returns an exit code or ``timeout`` seconds
    # have elapsed; returns the exit code (None if it never exited).
    # Extracted because the identical loop appeared twice in the test.
    for _ in range(int(timeout / interval)):
        ret = proc.poll()
        if ret is not None:
            return ret
        sleep(interval)
    return proc.poll()


def test_lock_keepalive(tmpdir):
    lock = file_keepalive_based_lock(str(tmpdir), 'foo')
    assert lock.monitor is None
    assert lock.get()
    assert lock.monitor.poll() is None
    p = lock.monitor
    assert not lock.get()
    assert p == lock.monitor, "A new process was started and shouldn't"
    assert lock.monitor.poll() is None
    lock.release()
    # Releasing must kill the keep-alive subprocess with SIGKILL.
    assert _wait_exitcode(p) == -9  # SIGKILL
    assert lock.get()
    assert p != lock.monitor, "A new process should have been started but wasn't"
    assert lock.monitor.poll() is None
    p = lock.monitor
    assert not lock.get()
    assert lock.monitor.poll() is None
    assert p == lock.monitor, "A new process was started and shouldn't"
    lock.release()
    assert _wait_exitcode(p) == -9  # SIGKILL
| mit | 700a9b207ec88448e8172dea2a294edf | 23.912698 | 90 | 0.636827 | 3.434354 | false | true | false | false |
peterhinch/micropython-async | v2/check_async_code.py | 1 | 7687 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
# check_async_code.py
# A simple script to identify a common error which causes silent failures under
# MicroPython (issue #3241).
# This is where a task is declared with async def and then called as if it were
# a regular function.
# Copyright Peter Hinch 2017
# Issued under the MIT licence
import sys
import re
tasks = set()
mismatch = False
def pass1(part, lnum):
    """First pass: collect the names of all functions declared with
    ``async def`` into the module-level ``tasks`` set, reporting
    duplicate declarations. Returns False if a problem was found."""
    global mismatch
    opart = part  # keep the unmodified text for error reporting
    sysnames = ('__aenter__', '__aexit__', '__aiter__', '__anext__')
    # These are the commonest system functions declared with async def.
    # Minimise spurious duplicate function definition error messages.
    good = True
    if not part.startswith('#'):
        mismatch = False
        part = stripquotes(part, lnum) # Remove quoted strings (which might contain code)
        # stripquotes sets the global ``mismatch`` flag on unbalanced quotes.
        good &= not mismatch
        if part.startswith('async'):
            pos = part.find('def')
            if pos >= 0:
                part = part[pos + 3:]
                part = part.lstrip()
                pos = part.find('(')
                if pos >= 0:
                    # Everything up to the '(' is the function name.
                    fname = part[:pos].strip()
                    if fname in tasks and fname not in sysnames:
                        # Note this gives a false positive if a method of the same name
                        # exists in more than one class.
                        print('Duplicate function declaration "{}" in line {}'.format(fname, lnum))
                        print(opart)
                        print()
                        good = False
                    else:
                        tasks.add(fname)
    return good
# Strip quoted strings (which may contain code)
def stripquotes(part, lnum=0):
    """Remove every quoted string from *part*, recursing until none remain.

    If a quote is unmatched, print a diagnostic, set the global
    ``mismatch`` flag and return the partially-stripped text.
    """
    global mismatch
    for qchar in ('"', "'"):
        pos = part.find(qchar)
        if pos >= 0:
            part = part[:pos] + part[pos + 1:] # strip 1st qchar
            pos1 = part.find(qchar)
            # BUGFIX: was ``if pos > 0:`` which tested the wrong variable.
            # A line *starting* with a quote (pos == 0) was falsely
            # reported as mismatched, while a genuinely unmatched quote
            # later in the line (pos1 == -1) duplicated text via
            # part[pos1 + 1:]. The correct test is whether a closing
            # quote was found.
            if pos1 >= 0:
                part = part[:pos] + part[pos1+1:] # Strip whole quoted string
                part = stripquotes(part, lnum)
            else:
                print('Mismatched quotes in line', lnum)
                mismatch = True
                return part # for what it's worth
    return part
def pass2(part, lnum):
    """Second pass: for every known async function name found in ``tasks``
    by pass1, check each occurrence in *part* is awaited (or otherwise
    legitimately used). The regex checks form an ordered cascade: each
    ``continue`` accepts one legitimate usage pattern; falling through all
    of them produces a warning. Returns False if anything was flagged."""
    global mismatch
    opart = part  # unmodified text, kept for error reporting
    good = True
    if not part.startswith('#') and not part.startswith('async'):
        mismatch = False
        part = stripquotes(part, lnum) # Remove quoted strings (which might contain code)
        good &= not mismatch
        for task in tasks:
            sstr = ''.join((task, r'\w*'))
            match = re.search(sstr, part)
            if match is None: # No match
                continue
            if match.group(0) != task: # No exact match
                continue
            # Accept await task, await task(args), a = await task(args)
            sstr = ''.join((r'.*await[ \t]+', task))
            if re.search(sstr, part):
                continue
            # Accept await obj.task, await obj.task(args), a = await obj.task(args)
            sstr = ''.join((r'.*await[ \t]+\w+\.', task))
            if re.search(sstr, part):
                continue
            # Accept assignments e.g. a = mytask or
            # after = asyncio.after if p_version else asyncio.sleep
            # or comparisons thistask == thattask
            sstr = ''.join((r'=[ \t]*', task, r'[ \t]*[^(]'))
            if re.search(sstr, part):
                continue
            # Not awaited but could be passed to function e.g.
            # run_until_complete(mytask(args))
            sstr = ''.join((r'.*\w+[ \t]*\([ \t]*', task, r'[ \t]*\('))
            if re.search(sstr, part):
                # Calls via the scheduler API are fine; anything else is
                # only possibly correct, so ask for review.
                sstr = r'run_until_complete|run_forever|create_task|NamedTask'
                if re.search(sstr, part):
                    continue
                print('Please review line {}: async function "{}" is passed to a function.'.format(lnum, task))
                print(opart)
                print()
                good = False
                continue
            # func(mytask, more_args) may or may not be an error
            sstr = ''.join((r'.*\w+[ \t]*\([ \t]*', task, r'[ \t]*[^\(]'))
            if re.search(sstr, part):
                print('Please review line {}: async function "{}" is passed to a function.'.format(lnum, task))
                print(opart)
                print()
                good = False
                continue
            # Might be a method. Discard object.
            sstr = ''.join((r'.*\w+[ \t]*\([ \t]*\w+\.', task))
            if re.search(sstr, part):
                continue
            print('Please review line {}: async function "{}" is not awaited.'.format(lnum, task))
            print(opart)
            print()
            good = False
    return good
txt = '''check_async_code.py
usage: check_async_code.py sourcefile.py
This rather crude script is designed to locate a single type of coding error
which leads to silent runtime failure and hence can be hard to locate.
It is intended to be used on otherwise correct source files and is not robust
in the face of syntax errors. Use pylint or other tools for general syntax
checking.
It assumes code is written in the style advocated in the tutorial where coros
are declared with "async def".
Under certain circumstances it can produce false positives. In some cases this
is by design. Given an asynchronous function foo the following is correct:
loop.run_until_complete(foo())
The following line may or may not be an error depending on the design of bar()
bar(foo, args)
Likewise asynchronous functions can be put into objects such as dicts, lists or
sets. You may wish to review such lines to check that the intention was to put
the function rather than its result into the object.
A false positive which is a consequence of the hacky nature of this script is
where a task has the same name as a synchronous bound method of some class. A
call to the bound method will produce an erroneous warning. This is because the
code does not parse class definitions.
In practice the odd false positive is easily spotted in the code.
'''
def usage(code=0):
    # Print the module help text and exit with status ``code``.
    print(txt)
    sys.exit(code)
# Process a line
in_triple_quote = False  # module-level state: are we inside a ''' / """ string?
def do_line(line, passn, lnum):
    """Split *line* on ';' and feed each statement to the pass function
    ``passn`` (pass1 or pass2), skipping comments and the contents of
    triple-quoted strings. Returns False if any statement was flagged."""
    global in_triple_quote
    ignore = False
    good = True
    # TODO The following isn't strictly correct. A line might be of the form
    # erroneous Python ; ''' start of string
    # It could therefore miss the error.
    if re.search(r'[^"]*"""|[^\']*\'\'\'', line):
        if in_triple_quote:
            # Discard rest of line which terminates triple quote
            ignore = True
        in_triple_quote = not in_triple_quote
    if not in_triple_quote and not ignore:
        parts = line.split(';')
        for part in parts:
            # discard comments and whitespace at start and end
            part = part.split('#')[0].strip()
            if part:
                good &= passn(part, lnum)
    return good
def main(fn):
    """Run both analysis passes over the file *fn* and report the outcome."""
    global in_triple_quote
    all_clean = True
    try:
        with open(fn, 'r') as f:
            for current_pass in (pass1, pass2):
                in_triple_quote = False
                for lnum, line in enumerate(f, 1):
                    all_clean &= do_line(line, current_pass, lnum)
                f.seek(0)  # rewind so the next pass re-reads the file
    except FileNotFoundError:
        print('File {} does not exist.'.format(fn))
        return
    if all_clean:
        print('No errors found!')
# Command-line entry point: expects exactly one argument, the file to check.
if __name__ == "__main__":
    if len(sys.argv) !=2:
        usage(1)
    arg = sys.argv[1].strip()
    if arg == '--help' or arg == '-h':
        usage()
    main(arg)
| mit | 395d0e78d2354482bf3b160a499e9f49 | 36.315534 | 111 | 0.557565 | 4.115096 | false | false | false | false |
peterhinch/micropython-async | v2/gps/as_GPS_utils.py | 1 | 1724 | # as_GPS_utils.py Extra functionality for as_GPS.py
# Put in separate file to minimise size of as_GPS.py for resource constrained
# systems.
# Copyright (c) 2018 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
from as_GPS import MDY, DMY, LONG
_DIRECTIONS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW',
               'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')


def compass_direction(gps):
    """Return the cardinal/intercardinal point for gps.course as a string."""
    course = gps.course
    # Rotate by half a sector (11.25°) so each 22.5° bucket is centred
    # on its named direction; courses within 11.25° of north wrap to 'N'.
    if course >= 348.75:
        rotated = 360 - course
    else:
        rotated = course + 11.25
    return _DIRECTIONS[int(rotated // 22.5)]
_MONTHS = ('January', 'February', 'March', 'April', 'May',
           'June', 'July', 'August', 'September', 'October',
           'November', 'December')


def date_string(gps, formatting=MDY):
    """Render gps.date (day, month, 2-digit year) in the requested format."""
    day, month, year = gps.date
    if formatting == LONG:
        # Long format, e.g. "January 1st, 2014".
        if day in (1, 21, 31):
            suffix = 'st'
        elif day in (2, 22):
            suffix = 'nd'
        elif day in (3, 23):
            suffix = 'rd'
        else:
            suffix = 'th'
        return '{:s} {:2d}{:s}, 20{:2d}'.format(_MONTHS[month - 1], day, suffix, year)
    numeric = '{:02d}/{:02d}/{:02d}'
    if formatting == DMY:
        return numeric.format(day, month, year)
    elif formatting == MDY: # Default date format
        return numeric.format(month, day, year)
    raise ValueError('Unknown date format.')
dedupeio/dedupe | dedupe/blocking.py | 1 | 7454 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import annotations
import logging
import time
from collections import defaultdict
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import (
Any,
Callable,
DefaultDict,
Generator,
Iterable,
List,
Sequence,
Union,
)
import dedupe.predicates
from dedupe._typing import Data, Record, RecordID
from dedupe.index import Index
Docs = Union[Iterable[str], Iterable[Iterable[str]]]
IndexList = DefaultDict[str, List[dedupe.predicates.IndexPredicate]]
logger = logging.getLogger(__name__)
def index_list() -> IndexList:
    # Module-level factory used as the default_factory for the per-field
    # predicate mapping built in Fingerprinter.__init__ (a named function
    # rather than a lambda — presumably for picklability; confirm).
    return defaultdict(list)
class Fingerprinter(object):
    """Takes in a record and returns all blocks that record belongs to"""
    def __init__(self, predicates: Iterable[dedupe.predicates.Predicate]) -> None:
        self.predicates = predicates
        self.index_fields: dict[str, IndexList]
        self.index_fields = defaultdict(index_list)
        """
        A dictionary of all the fingerprinter methods that use an
        index of data field values. The keys are the field names,
        which can be useful to know for indexing the data.
        """
        self.index_predicates = []
        # Collect every simple predicate that needs an index, grouped by
        # field and then by predicate type.
        for full_predicate in predicates:
            for predicate in full_predicate:
                if hasattr(predicate, "index"):
                    self.index_fields[predicate.field][predicate.type].append(predicate)
                    self.index_predicates.append(predicate)
    def __call__(
        self, records: Iterable[Record], target: bool = False
    ) -> Generator[tuple[str, RecordID], None, None]:
        """
        Generate the predicates for records. Yields tuples of (predicate,
        record_id).
        Args:
            records: A sequence of tuples of (record_id,
                     record_dict). Can often be created by
                     `data_dict.items()`.
            target: Indicates whether the data should be treated as
                    the target data. This affects the behavior of
                    search predicates. If `target` is set to
                    `True`, a search predicate will return the
                    value itself. If `target` is set to `False` the
                    search predicate will return all possible
                    values within the specified search distance.
                    Let's say we have a
                    `LevenshteinSearchPredicate` with an associated
                    distance of `1` on a `"name"` field; and we
                    have a record like `{"name": "thomas"}`. If the
                    `target` is set to `True` then the predicate
                    will return `"thomas"`. If `target` is set to
                    `False`, then the blocker could return
                    `"thomas"`, `"tomas"`, and `"thoms"`. By using
                    the `target` argument on one of your datasets,
                    you will dramatically reduce the total number
                    of comparisons without a loss of accuracy.
        .. code:: python
           > data = [(1, {'name' : 'bob'}), (2, {'name' : 'suzanne'})]
           > blocked_ids = deduper.fingerprinter(data)
           > print list(blocked_ids)
           [('foo:1', 1), ..., ('bar:1', 100)]
        """
        start_time = time.perf_counter()
        # Suffix each block key with the ordinal of the predicate that
        # produced it, so keys from different predicates never collide.
        predicates = [
            (":" + str(i), predicate) for i, predicate in enumerate(self.predicates)
        ]
        for i, record in enumerate(records):
            record_id, instance = record
            for pred_id, predicate in predicates:
                block_keys = predicate(instance, target=target)
                for block_key in block_keys:
                    yield block_key + pred_id, record_id
            if i and i % 10000 == 0:
                # Periodic progress logging for long runs.
                logger.info(
                    "%(iteration)d, %(elapsed)f2 seconds",
                    {"iteration": i, "elapsed": time.perf_counter() - start_time},
                )
    def reset_indices(self) -> None:
        """
        Fingerprinter indices can take up a lot of memory. If you are
        done with blocking, the method will reset the indices to free up.
        If you need to block again, the data will need to be re-indexed.
        """
        for predicate in self.index_predicates:
            predicate.reset()
    def index(self, docs: Docs, field: str) -> None:
        """
        Add docs to the indices used by fingerprinters.
        Some fingerprinter methods depend upon having an index of
        values that a field may have in the data. This method adds
        those values to the index. If you don't have any fingerprinter
        methods that use an index, this method will do nothing.
        Args:
            docs: an iterator of values from your data to index. While
                  not required, it is recommended that docs be a unique
                  set of those values. Indexing can be an expensive
                  operation.
            field: fieldname or key associated with the values you are
                   indexing
        """
        indices = extractIndices(self.index_fields[field])
        for doc in docs:
            if doc:
                for _, index, preprocess in indices:
                    index.index(preprocess(doc))
        # After indexing, prepare each index for search and hand it to
        # every predicate of the matching type.
        for index_type, index, _ in indices:
            index.initSearch()
            for predicate in self.index_fields[field][index_type]:
                logger.debug("Canopy: %s", str(predicate))
                predicate.index = index
                predicate.bust_cache()
    def unindex(self, docs: Docs, field: str) -> None:
        """Remove docs from indices used by fingerprinters
        Args:
            docs: an iterator of values from your data to remove. While
                  not required, it is recommended that docs be a unique
                  set of those values. Indexing can be an expensive
                  operation.
            field: fieldname or key associated with the values you are
                   unindexing
        """
        indices = extractIndices(self.index_fields[field])
        for doc in docs:
            if doc:
                for _, index, preprocess in indices:
                    try:
                        index.unindex(preprocess(doc))
                    except KeyError:
                        # Best effort: value was never indexed; ignore.
                        pass
        for index_type, index, _ in indices:
            index.initSearch()
            for predicate in self.index_fields[field][index_type]:
                logger.debug("Canopy: %s", str(predicate))
                predicate.index = index
                predicate.bust_cache()
    def index_all(self, data: Data) -> None:
        # Index the unique non-empty values of every indexed field.
        for field in self.index_fields:
            unique_fields = {record[field] for record in data.values() if record[field]}
            self.index(unique_fields, field)
def extractIndices(
    index_fields: IndexList,
) -> Sequence[tuple[str, Index, Callable[[Any], Any]]]:
    """For each index type, return (type, index, preprocess), taking the
    first predicate of that type as representative and making sure it
    carries an initialised index object."""
    extracted = []
    for index_type, predicates_for_type in index_fields.items():
        representative = predicates_for_type[0]
        idx = representative.index
        if idx is None:
            idx = representative.initIndex()
        assert idx is not None
        extracted.append((index_type, idx, representative.preprocess))
    return extracted
| mit | 24aa74f331c319c2594832599c338ea2 | 33.995305 | 88 | 0.565334 | 4.62694 | false | false | false | false |
peterhinch/micropython-async | v3/as_drivers/hd44780/alcd.py | 1 | 4911 | # LCD class for Micropython and uasyncio.
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
# V1.1 24 Apr 2020 Updated for uasyncio V3
# V1.0 13 May 2017
# Assumes an LCD with standard Hitachi HD44780 controller chip wired using four data lines
# Code has only been tested on two line LCD displays.
# My code is based on this program written for the Raspberry Pi
# http://www.raspberrypi-spy.co.uk/2012/07/16x2-lcd-module-control-using-python/
# HD44780 LCD Test Script for
# Raspberry Pi
#
# Author : Matt Hawkins
# Site : http://www.raspberrypi-spy.co.uk
from machine import Pin
import utime as time
import uasyncio as asyncio
# ********************************** GLOBAL CONSTANTS: TARGET BOARD PIN NUMBERS *************************************
# Supply board pin numbers as a tuple in order Rs, E, D4, D5, D6, D7
PINLIST = ('Y1','Y2','Y6','Y5','Y4','Y3') # As used in testing.
# **************************************************** LCD CLASS ****************************************************
# Initstring:
# 0x33, 0x32: See flowchart P24 send 3,3,3,2
# 0x28: Function set DL = 1 (4 bit) N = 1 (2 lines) F = 0 (5*8 bit font)
# 0x0C: Display on/off: D = 1 display on C, B = 0 cursor off, blink off
# 0x06: Entry mode set: ID = 1 increment S = 0 display shift??
# 0x01: Clear display, set DDRAM address = 0
# Original code had timing delays of 50uS. Testing with the Pi indicates that time.sleep() can't issue delays shorter
# than about 250uS. There also seems to be an error in the original code in that the datasheet specifies a delay of
# >4.1mS after the first 3 is sent. To simplify I've imposed a delay of 5mS after each initialisation pulse: the time to
# initialise is hardly critical. The original code worked, but I'm happier with something that complies with the spec.
# Async version:
# No point in having a message queue: people's eyes aren't that quick. Just display the most recent data for each line.
# Assigning changed data to the LCD object sets a "dirty" flag for that line. The LCD's runlcd thread then updates the
# hardware and clears the flag
# lcd_byte and lcd_nybble method use explicit delays. This is because execution
# time is short relative to general latency (on the order of 300μs).
class LCD: # LCD objects appear as read/write lists
    """Driver for an HD44780 LCD on a 4-bit data bus. Lines are written
    by index assignment (lcd[0] = 'text'); a background coroutine pushes
    changed lines to the hardware."""
    INITSTRING = b'\x33\x32\x28\x0C\x06\x01'
    LCD_LINES = b'\x80\xC0' # LCD RAM address for the 1st and 2nd line (0 and 40H)
    CHR = True
    CMD = False
    E_PULSE = 50 # Timing constants in uS
    E_DELAY = 50
    def __init__(self, pinlist, cols, rows = 2): # Init with pin nos for enable, rs, D4, D5, D6, D7
        self.initialising = True
        self.LCD_E = Pin(pinlist[1], Pin.OUT) # Create and initialise the hardware pins
        self.LCD_RS = Pin(pinlist[0], Pin.OUT)
        self.datapins = [Pin(pin_name, Pin.OUT) for pin_name in pinlist[2:]]
        self.cols = cols
        self.rows = rows
        self.lines = [""] * self.rows
        self.dirty = [False] * self.rows
        for thisbyte in LCD.INITSTRING:
            self.lcd_byte(thisbyte, LCD.CMD)
            self.initialising = False # Long delay after first byte only
        asyncio.create_task(self.runlcd())
    def lcd_nybble(self, bits): # send the LS 4 bits
        # Timing-critical: put the 4 bits on the data lines, then clock
        # them in with a pulse on E.
        for pin in self.datapins:
            pin.value(bits & 0x01)
            bits >>= 1
        time.sleep_us(LCD.E_DELAY) # 50μs
        self.LCD_E.value(True) # Toggle the enable pin
        time.sleep_us(LCD.E_PULSE)
        self.LCD_E.value(False)
        if self.initialising:
            time.sleep_ms(5)
        else:
            time.sleep_us(LCD.E_DELAY) # 50μs
    def lcd_byte(self, bits, mode): # Send byte to data pins: bits = data
        self.LCD_RS.value(mode) # mode = True for character, False for command
        self.lcd_nybble(bits >>4) # send high bits
        self.lcd_nybble(bits) # then low ones
    def __setitem__(self, line, message): # Send string to display line 0 or 1
        # Pad/truncate to the display width before comparing.
        message = "{0:{1}.{1}}".format(message, self.cols)
        if message != self.lines[line]: # Only update LCD if data has changed
            self.lines[line] = message # Update stored line
            self.dirty[line] = True # Flag its non-correspondence with the LCD device
    def __getitem__(self, line):
        # Return the text currently assigned to the given display line.
        return self.lines[line]
    async def runlcd(self): # Periodically check for changed text and update LCD if so
        while(True):
            for row in range(self.rows):
                if self.dirty[row]:
                    msg = self[row]
                    self.lcd_byte(LCD.LCD_LINES[row], LCD.CMD)
                    for thisbyte in msg:
                        self.lcd_byte(ord(thisbyte), LCD.CHR)
                        await asyncio.sleep_ms(0) # Reshedule ASAP
                    self.dirty[row] = False
            await asyncio.sleep_ms(20) # Give other coros a look-in
| mit | 888cc125bd602810ab35e594e87df4b1 | 45.301887 | 120 | 0.623268 | 3.331976 | false | false | false | false |
peterhinch/micropython-async | v3/as_drivers/client_server/userver.py | 1 | 1936 | # userver.py Demo of simple uasyncio-based echo server
# Released under the MIT licence
# Copyright (c) Peter Hinch 2019-2020
import usocket as socket
import uasyncio as asyncio
import uselect as select
import ujson
from heartbeat import heartbeat # Optional LED flash
class Server:
    """Simple uasyncio echo server: JSON lines are read from each client,
    logged, and echoed back verbatim."""
    def __init__(self, host='0.0.0.0', port=8123, backlog=5, timeout=20):
        self.host = host
        self.port = port
        self.backlog = backlog
        self.timeout = timeout  # seconds to wait for a line before dropping the client
    async def run(self):
        # Start listening and then idle forever; each connection is
        # handled by run_client.
        print('Awaiting client connection.')
        self.cid = 0
        asyncio.create_task(heartbeat(100))
        self.server = await asyncio.start_server(self.run_client, self.host, self.port, self.backlog)
        while True:
            await asyncio.sleep(100)
    async def run_client(self, sreader, swriter):
        # Per-connection handler: echo each received line back to the peer.
        self.cid += 1
        print('Got connection from client', self.cid)
        try:
            while True:
                try:
                    res = await asyncio.wait_for(sreader.readline(), self.timeout)
                except asyncio.TimeoutError:
                    res = b''
                # Empty read means timeout or disconnection: treat both
                # as end of connection.
                if res == b'':
                    raise OSError
                print('Received {} from client {}'.format(ujson.loads(res.rstrip()), self.cid))
                swriter.write(res)
                await swriter.drain() # Echo back
        except OSError:
            pass
        print('Client {} disconnect.'.format(self.cid))
        await sreader.wait_closed()
        print('Client {} socket closed.'.format(self.cid))
    async def close(self):
        # Shut the listening socket down cleanly.
        print('Closing server')
        self.server.close()
        await self.server.wait_closed()
        print('Server closed.')
server = Server()
try:
asyncio.run(server.run())
except KeyboardInterrupt:
print('Interrupted') # This mechanism doesn't work on Unix build.
finally:
asyncio.run(server.close())
_ = asyncio.new_event_loop()
| mit | 65a1e1938e6581b20f73432f5c5cd29d | 30.737705 | 101 | 0.596074 | 4.110403 | false | false | false | false |
peterhinch/micropython-async | v2/fast_io/fast_can_test.py | 1 | 1871 | # fast_can_test.py Test of cancellation of tasks which call sleep
# Copyright (c) Peter Hinch 2019
# Released under the MIT licence
import uasyncio as asyncio
import sys
ermsg = 'This test requires the fast_io version of uasyncio V2.4 or later.'
try:
print('Uasyncio version', asyncio.version)
if not isinstance(asyncio.version, tuple):
print(ermsg)
sys.exit(0)
except AttributeError:
print(ermsg)
sys.exit(0)
# If a task times out the TimeoutError can't be trapped:
# no exception is thrown to the task
async def foo(t):
    # Normal-priority task: sleeps t seconds and reports whether it ran
    # to completion or was cancelled.
    try:
        print('foo started')
        await asyncio.sleep(t)
        print('foo ended', t)
    except asyncio.CancelledError:
        print('foo cancelled', t)
async def lpfoo(t):
    # Same as foo but pauses with asyncio.after — the fast_io build's
    # low-priority variant of sleep (see the version check above).
    try:
        print('lpfoo started')
        await asyncio.after(t)
        print('lpfoo ended', t)
    except asyncio.CancelledError:
        print('lpfoo cancelled', t)
async def run(coro, t):
    # Run coro with a timeout of t seconds.
    await asyncio.wait_for(coro, t)
async def bar(loop):
    """Exercise explicit cancellation and wait_for timeouts on both
    normal (foo) and low-priority (lpfoo) sleeping tasks."""
    foo1 = foo(1)
    foo5 = foo(5)
    lpfoo1 = lpfoo(1)
    lpfoo5 = lpfoo(5)
    loop.create_task(foo1)
    loop.create_task(foo5)
    loop.create_task(lpfoo1)
    loop.create_task(lpfoo5)
    # Let the 1s tasks finish; the 5s tasks are still sleeping.
    await asyncio.sleep(2)
    print('Cancelling tasks')
    asyncio.cancel(foo1)
    asyncio.cancel(foo5)
    asyncio.cancel(lpfoo1)
    asyncio.cancel(lpfoo5)
    await asyncio.sleep(0) # Allow cancellation to occur
    print('Pausing 7s to ensure no task still running.')
    await asyncio.sleep(7)
    print('Launching tasks with 2s timeout')
    # The 1s tasks beat the timeout; the 20s tasks should be timed out.
    loop.create_task(run(foo(1), 2))
    loop.create_task(run(lpfoo(1), 2))
    loop.create_task(run(foo(20), 2))
    loop.create_task(run(lpfoo(20), 2))
    print('Pausing 7s to ensure no task still running.')
    await asyncio.sleep(7)
loop = asyncio.get_event_loop(ioq_len=16, lp_len=16)
loop.run_until_complete(bar(loop))
| mit | b49db5913eec3c61c3fc36a801f3fed9 | 26.925373 | 75 | 0.663282 | 3.22031 | false | false | false | false |
dedupeio/dedupe | dedupe/variables/string.py | 1 | 3255 | from typing import Callable, Iterable, Sequence, Type
from affinegap import normalizedAffineGapDistance as affineGap
from highered import CRFEditDistance
from simplecosine.cosine import CosineTextSimilarity
from dedupe import predicates
from dedupe._typing import VariableDefinition
from dedupe.variables.base import FieldType, indexPredicates
crfEd = CRFEditDistance()
base_predicates = (
predicates.wholeFieldPredicate,
predicates.firstTokenPredicate,
predicates.firstTwoTokensPredicate,
predicates.commonIntegerPredicate,
predicates.nearIntegersPredicate,
predicates.firstIntegerPredicate,
predicates.hundredIntegerPredicate,
predicates.hundredIntegersOddPredicate,
predicates.alphaNumericPredicate,
predicates.sameThreeCharStartPredicate,
predicates.sameFiveCharStartPredicate,
predicates.sameSevenCharStartPredicate,
predicates.commonTwoTokens,
predicates.commonThreeTokens,
predicates.fingerprint,
predicates.oneGramFingerprint,
predicates.twoGramFingerprint,
predicates.sortedAcronym,
)
class BaseStringType(FieldType):
    """Common base for string-valued fields: adds Levenshtein canopy and
    search index predicates at distances 1-4 on top of whatever simple
    predicate functions the subclass declares."""
    _Predicate = predicates.StringPredicate
    _predicate_functions: Sequence[Callable[[str], Iterable[str]]] = ()
    def __init__(self, definition: VariableDefinition):
        super(BaseStringType, self).__init__(definition)
        self.predicates += indexPredicates(
            (
                predicates.LevenshteinCanopyPredicate,
                predicates.LevenshteinSearchPredicate,
            ),
            (1, 2, 3, 4),
            self.field,
        )
class ShortStringType(BaseStringType):
    """Field type for short strings (e.g. names). Compares with the CRF
    edit distance when the definition sets ``crf: True``, otherwise with
    the affine gap distance."""
    type = "ShortString"
    _predicate_functions = base_predicates + (
        predicates.commonFourGram,
        predicates.commonSixGram,
        predicates.tokenFieldPredicate,
        predicates.suffixArray,
        predicates.doubleMetaphone,
        predicates.metaphoneToken,
    )
    _index_predicates: Sequence[Type[predicates.IndexPredicate]] = [
        predicates.TfidfNGramCanopyPredicate,
        predicates.TfidfNGramSearchPredicate,
    ]
    # Canopy/search thresholds tried for each index predicate.
    _index_thresholds = (0.2, 0.4, 0.6, 0.8)
    def __init__(self, definition: VariableDefinition):
        super(ShortStringType, self).__init__(definition)
        if definition.get("crf", False) is True:
            self.comparator = crfEd  # type: ignore[assignment]
        else:
            self.comparator = affineGap  # type: ignore[assignment]
class StringType(ShortStringType):
    """Like ShortString, but additionally indexed with whole-text tf-idf
    canopy/search predicates."""
    type = "String"
    _index_predicates = [
        predicates.TfidfNGramCanopyPredicate,
        predicates.TfidfNGramSearchPredicate,
        predicates.TfidfTextCanopyPredicate,
        predicates.TfidfTextSearchPredicate,
    ]
class TextType(BaseStringType):
    """Field type for long text, compared by cosine similarity over an
    optional training corpus supplied in the definition."""
    type = "Text"
    _predicate_functions = base_predicates
    _index_predicates = [
        predicates.TfidfTextCanopyPredicate,
        predicates.TfidfTextSearchPredicate,
    ]
    _index_thresholds = (0.2, 0.4, 0.6, 0.8)
    def __init__(self, definition: VariableDefinition):
        super(TextType, self).__init__(definition)
        # NOTE(review): this mutates the caller's definition dict when
        # "corpus" is absent — looks deliberate, but verify callers don't
        # reuse the dict expecting it unchanged.
        if "corpus" not in definition:
            definition["corpus"] = []
        self.comparator = CosineTextSimilarity(definition["corpus"])  # type: ignore[assignment]
| mit | 3a4321bef459354ef925d2ba1fd52c51 | 29.420561 | 96 | 0.70384 | 3.979218 | false | false | false | false |
dedupeio/dedupe | benchmarks/benchmarks/canonical_matching.py | 1 | 2796 | import os
import time
import dedupe
from benchmarks import common
def get_true_dupes(data):
    """Merge the two record dicts and delegate to the shared helper."""
    data_1, data_2 = data
    combined = {**data_1, **data_2}
    return common.get_true_dupes(combined)
def make_report(data, clustering):
    """Score the predicted clustering against the known true duplicates."""
    predicted = {frozenset(pair) for pair, _ in clustering}
    return common.Report.from_scores(get_true_dupes(data), predicted)
class Matching:
    """asv-style benchmark of dedupe record linkage on the canonical
    restaurant datasets; parameterised over join kwargs."""
    settings_file = common.DATASETS_DIR / "canonical_data_matching_learned_settings"
    data_1_file = common.DATASETS_DIR / "restaurant-1.csv"
    data_2_file = common.DATASETS_DIR / "restaurant-2.csv"
    params = [
        {"threshold": 0.5},
        {"threshold": 0.5, "constraint": "many-to-one"},
    ]
    param_names = ["kwargs"]
    def setup(self, kwargs):
        # Load both datasets and build labelled training pairs once per
        # parameter combination.
        data_1 = common.load_data(self.data_1_file)
        data_2 = common.load_data(self.data_2_file)
        self.data = (data_1, data_2)
        self.training_pairs = dedupe.training_data_link(
            data_1, data_2, "unique_id", 5000
        )
    def run(self, kwargs, use_settings=False):
        # Train a RecordLink model (or reuse cached settings when
        # use_settings is true) and join the two datasets.
        data_1, data_2 = self.data
        if use_settings and os.path.exists(self.settings_file):
            with open(self.settings_file, "rb") as f:
                deduper = dedupe.StaticRecordLink(f)
        else:
            variables = [
                {"field": "name", "type": "String"},
                {"field": "address", "type": "String"},
                {"field": "cuisine", "type": "String"},
                {"field": "city", "type": "String"},
            ]
            deduper = dedupe.RecordLink(variables)
            deduper.prepare_training(data_1, data_2, sample_size=10000)
            deduper.mark_pairs(self.training_pairs)
            deduper.train()
            with open(self.settings_file, "wb") as f:
                deduper.write_settings(f)
        return deduper.join(data_1, data_2, **kwargs)
    def make_report(self, clustering):
        return make_report(self.data, clustering)
    def time_run(self, kwargs):
        return self.run(kwargs)
    def peakmem_run(self, kwargs):
        return self.run(kwargs)
    def track_precision(self, kwargs):
        return self.make_report(self.run(kwargs)).precision
    def track_recall(self, kwargs):
        return self.make_report(self.run(kwargs)).recall
def cli():
    """Run every benchmark parameterisation once and print its report."""
    common.configure_logging()
    benchmark = Matching()
    for kwargs in benchmark.params:
        benchmark.setup(kwargs)
        print()
        print(f"running with kwargs: {kwargs}")
        start = time.time()
        clustering = benchmark.run(kwargs=kwargs, use_settings=True)
        elapsed = time.time() - start
        print(benchmark.make_report(clustering))
        print(f"ran in {elapsed} seconds")
if __name__ == "__main__":
cli()
| mit | 84166ee71675ba650ea7f81dcf9fafe0 | 28.125 | 84 | 0.590129 | 3.430675 | false | false | false | false |
dedupeio/dedupe | dedupe/canopy_index.py | 1 | 2603 | from __future__ import annotations
import logging
import math
from typing import Iterable
import numpy
from BTrees.Length import Length
from zope.index.text.cosineindex import CosineIndex
from zope.index.text.lexicon import Lexicon
from zope.index.text.setops import mass_weightedUnion
from zope.index.text.textindex import TextIndex
logger = logging.getLogger(__name__)
class CanopyIndex(TextIndex):  # pragma: no cover
    """Cosine tf-idf text index used for canopy blocking, built on
    zope.index internals."""
    def __init__(self) -> None:
        lexicon = CanopyLexicon()
        self.index = CosineIndex(lexicon)
        self.lexicon = lexicon
    def initSearch(self) -> None:
        """Precompute per-term (wid, idf) lookups and drop stop words —
        terms appearing in more than 5% of documents (minimum 1000)."""
        N = len(self.index._docweight)
        threshold = int(max(1000, N * 0.05))
        stop_words = []
        self._wids_dict = {}
        bucket = self.index.family.IF.Bucket
        for wid, docs in self.index._wordinfo.items():
            if len(docs) > threshold:
                stop_words.append(wid)
                continue
            if isinstance(docs, dict):
                # Normalise plain dicts to BTree buckets for setops.
                docs = bucket(docs)
                self.index._wordinfo[wid] = docs
            idf = numpy.log1p(N / len(docs))
            term = self.lexicon._words[wid]
            self._wids_dict[term] = (wid, idf)
        for wid in stop_words:
            # Remove the stop word from the lexicon and the posting lists.
            word = self.lexicon._words.pop(wid)
            del self.lexicon._wids[word]
            logger.info(f"Removing stop word {word}")
            del self.index._wordinfo[wid]
    def apply(
        self,
        query_list: Iterable[str],
        threshold: float,
        start: int = 0,
        count: int | None = None,
    ) -> list[tuple[float, int]]:
        """Return documents whose weighted-union score against the query
        terms is at least ``threshold`` times the query norm."""
        _wids_dict = self._wids_dict
        _wordinfo = self.index._wordinfo
        l_pow = float.__pow__  # bound method: fast squaring in the loop
        L = []
        qw = 0.0
        for term in query_list:
            wid, weight = _wids_dict.get(term, (None, None))
            if wid is None:
                # Unknown term (or stop word): contributes nothing.
                continue
            docs = _wordinfo[wid]
            L.append((docs, weight))
            qw += l_pow(weight, 2)
        results = mass_weightedUnion(L)
        qw = math.sqrt(qw)
        filtered_results: list[tuple[float, int]] = results.byValue(qw * threshold)
        return filtered_results
class CanopyLexicon(Lexicon):  # pragma: no cover
    """Lexicon that accepts pre-tokenized terms (no text pipeline applied)."""

    def sourceToWordIds(self, last: list | None = None) -> list[int]:
        # `last` is treated as an already-processed list of terms — unlike the
        # base Lexicon, no splitter/normalizer pipeline runs here.
        if last is None:
            last = []
        if not isinstance(self.wordCount, Length):  # type: ignore[has-type]
            # wordCount is callable here — presumably a legacy counter being
            # migrated to a BTrees Length; confirm against zope.index docs.
            self.wordCount = Length(self.wordCount())  # type: ignore[has-type]
        self.wordCount._p_deactivate()
        return list(map(self._getWordIdCreate, last))
| mit | c0172e04fbd2ec3db9f48ca0415f49d0 | 28.579545 | 83 | 0.576642 | 3.630404 | false | false | false | false |
virtool/virtool | virtool/downloads/db.py | 2 | 3097 | """
Functions for working with the database to provide file downloads. All functions are currently related to OTUs and their
child models.
"""
from typing import Tuple
import virtool.downloads.utils
import virtool.errors
import virtool.otus.utils
async def generate_isolate_fasta(db, otu_id: str, isolate_id: str) -> Tuple[str, str]:
    """
    Generate a FASTA filename and body for the sequences associated with the isolate
    identified by the passed ``otu_id`` and ``isolate_id``.

    :param db: the application database client
    :param otu_id: the id of the isolates' parent otu
    :param isolate_id: the id of the isolate to FASTAfy
    :return: a FASTA filename and body
    :raises virtool.errors.DatabaseError: if the OTU or isolate does not exist
    """
    # get_otu_and_isolate_names already fetches and validates the OTU document,
    # so the previous second find_one for the same document was redundant.
    otu_name, isolate_name = await get_otu_and_isolate_names(db, otu_id, isolate_id)

    fasta = []

    async for sequence in db.sequences.find(
        {"otu_id": otu_id, "isolate_id": isolate_id}, ["sequence"]
    ):
        fasta.append(
            virtool.downloads.utils.format_fasta_entry(
                otu_name, isolate_name, sequence["_id"], sequence["sequence"]
            )
        )

    return virtool.downloads.utils.format_fasta_filename(
        otu_name, isolate_name
    ), "\n".join(fasta)
async def generate_sequence_fasta(db, sequence_id: str) -> Tuple[str, str]:
    """
    Generate a FASTA filename and body for the sequence associated with the passed
    ``sequence_id``.

    :param db: the application database client
    :param sequence_id: the id of the sequence to FASTAfy
    :return: a FASTA filename and body
    :raises virtool.errors.DatabaseError: if the sequence does not exist
    """
    sequence = await db.sequences.find_one(
        sequence_id, ["sequence", "otu_id", "isolate_id"]
    )

    if not sequence:
        raise virtool.errors.DatabaseError("Sequence does not exist")

    otu_name, isolate_name = await get_otu_and_isolate_names(
        db, sequence["otu_id"], sequence["isolate_id"]
    )

    filename = virtool.downloads.utils.format_fasta_filename(
        otu_name, isolate_name, sequence["_id"]
    )

    body = virtool.downloads.utils.format_fasta_entry(
        otu_name, isolate_name, sequence_id, sequence["sequence"]
    )

    return filename, body
async def get_otu_and_isolate_names(
    db, otu_id: str, isolate_id: str
) -> Tuple[str, str]:
    """
    Look up the display names for the OTU-isolate pair identified by `otu_id`
    and `isolate_id`.

    :param db: the application database object
    :param otu_id: the OTU ID
    :param isolate_id: the isolate ID
    :return: a tuple of (OTU name, formatted isolate name)
    :raises virtool.errors.DatabaseError: if the OTU or isolate cannot be found
    """
    otu = await db.otus.find_one(
        {"_id": otu_id, "isolates.id": isolate_id}, ["name", "isolates"]
    )

    if not otu:
        raise virtool.errors.DatabaseError("OTU does not exist")

    isolate = virtool.otus.utils.find_isolate(otu["isolates"], isolate_id)

    if not isolate:
        raise virtool.errors.DatabaseError("Isolate does not exist")

    isolate_name = virtool.otus.utils.format_isolate_name(isolate)

    return otu["name"], isolate_name
| mit | 1d793744cf86ec856b934e236b20ace9 | 29.362745 | 120 | 0.647078 | 3.437292 | false | false | false | false |
virtool/virtool | virtool/otus/utils.py | 2 | 6728 | from copy import deepcopy
from typing import List, Optional, Tuple, Union
from virtool.types import Document
from virtool.utils import base_processor
def evaluate_changes(data: dict, document: dict) -> Tuple[str, str, Document]:
    """
    Compare a requested OTU update against the existing document.

    String fields are whitespace-stripped before comparison. Fields whose
    requested value equals the stored value come back as ``None`` so callers
    can apply only the real changes.

    :param data: the requested update fields (``name``, ``abbreviation``, ``schema``)
    :param document: the existing OTU document
    :return: a ``(name, abbreviation, schema)`` tuple with ``None`` for unchanged fields
    """

    def _stripped(value):
        # Strings are trimmed; any non-string value passes through untouched.
        try:
            return value.strip()
        except AttributeError:
            return value

    name = _stripped(data.get("name"))
    abbreviation = _stripped(data.get("abbreviation"))
    schema = data.get("schema")

    if name == document["name"]:
        name = None

    if abbreviation == document.get("abbreviation", ""):
        abbreviation = None

    if schema == document.get("schema"):
        schema = None

    return name, abbreviation, schema
def extract_sequence_ids(otu: dict) -> List[str]:
    """
    Collect the ids of every sequence in a merged OTU.

    :param otu: the merged otu
    :return: the sequence ids belonging to ``otu``
    :raises ValueError: if the OTU or any of its isolates has no sequences
    :raises KeyError: if an isolate is missing its ``sequences`` field
    """
    isolates = otu["isolates"]

    if not isolates:
        raise ValueError("Empty isolates list in merged otu")

    # Validate every isolate up front, then gather ids in one pass.
    for isolate in isolates:
        if "sequences" not in isolate:
            raise KeyError("Isolate in merged otu missing sequences field")

        if not isolate["sequences"]:
            raise ValueError("Empty sequences list in merged otu")

    return [
        sequence["_id"] for isolate in isolates for sequence in isolate["sequences"]
    ]
def find_isolate(isolates: List[dict], isolate_id: str) -> dict:
    """
    Return the isolate identified by ``isolate_id`` from a list of isolates.

    :param isolates: a list of isolate dicts
    :param isolate_id: the ``id`` of the isolate to return
    :return: the matching isolate, or ``None`` if no isolate matches
    """
    for isolate in isolates:
        if isolate["id"] == isolate_id:
            return isolate

    return None
def format_otu(
    joined: Optional[Document],
    issues: Optional[Union[Document, bool]] = False,
    most_recent_change: Optional[Document] = None,
) -> Document:
    """
    Format a joined OTU document for return to API clients.

    Strips internal fields (``lower_name``, per-sequence ``otu_id`` and
    ``isolate_id``), renames sequence ``_id`` to ``id``, and attaches the most
    recent change and any verification issues.

    :param joined: the joined OTU document (OTU with sequences merged in)
    :param issues: an object describing issues in the otu; if ``False``, the
        issues are computed here with :func:`verify`
    :param most_recent_change: a change document for the most recent change made to OTU
    :return: a joined and formatted otu
    """
    formatted = base_processor(joined)

    # The lower-cased name exists only to support case-insensitive handling in
    # the database and is not useful to clients.
    del formatted["lower_name"]

    for isolate in formatted["isolates"]:
        for sequence in isolate["sequences"]:
            # Parent ids are implied by the nested document structure.
            del sequence["otu_id"]
            del sequence["isolate_id"]
            sequence["id"] = sequence.pop("_id")

    formatted["most_recent_change"] = None

    if most_recent_change:
        formatted["most_recent_change"] = base_processor(most_recent_change)

    # `issues is False` distinguishes "not computed yet" from a computed
    # result (which may itself be falsy when the OTU is clean).
    if issues is False:
        issues = verify(joined)

    formatted["issues"] = issues

    return formatted
def format_isolate_name(isolate: Document) -> str:
    """
    Take a complete or partial isolate ``dict`` and return a readable isolate name.

    :param isolate: an isolate containing ``source_type`` and ``source_name`` fields
    :return: a readable isolate name, or ``"Unnamed Isolate"`` when either field is empty
    """
    source_type = isolate["source_type"]
    source_name = isolate["source_name"]

    if source_type and source_name:
        return f"{source_type.capitalize()} {source_name}"

    return "Unnamed Isolate"
def merge_otu(otu: dict, sequences: List[dict]) -> dict:
    """
    Merge the given sequences into the given otu document.

    Each isolate in the returned otu gains a ``sequences`` field containing the
    sequence documents whose ``isolate_id`` matches it. The input ``otu`` is not
    modified.

    :param otu: a otu document.
    :param sequences: the sequence documents to merge into the otu.
    :return: the merged otu.
    """
    # Group sequences by isolate id in a single pass instead of rescanning the
    # whole sequence list once per isolate (was O(isolates * sequences)).
    sequences_by_isolate: dict = {}

    for sequence in sequences:
        sequences_by_isolate.setdefault(sequence["isolate_id"], []).append(sequence)

    merged = deepcopy(otu)

    for isolate in merged["isolates"]:
        # Copy the grouped list so isolates never share a list object.
        isolate["sequences"] = list(sequences_by_isolate.get(isolate["id"], []))

    return merged
def split(merged: Document) -> Tuple[Document, List[Document]]:
    """
    Split a merged otu document into a plain otu document and the list of
    sequence documents that were embedded in it.

    :param merged: the merged otu to split
    :return: a tuple of (otu without sequence sub-documents, list of sequences)
    """
    otu = deepcopy(merged)

    sequences = []

    for isolate in otu["isolates"]:
        # pop removes the embedded sequences from the copied otu.
        sequences.extend(isolate.pop("sequences"))

    return otu, sequences
def verify(joined: Document) -> Optional[Document]:
    """
    Check that the passed joined otu constitutes a valid Virtool record and can
    be included in an index.

    Error fields are:

    * empty_otu - otu has no isolates associated with it.
    * empty_isolate - isolates that have no sequences associated with them.
    * empty_sequence - sequences that have a zero length sequence field.
    * isolate_inconsistency - otu has different sequence counts between isolates.

    :param joined: a joined otu
    :return: a dict describing the errors, or ``None`` if there are none
    """
    errors = {
        "empty_otu": len(joined["isolates"]) == 0,
        "empty_isolate": [],
        "empty_sequence": [],
        "isolate_inconsistency": False,
    }

    isolate_sequence_counts = []

    # Append the isolate ids of any isolates without sequences to empty_isolate
    # and collect any sequences with a zero-length sequence field.
    for isolate in joined["isolates"]:
        isolate_sequences = isolate["sequences"]
        isolate_sequence_count = len(isolate_sequences)

        if isolate_sequence_count == 0:
            errors["empty_isolate"].append(isolate["id"])

        isolate_sequence_counts.append(isolate_sequence_count)

        errors["empty_sequence"] += filter(
            lambda sequence: len(sequence["sequence"]) == 0, isolate_sequences
        )

    # Flag isolate_inconsistency when sequence counts differ between isolates,
    # but only if the otu is not already empty (empty_otu / empty_isolate).
    errors["isolate_inconsistency"] = len(set(isolate_sequence_counts)) != 1 and not (
        errors["empty_otu"] or errors["empty_isolate"]
    )

    # Normalize falsy error values to False; remember whether anything failed.
    has_errors = False

    for key, value in errors.items():
        if value:
            has_errors = True
        else:
            errors[key] = False

    if has_errors:
        return errors

    return None
| mit | b86196873f3efc7497f3e1e3aaa1b05f | 28.125541 | 88 | 0.65324 | 4.028743 | false | false | false | false |
virtool/virtool | virtool/tasks/runner.py | 2 | 1274 | import asyncio
import logging
import virtool.tasks.task
from virtool.pg.utils import get_row_by_id
from virtool.tasks.models import Task
class TaskRunner:
    """
    Consumes task ids from a channel and executes the matching registered
    Task subclass for each one.
    """

    def __init__(self, channel, app):
        self._channel = channel
        self.app = app

    async def run(self):
        """
        Poll the channel forever, running each received task id.

        Exits cleanly on :class:`asyncio.CancelledError` (runner shutdown).
        """
        logging.info("Started task runner")

        try:
            while True:
                logging.info("Waiting for next task")
                await asyncio.sleep(0.3)

                task_id = await self._channel.get_json()
                await self.run_task(task_id)

                logging.info("Finished task: %s", task_id)
        except asyncio.CancelledError:
            logging.info("Stopped task runner")

    async def run_task(self, task_id: int):
        """
        Run task with given `task_id`.

        :param task_id: ID of the task
        """
        task: Task = await get_row_by_id(self.app["pg"], Task, task_id)

        # Use lazy %-style logging args; the previous call mixed a useless
        # f-string prefix (no placeholders) with %-style arguments.
        logging.info("Starting task: %s %s", task.id, task.type)

        loop = asyncio.get_event_loop()

        # Dispatch to whichever registered Task subclass declares this
        # task_type; the matching task is awaited to completion.
        for task_class in virtool.tasks.task.Task.__subclasses__():
            if task.type == task_class.task_type:
                current_task = task_class(self.app, task_id)
                await loop.create_task(current_task.run())
| mit | 8dc3fb36d9b2305f0c7ea743338808c8 | 26.106383 | 71 | 0.573783 | 3.825826 | false | false | false | false |
virtool/virtool | tests/fixtures/client.py | 2 | 6341 | import json
from typing import Optional
import aiohttp
import pytest
from aiohttp.web_routedef import RouteTableDef
from virtool_core.models.session import Session
import virtool.app
import virtool.jobs.main
from virtool.api.custom_json import dumps
from virtool.config.cls import Config
from virtool.mongo.identifier import FakeIdProvider
from virtool.users.utils import generate_base_permissions
from virtool.utils import hash_key
class VirtoolTestClient:
    """
    Thin wrapper around an aiohttp test client that JSON-encodes request
    bodies and exposes commonly-needed app objects (db, auth, cookies).
    """

    def __init__(self, test_client):
        self._test_client = test_client

        # Convenience handles into the running test server and application.
        self.server = self._test_client.server
        self.app = self.server.app
        self.db = self.app["db"]

        self.auth = self._test_client.session.auth
        self.cookie_jar = self._test_client.session.cookie_jar

    def get_cookie(self, key):
        # Return the value of the first cookie matching `key`, or None.
        for cookie in self._test_client.session.cookie_jar:
            if cookie.key == key:
                return cookie.value

        return None

    def has_cookie(self, key, value):
        # True if a cookie named `key` exists with exactly `value`.
        return self.get_cookie(key) == value

    async def get(self, url, headers=None, params=None):
        return await self._test_client.get(url, headers=headers, params=params)

    async def post(self, url, data=None):
        # JSON-encode the body; an empty/None body is sent as-is.
        payload = None

        if data:
            payload = json.dumps(data)

        return await self._test_client.post(url, data=payload)

    async def post_form(self, url, data):
        # Send `data` unencoded (multipart/form posts).
        return await self._test_client.post(url, data=data)

    async def patch(self, url, data):
        return await self._test_client.patch(url, data=json.dumps(data))

    async def put(self, url, data):
        return await self._test_client.put(url, data=json.dumps(data))

    async def delete(self, url):
        return await self._test_client.delete(url)
@pytest.fixture
def create_app(
    create_user,
    mongo,
    pg_connection_string,
    redis_connection_string,
    test_db_connection_string,
    test_db_name,
):
    """
    Fixture returning a factory that builds a Virtool application configured
    against the test databases, with external checks and fetching disabled.
    """

    def _create_app(dev: bool = False, base_url: str = ""):
        config = Config(
            base_url=base_url,
            db_connection_string=test_db_connection_string,
            db_name=test_db_name,
            dev=dev,
            force_version="v0.0.0",
            no_check_db=True,
            no_check_files=True,
            no_fetching=True,
            no_sentry=True,
            postgres_connection_string=pg_connection_string,
            redis_connection_string=redis_connection_string,
            fake=False,
        )

        return virtool.app.create_app(config)

    return _create_app
@pytest.fixture
def spawn_client(
    pg,
    redis,
    aiohttp_client,
    test_motor,
    mongo,
    create_app,
    create_user,
    data_layer,
):
    """
    Fixture returning a factory that spawns a fully wired test client.

    The factory creates the app, inserts a ``test`` user (optionally with
    groups/permissions/administrator), and optionally seeds an authorized
    session in Redis or a B2C-style cookie.
    """

    async def func(
        addon_route_table: Optional[RouteTableDef] = None,
        auth=None,
        authorize=False,
        administrator=False,
        base_url="",
        dev=False,
        enable_api=False,
        groups=None,
        permissions=None,
        use_b2c=False,
    ):
        app = create_app(dev, base_url)

        if groups is not None:
            # Each requested group id becomes a full group document with a
            # baseline (all-False) permission set.
            complete_groups = [
                {
                    "_id": group,
                    "name": group,
                    "permissions": generate_base_permissions(),
                }
                for group in groups
            ]

            await mongo.groups.insert_many(complete_groups)

        user_document = create_user(
            user_id="test",
            administrator=administrator,
            groups=groups,
            permissions=permissions,
        )

        await mongo.users.insert_one(user_document)

        if addon_route_table:
            app.add_routes(addon_route_table)

        if authorize:
            # Seed a valid session in Redis; the raw token goes in the cookie
            # while only its hash is stored server-side.
            session_token = "bar"
            session_id = "foobar"

            await redis.set(
                session_id,
                dumps(
                    Session(
                        **{
                            "created_at": virtool.utils.timestamp(),
                            "ip": "127.0.0.1",
                            "authentication": {
                                "token": hash_key(session_token),
                                "user_id": "test",
                            },
                        }
                    )
                ),
                expire=3600,
            )

            cookies = {"session_id": session_id, "session_token": session_token}
        elif use_b2c:
            cookies = {"id_token": "foobar"}
        else:
            # A non-existent session id exercises the anonymous path.
            cookies = {"session_id": "dne"}

        test_client = await aiohttp_client(
            app, auth=auth, cookies=cookies, auto_decompress=False
        )

        # Deterministic ids make database snapshots reproducible.
        test_client.app["db"].id_provider = FakeIdProvider()

        return VirtoolTestClient(test_client)

    return func
@pytest.fixture
def spawn_job_client(
    mongo,
    aiohttp_client,
    test_db_connection_string,
    redis_connection_string,
    pg_connection_string,
    pg,
    test_db_name,
):
    """A factory method for creating an aiohttp client which can authenticate with the API as a Job."""

    async def _spawn_job_client(
        authorize: bool = False,
        dev: bool = False,
        add_route_table: RouteTableDef = None,
    ):
        # Create a test job to use for authentication.
        if authorize:
            job_id, key = "test_job", "test_key"

            # Only the hashed key is stored; the raw key is sent by the client.
            await mongo.jobs.insert_one(
                {
                    "_id": job_id,
                    "key": hash_key(key),
                }
            )

            # Create Basic Authentication header.
            auth = aiohttp.BasicAuth(login=f"job-{job_id}", password=key)
        else:
            auth = None

        app = await virtool.jobs.main.create_app(
            Config(
                db_connection_string=test_db_connection_string,
                db_name=test_db_name,
                dev=dev,
                fake=False,
                postgres_connection_string=pg_connection_string,
                redis_connection_string=redis_connection_string,
                no_sentry=True,
            )
        )

        if add_route_table:
            app.add_routes(add_route_table)

        client = await aiohttp_client(app, auth=auth, auto_decompress=False)

        # Expose the mongo handle for convenient assertions in tests.
        client.db = mongo

        return client

    return _spawn_job_client
| mit | 4c51106e04fe68eb269f02ad4de940e1 | 26.331897 | 103 | 0.547075 | 4.117532 | false | true | false | false |
virtool/virtool | tests/tasks/test_api.py | 2 | 2129 | import pytest
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from virtool.tasks.models import Task
async def test_find(spawn_client, pg: AsyncEngine, snapshot, static_time):
    """
    Test that a ``GET /tasks`` returns a complete list of tasks.
    """
    client = await spawn_client(authorize=True, administrator=True)

    # Seed two tasks directly in Postgres: one complete, one in progress.
    task_1 = Task(
        id=1,
        complete=True,
        context={"user_id": "test_1"},
        count=40,
        created_at=static_time.datetime,
        file_size=1024,
        progress=100,
        step="download",
        type="clone_reference",
    )
    task_2 = Task(
        id=2,
        complete=False,
        context={"user_id": "test_2"},
        count=30,
        created_at=static_time.datetime,
        file_size=14754,
        progress=80,
        step="download",
        type="import_reference",
    )

    async with AsyncSession(pg) as session:
        session.add_all([task_1, task_2])
        await session.commit()

    resp = await client.get("/tasks")

    assert resp.status == 200
    assert await resp.json() == snapshot
@pytest.mark.parametrize("error", [None, "404"])
async def test_get(
    error, spawn_client, all_permissions, pg: AsyncEngine, static_time, snapshot, resp_is
):
    """
    Test that a ``GET /tasks/:task_id`` returns the correct task document.
    """
    client = await spawn_client(authorize=True, administrator=True)

    # When not testing the 404 case, seed the task being requested.
    if not error:
        async with AsyncSession(pg) as session:
            session.add(
                Task(
                    id=1,
                    complete=True,
                    context={"user_id": "test_1"},
                    count=40,
                    created_at=static_time.datetime,
                    file_size=1024,
                    progress=100,
                    step="download",
                    type="clone_reference",
                )
            )
            await session.commit()

    resp = await client.get("/tasks/1")

    if error:
        await resp_is.not_found(resp)
        return

    assert resp.status == 200
    assert await resp.json() == snapshot
| mit | 04d8d09a62ba629dde91c7dbf909a751 | 25.283951 | 89 | 0.549554 | 4.102119 | false | true | false | false |
virtool/virtool | virtool/blast/transform.py | 2 | 1496 | from typing import Any
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from virtool.blast.models import NuVsBlast
from virtool.mongo.transforms import AbstractTransform
from virtool.types import Document
class AttachNuVsBLAST(AbstractTransform):
    """
    Attach BLAST records to NuVs analysis documents.
    """

    def __init__(self, pg: AsyncEngine):
        self._pg = pg

    async def prepare_one(self, document: Document) -> Any:
        # Fetch all BLAST rows for this analysis and key them by the sequence
        # index they belong to, so attach_one can look them up per hit.
        async with AsyncSession(self._pg) as session:
            result = await session.execute(
                select(NuVsBlast).where(NuVsBlast.analysis_id == document["id"])
            )

            return {
                blast.sequence_index: {
                    "id": blast.id,
                    "created_at": blast.created_at,
                    "updated_at": blast.updated_at,
                    "last_checked_at": blast.last_checked_at,
                    "interval": blast.interval,
                    "rid": blast.rid,
                    "ready": blast.ready,
                    "result": blast.result,
                }
                for blast in result.scalars()
            }

    async def attach_one(self, document: Document, prepared: Any) -> Document:
        hits = document["results"]["hits"]

        # Annotate each hit with its BLAST record (None when no record exists
        # for that sequence index).
        return {
            **document,
            "results": {
                "hits": [{**hit, "blast": prepared.get(hit["index"])} for hit in hits]
            },
        }
| mit | 4a78a3a4680f28859b2c6507945892f0 | 30.829787 | 86 | 0.544786 | 4.25 | false | false | false | false |
virtool/virtool | virtool/references/api.py | 2 | 17274 | from typing import Union, List, Optional
from aiohttp.web_exceptions import (
HTTPBadGateway,
HTTPBadRequest,
HTTPConflict,
HTTPNoContent,
)
from aiohttp_pydantic import PydanticView
from aiohttp_pydantic.oas.typing import r200, r201, r202, r204, r400, r403, r404, r502
from virtool_core.models.enums import Permission
from virtool_core.models.otu import OTU
from virtool.api.response import NotFound, json_response
from virtool.data.errors import (
ResourceNotFoundError,
ResourceRemoteError,
ResourceConflictError,
ResourceError,
)
from virtool.data.utils import get_data_from_req
from virtool.http.policy import policy, PermissionsRoutePolicy
from virtool.http.routes import Routes
from virtool.indexes.oas import ListIndexesResponse
from virtool.otus.oas import CreateOTURequest
from virtool.otus.oas import FindOTUsResponse
from virtool.references.oas import (
CreateReferenceRequest,
UpdateReferenceRequest,
CreateReferenceGroupsSchema,
ReferenceRightsRequest,
CreateReferenceUsersRequest,
CreateReferenceResponse,
FindReferencesResponse,
ReferenceResponse,
ReferenceReleaseResponse,
CreateReferenceUpdateResponse,
GetReferenceUpdateResponse,
CreateReferenceIndexesResponse,
ReferenceGroupsResponse,
CreateReferenceGroupResponse,
ReferenceGroupResponse,
ReferenceUsersResponse,
ReferenceHistoryResponse,
)
routes = Routes()

# Validation schema describing the four per-reference access rights.
# NOTE(review): not referenced anywhere in this module chunk — confirm it is
# still consumed elsewhere before removing.
RIGHTS_SCHEMA = {
    "build": {"type": "boolean"},
    "modify": {"type": "boolean"},
    "modify_otu": {"type": "boolean"},
    "remove": {"type": "boolean"},
}
@routes.view("/refs")
class ReferencesView(PydanticView):
    """Find references and create new ones."""

    async def get(self, find: Optional[str]) -> r200[FindReferencesResponse]:
        """
        Find references.

        Status Codes:
            200: Successful operation
        """
        search_result = await get_data_from_req(self.request).references.find(
            find,
            self.request["client"].user_id,
            self.request["client"].administrator,
            self.request["client"].groups,
            self.request.query,
        )

        return json_response(search_result)

    @policy(PermissionsRoutePolicy(Permission.create_ref))
    async def post(
        self, data: CreateReferenceRequest
    ) -> Union[r201[CreateReferenceResponse], r400, r403, r502]:
        """
        Create a reference.

        Creates an empty reference.

        Status Codes:
            201: Successful operation
            400: Source reference does not exist
            403: Not permitted
            502: Could not reach GitHub
        """
        # The response annotation and docstring previously said 200, but this
        # handler returns 201 (matching the other create endpoints).
        try:
            reference = await get_data_from_req(self.request).references.create(
                data, self.request["client"].user_id
            )
        except ResourceNotFoundError as err:
            # The data layer signals distinct failure modes via message text.
            if "Source reference does not exist" in str(err):
                raise HTTPBadRequest(text=str(err))

            if "File not found" in str(err):
                raise NotFound(str(err))

            raise

        except ResourceRemoteError as err:
            if "Could not reach GitHub" in str(err):
                raise HTTPBadGateway(text=str(err))

            if "Could not retrieve latest GitHub release" in str(err):
                raise HTTPBadGateway(text=str(err))

            raise

        return json_response(
            reference,
            status=201,
            headers={"Location": f"/refs/{reference.id}"},
        )
@routes.view("/refs/{ref_id}")
@routes.jobs_api.get("/refs/{ref_id}")
class ReferenceView(PydanticView):
    """Fetch, update, and delete a single reference (GET is also exposed on the jobs API)."""

    async def get(self, ref_id: str, /) -> Union[r200[ReferenceResponse], r403, r404]:
        """
        Get a reference.

        Retrieves the details of a reference.

        Status Codes:
            200: Successful operation
            403: Not permitted
            404: Not found
        """
        try:
            reference = await get_data_from_req(self.request).references.get(ref_id)
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(reference)

    async def patch(
        self,
        ref_id: str,
        /,
        data: UpdateReferenceRequest,
    ) -> Union[r200[ReferenceResponse], r403, r404]:
        """
        Update a reference.

        Updates an existing reference.

        Status Codes:
            200: Successful operation
            403: Insufficient rights
            404: Not found
        """
        try:
            reference = await get_data_from_req(self.request).references.update(
                ref_id, data, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceConflictError as err:
            raise HTTPBadRequest(text=str(err))

        return json_response(reference)

    async def delete(self, ref_id: str, /) -> Union[r202, r403, r404]:
        """
        Delete a reference.

        Deletes a reference and its associated OTUs, history, and indexes. Deleting a
        reference does not break dependent analyses and other resources.

        Status Codes:
            202: Accepted
            403: Insufficient rights
            404: Not found
        """
        # Removal is asynchronous: a task is returned that callers can poll.
        try:
            task = await get_data_from_req(self.request).references.remove(
                ref_id, self.request["client"].user_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(
            task, status=202, headers={"Content-Location": f"/tasks/{task.id}"}
        )
@routes.view("/refs/{ref_id}/release")
class ReferenceReleaseView(PydanticView):
    """Expose the latest remote release available for a remotely-sourced reference."""

    async def get(self, ref_id: str, /) -> r200[ReferenceReleaseResponse]:
        """
        Get latest update.

        Retrieves the latest remote reference update from GitHub.

        Also updates the reference document. This is the only way of doing so without
        waiting for an automatic refresh every 10 minutes.

        Status Codes:
            200: Successful operation
        """
        try:
            release = await get_data_from_req(self.request).references.get_release(
                ref_id, self.request.app
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceConflictError as err:
            raise HTTPBadRequest(text=str(err))
        except ResourceRemoteError as err:
            # GitHub was unreachable or returned an unusable response.
            raise HTTPBadGateway(text=str(err))

        return json_response(release)
@routes.view("/refs/{ref_id}/updates")
class ReferenceUpdatesView(PydanticView):
    """List past updates of a remote reference and trigger new ones."""

    async def get(self, ref_id: str, /) -> r200[GetReferenceUpdateResponse]:
        """
        List updates.

        Lists all updates made to the reference.

        Status Codes:
            200: Successful operation
        """
        try:
            updates = await get_data_from_req(self.request).references.get_updates(
                ref_id
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(updates)

    async def post(
        self, ref_id: str, /
    ) -> Union[r201[CreateReferenceUpdateResponse], r403, r404]:
        """
        Update a reference.

        Updates the reference to the last version of the linked remote reference.

        Status Codes:
            201: Successful operation
            403: Insufficient rights
            404: Not found
        """
        try:
            sub_document = await get_data_from_req(
                self.request
            ).references.create_update(
                ref_id, self.request["client"].user_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceError as err:
            raise HTTPBadRequest(text=str(err))

        return json_response(sub_document, status=201)
@routes.view("/refs/{ref_id}/otus")
class ReferenceOTUsView(PydanticView):
    """Find and create OTUs within a reference."""

    async def get(
        self,
        find: Optional[str],
        verified: Optional[bool],
        names: Optional[Union[bool, str]],
        ref_id: str,
        /,
    ) -> Union[r200[FindOTUsResponse], r404]:
        """
        Find OTUs.

        Finds OTUs by name or abbreviation. Results are paginated.

        Status Codes:
            200: Successful operation
            404: Not found
        """
        try:
            data = await get_data_from_req(self.request).references.get_otus(
                find, verified, names, ref_id, self.request.query
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(data)

    async def post(
        self, ref_id: str, /, data: CreateOTURequest
    ) -> Union[r201[OTU], r400, r403, r404]:
        """
        Create an OTU.

        Status Codes:
            201: Successful operation
            400: Bad request
            403: Insufficient rights
            404: Not found
        """
        try:
            otu = await get_data_from_req(self.request).references.create_otus(
                ref_id, data, self.request, self.request["client"].user_id
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceError as err:
            raise HTTPBadRequest(text=str(err))

        return json_response(otu, status=201, headers={"Location": f"/otus/{otu.id}"})
@routes.view("/refs/{ref_id}/history")
class ReferenceHistoryView(PydanticView):
    """List the change history of OTUs in a reference."""

    async def get(
        self, unbuilt: Optional[str], ref_id: str, /
    ) -> Union[r200[ReferenceHistoryResponse], r404]:
        """
        List history.

        Retrieves a paginated list of changes made to OTUs in the reference.

        Status Codes:
            200: Successful operation
            404: Not found
        """
        try:
            data = await get_data_from_req(self.request).references.get_history(
                ref_id, unbuilt, self.request.query
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(data)
@routes.view("/refs/{ref_id}/indexes")
class ReferenceIndexesView(PydanticView):
    """List reference indexes and start index builds."""

    async def get(self, ref_id: str, /) -> Union[r200[ListIndexesResponse], r404]:
        """
        List indexes.

        Retrieves a paginated list of indexes that have been created for the reference.

        Status Codes:
            200: Successful operation
            404: Not found
        """
        try:
            data = await get_data_from_req(self.request).references.find_indexes(
                ref_id, self.request.query
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(data)

    async def post(
        self, ref_id: str, /
    ) -> Union[r201[CreateReferenceIndexesResponse], r403, r404]:
        """
        Create an index.

        Starts a job to rebuild the otus Bowtie2 index on disk.

        Does a check to make sure there are no unverified OTUs in the collection
        and updates otu history to show the version and id of the new index.

        Status Codes:
            201: Successful operation
            403: Insufficient rights
            404: Not found
        """
        try:
            document = await get_data_from_req(self.request).references.create_index(
                ref_id, self.request, self.request["client"].user_id
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceConflictError as err:
            # e.g. an index build is already in progress or OTUs are unverified.
            raise HTTPConflict(text=str(err))
        except ResourceError as err:
            raise HTTPBadRequest(text=str(err))

        return json_response(
            document,
            status=201,
            headers={"Location": f"/indexes/{document.id}"},
        )
@routes.view("/refs/{ref_id}/groups")
class ReferenceGroupsView(PydanticView):
    """List the groups with access to a reference and grant access to new ones."""

    async def get(self, ref_id: str, /) -> Union[r200[ReferenceGroupsResponse], r404]:
        """
        List groups.

        Lists all groups that have access to the reference.

        Status Codes:
            200: Successful operation
            404: Not found
        """
        try:
            groups = await get_data_from_req(self.request).references.list_groups(
                ref_id
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(groups)

    async def post(
        self, ref_id: str, /, data: CreateReferenceGroupsSchema
    ) -> Union[r201[CreateReferenceGroupResponse], r400, r403, r404]:
        """
        Add a group.

        Adds a group to the reference. Groups can view, use, and modify the reference.

        Status Codes:
            201: Successful operation
            400: Bad request
            403: Insufficient rights
            404: Not found
        """
        try:
            group = await get_data_from_req(self.request).references.create_group(
                ref_id, data, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceConflictError as err:
            raise HTTPBadRequest(text=str(err))

        return json_response(
            group,
            status=201,
            headers={"Location": f"/refs/{ref_id}/groups/{group.id}"},
        )
@routes.view("/refs/{ref_id}/groups/{group_id}")
class ReferenceGroupView(PydanticView):
    """Get, update, and revoke a single group's access to a reference."""

    async def get(
        self, ref_id: str, group_id: str, /
    ) -> Union[r200[ReferenceGroupResponse], r404]:
        """
        Get a group.

        Retrieves the details of a group that has access to the reference.

        Status Codes:
            200: Successful operation
            404: Not found
        """
        try:
            group = await get_data_from_req(self.request).references.get_group(
                ref_id, group_id
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(group)

    async def patch(
        self,
        ref_id: str,
        group_id: str,
        /,
        data: ReferenceRightsRequest,
    ) -> Union[r200[ReferenceGroupResponse], r403, r404]:
        """
        Update a group.

        Updates the access rights a group has on the reference.

        Status Codes:
            200: Successful operation
            403: Insufficient rights
            404: Not found
        """
        try:
            group = await get_data_from_req(self.request).references.update_group(
                data, ref_id, group_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(group)

    async def delete(self, ref_id: str, group_id: str, /) -> Union[r204, r403, r404]:
        """
        Remove a group.

        Removes a group from the reference.

        Status Codes:
            204: No content
            403: Insufficient rights
            404: Not found
        """
        try:
            await get_data_from_req(self.request).references.delete_group(
                ref_id, group_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()

        # aiohttp convention: raising HTTPNoContent produces the 204 response.
        raise HTTPNoContent
@routes.view("/refs/{ref_id}/users")
class ReferenceUsersView(PydanticView):
    """Grant individual users access to a reference."""

    async def post(
        self, ref_id: str, /, data: CreateReferenceUsersRequest
    ) -> Union[r201[List[ReferenceUsersResponse]], r400, r403, r404]:
        """
        Add a user.

        Adds a user to the reference. Users can view, use, and modify the reference.

        Status Codes:
            201: Successful operation
            400: Bad request
            403: Insufficient rights
            404: Not found
        """
        try:
            user = await get_data_from_req(self.request).references.create_user(
                data, ref_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()
        except ResourceConflictError as err:
            raise HTTPBadRequest(text=str(err))

        return json_response(
            user, status=201, headers={"Location": f"/refs/{ref_id}/users/{user.id}"}
        )
@routes.view("/refs/{ref_id}/users/{user_id}")
class ReferenceUserView(PydanticView):
    """Update and revoke a single user's access to a reference."""

    async def patch(
        self, ref_id: str, user_id: str, /, data: ReferenceRightsRequest
    ) -> Union[r200[ReferenceGroupResponse], r403, r404]:
        """
        Update a user.

        Updates the access rights a user has on the reference.

        Status Codes:
            200: Successful operation
            403: Insufficient rights
            404: Not found
        """
        # NOTE(review): the response annotation uses ReferenceGroupResponse
        # although this handler returns a user — looks like a copy-paste from
        # ReferenceGroupView; confirm the intended response model.
        try:
            user = await get_data_from_req(self.request).references.update_user(
                data, ref_id, user_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()

        return json_response(user)

    async def delete(self, ref_id: str, user_id: str, /) -> Union[r204, r403, r404]:
        """
        Remove a user.

        Removes a user from the reference.

        Status Codes:
            204: No content
            403: Insufficient rights
            404: Not found
        """
        try:
            await get_data_from_req(self.request).references.delete_user(
                ref_id, user_id, self.request
            )
        except ResourceNotFoundError:
            raise NotFound()

        # aiohttp convention: raising HTTPNoContent produces the 204 response.
        raise HTTPNoContent
| mit | d939004394f797d69f96545d98c16a71 | 28.377551 | 87 | 0.585909 | 4.372058 | false | false | false | false |
virtool/virtool | virtool/analyses/db.py | 2 | 3799 | """
Work with analyses in the database.
"""
from typing import Any, Dict, List, Optional
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
import virtool.mongo.utils
import virtool.utils
from virtool.analyses.models import AnalysisFile
from virtool.mongo.transforms import AbstractTransform, apply_transforms
from virtool.indexes.db import get_current_id_and_version
from virtool.subtractions.db import AttachSubtractionTransform
from virtool.types import Document
from virtool.users.db import AttachUserTransform
from virtool.utils import base_processor
# Fields included when projecting analysis documents out of MongoDB for
# API responses.
PROJECTION = (
    "_id",
    "workflow",
    "created_at",
    "index",
    "job",
    "ready",
    "reference",
    "sample",
    "subtractions",
    "updated_at",
    "user",
)
# Workflow output file names of interest.
# NOTE(review): consumers of TARGET_FILES are outside this module - confirm usage.
TARGET_FILES = (
    "hmm.tsv",
    "assembly.fa",
    "orfs.fa",
    "unmapped_hosts.fq",
    "unmapped_otus.fq",
)
class AttachAnalysisFileTransform(AbstractTransform):
    """Attach the files stored for an analysis in Postgres to its document."""

    def __init__(self, pg: AsyncEngine):
        # Engine used to open a short-lived session per prepared document.
        self._pg = pg

    async def attach_one(self, document: Document, prepared: Any) -> Document:
        """Return a copy of ``document`` with the prepared file list under ``"files"``."""
        attached = dict(document)
        attached["files"] = prepared
        return attached

    async def prepare_one(self, document: Document) -> Any:
        """Load all ``AnalysisFile`` rows whose analysis matches ``document["id"]``."""
        statement = select(AnalysisFile).filter_by(analysis=document["id"])

        async with AsyncSession(self._pg) as session:
            rows = (await session.execute(statement)).scalars().all()
            return [row.to_dict() for row in rows]
async def processor(db, document: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process an analysis document by attaching user and subtraction data.

    :param db: the application database object
    :param document: the analysis document
    :return: the processed analysis document
    """
    transforms = [
        AttachSubtractionTransform(db),
        AttachUserTransform(db),
    ]

    prepared = base_processor(document)

    return await apply_transforms(prepared, transforms)
async def create(
    db,
    sample_id: str,
    ref_id: str,
    subtractions: List[str],
    user_id: str,
    workflow: str,
    job_id: str,
    analysis_id: Optional[str] = None,
) -> dict:
    """
    Creates a new analysis.
    Ensures that a valid subtraction host was the submitted. Configures read and write
    permissions on the sample document and assigns it a creator username based on the
    requesting connection.
    :param db: the application database object
    :param sample_id: the ID of the sample to create an analysis for
    :param ref_id: the ID of the reference to analyze against
    :param subtractions: the list of the subtraction IDs to remove from the analysis
    :param user_id: the ID of the user starting the job
    :param workflow: the analysis workflow to run
    :param job_id: the ID of the job
    :param analysis_id: the ID of the analysis
    :return: the analysis document
    """
    # Snapshot the reference's current index so the analysis records exactly
    # which index build it ran against.
    index_id, index_version = await get_current_id_and_version(db, ref_id)
    created_at = virtool.utils.timestamp()
    document = {
        "created_at": created_at,
        "files": [],
        "job": {"id": job_id},
        "index": {"id": index_id, "version": index_version},
        "reference": {
            "id": ref_id,
            "name": await virtool.mongo.utils.get_one_field(
                db.references, "name", ref_id
            ),
        },
        # "ready" is expected to flip to True when the workflow completes.
        "ready": False,
        "results": None,
        "sample": {"id": sample_id},
        "subtractions": subtractions,
        "updated_at": created_at,
        "user": {
            "id": user_id,
        },
        "workflow": workflow,
    }
    # The caller may pin the document ID (e.g. for jobs created elsewhere).
    if analysis_id:
        document["_id"] = analysis_id
    # NOTE(review): assumes the mongo wrapper's insert_one() returns the
    # inserted document rather than a pymongo InsertOneResult - confirm.
    return base_processor(await db.analyses.insert_one(document))
| mit | 7a2a988d15600ee106c0a1b8ab9d51f9 | 26.729927 | 86 | 0.623322 | 3.957292 | false | false | false | false |
virtool/virtool | virtool/account/oas.py | 2 | 9266 | from typing import Union, Optional
from pydantic import BaseModel, constr, Field, root_validator, validator
from virtool_core.models.enums import QuickAnalyzeWorkflow
from virtool_core.models.account import Account, AccountSettings, check_email, APIKey
from virtool_core.models.validators import prevent_none
from virtool.groups.oas import UpdatePermissionsRequest
class UpdateAccountRequest(BaseModel):
    """
    Fields for updating a user account.
    """
    # All fields are optional; only provided fields are updated.
    email: Optional[constr(strip_whitespace=True)] = Field(
        description="an email address"
    )
    old_password: Optional[str] = Field(description="the old password for verification")
    password: Optional[str] = Field(description="the new password")
    class Config:
        schema_extra = {
            "example": {
                "email": "dev@virtool.ca",
                "password": "foo_bar_1",
                "old_password": "hello_world",
            }
        }
    @root_validator
    def check_password(cls, values: dict) -> dict:
        """
        Checks if old_password has also been input if a new password
        is provided.
        """
        # The two password fields must be supplied together: a new password
        # requires the old one for verification, and the old one alone is
        # meaningless.
        old_password, password = values.get("old_password"), values.get("password")
        if password:
            if not old_password:
                raise ValueError(
                    "The old password needs to be given in order for the password to be changed"
                )
        else:
            if old_password:
                raise ValueError(
                    "The new password needs to be given in order for the password to be changed"
                )
        return values
    _email_validation = validator("email", allow_reuse=True)(check_email)
    _prevent_none = prevent_none("*")
class UpdateAccountResponse(Account):
    """Response model for an updated account; adds only an OpenAPI example."""
    class Config:
        schema_extra = {
            "example": {
                "administrator": False,
                "groups": [],
                "handle": "bob",
                "id": "test",
                "last_password_change": "2015-10-06T20:00:00Z",
                "permissions": {
                    "cancel_job": False,
                    "create_ref": False,
                    "create_sample": False,
                    "modify_hmm": False,
                    "modify_subtraction": False,
                    "remove_file": False,
                    "remove_job": False,
                    "upload_file": False,
                },
                "primary_group": "technician",
                "settings": {
                    "quick_analyze_workflow": "pathoscope_bowtie",
                    "show_ids": True,
                    "show_versions": True,
                    "skip_quick_analyze_dialog": True,
                },
                "email": "dev@virtool.ca",
            }
        }
class UpdateSettingsRequest(BaseModel):
    """
    Fields for updating a user account's settings.
    """
    show_ids: Optional[bool] = Field(
        description="show document ids in client where possible"
    )
    skip_quick_analyze_dialog: Optional[bool] = Field(
        description="don’t show the quick analysis dialog"
    )
    quick_analyze_workflow: Optional[QuickAnalyzeWorkflow] = Field(
        description="workflow to use for quick analysis"
    )
    show_versions: Optional[bool] = Field(
        description="show document versions in client where possible"
    )
    class Config:
        schema_extra = {
            "example": {
                "show_ids": False,
            }
        }
    _prevent_none = prevent_none(
        "*"
    )
class CreateKeysRequest(BaseModel):
    """Request body for creating a new API key."""
    name: constr(strip_whitespace=True, min_length=1) = Field(
        description="a non-unique name for the API key"
    )
    permissions: Optional[UpdatePermissionsRequest] = Field(
        default=UpdatePermissionsRequest(),
        description="an object describing the permissions the new key will have. "
        "Any unset permissions will default to false",
    )
    class Config:
        schema_extra = {
            "example": {"name": "Foobar", "permissions": {"create_sample": True}}
        }
    _prevent_none = prevent_none(
        "permissions"
    )
class CreateAPIKeyResponse(APIKey):
    """Response returned when an API key is created.

    NOTE(review): ``key`` appears to be the raw secret returned only at
    creation time (see "raw_key" in the example) - confirm.
    """
    key: str
    class Config:
        schema_extra = {
            "example": {
                "created_at": "2015-10-06T20:00:00Z",
                "groups": [],
                "id": "foobar_0",
                "key": "raw_key",
                "name": "Foobar",
                "permissions": {
                    "cancel_job": False,
                    "create_ref": False,
                    "create_sample": True,
                    "modify_hmm": False,
                    "modify_subtraction": False,
                    "remove_file": False,
                    "remove_job": False,
                    "upload_file": False,
                },
            }
        }
class UpdateKeyRequest(BaseModel):
    """Request body for updating an existing API key's permissions."""
    permissions: Optional[UpdatePermissionsRequest] = Field(
        description="a permission update comprising an object keyed by permissions "
        "with boolean values"
    )
    class Config:
        schema_extra = {"example": {"permissions": {"modify_subtraction": True}}}
    _prevent_none = prevent_none(
        "permissions"
    )
class APIKeyResponse(APIKey):
    """Response model for a fetched API key (no ``key`` secret in the example)."""
    class Config:
        schema_extra = {
            "example": {
                "created_at": "2015-10-06T20:00:00Z",
                "groups": [],
                "id": "foobar_0",
                "name": "Foobar",
                "permissions": {
                    "cancel_job": False,
                    "create_ref": False,
                    "create_sample": True,
                    "modify_hmm": False,
                    "modify_subtraction": False,
                    "remove_file": False,
                    "remove_job": False,
                    "upload_file": False,
                },
            }
        }
class CreateLoginRequest(BaseModel):
    """Request body for logging in to an account."""
    username: constr(min_length=1) = Field(description="account username")
    password: constr(min_length=1) = Field(description="account password")
    remember: Optional[bool] = Field(
        default=False,
        description="value determining whether the session will last for 1 month or "
        "1 hour",
    )
    class Config:
        schema_extra = {
            "example": {
                "username": "foobar",
                "password": "p@ssword123",
                "remember": False,
            }
        }
    _prevent_none = prevent_none(
        "remember"
    )
class LoginResponse(BaseModel):
    """Response for a login attempt; example only."""
    class Config:
        schema_extra = {"example": {"reset": False}}
class ResetPasswordRequest(BaseModel):
    """Request body for resetting a password with a reset code."""
    password: str
    reset_code: str
    class Config:
        schema_extra = {
            "example": {
                "password": "p@ssword123",
                "reset_code": "4bcda8b3bcaf5f84cc6e26a3d23a6179f29d356e43c9ced1b6de0d8f4946555e",
            }
        }
class AccountResetPasswordResponse(BaseModel):
    """Response for a password reset attempt; example only."""
    class Config:
        schema_extra = {"example": {"login": False, "reset": False}}
class AccountResponse(Account):
    """Response model for fetching the current account; adds an OpenAPI example."""
    class Config:
        schema_extra = {
            "example": {
                "administrator": False,
                "groups": [],
                "handle": "bob",
                "id": "test",
                "last_password_change": "2015-10-06T20:00:00Z",
                "permissions": {
                    "cancel_job": False,
                    "create_ref": False,
                    "create_sample": False,
                    "modify_hmm": False,
                    "modify_subtraction": False,
                    "remove_file": False,
                    "remove_job": False,
                    "upload_file": False,
                },
                "primary_group": "technician",
                "settings": {
                    "quick_analyze_workflow": "pathoscope_bowtie",
                    "show_ids": True,
                    "show_versions": True,
                    "skip_quick_analyze_dialog": True,
                },
            }
        }
class AccountSettingsResponse(AccountSettings):
    """Response model for fetching account settings; adds an OpenAPI example."""
    class Config:
        schema_extra = {
            "example": {
                "skip_quick_analyze_dialog": True,
                "show_ids": True,
                "show_versions": True,
                "quick_analyze_workflow": "pathoscope_bowtie",
            }
        }
class ListAPIKeysResponse(APIKey):
    """Response model for listing API keys; example shows the list shape."""
    class Config:
        schema_extra = {
            "example": [
                {
                    "created_at": "2015-10-06T20:00:00Z",
                    "groups": [],
                    "id": "baz_1",
                    "name": "Baz",
                    "permissions": {
                        "cancel_job": False,
                        "create_ref": False,
                        "create_sample": True,
                        "modify_hmm": False,
                        "modify_subtraction": False,
                        "remove_file": False,
                        "remove_job": False,
                        "upload_file": False,
                    },
                },
            ]
        }
| mit | 41d4f360bd57286effa0d7f301fc1a39 | 29.175896 | 97 | 0.492444 | 4.508029 | false | false | false | false |
virtool/virtool | virtool/blast/utils.py | 2 | 5098 | import io
import json
import re
from logging import getLogger
from typing import Tuple
from zipfile import ZipFile
import aiohttp
import virtool.errors
logger = getLogger("blast")
#: The URL to send BLAST requests to.
BLAST_URL = "https://blast.ncbi.nlm.nih.gov/Blast.cgi"
def extract_blast_info(html: str) -> Tuple[str, int]:
    """
    Extract the RID and RTOE from BLAST HTML data containing a <QBlastInfo /> tag.

    :param html: the input HTML
    :return: a tuple containing the RID and RTOE
    :raises ValueError: if the QBlastInfo block or either field is missing
    """
    try:
        string = html.split("<!--QBlastInfoBegin")[1].split("QBlastInfoEnd")[0]
    except IndexError:
        raise ValueError("Could not find QBlastInfo block in BLAST response") from None

    rid_match = re.search(r"RID = (.+)", string)
    rtoe_match = re.search(r"RTOE = (.+)", string)

    if rid_match is None or rtoe_match is None:
        raise ValueError("Could not find RID and RTOE in BLAST response")

    # ``.+`` stops at "\n" but keeps a trailing "\r" on CRLF responses; strip
    # so the RID is clean for use in follow-up request parameters.
    rid = rid_match.group(1).strip()
    rtoe = rtoe_match.group(1).strip()

    return rid, int(rtoe)
def extract_blast_zip(data, rid: str) -> dict:
    """
    Extract the BLAST result JSON data given zipped binary data.

    Fails if the data is not valid zip.

    :param data: the binary zip data
    :param rid: the RID for the blast request
    :return: the extracted BLAST JSON data
    """
    # Use context managers so the archive and the member handle are always
    # closed, even if the member is missing or the JSON is malformed.
    with ZipFile(io.BytesIO(data)) as zipped:
        # NCBI names the JSON member "<RID>_1.json" inside the archive.
        with zipped.open(f"{rid}_1.json", "r") as handle:
            return json.loads(handle.read().decode())
def format_blast_content(result: dict) -> dict:
    """
    Format the BLAST result data from NCBI into a format easily usable by Virtool.

    :param result: the raw BLAST result
    :return: the formatted BLAST result
    :raises virtool.errors.NCBIError: if either nesting level does not contain
        exactly one entry
    """
    # The NCBI payload nests the report one level down; both levels must hold
    # exactly one entry.
    if len(result) != 1:
        raise virtool.errors.NCBIError(
            f"Unexpected BLAST result count {len(result)} returned"
        )

    outer = result["BlastOutput2"]

    if len(outer) != 1:
        raise virtool.errors.NCBIError(
            f"Unexpected BLAST result count {len(outer)} returned"
        )

    report = outer["report"]

    formatted = {
        "program": report["program"],
        "params": report["params"],
        "version": report["version"],
        "target": report["search_target"],
    }

    search = report["results"]["search"]

    return {
        **formatted,
        "hits": [format_blast_hit(h) for h in search["hits"]],
        "stat": search["stat"],
        "masking": search.get("query_masking"),
    }
def format_blast_hit(hit: dict) -> dict:
    """
    Format a BLAST hit from NCBI into a format more usable by Virtool.

    :param hit: the BLAST hit
    :return: the formatted hit
    """
    # Only the first description and first HSP are used.
    description = hit["description"][0]
    first_hsp = hit["hsps"][0]

    formatted = {
        "accession": description.get("accession", ""),
        "taxid": description.get("taxid", ""),
        "title": description.get("title", ""),
        "name": description.get("sciname", "No name"),
        "len": hit["len"],
    }

    for field in ("identity", "evalue", "align_len", "score", "bit_score", "gaps"):
        formatted[field] = first_hsp[field]

    return formatted
async def check_rid(rid: str) -> bool:
    """
    Check if the BLAST process identified by the passed RID is ready.

    :param rid: the RID to check
    :return: ``True`` if ready, ``False`` otherwise
    :raises virtool.errors.NCBIError: if NCBI responds with a non-200 status
    """
    query = {
        "CMD": "Get",
        "RID": rid,
        "FORMAT_OBJECT": "SearchInfo",
    }

    async with aiohttp.ClientSession() as session:
        async with session.get(BLAST_URL, params=query) as resp:
            if resp.status != 200:
                raise virtool.errors.NCBIError(
                    f"RID check request returned status {resp.status}"
                )

            body = await resp.text()

    # NCBI reports an in-progress search with "Status=WAITING" in the body.
    return "Status=WAITING" not in body
async def initialize_ncbi_blast(sequence: str) -> Tuple[str, int]:
    """
    Send a request to NCBI to BLAST the passed sequence.
    Return the RID and RTOE from the response.
    :param sequence: the nucleotide sequence to BLAST
    :return: the RID and RTOE for the request
    :raises virtool.errors.NCBIError: if NCBI responds with a non-200 status
    """
    # Parameters passed in the URL string. eg. ?CMD=Put&DATABASE=nr
    # Runs a megablast (blastn) search against the "nr" database, keeping the
    # top 5 hits and requesting JSON2 output.
    params = {
        "CMD": "Put",
        "DATABASE": "nr",
        "PROGRAM": "blastn",
        "MEGABLAST": "on",
        "HITLIST_SIZE": 5,
        "FILTER": "mL",
        "FORMAT_TYPE": "JSON2",
    }
    # Data passed as POST content.
    data = {"QUERY": sequence}
    async with aiohttp.ClientSession() as session, session.post(
        BLAST_URL, params=params, data=data
    ) as resp:
        if resp.status != 200:
            raise virtool.errors.NCBIError(
                f"BLAST request returned status: {resp.status}"
            )
        # Extract and return the RID and RTOE from the QBlastInfo tag.
        html = await resp.text()
        logger.debug("Started BLAST on NCBI")
        return extract_blast_info(html)
async def get_ncbi_blast_result(run_in_process: callable, rid: str) -> dict:
    """
    Retrieve the BLAST result with the given `rid` from NCBI.
    :param run_in_process: the application processing running function
    :param rid: the rid to retrieve a result for
    :return: the BLAST result
    """
    params = {
        "CMD": "Get",
        "RID": rid,
        "FORMAT_TYPE": "JSON2",
        "FORMAT_OBJECT": "Alignment",
    }
    async with aiohttp.ClientSession() as session, session.get(
        BLAST_URL, params=params
    ) as resp:
        # NOTE(review): unlike the other NCBI requests in this module, the
        # response status is not checked here - confirm this is intentional.
        data = await resp.read()
        # Unzipping and JSON parsing are CPU-bound, so they are offloaded to
        # another process via the application's run_in_process helper.
        return await run_in_process(extract_blast_zip, data, rid)
| mit | f952a8e45a864435eb7514a485096daf | 25.010204 | 84 | 0.60102 | 3.520718 | false | false | false | false |
virtool/virtool | tests/indexes/test_checks.py | 2 | 1141 | import pytest
from virtool.data.errors import ResourceConflictError
from virtool.indexes.checks import check_fasta_file_uploaded, check_index_files_uploaded
from virtool.indexes.db import FILES
@pytest.mark.parametrize("error", [None, 409])
async def test_check_fasta_file_uploaded(error):
results = {"file1": "gz", "file2": "fasta"}
if error == 409:
del results["file2"]
with pytest.raises(ResourceConflictError) as err:
await check_fasta_file_uploaded(results)
assert "FASTA" in str(err)
return
assert await check_fasta_file_uploaded(results) is None
@pytest.mark.parametrize("error", [None, 409])
async def test_check_index_files_uploaded(error):
results = {file: FILES.index(file) for file in FILES}
if error == 409:
del results["reference.2.bt2"]
del results["reference.3.bt2"]
with pytest.raises(ResourceConflictError) as err:
await check_index_files_uploaded(results)
assert "reference.2.bt2" in str(err) and "reference.3.bt2" in str(err)
return
assert await check_index_files_uploaded(results) is None
| mit | b766206305db7cfd7708a5362746264a | 30.694444 | 88 | 0.68624 | 3.633758 | false | true | false | false |
danielyule/hearthbreaker | hearthbreaker/cards/spells/warrior.py | 3 | 9859 | import copy
from hearthbreaker.cards.base import SpellCard
from hearthbreaker.tags.action import Damage, Draw, RemoveFromHand
from hearthbreaker.tags.base import AuraUntil, Buff, Effect, ActionTag
from hearthbreaker.tags.card_source import Same
from hearthbreaker.tags.condition import GreaterThan, IsDamaged
from hearthbreaker.tags.event import TurnEnded, Drawn
from hearthbreaker.tags.selector import MinionSelector, HeroSelector, PlayerSelector, Count
from hearthbreaker.tags.status import Charge as _Charge, MinimumHealth, ManaChange
import hearthbreaker.targeting
import hearthbreaker.tags.action
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
class BattleRage(SpellCard):
    # Draw a card for each damaged friendly character (minions + hero).
    def __init__(self):
        super().__init__("Battle Rage", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
    def use(self, player, game):
        def damaged_character(character):
            return character.health < character.calculate_max_health()
        super().use(player, game)
        characters = copy.copy(player.minions)
        characters.append(player.hero)
        characters = [character for character in characters if damaged_character(character)]
        for i in range(0, len(characters)):
            player.draw()
class Brawl(SpellCard):
    # One random minion survives; every other minion (both sides) dies.
    def __init__(self):
        super().__init__("Brawl", 5, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC)
    def can_use(self, player, game):
        # Requires at least two minions in play across both sides.
        return super().can_use(player, game) and len(player.minions) + len(player.opponent.minions) >= 2
    def use(self, player, game):
        super().use(player, game)
        minions = copy.copy(player.minions)
        minions.extend(game.other_player.minions)
        if len(minions) > 1:
            survivor = game.random_choice(minions)
            for minion in minions:
                if minion is not survivor:
                    minion.die(self)
class Charge(SpellCard):
    # Give a friendly minion +2 Attack and Charge.
    def __init__(self):
        super().__init__("Charge", 3, CHARACTER_CLASS.WARRIOR, CARD_RARITY.FREE,
                         target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        self.target.change_attack(2)
        self.target.add_buff(Buff(_Charge()))
class Cleave(SpellCard):
    # Deal 2 damage to two distinct random enemy minions.
    def __init__(self):
        super().__init__("Cleave", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        minions = copy.copy(game.other_player.minions)
        for i in range(0, 2):
            minion = game.random_choice(minions)
            minions.remove(minion)
            minion.damage(player.effective_spell_damage(2), self)
    def can_use(self, player, game):
        return super().can_use(player, game) and len(game.other_player.minions) >= 2
class CommandingShout(SpellCard):
    # Friendly minions can't drop below 1 health this turn; draw a card.
    def __init__(self):
        super().__init__("Commanding Shout", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE)
    def use(self, player, game):
        super().use(player, game)
        player.add_aura(AuraUntil(MinimumHealth(1), MinionSelector(), TurnEnded()))
        player.draw()
class Execute(SpellCard):
    # Destroy a damaged enemy minion (filter enforces the damaged requirement).
    def __init__(self):
        super().__init__("Execute", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.FREE,
                         target_func=hearthbreaker.targeting.find_enemy_minion_spell_target,
                         filter_func=lambda target: target.health != target.calculate_max_health() and
                         target.spell_targetable())
    def use(self, player, game):
        super().use(player, game)
        self.target.die(self)
class HeroicStrike(SpellCard):
    # Give your hero +4 Attack this turn.
    def __init__(self):
        super().__init__("Heroic Strike", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.FREE)
    def use(self, player, game):
        super().use(player, game)
        player.hero.change_temp_attack(4)
class InnerRage(SpellCard):
    # Deal 1 damage to a minion and give it +2 Attack.
    def __init__(self):
        super().__init__("Inner Rage", 0, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON,
                         target_func=hearthbreaker.targeting.find_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        self.target.damage(1, self)
        self.target.change_attack(2)
class MortalStrike(SpellCard):
    # Deal 4 damage; 6 instead while your hero has 12 or less health.
    def __init__(self):
        super().__init__("Mortal Strike", 4, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE,
                         target_func=hearthbreaker.targeting.find_spell_target)
    def use(self, player, game):
        super().use(player, game)
        if player.hero.health <= 12:
            self.target.damage(player.effective_spell_damage(6), self)
        else:
            self.target.damage(player.effective_spell_damage(4), self)
class Rampage(SpellCard):
    # Give a damaged minion +3/+3 (filter restricts targeting to damaged minions).
    def __init__(self):
        super().__init__("Rampage", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON,
                         target_func=hearthbreaker.targeting.find_minion_spell_target,
                         filter_func=lambda target: target.health != target.calculate_max_health() and
                         target.spell_targetable())
    def use(self, player, game):
        super().use(player, game)
        self.target.change_attack(3)
        self.target.increase_health(3)
class ShieldBlock(SpellCard):
    # Gain 5 Armor and draw a card.
    def __init__(self):
        super().__init__("Shield Block", 3, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        player.hero.increase_armor(5)
        player.draw()
class ShieldSlam(SpellCard):
    # Deal damage to a minion equal to your hero's current Armor.
    def __init__(self):
        super().__init__("Shield Slam", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC,
                         target_func=hearthbreaker.targeting.find_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        self.target.damage(player.effective_spell_damage(player.hero.armor), self)
class Slam(SpellCard):
    # Deal 2 damage to a minion; if it survives, draw a card.
    def __init__(self):
        super().__init__("Slam", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON,
                         target_func=hearthbreaker.targeting.find_minion_spell_target)
    def use(self, player, game):
        super().use(player, game)
        # Survival is determined before the damage is applied: the minion
        # "survives" if its health exceeds the damage or it has Divine Shield.
        if self.target.health > player.effective_spell_damage(2) or self.target.divine_shield:
            self.target.damage(player.effective_spell_damage(2), self)
            player.draw()
        else:
            self.target.damage(player.effective_spell_damage(2), self)
class Upgrade(SpellCard):
    # Give your weapon +1/+1, or equip a Heavy Axe if you have none.
    def __init__(self):
        super().__init__("Upgrade!", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE)
    def use(self, player, game):
        super().use(player, game)
        # Imported locally, presumably to avoid a circular import - confirm.
        from hearthbreaker.cards.weapons.warrior import HeavyAxe
        if player.weapon:
            player.weapon.durability += 1
            player.weapon.base_attack += 1
        else:
            heavy_axe = HeavyAxe().create_weapon(player)
            heavy_axe.equip(player)
class Whirlwind(SpellCard):
    # Deal 1 damage to ALL minions (both sides).
    def __init__(self):
        super().__init__("Whirlwind", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
    def use(self, player, game):
        super().use(player, game)
        targets = copy.copy(game.other_player.minions)
        targets.extend(game.current_player.minions)
        for minion in targets:
            minion.damage(player.effective_spell_damage(1), self)
class BouncingBlade(SpellCard):
    # Deal 1 damage to a random minion, repeating until one dies (max 80 bounces).
    def __init__(self):
        super().__init__("Bouncing Blade", 3, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC)
    def can_use(self, player, game):
        return super().can_use(player, game) and len(player.minions) + len(player.opponent.minions) >= 1
    def use(self, player, game):
        super().use(player, game)
        # According to https://www.youtube.com/watch?v=7ij_6_Dx47g, Bouncing Blade bounces at most 80 times
        # TODO Bouncing blade should only target those minions whose health is above minimum
        # See http://us.battle.net/hearthstone/en/forum/topic/15142084659
        targets = player.minions[:] + player.opponent.minions[:]
        if len(targets):
            for bounces in range(80):
                target = game.random_choice(targets)
                target.damage(player.effective_spell_damage(1), self)
                if target.dead:
                    break
class Crush(SpellCard):
    # Destroy a minion; costs (4) less while a damaged minion matches
    # MinionSelector(IsDamaged()) (default scope - presumably friendly).
    def __init__(self):
        super().__init__("Crush", 7, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC,
                         target_func=hearthbreaker.targeting.find_minion_spell_target,
                         buffs=[Buff(ManaChange(-4), GreaterThan(Count(MinionSelector(IsDamaged())), value=0))])
    def use(self, player, game):
        super().use(player, game)
        self.target.die(self)
class BurrowingMine(SpellCard):
    # Uncollectible trap: when drawn it deals 10 damage to the hero, removes
    # itself from hand, and draws a replacement card.
    def __init__(self):
        super().__init__("Burrowing Mine", 0, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON, False,
                         effects=[Effect(Drawn(), ActionTag(Damage(10), HeroSelector())),
                                  Effect(Drawn(), ActionTag(RemoveFromHand(Same()),
                                                            PlayerSelector())),
                                  Effect(Drawn(), ActionTag(Draw(), PlayerSelector()))])
    def use(self, player, game):
        super().use(player, game)
class Revenge(SpellCard):
    # Deal 1 damage to all minions; 3 instead while your hero has 12 or less health.
    def __init__(self):
        super().__init__("Revenge", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE)
    def use(self, player, game):
        super().use(player, game)
        targets = copy.copy(game.other_player.minions)
        targets.extend(game.current_player.minions)
        if player.hero.health <= 12:
            for minion in targets:
                minion.damage(player.effective_spell_damage(3), self)
        else:
            for minion in targets:
                minion.damage(player.effective_spell_damage(1), self)
| mit | 5643d795590136d2abc53142aed49b4f | 34.981752 | 112 | 0.621564 | 3.529896 | false | false | false | false |
danielyule/hearthbreaker | tests/power_tests.py | 7 | 7968 | import random
import unittest
from hearthbreaker.agents.basic_agents import PredictableAgent, DoNothingAgent
from hearthbreaker.cards.minions.hunter import SteamwheedleSniper
from hearthbreaker.cards.minions.neutral import StonetuskBoar
from hearthbreaker.cards.minions.priest import ProphetVelen
from hearthbreaker.cards.minions.warlock import DreadInfernal
from hearthbreaker.cards.spells.mage import Pyroblast
from tests.agents.testing_agents import CardTestingAgent
from hearthbreaker.cards import HuntersMark, MogushanWarden, AvengingWrath, CircleOfHealing, AlAkirTheWindlord, \
Shadowform, DefiasRingleader, Doomguard, ArcaneIntellect, Swipe, ArathiWeaponsmith, MassDispel
from hearthbreaker.powers import MindSpike, MindShatter
from tests.testing_utils import generate_game_for
class TestPowers(unittest.TestCase):
    # Exercises each hero power implementation with scripted agents.
    def setUp(self):
        # Fixed seed so random effects (totem choice, random targeting) are
        # deterministic across runs.
        random.seed(1857)
    def test_DruidPower(self):
        game = generate_game_for(Swipe, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(1, game.players[0].hero.armor)
        self.assertEqual(29, game.players[1].hero.health)
    def test_HunterPower(self):
        game = generate_game_for(HuntersMark, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(28, game.other_player.hero.health)
    def test_MagePower(self):
        game = generate_game_for(ArcaneIntellect, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(29, game.players[1].hero.health)
    def test_PaladinPower(self):
        game = generate_game_for(AvengingWrath, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        # The power summons a 1/1 Silver Hand Recruit.
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(1, game.current_player.minions[0].calculate_attack())
        self.assertEqual(1, game.current_player.minions[0].health)
        self.assertEqual("Silver Hand Recruit", game.current_player.minions[0].card.name)
    def test_PriestPower(self):
        game = generate_game_for(CircleOfHealing, MogushanWarden, PredictableAgent, DoNothingAgent)
        game.players[1].hero.health = 20
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(22, game.players[1].hero.health)
    def test_MindSpike(self):
        game = generate_game_for(Shadowform, MogushanWarden, PredictableAgent, DoNothingAgent)
        # Replace the default power and rebind its hero reference manually.
        game.players[0].hero.power = MindSpike()
        game.players[0].hero.power.hero = game.players[0].hero
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(28, game.players[1].hero.health)
    def test_MindShatter(self):
        game = generate_game_for(Shadowform, Shadowform, PredictableAgent, DoNothingAgent)
        game.players[0].hero.power = MindShatter()
        game.players[0].hero.power.hero = game.players[0].hero
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(27, game.players[1].hero.health)
    def test_RoguePower(self):
        game = generate_game_for(DefiasRingleader, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        # The power equips a 1/1 Wicked Knife and the hero attacks with it.
        self.assertEqual(1, game.players[0].weapon.base_attack)
        self.assertEqual(1, game.players[0].weapon.durability)
        self.assertEqual(29, game.players[1].hero.health)
    def test_ShamanPower(self):
        game = generate_game_for(AlAkirTheWindlord, MassDispel, PredictableAgent, CardTestingAgent)
        # With the fixed seed the four distinct totems appear in this order.
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Stoneclaw Totem", game.players[0].minions[0].card.name)
        self.assertTrue(game.players[0].minions[0].taunt)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual("Healing Totem", game.players[0].minions[1].card.name)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(3, len(game.players[0].minions))
        self.assertEqual("Searing Totem", game.players[0].minions[2].card.name)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual("Wrath of Air Totem", game.players[0].minions[3].card.name)
        self.assertEqual(1, game.players[0].spell_damage)
        # All Totems are out, nothing should be summoned
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(4, len(game.players[0].minions))
    def test_WarlockPower(self):
        game = generate_game_for(Doomguard, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        # Life Tap: 2 damage to own hero, one extra card drawn.
        self.assertEqual(28, game.players[0].hero.health)
        self.assertEqual(6, len(game.players[0].hand))
    def test_WarriorPower(self):
        game = generate_game_for(ArathiWeaponsmith, MogushanWarden, PredictableAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(2, game.players[0].hero.armor)
    def test_double_power_use(self):
        # Captured so the nested agent class can make assertions on this test case.
        testing_env = self
        class PowerTestingAgent(DoNothingAgent):
            def __init__(self):
                super().__init__()
                self.turn = 0
            def do_turn(self, player):
                self.turn += 1
                # NOTE(review): "is" used for int comparison; works in CPython
                # for small ints but should be "==".
                if self.turn is 4:
                    player.hero.power.use()
                    testing_env.assertFalse(player.hero.power.can_use())
                elif self.turn is 7:
                    player.hero.power.use()
                    player.game.play_card(player.hand[0])
                    testing_env.assertTrue(player.hero.power.can_use())
        game = generate_game_for(Shadowform, MogushanWarden, PowerTestingAgent, DoNothingAgent)
        for turn in range(0, 13):
            game.play_single_turn()
    def test_Velen_and_Hunter(self):
        game = generate_game_for(HuntersMark, StonetuskBoar, PredictableAgent, DoNothingAgent)
        ProphetVelen().summon(game.players[0], game, 0)
        for turn in range(3):
            game.play_single_turn()
        # Velen attacks once for 7 damage, and the hero power attacks once for 4 damage
        self.assertEqual(19, game.other_player.hero.health)
    def test_Velen_SteamwheedleSniper_and_Hunter(self):
        game = generate_game_for(SteamwheedleSniper, StonetuskBoar, PredictableAgent, DoNothingAgent)
        for turn in range(8):
            game.play_single_turn()
        ProphetVelen().summon(game.players[0], game, 0)
        game.play_single_turn()
        self.assertEqual(22, game.other_player.hero.health)
        self.assertEqual(3, game.current_player.minions[1].health)
        self.assertEqual("Prophet Velen", game.current_player.minions[1].card.name)
    def test_Velen_and_Warlock(self):
        game = generate_game_for(DreadInfernal, StonetuskBoar, PredictableAgent, DoNothingAgent)
        ProphetVelen().summon(game.players[0], game, 0)
        for turn in range(3):
            game.play_single_turn()
        # The player's hero is damaged for 4 rather than 2 because of Velen
        self.assertEqual(26, game.current_player.hero.health)
    def test_Velen_and_Mage(self):
        game = generate_game_for(Pyroblast, StonetuskBoar, PredictableAgent, DoNothingAgent)
        ProphetVelen().summon(game.players[0], game, 0)
        for turn in range(3):
            game.play_single_turn()
        # Velen is Hero powered for two damage
        self.assertEqual(5, game.current_player.minions[0].health)
| mit | 65c2dabd596ce8518c29a1eaa295a218 | 37.679612 | 113 | 0.664659 | 3.345088 | false | true | false | false |
danielyule/hearthbreaker | hearthbreaker/cards/minions/rogue.py | 3 | 7301 | from hearthbreaker.cards.base import MinionCard
from hearthbreaker.cards.minions.neutral import Nerubian
from hearthbreaker.cards.spells.neutral import GallywixsCoin
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import Kill, Bounce, Summon, Give, Damage, ChangeTarget, AddCard, IncreaseWeaponAttack
from hearthbreaker.tags.base import Effect, Deathrattle, Battlecry, Buff, ActionTag
from hearthbreaker.tags.card_source import LastCard
from hearthbreaker.tags.condition import IsMinion, IsType, NotCurrentTarget, OneIn, Not, HasCardName, \
OpponentMinionCountIsGreaterThan, And, IsDamaged
from hearthbreaker.tags.event import DidDamage, MinionSummoned, TurnEnded, Attack, SpellCast
from hearthbreaker.tags.selector import TargetSelector, MinionSelector, PlayerSelector, UserPicker, \
BothPlayer, CharacterSelector, RandomPicker, SelfSelector, EnemyPlayer, FriendlyPlayer, Attribute, WeaponSelector
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.tags.status import Stealth, ChangeAttack, ChangeHealth
class DefiasBandit(MinionCard):
def __init__(self):
super().__init__("Defias Bandit", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON, False)
def create_minion(self, player):
return Minion(2, 1)
class DefiasRingleader(MinionCard):
    """Defias Ringleader: 2-mana Rogue 2/2; combo summons a Defias Bandit for its controller."""

    def __init__(self):
        super().__init__("Defias Ringleader", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         combo=Battlecry(Summon(DefiasBandit()), PlayerSelector()))

    def create_minion(self, player):
        return Minion(2, 2)
class EdwinVanCleef(MinionCard):
    """Edwin VanCleef: 3-mana Rogue legendary 2/2.

    Battlecry buffs himself +2 attack / +2 health per ``cards_played`` this
    turn (read via the Attribute selector on the owning player).
    """

    def __init__(self):
        super().__init__("Edwin VanCleef", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.LEGENDARY,
                         battlecry=Battlecry(Give([Buff(ChangeAttack(Attribute("cards_played", PlayerSelector()), 2)),
                                                  Buff(ChangeHealth(Attribute("cards_played", PlayerSelector()), 2))]),
                                             SelfSelector()))

    def create_minion(self, player):
        return Minion(2, 2)
class Kidnapper(MinionCard):
    """Kidnapper: 6-mana Rogue epic 5/3; combo bounces a chosen minion (either side) to hand."""

    def __init__(self):
        super().__init__("Kidnapper", 6, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC,
                         combo=Battlecry(Bounce(), MinionSelector(picker=UserPicker(), players=BothPlayer())))

    def create_minion(self, player):
        return Minion(5, 3)
class MasterOfDisguise(MinionCard):
    """Master of Disguise: 4-mana Rogue rare 4/4.

    Battlecry gives Stealth to a chosen friendly minion (MinionSelector
    defaults to friendly — confirm against the selector's default).
    """

    def __init__(self):
        super().__init__("Master of Disguise", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE,
                         battlecry=Battlecry(Give(Stealth()), MinionSelector(picker=UserPicker())))

    def create_minion(self, player):
        return Minion(4, 4)
class PatientAssassin(MinionCard):
    """Patient Assassin: 2-mana Rogue epic 1/1 with Stealth.

    Effect: whenever it deals damage to a minion, that minion is killed
    (the TargetSelector is filtered with IsMinion, so heroes are exempt).
    """

    def __init__(self):
        super().__init__("Patient Assassin", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)

    def create_minion(self, player):
        return Minion(1, 1, stealth=True, effects=[Effect(DidDamage(), ActionTag(Kill(), TargetSelector(IsMinion())))])
class SI7Agent(MinionCard):
    """SI:7 Agent: 3-mana Rogue rare 3/3; combo deals 2 damage to any chosen character."""

    def __init__(self):
        super().__init__("SI:7 Agent", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE,
                         combo=Battlecry(Damage(2), CharacterSelector(
                             players=BothPlayer(), picker=UserPicker())
                         ))

    def create_minion(self, player):
        return Minion(3, 3)
class AnubarAmbusher(MinionCard):
    """Anub'ar Ambusher: 4-mana Rogue common 5/5.

    Deathrattle bounces a random minion back to hand (MinionSelector with a
    RandomPicker — presumably friendly-only by default; confirm selector).
    """

    def __init__(self):
        super().__init__("Anub'ar Ambusher", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)

    def create_minion(self, player):
        return Minion(5, 5, deathrattle=Deathrattle(Bounce(), MinionSelector(picker=RandomPicker())))
class OneeyedCheat(MinionCard):
    """One-eyed Cheat: 2-mana Rogue rare 4/1 Pirate.

    Effect: whenever a Pirate is summoned, this minion gains Stealth.
    """

    def __init__(self):
        super().__init__("One-eyed Cheat", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE, minion_type=MINION_TYPE.PIRATE)

    def create_minion(self, player):
        return Minion(4, 1, effects=[Effect(MinionSummoned(IsType(MINION_TYPE.PIRATE)),
                                            ActionTag(Give(Stealth()), SelfSelector()))])
class IronSensei(MinionCard):
    """Iron Sensei: 3-mana Rogue rare 2/2 Mech.

    Effect: at the end of the turn, give a random Mech (selected via
    MinionSelector filtered to MECH) +2/+2.
    """

    def __init__(self):
        super().__init__("Iron Sensei", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)

    def create_minion(self, player):
        return Minion(2, 2, effects=[Effect(TurnEnded(), ActionTag(Give([Buff(ChangeAttack(2)), Buff(ChangeHealth(2))]),
                                     MinionSelector(IsType(MINION_TYPE.MECH), picker=RandomPicker())))])
class OgreNinja(MinionCard):
    """Ogre Ninja: 5-mana Rogue rare 6/6 with Stealth.

    Effect: on attack, with probability 1/2 (OneIn(2)) and only while the
    opponent controls at least one minion, retarget the attack to a random
    enemy character other than the current target.
    """

    def __init__(self):
        super().__init__("Ogre Ninja", 5, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)

    def create_minion(self, player):
        return Minion(6, 6, stealth=True, effects=[Effect(Attack(),
                                                          ActionTag(ChangeTarget(
                                                              CharacterSelector(NotCurrentTarget(),
                                                                                EnemyPlayer(),
                                                                                RandomPicker())),
                                                                    SelfSelector(),
                                                                    And(OneIn(2),
                                                                        OpponentMinionCountIsGreaterThan(0))))])
class TradePrinceGallywix(MinionCard):
    """Trade Prince Gallywix: 6-mana Rogue legendary 5/8.

    Two effects fire on every enemy spell cast (except "Gallywix's Coin",
    to avoid an infinite coin loop):
      1. a copy of the cast spell (LastCard) is added to the friendly hand;
      2. a Gallywix's Coin is added to the enemy hand.
    """

    def __init__(self):
        super().__init__("Trade Prince Gallywix", 6, CHARACTER_CLASS.ROGUE, CARD_RARITY.LEGENDARY)

    def create_minion(self, player):
        return Minion(5, 8, effects=[Effect(SpellCast(Not(HasCardName("Gallywix's Coin")), EnemyPlayer()),
                                            ActionTag(AddCard(LastCard()),
                                                      PlayerSelector(FriendlyPlayer()))),
                                     Effect(SpellCast(Not(HasCardName("Gallywix's Coin")), EnemyPlayer()),
                                            ActionTag(AddCard(GallywixsCoin()),
                                                      PlayerSelector(EnemyPlayer())))])
class GoblinAutoBarber(MinionCard):
    """Goblin Auto-Barber: 2-mana Rogue common 3/2 Mech; battlecry gives the equipped weapon +1 attack."""

    def __init__(self):
        super().__init__("Goblin Auto-Barber", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
                         minion_type=MINION_TYPE.MECH, battlecry=Battlecry(IncreaseWeaponAttack(1), WeaponSelector()))

    def create_minion(self, player):
        return Minion(3, 2)
class DarkIronSkulker(MinionCard):
    """Dark Iron Skulker: 5-mana Rogue rare 4/3.

    Battlecry deals 2 damage to every *undamaged* enemy minion
    (Not(IsDamaged()) filter on the enemy MinionSelector).
    """

    def __init__(self):
        super().__init__("Dark Iron Skulker", 5, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE,
                         battlecry=Battlecry(Damage(2), MinionSelector(condition=Not(IsDamaged()),
                                                                       players=EnemyPlayer())))

    def create_minion(self, player):
        return Minion(4, 3)
class Anubarak(MinionCard):
    """Anub'arak: 9-mana Rogue legendary 8/4.

    Deathrattle (a list — both fire on death): return this card to its
    owner's hand, and summon a Nerubian for the owner.
    """

    def __init__(self):
        super().__init__("Anub'arak", 9, CHARACTER_CLASS.ROGUE, CARD_RARITY.LEGENDARY)

    def create_minion(self, player):
        return Minion(8, 4, deathrattle=[Deathrattle(Bounce(), SelfSelector()),
                                         Deathrattle(Summon(Nerubian()), PlayerSelector())])
| mit | 68a09936317f4b050e15c801968852de | 44.347826 | 120 | 0.592796 | 3.670689 | false | false | false | false |
astanin/python-tabulate | test/test_output.py | 1 | 106667 | """Test output of the various forms of tabular data."""
import tabulate as tabulate_module
from common import assert_equal, raises, skip
from tabulate import tabulate, simple_separated_format, SEPARATING_LINE
# _test_table shows
# - coercion of a string to a number,
# - left alignment of text,
# - decimal point alignment of numbers
_test_table = [["spam", 41.9999], ["eggs", "451.0"]]
# Same rows with the SEPARATING_LINE sentinel between them, for formats
# that render an explicit horizontal rule between row groups.
_test_table_with_sep_line = [["spam", 41.9999], SEPARATING_LINE, ["eggs", "451.0"]]
# Header row paired with _test_table in most tests below.
_test_table_headers = ["strings", "numbers"]
# ---------------------------------------------------------------------------
# "plain" format: no horizontal rules, no borders, columns separated by two
# spaces.  Every `expected` literal below is a byte-exact fixture, including
# its internal padding — do not re-align or re-indent these strings.
# ---------------------------------------------------------------------------
def test_plain():
    "Output: plain with headers"
    expected = "\n".join(
        ["strings      numbers", "spam         41.9999", "eggs        451"]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="plain")
    assert_equal(expected, result)


def test_plain_headerless():
    "Output: plain without headers"
    expected = "\n".join(["spam   41.9999", "eggs  451"])
    result = tabulate(_test_table, tablefmt="plain")
    assert_equal(expected, result)


def test_plain_multiline_headerless():
    "Output: plain with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        ["foo bar    hello", "  baz", "  bau", "         multiline", "           world"]
    )
    result = tabulate(table, stralign="center", tablefmt="plain")
    assert_equal(expected, result)


def test_plain_multiline():
    "Output: plain with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    # \x1b[31m.../\x1b[0m is an ANSI color escape: width must be computed on
    # the visible characters only.
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "       more  more spam",
            "  spam \x1b[31meggs\x1b[0m  & eggs",
            "          2  foo",
            "             bar",
        ]
    )
    result = tabulate(table, headers, tablefmt="plain")
    assert_equal(expected, result)


def test_plain_multiline_with_links():
    "Output: plain with multiline cells with links and headers"
    table = [[2, "foo\nbar"]]
    # \x1b]8;;...\x1b\\ is an OSC 8 hyperlink escape; like colors, it has
    # zero display width.
    headers = ("more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs")
    expected = "\n".join(
        [
            "       more  more spam",
            "  spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\  & eggs",
            "          2  foo",
            "             bar",
        ]
    )
    result = tabulate(table, headers, tablefmt="plain")
    assert_equal(expected, result)


def test_plain_multiline_with_empty_cells():
    "Output: plain with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "  hdr  data            fold",
            "    1",
            "    2  very long data  fold",
            "                       this",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="plain")
    assert_equal(expected, result)


def test_plain_multiline_with_empty_cells_headerless():
    "Output: plain with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        ["0", "1", "2  very long data  fold", "                   this"]
    )
    result = tabulate(table, tablefmt="plain")
    assert_equal(expected, result)


def test_plain_maxcolwidth_autowraps():
    "Output: maxcolwidth will result in autowrapping longer cells"
    table = [["hdr", "fold"], ["1", "very long data"]]
    expected = "\n".join(["  hdr  fold", "    1  very long", "       data"])
    result = tabulate(
        table, headers="firstrow", tablefmt="plain", maxcolwidths=[10, 10]
    )
    assert_equal(expected, result)


def test_plain_maxcolwidth_autowraps_with_sep():
    "Output: maxcolwidth will result in autowrapping longer cells and separating line"
    table = [
        ["hdr", "fold"],
        ["1", "very long data"],
        SEPARATING_LINE,
        ["2", "last line"],
    ]
    # In "plain" format the separating line renders as an empty line.
    expected = "\n".join(
        ["  hdr  fold", "    1  very long", "       data", "", "    2  last line"]
    )
    result = tabulate(
        table, headers="firstrow", tablefmt="plain", maxcolwidths=[10, 10]
    )
    assert_equal(expected, result)


def test_plain_maxcolwidth_autowraps_wide_chars():
    "Output: maxcolwidth and autowrapping functions with wide characters"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_wrap_text_wide_chars is skipped")

    table = [
        ["hdr", "fold"],
        ["1", "약간 감싸면 더 잘 보일 수있는 다소 긴 설명입니다 설명입니다 설명입니다 설명입니다 설명"],
    ]
    expected = "\n".join(
        [
            "  hdr  fold",
            "    1  약간 감싸면 더 잘 보일 수있는",
            "       다소 긴 설명입니다 설명입니다",
            "       설명입니다 설명입니다 설명",
        ]
    )
    result = tabulate(
        table, headers="firstrow", tablefmt="plain", maxcolwidths=[10, 30]
    )
    assert_equal(expected, result)


def test_maxcolwidth_single_value():
    "Output: maxcolwidth can be specified as a single number that works for each column"
    table = [
        ["hdr", "fold1", "fold2"],
        ["mini", "this is short", "this is a bit longer"],
    ]
    expected = "\n".join(
        [
            "hdr    fold1    fold2",
            "mini   this     this",
            "       is       is a",
            "       short    bit",
            "                longer",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="plain", maxcolwidths=6)
    assert_equal(expected, result)


def test_maxcolwidth_pad_tailing_widths():
    "Output: maxcolwidth, if only partly specified, pads tailing cols with None"
    table = [
        ["hdr", "fold1", "fold2"],
        ["mini", "this is short", "this is a bit longer"],
    ]
    # Only column 1 is capped; column 2 is implicitly unbounded (None).
    expected = "\n".join(
        [
            "hdr    fold1    fold2",
            "mini   this     this is a bit longer",
            "       is",
            "       short",
        ]
    )
    result = tabulate(
        table, headers="firstrow", tablefmt="plain", maxcolwidths=[None, 6]
    )
    assert_equal(expected, result)


def test_maxcolwidth_honor_disable_parsenum():
    "Output: Using maxcolwidth in conjunction with disable_parsenum is honored"
    table = [
        ["first number", 123.456789, "123.456789"],
        ["second number", "987654321.123", "987654321.123"],
    ]
    expected = "\n".join(
        [
            "+--------+---------------+--------+",
            "| first  |       123.457 | 123.45 |",
            "| number |               | 6789   |",
            "+--------+---------------+--------+",
            "| second |   9.87654e+08 | 987654 |",
            "| number |               | 321.12 |",
            "|        |               | 3      |",
            "+--------+---------------+--------+",
        ]
    )
    # Grid makes showing the alignment difference a little easier
    result = tabulate(table, tablefmt="grid", maxcolwidths=6, disable_numparse=[2])
    assert_equal(expected, result)


def test_plain_maxheadercolwidths_autowraps():
    "Output: maxheadercolwidths will result in autowrapping header cell"
    table = [["hdr", "fold"], ["1", "very long data"]]
    expected = "\n".join(["  hdr  fo", "       ld", "    1  very long", "       data"])
    result = tabulate(
        table,
        headers="firstrow",
        tablefmt="plain",
        maxcolwidths=[10, 10],
        maxheadercolwidths=[None, 2],
    )
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "simple" format (the default): a dashed rule under the header row and, when
# headerless, above/below the body.  Fixtures are byte-exact — do not
# re-align the literal strings.
# ---------------------------------------------------------------------------
def test_simple():
    "Output: simple with headers"
    expected = "\n".join(
        [
            "strings      numbers",
            "---------  ---------",
            "spam         41.9999",
            "eggs         451",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="simple")
    assert_equal(expected, result)


def test_simple_with_sep_line():
    "Output: simple with headers and separating line"
    expected = "\n".join(
        [
            "strings      numbers",
            "---------  ---------",
            "spam         41.9999",
            "---------  ---------",
            "eggs         451",
        ]
    )
    result = tabulate(_test_table_with_sep_line, _test_table_headers, tablefmt="simple")
    assert_equal(expected, result)


def test_readme_example_with_sep():
    # Mirrors the SEPARATING_LINE example from the project README.
    table = [["Earth", 6371], ["Mars", 3390], SEPARATING_LINE, ["Moon", 1737]]
    expected = "\n".join(
        [
            "-----  ----",
            "Earth  6371",
            "Mars   3390",
            "-----  ----",
            "Moon   1737",
            "-----  ----",
        ]
    )
    result = tabulate(table, tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline_2():
    "Output: simple with multiline cells"
    expected = "\n".join(
        [
            " key     value",
            "-----  ---------",
            " foo      bar",
            "spam   multiline",
            "         world",
        ]
    )
    table = [["key", "value"], ["foo", "bar"], ["spam", "multiline\nworld"]]
    result = tabulate(table, headers="firstrow", stralign="center", tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline_2_with_sep_line():
    "Output: simple with multiline cells"
    expected = "\n".join(
        [
            " key     value",
            "-----  ---------",
            " foo      bar",
            "-----  ---------",
            "spam   multiline",
            "         world",
        ]
    )
    table = [
        ["key", "value"],
        ["foo", "bar"],
        SEPARATING_LINE,
        ["spam", "multiline\nworld"],
    ]
    result = tabulate(table, headers="firstrow", stralign="center", tablefmt="simple")
    assert_equal(expected, result)


def test_simple_headerless():
    "Output: simple without headers"
    expected = "\n".join(
        ["----  --------", "spam   41.9999", "eggs  451", "----  --------"]
    )
    result = tabulate(_test_table, tablefmt="simple")
    assert_equal(expected, result)


def test_simple_headerless_with_sep_line():
    "Output: simple without headers"
    expected = "\n".join(
        [
            "----  --------",
            "spam   41.9999",
            "----  --------",
            "eggs  451",
            "----  --------",
        ]
    )
    result = tabulate(_test_table_with_sep_line, tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline_headerless():
    "Output: simple with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "-------  ---------",
            "foo bar    hello",
            "  baz",
            "  bau",
            "         multiline",
            "           world",
            "-------  ---------",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline():
    "Output: simple with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    # ANSI color escapes in the header must not count toward column width.
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "       more  more spam",
            "  spam \x1b[31meggs\x1b[0m  & eggs",
            "-----------  -----------",
            "          2  foo",
            "             bar",
        ]
    )
    result = tabulate(table, headers, tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline_with_links():
    "Output: simple with multiline cells with links and headers"
    table = [[2, "foo\nbar"]]
    # OSC 8 hyperlink escapes must also be treated as zero-width.
    headers = ("more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs")
    expected = "\n".join(
        [
            "       more  more spam",
            "  spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\  & eggs",
            "-----------  -----------",
            "          2  foo",
            "             bar",
        ]
    )
    result = tabulate(table, headers, tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline_with_empty_cells():
    "Output: simple with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "  hdr  data            fold",
            "-----  --------------  ------",
            "    1",
            "    2  very long data  fold",
            "                       this",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="simple")
    assert_equal(expected, result)


def test_simple_multiline_with_empty_cells_headerless():
    "Output: simple with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        [
            "-  --------------  ----",
            "0",
            "1",
            "2  very long data  fold",
            "                   this",
            "-  --------------  ----",
        ]
    )
    result = tabulate(table, tablefmt="simple")
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "github" (GitHub-flavored Markdown pipe table) and "grid" (Emacs
# table.el-style ASCII grid) formats.  Fixtures are byte-exact.
# ---------------------------------------------------------------------------
def test_github():
    "Output: github with headers"
    expected = "\n".join(
        [
            "| strings   |   numbers |",
            "|-----------|-----------|",
            "| spam      |   41.9999 |",
            "| eggs      |  451      |",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="github")
    assert_equal(expected, result)


def test_grid():
    "Output: grid with headers"
    expected = "\n".join(
        [
            "+-----------+-----------+",
            "| strings   |   numbers |",
            "+===========+===========+",
            "| spam      |   41.9999 |",
            "+-----------+-----------+",
            "| eggs      |  451      |",
            "+-----------+-----------+",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="grid")
    assert_equal(expected, result)


def test_grid_wide_characters():
    "Output: grid with wide characters in headers"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_grid_wide_characters is skipped")
    headers = list(_test_table_headers)
    headers[1] = "配列"
    expected = "\n".join(
        [
            "+-----------+----------+",
            "| strings   |     配列 |",
            "+===========+==========+",
            "| spam      |  41.9999 |",
            "+-----------+----------+",
            "| eggs      | 451      |",
            "+-----------+----------+",
        ]
    )
    result = tabulate(_test_table, headers, tablefmt="grid")
    assert_equal(expected, result)


def test_grid_headerless():
    "Output: grid without headers"
    expected = "\n".join(
        [
            "+------+----------+",
            "| spam |  41.9999 |",
            "+------+----------+",
            "| eggs | 451      |",
            "+------+----------+",
        ]
    )
    result = tabulate(_test_table, tablefmt="grid")
    assert_equal(expected, result)


def test_grid_multiline_headerless():
    "Output: grid with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "+---------+-----------+",
            "| foo bar |   hello   |",
            "|   baz   |           |",
            "|   bau   |           |",
            "+---------+-----------+",
            "|         | multiline |",
            "|         |   world   |",
            "+---------+-----------+",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="grid")
    assert_equal(expected, result)


def test_grid_multiline():
    "Output: grid with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "+-------------+-------------+",
            "|        more | more spam   |",
            "|   spam \x1b[31meggs\x1b[0m | & eggs      |",
            "+=============+=============+",
            "|           2 | foo         |",
            "|             | bar         |",
            "+-------------+-------------+",
        ]
    )
    result = tabulate(table, headers, tablefmt="grid")
    assert_equal(expected, result)


def test_grid_multiline_with_empty_cells():
    "Output: grid with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "+-------+----------------+--------+",
            "|   hdr | data           | fold   |",
            "+=======+================+========+",
            "|     1 |                |        |",
            "+-------+----------------+--------+",
            "|     2 | very long data | fold   |",
            "|       |                | this   |",
            "+-------+----------------+--------+",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="grid")
    assert_equal(expected, result)


def test_grid_multiline_with_empty_cells_headerless():
    "Output: grid with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        [
            "+---+----------------+------+",
            "| 0 |                |      |",
            "+---+----------------+------+",
            "| 1 |                |      |",
            "+---+----------------+------+",
            "| 2 | very long data | fold |",
            "|   |                | this |",
            "+---+----------------+------+",
        ]
    )
    result = tabulate(table, tablefmt="grid")
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "simple_grid" format: Unicode light box-drawing borders (┌─┬─┐ …).
# Fixtures are byte-exact, including every box-drawing character.
# ---------------------------------------------------------------------------
def test_simple_grid():
    "Output: simple_grid with headers"
    expected = "\n".join(
        [
            "┌───────────┬───────────┐",
            "│ strings   │   numbers │",
            "├───────────┼───────────┤",
            "│ spam      │   41.9999 │",
            "├───────────┼───────────┤",
            "│ eggs      │  451      │",
            "└───────────┴───────────┘",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="simple_grid")
    assert_equal(expected, result)


def test_simple_grid_wide_characters():
    "Output: simple_grid with wide characters in headers"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_simple_grid_wide_characters is skipped")
    headers = list(_test_table_headers)
    headers[1] = "配列"
    expected = "\n".join(
        [
            "┌───────────┬──────────┐",
            "│ strings   │     配列 │",
            "├───────────┼──────────┤",
            "│ spam      │  41.9999 │",
            "├───────────┼──────────┤",
            "│ eggs      │ 451      │",
            "└───────────┴──────────┘",
        ]
    )
    result = tabulate(_test_table, headers, tablefmt="simple_grid")
    assert_equal(expected, result)


def test_simple_grid_headerless():
    "Output: simple_grid without headers"
    expected = "\n".join(
        [
            "┌──────┬──────────┐",
            "│ spam │  41.9999 │",
            "├──────┼──────────┤",
            "│ eggs │ 451      │",
            "└──────┴──────────┘",
        ]
    )
    result = tabulate(_test_table, tablefmt="simple_grid")
    assert_equal(expected, result)


def test_simple_grid_multiline_headerless():
    "Output: simple_grid with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "┌─────────┬───────────┐",
            "│ foo bar │   hello   │",
            "│   baz   │           │",
            "│   bau   │           │",
            "├─────────┼───────────┤",
            "│         │ multiline │",
            "│         │   world   │",
            "└─────────┴───────────┘",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="simple_grid")
    assert_equal(expected, result)


def test_simple_grid_multiline():
    "Output: simple_grid with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "┌─────────────┬─────────────┐",
            "│        more │ more spam   │",
            "│   spam \x1b[31meggs\x1b[0m │ & eggs      │",
            "├─────────────┼─────────────┤",
            "│           2 │ foo         │",
            "│             │ bar         │",
            "└─────────────┴─────────────┘",
        ]
    )
    result = tabulate(table, headers, tablefmt="simple_grid")
    assert_equal(expected, result)


def test_simple_grid_multiline_with_empty_cells():
    "Output: simple_grid with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "┌───────┬────────────────┬────────┐",
            "│   hdr │ data           │ fold   │",
            "├───────┼────────────────┼────────┤",
            "│     1 │                │        │",
            "├───────┼────────────────┼────────┤",
            "│     2 │ very long data │ fold   │",
            "│       │                │ this   │",
            "└───────┴────────────────┴────────┘",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="simple_grid")
    assert_equal(expected, result)


def test_simple_grid_multiline_with_empty_cells_headerless():
    "Output: simple_grid with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        [
            "┌───┬────────────────┬──────┐",
            "│ 0 │                │      │",
            "├───┼────────────────┼──────┤",
            "│ 1 │                │      │",
            "├───┼────────────────┼──────┤",
            "│ 2 │ very long data │ fold │",
            "│   │                │ this │",
            "└───┴────────────────┴──────┘",
        ]
    )
    result = tabulate(table, tablefmt="simple_grid")
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "rounded_grid" format: like simple_grid but with rounded corners (╭ ╮ ╰ ╯).
# Fixtures are byte-exact.
# ---------------------------------------------------------------------------
def test_rounded_grid():
    "Output: rounded_grid with headers"
    expected = "\n".join(
        [
            "╭───────────┬───────────╮",
            "│ strings   │   numbers │",
            "├───────────┼───────────┤",
            "│ spam      │   41.9999 │",
            "├───────────┼───────────┤",
            "│ eggs      │  451      │",
            "╰───────────┴───────────╯",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="rounded_grid")
    assert_equal(expected, result)


def test_rounded_grid_wide_characters():
    "Output: rounded_grid with wide characters in headers"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_rounded_grid_wide_characters is skipped")
    headers = list(_test_table_headers)
    headers[1] = "配列"
    expected = "\n".join(
        [
            "╭───────────┬──────────╮",
            "│ strings   │     配列 │",
            "├───────────┼──────────┤",
            "│ spam      │  41.9999 │",
            "├───────────┼──────────┤",
            "│ eggs      │ 451      │",
            "╰───────────┴──────────╯",
        ]
    )
    result = tabulate(_test_table, headers, tablefmt="rounded_grid")
    assert_equal(expected, result)


def test_rounded_grid_headerless():
    "Output: rounded_grid without headers"
    expected = "\n".join(
        [
            "╭──────┬──────────╮",
            "│ spam │  41.9999 │",
            "├──────┼──────────┤",
            "│ eggs │ 451      │",
            "╰──────┴──────────╯",
        ]
    )
    result = tabulate(_test_table, tablefmt="rounded_grid")
    assert_equal(expected, result)


def test_rounded_grid_multiline_headerless():
    "Output: rounded_grid with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "╭─────────┬───────────╮",
            "│ foo bar │   hello   │",
            "│   baz   │           │",
            "│   bau   │           │",
            "├─────────┼───────────┤",
            "│         │ multiline │",
            "│         │   world   │",
            "╰─────────┴───────────╯",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="rounded_grid")
    assert_equal(expected, result)


def test_rounded_grid_multiline():
    "Output: rounded_grid with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "╭─────────────┬─────────────╮",
            "│        more │ more spam   │",
            "│   spam \x1b[31meggs\x1b[0m │ & eggs      │",
            "├─────────────┼─────────────┤",
            "│           2 │ foo         │",
            "│             │ bar         │",
            "╰─────────────┴─────────────╯",
        ]
    )
    result = tabulate(table, headers, tablefmt="rounded_grid")
    assert_equal(expected, result)


def test_rounded_grid_multiline_with_empty_cells():
    "Output: rounded_grid with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "╭───────┬────────────────┬────────╮",
            "│   hdr │ data           │ fold   │",
            "├───────┼────────────────┼────────┤",
            "│     1 │                │        │",
            "├───────┼────────────────┼────────┤",
            "│     2 │ very long data │ fold   │",
            "│       │                │ this   │",
            "╰───────┴────────────────┴────────╯",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="rounded_grid")
    assert_equal(expected, result)


def test_rounded_grid_multiline_with_empty_cells_headerless():
    "Output: rounded_grid with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        [
            "╭───┬────────────────┬──────╮",
            "│ 0 │                │      │",
            "├───┼────────────────┼──────┤",
            "│ 1 │                │      │",
            "├───┼────────────────┼──────┤",
            "│ 2 │ very long data │ fold │",
            "│   │                │ this │",
            "╰───┴────────────────┴──────╯",
        ]
    )
    result = tabulate(table, tablefmt="rounded_grid")
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "heavy_grid" format: Unicode heavy box-drawing borders (┏━┳━┓ …).
# Fixtures are byte-exact.
# ---------------------------------------------------------------------------
def test_heavy_grid():
    "Output: heavy_grid with headers"
    expected = "\n".join(
        [
            "┏━━━━━━━━━━━┳━━━━━━━━━━━┓",
            "┃ strings   ┃   numbers ┃",
            "┣━━━━━━━━━━━╋━━━━━━━━━━━┫",
            "┃ spam      ┃   41.9999 ┃",
            "┣━━━━━━━━━━━╋━━━━━━━━━━━┫",
            "┃ eggs      ┃  451      ┃",
            "┗━━━━━━━━━━━┻━━━━━━━━━━━┛",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="heavy_grid")
    assert_equal(expected, result)


def test_heavy_grid_wide_characters():
    "Output: heavy_grid with wide characters in headers"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_heavy_grid_wide_characters is skipped")
    headers = list(_test_table_headers)
    headers[1] = "配列"
    expected = "\n".join(
        [
            "┏━━━━━━━━━━━┳━━━━━━━━━━┓",
            "┃ strings   ┃     配列 ┃",
            "┣━━━━━━━━━━━╋━━━━━━━━━━┫",
            "┃ spam      ┃  41.9999 ┃",
            "┣━━━━━━━━━━━╋━━━━━━━━━━┫",
            "┃ eggs      ┃ 451      ┃",
            "┗━━━━━━━━━━━┻━━━━━━━━━━┛",
        ]
    )
    result = tabulate(_test_table, headers, tablefmt="heavy_grid")
    assert_equal(expected, result)


def test_heavy_grid_headerless():
    "Output: heavy_grid without headers"
    expected = "\n".join(
        [
            "┏━━━━━━┳━━━━━━━━━━┓",
            "┃ spam ┃  41.9999 ┃",
            "┣━━━━━━╋━━━━━━━━━━┫",
            "┃ eggs ┃ 451      ┃",
            "┗━━━━━━┻━━━━━━━━━━┛",
        ]
    )
    result = tabulate(_test_table, tablefmt="heavy_grid")
    assert_equal(expected, result)


def test_heavy_grid_multiline_headerless():
    "Output: heavy_grid with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "┏━━━━━━━━━┳━━━━━━━━━━━┓",
            "┃ foo bar ┃   hello   ┃",
            "┃   baz   ┃           ┃",
            "┃   bau   ┃           ┃",
            "┣━━━━━━━━━╋━━━━━━━━━━━┫",
            "┃         ┃ multiline ┃",
            "┃         ┃   world   ┃",
            "┗━━━━━━━━━┻━━━━━━━━━━━┛",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="heavy_grid")
    assert_equal(expected, result)


def test_heavy_grid_multiline():
    "Output: heavy_grid with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "┏━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓",
            "┃        more ┃ more spam   ┃",
            "┃   spam \x1b[31meggs\x1b[0m ┃ & eggs      ┃",
            "┣━━━━━━━━━━━━━╋━━━━━━━━━━━━━┫",
            "┃           2 ┃ foo         ┃",
            "┃             ┃ bar         ┃",
            "┗━━━━━━━━━━━━━┻━━━━━━━━━━━━━┛",
        ]
    )
    result = tabulate(table, headers, tablefmt="heavy_grid")
    assert_equal(expected, result)


def test_heavy_grid_multiline_with_empty_cells():
    "Output: heavy_grid with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "┏━━━━━━━┳━━━━━━━━━━━━━━━━┳━━━━━━━━┓",
            "┃   hdr ┃ data           ┃ fold   ┃",
            "┣━━━━━━━╋━━━━━━━━━━━━━━━━╋━━━━━━━━┫",
            "┃     1 ┃                ┃        ┃",
            "┣━━━━━━━╋━━━━━━━━━━━━━━━━╋━━━━━━━━┫",
            "┃     2 ┃ very long data ┃ fold   ┃",
            "┃       ┃                ┃ this   ┃",
            "┗━━━━━━━┻━━━━━━━━━━━━━━━━┻━━━━━━━━┛",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="heavy_grid")
    assert_equal(expected, result)


def test_heavy_grid_multiline_with_empty_cells_headerless():
    "Output: heavy_grid with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        [
            "┏━━━┳━━━━━━━━━━━━━━━━┳━━━━━━┓",
            "┃ 0 ┃                ┃      ┃",
            "┣━━━╋━━━━━━━━━━━━━━━━╋━━━━━━┫",
            "┃ 1 ┃                ┃      ┃",
            "┣━━━╋━━━━━━━━━━━━━━━━╋━━━━━━┫",
            "┃ 2 ┃ very long data ┃ fold ┃",
            "┃   ┃                ┃ this ┃",
            "┗━━━┻━━━━━━━━━━━━━━━━┻━━━━━━┛",
        ]
    )
    result = tabulate(table, tablefmt="heavy_grid")
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "mixed_grid" format: heavy outer frame and header rule, light interior
# rules (┍━┯━┑ / ├─┼─┤).  Fixtures are byte-exact.
# ---------------------------------------------------------------------------
def test_mixed_grid():
    "Output: mixed_grid with headers"
    expected = "\n".join(
        [
            "┍━━━━━━━━━━━┯━━━━━━━━━━━┑",
            "│ strings   │   numbers │",
            "┝━━━━━━━━━━━┿━━━━━━━━━━━┥",
            "│ spam      │   41.9999 │",
            "├───────────┼───────────┤",
            "│ eggs      │  451      │",
            "┕━━━━━━━━━━━┷━━━━━━━━━━━┙",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="mixed_grid")
    assert_equal(expected, result)


def test_mixed_grid_wide_characters():
    "Output: mixed_grid with wide characters in headers"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_mixed_grid_wide_characters is skipped")
    headers = list(_test_table_headers)
    headers[1] = "配列"
    expected = "\n".join(
        [
            "┍━━━━━━━━━━━┯━━━━━━━━━━┑",
            "│ strings   │     配列 │",
            "┝━━━━━━━━━━━┿━━━━━━━━━━┥",
            "│ spam      │  41.9999 │",
            "├───────────┼──────────┤",
            "│ eggs      │ 451      │",
            "┕━━━━━━━━━━━┷━━━━━━━━━━┙",
        ]
    )
    result = tabulate(_test_table, headers, tablefmt="mixed_grid")
    assert_equal(expected, result)


def test_mixed_grid_headerless():
    "Output: mixed_grid without headers"
    expected = "\n".join(
        [
            "┍━━━━━━┯━━━━━━━━━━┑",
            "│ spam │  41.9999 │",
            "├──────┼──────────┤",
            "│ eggs │ 451      │",
            "┕━━━━━━┷━━━━━━━━━━┙",
        ]
    )
    result = tabulate(_test_table, tablefmt="mixed_grid")
    assert_equal(expected, result)


def test_mixed_grid_multiline_headerless():
    "Output: mixed_grid with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "┍━━━━━━━━━┯━━━━━━━━━━━┑",
            "│ foo bar │   hello   │",
            "│   baz   │           │",
            "│   bau   │           │",
            "├─────────┼───────────┤",
            "│         │ multiline │",
            "│         │   world   │",
            "┕━━━━━━━━━┷━━━━━━━━━━━┙",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="mixed_grid")
    assert_equal(expected, result)


def test_mixed_grid_multiline():
    "Output: mixed_grid with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "┍━━━━━━━━━━━━━┯━━━━━━━━━━━━━┑",
            "│        more │ more spam   │",
            "│   spam \x1b[31meggs\x1b[0m │ & eggs      │",
            "┝━━━━━━━━━━━━━┿━━━━━━━━━━━━━┥",
            "│           2 │ foo         │",
            "│             │ bar         │",
            "┕━━━━━━━━━━━━━┷━━━━━━━━━━━━━┙",
        ]
    )
    result = tabulate(table, headers, tablefmt="mixed_grid")
    assert_equal(expected, result)


def test_mixed_grid_multiline_with_empty_cells():
    "Output: mixed_grid with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "┍━━━━━━━┯━━━━━━━━━━━━━━━━┯━━━━━━━━┑",
            "│   hdr │ data           │ fold   │",
            "┝━━━━━━━┿━━━━━━━━━━━━━━━━┿━━━━━━━━┥",
            "│     1 │                │        │",
            "├───────┼────────────────┼────────┤",
            "│     2 │ very long data │ fold   │",
            "│       │                │ this   │",
            "┕━━━━━━━┷━━━━━━━━━━━━━━━━┷━━━━━━━━┙",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="mixed_grid")
    assert_equal(expected, result)


def test_mixed_grid_multiline_with_empty_cells_headerless():
    "Output: mixed_grid with multiline cells and empty cells without headers"
    table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
    expected = "\n".join(
        [
            "┍━━━┯━━━━━━━━━━━━━━━━┯━━━━━━┑",
            "│ 0 │                │      │",
            "├───┼────────────────┼──────┤",
            "│ 1 │                │      │",
            "├───┼────────────────┼──────┤",
            "│ 2 │ very long data │ fold │",
            "│   │                │ this │",
            "┕━━━┷━━━━━━━━━━━━━━━━┷━━━━━━┙",
        ]
    )
    result = tabulate(table, tablefmt="mixed_grid")
    assert_equal(expected, result)
# ---------------------------------------------------------------------------
# "double_grid" format: Unicode double-line box-drawing borders (╔═╦═╗ …).
# Fixtures are byte-exact.
# ---------------------------------------------------------------------------
def test_double_grid():
    "Output: double_grid with headers"
    expected = "\n".join(
        [
            "╔═══════════╦═══════════╗",
            "║ strings   ║   numbers ║",
            "╠═══════════╬═══════════╣",
            "║ spam      ║   41.9999 ║",
            "╠═══════════╬═══════════╣",
            "║ eggs      ║  451      ║",
            "╚═══════════╩═══════════╝",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="double_grid")
    assert_equal(expected, result)


def test_double_grid_wide_characters():
    "Output: double_grid with wide characters in headers"
    # Requires the optional wcwidth dependency for double-width glyph widths.
    try:
        import wcwidth  # noqa
    except ImportError:
        skip("test_double_grid_wide_characters is skipped")
    headers = list(_test_table_headers)
    headers[1] = "配列"
    expected = "\n".join(
        [
            "╔═══════════╦══════════╗",
            "║ strings   ║     配列 ║",
            "╠═══════════╬══════════╣",
            "║ spam      ║  41.9999 ║",
            "╠═══════════╬══════════╣",
            "║ eggs      ║ 451      ║",
            "╚═══════════╩══════════╝",
        ]
    )
    result = tabulate(_test_table, headers, tablefmt="double_grid")
    assert_equal(expected, result)


def test_double_grid_headerless():
    "Output: double_grid without headers"
    expected = "\n".join(
        [
            "╔══════╦══════════╗",
            "║ spam ║  41.9999 ║",
            "╠══════╬══════════╣",
            "║ eggs ║ 451      ║",
            "╚══════╩══════════╝",
        ]
    )
    result = tabulate(_test_table, tablefmt="double_grid")
    assert_equal(expected, result)


def test_double_grid_multiline_headerless():
    "Output: double_grid with multiline cells without headers"
    table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
    expected = "\n".join(
        [
            "╔═════════╦═══════════╗",
            "║ foo bar ║   hello   ║",
            "║   baz   ║           ║",
            "║   bau   ║           ║",
            "╠═════════╬═══════════╣",
            "║         ║ multiline ║",
            "║         ║   world   ║",
            "╚═════════╩═══════════╝",
        ]
    )
    result = tabulate(table, stralign="center", tablefmt="double_grid")
    assert_equal(expected, result)


def test_double_grid_multiline():
    "Output: double_grid with multiline cells with headers"
    table = [[2, "foo\nbar"]]
    headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
    expected = "\n".join(
        [
            "╔═════════════╦═════════════╗",
            "║        more ║ more spam   ║",
            "║   spam \x1b[31meggs\x1b[0m ║ & eggs      ║",
            "╠═════════════╬═════════════╣",
            "║           2 ║ foo         ║",
            "║             ║ bar         ║",
            "╚═════════════╩═════════════╝",
        ]
    )
    result = tabulate(table, headers, tablefmt="double_grid")
    assert_equal(expected, result)


def test_double_grid_multiline_with_empty_cells():
    "Output: double_grid with multiline cells and empty cells with headers"
    table = [
        ["hdr", "data", "fold"],
        ["1", "", ""],
        ["2", "very long data", "fold\nthis"],
    ]
    expected = "\n".join(
        [
            "╔═══════╦════════════════╦════════╗",
            "║   hdr ║ data           ║ fold   ║",
            "╠═══════╬════════════════╬════════╣",
            "║     1 ║                ║        ║",
            "╠═══════╬════════════════╬════════╣",
            "║     2 ║ very long data ║ fold   ║",
            "║       ║                ║ this   ║",
            "╚═══════╩════════════════╩════════╝",
        ]
    )
    result = tabulate(table, headers="firstrow", tablefmt="double_grid")
    assert_equal(expected, result)
def test_double_grid_multiline_with_empty_cells_headerless():
"Output: double_grid with multiline cells and empty cells without headers"
table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
expected = "\n".join(
[
"╔═══╦════════════════╦══════╗",
"║ 0 ║ ║ ║",
"╠═══╬════════════════╬══════╣",
"║ 1 ║ ║ ║",
"╠═══╬════════════════╬══════╣",
"║ 2 ║ very long data ║ fold ║",
"║ ║ ║ this ║",
"╚═══╩════════════════╩══════╝",
]
)
result = tabulate(table, tablefmt="double_grid")
assert_equal(expected, result)
def test_fancy_grid():
    "Output: fancy_grid with headers"
    # Light box drawing with a double rule under the header row only.
    expected = "\n".join(
        [
            "╒═══════════╤═══════════╕",
            "│ strings   │   numbers │",
            "╞═══════════╪═══════════╡",
            "│ spam      │   41.9999 │",
            "├───────────┼───────────┤",
            "│ eggs      │  451      │",
            "╘═══════════╧═══════════╛",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, tablefmt="fancy_grid")
    assert_equal(expected, result)
def test_fancy_grid_wide_characters():
"Output: fancy_grid with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_fancy_grid_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"╒═══════════╤══════════╕",
"│ strings │ 配列 │",
"╞═══════════╪══════════╡",
"│ spam │ 41.9999 │",
"├───────────┼──────────┤",
"│ eggs │ 451 │",
"╘═══════════╧══════════╛",
]
)
result = tabulate(_test_table, headers, tablefmt="fancy_grid")
assert_equal(expected, result)
def test_fancy_grid_headerless():
"Output: fancy_grid without headers"
expected = "\n".join(
[
"╒══════╤══════════╕",
"│ spam │ 41.9999 │",
"├──────┼──────────┤",
"│ eggs │ 451 │",
"╘══════╧══════════╛",
]
)
result = tabulate(_test_table, tablefmt="fancy_grid")
assert_equal(expected, result)
def test_fancy_grid_multiline_headerless():
"Output: fancy_grid with multiline cells without headers"
table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
expected = "\n".join(
[
"╒═════════╤═══════════╕",
"│ foo bar │ hello │",
"│ baz │ │",
"│ bau │ │",
"├─────────┼───────────┤",
"│ │ multiline │",
"│ │ world │",
"╘═════════╧═══════════╛",
]
)
result = tabulate(table, stralign="center", tablefmt="fancy_grid")
assert_equal(expected, result)
def test_fancy_grid_multiline():
"Output: fancy_grid with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
expected = "\n".join(
[
"╒═════════════╤═════════════╕",
"│ more │ more spam │",
"│ spam \x1b[31meggs\x1b[0m │ & eggs │",
"╞═════════════╪═════════════╡",
"│ 2 │ foo │",
"│ │ bar │",
"╘═════════════╧═════════════╛",
]
)
result = tabulate(table, headers, tablefmt="fancy_grid")
assert_equal(expected, result)
def test_fancy_grid_multiline_with_empty_cells():
"Output: fancy_grid with multiline cells and empty cells with headers"
table = [
["hdr", "data", "fold"],
["1", "", ""],
["2", "very long data", "fold\nthis"],
]
expected = "\n".join(
[
"╒═══════╤════════════════╤════════╕",
"│ hdr │ data │ fold │",
"╞═══════╪════════════════╪════════╡",
"│ 1 │ │ │",
"├───────┼────────────────┼────────┤",
"│ 2 │ very long data │ fold │",
"│ │ │ this │",
"╘═══════╧════════════════╧════════╛",
]
)
result = tabulate(table, headers="firstrow", tablefmt="fancy_grid")
assert_equal(expected, result)
def test_fancy_grid_multiline_with_empty_cells_headerless():
"Output: fancy_grid with multiline cells and empty cells without headers"
table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
expected = "\n".join(
[
"╒═══╤════════════════╤══════╕",
"│ 0 │ │ │",
"├───┼────────────────┼──────┤",
"│ 1 │ │ │",
"├───┼────────────────┼──────┤",
"│ 2 │ very long data │ fold │",
"│ │ │ this │",
"╘═══╧════════════════╧══════╛",
]
)
result = tabulate(table, tablefmt="fancy_grid")
assert_equal(expected, result)
def test_fancy_grid_multiline_row_align():
"Output: fancy_grid with multiline cells aligning some text not to top of cell"
table = [
["0", "some\ndefault\ntext", "up\ntop"],
["1", "very\nlong\ndata\ncell", "mid\ntest"],
["2", "also\nvery\nlong\ndata\ncell", "fold\nthis"],
]
expected = "\n".join(
[
"╒═══╤═════════╤══════╕",
"│ 0 │ some │ up │",
"│ │ default │ top │",
"│ │ text │ │",
"├───┼─────────┼──────┤",
"│ │ very │ │",
"│ 1 │ long │ mid │",
"│ │ data │ test │",
"│ │ cell │ │",
"├───┼─────────┼──────┤",
"│ │ also │ │",
"│ │ very │ │",
"│ │ long │ │",
"│ │ data │ fold │",
"│ 2 │ cell │ this │",
"╘═══╧═════════╧══════╛",
]
)
result = tabulate(table, tablefmt="fancy_grid", rowalign=[None, "center", "bottom"])
assert_equal(expected, result)
def test_outline():
"Output: outline with headers"
expected = "\n".join(
[
"+-----------+-----------+",
"| strings | numbers |",
"+===========+===========+",
"| spam | 41.9999 |",
"| eggs | 451 |",
"+-----------+-----------+",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="outline")
assert_equal(expected, result)
def test_outline_wide_characters():
"Output: outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"+-----------+----------+",
"| strings | 配列 |",
"+===========+==========+",
"| spam | 41.9999 |",
"| eggs | 451 |",
"+-----------+----------+",
]
)
result = tabulate(_test_table, headers, tablefmt="outline")
assert_equal(expected, result)
def test_outline_headerless():
"Output: outline without headers"
expected = "\n".join(
[
"+------+----------+",
"| spam | 41.9999 |",
"| eggs | 451 |",
"+------+----------+",
]
)
result = tabulate(_test_table, tablefmt="outline")
assert_equal(expected, result)
def test_simple_outline():
"Output: simple_outline with headers"
expected = "\n".join(
[
"┌───────────┬───────────┐",
"│ strings │ numbers │",
"├───────────┼───────────┤",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"└───────────┴───────────┘",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="simple_outline")
assert_equal(expected, result)
def test_simple_outline_wide_characters():
"Output: simple_outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_simple_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"┌───────────┬──────────┐",
"│ strings │ 配列 │",
"├───────────┼──────────┤",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"└───────────┴──────────┘",
]
)
result = tabulate(_test_table, headers, tablefmt="simple_outline")
assert_equal(expected, result)
def test_simple_outline_headerless():
"Output: simple_outline without headers"
expected = "\n".join(
[
"┌──────┬──────────┐",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"└──────┴──────────┘",
]
)
result = tabulate(_test_table, tablefmt="simple_outline")
assert_equal(expected, result)
def test_rounded_outline():
"Output: rounded_outline with headers"
expected = "\n".join(
[
"╭───────────┬───────────╮",
"│ strings │ numbers │",
"├───────────┼───────────┤",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"╰───────────┴───────────╯",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="rounded_outline")
assert_equal(expected, result)
def test_rounded_outline_wide_characters():
"Output: rounded_outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_rounded_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"╭───────────┬──────────╮",
"│ strings │ 配列 │",
"├───────────┼──────────┤",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"╰───────────┴──────────╯",
]
)
result = tabulate(_test_table, headers, tablefmt="rounded_outline")
assert_equal(expected, result)
def test_rounded_outline_headerless():
"Output: rounded_outline without headers"
expected = "\n".join(
[
"╭──────┬──────────╮",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"╰──────┴──────────╯",
]
)
result = tabulate(_test_table, tablefmt="rounded_outline")
assert_equal(expected, result)
def test_heavy_outline():
"Output: heavy_outline with headers"
expected = "\n".join(
[
"┏━━━━━━━━━━━┳━━━━━━━━━━━┓",
"┃ strings ┃ numbers ┃",
"┣━━━━━━━━━━━╋━━━━━━━━━━━┫",
"┃ spam ┃ 41.9999 ┃",
"┃ eggs ┃ 451 ┃",
"┗━━━━━━━━━━━┻━━━━━━━━━━━┛",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="heavy_outline")
assert_equal(expected, result)
def test_heavy_outline_wide_characters():
"Output: heavy_outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_heavy_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"┏━━━━━━━━━━━┳━━━━━━━━━━┓",
"┃ strings ┃ 配列 ┃",
"┣━━━━━━━━━━━╋━━━━━━━━━━┫",
"┃ spam ┃ 41.9999 ┃",
"┃ eggs ┃ 451 ┃",
"┗━━━━━━━━━━━┻━━━━━━━━━━┛",
]
)
result = tabulate(_test_table, headers, tablefmt="heavy_outline")
assert_equal(expected, result)
def test_heavy_outline_headerless():
"Output: heavy_outline without headers"
expected = "\n".join(
[
"┏━━━━━━┳━━━━━━━━━━┓",
"┃ spam ┃ 41.9999 ┃",
"┃ eggs ┃ 451 ┃",
"┗━━━━━━┻━━━━━━━━━━┛",
]
)
result = tabulate(_test_table, tablefmt="heavy_outline")
assert_equal(expected, result)
def test_mixed_outline():
"Output: mixed_outline with headers"
expected = "\n".join(
[
"┍━━━━━━━━━━━┯━━━━━━━━━━━┑",
"│ strings │ numbers │",
"┝━━━━━━━━━━━┿━━━━━━━━━━━┥",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"┕━━━━━━━━━━━┷━━━━━━━━━━━┙",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="mixed_outline")
assert_equal(expected, result)
def test_mixed_outline_wide_characters():
"Output: mixed_outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_mixed_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"┍━━━━━━━━━━━┯━━━━━━━━━━┑",
"│ strings │ 配列 │",
"┝━━━━━━━━━━━┿━━━━━━━━━━┥",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"┕━━━━━━━━━━━┷━━━━━━━━━━┙",
]
)
result = tabulate(_test_table, headers, tablefmt="mixed_outline")
assert_equal(expected, result)
def test_mixed_outline_headerless():
"Output: mixed_outline without headers"
expected = "\n".join(
[
"┍━━━━━━┯━━━━━━━━━━┑",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"┕━━━━━━┷━━━━━━━━━━┙",
]
)
result = tabulate(_test_table, tablefmt="mixed_outline")
assert_equal(expected, result)
def test_double_outline():
"Output: double_outline with headers"
expected = "\n".join(
[
"╔═══════════╦═══════════╗",
"║ strings ║ numbers ║",
"╠═══════════╬═══════════╣",
"║ spam ║ 41.9999 ║",
"║ eggs ║ 451 ║",
"╚═══════════╩═══════════╝",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="double_outline")
assert_equal(expected, result)
def test_double_outline_wide_characters():
"Output: double_outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_double_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"╔═══════════╦══════════╗",
"║ strings ║ 配列 ║",
"╠═══════════╬══════════╣",
"║ spam ║ 41.9999 ║",
"║ eggs ║ 451 ║",
"╚═══════════╩══════════╝",
]
)
result = tabulate(_test_table, headers, tablefmt="double_outline")
assert_equal(expected, result)
def test_double_outline_headerless():
"Output: double_outline without headers"
expected = "\n".join(
[
"╔══════╦══════════╗",
"║ spam ║ 41.9999 ║",
"║ eggs ║ 451 ║",
"╚══════╩══════════╝",
]
)
result = tabulate(_test_table, tablefmt="double_outline")
assert_equal(expected, result)
def test_fancy_outline():
"Output: fancy_outline with headers"
expected = "\n".join(
[
"╒═══════════╤═══════════╕",
"│ strings │ numbers │",
"╞═══════════╪═══════════╡",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"╘═══════════╧═══════════╛",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="fancy_outline")
assert_equal(expected, result)
def test_fancy_outline_wide_characters():
"Output: fancy_outline with wide characters in headers"
try:
import wcwidth # noqa
except ImportError:
skip("test_fancy_outline_wide_characters is skipped")
headers = list(_test_table_headers)
headers[1] = "配列"
expected = "\n".join(
[
"╒═══════════╤══════════╕",
"│ strings │ 配列 │",
"╞═══════════╪══════════╡",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"╘═══════════╧══════════╛",
]
)
result = tabulate(_test_table, headers, tablefmt="fancy_outline")
assert_equal(expected, result)
def test_fancy_outline_headerless():
"Output: fancy_outline without headers"
expected = "\n".join(
[
"╒══════╤══════════╕",
"│ spam │ 41.9999 │",
"│ eggs │ 451 │",
"╘══════╧══════════╛",
]
)
result = tabulate(_test_table, tablefmt="fancy_outline")
assert_equal(expected, result)
def test_pipe():
    "Output: pipe with headers"
    # GitHub-flavoured Markdown table; the separator row encodes alignment
    # with ':' markers (left for strings, right for numbers).
    rows = (
        "| strings   |   numbers |",
        "|:----------|----------:|",
        "| spam      |   41.9999 |",
        "| eggs      |  451      |",
    )
    actual = tabulate(_test_table, _test_table_headers, tablefmt="pipe")
    assert_equal("\n".join(rows), actual)
def test_pipe_headerless():
    "Output: pipe without headers"
    # Without headers only the alignment row precedes the data rows.
    actual = tabulate(_test_table, tablefmt="pipe")
    expected = (
        "|:-----|---------:|"
        "\n| spam |  41.9999 |"
        "\n| eggs | 451      |"
    )
    assert_equal(expected, actual)
def test_presto():
    "Output: presto with headers"
    # Presto CLI style: space-padded columns joined by ' | ', one '+'-jointed
    # rule under the header.
    rows = (
        " strings   |   numbers",
        "-----------+-----------",
        " spam      |   41.9999",
        " eggs      |  451",
    )
    actual = tabulate(_test_table, _test_table_headers, tablefmt="presto")
    assert_equal("\n".join(rows), actual)
def test_presto_headerless():
    "Output: presto without headers"
    # No header means no separator rule at all.
    actual = tabulate(_test_table, tablefmt="presto")
    assert_equal(" spam |  41.9999\n eggs | 451", actual)
def test_presto_multiline_headerless():
"Output: presto with multiline cells without headers"
table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
expected = "\n".join(
[
" foo bar | hello",
" baz |",
" bau |",
" | multiline",
" | world",
]
)
result = tabulate(table, stralign="center", tablefmt="presto")
assert_equal(expected, result)
def test_presto_multiline():
"Output: presto with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
expected = "\n".join(
[
" more | more spam",
" spam \x1b[31meggs\x1b[0m | & eggs",
"-------------+-------------",
" 2 | foo",
" | bar",
]
)
result = tabulate(table, headers, tablefmt="presto")
assert_equal(expected, result)
def test_presto_multiline_with_empty_cells():
"Output: presto with multiline cells and empty cells with headers"
table = [
["hdr", "data", "fold"],
["1", "", ""],
["2", "very long data", "fold\nthis"],
]
expected = "\n".join(
[
" hdr | data | fold",
"-------+----------------+--------",
" 1 | |",
" 2 | very long data | fold",
" | | this",
]
)
result = tabulate(table, headers="firstrow", tablefmt="presto")
assert_equal(expected, result)
def test_presto_multiline_with_empty_cells_headerless():
"Output: presto with multiline cells and empty cells without headers"
table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
expected = "\n".join(
[
" 0 | |",
" 1 | |",
" 2 | very long data | fold",
" | | this",
]
)
result = tabulate(table, tablefmt="presto")
assert_equal(expected, result)
def test_orgtbl():
    "Output: orgtbl with headers"
    # Emacs org-mode table: '+' junctions appear only in the header rule.
    rows = (
        "| strings   |   numbers |",
        "|-----------+-----------|",
        "| spam      |   41.9999 |",
        "| eggs      |  451      |",
    )
    actual = tabulate(_test_table, _test_table_headers, tablefmt="orgtbl")
    assert_equal("\n".join(rows), actual)
def test_orgtbl_headerless():
    "Output: orgtbl without headers"
    # Headerless org table is just pipe-delimited data rows.
    actual = tabulate(_test_table, tablefmt="orgtbl")
    assert_equal("| spam |  41.9999 |\n| eggs | 451      |", actual)
def test_asciidoc():
"Output: asciidoc with headers"
expected = "\n".join(
[
'[cols="11<,11>",options="header"]',
"|====",
"| strings | numbers ",
"| spam | 41.9999 ",
"| eggs | 451 ",
"|====",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="asciidoc")
assert_equal(expected, result)
def test_asciidoc_headerless():
"Output: asciidoc without headers"
expected = "\n".join(
[
'[cols="6<,10>"]',
"|====",
"| spam | 41.9999 ",
"| eggs | 451 ",
"|====",
]
)
result = tabulate(_test_table, tablefmt="asciidoc")
assert_equal(expected, result)
def test_psql():
"Output: psql with headers"
expected = "\n".join(
[
"+-----------+-----------+",
"| strings | numbers |",
"|-----------+-----------|",
"| spam | 41.9999 |",
"| eggs | 451 |",
"+-----------+-----------+",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="psql")
assert_equal(expected, result)
def test_psql_headerless():
"Output: psql without headers"
expected = "\n".join(
[
"+------+----------+",
"| spam | 41.9999 |",
"| eggs | 451 |",
"+------+----------+",
]
)
result = tabulate(_test_table, tablefmt="psql")
assert_equal(expected, result)
def test_psql_multiline_headerless():
"Output: psql with multiline cells without headers"
table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
expected = "\n".join(
[
"+---------+-----------+",
"| foo bar | hello |",
"| baz | |",
"| bau | |",
"| | multiline |",
"| | world |",
"+---------+-----------+",
]
)
result = tabulate(table, stralign="center", tablefmt="psql")
assert_equal(expected, result)
def test_psql_multiline():
"Output: psql with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
expected = "\n".join(
[
"+-------------+-------------+",
"| more | more spam |",
"| spam \x1b[31meggs\x1b[0m | & eggs |",
"|-------------+-------------|",
"| 2 | foo |",
"| | bar |",
"+-------------+-------------+",
]
)
result = tabulate(table, headers, tablefmt="psql")
assert_equal(expected, result)
def test_psql_multiline_with_empty_cells():
"Output: psql with multiline cells and empty cells with headers"
table = [
["hdr", "data", "fold"],
["1", "", ""],
["2", "very long data", "fold\nthis"],
]
expected = "\n".join(
[
"+-------+----------------+--------+",
"| hdr | data | fold |",
"|-------+----------------+--------|",
"| 1 | | |",
"| 2 | very long data | fold |",
"| | | this |",
"+-------+----------------+--------+",
]
)
result = tabulate(table, headers="firstrow", tablefmt="psql")
assert_equal(expected, result)
def test_psql_multiline_with_empty_cells_headerless():
"Output: psql with multiline cells and empty cells without headers"
table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
expected = "\n".join(
[
"+---+----------------+------+",
"| 0 | | |",
"| 1 | | |",
"| 2 | very long data | fold |",
"| | | this |",
"+---+----------------+------+",
]
)
result = tabulate(table, tablefmt="psql")
assert_equal(expected, result)
def test_pretty():
"Output: pretty with headers"
expected = "\n".join(
[
"+---------+---------+",
"| strings | numbers |",
"+---------+---------+",
"| spam | 41.9999 |",
"| eggs | 451.0 |",
"+---------+---------+",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="pretty")
assert_equal(expected, result)
def test_pretty_headerless():
"Output: pretty without headers"
expected = "\n".join(
[
"+------+---------+",
"| spam | 41.9999 |",
"| eggs | 451.0 |",
"+------+---------+",
]
)
result = tabulate(_test_table, tablefmt="pretty")
assert_equal(expected, result)
def test_pretty_multiline_headerless():
"Output: pretty with multiline cells without headers"
table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
expected = "\n".join(
[
"+---------+-----------+",
"| foo bar | hello |",
"| baz | |",
"| bau | |",
"| | multiline |",
"| | world |",
"+---------+-----------+",
]
)
result = tabulate(table, tablefmt="pretty")
assert_equal(expected, result)
def test_pretty_multiline():
"Output: pretty with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
expected = "\n".join(
[
"+-----------+-----------+",
"| more | more spam |",
"| spam \x1b[31meggs\x1b[0m | & eggs |",
"+-----------+-----------+",
"| 2 | foo |",
"| | bar |",
"+-----------+-----------+",
]
)
result = tabulate(table, headers, tablefmt="pretty")
assert_equal(expected, result)
def test_pretty_multiline_with_links():
"Output: pretty with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs")
expected = "\n".join(
[
"+-----------+-----------+",
"| more | more spam |",
"| spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ | & eggs |",
"+-----------+-----------+",
"| 2 | foo |",
"| | bar |",
"+-----------+-----------+",
]
)
result = tabulate(table, headers, tablefmt="pretty")
assert_equal(expected, result)
def test_pretty_multiline_with_empty_cells():
"Output: pretty with multiline cells and empty cells with headers"
table = [
["hdr", "data", "fold"],
["1", "", ""],
["2", "very long data", "fold\nthis"],
]
expected = "\n".join(
[
"+-----+----------------+------+",
"| hdr | data | fold |",
"+-----+----------------+------+",
"| 1 | | |",
"| 2 | very long data | fold |",
"| | | this |",
"+-----+----------------+------+",
]
)
result = tabulate(table, headers="firstrow", tablefmt="pretty")
assert_equal(expected, result)
def test_pretty_multiline_with_empty_cells_headerless():
"Output: pretty with multiline cells and empty cells without headers"
table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
expected = "\n".join(
[
"+---+----------------+------+",
"| 0 | | |",
"| 1 | | |",
"| 2 | very long data | fold |",
"| | | this |",
"+---+----------------+------+",
]
)
result = tabulate(table, tablefmt="pretty")
assert_equal(expected, result)
def test_jira():
    "Output: jira with headers"
    # Jira wiki markup: header cells are delimited by doubled pipes.
    rows = (
        "|| strings   ||   numbers ||",
        "| spam      |   41.9999 |",
        "| eggs      |  451      |",
    )
    actual = tabulate(_test_table, _test_table_headers, tablefmt="jira")
    assert_equal("\n".join(rows), actual)
def test_jira_headerless():
    "Output: jira without headers"
    # Data rows use single pipes when there is no header row.
    actual = tabulate(_test_table, tablefmt="jira")
    assert_equal("| spam |  41.9999 |\n| eggs | 451      |", actual)
def test_rst():
    "Output: rst with headers"
    # reStructuredText simple table: '=' rules at top, under header, and at
    # the bottom.
    rows = (
        "=========  =========",
        "strings      numbers",
        "=========  =========",
        "spam         41.9999",
        "eggs        451",
        "=========  =========",
    )
    actual = tabulate(_test_table, _test_table_headers, tablefmt="rst")
    assert_equal("\n".join(rows), actual)
def test_rst_with_empty_values_in_first_column():
"Output: rst with dots in first column"
test_headers = ["", "what"]
test_data = [("", "spam"), ("", "eggs")]
expected = "\n".join(
[
"==== ======",
".. what",
"==== ======",
".. spam",
".. eggs",
"==== ======",
]
)
result = tabulate(test_data, test_headers, tablefmt="rst")
assert_equal(expected, result)
def test_rst_headerless():
    "Output: rst without headers"
    # Only the enclosing '=' rules remain when there is no header row.
    actual = tabulate(_test_table, tablefmt="rst")
    assert_equal(
        "====  ========\nspam   41.9999\neggs  451\n====  ========", actual
    )
def test_rst_multiline():
"Output: rst with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
expected = "\n".join(
[
"=========== ===========",
" more more spam",
" spam \x1b[31meggs\x1b[0m & eggs",
"=========== ===========",
" 2 foo",
" bar",
"=========== ===========",
]
)
result = tabulate(table, headers, tablefmt="rst")
assert_equal(expected, result)
def test_rst_multiline_with_links():
"Output: rst with multiline cells with headers"
table = [[2, "foo\nbar"]]
headers = ("more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs")
expected = "\n".join(
[
"=========== ===========",
" more more spam",
" spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs",
"=========== ===========",
" 2 foo",
" bar",
"=========== ===========",
]
)
result = tabulate(table, headers, tablefmt="rst")
assert_equal(expected, result)
def test_rst_multiline_with_empty_cells():
"Output: rst with multiline cells and empty cells with headers"
table = [
["hdr", "data", "fold"],
["1", "", ""],
["2", "very long data", "fold\nthis"],
]
expected = "\n".join(
[
"===== ============== ======",
" hdr data fold",
"===== ============== ======",
" 1",
" 2 very long data fold",
" this",
"===== ============== ======",
]
)
result = tabulate(table, headers="firstrow", tablefmt="rst")
assert_equal(expected, result)
def test_rst_multiline_with_empty_cells_headerless():
"Output: rst with multiline cells and empty cells without headers"
table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
expected = "\n".join(
[
"= ============== ====",
"0",
"1",
"2 very long data fold",
" this",
"= ============== ====",
]
)
result = tabulate(table, tablefmt="rst")
assert_equal(expected, result)
def test_mediawiki():
"Output: mediawiki with headers"
expected = "\n".join(
[
'{| class="wikitable" style="text-align: left;"',
"|+ <!-- caption -->",
"|-",
'! strings !! style="text-align: right;"| numbers',
"|-",
'| spam || style="text-align: right;"| 41.9999',
"|-",
'| eggs || style="text-align: right;"| 451',
"|}",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="mediawiki")
assert_equal(expected, result)
def test_mediawiki_headerless():
"Output: mediawiki without headers"
expected = "\n".join(
[
'{| class="wikitable" style="text-align: left;"',
"|+ <!-- caption -->",
"|-",
'| spam || style="text-align: right;"| 41.9999',
"|-",
'| eggs || style="text-align: right;"| 451',
"|}",
]
)
result = tabulate(_test_table, tablefmt="mediawiki")
assert_equal(expected, result)
def test_moinmoin():
"Output: moinmoin with headers"
expected = "\n".join(
[
"|| ''' strings ''' ||<style=\"text-align: right;\"> ''' numbers ''' ||",
'|| spam ||<style="text-align: right;"> 41.9999 ||',
'|| eggs ||<style="text-align: right;"> 451 ||',
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="moinmoin")
assert_equal(expected, result)
def test_youtrack():
"Output: youtrack with headers"
expected = "\n".join(
[
"|| strings || numbers ||",
"| spam | 41.9999 |",
"| eggs | 451 |",
]
)
result = tabulate(_test_table, _test_table_headers, tablefmt="youtrack")
assert_equal(expected, result)
def test_moinmoin_headerless():
"Output: moinmoin without headers"
expected = "\n".join(
[
'|| spam ||<style="text-align: right;"> 41.9999 ||',
'|| eggs ||<style="text-align: right;"> 451 ||',
]
)
result = tabulate(_test_table, tablefmt="moinmoin")
assert_equal(expected, result)
# Fixtures shared by the html/unsafehtml output tests below.
# Headers/cells contain characters that the "html" format must escape.
_test_table_html_headers = ["<strings>", "<&numbers&>"]
_test_table_html = [["spam >", 41.9999], ["eggs &", 451.0]]
# Cells already containing markup that "unsafehtml" must emit verbatim.
_test_table_unsafehtml_headers = ["strings", "numbers"]
_test_table_unsafehtml = [
    ["spam", '<font color="red">41.9999</font>'],
    ["eggs", '<font color="red">451.0</font>'],
]
def test_html():
"Output: html with headers"
expected = "\n".join(
[
"<table>",
"<thead>",
'<tr><th><strings> </th><th style="text-align: right;"> <&numbers&></th></tr>', # noqa
"</thead>",
"<tbody>",
'<tr><td>spam > </td><td style="text-align: right;"> 41.9999</td></tr>',
'<tr><td>eggs & </td><td style="text-align: right;"> 451 </td></tr>',
"</tbody>",
"</table>",
]
)
result = tabulate(_test_table_html, _test_table_html_headers, tablefmt="html")
assert_equal(expected, result)
assert hasattr(result, "_repr_html_")
assert result._repr_html_() == result.str
def test_unsafehtml():
"Output: unsafe html with headers"
expected = "\n".join(
[
"<table>",
"<thead>",
"<tr><th>strings </th><th>numbers </th></tr>", # noqa
"</thead>",
"<tbody>",
'<tr><td>spam </td><td><font color="red">41.9999</font></td></tr>',
'<tr><td>eggs </td><td><font color="red">451.0</font> </td></tr>',
"</tbody>",
"</table>",
]
)
result = tabulate(
_test_table_unsafehtml, _test_table_unsafehtml_headers, tablefmt="unsafehtml"
)
assert_equal(expected, result)
assert hasattr(result, "_repr_html_")
assert result._repr_html_() == result.str
def test_html_headerless():
"Output: html without headers"
expected = "\n".join(
[
"<table>",
"<tbody>",
'<tr><td>spam ></td><td style="text-align: right;"> 41.9999</td></tr>',
'<tr><td>eggs &</td><td style="text-align: right;">451 </td></tr>',
"</tbody>",
"</table>",
]
)
result = tabulate(_test_table_html, tablefmt="html")
assert_equal(expected, result)
assert hasattr(result, "_repr_html_")
assert result._repr_html_() == result.str
def test_unsafehtml_headerless():
"Output: unsafe html without headers"
expected = "\n".join(
[
"<table>",
"<tbody>",
'<tr><td>spam</td><td><font color="red">41.9999</font></td></tr>',
'<tr><td>eggs</td><td><font color="red">451.0</font> </td></tr>',
"</tbody>",
"</table>",
]
)
result = tabulate(_test_table_unsafehtml, tablefmt="unsafehtml")
assert_equal(expected, result)
assert hasattr(result, "_repr_html_")
assert result._repr_html_() == result.str
def test_latex():
    "Output: latex with headers and replaced characters"
    # The plain latex format must escape '$' and '_' in cell/header text.
    headers_with_math = list(_test_table_headers)
    headers_with_math[-1] += " ($N_0$)"
    actual = tabulate(_test_table, headers_with_math, tablefmt="latex")
    rows = (
        r"\begin{tabular}{lr}",
        r"\hline",
        r" strings   &   numbers (\$N\_0\$) \\",
        r"\hline",
        r" spam      &            41.9999 \\",
        r" eggs      &           451      \\",
        r"\hline",
        r"\end{tabular}",
    )
    assert_equal("\n".join(rows), actual)
def test_latex_raw():
    "Output: raw latex with headers"
    # latex_raw must pass pre-escaped LaTeX markup ($...$, \emph{...})
    # through to the output without escaping it.
    raw_test_table_headers = list(_test_table_headers)
    raw_test_table_headers[-1] += " ($N_0$)"
    raw_test_table = list(map(list, _test_table))
    raw_test_table[0][0] += "$_1$"
    raw_test_table[1][0] = "\\emph{" + raw_test_table[1][0] + "}"
    # NOTE: removed a leftover debug print(raw_test_table) that polluted
    # the test output.
    result = tabulate(raw_test_table, raw_test_table_headers, tablefmt="latex_raw")
    expected = "\n".join(
        [
            r"\begin{tabular}{lr}",
            r"\hline",
            r" strings     &   numbers ($N_0$) \\",
            r"\hline",
            r" spam$_1$    &           41.9999 \\",
            r" \emph{eggs} &          451      \\",
            r"\hline",
            r"\end{tabular}",
        ]
    )
    assert_equal(expected, result)
def test_latex_headerless():
"Output: latex without headers"
result = tabulate(_test_table, tablefmt="latex")
expected = "\n".join(
[
r"\begin{tabular}{lr}",
r"\hline",
r" spam & 41.9999 \\",
r" eggs & 451 \\",
r"\hline",
r"\end{tabular}",
]
)
assert_equal(expected, result)
def test_latex_booktabs():
"Output: latex with headers, using the booktabs format"
result = tabulate(_test_table, _test_table_headers, tablefmt="latex_booktabs")
expected = "\n".join(
[
r"\begin{tabular}{lr}",
r"\toprule",
r" strings & numbers \\",
r"\midrule",
r" spam & 41.9999 \\",
r" eggs & 451 \\",
r"\bottomrule",
r"\end{tabular}",
]
)
assert_equal(expected, result)
def test_latex_booktabs_headerless():
    "Output: latex without headers, using the booktabs format"
    # Headerless booktabs output has no \midrule, only the top/bottom rules.
    rows = [
        r"\begin{tabular}{lr}",
        r"\toprule",
        r" spam &  41.9999 \\",
        r" eggs & 451      \\",
        r"\bottomrule",
        r"\end{tabular}",
    ]
    assert_equal("\n".join(rows), tabulate(_test_table, tablefmt="latex_booktabs"))
def test_textile():
    "Output: textile without header"
    result = tabulate(_test_table, tablefmt="textile")
    # Textile cell markers: "|<." is left-aligned, "|>." is right-aligned.
    expected = """\
|<. spam  |>.  41.9999 |
|<. eggs  |>. 451      |"""
    assert_equal(expected, result)
def test_textile_with_header():
    "Output: textile with header"
    result = tabulate(_test_table, ["strings", "numbers"], tablefmt="textile")
    # "|_." marks Textile header cells.
    expected = """\
|_.  strings |_.  numbers |
|<. spam     |>.  41.9999 |
|<. eggs     |>. 451      |"""
    assert_equal(expected, result)
def test_textile_with_center_align():
    "Output: textile with center align"
    result = tabulate(_test_table, tablefmt="textile", stralign="center")
    # "|=." marks center-aligned Textile cells (string columns only here).
    expected = """\
|=. spam  |>.  41.9999 |
|=. eggs  |>. 451      |"""
    assert_equal(expected, result)
def test_no_data():
    "Output: table with no data"
    # ``None`` data still renders the header and separator rows.
    rendered = tabulate(None, _test_table_headers, tablefmt="simple")
    assert_equal("strings    numbers\n---------  ---------", rendered)
def test_empty_data():
    "Output: table with empty data"
    # An empty list behaves just like ``None``: headers only.
    rendered = tabulate([], _test_table_headers, tablefmt="simple")
    assert_equal("strings    numbers\n---------  ---------", rendered)
def test_no_data_without_headers():
    "Output: table with no data and no headers"
    # Nothing to render at all: empty string, not a blank line.
    assert_equal("", tabulate(None, tablefmt="simple"))
def test_empty_data_without_headers():
    "Output: table with empty data and no headers"
    # Same as the ``None`` case: no rows and no headers produce "".
    assert_equal("", tabulate([], tablefmt="simple"))
def test_intfmt():
    "Output: integer format"
    # intfmt="," inserts thousands separators into integer cells.
    formatted = tabulate([[10000], [10]], intfmt=",", tablefmt="plain")
    assert_equal("10,000\n    10", formatted)
def test_empty_data_with_headers():
    "Output: table with empty data and headers as firstrow"
    # With no rows there is no first row to take headers from, so the
    # output is empty rather than a lone header line.
    expected = ""
    result = tabulate([], headers="firstrow")
    assert_equal(expected, result)
def test_floatfmt():
    "Output: floating point format"
    # Both float-like strings and real floats honour the floatfmt spec.
    formatted = tabulate([["1.23456789"], [1.0]], floatfmt=".3f", tablefmt="plain")
    assert_equal("1.235\n1.000", formatted)
def test_floatfmt_multi():
    "Output: floating point format different for each column"
    # floatfmt tuple is shorter than the column count: the remaining
    # (third) column falls back to the default float formatting.
    result = tabulate(
        [[0.12345, 0.12345, 0.12345]], floatfmt=(".1f", ".3f"), tablefmt="plain"
    )
    expected = "0.1  0.123  0.12345"
    assert_equal(expected, result)
def test_colalign_multi():
    "Output: string columns with custom colalign"
    # colalign overrides only the first column; the second keeps defaults.
    table = [["one", "two"], ["three", "four"]]
    rendered = tabulate(table, colalign=("right",), tablefmt="plain")
    assert_equal("  one  two\nthree  four", rendered)
def test_colalign_multi_with_sep_line():
    "Output: string columns with custom colalign and a separating line"
    result = tabulate(
        [["one", "two"], SEPARATING_LINE, ["three", "four"]],
        colalign=("right",),
        tablefmt="plain",
    )
    # In "plain" format the separating line renders as an empty line.
    expected = "  one  two\n\nthree  four"
    assert_equal(expected, result)
def test_float_conversions():
    "Output: float format parsed"
    # Strings that parse as floats (including "inf"/"nan") are converted and
    # right-aligned; unparseable strings such as "437e6548" are kept verbatim.
    test_headers = ["str", "bad_float", "just_float", "with_inf", "with_nan", "neg_inf"]
    test_table = [
        ["spam", 41.9999, "123.345", "12.2", "nan", "0.123123"],
        ["eggs", "451.0", 66.2222, "inf", 123.1234, "-inf"],
        ["asd", "437e6548", 1.234e2, float("inf"), float("nan"), 0.22e23],
    ]
    result = tabulate(test_table, test_headers, tablefmt="grid")
    expected = "\n".join(
        [
            "+-------+-------------+--------------+------------+------------+-------------+",
            "| str   |   bad_float |   just_float |   with_inf |   with_nan |     neg_inf |",
            "+=======+=============+==============+============+============+=============+",
            "| spam  |     41.9999 |      123.345 |       12.2 |        nan |    0.123123 |",
            "+-------+-------------+--------------+------------+------------+-------------+",
            "| eggs  |    451      |       66.2222 |        inf |    123.123 |        -inf |",
            "+-------+-------------+--------------+------------+------------+-------------+",
            "| asd   |   437e6548  |      123.4   |        inf |        nan |     2.2e+22 |",
            "+-------+-------------+--------------+------------+------------+-------------+",
        ]
    )
    assert_equal(expected, result)
def test_missingval():
    "Output: substitution of missing values"
    # ``None`` cells are rendered with the supplied missingval string.
    table = [["Alice", 10], ["Bob", None]]
    rendered = tabulate(table, missingval="n/a", tablefmt="plain")
    assert_equal("Alice   10\nBob    n/a", rendered)
def test_missingval_multi():
    "Output: substitution of missing values with different values per column"
    # missingval tuple shorter than the column count: the third column's
    # ``None`` falls back to the default missing value (empty string).
    result = tabulate(
        [["Alice", "Bob", "Charlie"], [None, None, None]],
        missingval=("n/a", "?"),
        tablefmt="plain",
    )
    expected = "Alice  Bob  Charlie\nn/a    ?"
    assert_equal(expected, result)
def test_column_alignment():
    "Output: custom alignment for text and numbers"
    # stralign applies to the text column, numalign to the numeric column.
    expected = "\n".join(["-----  ---", "Alice    1", "  Bob  333", "-----  ---"])
    result = tabulate([["Alice", 1], ["Bob", 333]], stralign="right", numalign="center")
    assert_equal(expected, result)
def test_unaligned_separated():
    "Output: non-aligned data columns"
    # stralign=None / numalign=None disable padding entirely, so columns
    # are joined raw by the custom separator.
    expected = "\n".join(["name|score", "Alice|1", "Bob|333"])
    fmt = simple_separated_format("|")
    result = tabulate(
        [["Alice", 1], ["Bob", 333]],
        ["name", "score"],
        tablefmt=fmt,
        stralign=None,
        numalign=None,
    )
    assert_equal(expected, result)
def test_pandas_with_index():
    "Output: a pandas Dataframe with an index"
    # pandas is optional for the test suite: skip when it is not installed.
    try:
        import pandas

        df = pandas.DataFrame(
            [["one", 1], ["two", None]], columns=["string", "number"], index=["a", "b"]
        )
        # headers="keys" uses the DataFrame's column names; the unnamed
        # index appears as an extra leading column.
        expected = "\n".join(
            [
                "    string      number",
                "--  --------  --------",
                "a   one              1",
                "b   two            nan",
            ]
        )
        result = tabulate(df, headers="keys")
        assert_equal(expected, result)
    except ImportError:
        skip("test_pandas_with_index is skipped")
def test_pandas_without_index():
    "Output: a pandas Dataframe without an index"
    # pandas is optional for the test suite: skip when it is not installed.
    try:
        import pandas

        df = pandas.DataFrame(
            [["one", 1], ["two", None]],
            columns=["string", "number"],
            index=pandas.Index(["a", "b"], name="index"),
        )
        # showindex=False suppresses the index column even though the
        # DataFrame's index is named.
        expected = "\n".join(
            [
                "string      number",
                "--------  --------",
                "one              1",
                "two            nan",
            ]
        )
        result = tabulate(df, headers="keys", showindex=False)
        assert_equal(expected, result)
    except ImportError:
        skip("test_pandas_without_index is skipped")
def test_pandas_rst_with_index():
    "Output: a pandas Dataframe with an index in ReStructuredText format"
    # pandas is optional for the test suite: skip when it is not installed.
    try:
        import pandas

        df = pandas.DataFrame(
            [["one", 1], ["two", None]], columns=["string", "number"], index=["a", "b"]
        )
        # An unnamed index gets the ".." placeholder header in rst output.
        expected = "\n".join(
            [
                "====  ========  ========",
                "..    string      number",
                "====  ========  ========",
                "a     one              1",
                "b     two            nan",
                "====  ========  ========",
            ]
        )
        result = tabulate(df, tablefmt="rst", headers="keys")
        assert_equal(expected, result)
    except ImportError:
        skip("test_pandas_rst_with_index is skipped")
def test_pandas_rst_with_named_index():
    "Output: a pandas Dataframe with a named index in ReStructuredText format"
    # pandas is optional for the test suite: skip when it is not installed.
    try:
        import pandas

        index = pandas.Index(["a", "b"], name="index")
        df = pandas.DataFrame(
            [["one", 1], ["two", None]], columns=["string", "number"], index=index
        )
        # A named index contributes its name as the first column header.
        expected = "\n".join(
            [
                "=======  ========  ========",
                "index    string      number",
                "=======  ========  ========",
                "a        one              1",
                "b        two            nan",
                "=======  ========  ========",
            ]
        )
        result = tabulate(df, tablefmt="rst", headers="keys")
        assert_equal(expected, result)
    except ImportError:
        # Fixed copy-paste bug: the skip message previously named the
        # wrong test (test_pandas_rst_with_index).
        skip("test_pandas_rst_with_named_index is skipped")
def test_dict_like_with_index():
    "Output: a table with a running index"
    # showindex=True prepends a 0-based running index column.
    dd = {"b": range(101, 104)}
    expected = "\n".join(["      b", "--  ---", " 0  101", " 1  102", " 2  103"])
    result = tabulate(dd, "keys", showindex=True)
    assert_equal(expected, result)
def test_list_of_lists_with_index():
    "Output: a table with a running index"
    dd = zip(*[range(3), range(101, 104)])
    # showindex=True prepends a 0-based running index column; list-of-lists
    # input keeps the given column order, so the result is deterministic.
    expected = "\n".join(
        ["      a    b", "--  ---  ---", " 0    0  101", " 1    1  102", " 2    2  103"]
    )
    result = tabulate(dd, headers=["a", "b"], showindex=True)
    assert_equal(expected, result)
def test_list_of_lists_with_index_with_sep_line():
    "Output: a table with a running index"
    dd = [(0, 101), SEPARATING_LINE, (1, 102), (2, 103)]
    # The separating line does not consume an index value; indices keep
    # counting across it.
    expected = "\n".join(
        [
            "      a    b",
            "--  ---  ---",
            " 0    0  101",
            "--  ---  ---",
            " 1    1  102",
            " 2    2  103",
        ]
    )
    result = tabulate(dd, headers=["a", "b"], showindex=True)
    assert_equal(expected, result)
def test_list_of_lists_with_supplied_index():
    "Output: a table with a supplied index"
    dd = zip(*[list(range(3)), list(range(101, 104))])
    # showindex accepts an explicit iterable of index values.
    expected = "\n".join(
        ["      a    b", "--  ---  ---", " 1    0  101", " 2    1  102", " 3    2  103"]
    )
    result = tabulate(dd, headers=["a", "b"], showindex=[1, 2, 3])
    assert_equal(expected, result)
    # TODO: make it a separate test case
    # the index must be as long as the number of rows
    with raises(ValueError):
        tabulate(dd, headers=["a", "b"], showindex=[1, 2])
def test_list_of_lists_with_index_firstrow():
    "Output: a table with a running index and header='firstrow'"
    # The first row supplies the headers; the index column itself stays unnamed.
    dd = zip(*[["a"] + list(range(3)), ["b"] + list(range(101, 104))])
    expected = "\n".join(
        ["      a    b", "--  ---  ---", " 0    0  101", " 1    1  102", " 2    2  103"]
    )
    result = tabulate(dd, headers="firstrow", showindex=True)
    assert_equal(expected, result)
    # TODO: make it a separate test case
    # the index must be as long as the number of rows
    with raises(ValueError):
        tabulate(dd, headers="firstrow", showindex=[1, 2])
def test_disable_numparse_default():
    "Output: Default table output with number parsing and alignment"
    # By default numeric strings are parsed: "451.0" is rendered as 451
    # and the column is right-aligned.
    expected = "\n".join(
        [
            "strings      numbers",
            "---------  ---------",
            "spam         41.9999",
            "eggs        451",
        ]
    )
    result = tabulate(_test_table, _test_table_headers)
    assert_equal(expected, result)
    # disable_numparse=False must be identical to the default behaviour.
    result = tabulate(_test_table, _test_table_headers, disable_numparse=False)
    assert_equal(expected, result)
def test_disable_numparse_true():
    "Output: Default table output, but without number parsing and alignment"
    # With parsing disabled the raw string "451.0" survives and the
    # column is left-aligned like any other string column.
    expected = "\n".join(
        [
            "strings    numbers",
            "---------  ---------",
            "spam       41.9999",
            "eggs       451.0",
        ]
    )
    result = tabulate(_test_table, _test_table_headers, disable_numparse=True)
    assert_equal(expected, result)
def test_disable_numparse_list():
    "Output: Default table output, but with number parsing selectively disabled"
    table_headers = ["h1", "h2", "h3"]
    test_table = [["foo", "bar", "42992e1"]]
    # Column 2 is excluded from parsing: "42992e1" stays verbatim.
    expected = "\n".join(
        ["h1    h2    h3", "----  ----  -------", "foo   bar   42992e1"]
    )
    result = tabulate(test_table, table_headers, disable_numparse=[2])
    assert_equal(expected, result)
    # Columns 0 and 1 excluded instead: column 2 is parsed, so the
    # scientific-notation string becomes the number 429920.
    expected = "\n".join(
        ["h1    h2        h3", "----  ----  ------", "foo   bar   429920"]
    )
    result = tabulate(test_table, table_headers, disable_numparse=[0, 1])
    assert_equal(expected, result)
def test_preserve_whitespace():
    "Output: Default table output, but with preserved leading whitespace."
    # PRESERVE_WHITESPACE is module-global state; restore it in ``finally``
    # so a failing assertion cannot leak the setting into other tests.
    tabulate_module.PRESERVE_WHITESPACE = True
    try:
        table_headers = ["h1", "h2", "h3"]
        test_table = [["  foo", " bar   ", "foo"]]
        expected = "\n".join(
            ["h1     h2       h3", "-----  -------  ----", "  foo   bar     foo"]
        )
        result = tabulate(test_table, table_headers)
        assert_equal(expected, result)
    finally:
        tabulate_module.PRESERVE_WHITESPACE = False

    # With the default (False) the same input is stripped before layout.
    table_headers = ["h1", "h2", "h3"]
    test_table = [["  foo", " bar   ", "foo"]]
    expected = "\n".join(["h1    h2    h3", "----  ----  ----", "foo   bar   foo"])
    result = tabulate(test_table, table_headers)
    assert_equal(expected, result)
| mit | cd3b766ef9b4388a312efecb1c504f10 | 30.815462 | 121 | 0.420925 | 3.044811 | false | true | false | false |
chicagopython/chipy.org | chipy_org/apps/meetings/utils.py | 1 | 3730 | from __future__ import unicode_literals
import logging
import requests
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from .models import RSVP, Meeting
logger = logging.getLogger(__name__)
def get_rsvp(meeting, meetup_member):
    """
    Handles getting the rsvp instance to update from Meetup.
    Will return a new, unsaved instance if needed.

    If there is a name collision, it will update the current RSVP with the
    Meetup info. This isn't perfect by any stretch, but for our uses it
    should be good enough.

    Parameters
    ----------
    meeting : Meeting
        The meeting the RSVP belongs to.
    meetup_member : dict
        Meetup API member payload; must contain ``member_id`` and ``name``.

    Returns
    -------
    RSVP
        An existing (possibly modified, not yet saved) or new RSVP instance.
    """
    meetup_user_id = meetup_member["member_id"]
    # ``first()`` issues a single LIMIT-1 query instead of evaluating the
    # queryset for truthiness and then indexing it (two queries).
    rsvp = RSVP.objects.filter(name=meetup_member["name"], meeting=meeting).first()
    if rsvp is not None:
        rsvp.meetup_user_id = meetup_user_id
    else:
        try:
            rsvp = RSVP.objects.get(meetup_user_id=meetup_user_id, meeting=meeting)
        except ObjectDoesNotExist:
            rsvp = RSVP(meetup_user_id=meetup_user_id, meeting=meeting)
    return rsvp
def get_best_name_available(result, real_names):
    """Pick the best display name for one RSVP payload.

    Preference order: an explicit answer to a "name" event question,
    then the member's known real name from ``real_names``, then the
    capitalized Meetup display name.
    """
    def _capitalize_words(text):
        return " ".join(word.capitalize() for word in text.split())

    # Scan the event questions for one mentioning "name" that was answered.
    answered_name = None
    for entry in result.get("answers", []):
        if "question" in entry and "name" in entry["question"].lower():
            if "answer" in entry:
                answered_name = entry["answer"]
                break

    if answered_name:
        return _capitalize_words(answered_name)

    known_real_name = real_names.get(result["member"]["member_id"])
    if known_real_name:
        return known_real_name

    return _capitalize_words(result["member"]["name"])
def get_real_names(api_key, results):
    """Fetch attendees' self-reported real names from their Meetup profiles.

    Queries the Meetup profiles API for every member appearing in the RSVP
    payloads and reads the answer to the group's "real name" profile
    question, falling back to the member's capitalized Meetup display name.

    Parameters
    ----------
    api_key : str
        Meetup API key.
    results : list of dict
        RSVP result payloads, each containing ``["member"]["member_id"]``.

    Returns
    -------
    dict
        Mapping of member id -> best-known real name.
    """
    real_names = {}
    url = "https://api.meetup.com/2/profiles"
    # Question id of the "real name" profile question for the ChiPy group.
    realname_question_id = 8181568
    attendee_ids = ",".join(str(r["member"]["member_id"]) for r in results)
    params = dict(member_id=attendee_ids, group_urlname="_ChiPy_", key=api_key)
    api_response = requests.get(url, params=params)
    # Use a distinct name for the profile payloads so the ``results``
    # parameter (the RSVP payloads) is no longer shadowed.
    profiles = api_response.json()["results"]
    for profile in profiles:
        mid = profile["member_id"]
        for answer in profile["answers"]:
            if answer["question_id"] == realname_question_id and "answer" in answer:
                real_names[mid] = answer["answer"]
        if mid not in real_names:
            real_names[mid] = " ".join(s.capitalize() for s in profile["name"].split())
    return real_names
def meetup_meeting_sync(api_key, meetup_event_id):
    """Pull RSVPs for one Meetup event and mirror them into local RSVP rows.

    Fetches up to 1000 RSVPs for the event, resolves each attendee's best
    display name, and creates or updates the matching local RSVP records.
    Validation failures are logged and skipped rather than aborting the sync.
    """
    url = "http://api.meetup.com/2/rsvps"
    # page=1000: request up to 1000 RSVPs in a single API call.
    params = dict(key=api_key, event_id=meetup_event_id, fields="answer_info", page=1000)
    api_response = requests.get(url, params=params)
    meeting = Meeting.objects.get(meetup_id=meetup_event_id)
    response = api_response.json()
    results = response["results"]
    logger.info("Got %s results for Meetup sync", len(results))
    real_names = get_real_names(api_key, results)
    for result in results:
        rsvp = get_rsvp(meeting, result["member"])
        # Meetup reports "yes"/"no"; anything that is not "yes" counts as "N".
        rsvp.response = "Y" if result["response"] == "yes" else "N"
        rsvp.name = get_best_name_available(result, real_names)
        rsvp.guests = int(result["guests"])
        try:
            rsvp.save()
        except ValidationError as exc:
            logger.warning(
                "Error saving RSVP for %s with response of %s. Error is %s",
                result["member"]["name"],
                rsvp.response,
                exc,
            )
        else:
            logger.info(
                "Saved RSVP for %s with response of %s", result["member"]["name"], rsvp.response
            )
| mit | a776dfba7ce02952b22516df9ad96b46 | 32.909091 | 96 | 0.613673 | 3.583093 | false | false | false | false |
chicagopython/chipy.org | chipy_org/libs/middleware.py | 1 | 1030 | # -*- coding: utf-8 -*-
import traceback
from django.contrib import messages
from django.shortcuts import redirect
from social_core.exceptions import SocialAuthBaseException
from social_django.middleware import SocialAuthExceptionMiddleware
class ChipySocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
    """
    We're overriding this because modern django, when setup
    correctly can handle messages for anonymous users.
    """

    def process_exception(self, request, exception):
        """Turn social-auth failures into a flash message plus redirect.

        Other exceptions fall through: the traceback is printed and ``None``
        is implicitly returned so Django's normal exception handling runs.
        """
        backend = getattr(request, "backend", None)
        if isinstance(exception, SocialAuthBaseException):
            message = self.get_message(request, exception)
            url = self.get_redirect_uri(request, exception)
            if backend:
                extra_tags = f"social-auth {getattr(backend, 'name', 'unknown')}"
            else:
                extra_tags = ""
            messages.error(request, message, extra_tags=extra_tags)
            return redirect(url)
        # ``traceback.print_exc()`` writes to stderr and returns ``None``;
        # the old ``print(traceback.print_exc())`` printed a spurious
        # "None" line after the traceback.
        traceback.print_exc()
chicagopython/chipy.org | chipy_org/apps/job_board/forms.py | 1 | 1619 | from django import forms
from django.contrib.auth.models import User
from django.forms import Textarea
from .models import JobPost
class JobPostForm(forms.ModelForm):
    """Form for submitting a job posting to the job board."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Optional on the model, but submitters must accept the terms here.
        self.fields["agree_to_terms"].required = True

    class Meta:
        model = JobPost
        fields = [
            "contact",
            "is_from_recruiting_agency",
            "company_name",
            "position",
            "job_type",
            "location",
            "description",
            "is_sponsor",
            "affiliation",
            "can_host_meeting",
            "company_website",
            "how_to_apply",
            "agree_to_terms",
        ]

        help_texts = {
            "affiliation": ("Is this posting affiliated with a 3rd party? Please select:"),
        }

        widgets = {
            "description": Textarea(
                attrs={
                    "cols": 60,
                    "rows": 20,
                }
            ),
            # The contact user is set by the view, not chosen by the submitter.
            "contact": forms.HiddenInput(),
            "how_to_apply": Textarea(
                attrs={
                    "cols": 60,
                    "rows": 20,
                }
            ),
        }
class JobUserForm(forms.ModelForm):
    """Collects the job poster's contact details (name and email)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # These fields are optional on the User model but mandatory here.
        for field_name in ("first_name", "last_name", "email"):
            self.fields[field_name].required = True

    class Meta:
        model = User
        fields = ["first_name", "last_name", "email"]
moble/quaternion | src/quaternion/__init__.py | 1 | 33974 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/quaternion/blob/main/LICENSE>
__version__ = "2022.4.2"
__doc_title__ = "Quaternion dtype for NumPy"
__doc__ = "Adds a quaternion dtype to NumPy."
__all__ = ['quaternion',
'as_quat_array', 'as_spinor_array',
'as_float_array', 'from_float_array',
'as_vector_part', 'from_vector_part',
'as_rotation_matrix', 'from_rotation_matrix',
'as_rotation_vector', 'from_rotation_vector',
'as_euler_angles', 'from_euler_angles',
'as_spherical_coords', 'from_spherical_coords',
'rotate_vectors', 'allclose',
'rotor_intrinsic_distance', 'rotor_chordal_distance',
'rotation_intrinsic_distance', 'rotation_chordal_distance',
'slerp_evaluate', 'squad_evaluate',
'zero', 'one', 'x', 'y', 'z', 'integrate_angular_velocity',
'squad', 'slerp', 'derivative', 'definite_integral', 'indefinite_integral']
import numpy as np
from .numpy_quaternion import (
quaternion, _eps, slerp_evaluate, squad_evaluate,
# slerp_vectorized, squad_vectorized, slerp, squad,
)
from .quaternion_time_series import (
unflip_rotors, slerp, squad, integrate_angular_velocity, minimal_rotation, angular_velocity,
)
from .calculus import (
derivative, antiderivative, definite_integral, indefinite_integral,
fd_derivative, fd_definite_integral, fd_indefinite_integral,
spline_derivative, spline_definite_integral, spline_indefinite_integral,
)
try:
from .calculus import spline
except:
pass
from .means import (
mean_rotor_in_chordal_metric,
optimal_alignment_in_chordal_metric,
optimal_alignment_in_Euclidean_metric
)
np.quaternion = quaternion
np.sctypeDict['quaternion'] = np.dtype(quaternion)
zero = np.quaternion(0, 0, 0, 0)
one = np.quaternion(1, 0, 0, 0)
x = np.quaternion(0, 1, 0, 0)
y = np.quaternion(0, 0, 1, 0)
z = np.quaternion(0, 0, 0, 1)
rotor_intrinsic_distance = np.rotor_intrinsic_distance
rotor_chordal_distance = np.rotor_chordal_distance
rotation_intrinsic_distance = np.rotation_intrinsic_distance
rotation_chordal_distance = np.rotation_chordal_distance
def as_float_array(a):
    """View the quaternion array as an array of floats

    This function is fast (of order 1 microsecond) because no data is
    copied; the returned quantity is just a "view" of the original.

    The output view has one more dimension (of size 4) than the input
    array, but is otherwise the same shape.  The components along that
    last dimension are the scalar and vector parts of each quaternion,
    in the order `w`, `x`, `y`, `z`.

    """
    quat_array = np.asarray(a, dtype=np.quaternion)
    return quat_array.view((np.double, 4))
def as_quat_array(a):
    """View a float array as an array of quaternions

    The input array must have a final dimension whose size is
    divisible by four (or better yet *is* 4), because successive
    indices in that last dimension will be considered successive
    components of the output quaternion.  Each set of 4 components
    will be interpreted as the scalar and vector components of a
    quaternion in that order: `w`, `x`, `y`, `z`.

    This function is usually fast (of order 1 microsecond) because no
    data is copied; the returned quantity is just a "view" of the
    original.  However, if the input array is not C-contiguous
    (basically, as you increment the index into the last dimension of
    the array, you just move to the neighboring float in memory), the
    data will need to be copied which may be quite slow.  Therefore,
    you should try to ensure that the input array is in that order.
    Slices and transpositions will frequently break that rule.

    We will not convert back from a two-spinor array because there is
    no unique convention for them, so I don't want to mess with that.
    Also, we want to discourage users from the slow, memory-copying
    process of swapping columns required for useful definitions of
    the two-spinors.

    """
    a = np.asarray(a, dtype=np.double)

    # fast path: a single flat quaternion avoids the view machinery entirely
    if a.shape == (4,):
        return quaternion(a[0], a[1], a[2], a[3])

    # view only works if the last axis is C-contiguous
    # (i.e. the four components of each quaternion are adjacent in memory);
    # otherwise fall back to a C-ordered copy.
    if not a.flags['C_CONTIGUOUS'] or a.strides[-1] != a.itemsize:
        a = a.copy(order='C')
    try:
        av = a.view(np.quaternion)
    except ValueError as e:
        message = (str(e) + '\n            '
                   + 'Failed to view input data as a series of quaternions.  '
                   + 'Please ensure that the last dimension has size divisible by 4.\n            '
                   + 'Input data has shape {0} and dtype {1}.'.format(a.shape, a.dtype))
        raise ValueError(message)

    # special case: don't create an axis for a single quaternion, to
    # match the output of `as_float_array`
    if av.shape[-1] == 1:
        av = av.reshape(a.shape[:-1])

    return av
def from_float_array(a):
    """Interpret a float array as an array of quaternions; alias for `as_quat_array`"""
    return as_quat_array(a)
def from_vector_part(v, vector_axis=-1):
    """Create a quaternion array from an array of the vector parts.

    Essentially, this just inserts a 0 in front of each vector part, and
    re-interprets the result as a quaternion.

    Parameters
    ----------
    v : array_like
        Array of vector parts of quaternions.  When interpreted as a numpy array,
        if the dtype is `quaternion`, the array is returned immediately, and the
        following argument is ignored.  Otherwise, it it must be a float array with
        dimension `vector_axis` of size 3 or 4.
    vector_axis : int, optional
        The axis to interpret as containing the vector components.  The default is
        -1.

    Returns
    -------
    q : array of quaternions
        Quaternions with vector parts corresponding to input vectors.

    """
    v = np.asarray(v)
    if v.dtype != np.quaternion:
        input_shape = v.shape
        # Normalize so the vector components always live on the last axis.
        if vector_axis != -1:
            v = np.moveaxis(v, vector_axis, -1)
        if v.shape[-1] == 3:
            # Prepend a zero scalar component to each 3-vector.
            v = from_float_array(np.insert(v, 0, 0.0, axis=-1))
        elif v.shape[-1] == 4:
            # Already 4 components; copy and zero out the scalar slot.
            v = v.copy()
            v[..., 0] = 0.0
            v = from_float_array(v)
        else:
            raise ValueError(
                "Vector input has shape {0}, which cannot be interpreted as quaternions ".format(input_shape)
                + "with vector axis {0}".format(vector_axis)
            )
    return v
def as_vector_part(q):
    """Create an array of vector parts from an array of quaternions.

    Parameters
    ----------
    q : quaternion array_like
        Array of quaternions.

    Returns
    -------
    v : array
        Float array of shape `q.shape + (3,)`

    """
    # Drop the leading (scalar) component of the float view of each quaternion.
    quat_array = np.asarray(q, dtype=np.quaternion)
    float_view = as_float_array(quat_array)
    return float_view[..., 1:]
def as_spinor_array(a):
    """View a quaternion array as spinors in two-complex representation

    This function is relatively slow and scales poorly, because memory
    copying is apparently involved -- I think it's due to the
    "advanced indexing" required to swap the columns.

    """
    a = np.atleast_1d(a)
    assert a.dtype == np.dtype(np.quaternion)
    # I'm not sure why it has to be so complicated, but all of these steps
    # appear to be necessary in this case.
    # Mechanically: reorder each quaternion's float components to
    # (w, z, y, x) so that adjacent float pairs become the two complex
    # numbers of the spinor; the fancy indexing forces a copy.
    return a.view(np.float64).reshape(a.shape + (4,))[..., [0, 3, 2, 1]].ravel().view(np.complex128).reshape(a.shape + (2,))
def as_rotation_matrix(q):
    """Convert input quaternion to 3x3 rotation matrix

    For any quaternion `q`, this function returns a matrix `m` such that, for every
    vector `v`, we have

        m @ v.vec == q * v * q.conjugate()

    Here, `@` is the standard python matrix multiplication operator and `v.vec` is
    the 3-vector part of the quaternion `v`.

    Parameters
    ----------
    q : quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must all be nonzero

    Returns
    -------
    m : float array
        Output shape is q.shape+(3,3).  This matrix should multiply (from
        the left) a column vector to produce the rotated column vector.

    Raises
    ------
    ZeroDivisionError
        If any of the input quaternions have norm 0.0.

    """
    if q.shape == () and not isinstance(q, np.ndarray):  # This is just a single quaternion
        n = q.norm()
        if n == 0.0:
            raise ZeroDivisionError("Input to `as_rotation_matrix({0})` has zero norm".format(q))
        elif abs(n-1.0) < _eps:  # Input q is basically normalized
            # Standard quaternion-to-matrix formula for a unit quaternion.
            return np.array([
                [1 - 2*(q.y**2 + q.z**2),   2*(q.x*q.y - q.z*q.w),      2*(q.x*q.z + q.y*q.w)],
                [2*(q.x*q.y + q.z*q.w),     1 - 2*(q.x**2 + q.z**2),    2*(q.y*q.z - q.x*q.w)],
                [2*(q.x*q.z - q.y*q.w),     2*(q.y*q.z + q.x*q.w),      1 - 2*(q.x**2 + q.y**2)]
            ])
        else:  # Input q is not normalized
            # Same formula with each quadratic term divided by the norm.
            return np.array([
                [1 - 2*(q.y**2 + q.z**2)/n, 2*(q.x*q.y - q.z*q.w)/n,    2*(q.x*q.z + q.y*q.w)/n],
                [2*(q.x*q.y + q.z*q.w)/n,   1 - 2*(q.x**2 + q.z**2)/n,  2*(q.y*q.z - q.x*q.w)/n],
                [2*(q.x*q.z - q.y*q.w)/n,   2*(q.y*q.z + q.x*q.w)/n,    1 - 2*(q.x**2 + q.y**2)/n]
            ])
    else:  # This is an array of quaternions
        n = np.norm(q)
        if np.any(n == 0.0):
            raise ZeroDivisionError("Array input to `as_rotation_matrix` has at least one element with zero norm")
        else:  # Assume input q is not normalized
            # Vectorized version of the normalized-by-n formula above;
            # q[..., 0] is w, q[..., 1:] are x, y, z.
            m = np.empty(q.shape + (3, 3))
            q = as_float_array(q)
            m[..., 0, 0] = 1.0 - 2*(q[..., 2]**2 + q[..., 3]**2)/n
            m[..., 0, 1] = 2*(q[..., 1]*q[..., 2] - q[..., 3]*q[..., 0])/n
            m[..., 0, 2] = 2*(q[..., 1]*q[..., 3] + q[..., 2]*q[..., 0])/n
            m[..., 1, 0] = 2*(q[..., 1]*q[..., 2] + q[..., 3]*q[..., 0])/n
            m[..., 1, 1] = 1.0 - 2*(q[..., 1]**2 + q[..., 3]**2)/n
            m[..., 1, 2] = 2*(q[..., 2]*q[..., 3] - q[..., 1]*q[..., 0])/n
            m[..., 2, 0] = 2*(q[..., 1]*q[..., 3] - q[..., 2]*q[..., 0])/n
            m[..., 2, 1] = 2*(q[..., 2]*q[..., 3] + q[..., 1]*q[..., 0])/n
            m[..., 2, 2] = 1.0 - 2*(q[..., 1]**2 + q[..., 2]**2)/n
            return m
def from_rotation_matrix(rot, nonorthogonal=True):
    """Convert input 3x3 rotation matrix to unit quaternion

    For any orthogonal matrix `rot`, this function returns a quaternion `q` such
    that, for every pure-vector quaternion `v`, we have

        q * v * q.conjugate() == rot @ v.vec

    Here, `@` is the standard python matrix multiplication operator and `v.vec` is
    the 3-vector part of the quaternion `v`.  If `rot` is not orthogonal the
    "closest" orthogonal matrix is used; see Notes below.

    Parameters
    ----------
    rot : (..., N, 3, 3) float array
        Each 3x3 matrix represents a rotation by multiplying (from the left) a
        column vector to produce a rotated column vector.  Note that this input may
        actually have ndims>3; it is just assumed that the last two dimensions have
        size 3, representing the matrix.
    nonorthogonal : bool, optional
        If scipy.linalg is available, use the more robust algorithm of Bar-Itzhack.
        Default value is True.

    Returns
    -------
    q : array of quaternions
        Unit quaternions resulting in rotations corresponding to input rotations.
        Output shape is rot.shape[:-2].

    Raises
    ------
    LinAlgError
        If any of the eigenvalue solutions does not converge

    Notes
    -----
    By default, if scipy.linalg is available, this function uses Bar-Itzhack's
    algorithm to allow for non-orthogonal matrices.  [J. Guidance, Vol. 23, No. 6,
    p. 1085 <http://dx.doi.org/10.2514/2.4654>]  This will almost certainly be quite
    a bit slower than simpler versions, though it will be more robust to numerical
    errors in the rotation matrix.  Also note that Bar-Itzhack uses some pretty
    weird conventions.  The last component of the quaternion appears to represent
    the scalar, and the quaternion itself is conjugated relative to the convention
    used throughout this module.

    If scipy.linalg is not available or if the optional `nonorthogonal` parameter
    is set to `False`, this function falls back to the possibly faster, but less
    robust, algorithm of Markley [J. Guidance, Vol. 31, No. 2, p. 440
    <http://dx.doi.org/10.2514/1.31730>].

    """
    try:
        from scipy import linalg
    except ImportError:
        linalg = False

    # np.asarray replaces np.array(rot, copy=False): identical semantics
    # (no copy when possible), but np.array(..., copy=False) is an error
    # under NumPy 2.0 whenever a copy is actually required.
    rot = np.asarray(rot)
    shape = rot.shape[:-2]

    if linalg and nonorthogonal:
        from operator import mul
        from functools import reduce

        # Build Bar-Itzhack's symmetric 4x4 K3 matrix; the eigenvector of its
        # largest eigenvalue encodes the optimal quaternion.
        K3 = np.empty(shape+(4, 4))
        K3[..., 0, 0] = (rot[..., 0, 0] - rot[..., 1, 1] - rot[..., 2, 2])/3.0
        K3[..., 0, 1] = (rot[..., 1, 0] + rot[..., 0, 1])/3.0
        K3[..., 0, 2] = (rot[..., 2, 0] + rot[..., 0, 2])/3.0
        K3[..., 0, 3] = (rot[..., 1, 2] - rot[..., 2, 1])/3.0
        K3[..., 1, 0] = K3[..., 0, 1]
        K3[..., 1, 1] = (rot[..., 1, 1] - rot[..., 0, 0] - rot[..., 2, 2])/3.0
        K3[..., 1, 2] = (rot[..., 2, 1] + rot[..., 1, 2])/3.0
        K3[..., 1, 3] = (rot[..., 2, 0] - rot[..., 0, 2])/3.0
        K3[..., 2, 0] = K3[..., 0, 2]
        K3[..., 2, 1] = K3[..., 1, 2]
        K3[..., 2, 2] = (rot[..., 2, 2] - rot[..., 0, 0] - rot[..., 1, 1])/3.0
        K3[..., 2, 3] = (rot[..., 0, 1] - rot[..., 1, 0])/3.0
        K3[..., 3, 0] = K3[..., 0, 3]
        K3[..., 3, 1] = K3[..., 1, 3]
        K3[..., 3, 2] = K3[..., 2, 3]
        K3[..., 3, 3] = (rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2])/3.0

        # NOTE(review): scipy.linalg.eigh's `eigvals=` keyword is deprecated in
        # favor of `subset_by_index=`; left unchanged here to keep compatibility
        # with older scipy versions — confirm against the project's scipy floor.
        if not shape:
            q = zero.copy()
            eigvals, eigvecs = linalg.eigh(K3.T, eigvals=(3, 3))
            q.components[0] = eigvecs[-1]
            q.components[1:] = -eigvecs[:-1].flatten()
            return q
        else:
            q = np.empty(shape+(4,), dtype=np.float64)
            for flat_index in range(reduce(mul, shape)):
                multi_index = np.unravel_index(flat_index, shape)
                eigvals, eigvecs = linalg.eigh(K3[multi_index], eigvals=(3, 3))
                q[multi_index+(0,)] = eigvecs[-1]
                q[multi_index+(slice(1,None),)] = -eigvecs[:-1].flatten()
            return as_quat_array(q)

    else:  # No scipy.linalg or not `nonorthogonal`
        # Markley's method: pick the numerically largest of the four candidate
        # denominators (the three diagonal entries and the trace) per matrix.
        diagonals = np.empty(shape+(4,))
        diagonals[..., 0] = rot[..., 0, 0]
        diagonals[..., 1] = rot[..., 1, 1]
        diagonals[..., 2] = rot[..., 2, 2]
        diagonals[..., 3] = rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]

        indices = np.argmax(diagonals, axis=-1)

        q = diagonals  # reuse storage space
        indices_i = (indices == 0)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
            q[indices_i, 1] = 1 + rot_i[..., 0, 0] - rot_i[..., 1, 1] - rot_i[..., 2, 2]
            q[indices_i, 2] = rot_i[..., 0, 1] + rot_i[..., 1, 0]
            q[indices_i, 3] = rot_i[..., 0, 2] + rot_i[..., 2, 0]
        indices_i = (indices == 1)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
            q[indices_i, 1] = rot_i[..., 1, 0] + rot_i[..., 0, 1]
            q[indices_i, 2] = 1 - rot_i[..., 0, 0] + rot_i[..., 1, 1] - rot_i[..., 2, 2]
            q[indices_i, 3] = rot_i[..., 1, 2] + rot_i[..., 2, 1]
        indices_i = (indices == 2)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
            q[indices_i, 1] = rot_i[..., 2, 0] + rot_i[..., 0, 2]
            q[indices_i, 2] = rot_i[..., 2, 1] + rot_i[..., 1, 2]
            q[indices_i, 3] = 1 - rot_i[..., 0, 0] - rot_i[..., 1, 1] + rot_i[..., 2, 2]
        indices_i = (indices == 3)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = 1 + rot_i[..., 0, 0] + rot_i[..., 1, 1] + rot_i[..., 2, 2]
            q[indices_i, 1] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
            q[indices_i, 2] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
            q[indices_i, 3] = rot_i[..., 1, 0] - rot_i[..., 0, 1]

        q /= np.linalg.norm(q, axis=-1)[..., np.newaxis]

        return as_quat_array(q)
def as_rotation_vector(q):
    """Convert input quaternion to the axis-angle representation

    Note that if any of the input quaternions has norm zero, no error is
    raised, but NaNs will appear in the output.

    Parameters
    ----------
    q : quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must all be nonzero

    Returns
    -------
    rot : float array
        Output shape is q.shape+(3,).  Each vector represents the axis of
        the rotation, with norm proportional to the angle of the rotation in
        radians.

    """
    # 2*log of the normalized quaternion yields a pure-vector quaternion;
    # drop the (zero) scalar component to get the rotation vector.
    return as_float_array(2*np.log(np.normalized(q)))[..., 1:]
def from_rotation_vector(rot):
    """Convert input 3-vector in axis-angle representation to unit quaternion

    Parameters
    ----------
    rot : (Nx3) float array
        Each vector represents the axis of the rotation, with norm
        proportional to the angle of the rotation in radians.

    Returns
    -------
    q : array of quaternions
        Unit quaternions resulting in rotations corresponding to input
        rotations.  Output shape is rot.shape[:-1].
    """
    # np.asarray replaces the old np.array(rot, copy=False): identical
    # behavior on NumPy 1.x, but copy=False raises under NumPy >= 2.0
    # whenever a copy would be required.
    rot = np.asarray(rot)
    quats = np.zeros(rot.shape[:-1] + (4,))
    # Half the rotation vector forms the vector part of a pure quaternion;
    # exponentiating it yields the corresponding unit rotation quaternion.
    quats[..., 1:] = rot[...] / 2
    quats = as_quat_array(quats)
    return np.exp(quats)
def as_euler_angles(q):
    """Open Pandora's Box

    If somebody is trying to make you use Euler angles, tell them no, and
    walk away, and go and tell your mum.

    You don't want to use Euler angles.  They are awful.  Stay away.  It's
    one thing to convert from Euler angles to quaternions; at least you're
    moving in the right direction.  But to go the other way?!  It's just not
    right.

    Assumes the Euler angles correspond to the quaternion R via

        R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)

    The angles are naturally in radians.

    NOTE: Before opening an issue reporting something "wrong" with this
    function, be sure to read all of the following page, *especially* the
    very last section about opening issues or pull requests.
    <https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>

    Parameters
    ----------
    q : quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must all be nonzero

    Returns
    -------
    alpha_beta_gamma : float array
        Output shape is q.shape+(3,).  These represent the angles (alpha,
        beta, gamma) in radians, where the normalized input quaternion
        represents `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`.

    Raises
    ------
    AllHell
        ...if you try to actually use Euler angles, when you could have
        been using quaternions like a sensible person.
    """
    alpha_beta_gamma = np.empty(q.shape + (3,), dtype=np.float64)
    # np.norm is the ufunc added to numpy by the quaternion package.  It is
    # used here as the sum of squared components (the division below pairs
    # it with q0**2 + q3**2) -- presumably the squared norm; TODO confirm.
    n = np.norm(q)
    # View the quaternions as a plain float array: [..., 0] is the scalar
    # part, [..., 1:4] are the x, y, z components.
    q = as_float_array(q)
    # alpha and gamma are the sum/difference of the same two arctan terms.
    alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2])
    alpha_beta_gamma[..., 1] = 2*np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2)/n))
    alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2])
    return alpha_beta_gamma
def from_euler_angles(alpha_beta_gamma, beta=None, gamma=None):
    """Improve your life drastically

    Assumes the Euler angles correspond to the quaternion R via

        R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)

    The angles naturally must be in radians for this to make any sense.

    NOTE: Before opening an issue reporting something "wrong" with this
    function, be sure to read all of the following page, *especially* the
    very last section about opening issues or pull requests.
    <https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>

    Parameters
    ----------
    alpha_beta_gamma : float or array of floats
        Either an array whose last dimension has size 3, holding the
        (alpha, beta, gamma) radian values for each rotation, or just the
        alpha values, in which case `beta` and `gamma` must also be given.
    beta : None, float, or array of floats
        Must broadcast against the first and third arguments if given.
    gamma : None, float, or array of floats
        Must broadcast against the first and second arguments if given.

    Returns
    -------
    R : quaternion array
        Same shape as the input, except that the last dimension is removed.
    """
    # Accept either a packed (..., 3) array or three separate angle arrays.
    if gamma is None:
        packed = np.asarray(alpha_beta_gamma, dtype=np.double)
        alpha, beta, gamma = packed[..., 0], packed[..., 1], packed[..., 2]
    else:
        alpha = np.asarray(alpha_beta_gamma, dtype=np.double)
        beta = np.asarray(beta, dtype=np.double)
        gamma = np.asarray(gamma, dtype=np.double)
    # Precompute the half angles appearing in the product of the three
    # elementary rotations exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2).
    half_beta = beta / 2
    half_sum = (alpha + gamma) / 2
    half_diff = (alpha - gamma) / 2
    components = np.empty(np.broadcast(alpha, beta, gamma).shape + (4,), dtype=np.double)
    components[..., 0] = np.cos(half_beta) * np.cos(half_sum)   # scalar part
    components[..., 1] = -np.sin(half_beta) * np.sin(half_diff)  # x part
    components[..., 2] = np.sin(half_beta) * np.cos(half_diff)   # y part
    components[..., 3] = np.cos(half_beta) * np.sin(half_sum)    # z part
    return as_quat_array(components)
def as_spherical_coords(q):
    """Return the spherical coordinates corresponding to this quaternion

    Spherical coordinates carry less information than a quaternion, so this
    conversion is lossy.  The returned coordinates represent the point(s)
    on the sphere to which the input quaternion(s) rotate the z axis.

    Parameters
    ----------
    q : quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must be nonzero

    Returns
    -------
    vartheta_varphi : float array
        Output shape is q.shape+(2,).  These represent the angles (vartheta,
        varphi) in radians, where the normalized input quaternion represents
        `exp(varphi*z/2) * exp(vartheta*y/2)`, up to an arbitrary initial
        rotation about `z`.
    """
    # Euler decomposition gives (alpha, beta, gamma) = (varphi, vartheta, ...);
    # return the first two angles in swapped order and drop gamma.
    angles = as_euler_angles(q)
    return angles[..., 1::-1]
def from_spherical_coords(theta_phi, phi=None):
    """Return the quaternion corresponding to these spherical coordinates

    Assumes the spherical coordinates correspond to the quaternion R via

        R = exp(phi*z/2) * exp(theta*y/2)

    The angles naturally must be in radians for this to make any sense.

    Note that this quaternion rotates `z` onto the point with the given
    spherical coordinates, but also rotates `x` and `y` onto the usual basis
    vectors (theta and phi, respectively) at that point.

    Parameters
    ----------
    theta_phi : float or array of floats
        Either an array whose last dimension has size 2, holding the
        (theta, phi) radian values for each point, or just the theta values,
        in which case `phi` must also be given.
    phi : None, float, or array of floats
        Must broadcast against the first argument if given.

    Returns
    -------
    R : quaternion array
        If `phi` is omitted, the shape is the input shape minus its last
        dimension; otherwise it is the broadcast shape of the two inputs.
    """
    # Accept either a packed (..., 2) array or two separate angle arrays.
    if phi is None:
        packed = np.asarray(theta_phi, dtype=np.double)
        theta, phi = packed[..., 0], packed[..., 1]
    else:
        theta = np.asarray(theta_phi, dtype=np.double)
        phi = np.asarray(phi, dtype=np.double)
    half_theta = theta / 2
    half_phi = phi / 2
    components = np.empty(np.broadcast(theta, phi).shape + (4,), dtype=np.double)
    components[..., 0] = np.cos(half_phi) * np.cos(half_theta)   # scalar part
    components[..., 1] = -np.sin(half_phi) * np.sin(half_theta)  # x part
    components[..., 2] = np.cos(half_phi) * np.sin(half_theta)   # y part
    components[..., 3] = np.sin(half_phi) * np.cos(half_theta)   # z part
    return as_quat_array(components)
def rotate_vectors(R, v, axis=-1):
    """Rotate vectors by given quaternions

    This function is for the case where each quaternion (possibly the only input
    quaternion) is used to rotate multiple vectors.  If each quaternion is only
    rotating a single vector, it is more efficient to use the standard formula

        vprime = R * v * R.conjugate()

    (Note that `from_vector_part` and `as_vector_part` may be helpful.)

    Parameters
    ----------
    R : quaternion array
        Quaternions by which to rotate the input vectors
    v : float array
        Three-vectors to be rotated.
    axis : int
        Axis of the `v` array to use as the vector dimension.  This
        axis of `v` must have length 3.

    Returns
    -------
    vprime : float array
        The rotated vectors.  This array has shape R.shape+v.shape.

    Notes
    -----
    For simplicity, this function converts the input quaternion(s) to matrix form,
    and rotates the input vector(s) by the usual matrix multiplication.  As noted
    above, if each input quaternion is only used to rotate a single vector, this is
    not the most efficient approach.  The simple formula shown above is faster than
    this function, though it should be noted that the most efficient approach (in
    terms of operation counts) is to use the formula

      v' = v + 2 * r x (s * v + r x v) / m

    where x represents the cross product, s and r are the scalar and vector parts
    of the quaternion, respectively, and m is the sum of the squares of the
    components of the quaternion.  If you are looping over a very large number of
    quaternions, and just rotating a single vector each time, you might want to
    implement that alternative algorithm using numba (or something that doesn't use
    python).
    """
    # np.quaternion is the scalar dtype registered by this package.
    R = np.asarray(R, dtype=np.quaternion)
    v = np.asarray(v, dtype=float)
    # Validate that `v` really carries 3-vectors along the requested axis.
    if v.ndim < 1 or 3 not in v.shape:
        raise ValueError("Input `v` does not have at least one dimension of length 3")
    if v.shape[axis] != 3:
        raise ValueError("Input `v` axis {0} has length {1}, not 3.".format(axis, v.shape[axis]))
    # m has shape R.shape+(3,3); its last axis contracts with `v`'s vector axis.
    m = as_rotation_matrix(R)
    # Index of the matrix "row" axis in the tensordot result (the last two
    # axes of m are the 3x3 matrix; tensordot consumes the final one).
    tensordot_axis = m.ndim-2
    # Where the rotated-vector axis should end up so the output layout is
    # R.shape followed by v.shape (with the length-3 axis kept in place).
    final_axis = tensordot_axis + (axis % v.ndim)
    return np.moveaxis(
        np.tensordot(m, v, axes=(-1, axis)),
        tensordot_axis, final_axis
    )
def isclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False):
    """
    Returns a boolean array where two arrays are element-wise equal within a
    tolerance.

    This function is essentially a copy of the `numpy.isclose` function,
    with different default tolerances and minor changes necessary to deal
    correctly with quaternions.

    The tolerance values are positive, typically very small numbers.  The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.

    Returns
    -------
    y : array_like
        Returns a boolean array of where `a` and `b` are equal within the
        given tolerance.  If both `a` and `b` are scalars, returns a single
        boolean value.

    See Also
    --------
    allclose

    Notes
    -----
    For finite values, isclose uses the following equation to test whether
    two floating point values are equivalent:

        absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    The above equation is not symmetric in `a` and `b`, so that
    `isclose(a, b)` might be different from `isclose(b, a)` in
    some rare cases.
    """
    def within_tol(x, y, atol, rtol):
        with np.errstate(invalid='ignore'):
            result = np.less_equal(abs(x-y), atol + rtol * abs(y))
        return result[()]

    # np.atleast_1d(np.asanyarray(...)) replaces the old
    # np.array(..., copy=False, subok=True, ndmin=1): it likewise avoids
    # copying and preserves subclasses (e.g. numpy.ma), but keeps working
    # under NumPy >= 2.0, where copy=False means "never copy" and raises
    # whenever a copy would be needed.
    x = np.atleast_1d(np.asanyarray(a))
    y = np.atleast_1d(np.asanyarray(b))

    # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
    # This will cause casting of x later.
    try:
        dt = np.result_type(y, 1.)
    except TypeError:
        # result_type cannot promote quaternions with a float scalar.
        dt = np.dtype(np.quaternion)
    y = np.asanyarray(y, dtype=dt)

    xfin = np.isfinite(x)
    yfin = np.isfinite(y)
    if np.all(xfin) and np.all(yfin):
        return within_tol(x, y, atol, rtol)
    else:
        finite = xfin & yfin
        cond = np.zeros_like(finite, subok=True)
        # Because we're using boolean indexing, x & y must be the same shape.
        # Ideally, we'd just do x, y = broadcast_arrays(x, y).  It's in
        # lib.stride_tricks, though, so we can't import it here.
        x = x * np.ones_like(cond)
        y = y * np.ones_like(cond)
        # Avoid subtraction with infinite/nan values...
        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
        # Check for equality of infinite values...
        cond[~finite] = (x[~finite] == y[~finite])
        if equal_nan:
            # Make NaN == NaN
            both_nan = np.isnan(x) & np.isnan(y)
            cond[both_nan] = both_nan[both_nan]
        return cond[()]


def allclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False, verbose=False):
    """Returns True if two arrays are element-wise equal within a tolerance.

    This function is essentially a wrapper for the `quaternion.isclose`
    function, but returns a single boolean value of True if all elements
    of the output from `quaternion.isclose` are True, and False otherwise.

    Note that this function has stricter tolerances than the
    `numpy.allclose` function, as well as the additional `verbose` option.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.
    verbose : bool
        If the return value is False, all the non-close values are printed,
        iterating through the non-close indices in order, displaying the
        array values along with the index, with a separate line for each
        pair of values.

    See Also
    --------
    isclose, numpy.all, numpy.any, numpy.allclose

    Returns
    -------
    allclose : bool
        Returns True if the two arrays are equal within the given
        tolerance; False otherwise.

    Notes
    -----
    If the following equation is element-wise True, then allclose returns
    True.

        absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    The above equation is not symmetric in `a` and `b`, so that
    `allclose(a, b)` might be different from `allclose(b, a)` in
    some rare cases.
    """
    close = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    result = np.all(close)
    if verbose and not result:
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        a, b = np.broadcast_arrays(a, b)
        print('Non-close values:')
        # np.nonzero returns a *tuple* of per-axis index arrays; transposing
        # it yields one full index per non-close element, which is what the
        # original `for i in np.nonzero(close == False)` failed to do.
        for idx in np.transpose(np.nonzero(~close)):
            idx = tuple(idx)
            print('    a[{0}]={1}\n    b[{0}]={2}'.format(idx, a[idx], b[idx]))
    return result
| mit | 614504dc9bd1dd339027983ba41d1266 | 37.694761 | 124 | 0.595308 | 3.484513 | false | false | false | false |
pythondigest/pythondigest | digest/migrations/0011_auto_20150730_0556.py | 1 | 1234 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines the choices and verbose
    # names of two CharFields on the `parsingrules` model.  The \uXXXX
    # escapes are Russian labels; their decoded meanings are noted below.
    dependencies = [('digest', '0010_auto_20150730_0553'), ]
    operations = [
        migrations.AlterField(
            model_name='parsingrules',
            name='then_action',
            field=models.CharField(
                default=b'item_title',
                max_length=255,
                # verbose_name decodes to "Действие" ("Action")
                verbose_name='\u0414\u0435\u0439\u0441\u0442\u0432\u0438\u0435',
                # single choice: b'set' labeled "Установить" ("Set")
                choices=[(
                    b'set',
                    '\u0423\u0441\u0442\u0430\u043d\u043e\u0432\u0438\u0442\u044c'
                )]), ),
        migrations.AlterField(
            model_name='parsingrules',
            name='then_element',
            field=models.CharField(
                default=b'item_title',
                max_length=255,
                # verbose_name decodes to "Элемент действия" ("Action element")
                verbose_name='\u042d\u043b\u0435\u043c\u0435\u043d\u0442 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u044f',
                # choices: b'category' = "Категория" ("Category"),
                # b'status' = "Статус" ("Status")
                choices=[(
                    b'category',
                    '\u041a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044f'
                ), (b'status', '\u0421\u0442\u0430\u0442\u0443\u0441')]), ),
    ]
| mit | 963ecbb7715f9ee335d95ff1e601e26e | 36.393939 | 123 | 0.535656 | 3.505682 | false | false | false | false |
open2c/cooltools | cooltools/sandbox/pairs_scaling_functions.py | 1 | 1921 | ###################################
#
# several functions for calculating scalings using pairs
# they used to reside in cooltools.expected module
#
####################################
import numpy as np
from ..lib import numutils
def _contact_areas(distbins, scaffold_length):
distbins = distbins.astype(float)
scaffold_length = float(scaffold_length)
outer_areas = np.maximum(scaffold_length - distbins[:-1], 0) ** 2
inner_areas = np.maximum(scaffold_length - distbins[1:], 0) ** 2
return 0.5 * (outer_areas - inner_areas)
def contact_areas(distbins, region1, region2):
if region1 == region2:
start, end = region1
areas = _contact_areas(distbins, end - start)
else:
start1, end1 = region1
start2, end2 = region2
if start2 <= start1:
start1, start2 = start2, start1
end1, end2 = end2, end1
areas = (
_contact_areas(distbins, end2 - start1)
- _contact_areas(distbins, start2 - start1)
- _contact_areas(distbins, end2 - end1)
)
if end1 < start2:
areas += _contact_areas(distbins, start2 - end1)
return areas
def compute_scaling(df, region1, region2=None, dmin=int(1e1), dmax=int(1e7), n_bins=50):
    """Histogram pair separations within a region pair over log-spaced bins.

    Returns (distbins, obs, areas): the logarithmic bin edges, the observed
    pair counts per bin, and the corresponding contact areas.  Works with
    either in-memory (numpy-backed) or dask-backed pair tables.
    """
    import dask.array as da

    if region2 is None:
        region2 = region1
    distbins = numutils.logbins(dmin, dmax, N=n_bins)
    areas = contact_areas(distbins, region1, region2)
    # Keep only pairs whose two sides fall inside region1 and region2.
    mask = (
        (df["pos1"] >= region1[0])
        & (df["pos1"] < region1[1])
        & (df["pos2"] >= region2[0])
        & (df["pos2"] < region2[1])
    )
    selected = df[mask]
    dists = (selected["pos2"] - selected["pos1"]).values
    in_range = dists[(dists >= dmin) & (dists < dmax)]
    if isinstance(dists, da.Array):
        obs, _ = da.histogram(in_range, bins=distbins)
    else:
        obs, _ = np.histogram(in_range, bins=distbins)
    return distbins, obs, areas
| mit | 2721ff477e3679f85d288066045fafa7 | 29.492063 | 88 | 0.57418 | 3.295026 | false | false | false | false |
open2c/cooltools | cooltools/lib/plotting.py | 1 | 3091 | """
Migrated from :mod:`mirnylib.plotting`.
"""
from matplotlib.cm import register_cmap
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# Named color palettes as (N, 3) float arrays: RGB channels are written as
# 0-255 integers below and divided by 255 into the [0, 1] range matplotlib
# expects.  Each palette runs from light to black/dark shades.
PALETTES = {
    "fall": np.array(
        (
            (255, 255, 255),
            (255, 255, 204),
            (255, 237, 160),
            (254, 217, 118),
            (254, 178, 76),
            (253, 141, 60),
            (252, 78, 42),
            (227, 26, 28),
            (189, 0, 38),
            (128, 0, 38),
            (0, 0, 0),
        )
    )
    / 255,
    "blues": np.array(
        (
            (255, 255, 255),
            (180, 204, 225),
            (116, 169, 207),
            (54, 144, 192),
            (5, 112, 176),
            (4, 87, 135),
            (3, 65, 100),
            (2, 40, 66),
            (1, 20, 30),
            (0, 0, 0),
        )
    )
    / 255,
    "acidblues": np.array(
        (
            (255, 255, 255),
            (162, 192, 222),
            (140, 137, 187),
            (140, 87, 167),
            (140, 45, 143),
            (120, 20, 120),
            (90, 15, 90),
            (60, 10, 60),
            (30, 5, 30),
            (0, 0, 0),
        )
    )
    / 255,
    "nmeth": np.array(
        (
            (236, 250, 255),
            (148, 189, 217),
            (118, 169, 68),
            (131, 111, 43),
            (122, 47, 25),
            (41, 0, 20),
        )
    )
    / 255,
}
def list_to_colormap(color_list, name=None):
    """Build a 256-entry LinearSegmentedColormap from a list of colors.

    Channel values may be given either in [0, 1] or in [0, 255]; the
    latter are rescaled to [0, 1].  Raises ValueError for values outside
    both ranges.
    """
    colors = np.array(color_list)
    lo = colors.min()
    hi = colors.max()
    if lo < 0:
        raise ValueError("Colors should be 0 to 1, or 0 to 255")
    if hi > 1.0:
        if hi > 255:
            raise ValueError("Colors should be 0 to 1 or 0 to 255")
        colors = colors / 255.0
    return mpl.colors.LinearSegmentedColormap.from_list(name, colors, 256)
def get_cmap(name):
    """Return the named palette as a colormap; a trailing "_r" reverses it."""
    wants_reversed = name.endswith("_r")
    key = name[:-2] if wants_reversed else name
    try:
        pal = PALETTES[key]
    except KeyError:
        raise ValueError('Palette not found "{}"'.format(name))
    if wants_reversed:
        pal = pal[::-1]
    return list_to_colormap(pal)
def _register_cmaps():
    """Register every palette (and its reversed variant) with matplotlib."""
    for cmap_name, palette in PALETTES.items():
        register_cmap(cmap_name, list_to_colormap(palette))
        register_cmap(cmap_name + "_r", list_to_colormap(palette[::-1]))


# Register the palettes as soon as this module is imported.
_register_cmaps()
def gridspec_inches(wcols, hrows, fig_kwargs=None):
    """Create a figure and GridSpec whose rows/columns have fixed inch sizes.

    Parameters
    ----------
    wcols : sequence of float
        Width of each column, in inches.
    hrows : sequence of float
        Height of each row, in inches.
    fig_kwargs : dict, optional
        Extra keyword arguments forwarded to ``plt.figure``.

    Returns
    -------
    (fig, gs) : the created matplotlib Figure and GridSpec.
    """
    # Avoid the mutable-default-argument pitfall; None means "no extras".
    if fig_kwargs is None:
        fig_kwargs = {}
    fig_height_inches = sum(hrows)
    fig_width_inches = sum(wcols)
    fig = plt.figure(
        figsize=(fig_width_inches, fig_height_inches),
        subplotpars=mpl.figure.SubplotParams(
            left=0, right=1, bottom=0, top=1, wspace=0, hspace=0.0
        ),
        **fig_kwargs
    )
    # Re-assert the requested size explicitly (kept from the original code).
    fig.set_size_inches(fig_width_inches, fig_height_inches, forward=True)
    gs = mpl.gridspec.GridSpec(
        len(hrows),
        len(wcols),
        left=0,
        right=1,
        top=1,
        bottom=0,
        wspace=0,
        hspace=0,
        width_ratios=wcols,
        height_ratios=hrows,
    )
    return fig, gs
| mit | ad9b4d5eb9db04e7e3097f9c23f02361 | 22.067164 | 78 | 0.459722 | 3.216441 | false | false | false | false |
thouska/spotpy | src/spotpy/database/sql.py | 1 | 2744 | import sqlite3
import sys
import numpy as np
from .base import database
class PickalableSWIG:
    # Mixin that makes wrapper objects picklable by storing only their
    # positional constructor arguments: unpickling re-runs __init__ with
    # them instead of trying to serialize the underlying C-level object.
    def __setstate__(self, state):
        # Rebuild the wrapped object from the saved constructor arguments.
        self.__init__(*state["args"])

    def __getstate__(self):
        # Subclasses must store their constructor args in `self.args`.
        return {"args": self.args}
class PickalableSQL3Connect(sqlite3.Connection, PickalableSWIG):
    # sqlite3.Connection that can be pickled (see PickalableSWIG): only the
    # constructor arguments are saved, and the connection is reopened on load.
    def __init__(self, *args, **kwargs):
        self.args = args
        sqlite3.Connection.__init__(self, *args, **kwargs)
class PickalableSQL3Cursor(sqlite3.Cursor, PickalableSWIG):
    # sqlite3.Cursor that can be pickled (see PickalableSWIG): only the
    # constructor arguments are saved, and the cursor is recreated on load.
    def __init__(self, *args, **kwargs):
        self.args = args
        sqlite3.Cursor.__init__(self, *args, **kwargs)
class sql(database):
    """
    This class saves the process in the working storage. It can be used if
    safety matters.
    """

    def __init__(self, *args, **kwargs):
        """Open (and reset) the on-disk sqlite database and create the table."""
        import os

        # init base class
        super(sql, self).__init__(*args, **kwargs)
        # Remove any stale database from a previous run so the sampling
        # starts from an empty table.
        try:
            os.remove(self.dbname + ".db")
        except OSError:
            # File did not exist (or could not be removed) -- nothing to do.
            pass
        self.db = PickalableSQL3Connect(self.dbname + ".db")
        self.db_cursor = PickalableSQL3Cursor(self.db)
        # Create one REAL column per header entry.  NOTE: the SQL is built
        # by string concatenation; `dbname` and `header` come from the user
        # configuration, not from untrusted input.
        self.db_cursor.execute(
            """CREATE TABLE IF NOT EXISTS """
            + self.dbname
            + """
            ("""
            + " real ,".join(self.header)
            + """)"""
        )

    def save(self, objectivefunction, parameterlist, simulations=None, chains=1):
        """Append one row (likes, parameters, simulations, chain id)."""
        coll = (
            self.dim_dict["like"](objectivefunction)
            + self.dim_dict["par"](parameterlist)
            + self.dim_dict["simulation"](simulations)
            + [chains]
        )
        # Apply rounding of floats
        coll = map(self.db_precision, coll)
        self.db_cursor.execute(
            "INSERT INTO "
            + self.dbname
            + " VALUES ("
            + '"'
            + str('","'.join(map(str, coll)))
            + '"'
            + ")"
        )
        self.db.commit()

    def finalize(self):
        """Close the database connection after sampling."""
        self.db.close()

    def getdata(self):
        """Reopen the database and return all rows as a structured array."""
        self.db = PickalableSQL3Connect(self.dbname + ".db")
        self.db_cursor = PickalableSQL3Cursor(self.db)
        # Column names come from the table itself; all fields are float64.
        headers = [
            (row[1], "<f8")
            for row in self.db_cursor.execute("PRAGMA table_info(" + self.dbname + ");")
        ]
        back = np.array(
            [row for row in self.db_cursor.execute("SELECT * FROM " + self.dbname)],
            dtype=headers,
        )
        self.db.close()
        return back
| mit | 1e7c66a77d323c15c3b9e126d56b475c | 26.717172 | 97 | 0.525875 | 3.875706 | false | false | false | false |
thouska/spotpy | src/spotpy/algorithms/abc.py | 1 | 8953 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Patrick Lauer
"""
import random
import numpy as np
from . import _algorithm
class abc(_algorithm):
    """
    This class holds the Artificial Bee Colony (ABC) algorithm, based on Karaboga (2007).
    D. Karaboga, AN IDEA BASED ON HONEY BEE SWARM FOR NUMERICAL OPTIMIZATION,TECHNICAL REPORT-TR06, Erciyes University, Engineering Faculty, Computer Engineering Department 2005.
    D. Karaboga, B. Basturk, A powerful and Efficient Algorithm for Numerical Function Optimization: Artificial Bee Colony (ABC) Algorithm, Journal of Global Optimization, Volume:39, Issue:3,pp:459-171, November 2007,ISSN:0925-5001 , doi: 10.1007/s10898-007-9149-x
    """

    def __init__(self, *args, **kwargs):
        """
        Input
        ----------
        spot_setup: class
            model: function
                Should be callable with a parameter combination of the parameter-function
                and return an list of simulation results (as long as evaluation list)
            parameter: function
                When called, it should return a random parameter combination. Which can
                be e.g. uniform or Gaussian
            objectivefunction: function
                Should return the objectivefunction for a given list of a model simulation and
                observation.
            evaluation: function
                Should return the true values as return by the model.

        dbname: str
            * Name of the database where parameter, objectivefunction value and simulation results will be saved.

        dbformat: str
            * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
            * csv: A csv file will be created, which you can import afterwards.

        parallel: str
            * seq: Sequentiel sampling (default): Normal iterations on one core of your cpu.
            * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).

        save_sim: boolean
            * True:  Simulation results will be saved
            * False: Simulation results will not be saved
        """
        kwargs["optimization_direction"] = "maximize"
        kwargs["algorithm_name"] = "Artificial Bee Colony (ABC) algorithm"
        super(abc, self).__init__(*args, **kwargs)

    def sample(
        self, repetitions, eb=48, a=(1 / 10), peps=0.0001, ownlimit=False, limit=24
    ):
        """
        Parameters
        ----------
        repetitions: int
            maximum number of function evaluations allowed during optimization
        eb: int
            number of employed bees (half of population size)
        a: float
            mutation factor
        peps: float
            Convergence criterium
        ownlimit: boolean
            determines if an userdefined limit is set or not
        limit: int
            sets the limit
        """
        self.set_repetiton(repetitions)
        print(
            "Starting the ABC algorithm with " + str(repetitions) + " repetitions..."
        )
        # Initialize ABC parameters:
        randompar = self.parameter()["random"]
        self.nopt = randompar.size
        random.seed()
        # Abandonment limit: user-defined, or defaulting to the colony size.
        if ownlimit:
            self.limit = limit
        else:
            self.limit = eb
        lb, ub = self.parameter()["minbound"], self.parameter()["maxbound"]

        # Initialization: evaluate one random food source per employed bee.
        work = []
        icall = 0
        gnrng = 1e100
        # Calculate the objective function
        param_generator = ((rep, self.parameter()["random"]) for rep in range(eb))
        for rep, randompar, simulations in self.repeat(param_generator):
            # Calculate fitness
            like = self.postprocessing(
                rep, randompar, simulations, chains=1, negativlike=True
            )
            c = 0
            p = 0
            # Food-source record: [fitness, position, trial fitness,
            # trial position, abandonment counter, selection probability]
            work.append([like, randompar, like, randompar, c, p])
            icall += 1
            if self.status.stop:
                print("Stopping sampling")
                break

        while icall < repetitions and gnrng > peps:
            psum = 0
            # Employed bee phase
            # Generate new input parameters
            for i, val in enumerate(work):
                k = i
                while k == i:
                    k = random.randint(0, (eb - 1))
                j = random.randint(0, (self.nopt - 1))
                # Perturb one coordinate towards/away from a random partner.
                work[i][3][j] = work[i][1][j] + random.uniform(-a, a) * (
                    work[i][1][j] - work[k][1][j]
                )
                if work[i][3][j] < lb[j]:
                    work[i][3][j] = lb[j]
                if work[i][3][j] > ub[j]:
                    work[i][3][j] = ub[j]

            # Calculate the objective function
            param_generator = ((rep, work[rep][3]) for rep in range(eb))
            for rep, randompar, simulations in self.repeat(param_generator):
                # Calculate fitness; greedy selection keeps the better source.
                clike = self.postprocessing(
                    icall + eb, randompar, simulations, chains=2, negativlike=True
                )
                if clike > work[rep][0]:
                    work[rep][1] = work[rep][3]
                    work[rep][0] = clike
                    work[rep][4] = 0
                else:
                    work[rep][4] = work[rep][4] + 1
                icall += 1
                if self.status.stop:
                    print("Stopping sampling")
                    break

            # Probability distribution for roulette wheel selection
            bn = []
            for i, val in enumerate(work):
                psum = psum + (1 / work[i][0])
            for i, val in enumerate(work):
                work[i][5] = (1 / work[i][0]) / psum
                bn.append(work[i][5])
            bounds = np.cumsum(bn)
            # Onlooker bee phase
            # Roulette wheel selection
            for i, val in enumerate(work):
                pn = random.uniform(0, 1)
                k = i
                while k == i:
                    k = random.randint(0, eb - 1)
                for t, vol in enumerate(bounds):
                    if bounds[t] - pn >= 0:
                        z = t
                        break
                j = random.randint(0, (self.nopt - 1))
                # Generate new input parameters
                try:
                    work[i][3][j] = work[z][1][j] + random.uniform(-a, a) * (
                        work[z][1][j] - work[k][1][j]
                    )
                except UnboundLocalError:
                    # pn exceeded every cumulative bound before z was set;
                    # fall back to the first food source.
                    z = 0
                    work[i][3][j] = work[z][1][j] + random.uniform(-a, a) * (
                        work[z][1][j] - work[k][1][j]
                    )
                if work[i][3][j] < lb[j]:
                    work[i][3][j] = lb[j]
                if work[i][3][j] > ub[j]:
                    work[i][3][j] = ub[j]

            # Calculate the objective function
            param_generator = ((rep, work[rep][3]) for rep in range(eb))
            for rep, randompar, simulations in self.repeat(param_generator):
                # Calculate fitness
                clike = self.postprocessing(
                    icall + eb, randompar, simulations, chains=3, negativlike=True
                )
                if clike > work[rep][0]:
                    work[rep][1] = work[rep][3]
                    work[rep][0] = clike
                    work[rep][4] = 0
                else:
                    work[rep][4] = work[rep][4] + 1
                icall += 1
                if self.status.stop:
                    print("Stopping sampling")
                    break

            # Scout bee phase: abandon exhausted sources and redraw randomly.
            for i, val in enumerate(work):
                if work[i][4] >= self.limit:
                    work[i][1] = self.parameter()["random"]
                    work[i][4] = 0
                    t, work[i][0], simulations = self.simulate((icall, work[i][1]))
                    # Fix: postprocess the newly drawn parameters work[i][1];
                    # the previous code passed the stale `randompar` left over
                    # from the preceding loop.
                    clike = self.postprocessing(
                        icall + eb, work[i][1], simulations, chains=4, negativlike=True
                    )
                    work[i][0] = clike
                    icall += 1
                    if self.status.stop:
                        print("Stopping sampling")
                        break
            gnrng = -self.status.objectivefunction_max
            if icall >= repetitions:
                print("*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT")
                print("ON THE MAXIMUM NUMBER OF TRIALS ")
                print(repetitions)
                print("HAS BEEN EXCEEDED.")

            if gnrng < peps:
                print(
                    "THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE AT RUN"
                )
                print(icall)
        self.final_call()
| mit | d3609282f1b9edd13bb6a2fd2d06ac83 | 39.328829 | 264 | 0.497822 | 4.185601 | false | false | false | false |
znick/anytask | anytask/common/ordered_dict.py | 2 | 8806 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Already initialized (e.g. __init__ called twice): keep the
            # existing linked list instead of discarding prior order.
            self.__root
        except AttributeError:
            # Sentinel node of the circular doubly linked list; it links to
            # itself while the dict is empty.  Layout: [PREV, NEXT, KEY].
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        # (dict.__setitem__ is bound as a default argument so the raw dict
        # setter is used, as a fast local lookup.)
        if key not in self:
            root = self.__root
            last = root[0]
            # Splice the new link in just before the sentinel (i.e. at the end).
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        # Unlink the node: neighbors point past it in both directions.
        link_prev[1] = link_next
        link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the links explicitly so reference cycles between nodes
            # don't have to wait for the garbage collector.
            for node in self.__map.itervalues():
                del node[:]
            # Reset the sentinel to point at itself (empty list state).
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # Presumably covers instances whose __root/__map were never set
            # up (e.g. cleared before __init__ ran) -- TODO confirm; note the
            # guard also fires on Python 3, where dict has no itervalues.
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        # dict.pop is used directly so our own __delitem__ (which would try
        # to unlink the node a second time) is bypassed.
        value = dict.pop(self, key)
        return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v
        '''
        # The bare *args signature (instead of a named ``self``/``other``)
        # lets callers pass keyword arguments literally named 'self' or
        # 'other' without clashing with the positional parameters.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update # let subclasses override update without breaking __init__
    __marker = object()  # sentinel: distinguishes "no default given" from default=None
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running is a deliberately *shared* mutable default: a set of
        # (object id, thread id) keys used to break the recursion when the
        # dictionary (directly or indirectly) contains itself.
        # NOTE(review): _get_ident is presumably thread.get_ident imported
        # elsewhere in this module -- confirm.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Drop the attributes every OrderedDict carries (the linked-list
        # bookkeeping) so only extra per-instance state is pickled.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        # NOTE(review): KeysView is presumably the collections-ABC view
        # class imported elsewhere in this module -- confirm.
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        # NOTE(review): ValuesView presumably comes from the collections
        # ABCs imported elsewhere in this module -- confirm.
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        # NOTE(review): ItemsView presumably comes from the collections
        # ABCs imported elsewhere in this module -- confirm.
        return ItemsView(self)
| mit | b76ba8c47937a84fbc73f7204a7d1977 | 33 | 87 | 0.546446 | 3.982813 | false | false | false | false |
znick/anytask | anytask/users/migrations/0003_auto_20210228_1721.py | 1 | 2111 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-28 14:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment): relaxes the m2m
    # and status relations on UserProfile / UserProfileLog by adding
    # blank=True and pinning related_name / db_index options.
    # Generated migration -- do not edit the operations by hand.
    dependencies = [
        ('users', '0002_auto_20200416_1732'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='deleted_messages',
            field=models.ManyToManyField(blank=True, related_name='deleted_messages', to='mail.Message'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='send_notify_messages',
            field=models.ManyToManyField(blank=True, related_name='send_notify_messages', to='mail.Message'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='unread_messages',
            field=models.ManyToManyField(blank=True, related_name='unread_messages', to='mail.Message'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user_status',
            field=models.ManyToManyField(blank=True, db_index=True, related_name='users_by_status', to='users.UserStatus'),
        ),
        migrations.AlterField(
            model_name='userprofilelog',
            name='deleted_messages',
            field=models.ManyToManyField(blank=True, related_name='log_deleted_messages', to='mail.Message'),
        ),
        migrations.AlterField(
            model_name='userprofilelog',
            name='send_notify_messages',
            field=models.ManyToManyField(blank=True, related_name='log_send_notify_messages', to='mail.Message'),
        ),
        migrations.AlterField(
            model_name='userprofilelog',
            name='unread_messages',
            field=models.ManyToManyField(blank=True, related_name='log_unread_messages', to='mail.Message'),
        ),
        migrations.AlterField(
            model_name='userprofilelog',
            name='user_status',
            field=models.ManyToManyField(blank=True, db_index=True, to='users.UserStatus'),
        ),
    ]
| mit | 1ffb13c0d599709eed2d4607191b6865 | 37.381818 | 123 | 0.604453 | 4.407098 | false | false | false | false |
znick/anytask | anytask/mail/views.py | 1 | 10115 | # -*- coding: utf-8 -*-
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden, HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST, require_GET
from pytz import timezone as timezone_pytz
from courses.models import Course
from groups.models import Group
from mail.common import EmailRenderer
from mail.models import Message
from users.model_user_status import get_statuses
from users.models import UserProfile
# Month number -> translated month name, used by format_date() below.
# NOTE(review): ugettext is evaluated once at import time; if per-request
# language switching matters, this should use ugettext_lazy -- confirm.
MONTH = {
    1: _(u"january"),
    2: _(u"february"),
    3: _(u"march"),
    4: _(u"april"),
    5: _(u"may"),
    6: _(u"june"),
    7: _(u"july"),
    8: _(u"august"),
    9: _(u"september"),
    10: _(u"october"),
    11: _(u"november"),
    12: _(u"december")
}
@require_GET
@login_required
def mail_page(request):
    """Render the mailbox page for the current user."""
    user = request.user
    # Staff may arrive here from the admin with a preselected recipient
    # set stored in the session; expose its index and size to the template.
    preselected = {}
    from_staff = request.GET.get('from_staff')
    if user.is_staff and from_staff is not None and 'user_ids_send_mail_counter' in request.session:
        session_key = 'user_ids_send_mail_' + from_staff
        if session_key in request.session:
            preselected = {
                'index': from_staff,
                'length': len(request.session[session_key]),
            }
    # Staff can write to any active course; teachers only to their own.
    course_filter = {'is_active': True}
    if not user.is_staff:
        course_filter['teachers'] = user
    courses_teacher = Course.objects.filter(**course_filter)
    context = {
        "user": user,
        "user_profile": user.profile,
        "courses_teacher": courses_teacher,
        'user_statuses': get_statuses(),
        "users_from_staff_len": preselected,
        "snow_alert_message_fulltext": hasattr(settings, 'SEND_MESSAGE_FULLTEXT') and settings.SEND_MESSAGE_FULLTEXT,
    }
    return render(request, 'mail.html', context)
@require_GET
@login_required
def ajax_get_mailbox(request):
    # DataTables AJAX endpoint for the mailbox.  Returns one page of
    # messages for the requested folder and, as a side effect, applies any
    # read/unread/delete/undelete actions sent along with the request.
    # NOTE(review): state is mutated from a GET handler -- confirm whether
    # clients depend on this before converting to POST.
    response = dict()
    user = request.user
    user_profile = user.profile
    datatable_data = dict(request.GET)
    if "draw" not in datatable_data:
        # "draw" is sent by DataTables on every request; anything without
        # it is not a legitimate client.
        return HttpResponseForbidden()
    if "make_read[]" in datatable_data:
        if datatable_data["make_read[]"][0] == "all":
            user_profile.unread_messages.clear()
            user_profile.send_notify_messages.clear()
        else:
            # Direct m2m assignment: keep only the ids NOT marked as read.
            user_profile.unread_messages = list(
                user_profile.unread_messages
                .exclude(id__in=datatable_data["make_read[]"])
                .values_list("id", flat=True)
            )
            user_profile.send_notify_messages = list(
                user_profile.send_notify_messages
                .exclude(id__in=datatable_data["make_read[]"])
                .values_list("id", flat=True)
            )
    if "make_unread[]" in datatable_data:
        user_profile.unread_messages.add(*Message.objects.filter(id__in=datatable_data["make_unread[]"]))
    if "make_delete[]" in datatable_data:
        user_profile.deleted_messages.add(*Message.objects.filter(id__in=datatable_data["make_delete[]"]))
    if "make_undelete[]" in datatable_data:
        user_profile.deleted_messages = list(
            user_profile.deleted_messages
            .exclude(id__in=datatable_data["make_undelete[]"])
            .values_list("id", flat=True)
        )
    # Folder selection: deleted messages are excluded from inbox/sent and
    # *are* the trash folder.
    messages = Message.objects.none()
    messages_deleted = user_profile.deleted_messages.all()
    type_msg = datatable_data['type'][0]
    if type_msg == "inbox":
        messages = Message.objects.filter(recipients=user).exclude(id__in=messages_deleted)
    elif type_msg == "sent":
        messages = Message.objects.filter(sender=user).exclude(id__in=messages_deleted)
    elif type_msg == "trash":
        messages = messages_deleted
    # Build one DataTables row per message in the requested page window.
    data = list()
    start = int(datatable_data['start'][0])
    end = start + int(datatable_data['length'][0])
    unread = user_profile.unread_messages.all()
    for msg in messages[start:end]:
        data.append({
            "0": "",
            "1": u'%s %s' % (msg.sender.last_name, msg.sender.first_name),
            "2": msg.title,
            "3": format_date(msg.create_time.astimezone(timezone_pytz(user_profile.time_zone))),
            "DT_RowClass": "unread" if msg in unread else "",
            "DT_RowId": "row_msg_" + type_msg + "_" + str(msg.id),
            "DT_RowData": {
                "id": msg.id
            },
        })
    response['draw'] = datatable_data['draw']
    response['recordsTotal'] = messages.count()
    response['recordsFiltered'] = messages.count()
    response['data'] = data
    response['unread_count'] = user_profile.get_unread_count()
    response['type'] = type_msg
    return HttpResponse(json.dumps(response),
                        content_type="application/json")
def format_date(date):
    """Return a short, human-readable date string relative to now.

    - today:           "HH:MM"
    - this year:       "<day> <localized month name>"
    - earlier years:   "dd.mm.yy"
    """
    # Guard clauses replace the original assign-then-reassign ladder and
    # remove the dead initial ``date_str = ""`` store; output is unchanged.
    now = timezone.now()
    if now.year != date.year:
        return date.strftime("%d.%m.%y")
    if now.day == date.day and now.month == date.month:
        return date.strftime("%H:%M")
    return unicode(date.day) + u" " + MONTH[date.month]
@require_GET
@login_required
def ajax_get_message(request):
    # Return a single message as JSON and mark it read for the requester.
    response = dict()
    user = request.user
    user_profile = user.profile
    if "msg_id" not in request.GET:
        return HttpResponseForbidden()
    msg_id = int(request.GET["msg_id"])
    message = Message.objects.get(id=msg_id)
    # Only the sender and the recipients may read a message.
    if message.sender != user and user not in message.recipients.all():
        return HttpResponseForbidden()
    # The client sends its current unread counter; decrement it here if
    # opening this message marks it read.
    unread_count = int(request.GET["unread_count"])
    if message in user_profile.unread_messages.all():
        message.read_message(user)
        unread_count -= 1
    recipients_user = []
    recipients_group = []
    recipients_course = []
    recipients_status = []
    # For a hidden copy (BCC) a recipient must not see the other
    # recipients -- show only themselves.
    if message.hidden_copy and message.sender != user:
        recipients_user.append({
            "id": user.id,
            "fullname": u'%s %s' % (user.last_name, user.first_name),
            "url": user.get_absolute_url()
        })
    else:
        for recipient in message.recipients_user.all():
            recipients_user.append({
                "id": recipient.id,
                "fullname": u'%s %s' % (recipient.last_name, recipient.first_name),
                "url": recipient.get_absolute_url()
            })
        for group in message.recipients_group.all():
            recipients_group.append({
                "id": group.id,
                "name": group.name
            })
        for course in message.recipients_course.all():
            recipients_course.append({
                "id": course.id,
                "name": course.name,
                "url": course.get_absolute_url(),
            })
        for status in message.recipients_status.all():
            recipients_status.append({
                "id": status.id,
                "name": status.name
            })
    # Recipients (and senders browsing their inbox) see the text with
    # name placeholders substituted; the sender's "sent" view shows it raw.
    if message.sender != user or request.GET["mailbox"] == 'inbox':
        text = EmailRenderer.fill_name(message, user)
    else:
        text = message.text
    response['sender'] = {
        "id": message.sender.id,
        "fullname": u'%s %s' % (message.sender.last_name, message.sender.first_name),
        "url": message.sender.get_absolute_url(),
        "avatar": message.sender.profile.avatar.url if message.sender.profile.avatar else "",
    }
    response['recipients_user'] = recipients_user
    response['recipients_group'] = recipients_group
    response['recipients_course'] = recipients_course
    response['recipients_status'] = recipients_status
    response['date'] = message.create_time.astimezone(timezone_pytz(user_profile.time_zone))\
        .strftime("%d.%m.%y %H:%M:%S")
    response['text'] = text
    response['unread_count'] = unread_count
    return HttpResponse(json.dumps(response),
                        content_type="application/json")
@require_POST
@login_required
def ajax_send_message(request):
    # Create a Message from the compose form and resolve every recipient
    # kind (explicit users, groups, courses, user statuses) into the flat
    # ``recipients`` relation.
    user = request.user
    data = dict(request.POST)
    hidden_copy = False
    if 'hidden_copy' in data and data['hidden_copy'][0]:
        hidden_copy = True
    variable = False
    if 'variable' in data and data['variable'][0]:
        variable = True
    message = Message()
    message.sender = user
    message.title = data['new_title'][0]
    message.text = data['new_text'][0]
    message.hidden_copy = hidden_copy
    message.variable = variable
    message.save()
    recipients_ids = set()
    # Explicit users, possibly extended with a set preselected from the
    # admin and stashed in the session (see mail_page).
    if "new_recipients_user[]" in data or "new_recipients_preinit[]" in data:
        users = data.get("new_recipients_user[]", [])
        if "new_recipients_preinit[]" in data:
            users += request.session.get('user_ids_send_mail_' + data["new_recipients_preinit[]"][0], [])
        message.recipients_user = users
        recipients_ids.update(message.recipients_user.values_list('id', flat=True))
    # Groups: every student except the sender; remember group ids so course
    # expansion below does not add the same group twice.
    group_ids = []
    if "new_recipients_group[]" in data:
        message.recipients_group = data["new_recipients_group[]"]
        for group in Group.objects.filter(id__in=data["new_recipients_group[]"]):
            recipients_ids.update(group.students.exclude(id=user.id).values_list('id', flat=True))
            group_ids.append(group.id)
    if "new_recipients_course[]" in data:
        message.recipients_course = data["new_recipients_course[]"]
        for course in Course.objects.filter(id__in=data["new_recipients_course[]"]):
            for group in course.groups.exclude(id__in=group_ids).distinct():
                recipients_ids.update(group.students.exclude(id=user.id).values_list('id', flat=True))
    if "new_recipients_status[]" in data:
        message.recipients_status = data["new_recipients_status[]"]
        recipients_ids.update(UserProfile.objects.filter(user_status__in=data["new_recipients_status[]"])
                              .values_list('user__id', flat=True))
    message.recipients = list(recipients_ids)
    return HttpResponse("OK")
| mit | 9d8be5f1a2b6f0aa5a2a63355bc32719 | 33.288136 | 117 | 0.606228 | 3.735229 | false | false | false | false |
znick/anytask | anytask/invites/migrations/0001_initial.py | 1 | 1308 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the Invite model.
    # NOTE(review): ``null=True`` on the ManyToManyField below has no
    # database effect in Django (m2m rows either exist or not); it is kept
    # as generated -- do not edit by hand.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('groups', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Invite',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.CharField(unique=True, max_length=10, db_index=True)),
                ('added_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
                ('update_time', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
                ('generated_by', models.ForeignKey(related_name=b'invite_generated_by', to=settings.AUTH_USER_MODEL, db_index=False)),
                ('group', models.ForeignKey(to='groups.Group', blank=True, null=True, db_index=False)),
                ('invited_users', models.ManyToManyField(to=settings.AUTH_USER_MODEL, null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| mit | d505e3cf4c5e08f1013083214473dd8c | 39.875 | 134 | 0.611621 | 4.074766 | false | false | false | false |
znick/anytask | dependencies/django-registration/registration/backends/default/__init__.py | 1 | 5257 | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
class DefaultBackend(object):
    """
    A registration backend which follows a simple workflow:
    1. User signs up, inactive account is created.
    2. Email is sent to user with activation link.
    3. User clicks activation link, account is now active.
    Using this backend requires that
    * ``registration`` be listed in the ``INSTALLED_APPS`` setting
    (since this backend makes use of models defined in this
    application).
    * The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
    (as an integer) the number of days from registration during
    which a user may activate their account (after that period
    expires, activation will be disallowed).
    * The creation of the templates
    ``registration/activation_email_subject.txt`` and
    ``registration/activation_email.txt``, which will be used for
    the activation email. See the notes for this backend's
    ``register`` method for details regarding these templates.
    Additionally, registration can be temporarily closed by adding the
    setting ``REGISTRATION_OPEN`` and setting it to
    ``False``. Omitting this setting, or setting it to ``True``, will
    be interpreted as meaning that registration is currently open and
    permitted.
    Internally, this is accomplished via storing an activation key in
    an instance of ``registration.models.RegistrationProfile``. See
    that model and its custom manager for full documentation of its
    fields and supported operations.
    """
    def register(self, request, **kwargs):
        """
        Given a username, email address and password, register a new
        user account, which will initially be inactive.
        Along with the new ``User`` object, a new
        ``registration.models.RegistrationProfile`` will be created,
        tied to that ``User``, containing the activation key which
        will be used for this account.
        An email will be sent to the supplied email address; this
        email should contain an activation link. The email will be
        rendered using two templates. See the documentation for
        ``RegistrationProfile.send_activation_email()`` for
        information about these templates and the contexts provided to
        them.
        After the ``User`` and ``RegistrationProfile`` are created and
        the activation email is sent, the signal
        ``registration.signals.user_registered`` will be sent, with
        the new ``User`` as the keyword argument ``user`` and the
        class of this backend as the sender.
        """
        username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
        # The current site is passed on so the activation e-mail can build
        # absolute URLs.
        site = get_current_site(request)
        new_user = RegistrationProfile.objects.create_inactive_user(username, email,
                                                                    password, site)
        signals.user_registered.send(sender=self.__class__,
                                     user=new_user,
                                     request=request)
        return new_user
    def activate(self, request, activation_key):
        """
        Given an an activation key, look up and activate the user
        account corresponding to that key (if possible).
        After successful activation, the signal
        ``registration.signals.user_activated`` will be sent, with the
        newly activated ``User`` as the keyword argument ``user`` and
        the class of this backend as the sender.
        """
        # activate_user() returns the User on success or False otherwise.
        activated = RegistrationProfile.objects.activate_user(activation_key)
        if activated:
            signals.user_activated.send(sender=self.__class__,
                                        user=activated,
                                        request=request)
        return activated
    def registration_allowed(self, request):
        """
        Indicate whether account registration is currently permitted,
        based on the value of the setting ``REGISTRATION_OPEN``. This
        is determined as follows:
        * If ``REGISTRATION_OPEN`` is not specified in settings, or is
        set to ``True``, registration is permitted.
        * If ``REGISTRATION_OPEN`` is both specified and set to
        ``False``, registration is not permitted.
        """
        return getattr(settings, 'REGISTRATION_OPEN', True)
    def get_form_class(self, request):
        """
        Return the default form class used for user registration.
        """
        return RegistrationForm
    def post_registration_redirect(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        user registration.
        """
        return ('registration_complete', (), {})
    def post_activation_redirect(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        account activation.
        """
        return ('registration_activation_complete', (), {})
| mit | 33f4d7abccf8f78641ac469072219f65 | 37.654412 | 92 | 0.642382 | 5.225646 | false | false | false | false |
python-attrs/attrs | src/attr/__init__.py | 2 | 1952 | # SPDX-License-Identifier: MIT
import sys
import warnings
from functools import partial
from . import converters, exceptions, filters, setters, validators
from ._cmp import cmp_using
from ._config import get_run_validators, set_run_validators
from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
from ._make import (
NOTHING,
Attribute,
Factory,
attrib,
attrs,
fields,
fields_dict,
make_class,
validate,
)
from ._next_gen import define, field, frozen, mutable
from ._version_info import VersionInfo
# Warn once, at import time, on the oldest still-importable Python.
if sys.version_info < (3, 7): # pragma: no cover
    warnings.warn(
        "Running attrs on Python 3.6 is deprecated & we intend to drop "
        "support soon. If that's a problem for you, please let us know why & "
        "we MAY re-evaluate: <https://github.com/python-attrs/attrs/pull/993>",
        DeprecationWarning,
    )
# Package metadata.
__version__ = "22.2.0.dev0"
__version_info__ = VersionInfo._from_version_string(__version__)
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__url__ = "https://www.attrs.org/"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
# Historical short aliases, kept for backwards compatibility.
s = attributes = attrs
ib = attr = attrib
dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
class AttrsInstance:
    # NOTE(review): empty marker class exported via __all__; presumably
    # serves as a structural base/marker for attrs-generated classes
    # (e.g. for typing) -- confirm against _make.py before relying on it.
    pass
# Explicit public API of the ``attr`` namespace (kept sorted).
__all__ = [
    "Attribute",
    "AttrsInstance",
    "Factory",
    "NOTHING",
    "asdict",
    "assoc",
    "astuple",
    "attr",
    "attrib",
    "attributes",
    "attrs",
    "cmp_using",
    "converters",
    "define",
    "evolve",
    "exceptions",
    "field",
    "fields",
    "fields_dict",
    "filters",
    "frozen",
    "get_run_validators",
    "has",
    "ib",
    "make_class",
    "mutable",
    "resolve_types",
    "s",
    "set_run_validators",
    "setters",
    "validate",
    "validators",
]
| mit | 09be19a54fa7f6c319eab80948fffd2e | 19.989247 | 79 | 0.607582 | 3.297297 | false | false | false | false |
znick/anytask | anytask/admission/views.py | 1 | 7238 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from django.http import HttpResponse, HttpResponseForbidden, HttpResponsePermanentRedirect
from django.conf import settings
from django.shortcuts import render
from anycontest.common import user_register_to_contest
from admission.models import AdmissionRegistrationProfile
import json
import logging
import datetime
logger = logging.getLogger('django.request')
# Literal answer options coming from the Ya.Forms questionnaire
# (Russian for "Other" and "Yes"); compared against submitted values
# in set_user_info().
ANOTHER = u'Другое'
YES = u'Да'
# Russian genitive month names -> month number, used to parse the
# free-form birth-date string from the form.
MONTH = {
    u'января': 1,
    u'февраля': 2,
    u'марта': 3,
    u'апреля': 4,
    u'мая': 5,
    u'июня': 6,
    u'июля': 7,
    u'августа': 8,
    u'сентября': 9,
    u'октября': 10,
    u'ноября': 11,
    u'декабря': 12
}
def get_post_value(post_data, key):
    """Decode the JSON answer stored under *key* and return its "value" field."""
    answer = json.loads(post_data[key])
    return answer['value']
def get_post_question(post_data, key):
    """Decode the JSON answer stored under *key* and return its Russian question label."""
    answer = json.loads(post_data[key])
    return answer['question']['label']['ru']
def set_user_info(user, user_info):
    """Copy questionnaire answers from ``user_info`` onto ``user`` and its profile."""
    user.first_name = user_info['first_name']
    user.last_name = user_info['last_name']
    user.email = user_info['email']
    user.save()
    user_profile = user.profile
    user_profile.set_status(settings.FILIAL_STATUSES[user_info['filial']])
    user_profile.set_status(settings.ENROLLEE_STATUS)
    user_profile.middle_name = user_info['middle_name']
    # The birth date arrives as "<day> <month name> <year>..."; the last
    # three characters are stripped before splitting (presumably a fixed
    # suffix such as " г." -- TODO confirm) and MONTH maps the Russian
    # month name to its number.
    birth_date_split = user_info['birth_date'][:-3].split()
    user_profile.birth_date = datetime.datetime(
        int(birth_date_split[2]),
        MONTH[birth_date_split[1]],
        int(birth_date_split[0])
    )
    user_profile.phone = user_info['phone']
    user_profile.city_of_residence = user_info['city_of_residence']
    # Free-text companion fields are used when the respondent picked "Other".
    if user_info['university'] == ANOTHER:
        user_profile.university = user_info['university_text']
    else:
        user_profile.university = user_info['university']
    if user_info['university_in_process'] == YES:
        user_profile.university_in_process = True
    else:
        user_profile.university_in_process = False
    if user_info['university_class'] == ANOTHER:
        user_profile.university_class = user_info['university_class_text']
    else:
        user_profile.university_class = user_info['university_class']
    user_profile.university_department = user_info['university_department']
    user_profile.university_year_end = user_info['university_year_end']
    user_profile.additional_info = user_info['additional_info']
    # Yandex Contest / Passport identifiers all come from the same uid/login.
    user_profile.ya_contest_uid = user_info['uid']
    user_profile.ya_contest_login = user_info['username']
    user_profile.ya_passport_uid = user_info['uid']
    user_profile.ya_passport_email = user_info['ya_email']
    user_profile.ya_passport_login = user_info['username']
    if not user_info['is_updating']:
        user_profile.login_via_yandex = True
    user_profile.save()
@csrf_exempt
@require_POST
def register(request):
    # Endpoint called by Ya.Forms on questionnaire submission.  Requests
    # are authenticated by a shared secret in the OAUTH header.
    if not ('HTTP_OAUTH' in request.META and request.META['HTTP_OAUTH'] == settings.YA_FORMS_OAUTH):
        return HttpResponseForbidden()
    post_data = request.POST.dict()
    username = request.META['HTTP_LOGIN']
    email = get_post_value(post_data, settings.YA_FORMS_FIELDS['email'])
    password = None
    uid = request.META['HTTP_UID']
    new_user, registration_profile = AdmissionRegistrationProfile.objects.create_or_update_user(username, email,
                                                                                               password, uid,
                                                                                               send_email=False,
                                                                                               request=request)
    if new_user is not None and registration_profile is not None:
        user_info = {
            'username': username,
            'uid': uid,
            'ya_email': '',
            'is_updating': registration_profile.is_updating
        }
        # The EMAIL header may literally contain the string 'None'.
        if request.META['HTTP_EMAIL'] and request.META['HTTP_EMAIL'] != 'None':
            user_info['ya_email'] = request.META['HTTP_EMAIL']
        # Scalar questionnaire answers.
        for key, post_data_key in settings.YA_FORMS_FIELDS.iteritems():
            user_info[key] = get_post_value(post_data, post_data_key)
        # Multi-question answers are stored as a JSON list of
        # question/value pairs.
        for key, post_data_keys in settings.YA_FORMS_FIELDS_ADDITIONAL.iteritems():
            info_json = []
            for post_data_key in post_data_keys:
                info_json.append({
                    'question': get_post_question(post_data, post_data_key),
                    'value': get_post_value(post_data, post_data_key)
                })
            user_info[key] = json.dumps(info_json)
        registration_profile.user_info = json.dumps(user_info)
        registration_profile.save()
        if not registration_profile.is_updating:
            # Profile data is applied immediately only on first
            # registration; updates wait for the activation link (see
            # activate()).
            set_user_info(new_user, user_info)
        registration_profile.send_activation_email()
    return HttpResponse("OK")
def contest_register(user):
    # Pick an admission contest deterministically by hashing the user's
    # e-mail, then register the user's Contest account in it.  Returns the
    # contest id on success (including "already registered"), False on error.
    contest_id = settings.ADMISSION_CONTESTS[user.email.__hash__() % len(settings.ADMISSION_CONTESTS)]
    got_info, response_text = user_register_to_contest(contest_id, user.profile.ya_contest_uid)
    if not got_info:
        if response_text == 'User already registered for contest':
            # Re-activation of an existing registration: treat as success.
            logger.info("Activate user - %s %s", user.username, response_text)
            return contest_id
        logger.error("Activate user - Cant register user %s to contest %s. Error: %s", user.username, contest_id,
                     response_text)
        return False
    logger.info("Activate user - user %s was successfully registered to contest %s.", user.username, contest_id)
    return contest_id
@never_cache
@require_GET
def activate(request, activation_key):
    # Activation link from the e-mail: apply any stored questionnaire
    # answers, register the user in a contest and redirect there.
    context = {'info_title': _(u'oshibka')}
    user, user_info = AdmissionRegistrationProfile.objects.activate_user(activation_key)
    if user:
        if user_info:
            # Deferred profile update saved by register() for existing users.
            set_user_info(user, json.loads(user_info))
        contest_id = contest_register(user)
        if contest_id:
            return HttpResponsePermanentRedirect(settings.CONTEST_URL + 'contest/' + str(contest_id))
        else:
            context['info_text'] = _(u'oshibka_registracii_v_contest')
    else:
        context['info_text'] = _(u'nevernyy_kod_aktivatsii')
    return render(request, 'info_page.html', context)
@never_cache
@require_GET
def decline(request, activation_key):
    # Opt-out link from the e-mail: delete the stored registration data.
    try:
        registration_profile = AdmissionRegistrationProfile.objects.decline_user(activation_key)
        if registration_profile:
            logger.info("Decline user - user %s requests deletion. Activation key %s",
                        registration_profile.user.username, registration_profile.activation_key)
        else:
            logger.warning("Decline user - wrong activation key %s", activation_key)
    except Exception as e:
        # Best effort: a failure while declining must still render the
        # confirmation page, so only log the error.
        logger.error("Decline user - %s", e)
    context = {
        'info_title': _(u'spasibo'),
        'info_text': _(u'informatsiya_o_vas_byla_udalena'),
    }
    return render(request, 'info_page.html', context)
| mit | 4b01fe9a249818449e974f1d1eb0d870 | 33.757282 | 113 | 0.629609 | 3.549826 | false | true | false | false |
grokzen/pykwalify | pykwalify/errors.py | 2 | 6209 | # -*- coding: utf-8 -*-
""" pyKwalify - errors.py """
# python stdlib
from pykwalify.compat import basestring
# Numeric return code -> symbolic name for every error pyKwalify can exit
# with.  Each exception class below maps to exactly one entry.
retcodes = {
    # PyKwalifyExit
    0: 'noerror',
    # UnknownError
    1: 'unknownerror',
    # SchemaError
    # e.g. when a rule or the core finds an error
    2: 'schemaerror',
    # CoreError
    # e.g. when the core finds an error that is not a SchemaError
    3: 'coreerror',
    # RuleError
    # e.g. when the rule class finds an error that is not a SchemaError, similar to CoreError
    4: 'ruleerror',
    # SchemaConflict
    # e.g. when a schema conflict occurs
    5: 'schemaconflict',
    # NotMappingError
    # e.g. when a value is not a mapping when it was expected it should be
    6: 'notmaperror',
    # NotSequenceError
    # e.g. when a value is not a sequence when it was expected it should be
    7: 'notsequenceerror',
}
# Reverse mapping: symbolic name -> numeric return code.
retnames = dict((v, k) for (k, v) in retcodes.items())
class PyKwalifyException(RuntimeError):
    """
    Base class for all exceptions raised by pyKwalify.

    Carries a human-readable message, a numeric return code (see the
    module-level ``retcodes`` mapping), its symbolic name, an optional
    ``error_key`` and the path inside the validated document.
    """
    def __init__(self, msg=None, error_key=None, retcode=None, path=None):
        """
        Arguments:
        - `msg`: a string
        - `error_key`: a unique string that makes it easier to identify what error it is
        - `retcode`: an integer, defined in PyKwalify.errors.retcodes
        - `path`: document path where the error occurred (defaults to "/")
        """
        self.msg = msg or ""
        self.retcode = retcode or retnames['unknownerror']
        # BUGFIX: look the symbolic name up from the *resolved* code.  The
        # previous ``retcodes[retcode]`` raised KeyError for a
        # default-constructed exception (retcode=None).
        self.retname = retcodes[self.retcode]
        self.error_key = error_key
        self.path = path or "/"
    def __str__(self):
        """
        Format as e.g. ``<SchemaError: error code 2: some msg: Path: '/'>``.
        """
        parts = []
        if self.retcode != retnames['noerror']:
            parts.append("error code {0}".format(self.retcode))
        if self.msg:
            parts.append(self.msg)
        if parts:
            # Leading '' yields the ": " right after the class name.
            parts.insert(0, '')
        if self.path:
            parts.append("Path: '{0}'".format(self.path))
        return "<{0}{1}>".format(self.__class__.__name__, ': '.join(parts))
    def __repr__(self):
        """
        Eval-style representation, e.g. ``SchemaError(msg='...')``.
        """
        kwargs = []
        if self.msg:
            kwargs.append("msg='{0}'".format(self.msg))
        return "{0}({1})".format(self.__class__.__name__, ', '.join(kwargs))
    # Validating properties (modernized from the old ``property(**f())``
    # factory pattern); the type asserts run whenever __init__ or callers
    # assign to these attributes.
    @property
    def msg(self):
        return self._msg
    @msg.setter
    def msg(self, value):
        assert isinstance(value, basestring), "argument is not string"
        self._msg = value
    @property
    def retcode(self):
        return self._retcode
    @retcode.setter
    def retcode(self, value):
        assert isinstance(value, int), "argument is not integer"
        self._retcode = value
    @property
    def retname(self):
        return self._retname
    @retname.setter
    def retname(self, value):
        assert isinstance(value, str), "argument is not string"
        self._retname = value
class UnknownError(PyKwalifyException):
    """
    Raised for errors that do not fit any more specific category.
    """
    def __init__(self, *args, **kwargs):
        """
        Forward all arguments to PyKwalifyException with the
        'unknownerror' return code.
        """
        assert 'retcode' not in kwargs, "keyword retcode implicitly defined"
        # Name the class explicitly: ``super(self.__class__, self)``
        # recurses infinitely as soon as this class is subclassed.
        super(UnknownError, self).__init__(
            retcode=retnames['unknownerror'],
            *args, **kwargs
        )
class SchemaError(PyKwalifyException):
    """
    Raised when a validation rule or the core detects a schema violation.
    """
    class SchemaErrorEntry(object):
        """
        One individual validation failure: a message template, the document
        path, the offending value, plus any extra context passed as kwargs.
        """
        def __init__(self, msg, path, value, **kwargs):
            self.msg = msg
            self.path = path
            self.value = value
            # Extra context (e.g. the failed rule) becomes attributes so the
            # message template can reference it by name in __repr__.
            for key, value in kwargs.items():
                self.__setattr__(key, value)
        def __repr__(self):
            return self.msg.format(**self.__dict__)
    def __init__(self, *args, **kwargs):
        """
        Forward all arguments to PyKwalifyException with the
        'schemaerror' return code.
        """
        assert "retcode" not in kwargs, "keyword retcode implicitly defined"
        # Name the class explicitly: ``super(self.__class__, self)``
        # recurses infinitely as soon as this class is subclassed.
        super(SchemaError, self).__init__(
            retcode=retnames["schemaerror"],
            *args, **kwargs
        )
class CoreError(PyKwalifyException):
    """
    Raised when the core finds an error that is not a SchemaError.
    """
    def __init__(self, *args, **kwargs):
        """
        Forward all arguments to PyKwalifyException with the
        'coreerror' return code.
        """
        assert "retcode" not in kwargs, "keyword retcode implicitly defined"
        # Name the class explicitly: ``super(self.__class__, self)``
        # recurses infinitely as soon as this class is subclassed.
        super(CoreError, self).__init__(
            retcode=retnames["coreerror"],
            *args, **kwargs
        )
class NotMappingError(PyKwalifyException):
    """
    Raised when a value is not a mapping although one was expected.
    """
    def __init__(self, *args, **kwargs):
        """
        Forward all arguments to PyKwalifyException with the
        'notmaperror' return code.
        """
        assert "retcode" not in kwargs, "keyword retcode implicitly defined"
        # Name the class explicitly: ``super(self.__class__, self)``
        # recurses infinitely as soon as this class is subclassed.
        super(NotMappingError, self).__init__(
            retcode=retnames['notmaperror'],
            *args, **kwargs
        )
class NotSequenceError(PyKwalifyException):
    """PyKwalify exception carrying the fixed 'notsequenceerror' return code."""

    def __init__(self, *args, **kwargs):
        """Forward to PyKwalifyException with ``retcode`` preset.

        Raises:
            AssertionError: if the caller supplies ``retcode`` explicitly.
        """
        assert "retcode" not in kwargs, "keyword retcode implicitly defined"
        # Explicit class avoids infinite recursion when subclassed.
        super(NotSequenceError, self).__init__(
            retcode=retnames['notsequenceerror'],
            *args, **kwargs
        )
class RuleError(PyKwalifyException):
    """PyKwalify exception carrying the fixed 'ruleerror' return code."""

    def __init__(self, *args, **kwargs):
        """Forward to PyKwalifyException with ``retcode`` preset.

        Raises:
            AssertionError: if the caller supplies ``retcode`` explicitly.
        """
        assert "retcode" not in kwargs, "keyword retcode implicitly defined"
        # Explicit class avoids infinite recursion when subclassed.
        super(RuleError, self).__init__(
            retcode=retnames["ruleerror"],
            *args, **kwargs
        )
class SchemaConflict(PyKwalifyException):
    """PyKwalify exception carrying the fixed 'schemaconflict' return code."""

    def __init__(self, *args, **kwargs):
        """Forward to PyKwalifyException with ``retcode`` preset.

        Raises:
            AssertionError: if the caller supplies ``retcode`` explicitly.
        """
        assert "retcode" not in kwargs, "keyword retcode implicitly defined"
        # Explicit class avoids infinite recursion when subclassed.
        super(SchemaConflict, self).__init__(
            retcode=retnames["schemaconflict"],
            *args, **kwargs
        )
| mit | 1f0804968e105c8aa62ac5a1c28ff334 | 25.198312 | 93 | 0.523112 | 4.05817 | false | false | false | false |
django-admin-tools/django-admin-tools | admin_tools/menu/views.py | 1 | 3319 | from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.contrib import messages
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from .forms import BookmarkForm
from .models import Bookmark
from admin_tools.utils import is_xhr
@staff_member_required
@csrf_exempt
def add_bookmark(request):
    """Serve and validate the "add bookmark" form.

    On a successful ajax submission, the rendered "remove bookmark" form
    is returned so the client can swap the widgets in place.
    """
    if request.method != "POST":
        unbound = BookmarkForm(user=request.user)
        return render(
            request,
            "admin_tools/menu/form.html",
            context={"form": unbound, "title": "Add Bookmark"},
        )
    form = BookmarkForm(user=request.user, data=request.POST)
    if not form.is_valid():
        # Re-display the bound form with its validation errors.
        return render(
            request,
            "admin_tools/menu/form.html",
            context={"form": form, "title": "Add Bookmark"},
        )
    bookmark = form.save()
    if not is_xhr(request):
        messages.success(request, "Bookmark added")
        next_url = request.POST.get("next")
        if next_url:
            return HttpResponseRedirect(next_url)
        return HttpResponse("Added")
    return render(
        request,
        "admin_tools/menu/remove_bookmark_form.html",
        context={"bookmark": bookmark, "url": bookmark.url},
    )
@staff_member_required
@csrf_exempt
def edit_bookmark(request, id):
    """Serve and validate the "edit bookmark" form for bookmark ``id``."""
    # Filter by the requesting user (as remove_bookmark does) so one staff
    # member cannot edit another user's bookmark by guessing its id.
    bookmark = get_object_or_404(Bookmark, id=id, user=request.user)
    if request.method == "POST":
        form = BookmarkForm(
            user=request.user, data=request.POST, instance=bookmark
        )
        if form.is_valid():
            form.save()
            if not is_xhr(request):
                messages.success(request, "Bookmark updated")
                if request.POST.get("next"):
                    return HttpResponseRedirect(request.POST.get("next"))
            return HttpResponse("Saved")
        # Invalid form falls through and is re-rendered with its errors.
    else:
        form = BookmarkForm(user=request.user, instance=bookmark)
    return render(
        request,
        "admin_tools/menu/form.html",
        context={"form": form, "title": "Edit Bookmark"},
    )
@staff_member_required
@csrf_exempt
def remove_bookmark(request, id):
    """Delete bookmark ``id`` after confirmation.

    Ajax callers get back the rendered "add bookmark" form so they can
    swap the widgets in place.
    """
    bookmark = get_object_or_404(Bookmark, id=id, user=request.user)
    if request.method != "POST":
        # First hit: ask for confirmation.
        return render(
            request,
            "admin_tools/menu/delete_confirm.html",
            context={"bookmark": bookmark, "title": "Delete Bookmark"},
        )
    bookmark.delete()
    if not is_xhr(request):
        messages.success(request, "Bookmark removed")
        next_url = request.POST.get("next")
        if next_url:
            return HttpResponseRedirect(next_url)
        return HttpResponse("Deleted")
    return render(
        request,
        "admin_tools/menu/add_bookmark_form.html",
        context={
            "url": request.POST.get("next"),
            "title": "**title**",  # replaced on the javascript side
        },
    )
| mit | b1af1cc48f0efac7d7c5160fea5111e6 | 32.525253 | 79 | 0.610124 | 4.24968 | false | false | false | false |
django-admin-tools/django-admin-tools | admin_tools/menu/menus.py | 1 | 3921 | """
Module where admin tools menu classes are defined.
"""
try:
# we use django.urls import as version detection as it will fail on django 1.11 and thus we are safe to use
# gettext_lazy instead of ugettext_lazy instead
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
except ImportError:
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from admin_tools.menu import items
from admin_tools.utils import get_admin_site_name
class Menu(object):
    """Base class for custom admin navigation menus.

    Subclasses customize two class attributes:

    ``template``
        Path of the template used to render the menu, relative to one of
        the directories in the ``TEMPLATE_DIRS`` setting
        (default: ``"admin_tools/menu/menu.html"``).

    ``children``
        List of child menu items; every child must be an instance of
        :class:`~admin_tools.menu.items.MenuItem`.

    Extra stylesheets or javascript can be attached through an inner
    ``Media`` class, e.g.::

        from admin_tools.menu import Menu

        class MyMenu(Menu):
            class Media:
                css = {'all': ('css/mymenu.css',)}
                js = ('js/mymenu.js',)

    A concrete menu typically extends ``self.children`` in ``__init__``
    with :class:`~admin_tools.menu.items.MenuItem` instances (possibly
    nested via their own ``children``).
    """
    template = 'admin_tools/menu/menu.html'
    children = None

    class Media:
        css = ()
        js = ()

    def __init__(self, **kwargs):
        # Only keyword arguments matching a declared class attribute are
        # honored; anything else is silently ignored.
        for name, value in kwargs.items():
            if hasattr(self.__class__, name):
                setattr(self, name, value)
        self.children = kwargs.get('children', [])

    def init_with_context(self, context):
        """Hook called just before rendering, with a
        ``django.template.RequestContext`` as its only argument.

        Override this when building the menu requires access to context
        variables or to the ``django.http.HttpRequest``. The default
        implementation does nothing.
        """
        pass
class DefaultMenu(Menu):
    """The stock menu displayed by django-admin-tools.

    Generate a customizable copy with::

        python manage.py custommenu

    then point the ``ADMIN_TOOLS_MENU`` setting at the resulting class.
    """

    def init_with_context(self, context):
        site_name = get_admin_site_name(context)
        self.children.append(
            items.MenuItem(_('Dashboard'), reverse('%s:index' % site_name)))
        self.children.append(items.Bookmarks())
        self.children.append(
            items.AppList(_('Applications'), exclude=('django.contrib.*',)))
        self.children.append(
            items.AppList(_('Administration'), models=('django.contrib.*',)))
| mit | d6ea70fa39f33d0e03c64cf7274bd632 | 33.095652 | 111 | 0.590666 | 4.667857 | false | false | false | false |
django-admin-tools/django-admin-tools | admin_tools/dashboard/views.py | 1 | 1564 | from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import messages
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from .forms import DashboardPreferencesForm
from .models import DashboardPreferences
from admin_tools.utils import is_xhr
@staff_member_required
@csrf_exempt
def set_preferences(request, dashboard_id):
    """Serve and validate the dashboard preferences form.

    Ajax submissions receive a bare ``"true"``/``"false"`` body; regular
    submissions fall through to re-rendering the form.
    """
    try:
        instance = DashboardPreferences.objects.get(
            user=request.user, dashboard_id=dashboard_id
        )
    except DashboardPreferences.DoesNotExist:
        instance = None
    if request.method != "POST":
        form = DashboardPreferencesForm(
            user=request.user, dashboard_id=dashboard_id, instance=instance
        )
        return render(
            request,
            "admin_tools/dashboard/preferences_form.html",
            context={"form": form},
        )
    form = DashboardPreferencesForm(
        user=request.user,
        dashboard_id=dashboard_id,
        data=request.POST,
        instance=instance,
    )
    ajax = is_xhr(request)
    if form.is_valid():
        form.save()
        if ajax:
            return HttpResponse("true")
        messages.success(request, "Preferences saved")
    elif ajax:
        return HttpResponse("false")
    return render(
        request,
        "admin_tools/dashboard/preferences_form.html",
        context={"form": form},
    )
| mit | 855751e6af77f3906931ef86d0a24a1a | 30.28 | 78 | 0.662404 | 4.430595 | false | false | false | false |
django-admin-tools/django-admin-tools | admin_tools/dashboard/migrations/0001_initial.py | 1 | 1079 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the dashboard app: one row of serialized dashboard
    # preferences per (user, dashboard_id) pair.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='DashboardPreferences',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', models.TextField()),
                ('dashboard_id', models.CharField(max_length=100)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ('user',),
                'db_table': 'admin_tools_dashboard_preferences',
            },
            bases=(models.Model,),
        ),
        # A user may have at most one preferences row per dashboard.
        migrations.AlterUniqueTogether(
            name='dashboardpreferences',
            unique_together=set([('user', 'dashboard_id')]),
        ),
    ]
| mit | 075f95a4bc5995b7dc6f7de06e6cba05 | 31.69697 | 114 | 0.565338 | 4.630901 | false | false | false | false |
django-admin-tools/django-admin-tools | admin_tools/dashboard/management/commands/customdashboard.py | 1 | 1043 | import os
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
DEFAULT_FILE = "dashboard.py"

class Command(BaseCommand):
    """Management command that writes a starter custom-dashboard module."""

    help = (
        "Creates a template file containing the base code to get you "
        "started with your custom dashboard."
    )

    def add_arguments(self, parser):
        parser.add_argument("output_file", type=str)

    def handle(self, output_file=None, **options):
        """Render the dashboard skeleton template to *output_file*.

        Raises:
            CommandError: if the destination file already exists.
        """
        context = {}
        context["project"] = os.path.basename(os.getcwd())
        tpl = [
            "dashboard/dashboard.txt",
            "admin_tools/dashboard/dashboard.txt",
        ]
        # ``or`` replaces the old ``cond and a or b`` idiom; both fall back
        # to DEFAULT_FILE when output_file is None (or an empty string).
        dst = output_file or DEFAULT_FILE
        if os.path.exists(dst):
            raise CommandError('file "%s" already exists' % dst)
        # Module name for the generated file: basename without extension.
        context["file"] = os.path.basename(dst).split(".")[0]
        with open(dst, "w") as f:
            f.write(render_to_string(tpl, context))
        # os.path.join() with a single argument was a no-op; use dst as-is.
        self.stdout.write('"%s" written.' % dst)
grokzen/pykwalify | pykwalify/cli.py | 1 | 3248 | # -*- coding: utf-8 -*-
""" pyKwalify - cli.py """
# python std lib
import logging
import logging.config
import sys
# 3rd party imports
from docopt import docopt
def parse_cli():
    """Parse and validate command-line arguments.

    The outline of this function needs to be like this:
    1. parse arguments
    2. validate arguments only, dont go into other logic/code
    3. run application logic
    """
    #
    # 1. parse cli arguments
    #
    # NOTE: this string is parsed by docopt at runtime; its exact layout
    # (option columns, usage lines) defines the CLI, so keep it verbatim.
    __docopt__ = """
usage: pykwalify -d FILE -s FILE ... [-e FILE ...]
       [--strict-rule-validation] [--fix-ruby-style-regex] [--allow-assertions] [--encoding ENCODING]
       [-v ...] [-q]
optional arguments:
  -d FILE, --data-file FILE        the file to be tested
  -e FILE, --extension FILE        file containing python extension
  -s FILE, --schema-file FILE      schema definition file
  --fix-ruby-style-regex           This flag fixes some of the quirks of ruby style regex
                                   that is not compatible with python style regex
  --strict-rule-validation         enables strict validation of all keywords for all
                                   Rule objects to find unsupported keyword usage
  --allow-assertions               By default assertions is disabled due to security risk.
                                   Error will be raised if assertion is used in schema
                                   but this flag is not used. This option enables assert keyword.
  --encoding ENCODING              Specify encoding to open data and schema files with.
  -h, --help                       show this help message and exit
  -q, --quiet                      suppress terminal output
  -v, --verbose                    verbose terminal output (multiple -v increases verbosity)
  --version                        display the version number and exit
"""
    # Import pykwalify package
    # (imported inside the function, presumably so logging is configured
    #  before other pykwalify modules load -- verify before moving it)
    import pykwalify
    args = docopt(__docopt__, version=pykwalify.__version__)
    pykwalify.init_logging(1 if args["--quiet"] else args["--verbose"])
    log = logging.getLogger(__name__)
    #
    # 2. validate arguments only, dont go into other code/logic
    #
    log.debug("Setting verbose level: %s", args["--verbose"])
    log.debug("Arguments from CLI: %s", args)
    return args
def run(cli_args):
    """Build a Core validator from parsed CLI arguments and run it.

    Kept separate from CLI parsing so each half can be exercised on its
    own. Returns the Core instance after validation.
    """
    from .core import Core

    options = {
        "source_file": cli_args["--data-file"],
        "schema_files": cli_args["--schema-file"],
        "extensions": cli_args["--extension"],
        "strict_rule_validation": cli_args["--strict-rule-validation"],
        "fix_ruby_style_regex": cli_args["--fix-ruby-style-regex"],
        "allow_assertions": cli_args["--allow-assertions"],
        "file_encoding": cli_args["--encoding"],
    }
    core = Core(**options)
    core.validate()
    return core
def cli_entrypoint():
    """Console-script entry point (wired up automatically by setup.py).

    Warns when running on a Python older than 2.7, then parses the CLI
    and runs the validator.
    """
    if sys.version_info < (2, 7, 0):
        sys.stderr.write(u"WARNING: pykwalify: It is recommended to run pykwalify on python version 2.7.x or later...\n\n")

    args = parse_cli()
    run(args)
| mit | deb0b70e193dec53a2bb89ce6ed3339b | 32.142857 | 123 | 0.595135 | 4.251309 | false | false | false | false |
kytos/python-openflow | tests/unit/v0x01/test_controller2switch/test_packet_out.py | 1 | 2854 | """Packet out message tests."""
from pyof.foundation.exceptions import ValidationError
from pyof.v0x01.common.action import ActionOutput
from pyof.v0x01.common.phy_port import Port
from pyof.v0x01.controller2switch.packet_out import PacketOut
from tests.unit.test_struct import TestStruct
class TestPacketOut(TestStruct):
    """Packet out message tests (also those in :class:`.TestDump`).

    Attributes:
        message (PacketOut): The message configured in :meth:`setUpClass`.
    """

    @classmethod
    def setUpClass(cls):
        """Configure raw file and its object in parent class (TestDump)."""
        super().setUpClass()
        super().set_raw_dump_file('v0x01', 'ofpt_packet_out')
        super().set_raw_dump_object(PacketOut, xid=8, buffer_id=4294967295,
                                    in_port=Port.OFPP_NONE, data=_get_data(),
                                    actions=_get_actions())
        super().set_minimum_size(16)

    def setUp(self):
        """Create a fresh message instance before each test."""
        self.message = self.get_raw_object()

    def test_valid_virtual_in_ports(self):
        """Virtual in_ports accepted by the 1.0.1 spec."""
        for port in (Port.OFPP_LOCAL, Port.OFPP_CONTROLLER, Port.OFPP_NONE):
            self.message.in_port = port
            self.assertTrue(self.message.is_valid())

    def test_invalid_virtual_in_ports(self):
        """Virtual in_ports rejected by the 1.0.1 spec."""
        rejected = (Port.OFPP_IN_PORT, Port.OFPP_TABLE, Port.OFPP_NORMAL,
                    Port.OFPP_FLOOD, Port.OFPP_ALL)
        for port in rejected:
            self.message.in_port = port
            self.assertFalse(self.message.is_valid())
            self.assertRaises(ValidationError, self.message.validate)

    def test_valid_physical_in_ports(self):
        """Boundary physical in_ports allowed by the 1.0.0 spec."""
        upper = int(Port.OFPP_MAX.value)
        for port in (1, upper):
            self.message.in_port = port
            self.assertTrue(self.message.is_valid())

    def test_invalid_physical_in_port(self):
        """Out-of-range physical in_ports per the 1.0.0 spec."""
        upper = int(Port.OFPP_MAX.value)
        for port in (-1, 0, upper + 1, upper + 2):
            self.message.in_port = port
            self.assertFalse(self.message.is_valid())
            self.assertRaises(ValidationError, self.message.validate)
def _get_actions():
    """Build the action list attached to the PacketOut fixture."""
    return [ActionOutput(port=1, max_length=0)]
def _get_data():
"""Function used to return a BinaryData used by packetout instance."""
data = b'\x01# \x00\x00\x01\xd2A\xc6.*@\x88\xcc\x02\x07\x07dpi'
data += b'd:1\x04\x02\x021\x06\x02\x00x\x0c\x06dpid:1\x00\x00'
return data
| mit | 9dd9e730b373622ada28d9f43c94b2ad | 38.638889 | 79 | 0.627891 | 3.488998 | false | true | false | false |
kytos/python-openflow | pyof/v0x01/controller2switch/common.py | 1 | 16355 | """Defines common structures and enums for controller2switch."""
# System imports
from enum import IntEnum
from pyof.foundation.base import GenericMessage, GenericStruct
from pyof.foundation.basic_types import (
BinaryData, Char, Pad, UBInt8, UBInt16, UBInt32, UBInt64)
from pyof.foundation.constants import (
DESC_STR_LEN, OFP_MAX_TABLE_NAME_LEN, SERIAL_NUM_LEN)
# Local source tree imports
from pyof.v0x01.common.action import ListOfActions
from pyof.v0x01.common.flow_match import FlowWildCards, Match
from pyof.v0x01.common.header import Header
from pyof.v0x01.common.phy_port import Port
# Third-party imports
__all__ = ('ConfigFlag', 'StatsType', 'AggregateStatsReply',
'AggregateStatsRequest', 'DescStats', 'FlowStats',
'FlowStatsRequest', 'PortStats', 'PortStatsRequest', 'QueueStats',
'QueueStatsRequest', 'TableStats', 'VendorStats',
'VendorStatsRequest')
# Enums
class ConfigFlag(IntEnum):
    """Configuration Flags. Handling of IP Fragments."""
    #: No special handling for fragments
    OFPC_FRAG_NORMAL = 0
    #: Drop fragments
    OFPC_FRAG_DROP = 1
    #: Reassemble (only if OFPC_IP_REASM set)
    OFPC_FRAG_REASM = 2
    #: Bitmask covering the fragment-handling bits above
    OFPC_FRAG_MASK = 3
class StatsType(IntEnum):
    """Type field to be used in both requests and replies.
    It specifies the kind of information being passed and determines how the
    body field is interpreted.
    """
    #: Description of this OpenFlow switch. The request body is empty.
    OFPST_DESC = 0
    #: Individual flow statistics. The request body is struct
    #: ofp_flow_stats_request.
    OFPST_FLOW = 1
    #: Aggregate flow statistics. The request body is struct
    #: ofp_aggregate_stats_request.
    OFPST_AGGREGATE = 2
    #: Flow table statistics. The request body is empty.
    OFPST_TABLE = 3
    #: Physical port statistics. The request body is empty.
    OFPST_PORT = 4
    #: Queue statistics for a port. The request body defines the port
    OFPST_QUEUE = 5
    #: Vendor extension. The request and reply bodies begin with a 32-bit
    #: vendor ID
    OFPST_VENDOR = 0xffff
# Classes
class SwitchConfig(GenericMessage):
    """Base class shared by SET_CONFIG and GET_CONFIG_REPLY messages."""

    header = Header()
    flags = UBInt16(enum_ref=ConfigFlag)
    miss_send_len = UBInt16()

    def __init__(self, xid=None, flags=None, miss_send_len=None):
        """Build a SwitchConfig message.

        Args:
            xid (int): xid to be used on the message header.
            flags (ConfigFlag): OFPC_* flags.
            miss_send_len (int): UBInt16 max bytes of new flow that the
                datapath should send to the controller.
        """
        super().__init__(xid)
        self.flags = flags
        self.miss_send_len = miss_send_len

    def __repr__(self):
        """Show a full representation of the object."""
        return (f"{self.__class__.__name__}(xid={self.header.xid!r}, "
                f"flags={self.flags}, miss_send_len={self.miss_send_len!r})")
class AggregateStatsReply(GenericStruct):
    """Body of reply to OFPST_AGGREGATE request."""
    packet_count = UBInt64()
    byte_count = UBInt64()
    flow_count = UBInt32()
    #: Align to 64 bits
    pad = Pad(4)
    def __init__(self, packet_count=None, byte_count=None, flow_count=None):
        """Create an AggregateStatsReply with the optional parameters below.
        Args:
            packet_count (int): Number of packets in flows
            byte_count (int): Number of bytes in flows
            flow_count (int): Number of flows
        """
        super().__init__()
        self.packet_count = packet_count
        self.byte_count = byte_count
        self.flow_count = flow_count
class AggregateStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_AGGREGATE."""
    match = Match()
    table_id = UBInt8()
    #: Align to 32 bits
    pad = Pad(1)
    out_port = UBInt16()
    def __init__(self, match=None, table_id=0xff, out_port=Port.OFPP_NONE):
        """Create an AggregateStatsRequest with the optional parameters below.
        Args:
            match (~pyof.v0x01.common.flow_match.Match): Fields to match.
                A fresh, empty Match when omitted.
            table_id (int): ID of table to read (from pyof_table_stats) 0xff
                for all tables or 0xfe for emergency.
            out_port (int): Require matching entries to include this as an
                output port. A value of OFPP_NONE indicates no restriction.
        """
        super().__init__()
        # A fresh Match per instance: the old default ``match=Match()`` was
        # evaluated once at import time, so all requests built without an
        # explicit match shared (and could mutate) the same object. This
        # mirrors the pattern already used by FlowStatsRequest.
        self.match = Match() if match is None else match
        self.table_id = table_id
        self.out_port = out_port
class DescStats(GenericStruct):
    """Information available from the OFPST_DESC stats request.
    Information about the switch manufacturer, hardware revision, software
    revision, serial number and a description field.
    """
    # Fixed-length text fields; sizes come from pyof.foundation.constants.
    mfr_desc = Char(length=DESC_STR_LEN)
    hw_desc = Char(length=DESC_STR_LEN)
    sw_desc = Char(length=DESC_STR_LEN)
    serial_num = Char(length=SERIAL_NUM_LEN)
    dp_desc = Char(length=DESC_STR_LEN)
    def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
                 serial_num=None, dp_desc=None):
        """Create a DescStats with the optional parameters below.
        Args:
            mfr_desc (str): Manufacturer description
            hw_desc (str): Hardware description
            sw_desc (str): Software description
            serial_num (str): Serial number
            dp_desc (str): Human readable description of datapath
        """
        super().__init__()
        self.mfr_desc = mfr_desc
        self.hw_desc = hw_desc
        self.sw_desc = sw_desc
        self.serial_num = serial_num
        self.dp_desc = dp_desc
class FlowStats(GenericStruct):
    """Body of reply to OFPST_FLOW request."""
    # Field declaration order defines the wire layout; do not reorder.
    length = UBInt16()
    table_id = UBInt8()
    #: Align to 32 bits.
    pad = Pad(1)
    match = Match()
    duration_sec = UBInt32()
    duration_nsec = UBInt32()
    priority = UBInt16()
    idle_timeout = UBInt16()
    hard_timeout = UBInt16()
    #: Align to 64-bits
    pad2 = Pad(6)
    cookie = UBInt64()
    packet_count = UBInt64()
    byte_count = UBInt64()
    actions = ListOfActions()
    def __init__(self, length=None, table_id=None, match=None,
                 duration_sec=None, duration_nsec=None, priority=None,
                 idle_timeout=None, hard_timeout=None, cookie=None,
                 packet_count=None, byte_count=None, actions=None):
        """Create a FlowStats with the optional parameters below.
        Args:
            length (int): Length of this entry.
            table_id (int): ID of table flow came from.
            match (~pyof.v0x01.common.flow_match.Match): Description of fields.
            duration_sec (int): Time flow has been alive in seconds.
            duration_nsec (int): Time flow has been alive in nanoseconds in
                addition to duration_sec.
            priority (int): Priority of the entry. Only meaningful when this
                is not an exact-match entry.
            idle_timeout (int): Number of seconds idle before expiration.
            hard_timeout (int): Number of seconds before expiration.
            cookie (int): Opaque controller-issued identifier.
            packet_count (int): Number of packets in flow.
            byte_count (int): Number of bytes in flow.
            actions (:class:`~pyof.v0x01.common.actions.ListOfActions`):
                List of Actions.
        """
        super().__init__()
        self.length = length
        self.table_id = table_id
        self.match = match
        self.duration_sec = duration_sec
        self.duration_nsec = duration_nsec
        self.priority = priority
        self.idle_timeout = idle_timeout
        self.hard_timeout = hard_timeout
        self.cookie = cookie
        self.packet_count = packet_count
        self.byte_count = byte_count
        self.actions = [] if actions is None else actions
    def unpack(self, buff, offset=0):
        """Unpack *buff* into this object.

        Read the 16-bit ``length`` field first, then unpack only that many
        bytes of *buff* into the remaining fields, so any trailing data that
        belongs to the next entry is left untouched.

        Args:
            buff (bytes): Buffer where data is located.
            offset (int): Where data stream begins.
        """
        self.length = UBInt16()
        self.length.unpack(buff, offset)
        max_length = offset + self.length.value
        super().unpack(buff[:max_length], offset)
class FlowStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_FLOW."""

    match = Match()
    table_id = UBInt8()
    #: Align to 32 bits.
    pad = Pad(1)
    out_port = UBInt16()

    def __init__(self, match=None, table_id=0xff, out_port=Port.OFPP_NONE):
        """Build a flow-stats request.

        Args:
            match (:class:`~pyof.v0x01.common.flow_match.Match`):
                Fields to match; a fresh, empty Match when omitted.
            table_id (int): Table to read (from pyof_table_stats):
                0xff for all tables, 0xfe for the emergency table.
            out_port (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
                Restrict matches to entries that include this output port;
                :attr:`.Port.OFPP_NONE` means no restriction.
        """
        super().__init__()
        if match is None:
            match = Match()
        self.match = match
        self.table_id = table_id
        self.out_port = out_port
class PortStats(GenericStruct):
    """Body of reply to OFPST_PORT request.
    If a counter is unsupported, set the field to all ones.
    """
    port_no = UBInt16()
    #: Align to 64-bits.
    pad = Pad(6)
    # Per-port counters; an unsupported counter is reported as all ones
    # (see class docstring).
    rx_packets = UBInt64()
    tx_packets = UBInt64()
    rx_bytes = UBInt64()
    tx_bytes = UBInt64()
    rx_dropped = UBInt64()
    tx_dropped = UBInt64()
    rx_errors = UBInt64()
    tx_errors = UBInt64()
    rx_frame_err = UBInt64()
    rx_over_err = UBInt64()
    rx_crc_err = UBInt64()
    collisions = UBInt64()
    def __init__(self, port_no=None, rx_packets=None,
                 tx_packets=None, rx_bytes=None, tx_bytes=None,
                 rx_dropped=None, tx_dropped=None, rx_errors=None,
                 tx_errors=None, rx_frame_err=None, rx_over_err=None,
                 rx_crc_err=None, collisions=None):
        """Create a PortStats with the optional parameters below.
        Args:
            port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
                Port number.
            rx_packets (int): Number of received packets.
            tx_packets (int): Number of transmitted packets.
            rx_bytes (int): Number of received bytes.
            tx_bytes (int): Number of transmitted bytes.
            rx_dropped (int): Number of packets dropped by RX.
            tx_dropped (int): Number of packets dropped by TX.
            rx_errors (int): Number of receive errors. This is a super-set of
                more specific receive errors and should be greater than or
                equal to the sum of all rx_*_err values.
            tx_errors (int): Number of transmit errors. This is a super-set of
                more specific transmit errors and should be greater than or
                equal to the sum of all tx_*_err values (none currently
                defined).
            rx_frame_err (int): Number of frame alignment errors.
            rx_over_err (int): Number of packets with RX overrun.
            rx_crc_err (int): Number of CRC errors.
            collisions (int): Number of collisions.
        """
        super().__init__()
        self.port_no = port_no
        self.rx_packets = rx_packets
        self.tx_packets = tx_packets
        self.rx_bytes = rx_bytes
        self.tx_bytes = tx_bytes
        self.rx_dropped = rx_dropped
        self.tx_dropped = tx_dropped
        self.rx_errors = rx_errors
        self.tx_errors = tx_errors
        self.rx_frame_err = rx_frame_err
        self.rx_over_err = rx_over_err
        self.rx_crc_err = rx_crc_err
        self.collisions = collisions
class PortStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_PORT."""
    #: Port to query, or OFPP_NONE to request statistics for all ports.
    port_no = UBInt16()
    #: Align to 64-bits.
    pad = Pad(6)
    def __init__(self, port_no=Port.OFPP_NONE):
        """Create a PortStatsRequest with the optional parameters below.
        Args:
            port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
                OFPST_PORT message must request statistics either for a single
                port (specified in ``port_no``) or for all ports
                (if ``port_no`` == :attr:`.Port.OFPP_NONE`).
        """
        super().__init__()
        self.port_no = port_no
class QueueStats(GenericStruct):
    """Body of reply to OFPST_QUEUE request (per-port queue counters)."""
    port_no = UBInt16()
    #: Align to 32-bits.
    pad = Pad(2)
    queue_id = UBInt32()
    tx_bytes = UBInt64()
    tx_packets = UBInt64()
    tx_errors = UBInt64()
    def __init__(self, port_no=None, queue_id=None, tx_bytes=None,
                 tx_packets=None, tx_errors=None):
        """Create a QueueStats with the optional parameters below.
        Args:
            port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
                Port Number.
            queue_id (int): Queue ID.
            tx_bytes (int): Number of transmitted bytes.
            tx_packets (int): Number of transmitted packets.
            tx_errors (int): Number of packets dropped due to overrun.
        """
        super().__init__()
        self.port_no = port_no
        self.queue_id = queue_id
        self.tx_bytes = tx_bytes
        self.tx_packets = tx_packets
        self.tx_errors = tx_errors
class QueueStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_QUEUE."""
    port_no = UBInt16()
    #: Align to 32-bits
    pad = Pad(2)
    queue_id = UBInt32()
    def __init__(self, port_no=None, queue_id=None):
        """Create a QueueStatsRequest with the optional parameters below.
        Args:
            port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
                All ports if :attr:`.Port.OFPP_ALL`.
            queue_id (int): All queues if OFPQ_ALL (``0xfffffff``).
        """
        super().__init__()
        self.port_no = port_no
        self.queue_id = queue_id
class TableStats(GenericStruct):
    """Body of reply to OFPST_TABLE request."""
    #: Identifier of table; lower-numbered tables are consulted first.
    table_id = UBInt8()
    #: Align to 32-bits.
    pad = Pad(3)
    name = Char(length=OFP_MAX_TABLE_NAME_LEN)
    wildcards = UBInt32(enum_ref=FlowWildCards)
    max_entries = UBInt32()
    active_count = UBInt32()
    count_lookup = UBInt64()
    count_matched = UBInt64()
    def __init__(self, table_id=None, name=None, wildcards=None,
                 max_entries=None, active_count=None, count_lookup=None,
                 count_matched=None):
        """Create a TableStats with the optional parameters below.
        Args:
            table_id (int): Identifier of table. Lower numbered tables are
                consulted first.
            name (str): Table name.
            wildcards (:class:`~pyof.v0x01.common.flow_match.FlowWildCards`):
                Bitmap of OFPFW_* wildcards that are supported by the table.
            max_entries (int): Max number of entries supported.
            active_count (int): Number of active entries.
            count_lookup (int): Number of packets looked up in table.
            count_matched (int): Number of packets that hit table.
        """
        super().__init__()
        self.table_id = table_id
        self.name = name
        self.wildcards = wildcards
        self.max_entries = max_entries
        self.active_count = active_count
        self.count_lookup = count_lookup
        self.count_matched = count_matched
class VendorStats(GenericStruct):
    """Vendor extension."""
    #: 32-bit vendor ID, as used by the OFPST_VENDOR stats type.
    vendor = UBInt32()
    #: Opaque, vendor-defined payload.
    body = BinaryData()
    def __init__(self, vendor=None, body=b''):
        """Create instance attributes.
        Args:
            vendor (int): 32-bit vendor ID.
            body (bytes): Vendor-defined body
        """
        super().__init__()
        self.vendor = vendor
        self.body = body
#: Request and reply bodies share the same layout, so the request is an alias.
VendorStatsRequest = VendorStats
| mit | 006d8ce662e5e13cf631448da2e27485 | 33.723992 | 79 | 0.603057 | 3.781503 | false | false | false | false |
kytos/python-openflow | pyof/foundation/basic_types.py | 1 | 26514 | """Basic types used in structures and messages."""
# System imports
import struct
from copy import deepcopy
# Local source tree imports
from pyof.foundation import exceptions
from pyof.foundation.base import GenericStruct, GenericType, UBIntBase
__all__ = ('BinaryData', 'Char', 'ConstantTypeList', 'FixedTypeList',
'IPAddress', 'DPID', 'HWAddress', 'Pad', 'UBInt8', 'UBInt16',
'UBInt32', 'UBInt64', 'UBInt128')
class Pad(GenericType):
    """Padding field: packs to NUL bytes and unpacks to nothing."""

    _fmt = ''

    def __init__(self, length=0):
        """Build a pad of ``length`` bytes.

        Args:
            length (int): Total length, in bytes.
        """
        super().__init__()
        self._length = length

    def __repr__(self):
        return f"{type(self).__name__}({self._length})"

    def __str__(self):
        return '0' * self._length

    def get_size(self, value=None):
        """Return the pad size in bytes.

        Args:
            value (int): Ignored; accepted only so structs can pass a value
                here exactly as they do for other field types.

        Returns:
            int: Size in bytes.
        """
        return self._length

    def unpack(self, buff, offset=0):
        """Unpack *buff* into this object.

        A deliberate no-op: the length is fixed at construction time.
        ``buff`` and ``offset`` exist only for interface compatibility
        with the other field types.
        """

    def pack(self, value=None):
        """Pack the pad as ``length`` NUL bytes.

        Args:
            value (int): Ignored; accepted for interface compatibility.

        Returns:
            bytes: the byte 0 (zero) *length* times.
        """
        return self._length * b'\x00'

    def __deepcopy__(self, memo):
        """Skip the generic deepcopy machinery; a fresh Pad is equivalent."""
        return Pad(length=self._length)
class UBInt8(UBIntBase):
    """Format character for an Unsigned Char.
    Class for an 8-bit (1-byte) Unsigned Integer.
    """
    #: ``struct`` format: big-endian unsigned char.
    _fmt = "!B"
class UBInt16(UBIntBase):
    """Format character for an Unsigned Short.
    Class for an 16-bit (2-byte) Unsigned Integer.
    """
    #: ``struct`` format: big-endian unsigned short.
    _fmt = "!H"
class UBInt32(UBIntBase):
    """Format character for an Unsigned Int.
    Class for an 32-bit (4-byte) Unsigned Integer.
    """
    #: ``struct`` format: big-endian unsigned int.
    _fmt = "!I"
class UBInt64(UBIntBase):
"""Format character for an Unsigned Long Long.
Class for an 64-bit (8-byte) Unsigned Integer.
"""
_fmt = "!Q"
class UBInt128(UBIntBase):
"""Format character for an Unsigned Long Long.
Class for an 128-bit (16-byte) Unsigned Integer.
"""
_fmt = "!8H"
class DPID(GenericType):
"""DataPath ID. Identifies a switch."""
_fmt = "!8B"
def __init__(self, dpid=None):
"""Create an instance and optionally set its dpid value.
Args:
dpid (str): String with DPID value(e.g. `00:00:00:00:00:00:00:01`).
"""
super().__init__(value=dpid)
def __str__(self):
return self._value
@property
def value(self):
"""Return dpid value.
Returns:
str: DataPath ID stored by DPID class.
"""
return self._value
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
return struct.pack('!8B', *[int(v, 16) for v in value.split(':')])
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
begin = offset
hexas = []
while begin < offset + 8:
number = struct.unpack("!B", buff[begin:begin+1])[0]
hexas.append("%.2x" % number)
begin += 1
self._value = ':'.join(hexas)
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return DPID(dpid=self._value)
class Char(GenericType):
"""Build a double char type according to the length."""
def __init__(self, value=None, length=0):
"""Create a Char with the optional parameters below.
Args:
value: The character to be build.
length (int): Character size.
"""
super().__init__(value)
self.length = length
self._fmt = '!{}{}'.format(self.length, 's')
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
try:
if value is None:
value = self.value
packed = struct.pack(self._fmt, bytes(value, 'ascii'))
return packed[:-1] + b'\0' # null-terminated
except struct.error as err:
msg = "Char Pack error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
try:
begin = offset
end = begin + self.length
unpacked_data = struct.unpack(self._fmt, buff[begin:end])[0]
except struct.error:
raise Exception("%s: %s" % (offset, buff))
self._value = unpacked_data.decode('ascii').rstrip('\0')
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return Char(value=self._value, length=self.length)
class IPAddress(GenericType):
"""Defines a IP address."""
netmask = UBInt32()
max_prefix = UBInt32(32)
def __init__(self, address="0.0.0.0/32", netmask=None):
"""Create an IPAddress with the parameters below.
Args:
address (str): IP Address using ipv4. Defaults to '0.0.0.0/32'
"""
if '/' in address:
address, netmask = address.split('/')
else:
netmask = 32 if netmask is None else netmask
super().__init__(address)
self.netmask = int(netmask)
def pack(self, value=None):
"""Pack the value as a binary representation.
If the value is None the self._value will be used to pack.
Args:
value (str): IP Address with ipv4 format.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
if value.find('/') >= 0:
value = value.split('/')[0]
try:
value = value.split('.')
return struct.pack('!4B', *[int(x) for x in value])
except struct.error as err:
msg = "IPAddress error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
try:
unpacked_data = struct.unpack('!4B', buff[offset:offset+4])
self._value = '.'.join([str(x) for x in unpacked_data])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
def get_size(self, value=None):
"""Return the ip address size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The address size in bytes.
"""
return 4
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return IPAddress(address=self._value, netmask=self.netmask)
class IPv6Address(GenericType):
"""Defines a IPv6 address."""
netmask = UBInt128()
def __init__(self, address="0000:0000:0000:0000:0000:0000:0000:0000/128",
netmask=None):
"""Create an IPv6Address with the parameters below.
Args:
address (str): IP Address using IPv6.
Defaults to '0000:0000:0000:0000:0000:0000:0000:0000/128'
"""
if '/' in address:
address, netmask = address.split('/')
else:
netmask = 128 if netmask is None else netmask
if address == '::':
address = '0:0:0:0:0:0:0:0'
elif '::' in address:
temp = address.split(':')
index = temp.index('')
temp = [x for x in temp if x != '']
address = temp[:index] + ['0'] * (8 - len(temp)) + temp[index:]
address = ':'.join(address)
super().__init__(address)
self.netmask = int(netmask)
def pack(self, value=None):
"""Pack the value as a binary representation.
If the value is None the self._value will be used to pack.
Args:
value (str): IP Address with IPv6 format.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
if value.find('/') >= 0:
value = value.split('/')[0]
try:
value = value.split(':')
return struct.pack('!8H', *[int(x, 16) for x in value])
except struct.error as err:
msg = "IPv6Address error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
def _int2hex(number):
return "{0:0{1}x}".format(number, 4)
try:
unpacked_data = struct.unpack('!8H', buff[offset:offset+16])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
transformed_data = ':'.join([_int2hex(x) for x in unpacked_data])
self._value = transformed_data
def get_size(self, value=None):
"""Return the IPv6 address size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The address size in bytes.
"""
return 16
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return IPv6Address(address=self._value, netmask=self.netmask)
class HWAddress(GenericType):
"""Defines a hardware address."""
# pylint: disable=useless-super-delegation
def __init__(self, hw_address='00:00:00:00:00:00'):
"""Create a HWAddress with the parameters below.
Args:
hw_address (bytes): Hardware address. Defaults to
'00:00:00:00:00:00'.
"""
super().__init__(hw_address)
def pack(self, value=None):
"""Pack the value as a binary representation.
If the passed value (or the self._value) is zero (int), then the pack
will assume that the value to be packed is '00:00:00:00:00:00'.
Returns
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
if value == 0:
value = '00:00:00:00:00:00'
value = value.split(':')
try:
return struct.pack('!6B', *[int(x, 16) for x in value])
except struct.error as err:
msg = "HWAddress error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
def _int2hex(number):
return "{0:0{1}x}".format(number, 2)
try:
unpacked_data = struct.unpack('!6B', buff[offset:offset+6])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
transformed_data = ':'.join([_int2hex(x) for x in unpacked_data])
self._value = transformed_data
def get_size(self, value=None):
"""Return the address size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The address size in bytes.
"""
return 6
def is_broadcast(self):
"""Return true if the value is a broadcast address. False otherwise."""
return self.value == 'ff:ff:ff:ff:ff:ff'
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return HWAddress(hw_address=self._value)
class BinaryData(GenericType):
"""Class to create objects that represent binary data.
This is used in the ``data`` attribute from
:class:`~pyof.v0x01.asynchronous.packet_in.PacketIn` and
:class:`~pyof.v0x01.controller2switch.packet_out.PacketOut` messages.
Both the :meth:`pack` and :meth:`unpack` methods will return the
binary data itself. :meth:`get_size` method will
return the size of the instance using Python's :func:`len`.
"""
def __init__(self, value=None): # pylint: disable=useless-super-delegation
"""Initialize with a value (optional).
Args:
value (bytes): The binary data. Defaults to an empty value.
"""
super().__init__(value)
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
Raises:
ValueError: If value can't be represented with bytes
"""
if value is None:
value = self._value
if hasattr(value, 'pack') and callable(value.pack):
return value.pack()
if isinstance(value, bytes):
return value
if value is None:
return b''
raise ValueError(f"BinaryData can't be {type(value)} = '{value}'")
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results. Since the *buff* is binary data, no conversion is done.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
"""
self._value = buff[offset:]
def get_size(self, value=None):
"""Return the size in bytes.
Args:
value (bytes): In structs, the user can assign other value instead
of this class' instance. Here, in such cases, ``self`` is a
class attribute of the struct.
Returns:
int: The address size in bytes.
"""
if value is None:
value = self._value
if hasattr(value, 'get_size'):
return value.get_size()
return len(self.pack(value))
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return BinaryData(value=self._value)
class TypeList(list, GenericStruct):
"""Base class for lists that store objects of one single type."""
def __init__(self, items):
"""Initialize the list with one item or a list of items.
Args:
items (iterable, ``pyof_class``): Items to be stored.
"""
super().__init__()
if isinstance(items, list):
self.extend(items)
elif items:
self.append(items)
def extend(self, items):
"""Extend the list by adding all items of ``items``.
Args:
items (iterable): Items to be added to the list.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has an unexpected
type.
"""
for item in items:
self.append(item)
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self
else:
container = type(self)(items=None)
container.extend(value)
value = container
bin_message = b''
try:
for item in value:
bin_message += item.pack()
return bin_message
except exceptions.PackException as err:
msg = "{} pack error: {}".format(type(self).__name__, err)
raise exceptions.PackException(msg)
# pylint: disable=arguments-differ
def unpack(self, buff, item_class, offset=0):
"""Unpack the elements of the list.
Args:
buff (bytes): The binary data to be unpacked.
item_class (:obj:`type`): Class of the expected items on this list.
offset (int): If we need to shift the beginning of the data.
"""
begin = offset
limit_buff = len(buff)
while begin < limit_buff:
item = item_class()
item.unpack(buff, begin)
self.append(item)
begin += item.get_size()
# pylint: enable=arguments-differ
def get_size(self, value=None):
"""Return the size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The size in bytes.
"""
if value is None:
if not self:
# If this is a empty list, then returns zero
return 0
if issubclass(type(self[0]), GenericType):
# If the type of the elements is GenericType, then returns the
# length of the list multiplied by the size of the GenericType.
return len(self) * self[0].get_size()
# Otherwise iter over the list accumulating the sizes.
return sum(item.get_size() for item in self)
return type(self)(value).get_size()
def __str__(self):
"""Human-readable object representantion."""
return "{}".format([str(item) for item in self])
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
items = [deepcopy(item) for item in self]
return TypeList(items=items)
class FixedTypeList(TypeList):
"""A list that stores instances of one pyof class."""
_pyof_class = None
def __init__(self, pyof_class, items=None):
"""Create a FixedTypeList with the parameters follows.
Args:
pyof_class (:obj:`type`): Class of the items to be stored.
items (iterable, ``pyof_class``): Items to be stored.
"""
self._pyof_class = pyof_class
super().__init__(items)
def append(self, item):
"""Append one item to the list.
Args:
item: Item to be appended. Its type must match the one defined in
the constructor.
Raises:
:exc:`~.exceptions.WrongListItemType`: If the item has a different
type than the one specified in the constructor.
"""
if isinstance(item, list):
self.extend(item)
elif issubclass(item.__class__, self._pyof_class):
list.append(self, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self._pyof_class.__name__)
def insert(self, index, item):
"""Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted. It must have the type specified in the
constructor.
Raises:
:exc:`~.exceptions.WrongListItemType`: If the item has a different
type than the one specified in the constructor.
"""
if issubclass(item.__class__, self._pyof_class):
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self._pyof_class.__name__)
def unpack(self, buff, offset=0): # pylint: disable=arguments-differ
"""Unpack the elements of the list.
This unpack method considers that all elements have the same size.
To use this class with a pyof_class that accepts elements with
different sizes, you must reimplement the unpack method.
Args:
buff (bytes): The binary data to be unpacked.
offset (int): If we need to shift the beginning of the data.
"""
super().unpack(buff, self._pyof_class, offset)
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
items = [deepcopy(item) for item in self]
return FixedTypeList(pyof_class=self._pyof_class, items=items)
class ConstantTypeList(TypeList):
"""List that contains only objects of the same type (class).
The types of all items are expected to be the same as the first item's.
Otherwise, :exc:`~.exceptions.WrongListItemType` is raised in many
list operations.
"""
# pylint: disable=useless-super-delegation
def __init__(self, items=None):
"""Create a ConstantTypeList that can contain itens to be stored.
Args:
items (iterable, :class:`object`): Items to be stored.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
super().__init__(items)
def append(self, item):
"""Append one item to the list.
Args:
item: Item to be appended.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
if isinstance(item, list):
self.extend(item)
elif not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.append(self, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__)
def insert(self, index, item):
"""Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
if not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__)
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
items = [deepcopy(item) for item in self]
return ConstantTypeList(items=items)
| mit | fe96064fc63e2e05a1cf05858e2c65aa | 29.095346 | 79 | 0.548503 | 4.399204 | false | false | false | false |
wradlib/wradlib | wradlib/__init__.py | 3 | 1147 | #!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
wradlib
=======
"""
# Make sure that deprecation warnings get printed by default
import warnings as _warnings
_warnings.filterwarnings("always", category=DeprecationWarning, module="wradlib")
# versioning
try:
from .version import version as __version__
except Exception:
# Local copy or not installed with setuptools.
# Disable minimum version checks on downstream libraries.
__version__ = "999"
# import subpackages
from . import adjust # noqa
from . import atten # noqa
from . import classify # noqa
from . import clutter # noqa
from . import comp # noqa
from . import dp # noqa
from . import georef # noqa
from . import io # noqa
from . import ipol # noqa
from . import qual # noqa
from . import trafo # noqa
from . import util # noqa
from . import verify # noqa
from . import vis # noqa
from . import vpr # noqa
from . import zonalstats # noqa
from . import zr # noqa
from .util import show_versions # noqa
__all__ = [s for s in dir() if not s.startswith("_")]
| mit | 28babfb38ac155906403d4863df33f47 | 25.674419 | 81 | 0.699215 | 3.584375 | false | false | false | false |
wradlib/wradlib | wradlib/trafo.py | 3 | 10505 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Data Transformation
^^^^^^^^^^^^^^^^^^^
Module <trafo> transforms data e.g. from RVP-units
to dBZ-values to Z-values and vice versa.
.. currentmodule:: wradlib.trafo
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = [
"rvp_to_dbz",
"decibel",
"idecibel",
"r_to_depth",
"kdp_to_r",
"si_to_kmh",
"si_to_mph",
"si_2_kts",
"kmh_to_si",
"mph_to_si",
"kts_to_si",
"KuBandToS",
"SBandToKu",
]
__doc__ = __doc__.format("\n ".join(__all__))
import numpy as np
# CONSTANTS
meters_per_mile = 1609.344
meters_per_nautical_mile = 1852.0
class SBandToKu:
"""Class to hold coefficients for Radar Reflectivity Conversion
From S-band (2.8GHz) to Ku-band (13.8GHz)
See :cite:`Liao2009` for reference.
"""
snow = np.array([0.185074, 1.01378, -0.00189212])
rain = np.array([-1.50393, 1.07274, 0.000165393])
class KuBandToS:
"""Class to hold coefficients for Radar Reflectivity Conversion
From Ku-band (13.8 GHz) to S-band (2.8 GHz)
See :cite:`Cao2013` for reference.
"""
# Rain 90% 80% 70% 60% 50% 40% 30% 20% 10% Snow
snow = np.array(
[
[
4.78e-2,
4.12e-2,
8.12e-2,
1.59e-1,
2.87e-1,
4.93e-1,
8.16e-1,
1.31e0,
2.01e0,
2.82e0,
1.74e-1,
],
[
1.23e-2,
3.66e-3,
2.00e-3,
9.42e-4,
5.29e-4,
5.96e-4,
1.22e-3,
2.11e-3,
3.34e-3,
5.33e-3,
1.35e-2,
],
[
-3.50e-4,
1.17e-3,
1.04e-3,
8.16e-4,
6.59e-4,
5.85e-4,
6.13e-4,
7.01e-4,
8.24e-4,
1.01e-3,
-1.38e-3,
],
[
-3.30e-5,
-8.08e-5,
-6.44e-5,
-4.97e-5,
-4.15e-5,
-3.89e-5,
-4.15e-5,
-4.58e-5,
-5.06e-5,
-5.78e-5,
4.74e-5,
],
[
4.27e-7,
9.25e-7,
7.41e-7,
6.13e-7,
5.80e-7,
6.16e-7,
7.12e-7,
8.22e-7,
9.39e-7,
1.10e-6,
0.00e0,
],
]
)
# Rain 90% 80% 70% 60% 50% 40% 30% 20% 10% Hail
hail = np.array(
[
[
4.78e-2,
1.80e-1,
1.95e-1,
1.88e-1,
2.36e-1,
2.70e-1,
2.98e-1,
2.85e-1,
1.75e-1,
4.30e-2,
8.80e-2,
],
[
1.23e-2,
-3.73e-2,
-3.83e-2,
-3.29e-2,
-3.46e-2,
-2.94e-2,
-2.10e-2,
-9.96e-3,
-8.05e-3,
-8.27e-3,
5.39e-2,
],
[
-3.50e-4,
4.08e-3,
4.14e-3,
3.75e-3,
3.71e-3,
3.22e-3,
2.44e-3,
1.45e-3,
1.21e-3,
1.66e-3,
-2.99e-4,
],
[
-3.30e-5,
-1.59e-4,
-1.54e-4,
-1.39e-4,
-1.30e-4,
-1.12e-4,
-8.56e-5,
-5.33e-5,
-4.66e-5,
-7.19e-5,
1.90e-5,
],
[
4.27e-7,
1.59e-6,
1.51e-6,
1.37e-6,
1.29e-6,
1.15e-6,
9.40e-7,
6.71e-7,
6.33e-7,
9.52e-7,
0.00e0,
],
]
)
def rvp_to_dbz(x):
"""Calculates dBZ-values from DWD RVP6 values as given in DX-product
files.
Parameters
----------
x : float or :class:`numpy:numpy.ndarray`
a number or an array
Examples
--------
>>> from wradlib.trafo import rvp_to_dbz
>>> print(rvp_to_dbz(65.))
0.0
"""
return x * 0.5 - 32.5
def decibel(x):
"""Calculates the decibel representation of the input values
:math:`dBZ=10 \\cdot \\log_{10} z`
Parameters
----------
x : float or :class:`numpy:numpy.ndarray`
(must not be <= 0.)
Examples
--------
>>> from wradlib.trafo import decibel
>>> print(decibel(100.))
20.0
"""
return 10.0 * np.log10(x)
def idecibel(x):
"""Calculates the inverse of input decibel values
:math:`z=10^{x \\over 10}`
Parameters
----------
x : float or :class:`numpy:numpy.ndarray`
Examples
--------
>>> from wradlib.trafo import idecibel
>>> print(idecibel(10.))
10.0
"""
return 10.0 ** (x / 10.0)
def r_to_depth(x, interval):
"""Computes rainfall depth (mm) from rainfall intensity (mm/h)
Parameters
----------
x : float or :class:`numpy:numpy.ndarray`
rainfall intensity in mm/h
interval : float
time interval (s) the values of `x` represent
Returns
-------
output : float or :class:`numpy:numpy.ndarray`
rainfall depth (mm)
"""
return x * interval / 3600.0
def kdp_to_r(kdp, f, a=129.0, b=0.85):
"""Estimating rainfall intensity directly from specific differential phase.
The general power law expression has been suggested by :cite:`Ryzhkov2005`.
The default parameters have been set according to :cite:`Bringi2001`.
Note
----
**Please note that this way, rainfall intensities can become negative.**
This is an intended behaviour in order to account for noisy :math:`K_{DP}`
values.
Parameters
----------
kdp : float or :class:`numpy:numpy.ndarray`
:math:`K_{DP}` as array of floats
f : float
radar frequency [GHz]
- Standard frequencies in X-band range between 8.0 and 12.0 GHz,
- Standard frequencies in C-band range between 4.0 and 8.0 GHz,
- Standard frequencies in S-band range between 2.0 and 4.0 GHz.
a : float
linear coefficient of the power law
b : float
exponent of the power law
Returns
-------
output : :class:`numpy:numpy.ndarray`
array of rainfall intensity
"""
return np.sign(kdp) * a * (np.abs(kdp) / f) ** b
def si_to_kmh(vals):
"""Conversion from SI wind speed units to km/hr.
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals : float or :class:`numpy:numpy.ndarray`
Speed in SI units (m/s)
Returns
-------
output: float or :class:`numpy:numpy.ndarray`
Speed in km/hr
Examples
--------
>>> from wradlib.trafo import si_to_kmh
>>> print(si_to_kmh(1.))
3.6
"""
return vals * 3600.0 / 1000.0
def si_to_mph(vals):
"""Conversion from SI wind speed units to miles/hr
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals : float or :class:`numpy:numpy.ndarray`
Speed in SI units (m/s)
Returns
-------
output: float or :class:`numpy:numpy.ndarray`
Speed in miles per hour
Examples
--------
>>> from wradlib.trafo import si_to_mph
>>> print(np.round(si_to_mph(1.), 3))
2.237
"""
return vals * 3600.0 / meters_per_mile
def si_2_kts(vals):
"""Conversion from SI wind speed units to knots
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals : float or :class:`numpy:numpy.ndarray`
Speed in SI units (m/s)
Returns
-------
output: float or :class:`numpy:numpy.ndarray`
Speed in knots
Examples
--------
>>> from wradlib.trafo import si_2_kts
>>> print(np.round(si_2_kts(1.), 3))
1.944
"""
return vals * 3600.0 / meters_per_nautical_mile
def kmh_to_si(vals):
"""Conversion from km/hr to SI wind speed units
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals: float or :class:`numpy:numpy.ndarray`
Wind speed in km/hr
Returns
-------
output: float or :class:`numpy:numpy.ndarray`
Wind speed in SI units (m/s)
Examples
--------
>>> from wradlib.trafo import kmh_to_si
>>> print(np.round(kmh_to_si(10.), 3))
2.778
"""
return vals * 1000.0 / 3600.0
def mph_to_si(vals):
"""Conversion from miles/hr to SI wind speed units
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals: float or :class:`numpy:numpy.ndarray`
Wind speed in miles per hour
Returns
-------
output: float or :class:`numpy:numpy.ndarray`
Wind speed in SI units (m/s)
Examples
--------
>>> from wradlib.trafo import mph_to_si
>>> print(np.round(mph_to_si(10.), 2))
4.47
"""
return vals * meters_per_mile / 3600.0
def kts_to_si(vals):
"""Conversion from knots to SI wind speed units
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals: float or :class:`numpy:numpy.ndarray`
Wind speed in knots
Returns
-------
output: float or :class:`numpy:numpy.ndarray`
Wind speed in SI units (m/s)
Examples
--------
>>> from wradlib.trafo import kts_to_si
>>> print(np.round(kts_to_si(1.), 3))
0.514
"""
return vals * meters_per_nautical_mile / 3600.0
if __name__ == "__main__":
print("wradlib: Calling module <trafo> as main...")
| mit | e36f9038bb78e666af079eff6da4b9f2 | 20.786307 | 79 | 0.459004 | 3.241049 | false | false | false | false |
prkumar/uplink | uplink/converters/__init__.py | 1 | 5621 | # Local imports
from uplink._extras import installer, plugin
from uplink.compat import abc
from uplink.converters import keys
from uplink.converters.interfaces import Factory, ConverterFactory, Converter
from uplink.converters.register import (
get_default_converter_factories,
register_default_converter_factory,
)
# Default converters - load standard first so it's ensured to be the
# last in the converter chain.
# fmt: off
from uplink.converters.standard import StandardConverter
from uplink.converters.marshmallow_ import MarshmallowConverter
from uplink.converters.pydantic_ import PydanticConverter
from uplink.converters.typing_ import TypingConverter
# fmt: on
__all__ = [
"StandardConverter",
"MarshmallowConverter",
"PydanticConverter",
"TypingConverter",
"get_default_converter_factories",
"register_default_converter_factory",
"Factory",
"ConverterFactory", # TODO: Remove this in v1.0.0
"Converter",
"keys",
]
install = register_default_converter_factory
"""
Registers the given converter as a default converter, meaning the
converter will be included automatically with any consumer instance
and doesn't need to be explicitly provided through the ``converter``
parameter to be used.
"""
# Define plugin and installer
plugin("converters")(install)
installer(Factory)(install)
class ConverterChain(object):
def __init__(self, converter_factory):
self._converter_factory = converter_factory
def __call__(self, *args, **kwargs):
converter = self._converter_factory(*args, **kwargs)
if isinstance(converter, Converter):
converter.set_chain(self)
return converter
class ConverterFactoryRegistry(abc.Mapping):
"""
A registry that chains together
:py:class:`interfaces.ConverterFactory` instances.
When queried for a factory that can handle a particular converter
type (e.g., ``keys.CONVERT_TO_REQUEST_BODY``), the registry
traverses the chain until it finds a converter factory that can
handle the request (i.e., the type's associated method returns a
value other than ``None``).
Here's an example -- it's contrived but effectively details the
expected pattern of usage::
# Create a registry with a single factory in its chain.
registry = ConverterFactoryRegistry((StandardConverter,))
# Get a callable that returns converters for turning arbitrary
# objects into strings.
get_str_converter_for_type = registry[keys.CONVERT_TO_STRING]
# Traverse the chain to find a converter that can handle
# converting ints into strings.
converter = get_str_converter_for_type(int)
Args:
factories: An iterable of converter factories. Factories that
appear earlier in the chain are given the opportunity to
handle a request before those that appear later.
"""
#: A mapping of keys to callables. Each callable value accepts a
#: single argument, a :py:class:`interfaces.ConverterFactory`
#: subclass, and returns another callable, which should return a
#: :py:`interfaces.Converter` instance.
_converter_factory_registry = {}
def __init__(self, factories=(), *args, **kwargs):
self._factories = tuple(factories)
self._args = args
self._kwargs = kwargs
@property
def factories(self):
"""
Yields the registry's chain of converter factories, in order.
"""
return iter(self._factories)
def _make_chain_for_func(self, func):
def chain(*args, **kwargs):
args = args + self._args
kwargs = dict(self._kwargs, **kwargs)
for factory in self.factories:
converter = func(factory)(*args, **kwargs)
if callable(converter):
return converter
return ConverterChain(chain)
def _make_chain_for_key(self, converter_key):
return self._make_chain_for_func(
self._converter_factory_registry[converter_key]
)
def __getitem__(self, converter_key):
"""
Retrieves a callable that creates converters for the type
associated to the given key.
If the given key is a callable, it will be recursively invoked
to retrieve the final callable. See :py:class:`keys.Map` for
an example of such a key. These callable keys should accept a
single argument, a :py:class:`ConverterFactoryRegistry`.
"""
if callable(converter_key):
return converter_key(self)
else:
return self._make_chain_for_key(converter_key)
def __len__(self):
return len(self._converter_factory_registry)
def __iter__(self):
return iter(self._converter_factory_registry)
@classmethod
def register(cls, converter_key):
"""
Returns a decorator that can be used to register a callable for
the given ``converter_key``.
"""
def wrapper(func):
cls._converter_factory_registry[converter_key] = func
return func
return wrapper
@ConverterFactoryRegistry.register(keys.CONVERT_TO_REQUEST_BODY)
def create_request_body_converter(factory):
return factory.create_request_body_converter
@ConverterFactoryRegistry.register(keys.CONVERT_FROM_RESPONSE_BODY)
def create_response_body_converter(factory):
return factory.create_response_body_converter
@ConverterFactoryRegistry.register(keys.CONVERT_TO_STRING)
def create_string_converter(factory):
return factory.create_string_converter
| mit | 10b8c0186d2376c6adb28bc3782b4902 | 32.064706 | 77 | 0.682085 | 4.377726 | false | false | false | false |
theonion/betty-cropper | betty/cropper/dssim.py | 1 | 5775 | try:
import numpy as np
import scipy.ndimage
except ImportError:
pass
from betty.conf.app import settings
import io
import math
from PIL import Image
MIN_UNIQUE_COLORS = 4096
COLOR_DENSITY_RATIO = 0.11
QUALITY_IN_MIN = 82
ERROR_THRESHOLD = 1.3
ERROR_THRESHOLD_INACCURACY = 0.01
def compute_ssim(im1, im2, l=255):
    """
    Compute the mean structural similarity (SSIM) between two
    single-channel images.

    Parameters:
        im1, im2: 2-D arrays of equal shape, one image channel each
            (any numeric dtype; converted to float64 internally).
        l: dynamic range of the pixel values (255 for 8-bit channels).

    Returns:
        The mean of the per-pixel SSIM map as a float; 1.0 means the
        inputs are identical.
    """
    # k1,k2 & c1,c2 depend on L (width of color map); these are the
    # standard SSIM stabilization constants.
    k_1 = 0.01
    c_1 = (k_1 * l) ** 2
    k_2 = 0.03
    c_2 = (k_2 * l) ** 2

    # 8x8 uniform (box) window used for all local statistics; weights
    # sum to exactly 1.0.
    window = np.ones((8, 8)) / 64.0

    # Use float64 explicitly: the bare ``np.float`` alias this code
    # previously relied on was removed in NumPy 1.24, and double
    # precision matches the Matlab reference implementation.
    im1 = im1.astype(np.float64)
    im2 = im2.astype(np.float64)

    # Means obtained by box filtering of inputs.  ``scipy.ndimage.filters``
    # is a removed alias namespace; call ``scipy.ndimage`` directly.
    mu_1 = scipy.ndimage.convolve(im1, window)
    mu_2 = scipy.ndimage.convolve(im2, window)

    # Squares and cross-term of the means.
    mu_1_sq = mu_1 ** 2
    mu_2_sq = mu_2 ** 2
    mu_1_mu_2 = mu_1 * mu_2

    # Local raw second moments of the inputs.
    im1_sq = im1 ** 2
    im2_sq = im2 ** 2
    im12 = im1 * im2
    sigma_1_sq = scipy.ndimage.convolve(im1_sq, window)
    sigma_2_sq = scipy.ndimage.convolve(im2_sq, window)
    sigma_12 = scipy.ndimage.convolve(im12, window)

    # Convert raw moments into centered variances / covariance.
    sigma_1_sq -= mu_1_sq
    sigma_2_sq -= mu_2_sq
    sigma_12 -= mu_1_mu_2

    if (c_1 > 0) & (c_2 > 0):
        ssim_map = (((2 * mu_1_mu_2 + c_1) * (2 * sigma_12 + c_2)) /
                    ((mu_1_sq + mu_2_sq + c_1) * (sigma_1_sq + sigma_2_sq + c_2)))
    else:
        # Degenerate-constant path (c_1 or c_2 == 0): guard each pixel
        # against division by zero individually.
        numerator1 = 2 * mu_1_mu_2 + c_1
        numerator2 = 2 * sigma_12 + c_2
        denominator1 = mu_1_sq + mu_2_sq + c_1
        denominator2 = sigma_1_sq + sigma_2_sq + c_2
        # Match the 2-D input shape: the previous ``np.ones(mu_1.size)``
        # produced a 1-D array that the 2-D boolean masks below could
        # not index.
        ssim_map = np.ones(mu_1.shape)
        index = (denominator1 * denominator2 > 0)
        ssim_map[index] = ((numerator1[index] * numerator2[index]) /
                           (denominator1[index] * denominator2[index]))
        index = (denominator1 != 0) & (denominator2 == 0)
        ssim_map[index] = numerator1[index] / denominator1[index]

    # Return the mean SSIM over the whole map (MSSIM).
    return np.mean(ssim_map)
def unique_colors(img):
    """Count the distinct colors in an ``(H, W, channels)`` image array."""
    # View each pixel's channel bytes as a single opaque (void) scalar so
    # np.unique treats whole colors as individual elements.
    # (Taken from: http://stackoverflow.com/a/16973510)
    pixel_bytes = img.dtype.itemsize * img.shape[2]
    as_void = np.ascontiguousarray(img).view(np.dtype((np.void, pixel_bytes)))
    return np.unique(as_void).size
def color_density(img):
    """Return the number of unique colors per pixel of image area."""
    height, width = img.shape[0], img.shape[1]
    return unique_colors(img) / float(height * width)
def enough_colors(img):
    """
    Return True if the image has enough distinct colors to be worth
    optimizing.

    The threshold check (``unique_colors(img) < MIN_UNIQUE_COLORS``)
    is deliberately disabled: every image currently counts as having
    enough colors.  The previous implementation kept that check as
    unreachable code after an unconditional ``return True``; the dead
    branch has been removed.
    """
    # Someday, check if the image is greyscale...
    return True
def get_distortion(one, two):
    """
    Return a DSSIM-style distortion score between two images: the mean
    per-channel SSIM mapped so that 0.0 means the images are identical
    and larger values mean more visible difference.
    """
    channel_ssims = []
    for channel_index in range(one.shape[2]):
        first = np.ascontiguousarray(one[:, :, channel_index])
        second = np.ascontiguousarray(two[:, :, channel_index])
        channel_ssims.append(compute_ssim(first, second))
    mean_ssim = np.mean(channel_ssims)
    return (1 / mean_ssim - 1) * 20
def detect_optimal_quality(image_buffer, width=None, verbose=False):
    """Returns the optimal quality for a given image, at a given width.

    Binary-searches JPEG quality settings (bounded by
    ``settings.BETTY_JPEG_QUALITY_RANGE``) for the lowest quality whose
    DSSIM-based perceptual error stays under ``ERROR_THRESHOLD``.

    ``image_buffer`` is a readable file-like object holding the source
    image; ``width``, if given, resizes before analysis.  Returns an
    integer quality, or None if the image shouldn't be optimized.
    """
    # Open the image...
    pil_original = Image.open(image_buffer)
    # Preserve the embedded color profile so re-saves don't shift colors.
    icc_profile = pil_original.info.get("icc_profile")
    if pil_original.format != "JPEG":
        # Uhoh, this isn't a JPEG, let's convert it to one.
        pillow_kwargs = {
            "format": "jpeg",
            "quality": 100,
            "subsampling": 2
        }
        if icc_profile:
            pillow_kwargs["icc_profile"] = icc_profile
        tmp = io.BytesIO()
        pil_original.save(tmp, **pillow_kwargs)
        tmp.seek(0)
        pil_original = Image.open(tmp)
    if width:
        # Scale to the requested width, preserving the aspect ratio.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS) -- confirm the project's Pillow version pin.
        height = int(math.ceil((pil_original.size[1] * width) / float(pil_original.size[0])))
        pil_original = pil_original.resize((width, height), resample=Image.ANTIALIAS)
    np_original = np.asarray(pil_original)
    original_density = color_density(np_original)
    # Check if there are enough colors (assuming RGB for the moment)
    if not enough_colors(np_original):
        return None
    # TODO: Check if the quality is lower than we'd want... (probably impossible)
    qmin = settings.BETTY_JPEG_QUALITY_RANGE[0]
    qmax = settings.BETTY_JPEG_QUALITY_RANGE[1]
    # Do a binary search of image quality...
    while qmax > qmin + 1:
        quality = int(round((qmax + qmin) / 2.0))
        tmp = io.BytesIO()
        pillow_kwargs = {
            "format": "jpeg",
            "quality": quality,
            "subsampling": 2
        }
        if icc_profile:
            pillow_kwargs["icc_profile"] = icc_profile
        pil_original.save(tmp, **pillow_kwargs)
        tmp.seek(0)
        pil_compressed = Image.open(tmp)
        np_compressed = np.asarray(pil_compressed)
        # Relative shrinkage of the color palette caused by compression.
        density_ratio = abs(color_density(np_compressed) - original_density) / original_density
        error = get_distortion(np_original, np_compressed)
        if density_ratio > COLOR_DENSITY_RATIO:
            # Heavy color loss: inflate the error so the search backs off.
            error *= 1.25 + density_ratio
        if error > ERROR_THRESHOLD:
            # Too lossy at this quality -- search the upper half.
            qmin = quality
        else:
            # Acceptable -- try pushing the quality lower.
            qmax = quality
        if verbose:
            print("{:.2f}/{:.2f}@{}".format(error, density_ratio, quality))
        if abs(error - ERROR_THRESHOLD) < ERROR_THRESHOLD * ERROR_THRESHOLD_INACCURACY:
            # Close enough!
            qmax = quality
            break
    return qmax
| mit | 4e4d1e9568ee843732c1994a0464204a | 27.448276 | 95 | 0.592727 | 3.224456 | false | false | false | false |
prkumar/uplink | uplink/converters/marshmallow_.py | 1 | 3461 | """
This module defines a converter that uses :py:mod:`marshmallow` schemas
to deserialize and serialize values.
"""
# Local imports
from uplink import utils
from uplink.converters import interfaces, register_default_converter_factory
class MarshmallowConverter(interfaces.Factory):
    """
    A converter that serializes and deserializes values using
    :py:mod:`marshmallow` schemas.

    To deserialize JSON responses into Python objects with this
    converter, define a :py:class:`marshmallow.Schema` subclass and set
    it as the return annotation of a consumer method:

    .. code-block:: python

        @get("/users")
        def get_users(self, username) -> UserSchema():
            '''Fetch a single user'''

    Note:
        This converter is an optional feature and requires the
        :py:mod:`marshmallow` package. For example, here's how to
        install this feature using pip::

            $ pip install uplink[marshmallow]
    """

    try:
        import marshmallow
    except ImportError:  # pragma: no cover
        marshmallow = None
        is_marshmallow_3 = None
    else:
        # Compare the parsed major version number instead of the raw
        # version string: a lexicographic comparison (the previous
        # ``__version__ >= "3.0"``) would misclassify any double-digit
        # major release, since "10.0" < "3.0" as strings.
        is_marshmallow_3 = int(marshmallow.__version__.split(".")[0]) >= 3

    def __init__(self):
        if self.marshmallow is None:
            raise ImportError("No module named 'marshmallow'")

    class ResponseBodyConverter(interfaces.Converter):
        """Deserializes a JSON response body through a schema."""

        def __init__(self, extract_data, schema):
            self._extract_data = extract_data
            self._schema = schema

        def convert(self, response):
            try:
                json = response.json()
            except AttributeError:
                # Assume that the response is already json
                json = response
            return self._extract_data(self._schema.load(json))

    class RequestBodyConverter(interfaces.Converter):
        """Serializes an object into a request body through a schema."""

        def __init__(self, extract_data, schema):
            self._extract_data = extract_data
            self._schema = schema

        def convert(self, value):
            return self._extract_data(self._schema.dump(value))

    @classmethod
    def _get_schema(cls, type_):
        # Accept either a Schema subclass (instantiate it) or a ready
        # Schema instance (use it as-is).
        if utils.is_subclass(type_, cls.marshmallow.Schema):
            return type_()
        elif isinstance(type_, cls.marshmallow.Schema):
            return type_
        # Fixed typo in the error message ("Scheme" -> "Schema").
        raise ValueError("Expected marshmallow.Schema subclass or instance.")

    def _extract_data(self, m):
        # After marshmallow 3.0, Schema.load() and Schema.dump() don't
        # return a (data, errors) tuple any more. Only `data` is returned.
        return m if self.is_marshmallow_3 else m.data

    def _make_converter(self, converter_cls, type_):
        try:
            # Try to generate schema instance from the given type.
            schema = self._get_schema(type_)
        except ValueError:
            # Failure: the given type is not a `marshmallow.Schema`.
            return None
        else:
            return converter_cls(self._extract_data, schema)

    def create_request_body_converter(self, type_, *args, **kwargs):
        return self._make_converter(self.RequestBodyConverter, type_)

    def create_response_body_converter(self, type_, *args, **kwargs):
        return self._make_converter(self.ResponseBodyConverter, type_)

    @classmethod
    def register_if_necessary(cls, register_func):
        # Only advertise this converter when marshmallow is importable.
        if cls.marshmallow is not None:
            register_func(cls)


MarshmallowConverter.register_if_necessary(register_default_converter_factory)
| mit | 67b2c5f5cb43ca27e9ef0377f68c52b7 | 32.278846 | 78 | 0.636232 | 4.251843 | false | false | false | false |
prkumar/uplink | examples/async-requests/asyncio_example.py | 1 | 1165 | """
Example of using Uplink with aiohttp for non-blocking HTTP requests.
This should work on Python 3.7 and above.
"""
import asyncio
import uplink
# Local imports
from github import BASE_URL, GitHub
async def get_contributors(full_name):
    """Fetch the contributor list for one ``owner/repo`` repository.

    Relies on the module-level ``gh_async`` consumer created in the
    ``__main__`` block, so the HTTP request runs without blocking.
    """
    print("Getting GitHub repository `{}`".format(full_name))
    # Split "owner/repo" into the two path arguments the API expects.
    response = await gh_async.get_contributors(*full_name.split("/"))
    json = await response.json()
    print("response for {}: {}".format(full_name, json))
    return json
if __name__ == "__main__":
    # This consumer instance uses Requests to make blocking requests.
    gh_sync = GitHub(base_url=BASE_URL)

    # This uses aiohttp, an HTTP client for asyncio.
    gh_async = GitHub(base_url=BASE_URL, client=uplink.AiohttpClient())

    # First, let's fetch a list of all public repositories.
    repos = gh_sync.get_repos().json()

    # Use only the first 10 results to avoid hitting the rate limit.
    repos = repos[:10]

    async def main():
        # Concurrently fetch the contributors for those repositories.
        futures = [get_contributors(repo["full_name"]) for repo in repos]
        await asyncio.gather(*futures)

    # asyncio.run() (available since Python 3.7, matching the module
    # docstring) replaces the deprecated get_event_loop() /
    # run_until_complete() pattern removed in newer Python versions.
    asyncio.run(main())
prkumar/uplink | docs/source/conf.py | 1 | 6483 | # -*- coding: utf-8 -*-
#
# Uplink documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 24 19:40:30 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Import package metadata
from uplink.__about__ import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Uplink"
copyright = "2018, P. Raj Kumar"
author = "Raj Kumar"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#
# Sphinx 5+ warns on (and newer versions reject) ``language = None``,
# falling back to "en" anyway -- declare the default explicitly.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "A Declarative HTTP Client for Python, inspired by Retrofit.",
"github_user": "prkumar",
"github_repo": "uplink",
"github_button": "true",
"github_banner": "true",
"show_powered_by": "false",
"fixed_sidebar": "true",
"github_type": "star",
"show_related": "true",
"sidebar_collapse": "false",
"sidebar_showhidden": "false",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# "**": [
# "about.html",
# "navigation.html",
# "relations.html", # needs 'show_related': True theme option to display
# "searchbox.html",
# "donate.html",
# ]
# }
# Custom sidebar templates, maps document names to template names.
# (Quote style normalized to the double quotes used throughout this file.)
html_sidebars = {
    "index": ["about.html", "links.html", "navigation.html", "searchbox.html"],
    "**": ["about.html", "localtoc.html", "relations.html", "searchbox.html"],
    "changes": ["about.html", "searchbox.html"],
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Uplinkdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "Uplink.tex", "Uplink Documentation", "Raj Kumar", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "uplink", "Uplink Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Uplink",
"Uplink Documentation",
author,
"Uplink",
"One line description of project.",
"Miscellaneous",
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"requests": ("http://docs.python-requests.org/en/master/", None),
"aiohttp": ("https://aiohttp.readthedocs.io/en/stable/", None),
"marshmallow": ("https://marshmallow.readthedocs.io/en/latest/", None),
"twisted": ("http://twistedmatrix.com/documents/current/api/", None),
}
| mit | 83d61c26d7cfc681b60f30ddd06f3a1c | 30.779412 | 81 | 0.660342 | 3.795667 | false | true | false | false |
prkumar/uplink | uplink/utils.py | 1 | 2899 | # Standard library imports
import collections
import inspect
# Compatibility shims: provide ``signature``, ``get_call_args``, and
# ``get_arg_spec`` with one behavior on Python 3 and a fallback on
# Python 2.7, which lacks ``inspect.signature``.
try:
    # Python 3.2+
    from inspect import signature
except ImportError:  # pragma: no cover
    # Python 2.7
    from inspect import getcallargs as get_call_args, getargspec as _getargspec

    def signature(_):
        # ``inspect.signature`` does not exist on Python 2; callers that
        # reach this are expected to handle the ImportError.
        raise ImportError

    def get_arg_spec(f):
        # Build a Signature from the legacy getargspec result; varargs
        # and keyword catch-alls are appended as plain argument names.
        arg_spec = _getargspec(f)
        args = arg_spec.args
        if arg_spec.varargs is not None:
            args.append(arg_spec.varargs)
        if arg_spec.keywords is not None:
            args.append(arg_spec.keywords)
        # Python 2 has no annotations, so both annotation fields are empty.
        return Signature(args, {}, None)

else:  # pragma: no cover

    def get_call_args(f, *args, **kwargs):
        # Map the call's positional/keyword arguments onto parameter
        # names, mirroring the legacy ``inspect.getcallargs`` behavior.
        sig = signature(f)
        arguments = sig.bind(*args, **kwargs).arguments
        # apply defaults:
        new_arguments = []
        for name, param in sig.parameters.items():
            try:
                new_arguments.append((name, arguments[name]))
            except KeyError:
                # Parameter was not supplied: substitute its default, or
                # an empty tuple/dict for *args / **kwargs catch-alls.
                if param.default is not param.empty:
                    val = param.default
                elif param.kind is param.VAR_POSITIONAL:
                    val = ()
                elif param.kind is param.VAR_KEYWORD:
                    val = {}
                else:
                    continue
                new_arguments.append((name, val))
        return collections.OrderedDict(new_arguments)

    def get_arg_spec(f):
        # Collect parameter names, their annotations, and the return
        # annotation into a Signature namedtuple.
        sig = signature(f)
        parameters = sig.parameters
        args = []
        annotations = collections.OrderedDict()
        has_return_type = sig.return_annotation is not sig.empty
        return_type = sig.return_annotation if has_return_type else None
        for p in parameters:
            if parameters[p].annotation is not sig.empty:
                annotations[p] = parameters[p].annotation
            args.append(p)
        return Signature(args, annotations, return_type)
try:
import urllib.parse as _urlparse
except ImportError:
import urlparse as _urlparse
# Third-party imports
import uritemplate
# Re-export the version-appropriate urlparse module under one name.
urlparse = _urlparse

# (args, annotations, return_annotation) of a consumer-method signature.
Signature = collections.namedtuple(
    "Signature", "args annotations return_annotation"
)

# Immutable description of a single HTTP request to be executed.
Request = collections.namedtuple("Request", "method uri info return_type")
def is_subclass(cls, class_info):
    """Return False-y unless ``cls`` is a class and a subclass of
    ``class_info`` (plain values never raise, unlike bare issubclass)."""
    if not inspect.isclass(cls):
        return False
    return issubclass(cls, class_info)
def no_op(*_args, **_kwargs):
    """Accept any arguments, do nothing, and return None."""
class URIBuilder(object):
    """Incrementally expands an RFC 6570 URI template."""

    @staticmethod
    def variables(uri):
        """Return the set of template variable names in ``uri``.

        A non-string ``uri`` (e.g. None) yields an empty set instead of
        raising.
        """
        try:
            return uritemplate.URITemplate(uri).variable_names
        except TypeError:
            return set()

    def __init__(self, uri):
        # A falsy uri (e.g. None) is treated as the empty template.
        self._uri = uritemplate.URITemplate(uri or "")

    def set_variable(self, var_dict=None, **kwargs):
        # Partial expansion: substitute the given variables now and keep
        # the rest in place for a later call.
        self._uri = self._uri.partial(var_dict, **kwargs)

    def remaining_variables(self):
        # Variable names that still need values before build() is final.
        return self._uri.variable_names

    def build(self):
        # Expand whatever remains; unset variables simply drop out.
        return self._uri.expand()
| mit | 38988879600fae9075ecb8bb7ab953a0 | 26.349057 | 79 | 0.602967 | 4.232117 | false | false | false | false |
prkumar/uplink | tests/unit/test_arguments.py | 1 | 17177 | import pytest
# Local imports
from uplink import hooks, arguments
from uplink.converters import keys
# Shared parametrization: a single set of positional-argument names used
# by the handler-builder tests below.
inject_args = pytest.mark.parametrize("args", (["arg1", "arg2", "arg3"],))


@pytest.fixture
def argument_mock(mocker):
    # A bare ArgumentAnnotation mock for handler-builder tests.
    return mocker.Mock(spec=arguments.ArgumentAnnotation)


@pytest.fixture
def named_argument_mock(mocker):
    # A NamedArgument mock; individual tests assign .name as needed.
    return mocker.Mock(spec=arguments.NamedArgument)
class ArgumentTestCase(object):
    """Mixin of shared checks for every argument-annotation type.

    Subclasses set ``type_cls`` and the expected attributes; the
    inherited tests then verify the converter key, static usability,
    and decorator behavior of that annotation type.
    """

    @property
    def type_cls(self):
        # The annotation class under test; must be overridden.
        raise NotImplementedError

    @property
    def expected_converter_key(self):
        # The converter key the annotation should advertise.
        raise NotImplementedError

    @property
    def expected_can_be_static(self):
        # Most annotation types may be applied without instantiation.
        return True

    def test_converter_type(self):
        assert self.type_cls().converter_key == self.expected_converter_key

    def test_is_static(self):
        assert self.type_cls._can_be_static == self.expected_can_be_static

    def test_static_call(self, mocker, request_definition_builder):
        # Applying the bare class to a definition builder should register
        # an instance of the annotation on the builder.
        request_definition_builder = self.type_cls(request_definition_builder)
        builder = request_definition_builder.argument_handler_builder
        builder.add_annotation.assert_called_with(mocker.ANY)
class FuncDecoratorTestCase(object):
    """Extra checks for annotation types usable as function decorators."""

    def test_static_call_with_function(self):
        # Decorating a plain function should return it untouched.
        def func(a1, a2):
            return a1, a2

        output = self.type_cls(func)
        assert output is func

    def test_equals(self):
        # with_value() should produce a transaction hook instance.
        assert isinstance(
            self.type_cls().with_value("hello"), hooks.TransactionHook
        )
class TestArgumentAnnotationHandlerBuilder(object):
def test_from_func(self):
def func(_):
pass
handler = arguments.ArgumentAnnotationHandlerBuilder.from_func(func)
another_handler = arguments.ArgumentAnnotationHandlerBuilder.from_func(
func
)
assert handler is another_handler
@inject_args
def test_missing_arguments(self, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
assert list(builder.missing_arguments) == args
@inject_args
def test_remaining_args_count(self, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
assert builder.remaining_args_count == len(args)
@inject_args
def test_add_annotation_without_name(self, mocker, argument_mock, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
builder.listener = mocker.stub()
builder.add_annotation(argument_mock)
builder.listener.assert_called_with(argument_mock)
assert args[0] not in builder.missing_arguments
@inject_args
def test_add_annotation_with_name(self, mocker, argument_mock, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
builder.listener = mocker.stub()
builder.add_annotation(argument_mock, name=args[-1])
builder.listener.assert_called_with(argument_mock)
assert args[-1] not in builder.missing_arguments
@inject_args
def test_add_named_annotation_without_name(
self, mocker, named_argument_mock, args
):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
named_argument_mock.name = None
builder.listener = mocker.stub()
builder.add_annotation(named_argument_mock)
builder.listener.assert_called_with(named_argument_mock)
assert named_argument_mock.name == args[0]
assert args[0] not in builder.missing_arguments
@inject_args
def test_add_annotation_class(self, mocker, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
builder.listener = mocker.stub()
argument = builder.add_annotation(arguments.ArgumentAnnotation())
builder.listener.assert_called_with(argument)
assert args[0] not in builder.missing_arguments
@inject_args
def test_add_annotation_with_name_not_recognized(self, argument_mock, args):
def dummy():
pass
assert -1 not in args
builder = arguments.ArgumentAnnotationHandlerBuilder(dummy, args, False)
with pytest.raises(arguments.ArgumentNotFound):
builder.add_annotation(argument_mock, name=-1)
def test_add_annotation_with_no_missing_arguments(self, argument_mock):
def dummy():
pass
builder = arguments.ArgumentAnnotationHandlerBuilder(dummy, [], False)
with pytest.raises(arguments.ExhaustedArguments):
builder.add_annotation(argument_mock)
def test_add_annotation_that_is_not_an_annotation(self):
def dummy():
pass
builder = arguments.ArgumentAnnotationHandlerBuilder(
dummy, ["arg1"], False
)
builder.add_annotation(type, "arg1")
assert builder.remaining_args_count == 1
@inject_args
def test_set_annotations(self, mocker, argument_mock, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
builder.listener = mocker.stub()
builder.set_annotations((argument_mock,))
builder.listener.assert_called_with(argument_mock)
assert args[0] not in builder.missing_arguments
@inject_args
def test_set_annotations_with_dict(self, mocker, argument_mock, args):
builder = arguments.ArgumentAnnotationHandlerBuilder(None, args, False)
builder.listener = mocker.stub()
builder.set_annotations(**{args[0]: argument_mock})
builder.listener.assert_called_with(argument_mock)
assert args[0] not in builder.missing_arguments
def test_is_done(self, argument_mock):
builder = arguments.ArgumentAnnotationHandlerBuilder(
None, ("arg1",), False
)
assert not builder.is_done()
builder.add_annotation(argument_mock)
assert builder.is_done()
class TestArgumentAnnotationHandler(object):
def test_get_relevant_arguments(self):
args = {"arg1": "value1"}
annotation_handler = arguments.ArgumentAnnotationHandler(None, args)
relevant = annotation_handler.get_relevant_arguments(args)
assert list(relevant) == list(args.items())
def test_handle_call(self, request_builder, mocker):
def dummy(arg1):
return arg1
request_builder.get_converter.return_value = dummy
get_call_args = mocker.patch("uplink.utils.get_call_args")
get_call_args.return_value = {"arg1": "hello"}
annotation = mocker.Mock(arguments.ArgumentAnnotation)
handlers = arguments.ArgumentAnnotationHandler(
dummy, {"arg1": annotation}
)
handlers.handle_call(request_builder, (), {})
annotation.modify_request.assert_called_with(request_builder, "hello")
@inject_args
def test_annotations(self, args):
annotations = ["annotation"] * len(args)
arg_dict = dict(zip(args, annotations))
annotation_handler = arguments.ArgumentAnnotationHandler(None, arg_dict)
assert list(annotation_handler.annotations) == annotations
class TestArgumentAnnotation(object):
def test_call(self, request_definition_builder):
annotation = arguments.ArgumentAnnotation()
return_value = annotation(request_definition_builder)
handler_builder = request_definition_builder.argument_handler_builder
handler_builder.add_annotation.assert_called_with(annotation)
assert return_value is request_definition_builder
class TestTypedArgument(object):
def test_type(self):
assert arguments.TypedArgument("hello").type == "hello"
def test_set_type(self):
annotation = arguments.TypedArgument()
assert annotation.type is None
annotation.type = "type"
assert annotation.type == "type"
def test_set_type_with_type_already_set(self):
annotation = arguments.TypedArgument("type")
with pytest.raises(AttributeError):
annotation.type = "new type"
class TestNamedArgument(object):
def test_name(self):
assert arguments.NamedArgument("name").name == "name"
def test_set_name(self):
annotation = arguments.NamedArgument()
assert annotation.name is None
annotation.name = "name"
assert annotation.name == "name"
def test_set_name_with_name_already_set(self):
annotation = arguments.NamedArgument("name")
with pytest.raises(AttributeError):
annotation.name = "new name"
class TestPath(ArgumentTestCase):
type_cls = arguments.Path
expected_converter_key = keys.CONVERT_TO_STRING
def test_modify_request_definition(self, request_definition_builder):
arguments.Path("name").modify_request_definition(
request_definition_builder
)
request_definition_builder.uri.add_variable.assert_called_with("name")
def test_modify_request(self, request_builder):
arguments.Path("name").modify_request(request_builder, "value")
request_builder.set_url_variable.assert_called_with({"name": "value"})
class TestQuery(ArgumentTestCase, FuncDecoratorTestCase):
type_cls = arguments.Query
expected_converter_key = keys.Sequence(keys.CONVERT_TO_STRING)
def test_modify_request(self, request_builder):
arguments.Query("name").modify_request(request_builder, "value")
assert request_builder.info["params"] == {"name": "value"}
def test_modify_request_with_encoded(self, request_builder):
arguments.Query("name", encoded=True).modify_request(
request_builder, "value"
)
assert request_builder.info["params"] == "name=value"
def test_modify_request_with_mismatched_encoding(self, request_builder):
arguments.Query("name", encoded=True).modify_request(
request_builder, "value"
)
with pytest.raises(arguments.Query.QueryStringEncodingError):
arguments.Query("name2", encoded=False).modify_request(
request_builder, "value2"
)
def test_skip_none(self, request_builder):
arguments.Query("name").modify_request(request_builder, None)
assert request_builder.info["params"] == {}
def test_encode_none(self, request_builder):
arguments.Query("name", encode_none="null").modify_request(
request_builder, None
)
assert request_builder.info["params"] == {"name": "null"}
def test_converter_key_with_encoded(self):
query = arguments.Query("name", encoded=True)
assert query.converter_key == keys.CONVERT_TO_STRING
def test_converter_type(self):
query = arguments.Query("name", encoded=False)
assert query.converter_key == keys.Sequence(keys.CONVERT_TO_STRING)
class TestQueryMap(ArgumentTestCase, FuncDecoratorTestCase):
type_cls = arguments.QueryMap
expected_converter_key = keys.Map(TestQuery.expected_converter_key)
def test_modify_request(self, request_builder):
arguments.QueryMap().modify_request(request_builder, {"hello": "world"})
assert request_builder.info["params"] == {"hello": "world"}
def test_modify_request_with_encoded(self, request_builder):
arguments.QueryMap(encoded=True).modify_request(
request_builder, {"name": "value"}
)
assert request_builder.info["params"] == "name=value"
def test_converter_key_with_encoded(self):
query = arguments.QueryMap(encoded=True)
assert query.converter_key == keys.Map(keys.CONVERT_TO_STRING)
def test_converter_type(self):
query = arguments.QueryMap(encoded=False)
assert query.converter_key == keys.Map(
keys.Sequence(keys.CONVERT_TO_STRING)
)
class TestHeader(ArgumentTestCase, FuncDecoratorTestCase):
type_cls = arguments.Header
expected_converter_key = keys.CONVERT_TO_STRING
def test_modify_request(self, request_builder):
arguments.Header("hello").modify_request(request_builder, "world")
assert request_builder.info["headers"] == {"hello": "world"}
def test_skip_none(self, request_builder):
arguments.Header("hello").modify_request(request_builder, None)
assert request_builder.info["headers"] == {}
class TestHeaderMap(ArgumentTestCase, FuncDecoratorTestCase):
type_cls = arguments.HeaderMap
expected_converter_key = keys.Map(TestHeader.expected_converter_key)
def test_modify_request(self, request_builder):
arguments.HeaderMap().modify_request(
request_builder, {"hello": "world"}
)
assert request_builder.info["headers"] == {"hello": "world"}
class TestField(ArgumentTestCase):
type_cls = arguments.Field
expected_converter_key = keys.CONVERT_TO_REQUEST_BODY
def test_modify_request(self, request_builder):
arguments.Field("hello").modify_request(request_builder, "world")
assert request_builder.info["data"]["hello"] == "world"
def test_modify_request_failure(self, request_builder):
request_builder.info["data"] = object()
with pytest.raises(arguments.Field.FieldAssignmentFailed):
arguments.Field("hello").modify_request(request_builder, "world")
class TestFieldMap(ArgumentTestCase):
type_cls = arguments.FieldMap
expected_converter_key = keys.Map(TestField.expected_converter_key)
def test_modify_request(self, request_builder):
arguments.FieldMap().modify_request(request_builder, {"hello": "world"})
assert request_builder.info["data"] == {"hello": "world"}
def test_modify_request_failure(self, request_builder):
request_builder.info["data"] = object()
with pytest.raises(arguments.FieldMap.FieldMapUpdateFailed):
arguments.FieldMap().modify_request(request_builder, {})
class TestPart(ArgumentTestCase):
type_cls = arguments.Part
expected_converter_key = keys.CONVERT_TO_REQUEST_BODY
def test_modify_request(self, request_builder):
arguments.Part("hello").modify_request(request_builder, "world")
assert request_builder.info["files"] == {"hello": "world"}
class TestPartMap(ArgumentTestCase):
type_cls = arguments.PartMap
expected_converter_key = keys.Map(TestPart.expected_converter_key)
def test_modify_request(self, request_builder):
arguments.PartMap().modify_request(request_builder, {"hello": "world"})
assert request_builder.info["files"] == {"hello": "world"}
class TestBody(ArgumentTestCase):
type_cls = arguments.Body
expected_converter_key = keys.CONVERT_TO_REQUEST_BODY
def test_modify_request(self, request_builder):
# Verify with dict
arguments.Body().modify_request(request_builder, {"hello": "world"})
assert request_builder.info["data"] == {"hello": "world"}
# Verify with non-mapping
body = object()
arguments.Body().modify_request(request_builder, body)
assert request_builder.info["data"] is body
class TestUrl(ArgumentTestCase):
type_cls = arguments.Url
expected_converter_key = keys.CONVERT_TO_STRING
def test_modify_request_definition(self, request_definition_builder):
arguments.Url().modify_request_definition(request_definition_builder)
assert request_definition_builder.uri.is_dynamic
def test_modify_request_definition_failure(
self, mocker, request_definition_builder
):
is_dynamic_mock = mocker.PropertyMock(side_effect=ValueError())
type(request_definition_builder.uri).is_dynamic = is_dynamic_mock
request_definition_builder.__name__ = "dummy"
with pytest.raises(arguments.Url.DynamicUrlAssignmentFailed):
arguments.Url().modify_request_definition(
request_definition_builder
)
def test_modify_request(self, request_builder):
arguments.Url().modify_request(request_builder, "/some/path")
assert request_builder.relative_url == "/some/path"
class TestTimeout(ArgumentTestCase, FuncDecoratorTestCase):
    """Tests for the Timeout argument annotation."""

    type_cls = arguments.Timeout
    expected_converter_key = keys.Identity()

    def test_modify_request(self, request_builder):
        # The timeout value lands directly in the "timeout" request info.
        timeout = arguments.Timeout()
        timeout.modify_request(request_builder, 10)
        assert request_builder.info["timeout"] == 10
class TestContext(ArgumentTestCase, FuncDecoratorTestCase):
    """Tests for the Context argument annotation."""

    type_cls = arguments.Context
    expected_converter_key = keys.Identity()

    def test_modify_request(self, request_builder):
        # A named context value is stored under its key.
        context = arguments.Context("key")
        context.modify_request(request_builder, "value")
        assert request_builder.context["key"] == "value"
class TestContextMap(ArgumentTestCase, FuncDecoratorTestCase):
    """Tests for the ContextMap argument annotation."""

    type_cls = arguments.ContextMap
    expected_converter_key = keys.Identity()

    def test_modify_request(self, request_builder):
        # A mapping is merged into the request context.
        context_map = arguments.ContextMap()
        context_map.modify_request(request_builder, {"key": "value"})
        assert request_builder.context == {"key": "value"}

    def test_modify_request_not_mapping(self, request_builder):
        # Anything other than a mapping is rejected outright.
        with pytest.raises(TypeError):
            arguments.ContextMap().modify_request(request_builder, "value")
| mit | d0b4f6a52abb1061cb8c04b80aa6a047 | 36.422658 | 80 | 0.680794 | 4.116223 | false | true | false | false |
theonion/betty-cropper | betty/cropper/tasks.py | 1 | 2450 | from __future__ import absolute_import
import io
from celery import shared_task
from PIL import Image as PILImage
from betty.conf.app import settings
from .dssim import detect_optimal_quality
try:
# Legacy check: Try import here to determine if we should enable IMGMIN
import numpy # NOQA
import scipy # NOQA
IMGMIN_DISABLED = False
except ImportError:
IMGMIN_DISABLED = True
def is_optimized(image_field):
    """Return True if the stored source image is already optimized.

    "Optimized" here means that re-encoding the JPEG at the default quality
    setting would produce a *larger* file than the one we already have, so
    the existing file should be kept as-is. Non-JPEG sources are never
    considered optimized.
    """
    source_buffer = image_field.read_source_bytes()
    img = PILImage.open(source_buffer)
    # First, let's check to make sure that this image isn't already an optimized JPEG
    if img.format != "JPEG":
        return False
    # Re-encode at the default quality and compare sizes.
    candidate_buffer = io.BytesIO()
    img.save(
        candidate_buffer,
        format="JPEG",
        quality=settings.BETTY_DEFAULT_JPEG_QUALITY,
        icc_profile=img.info.get("icc_profile"),
        optimize=True,
    )
    # Note: .getbuffer().nbytes is preferred, but not supported in Python 2.7
    # If the original is smaller than the re-encode, it was already compressed.
    return len(source_buffer.getvalue()) < len(candidate_buffer.getvalue())
@shared_task
def search_image_quality(image_id):
    """Celery task: find and store per-width JPEG quality settings.

    For each configured crop width (largest first, sampled at most every
    100px), runs a DSSIM-based search for the lowest acceptable JPEG quality
    and records it in ``image.jpeg_quality_settings``. Existing crops are
    cleared afterwards so they get regenerated with the new settings.
    No-op if numpy/scipy are unavailable or the source is already optimized.
    """
    if IMGMIN_DISABLED:
        return
    # Local import -- presumably avoids a circular import at module load
    # time; confirm against betty.cropper.models.
    from betty.cropper.models import Image
    image = Image.objects.get(id=image_id)
    if is_optimized(image):
        # If the image is already optimized, let's leave this alone...
        return
    # Read buffer from storage once and reset on each iteration
    optimized_buffer = image.read_optimized_bytes()
    image.jpeg_quality_settings = {}
    last_width = 0
    for width in sorted(settings.BETTY_WIDTHS, reverse=True):
        if abs(last_width - width) < 100:
            # Sometimes the widths are really too close. We only need to check every 100 px
            continue
        if width > 0:
            optimized_buffer.seek(0)
            quality = detect_optimal_quality(optimized_buffer, width)
            image.jpeg_quality_settings[width] = quality
            if quality == settings.BETTY_JPEG_QUALITY_RANGE[-1]:
                # We're already at max quality; smaller widths won't need more
                break
        last_width = width
    image.save()
    image.clear_crops()
| mit | ea4b6400425bc259bc3385527c862f10 | 28.518072 | 91 | 0.645714 | 4.036244 | false | false | false | false |
stevearc/pypicloud | pypicloud/util.py | 1 | 10262 | """ Utilities """
import logging
import os
import re
import time
import unicodedata
from typing import (
IO,
Any,
AnyStr,
Callable,
Dict,
ItemsView,
Iterator,
KeysView,
List,
Optional,
Tuple,
Union,
)
from distlib.locators import Locator
from distlib.util import split_filename
from distlib.wheel import Wheel
LOG = logging.getLogger(__name__)
# All package-file extensions known to distlib (source dists + binary dists)
ALL_EXTENSIONS = Locator.source_extensions + Locator.binary_extensions
# Unique marker object used to distinguish "argument omitted" from None
SENTINEL = object()
CHUNK_SIZE = 1 << 20  # read 1MB chunks
class PackageParseError(ValueError):
    """Raised when a package filename cannot be parsed into name/version"""

    pass
def parse_filename(filename: str, name: Optional[str] = None) -> Tuple[str, str]:
    """Parse a name and version out of a filename.

    Parameters
    ----------
    filename : str
        The package file name (e.g. ``mypkg-1.0.tar.gz``)
    name : str, optional
        If provided, used as a hint for splitting and returned (normalized)
        instead of the parsed name.

    Raises
    ------
    PackageParseError
        If the extension is unknown or the stem cannot be split.
    """
    parsed_name, version = None, None
    for ext in ALL_EXTENSIONS:
        if not filename.endswith(ext):
            continue
        if ext == ".whl":
            # Wheel filenames have a well-defined structure; let distlib parse it.
            wheel = Wheel(filename)
            return wheel.name, wheel.version
        pieces = split_filename(filename[: -len(ext)], name)
        if pieces is not None:
            parsed_name, version = pieces[:2]
        break
    if version is None:
        raise PackageParseError("Cannot parse package file '%s'" % filename)
    return normalize_name(name if name is not None else parsed_name), version
def get_packagetype(name: str) -> str:
    """Get package type out of a filename.

    Returns one of ``"sdist"``, ``"bdist_egg"``, ``"bdist_wheel"``, or
    an empty string for unrecognized extensions.
    """
    type_by_suffix = (
        (".tar.gz", "sdist"),
        (".egg", "bdist_egg"),
        (".whl", "bdist_wheel"),
    )
    for suffix, package_type in type_by_suffix:
        if name.endswith(suffix):
            return package_type
    return ""
def normalize_name(name: str) -> str:
    """Normalize a python package name per PEP 503.

    Runs of ``-``, ``_`` and ``.`` collapse to a single dash and the
    result is lower-cased.
    https://www.python.org/dev/peps/pep-0503/#id4
    """
    return re.sub(r"[-_.]+", "-", name.lower())
def normalize_metadata_value(value: Union[str, bytes]) -> str:
    """Strip non-ASCII characters from metadata values.

    Bytes are decoded as UTF-8 first; strings are NFKD-decomposed and any
    non-ASCII code points are dropped. All runs of whitespace collapse to
    a single space.
    """
    if isinstance(value, bytes):
        value = value.decode("utf-8")
    if isinstance(value, str):
        decomposed = unicodedata.normalize("NFKD", value)
        value = "".join(char for char in decomposed if ord(char) < 128)
    return re.sub(r"\s+", " ", value)
def normalize_metadata(metadata: Dict[str, Union[str, bytes]]) -> Dict[str, str]:
    """
    Strip non-ASCII characters from metadata values
    and replace "_" in metadata keys to "-"
    """
    normalized = {}
    for key, value in metadata.items():
        normalized[key.replace("_", "-")] = normalize_metadata_value(value)
    return normalized
def create_matcher(queries: List[str], query_type: str) -> Callable[[str], bool]:
    """
    Create a case-insensitive substring matcher for a list of queries

    Parameters
    ----------
    queries : list
        List of queries
    query_type: str
        Type of query to run: ["or"|"and"]

    Returns
    -------
    Matcher function taking a candidate string and returning a bool
    """
    lowered = [query.lower() for query in queries]
    # "or" needs any query to match; anything else requires all of them.
    combine = any if query_type == "or" else all
    return lambda candidate: combine(q in candidate.lower() for q in lowered)
def stream_file(fp: IO[AnyStr], chunk_size: int = CHUNK_SIZE) -> Iterator[AnyStr]:
    """
    Read an (opened) file in chunks of chunk_size bytes
    """
    chunk = fp.read(chunk_size)
    while chunk:
        yield chunk
        chunk = fp.read(chunk_size)
class EnvironSettings:
    """Mapping of config settings with environment-variable overrides.

    Wraps a settings dict (e.g. parsed from an ini file) together with a
    copy of the process environment. A setting ``foo.bar`` may be
    overridden by the environment variable ``PPC_FOO_BAR``; environment
    values always take precedence over file values.
    """

    def __init__(self, settings: Dict[str, Any], env: Dict[str, str] = None):
        self._settings = settings
        self._env = dict(os.environ) if env is None else env

    @staticmethod
    def _get_environ_key(key: str) -> str:
        # foo.bar -> PPC_FOO_BAR
        return "PPC_" + key.upper().replace(".", "_")

    def __iter__(self) -> Iterator[str]:
        return iter(self._settings)

    def keys(self) -> KeysView[str]:
        return self._settings.keys()

    def items(self) -> ItemsView[str, Any]:
        return self._settings.items()

    def __contains__(self, key: str) -> bool:
        return self._get_environ_key(key) in self._env or key in self._settings

    def __setitem__(self, key: str, value: str) -> None:
        # Drop any environment override so the new value becomes visible.
        self._env.pop(self._get_environ_key(key), None)
        self._settings[key] = value

    def __getitem__(self, key: str) -> str:
        try:
            return self._env[self._get_environ_key(key)]
        except KeyError:
            return self._settings[key]

    def __str__(self) -> str:
        return str(self._settings)

    def __repr__(self) -> str:
        return "EnvironSettings(%s)" % self._settings

    def read_prefix_from_environ(self, prefix: str) -> None:
        """Pull every ``PPC_<PREFIX>_*`` env var into the settings dict."""
        if not prefix:
            return
        # Make sure the prefix has a '.' at the end (e.g. "sqlalchemy.")
        if prefix.endswith("_"):
            prefix = prefix[:-1] + "."
        elif not prefix.endswith("."):
            prefix = prefix + "."
        env_prefix = self._get_environ_key(prefix)
        for env_key, env_value in self._env.items():
            if env_key.startswith(env_prefix):
                suffix = env_key[len(env_prefix) :].lower()
                self._settings[prefix + suffix] = env_value

    def get(self, key: str, default: Any = None) -> Any:
        if key in self:
            return self[key]
        return default

    def pop(self, key: str, default: Any = SENTINEL) -> Any:
        env_key = self._get_environ_key(key)
        if env_key in self._env:
            # Remove both copies; the environment value wins.
            self._settings.pop(key, None)
            return self._env.pop(env_key)
        if default is SENTINEL:
            # No default supplied: missing key should raise KeyError
            return self._settings.pop(key)
        return self._settings.pop(key, default)

    def setdefault(self, key: str, value: Any) -> Any:
        if key not in self:
            self._settings[key] = value
        return self[key]

    def get_as_dict(
        self, prefix: str, **kwargs: Callable[[Any], Any]
    ) -> Dict[str, Any]:
        """
        Convenience method for fetching settings

        Returns a dict; any settings that were missing from the config file will
        not be present in the returned dict (as opposed to being present with a
        None value)

        Parameters
        ----------
        prefix : str
            String to prefix all keys with when fetching value from settings
        **kwargs : dict
            Mapping of setting name to conversion function (e.g. str or asbool)
        """
        converted = {}
        for name, convert in kwargs.items():
            raw = self.get(prefix + name)
            if raw is not None:
                converted[name] = convert(raw)
        return converted

    def clone(self) -> "EnvironSettings":
        """Return an independent copy (both inner dicts are copied)."""
        return EnvironSettings(dict(self._settings), dict(self._env))
class TimedCache(dict):
    """
    Dict that will store entries for a given time, then evict them

    Parameters
    ----------
    cache_time : int or None
        The amount of time to cache entries for, in seconds. 0 will not cache.
        None will cache forever.
    factory : callable, optional
        If provided, when the TimedCache is accessed and has no value, it will
        attempt to populate itself by calling this function with the key it was
        accessed with. This function should return a value to cache, or None if
        no value is found.
    """

    # Private marker for "no value present"; distinct from any user value.
    _MISSING = object()

    def __init__(
        self, cache_time: Optional[int], factory: Optional[Callable[[Any], Any]] = None
    ):
        super(TimedCache, self).__init__()
        if cache_time is not None and cache_time < 0:
            raise ValueError("cache_time cannot be negative")
        self._cache_time = cache_time
        self._factory = factory
        # Maps key -> insertion timestamp, or None for "never expires"
        self._times = {}  # type: Dict[str, Optional[float]]

    def _has_expired(self, key):
        """Check if a key is both present and expired"""
        if key not in self._times or self._cache_time is None:
            return False
        updated = self._times[key]
        # A stored timestamp of None means the entry never expires
        return updated is not None and time.time() - updated > self._cache_time

    def _evict(self, key):
        """Remove a key if it has expired"""
        if self._has_expired(key):
            del self[key]

    def __contains__(self, key):
        self._evict(key)
        return super(TimedCache, self).__contains__(key)

    def __delitem__(self, key):
        del self._times[key]
        super(TimedCache, self).__delitem__(key)

    def __setitem__(self, key, value):
        if self._cache_time == 0:
            # Caching is disabled; silently drop the value
            return
        self._times[key] = time.time()
        super(TimedCache, self).__setitem__(key, value)

    def __getitem__(self, key):
        self._evict(key)
        try:
            return super(TimedCache, self).__getitem__(key)
        except KeyError:
            if self._factory is None:
                raise
            value = self._factory(key)
            if value is None:
                # Factory found nothing; propagate the original KeyError
                raise
            self[key] = value
            return value

    def get(self, key, default=None):
        self._evict(key)
        value = super(TimedCache, self).get(key, self._MISSING)
        if value is not self._MISSING:
            return value
        if self._factory is not None:
            value = self._factory(key)
            if value is not None:
                self[key] = value
                return value
        return default

    def set_expire(self, key, value, expiration):
        """
        Set a value in the cache with a specific expiration

        Parameters
        ----------
        key : str
        value : value
        expiration : int or None
            Sets the value to expire this many seconds from now. If None, will
            never expire.
        """
        if expiration is not None:
            if expiration <= 0:
                # A non-positive TTL is a delete
                try:
                    del self[key]
                except KeyError:
                    pass
                return
            if self._cache_time is None:
                # Entries can never expire when cache_time is None, so store
                # the "never expires" marker. (Previously this path crashed
                # with a TypeError from ``expiration - None``.)
                expiration = None
            else:
                # _has_expired compares time-of-insert + cache_time against
                # now, so shift the stored timestamp to hit the requested TTL.
                expiration = time.time() + expiration - self._cache_time
        self._times[key] = expiration
        super(TimedCache, self).__setitem__(key, value)
| mit | ab26508a8cc17b29ff4377dcbae8e72b | 28.404011 | 88 | 0.554668 | 4.101519 | false | false | false | false |
stevearc/pypicloud | pypicloud/storage/files.py | 1 | 4250 | """ Store packages as files on disk """
import json
import os
from binascii import hexlify
from pyramid.response import FileResponse
from pypicloud.dateutil import utcfromtimestamp
from pypicloud.models import Package
from pypicloud.util import stream_file
from .base import IStorage
class FileStorage(IStorage):
    """Stores package files on the filesystem"""

    def __init__(self, request=None, **kwargs):
        # Root directory holding all package files (set by configure())
        self.directory = kwargs.pop("directory")
        super(FileStorage, self).__init__(request, **kwargs)

    @classmethod
    def configure(cls, settings):
        """Build constructor kwargs from app settings; creates storage.dir"""
        kwargs = super(FileStorage, cls).configure(settings)
        directory = os.path.abspath(settings["storage.dir"]).rstrip(os.sep)
        if not os.path.exists(directory):
            os.makedirs(directory)
        kwargs["directory"] = directory
        return kwargs

    def get_path(self, package):
        """Get the fully-qualified file path for a package"""
        # Layout: <root>/<package name>/<version>/<filename>
        return os.path.join(
            self.directory, package.name, package.version, package.filename
        )

    def path_to_meta_path(self, path):
        """Construct the filename for a metadata file"""
        return path + ".meta"

    def get_metadata_path(self, package):
        """Get the fully-qualified file path for a package metadata file"""
        return self.path_to_meta_path(self.get_path(package))

    def list(self, factory=Package):
        """Yield all stored packages by walking the storage directory"""
        for root, _, files in os.walk(self.directory):
            for filename in files:
                if filename.endswith(".meta"):
                    # We don't want to yield for this file
                    continue
                # Recover name/version from the directory layout (see get_path)
                shortpath = root[len(self.directory) :].strip(os.sep)
                name, version = shortpath.split(os.sep)
                fullpath = os.path.join(root, filename)
                last_modified = utcfromtimestamp(os.path.getmtime(fullpath))
                metadata = {}
                metafile = self.path_to_meta_path(fullpath)
                if os.path.exists(metafile):
                    with open(metafile, "r", encoding="utf-8") as mfile:
                        try:
                            metadata = json.load(mfile)
                        except ValueError:
                            # If JSON fails to decode, don't sweat it.
                            pass
                yield factory(name, version, filename, last_modified, **metadata)

    def download_response(self, package):
        """Serve the package file directly from disk"""
        return FileResponse(
            self.get_path(package),
            request=self.request,
            content_type="application/octet-stream",
        )

    def upload(self, package, datastream):
        """Write the package file and its metadata to disk.

        Both files are first written to uniquely-named temp files in the
        destination directory and then renamed into place, so readers never
        observe a partially-written file.
        """
        destfile = self.get_path(package)
        dest_meta_file = self.get_metadata_path(package)
        destdir = os.path.dirname(destfile)
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        # Random suffix keeps concurrent uploads from clobbering each other
        uid = hexlify(os.urandom(4)).decode("utf-8")
        # Store metadata as JSON. This could be expanded in the future
        # to store additional metadata about a package (i.e. author)
        tempfile = os.path.join(destdir, ".metadata." + uid)
        with open(tempfile, "w", encoding="utf-8") as mfile:
            json.dump(package.get_metadata(), mfile)
        os.rename(tempfile, dest_meta_file)
        # Write to a temporary file
        tempfile = os.path.join(destdir, "." + package.filename + "." + uid)
        with open(tempfile, "wb") as ofile:
            for chunk in stream_file(datastream):
                ofile.write(chunk)
        os.rename(tempfile, destfile)

    def delete(self, package):
        """Delete the package file and metadata, pruning empty directories"""
        filename = self.get_path(package)
        meta_file = self.get_metadata_path(package)
        os.unlink(filename)
        try:
            os.unlink(meta_file)
        except OSError:
            # Metadata file may not exist
            pass
        # Best-effort cleanup: rmdir only succeeds on empty directories
        version_dir = os.path.dirname(filename)
        try:
            os.rmdir(version_dir)
        except OSError:
            return
        package_dir = os.path.dirname(version_dir)
        try:
            os.rmdir(package_dir)
        except OSError:
            return

    def open(self, package):
        """Open the stored package file for binary reading"""
        filename = self.get_path(package)
        return open(filename, "rb")
| mit | 2b2d175bff52036669312905c8bf3e89 | 34.123967 | 81 | 0.584941 | 4.233068 | false | false | false | false |
stevearc/pypicloud | pypicloud/storage/object_store.py | 1 | 3871 | """ Store packages in S3 """
import logging
from binascii import hexlify
from hashlib import md5
from pyramid.httpexceptions import HTTPFound
from pyramid.settings import asbool
from smart_open import open as _open
from pypicloud.models import Package
from .base import IStorage
LOG = logging.getLogger(__name__)
class ObjectStoreStorage(IStorage):
    """Shared base class for object-store storage backends (S3 / GCS).

    Holds the configuration knobs common to all object stores plus the
    path/URL bookkeeping; subclasses supply the store-specific pieces
    (signed-URL generation and object listing).
    """

    test = False

    def __init__(
        self,
        request=None,
        expire_after=None,
        bucket_prefix=None,
        prepend_hash=None,
        redirect_urls=None,
        sse=None,
        object_acl=None,
        storage_class=None,
        region_name=None,
        public_url=False,
        **kwargs
    ):
        super(ObjectStoreStorage, self).__init__(request, **kwargs)
        self.expire_after = expire_after
        self.bucket_prefix = bucket_prefix
        self.prepend_hash = prepend_hash
        self.redirect_urls = redirect_urls
        self.sse = sse
        self.object_acl = object_acl
        self.storage_class = storage_class
        self.region_name = region_name
        self.public_url = public_url

    def _generate_url(self, package: Package) -> str:
        """Subclasses must produce a (signed) URL for the stored package"""
        raise NotImplementedError

    @classmethod
    def package_from_object(cls, obj, factory):
        """Subclasses must build a Package from a raw storage object"""
        raise NotImplementedError

    @classmethod
    def _subclass_specific_config(cls, settings, common_config):
        """Hook for subclasses to extract extra settings; default is none"""
        return {}

    @classmethod
    def configure(cls, settings):
        """Build constructor kwargs from the application settings"""
        kwargs = super(ObjectStoreStorage, cls).configure(settings)
        kwargs.update(
            expire_after=int(settings.get("storage.expire_after", 60 * 60 * 24)),
            bucket_prefix=settings.get("storage.prefix", ""),
            prepend_hash=asbool(settings.get("storage.prepend_hash", True)),
            object_acl=settings.get("storage.object_acl", None),
            storage_class=settings.get("storage.storage_class"),
            redirect_urls=asbool(settings.get("storage.redirect_urls", True)),
            region_name=settings.get("storage.region_name"),
            public_url=asbool(settings.get("storage.public_url")),
        )
        kwargs.update(cls._subclass_specific_config(settings, kwargs))
        return kwargs

    def calculate_path(self, package):
        """Calculates the path of a package"""
        path = package.name + "/" + package.filename
        if not self.prepend_hash:
            return path
        # Short hash prefix spreads keys across the bucket keyspace
        digest = md5(package.filename.encode("utf-8")).hexdigest()[:4]
        return digest + "/" + path

    def get_path(self, package):
        """Get the fully-qualified bucket path for a package"""
        if "path" not in package.data:
            # Cache the computed path on the package record
            package.data["path"] = self.bucket_prefix + self.calculate_path(package)
        return package.data["path"]

    def get_url(self, package):
        """Either a server redirect URL or a direct signed URL"""
        if not self.redirect_urls:
            return self._generate_url(package)
        return super(ObjectStoreStorage, self).get_url(package)

    def download_response(self, package):
        # Redirect the client straight to a signed URL for the object
        return HTTPFound(location=self._generate_url(package))

    def open(self, package):
        """Open the remote object for reading via its signed URL"""
        signed_url = self._generate_url(package)
        return _open(signed_url, "rb", compression="disable")
| mit | 6c9131375dd7fb144d965f5dfd96c4e1 | 32.95614 | 88 | 0.632653 | 4.230601 | false | true | false | false |
stevearc/pypicloud | pypicloud/storage/gcs.py | 1 | 8067 | """ Store packages in GCS """
import json
import logging
import os
import posixpath
from datetime import timedelta
from google.auth import compute_engine
from google.auth.transport import requests
from google.cloud import storage
from pyramid.settings import asbool
from smart_open import open as _open
from pypicloud.models import Package
from pypicloud.util import stream_file
from .object_store import ObjectStoreStorage
LOG = logging.getLogger(__name__)
class GoogleCloudStorage(ObjectStoreStorage):
    """Storage backend that uses GCS"""

    test = False

    def __init__(
        self,
        request=None,
        bucket_factory=None,
        service_account_json_filename=None,
        project_id=None,
        use_iam_signer=False,
        iam_signer_service_account_email=None,
        **kwargs,
    ):
        # NOTE(review): service_account_json_filename and project_id are
        # accepted but not stored here; they appear to be consumed when the
        # client/bucket is built (see _get_storage_client) -- confirm.
        super(GoogleCloudStorage, self).__init__(request=request, **kwargs)
        self._bucket = None
        # Invoked lazily via the `bucket` property, so no GCS connection is
        # made until one is actually needed
        self._bucket_factory = bucket_factory
        self.use_iam_signer = use_iam_signer
        self.iam_signer_service_account_email = iam_signer_service_account_email
        if self.public_url:
            raise NotImplementedError(
                "GoogleCloudStorage backend does not yet support public URLs"
            )
        if self.sse:
            raise NotImplementedError(
                "GoogleCloudStorage backend does not yet support customized "
                "server-side encryption"
            )

    @classmethod
    def _subclass_specific_config(cls, settings, common_config):
        """Extract GCP-specific config settings: specifically, the path to
        the service account key file, and the project id. Both are
        optional.
        """
        service_account_json_filename = settings.get(
            "storage.gcp_service_account_json_filename"
        ) or os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
        if (
            service_account_json_filename
            and not os.path.isfile(service_account_json_filename)
            and not cls.test
        ):
            raise Exception(
                "Service account json file not found at path {}".format(
                    service_account_json_filename
                )
            )
        bucket_name = settings.get("storage.bucket")
        if bucket_name is None:
            raise ValueError("You must specify the 'storage.bucket'")
        iam_signer_service_account_email = settings.get(
            "storage.iam_signer_service_account_email"
        )
        if iam_signer_service_account_email is None and service_account_json_filename:
            # Default the IAM signer identity to the key file's own account
            with open(service_account_json_filename, "r", encoding="utf-8") as ifile:
                credentials = json.load(ifile)
                iam_signer_service_account_email = credentials.get("client_email")
        return {
            "service_account_json_filename": service_account_json_filename,
            "project_id": settings.get("storage.gcp_project_id"),
            "use_iam_signer": asbool(settings.get("storage.gcp_use_iam_signer", False)),
            "iam_signer_service_account_email": iam_signer_service_account_email,
            "bucket_factory": lambda: cls.get_bucket(bucket_name, settings),
        }

    @classmethod
    def _get_storage_client(cls, settings):
        """Helper method for constructing a properly-configured GCS client
        object from the provided settings.
        """
        client_settings = cls._subclass_specific_config(settings, {})
        client_args = {}
        if client_settings["project_id"]:
            LOG.info("Using GCP project id `%s`", client_settings["project_id"])
            client_args["project"] = client_settings["project_id"]
        service_account_json_filename = client_settings.get(
            "service_account_json_filename"
        )
        if not service_account_json_filename:
            # No key file: rely on application default credentials
            LOG.info("Creating GCS client without service account JSON file")
            client = storage.Client(**client_args)
        else:
            if not os.path.isfile(service_account_json_filename) and not cls.test:
                raise Exception(
                    "Service account JSON file not found at provided "
                    "path {}".format(service_account_json_filename)
                )
            LOG.info(
                "Creating GCS client from service account JSON file %s",
                service_account_json_filename,
            )
            client = storage.Client.from_service_account_json(
                service_account_json_filename, **client_args
            )
        return client

    @classmethod
    def get_bucket(cls, bucket_name, settings):
        """Return the configured bucket, creating it if it doesn't exist"""
        client = cls._get_storage_client(settings)
        bucket = client.bucket(bucket_name)
        if not bucket.exists():
            bucket.location = settings.get("storage.region_name")
            LOG.info(
                "Creating GCS bucket %s in location %s", bucket_name, bucket.location
            )
            bucket.create()
        return bucket

    @classmethod
    def package_from_object(cls, obj, factory):
        """Create a package from a GCS object"""
        filename = posixpath.basename(obj.name)
        if obj.metadata is None:
            # Object has no pypicloud metadata; skip it
            return None
        name = obj.metadata.get("name")
        version = obj.metadata.get("version")
        if name is None or version is None:
            return None
        metadata = Package.read_metadata(obj.metadata)
        return factory(name, version, filename, obj.updated, path=obj.name, **metadata)

    @property
    def bucket(self):
        """Lazily-constructed bucket handle (see _bucket_factory)"""
        if self._bucket is None:
            self._bucket = self._bucket_factory()
        return self._bucket

    def list(self, factory=Package):
        """Yield all packages stored under the configured prefix"""
        blobs = self.bucket.list_blobs(prefix=self.bucket_prefix or None)
        for blob in blobs:
            pkg = self.package_from_object(blob, factory)
            if pkg is not None:
                yield pkg

    def _generate_url(self, package):
        """Generate a signed url to the GCS file"""
        blob = self._get_gcs_blob(package)
        if self.use_iam_signer:
            # Workaround for https://github.com/googleapis/google-auth-library-python/issues/50
            signing_credentials = compute_engine.IDTokenCredentials(
                requests.Request(),
                "",
                service_account_email=self.iam_signer_service_account_email,
            )
        else:
            signing_credentials = None
        return blob.generate_signed_url(
            expiration=timedelta(seconds=self.expire_after),
            credentials=signing_credentials,
            version="v4",
        )

    def _get_gcs_blob(self, package):
        """Get a GCS blob object for the specified package"""
        return self.bucket.blob(self.get_path(package))

    def get_uri(self, package):
        # gs:// URI consumed by smart_open
        return f"gs://{self.bucket.name}/{self.get_path(package)}"

    def upload(self, package, datastream):
        """Upload the package to GCS"""
        # name/version metadata is what list()/package_from_object read back
        metadata = {"name": package.name, "version": package.version}
        metadata.update(package.get_metadata())
        with _open(
            self.get_uri(package),
            "wb",
            compression="disable",
            transport_params={
                "client": self.bucket.client,
                "blob_properties": {
                    "metadata": metadata,
                    "acl": self.object_acl,
                    "storage_class": self.storage_class,
                },
            },
        ) as fp:
            for chunk in stream_file(datastream):
                fp.write(chunk)  # multipart upload

    def open(self, package):
        """Overwrite open method to re-use client instead of using signed url."""
        return _open(
            self.get_uri(package),
            "rb",
            compression="disable",
            transport_params={"client": self.bucket.client},
        )

    def delete(self, package):
        """Delete the package"""
        blob = self._get_gcs_blob(package)
        blob.delete()
| mit | c4c9464262e8f12496e133d6a47c59e3 | 33.32766 | 95 | 0.59167 | 4.41302 | false | false | false | false |
stevearc/pypicloud | pypicloud/access/ldap_.py | 1 | 12920 | """LDAP authentication plugin for pypicloud."""
import logging
from collections import namedtuple
from functools import wraps
from pyramid.settings import asbool, aslist
from pypicloud.util import TimedCache
from .base import IAccessBackend
from .config import ConfigAccessBackend
try:
import ldap
except ImportError as err: # pragma: no cover
raise ImportError(
"You must 'pip install pypicloud[ldap]' before using ldap as the "
"authentication backend."
) from err
LOG = logging.getLogger(__name__)
def reconnect(func):
    """
    If the LDAP connection dies underneath us, recreate it
    """

    @wraps(func)
    def _wrapper(self, *args, **kwargs):
        """Retry the wrapped call once after re-establishing the connection"""
        try:
            return func(self, *args, **kwargs)
        except ldap.LDAPError:
            # Connection is stale or broken; reconnect and retry exactly once
            self.connect()
            return func(self, *args, **kwargs)

    return _wrapper
# Lightweight record representing a cached LDAP user lookup
User = namedtuple("User", ["username", "dn", "is_admin"])
class LDAP(object):
    """Handles interactions with the remote LDAP server"""

    def __init__(
        self,
        admin_field,
        admin_group_dn,
        admin_value,
        base_dn,
        cache_time,
        service_dn,
        service_password,
        service_username,
        url,
        user_search_filter,
        user_dn_format,
        ignore_cert,
        ignore_referrals,
        ignore_multiple_results,
    ):
        self._url = url
        self._service_dn = service_dn
        self._service_password = service_password
        self._base_dn = base_dn
        self._user_search_filter = user_search_filter
        self._user_dn_format = user_dn_format
        # Exactly one lookup strategy must be configured: either a DN
        # template (user_dn_format) or a subtree search (base_dn +
        # user_search_filter) -- never both, never a partial mix.
        if user_dn_format is not None:
            if base_dn is not None or user_search_filter is not None:
                raise ValueError(
                    "Cannot use user_dn_format with base_dn " "and user_search_filter"
                )
        else:
            if base_dn is None or user_search_filter is None:
                raise ValueError(
                    "Must provide user_dn_format or both base_dn "
                    "and user_search_filter"
                )
        self._admin_field = admin_field
        self._admin_group_dn = admin_group_dn
        if admin_group_dn and not self._user_dn_format:
            raise ValueError(
                "ldap.admin_group_dn must be used with ldap.user_dn_format"
            )
        self._admin_value = set(admin_value)
        self._server = None
        if cache_time is not None:
            cache_time = int(cache_time)
        # Cache user lookups; cache misses fall through to _fetch_user
        self._cache = TimedCache(cache_time, self._fetch_user)
        if service_username is not None:
            # Pin the service account into the cache forever, as an admin
            self._cache.set_expire(
                service_username, User(service_username, service_dn, True), None
            )
        self._ignore_cert = ignore_cert
        self._ignore_referrals = ignore_referrals
        self._ignore_multiple_results = ignore_multiple_results
        # Lazily detected membership attribute (see admin_member_type)
        self._admin_member_type = None

    def connect(self):
        """Initializes the python-ldap module and does the initial bind"""
        if self._ignore_cert:
            # Skip TLS certificate validation (e.g. self-signed certs)
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        if self._ignore_referrals:
            ldap.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)
        LOG.debug("LDAP connecting to %s", self._url)
        self._server = ldap.initialize(self._url, bytes_mode=False)
        self._bind_to_service()

    @property
    def admin_member_type(self):
        """Attribute name used for membership in the admin group.

        Detected lazily from the group's objectClass: groupOfUniqueNames
        uses "uniqueMember"; everything else is assumed to use "member".
        """
        if self._admin_member_type is None:
            LOG.debug("Fetching admin group %s", self._admin_group_dn)
            try:
                results = self._server.search_s(
                    self._admin_group_dn, ldap.SCOPE_BASE, attrlist=["objectClass"]
                )
            except ldap.NO_SUCH_OBJECT as e:
                LOG.debug("NO_SUCH_OBJECT %s", e)
                # Group is missing: fall back to "member" without caching
                return "member"
            dn, attributes = results[0]
            classes = [self._decode_attribute(x) for x in attributes["objectClass"]]
            if "groupOfUniqueNames" in classes:
                self._admin_member_type = "uniqueMember"
            else:
                self._admin_member_type = "member"
        return self._admin_member_type

    def _bind_to_service(self):
        """Bind to the service account or anonymous"""
        if self._service_dn:
            # bind with the service_dn
            self._server.simple_bind_s(self._service_dn, self._service_password)
        else:
            # force a connection without binding
            self._server.whoami_s()

    @reconnect
    def test_connection(self):
        """Binds to service. Will throw if bad connection"""
        self._bind_to_service()

    @reconnect
    def _fetch_user(self, username):
        """Fetch a user entry from the LDAP server"""
        LOG.debug("LDAP fetching user %s", username)
        search_attrs = []
        if self._admin_field is not None:
            search_attrs.append(self._admin_field)
        if self._user_dn_format is not None:
            # DN-template mode: look the entry up directly by DN
            dn = self._user_dn_format.format(username=username)
            LOG.debug("LDAP searching user %r with dn %r", username, dn)
            try:
                results = self._server.search_s(
                    dn, ldap.SCOPE_BASE, attrlist=search_attrs
                )
            except ldap.NO_SUCH_OBJECT as e:
                LOG.debug("NO_SUCH_OBJECT %s", e)
                return
        else:
            # Search mode: subtree search under base_dn with a filter
            search_filter = self._user_search_filter.format(username=username)
            LOG.debug("LDAP searching user %r with filter %r", username, search_filter)
            try:
                results = self._server.search_s(
                    self._base_dn, ldap.SCOPE_SUBTREE, search_filter, search_attrs
                )
            except ldap.NO_SUCH_OBJECT as e:
                LOG.debug("NO_SUCH_OBJECT %s", e)
                return
            except ldap.NO_RESULTS_RETURNED as e:
                LOG.debug("NO_RESULTS_RETURNED %s", e)
                return
        if not results:
            LOG.debug("LDAP user %r not found", username)
            return None
        if len(results) > 1:
            err_msg = "More than one user found for %r: %r" % (
                username,
                [r[0] for r in results],
            )
            if self._ignore_multiple_results:
                # Configured to tolerate duplicates: warn and use the first
                LOG.warning(err_msg)
            else:
                raise ValueError(err_msg)
        dn, attributes = results[0]
        LOG.debug("dn: %r, attributes %r", dn, attributes)
        is_admin = False
        if self._admin_field is not None:
            if self._admin_field in attributes:
                # Admin if any attribute value matches a configured admin_value
                is_admin = bool(
                    self._admin_value.intersection(
                        [
                            self._decode_attribute(x)
                            for x in attributes[self._admin_field]
                        ]
                    )
                )
        if not is_admin and self._admin_group_dn:
            # Also check membership of the dedicated admin group
            user_dn = self._user_dn_format.format(username=username)
            search_filter = "(%s=%s)" % (self.admin_member_type, user_dn)
            LOG.debug(
                "Searching admin group %s for %s", self._admin_group_dn, search_filter
            )
            try:
                results = self._server.search_s(
                    self._admin_group_dn, ldap.SCOPE_BASE, search_filter
                )
            except ldap.NO_SUCH_OBJECT as e:
                LOG.debug("NO_SUCH_OBJECT %s", e)
            else:
                is_admin = bool(results)
        return User(username, dn, is_admin)

    def _decode_attribute(self, attribute):
        """Decode a raw LDAP attribute (bytes) to str; pass others through"""
        if attribute and hasattr(attribute, "decode"):
            decoded = attribute.decode("utf-8")
            return decoded
        else:
            return attribute

    def get_user(self, username):
        """Get the User object or None"""
        return self._cache.get(username)

    @reconnect
    def verify_user(self, username, password):
        """
        Attempts to bind as the user, then rebinds as service user again
        """
        LOG.debug("LDAP verifying user %s", username)
        # Empty password may successfully complete an anonymous bind.
        # Explicitly disallow empty passwords.
        if password == "":
            return False
        user = self._cache.get(username)
        if user is None:
            return False
        try:
            LOG.debug("LDAP binding user %r", user.dn)
            self._server.simple_bind_s(user.dn, password)
        except ldap.INVALID_CREDENTIALS:
            return False
        else:
            return True
        finally:
            self._bind_to_service()
class LDAPAccessBackend(IAccessBackend):
    """
    This backend allows you to authenticate against a remote LDAP server.

    Group and permission queries are delegated to an optional fallback
    backend (configured via ``auth.ldap.fallback``); without one, LDAP
    alone provides no group support.
    """

    def __init__(self, request=None, conn=None, fallback_factory=None, **kwargs):
        super(LDAPAccessBackend, self).__init__(request, **kwargs)
        # Shared LDAP connection wrapper (built once in configure()).
        self.conn = conn
        self._fallback = None
        self._fallback_factory = fallback_factory

    @property
    def fallback(self):
        # Lazily construct the fallback backend for this request, if a
        # factory was configured.
        if self._fallback is None and self._fallback_factory is not None:
            self._fallback = self._fallback_factory(self.request)
        return self._fallback

    @classmethod
    def configure(cls, settings):
        """Build constructor kwargs (including the LDAP connection) from settings."""
        kwargs = super(LDAPAccessBackend, cls).configure(settings)
        conn = LDAP(
            admin_field=settings.get("auth.ldap.admin_field"),
            admin_group_dn=settings.get("auth.ldap.admin_group_dn"),
            admin_value=aslist(
                settings.get("auth.ldap.admin_value", []), flatten=False
            ),
            base_dn=settings.get("auth.ldap.base_dn"),
            cache_time=settings.get("auth.ldap.cache_time"),
            service_dn=settings.get("auth.ldap.service_dn"),
            service_password=settings.get("auth.ldap.service_password", ""),
            service_username=settings.get("auth.ldap.service_username"),
            url=settings["auth.ldap.url"],
            user_dn_format=settings.get("auth.ldap.user_dn_format"),
            user_search_filter=settings.get("auth.ldap.user_search_filter"),
            ignore_cert=asbool(settings.get("auth.ldap.ignore_cert")),
            ignore_referrals=asbool(settings.get("auth.ldap.ignore_referrals", False)),
            ignore_multiple_results=asbool(
                settings.get("auth.ldap.ignore_multiple_results", False)
            ),
        )
        conn.connect()
        kwargs["conn"] = conn
        fallback = settings.get("auth.ldap.fallback")
        if fallback == "config":
            kw = ConfigAccessBackend.configure(settings)
            kwargs["fallback_factory"] = lambda r: ConfigAccessBackend(r, **kw)
        return kwargs

    def _get_password_hash(self, *_):  # pragma: no cover
        # Password verification happens via an LDAP bind, never via a
        # locally stored hash.
        raise RuntimeError("LDAP should never call _get_password_hash")

    def verify_user(self, username, password):
        """Verify credentials by attempting an LDAP bind as the user."""
        return self.conn.verify_user(username, password)

    def groups(self, username=None):
        """Groups for *username* (delegated to the fallback backend)."""
        if self.fallback is not None:
            return self.fallback.groups(username)
        # LDAP doesn't support groups by default
        return []

    def group_members(self, group):
        """Members of *group* (delegated to the fallback backend)."""
        if self.fallback is not None:
            return self.fallback.group_members(group)
        # LDAP doesn't support groups by default
        return []

    def is_admin(self, username):
        """True if *username* resolves to an LDAP user flagged as admin."""
        if not username:
            return False
        user = self.conn.get_user(username)
        return user is not None and user.is_admin

    def group_permissions(self, package):
        """Group permission map for *package* (fallback only)."""
        if self.fallback is not None:
            return self.fallback.group_permissions(package)
        return {}

    def user_permissions(self, package):
        """User permission map for *package* (fallback only)."""
        if self.fallback is not None:
            return self.fallback.user_permissions(package)
        return {}

    def user_package_permissions(self, username):
        """Per-package permissions for *username* (fallback only)."""
        if self.fallback is not None:
            return self.fallback.user_package_permissions(username)
        return []

    def group_package_permissions(self, group):
        """Per-package permissions for *group* (fallback only)."""
        if self.fallback is not None:
            return self.fallback.group_package_permissions(group)
        return []

    def user_data(self, username=None):
        """Summary data for one user, or (via fallback) for all users."""
        if username is None:
            if self.fallback is not None:
                return self.fallback.user_data()
            return []
        else:
            return {
                "username": username,
                "admin": self.is_admin(username),
                "groups": self.groups(username),
            }

    def check_health(self):
        """Return ``(ok, message)`` after probing the LDAP connection."""
        try:
            self.conn.test_connection()
        except ldap.LDAPError as e:
            return (False, str(e))
        else:
            return (True, "")
| mit | 5242187a11eac0ee5c94148d613e3075 | 34.108696 | 87 | 0.564628 | 4.21396 | false | false | false | false |
stevearc/pypicloud | pypicloud/scripts.py | 1 | 9897 | """ Commandline scripts """
import argparse
import getpass
import gzip
import json
import logging
import os
import sys
from base64 import b64encode
import transaction
from jinja2 import Template
from pkg_resources import resource_string # pylint: disable=E0611
from pyramid.paster import bootstrap
from pypicloud.access import SCHEMES, get_pwd_context
def gen_password(argv=None):
    """Generate a salted password"""
    if argv is None:
        argv = sys.argv[1:]
    # Pass the docstring as the *description*; the original passed it as
    # the first positional argument, which argparse treats as ``prog``
    # (every sibling CLI entry point here uses ``description=``).
    parser = argparse.ArgumentParser(description=gen_password.__doc__)
    parser.add_argument("-i", help="Read password from stdin", action="store_true")
    parser.add_argument("-r", help="Number of rounds", type=int)
    parser.add_argument(
        "-s",
        help="Hashing scheme (default %(default)s)",
        default=SCHEMES[0],
        choices=SCHEMES,
    )
    args = parser.parse_args(argv)
    if args.i:
        # Strip the trailing newline so piped input such as
        # ``echo pass | pypicloud-gen-password -i`` hashes the password
        # itself rather than "pass\n".
        password = sys.stdin.readline().rstrip("\r\n")
    else:
        password = _get_password()
    print(_gen_password(password, args.s, args.r))
def _get_password():
    """Prompt user for a password twice for safety"""
    while True:
        first = getpass.getpass()
        second = getpass.getpass()
        if first != second:
            print("Passwords do not match!")
            continue
        return first
def _gen_password(password: str, scheme: str = None, rounds: int = None) -> str:
    """Hash *password* with the passlib context for *scheme*/*rounds*."""
    pwd_context = get_pwd_context(scheme, rounds)
    return pwd_context.hash(password)
# Sentinel distinguishing "no default supplied" from a legitimate default
# of None in the prompt helpers below.
NO_DEFAULT = object()
def wrapped_input(msg):
    """Wraps input for tests"""
    # Indirection point so unit tests can monkeypatch user input.
    return input(msg)
def prompt(msg, default=NO_DEFAULT, validate=None):
    """Prompt user for input

    Re-prompts until the (stripped) answer passes *validate*, or returns
    *default* on an empty answer when a default was supplied.
    """
    while True:
        answer = wrapped_input(msg + " ").strip()
        if answer:
            if validate is None or validate(answer):
                return answer
        elif default is not NO_DEFAULT:
            return default
def prompt_option(text, choices, default=NO_DEFAULT):
    """Prompt the user to choose one of a list of options

    Options are displayed 1-indexed; re-prompts on any answer that is not
    a number in ``1..len(choices)``.
    """
    while True:
        for i, msg in enumerate(choices):
            print("[%d] %s" % (i + 1, msg))
        response = prompt(text, default=default)
        try:
            idx = int(response) - 1
            if idx < 0:
                # Reject "0" and negative numbers; without this guard they
                # would silently index from the END of the list
                # (e.g. "0" -> choices[-1]).
                raise IndexError(response)
            return choices[idx]
        except (ValueError, IndexError):
            print("Invalid choice\n")
def promptyn(msg, default=None):
    """Display a blocking prompt until the user confirms"""
    # Capitalize the letter of the default answer, e.g. "[Y/n]" or "[y/N]";
    # with no default both letters stay lowercase. These strings are
    # loop-invariant, so compute them once.
    if default:
        yes, no = "Y", "n"
    elif default is None:
        yes, no = "y", "n"
    else:
        yes, no = "y", "N"
    while True:
        confirm = prompt("%s [%s/%s]" % (msg, yes, no), "").lower()
        if confirm in ("y", "yes"):
            return True
        if confirm in ("n", "no"):
            return False
        if not confirm and default is not None:
            return default
def bucket_validate(name):
    """Check for valid bucket name

    Prints the first violated rule and returns False; True otherwise.
    """
    rules = (
        (name.startswith("."), "Bucket names cannot start with '.'"),
        (name.endswith("."), "Bucket names cannot end with '.'"),
        (".." in name, "Bucket names cannot contain '..'"),
    )
    for violated, message in rules:
        if violated:
            print(message)
            return False
    return True
def storage_account_name_validate(name):
    """Check for valid storage account name

    Prints a message and returns False when the name contains a dot.
    """
    if "." not in name:
        return True
    print("Storage account names cannot contain '.'")
    return False
def make_config(argv=None):
    """Create a server config file"""
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description=make_config.__doc__)
    # -d/-t/-p/-r select the target environment and are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-d", action="store_true", help="Create config file for development"
    )
    group.add_argument("-t", action="store_true", help="Create config file for testing")
    group.add_argument(
        "-p", action="store_true", help="Create config file for production"
    )
    group.add_argument(
        "-r", action="store_true", help="Create config file for docker image"
    )
    parser.add_argument(
        "outfile", nargs="?", help="Name of output file (default stdout)"
    )
    args = parser.parse_args(argv)
    # Never clobber an existing file without confirmation.
    if args.outfile is not None and os.path.exists(args.outfile):
        msg = "'%s' already exists. Overwrite?" % args.outfile
        if not promptyn(msg, False):
            return
    if args.d:
        env = "dev"
    elif args.t:
        env = "test"
    elif args.p:
        env = "prod"
    elif args.r:
        env = "docker"
    else:
        # No flag given: ask interactively (docker is flag-only).
        env = prompt_option("What is this config file for?", ["dev", "test", "prod"])
    # `data` collects every value the Jinja2 config template consumes.
    data = {
        "env": env,
        "workdir": "/var/lib/pypicloud" if env == "docker" else r"%(here)s",
    }
    data["reload_templates"] = env == "dev"
    storage = prompt_option(
        "Where do you want to store your packages?",
        ["s3", "gcs", "filesystem", "azure-blob"],
    )
    if storage == "filesystem":
        storage = "file"
    data["storage"] = storage
    if storage == "s3":
        # Prefer AWS credentials already present in the environment.
        if "AWS_ACCESS_KEY_ID" in os.environ:
            data["access_key"] = os.environ["AWS_ACCESS_KEY_ID"]
        else:
            data["access_key"] = prompt("AWS access key id?")
        if "AWS_SECRET_ACCESS_KEY" in os.environ:
            data["secret_key"] = os.environ["AWS_SECRET_ACCESS_KEY"]
        else:
            data["secret_key"] = prompt("AWS secret access key?")
        data["s3_bucket"] = prompt("S3 bucket name?", validate=bucket_validate)
        # Dotted bucket names need an explicit region for path-style access.
        if "." in data["s3_bucket"]:
            data["bucket_region"] = prompt("S3 bucket region?")
    if storage == "gcs":
        data["gcs_bucket"] = prompt("GCS bucket name?", validate=bucket_validate)
    if storage == "azure-blob":
        data["storage_account_name"] = prompt(
            "Storage account name?", validate=storage_account_name_validate
        )
        data["storage_account_key"] = prompt("Storage account key?")
        data["storage_container_name"] = prompt("Container name?")
    # Fresh random session-signing keys for this deployment.
    data["encrypt_key"] = b64encode(os.urandom(32)).decode("utf-8")
    data["validate_key"] = b64encode(os.urandom(32)).decode("utf-8")
    data["admin"] = prompt("Admin username?")
    data["password"] = _gen_password(_get_password())
    data["session_secure"] = env == "prod"
    # NOTE(review): "env" was already set when `data` was built above —
    # this reassignment is redundant.
    data["env"] = env
    if env == "dev" or env == "test":
        data["wsgi"] = "waitress"
    else:
        # NOTE(review): sys.real_prefix only exists under the legacy
        # `virtualenv` tool; stdlib `venv` environments are not detected
        # here — confirm whether that is intentional.
        if hasattr(sys, "real_prefix"):
            data["venv"] = sys.prefix
        data["wsgi"] = "uwsgi"
    tmpl_str = resource_string("pypicloud", "templates/config.ini.jinja2").decode(
        "utf-8"
    )
    template = Template(tmpl_str)
    config_file = template.render(**data)
    if args.outfile is None:
        sys.stdout.write(config_file)
        sys.stdout.write(os.linesep)
    else:
        with open(args.outfile, "w", encoding="utf-8") as ofile:
            ofile.write(config_file)
        print("Config file written to '%s'" % args.outfile)
def migrate_packages(argv=None):
    """
    Migrate packages from one storage backend to another

    Create two config.ini files that are configured to use different storage
    backends. All packages will be migrated from the storage backend in the
    first to the storage backend in the second.

    ex: pypicloud-migrate-packages file_config.ini s3_config.ini
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description=migrate_packages.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("config_from", help="Name of config file to migrate from")
    parser.add_argument("config_to", help="Name of config file to migrate to")
    args = parser.parse_args(argv)
    logging.basicConfig()
    # Bootstrap both pyramid apps to get at their configured storages.
    old_env = bootstrap(args.config_from)
    old_storage = old_env["request"].db.storage
    all_packages = old_storage.list()
    new_env = bootstrap(args.config_to)
    new_storage = new_env["request"].db.storage
    for package in all_packages:
        print("Migrating %s" % package)
        with old_storage.open(package) as data:
            # we need to recalculate the path for the new storage config
            package.data.pop("path", None)
            new_storage.upload(package, data)
def export_access(argv=None):
    """Dump the access control data to a universal format"""
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description=export_access.__doc__)
    parser.add_argument("config", help="Name of config file")
    parser.add_argument("-o", help="Name of output file")
    args = parser.parse_args(argv)
    logging.basicConfig()
    env = bootstrap(args.config)
    access = env["request"].access
    data = access.dump()
    if args.o:
        # gzip.open's "w" mode is *binary*, but json.dump writes str, so
        # the original call raised TypeError at runtime. Open in text
        # mode ("wt") instead; import_access's binary read still works
        # because json.load accepts bytes.
        with gzip.open(args.o, "wt", encoding="utf-8") as ofile:
            json.dump(data, ofile)
    else:
        print(json.dumps(data, indent=2))
def import_access(argv=None):
    """
    Load the access control data from a dump file or stdin

    This operation is idempotent and graceful. It will not clobber your
    existing ACL.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description=import_access.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("config", help="Name of config file")
    parser.add_argument("-i", help="Name of input file")
    args = parser.parse_args(argv)
    logging.basicConfig()
    if args.i:
        # Binary read is fine here: json.load accepts bytes (Python 3.6+).
        with gzip.open(args.i, "r") as ifile:
            data = json.load(ifile)
    else:
        print("Reading data from stdin...")
        data = json.load(sys.stdin)
    env = bootstrap(args.config)
    access = env["request"].access
    result = access.load(data)
    # Persist the changes made by the access backend.
    transaction.commit()
    if result is not None:
        print(result)
| mit | 145319f2742699491a9889eea30a2d34 | 28.990909 | 88 | 0.606547 | 3.881176 | false | true | false | false |
poliastro/poliastro | contrib/satgpio.py | 1 | 2511 | """
Author: Juan Luis Cano Rodríguez
Code to read GP data from Celestrak using the HTTP API and python-sgp4.
Requires some extra dependencies:
$ pip install httpx sgp4
"""
import io
import json
import xml.etree.ElementTree as ET
import httpx
from sgp4 import exporter, omm
from sgp4.api import Satrec
def _generate_url(catalog_number, international_designator, name):
params = {
"CATNR": catalog_number,
"INTDES": international_designator,
"NAME": name,
}
param_names = [
param_name
for param_name, param_value in params.items()
if param_value is not None
]
if len(param_names) != 1:
raise ValueError(
"Specify exactly one of catalog_number, international_designator, or name"
)
param_name = param_names[0]
param_value = params[param_name]
url = (
"https://celestrak.com/NORAD/elements/gp.php?"
f"{param_name}={param_value}"
"&FORMAT=XML"
)
return url
def _make_query(url):
    """Fetch *url* from Celestrak and return the OMM fields of the single
    matching object.

    Raises ValueError when the query matches zero or multiple objects.
    """
    response = httpx.get(url)
    response.raise_for_status()
    # Celestrak signals "no match" with a plain-text body, not an error status.
    if response.text == "No GP data found":
        raise ValueError(
            f"Query '{url}' did not return any results, try a different one"
        )
    # First pass: count the children of the root element to enforce a
    # single-object result.
    tree = ET.parse(io.StringIO(response.text))
    root = tree.getroot()
    if len(root) != 1:
        raise ValueError(
            f"Query '{url}' returned {len(root)} results, try a different one"
        )
    # Second pass: let sgp4's OMM parser extract the element fields.
    fields = next(omm.parse_xml(io.StringIO(response.text)))
    return fields
def load_gp_from_celestrak(
    *, catalog_number=None, international_designator=None, name=None
):
    """Load general perturbations orbital data from Celestrak.

    Parameters
    ----------
    catalog_number : int or str, optional
        NORAD catalog number (CATNR).
    international_designator : str, optional
        International designator (INTDES).
    name : str, optional
        Object name (NAME). Exactly one of the three must be given.

    Returns
    -------
    Satrec
        Orbital data from specified object.

    Notes
    -----
    This uses the OMM XML format from Celestrak as described in [1]_.

    References
    ----------
    .. [1] Kelso, T.S. "A New Way to Obtain GP Data (aka TLEs)"
       https://celestrak.com/NORAD/documentation/gp-data-formats.php
    """
    # Assemble query, raise an error if malformed
    url = _generate_url(catalog_number, international_designator, name)
    # Make API call, raise an error if data is malformed
    fields = _make_query(url)
    # Initialize and return Satrec object
    sat = Satrec()
    omm.initialize(sat, fields)
    return sat
def print_sat(sat, name):
    """Prints Satrec object in convenient form."""
    fields = exporter.export_omm(sat, name)
    print(json.dumps(fields, indent=2))
| mit | 5ce6ee1c9206851d05cad2a4bfbb7e55 | 24.1 | 86 | 0.63506 | 3.51049 | false | false | false | false |
poliastro/poliastro | src/poliastro/spheroid_location.py | 1 | 5009 | import astropy.units as u
import numpy as np
from poliastro.core.spheroid_location import (
N as N_fast,
cartesian_cords as cartesian_cords_fast,
cartesian_to_ellipsoidal as cartesian_to_ellipsoidal_fast,
distance as distance_fast,
f as f_fast,
is_visible as is_visible_fast,
radius_of_curvature as radius_of_curvature_fast,
tangential_vecs as tangential_vecs_fast,
)
class SpheroidLocation:
    """Class representing a ground station on an oblate ellipsoid.

    All heavy computation is delegated to the compiled helpers in
    ``poliastro.core.spheroid_location``; this class strips/attaches
    astropy units around those calls.
    """

    def __init__(self, lon, lat, h, body):
        """
        Parameters
        ----------
        lon : ~astropy.units.quantity.Quantity
            Geodetic longitude
        lat : ~astropy.units.quantity.Quantity
            Geodetic latitude
        h : ~astropy.units.quantity.Quantity
            Geodetic height
        body : ~poliastro.bodies.Body
            Planetary body the spheroid location lies on
        """
        self._lon = lon
        self._lat = lat
        self._h = h
        # Oblate spheroid semi-axes: a == b (equatorial), c (polar).
        self._a = body.R
        self._b = body.R
        self._c = body.R_polar

    @property
    def cartesian_cords(self):
        """Convert to the Cartesian Coordinate system."""
        # Strip units before calling the numba-compiled helper, then
        # re-attach meters to the result.
        _a, _c, _lon, _lat, _h = (
            self._a.to_value(u.m),
            self._c.to_value(u.m),
            self._lon.to_value(u.rad),
            self._lat.to_value(u.rad),
            self._h.to_value(u.m),
        )
        return (
            cartesian_cords_fast(
                _a,
                _c,
                _lon,
                _lat,
                _h,
            )
            * u.m
        )

    @property
    def f(self):
        """Get first flattening."""
        _a, _c = self._a.to_value(u.m), self._c.to_value(u.m)
        return f_fast(_a, _c)

    @property
    def N(self):
        """Normal vector of the ellipsoid at the given location."""
        a, b, c = (
            self._a.to_value(u.m),
            self._b.to_value(u.m),
            self._c.to_value(u.m),
        )
        cartesian_cords = np.array(
            [coord.value for coord in self.cartesian_cords]
        )
        return N_fast(a, b, c, cartesian_cords)

    @property
    def tangential_vecs(self):
        """Returns orthonormal vectors tangential to the ellipsoid at the given location."""
        N = self.N
        return tangential_vecs_fast(N)

    @property
    def radius_of_curvature(self):
        """Radius of curvature of the meridian at the latitude of the given location."""
        _a, _c, _lat = (
            self._a.to_value(u.m),
            self._c.to_value(u.m),
            self._lat.to_value(u.rad),
        )
        return (
            radius_of_curvature_fast(_a, _c, _lat) * u.m
        )  # Need to convert units to u.rad and then take value because numpy expects angles in radians if unit is not given.

    def distance(self, px, py, pz):
        """
        Calculates the distance from an arbitrary point to the given location (Cartesian coordinates).

        Parameters
        ----------
        px : ~astropy.units.quantity.Quantity
            x-coordinate of the point
        py : ~astropy.units.quantity.Quantity
            y-coordinate of the point
        pz : ~astropy.units.quantity.Quantity
            z-coordinate of the point
        """
        px, py, pz = px.to_value(u.m), py.to_value(u.m), pz.to_value(u.m)
        cartesian_cords = np.array(
            [coord.value for coord in self.cartesian_cords]
        )
        return (
            distance_fast(cartesian_cords, px, py, pz) * u.m
        )  # body.R and body.R_polar has u.m as units

    def is_visible(self, px, py, pz):
        """
        Determines whether an object located at a given point is visible from the given location.
        Returns true if true, false otherwise.

        Parameters
        ----------
        px : ~astropy.units.quantity.Quantity
            x-coordinate of the point
        py : ~astropy.units.quantity.Quantity
            y-coordinate of the point
        pz : ~astropy.units.quantity.Quantity
            z-coordinate of the point
        """
        px, py, pz = px.to_value(u.m), py.to_value(u.m), pz.to_value(u.m)
        cartesian_cords = np.array(
            [coord.value for coord in self.cartesian_cords]
        )
        return is_visible_fast(cartesian_cords, px, py, pz, self.N)

    def cartesian_to_ellipsoidal(self, x, y, z):
        """
        Converts cartesian coordinates to ellipsoidal coordinates for this ellipsoid.

        Parameters
        ----------
        x : ~astropy.units.quantity.Quantity
            x-coordinate
        y : ~astropy.units.quantity.Quantity
            y-coordinate
        z : ~astropy.units.quantity.Quantity
            z-coordinate

        Returns
        -------
        tuple
            ``(lon, lat, h)`` with units rad, rad, m respectively.
        """
        _a, _c = self._a.to_value(u.m), self._c.to_value(u.m)
        x, y, z = x.to_value(u.m), y.to_value(u.m), z.to_value(u.m)
        lon, lat, h = cartesian_to_ellipsoidal_fast(_a, _c, x, y, z)
        return lon * u.rad, lat * u.rad, h * u.m
| mit | 2208906843eb464159d77f133697c578 | 30.503145 | 125 | 0.54442 | 3.534933 | false | false | false | false |
poliastro/poliastro | src/poliastro/twobody/thrust/change_ecc_quasioptimal.py | 1 | 1419 | """Quasi optimal eccentricity-only change, with formulas developed by Pollard.
References
----------
* Pollard, J. E. "Simplified Approach for Assessment of Low-Thrust
Elliptical Orbit Transfers", 1997.
"""
import numpy as np
from astropy import units as u
from numba import njit
from numpy import cross
from poliastro.core.thrust.change_ecc_quasioptimal import extra_quantities
from poliastro.util import norm
def change_ecc_quasioptimal(orb_0, ecc_f, f):
    """Guidance law from the model.

    Thrust is aligned with an inertially fixed direction perpendicular to the
    semimajor axis of the orbit.

    Parameters
    ----------
    orb_0 : Orbit
        Initial orbit, containing all the information.
    ecc_f : float
        Final eccentricity.
    f : float
        Magnitude of constant acceleration

    Returns
    -------
    a_d : callable
        Acceleration function ``a_d(t0, u_, k)`` for the propagator.
    delta_V : float
        Total velocity change of the maneuver.
    t_f : float
        Time of flight.
    """
    # We fix the inertial direction at the beginning
    k = orb_0.attractor.k.to(u.km**3 / u.s**2).value
    a = orb_0.a.to(u.km).value
    ecc_0 = orb_0.ecc.value
    if ecc_0 > 0.001:  # Arbitrary tolerance
        # Use the eccentricity vector direction when it is well defined.
        ref_vec = orb_0.e_vec / ecc_0
    else:
        # Near-circular orbit: fall back to the radial direction.
        ref_vec = orb_0.r / norm(orb_0.r)

    h_unit = orb_0.h_vec / norm(orb_0.h_vec)
    # Sign flips the thrust direction when the eccentricity must decrease.
    thrust_unit = cross(h_unit, ref_vec) * np.sign(ecc_f - ecc_0)

    @njit
    def a_d(t0, u_, k):
        # Constant acceleration along the fixed inertial direction.
        accel_v = f * thrust_unit
        return accel_v

    delta_V, t_f = extra_quantities(k, a, ecc_0, ecc_f, f)
    return a_d, delta_V, t_f
| mit | 871fc55218a9d8b42f2ff0c321a8821e | 27.38 | 78 | 0.644116 | 2.809901 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.