repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
astroduff/commah | commah/commah.py | calc_ab | python | def calc_ab(zi, Mi, **cosmo):
# When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
# Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
# Arbitray formation redshift, z_-2 in COM is more physically motivated
zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
# Eqn 22 of Correa et al 2015a
q = 4.137 * zf**(-0.9476)
# Radius of a mass Mi
R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]
# Radius of a mass Mi/q
Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]
# Mass variance 'sigma' evaluate at z=0 to a good approximation
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]
sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]
f = (sigq**2 - sig**2)**(-0.5)
# Eqn 9 and 10 from Correa et al 2015c
# (generalised to zi from Correa et al 2015a's z=0 special case)
# a_tilde is power law growth rate
a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
# b_tilde is exponential growth rate
b_tilde = -f
return(a_tilde, b_tilde) | Calculate growth rate indices a_tilde and b_tilde
Parameters
----------
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(a_tilde, b_tilde) : float | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L349-L397 | [
"def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)\n",
"def growthfactor(z, norm=True, **cosmo):\n \"\"\" Returns linear growth factor at a given redshift, normalised to z=0\n by default, for a given cosmology\n\n Parameters\n ----------\n\n z : float or numpy array\n The redshift at which the growth factor should be calculated\n norm : boolean, optional\n If true then normalise the growth factor to z=0 case defaults True\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float or numpy array\n The growth factor at a range of redshifts 'z'\n\n Raises\n ------\n\n \"\"\"\n H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +\n cosmo['omega_lambda_0'])\n growthval = H * _int_growth(z, **cosmo)\n if norm:\n growthval /= _int_growth(0, **cosmo)\n\n return(growthval)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (see keys below) or a user dictionary of
        cosmological parameters.

    Returns
    -------
    dict
        Cosmology dictionary, augmented with 'A_scaling' and omega_k_0,
        suitable for passing as **cosmo to cosmolopy routines.

    Raises
    ------
    ValueError
        If 'cosmology' is neither a parameter dict nor a recognised name.
        (Previously this case fell through with 'cosmo' unbound and
        crashed with an UnboundLocalError further down.)
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        print("You haven't passed a dict of cosmological parameters ")
        print("OR a recognised cosmology, you gave %s" % (cosmology))
        raise ValueError("Unrecognised cosmology: %r" % (cosmology,))
    # Flat cosmology assumed: set omega_k_0 = 0 for cosmolopy
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift.

    Parameters
    ----------
    z : float
        Redshift of the halo
    M : float
        Halo mass [Msol] (pivot mass is 2e12/0.72 Msol)
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        If True use the relaxed-sample fit, else the full sample

    Returns
    -------
    float
        NFW concentration c(M, z)

    Raises
    ------
    ValueError
        For an unrecognised 'vir' (previously this fell through and
        crashed with a NameError on the unbound 'params').
    """
    # (A, B, C) power-law fit parameters from Duffy et al. 2008, Table 1,
    # keyed by boundary definition then by relaxed (True) / full (False)
    fits = {'200crit': {True: [6.71, -0.091, -0.44],
                        False: [5.71, -0.084, -0.47]},
            'tophat': {True: [9.23, -0.090, -0.69],
                       False: [7.85, -0.081, -0.71]},
            '200mean': {True: [11.93, -0.090, -0.99],
                        False: [10.14, -0.081, -1.01]}}
    if vir not in fits:
        print("Didn't recognise the halo boundary definition provided %s"
              % (vir))
        raise ValueError("Unrecognised halo boundary definition: %r" % (vir,))
    params = fits[vir][bool(relaxed)]
    # c = A * (M / M_pivot)^B * (1+z)^C with M_pivot = 2e12/0.72 Msol
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb the best-fit constant of proportionality 'Ascaling' of the
    rho_crit - rho_2 relation for an unknown cosmology (Correa et al 2015c).

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float
        The perturbation factor for the rho_2 / rho_crit 'A' relation
        in this cosmology.
    """
    # Mass enclosed in an 8 Mpc sphere for this cosmology
    M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale by sigma_8 relative to the fiducial value 0.796,
    # with a small tilt correction for the spectral index n
    sigma_term = 0.796 / cosmo['sigma_8']
    tilt = (cosmo['n'] - 0.963) / 6
    return(sigma_term * (M8_cosmo/2.5e14)**tilt)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1..9 variants, Planck13, Planck15)
        or, with newcosmo set, a dictionary of cosmological parameters.
    newcosmo : str, optional
        If truthy, 'cosmology' is a parameter dict not in the predefined
        list, and the WMAP5 value is perturbed via _delta_sigma.
        Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit.

    Raises
    ------
    ValueError
        For an unrecognised named cosmology (previously this fell through
        with 'A_scaling' unbound and crashed with an UnboundLocalError).
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        try:
            A_scaling = defaultcosmologies[cosmology.lower()]
        except KeyError:
            print("Error, don't recognise your cosmology for A_scaling ")
            print("You provided %s" % (cosmology))
            raise ValueError("Unrecognised cosmology for A_scaling: %r"
                             % (cosmology,))
    return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """
    # 1/E(z): inverse dimensionless Hubble parameter
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    gz = growthfactor(z, norm=True, **cosmo)
    # Product rule: term from d/dz of E(z) and term from d/dz of the integral
    term_hubble = gz * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2
    term_integral = fz * gz / _int_growth(z, **cosmo)
    return(term_hubble - term_integral)
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor at redshift(s) 'z', normalised to z=0
    by default, for a given cosmology.

    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If True (default) normalise the growth factor to the z=0 case
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float or numpy array
        The growth factor at the requested redshift(s)
    """
    # Dimensionless Hubble parameter E(z) for a flat universe
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    gval = hubble * _int_growth(z, **cosmo)
    if norm:
        return(gval / _int_growth(0, **cosmo))
    return(gval)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return
    the formation redshift for a concentration at a given redshift.

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmology-dependent scaling between densities, use
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass integrals and the mean inner density rho_2 (eqn 14)
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    rho_2 = 200*(c**3)*Y1/Yc
    # Invert eqn 18 for the redshift at which the universe had density
    # rho_2 / Ascaling (relative to critical)
    lam = omega_lambda_0 / omega_M_0
    cube = ((1+z)**3 + lam) * (rho_2/Ascaling) - lam
    return(cube**(1/3) - 1)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at
    redshift 'z' given mass 'Mi' at the lower redshift 'zi' (zi < z).

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history at. Note zi<z
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Growth indices at the starting redshift, eqns 9 & 10 of
    # Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    dz = z - zi
    # Halo mass at z [Msol], eqn 8 in Correa et al. (2015c)
    Mz = Mi * (1 + dz)**a_tilde * np.exp(b_tilde * dz)
    # Dimensionless Hubble parameter E(z)
    Ez = np.sqrt(cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])
    # Accretion rate at z [Msol/yr], eqn 11 from Correa et al. (2015c)
    dMdt = (71.6 * (Mz/1e12) * (cosmo['h']/0.7) *
            (-a_tilde / (1 + dz) - b_tilde) * (1 + z) * Ez)
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Calculate a mass accretion history by evaluating acc_rate at
    each redshift step in 'z' for a halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output the MAH over. Note zi<z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at each redshift 'z'
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)
    # Pre-allocate the output arrays
    rates = np.empty_like(z)
    masses = np.empty_like(z)
    # Solve accretion rate and halo mass at every redshift step
    for idx, zval in enumerate(z):
        rates[idx], masses[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return(rates, masses)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
        Must also contain 'A_scaling' (see getAscaling).

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds to,
        the dimensionless fluctuation this represents and the
        formation redshift (all set to -1 on solver failure).
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)

    # Pre-allocate one output slot per (z, M) pair
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)

    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)

        # Root-find _minimize_c for the single unknown 'c',
        # bracketed in [2, 1000]
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                  cosmo['A_scaling'], cosmo['omega_M_0'],
                                  cosmo['omega_lambda_0']))

        # NOTE(review): brentq with bracket [2, 1000] should not return 0;
        # this check presumably guards a legacy solver's failure mode —
        # confirm whether it is still reachable
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            # Sentinel values flag the failed solve in all four outputs
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])

            # Comoving radius enclosing Mval, then the mass variance
            # sigma at z=0 (good approximation, cf. calc_ab)
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)

            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Dimensionless peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf

    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all halo
        masses 'Mi' are assumed to be at this redshift. If both Mi and
        zi are arrays they must be the same size for one-to-one
        correspondence. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. Broadcast against 'zi' as
        above. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass, default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured numpy array of shape (size(Mi), size(z)).
        Columns depend on flags:
        mah only: ('zi','Mi','z','dMdt','Mz');
        com only: ('zi','Mi','z','c','sig','nu','zf');
        both: ('zi','Mi','z','dMdt','Mz','c','sig','nu','zf').
        'dMdt' is accretion rate [Msol/yr], 'Mz' halo mass [Msol] at 'z',
        'c' NFW concentration, 'sig' mass variance, 'nu' peak height,
        'zf' formation redshift.
    (dataset, cosmo) if retcosmo is True;
    -1 if both com=False and mah=False, or zi/Mi sizes are incompatible.
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)

    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1 (note: a valid result is a tuple, which
    # never compares equal to -1)
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results

    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)

    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUGFIX: open in text mode ('w'), not binary ('wb') -- every
        # write below passes a str, and writing str to a binary-mode
        # file raises TypeError under Python 3
        fout = open(filename, 'w')

    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])

        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)

            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()

    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | acc_rate | python | def acc_rate(z, zi, Mi, **cosmo):
# Find parameters a_tilde and b_tilde for initial redshift
# use Eqn 9 and 10 of Correa et al. (2015c)
a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
# Halo mass at z, in Msol
# use Eqn 8 in Correa et al. (2015c)
Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
# Accretion rate at z, Msol yr^-1
# use Eqn 11 from Correa et al. (2015c)
dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
(-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
return(dMdt, Mz) | Calculate accretion rate and mass history of a halo at any
redshift 'z' with mass 'Mi' at a lower redshift 'z'
Parameters
----------
z : float
Redshift to solve acc_rate / mass history. Note zi<z
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z' | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L400-L438 | [
"def calc_ab(zi, Mi, **cosmo):\n \"\"\" Calculate growth rate indices a_tilde and b_tilde\n\n Parameters\n ----------\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (a_tilde, b_tilde) : float\n \"\"\"\n\n # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta\n\n # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)\n # Arbitray formation redshift, z_-2 in COM is more physically motivated\n zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837\n\n # Eqn 22 of Correa et al 2015a\n q = 4.137 * zf**(-0.9476)\n\n # Radius of a mass Mi\n R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]\n # Radius of a mass Mi/q\n Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]\n\n # Mass variance 'sigma' evaluate at z=0 to a good approximation\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]\n sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]\n\n f = (sigq**2 - sig**2)**(-0.5)\n\n # Eqn 9 and 10 from Correa et al 2015c\n # (generalised to zi from Correa et al 2015a's z=0 special case)\n # a_tilde is power law growth rate\n a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /\n growthfactor(zi, norm=True, **cosmo)**2 + 1)*f\n # b_tilde is exponential growth rate\n b_tilde = -f\n\n return(a_tilde, b_tilde)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
""" Find cosmological parameters for named cosmo in cosmology.py list """
defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
'wmap1_lss': cg.WMAP1_2dF_mean(),
'wmap3_mean': cg.WMAP3_mean(),
'wmap5_ml': cg.WMAP5_ML(),
'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
'planck13': cg.Planck_2013(),
'planck15': cg.Planck_2015()}
if isinstance(cosmology, dict):
# User providing their own variables
cosmo = cosmology
if 'A_scaling' not in cosmology.keys():
A_scaling = getAscaling(cosmology, newcosmo=True)
cosmo.update({'A_scaling': A_scaling})
# Add extra variables by hand that cosmolopy requires
# note that they aren't used (set to zero)
for paramnames in cg.WMAP5_mean().keys():
if paramnames not in cosmology.keys():
cosmo.update({paramnames: 0})
elif cosmology.lower() in defaultcosmologies.keys():
# Load by name of cosmology instead
cosmo = defaultcosmologies[cosmology.lower()]
A_scaling = getAscaling(cosmology)
cosmo.update({'A_scaling': A_scaling})
else:
print("You haven't passed a dict of cosmological parameters ")
print("OR a recognised cosmology, you gave %s" % (cosmology))
# No idea why this has to be done by hand but should be O_k = 0
cosmo = cp.distance.set_omega_k_0(cosmo)
# Use the cosmology as **cosmo passed to cosmolopy routines
return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift

    Parameters
    ----------
    z : float / numpy array
        Redshift of halo
    M : float / numpy array
        Halo mass [Msol]; the power-law pivot mass is 2e12/0.72 Msol
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        If True use the relaxed-halo fit, otherwise the full sample

    Returns
    -------
    float / numpy array
        NFW concentration c = A (M/Mpivot)^B (1+z)^C

    Raises
    ------
    ValueError
        If `vir` is not a recognised definition (previously this printed
        a warning then crashed with a NameError on `params`).
    """
    # (A, B, C) fit parameters from Duffy et al. 2008, Table 1
    if vir == '200crit':
        params = [6.71, -0.091, -0.44] if relaxed else [5.71, -0.084, -0.47]
    elif vir == 'tophat':
        params = [9.23, -0.090, -0.69] if relaxed else [7.85, -0.081, -0.71]
    elif vir == '200mean':
        params = [11.93, -0.090, -0.99] if relaxed else [10.14, -0.081, -1.01]
    else:
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb the best-fit constant of proportionality Ascaling of the
    rho_crit - rho_2 relation for an unknown cosmology (Correa et al 2015c)

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float
        The perturbed 'A' relation between rho_2 and rho_crit
    """
    # Mass enclosed by a sphere of radius 8 Mpc/h in this cosmology
    mass8 = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the reference fit (sigma_8=0.796, n=0.963)
    sig8_ratio = (0.796/cosmo['sigma_8'])
    tilt = (cosmo['n']-0.963)/6
    return(sig8_ratio * (mass8/2.5e14)**tilt)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1-9, Planck13, Planck15) or a dict
        of cosmological parameters (only used when newcosmo is set).
    newcosmo : str, optional
        If set, `cosmology` must be a parameter dict and the scaling is
        perturbed from the WMAP5 value via _delta_sigma. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit

    Raises
    ------
    ValueError
        If the named cosmology is not recognised (previously this printed
        a warning and then crashed with a NameError on `A_scaling`).
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            raise ValueError("Error, don't recognise your cosmology for "
                             "A_scaling, you provided %s" % (cosmology))
    return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    # Evaluate the normalised growth factor once: the original computed it
    # twice and each call performs two numerical integrations
    gz = growthfactor(z, norm=True, **cosmo)
    deriv_g = gz*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * gz/_int_growth(z, **cosmo)
    return(deriv_g)
def growthfactor(z, norm=True, **cosmo):
    """ Return the linear growth factor at redshift 'z' for cosmology
    **cosmo, normalised to unity at z=0 by default.

    Parameters
    ----------
    z : float or numpy array
        Redshift(s) at which the growth factor is evaluated
    norm : boolean, optional
        If True (default) divide by the z=0 value
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float or numpy array
        The (optionally normalised) growth factor at 'z'
    """
    # Dimensionless Hubble rate E(z) for a flat matter+Lambda universe
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    growthval = hubble * _int_growth(z, **cosmo)
    if not norm:
        return(growthval)
    return(growthval / _int_growth(0, **cosmo))
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return the
    formation redshift for a concentration at a given redshift

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmology-dependent scaling between densities; use
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass-profile terms at r_s (mu(1)) and at the virial radius (mu(c))
    mu_one = np.log(2) - 0.5
    mu_c = np.log(1+c) - c/(1+c)
    # Mean inner density within the scale radius
    rho_2 = 200*(c**3)*mu_one/mu_c
    de_ratio = omega_lambda_0/omega_M_0
    return((((1+z)**3 + de_ratio) *
            (rho_2/Ascaling) - de_ratio)**(1/3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth-rate indices
    """
    # When zi = 0, a_tilde reduces to alpha and b_tilde to beta.
    # Arbitrary formation redshift, Eqn 23 of Correa et al 2015a
    # (z_-2 in COM is more physically motivated)
    logM = np.log10(Mi)
    zf = -0.0064 * logM**2 + 0.0237 * logM + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Lagrangian radii for masses Mi and Mi/q [Mpc]
    radius_full = cp.perturbation.mass_to_radius(Mi, **cosmo)
    radius_frac = cp.perturbation.mass_to_radius(Mi/q, **cosmo)
    # Mass variance 'sigma' evaluated at z=0 to a good approximation;
    # sigma_r returns (sigma, error), the error is discarded
    sig = cp.perturbation.sigma_r(radius_full, 0, **cosmo)[0]
    sigq = cp.perturbation.sigma_r(radius_frac, 0, **cosmo)[0]
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c (generalised to zi from the
    # z=0 special case of Correa et al 2015a)
    growth_term = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
                   growthfactor(zi, norm=True, **cosmo)**2)
    # a_tilde is the power-law growth rate, b_tilde the exponential one
    a_tilde = (growth_term + 1)*f
    b_tilde = -f
    return(a_tilde, b_tilde)
def MAH(z, zi, Mi, **cosmo):
    """ Build the mass accretion history by evaluating acc_rate at each
    requested output redshift for a halo of mass 'Mi' at redshift 'zi'

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output the MAH over. Note zi<z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Ensure z is a 1D NumPy array so scalars and sequences both work
    z = np.array(z, ndmin=1, dtype=float)
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    # Solve the accretion rate and progenitor mass at every redshift step
    for idx, zval in enumerate(z):
        dMdt_array[idx], Mz_array[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'
    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds too,
        the dimensionless fluctuation this represents and formation redshift
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create output arrays, filled element-by-element in the loop below
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c';
        # brentq brackets the root between c=2 and c=1000
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                  cosmo['A_scaling'], cosmo['omega_M_0'],
                                  cosmo['omega_lambda_0']))
        if np.isclose(c, 0):
            # Flag unphysical solutions with -1 rather than raising
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)
    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.
    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed
    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUGFIX: open in text mode ('w'). The original used 'wb', but all
        # fout.write calls below pass str, which raises TypeError on a
        # binary-mode handle under Python 3.
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        fout.close() if filename else None
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | MAH | python | def MAH(z, zi, Mi, **cosmo):
# Ensure that z is a 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
# Create a full array
dMdt_array = np.empty_like(z)
Mz_array = np.empty_like(z)
for i_ind, zval in enumerate(z):
# Solve the accretion rate and halo mass at each redshift step
dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
dMdt_array[i_ind] = dMdt
Mz_array[i_ind] = Mz
return(dMdt_array, Mz_array) | Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z' | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L441-L480 | [
"def acc_rate(z, zi, Mi, **cosmo):\n \"\"\" Calculate accretion rate and mass history of a halo at any\n redshift 'z' with mass 'Mi' at a lower redshift 'z'\n\n Parameters\n ----------\n z : float\n Redshift to solve acc_rate / mass history. Note zi<z\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n # Find parameters a_tilde and b_tilde for initial redshift\n # use Eqn 9 and 10 of Correa et al. (2015c)\n a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)\n\n # Halo mass at z, in Msol\n # use Eqn 8 in Correa et al. (2015c)\n Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))\n\n # Accretion rate at z, Msol yr^-1\n # use Eqn 11 from Correa et al. (2015c)\n dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\\\n (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\\\n np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])\n\n return(dMdt, Mz)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list

    Accepts either a dict of cosmological parameters (augmented in place
    with 'A_scaling' and any cosmolopy-required keys that are missing) or
    the name of one of the predefined cosmologies.

    Raises
    ------
    ValueError
        If `cosmology` is neither a dict nor a recognised cosmology name.
        (Previously this path printed a warning and then crashed with a
        NameError because `cosmo` was never assigned.)
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Perturb the default scaling constant for this new cosmology
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        # Fail loudly rather than fall through with 'cosmo' unassigned
        raise ValueError("You haven't passed a dict of cosmological "
                         "parameters OR a recognised cosmology, "
                         "you gave %s" % (cosmology))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
    """ Output the cosmology to a string for writing to file """
    # Keys follow the cosmolopy parameter-dict convention used throughout
    cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
                   "sigma8:{3:.3f}, ns:{4:.2f}".format(
                       cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
                       cosmo['sigma_8'], cosmo['n']))
    return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift

    Parameters
    ----------
    z : float / numpy array
        Redshift of halo
    M : float / numpy array
        Halo mass [Msol]; the power-law pivot mass is 2e12/0.72 Msol
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        If True use the relaxed-halo fit, otherwise the full sample

    Returns
    -------
    float / numpy array
        NFW concentration c = A (M/Mpivot)^B (1+z)^C

    Raises
    ------
    ValueError
        If `vir` is not a recognised definition (previously this printed
        a warning then crashed with a NameError on `params`).
    """
    # (A, B, C) fit parameters from Duffy et al. 2008, Table 1
    if vir == '200crit':
        params = [6.71, -0.091, -0.44] if relaxed else [5.71, -0.084, -0.47]
    elif vir == 'tophat':
        params = [9.23, -0.090, -0.69] if relaxed else [7.85, -0.081, -0.71]
    elif vir == '200mean':
        params = [11.93, -0.090, -0.99] if relaxed else [10.14, -0.081, -1.01]
    else:
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb best-fit constant of proportionality Ascaling for
    rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    float
        The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
    Raises
    ------
    """
    # Mass enclosed by a sphere of radius 8 Mpc/h in this cosmology
    M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the reference fit values (sigma_8=0.796, n=0.963)
    perturbed_A = (0.796/cosmo['sigma_8']) * \
        (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
    return(perturbed_A)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1-9, Planck13, Planck15) or a dict
        of cosmological parameters (only used when newcosmo is set).
    newcosmo : str, optional
        If set, `cosmology` must be a parameter dict and the scaling is
        perturbed from the WMAP5 value via _delta_sigma. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit

    Raises
    ------
    ValueError
        If the named cosmology is not recognised (previously this printed
        a warning and then crashed with a NameError on `A_scaling`).
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            raise ValueError("Error, don't recognise your cosmology for "
                             "A_scaling, you provided %s" % (cosmology))
    return(A_scaling)
def _int_growth(z, **cosmo):
    """ Returns integral of the linear growth factor from z=200 to z=z """
    zmax = 200
    # Accept scalar or sequence input; the integral starts at zmax so
    # every requested redshift must lie below it
    if hasattr(z, "__len__"):
        for zval in z:
            assert(zval < zmax)
    else:
        assert(z < zmax)
    # quad returns (value, abserr); only the value is used
    y, yerr = scipy.integrate.quad(
        lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
                           cosmo['omega_lambda_0'])**(1.5),
        z, zmax)
    return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    # Evaluate the normalised growth factor once: the original computed it
    # twice and each call performs two numerical integrations
    gz = growthfactor(z, norm=True, **cosmo)
    deriv_g = gz*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * gz/_int_growth(z, **cosmo)
    return(deriv_g)
def growthfactor(z, norm=True, **cosmo):
    """ Returns linear growth factor at a given redshift, normalised to z=0
    by default, for a given cosmology
    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0 case defaults True
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    float or numpy array
        The growth factor at a range of redshifts 'z'
    Raises
    ------
    """
    # Dimensionless Hubble rate E(z) for a flat matter+Lambda universe
    H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                cosmo['omega_lambda_0'])
    growthval = H * _int_growth(z, **cosmo)
    if norm:
        # Normalise so the growth factor is unity at z=0
        growthval /= _int_growth(0, **cosmo)
    return(growthval)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
                Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
    for 1 unknown, i.e. concentration, returned by a minimisation call """
    # Fn 1 (LHS of Eqn 18)
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    f1 = Y1/Yc
    # Fn 2 (RHS of Eqn 18)
    # Eqn 14 - Define the mean inner density
    rho_2 = 200 * c**3 * Y1 / Yc
    # Eqn 17 rearranged to solve for Formation Redshift
    # essentially when universe had rho_2 density
    zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
    # RHS of Eqn 19
    f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
    # LHS - RHS should be zero for the correct concentration;
    # COM drives this residual to zero with a bracketed root finder
    return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return
    formation redshift for a concentration at a given redshift
    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmological dependent scaling between densities, use function
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75
    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass-profile terms evaluated at r_s (Y1) and r_vir (Yc)
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    # Mean inner density within the scale radius (Eqn 14)
    rho_2 = 200*(c**3)*Y1/Yc
    zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
    return(zf)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde
    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (a_tilde, b_tilde) : float
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift, z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]
    # Mass variance 'sigma' evaluate at z=0 to a good approximation;
    # sigma_r returns (sigma, error), the error terms are unused
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # [Mpc]
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' for a halo of mass 'Mi' at the lower redshift 'zi'

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history. Note zi<z
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """

    # Growth rate indices at the initial redshift,
    # Eqn 9 and 10 of Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)

    # Halo mass at z in Msol, Eqn 8 in Correa et al. (2015c)
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))

    # Accretion rate at z in Msol yr^-1, Eqn 11 from Correa et al. (2015c)
    dMdt = (71.6 * (Mz/1e12) * (cosmo['h']/0.7) *
            (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *
            np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0']))

    return(dMdt, Mz)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds too,
        the dimensionless fluctuation this represents and formation redshift.
        All four are set to -1 for entries where no concentration in
        [2, 1000] solves the COM equations.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)

    # Create output arrays
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)

    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)

        # Minimize equation to solve for 1 unknown, 'c'.
        # NB: brentq raises ValueError when _minimize_c has no sign change
        # over the bracket [2, 1000] (i.e. no valid concentration exists);
        # it never returns ~0, so failure must be caught via the exception
        # rather than testing the returned value.
        try:
            c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                      args=(zval, a_tilde, b_tilde,
                                            cosmo['A_scaling'],
                                            cosmo['omega_M_0'],
                                            cosmo['omega_lambda_0']))
        except ValueError:
            c = 0

        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])

            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)

            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))

        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf

    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed

    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.

    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)

    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results

    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)

    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # Open in TEXT mode: every fout.write below passes str, which would
        # raise TypeError against a binary ('wb') handle under Python 3
        fout = open(filename, 'w')

    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)

            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)

                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)

                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)

                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {} , {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()

    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | COM | python | def COM(z, M, **cosmo):
# Check that z and M are arrays
z = np.array(z, ndmin=1, dtype=float)
M = np.array(M, ndmin=1, dtype=float)
# Create array
c_array = np.empty_like(z)
sig_array = np.empty_like(z)
nu_array = np.empty_like(z)
zf_array = np.empty_like(z)
for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
# Evaluate the indices at each redshift and mass combination
# that you want a concentration for, different to MAH which
# uses one a_tilde and b_tilde at the starting redshift only
a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
# Minimize equation to solve for 1 unknown, 'c'
c = scipy.optimize.brentq(_minimize_c, 2, 1000,
args=(zval, a_tilde, b_tilde,
cosmo['A_scaling'], cosmo['omega_M_0'],
cosmo['omega_lambda_0']))
if np.isclose(c, 0):
print("Error solving for concentration with given redshift and "
"(probably) too small a mass")
c = -1
sig = -1
nu = -1
zf = -1
else:
# Calculate formation redshift for this concentration,
# redshift at which the scale radius = virial radius: z_-2
zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
omega_M_0=cosmo['omega_M_0'],
omega_lambda_0=cosmo['omega_lambda_0'])
R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
c_array[i_ind] = c
sig_array[i_ind] = sig
nu_array[i_ind] = nu
zf_array[i_ind] = zf
return(c_array, sig_array, nu_array, zf_array) | Calculate concentration for halo of mass 'M' at redshift 'z'
Parameters
----------
z : float / numpy array
Redshift to find concentration of halo
M : float / numpy array
Halo mass at redshift 'z'. Must be same size as 'z'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(c_array, sig_array, nu_array, zf_array) : float / numpy arrays
of equivalent size to 'z' and 'M'. Variables are
Concentration, Mass Variance 'sigma' this corresponds too,
the dimnesionless fluctuation this represents and formation redshift | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L483-L552 | [
"def _izip(*iterables):\n \"\"\" Iterate through multiple lists or arrays of equal size \"\"\"\n # This izip routine is from itertools\n # izip('ABCD', 'xy') --> Ax By\n\n iterators = map(iter, iterables)\n while iterators:\n yield tuple(map(next, iterators))\n",
"def growthfactor(z, norm=True, **cosmo):\n \"\"\" Returns linear growth factor at a given redshift, normalised to z=0\n by default, for a given cosmology\n\n Parameters\n ----------\n\n z : float or numpy array\n The redshift at which the growth factor should be calculated\n norm : boolean, optional\n If true then normalise the growth factor to z=0 case defaults True\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float or numpy array\n The growth factor at a range of redshifts 'z'\n\n Raises\n ------\n\n \"\"\"\n H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +\n cosmo['omega_lambda_0'])\n growthval = H * _int_growth(z, **cosmo)\n if norm:\n growthval /= _int_growth(0, **cosmo)\n\n return(growthval)\n",
"def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Rearrange eqn 18 from Correa et al (2015c) to return\n formation redshift for a concentration at a given redshift\n\n Parameters\n ----------\n c : float / numpy array\n Concentration of halo\n z : float / numpy array\n Redshift of halo with concentration c\n Ascaling : float\n Cosmological dependent scaling between densities, use function\n getAscaling('WMAP5') if unsure. Default is 900.\n omega_M_0 : float\n Mass density of the universe. Default is 0.25\n omega_lambda_0 : float\n Dark Energy density of the universe. Default is 0.75\n\n Returns\n -------\n zf : float / numpy array\n Formation redshift for halo of concentration 'c' at redshift 'z'\n\n \"\"\"\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n rho_2 = 200*(c**3)*Y1/Yc\n\n zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n return(zf)\n",
"def calc_ab(zi, Mi, **cosmo):\n \"\"\" Calculate growth rate indices a_tilde and b_tilde\n\n Parameters\n ----------\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (a_tilde, b_tilde) : float\n \"\"\"\n\n # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta\n\n # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)\n # Arbitray formation redshift, z_-2 in COM is more physically motivated\n zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837\n\n # Eqn 22 of Correa et al 2015a\n q = 4.137 * zf**(-0.9476)\n\n # Radius of a mass Mi\n R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]\n # Radius of a mass Mi/q\n Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]\n\n # Mass variance 'sigma' evaluate at z=0 to a good approximation\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]\n sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]\n\n f = (sigq**2 - sig**2)**(-0.5)\n\n # Eqn 9 and 10 from Correa et al 2015c\n # (generalised to zi from Correa et al 2015a's z=0 special case)\n # a_tilde is power law growth rate\n a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /\n growthfactor(zi, norm=True, **cosmo)**2 + 1)*f\n # b_tilde is exponential growth rate\n b_tilde = -f\n\n return(a_tilde, b_tilde)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
""" Find cosmological parameters for named cosmo in cosmology.py list """
defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
'wmap1_lss': cg.WMAP1_2dF_mean(),
'wmap3_mean': cg.WMAP3_mean(),
'wmap5_ml': cg.WMAP5_ML(),
'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
'planck13': cg.Planck_2013(),
'planck15': cg.Planck_2015()}
if isinstance(cosmology, dict):
# User providing their own variables
cosmo = cosmology
if 'A_scaling' not in cosmology.keys():
A_scaling = getAscaling(cosmology, newcosmo=True)
cosmo.update({'A_scaling': A_scaling})
# Add extra variables by hand that cosmolopy requires
# note that they aren't used (set to zero)
for paramnames in cg.WMAP5_mean().keys():
if paramnames not in cosmology.keys():
cosmo.update({paramnames: 0})
elif cosmology.lower() in defaultcosmologies.keys():
# Load by name of cosmology instead
cosmo = defaultcosmologies[cosmology.lower()]
A_scaling = getAscaling(cosmology)
cosmo.update({'A_scaling': A_scaling})
else:
print("You haven't passed a dict of cosmological parameters ")
print("OR a recognised cosmology, you gave %s" % (cosmology))
# No idea why this has to be done by hand but should be O_k = 0
cosmo = cp.distance.set_omega_k_0(cosmo)
# Use the cosmology as **cosmo passed to cosmolopy routines
return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
""" NFW conc from Duffy 08 Table 1 for halo mass and redshift"""
if(vir == '200crit'):
if relaxed:
params = [6.71, -0.091, -0.44]
else:
params = [5.71, -0.084, -0.47]
elif(vir == 'tophat'):
if relaxed:
params = [9.23, -0.090, -0.69]
else:
params = [7.85, -0.081, -0.71]
elif(vir == '200mean'):
if relaxed:
params = [11.93, -0.090, -0.99]
else:
params = [10.14, -0.081, -1.01]
else:
print("Didn't recognise the halo boundary definition provided %s"
% (vir))
return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
""" Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------
"""
M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
perturbed_A = (0.796/cosmo['sigma_8']) * \
(M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
return(perturbed_A)
def getAscaling(cosmology, newcosmo=None):
""" Returns the normalisation constant between
Rho_-2 and Rho_mean(z_formation) for a given cosmology
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
newcosmo : str, optional
If cosmology is not from predefined list have to perturbation
A_scaling variable. Defaults to None.
Returns
-------
float
The scaled 'A' relation between rho_2 and rho_crit for the cosmology
"""
# Values from Correa 15c
defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
'wmap1_lss': 853, 'wmap3_mean': 850,
'wmap5_ml': 887, 'wmap5_lss': 887,
'wmap7_lss': 887,
'planck13': 880, 'planck15': 880}
if newcosmo:
# Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
else:
if cosmology.lower() in defaultcosmologies.keys():
A_scaling = defaultcosmologies[cosmology.lower()]
else:
print("Error, don't recognise your cosmology for A_scaling ")
print("You provided %s" % (cosmology))
return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
""" Returns derivative of the linear growth factor at z
for a given cosmology **cosmo """
inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
fz = (1 + z) * inv_h**3
deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\
1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)
return(deriv_g)
def growthfactor(z, norm=True, **cosmo):
""" Returns linear growth factor at a given redshift, normalised to z=0
by default, for a given cosmology
Parameters
----------
z : float or numpy array
The redshift at which the growth factor should be calculated
norm : boolean, optional
If true then normalise the growth factor to z=0 case defaults True
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float or numpy array
The growth factor at a range of redshifts 'z'
Raises
------
"""
H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
cosmo['omega_lambda_0'])
growthval = H * _int_growth(z, **cosmo)
if norm:
growthval /= _int_growth(0, **cosmo)
return(growthval)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Rearrange eqn 18 from Correa et al (2015c) to return
formation redshift for a concentration at a given redshift
Parameters
----------
c : float / numpy array
Concentration of halo
z : float / numpy array
Redshift of halo with concentration c
Ascaling : float
Cosmological dependent scaling between densities, use function
getAscaling('WMAP5') if unsure. Default is 900.
omega_M_0 : float
Mass density of the universe. Default is 0.25
omega_lambda_0 : float
Dark Energy density of the universe. Default is 0.75
Returns
-------
zf : float / numpy array
Formation redshift for halo of concentration 'c' at redshift 'z'
"""
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
rho_2 = 200*(c**3)*Y1/Yc
zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
return(zf)
def calc_ab(zi, Mi, **cosmo):
""" Calculate growth rate indices a_tilde and b_tilde
Parameters
----------
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(a_tilde, b_tilde) : float
"""
# When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
# Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
# Arbitray formation redshift, z_-2 in COM is more physically motivated
zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
# Eqn 22 of Correa et al 2015a
q = 4.137 * zf**(-0.9476)
# Radius of a mass Mi
R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]
# Radius of a mass Mi/q
Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]
# Mass variance 'sigma' evaluate at z=0 to a good approximation
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]
sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]
f = (sigq**2 - sig**2)**(-0.5)
# Eqn 9 and 10 from Correa et al 2015c
# (generalised to zi from Correa et al 2015a's z=0 special case)
# a_tilde is power law growth rate
a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
# b_tilde is exponential growth rate
b_tilde = -f
return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
""" Calculate accretion rate and mass history of a halo at any
redshift 'z' with mass 'Mi' at a lower redshift 'z'
Parameters
----------
z : float
Redshift to solve acc_rate / mass history. Note zi<z
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
"""
# Find parameters a_tilde and b_tilde for initial redshift
# use Eqn 9 and 10 of Correa et al. (2015c)
a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
# Halo mass at z, in Msol
# use Eqn 8 in Correa et al. (2015c)
Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
# Accretion rate at z, Msol yr^-1
# use Eqn 11 from Correa et al. (2015c)
dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
(-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
""" Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
"""
# Ensure that z is a 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
# Create a full array
dMdt_array = np.empty_like(z)
Mz_array = np.empty_like(z)
for i_ind, zval in enumerate(z):
# Solve the accretion rate and halo mass at each redshift step
dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
dMdt_array[i_ind] = dMdt
Mz_array[i_ind] = Mz
return(dMdt_array, Mz_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed

    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.

    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.

    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data

    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)

    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results

    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)

    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUGFIX: open in text mode ('w'), not binary ('wb'). Every
        # write below passes a str, which raises TypeError when written
        # to a binary-mode file handle on Python 3.
        fout = open(filename, 'w')

    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])

        # Now loop over the combination of initial redshift and halo masses
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)

            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()

    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | run | python | def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
filename=None, verbose=None, retcosmo=None):
# Check user choices...
if not com and not mah:
print("User has to choose com=True and / or mah=True ")
return(-1)
# Convert arrays / lists to np.array
# and inflate redshift / mass axis
# to match each other for later loop
results = _checkinput(zi, Mi, z=z, verbose=verbose)
# Return if results is -1
if(results == -1):
return(-1)
# If not, unpack the returned iterable
else:
zi, Mi, z, lenz, lenm, lenzout = results
# At this point we will have lenm objects to iterate over
# Get the cosmological parameters for the given cosmology
cosmo = getcosmo(cosmology)
# Create output file if desired
if filename:
print("Output to file %r" % (filename))
fout = open(filename, 'wb')
# Create the structured dataset
try:
if mah and com:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
"zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" Accretion - Final Halo - concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" rate - mass - - "
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" (dM/dt) - (M200) - - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" [Msol/yr] - [Msol] - - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('dMdt', float),
('Mz', float), ('c', float), ('sig', float),
('nu', float), ('zf', float)])
elif mah:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z -"
" Accretion - Final Halo "+'\n')
fout.write("# - mass - -"
" rate - mass "+'\n')
fout.write("# - (M200) - -"
" (dm/dt) - (M200) "+'\n')
fout.write("# - [Msol] - -"
" [Msol/yr] - [Msol] "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float),
('dMdt', float), ('Mz', float)])
else:
if verbose:
print("Output requested is zi, Mi, z, c, sig, nu, zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" -"
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('c', float),
('sig', float), ('nu', float), ('zf', float)])
    # Now loop over the combination of initial redshift and halo masses
for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
if verbose:
print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
# For a given halo mass Mi at redshift zi need to know
# output redshifts 'z'
# Check that all requested redshifts are greater than
# input redshift, except if z is False, in which case
# only solve z at zi, i.e. remove a loop
if z is False:
ztemp = np.array(zval, ndmin=1, dtype=float)
else:
ztemp = np.array(z[z >= zval], dtype=float)
# Loop over the output redshifts
if ztemp.size:
# Return accretion rates and halo mass progenitors at
# redshifts 'z' for object of mass Mi at zi
dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
if mah and com:
# More expensive to return concentrations
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# Save all arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
if filename:
fout.write(
"{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
zf[j_ind]))
elif mah:
# Save only MAH arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind]))
else:
# Output only COM arrays
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# For any halo mass Mi at redshift zi
# solve for c, sig, nu and zf
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind]))
# Make sure to close the file if it was opened
finally:
fout.close() if filename else None
if retcosmo:
return(dataset, cosmo)
else:
return(dataset) | Run commah code on halo of mass 'Mi' at redshift 'zi' with
accretion and profile history at higher redshifts 'z'
This is based on Correa et al. (2015a,b,c)
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
zi : float / numpy array, optional
Redshift at which halo has mass 'Mi'. If float then all
halo masses 'Mi' are assumed to be at this redshift.
If array but Mi is float, then this halo mass is used across
all starting redshifts. If both Mi and zi are arrays then they
have to be the same size for one - to - one correspondence between
halo mass and the redshift at which it has that mass. Default is 0.
Mi : float / numpy array, optional
Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
are solved for this halo mass. If array but zi is float, then this
redshift is applied to all halo masses. If both Mi and zi are
arrays then they have to be the same size for one - to - one
correspondence between halo mass and the redshift at which it
has that mass. Default is 1e12 Msol.
z : float / numpy array, optional
Redshift to solve commah code at. Must have zi<z else these steps
are skipped. Default is False, meaning commah is solved at z=zi
com : bool, optional
If true then solve for concentration-mass,
default is True.
mah : bool, optional
If true then solve for accretion rate and halo mass history,
default is True.
filename : bool / str, optional
If str is passed this is used as a filename for output of commah
verbose : bool, optional
If true then give comments, default is None.
retcosmo : bool, optional
Return cosmological parameters used as a dict if retcosmo = True,
default is None.
Returns
-------
dataset : structured dataset
dataset contains structured columns of size
(size(Mi) > size(z)) by size(z)
If mah = True and com = False then columns are
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
at starting redshift 'zi'
If mah = False and com = True then columns are
('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
at the redshift 'z', 'sig' is the mass variance 'sigma',
'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
If mah = True and com = True then columns are:
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
('c',float),('sig',float),('nu',float),('zf',float)
file : structured dataset with name 'filename' if passed
Raises
------
Output -1
If com = False and mah = False as user has to select something.
Output -1
If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
corresponding masses and redshifts of output.
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> import examples
>>> examples.runcommands() # A series of ways to query structured dataset
>>> examples.plotcommands() # Examples to plot data | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L555-L799 | [
"def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)\n",
"def _izip(*iterables):\n \"\"\" Iterate through multiple lists or arrays of equal size \"\"\"\n # This izip routine is from itertools\n # izip('ABCD', 'xy') --> Ax By\n\n iterators = map(iter, iterables)\n while iterators:\n yield tuple(map(next, iterators))\n",
"def _checkinput(zi, Mi, z=False, verbose=None):\n \"\"\" Check and convert any input scalar or array to numpy array \"\"\"\n # How many halo redshifts provided?\n zi = np.array(zi, ndmin=1, dtype=float)\n\n # How many halo masses provided?\n Mi = np.array(Mi, ndmin=1, dtype=float)\n\n # Check the input sizes for zi and Mi make sense, if not then exit unless\n # one axis is length one, then replicate values to the size of the other\n if (zi.size > 1) and (Mi.size > 1):\n if(zi.size != Mi.size):\n print(\"Error ambiguous request\")\n print(\"Need individual redshifts for all haloes provided \")\n print(\"Or have all haloes at same redshift \")\n return(-1)\n elif (zi.size == 1) and (Mi.size > 1):\n if verbose:\n print(\"Assume zi is the same for all Mi halo masses provided\")\n # Replicate redshift for all halo masses\n zi = np.ones_like(Mi)*zi[0]\n elif (Mi.size == 1) and (zi.size > 1):\n if verbose:\n print(\"Assume Mi halo masses are the same for all zi provided\")\n # Replicate redshift for all halo masses\n Mi = np.ones_like(zi)*Mi[0]\n else:\n if verbose:\n print(\"A single Mi and zi provided\")\n\n # Very simple test for size / type of incoming array\n # just in case numpy / list given\n if z is False:\n # Didn't pass anything, set zi = z\n lenzout = 1\n else:\n # If something was passed, convert to 1D NumPy array\n z = np.array(z, ndmin=1, dtype=float)\n lenzout = z.size\n\n return(zi, Mi, z, zi.size, Mi.size, lenzout)\n",
"def _getcosmoheader(cosmo):\n \"\"\" Output the cosmology to a string for writing to file \"\"\"\n\n cosmoheader = (\"# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, \"\n \"sigma8:{3:.3f}, ns:{4:.2f}\".format(\n cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],\n cosmo['sigma_8'], cosmo['n']))\n\n return(cosmoheader)\n",
"def MAH(z, zi, Mi, **cosmo):\n \"\"\" Calculate mass accretion history by looping function acc_rate\n over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to output MAH over. Note zi<z always\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n\n # Ensure that z is a 1D NumPy array\n z = np.array(z, ndmin=1, dtype=float)\n\n # Create a full array\n dMdt_array = np.empty_like(z)\n Mz_array = np.empty_like(z)\n\n for i_ind, zval in enumerate(z):\n # Solve the accretion rate and halo mass at each redshift step\n dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)\n\n dMdt_array[i_ind] = dMdt\n Mz_array[i_ind] = Mz\n\n return(dMdt_array, Mz_array)\n",
"def COM(z, M, **cosmo):\n \"\"\" Calculate concentration for halo of mass 'M' at redshift 'z'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to find concentration of halo\n M : float / numpy array\n Halo mass at redshift 'z'. Must be same size as 'z'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (c_array, sig_array, nu_array, zf_array) : float / numpy arrays\n of equivalent size to 'z' and 'M'. Variables are\n Concentration, Mass Variance 'sigma' this corresponds too,\n the dimnesionless fluctuation this represents and formation redshift\n\n \"\"\"\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n\n for i_ind, (zval, Mval) in enumerate(_izip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n 
omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Resolve a cosmology name or user dict to a full parameter dict.

    Accepts either a user-supplied dict of parameters or the name of a
    predefined cosmology; in both cases the rho_-2 / rho_crit scaling
    'A_scaling' is attached before the dict is handed to cosmolopy.
    """
    # Table of the named cosmologies shipped with commah
    named = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
             'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
             'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
             'wmap1_lss': cg.WMAP1_2dF_mean(),
             'wmap3_mean': cg.WMAP3_mean(),
             'wmap5_ml': cg.WMAP5_ML(),
             'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
             'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
             'planck13': cg.Planck_2013(),
             'planck15': cg.Planck_2015()}

    if isinstance(cosmology, dict):
        # User supplied their own parameter dict
        cosmo = cosmology
        if 'A_scaling' not in cosmo.keys():
            # Perturb the WMAP5 scaling for this new cosmology
            cosmo.update({'A_scaling': getAscaling(cosmology,
                                                   newcosmo=True)})
        # Pad out parameters cosmolopy expects but commah never uses
        # (set to zero so the dict is structurally complete)
        for key in cg.WMAP5_mean().keys():
            if key not in cosmo.keys():
                cosmo.update({key: 0})
    elif cosmology.lower() in named.keys():
        # Load the named cosmology and attach its tabulated A_scaling
        cosmo = named[cosmology.lower()]
        cosmo.update({'A_scaling': getAscaling(cosmology)})
    else:
        print("You haven't passed a dict of cosmological parameters ")
        print("OR a recognised cosmology, you gave %s" % (cosmology))

    # Flat universe: ensure omega_k_0 is set (to zero) for cosmolopy
    cosmo = cp.distance.set_omega_k_0(cosmo)

    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW concentration from the Duffy et al. (2008) Table 1 fits.

    Parameters
    ----------
    z : float / numpy array
        Redshift
    M : float / numpy array
        Halo mass [Msol]; the fits use a pivot mass of 2e12/h, h=0.72
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-halo fit (True) or the full-sample fit (False)

    Returns
    -------
    float / numpy array
        NFW concentration c(M, z) = A * (M/M_pivot)^B * (1+z)^C

    Raises
    ------
    ValueError
        If 'vir' is not a recognised halo boundary definition.
        (BUGFIX: previously the function printed a warning and then
        crashed with an uninformative NameError on undefined 'params'.)
    """
    # (A, B, C) fit parameters keyed by boundary definition and sample
    fits = {'200crit': {True: (6.71, -0.091, -0.44),
                        False: (5.71, -0.084, -0.47)},
            'tophat': {True: (9.23, -0.090, -0.69),
                       False: (7.85, -0.081, -0.71)},
            '200mean': {True: (11.93, -0.090, -0.99),
                        False: (10.14, -0.081, -1.01)}}

    if vir not in fits:
        print("Didn't recognise the halo boundary definition provided %s"
              % (vir))
        raise ValueError("Unknown halo boundary definition %r" % (vir))

    A, B, C = fits[vir][bool(relaxed)]
    # Pivot mass is 2e12/h with h=0.72 as in Duffy et al. (2008)
    return(A * ((M/(2e12/0.72))**B) * ((1+z)**C))
def _delta_sigma(**cosmo):
    """ Perturb the best-fit constant of proportionality 'Ascaling' of
    the rho_crit - rho_-2 relation for an unknown cosmology
    (Correa et al 2015c)

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float
        Multiplicative perturbation of the 'A' relation between rho_2
        and rho_crit for this cosmology
    """
    # Mass enclosed in an 8 Mpc/h sphere for this cosmology
    M8 = cp.perturbation.radius_to_mass(8, **cosmo)

    # Rescale relative to the reference sigma_8=0.796, n=0.963 values
    return((0.796/cosmo['sigma_8']) *
           (M8/2.5e14)**((cosmo['n']-0.963)/6))
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15,
        or (when newcosmo is set) a dict of cosmological parameters
    newcosmo : str, optional
        If cosmology is not from the predefined list the A_scaling
        variable is perturbed from the WMAP5 value. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit

    Raises
    ------
    ValueError
        If a named cosmology is not recognised.
        (BUGFIX: previously the function printed an error and then
        crashed with a NameError on the undefined return variable.)
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}

    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        return(defaultcosmologies['wmap5'] * _delta_sigma(**cosmology))

    if cosmology.lower() in defaultcosmologies.keys():
        return(defaultcosmologies[cosmology.lower()])

    print("Error, don't recognise your cosmology for A_scaling ")
    print("You provided %s" % (cosmology))
    raise ValueError("Unrecognised cosmology %r for A_scaling" % (cosmology))
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Derivative (d/dz) of the normalised linear growth factor at
    redshift z for the given cosmology **cosmo """
    # 1/E(z) with E(z)^2 = Om*(1+z)^3 + Ol
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3

    # Normalised growth factor D(z) enters both product-rule terms
    D = growthfactor(z, norm=True, **cosmo)
    return(D * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -
           fz * D/_int_growth(z, **cosmo))
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor at redshift 'z' for a given cosmology,
    normalised to the z=0 value by default.

    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0, defaults True
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float or numpy array
        The growth factor at the requested redshift(s) 'z'
    """
    # D(z) proportional to E(z) times the growth integral
    Ez = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                 cosmo['omega_lambda_0'])
    D = Ez * _int_growth(z, **cosmo)

    if norm:
        # Normalise so that D(z=0) = 1
        return(D / _int_growth(0, **cosmo))
    return(D)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Formation redshift z_-2 for a halo of concentration 'c' at
    redshift 'z', from rearranging Eqn 18 of Correa et al. (2015c).

    Parameters
    ----------
    c : float / numpy array
        Halo concentration
    z : float / numpy array
        Redshift at which the halo has concentration 'c'
    Ascaling : float
        Cosmology-dependent density scaling; use getAscaling('WMAP5')
        if unsure. Default is 900.
    omega_M_0 : float
        Matter density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for the halo
    """
    # NFW mass terms
    ln2m = np.log(2) - 0.5
    mass_term = np.log(1+c) - c/(1+c)
    # Mean density within the scale radius in units of the critical density
    rho_2 = 200*(c**3)*ln2m/mass_term
    # Redshift at which the background matter density equalled rho_2
    return((((1+z)**3 + omega_lambda_0/omega_M_0) *
            (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth-rate indices
    """
    # When zi = 0, a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytic solution of Eqns 16 and 17)
    # Arbitrary formation redshift; z_-2 in COM is more physically motivated
    logM = np.log10(Mi)
    zf = -0.0064 * logM**2 + 0.0237 * logM + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Lagrangian radii of masses Mi and Mi/q [Mpc]
    radius_M = cp.perturbation.mass_to_radius(Mi, **cosmo)
    radius_Mq = cp.perturbation.mass_to_radius(Mi/q, **cosmo)
    # Mass variance 'sigma'; z=0 is a good approximation here
    sig, _err = cp.perturbation.sigma_r(radius_M, 0, **cosmo)
    sigq, _errq = cp.perturbation.sigma_r(radius_Mq, 0, **cosmo)
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqns 9 and 10 from Correa et al 2015c (generalised to zi from the
    # z=0 special case of Correa et al 2015a)
    # a_tilde: power-law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde: exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Accretion rate and mass history of a halo at redshift 'z' given
    its mass 'Mi' at a lower redshift 'zi'.

    Parameters
    ----------
    z : float
        Redshift at which to solve acc_rate / mass history. Note zi<z
    zi : float
        Starting (lower) redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Growth indices at the starting redshift (Eqns 9 and 10, Correa 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Eqn 8 of Correa et al. (2015c): halo mass at z [Msol]
    growth = 1 + z - zi
    Mz = Mi * (growth**a_tilde) * (np.exp(b_tilde * (z - zi)))
    # Eqn 11 of Correa et al. (2015c): accretion rate at z [Msol/yr]
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / growth - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Mass accretion history: acc_rate evaluated at each redshift in
    'z' for a halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output MAH over. Note zi<z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at each redshift
    """
    # Work with a 1D float array regardless of the input form
    zsteps = np.array(z, ndmin=1, dtype=float)
    # Output arrays, one entry per redshift step
    dMdt_array = np.empty_like(zsteps)
    Mz_array = np.empty_like(zsteps)
    # Solve accretion rate and halo mass at every step
    for idx, zval in enumerate(zsteps):
        dMdt_array[idx], Mz_array[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'
    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
        Must also carry 'A_scaling' (see getAscaling).
    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds to,
        the dimensionless fluctuation this represents and formation redshift.
        Entries are set to -1 when the concentration solve fails.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    # _izip pairs each redshift with its mass; presumably a py2/3
    # zip-iterator alias -- confirm at the import site
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # NOTE: args order must match _minimize_c's positional parameters
        # (z, a_tilde, b_tilde, Ascaling, omega_M_0, omega_lambda_0);
        # the bracket [2, 1000] is assumed to contain the root
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            # Sentinel values flag the failed solve in all outputs
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            # Mass variance at z=0 for the Lagrangian radius of Mval
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Dimensionless peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
|
astroduff/commah | commah/cosmology_list.py | DRAGONS | python | def DRAGONS(flat=False, extras=True):
omega_c_0 = 0.2292
omega_b_0 = 0.0458
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.725,
'h': 0.702,
'n': 0.963,
'sigma_8': 0.816,
'tau': 0.088,
'z_reion': 10.6,
't_0': 13.76,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo | DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from
Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24. | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/cosmology_list.py#L20-L55 | [
"def add_extras(cosmo):\n \"\"\"Sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n \"\"\"\n extras = {'omega_n_0': 0.0,\n 'N_nu': 0,\n 'Y_He': 0.24}\n\n cosmo.update(extras)\n return cosmo\n"
] | """ Some pre-defined sets of cosmological parameters (e.g. from WMAP)
copied and expanded from the cosmolopy list that's no longer updated.
"""
from __future__ import absolute_import, division, print_function
def add_extras(cosmo):
    """Add default 'extras' to a cosmology dict in place: neutrino
    number N_nu = 0, neutrino density omega_n_0 = 0.0, Helium mass
    fraction Y_He = 0.24. Returns the same (mutated) dict.
    """
    cosmo['omega_n_0'] = 0.0
    cosmo['N_nu'] = 0
    cosmo['Y_He'] = 0.24
    return cosmo
def WMAP1_2dF_mean(flat=False, extras=True):
    """WMAP1 with 2dF and ACBAR results from
    Spergel et al. (2003) ApJS 148 175S (arXiv:astro-ph/0302209)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.044,
             'omega_M_0': 0.044 + 0.206,  # baryons + cold dark matter
             'omega_lambda_0': 0.75,
             'h': 0.73,
             'n': 0.97,
             'sigma_8': 0.9,
             'tau': 0.148,
             'z_reion': 17.,
             't_0': 13.7}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP1_Mill(flat=False, extras=True):
    """WMAP1 Millennium simulation cosmology.

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.044,
             'omega_M_0': 0.044 + 0.206,  # baryons + cold dark matter
             'omega_lambda_0': 0.75,
             'h': 0.73,
             'n': 1.0,
             'sigma_8': 0.9,
             'tau': 0.148,
             'z_reion': 17.,
             't_0': 13.7}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP3_mean(flat=False, extras=True):
    """WMAP3 mean fit from Spergel et al. (2007) ApJS 170 377-408
    (arXiv:astro-ph/0603449)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.041,
             'omega_M_0': 0.041 + 0.196,  # baryons + cold dark matter
             'omega_lambda_0': 0.763,
             'h': 0.73,
             'n': 0.954,
             'sigma_8': 0.756,
             'tau': 0.091,
             'z_reion': 11.3,
             't_0': 13.73}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP3_ML(flat=False, extras=True):
    """WMAP3 Maximum Likelihood from Spergel et al. (2007) ApJS 170 377-408
    (arXiv:astro-ph/0603449)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.0411,
             'omega_M_0': 0.0411 + 0.1959,  # baryons + cold dark matter
             'omega_lambda_0': 0.763,
             'h': 0.732,
             'n': 0.954,
             'sigma_8': 0.756,
             'tau': 0.091,
             'z_reion': 11.3,
             't_0': 13.73}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP5_BAO_SN_mean(flat=False, extras=True):
    """WMAP5 + BAO + SN parameters from Komatsu et al. (2009ApJS..180..330K).

    Derived from WMAP data combined with Type Ia supernova (SN) distance
    measurements and Baryon Acoustic Oscillations (BAO): in particular
    sigma_8 = 0.812, H0 = 70.5 km/s/Mpc (the paper quotes 70.5 +/- 1.3;
    this dict stores h = 0.706), Omega_b = 0.0456, Omega_c = 0.228
    (stored as 0.2284), z_reion = 10.9 and t0 = 13.72 Gyr.

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.0456,
             'omega_M_0': 0.0456 + 0.2284,  # baryons + cold dark matter
             'omega_lambda_0': 0.726,
             'h': 0.706,
             'n': 0.960,
             'sigma_8': 0.812,
             'tau': 0.084,
             'z_reion': 10.9,
             't_0': 13.72}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP5_ML(flat=False, extras=True):
    """WMAP5 maximum-likelihood parameters (WMAP data alone) from
    Komatsu et al. (2009ApJS..180..330K), Table 1 "WMAP 5 Year ML".

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.043,
             'omega_M_0': 0.043 + 0.206,  # baryons + cold dark matter
             'omega_lambda_0': 0.751,
             'h': 0.724,
             'n': 0.961,
             'sigma_8': 0.787,
             'tau': 0.089,
             'z_reion': 11.2,
             't_0': 13.69}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP5_mean(flat=False, extras=True):
    """WMAP5 mean parameters (WMAP data alone) from
    Komatsu et al. (2009ApJS..180..330K), Table 1 "WMAP 5 Year Mean".

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.044,
             'omega_M_0': 0.044 + 0.214,  # baryons + cold dark matter
             'omega_lambda_0': 0.742,
             'h': 0.719,
             'n': 0.963,
             'sigma_8': 0.796,
             'tau': 0.087,
             'z_reion': 11.0,
             't_0': 13.69}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP7_ML(flat=False, extras=True):
    """WMAP7 ML parameters from Komatsu et al. (2011) ApJS 192 18K
    (arxiv:1001.4538v1)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.0445,
             'omega_M_0': 0.0445 + 0.2175,  # baryons + cold dark matter
             'omega_lambda_0': 0.738,
             'h': 0.714,
             'n': 0.969,
             'sigma_8': 0.803,
             'tau': 0.086,
             'z_reion': 10.3,
             't_0': 13.71}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP7_BAO_H0_mean(flat=False, extras=True):
    """WMAP7 + BAO + H_0 parameters from Komatsu et al. (2011) ApJS 192 18K
    (arxiv:1001.4538v1)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.0456,
             'omega_M_0': 0.0456 + 0.2264,  # baryons + cold dark matter
             'omega_lambda_0': 0.728,
             'h': 0.704,
             'n': 0.963,
             'sigma_8': 0.809,
             'tau': 0.087,
             'z_reion': 10.4,
             't_0': 13.75}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP9_ML(flat=False, extras=True):
    """WMAP9 Maximum Likelihood from Hinshaw et al. (2013) ApJS 208 19
    (arxiv:1212.5226v3)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.0465,
             'omega_M_0': 0.0465 + 0.235,  # baryons + cold dark matter
             'omega_lambda_0': 0.7185,
             'h': 0.693,
             'n': 0.971,
             'sigma_8': 0.820,
             'tau': 0.0851,
             'z_reion': 10.36,
             't_0': 13.76}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def Planck_2013(flat=False, extras=True):
    """Planck 2013 XVI: Scalar Perturbations only (Maximum Likelihood)
    from Ade et al. (2013) A&A 571 16 (arxiv:1303.5076)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.05,
             'omega_M_0': 0.05 + 0.267,  # baryons + cold dark matter
             'omega_lambda_0': 0.683,
             'h': 0.671,
             'n': 0.9624,
             'sigma_8': 0.82344,
             'tau': 0.0925,
             'z_reion': 11.35,
             't_0': 13.82}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def Planck_2015(flat=False, extras=True):
    """Planck 2015 XIII: Cosmological parameters, Table 4,
    column Planck TT, TE, EE + lowP + lensing + ext,
    from Ade et al. (2015) A&A in press (arxiv:1502.01589v1)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.02230 / 0.6774**2,  # Omega_b h^2 / h^2
             'omega_M_0': 0.3089,
             'omega_lambda_0': 0.6911,
             'h': 0.6774,
             'n': 0.9667,
             'sigma_8': 0.8159,
             'tau': 0.066,
             'z_reion': 8.8,
             't_0': 13.799}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
|
astroduff/commah | commah/cosmology_list.py | Planck_2015 | python | def Planck_2015(flat=False, extras=True):
omega_b_0 = 0.02230/(0.6774**2)
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': 0.3089,
'omega_lambda_0': 0.6911,
'h': 0.6774,
'n': 0.9667,
'sigma_8': 0.8159,
'tau': 0.066,
'z_reion': 8.8,
't_0': 13.799,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo | Planck 2015 XII: Cosmological parameters Table 4
column Planck TT, TE, EE + lowP + lensing + ext
from Ade et al. (2015) A&A in press (arxiv:1502.01589v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24. | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/cosmology_list.py#L512-L547 | [
"def add_extras(cosmo):\n \"\"\"Sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n \"\"\"\n extras = {'omega_n_0': 0.0,\n 'N_nu': 0,\n 'Y_He': 0.24}\n\n cosmo.update(extras)\n return cosmo\n"
] | """ Some pre-defined sets of cosmological parameters (e.g. from WMAP)
copied and expanded from the cosmolopy list that's no longer updated.
"""
from __future__ import absolute_import, division, print_function
def add_extras(cosmo):
"""Sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
extras = {'omega_n_0': 0.0,
'N_nu': 0,
'Y_He': 0.24}
cosmo.update(extras)
return cosmo
def DRAGONS(flat=False, extras=True):
    """DRAGONS cosmology; assumes the WMAP7 + BAO + H_0 mean from
    Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)

    Parameters
    ----------
    flat: boolean
        If True, force a flat universe: omega_lambda_0 = 1 - omega_M_0
        and omega_k_0 = 0 exactly.
    extras: boolean
        If True, add N_nu = 0, omega_n_0 = 0.0 and Y_He = 0.24.
    """
    cosmo = {'omega_b_0': 0.0458,
             'omega_M_0': 0.0458 + 0.2292,  # baryons + cold dark matter
             'omega_lambda_0': 0.725,
             'h': 0.702,
             'n': 0.963,
             'sigma_8': 0.816,
             'tau': 0.088,
             'z_reion': 10.6,
             't_0': 13.76}
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def WMAP1_2dF_mean(flat=False, extras=True):
"""WMAP1 with 2dF and ACBAR results from
Spergel et al. (2003) ApJS 148 175S (arXiv:astro-ph/0302209)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.206
omega_b_0 = 0.044
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.75,
'h': 0.73,
'n': 0.97,
'sigma_8': 0.9,
'tau': 0.148,
'z_reion': 17.,
't_0': 13.7,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP1_Mill(flat=False, extras=True):
"""WMAP1 Millennium cosmology
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.206
omega_b_0 = 0.044
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.75,
'h': 0.73,
'n': 1.0,
'sigma_8': 0.9,
'tau': 0.148,
'z_reion': 17.,
't_0': 13.7,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP3_mean(flat=False, extras=True):
"""WMAP3 mean fit from Spergel et al. (2007) ApJS 170 377-408
(arXiv:astro-ph/0603449)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.196
omega_b_0 = 0.041
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.763,
'h': 0.73,
'n': 0.954,
'sigma_8': 0.756,
'tau': 0.091,
'z_reion': 11.3,
't_0': 13.73,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP3_ML(flat=False, extras=True):
"""WMAP3 Maximum Liklihood from Spergel et al. (2007) ApJS 170 377-408
(arXiv:astro-ph/0603449)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.1959
omega_b_0 = 0.0411
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.763,
'h': 0.732,
'n': 0.954,
'sigma_8': 0.756,
'tau': 0.091,
'z_reion': 11.3,
't_0': 13.73,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP5_BAO_SN_mean(flat=False, extras=True):
"""WMAP5 + BAO + SN parameters from Komatsu et al. (2009ApJS..180..330K).
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
Notes
-----
From the abstract of the paper:
The six parameters and the corresponding 68% uncertainties,
derived from the WMAP data combined with the distance
measurements from the Type Ia supernovae (SN) and the Baryon
Acoustic Oscillations (BAO) in the distribution of galaxies,
are:
Omega_B h^2 = 0.02267+0.00058-0.00059,
Omega_c h^2 = 0.1131 +/- 0.0034,
Omega_Lambda = 0.726 +/- 0.015,
n_s = 0.960 +/- 0.013,
tau = 0.084 +/- 0.016, and
Delata^2 R = (2.445 +/- 0.096) * 10^-9 at k = 0.002 Mpc^-1.
From these, we derive
sigma_8 = 0.812 +/- 0.026,
H0 = 70.5 +/- 1.3 km s^-11 Mpc^-1,
Omega_b = 0.0456 +/- 0.0015,
Omega_c = 0.228 +/- 0.013,
Omega_m h^2 = 0.1358 + 0.0037 - 0.0036,
zreion = 10.9 +/- 1.4, and
t0 = 13.72 +/- 0.12 Gyr
"""
omega_c_0 = 0.2284
omega_b_0 = 0.0456
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.726,
'h': 0.706,
'n': 0.960,
'sigma_8': 0.812,
'tau': 0.084,
'z_reion': 10.9,
't_0': 13.72
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP5_ML(flat=False, extras=True):
"""WMAP5 parameters (using WMAP data alone) from Komatsu et
al. (2009ApJS..180..330K).
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
Notes
-----
Values taken from "WMAP 5 Year ML" column of Table 1 of the paper.
"""
omega_c_0 = 0.206
omega_b_0 = 0.043
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.751,
'h': 0.724,
'n': 0.961,
'sigma_8': 0.787,
'tau': 0.089,
'z_reion': 11.2,
't_0': 13.69
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP5_mean(flat=False, extras=True):
"""WMAP5 parameters (using WMAP data alone) from Komatsu et
al. (2009ApJS..180..330K).
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
Notes
-----
Values taken from "WMAP 5 Year Mean" of Table 1 of the paper.
"""
omega_c_0 = 0.214
omega_b_0 = 0.044
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.742,
'h': 0.719,
'n': 0.963,
'sigma_8': 0.796,
'tau': 0.087,
'z_reion': 11.0,
't_0': 13.69
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP7_ML(flat=False, extras=True):
"""WMAP7 ML parameters from Komatsu et al. (2011) ApJS 192 18K
(arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.2175
omega_b_0 = 0.0445
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.738,
'h': 0.714,
'n': 0.969,
'sigma_8': 0.803,
'tau': 0.086,
'z_reion': 10.3,
't_0': 13.71,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP7_BAO_H0_mean(flat=False, extras=True):
"""WMAP7 + BAO + H_0 parameters from Komatsu et al. (2011) ApJS 192 18K
(arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.2264 # 0.228
omega_b_0 = 0.0456 # 0.0456
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.728, # 0.726,
'h': 0.704, # 0.706,
'n': 0.963, # 0.960,
'sigma_8': 0.809, # 0.812,
'tau': 0.087, # 0.084,
'z_reion': 10.4, # 10.9,
't_0': 13.75, # 13.72
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def WMAP9_ML(flat=False, extras=True):
"""WMAP Maximum Likelihood from Hinshaw et al. (2013) ApJS 208 19
(arxiv:1212.5226v3)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.235
omega_b_0 = 0.0465
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.7185,
'h': 0.693,
'n': 0.971,
'sigma_8': 0.820,
'tau': 0.0851,
'z_reion': 10.36,
't_0': 13.76,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
def Planck_2013(flat=False, extras=True):
"""Planck 2013 XVI: Scalar Perturbations only (Maximum Likelihood)
from Ade et al. (2013) A&A 571 16 (arxiv:1303.5076)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.267
omega_b_0 = 0.05
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.683,
'h': 0.671,
'n': 0.9624,
'sigma_8': 0.82344,
'tau': 0.0925,
'z_reion': 11.35,
't_0': 13.82,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo
|
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.write | python | def write(self, rows, keyed=False):
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr | Write rows/keyed_rows to table | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L32-L52 | [
"def __insert(self):\n \"\"\"Insert rows to table\n \"\"\"\n if len(self.__buffer) > 0:\n # Insert data\n statement = self.__table.insert()\n if self.__autoincrement:\n statement = statement.returning(\n getattr(self.__table.c, self.__autoincrement))\n statement = statement.values(self.__buffer)\n res = statement.execute()\n for id, in res:\n row = self.__buffer.pop(0)\n yield WrittenRow(row, False, id)\n else:\n statement.execute(self.__buffer)\n for row in self.__buffer:\n yield WrittenRow(row, False, None)\n # Clean memory\n self.__buffer = []\n",
"def __check_existing(self, row):\n \"\"\"Check if row exists in table\n \"\"\"\n if self.__update_keys is not None:\n key = tuple(row[key] for key in self.__update_keys)\n if key in self.__bloom:\n return True\n self.__bloom.add(key)\n return False\n return False\n"
] | class Writer(object):
# Public
def __init__(self, table, schema, update_keys, autoincrement, convert_row):
    """Writer to insert/update rows into table

    table: SQLAlchemy ``Table`` the rows are written to.
    schema: tableschema ``Schema`` whose field names key positional rows.
    update_keys: list of column names identifying existing rows to update,
        or ``None`` to disable the update path entirely.
    autoincrement: name of the autoincrement column (or ``None``); when set,
        written rows carry the generated id.
    convert_row: callable mapping a keyed row to database values.
    """
    self.__table = table
    self.__schema = schema
    self.__update_keys = update_keys
    self.__autoincrement = autoincrement
    self.__convert_row = convert_row
    # Rows accumulate here and are flushed in batches by __insert()
    self.__buffer = []
    # The bloom filter is only needed when the update path is enabled
    if update_keys is not None:
        self.__prepare_bloom()
# Private
def __prepare_bloom(self):
    """Prepare bloom filter for existing checks

    Seeds a scalable bloom filter with the update-key tuple of every row
    already present in the table, so __check_existing() can cheaply test
    whether an incoming row may already exist.
    """
    self.__bloom = pybloom_live.ScalableBloomFilter()
    columns = [getattr(self.__table.c, key) for key in self.__update_keys]
    # Stream results to avoid materializing the whole key set at once
    keys = select(columns).execution_options(stream_results=True).execute()
    for key in keys:
        self.__bloom.add(tuple(key))
def __insert(self):
    """Insert buffered rows into the table (generator)

    Flushes ``self.__buffer`` with a single INSERT and yields one
    WrittenRow per inserted row.  With an autoincrement column the
    statement RETURNs the generated ids, which are paired with the
    buffered rows in order; otherwise the id is None.  The buffer is
    emptied only after all rows have been yielded.
    """
    if len(self.__buffer) > 0:
        # Insert data
        statement = self.__table.insert()
        if self.__autoincrement:
            statement = statement.returning(
                getattr(self.__table.c, self.__autoincrement))
            statement = statement.values(self.__buffer)
            res = statement.execute()
            for id, in res:
                # Pair each returned id with the oldest buffered row
                row = self.__buffer.pop(0)
                yield WrittenRow(row, False, id)
        else:
            # executemany-style insert; no ids are returned
            statement.execute(self.__buffer)
            for row in self.__buffer:
                yield WrittenRow(row, False, None)
        # Clean memory
        self.__buffer = []
def __update(self, row):
    """Update matching rows in the table.

    Builds an UPDATE filtered by the configured update keys.  Returns the
    autoincrement id of the updated row when one is configured, 0 when
    the update matched rows without an autoincrement column, and None
    when nothing was updated.
    """
    statement = self.__table.update().values(row)
    for key in self.__update_keys:
        column = getattr(self.__table.c, key)
        statement = statement.where(column == row[key])
    if self.__autoincrement:
        statement = statement.returning(
            getattr(self.__table.c, self.__autoincrement))
    result = statement.execute()
    # Nothing matched -> signal the caller to fall back to insert
    if result.rowcount <= 0:
        return None
    if not self.__autoincrement:
        return 0
    # First column of the first RETURNING row is the generated id
    first_returned = next(iter(result))
    return first_returned[0]
def __check_existing(self, row):
"""Check if row exists in table
"""
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False
|
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__prepare_bloom | python | def __prepare_bloom(self):
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key)) | Prepare bloom for existing checks | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L56-L63 | null | class Writer(object):
# Public
def __init__(self, table, schema, update_keys, autoincrement, convert_row):
"""Writer to insert/update rows into table
"""
self.__table = table
self.__schema = schema
self.__update_keys = update_keys
self.__autoincrement = autoincrement
self.__convert_row = convert_row
self.__buffer = []
if update_keys is not None:
self.__prepare_bloom()
def write(self, rows, keyed=False):
    """Write rows/keyed_rows to table (generator of WrittenRow)

    rows: iterable of positional rows, or of keyed dicts when ``keyed``.
    keyed: when False, each row is zipped with the schema field names.

    Rows whose update keys were seen before trigger a buffer flush and
    an UPDATE; all other rows are buffered and inserted in batches of
    BUFFER_SIZE, with a final flush at the end.
    """
    for row in rows:
        keyed_row = row
        if not keyed:
            # Positional row -> dict keyed by schema field names
            keyed_row = dict(zip(self.__schema.field_names, row))
        keyed_row = self.__convert_row(keyed_row)
        if self.__check_existing(keyed_row):
            # Flush pending inserts first so the UPDATE can see them
            for wr in self.__insert():
                yield wr
            ret = self.__update(keyed_row)
            if ret is not None:
                yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
                continue
            # ret is None: bloom false positive -- fall through to insert
        self.__buffer.append(keyed_row)
        if len(self.__buffer) > BUFFER_SIZE:
            for wr in self.__insert():
                yield wr
    # Final flush of any remaining buffered rows
    for wr in self.__insert():
        yield wr
# Private
def __insert(self):
"""Insert rows to table
"""
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = []
def __update(self, row):
"""Update rows in table
"""
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None
def __check_existing(self, row):
"""Check if row exists in table
"""
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False
|
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__insert | python | def __insert(self):
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = [] | Insert rows to table | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L65-L84 | null | class Writer(object):
# Public
def __init__(self, table, schema, update_keys, autoincrement, convert_row):
"""Writer to insert/update rows into table
"""
self.__table = table
self.__schema = schema
self.__update_keys = update_keys
self.__autoincrement = autoincrement
self.__convert_row = convert_row
self.__buffer = []
if update_keys is not None:
self.__prepare_bloom()
def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr
# Private
def __prepare_bloom(self):
"""Prepare bloom for existing checks
"""
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key))
def __update(self, row):
"""Update rows in table
"""
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None
def __check_existing(self, row):
"""Check if row exists in table
"""
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False
|
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__update | python | def __update(self, row):
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None | Update rows in table | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L86-L101 | null | class Writer(object):
# Public
def __init__(self, table, schema, update_keys, autoincrement, convert_row):
"""Writer to insert/update rows into table
"""
self.__table = table
self.__schema = schema
self.__update_keys = update_keys
self.__autoincrement = autoincrement
self.__convert_row = convert_row
self.__buffer = []
if update_keys is not None:
self.__prepare_bloom()
def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr
# Private
def __prepare_bloom(self):
"""Prepare bloom for existing checks
"""
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key))
def __insert(self):
"""Insert rows to table
"""
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = []
def __check_existing(self, row):
"""Check if row exists in table
"""
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False
|
frictionlessdata/tableschema-sql-py | tableschema_sql/writer.py | Writer.__check_existing | python | def __check_existing(self, row):
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False | Check if row exists in table | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/writer.py#L103-L112 | null | class Writer(object):
# Public
def __init__(self, table, schema, update_keys, autoincrement, convert_row):
"""Writer to insert/update rows into table
"""
self.__table = table
self.__schema = schema
self.__update_keys = update_keys
self.__autoincrement = autoincrement
self.__convert_row = convert_row
self.__buffer = []
if update_keys is not None:
self.__prepare_bloom()
def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr
# Private
def __prepare_bloom(self):
"""Prepare bloom for existing checks
"""
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key))
def __insert(self):
"""Insert rows to table
"""
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = []
def __update(self, row):
"""Update rows in table
"""
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.buckets | python | def buckets(self):
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets | https://github.com/frictionlessdata/tableschema-sql-py#storage | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L57-L65 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all()
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0)
# Private
def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name]
def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.create | python | def create(self, bucket, descriptor, force=False, indexes_fields=None):
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all() | https://github.com/frictionlessdata/tableschema-sql-py#storage | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L67-L107 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    engine: SQLAlchemy engine; a connection is opened immediately.
    dbschema: optional database schema to scope all tables.
    prefix: table-name prefix handed to the Mapper.
    reflect_only: optional predicate limiting which tables are reflected.
    autoincrement: optional autoincrement column name used by buckets.
    """
    # Set attributes
    self.__connection = engine.connect()
    self.__dbschema = dbschema
    self.__prefix = prefix
    self.__descriptors = {}
    self.__fallbacks = {}
    self.__autoincrement = autoincrement
    # Default filter accepts every table name
    self.__only = reflect_only or (lambda _: True)
    # Create mapper
    self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
    # Create metadata and reflect
    self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
    self.__reflect()
def __repr__(self):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    Human-readable identification of the backing engine and schema.
    """
    return 'Storage <{engine}/{dbschema}>'.format(
        engine=self.__connection.engine,
        dbschema=self.__dbschema)
@property
def buckets(self):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    List of buckets restored from the reflected table names; tables the
    mapper cannot map back to a bucket are skipped.
    """
    restored = (self.__mapper.restore_bucket(table.name)
                for table in self.__metadata.sorted_tables)
    return [bucket for bucket in restored if bucket is not None]
def delete(self, bucket=None, ignore=False):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    bucket: a bucket name, a list of bucket names, or None to delete all
        reflected buckets.
    ignore: when True, silently skip buckets that do not exist instead of
        raising StorageError.
    """
    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    elif bucket is None:
        buckets = reversed(self.buckets)
    # Snapshot the existing buckets once; the property walks the
    # reflected metadata, and nothing inside the loop changes it
    existent = self.buckets
    # Iterate
    tables = []
    for bucket in buckets:
        # Check existent
        if bucket not in existent:
            if not ignore:
                message = 'Bucket "%s" doesn\'t exist.' % bucket
                raise tableschema.exceptions.StorageError(message)
            # Fix: skip only this missing bucket.  A bare `return` here
            # used to abort the whole call, leaving the already-collected
            # tables undropped even though ignore=True asked to proceed.
            continue
        # Remove from buckets
        if bucket in self.__descriptors:
            del self.__descriptors[bucket]
        # Add table to tables
        table = self.__get_table(bucket)
        tables.append(table)
    # Drop tables, update metadata
    self.__metadata.drop_all(tables=tables)
    self.__metadata.clear()
    self.__reflect()
def describe(self, bucket, descriptor=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    With a descriptor argument: cache it for the bucket and return it.
    Without one: return the cached descriptor, restoring it from the
    reflected table when not cached yet.
    """
    if descriptor is not None:
        # Setter path: remember the descriptor for later calls
        self.__descriptors[bucket] = descriptor
        return descriptor
    cached = self.__descriptors.get(bucket)
    if cached is not None:
        return cached
    # Not cached: rebuild the descriptor from the reflected table
    table = self.__get_table(bucket)
    return self.__mapper.restore_descriptor(
        table.name, table.columns, table.constraints, self.__autoincrement)
def iter(self, bucket):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    Yield the bucket's rows one by one (generator), each restored to
    schema types by the mapper.
    """
    # Get table and fallbacks
    table = self.__get_table(bucket)
    schema = tableschema.Schema(self.describe(bucket))
    # Open and close transaction
    with self.__connection.begin():
        # Streaming could be not working for some backends:
        # http://docs.sqlalchemy.org/en/latest/core/connections.html
        select = table.select().execution_options(stream_results=True)
        result = select.execute()
        for row in result:
            # Map database values back to schema types
            row = self.__mapper.restore_row(row, schema=schema)
            yield row
def read(self, bucket):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    Return all rows of the bucket as a list (materializes self.iter).
    """
    return list(self.iter(bucket))
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    bucket: target bucket name.
    rows: iterable of positional rows, or of keyed dicts when ``keyed``.
    as_generator: return the lazy WrittenRow generator instead of
        consuming it eagerly.
    update_keys: field names used to detect and update existing rows.
    """
    # Check update keys
    if update_keys is not None and len(update_keys) == 0:
        message = 'Argument "update_keys" cannot be an empty list'
        raise tableschema.exceptions.StorageError(message)
    # Get table and description
    table = self.__get_table(bucket)
    schema = tableschema.Schema(self.describe(bucket))
    fallbacks = self.__fallbacks.get(bucket, [])
    # Write rows to table
    convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
    writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
    with self.__connection.begin():
        gen = writer.write(rows, keyed=keyed)
        if as_generator:
            # NOTE(review): returning here exits the transaction context
            # before the generator is consumed -- confirm that deferred
            # consumption still writes within the intended transaction
            return gen
        # Drain the generator; deque(maxlen=0) discards items at C speed
        collections.deque(gen, maxlen=0)
# Private
def __get_table(self, bucket):
    """Return the reflected SQLAlchemy table for a bucket.

    The lookup key is the mapped table name, prefixed with the database
    schema (dot-separated) when one is configured.
    """
    name = self.__mapper.convert_bucket(bucket)
    if self.__dbschema:
        name = '%s.%s' % (self.__dbschema, name)
    return self.__metadata.tables[name]
def __reflect(self):
    """Reflect metadata

    Loads table definitions from the database into self.__metadata,
    restricted to tables that both pass the user-supplied reflect_only
    filter and map back to a known bucket name.
    """
    def only(name, _):
        # SQLAlchemy calls this per candidate table name
        return self.__only(name) and self.__mapper.restore_bucket(name) is not None
    self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.delete | python | def delete(self, bucket=None, ignore=False):
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect() | https://github.com/frictionlessdata/tableschema-sql-py#storage | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L109-L142 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets
def create(self, bucket, descriptor, force=False, indexes_fields=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage

    bucket/descriptor: a single name/dict or parallel lists of them.
    force: drop pre-existing buckets instead of raising StorageError.
    indexes_fields: per-bucket lists of field tuples to index.
    """
    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    descriptors = descriptor
    if isinstance(descriptor, dict):
        descriptors = [descriptor]
    if indexes_fields is None or len(indexes_fields) == 0:
        indexes_fields = [()] * len(descriptors)
    elif type(indexes_fields[0][0]) not in {list, tuple}:
        # A flat index list was given for a single bucket; wrap it
        indexes_fields = [indexes_fields]
    # Check dimensions
    if not (len(buckets) == len(descriptors) == len(indexes_fields)):
        raise tableschema.exceptions.StorageError('Wrong argument dimensions')
    # Check buckets for existence
    for bucket in reversed(self.buckets):
        if bucket in buckets:
            if not force:
                message = 'Bucket "%s" already exists.' % bucket
                raise tableschema.exceptions.StorageError(message)
            self.delete(bucket)
    # Define buckets
    for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
        tableschema.validate(descriptor)
        table_name = self.__mapper.convert_bucket(bucket)
        columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
            .convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
        # The Table registers itself on the shared metadata object
        Table(table_name, self.__metadata, *(columns + constraints + indexes),
              comment=table_comment)
        self.__descriptors[bucket] = descriptor
        self.__fallbacks[bucket] = fallbacks
    # Create tables, update metadata
    self.__metadata.create_all()
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0)
# Private
def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name]
def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.describe | python | def describe(self, bucket, descriptor=None):
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor | https://github.com/frictionlessdata/tableschema-sql-py#storage | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L144-L160 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets
    def create(self, bucket, descriptor, force=False, indexes_fields=None):
        """Create one or more buckets (tables) from descriptors.

        Parameters:
            bucket: bucket name or list of names
            descriptor: descriptor dict or list of dicts (parallel to bucket)
            force: delete pre-existing buckets instead of raising
            indexes_fields: index definitions per bucket (postgres only)

        Raises:
            tableschema.exceptions.StorageError: on argument-length mismatch
                or when a bucket already exists and ``force`` is false.
        """
        # Make lists: normalize scalar arguments to parallel lists
        buckets = bucket
        if isinstance(bucket, six.string_types):
            buckets = [bucket]
        descriptors = descriptor
        if isinstance(descriptor, dict):
            descriptors = [descriptor]
        if indexes_fields is None or len(indexes_fields) == 0:
            indexes_fields = [()] * len(descriptors)
        elif type(indexes_fields[0][0]) not in {list, tuple}:
            # A single bucket's index definition was passed; wrap it
            indexes_fields = [indexes_fields]
        # Check dimensions
        if not (len(buckets) == len(descriptors) == len(indexes_fields)):
            raise tableschema.exceptions.StorageError('Wrong argument dimensions')
        # Check buckets for existence
        for bucket in reversed(self.buckets):
            if bucket in buckets:
                if not force:
                    message = 'Bucket "%s" already exists.' % bucket
                    raise tableschema.exceptions.StorageError(message)
                self.delete(bucket)
        # Define buckets
        for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
            tableschema.validate(descriptor)
            table_name = self.__mapper.convert_bucket(bucket)
            columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
                .convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
            Table(table_name, self.__metadata, *(columns + constraints + indexes),
                  comment=table_comment)
            self.__descriptors[bucket] = descriptor
            self.__fallbacks[bucket] = fallbacks
        # Create tables, update metadata
        self.__metadata.create_all()
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
    def iter(self, bucket):
        """Lazily yield typed rows from *bucket*.

        NOTE: rows are yielded from inside an open transaction; the
        transaction closes only when the generator is exhausted.
        """
        # Get table and fallbacks
        table = self.__get_table(bucket)
        schema = tableschema.Schema(self.describe(bucket))
        # Open and close transaction
        with self.__connection.begin():
            # Streaming could be not working for some backends:
            # http://docs.sqlalchemy.org/en/latest/core/connections.html
            select = table.select().execution_options(stream_results=True)
            result = select.execute()
            for row in result:
                # Restore DB values back to Table Schema typed values
                row = self.__mapper.restore_row(row, schema=schema)
                yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
    def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
        """Write *rows* into *bucket*.

        Parameters:
            bucket: target bucket name
            rows: iterable of rows (sequences, or dicts when ``keyed``)
            keyed: rows are dicts keyed by field name
            as_generator: return the lazy writer generator instead of
                draining it
            update_keys: field names used to match existing rows for update;
                must be a non-empty list when given

        Raises:
            tableschema.exceptions.StorageError: when ``update_keys`` is an
                empty list.
        """
        # Check update keys
        if update_keys is not None and len(update_keys) == 0:
            message = 'Argument "update_keys" cannot be an empty list'
            raise tableschema.exceptions.StorageError(message)
        # Get table and description
        table = self.__get_table(bucket)
        schema = tableschema.Schema(self.describe(bucket))
        fallbacks = self.__fallbacks.get(bucket, [])
        # Write rows to table
        convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
        writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
        with self.__connection.begin():
            gen = writer.write(rows, keyed=keyed)
            if as_generator:
                # NOTE(review): returning here exits the ``with`` block, so the
                # transaction ends before the generator is consumed — confirm
                # this is intended for the as_generator path.
                return gen
            # Drain the generator to perform the actual writes
            collections.deque(gen, maxlen=0)
# Private
def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name]
def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.iter | python | def iter(self, bucket):
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row | https://github.com/frictionlessdata/tableschema-sql-py#storage | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L162-L178 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets
def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all()
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0)
# Private
def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name]
def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.write | python | def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0) | https://github.com/frictionlessdata/tableschema-sql-py#storage | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L186-L207 | [
"def write(self, rows, keyed=False):\n \"\"\"Write rows/keyed_rows to table\n \"\"\"\n for row in rows:\n keyed_row = row\n if not keyed:\n keyed_row = dict(zip(self.__schema.field_names, row))\n keyed_row = self.__convert_row(keyed_row)\n if self.__check_existing(keyed_row):\n for wr in self.__insert():\n yield wr\n ret = self.__update(keyed_row)\n if ret is not None:\n yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)\n continue\n self.__buffer.append(keyed_row)\n if len(self.__buffer) > BUFFER_SIZE:\n for wr in self.__insert():\n yield wr\n for wr in self.__insert():\n yield wr\n"
] | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets
def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all()
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
# Private
def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name]
def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.__get_table | python | def __get_table(self, bucket):
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name] | Get table by bucket | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L211-L217 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets
def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all()
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0)
# Private
def __reflect(self):
"""Reflect metadata
"""
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only)
|
frictionlessdata/tableschema-sql-py | tableschema_sql/storage.py | Storage.__reflect | python | def __reflect(self):
def only(name, _):
return self.__only(name) and self.__mapper.restore_bucket(name) is not None
self.__metadata.reflect(only=only) | Reflect metadata | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L219-L226 | null | class Storage(tableschema.Storage):
# Public
def __init__(self, engine, dbschema=None, prefix='', reflect_only=None, autoincrement=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set attributes
self.__connection = engine.connect()
self.__dbschema = dbschema
self.__prefix = prefix
self.__descriptors = {}
self.__fallbacks = {}
self.__autoincrement = autoincrement
self.__only = reflect_only or (lambda _: True)
# Create mapper
self.__mapper = Mapper(prefix=prefix, dialect=engine.dialect.name)
# Create metadata and reflect
self.__metadata = MetaData(bind=self.__connection, schema=self.__dbschema)
self.__reflect()
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Template and format
template = 'Storage <{engine}/{dbschema}>'
text = template.format(
engine=self.__connection.engine,
dbschema=self.__dbschema)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
buckets = []
for table in self.__metadata.sorted_tables:
bucket = self.__mapper.restore_bucket(table.name)
if bucket is not None:
buckets.append(bucket)
return buckets
def create(self, bucket, descriptor, force=False, indexes_fields=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
if indexes_fields is None or len(indexes_fields) == 0:
indexes_fields = [()] * len(descriptors)
elif type(indexes_fields[0][0]) not in {list, tuple}:
indexes_fields = [indexes_fields]
# Check dimensions
if not (len(buckets) == len(descriptors) == len(indexes_fields)):
raise tableschema.exceptions.StorageError('Wrong argument dimensions')
# Check buckets for existence
for bucket in reversed(self.buckets):
if bucket in buckets:
if not force:
message = 'Bucket "%s" already exists.' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define buckets
for bucket, descriptor, index_fields in zip(buckets, descriptors, indexes_fields):
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
columns, constraints, indexes, fallbacks, table_comment = self.__mapper \
.convert_descriptor(bucket, descriptor, index_fields, self.__autoincrement)
Table(table_name, self.__metadata, *(columns + constraints + indexes),
comment=table_comment)
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Create tables, update metadata
self.__metadata.create_all()
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Check update keys
if update_keys is not None and len(update_keys) == 0:
message = 'Argument "update_keys" cannot be an empty list'
raise tableschema.exceptions.StorageError(message)
# Get table and description
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write rows to table
convert_row = partial(self.__mapper.convert_row, schema=schema, fallbacks=fallbacks)
writer = Writer(table, schema, update_keys, self.__autoincrement, convert_row)
with self.__connection.begin():
gen = writer.write(rows, keyed=keyed)
if as_generator:
return gen
collections.deque(gen, maxlen=0)
# Private
def __get_table(self, bucket):
"""Get table by bucket
"""
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name]
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | _get_field_comment | python | def _get_field_comment(field, separator=' - '):
title = field.descriptor.get('title') or ''
description = field.descriptor.get('description') or ''
return _get_comment(description, title, separator) | Create SQL comment from field's title and description
:param field: tableschema-py Field, with optional 'title' and 'description' values
:param separator:
:return:
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))
'my_title - my_desc'
>>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))
'my_title'
>>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))
'my_description'
>>> _get_field_comment(tableschema.Field({}))
'' | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L281-L300 | [
"def _get_comment(description, title, separator=' - '):\n if title == '':\n return description\n if description == '':\n return title\n return title + separator + description\n"
] | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import six
import sqlalchemy as sa
import tableschema
from sqlalchemy.dialects.postgresql import ARRAY, JSON, JSONB, UUID
# Module API
class Mapper(object):
# Public
    def __init__(self, prefix, dialect='sqlite'):
        """Mapper to convert/restore Frictionless Data entities to/from SQL.

        Parameters:
            prefix: string prepended to bucket names to form table names
            dialect: SQLAlchemy dialect name; 'postgresql' unlocks JSONB
                columns, foreign keys and indexes elsewhere in this class
        """
        self.__prefix = prefix
        self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_row(self, keyed_row, schema, fallbacks):
    """Convert a keyed row to its SQL representation.

    Keys with no matching field in ``schema`` are dropped from the row.
    Values for fields listed in ``fallbacks`` are uncast back to plain
    strings (their column was created as TEXT); all other values are
    cast through their schema field.

    Note: mutates and returns the same ``keyed_row`` mapping.
    """
    for key, value in list(keyed_row.items()):
        field = schema.get_field(key)
        if not field:
            # Unknown key: drop it and move on. Without the ``continue``
            # the code below would call ``field.cast_value`` on None and
            # re-insert the just-deleted key.
            del keyed_row[key]
            continue
        if key in fallbacks:
            value = _uncast_value(value, field=field)
        else:
            value = field.cast_value(value)
        keyed_row[key] = value
    return keyed_row
def convert_type(self, type):
    """Convert a Table Schema field type to a SQLAlchemy column type.

    Entries mapped to ``None`` have no native SQL representation and are
    handled as TEXT fallbacks by the caller.  Raises
    ``tableschema.exceptions.StorageError`` for unknown types.
    """
    # Base mapping shared by every dialect.
    mapping = {
        'any': sa.Text,
        'array': None,
        'boolean': sa.Boolean,
        'date': sa.Date,
        'datetime': sa.DateTime,
        'duration': None,
        'geojson': None,
        'geopoint': None,
        'integer': sa.Integer,
        'number': sa.Float,
        'object': None,
        'string': sa.Text,
        'time': sa.Time,
        'year': sa.Integer,
        'yearmonth': None,
    }
    # Postgres stores JSON-ish types natively and prefers exact numerics.
    if self.__dialect == 'postgresql':
        mapping['array'] = JSONB
        mapping['geojson'] = JSONB
        mapping['number'] = sa.Numeric
        mapping['object'] = JSONB
    if type in mapping:
        return mapping[type]
    message = 'Field type "%s" is not supported'
    raise tableschema.exceptions.StorageError(message % type)
def restore_bucket(self, table_name):
    """Restore a bucket name from a SQL table name.

    Returns ``None`` when the table does not belong to this storage,
    i.e. does not carry the configured prefix.
    """
    prefix = self.__prefix
    if not table_name.startswith(prefix):
        return None
    # Strip only the leading occurrence of the prefix.
    return table_name.replace(prefix, '', 1)
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
    """Restore a Table Schema descriptor from SQL table metadata.

    Builds ``fields`` from ``columns``, ``primaryKey`` from any
    ``PrimaryKeyConstraint`` and (postgresql dialect only)
    ``foreignKeys`` from ``ForeignKeyConstraint`` objects.  The
    ``autoincrement_column`` is an implementation detail and is
    excluded from both fields and the primary key.
    """
    # Fields
    fields = []
    for column in columns:
        if column.name == autoincrement_column:
            continue  # synthetic column, not part of the descriptor
        field_type = self.restore_type(column.type)
        field = {'name': column.name, 'type': field_type}
        if not column.nullable:
            field['constraints'] = {'required': True}
        fields.append(field)
    # Primary key
    pk = []
    for constraint in constraints:
        if isinstance(constraint, sa.PrimaryKeyConstraint):
            for column in constraint.columns:
                if column.name == autoincrement_column:
                    continue
                pk.append(column.name)
    # Foreign keys (only restored for the postgresql dialect)
    fks = []
    if self.__dialect == 'postgresql':
        for constraint in constraints:
            if isinstance(constraint, sa.ForeignKeyConstraint):
                resource = ''
                own_fields = []
                foreign_fields = []
                for element in constraint.elements:
                    own_fields.append(element.parent.name)
                    # A referenced column in a different table means the
                    # fk points at another resource; '' = self-reference.
                    if element.column.table.name != table_name:
                        resource = self.restore_bucket(element.column.table.name)
                    foreign_fields.append(element.column.name)
                # Single-column keys are stored as plain strings
                if len(own_fields) == len(foreign_fields) == 1:
                    own_fields = own_fields.pop()
                    foreign_fields = foreign_fields.pop()
                fks.append({
                    'fields': own_fields,
                    'reference': {'resource': resource, 'fields': foreign_fields},
                })
    # Descriptor
    descriptor = {}
    descriptor['fields'] = fields
    if len(pk) > 0:
        if len(pk) == 1:
            pk = pk.pop()  # single-column pk stored as a plain string
        descriptor['primaryKey'] = pk
    if len(fks) > 0:
        descriptor['foreignKeys'] = fks
    return descriptor
def restore_row(self, row, schema):
    """Restore a SQL result row to schema-typed Python values.

    On the postgresql dialect, array/object columns arrive already
    deserialized (JSONB), so they are passed through untouched.
    """
    restored = list(row)
    for position, field in enumerate(schema.fields):
        passthrough = (self.__dialect == 'postgresql'
                       and field.type in ('array', 'object'))
        if not passthrough:
            restored[position] = field.cast_value(restored[position])
    return restored
def restore_type(self, type):
    """Restore a Table Schema field type from a SQLAlchemy column type.

    Raises ``tableschema.exceptions.StorageError`` when the SQL type has
    no descriptor equivalent.
    """
    # All dialects.  Order matters: several SQL types are related by
    # subclassing, and the *last* matching entry wins below.
    mapping = {
        ARRAY: 'array',
        sa.Boolean: 'boolean',
        sa.Date: 'date',
        sa.DateTime: 'datetime',
        sa.Float: 'number',
        sa.Integer: 'integer',
        JSONB: 'object',
        JSON: 'object',
        sa.Numeric: 'number',
        sa.Text: 'string',
        sa.Time: 'time',
        sa.VARCHAR: 'string',
        UUID: 'string',
    }
    # Collect every match and keep the final one (last-match-wins,
    # mirroring the mapping's declaration order).
    matches = [name for sql_type, name in mapping.items()
               if isinstance(type, sql_type)]
    if not matches:
        message = 'Type "%s" is not supported'
        raise tableschema.exceptions.StorageError(message % type)
    return matches[-1]
# Internal
def _uncast_value(value, field):
# Eventially should be moved to:
# https://github.com/frictionlessdata/tableschema-py/issues/161
if isinstance(value, (list, dict)):
value = json.dumps(value)
else:
value = str(value)
return value
def _get_comment(description, title, separator=' - '):
if title == '':
return description
if description == '':
return title
return title + separator + description
|
def convert_descriptor(self, bucket, descriptor, index_fields=None, autoincrement=None):
    """Convert a Table Schema descriptor to SQL table artifacts.

    Returns a tuple ``(columns, constraints, indexes, fallbacks,
    table_comment)`` where ``fallbacks`` lists field names whose type
    had no native SQL mapping and are stored as TEXT.

    ``index_fields`` is a list of lists of field names; on the
    postgresql dialect each inner list becomes one index.
    """
    # Prepare
    columns = []
    indexes = []
    fallbacks = []
    constraints = []
    column_mapping = {}
    index_fields = index_fields or []  # avoid mutable default argument
    table_name = self.convert_bucket(bucket)
    # _get_comment takes (description, title) -- pass them in that order
    # so the comment reads "title - description".
    table_comment = _get_comment(
        descriptor.get('description', ''), descriptor.get('title', ''))
    schema = tableschema.Schema(descriptor)

    # Autoincrement
    if autoincrement is not None:
        columns.append(sa.Column(
            autoincrement, sa.Integer, autoincrement=True, nullable=False))

    # Fields
    for field in schema.fields:
        column_type = self.convert_type(field.type)
        if not column_type:
            # Unsupported type: store as TEXT and remember the field so
            # its values can be uncast/cast on write/read.
            column_type = sa.Text
            fallbacks.append(field.name)
        nullable = not field.required
        # Dedicated variable so the table comment computed above is not
        # clobbered by the per-column comment.
        column_comment = _get_field_comment(field)
        unique = field.constraints.get('unique', False)
        column = sa.Column(field.name, column_type, nullable=nullable,
                           comment=column_comment, unique=unique)
        columns.append(column)
        column_mapping[field.name] = column

    # Primary key
    pk = descriptor.get('primaryKey', None)
    if pk is not None:
        if isinstance(pk, six.string_types):
            pk = [pk]
    if autoincrement is not None:
        if pk is not None:
            pk = [autoincrement] + pk
        else:
            pk = [autoincrement]
    if pk is not None:
        constraint = sa.PrimaryKeyConstraint(*pk)
        constraints.append(constraint)

    # Foreign keys (postgresql only)
    if self.__dialect == 'postgresql':
        fks = descriptor.get('foreignKeys', [])
        for fk in fks:
            fields = fk['fields']
            resource = fk['reference']['resource']
            foreign_fields = fk['reference']['fields']
            if isinstance(fields, six.string_types):
                fields = [fields]
            # Use a local name for the referenced table: overwriting
            # ``table_name`` here would corrupt index naming below and
            # self-references in later foreign keys.
            if resource != '':
                fk_table = self.convert_bucket(resource)
            else:
                fk_table = table_name  # '' denotes a self-reference
            if isinstance(foreign_fields, six.string_types):
                foreign_fields = [foreign_fields]
            foreign_fields = [
                '.'.join([fk_table, field]) for field in foreign_fields]
            constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
            constraints.append(constraint)

    # Indexes (postgresql only)
    if self.__dialect == 'postgresql':
        for index, index_definition in enumerate(index_fields):
            name = table_name + '_ix%03d' % index
            index_columns = [column_mapping[field] for field in index_definition]
            indexes.append(sa.Index(name, *index_columns))

    return columns, constraints, indexes, fallbacks, table_comment
"def _get_comment(description, title, separator=' - '):\n if title == '':\n return description\n if description == '':\n return title\n return title + separator + description\n",
"def _get_field_comment(field, separator=' - '):\n \"\"\"\n Create SQL comment from field's title and description\n\n :param field: tableschema-py Field, with optional 'title' and 'description' values\n :param separator:\n :return:\n\n >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))\n 'my_title - my_desc'\n >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))\n 'my_title'\n >>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))\n 'my_description'\n >>> _get_field_comment(tableschema.Field({}))\n ''\n \"\"\"\n title = field.descriptor.get('title') or ''\n description = field.descriptor.get('description') or ''\n return _get_comment(description, title, separator)\n",
"def convert_bucket(self, bucket):\n \"\"\"Convert bucket to SQL\n \"\"\"\n return self.__prefix + bucket\n",
"def convert_type(self, type):\n \"\"\"Convert type to SQL\n \"\"\"\n\n # Default dialect\n mapping = {\n 'any': sa.Text,\n 'array': None,\n 'boolean': sa.Boolean,\n 'date': sa.Date,\n 'datetime': sa.DateTime,\n 'duration': None,\n 'geojson': None,\n 'geopoint': None,\n 'integer': sa.Integer,\n 'number': sa.Float,\n 'object': None,\n 'string': sa.Text,\n 'time': sa.Time,\n 'year': sa.Integer,\n 'yearmonth': None,\n }\n\n # Postgresql dialect\n if self.__dialect == 'postgresql':\n mapping.update({\n 'array': JSONB,\n 'geojson': JSONB,\n 'number': sa.Numeric,\n 'object': JSONB,\n })\n\n # Not supported type\n if type not in mapping:\n message = 'Field type \"%s\" is not supported'\n raise tableschema.exceptions.StorageError(message % type)\n\n return mapping[type]\n"
] | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type]
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor
def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row
def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.convert_row | python | def convert_row(self, keyed_row, schema, fallbacks):
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row | Convert row to SQL | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L106-L118 | [
"def _uncast_value(value, field):\n # Eventially should be moved to:\n # https://github.com/frictionlessdata/tableschema-py/issues/161\n if isinstance(value, (list, dict)):\n value = json.dumps(value)\n else:\n value = str(value)\n return value\n"
] | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type]
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor
def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row
def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.convert_type | python | def convert_type(self, type):
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type] | Convert type to SQL | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L120-L157 | null | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor
def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row
def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_bucket | python | def restore_bucket(self, table_name):
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None | Restore bucket from SQL | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L159-L164 | null | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type]
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor
def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row
def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_descriptor | python | def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor | Restore descriptor from SQL | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L166-L221 | [
"def restore_bucket(self, table_name):\n \"\"\"Restore bucket from SQL\n \"\"\"\n if table_name.startswith(self.__prefix):\n return table_name.replace(self.__prefix, '', 1)\n return None\n",
"def restore_type(self, type):\n \"\"\"Restore type from SQL\n \"\"\"\n\n # All dialects\n mapping = {\n ARRAY: 'array',\n sa.Boolean: 'boolean',\n sa.Date: 'date',\n sa.DateTime: 'datetime',\n sa.Float: 'number',\n sa.Integer: 'integer',\n JSONB: 'object',\n JSON: 'object',\n sa.Numeric: 'number',\n sa.Text: 'string',\n sa.Time: 'time',\n sa.VARCHAR: 'string',\n UUID: 'string',\n }\n\n # Get field type\n field_type = None\n for key, value in mapping.items():\n if isinstance(type, key):\n field_type = value\n\n # Not supported\n if field_type is None:\n message = 'Type \"%s\" is not supported'\n raise tableschema.exceptions.StorageError(message % type)\n\n return field_type\n"
] | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type]
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row
def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_row | python | def restore_row(self, row, schema):
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row | Restore row from SQL | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L223-L232 | null | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type]
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor
def restore_type(self, type):
"""Restore type from SQL
"""
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type
|
frictionlessdata/tableschema-sql-py | tableschema_sql/mapper.py | Mapper.restore_type | python | def restore_type(self, type):
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type | Restore type from SQL | train | https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L234-L266 | null | class Mapper(object):
# Public
def __init__(self, prefix, dialect='sqlite'):
"""Mapper to convert/restore FD entities to/from SQL entities
"""
self.__prefix = prefix
self.__dialect = dialect
def convert_bucket(self, bucket):
"""Convert bucket to SQL
"""
return self.__prefix + bucket
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
table_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=table_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL
"""
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type]
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL
"""
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Desscriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor
def restore_row(self, row, schema):
"""Restore row from SQL
"""
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | InstanceProxy.connect | python | def connect(self, cback):
"See signal"
return self.signal.connect(cback,
subscribers=self.subscribers,
instance=self.instance) | See signal | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L56-L60 | null | class InstanceProxy:
"""A small proxy used to get instance context when signal is a
member of a class.
"""
def __init__(self, signal, instance):
self.signal = signal
self.instance = instance
self.subscribers = self.get_subscribers()
sdoc = signal.__dict__.get('__doc__')
if sdoc is not None:
self.__doc__ = sdoc
def __repr__(self):
return ('<Signal "{self.signal.name}" '
' on {self.instance!r}>').format(self=self)
def clear(self):
"""Remove all the connected handlers, for this instance"""
self.subscribers.clear()
def disconnect(self, cback):
"See signal"
return self.signal.disconnect(cback,
subscribers=self.subscribers,
instance=self.instance)
def get_subscribers(self):
"""Get per-instance subscribers from the signal.
"""
data = self.signal.instance_subscribers
if self.instance not in data:
data[self.instance] = MethodAwareWeakList()
return data[self.instance]
@property
def loop(self):
return getattr(self.instance, 'loop', None)
def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs)
__call__ = notify
def notify_prepared(self, args=None, kwargs=None, **opts):
"""Like notify allows to pass more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop, **opts).run(*args, **kwargs)
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | InstanceProxy.disconnect | python | def disconnect(self, cback):
"See signal"
return self.signal.disconnect(cback,
subscribers=self.subscribers,
instance=self.instance) | See signal | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L62-L66 | null | class InstanceProxy:
"""A small proxy used to get instance context when signal is a
member of a class.
"""
def __init__(self, signal, instance):
self.signal = signal
self.instance = instance
self.subscribers = self.get_subscribers()
sdoc = signal.__dict__.get('__doc__')
if sdoc is not None:
self.__doc__ = sdoc
def __repr__(self):
return ('<Signal "{self.signal.name}" '
' on {self.instance!r}>').format(self=self)
def clear(self):
"""Remove all the connected handlers, for this instance"""
self.subscribers.clear()
def connect(self, cback):
"See signal"
return self.signal.connect(cback,
subscribers=self.subscribers,
instance=self.instance)
def get_subscribers(self):
"""Get per-instance subscribers from the signal.
"""
data = self.signal.instance_subscribers
if self.instance not in data:
data[self.instance] = MethodAwareWeakList()
return data[self.instance]
@property
def loop(self):
return getattr(self.instance, 'loop', None)
def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs)
__call__ = notify
def notify_prepared(self, args=None, kwargs=None, **opts):
"""Like notify allows to pass more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop, **opts).run(*args, **kwargs)
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | InstanceProxy.get_subscribers | python | def get_subscribers(self):
data = self.signal.instance_subscribers
if self.instance not in data:
data[self.instance] = MethodAwareWeakList()
return data[self.instance] | Get per-instance subscribers from the signal. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L68-L74 | null | class InstanceProxy:
"""A small proxy used to get instance context when signal is a
member of a class.
"""
def __init__(self, signal, instance):
self.signal = signal
self.instance = instance
self.subscribers = self.get_subscribers()
sdoc = signal.__dict__.get('__doc__')
if sdoc is not None:
self.__doc__ = sdoc
def __repr__(self):
return ('<Signal "{self.signal.name}" '
' on {self.instance!r}>').format(self=self)
def clear(self):
"""Remove all the connected handlers, for this instance"""
self.subscribers.clear()
def connect(self, cback):
"See signal"
return self.signal.connect(cback,
subscribers=self.subscribers,
instance=self.instance)
def disconnect(self, cback):
"See signal"
return self.signal.disconnect(cback,
subscribers=self.subscribers,
instance=self.instance)
@property
def loop(self):
return getattr(self.instance, 'loop', None)
def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs)
__call__ = notify
def notify_prepared(self, args=None, kwargs=None, **opts):
"""Like notify allows to pass more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop, **opts).run(*args, **kwargs)
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | InstanceProxy.notify | python | def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs) | See signal | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L80-L85 | null | class InstanceProxy:
"""A small proxy used to get instance context when signal is a
member of a class.
"""
def __init__(self, signal, instance):
self.signal = signal
self.instance = instance
self.subscribers = self.get_subscribers()
sdoc = signal.__dict__.get('__doc__')
if sdoc is not None:
self.__doc__ = sdoc
def __repr__(self):
return ('<Signal "{self.signal.name}" '
' on {self.instance!r}>').format(self=self)
def clear(self):
"""Remove all the connected handlers, for this instance"""
self.subscribers.clear()
def connect(self, cback):
"See signal"
return self.signal.connect(cback,
subscribers=self.subscribers,
instance=self.instance)
def disconnect(self, cback):
"See signal"
return self.signal.disconnect(cback,
subscribers=self.subscribers,
instance=self.instance)
def get_subscribers(self):
"""Get per-instance subscribers from the signal.
"""
data = self.signal.instance_subscribers
if self.instance not in data:
data[self.instance] = MethodAwareWeakList()
return data[self.instance]
@property
def loop(self):
return getattr(self.instance, 'loop', None)
__call__ = notify
def notify_prepared(self, args=None, kwargs=None, **opts):
"""Like notify allows to pass more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop, **opts).run(*args, **kwargs)
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | InstanceProxy.notify_prepared | python | def notify_prepared(self, args=None, kwargs=None, **opts):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop, **opts).run(*args, **kwargs) | Like notify allows to pass more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L89-L108 | null | class InstanceProxy:
"""A small proxy used to get instance context when signal is a
member of a class.
"""
def __init__(self, signal, instance):
self.signal = signal
self.instance = instance
self.subscribers = self.get_subscribers()
sdoc = signal.__dict__.get('__doc__')
if sdoc is not None:
self.__doc__ = sdoc
def __repr__(self):
return ('<Signal "{self.signal.name}" '
' on {self.instance!r}>').format(self=self)
def clear(self):
"""Remove all the connected handlers, for this instance"""
self.subscribers.clear()
def connect(self, cback):
"See signal"
return self.signal.connect(cback,
subscribers=self.subscribers,
instance=self.instance)
def disconnect(self, cback):
"See signal"
return self.signal.disconnect(cback,
subscribers=self.subscribers,
instance=self.instance)
def get_subscribers(self):
"""Get per-instance subscribers from the signal.
"""
data = self.signal.instance_subscribers
if self.instance not in data:
data[self.instance] = MethodAwareWeakList()
return data[self.instance]
@property
def loop(self):
return getattr(self.instance, 'loop', None)
def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs)
__call__ = notify
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | Signal.connect | python | def connect(self, cback, subscribers=None, instance=None):
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
def _connect(cback):
self._connect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fconnect(instance, cback, subscribers,
_connect, notify)
else:
result = self._fconnect(cback, subscribers, _connect, notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._connect(subscribers, cback)
result = None
return result | Add a function or a method as an handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L233-L258 | [
"def _connect(self, subscribers, cback):\n if cback not in subscribers:\n subscribers.append(cback)\n"
] | class Signal:
"""The core class. It collects subscribers that can be either normal
"callable" or coroutine/awaitable generator functions. The subscription is
managed using the `connect`:meth: and `disconnect`:meth: methods. The
notification is done by executing e `notify`:meth: method.
It can be used either as a standalone event or in the body of another class
which uses `~.user.SignalAndHandlerInitMeta`:class: metaclass, with the
help of the `~.utils.signal`:func: decorator.
:param \*flags: any flags that can change the behavior of the signal
instance, see `~.utils.SignalOptions`:class: class
:keyword fconnect: an optional callable that wraps the `connect`:meth:
method
:keyword fdisconnect: an optional callable that wraps the
`disconnect`:meth: method
:keyword fnotify: an optional callable that wraps the
`notify`:meth: method
:keyword fvalidation: an optional validation callable used to ensure that
arguments passed to the `notify`:meth: invocation are those permitted
:keyword str name: optional name of the signal
:keyword loop: optional asyncio event loop to use
:keyword external: optional external signaller that extends the signal
:type external: `~.external.ExternalSignaller`:class:
:param \*\*additional_params: optional additional params that will be
stored in the instance
"""
_external_signaller = None
_name = None
_concurrent_handlers = False
FLAGS = SignalOptions
"""All the available handlers sort modes. See `~.utils.SignalOptions`.
"""
def __init__(self, *flags, fconnect=None, fdisconnect=None,
fnotify=None, fvalidation=None, name=None,
loop=None, external=None, **additional_params):
self.name = name
self.subscribers = MethodAwareWeakList()
"""A weak list containing the connected handlers"""
self.loop = loop or asyncio.get_event_loop()
self.instance_subscribers = weakref.WeakKeyDictionary()
self.external_signaller = external
self._fnotify = fnotify
self._fconnect = fconnect
self._fdisconnect = fdisconnect
self._set_fvalidation(fvalidation)
self._iproxies = weakref.WeakKeyDictionary()
if not all(isinstance(f, SignalOptions) for f in flags):
raise ValueError("``flags`` elements must be instances of "
"`SignalOptions")
if (SignalOptions.SORT_BOTTOMUP in flags and
SignalOptions.SORT_TOPDOWN in flags):
raise ValueError("Both sort modes specified in the flags")
elif not (SignalOptions.SORT_BOTTOMUP in flags or
SignalOptions.SORT_TOPDOWN in flags):
flags = flags + (SignalOptions.SORT_BOTTOMUP,)
self.flags = flags
self.additional_params = additional_params
"""additional parameter passed at construction time"""
def __get__(self, instance, owner):
if instance is None:
# just a silly trick to get some better autodoc docs
if (self._fvalidation is not None and
'sphinx.ext.autodoc' in sys.modules):
result = self._fvalidation
else:
result = self
else:
if instance not in self._iproxies:
self._iproxies[instance] = InstanceProxy(self, instance)
result = self._iproxies[instance]
return result
def __repr__(self):
return ("<%s with name %r, len: %d>" % (
self.__class__.__name__, self.name, len(self.subscribers)
))
def _connect(self, subscribers, cback):
    # Core connect primitive: register the callback at most once.
    already_present = cback in subscribers
    if not already_present:
        subscribers.append(cback)
def _disconnect(self, subscribers, cback):
    # Core disconnect primitive: remove the callback when present,
    # silently ignore it otherwise.
    if cback not in subscribers:
        return
    subscribers.remove(cback)
def _find_indent(self, doct):
    """Return the width (in spaces) of the first indented line of
    *doct*, or 0 when no line is indented."""
    for line in doct.splitlines():
        leading = re.match('^[ ]+', line)
        if leading is not None:
            return len(leading.group(0))
    return 0
def _loop_from_instance(self, instance):
    # A class-level signal uses its own loop; a bound one asks the
    # per-instance proxy obtained through the descriptor protocol.
    if instance is None:
        return self.loop
    return self.__get__(instance).loop
def _notify_one(self, instance, cback, *args, **kwargs):
    """Run a notification restricted to a single callback.

    Handed (partially applied) to the connect/disconnect wrappers so
    user code can trigger one handler in isolation.
    """
    loop = self._loop_from_instance(instance)
    return self.prepare_notification(
        subscribers=(cback,), instance=instance,
        loop=loop).run(*args, **kwargs)
def _set_fvalidation(self, value):
    """Install the validation callable and splice the standard signal
    documentation template into its docstring (and the signal's own)."""
    self._fvalidation = value
    if value is not None:
        if value.__doc__ is None:
            doc = ''
            indent = 0
        else:
            doc = value.__doc__
            indent = self._find_indent(doc)
        # indent the template so it lines up with the existing docstring
        sig_doc = textwrap.indent(SIGN_DOC_TEMPLATE, ' ' * indent)
        value.__doc__ = self.__doc__ = doc + sig_doc
def clear(self):
    """Remove all the class-level connected handlers.

    Note: per-instance handlers kept in ``instance_subscribers`` are
    not touched.
    """
    self.subscribers.clear()
def disconnect(self, cback, subscribers=None, instance=None):
    """Remove a previously added function or method from the set of the
    signal's handlers.

    :param cback: the callback (or *handler*) to be removed from the set
    :param subscribers: optional alternative handler collection to
      operate on; defaults to the class-level ``self.subscribers``
    :param instance: optional instance the signal is bound to, forwarded
      to the ``fdisconnect`` wrapper when one is installed
    :returns: ``None`` or the value returned by the corresponding wrapper
    """
    if subscribers is None:
        subscribers = self.subscribers
    # wrapper
    if self._fdisconnect is not None:
        # hand the wrapper a ready-made disconnector plus a one-shot
        # notifier bound to this instance
        def _disconnect(cback):
            self._disconnect(subscribers, cback)
        notify = partial(self._notify_one, instance)
        if instance is not None:
            result = self._fdisconnect(instance, cback, subscribers,
                                       _disconnect, notify)
        else:
            result = self._fdisconnect(cback, subscribers, _disconnect,
                                       notify)
        if inspect.isawaitable(result):
            # resolve the awaitable produced by an async wrapper
            result = pull_result(result)
    else:
        self._disconnect(subscribers, cback)
        result = None
    return result
def ext_publish(self, instance, loop, *args, **kwargs):
    """If 'external_signaller' is defined, calls its publish method to
    notify external event systems.

    This is for internal usage only, but it's documented because it's
    part of the interface with external notification systems.
    """
    if self.external_signaller is not None:
        # Assumes that the loop is managed by the external handler
        return self.external_signaller.publish_signal(self, instance, loop,
                                                      args, kwargs)
@property
def external_signaller(self):
    """The registered `~.external.ExternalSignaller`:class:."""
    return self._external_signaller

@external_signaller.setter
def external_signaller(self, value):
    # only ExternalSignaller implementations are accepted
    if value is not None:
        assert isinstance(value, ExternalSignaller)
    self._external_signaller = value
    # register with the external system as soon as both a name and a
    # signaller are known (mirrors the ``name`` setter)
    if self._name and value:
        value.register_signal(self, self._name)
@property
def name(self):
    """The *name* of the signal used in conjunction with external
    notification systems."""
    return self._name

@name.setter
def name(self, value):
    self._name = value
    # mirror the registration performed by the external_signaller setter
    if value is not None and self._external_signaller:
        self._external_signaller.register_signal(self, value)
def notify(self, *args, **kwargs):
    """Call all the registered handlers with the arguments passed.

    :returns: an instance of `~.utils.MultipleResults`:class: or the
      result of the execution of the corresponding wrapper function
    """
    return self.prepare_notification().run(*args, **kwargs)

# calling the signal is the same as notifying it
__call__ = notify
def prepare_notification(self, *, subscribers=None, instance=None,
                         loop=None, notify_external=True):
    """Sets up and configures an `~.utils.Executor`:class: instance.

    :keyword subscribers: optional extra handlers to include
    :keyword instance: optional instance the signal is bound to
    :keyword loop: optional event loop, defaulting to the signal's own
    :keyword notify_external: when ``True`` also schedule external
      publishing via `ext_publish`
    :returns: a configured ``Executor``
    """
    # merge callbacks added to the class level with those added to the
    # instance, giving the formers precedence while preserving overall
    # order
    self_subscribers = self.subscribers.copy()
    # add in callbacks declared in the main class body and marked with
    # @handler
    if (instance is not None and self.name and
            isinstance(instance.__class__, SignalAndHandlerInitMeta)):
        class_handlers = type(instance)._get_class_handlers(
            self.name, instance)
        for ch in class_handlers:
            # eventual methods are ephemeral and normally the following
            # condition would always be True for methods but the dict used
            # has logic to take that into account
            if ch not in self_subscribers:
                self_subscribers.append(ch)
    # add in the other instance level callbacks added at runtime
    if subscribers is not None:
        for el in subscribers:
            # eventual methods are ephemeral and normally the following
            # condition would always be True for methods but the dict used
            # has logic to take that into account
            if el not in self_subscribers:
                self_subscribers.append(el)
    loop = loop or self.loop
    # maybe do a round of external publishing
    if notify_external and self.external_signaller is not None:
        self_subscribers.append(partial(self.ext_publish, instance, loop))
    if self._fnotify is None:
        fnotify = None
    else:
        if instance is None:
            fnotify = self._fnotify
        else:
            # bind the wrapper so it receives the instance as ``self``
            fnotify = types.MethodType(self._fnotify, instance)
    validator = self._fvalidation
    if validator is not None and instance is not None:
        validator = types.MethodType(validator, instance)
    return Executor(self_subscribers, owner=self,
                    concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
                    loop=loop, exec_wrapper=fnotify,
                    fvalidation=validator)
def on_connect(self, fconnect):
    """Decorator-style installer for the `connect`:meth: wrapper.

    :param fconnect: the callable to install as `connect`:meth: wrapper
    :returns: the signal itself, to allow chaining
    """
    self._fconnect = fconnect
    return self
def on_disconnect(self, fdisconnect):
    """Decorator-style installer for the `disconnect`:meth: wrapper.

    :param fdisconnect: the callable to install as `disconnect`:meth:
      wrapper
    :returns: the signal itself, to allow chaining
    """
    self._fdisconnect = fdisconnect
    return self
def on_notify(self, fnotify):
    """Decorator-style installer for the `notify`:meth: wrapper.

    :param fnotify: the callable to install as `notify`:meth: wrapper
    :returns: the signal itself, to allow chaining
    """
    self._fnotify = fnotify
    return self
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | Signal.disconnect | python | def disconnect(self, cback, subscribers=None, instance=None):
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fdisconnect is not None:
def _disconnect(cback):
self._disconnect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fdisconnect(instance, cback, subscribers,
_disconnect, notify)
else:
result = self._fdisconnect(cback, subscribers, _disconnect,
notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._disconnect(subscribers, cback)
result = None
return result | Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be removed from the set
:returns: ``None`` or the value returned by the corresponding wrapper | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L264-L290 | [
"def _disconnect(self, subscribers, cback):\n if cback in subscribers:\n subscribers.remove(cback)\n"
] | class Signal:
"""The core class. It collects subscribers that can be either normal
"callable" or coroutine/awaitable generator functions. The subscription is
managed using the `connect`:meth: and `disconnect`:meth: methods. The
notification is done by executing the `notify`:meth: method.
It can be used either as a standalone event or in the body of another class
which uses `~.user.SignalAndHandlerInitMeta`:class: metaclass, with the
help of the `~.utils.signal`:func: decorator.
:param \*flags: any flags that can change the behavior of the signal
instance, see `~.utils.SignalOptions`:class: class
:keyword fconnect: an optional callable that wraps the `connect`:meth:
method
:keyword fdisconnect: an optional callable that wraps the
`disconnect`:meth: method
:keyword fnotify: an optional callable that wraps the
`notify`:meth: method
:keyword fvalidation: an optional validation callable used to ensure that
arguments passed to the `notify`:meth: invocation are those permitted
:keyword str name: optional name of the signal
:keyword loop: optional asyncio event loop to use
:keyword external: optional external signaller that extends the signal
:type external: `~.external.ExternalSignaller`:class:
:param \*\*additional_params: optional additional params that will be
stored in the instance
"""
_external_signaller = None
_name = None
_concurrent_handlers = False
FLAGS = SignalOptions
"""All the available handlers sort modes. See `~.utils.SignalOptions`.
"""
def __init__(self, *flags, fconnect=None, fdisconnect=None,
fnotify=None, fvalidation=None, name=None,
loop=None, external=None, **additional_params):
self.name = name
self.subscribers = MethodAwareWeakList()
"""A weak list containing the connected handlers"""
self.loop = loop or asyncio.get_event_loop()
self.instance_subscribers = weakref.WeakKeyDictionary()
self.external_signaller = external
self._fnotify = fnotify
self._fconnect = fconnect
self._fdisconnect = fdisconnect
self._set_fvalidation(fvalidation)
self._iproxies = weakref.WeakKeyDictionary()
if not all(isinstance(f, SignalOptions) for f in flags):
raise ValueError("``flags`` elements must be instances of "
"`SignalOptions")
if (SignalOptions.SORT_BOTTOMUP in flags and
SignalOptions.SORT_TOPDOWN in flags):
raise ValueError("Both sort modes specified in the flags")
elif not (SignalOptions.SORT_BOTTOMUP in flags or
SignalOptions.SORT_TOPDOWN in flags):
flags = flags + (SignalOptions.SORT_BOTTOMUP,)
self.flags = flags
self.additional_params = additional_params
"""additional parameter passed at construction time"""
def __get__(self, instance, owner):
if instance is None:
# just a silly trick to get some better autodoc docs
if (self._fvalidation is not None and
'sphinx.ext.autodoc' in sys.modules):
result = self._fvalidation
else:
result = self
else:
if instance not in self._iproxies:
self._iproxies[instance] = InstanceProxy(self, instance)
result = self._iproxies[instance]
return result
def __repr__(self):
return ("<%s with name %r, len: %d>" % (
self.__class__.__name__, self.name, len(self.subscribers)
))
def _connect(self, subscribers, cback):
if cback not in subscribers:
subscribers.append(cback)
def _disconnect(self, subscribers, cback):
if cback in subscribers:
subscribers.remove(cback)
def _find_indent(self, doct):
lines = doct.splitlines()
for l in lines:
match = re.match('^[ ]+', l)
if match is not None:
return len(match.group(0))
return 0
def _loop_from_instance(self, instance):
if instance is None:
loop = self.loop
else:
loop = self.__get__(instance).loop
return loop
def _notify_one(self, instance, cback, *args, **kwargs):
loop = self._loop_from_instance(instance)
return self.prepare_notification(
subscribers=(cback,), instance=instance,
loop=loop).run(*args, **kwargs)
def _set_fvalidation(self, value):
self._fvalidation = value
if value is not None:
if value.__doc__ is None:
doc = ''
indent = 0
else:
doc = value.__doc__
indent = self._find_indent(doc)
sig_doc = textwrap.indent(SIGN_DOC_TEMPLATE, ' ' * indent)
value.__doc__ = self.__doc__ = doc + sig_doc
def connect(self, cback, subscribers=None, instance=None):
"""Add a function or a method as an handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
def _connect(cback):
self._connect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fconnect(instance, cback, subscribers,
_connect, notify)
else:
result = self._fconnect(cback, subscribers, _connect, notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._connect(subscribers, cback)
result = None
return result
def clear(self):
"""Remove all the connected handlers"""
self.subscribers.clear()
def ext_publish(self, instance, loop, *args, **kwargs):
"""If 'external_signaller' is defined, calls its publish method to
notify external event systems.
This is for internal usage only, but it's documented because it's part
of the interface with external notification systems.
"""
if self.external_signaller is not None:
# Assumes that the loop is managed by the external handler
return self.external_signaller.publish_signal(self, instance, loop,
args, kwargs)
@property
def external_signaller(self):
"""The registered `~.external.ExternalSignaller`:class:."""
return self._external_signaller
@external_signaller.setter
def external_signaller(self, value):
if value is not None:
assert isinstance(value, ExternalSignaller)
self._external_signaller = value
if self._name and value:
value.register_signal(self, self._name)
@property
def name(self):
"""The *name* of the signal used in conjunction with external
notification systems."""
return self._name
@name.setter
def name(self, value):
self._name = value
if value is not None and self._external_signaller:
self._external_signaller.register_signal(self, value)
def notify(self, *args, **kwargs):
"""Call all the registered handlers with the arguments passed.
:returns: an instance of `~.utils.MultipleResults`:class: or the
result of the execution of the corresponding wrapper function
"""
return self.prepare_notification().run(*args, **kwargs)
__call__ = notify
def prepare_notification(self, *, subscribers=None, instance=None,
loop=None, notify_external=True):
"""Sets up and configures an `~.utils.Executor`:class: instance."""
# merge callbacks added to the class level with those added to the
# instance, giving the formers precedence while preserving overall
# order
self_subscribers = self.subscribers.copy()
# add in callbacks declared in the main class body and marked with
# @handler
if (instance is not None and self.name and
isinstance(instance.__class__, SignalAndHandlerInitMeta)):
class_handlers = type(instance)._get_class_handlers(
self.name, instance)
for ch in class_handlers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if ch not in self_subscribers:
self_subscribers.append(ch)
# add in the other instance level callbacks added at runtime
if subscribers is not None:
for el in subscribers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if el not in self_subscribers:
self_subscribers.append(el)
loop = loop or self.loop
# maybe do a round of external publishing
if notify_external and self.external_signaller is not None:
self_subscribers.append(partial(self.ext_publish, instance, loop))
if self._fnotify is None:
fnotify = None
else:
if instance is None:
fnotify = self._fnotify
else:
fnotify = types.MethodType(self._fnotify, instance)
validator = self._fvalidation
if validator is not None and instance is not None:
validator = types.MethodType(validator, instance)
return Executor(self_subscribers, owner=self,
concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
loop=loop, exec_wrapper=fnotify,
fvalidation=validator)
def on_connect(self, fconnect):
"""On connect optional wrapper decorator.
:param fconnect: the callable to install as `connect`:meth: wrapper
:returns: the signal
"""
self._fconnect = fconnect
return self
def on_disconnect(self, fdisconnect):
"""On disconnect optional wrapper decorator.
:param fdisconnect: the callable to install as `disconnect`:meth:
wrapper
:returns: the signal
"""
self._fdisconnect = fdisconnect
return self
def on_notify(self, fnotify):
"""On notify optional wrapper decorator.
:param fnotify: the callable to install as `notify`:meth: wrapper
:returns: the signal
"""
self._fnotify = fnotify
return self
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | Signal.ext_publish | python | def ext_publish(self, instance, loop, *args, **kwargs):
if self.external_signaller is not None:
# Assumes that the loop is managed by the external handler
return self.external_signaller.publish_signal(self, instance, loop,
args, kwargs) | If 'external_signaller' is defined, calls its publish method to
notify external event systems.
This is for internal usage only, but it's documented because it's part
of the interface with external notification systems. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L292-L302 | null | class Signal:
"""The core class. It collects subscribers that can be either normal
"callable" or coroutine/awaitable generator functions. The subscription is
managed using the `connect`:meth: and `disconnect`:meth: methods. The
notification is done by executing the `notify`:meth: method.
It can be used either as a standalone event or in the body of another class
which uses `~.user.SignalAndHandlerInitMeta`:class: metaclass, with the
help of the `~.utils.signal`:func: decorator.
:param \*flags: any flags that can change the behavior of the signal
instance, see `~.utils.SignalOptions`:class: class
:keyword fconnect: an optional callable that wraps the `connect`:meth:
method
:keyword fdisconnect: an optional callable that wraps the
`disconnect`:meth: method
:keyword fnotify: an optional callable that wraps the
`notify`:meth: method
:keyword fvalidation: an optional validation callable used to ensure that
arguments passed to the `notify`:meth: invocation are those permitted
:keyword str name: optional name of the signal
:keyword loop: optional asyncio event loop to use
:keyword external: optional external signaller that extends the signal
:type external: `~.external.ExternalSignaller`:class:
:param \*\*additional_params: optional additional params that will be
stored in the instance
"""
_external_signaller = None
_name = None
_concurrent_handlers = False
FLAGS = SignalOptions
"""All the available handlers sort modes. See `~.utils.SignalOptions`.
"""
def __init__(self, *flags, fconnect=None, fdisconnect=None,
fnotify=None, fvalidation=None, name=None,
loop=None, external=None, **additional_params):
self.name = name
self.subscribers = MethodAwareWeakList()
"""A weak list containing the connected handlers"""
self.loop = loop or asyncio.get_event_loop()
self.instance_subscribers = weakref.WeakKeyDictionary()
self.external_signaller = external
self._fnotify = fnotify
self._fconnect = fconnect
self._fdisconnect = fdisconnect
self._set_fvalidation(fvalidation)
self._iproxies = weakref.WeakKeyDictionary()
if not all(isinstance(f, SignalOptions) for f in flags):
raise ValueError("``flags`` elements must be instances of "
"`SignalOptions")
if (SignalOptions.SORT_BOTTOMUP in flags and
SignalOptions.SORT_TOPDOWN in flags):
raise ValueError("Both sort modes specified in the flags")
elif not (SignalOptions.SORT_BOTTOMUP in flags or
SignalOptions.SORT_TOPDOWN in flags):
flags = flags + (SignalOptions.SORT_BOTTOMUP,)
self.flags = flags
self.additional_params = additional_params
"""additional parameter passed at construction time"""
def __get__(self, instance, owner):
if instance is None:
# just a silly trick to get some better autodoc docs
if (self._fvalidation is not None and
'sphinx.ext.autodoc' in sys.modules):
result = self._fvalidation
else:
result = self
else:
if instance not in self._iproxies:
self._iproxies[instance] = InstanceProxy(self, instance)
result = self._iproxies[instance]
return result
def __repr__(self):
return ("<%s with name %r, len: %d>" % (
self.__class__.__name__, self.name, len(self.subscribers)
))
def _connect(self, subscribers, cback):
if cback not in subscribers:
subscribers.append(cback)
def _disconnect(self, subscribers, cback):
if cback in subscribers:
subscribers.remove(cback)
def _find_indent(self, doct):
lines = doct.splitlines()
for l in lines:
match = re.match('^[ ]+', l)
if match is not None:
return len(match.group(0))
return 0
def _loop_from_instance(self, instance):
if instance is None:
loop = self.loop
else:
loop = self.__get__(instance).loop
return loop
def _notify_one(self, instance, cback, *args, **kwargs):
loop = self._loop_from_instance(instance)
return self.prepare_notification(
subscribers=(cback,), instance=instance,
loop=loop).run(*args, **kwargs)
def _set_fvalidation(self, value):
self._fvalidation = value
if value is not None:
if value.__doc__ is None:
doc = ''
indent = 0
else:
doc = value.__doc__
indent = self._find_indent(doc)
sig_doc = textwrap.indent(SIGN_DOC_TEMPLATE, ' ' * indent)
value.__doc__ = self.__doc__ = doc + sig_doc
def connect(self, cback, subscribers=None, instance=None):
"""Add a function or a method as an handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
def _connect(cback):
self._connect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fconnect(instance, cback, subscribers,
_connect, notify)
else:
result = self._fconnect(cback, subscribers, _connect, notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._connect(subscribers, cback)
result = None
return result
def clear(self):
"""Remove all the connected handlers"""
self.subscribers.clear()
def disconnect(self, cback, subscribers=None, instance=None):
"""Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be removed from the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fdisconnect is not None:
def _disconnect(cback):
self._disconnect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fdisconnect(instance, cback, subscribers,
_disconnect, notify)
else:
result = self._fdisconnect(cback, subscribers, _disconnect,
notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._disconnect(subscribers, cback)
result = None
return result
@property
def external_signaller(self):
"""The registered `~.external.ExternalSignaller`:class:."""
return self._external_signaller
@external_signaller.setter
def external_signaller(self, value):
if value is not None:
assert isinstance(value, ExternalSignaller)
self._external_signaller = value
if self._name and value:
value.register_signal(self, self._name)
@property
def name(self):
"""The *name* of the signal used in conjunction with external
notification systems."""
return self._name
@name.setter
def name(self, value):
self._name = value
if value is not None and self._external_signaller:
self._external_signaller.register_signal(self, value)
def notify(self, *args, **kwargs):
"""Call all the registered handlers with the arguments passed.
:returns: an instance of `~.utils.MultipleResults`:class: or the
result of the execution of the corresponding wrapper function
"""
return self.prepare_notification().run(*args, **kwargs)
__call__ = notify
def prepare_notification(self, *, subscribers=None, instance=None,
loop=None, notify_external=True):
"""Sets up and configures an `~.utils.Executor`:class: instance."""
# merge callbacks added to the class level with those added to the
# instance, giving the formers precedence while preserving overall
# order
self_subscribers = self.subscribers.copy()
# add in callbacks declared in the main class body and marked with
# @handler
if (instance is not None and self.name and
isinstance(instance.__class__, SignalAndHandlerInitMeta)):
class_handlers = type(instance)._get_class_handlers(
self.name, instance)
for ch in class_handlers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if ch not in self_subscribers:
self_subscribers.append(ch)
# add in the other instance level callbacks added at runtime
if subscribers is not None:
for el in subscribers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if el not in self_subscribers:
self_subscribers.append(el)
loop = loop or self.loop
# maybe do a round of external publishing
if notify_external and self.external_signaller is not None:
self_subscribers.append(partial(self.ext_publish, instance, loop))
if self._fnotify is None:
fnotify = None
else:
if instance is None:
fnotify = self._fnotify
else:
fnotify = types.MethodType(self._fnotify, instance)
validator = self._fvalidation
if validator is not None and instance is not None:
validator = types.MethodType(validator, instance)
return Executor(self_subscribers, owner=self,
concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
loop=loop, exec_wrapper=fnotify,
fvalidation=validator)
def on_connect(self, fconnect):
"""On connect optional wrapper decorator.
:param fconnect: the callable to install as `connect`:meth: wrapper
:returns: the signal
"""
self._fconnect = fconnect
return self
def on_disconnect(self, fdisconnect):
"""On disconnect optional wrapper decorator.
:param fdisconnect: the callable to install as `disconnect`:meth:
wrapper
:returns: the signal
"""
self._fdisconnect = fdisconnect
return self
def on_notify(self, fnotify):
"""On notify optional wrapper decorator.
:param fnotify: the callable to install as `notify`:meth: wrapper
:returns: the signal
"""
self._fnotify = fnotify
return self
|
metapensiero/metapensiero.signal | src/metapensiero/signal/core.py | Signal.prepare_notification | python | def prepare_notification(self, *, subscribers=None, instance=None,
loop=None, notify_external=True):
# merge callbacks added to the class level with those added to the
# instance, giving the formers precedence while preserving overall
# order
self_subscribers = self.subscribers.copy()
# add in callbacks declared in the main class body and marked with
# @handler
if (instance is not None and self.name and
isinstance(instance.__class__, SignalAndHandlerInitMeta)):
class_handlers = type(instance)._get_class_handlers(
self.name, instance)
for ch in class_handlers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if ch not in self_subscribers:
self_subscribers.append(ch)
# add in the other instance level callbacks added at runtime
if subscribers is not None:
for el in subscribers:
# eventual methods are ephemeral and normally the following
# condition would always be True for methods but the dict used
# has logic to take that into account
if el not in self_subscribers:
self_subscribers.append(el)
loop = loop or self.loop
# maybe do a round of external publishing
if notify_external and self.external_signaller is not None:
self_subscribers.append(partial(self.ext_publish, instance, loop))
if self._fnotify is None:
fnotify = None
else:
if instance is None:
fnotify = self._fnotify
else:
fnotify = types.MethodType(self._fnotify, instance)
validator = self._fvalidation
if validator is not None and instance is not None:
validator = types.MethodType(validator, instance)
return Executor(self_subscribers, owner=self,
concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
loop=loop, exec_wrapper=fnotify,
fvalidation=validator) | Sets up a and configures an `~.utils.Executor`:class: instance. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L339-L383 | null | class Signal:
"""The core class. It collects subscribers that can be either normal
"callable" or coroutine/awaitable generator functions. The subscription is
managed using the `connect`:meth: and `disconnect`:meth: methods. The
notification is done by executing the `notify`:meth: method.
It can be used either as a standalone event or in the body of another class
which uses `~.user.SignalAndHandlerInitMeta`:class: metaclass, with the
help of the `~.utils.signal`:func: decorator.
:param \*flags: any flags that can change the behavior of the signal
instance, see `~.utils.SignalOptions`:class: class
:keyword fconnect: an optional callable that wraps the `connect`:meth:
method
:keyword fdisconnect: an optional callable that wraps the
`disconnect`:meth: method
:keyword fnotify: an optional callable that wraps the
`notify`:meth: method
:keyword fvalidation: an optional validation callable used to ensure that
arguments passed to the `notify`:meth: invocation are those permitted
:keyword str name: optional name of the signal
:keyword loop: optional asyncio event loop to use
:keyword external: optional external signaller that extends the signal
:type external: `~.external.ExternalSignaller`:class:
:param \*\*additional_params: optional additional params that will be
stored in the instance
"""
_external_signaller = None
_name = None
_concurrent_handlers = False
FLAGS = SignalOptions
"""All the available handlers sort modes. See `~.utils.SignalOptions`.
"""
def __init__(self, *flags, fconnect=None, fdisconnect=None,
fnotify=None, fvalidation=None, name=None,
loop=None, external=None, **additional_params):
self.name = name
self.subscribers = MethodAwareWeakList()
"""A weak list containing the connected handlers"""
self.loop = loop or asyncio.get_event_loop()
self.instance_subscribers = weakref.WeakKeyDictionary()
self.external_signaller = external
self._fnotify = fnotify
self._fconnect = fconnect
self._fdisconnect = fdisconnect
self._set_fvalidation(fvalidation)
self._iproxies = weakref.WeakKeyDictionary()
if not all(isinstance(f, SignalOptions) for f in flags):
raise ValueError("``flags`` elements must be instances of "
"`SignalOptions")
if (SignalOptions.SORT_BOTTOMUP in flags and
SignalOptions.SORT_TOPDOWN in flags):
raise ValueError("Both sort modes specified in the flags")
elif not (SignalOptions.SORT_BOTTOMUP in flags or
SignalOptions.SORT_TOPDOWN in flags):
flags = flags + (SignalOptions.SORT_BOTTOMUP,)
self.flags = flags
self.additional_params = additional_params
"""additional parameter passed at construction time"""
def __get__(self, instance, owner):
if instance is None:
# just a silly trick to get some better autodoc docs
if (self._fvalidation is not None and
'sphinx.ext.autodoc' in sys.modules):
result = self._fvalidation
else:
result = self
else:
if instance not in self._iproxies:
self._iproxies[instance] = InstanceProxy(self, instance)
result = self._iproxies[instance]
return result
def __repr__(self):
return ("<%s with name %r, len: %d>" % (
self.__class__.__name__, self.name, len(self.subscribers)
))
def _connect(self, subscribers, cback):
if cback not in subscribers:
subscribers.append(cback)
def _disconnect(self, subscribers, cback):
if cback in subscribers:
subscribers.remove(cback)
def _find_indent(self, doct):
lines = doct.splitlines()
for l in lines:
match = re.match('^[ ]+', l)
if match is not None:
return len(match.group(0))
return 0
def _loop_from_instance(self, instance):
if instance is None:
loop = self.loop
else:
loop = self.__get__(instance).loop
return loop
def _notify_one(self, instance, cback, *args, **kwargs):
loop = self._loop_from_instance(instance)
return self.prepare_notification(
subscribers=(cback,), instance=instance,
loop=loop).run(*args, **kwargs)
def _set_fvalidation(self, value):
self._fvalidation = value
if value is not None:
if value.__doc__ is None:
doc = ''
indent = 0
else:
doc = value.__doc__
indent = self._find_indent(doc)
sig_doc = textwrap.indent(SIGN_DOC_TEMPLATE, ' ' * indent)
value.__doc__ = self.__doc__ = doc + sig_doc
def connect(self, cback, subscribers=None, instance=None):
"""Add a function or a method as an handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
def _connect(cback):
self._connect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fconnect(instance, cback, subscribers,
_connect, notify)
else:
result = self._fconnect(cback, subscribers, _connect, notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._connect(subscribers, cback)
result = None
return result
def clear(self):
"""Remove all the connected handlers"""
self.subscribers.clear()
def disconnect(self, cback, subscribers=None, instance=None):
"""Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fdisconnect is not None:
def _disconnect(cback):
self._disconnect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fdisconnect(instance, cback, subscribers,
_disconnect, notify)
else:
result = self._fdisconnect(cback, subscribers, _disconnect,
notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._disconnect(subscribers, cback)
result = None
return result
def ext_publish(self, instance, loop, *args, **kwargs):
"""If 'external_signaller' is defined, calls it's publish method to
notify external event systems.
This is for internal usage only, but it's doumented because it's part
of the interface with external notification systems.
"""
if self.external_signaller is not None:
# Assumes that the loop is managed by the external handler
return self.external_signaller.publish_signal(self, instance, loop,
args, kwargs)
@property
def external_signaller(self):
"""The registered `~.external.ExternalSignaller`:class:."""
return self._external_signaller
@external_signaller.setter
def external_signaller(self, value):
if value is not None:
assert isinstance(value, ExternalSignaller)
self._external_signaller = value
if self._name and value:
value.register_signal(self, self._name)
@property
def name(self):
"""The *name* of the signal used in conjunction with external
notification systems."""
return self._name
@name.setter
def name(self, value):
self._name = value
if value is not None and self._external_signaller:
self._external_signaller.register_signal(self, value)
def notify(self, *args, **kwargs):
"""Call all the registered handlers with the arguments passed.
:returns: an instance of `~.utils.MultipleResults`:class: or the
result of the execution of the corresponding wrapper function
"""
return self.prepare_notification().run(*args, **kwargs)
__call__ = notify
def on_connect(self, fconnect):
"""On connect optional wrapper decorator.
:param fconnect: the callable to install as `connect`:meth: wrapper
:returns: the signal
"""
self._fconnect = fconnect
return self
def on_disconnect(self, fdisconnect):
"""On disconnect optional wrapper decorator.
:param fdisconnect: the callable to install as `disconnect`:meth:
wrapper
:returns: the signal
"""
self._fdisconnect = fdisconnect
return self
def on_notify(self, fnotify):
"""On notify optional wrapper decorator.
:param fnotify: the callable to install as `notify`:meth: wrapper
:returns: the signal
"""
self._fnotify = fnotify
return self
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalNameHandlerDecorator.is_handler | python | def is_handler(cls, name, value):
signal_name = False
config = None
if callable(value) and hasattr(value, SPEC_CONTAINER_MEMBER_NAME):
spec = getattr(value, SPEC_CONTAINER_MEMBER_NAME)
if spec['kind'] == 'handler':
signal_name = spec['name']
config = spec['config']
return signal_name, config | Detect an handler and return its wanted signal name. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L36-L45 | null | class SignalNameHandlerDecorator(object):
"A decorator used to mark a method as handler for a particular signal."
def __init__(self, signal_name, **config):
self.signal_name = signal_name
self.config = config
def __call__(self, method):
setattr(method, SPEC_CONTAINER_MEMBER_NAME,
{'kind': 'handler', 'name': self.signal_name,
'config': self.config})
return method
@classmethod
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | InheritanceToolsMeta._build_inheritance_chain | python | def _build_inheritance_chain(cls, bases, *names, merge=False):
result = []
for name in names:
maps = []
for base in bases:
bmap = getattr(base, name, None)
if bmap is not None:
assert isinstance(bmap, (dict, ChainMap))
if len(bmap):
if isinstance(bmap, ChainMap):
maps.extend(bmap.maps)
else:
maps.append(bmap)
result.append(ChainMap({}, *maps))
if merge:
result = [dict(map) for map in result]
if len(names) == 1:
return result[0]
return result | For all of the names build a ChainMap containing a map for every
base class. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L55-L75 | null | class InheritanceToolsMeta(ABCMeta):
"""A reusable metaclass with method to deal with constructing data from
elements contained in one class body and in its bases."""
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta._build_instance_handler_mapping | python | def _build_instance_handler_mapping(cls, instance, handle_d):
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res | For every unbound handler, get the bound version. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L134-L143 | null | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta._check_local_handlers | python | def _check_local_handlers(cls, signals, handlers, namespace, configs):
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name) | For every marked handler, see if there is a suitable signal. If
not, raise an error. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L145-L157 | null | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta._find_local_signals | python | def _find_local_signals(cls, signals, namespace):
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue | Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L159-L178 | null | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta._find_local_handlers | python | def _find_local_handlers(cls, handlers, namespace, configs):
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name | Add name info to every "local" (present in the body of this class)
handler and add it to the mapping. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L180-L188 | null | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta._get_class_handlers | python | def _get_class_handlers(cls, signal_name, instance):
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers] | Returns the handlers registered at class level. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L190-L194 | null | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta._sort_handlers | python | def _sort_handlers(cls, signals, handlers, configs):
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal | Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L196-L230 | null | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/user.py | SignalAndHandlerInitMeta.instance_signals_and_handlers | python | def instance_signals_and_handlers(cls, instance):
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers | Calculate per-instance signals and handlers. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/user.py#L232-L240 | [
"def _build_instance_handler_mapping(cls, instance, handle_d):\n \"\"\"For every unbound handler, get the bound version.\"\"\"\n res = {}\n for member_name, sig_name in handle_d.items():\n if sig_name in res:\n sig_handlers = res[sig_name]\n else:\n sig_handlers = res[sig_name] = []\n sig_handlers.append(getattr(instance, member_name))\n return res\n"
] | class SignalAndHandlerInitMeta(InheritanceToolsMeta):
"""A metaclass for registering signals and handlers."""
_is_handler = SignalNameHandlerDecorator.is_handler
_external_signaller_and_handler = None
"""Optional :class:`~.atom.ExternalSignaller` instance that connects to
external event systems.
"""
_signals = None
"""Container for signal definitions."""
_signal_handlers = None
"""Container for handlers definitions."""
_signal_handlers_sorted = None
"""Contains a Dict[signal_name, handlers] with sorted handlers."""
_signal_handlers_configs = None
"""Container for additional handler config."""
_registered_classes = WeakSet()
"""Store a weak ref of the classes already managed."""
def __init__(cls, name, bases, namespace):
if cls not in cls._registered_classes:
cls._register_class(bases, namespace)
cls._registered_classes.add(cls)
super().__init__(name, bases, namespace)
def _register_class(cls, bases, namespace):
# collect signals and handlers from the bases, overwriting them from
# right to left
signaller = cls._external_signaller_and_handler
signals, handlers, configs = cls._build_inheritance_chain(
bases, '_signals', '_signal_handlers', '_signal_handlers_configs')
cls._find_local_signals(signals, namespace)
cls._find_local_handlers(handlers, namespace, configs)
cls._signal_handlers_sorted = cls._sort_handlers(
signals, handlers, configs)
configs = dict(configs)
if signaller is not None:
try:
signaller.register_class(
cls, bases, namespace, signals, handlers)
except Exception as cause:
new = SignalError(("Error while registering class "
"{cls!r}").format(cls=cls))
raise new from cause
cls._check_local_handlers(signals, handlers, namespace, configs)
cls._signals = signals
cls._signal_handlers = handlers
cls._signal_handlers_configs = configs
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take in account the case where a new
# method with the same name of an handler in a base class is
# present in this class but it isn't an handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name)
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal
@classmethod
def with_external(mclass, external, name=None):
assert isinstance(external, ExternalSignallerAndHandler)
name = name or "ExternalSignalAndHandlerInitMeta"
return type(name, (mclass,),
{'_external_signaller_and_handler': external})
|
metapensiero/metapensiero.signal | src/metapensiero/signal/utils.py | signal | python | def signal(*args, **kwargs):
from .core import Signal
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return Signal(fvalidation=args[0])
else:
sig = Signal(*args, **kwargs)
def wrapper(fvalidation):
sig._set_fvalidation(fvalidation)
return sig
return wrapper | A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc''' | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/utils.py#L248-L274 | null | # -*- coding: utf-8 -*-
# :Project: metapensiero.signal -- utilities
# :Created: sab 21 ott 2017 12:25:42 CEST
# :Author: Alberto Berti <alberto@metapensiero.it>
# :License: GNU General Public License version 3 or later
# :Copyright: Copyright © 2015, 2016, 2017, 2018 Alberto Berti
#
from collections.abc import Awaitable
import asyncio
from enum import Enum
import inspect
import logging
import weakref
logger = logging.getLogger(__name__)
class Executor:
"""A configurable executor of callable endpoints.
:param owner: an object to reference as owner, or producer
:param endpoints: an iterable containing the handlers to execute
:keyword bool concurrent: optional flag indicating if the *asynchronous*
handlers have to be executed concurrently or sequentially (the default)
:keyword loop: optional loop
:keyword exec_wrapper: an optional callable to call as a wrapper
:keyword adapt_params: a flag indicating if the executor should try to
adapt available call parameters to those accepted by the endpoints.
``True`` by default
:keyword fvalidation: an optional callable that will be used to validate
the arguments passed to `~.run()`. If the args aren't compatible with
the signature of such callable or if the callable returns ``False``
execution will be aborted by raising an `~.ExecutionError`
"""
def __init__(self, endpoints, *, owner=None, concurrent=False, loop=None,
exec_wrapper=None, adapt_params=True, fvalidation=None):
self.owner = owner
self.endpoints = list(endpoints)
self.concurrent = concurrent
self.loop = loop
self.exec_wrapper = exec_wrapper
self.adapt_params = adapt_params
if fvalidation is None:
self.fvalidation = None
else:
if callable(fvalidation):
self.fvalidation = fvalidation
else:
raise ExecutionError("Wrong value for ``fvalidation``")
def _adapt_call_params(self, func, args, kwargs):
signature = inspect.signature(func, follow_wrapped=False)
if (not inspect.ismethod(func) and
getattr(func, '__signature__', None) is None):
setattr(func, '__signature__', signature)
has_varkw = any(p.kind == inspect.Parameter.VAR_KEYWORD
for n, p in signature.parameters.items())
if has_varkw:
bind = signature.bind_partial(*args, **kwargs)
else:
bind = signature.bind_partial(*args,
**{k: v for k, v in kwargs.items()
if k in signature.parameters})
bind.apply_defaults()
return bind
def exec_all_endpoints(self, *args, **kwargs):
"""Execute each passed endpoint and collect the results. If a result
is anoter `MultipleResults` it will extend the results with those
contained therein. If the result is `NoResult`, skip the addition."""
results = []
for handler in self.endpoints:
if isinstance(handler, weakref.ref):
handler = handler()
if self.adapt_params:
bind = self._adapt_call_params(handler, args, kwargs)
res = handler(*bind.args, **bind.kwargs)
else:
res = handler(*args, **kwargs)
if isinstance(res, MultipleResults):
if res.done:
results += res.results
else:
results += res._results
elif res is not NoResult:
results.append(res)
return MultipleResults(results, concurrent=self.concurrent, owner=self)
def run(self, *args, **kwargs):
"""Call all the registered handlers with the arguments passed.
If this signal is a class member, call also the handlers registered
at class-definition time. If an external publish function is
supplied, call it with the provided arguments.
:returns: an instance of `~.utils.MultipleResults`
"""
if self.fvalidation is not None:
try:
if self.fvalidation(*args, **kwargs) is False:
raise ExecutionError("Validation returned ``False``")
except Exception as e:
if __debug__:
logger.exception("Validation failed")
else:
logger.error("Validation failed")
raise ExecutionError(
"The validation of the arguments specified to ``run()`` "
"has failed") from e
try:
if self.exec_wrapper is None:
return self.exec_all_endpoints(*args, **kwargs)
else:
# if a exec wrapper is defined, defer notification to it,
# a callback to execute the default notification process
result = self.exec_wrapper(self.endpoints,
self.exec_all_endpoints,
*args, **kwargs)
if inspect.isawaitable(result):
result = pull_result(result)
return result
except Exception as e:
if __debug__:
logger.exception("Error while executing handlers")
else:
logger.error("Error while executing handlers")
raise ExecutionError("Error while executing handlers") from e
__call__ = run
class MultipleResults(Awaitable):
"""An utility class containing multiple results, either *synchronous* or
*asynchronous*. It accepts an iterable as main parameter that can contain
actual values or *awaitables*. If any of the latter is present, it will be
needed to ``await`` on the instance to obtain the complete set of values.
When that is done the final results are available on the ``results``
member and are also returned by the ``await`` expression.
It is possible to choose how to evaluate the *awaitables*, either
concurrently or sequentially.
:param iterable: the incoming iterable containing the results
:keyword concurrent: a flag indicating if the evaluation of the
*awaitables* has to be done concurrently or sequentially
:keyword owner: the optional creator instance
"""
results = None
"""Contains the final results."""
done = False
"""It's ``True`` when the results are ready for consumption."""
has_async = False
"""``True`` if the original result set contained awaitables."""
concurrent = False
"""``True`` if the evaluation of the awaitables is done concurrently using
`asyncio.gather`, it's done sequentially by default."""
owner = None
"""The optional creator of the instance passed in as a parameter, usually
the `~.atom.Notifier` that created it."""
def __init__(self, iterable=None, *, concurrent=False, owner=None):
if owner is not None:
self.owner = owner
self.concurrent = concurrent
self._results = list(iterable)
self._coro_ixs = tuple(ix for ix, e in enumerate(self._results)
if inspect.isawaitable(e))
if self._coro_ixs:
self.has_async = True
else:
self.results = tuple(self._results)
self.done = True
self.has_async = False
def __await__(self):
task = self._completion_task(
map(self._results.__getitem__, self._coro_ixs),
concurrent=self.concurrent)
return task.__await__()
async def _completion_task(self, coro_iter=None, concurrent=False):
if not self.done and coro_iter is not None:
if concurrent:
results = await asyncio.gather(*coro_iter)
for ix, res in zip(self._coro_ixs, results):
self._results[ix] = res
else:
for ix, coro in zip(self._coro_ixs, coro_iter):
res = await coro
self._results[ix] = res
self.results = tuple(self._results)
del self._results
self.done = True
return self.results
class TokenClass:
"""A token class whose instances always generate a ``False`` bool."""
def __bool__(self):
return False
NoResult = TokenClass()
"""A value that is returned by a callable when there's no return value and
when ``None`` can be considered a value."""
async def pull_result(result):
"""`An utility coroutine generator to `await`` on an awaitable until the
result is not an awaitable anymore, and return that.
:param result: an awaitable
:returns: a value that isn't an awaitable
"""
while inspect.isawaitable(result):
result = await result
return result
class SignalError(Exception):
"""Generic error raised during signal operations."""
class ExecutionError(SignalError):
"""Error raised during executor operations."""
class SignalOptions(Enum):
"""The flags that change how the signal operates.
"""
SORT_BOTTOMUP = 1
"""The class level handlers are sorted from the "oldest" to the
"newest". Handlers defined in the ancestor classes will be executed before
of those on child classes."""
SORT_TOPDOWN = 2
"""The class level handlers are sorted from the "newest" to the
"oldest". Handlers defined in the child classes will be executed before
of those on ancestor classes."""
EXEC_CONCURRENT = 3
"""Execute the subscribers concurrently by using an ``asyncio.gather()``
call."""
|
metapensiero/metapensiero.signal | src/metapensiero/signal/utils.py | Executor.exec_all_endpoints | python | def exec_all_endpoints(self, *args, **kwargs):
results = []
for handler in self.endpoints:
if isinstance(handler, weakref.ref):
handler = handler()
if self.adapt_params:
bind = self._adapt_call_params(handler, args, kwargs)
res = handler(*bind.args, **bind.kwargs)
else:
res = handler(*args, **kwargs)
if isinstance(res, MultipleResults):
if res.done:
results += res.results
else:
results += res._results
elif res is not NoResult:
results.append(res)
return MultipleResults(results, concurrent=self.concurrent, owner=self) | Execute each passed endpoint and collect the results. If a result
is anoter `MultipleResults` it will extend the results with those
contained therein. If the result is `NoResult`, skip the addition. | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/utils.py#L70-L90 | [
"def _adapt_call_params(self, func, args, kwargs):\n signature = inspect.signature(func, follow_wrapped=False)\n if (not inspect.ismethod(func) and\n getattr(func, '__signature__', None) is None):\n setattr(func, '__signature__', signature)\n has_varkw = any(p.kind == inspect.Parameter.VAR_KEYWORD\n for n, p in signature.parameters.items())\n if has_varkw:\n bind = signature.bind_partial(*args, **kwargs)\n else:\n bind = signature.bind_partial(*args,\n **{k: v for k, v in kwargs.items()\n if k in signature.parameters})\n bind.apply_defaults()\n return bind\n"
] | class Executor:
"""A configurable executor of callable endpoints.
:param owner: an object to reference as owner, or producer
:param endpoints: an iterable containing the handlers to execute
:keyword bool concurrent: optional flag indicating if the *asynchronous*
handlers have to be executed concurrently or sequentially (the default)
:keyword loop: optional loop
:keyword exec_wrapper: an optional callable to call as a wrapper
:keyword adapt_params: a flag indicating if the executor should try to
adapt available call parameters to those accepted by the endpoints.
``True`` by default
:keyword fvalidation: an optional callable that will be used to validate
the arguments passed to `~.run()`. If the args aren't compatible with
the signature of such callable or if the callable returns ``False``
execution will be aborted by raising an `~.ExecutionError`
"""
def __init__(self, endpoints, *, owner=None, concurrent=False, loop=None,
exec_wrapper=None, adapt_params=True, fvalidation=None):
self.owner = owner
self.endpoints = list(endpoints)
self.concurrent = concurrent
self.loop = loop
self.exec_wrapper = exec_wrapper
self.adapt_params = adapt_params
if fvalidation is None:
self.fvalidation = None
else:
if callable(fvalidation):
self.fvalidation = fvalidation
else:
raise ExecutionError("Wrong value for ``fvalidation``")
def _adapt_call_params(self, func, args, kwargs):
signature = inspect.signature(func, follow_wrapped=False)
if (not inspect.ismethod(func) and
getattr(func, '__signature__', None) is None):
setattr(func, '__signature__', signature)
has_varkw = any(p.kind == inspect.Parameter.VAR_KEYWORD
for n, p in signature.parameters.items())
if has_varkw:
bind = signature.bind_partial(*args, **kwargs)
else:
bind = signature.bind_partial(*args,
**{k: v for k, v in kwargs.items()
if k in signature.parameters})
bind.apply_defaults()
return bind
def run(self, *args, **kwargs):
"""Call all the registered handlers with the arguments passed.
If this signal is a class member, call also the handlers registered
at class-definition time. If an external publish function is
supplied, call it with the provided arguments.
:returns: an instance of `~.utils.MultipleResults`
"""
if self.fvalidation is not None:
try:
if self.fvalidation(*args, **kwargs) is False:
raise ExecutionError("Validation returned ``False``")
except Exception as e:
if __debug__:
logger.exception("Validation failed")
else:
logger.error("Validation failed")
raise ExecutionError(
"The validation of the arguments specified to ``run()`` "
"has failed") from e
try:
if self.exec_wrapper is None:
return self.exec_all_endpoints(*args, **kwargs)
else:
# if a exec wrapper is defined, defer notification to it,
# a callback to execute the default notification process
result = self.exec_wrapper(self.endpoints,
self.exec_all_endpoints,
*args, **kwargs)
if inspect.isawaitable(result):
result = pull_result(result)
return result
except Exception as e:
if __debug__:
logger.exception("Error while executing handlers")
else:
logger.error("Error while executing handlers")
raise ExecutionError("Error while executing handlers") from e
__call__ = run
|
metapensiero/metapensiero.signal | src/metapensiero/signal/utils.py | Executor.run | python | def run(self, *args, **kwargs):
if self.fvalidation is not None:
try:
if self.fvalidation(*args, **kwargs) is False:
raise ExecutionError("Validation returned ``False``")
except Exception as e:
if __debug__:
logger.exception("Validation failed")
else:
logger.error("Validation failed")
raise ExecutionError(
"The validation of the arguments specified to ``run()`` "
"has failed") from e
try:
if self.exec_wrapper is None:
return self.exec_all_endpoints(*args, **kwargs)
else:
# if a exec wrapper is defined, defer notification to it,
# a callback to execute the default notification process
result = self.exec_wrapper(self.endpoints,
self.exec_all_endpoints,
*args, **kwargs)
if inspect.isawaitable(result):
result = pull_result(result)
return result
except Exception as e:
if __debug__:
logger.exception("Error while executing handlers")
else:
logger.error("Error while executing handlers")
raise ExecutionError("Error while executing handlers") from e | Call all the registered handlers with the arguments passed.
If this signal is a class member, call also the handlers registered
at class-definition time. If an external publish function is
supplied, call it with the provided arguments.
:returns: an instance of `~.utils.MultipleResults` | train | https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/utils.py#L92-L129 | [
"def exec_all_endpoints(self, *args, **kwargs):\n \"\"\"Execute each passed endpoint and collect the results. If a result\n is anoter `MultipleResults` it will extend the results with those\n contained therein. If the result is `NoResult`, skip the addition.\"\"\"\n results = []\n for handler in self.endpoints:\n if isinstance(handler, weakref.ref):\n handler = handler()\n if self.adapt_params:\n bind = self._adapt_call_params(handler, args, kwargs)\n res = handler(*bind.args, **bind.kwargs)\n else:\n res = handler(*args, **kwargs)\n if isinstance(res, MultipleResults):\n if res.done:\n results += res.results\n else:\n results += res._results\n elif res is not NoResult:\n results.append(res)\n return MultipleResults(results, concurrent=self.concurrent, owner=self)\n"
] | class Executor:
"""A configurable executor of callable endpoints.
:param owner: an object to reference as owner, or producer
:param endpoints: an iterable containing the handlers to execute
:keyword bool concurrent: optional flag indicating if the *asynchronous*
handlers have to be executed concurrently or sequentially (the default)
:keyword loop: optional loop
:keyword exec_wrapper: an optional callable to call as a wrapper
:keyword adapt_params: a flag indicating if the executor should try to
adapt available call parameters to those accepted by the endpoints.
``True`` by default
:keyword fvalidation: an optional callable that will be used to validate
the arguments passed to `~.run()`. If the args aren't compatible with
the signature of such callable or if the callable returns ``False``
execution will be aborted by raising an `~.ExecutionError`
"""
def __init__(self, endpoints, *, owner=None, concurrent=False, loop=None,
exec_wrapper=None, adapt_params=True, fvalidation=None):
self.owner = owner
self.endpoints = list(endpoints)
self.concurrent = concurrent
self.loop = loop
self.exec_wrapper = exec_wrapper
self.adapt_params = adapt_params
if fvalidation is None:
self.fvalidation = None
else:
if callable(fvalidation):
self.fvalidation = fvalidation
else:
raise ExecutionError("Wrong value for ``fvalidation``")
def _adapt_call_params(self, func, args, kwargs):
signature = inspect.signature(func, follow_wrapped=False)
if (not inspect.ismethod(func) and
getattr(func, '__signature__', None) is None):
setattr(func, '__signature__', signature)
has_varkw = any(p.kind == inspect.Parameter.VAR_KEYWORD
for n, p in signature.parameters.items())
if has_varkw:
bind = signature.bind_partial(*args, **kwargs)
else:
bind = signature.bind_partial(*args,
**{k: v for k, v in kwargs.items()
if k in signature.parameters})
bind.apply_defaults()
return bind
def exec_all_endpoints(self, *args, **kwargs):
"""Execute each passed endpoint and collect the results. If a result
is anoter `MultipleResults` it will extend the results with those
contained therein. If the result is `NoResult`, skip the addition."""
results = []
for handler in self.endpoints:
if isinstance(handler, weakref.ref):
handler = handler()
if self.adapt_params:
bind = self._adapt_call_params(handler, args, kwargs)
res = handler(*bind.args, **bind.kwargs)
else:
res = handler(*args, **kwargs)
if isinstance(res, MultipleResults):
if res.done:
results += res.results
else:
results += res._results
elif res is not NoResult:
results.append(res)
return MultipleResults(results, concurrent=self.concurrent, owner=self)
__call__ = run
|
lvieirajr/mongorest | mongorest/collection.py | Collection.insert | python | def insert(self, **kwargs):
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors | Saves the Document to the database if it is valid.
Returns errors otherwise. | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L159-L184 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.update | python | def update(self, **kwargs):
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors | Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise. | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L187-L225 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.delete | python | def delete(self, **kwargs):
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
) | Deletes the document if it is saved in the collection. | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L228-L262 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.find_one | python | def find_one(cls, filter=None, *args, **kwargs):
return cls.collection.find_one(filter, *args, **kwargs) | Returns one document dict if one passes the filter.
Returns None otherwise. | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L266-L271 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.find | python | def find(cls, *args, **kwargs):
return list(cls.collection.find(*args, **kwargs)) | Returns all document dicts that pass the filter | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L275-L279 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.aggregate | python | def aggregate(cls, pipeline=None, **kwargs):
return list(cls.collection.aggregate(pipeline or [], **kwargs)) | Returns the document dicts returned from the Aggregation Pipeline | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L283-L287 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.insert_many | python | def insert_many(cls, documents, ordered=True):
return cls.collection.insert_many(documents, ordered).inserted_ids | Inserts a list of documents into the Collection and returns their _ids | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L299-L303 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.update_one | python | def update_one(cls, filter, update, upsert=False):
return cls.collection.update_one(filter, update, upsert).raw_result | Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L307-L312 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.update_many | python | def update_many(cls, filter, update, upsert=False):
return cls.collection.update_many(filter, update, upsert).raw_result | Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L316-L321 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.replace_one | python | def replace_one(cls, filter, replacement, upsert=False):
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result | Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L325-L332 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.get | python | def get(cls, filter=None, **kwargs):
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None | Returns a Document if any document is filtered, returns None otherwise | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L359-L364 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
@classmethod
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document]
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/collection.py | Collection.documents | python | def documents(cls, filter=None, **kwargs):
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document] | Returns a list of Documents if any document is filtered | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L367-L372 | null | class Collection(six.with_metaclass(CollectionMeta, object)):
"""
Base Class for Collections.
"""
def __init__(self, document=None):
self._document = copy.deepcopy(document or {})
self._errors = {}
if not self.before_validation():
if self.validator.validate_document(self):
self._document = self.validator.document
if not self.after_validation():
self.after_validation_succeeded()
else:
if not self.after_validation():
self.after_validation_failed()
def __repr__(self):
"""
Returns the representation of the Object formated like:
<Document<{%Collection Name%}> object at {%object id%}>
"""
return '<Document<{0}> object at {1}>'.format(
type(self).__name__, hex(id(self)),
)
def __setattr__(self, key, value):
"""
Sets the value to the given key if it is one of the specified keys.
Sets the given key on the _document to the given value otherwise.
"""
keys = {
'collection', 'schema', 'allow_unknown', '_document', '_errors'
}
if key in keys:
object.__setattr__(self, key, value)
else:
self._document[key] = value
def __getattr__(self, name):
"""
Returns the attribute from the _document if it exists.
Returns it from the collection if not on _document, but on collection.
"""
if name in self._document:
return self._document[name]
elif name in dir(self.collection):
attribute = getattr(self.collection, name)
if inspect.isfunction(attribute):
attribute = types.MethodType(attribute, self)
return attribute
else:
raise AttributeError(name)
def __deepcopy__(self, memo):
copy = self.__class__(self._document)
memo[id(self)] = copy
return copy
@property
def document(self):
"""
Returns the document
"""
return self._document
@property
def errors(self):
"""
Returns the validation errors
"""
return self._errors
@property
def is_valid(self):
"""
Returns True if no validation errors have been found, False otherwise.
"""
return not self._errors
@serializable
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors
@serializable
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors
@serializable
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
@classmethod
@serializable
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs)
@classmethod
@serializable
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs))
@classmethod
@serializable
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs))
@classmethod
@serializable
def insert_one(cls, document):
"""
Inserts a document into the Collection and returns its _id
"""
return cls.collection.insert_one(document).inserted_id
@classmethod
@serializable
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids
@classmethod
@serializable
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result
@classmethod
@serializable
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result
@classmethod
@serializable
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result
@classmethod
@serializable
def delete_one(cls, filter):
"""
Deletes one document that passes the filter
"""
return cls.collection.delete_one(filter).raw_result
@classmethod
@serializable
def delete_many(cls, filter):
"""
Deletes all documents that pass the filter
"""
return cls.collection.delete_many(filter).raw_result
@classmethod
@serializable
def count(cls, filter=None, **kwargs):
"""
Returns the number of documents that pass the filter
"""
return cls.collection.count(filter, **kwargs)
@classmethod
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None
@classmethod
def before_validation(self):
"""
Callback called before the validation of a document occurs.
Should return None to allow the validation process to happen.
If anything else is returned the validation is not executed and
__init__ is finished
"""
return
def after_validation(self):
"""
Callback called after the validation of a document occurs.
Should return None to allow the next callbacks to run.
If anything else is returned the next callbacks are not called and
__init__ is finished
"""
return
def after_validation_failed(self):
"""
Callback called after the validation of a document fails.
"""
return
def after_validation_succeeded(self):
"""
Callback called after the validation of a document succeeds.
"""
return
def before_insert(self):
"""
Callback called before an insert occurs.
Should return None to allow the insert to happen.
If anything else is returned the insert will not occur and the returned
value will be the return of the insert function
"""
return
def after_insert(self):
"""
Callback called after an insert occurs.
"""
return
def before_update(self, old):
"""
Callback called before an update occurs.
Should return None to allow the update to happen.
If anything else is returned the update will not occur and the returned
value will be the return of the update function
"""
return
def after_update(self, old):
"""
Callback called after an update occurs.
"""
return
def before_delete(self):
"""
Callback called before a delete occurs.
Should return None to allow the delete to happen.
If anything else is returned the delete will not occur and the returned
value will be the return of the delete function
"""
return
def after_delete(self):
"""
Callback called after a delete occurs.
"""
return
|
lvieirajr/mongorest | mongorest/database.py | _get_db | python | def _get_db():
from .settings import settings
mongo = settings.MONGODB
if 'URI' in mongo and mongo['URI']:
uri = mongo['URI']
else:
uri = 'mongodb://'
if all(mongo.get(key) for key in ('USERNAME', 'PASSWORD')):
uri += '{0}:{1}@'.format(mongo['USERNAME'], mongo['PASSWORD'])
if 'HOSTS' in mongo and mongo['HOSTS']:
uri += ','.join(
'{0}:{1}'.format(host, port)
for (host, port) in zip(mongo['HOSTS'], mongo['PORTS']),
)
else:
uri += '{0}:{1}'.format(mongo['HOST'], mongo.get('PORT', 27017))
uri += '/' + mongo['DATABASE']
if 'OPTIONS' in mongo and mongo['OPTIONS']:
uri += '?{0}'.format('&'.join(mongo['OPTIONS']))
client = ConnectionFailureProxy(MongoClient(uri, connect=False))
database = client[parse_uri(uri)['database']]
return database | Returns the connection to the database using the settings.
This function should not be called outside of this file.
Use db instead. | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/database.py#L91-L124 | null | # -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import time
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.database import Database
from pymongo.errors import ConnectionFailure
from pymongo.mongo_client import MongoClient
from pymongo.uri_parser import parse_uri
__all__ = [
'db',
]
class ConnectionFailureProxy(object):
def __init__(self, proxied):
self.proxied = proxied
self.logger = logging.getLogger(__name__)
def __dir__(self):
return dir(self.proxied)
def __str__(self):
return str(self.proxied)
def __repr__(self):
return repr(self.proxied)
def __eq__(self, other):
return self.proxied == other.proxied
def __getitem__(self, key):
item = self.proxied[key]
if hasattr(item, '__call__'):
item = ConnectionFailureProxy(item)
return item
def __getattr__(self, attr):
attribute = getattr(self.proxied, attr)
if hasattr(attribute, '__call__'):
attribute = ConnectionFailureProxy(attribute)
return attribute
def __call__(self, *args, **kwargs):
from .settings import settings
retries = 0
while retries < settings.RETRY_LIMIT:
try:
result = self.proxied(*args, **kwargs)
return list(result) if isinstance(result, Cursor) else result
except ConnectionFailure:
if settings.LINEAR_RETRIES:
sleep_time = settings.BASE_RETRY_TIME
else:
sleep_time = pow(settings.BASE_RETRY_TIME, retries)
retries += 1
self.logger.warning(
'Retry nº %s in %s seconds.', retries, sleep_time
)
client, attempts = self.proxied, 0
while attempts <= 3:
if isinstance(client, MongoClient):
client.close()
break
elif isinstance(client, Database):
client = client.client
elif isinstance(client, Collection):
client = client.database
else:
client = client.__self__
attempts += 1
time.sleep(sleep_time)
return self.proxied(*args, **kwargs)
db = _get_db()
|
lvieirajr/mongorest | mongorest/decorators.py | login_required | python | def login_required(wrapped):
@wraps(wrapped)
def wrapper(*args, **kwargs):
request = args[1]
auth_collection = settings.AUTH_COLLECTION[
settings.AUTH_COLLECTION.rfind('.') + 1:
].lower()
auth_document = request.environ.get(auth_collection)
if auth_document and auth_document.is_authorized(request):
setattr(request, auth_collection, auth_document)
return wrapped(*args, **kwargs)
return Response(response=serialize(UnauthorizedError()), status=401)
if hasattr(wrapped, 'decorators'):
wrapper.decorators = wrapped.decorators
wrapper.decorators.append('login_required')
else:
wrapper.decorators = ['login_required']
return wrapper | Requires that the user is logged in and authorized to execute requests
Except if the method is in authorized_methods of the auth_collection
Then he can execute the requests even not being authorized | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/decorators.py#L17-L44 | null | # -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
from functools import wraps
from .errors import UnauthorizedError
from .settings import settings
from .utils import serialize
from .wrappers import Response
__all__ = [
'login_required',
'serializable',
]
def serializable(wrapped):
"""
If a keyword argument 'serialize' with a True value is passed to the
Wrapped function, the return of the wrapped function will be serialized.
Nothing happens if the argument is not passed or the value is not True
"""
@wraps(wrapped)
def wrapper(*args, **kwargs):
should_serialize = kwargs.pop('serialize', False)
result = wrapped(*args, **kwargs)
return serialize(result) if should_serialize else result
if hasattr(wrapped, 'decorators'):
wrapper.decorators = wrapped.decorators
wrapper.decorators.append('serializable')
else:
wrapper.decorators = ['serializable']
return wrapper
|
lvieirajr/mongorest | mongorest/decorators.py | serializable | python | def serializable(wrapped):
@wraps(wrapped)
def wrapper(*args, **kwargs):
should_serialize = kwargs.pop('serialize', False)
result = wrapped(*args, **kwargs)
return serialize(result) if should_serialize else result
if hasattr(wrapped, 'decorators'):
wrapper.decorators = wrapped.decorators
wrapper.decorators.append('serializable')
else:
wrapper.decorators = ['serializable']
return wrapper | If a keyword argument 'serialize' with a True value is passed to the
Wrapped function, the return of the wrapped function will be serialized.
Nothing happens if the argument is not passed or the value is not True | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/decorators.py#L47-L67 | null | # -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
from functools import wraps
from .errors import UnauthorizedError
from .settings import settings
from .utils import serialize
from .wrappers import Response
__all__ = [
'login_required',
'serializable',
]
def login_required(wrapped):
"""
Requires that the user is logged in and authorized to execute requests
Except if the method is in authorized_methods of the auth_collection
Then he can execute the requests even not being authorized
"""
@wraps(wrapped)
def wrapper(*args, **kwargs):
request = args[1]
auth_collection = settings.AUTH_COLLECTION[
settings.AUTH_COLLECTION.rfind('.') + 1:
].lower()
auth_document = request.environ.get(auth_collection)
if auth_document and auth_document.is_authorized(request):
setattr(request, auth_collection, auth_document)
return wrapped(*args, **kwargs)
return Response(response=serialize(UnauthorizedError()), status=401)
if hasattr(wrapped, 'decorators'):
wrapper.decorators = wrapped.decorators
wrapper.decorators.append('login_required')
else:
wrapper.decorators = ['login_required']
return wrapper
|
lvieirajr/mongorest | mongorest/utils.py | deserialize | python | def deserialize(to_deserialize, *args, **kwargs):
if isinstance(to_deserialize, string_types):
if re.match('^[0-9a-f]{24}$', to_deserialize):
return ObjectId(to_deserialize)
try:
return bson_loads(to_deserialize, *args, **kwargs)
except:
return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)
else:
return bson_loads(bson_dumps(to_deserialize), *args, **kwargs) | Deserializes a string into a PyMongo BSON | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/utils.py#L16-L28 | null | # -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from bson.json_util import dumps as bson_dumps, loads as bson_loads
from bson.objectid import ObjectId
from six import string_types
__all__ = [
'deserialize',
'serialize',
]
def serialize(to_serialize, *args, **kwargs):
"""
Serializes a PyMongo BSON into a string
"""
return bson_dumps(to_serialize, *args, **kwargs)
|
lvieirajr/mongorest | mongorest/resource.py | ListResourceMixin.list | python | def list(self, request):
pipeline = [{'$match': request.args.pop('match', {})}]
sort = request.args.pop('sort', {})
if sort:
pipeline.append({'$sort': sort})
project = request.args.pop('project', {})
if project:
pipeline.append({'$project': project})
return Response(serialize(self.collection.aggregate(pipeline))) | Returns the list of documents found on the collection | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L73-L87 | [
"def serialize(to_serialize, *args, **kwargs):\n \"\"\"\n Serializes a PyMongo BSON into a string\n \"\"\"\n return bson_dumps(to_serialize, *args, **kwargs)\n"
] | class ListResourceMixin(Resource):
"""
Resource Mixin that provides the list action for your endpoint.
"""
rules = [Rule('/', methods=['GET'], endpoint='list')]
|
lvieirajr/mongorest | mongorest/resource.py | CreateResourceMixin.create | python | def create(self, request):
document = self.collection(request.json)
document.created_at = datetime.utcnow()
document.updated_at = document.created_at
created = document.insert()
return Response(
response=serialize(created),
status=(
201 if not all(
key in created for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
) | Creates a new document based on the given data | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L96-L114 | [
"def serialize(to_serialize, *args, **kwargs):\n \"\"\"\n Serializes a PyMongo BSON into a string\n \"\"\"\n return bson_dumps(to_serialize, *args, **kwargs)\n"
] | class CreateResourceMixin(Resource):
"""
Resource Mixin that provides the create action for your endpoint.
"""
rules = [Rule('/', methods=['POST'], endpoint='create')]
|
lvieirajr/mongorest | mongorest/resource.py | RetrieveResourceMixin.retrieve | python | def retrieve(self, request, _id):
_id = deserialize(_id)
retrieved = self.collection.find_one({'_id': _id})
if retrieved:
return Response(serialize(retrieved))
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) | Returns the document containing the given _id or 404 | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L123-L138 | [
"def serialize(to_serialize, *args, **kwargs):\n \"\"\"\n Serializes a PyMongo BSON into a string\n \"\"\"\n return bson_dumps(to_serialize, *args, **kwargs)\n",
"def deserialize(to_deserialize, *args, **kwargs):\n \"\"\"\n Deserializes a string into a PyMongo BSON\n \"\"\"\n if isinstance(to_deserialize, string_types):\n if re.match('^[0-9a-f]{24}$', to_deserialize):\n return ObjectId(to_deserialize)\n try:\n return bson_loads(to_deserialize, *args, **kwargs)\n except:\n return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)\n else:\n return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)\n"
] | class RetrieveResourceMixin(Resource):
"""
Resource Mixin that provides the retrieve action for your endpoint.
"""
rules = [Rule('/<_id>/', methods=['GET'], endpoint='retrieve')]
|
lvieirajr/mongorest | mongorest/resource.py | UpdateResourceMixin.update | python | def update(self, request, _id):
_id = deserialize(_id)
to_update = self.collection.find_one({'_id': _id})
if to_update:
document = self.collection(dict(to_update, **request.json))
document.updated_at = datetime.utcnow()
updated = document.update()
return Response(
response=serialize(updated),
status=(
200 if not all(
key in updated for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) | Updates the document with the given _id using the given data | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L147-L175 | [
"def serialize(to_serialize, *args, **kwargs):\n \"\"\"\n Serializes a PyMongo BSON into a string\n \"\"\"\n return bson_dumps(to_serialize, *args, **kwargs)\n",
"def deserialize(to_deserialize, *args, **kwargs):\n \"\"\"\n Deserializes a string into a PyMongo BSON\n \"\"\"\n if isinstance(to_deserialize, string_types):\n if re.match('^[0-9a-f]{24}$', to_deserialize):\n return ObjectId(to_deserialize)\n try:\n return bson_loads(to_deserialize, *args, **kwargs)\n except:\n return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)\n else:\n return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)\n"
] | class UpdateResourceMixin(Resource):
"""
Resource Mixin that provides the update action for your endpoint.
"""
rules = [Rule('/<_id>/', methods=['PUT'], endpoint='update')]
|
lvieirajr/mongorest | mongorest/resource.py | DeleteResourceMixin.delete | python | def delete(self, request, _id):
_id = deserialize(_id)
to_delete = self.collection.get({'_id': _id})
if to_delete:
deleted = to_delete.delete()
return Response(
response=serialize(deleted),
status=(
200 if not all(
key in deleted for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=404
) | Deletes the document with the given _id if it exists | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L184-L210 | [
"def serialize(to_serialize, *args, **kwargs):\n \"\"\"\n Serializes a PyMongo BSON into a string\n \"\"\"\n return bson_dumps(to_serialize, *args, **kwargs)\n",
"def deserialize(to_deserialize, *args, **kwargs):\n \"\"\"\n Deserializes a string into a PyMongo BSON\n \"\"\"\n if isinstance(to_deserialize, string_types):\n if re.match('^[0-9a-f]{24}$', to_deserialize):\n return ObjectId(to_deserialize)\n try:\n return bson_loads(to_deserialize, *args, **kwargs)\n except:\n return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)\n else:\n return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)\n"
] | class DeleteResourceMixin(Resource):
"""
Resource Mixin that provides the delete action for your endpoint.
"""
rules = [Rule('/<_id>/', methods=['DELETE'], endpoint='delete')]
|
sonyxperiadev/pygerrit | pygerrit/stream.py | GerritStream.stop | python | def stop(self):
self._stop.set()
if self._channel is not None:
self._channel.close() | Stop the thread. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/stream.py#L46-L50 | null | class GerritStream(Thread):
""" Gerrit events stream handler. """
def __init__(self, gerrit, ssh_client):
Thread.__init__(self)
self.daemon = True
self._gerrit = gerrit
self._ssh_client = ssh_client
self._stop = Event()
self._channel = None
def _error_event(self, error):
""" Dispatch `error` to the Gerrit client. """
self._gerrit.put_event(ErrorEvent.error_json(error))
def run(self):
""" Listen to the stream and send events to the client. """
channel = self._ssh_client.get_transport().open_session()
self._channel = channel
channel.exec_command("gerrit stream-events")
stdout = channel.makefile()
stderr = channel.makefile_stderr()
while not self._stop.is_set():
try:
if channel.exit_status_ready():
if channel.recv_stderr_ready():
error = stderr.readline().strip()
else:
error = "Remote server connection closed"
self._error_event(error)
self._stop.set()
else:
data = stdout.readline()
self._gerrit.put_event(data)
except Exception as e: # pylint: disable=W0703
self._error_event(repr(e))
self._stop.set()
|
sonyxperiadev/pygerrit | pygerrit/stream.py | GerritStream.run | python | def run(self):
channel = self._ssh_client.get_transport().open_session()
self._channel = channel
channel.exec_command("gerrit stream-events")
stdout = channel.makefile()
stderr = channel.makefile_stderr()
while not self._stop.is_set():
try:
if channel.exit_status_ready():
if channel.recv_stderr_ready():
error = stderr.readline().strip()
else:
error = "Remote server connection closed"
self._error_event(error)
self._stop.set()
else:
data = stdout.readline()
self._gerrit.put_event(data)
except Exception as e: # pylint: disable=W0703
self._error_event(repr(e))
self._stop.set() | Listen to the stream and send events to the client. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/stream.py#L56-L77 | null | class GerritStream(Thread):
""" Gerrit events stream handler. """
def __init__(self, gerrit, ssh_client):
Thread.__init__(self)
self.daemon = True
self._gerrit = gerrit
self._ssh_client = ssh_client
self._stop = Event()
self._channel = None
def stop(self):
""" Stop the thread. """
self._stop.set()
if self._channel is not None:
self._channel.close()
def _error_event(self, error):
""" Dispatch `error` to the Gerrit client. """
self._gerrit.put_event(ErrorEvent.error_json(error))
|
sonyxperiadev/pygerrit | pygerrit/client.py | GerritClient.run_command | python | def run_command(self, command):
if not isinstance(command, basestring):
raise ValueError("command must be a string")
return self._ssh_client.run_gerrit_command(command) | Run a command.
:arg str command: The command to run.
:Return: The result as a string.
:Raises: `ValueError` if `command` is not a string. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L79-L91 | null | class GerritClient(object):
""" Gerrit client interface.
:arg str host: The hostname.
:arg str username: (optional) The username to use when connecting.
:arg str port: (optional) The port number to connect to.
:arg int keepalive: (optional) Keepalive interval in seconds.
:arg bool auto_add_hosts: (optional) If True, the ssh client will
automatically add hosts to known_hosts.
"""
def __init__(self, host, username=None, port=None,
keepalive=None, auto_add_hosts=False):
self._factory = GerritEventFactory()
self._events = Queue()
self._stream = None
self.keepalive = keepalive
self._ssh_client = GerritSSHClient(host,
username=username,
port=port,
keepalive=keepalive,
auto_add_hosts=auto_add_hosts)
def gerrit_version(self):
""" Get the Gerrit version.
:Returns: The version of Gerrit that is connected to, as a string.
"""
return self._ssh_client.get_remote_version()
def gerrit_info(self):
""" Get connection information.
:Returns: A tuple of the username, and version of Gerrit that is
connected to.
"""
return self._ssh_client.get_remote_info()
def query(self, term):
""" Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string.
"""
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start()
def stop_event_stream(self):
""" Stop streaming events from `gerrit stream-events`."""
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear()
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`.
"""
try:
return self._events.get(block, timeout)
except Empty:
return None
def put_event(self, data):
""" Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event.
"""
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full")
|
sonyxperiadev/pygerrit | pygerrit/client.py | GerritClient.query | python | def query(self, term):
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results | Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L93-L129 | [
"def escape_string(string):\n \"\"\" Escape a string for use in Gerrit commands.\n\n :arg str string: The string to escape.\n\n :returns: The string with necessary escapes and surrounding double quotes\n so that it can be passed to any of the Gerrit commands that require\n double-quoted strings.\n\n \"\"\"\n\n result = string\n result = result.replace('\\\\', '\\\\\\\\')\n result = result.replace('\"', '\\\\\"')\n return '\"' + result + '\"'\n"
] | class GerritClient(object):
""" Gerrit client interface.
:arg str host: The hostname.
:arg str username: (optional) The username to use when connecting.
:arg str port: (optional) The port number to connect to.
:arg int keepalive: (optional) Keepalive interval in seconds.
:arg bool auto_add_hosts: (optional) If True, the ssh client will
automatically add hosts to known_hosts.
"""
def __init__(self, host, username=None, port=None,
keepalive=None, auto_add_hosts=False):
self._factory = GerritEventFactory()
self._events = Queue()
self._stream = None
self.keepalive = keepalive
self._ssh_client = GerritSSHClient(host,
username=username,
port=port,
keepalive=keepalive,
auto_add_hosts=auto_add_hosts)
def gerrit_version(self):
""" Get the Gerrit version.
:Returns: The version of Gerrit that is connected to, as a string.
"""
return self._ssh_client.get_remote_version()
def gerrit_info(self):
""" Get connection information.
:Returns: A tuple of the username, and version of Gerrit that is
connected to.
"""
return self._ssh_client.get_remote_info()
def run_command(self, command):
""" Run a command.
:arg str command: The command to run.
:Return: The result as a string.
:Raises: `ValueError` if `command` is not a string.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
return self._ssh_client.run_gerrit_command(command)
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start()
def stop_event_stream(self):
""" Stop streaming events from `gerrit stream-events`."""
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear()
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`.
"""
try:
return self._events.get(block, timeout)
except Empty:
return None
def put_event(self, data):
""" Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event.
"""
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full")
|
sonyxperiadev/pygerrit | pygerrit/client.py | GerritClient.start_event_stream | python | def start_event_stream(self):
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start() | Start streaming events from `gerrit stream-events`. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L131-L135 | null | class GerritClient(object):
""" Gerrit client interface.
:arg str host: The hostname.
:arg str username: (optional) The username to use when connecting.
:arg str port: (optional) The port number to connect to.
:arg int keepalive: (optional) Keepalive interval in seconds.
:arg bool auto_add_hosts: (optional) If True, the ssh client will
automatically add hosts to known_hosts.
"""
def __init__(self, host, username=None, port=None,
keepalive=None, auto_add_hosts=False):
self._factory = GerritEventFactory()
self._events = Queue()
self._stream = None
self.keepalive = keepalive
self._ssh_client = GerritSSHClient(host,
username=username,
port=port,
keepalive=keepalive,
auto_add_hosts=auto_add_hosts)
def gerrit_version(self):
""" Get the Gerrit version.
:Returns: The version of Gerrit that is connected to, as a string.
"""
return self._ssh_client.get_remote_version()
def gerrit_info(self):
""" Get connection information.
:Returns: A tuple of the username, and version of Gerrit that is
connected to.
"""
return self._ssh_client.get_remote_info()
def run_command(self, command):
""" Run a command.
:arg str command: The command to run.
:Return: The result as a string.
:Raises: `ValueError` if `command` is not a string.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
return self._ssh_client.run_gerrit_command(command)
def query(self, term):
""" Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string.
"""
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results
def stop_event_stream(self):
""" Stop streaming events from `gerrit stream-events`."""
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear()
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`.
"""
try:
return self._events.get(block, timeout)
except Empty:
return None
def put_event(self, data):
""" Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event.
"""
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full")
|
sonyxperiadev/pygerrit | pygerrit/client.py | GerritClient.stop_event_stream | python | def stop_event_stream(self):
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear() | Stop streaming events from `gerrit stream-events`. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L137-L144 | null | class GerritClient(object):
""" Gerrit client interface.
:arg str host: The hostname.
:arg str username: (optional) The username to use when connecting.
:arg str port: (optional) The port number to connect to.
:arg int keepalive: (optional) Keepalive interval in seconds.
:arg bool auto_add_hosts: (optional) If True, the ssh client will
automatically add hosts to known_hosts.
"""
def __init__(self, host, username=None, port=None,
keepalive=None, auto_add_hosts=False):
self._factory = GerritEventFactory()
self._events = Queue()
self._stream = None
self.keepalive = keepalive
self._ssh_client = GerritSSHClient(host,
username=username,
port=port,
keepalive=keepalive,
auto_add_hosts=auto_add_hosts)
def gerrit_version(self):
""" Get the Gerrit version.
:Returns: The version of Gerrit that is connected to, as a string.
"""
return self._ssh_client.get_remote_version()
def gerrit_info(self):
""" Get connection information.
:Returns: A tuple of the username, and version of Gerrit that is
connected to.
"""
return self._ssh_client.get_remote_info()
def run_command(self, command):
""" Run a command.
:arg str command: The command to run.
:Return: The result as a string.
:Raises: `ValueError` if `command` is not a string.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
return self._ssh_client.run_gerrit_command(command)
def query(self, term):
""" Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string.
"""
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start()
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`.
"""
try:
return self._events.get(block, timeout)
except Empty:
return None
def put_event(self, data):
""" Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event.
"""
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full")
|
sonyxperiadev/pygerrit | pygerrit/client.py | GerritClient.get_event | python | def get_event(self, block=True, timeout=None):
try:
return self._events.get(block, timeout)
except Empty:
return None | Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L146-L162 | null | class GerritClient(object):
""" Gerrit client interface.
:arg str host: The hostname.
:arg str username: (optional) The username to use when connecting.
:arg str port: (optional) The port number to connect to.
:arg int keepalive: (optional) Keepalive interval in seconds.
:arg bool auto_add_hosts: (optional) If True, the ssh client will
automatically add hosts to known_hosts.
"""
def __init__(self, host, username=None, port=None,
keepalive=None, auto_add_hosts=False):
self._factory = GerritEventFactory()
self._events = Queue()
self._stream = None
self.keepalive = keepalive
self._ssh_client = GerritSSHClient(host,
username=username,
port=port,
keepalive=keepalive,
auto_add_hosts=auto_add_hosts)
def gerrit_version(self):
""" Get the Gerrit version.
:Returns: The version of Gerrit that is connected to, as a string.
"""
return self._ssh_client.get_remote_version()
def gerrit_info(self):
""" Get connection information.
:Returns: A tuple of the username, and version of Gerrit that is
connected to.
"""
return self._ssh_client.get_remote_info()
def run_command(self, command):
""" Run a command.
:arg str command: The command to run.
:Return: The result as a string.
:Raises: `ValueError` if `command` is not a string.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
return self._ssh_client.run_gerrit_command(command)
def query(self, term):
""" Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string.
"""
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start()
def stop_event_stream(self):
""" Stop streaming events from `gerrit stream-events`."""
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear()
def put_event(self, data):
""" Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event.
"""
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full")
|
sonyxperiadev/pygerrit | pygerrit/client.py | GerritClient.put_event | python | def put_event(self, data):
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full") | Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L164-L177 | null | class GerritClient(object):
""" Gerrit client interface.
:arg str host: The hostname.
:arg str username: (optional) The username to use when connecting.
:arg str port: (optional) The port number to connect to.
:arg int keepalive: (optional) Keepalive interval in seconds.
:arg bool auto_add_hosts: (optional) If True, the ssh client will
automatically add hosts to known_hosts.
"""
def __init__(self, host, username=None, port=None,
keepalive=None, auto_add_hosts=False):
self._factory = GerritEventFactory()
self._events = Queue()
self._stream = None
self.keepalive = keepalive
self._ssh_client = GerritSSHClient(host,
username=username,
port=port,
keepalive=keepalive,
auto_add_hosts=auto_add_hosts)
def gerrit_version(self):
""" Get the Gerrit version.
:Returns: The version of Gerrit that is connected to, as a string.
"""
return self._ssh_client.get_remote_version()
def gerrit_info(self):
""" Get connection information.
:Returns: A tuple of the username, and version of Gerrit that is
connected to.
"""
return self._ssh_client.get_remote_info()
def run_command(self, command):
""" Run a command.
:arg str command: The command to run.
:Return: The result as a string.
:Raises: `ValueError` if `command` is not a string.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
return self._ssh_client.run_gerrit_command(command)
def query(self, term):
""" Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string.
"""
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start()
def stop_event_stream(self):
""" Stop streaming events from `gerrit stream-events`."""
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear()
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`.
"""
try:
return self._events.get(block, timeout)
except Empty:
return None
|
sonyxperiadev/pygerrit | pygerrit/ssh.py | _extract_version | python | def _extract_version(version_string, pattern):
if version_string:
match = pattern.match(version_string.strip())
if match:
return match.group(1)
return "" | Extract the version from `version_string` using `pattern`.
Return the version as a string, with leading/trailing whitespace
stripped. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L36-L47 | null | # The MIT License
#
# Copyright 2012 Sony Mobile Communications. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Gerrit SSH Client. """
from os.path import abspath, expanduser, isfile
import re
import socket
from threading import Event, Lock
from .error import GerritError
from paramiko import AutoAddPolicy, SSHClient, SSHConfig, ProxyCommand
from paramiko.ssh_exception import SSHException
class GerritSSHCommandResult(object):
""" Represents the results of a Gerrit command run over SSH. """
def __init__(self, command, stdin, stdout, stderr):
self.command = command
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
return "<GerritSSHCommandResult [%s]>" % self.command
class GerritSSHClient(SSHClient):
""" Gerrit SSH Client, wrapping the paramiko SSH Client. """
def __init__(self, hostname, username=None, port=None,
keepalive=None, auto_add_hosts=False):
""" Initialise and connect to SSH. """
super(GerritSSHClient, self).__init__()
self.remote_version = None
self.hostname = hostname
self.username = username
self.key_filename = None
self.port = port
self.connected = Event()
self.lock = Lock()
self.proxy = None
self.keepalive = keepalive
if auto_add_hosts:
self.set_missing_host_key_policy(AutoAddPolicy())
def _configure(self):
""" Configure the ssh parameters from the config file. """
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand'])
def _do_connect(self):
""" Connect to the remote. """
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None
def _connect(self):
""" Connect to the remote if not already connected. """
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release()
def get_remote_version(self):
""" Return the version of the remote Gerrit server. """
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version
def get_remote_info(self):
""" Return the username, and version of the remote Gerrit server. """
version = self.get_remote_version()
return (self.username, version)
def run_gerrit_command(self, command):
""" Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr)
|
sonyxperiadev/pygerrit | pygerrit/ssh.py | GerritSSHClient._configure | python | def _configure(self):
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand']) | Configure the ssh parameters from the config file. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L84-L111 | null | class GerritSSHClient(SSHClient):
""" Gerrit SSH Client, wrapping the paramiko SSH Client. """
def __init__(self, hostname, username=None, port=None,
keepalive=None, auto_add_hosts=False):
""" Initialise and connect to SSH. """
super(GerritSSHClient, self).__init__()
self.remote_version = None
self.hostname = hostname
self.username = username
self.key_filename = None
self.port = port
self.connected = Event()
self.lock = Lock()
self.proxy = None
self.keepalive = keepalive
if auto_add_hosts:
self.set_missing_host_key_policy(AutoAddPolicy())
def _do_connect(self):
""" Connect to the remote. """
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None
def _connect(self):
""" Connect to the remote if not already connected. """
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release()
def get_remote_version(self):
""" Return the version of the remote Gerrit server. """
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version
def get_remote_info(self):
""" Return the username, and version of the remote Gerrit server. """
version = self.get_remote_version()
return (self.username, version)
def run_gerrit_command(self, command):
""" Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr)
|
sonyxperiadev/pygerrit | pygerrit/ssh.py | GerritSSHClient._do_connect | python | def _do_connect(self):
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None | Connect to the remote. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L113-L132 | null | class GerritSSHClient(SSHClient):
""" Gerrit SSH Client, wrapping the paramiko SSH Client. """
def __init__(self, hostname, username=None, port=None,
keepalive=None, auto_add_hosts=False):
""" Initialise and connect to SSH. """
super(GerritSSHClient, self).__init__()
self.remote_version = None
self.hostname = hostname
self.username = username
self.key_filename = None
self.port = port
self.connected = Event()
self.lock = Lock()
self.proxy = None
self.keepalive = keepalive
if auto_add_hosts:
self.set_missing_host_key_policy(AutoAddPolicy())
def _configure(self):
""" Configure the ssh parameters from the config file. """
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand'])
def _connect(self):
""" Connect to the remote if not already connected. """
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release()
def get_remote_version(self):
""" Return the version of the remote Gerrit server. """
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version
def get_remote_info(self):
""" Return the username, and version of the remote Gerrit server. """
version = self.get_remote_version()
return (self.username, version)
def run_gerrit_command(self, command):
""" Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr)
|
sonyxperiadev/pygerrit | pygerrit/ssh.py | GerritSSHClient._connect | python | def _connect(self):
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release() | Connect to the remote if not already connected. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L134-L149 | null | class GerritSSHClient(SSHClient):
""" Gerrit SSH Client, wrapping the paramiko SSH Client. """
def __init__(self, hostname, username=None, port=None,
keepalive=None, auto_add_hosts=False):
""" Initialise and connect to SSH. """
super(GerritSSHClient, self).__init__()
self.remote_version = None
self.hostname = hostname
self.username = username
self.key_filename = None
self.port = port
self.connected = Event()
self.lock = Lock()
self.proxy = None
self.keepalive = keepalive
if auto_add_hosts:
self.set_missing_host_key_policy(AutoAddPolicy())
def _configure(self):
""" Configure the ssh parameters from the config file. """
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand'])
def _do_connect(self):
""" Connect to the remote. """
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None
def get_remote_version(self):
""" Return the version of the remote Gerrit server. """
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version
def get_remote_info(self):
""" Return the username, and version of the remote Gerrit server. """
version = self.get_remote_version()
return (self.username, version)
def run_gerrit_command(self, command):
""" Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr)
|
sonyxperiadev/pygerrit | pygerrit/ssh.py | GerritSSHClient.get_remote_version | python | def get_remote_version(self):
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version | Return the version of the remote Gerrit server. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L151-L158 | [
"def _extract_version(version_string, pattern):\n \"\"\" Extract the version from `version_string` using `pattern`.\n\n Return the version as a string, with leading/trailing whitespace\n stripped.\n\n \"\"\"\n if version_string:\n match = pattern.match(version_string.strip())\n if match:\n return match.group(1)\n return \"\"\n"
] | class GerritSSHClient(SSHClient):
""" Gerrit SSH Client, wrapping the paramiko SSH Client. """
def __init__(self, hostname, username=None, port=None,
keepalive=None, auto_add_hosts=False):
""" Initialise and connect to SSH. """
super(GerritSSHClient, self).__init__()
self.remote_version = None
self.hostname = hostname
self.username = username
self.key_filename = None
self.port = port
self.connected = Event()
self.lock = Lock()
self.proxy = None
self.keepalive = keepalive
if auto_add_hosts:
self.set_missing_host_key_policy(AutoAddPolicy())
def _configure(self):
""" Configure the ssh parameters from the config file. """
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand'])
def _do_connect(self):
""" Connect to the remote. """
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None
def _connect(self):
""" Connect to the remote if not already connected. """
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release()
def get_remote_info(self):
""" Return the username, and version of the remote Gerrit server. """
version = self.get_remote_version()
return (self.username, version)
def run_gerrit_command(self, command):
""" Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr)
|
sonyxperiadev/pygerrit | pygerrit/ssh.py | GerritSSHClient.run_gerrit_command | python | def run_gerrit_command(self, command):
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr) | Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L165-L194 | null | class GerritSSHClient(SSHClient):
""" Gerrit SSH Client, wrapping the paramiko SSH Client. """
def __init__(self, hostname, username=None, port=None,
keepalive=None, auto_add_hosts=False):
""" Initialise and connect to SSH. """
super(GerritSSHClient, self).__init__()
self.remote_version = None
self.hostname = hostname
self.username = username
self.key_filename = None
self.port = port
self.connected = Event()
self.lock = Lock()
self.proxy = None
self.keepalive = keepalive
if auto_add_hosts:
self.set_missing_host_key_policy(AutoAddPolicy())
def _configure(self):
""" Configure the ssh parameters from the config file. """
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand'])
def _do_connect(self):
""" Connect to the remote. """
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None
def _connect(self):
""" Connect to the remote if not already connected. """
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release()
def get_remote_version(self):
""" Return the version of the remote Gerrit server. """
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version
def get_remote_info(self):
""" Return the username, and version of the remote Gerrit server. """
version = self.get_remote_version()
return (self.username, version)
|
sonyxperiadev/pygerrit | pygerrit/events.py | GerritEventFactory.register | python | def register(cls, name):
def decorate(klazz):
""" Decorator. """
if name in cls._events:
raise GerritError("Duplicate event: %s" % name)
cls._events[name] = [klazz.__module__, klazz.__name__]
klazz.name = name
return klazz
return decorate | Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/events.py#L40-L56 | null | class GerritEventFactory(object):
""" Gerrit event factory. """
_events = {}
@classmethod
@classmethod
def create(cls, data):
""" Create a new event instance.
Return an instance of the `GerritEvent` subclass after converting
`data` to json.
Raise GerritError if json parsed from `data` does not contain a `type`
key.
"""
try:
json_data = json.loads(data)
except ValueError as err:
logging.debug("Failed to load json data: %s: [%s]", str(err), data)
json_data = json.loads(ErrorEvent.error_json(err))
if "type" not in json_data:
raise GerritError("`type` not in json_data")
name = json_data["type"]
if name not in cls._events:
name = 'unhandled-event'
event = cls._events[name]
module_name = event[0]
class_name = event[1]
module = __import__(module_name, fromlist=[module_name])
klazz = getattr(module, class_name)
return klazz(json_data)
|
sonyxperiadev/pygerrit | pygerrit/events.py | GerritEventFactory.create | python | def create(cls, data):
try:
json_data = json.loads(data)
except ValueError as err:
logging.debug("Failed to load json data: %s: [%s]", str(err), data)
json_data = json.loads(ErrorEvent.error_json(err))
if "type" not in json_data:
raise GerritError("`type` not in json_data")
name = json_data["type"]
if name not in cls._events:
name = 'unhandled-event'
event = cls._events[name]
module_name = event[0]
class_name = event[1]
module = __import__(module_name, fromlist=[module_name])
klazz = getattr(module, class_name)
return klazz(json_data) | Create a new event instance.
Return an instance of the `GerritEvent` subclass after converting
`data` to json.
Raise GerritError if json parsed from `data` does not contain a `type`
key. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/events.py#L59-L85 | null | class GerritEventFactory(object):
""" Gerrit event factory. """
_events = {}
@classmethod
def register(cls, name):
""" Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered.
"""
def decorate(klazz):
""" Decorator. """
if name in cls._events:
raise GerritError("Duplicate event: %s" % name)
cls._events[name] = [klazz.__module__, klazz.__name__]
klazz.name = name
return klazz
return decorate
@classmethod
|
sonyxperiadev/pygerrit | pygerrit/rest/__init__.py | _decode_response | python | def _decode_response(response):
content = response.content.strip()
logging.debug(content[:512])
response.raise_for_status()
if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
try:
return json.loads(content)
except ValueError:
logging.error('Invalid json content: %s' % content)
raise | Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L33-L53 | null | # The MIT License
#
# Copyright 2013 Sony Mobile Communications. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Interface to the Gerrit REST API. """
import json
import logging
import requests
GERRIT_MAGIC_JSON_PREFIX = ")]}\'\n"
GERRIT_AUTH_SUFFIX = "/a"
class GerritRestAPI(object):
""" Interface to the Gerrit REST API.
:arg str url: The full URL to the server, including the `http(s)://` prefix.
If `auth` is given, `url` will be automatically adjusted to include
Gerrit's authentication suffix.
:arg auth: (optional) Authentication handler. Must be derived from
`requests.auth.AuthBase`.
:arg boolean verify: (optional) Set to False to disable verification of
SSL certificates.
"""
def __init__(self, url, auth=None, verify=True):
headers = {'Accept': 'application/json',
'Accept-Encoding': 'gzip'}
self.kwargs = {'auth': auth,
'verify': verify,
'headers': headers}
self.url = url.rstrip('/')
if auth:
if not isinstance(auth, requests.auth.AuthBase):
raise ValueError('Invalid auth type; must be derived '
'from requests.auth.AuthBase')
if not self.url.endswith(GERRIT_AUTH_SUFFIX):
self.url += GERRIT_AUTH_SUFFIX
else:
if self.url.endswith(GERRIT_AUTH_SUFFIX):
self.url = self.url[: - len(GERRIT_AUTH_SUFFIX)]
if not self.url.endswith('/'):
self.url += '/'
logging.debug("url %s", self.url)
def make_url(self, endpoint):
""" Make the full url for the endpoint.
:arg str endpoint: The endpoint.
:returns:
The full url.
"""
endpoint = endpoint.lstrip('/')
return self.url + endpoint
def get(self, endpoint, **kwargs):
""" Send HTTP GET to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
response = requests.get(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def put(self, endpoint, **kwargs):
""" Send HTTP PUT to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.put(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def post(self, endpoint, **kwargs):
""" Send HTTP POST to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.post(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def delete(self, endpoint, **kwargs):
""" Send HTTP DELETE to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
response = requests.delete(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def review(self, change_id, revision, review):
""" Submit a review.
:arg str change_id: The change ID.
:arg str revision: The revision.
:arg str review: The review details as a :class:`GerritReview`.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
self.post(endpoint, data=str(review))
class GerritReview(object):
""" Encapsulation of a Gerrit review.
:arg str message: (optional) Cover message.
:arg dict labels: (optional) Review labels.
:arg dict comments: (optional) Inline comments.
"""
def __init__(self, message=None, labels=None, comments=None):
self.message = message if message else ""
if labels:
if not isinstance(labels, dict):
raise ValueError("labels must be a dict.")
self.labels = labels
else:
self.labels = {}
if comments:
if not isinstance(comments, list):
raise ValueError("comments must be a list.")
self.comments = {}
self.add_comments(comments)
else:
self.comments = {}
def set_message(self, message):
""" Set review cover message.
:arg str message: Cover message.
"""
self.message = message
def add_labels(self, labels):
""" Add labels.
:arg dict labels: Labels to add, for example
Usage::
add_labels({'Verified': 1,
'Code-Review': -1})
"""
self.labels.update(labels)
def add_comments(self, comments):
""" Add inline comments.
:arg dict comments: Comments to add.
Usage::
add_comments([{'filename': 'Makefile',
'line': 10,
'message': 'inline message'}])
add_comments([{'filename': 'Makefile',
'range': {'start_line': 0,
'start_character': 1,
'end_line': 0,
'end_character': 5},
'message': 'inline message'}])
"""
for comment in comments:
if 'filename' and 'message' in comment.keys():
msg = {}
if 'range' in comment.keys():
msg = {"range": comment['range'],
"message": comment['message']}
elif 'line' in comment.keys():
msg = {"line": comment['line'],
"message": comment['message']}
else:
continue
file_comment = {comment['filename']: [msg]}
if self.comments:
if comment['filename'] in self.comments.keys():
self.comments[comment['filename']].append(msg)
else:
self.comments.update(file_comment)
else:
self.comments.update(file_comment)
def __str__(self):
review_input = {}
if self.message:
review_input.update({'message': self.message})
if self.labels:
review_input.update({'labels': self.labels})
if self.comments:
review_input.update({'comments': self.comments})
return json.dumps(review_input)
|
sonyxperiadev/pygerrit | pygerrit/rest/__init__.py | GerritRestAPI.put | python | def put(self, endpoint, **kwargs):
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.put(self.make_url(endpoint), **kwargs)
return _decode_response(response) | Send HTTP PUT to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L121-L138 | [
"def _decode_response(response):\n \"\"\" Strip off Gerrit's magic prefix and decode a response.\n\n :returns:\n Decoded JSON content as a dict, or raw text if content could not be\n decoded as JSON.\n\n :raises:\n requests.HTTPError if the response contains an HTTP error status code.\n\n \"\"\"\n content = response.content.strip()\n logging.debug(content[:512])\n response.raise_for_status()\n if content.startswith(GERRIT_MAGIC_JSON_PREFIX):\n content = content[len(GERRIT_MAGIC_JSON_PREFIX):]\n try:\n return json.loads(content)\n except ValueError:\n logging.error('Invalid json content: %s' % content)\n raise\n",
"def make_url(self, endpoint):\n \"\"\" Make the full url for the endpoint.\n\n :arg str endpoint: The endpoint.\n\n :returns:\n The full url.\n\n \"\"\"\n endpoint = endpoint.lstrip('/')\n return self.url + endpoint\n"
] | class GerritRestAPI(object):
""" Interface to the Gerrit REST API.
:arg str url: The full URL to the server, including the `http(s)://` prefix.
If `auth` is given, `url` will be automatically adjusted to include
Gerrit's authentication suffix.
:arg auth: (optional) Authentication handler. Must be derived from
`requests.auth.AuthBase`.
:arg boolean verify: (optional) Set to False to disable verification of
SSL certificates.
"""
def __init__(self, url, auth=None, verify=True):
headers = {'Accept': 'application/json',
'Accept-Encoding': 'gzip'}
self.kwargs = {'auth': auth,
'verify': verify,
'headers': headers}
self.url = url.rstrip('/')
if auth:
if not isinstance(auth, requests.auth.AuthBase):
raise ValueError('Invalid auth type; must be derived '
'from requests.auth.AuthBase')
if not self.url.endswith(GERRIT_AUTH_SUFFIX):
self.url += GERRIT_AUTH_SUFFIX
else:
if self.url.endswith(GERRIT_AUTH_SUFFIX):
self.url = self.url[: - len(GERRIT_AUTH_SUFFIX)]
if not self.url.endswith('/'):
self.url += '/'
logging.debug("url %s", self.url)
def make_url(self, endpoint):
""" Make the full url for the endpoint.
:arg str endpoint: The endpoint.
:returns:
The full url.
"""
endpoint = endpoint.lstrip('/')
return self.url + endpoint
def get(self, endpoint, **kwargs):
""" Send HTTP GET to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
response = requests.get(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def post(self, endpoint, **kwargs):
""" Send HTTP POST to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.post(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def delete(self, endpoint, **kwargs):
""" Send HTTP DELETE to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
response = requests.delete(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def review(self, change_id, revision, review):
""" Submit a review.
:arg str change_id: The change ID.
:arg str revision: The revision.
:arg str review: The review details as a :class:`GerritReview`.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
self.post(endpoint, data=str(review))
|
sonyxperiadev/pygerrit | pygerrit/rest/__init__.py | GerritRestAPI.delete | python | def delete(self, endpoint, **kwargs):
kwargs.update(self.kwargs.copy())
response = requests.delete(self.make_url(endpoint), **kwargs)
return _decode_response(response) | Send HTTP DELETE to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L159-L173 | [
"def _decode_response(response):\n \"\"\" Strip off Gerrit's magic prefix and decode a response.\n\n :returns:\n Decoded JSON content as a dict, or raw text if content could not be\n decoded as JSON.\n\n :raises:\n requests.HTTPError if the response contains an HTTP error status code.\n\n \"\"\"\n content = response.content.strip()\n logging.debug(content[:512])\n response.raise_for_status()\n if content.startswith(GERRIT_MAGIC_JSON_PREFIX):\n content = content[len(GERRIT_MAGIC_JSON_PREFIX):]\n try:\n return json.loads(content)\n except ValueError:\n logging.error('Invalid json content: %s' % content)\n raise\n",
"def make_url(self, endpoint):\n \"\"\" Make the full url for the endpoint.\n\n :arg str endpoint: The endpoint.\n\n :returns:\n The full url.\n\n \"\"\"\n endpoint = endpoint.lstrip('/')\n return self.url + endpoint\n"
] | class GerritRestAPI(object):
""" Interface to the Gerrit REST API.
:arg str url: The full URL to the server, including the `http(s)://` prefix.
If `auth` is given, `url` will be automatically adjusted to include
Gerrit's authentication suffix.
:arg auth: (optional) Authentication handler. Must be derived from
`requests.auth.AuthBase`.
:arg boolean verify: (optional) Set to False to disable verification of
SSL certificates.
"""
def __init__(self, url, auth=None, verify=True):
headers = {'Accept': 'application/json',
'Accept-Encoding': 'gzip'}
self.kwargs = {'auth': auth,
'verify': verify,
'headers': headers}
self.url = url.rstrip('/')
if auth:
if not isinstance(auth, requests.auth.AuthBase):
raise ValueError('Invalid auth type; must be derived '
'from requests.auth.AuthBase')
if not self.url.endswith(GERRIT_AUTH_SUFFIX):
self.url += GERRIT_AUTH_SUFFIX
else:
if self.url.endswith(GERRIT_AUTH_SUFFIX):
self.url = self.url[: - len(GERRIT_AUTH_SUFFIX)]
if not self.url.endswith('/'):
self.url += '/'
logging.debug("url %s", self.url)
def make_url(self, endpoint):
""" Make the full url for the endpoint.
:arg str endpoint: The endpoint.
:returns:
The full url.
"""
endpoint = endpoint.lstrip('/')
return self.url + endpoint
def get(self, endpoint, **kwargs):
""" Send HTTP GET to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
response = requests.get(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def put(self, endpoint, **kwargs):
""" Send HTTP PUT to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.put(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def post(self, endpoint, **kwargs):
""" Send HTTP POST to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.post(self.make_url(endpoint), **kwargs)
return _decode_response(response)
def review(self, change_id, revision, review):
    """Submit a review.

    :arg str change_id: The change ID.
    :arg str revision: The revision.
    :arg str review: The review details as a :class:`GerritReview`.
    :returns: JSON decoded result.
    :raises: requests.RequestException on timeout or connection error.
    """
    endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
    # Bug fix: the POST result was computed but silently dropped even
    # though the ":returns:" contract promises a decoded result --
    # propagate it to the caller.
    return self.post(endpoint, data=str(review))
|
sonyxperiadev/pygerrit | pygerrit/rest/__init__.py | GerritRestAPI.review | python | def review(self, change_id, revision, review):
endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
self.post(endpoint, data=str(review)) | Submit a review.
:arg str change_id: The change ID.
:arg str revision: The revision.
:arg str review: The review details as a :class:`GerritReview`.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error. | train | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L175-L191 | [
"def post(self, endpoint, **kwargs):\n \"\"\" Send HTTP POST to the endpoint.\n\n :arg str endpoint: The endpoint to send to.\n\n :returns:\n JSON decoded result.\n\n :raises:\n requests.RequestException on timeout or connection error.\n\n \"\"\"\n kwargs.update(self.kwargs.copy())\n if \"data\" in kwargs:\n kwargs[\"headers\"].update(\n {\"Content-Type\": \"application/json;charset=UTF-8\"})\n response = requests.post(self.make_url(endpoint), **kwargs)\n return _decode_response(response)\n"
class GerritRestAPI(object):
    """Interface to the Gerrit REST API.

    :arg str url: The full URL to the server, including the `http(s)://`
        prefix. If `auth` is given, `url` will be automatically adjusted
        to include Gerrit's authentication suffix.
    :arg auth: (optional) Authentication handler. Must be derived from
        `requests.auth.AuthBase`.
    :arg boolean verify: (optional) Set to False to disable verification
        of SSL certificates.
    """

    def __init__(self, url, auth=None, verify=True):
        headers = {'Accept': 'application/json',
                   'Accept-Encoding': 'gzip'}
        # Default kwargs applied to every request issued by this client.
        self.kwargs = {'auth': auth,
                       'verify': verify,
                       'headers': headers}
        self.url = url.rstrip('/')
        if auth:
            if not isinstance(auth, requests.auth.AuthBase):
                raise ValueError('Invalid auth type; must be derived '
                                 'from requests.auth.AuthBase')
            if not self.url.endswith(GERRIT_AUTH_SUFFIX):
                self.url += GERRIT_AUTH_SUFFIX
        else:
            if self.url.endswith(GERRIT_AUTH_SUFFIX):
                self.url = self.url[:-len(GERRIT_AUTH_SUFFIX)]
        if not self.url.endswith('/'):
            self.url += '/'
        logging.debug("url %s", self.url)

    def make_url(self, endpoint):
        """Return the full url for *endpoint*.

        :arg str endpoint: The endpoint (leading ``/`` tolerated).
        :returns: The full url.
        """
        return self.url + endpoint.lstrip('/')

    def _json_headers(self, kwargs):
        """Return a per-request copy of the headers with the JSON
        Content-Type added.

        Bug fix helper: ``self.kwargs.copy()`` is shallow, so mutating
        ``kwargs["headers"]`` in place polluted the shared default
        headers with Content-Type for all later requests.
        """
        headers = kwargs["headers"].copy()
        headers["Content-Type"] = "application/json;charset=UTF-8"
        return headers

    def get(self, endpoint, **kwargs):
        """Send HTTP GET to the endpoint.

        :arg str endpoint: The endpoint to send to.
        :returns: JSON decoded result.
        :raises: requests.RequestException on timeout or connection error.
        """
        kwargs.update(self.kwargs.copy())
        response = requests.get(self.make_url(endpoint), **kwargs)
        return _decode_response(response)

    def put(self, endpoint, **kwargs):
        """Send HTTP PUT to the endpoint.

        :arg str endpoint: The endpoint to send to.
        :returns: JSON decoded result.
        :raises: requests.RequestException on timeout or connection error.
        """
        kwargs.update(self.kwargs.copy())
        if "data" in kwargs:
            kwargs["headers"] = self._json_headers(kwargs)
        response = requests.put(self.make_url(endpoint), **kwargs)
        return _decode_response(response)

    def post(self, endpoint, **kwargs):
        """Send HTTP POST to the endpoint.

        :arg str endpoint: The endpoint to send to.
        :returns: JSON decoded result.
        :raises: requests.RequestException on timeout or connection error.
        """
        kwargs.update(self.kwargs.copy())
        if "data" in kwargs:
            kwargs["headers"] = self._json_headers(kwargs)
        response = requests.post(self.make_url(endpoint), **kwargs)
        return _decode_response(response)

    def delete(self, endpoint, **kwargs):
        """Send HTTP DELETE to the endpoint.

        :arg str endpoint: The endpoint to send to.
        :returns: JSON decoded result.
        :raises: requests.RequestException on timeout or connection error.
        """
        kwargs.update(self.kwargs.copy())
        response = requests.delete(self.make_url(endpoint), **kwargs)
        return _decode_response(response)
|
def add_comments(self, comments):
    """Add inline comments.

    :arg list comments: comment dicts; each needs ``filename``,
        ``message`` and either ``line`` or ``range``. Entries missing
        any required key are skipped.

    Usage::

        add_comments([{'filename': 'Makefile',
                       'line': 10,
                       'message': 'inline message'}])

        add_comments([{'filename': 'Makefile',
                       'range': {'start_line': 0,
                                 'start_character': 1,
                                 'end_line': 0,
                                 'end_character': 5},
                       'message': 'inline message'}])
    """
    for comment in comments:
        # Bug fix: the original test was
        # `if 'filename' and 'message' in comment.keys():` which only
        # checks for 'message' -- the non-empty string literal
        # 'filename' is always truthy. Check both keys explicitly.
        if 'filename' not in comment or 'message' not in comment:
            continue
        if 'range' in comment:
            msg = {"range": comment['range'],
                   "message": comment['message']}
        elif 'line' in comment:
            msg = {"line": comment['line'],
                   "message": comment['message']}
        else:
            continue
        filename = comment['filename']
        if filename in self.comments:
            self.comments[filename].append(msg)
        else:
            self.comments[filename] = [msg]
class GerritReview(object):
    """Encapsulation of a Gerrit review.

    :arg str message: (optional) Cover message.
    :arg dict labels: (optional) Review labels.
    :arg list comments: (optional) Inline comments.
    """

    def __init__(self, message=None, labels=None, comments=None):
        self.message = message if message else ""
        if labels:
            if not isinstance(labels, dict):
                raise ValueError("labels must be a dict.")
            self.labels = labels
        else:
            self.labels = {}
        self.comments = {}
        if comments:
            if not isinstance(comments, list):
                raise ValueError("comments must be a list.")
            self.add_comments(comments)

    def set_message(self, message):
        """Set review cover message.

        :arg str message: Cover message.
        """
        self.message = message

    def add_labels(self, labels):
        """Add labels.

        :arg dict labels: Labels to add, for example

        Usage::

            add_labels({'Verified': 1,
                        'Code-Review': -1})
        """
        self.labels.update(labels)

    def add_comments(self, comments):
        """Add inline comments.

        Consistency fix: __init__ calls this method but it was missing
        from the class body; restored (with the key check corrected --
        the original `'filename' and 'message' in ...` only tested
        'message').

        :arg list comments: comment dicts; each needs ``filename``,
            ``message`` and either ``line`` or ``range``.
        """
        for comment in comments:
            if 'filename' not in comment or 'message' not in comment:
                continue
            if 'range' in comment:
                msg = {"range": comment['range'],
                       "message": comment['message']}
            elif 'line' in comment:
                msg = {"line": comment['line'],
                       "message": comment['message']}
            else:
                continue
            filename = comment['filename']
            if filename in self.comments:
                self.comments[filename].append(msg)
            else:
                self.comments[filename] = [msg]

    def __str__(self):
        """Return the review serialised as a JSON string; empty
        message/labels/comments are omitted."""
        review_input = {}
        if self.message:
            review_input.update({'message': self.message})
        if self.labels:
            review_input.update({'labels': self.labels})
        if self.comments:
            review_input.update({'comments': self.comments})
        return json.dumps(review_input)
|
Scifabric/pbs | pbs.py | cli | python | def cli(config, server, api_key, all, credentials, project):
# Check first for the pybossa.rc file to configure server and api-key
home = expanduser("~")
if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
config.parser.read(os.path.join(home, '.pybossa.cfg'))
config.server = config.parser.get(credentials,'server')
config.api_key = config.parser.get(credentials, 'apikey')
try:
config.all = config.parser.get(credentials, 'all')
except ConfigParser.NoOptionError:
config.all = None
if server:
config.server = server
if api_key:
config.api_key = api_key
if all:
config.all = all
try:
config.project = json.loads(project.read())
except JSONDecodeError as e:
click.secho("Error: invalid JSON format in project.json:", fg='red')
if e.msg == 'Expecting value':
e.msg += " (if string enclose it with double quotes)"
click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
raise click.Abort()
try:
project_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"short_name": {"type": "string"},
"description": {"type": "string"}
}
}
jsonschema.validate(config.project, project_schema)
except jsonschema.exceptions.ValidationError as e:
click.secho("Error: invalid type in project.json", fg='red')
click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
raise click.Abort()
config.pbclient = pbclient
config.pbclient.set('endpoint', config.server)
config.pbclient.set('api_key', config.api_key) | Create the cli command line. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L65-L109 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
    """Shared state passed between pbs sub-commands via click."""

    def __init__(self):
        """Initialise every setting to its default value."""
        self.verbose = False
        self.server = None        # PYBOSSA endpoint URL
        self.api_key = None       # PYBOSSA API key
        self.project = None       # parsed project.json contents
        self.all = None           # search across all projects flag
        self.pbclient = pbclient  # shared pybossa-client module
        self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
@cli.command()
def version():
    """Show pbs version."""
    try:
        import pkg_resources
        dist = pkg_resources.get_distribution('pybossa-pbs')
        click.echo(dist.version)
    except ImportError:
        click.echo("pybossa-pbs package not found!")
@cli.command()
@pass_config
def create_project(config):  # pragma: no cover
    """Create the PYBOSSA project."""
    click.echo(_create_project(config))
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
              default='template.html')
@click.option('--results', help='The project results file',
              default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
              default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
              default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
              default=False)
@pass_config
def update_project(config, task_presenter, results,
                   long_description, tutorial, watch):  # pragma: no cover
    """Update project templates and information."""
    # --watch keeps running and re-uploads on local file changes.
    updater = _update_project_watch if watch else _update_project
    click.echo(updater(config, task_presenter, results,
                       long_description, tutorial))
@cli.command()
@click.option('--tasks-file', help='File with tasks',
              default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
              default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
                                               'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project."""
    outcome = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
    click.echo(outcome)
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
              default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
              default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
                                               'xltx', 'xltm']))
@pass_config
def add_helpingmaterials(config, helping_materials_file, helping_type):
    """Add helping materials to a project."""
    outcome = _add_helpingmaterials(config, helping_materials_file, helping_type)
    click.echo(outcome)
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
def delete_tasks(config, task_id):
    """Delete tasks from a project."""
    # Deleting everything is destructive, so require confirmation when
    # no specific task id was given.
    if task_id is None:
        msg = ("Are you sure you want to delete all the tasks and associated task runs?")
        if not click.confirm(msg):
            click.echo("Aborting.")
            return
    click.echo(_delete_tasks(config, task_id))
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
def update_task_redundancy(config, task_id, redundancy):
    """Update task redundancy for a project."""
    # Changing every task's redundancy is a broad operation, so ask
    # first when no specific task id was given.
    if task_id is None:
        msg = ("Are you sure you want to update all the tasks redundancy?")
        if not click.confirm(msg):
            click.echo("Aborting.")
            return
    click.echo(_update_tasks_redundancy(config, task_id, redundancy))
|
Scifabric/pbs | pbs.py | version | python | def version():
try:
import pkg_resources
click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
except ImportError:
click.echo("pybossa-pbs package not found!") | Show pbs version. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L113-L119 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
"""Config class for the command line."""
def __init__(self):
"""Init the configuration default values."""
self.verbose = False
self.server = None
self.api_key = None
self.project = None
self.all = None
self.pbclient = pbclient
self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
              default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
def cli(config, server, api_key, all, credentials, project):
    """Create the cli command line."""
    # Load defaults from ~/.pybossa.cfg when present; explicit command
    # line options override whatever the config file provides.
    rcfile = os.path.join(expanduser("~"), '.pybossa.cfg')
    if os.path.isfile(rcfile):
        config.parser.read(rcfile)
        config.server = config.parser.get(credentials, 'server')
        config.api_key = config.parser.get(credentials, 'apikey')
        try:
            config.all = config.parser.get(credentials, 'all')
        except ConfigParser.NoOptionError:
            config.all = None
    if server:
        config.server = server
    if api_key:
        config.api_key = api_key
    if all:
        config.all = all
    try:
        config.project = json.loads(project.read())
    except JSONDecodeError as e:
        click.secho("Error: invalid JSON format in project.json:", fg='red')
        if e.msg == 'Expecting value':
            e.msg += " (if string enclose it with double quotes)"
        click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
        raise click.Abort()
    # Only top-level string fields are validated; other keys pass through.
    project_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "short_name": {"type": "string"},
            "description": {"type": "string"}
        }
    }
    try:
        jsonschema.validate(config.project, project_schema)
    except jsonschema.exceptions.ValidationError as e:
        click.secho("Error: invalid type in project.json", fg='red')
        click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
        click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
        raise click.Abort()
    config.pbclient = pbclient
    config.pbclient.set('endpoint', config.server)
    config.pbclient.set('api_key', config.api_key)
@cli.command()
@cli.command()
@pass_config
def create_project(config): # pragma: no cover
"""Create the PYBOSSA project."""
res = _create_project(config)
click.echo(res)
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
default='template.html')
@click.option('--results', help='The project results file',
default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
default=False)
@pass_config
def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
"""Update project templates and information."""
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res)
@cli.command()
@click.option('--tasks-file', help='File with tasks',
default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
click.echo(res)
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm']))
@pass_config
def add_helpingmaterials(config, helping_materials_file, helping_type):
"""Add helping materials to a project."""
res = _add_helpingmaterials(config, helping_materials_file, helping_type)
click.echo(res)
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
def delete_tasks(config, task_id):
"""Delete tasks from a project."""
if task_id is None:
msg = ("Are you sure you want to delete all the tasks and associated task runs?")
if click.confirm(msg):
res = _delete_tasks(config, task_id)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _delete_tasks(config, task_id)
click.echo(res)
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
|
Scifabric/pbs | pbs.py | update_project | python | def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res) | Update project templates and information. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L141-L150 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
"""Config class for the command line."""
def __init__(self):
"""Init the configuration default values."""
self.verbose = False
self.server = None
self.api_key = None
self.project = None
self.all = None
self.pbclient = pbclient
self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
def cli(config, server, api_key, all, credentials, project):
"""Create the cli command line."""
# Check first for the pybossa.rc file to configure server and api-key
home = expanduser("~")
if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
config.parser.read(os.path.join(home, '.pybossa.cfg'))
config.server = config.parser.get(credentials,'server')
config.api_key = config.parser.get(credentials, 'apikey')
try:
config.all = config.parser.get(credentials, 'all')
except ConfigParser.NoOptionError:
config.all = None
if server:
config.server = server
if api_key:
config.api_key = api_key
if all:
config.all = all
try:
config.project = json.loads(project.read())
except JSONDecodeError as e:
click.secho("Error: invalid JSON format in project.json:", fg='red')
if e.msg == 'Expecting value':
e.msg += " (if string enclose it with double quotes)"
click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
raise click.Abort()
try:
project_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"short_name": {"type": "string"},
"description": {"type": "string"}
}
}
jsonschema.validate(config.project, project_schema)
except jsonschema.exceptions.ValidationError as e:
click.secho("Error: invalid type in project.json", fg='red')
click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
raise click.Abort()
config.pbclient = pbclient
config.pbclient.set('endpoint', config.server)
config.pbclient.set('api_key', config.api_key)
@cli.command()
def version():
"""Show pbs version."""
try:
import pkg_resources
click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
except ImportError:
click.echo("pybossa-pbs package not found!")
@cli.command()
@pass_config
def create_project(config): # pragma: no cover
"""Create the PYBOSSA project."""
res = _create_project(config)
click.echo(res)
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
default='template.html')
@click.option('--results', help='The project results file',
default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
default=False)
@pass_config
@cli.command()
@click.option('--tasks-file', help='File with tasks',
default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
click.echo(res)
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm']))
@pass_config
def add_helpingmaterials(config, helping_materials_file, helping_type):
"""Add helping materials to a project."""
res = _add_helpingmaterials(config, helping_materials_file, helping_type)
click.echo(res)
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
def delete_tasks(config, task_id):
"""Delete tasks from a project."""
if task_id is None:
msg = ("Are you sure you want to delete all the tasks and associated task runs?")
if click.confirm(msg):
res = _delete_tasks(config, task_id)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _delete_tasks(config, task_id)
click.echo(res)
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
|
Scifabric/pbs | pbs.py | add_tasks | python | def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
click.echo(res) | Add tasks to a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L162-L165 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
"""Config class for the command line."""
def __init__(self):
"""Init the configuration default values."""
self.verbose = False
self.server = None
self.api_key = None
self.project = None
self.all = None
self.pbclient = pbclient
self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
def cli(config, server, api_key, all, credentials, project):
"""Create the cli command line."""
# Check first for the pybossa.rc file to configure server and api-key
home = expanduser("~")
if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
config.parser.read(os.path.join(home, '.pybossa.cfg'))
config.server = config.parser.get(credentials,'server')
config.api_key = config.parser.get(credentials, 'apikey')
try:
config.all = config.parser.get(credentials, 'all')
except ConfigParser.NoOptionError:
config.all = None
if server:
config.server = server
if api_key:
config.api_key = api_key
if all:
config.all = all
try:
config.project = json.loads(project.read())
except JSONDecodeError as e:
click.secho("Error: invalid JSON format in project.json:", fg='red')
if e.msg == 'Expecting value':
e.msg += " (if string enclose it with double quotes)"
click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
raise click.Abort()
try:
project_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"short_name": {"type": "string"},
"description": {"type": "string"}
}
}
jsonschema.validate(config.project, project_schema)
except jsonschema.exceptions.ValidationError as e:
click.secho("Error: invalid type in project.json", fg='red')
click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
raise click.Abort()
config.pbclient = pbclient
config.pbclient.set('endpoint', config.server)
config.pbclient.set('api_key', config.api_key)
@cli.command()
def version():
"""Show pbs version."""
try:
import pkg_resources
click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
except ImportError:
click.echo("pybossa-pbs package not found!")
@cli.command()
@pass_config
def create_project(config): # pragma: no cover
"""Create the PYBOSSA project."""
res = _create_project(config)
click.echo(res)
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
default='template.html')
@click.option('--results', help='The project results file',
default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
default=False)
@pass_config
def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
"""Update project templates and information."""
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res)
@cli.command()
@click.option('--tasks-file', help='File with tasks',
default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm']))
@pass_config
def add_helpingmaterials(config, helping_materials_file, helping_type):
"""Add helping materials to a project."""
res = _add_helpingmaterials(config, helping_materials_file, helping_type)
click.echo(res)
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
def delete_tasks(config, task_id):
"""Delete tasks from a project."""
if task_id is None:
msg = ("Are you sure you want to delete all the tasks and associated task runs?")
if click.confirm(msg):
res = _delete_tasks(config, task_id)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _delete_tasks(config, task_id)
click.echo(res)
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
|
Scifabric/pbs | pbs.py | add_helpingmaterials | python | def add_helpingmaterials(config, helping_materials_file, helping_type):
res = _add_helpingmaterials(config, helping_materials_file, helping_type)
click.echo(res) | Add helping materials to a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L175-L178 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
"""Config class for the command line."""
def __init__(self):
"""Init the configuration default values."""
self.verbose = False
self.server = None
self.api_key = None
self.project = None
self.all = None
self.pbclient = pbclient
self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
def cli(config, server, api_key, all, credentials, project):
"""Create the cli command line."""
# Check first for the pybossa.rc file to configure server and api-key
home = expanduser("~")
if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
config.parser.read(os.path.join(home, '.pybossa.cfg'))
config.server = config.parser.get(credentials,'server')
config.api_key = config.parser.get(credentials, 'apikey')
try:
config.all = config.parser.get(credentials, 'all')
except ConfigParser.NoOptionError:
config.all = None
if server:
config.server = server
if api_key:
config.api_key = api_key
if all:
config.all = all
try:
config.project = json.loads(project.read())
except JSONDecodeError as e:
click.secho("Error: invalid JSON format in project.json:", fg='red')
if e.msg == 'Expecting value':
e.msg += " (if string enclose it with double quotes)"
click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
raise click.Abort()
try:
project_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"short_name": {"type": "string"},
"description": {"type": "string"}
}
}
jsonschema.validate(config.project, project_schema)
except jsonschema.exceptions.ValidationError as e:
click.secho("Error: invalid type in project.json", fg='red')
click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
raise click.Abort()
config.pbclient = pbclient
config.pbclient.set('endpoint', config.server)
config.pbclient.set('api_key', config.api_key)
@cli.command()
def version():
"""Show pbs version."""
try:
import pkg_resources
click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
except ImportError:
click.echo("pybossa-pbs package not found!")
@cli.command()
@pass_config
def create_project(config): # pragma: no cover
"""Create the PYBOSSA project."""
res = _create_project(config)
click.echo(res)
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
default='template.html')
@click.option('--results', help='The project results file',
default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
default=False)
@pass_config
def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
"""Update project templates and information."""
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res)
@cli.command()
@click.option('--tasks-file', help='File with tasks',
default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
click.echo(res)
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm']))
@pass_config
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
def delete_tasks(config, task_id):
"""Delete tasks from a project."""
if task_id is None:
msg = ("Are you sure you want to delete all the tasks and associated task runs?")
if click.confirm(msg):
res = _delete_tasks(config, task_id)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _delete_tasks(config, task_id)
click.echo(res)
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
|
Scifabric/pbs | pbs.py | delete_tasks | python | def delete_tasks(config, task_id):
if task_id is None:
msg = ("Are you sure you want to delete all the tasks and associated task runs?")
if click.confirm(msg):
res = _delete_tasks(config, task_id)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _delete_tasks(config, task_id)
click.echo(res) | Delete tasks from a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L184-L196 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
"""Config class for the command line."""
def __init__(self):
"""Init the configuration default values."""
self.verbose = False
self.server = None
self.api_key = None
self.project = None
self.all = None
self.pbclient = pbclient
self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
def cli(config, server, api_key, all, credentials, project):
"""Create the cli command line."""
# Check first for the pybossa.rc file to configure server and api-key
home = expanduser("~")
if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
config.parser.read(os.path.join(home, '.pybossa.cfg'))
config.server = config.parser.get(credentials,'server')
config.api_key = config.parser.get(credentials, 'apikey')
try:
config.all = config.parser.get(credentials, 'all')
except ConfigParser.NoOptionError:
config.all = None
if server:
config.server = server
if api_key:
config.api_key = api_key
if all:
config.all = all
try:
config.project = json.loads(project.read())
except JSONDecodeError as e:
click.secho("Error: invalid JSON format in project.json:", fg='red')
if e.msg == 'Expecting value':
e.msg += " (if string enclose it with double quotes)"
click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
raise click.Abort()
try:
project_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"short_name": {"type": "string"},
"description": {"type": "string"}
}
}
jsonschema.validate(config.project, project_schema)
except jsonschema.exceptions.ValidationError as e:
click.secho("Error: invalid type in project.json", fg='red')
click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
raise click.Abort()
config.pbclient = pbclient
config.pbclient.set('endpoint', config.server)
config.pbclient.set('api_key', config.api_key)
@cli.command()
def version():
"""Show pbs version."""
try:
import pkg_resources
click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
except ImportError:
click.echo("pybossa-pbs package not found!")
@cli.command()
@pass_config
def create_project(config): # pragma: no cover
"""Create the PYBOSSA project."""
res = _create_project(config)
click.echo(res)
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
default='template.html')
@click.option('--results', help='The project results file',
default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
default=False)
@pass_config
def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
"""Update project templates and information."""
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res)
@cli.command()
@click.option('--tasks-file', help='File with tasks',
default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
click.echo(res)
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm']))
@pass_config
def add_helpingmaterials(config, helping_materials_file, helping_type):
"""Add helping materials to a project."""
res = _add_helpingmaterials(config, helping_materials_file, helping_type)
click.echo(res)
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
|
Scifabric/pbs | pbs.py | update_task_redundancy | python | def update_task_redundancy(config, task_id, redundancy):
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res) | Update task redudancy for a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L202-L214 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
A very simple PYBOSSA command line client.
This module is a pybossa-client that runs the following commands:
* create_project: to create a PYBOSSA project
* add_tasks: to add tasks to an existing project
* delete_tasks: to delete all tasks and task_runs from an existing project
"""
import click
import pbclient
import simplejson as json
from simplejson import JSONDecodeError
import jsonschema
import ConfigParser
import os.path
from os.path import expanduser
from helpers import *
class Config(object):
"""Config class for the command line."""
def __init__(self):
"""Init the configuration default values."""
self.verbose = False
self.server = None
self.api_key = None
self.project = None
self.all = None
self.pbclient = pbclient
self.parser = ConfigParser.ConfigParser()
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option('--server', help='The PYBOSSA server')
@click.option('--api-key', help='Your PYBOSSA API-KEY')
@click.option('--all', help='Search across all projects')
@click.option('--credentials', help='Use your PYBOSSA credentials in .pybossa.cfg file',
default="default")
@click.option('--project', type=click.File('r'), default='project.json')
@pass_config
def cli(config, server, api_key, all, credentials, project):
"""Create the cli command line."""
# Check first for the pybossa.rc file to configure server and api-key
home = expanduser("~")
if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
config.parser.read(os.path.join(home, '.pybossa.cfg'))
config.server = config.parser.get(credentials,'server')
config.api_key = config.parser.get(credentials, 'apikey')
try:
config.all = config.parser.get(credentials, 'all')
except ConfigParser.NoOptionError:
config.all = None
if server:
config.server = server
if api_key:
config.api_key = api_key
if all:
config.all = all
try:
config.project = json.loads(project.read())
except JSONDecodeError as e:
click.secho("Error: invalid JSON format in project.json:", fg='red')
if e.msg == 'Expecting value':
e.msg += " (if string enclose it with double quotes)"
click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
raise click.Abort()
try:
project_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"short_name": {"type": "string"},
"description": {"type": "string"}
}
}
jsonschema.validate(config.project, project_schema)
except jsonschema.exceptions.ValidationError as e:
click.secho("Error: invalid type in project.json", fg='red')
click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
raise click.Abort()
config.pbclient = pbclient
config.pbclient.set('endpoint', config.server)
config.pbclient.set('api_key', config.api_key)
@cli.command()
def version():
"""Show pbs version."""
try:
import pkg_resources
click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
except ImportError:
click.echo("pybossa-pbs package not found!")
@cli.command()
@pass_config
def create_project(config): # pragma: no cover
"""Create the PYBOSSA project."""
res = _create_project(config)
click.echo(res)
@cli.command()
@click.option('--task-presenter', help='The project task presenter file',
default='template.html')
@click.option('--results', help='The project results file',
default='results.html')
@click.option('--long-description', help='The project long description file (Markdown)',
default='long_description.md')
@click.option('--tutorial', help='The project tutorial file',
default='tutorial.html')
@click.option('--watch/--no-watch', help='Watch for changes in the current folder and update the project',
default=False)
@pass_config
def update_project(config, task_presenter, results,
long_description, tutorial, watch): # pragma: no cover
"""Update project templates and information."""
if watch:
res = _update_project_watch(config, task_presenter, results,
long_description, tutorial)
else:
res = _update_project(config, task_presenter, results,
long_description, tutorial)
click.echo(res)
@cli.command()
@click.option('--tasks-file', help='File with tasks',
default='project.tasks', type=click.File('r'))
@click.option('--tasks-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM|PO|PROPERTIES',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm', 'po', 'properties']))
@click.option('--priority', help="Priority for the tasks.", default=0)
@click.option('--redundancy', help="Redundancy for tasks.", default=30)
@pass_config
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
click.echo(res)
@cli.command()
@click.option('--helping-materials-file', help='File with helping materials',
default='helping.materials', type=click.File('r'))
@click.option('--helping-type', help='Tasks type: JSON|CSV|XLSX|XLSM|XLTX|XLTM',
default=None, type=click.Choice(['json', 'csv', 'xlsx', 'xlsm',
'xltx', 'xltm']))
@pass_config
def add_helpingmaterials(config, helping_materials_file, helping_type):
"""Add helping materials to a project."""
res = _add_helpingmaterials(config, helping_materials_file, helping_type)
click.echo(res)
@cli.command()
@click.option('--task-id', help='Task ID to delete from project', default=None)
@pass_config
def delete_tasks(config, task_id):
"""Delete tasks from a project."""
if task_id is None:
msg = ("Are you sure you want to delete all the tasks and associated task runs?")
if click.confirm(msg):
res = _delete_tasks(config, task_id)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _delete_tasks(config, task_id)
click.echo(res)
@cli.command(name='update-task-redundancy')
@click.option('--task-id', help='Task ID to update from project', default=None)
@click.option('--redundancy', help='New redundancy for task', default=None)
@pass_config
|
Scifabric/pbs | helpers.py | _create_project | python | def _create_project(config):
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Create a project in a PyBossa server. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L57-L68 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    :param helping_file: open file-like object with the materials payload.
    :param helping_type: format of the file (json, csv, po, properties,
        excel); None means "guess from the file extension".
    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Merge the server-returned info (e.g. media URL) with
                    # the local info, then push the merged dict back.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): '/api/helpinmaterial' looks like a typo for
                # '/api/helpingmaterial' -- confirm against the server routes.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                                                               config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _delete_tasks(config, task_id, limit=100, offset=0):
    """Delete tasks from a project.

    When *task_id* is given only that task is deleted; otherwise every
    task of the project is removed, paging through the task list
    *limit* items at a time.

    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.delete_task(task_id)
            check_api_error(response)
            return "Task.id = %s and its associated task_runs have been deleted" % task_id
        # No task_id: page through and delete everything.
        tasks = config.pbclient.get_tasks(project.id, limit, offset)
        while len(tasks) > 0:
            for t in tasks:
                response = config.pbclient.delete_task(t.id)
                check_api_error(response)
            offset += limit
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks and task_runs have been deleted"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
    """Update the redundancy (n_answers) of a project's tasks.

    When *task_id* is given only that task is updated; otherwise every
    task of the project is updated, paging *limit* tasks at a time.

    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.find_tasks(project.id, id=task_id)
            check_api_error(response)
            task = response[0]
            task.n_answers = redundancy
            response = config.pbclient.update_task(task)
            check_api_error(response)
            return "Task.id = %s redundancy has been updated to %s" % (task_id,
                                                                       redundancy)
        tasks = config.pbclient.get_tasks(project.id, limit, offset)
        while len(tasks) > 0:
            # One progress bar per page: the bar's iterator is exhausted
            # after a single pass, so sharing one bar across pages would
            # silently skip every page after the first.
            with click.progressbar(tasks, label="Updating Tasks") as pgbar:
                for t in pgbar:
                    t.n_answers = redundancy
                    response = config.pbclient.update_task(t)
                    check_api_error(response)
                    # Check if for the data we have to auto-throttle task update
                    sleep, msg = enable_auto_throttling(config, tasks)
                    # If auto-throttling enabled, sleep for sleep seconds
                    if sleep:  # pragma: no cover
                        time.sleep(sleep)
            offset += limit
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks redundancy have been updated"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def find_project_by_short_name(short_name, pbclient, all=None):
    """Return the project matching *short_name*.

    :param short_name: project short name to look up.
    :param pbclient: configured PyBossa API client.
    :param all: pass 1 to search across the whole server rather than
        only the current user's projects.
    :raises ProjectNotFound: when no project matches.
    """
    response = pbclient.find_project(short_name=short_name, all=all)
    check_api_error(response)
    if len(response) == 0:
        # Interpolate the short name into the message; the placeholder
        # must not be left unfilled.
        msg = ('%s not found! You can use the all=1 argument to '
               'search in all the server.' % short_name)
        error = 'Project Not Found'
        raise ProjectNotFound(msg, error)
    return response[0]
def check_api_error(api_response):
    """Check if returned API response contains an error.

    Raises a specific exception (HTTPError, DatabaseError,
    ProjectAlreadyExists, ProjectNotFound or TaskNotFound) when the
    server reports a failure; returns None for a successful response.
    """
    # Non-200 code embedded in the response body: unexpected server reply.
    if isinstance(api_response, dict) and 'code' in api_response and \
            api_response['code'] != 200:
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and (api_response.get('status') == 'failed'):
        # Map the server-side exception class / target to a pbs exception.
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            msg = 'PyBossa project already exists.'
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        else:
            print("Server response: %s" % api_response)
            raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
    """Log *module*, pretty-print *error*, then exit with status 1.

    :param module: name of the pbs module that failed (logged).
    :param error: exception-like object exposing .message and .error
        (a JSON-serialisable payload).
    """
    logging.error(module)
    # Beautify the JSON error payload for the console.
    print(error.message)
    print(json.dumps(error.error, sort_keys=True, indent=4,
                     separators=(',', ': ')))
    exit(1)
def create_task_info(task):
    """Build the info payload for a task row."""
    # A row may carry an explicit (truthy) 'info' entry; otherwise the
    # whole row becomes the task info.
    return task.get('info') or task
def create_helping_material_info(helping):
    """Build (info, file_path) for a helping-material row.

    The row's explicit 'info' entry wins when present; a truthy
    'file_path' key is removed from the info dict and returned
    separately (None when absent).
    """
    info = helping.get('info') or helping
    file_path = None
    if info.get('file_path'):
        file_path = info.pop('file_path')
    return info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
    """Return (sleep_seconds, warning_msg) when fewer than 10 hits
    remain on the server's rate limit for *endpoint*, else (0, None).

    Issues a HEAD request against the endpoint and reads the
    X-RateLimit-* headers.
    """
    # Get header from server
    endpoint = config.server + endpoint
    headers = requests.head(endpoint).headers
    # X-RateLimit-Remaining: calls still allowed in the current window;
    # it replaces the static default when present (and non-zero).
    server_limit = int(headers.get('X-RateLimit-Remaining', 0))
    limit = server_limit or limit
    # X-RateLimit-Reset: epoch second at which the window resets.
    reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
    # Seconds to wait until the rate-limit window resets.
    sleep = (reset_epoch -
             calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
    msg = 'Warning: %s remaining hits to the endpoint.' \
          ' Auto-throttling enabled!' % limit
    # If we have less than 10 hits on the endpoint, sleep
    if limit <= 10:
        return (sleep, msg)
    else:
        # Plenty of budget left: no throttling needed.
        return 0, None
def format_json_task(task_info):
    """Return *task_info* parsed as JSON when possible, else unchanged.

    Only JSON decoding problems are swallowed (ValueError covers
    JSONDecodeError; TypeError covers non-string input); anything else
    propagates.
    """
    try:
        return json.loads(task_info)
    except (ValueError, TypeError):
        # Not a JSON document, or not a string at all: keep it as-is.
        return task_info
def row_empty(row):
    """Return True when every value in *row* is None (empty rows count
    as empty too)."""
    return all(value is None for value in row)
class PbsHandler(PatternMatchingEventHandler):
    """Watchdog handler that republishes the project whenever a watched
    source file changes on disk."""

    # Files whose modification triggers a project update.
    patterns = ['*/template.html', '*/tutorial.html',
                '*/long_description.md', '*/results.html',
                '*/bundle.js', '*/bundle.min.js']

    def __init__(self, config, task_presenter, results,
                 long_description, tutorial):
        """Store the config and the template file paths used on update."""
        super(PbsHandler, self).__init__()
        self.config = config
        self.task_presenter = task_presenter
        self.results = results
        self.long_description = long_description
        self.tutorial = tutorial

    def on_modified(self, event):
        """Push the whole project to the server after any watched file
        changes."""
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
        res = _update_project(self.config, self.task_presenter, self.results,
                              self.long_description, self.tutorial)
        logging.info(res)
|
Scifabric/pbs | helpers.py | _update_project_watch | python | def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join() | Update a project in a loop. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L70-L88 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
    """Create a project in a PyBossa server.

    Reads name, short_name and description from config.project and
    returns a human-readable status message; server-reported errors are
    raised by check_api_error.
    """
    try:
        response = config.pbclient.create_project(config.project['name'],
                                                  config.project['short_name'],
                                                  config.project['description'])
        check_api_error(response)
        return ("Project: %s created!" % config.project['short_name'])
    except exceptions.ConnectionError:
        # Server unreachable: report instead of raising.
        return("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
                    long_description, tutorial):
    """Update a project.

    Pushes name/short_name/description from the local config plus the
    contents of the given template files to the server.

    :param task_presenter: path to the task presenter HTML template.
    :param results: path to the results HTML template.
    :param long_description: path to the long description markdown file.
    :param tutorial: path to the tutorial HTML template.
    :returns: human-readable status message string.
    """
    try:
        # Get project
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        # Update attributes
        project.name = config.project['name']
        project.short_name = config.project['short_name']
        project.description = config.project['description']
        # Update long_description
        with open(long_description, 'r') as f:
            project.long_description = f.read()
        # Update task presenter
        with open(task_presenter, 'r') as f:
            project.info['task_presenter'] = f.read()
        # Append the local JS bundle (if any) to the presenter.
        _update_task_presenter_bundle_js(project)
        # Update results
        with open(results, 'r') as f:
            project.info['results'] = f.read()
        # Update tutorial
        with open(tutorial, 'r') as f:
            project.info['tutorial'] = f.read()
        response = config.pbclient.update_project(project)
        check_api_error(response)
        return ("Project %s updated!" % config.project['short_name'])
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except ProjectNotFound:
        return ("Project not found! The project: %s is missing." \
                " Use the flag --all=1 to search in all the server " \
                % config.project['short_name'])
    except TaskNotFound:
        raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project.

    :param tasks_file: open file-like object with the tasks payload.
    :param tasks_type: format of the file (json, csv, po, properties,
        excel); None means "guess from the file extension".
    :param priority: priority_0 value set on every created task.
    :param redundancy: n_answers value set on every created task.
    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(tasks_file, tasks_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # If true, warn user
        # if sleep:  # pragma: no cover
        #     click.secho(msg, fg='yellow')
        # Show progress bar
        with click.progressbar(data, label="Adding Tasks") as pgbar:
            for d in pgbar:
                task_info = create_task_info(d)
                response = config.pbclient.create_task(project_id=project.id,
                                                       info=task_info,
                                                       n_answers=redundancy,
                                                       priority_0=priority)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): this issues one HEAD request to the server per
                # created task; consider checking the rate limit less often.
                sleep, msg = enable_auto_throttling(config, data)
                check_api_error(response)
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s tasks added to project: %s" % (len(data),
                                                   config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    :param helping_file: open file-like object with the materials payload.
    :param helping_type: format of the file (json, csv, po, properties,
        excel); None means "guess from the file extension".
    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Merge the server-returned info (e.g. media URL) with
                    # the local info, then push the merged dict back.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): '/api/helpinmaterial' looks like a typo for
                # '/api/helpingmaterial' -- confirm against the server routes.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                                                               config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _delete_tasks(config, task_id, limit=100, offset=0):
    """Delete tasks from a project.

    When *task_id* is given only that task is deleted; otherwise every
    task of the project is removed, paging through the task list
    *limit* items at a time.

    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.delete_task(task_id)
            check_api_error(response)
            return "Task.id = %s and its associated task_runs have been deleted" % task_id
        # No task_id: page through and delete everything.
        tasks = config.pbclient.get_tasks(project.id, limit, offset)
        while len(tasks) > 0:
            for t in tasks:
                response = config.pbclient.delete_task(t.id)
                check_api_error(response)
            offset += limit
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks and task_runs have been deleted"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
    """Update the redundancy (n_answers) of a project's tasks.

    When *task_id* is given only that task is updated; otherwise every
    task of the project is updated, paging *limit* tasks at a time.

    :returns: human-readable status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.find_tasks(project.id, id=task_id)
            check_api_error(response)
            task = response[0]
            task.n_answers = redundancy
            response = config.pbclient.update_task(task)
            check_api_error(response)
            return "Task.id = %s redundancy has been updated to %s" % (task_id,
                                                                       redundancy)
        tasks = config.pbclient.get_tasks(project.id, limit, offset)
        while len(tasks) > 0:
            # One progress bar per page: the bar's iterator is exhausted
            # after a single pass, so sharing one bar across pages would
            # silently skip every page after the first.
            with click.progressbar(tasks, label="Updating Tasks") as pgbar:
                for t in pgbar:
                    t.n_answers = redundancy
                    response = config.pbclient.update_task(t)
                    check_api_error(response)
                    # Check if for the data we have to auto-throttle task update
                    sleep, msg = enable_auto_throttling(config, tasks)
                    # If auto-throttling enabled, sleep for sleep seconds
                    if sleep:  # pragma: no cover
                        time.sleep(sleep)
            offset += limit
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks redundancy have been updated"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def find_project_by_short_name(short_name, pbclient, all=None):
    """Return the project matching *short_name*.

    :param short_name: project short name to look up.
    :param pbclient: configured PyBossa API client.
    :param all: pass 1 to search across the whole server rather than
        only the current user's projects.
    :raises ProjectNotFound: when no project matches.
    """
    response = pbclient.find_project(short_name=short_name, all=all)
    check_api_error(response)
    if len(response) == 0:
        # Interpolate the short name into the message; the placeholder
        # must not be left unfilled.
        msg = ('%s not found! You can use the all=1 argument to '
               'search in all the server.' % short_name)
        error = 'Project Not Found'
        raise ProjectNotFound(msg, error)
    return response[0]
def check_api_error(api_response):
    """Check if returned API response contains an error.

    Raises a specific exception (HTTPError, DatabaseError,
    ProjectAlreadyExists, ProjectNotFound or TaskNotFound) when the
    server reports a failure; returns None for a successful response.
    """
    # Non-200 code embedded in the response body: unexpected server reply.
    if isinstance(api_response, dict) and 'code' in api_response and \
            api_response['code'] != 200:
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and (api_response.get('status') == 'failed'):
        # Map the server-side exception class / target to a pbs exception.
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            msg = 'PyBossa project already exists.'
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        else:
            print("Server response: %s" % api_response)
            raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
    """Log *module*, pretty-print *error*, then exit with status 1.

    :param module: name of the pbs module that failed (logged).
    :param error: exception-like object exposing .message and .error
        (a JSON-serialisable payload).
    """
    logging.error(module)
    # Beautify the JSON error payload for the console.
    print(error.message)
    print(json.dumps(error.error, sort_keys=True, indent=4,
                     separators=(',', ': ')))
    exit(1)
def create_task_info(task):
    """Build the info payload for a task row."""
    # A row may carry an explicit (truthy) 'info' entry; otherwise the
    # whole row becomes the task info.
    return task.get('info') or task
def create_helping_material_info(helping):
    """Build (info, file_path) for a helping-material row.

    The row's explicit 'info' entry wins when present; a truthy
    'file_path' key is removed from the info dict and returned
    separately (None when absent).
    """
    info = helping.get('info') or helping
    file_path = None
    if info.get('file_path'):
        file_path = info.pop('file_path')
    return info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
    """Return (sleep_seconds, warning_msg) when fewer than 10 hits
    remain on the server's rate limit for *endpoint*, else (0, None).

    Issues a HEAD request against the endpoint and reads the
    X-RateLimit-* headers.
    """
    # Get header from server
    endpoint = config.server + endpoint
    headers = requests.head(endpoint).headers
    # X-RateLimit-Remaining: calls still allowed in the current window;
    # it replaces the static default when present (and non-zero).
    server_limit = int(headers.get('X-RateLimit-Remaining', 0))
    limit = server_limit or limit
    # X-RateLimit-Reset: epoch second at which the window resets.
    reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
    # Seconds to wait until the rate-limit window resets.
    sleep = (reset_epoch -
             calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
    msg = 'Warning: %s remaining hits to the endpoint.' \
          ' Auto-throttling enabled!' % limit
    # If we have less than 10 hits on the endpoint, sleep
    if limit <= 10:
        return (sleep, msg)
    else:
        # Plenty of budget left: no throttling needed.
        return 0, None
def format_json_task(task_info):
    """Return *task_info* parsed as JSON when possible, else unchanged.

    Only JSON decoding problems are swallowed (ValueError covers
    JSONDecodeError; TypeError covers non-string input); anything else
    propagates.
    """
    try:
        return json.loads(task_info)
    except (ValueError, TypeError):
        # Not a JSON document, or not a string at all: keep it as-is.
        return task_info
def row_empty(row):
    """Return True when every value in *row* is None (empty rows count
    as empty too)."""
    return all(value is None for value in row)
class PbsHandler(PatternMatchingEventHandler):
    """Watchdog handler that republishes the project whenever a watched
    source file changes on disk."""

    # Files whose modification triggers a project update.
    patterns = ['*/template.html', '*/tutorial.html',
                '*/long_description.md', '*/results.html',
                '*/bundle.js', '*/bundle.min.js']

    def __init__(self, config, task_presenter, results,
                 long_description, tutorial):
        """Store the config and the template file paths used on update."""
        super(PbsHandler, self).__init__()
        self.config = config
        self.task_presenter = task_presenter
        self.results = results
        self.long_description = long_description
        self.tutorial = tutorial

    def on_modified(self, event):
        """Push the whole project to the server after any watched file
        changes."""
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
        res = _update_project(self.config, self.task_presenter, self.results,
                              self.long_description, self.tutorial)
        logging.info(res)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.