hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
78a3c249081829ff15e4f72762973488c3073d35 | 53,787 | py | Python | ares/static/SpectralSynthesis.py | eklem1/ares | df39056065f0493e3c922fb50ced2dc6d1bc79a2 | [
"MIT"
] | null | null | null | ares/static/SpectralSynthesis.py | eklem1/ares | df39056065f0493e3c922fb50ced2dc6d1bc79a2 | [
"MIT"
] | null | null | null | ares/static/SpectralSynthesis.py | eklem1/ares | df39056065f0493e3c922fb50ced2dc6d1bc79a2 | [
"MIT"
] | null | null | null | """
SpectralSynthesis.py
Author: Jordan Mirocha
Affiliation: McGill
Created on: Sat 25 May 2019 09:58:14 EDT
Description:
"""
import time
import numpy as np
from ..util import Survey
from ..util import ProgressBar
from ..phenom import Madau1995
from ..util import ParameterFile
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from ..physics.Cosmology import Cosmology
from scipy.interpolate import RectBivariateSpline
from ..physics.Constants import s_per_myr, c, h_p, erg_per_ev, flux_AB
nanoJ = 1e-23 * 1e-9
tiny_lum = 1e-8
all_cameras = ['wfc', 'wfc3', 'nircam']
def _powlaw(x, p0, p1):
return p0 * (x / 1.)**p1
class SpectralSynthesis(object):
def __init__(self, **kwargs):
    """Store all keyword arguments in an ARES ParameterFile instance."""
    self.pf = ParameterFile(**kwargs)
@property
def cosm(self):
    # Lazily-constructed Cosmology object, built from this instance's
    # parameter file on first access and cached thereafter.
    if not hasattr(self, '_cosm'):
        self._cosm = Cosmology(pf=self.pf, **self.pf)
    return self._cosm
@property
def src(self):
    # Source object used for synthesis (presumably an SPS model supplying
    # `times`, `wavelengths`, `L_per_sfr_of_t`, etc. — set by the caller;
    # there is no default, so access before assignment raises AttributeError).
    return self._src

@src.setter
def src(self, value):
    self._src = value
@property
def oversampling_enabled(self):
if not hasattr(self, '_oversampling_enabled'):
self._oversampling_enabled = True
return self._oversampling_enabled
@oversampling_enabled.setter
def oversampling_enabled(self, value):
self._oversampling_enabled = value
@property
def oversampling_below(self):
if not hasattr(self, '_oversampling_below'):
self._oversampling_below = 30.
return self._oversampling_below
@oversampling_below.setter
def oversampling_below(self, value):
self._oversampling_below = value
@property
def force_perfect(self):
if not hasattr(self, '_force_perfect'):
self._force_perfect = False
return self._force_perfect
@force_perfect.setter
def force_perfect(self, value):
self._force_perfect = value
@property
def careful_cache(self):
if not hasattr(self, '_careful_cache_'):
self._careful_cache_ = True
return self._careful_cache_
@careful_cache.setter
def careful_cache(self, value):
self._careful_cache_ = value
@property
def cameras(self):
    """Dict mapping camera name ('wfc', 'wfc3', 'nircam') -> Survey instance."""
    if not hasattr(self, '_cameras'):
        self._cameras = {cam: Survey(cam=cam,
            force_perfect=self.force_perfect,
            cache=self.pf['pop_synth_cache_phot']) for cam in all_cameras}
    return self._cameras
@property
def hydr(self):
    # Lazily-built Hydrogen physics object. Imported inside the property,
    # presumably to avoid a circular import at module load — TODO confirm.
    if not hasattr(self, '_hydr'):
        from ..physics.Hydrogen import Hydrogen
        self._hydr = Hydrogen(pf=self.pf, cosm=self.cosm, **self.pf)
    return self._hydr
@property
def madau1995(self):
    # Lazily-built Madau (1995) IGM attenuation model; used by OpticalDepth.
    if not hasattr(self, '_madau1995'):
        self._madau1995 = Madau1995(hydr=self.hydr, cosm=self.cosm,
            **self.pf)
    return self._madau1995
def OpticalDepth(self, z, owaves):
"""
Compute Lyman series line blanketing following Madau (1995).
Parameters
----------
zobs : int, float
Redshift of object.
owaves : np.ndarray
Observed wavelengths in microns.
"""
if self.pf['tau_clumpy'] is None:
return 0.0
assert self.pf['tau_clumpy'].lower() == 'madau1995', \
"tau_clumpy='madau1995' is currently the sole option!"
return self.madau1995(z, owaves)
def L_of_Z_t(self, wave):
    """
    Return a 2-D spline of log10(luminosity per unit SFR) over
    (log10 time, log10 metallicity) at rest wavelength `wave` [Angstrom].

    The interpolant is built once per wavelength and cached.
    """
    if not hasattr(self, '_L_of_Z_t'):
        self._L_of_Z_t = {}

    if wave in self._L_of_Z_t:
        return self._L_of_Z_t[wave]

    tarr = self.src.times
    # Metallicity grid from the source model, sorted ascending.
    Zarr = np.sort(list(self.src.metallicities.values()))
    L = np.zeros((tarr.size, Zarr.size))
    for j, Z in enumerate(Zarr):
        L[:,j] = self.src.L_per_sfr_of_t(wave, Z=Z)

    # Interpolant: cubic spline in both log-axes over log-luminosity.
    self._L_of_Z_t[wave] = RectBivariateSpline(np.log10(tarr),
        np.log10(Zarr), np.log10(L), kx=3, ky=3)

    return self._L_of_Z_t[wave]
def Slope(self, zobs=None, tobs=None, spec=None, waves=None,
    sfh=None, zarr=None, tarr=None, hist={}, idnum=None,
    cam=None, rest_wave=None, band=None,
    return_norm=False, filters=None, filter_set=None, dlam=20.,
    method='linear', window=1, extras={}, picky=False, return_err=False):
    """
    Compute UV slope in some wavelength range or using photometry.

    Parameters
    ----------
    zobs : int, float
        Redshift of observation.
    tobs : int, float
        Time of observation [Myr]; converted to `zobs` if supplied.
    rest_wave : tuple
        Rest-wavelength range in which slope will be computed (Angstrom).
    cam : str, list, tuple
        Camera(s) whose filters are used; if None, operate on the spectrum.
    dlam : int
        Sample the spectrum with this wavelength resolution (Angstrom).
    window : int
        Can optionally operate on a smoothed version of the spectrum,
        obtained by convolving with a boxcar window function of this width.
    method : str
        'fit' (scipy curve_fit of a power law), 'linear' (least squares in
        log-log space), or 'diff' (two-point logarithmic derivative).
    return_norm : bool
        If True, return full (normalization, slope) fit parameters.
    return_err : bool
        If True, also return sqrt of the slope's fit variance
        (only meaningful for method='fit').

    Returns
    -------
    Slope (and optionally normalization / error). Sentinel value -99999
    indicates the slope could not be estimated.
    """

    assert (tobs is not None) or (zobs is not None)

    if tobs is not None:
        zobs = self.cosm.z_of_t(tobs * s_per_myr)

    ##
    # If no camera supplied, operate directly on spectrum.
    if cam is None:
        if waves is None:
            waves = np.arange(rest_wave[0], rest_wave[1]+dlam, dlam)

        owaves, oflux = self.ObserveSpectrum(zobs, spec=spec, waves=waves,
            sfh=sfh, zarr=zarr, tarr=tarr, flux_units='Ang', hist=hist,
            extras=extras, idnum=idnum, window=window)

        rwaves = waves
        ok = np.logical_and(rwaves >= rest_wave[0], rwaves <= rest_wave[1])

        x = owaves[ok==1]

        if oflux.ndim == 2:
            batch_mode = True
            y = oflux[:,ok==1].swapaxes(0, 1)
            # Initial fit guesses: peak flux, canonical slope of -2.5.
            ma = np.max(y, axis=0)
            sl = -2.5 * np.ones(ma.size)
            guess = np.vstack((ma, sl)).T
        else:
            batch_mode = False
            y = oflux[ok==1]
            guess = np.array([oflux[np.argmin(np.abs(owaves - 1.))], -2.4])

    else:
        # Photometric route: synthesize magnitudes in each camera's filters.
        if filters is not None:
            assert rest_wave is None, \
                "Set rest_wave=None if filters are supplied"

        if type(cam) not in [list, tuple]:
            cam = [cam]

        filt = []
        xphot = []
        dxphot = []
        ycorr = []
        for _cam in cam:
            _filters, _xphot, _dxphot, _ycorr = \
                self.Photometry(sfh=sfh, hist=hist, idnum=idnum, spec=spec,
                cam=_cam, filters=filters, filter_set=filter_set, waves=waves,
                dlam=dlam, tarr=tarr, tobs=tobs, extras=extras, picky=picky,
                zarr=zarr, zobs=zobs, rest_wave=rest_wave, window=window)

            filt.extend(list(_filters))
            xphot.extend(list(_xphot))
            dxphot.extend(list(_dxphot))
            ycorr.extend(list(_ycorr))

        # No matching filters? Return sentinel values.
        if len(filt) == 0:
            if idnum is not None:
                N = 1
            elif sfh is not None:
                N = sfh.shape[0]
            else:
                N = 1

            if return_norm:
                return -99999 * np.ones((N, 2))
            else:
                return -99999 * np.ones(N)

        filt = np.array(filt)
        xphot = np.array(xphot)
        dxphot = np.array(dxphot)
        ycorr = np.array(ycorr)

        # Sort arrays in ascending wavelength.
        isort = np.argsort(xphot)

        _x = xphot[isort]
        _y = ycorr[isort]

        # Recover flux to do power-law fit.
        xp, xm = dxphot.T
        dx = xp + xm

        # Need flux in units of A^-1: dw/dnu = lambda^2 / c, with filter
        # centroids converted micron -> Angstrom.
        _dwdn = (_x * 1e4)**2 / (c * 1e8)

        if rest_wave is not None:
            r = _x * 1e4 / (1. + zobs)
            ok = np.logical_and(r >= rest_wave[0], r <= rest_wave[1])
            x = _x[ok==1]
        else:
            ok = np.ones_like(_x)
            x = _x

        # Be careful in batch mode!
        if ycorr.ndim == 2:
            batch_mode = True
            # Invert AB magnitudes back to specific flux per Angstrom.
            _f = 10**(_y / -2.5) * flux_AB / _dwdn[:,None]
            y = _f[ok==1]
            ma = np.max(y, axis=0)
            sl = -2.5 * np.ones(ma.size)
            guess = np.vstack((ma, sl)).T
        else:
            batch_mode = False
            _f = 10**(_y / -2.5) * flux_AB / _dwdn
            y = _f[ok==1]
            ma = np.max(y)
            guess = np.array([ma, -2.])

        if ok.sum() == 2 and self.pf['verbose']:
            print("WARNING: Estimating slope at z={} from only two points: {}".format(zobs,
                filt[isort][ok==1]))

    ##
    # Fit a power law to the points.
    if method == 'fit':
        if len(x) < 2:
            if self.pf['verbose']:
                print("Not enough points to estimate slope")

            if batch_mode:
                corr = np.ones(y.shape[1])
            else:
                corr = 1

            if return_norm:
                return -99999 * corr, -99999 * corr
            else:
                return -99999 * corr

        if batch_mode:
            N = y.shape[1]
            popt = -99999 * np.ones((2, N))
            pcov = -99999 * np.ones((2, 2, N))

            for i in range(N):
                # Skip galaxies with no positive fluxes (fit would fail).
                if not np.any(y[:,i] > 0):
                    continue

                try:
                    popt[:,i], pcov[:,:,i] = curve_fit(_powlaw, x, y[:,i],
                        p0=guess[i], maxfev=10000)
                except RuntimeError:
                    popt[:,i], pcov[:,:,i] = -99999, -99999

        else:
            try:
                popt, pcov = curve_fit(_powlaw, x, y, p0=guess, maxfev=10000)
            except RuntimeError:
                popt, pcov = -99999 * np.ones(2), -99999 * np.ones(2)

    elif method == 'linear':
        # Ordinary least squares in log-log space; slope is the first
        # coefficient after the [-1::-1] reversal below.
        logx = np.log10(x)
        logy = np.log10(y)

        A = np.vstack([logx, np.ones(len(logx))]).T

        if batch_mode:
            N = y.shape[1]
            popt = -99999 * np.ones((2, N))
            pcov = -99999 * np.ones((2, 2, N))

            for i in range(N):
                popt[:,i] = np.linalg.lstsq(A, logy[:,i],
                    rcond=None)[0][-1::-1]
        else:
            popt = np.linalg.lstsq(A, logy, rcond=None)[0]
            pcov = -99999 * np.ones(2)

    elif method == 'diff':
        assert cam is None, "Should only use to skip photometry."

        # Remember that galaxy number is second dimension.
        logL = np.log(y)
        logw = np.log(x)

        if batch_mode:
            # Two-point logarithmic derivative = beta.
            beta = (logL[-1,:] - logL[0,:]) / (logw[-1,None] - logw[0,None])
        else:
            beta = (logL[-1] - logL[0]) / (logw[-1] - logw[0])

        popt = np.array([-99999, beta])
    else:
        # BUGFIX: was `raise NotImplemented(...)`, which is not an exception
        # class and itself raises TypeError; NotImplementedError is intended.
        raise NotImplementedError(
            "method={} not recognized; use 'fit', 'linear', or 'diff'.".format(method))

    if return_norm:
        return popt
    else:
        if return_err:
            # NOTE(review): pcov is only a true covariance for method='fit';
            # for other methods this returns the -99999 sentinel.
            return popt[1], np.sqrt(pcov[1,1])
        else:
            return popt[1]
def ObserveSpectrum(self, zobs, spec=None, sfh=None, waves=None,
    flux_units='Hz', tarr=None, tobs=None, zarr=None, hist={},
    idnum=None, window=1, extras={}, nthreads=1, load=True):
    """
    Take an input spectrum and "observe" it at redshift z.

    Parameters
    ----------
    zobs : int, float
        Redshift of observation.
    waves : np.ndarray
        Simulate spectrum at these rest wavelengths [Angstrom].
    spec : np.ndarray
        Specific luminosities in [erg/s/A]. Synthesized via `Spectrum`
        if not supplied.
    flux_units : str
        'Hz' returns flux per unit frequency; anything else divides by
        dw/dnu to give flux per unit wavelength [Angstrom^-1].

    Returns
    -------
    Observed wavelengths in microns, observed fluxes in erg/s/cm^2/Hz
    (or erg/s/cm^2/A if flux_units != 'Hz'), attenuated by the IGM.
    """

    if spec is None:
        spec = self.Spectrum(waves, sfh=sfh, tarr=tarr, zarr=zarr,
            zobs=zobs, tobs=None, hist=hist, idnum=idnum,
            extras=extras, window=window, load=load)

    dL = self.cosm.LuminosityDistance(zobs)

    if waves is None:
        waves = self.src.wavelengths
        dwdn = self.src.dwdn
        assert len(spec) == len(waves)
    else:
        # dw/dnu = lambda^2 / c, with c converted to Angstrom/s.
        dwdn = waves**2 / (c * 1e8)

    # Flux at Earth in erg/s/cm^2/Hz.
    f = spec / (4. * np.pi * dL**2)

    if flux_units == 'Hz':
        pass
    else:
        f /= dwdn

    # Redshift wavelengths and convert Angstrom -> micron.
    owaves = waves * (1. + zobs) / 1e4

    # Apply IGM attenuation (Madau 1995 line blanketing, if enabled).
    tau = self.OpticalDepth(zobs, owaves)
    T = np.exp(-tau)

    return owaves, f * T
def Photometry(self, spec=None, sfh=None, cam='wfc3', filters='all',
    filter_set=None, dlam=20., rest_wave=None, extras={}, window=1,
    tarr=None, zarr=None, waves=None, zobs=None, tobs=None, band=None,
    hist={}, idnum=None, flux_units=None, picky=False, lbuffer=200.,
    ospec=None, owaves=None, load=True):
    """
    Synthesize photometry at redshift `zobs`. Just a wrapper around `Spectrum`.

    Parameters
    ----------
    cam : str
        Camera name ('wfc', 'wfc3', 'nircam') or anything else to treat
        `filters` as rest-frame top-hat spectral windows.
    filters : str, list, tuple, dict
        Filters to include; a dict is assumed keyed by (rounded) redshift.
    rest_wave : tuple, None
        If given, only filters whose centers land in this rest-frame
        range [Angstrom] are used (edges too, if picky=True).
    ospec, owaves : np.ndarray
        Pre-"observed" flux [erg/s/cm^2/Hz] and wavelengths [micron];
        skips spectral synthesis entirely. NOTE: `ospec` is modified in
        place below (NaNs zeroed).

    Returns
    -------
    Tuple containing (in this order):
        - Names of all filters included
        - Midpoints of photometric filters [microns]
        - Width of filters [microns]
        - Apparent magnitudes corrected for filter transmission.
    """

    assert (tobs is not None) or (zobs is not None)

    if zobs is None:
        zobs = self.cosm.z_of_t(tobs * s_per_myr)

    # Might be stored for all redshifts so pick out zobs.
    if type(filters) == dict:
        assert zobs is not None
        filters = filters[round(zobs)]

    # Get transmission curves.
    if cam in self.cameras.keys():
        filter_data = self.cameras[cam]._read_throughputs(filter_set=filter_set,
            filters=filters)
    else:
        # Can supply spectral windows, e.g., Calzetti+ 1994, in which case
        # we assume perfect transmission but otherwise just treat like
        # photometric filters.
        assert type(filters) in [list, tuple, np.ndarray]

        wraw = np.array(filters)
        x1 = wraw.min()
        x2 = wraw.max()
        # 1-Angstrom grid spanning all windows, converted to observed microns.
        x = np.arange(x1-1, x2+1, 1.) * 1e-4 * (1. + zobs)

        # Note that in this case, the filter wavelengths are in rest-frame
        # units, so we convert them to observed wavelengths before
        # photometrizing everything.
        filter_data = {}
        for _window in filters:
            lo, hi = _window

            lo *= 1e-4 * (1. + zobs)
            hi *= 1e-4 * (1. + zobs)

            # Top-hat transmission: unity inside window, zero outside.
            y = np.zeros_like(x)
            y[np.logical_and(x >= lo, x <= hi)] = 1
            mi = np.mean([lo, hi])
            dx = np.array([hi - mi, mi - lo])
            Tavg = 1.

            filter_data[_window] = x, y, mi, dx, Tavg

    all_filters = filter_data.keys()

    # Figure out spectral range we need to model for these filters.
    # Find bluest and reddest filters, set wavelength range with some
    # padding above and below these limits.
    lmin = np.inf
    lmax = 0.0
    ct = 0
    for filt in filter_data:
        x, y, cent, dx, Tavg = filter_data[filt]

        # If we're only doing this for the sake of measuring a slope, we
        # might restrict the range based on wavelengths of interest, i.e.,
        # we may not use all the filters.

        # Right now, will include filters as long as their center is in
        # the requested band. This results in fluctuations in slope
        # measurements, so to be more stringent set picky=True.
        if rest_wave is not None:

            if picky:
                l = (cent - dx[1]) * 1e4 / (1. + zobs)
                r = (cent + dx[0]) * 1e4 / (1. + zobs)

                if (l < rest_wave[0]) or (r > rest_wave[1]):
                    continue

            cent_r = cent * 1e4 / (1. + zobs)

            if (cent_r < rest_wave[0]) or (cent_r > rest_wave[1]):
                continue

        # 20% padding on either side of the filter.
        lmin = min(lmin, cent - dx[1] * 1.2)
        lmax = max(lmax, cent + dx[0] * 1.2)
        ct += 1

    # No filters in range requested.
    if ct == 0:
        return [], [], [], []

    # Here's our array of REST wavelengths.
    if waves is None:
        # Convert from microns to Angstroms, undo redshift.
        lmin = lmin * 1e4 / (1. + zobs)
        lmax = lmax * 1e4 / (1. + zobs)

        lmin = max(lmin, self.src.wavelengths.min())
        lmax = min(lmax, self.src.wavelengths.max())

        # Force edges to be multiples of dlam.
        l1 = lmin - lbuffer
        l1 -= l1 % dlam
        l2 = lmax + lbuffer

        waves = np.arange(l1, l2+dlam, dlam)

    # Get spectrum first.
    if (spec is None) and (ospec is None):
        spec = self.Spectrum(waves, sfh=sfh, tarr=tarr, tobs=tobs,
            zarr=zarr, zobs=zobs, band=band, hist=hist,
            idnum=idnum, extras=extras, window=window, load=load)

        # Observed wavelengths in micron, flux in erg/s/cm^2/Hz.
        wave_obs, flux_obs = self.ObserveSpectrum(zobs, spec=spec,
            waves=waves, extras=extras, window=window)
    elif ospec is not None:
        flux_obs = ospec
        wave_obs = owaves
    else:
        raise ValueError('This shouldn\'t happen')

    # Might be running over lots of galaxies.
    batch_mode = False
    if flux_obs.ndim == 2:
        batch_mode = True

    # Convert microns to cm. micron * (m / 1e6) * (1e2 cm / m)
    freq_obs = c / (wave_obs * 1e-4)

    # Why do NaNs happen? Just nircam.
    flux_obs[np.isnan(flux_obs)] = 0.0

    # Loop over filters and re-weight spectrum.
    xphot = []      # Filter centroids
    wphot = []      # Filter width
    yphot_corr = [] # Magnitudes corrected for filter transmissions.

    # Loop over filters, compute fluxes in band (accounting for
    # transmission fraction) and convert to observed magnitudes.
    for filt in all_filters:

        x, T, cent, dx, Tavg = filter_data[filt]

        if rest_wave is not None:
            cent_r = cent * 1e4 / (1. + zobs)
            if (cent_r < rest_wave[0]) or (cent_r > rest_wave[1]):
                continue

        # Re-grid transmission onto provided wavelength axis.
        T_regrid = np.interp(wave_obs, x, T, left=0, right=0)

        # Remember: observed flux is in erg/s/cm^2/Hz.
        # Integrate over frequency to get integrated flux in band
        # defined by filter. The -1 factors account for frequency
        # decreasing as wavelength increases.
        if batch_mode:
            integrand = -1. * flux_obs * T_regrid[None,:]
            _yphot = np.sum(integrand[:,0:-1] * np.diff(freq_obs)[None,:],
                axis=1)
        else:
            integrand = -1. * flux_obs * T_regrid
            _yphot = np.sum(integrand[0:-1] * np.diff(freq_obs))

        # Normalization: integral of transmission over frequency.
        corr = np.sum(T_regrid[0:-1] * -1. * np.diff(freq_obs), axis=-1)

        xphot.append(cent)
        yphot_corr.append(_yphot / corr)
        wphot.append(dx)

    xphot = np.array(xphot)
    wphot = np.array(wphot)
    yphot_corr = np.array(yphot_corr)

    # Convert to AB magnitudes and return.
    return all_filters, xphot, wphot, -2.5 * np.log10(yphot_corr / flux_AB)
def Spectrum(self, waves, sfh=None, tarr=None, zarr=None, window=1,
    zobs=None, tobs=None, band=None, idnum=None, units='Hz', hist={},
    extras={}, load=True):
    """
    Synthesize a spectrum at rest wavelengths `waves` [Angstrom].

    This is just a wrapper around `Luminosity`, called once per wavelength
    (optionally parallelized with pymp if pf['nthreads'] is set).

    NOTE(review): despite the default, `sfh` (or `hist`) is effectively
    required — `sfh.ndim` below fails on None.

    Returns
    -------
    Array of specific luminosities; per-Hz by default, per-Angstrom if
    units in ['A', 'Ang']. Leading dimensions are (galaxy, time) when
    applicable.
    """
    # Select single row of SFH array if `idnum` provided.
    if sfh.ndim == 2 and idnum is not None:
        sfh = sfh[idnum,:]

    batch_mode = sfh.ndim == 2
    # If neither zobs nor tobs given, return full luminosity history.
    time_series = (zobs is None) and (tobs is None)

    # Shape of output array depends on some input parameters.
    shape = []
    if batch_mode:
        shape.append(sfh.shape[0])
    if time_series:
        shape.append(tarr.size)
    shape.append(len(waves))

    pb = ProgressBar(waves.size, name='l(nu)', use=self.pf['progress_bar'])
    pb.start()

    ##
    # Can thread this calculation
    ##
    if (self.pf['nthreads'] is not None):

        try:
            import pymp
            have_pymp = True
        except ImportError:
            have_pymp = False

        assert have_pymp, "Need pymp installed to run with nthreads!=None!"

        pymp.config.num_threads = self.pf['nthreads']

        if self.pf['verbose']:
            print("Setting nthreads={} for spectral synthesis.".format(
                self.pf['nthreads']))

        # Shared-memory output array so all threads can write results.
        spec = pymp.shared.array(shape, dtype='float64')
        with pymp.Parallel(self.pf['nthreads']) as p:
            for i in p.xrange(0, waves.size):
                # Wavelength is always the last axis.
                slc = (Ellipsis, i) if (batch_mode or time_series) else i

                spec[slc] = self.Luminosity(wave=waves[i],
                    sfh=sfh, tarr=tarr, zarr=zarr, zobs=zobs, tobs=tobs,
                    band=band, hist=hist, idnum=idnum,
                    extras=extras, window=window, load=load)

                pb.update(i)
    else:
        spec = np.zeros(shape)
        for i, wave in enumerate(waves):
            slc = (Ellipsis, i) if (batch_mode or time_series) else i

            spec[slc] = self.Luminosity(wave=wave,
                sfh=sfh, tarr=tarr, zarr=zarr, zobs=zobs, tobs=tobs,
                band=band, hist=hist, idnum=idnum,
                extras=extras, window=window, load=load)

            pb.update(i)

    pb.finish()

    if units in ['A', 'Ang']:
        # Convert erg/s/Hz -> erg/s/A via dw/dnu = lambda^2 / c.
        dwdn = waves**2 / (c * 1e8)
        spec /= dwdn

    return spec
def Magnitude(self, wave=1600., sfh=None, tarr=None, zarr=None, window=1,
    zobs=None, tobs=None, band=None, idnum=None, hist={}, extras={}):
    """
    Return absolute AB magnitude at rest wavelength `wave` [Angstrom].

    Thin wrapper: synthesizes the luminosity via `Luminosity`, then
    converts with self.magsys.L_to_MAB (magsys presumably supplied by the
    parent population object — not defined in this class; TODO confirm).
    """
    L = self.Luminosity(wave=wave, sfh=sfh, tarr=tarr, zarr=zarr,
        zobs=zobs, tobs=tobs, band=band, idnum=idnum, hist=hist,
        extras=extras, window=window)

    MAB = self.magsys.L_to_MAB(L, z=zobs)

    return MAB
def _oversample_sfh(self, ages, sfh, i):
    """
    Over-sample time axis while stellar populations are young if the time
    resolution is worse than 1 Myr / grid point.

    Parameters
    ----------
    ages : np.ndarray
        Ages [Myr] of all past star-forming episodes, in *descending* order.
    sfh : np.ndarray
        SFRs for those episodes; last axis aligned with `ages`.
    i : int
        Index of current output time (used to slice the older SFH portion).

    Returns
    -------
    (_ages, _SFR): age grid with the final `oversampling_below` stretch
    refined to 1 Myr resolution, and SFRs resampled onto it.
    """

    batch_mode = sfh.ndim == 2

    # Use 1 Myr time resolution for final stretch.
    # Final stretch is determined by `oversampling_below` attribute.
    # This loop determines how many elements at the end of
    # `ages` are within the `oversampling_below` zone.
    ct = 0
    while ages[-1-ct] < self.oversampling_below:
        ct += 1

        if ct + 1 == len(ages):
            break

    ifin = -1 - ct

    # 1-Myr grid over the young stretch, reversed to match descending ages.
    ages_x = np.arange(ages[-1], ages[ifin], 1.)[-1::-1]

    # `ages_x` is an array of ages at higher resolution than native data
    # to-be-tagged on the end of supplied `ages`.

    # Must augment ages and dt accordingly.
    _ages = np.hstack((ages[0:ifin], ages_x))
    # NOTE(review): `_dt` is computed but unused here; the caller
    # recomputes it from the returned `_ages`.
    _dt = np.abs(np.diff(_ages) * 1e6)

    if batch_mode:
        xSFR = np.ones((sfh.shape[0], ages_x.size-1))
    else:
        xSFR = np.ones(ages_x.size-1)

    # Must allow non-constant SFR within over-sampled region
    # as it may be tens of Myr.
    # Walk back from the end and fill in SFR.
    N = int((ages_x.size - 1) / ct)
    for _i in range(0, ct):

        # Negative-stride slice selecting the N fine samples that fall in
        # native interval `_i` (counted from the young end).
        if batch_mode:
            slc = Ellipsis, slice(-1 * N * _i-1, -1 * N * (_i + 1) -1, -1)
        else:
            slc = slice(-1 * N * _i-1, -1 * N * (_i + 1) -1, -1)

        if batch_mode:
            _sfh_rs = np.array([sfh[:,-_i-2]]*N).T
            xSFR[slc] = _sfh_rs * np.ones(N)[None,:]
        else:
            xSFR[slc] = sfh[-_i-2] * np.ones(N)

    # Need to tack on the SFH at ages older than where our
    # oversampling approach kicks in.
    if batch_mode:
        if ct + 1 == len(ages):
            _SFR = np.hstack((sfh[:,0][:,None], xSFR))
        else:
            _SFR = np.hstack((sfh[:,0:i+1][:,0:ifin+1], xSFR))
    else:
        if ct + 1 == len(ages):
            _SFR = np.hstack((sfh[0], xSFR))
        else:
            _SFR = np.hstack((sfh[0:i+1][0:ifin+1], xSFR))

    return _ages, _SFR
@property
def _cache_lum_ctr(self):
if not hasattr(self, '_cache_lum_ctr_'):
self._cache_lum_ctr_ = 0
return self._cache_lum_ctr_
def _cache_kappa(self, wave):
if not hasattr(self, '_cache_kappa_'):
self._cache_kappa_ = {}
if wave in self._cache_kappa_:
return self._cache_kappa_[wave]
return None
def _cache_lum(self, kwds):
    """
    Cache object for spectral synthesis of stellar luminosity.

    Searches previously-cached `Luminosity` calls for one whose keywords
    match `kwds`. Returns the cached (kwargs, data) pair on a hit, or
    (kwds, None) on a miss.
    """
    if not hasattr(self, '_cache_lum_'):
        self._cache_lum_ = {}

    # notok: -1 = cache empty / never compared; 0 = match; >0 = mismatch.
    notok = -1

    t1 = time.time()

    # If we set order by hand, it greatly speeds things up because
    # more likely than not, the redshift and wavelength are the only
    # things that change and that's an easy logical check to do.
    # Checking that SFHs, histories, etc., match is more expensive.
    ok_keys = ('wave', 'zobs', 'tobs', 'idnum', 'sfh', 'tarr', 'zarr',
        'window', 'band', 'hist', 'extras', 'load')

    ct = -1

    # Loop through keys to do more careful comparison for unhashable types.
    all_keys = self._cache_lum_.keys()

    # Search in reverse order since often the keys represent different
    # wavelengths, which are generated in ascending order.
    for keyset in all_keys:
        ct += 1

        # Remember: keyset is just a number; value is (kwargs, data).
        kw, data = self._cache_lum_[keyset]

        # Check wavelength first. Most common thing.

        # If we're not being as careful as possible, retrieve cached
        # result so long as wavelength and zobs match requested values.
        # This should only be used when SpectralSynthesis is summoned
        # internally! Likely to lead to confusing behavior otherwise.
        if (self.careful_cache == 0) and ('wave' in kw) and ('zobs' in kw):
            if (kw['wave'] == kwds['wave']) and (kw['zobs'] == kwds['zobs']):
                notok = 0
                break

        notok = 0
        # Loop over cached keywords, compare to those supplied.
        for key in ok_keys:

            if key not in kwds:
                notok += 1
                break

            # For unhashable types, must work on case-by-case basis.
            if type(kwds[key]) != type(kw[key]):
                notok += 1
                break
            elif type(kwds[key]) == np.ndarray:
                if np.array_equal(kwds[key], kw[key]):
                    continue
                else:
                    # This happens when, e.g., we pass SFH by hand.
                    notok += 1
                    break
            elif type(kwds[key]) == dict:
                if kwds[key] == kw[key]:
                    continue
                else:
                    notok += 1
                    break
            else:
                if kwds[key] == kw[key]:
                    continue
                else:
                    notok += 1
                    break

        if notok > 0:
            continue

        # If we're here, all keywords matched: load this entry.
        break

    t2 = time.time()

    if notok < 0:
        return kwds, None
    elif notok == 0:
        if (self.pf['verbose'] and self.pf['debug']):
            print("Loaded from cache! Took N={} iterations, {} sec to find match".format(ct, t2 - t1))
        # Recall that this is (kwds, data).
        return self._cache_lum_[keyset]
    else:
        return kwds, None
def Luminosity(self, wave=1600., sfh=None, tarr=None, zarr=None, window=1,
zobs=None, tobs=None, band=None, idnum=None, hist={}, extras={},
load=True, use_cache=True):
"""
Synthesize luminosity of galaxy with given star formation history at a
given wavelength and time.
Parameters
----------
sfh : np.ndarray
Array of SFRs. If 1-D, should be same shape as time or redshift
array. If 2-D, first dimension should correspond to galaxy number
and second should be time.
tarr : np.ndarray
Array of times in ascending order [Myr].
zarr : np.ndarray
Array of redshift in ascending order (so decreasing time). Only
supply if not passing `tarr` argument.
wave : int, float
Wavelength of interest [Angstrom]
window : int
Average over interval about `wave`. [Angstrom]
zobs : int, float
Redshift of observation.
tobs : int, float
Time of observation (will be computed self-consistently if `zobs`
is supplied).
hist : dict
Extra information we may need, e.g., metallicity, dust optical
depth, etc. to compute spectrum.
Returns
-------
Luminosity at wavelength=`wave` in units of erg/s/Hz.
"""
setup_1 = (sfh is not None) and \
((tarr is not None) or (zarr is not None))
setup_2 = hist != {}
do_all_time = False
if (tobs is None) and (zobs is None):
do_all_time = True
#assert (tobs is not None) or (zobs is not None), \
# "Must supply time or redshift of observation, `tobs` or `zobs`!"
assert setup_1 or setup_2
if setup_1:
assert (sfh is not None)
elif setup_2:
assert ('z' in hist) or ('t' in hist), \
"`hist` must contain redshifts, `z`, or times, `t`."
sfh = hist['SFR'] if 'SFR' in hist else hist['sfr']
if 'z' in hist:
zarr = hist['z']
else:
tarr = hist['t']
kw = {'sfh':sfh, 'zobs':zobs, 'tobs':tobs, 'wave':wave, 'tarr':tarr,
'zarr':zarr, 'band':band, 'idnum':idnum, 'hist':hist,
'extras':extras, 'window': window}
if load:
_kwds, cached_result = self._cache_lum(kw)
else:
self._cache_lum_ = {}
cached_result = None
if cached_result is not None:
return cached_result
if sfh.ndim == 2 and idnum is not None:
sfh = sfh[idnum,:]
if 'Z' in hist:
Z = hist['Z'][idnum,:]
# Don't necessarily need Mh here.
if 'Mh' in hist:
Mh = hist['Mh'][idnum,:]
else:
if 'Mh' in hist:
Mh = hist['Mh']
if 'Z' in hist:
Z = hist['Z']
# If SFH is 2-D it means we're doing this for multiple galaxies at once.
# The first dimension will be number of galaxies and second dimension
# is time/redshift.
batch_mode = sfh.ndim == 2
# Parse time/redshift information
if tarr is not None:
zarr = self.cosm.z_of_t(tarr * s_per_myr)
else:
assert tarr is None
tarr = self.cosm.t_of_z(zarr) / s_per_myr
assert np.all(np.diff(tarr) > 0), \
"Must supply SFH in time-ascending (i.e., redshift-descending) order!"
# Convert tobs to redshift.
if tobs is not None:
zobs = self.cosm.z_of_t(tobs * s_per_myr)
if type(tobs) == np.ndarray:
assert (tobs.min() >= tarr.min()) and (tobs.max() <= tarr.max()), \
"Requested times of observation (`tobs={}-{}`) not in supplied range ({}, {})!".format(tobs.min(),
tobs.max(), tarr.min(), tarr.max())
else:
assert tarr.min() <= tobs <= tarr.max(), \
"Requested time of observation (`tobs={}`) not in supplied range ({}, {})!".format(tobs,
tarr.min(), tarr.max())
# Prepare slice through time-axis.
if zobs is None:
slc = Ellipsis
izobs = None
else:
# Need to be sure that we grab a grid point exactly at or just
# below the requested redshift (?)
izobs = np.argmin(np.abs(zarr - zobs))
if zarr[izobs] > zobs:
izobs += 1
if batch_mode:
#raise NotImplemented('help')
# Need to slice over first dimension now...
slc = Ellipsis, slice(0, izobs+1)
else:
slc = slice(0, izobs+1)
if not (zarr.min() <= zobs <= zarr.max()):
if batch_mode:
return np.ones(sfh.shape[0]) * -99999
else:
return -99999
fill = np.zeros(1)
tyr = tarr * 1e6
dt = np.hstack((np.diff(tyr), fill))
# Figure out if we need to over-sample the grid we've got to more
# accurately solve for young stellar populations.
oversample = self.oversampling_enabled and (dt[-2] > 1.01e6)
# Used to also require zobs is not None. Why?
##
# Done parsing time/redshift
# Is this luminosity in some bandpass or monochromatic?
if band is not None:
# Will have been supplied in Angstroms
b = h_p * c / (np.array(band) * 1e-8) / erg_per_ev
Loft = self.src.IntegratedEmission(b[1], b[0],
energy_units=True)
# Need to get Hz^-1 units back
db = b[0] - b[1]
Loft = Loft / (db * erg_per_ev / h_p)
#raise NotImplemented('help!')
else:
Loft = self.src.L_per_sfr_of_t(wave=wave, avg=window)
#print("Synth. Lum = ", wave, window)
#
# Setup interpolant for luminosity as a function of SSP age.
Loft[Loft == 0] = tiny_lum
_func = interp1d(np.log(self.src.times), np.log(Loft),
kind=self.pf['pop_synth_age_interp'], bounds_error=False,
fill_value=(Loft[0], Loft[-1]))
# Extrapolate linearly at times < 1 Myr
_m = (Loft[1] - Loft[0]) / (self.src.times[1] - self.src.times[0])
L_small_t = lambda age: _m * age + Loft[0]
if not (self.src.pf['source_aging'] or self.src.pf['source_ssp']):
L_asympt = np.exp(_func(np.log(self.src.pf['source_tsf'])))
#L_small_t = lambda age: Loft[0]
# Extrapolate as PL at t < 1 Myr based on first two
# grid points
#m = np.log(Loft[1] / Loft[0]) \
# / np.log(self.src.times[1] / self.src.times[0])
#func = lambda age: np.exp(m * np.log(age) + np.log(Loft[0]))
#if zobs is None:
Lhist = np.zeros(sfh.shape)
#if hasattr(self, '_sfh_zeros'):
# Lhist = self._sfh_zeros.copy()
#else:
# Lhist = np.zeros_like(sfh)
# self._sfh_zeros = Lhist.copy()
#else:
# pass
# Lhist will just get made once. Don't need to initialize
##
# Loop over the history of object(s) and compute the luminosity of
# simple stellar populations of the corresponding ages (relative to
# zobs).
##
# Start from initial redshift and move forward in time, i.e., from
# high redshift to low.
for i, _tobs in enumerate(tarr):
# If zobs is supplied, we only have to do one iteration
# of this loop. This is just a dumb way to generalize this function
# to either do one redshift or return a whole history.
if not do_all_time:
if (zarr[i] > zobs):
continue
##
# Life if easy for constant SFR models
if not (self.src.pf['source_aging'] or self.src.pf['source_ssp']):
if not do_all_time:
Lhist = L_asympt * sfh[:,i]
break
raise NotImplemented('does this happne?')
Lhist[:,i] = L_asympt * sfh[:,i]
continue
# If we made it here, it's time to integrate over star formation
# at previous times. First, retrieve ages of stars formed in all
# past star forming episodes.
ages = tarr[i] - tarr[0:i+1]
# Note: this will be in order of *descending* age, i.e., the
# star formation episodes furthest in the past are first in the
# array.
# Recall also that `sfh` contains SFRs for all time, so any
# z < zobs will contain zeroes, hence all the 0:i+1 slicing below.
# Treat metallicity evolution? If so, need to grab luminosity as
# function of age and Z.
if self.pf['pop_enrichment']:
assert batch_mode
logA = np.log10(ages)
logZ = np.log10(Z[:,0:i+1])
L_per_msun = np.zeros_like(ages)
logL_at_wave = self.L_of_Z_t(wave)
L_per_msun = np.zeros_like(logZ)
for j, _Z_ in enumerate(range(logZ.shape[0])):
L_per_msun[j,:] = 10**logL_at_wave(logA, logZ[j,:],
grid=False)
# erg/s/Hz
if batch_mode:
Lall = L_per_msun[:,0:i+1] * sfh[:,0:i+1]
else:
Lall = L_per_msun[0:i+1] * sfh[0:i+1]
if oversample:
raise NotImplemented('help!')
else:
_dt = dt[0:i]
_ages = ages
else:
##
# If time resolution is >= 2 Myr, over-sample final interval.
if oversample and len(ages) > 1:
if batch_mode:
_ages, _SFR = self._oversample_sfh(ages, sfh[:,0:i+1], i)
else:
_ages, _SFR = self._oversample_sfh(ages, sfh[0:i+1], i)
_dt = np.abs(np.diff(_ages) * 1e6)
# `_ages` is in order of old to young.
# Now, compute luminosity at expanded ages.
L_per_msun = np.exp(_func(np.log(_ages)))
# Interpolate linearly at t < 1 Myr
L_per_msun[_ages < 1] = L_small_t(_ages[_ages < 1])
#L_per_msun[_ages < 10] = 0.
# erg/s/Hz/yr
if batch_mode:
Lall = L_per_msun * _SFR
else:
Lall = L_per_msun * _SFR
else:
L_per_msun = np.exp(_func(np.log(ages)))
#L_per_msun = np.exp(np.interp(np.log(ages),
# np.log(self.src.times), np.log(Loft),
# left=np.log(Loft[0]), right=np.log(Loft[-1])))
_dt = dt[0:i]
# Fix early time behavior
L_per_msun[ages < 1] = L_small_t(ages[ages < 1])
_ages = ages
# erg/s/Hz/yr
if batch_mode:
Lall = L_per_msun * sfh[:,0:i+1]
else:
Lall = L_per_msun * sfh[0:i+1]
# Correction for IMF sampling (can't use SPS).
#if self.pf['pop_sample_imf'] and np.any(bursty):
# life = self._stars.tab_life
# on = np.array([life > age for age in ages])
#
# il = np.argmin(np.abs(wave - self._stars.wavelengths))
#
# if self._stars.aging:
# raise NotImplemented('help')
# lum = self._stars.tab_Ls[:,il] * self._stars.dldn[il]
# else:
# lum = self._stars.tab_Ls[:,il] * self._stars.dldn[il]
#
# # Need luminosity in erg/s/Hz
# #print(lum)
#
# # 'imf' is (z or age, mass)
#
# integ = imf[bursty==1,:] * lum[None,:]
# Loft = np.sum(integ * on[bursty==1], axis=1)
#
# Lall[bursty==1] = Loft
# Apply local reddening
#tau_bc = self.pf['pop_tau_bc']
#if tau_bc > 0:
#
# corr = np.ones_like(_ages) * np.exp(-tau_bc)
# corr[_ages > self.pf['pop_age_bc']] = 1
#
# Lall *= corr
###
## Integrate over all times up to this tobs
if batch_mode:
# Should really just np.sum here...using trapz assumes that
# the SFH is a smooth function and not a series of constant
# SFRs. Doesn't really matter in practice, though.
if not do_all_time:
Lhist = np.trapz(Lall, dx=_dt, axis=1)
else:
Lhist[:,i] = np.trapz(Lall, dx=_dt, axis=1)
else:
if not do_all_time:
Lhist = np.trapz(Lall, dx=_dt)
else:
Lhist[i] = np.trapz(Lall, dx=_dt)
##
# In this case, we only need one iteration of this loop.
##
if not do_all_time:
break
##
# Redden spectra
##
if 'Sd' in hist:
# Redden away!
if np.any(hist['Sd'] > 0) and (band is None):
assert 'kappa' in extras
_kappa = self._cache_kappa(wave)
if _kappa is None:
kappa = extras['kappa'](wave=wave, Mh=Mh)
self._cache_kappa_[wave] = kappa
else:
kappa = _kappa
kslc = idnum if idnum is not None else Ellipsis
if idnum is not None:
Sd = hist['Sd'][kslc]
if type(hist['fcov']) in [int, float, np.float64]:
fcov = hist['fcov']
else:
fcov = hist['fcov'][kslc]
rand = hist['rand'][kslc]
else:
Sd = hist['Sd']
fcov = hist['fcov']
rand = hist['rand']
tau = kappa * Sd
clear = rand > fcov
block = ~clear
if idnum is not None:
Lout = Lhist * np.exp(-tau[izobs])
#if self.pf['pop_dust_holes'] == 'big':
# Lout = Lhist * clear[izobs] \
# + Lhist * np.exp(-tau[izobs]) * block[izobs]
#else:
# Lout = Lhist * (1. - fcov[izobs]) \
# + Lhist * fcov[izobs] * np.exp(-tau[izobs])
else:
Lout = Lhist * np.exp(-tau[:,izobs])
#if self.pf['pop_dust_holes'] == 'big':
# print(Lhist.shape, clear.shape, tau.shape, block.shape)
# Lout = Lhist * clear[:,izobs] \
# + Lhist * np.exp(-tau[:,izobs]) * block[:,izobs]
#else:
# Lout = Lhist * (1. - fcov[:,izobs]) \
# + Lhist * fcov[:,izobs] * np.exp(-tau[:,izobs])
else:
Lout = Lhist.copy()
else:
Lout = Lhist.copy()
#del Lhist, tau, Lall
#gc.collect()
##
# Sum luminosity of parent halos along merger tree
##
# Don't change shape, just zero-out luminosities of
# parent halos after they merge?
if hist is not None:
do_mergers = self.pf['pop_mergers'] and batch_mode
if 'children' in hist:
if (hist['children'] is not None) and do_mergers:
child_iz, child_iM = children.T
is_central = child_iM == -1
if np.all(is_central == 1):
pass
else:
print("Looping over {} halos...".format(sfh.shape[0]))
pb = ProgressBar(sfh.shape[0], use=self.pf['progress_bar'])
pb.start()
# Loop over all 'branches'
for i in range(SFR.shape[0]):
# This means the i'th halo is alive and well at the
# final redshift, i.e., it's a central
if is_central[i]:
continue
pb.update(i)
# At this point, need to figure out which child halos
# to dump mass and SFH into...
# Be careful with redshift array.
# We're now working in ascending time, reverse redshift,
# so we need to correct the child iz values. We've also
# chopped off elements at z < zobs.
#iz = Nz0 - child_iz[i]
# This `iz` should not be negative despite us having
# chopped up the redshift array since getting to this
# point in the loop is predicated on being a parent of
# another halo, i.e., not surviving beyond this redshift.
# Lout is just 1-D at this point, i.e., just luminosity
# *now*.
# Add luminosity to child halo. Zero out luminosity of
# parent to avoid double counting. Note that nh will
# also have been zeroed out but we're just being careful.
Lout[child_iM[i]] += 1 * Lout[i]
Lout[i] = 0.0
pb.finish()
##
# Will be unhashable types so just save to a unique identifier
##
if use_cache:
self._cache_lum_[self._cache_lum_ctr] = kw, Lout
self._cache_lum_ctr_ += 1
# Get outta here.
return Lout
| 37.665966 | 119 | 0.445981 |
d31b2a6933989c70fda77231552e1edb2868a85c | 12,212 | py | Python | payments/mtn_old.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | null | null | null | payments/mtn_old.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | payments/mtn_old.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | null | null | null | '''mtn sdb'''
import remit.settings as settings
from datetime import datetime
import requests, base64
from BeautifulSoup import BeautifulSoup
from remit.tasks import send_sms, send_email
from remit.utils import debug, mailer
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
import json
import xmltodict
import suds
import datetime
import time
#import logging
#logging.getLogger('suds.client').setLevel(logging.CRITICAL)
from suds.sax.element import Element
import logging
#logging.basicConfig(level=logging.INFO)
#logging.getLogger('suds.transport.http').setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logging.getLogger('suds.client').setLevel(logging.DEBUG)
logging.getLogger('suds.transport').setLevel(logging.DEBUG)
from suds.sudsobject import asdict
#logging.getLogger('suds.xsd.schema').setLevel(logging.DEBUG)
#logging.getLogger('suds.wsdl').setLevel(logging.DEBUG)
class Mtn():
    """MTN Uganda mobile money client.

    Thin wrapper around MTN's SDP SOAP services (driven through ``suds`` and
    local WSDL files) plus a few plain HTTP/JSON endpoints, used for
    mobile-money deposits, KYC / registration checks and account-holder
    validation.  Legacy Python 2 code (``print`` statements, ``unicode``,
    ``base64.encodestring``).
    """
    def __init__(self):
        # Local WSDL files shipped with the project, served via file:// URLs.
        self.base_path = "file:%spayments/wsdl/" % settings.BASE_DIR
        # ISO-8601 timestamp with fractional seconds stripped, suffixed "Z".
        # (`datetime.datetime` works because the later `import datetime` in
        # this module rebinds the name from `from datetime import datetime`.)
        self.OrderDateTime = unicode(datetime.datetime.now().isoformat()).partition('.')[0]+"Z"
        self.OpCoID = 'UG'
        self.SenderID = 'MTN'
        self.VendorCode='REMITUG'
        self.TypeCode='GSM'
        # Unix epoch seconds; reused as nonce/timestamp in the SDP digests.
        self.OrderDateTimestamp = int(time.time())
    def get_client(self, filename, url, headers=False):
        """Build a suds SOAP client for the WSDL `filename`, pointed at `url`.

        When `headers` is falsy, a default WSSE UsernameToken header set is
        generated (PasswordDigest = Base64(SHA1(nonce + created + password))).
        """
        base_url = "%s%s" % (
            self.base_path,
            filename
        )
        import base64
        import hashlib
        # NOTE(review): nonce is hard-coded; presumably it should be random
        # per request -- confirm against the SDP WSSE spec.
        Nonce = "gagdasgsagasdgsadgsadsda"
        Created = self.OrderDateTime
        digest = "%s%s%s"%(Nonce,Created,settings.MTN_SDP_PASS)
        digest = hashlib.sha1(digest).digest()
        PasswordDigest = base64.encodestring(digest).replace('\n', '')
        if not headers:
            headers = {
                #'Host': '172.25.48.43:8312',
                #'X-RequestHeader': 'FA="256783370747"',
                'Accept-Encoding': 'gzip,deflate',
                'Accept': 'application/json',
                # NOTE(review): the next two lines are adjacent string
                # literals, so Python concatenates them and the dict gets a
                # single key 'Content-Type: application/jsonAuthorization'
                # mapped to the WSSE value.  Likely intended:
                #     'Content-Type': 'application/json',
                #     'Authorization': 'WSSE realm="SDP", profile="UsernameToken"',
                'Content-Type: application/json'
                'Authorization': 'WSSE realm="SDP", profile="UsernameToken"',
                'X-WSSE': 'UsernameToken Username="%s", PasswordDigest="%s",Nonce="%s",Created="%s"' % (
                    #settings.MTN_SDP_USERNAME,
                    settings.MTN_SDP_SERVICEID,
                    PasswordDigest,
                    Nonce,
                    Created
                ),
            }
        from suds.xsd.doctor import Import, ImportDoctor
        # Fix missing types with ImportDoctor
        schema_url = 'http://www.type-applications.com/character_set/'
        schema_import = Import(schema_url)
        schema_doctor = ImportDoctor(schema_import)
        client = suds.client.Client(base_url, headers=headers, doctor=schema_doctor)
        list_of_methods = [method for method in client.wsdl.services[0].ports[0].methods]
        print "Available methods %s" % list_of_methods
        #client.options.location = url
        # Point the client at the live endpoint instead of the WSDL address.
        client.options.location = url
        print client
        return client
    def DepositMoney(self, amount, number, transactionid, ref_text=""):
        '''
        Deposit `amount` of mobile money into the phone number `number`.

        Returns a dict with 'status' ('Ok' when the service reports code 01),
        'statuscode', the raw suds 'response' and, when present,
        'transaction_response_id' holding the mobile-money transaction id.
        '''
        number = self.clean_number(number)
        url = 'http://172.25.48.43:8310/ThirdPartyServiceUMMImpl/UMMServiceService/DepositMobileMoney/v17'
        headers = {
            'spld': '%s' % settings.MTN_SDP_SERVICEID,
            'serviceId': '201',
            'Content-Type': 'text/xml; charset=utf-8',
        }
        import hashlib
        # SDP password digest: hex MD5(serviceId + password + timestamp).
        m = hashlib.md5()
        digest = "%s%s%s" % (settings.MTN_SDP_SERVICEID,settings.MTN_SDP_PASS,self.OrderDateTimestamp)
        m.update(digest)
        PasswordDigest = m.hexdigest()#.replace('\n', '')
        from suds.sax.attribute import Attribute
        client = self.get_client("DepositMobileMoney.wsdl", url, headers=headers)
        # Authentication SOAP header (Huawei SDP RequestSOAPHeader schema).
        code = Element('spId').setText('%s' % settings.MTN_SDP_SERVICEID)
        pwd = Element('spPassword').setText('%s'%PasswordDigest)
        tsp = Element('timeStamp').setText('%s'%self.OrderDateTimestamp)
        reqsoapheader = Element('RequestSOAPHeader')
        reqsoapheader.insert(code)
        reqsoapheader.insert(pwd)
        reqsoapheader.insert(tsp)
        reqsoap_attribute = Attribute('xmlns', "http://www.huawei.com.cn/schema/common/v2_1")
        reqsoapheader.append(reqsoap_attribute)
        client.set_options(soapheaders=reqsoapheader)
        #CommonComponents = {}
        # Name/value pairs required by the DepositMobileMoney operation.
        CommonComponents = [
            {
                "name": "ProcessingNumber",
                "value": transactionid
            },
            {
                "name": "serviceId",
                #"value": 'WEETULI.sp1'#settings.MTN_SDP_USERNAME
                #"value": 'remitug.sp1'
                'value': '%s' % settings.MTN_SDP_USERNAME
            },
            {
                "name": "SenderID",
                "value": 'MOM'
            },
            {
                "name": "PrefLang",
                "value": 121212121
            },
            {
                "name": "OpCoID",
                "value": '25601'#self.OpCoID
            },
            {
                "name": "CurrCode",
                "value": 'UGX'
            },
            {
                "name": "MSISDNNum",
                'value': '%s' % number
            },
            {
                "name": "Amount",
                "value": amount
            },
            {
                "name": "Narration",
                "value": "sdf"
            },
            {
                "name": "OrderDateTime",
                "value": self.OrderDateTimestamp,
            },
        ]
        response = client.service.DepositMobileMoney(
            '201',#settings.MTN_SDP_SERVICEID,
            CommonComponents
        )
        data = {'status': '', 'statuscode': '', 'response': response}
        try:
            # Positional response fields: [0] status code, [2] momo txn id.
            status = response[0].value
            print status
            if status == '01':
                data = {'status': 'Ok', 'statuscode': '0', 'response': response}
                try:
                    momid = response[2].value
                    data['transaction_response_id'] = momid
                    print momid
                except Exception, e:
                    print e
                return data
            elif status == '108':
                '''Deposit Transfer not processed: Insufficient funds'''
                self.stone_fucked_up()
            #for key,value in response:
            #    print "key: %s , value: %s" % (key,value)
            #return self.suds_to_json(response)
        except Exception, e:
            print e
        return data
        #print query_request
    def stone_fucked_up(self):
        # Hook invoked when the float account has insufficient funds
        # (deposit status code 108); currently it only logs.
        print "we have Insufficient funds"
    def SendNotification(self):
        '''
        Exercise the SendNotification SOAP operation with placeholder
        values (diagnostic helper; failures are printed and swallowed).
        '''
        url = 'http://172.25.48.43:8310/SendNotification'
        client = self.get_client("SendNotification.wsdl", url)
        query_request = client.factory.create('SendNotification')
        print query_request
        try:
            response = client.service.SendNotification(
                '1111','adsadasdsad','adasdasd','asdasdasd','asdsadasdasd','asdasdasdsad','wwwwww')
            #response.VendorCode = '333333'
            print str(response)
        except Exception, e:
            print "Remit Error %s" % e
            pass
    def clean_number(self, number):
        """Coerce `number` to a string and strip one leading '0' trunk prefix."""
        number = "%s" % number
        if number[0] == '0':
            return number[1:]
        return number
    def kyc_sms(self, number):
        """SMS the recipient asking them to complete SIM (KYC) validation.

        Prefixes the Ugandan country code; sending is gated by
        settings.SEND_KYC_SMS and returns True when sending is disabled.
        """
        number = "256%s" % self.clean_number(number)
        sms = "Y'ello. Please visit an MTN Service Centre with a valid ID to complete your SIM card validation immediately to be able to receive money from MTN International Remittance."
        if settings.SEND_KYC_SMS:
            return send_sms(number, {}, {}, sms)
        return True
    def momo_sms(self, number):
        """SMS the recipient asking them to register for mobile money.

        Same gating and country-code handling as `kyc_sms`.
        """
        number = "256%s" % self.clean_number(number)
        sms = "Y'ello. Please visit an MTN Service Centre with a valid ID to complete your mobile money registration immediately to be able to receive money from MTN International Remittance."
        if settings.SEND_KYC_SMS:
            return send_sms(number, {}, {}, sms)
        return True
    def kyc_email(self, number, email, request, transaction):
        """Email the sender that `number` is an unregistered recipient."""
        # send email to sender
        template = settings.EMAIL_TEMPLATE_DIR + 'unregistered_recipient.html'
        c = {'number':number, 'transaction':transaction}
        return mailer(request, 'Unregistered Recipient', template, c, email)
    def CheckNumber(self, number="789999550", OperationType="ProfileDetails"):
        """Query the SDP `processcustomer` REST endpoint for `number`.

        Returns the registration-status string extracted from the JSON
        response (e.g. 'RegisteredComplete'), or 'Failed' on any error.
        """
        number = self.clean_number(number)
        import base64
        import hashlib
        result = 'Failed'
        Nonce = "%s" % self.OrderDateTimestamp
        Created = self.OrderDateTime
        digest = "%s%s%s" % (Nonce, Created, settings.MTN_SDP_PASS)
        digest = hashlib.sha1(digest).digest()
        PasswordDigest = base64.encodestring(digest).replace('\n', '')
        data = {
            "ProcessCustomerRequest": {
                "TypeCode": "GSM",
                "OperationType": OperationType,
                "VendorCode": "REMITUG",
                "CommonComponents": {
                    "MSISDNNum": "%s" % number,
                    "ProcessingNumber": "1230909",
                    "OrderDateTime": "%s" % self.OrderDateTime,
                    "OpCoID": "UG",
                    "SenderID": "SDP"
                },
                "SpecificationGroup": [{
                    "Narration": "CustomerDetails",
                    "Specification": [{
                        "Name": "LevelCode",
                        "Value": "ServiceLevel"
                    },
                    {
                        "Name": "ServiceID",
                        "Value": "%s" % number,
                    }]
                }]
            }
        }
        headers = {
            'Host': 'http://172.25.48.43:8312',
            'X-RequestHeader': 'FA="%s"' % number,
            'Accept-Encoding': 'gzip,deflate',
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'WSSE realm="SDP", profile="UsernameToken"',
            'X-WSSE': 'UsernameToken Username="%s", PasswordDigest="%s",Nonce="%s",Created="%s"' % (
                settings.MTN_SDP_SERVICEID,
                PasswordDigest,
                Nonce,
                Created
            ),
        }
        import requests
        try:
            url = 'http://172.25.48.43:8312/1/generic/processcustomer'
            r = requests.post(url, json=data, headers=headers)
            data = json.loads(r.text)
            print data
            if 'ProcessCustomerRequestResponse' in data:
                # Specification[1] carries the registration status value.
                response = data['ProcessCustomerRequestResponse']['SpecificationGroup']
                response = response[0]['Specification']
                registration_status = response[1]['Value']
                result = registration_status
        except Exception, e:
            print e
            pass
        print result
        return result
    def validateAcountHolder(self, number="256789945550"):
        """Validate that `number` is a mobile-money account holder.

        Posts an XML request and returns the parsed
        'validateaccountholderresponse' dict; on failure returns
        {'valid': False} (with 'error': True if parsing the reply failed).
        """
        import base64
        import hashlib
        import json
        import xmltodict
        # Ensure the MSISDN carries the Ugandan country code.
        if not number[:3] == '256':
            number = "256%s" % self.clean_number(number)
        # NOTE(review): hard-coded nonce here as well -- confirm.
        Nonce = "gdhdgdhdgdhdhdgdh"
        Created = self.OrderDateTime
        digest = "%s%s%s"%(Nonce,Created,settings.MTN_SDP_PASS)
        digest = hashlib.sha1(digest).digest()
        PasswordDigest = base64.encodestring(digest).replace('\n', '')
        headers = {
            'X-RequestHeader': 'request ServiceId=, TransId="1430215126132",FA="%s"' % number,
            'Msisdn': '%s' % number,
            'Content-Type': 'text/xml; charset=utf-8',
            'Authorization': 'WSSE realm="SDP", profile="UsernameToken"',
            'X-WSSE': 'UsernameToken Username="%s", PasswordDigest="%s",Nonce="%s",Created="%s"' % (
                settings.MTN_SDP_SERVICEID,
                PasswordDigest,
                Nonce,
                Created
            ),
        }
        response = {'valid': False}
        xml = '<?xml version="1.0" encoding="utf-8"?><validateaccountholderrequest><accountholderid>ID:%s/MSISDN</accountholderid></validateaccountholderrequest>' % number
        url = 'http://172.25.48.43:8323/mom/mt/validateaccountholder'
        req = requests.Request('POST',url,headers=headers,data=xml)
        prepared = req.prepare()
        self.pretty_print_POST(prepared)
        s = requests.Session()
        r = s.send(prepared)
        print r.text
        try:
            # XML -> OrderedDict -> plain JSON dict round-trip.
            response = json.dumps(xmltodict.parse('%s' % r.text))
            response = json.loads(response)
            response = response['validateaccountholderresponse']
        except Exception, e:
            print e
            response['error'] = True
            pass
        return response
    def pretty_print_POST(self, req):
        """
        Debug-print a prepared requests.Request (method, URL, headers, body).

        At this point it is completely built and ready
        to be fired; it is "prepared".
        However pay attention at the formatting used in
        this function because it is programmed to be pretty
        printed and may differ from the actual request.
        """
        print('{}\n{}\n{}\n\n{}'.format(
            '-----------START-----------',
            req.method + ' ' + req.url,
            '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
            req.body,
        ))
    def recursive_asdict(self, d):
        """Convert Suds object into serializable format."""
        # NOTE(review): the recursive calls below use the bare name
        # `recursive_asdict` instead of `self.recursive_asdict`; on nested
        # suds objects this raises NameError at runtime -- confirm/fix.
        out = {}
        for k, v in asdict(d).iteritems():
            if hasattr(v, '__keylist__'):
                out[k] = recursive_asdict(v)
            elif isinstance(v, list):
                out[k] = []
                for item in v:
                    if hasattr(item, '__keylist__'):
                        out[k].append(recursive_asdict(item))
                    else:
                        out[k].append(item)
            else:
                out[k] = v
        return out
    def suds_to_json(self, data):
        """Serialize a suds response object to a JSON string."""
        return json.dumps(self.recursive_asdict(data))
    def kyc_check(self, number):
        """Return True when `number` has completed SIM registration (KYC)."""
        is_kyc = False
        try:
            response = self.CheckNumber(number)
            if response == 'RegisteredComplete':
                is_kyc = True
        except Exception, e:
            print e
            pass
        return is_kyc
    def momo_check(self, number):
        """Return (is_momo, raw_response) for mobile-money registration."""
        is_momo = False
        response = {}
        try:
            response = self.validateAcountHolder(number)
            # The endpoint reports validity as the string 'true'.
            if response['valid'] == 'true':
                is_momo = True
        except Exception, e:
            print e
            pass
        return is_momo, response
def test_api_call():
    """Manual smoke-test: validate the default account holder via the API."""
    client = Mtn()
    # Other operations that can be exercised by hand:
    #   client.DepositMoney(...)
    #   client.CheckNumber()
    client.validateAcountHolder()
| 29.355769 | 186 | 0.676384 |
688d526bce5f41220174e84f529dd979bf50ddb6 | 7,466 | py | Python | pysnmp-with-texts/JUNIPER-JS-IPSEC-VPN-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/JUNIPER-JS-IPSEC-VPN-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/JUNIPER-JS-IPSEC-VPN-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module JUNIPER-JS-IPSEC-VPN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-JS-IPSEC-VPN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:59:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
jnxIpSecTunnelMonEntry, = mibBuilder.importSymbols("JUNIPER-IPSEC-FLOW-MON-MIB", "jnxIpSecTunnelMonEntry")
jnxJsIPSecVpn, = mibBuilder.importSymbols("JUNIPER-JS-SMI", "jnxJsIPSecVpn")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, IpAddress, Counter64, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, ObjectIdentity, NotificationType, iso, Unsigned32, MibIdentifier, ModuleIdentity, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "IpAddress", "Counter64", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "ObjectIdentity", "NotificationType", "iso", "Unsigned32", "MibIdentifier", "ModuleIdentity", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- Module identity -------------------------------------------------------
# Registers this MIB module under the Juniper enterprise subtree
# 1.3.6.1.4.1.2636.3.39.1.5.1 and attaches its revision metadata.  The
# `mibBuilder.loadTexts` guards keep the (large) descriptive strings out of
# memory unless MIB texts were requested at load time; the version check
# skips an API that only exists in newer pysnmp releases.
jnxJsIpSecVpnMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1))
jnxJsIpSecVpnMib.setRevisions(('2007-04-27 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: jnxJsIpSecVpnMib.setRevisionsDescriptions(('Create the jnxJsIpSecTunnelTable as an augmented table to the jnxIpSecTunnelMonTable in JUNIPER-IPSEC-FLOW-MON-MIB.',))
if mibBuilder.loadTexts: jnxJsIpSecVpnMib.setLastUpdated('200705112153Z')
if mibBuilder.loadTexts: jnxJsIpSecVpnMib.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: jnxJsIpSecVpnMib.setContactInfo('Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net')
if mibBuilder.loadTexts: jnxJsIpSecVpnMib.setDescription("This module defines the object used to monitor the entries pertaining to IPSec objects and the management of the IPSEC VPN functionalities for Juniper security product lines. This mib module extend Juniper's common IPSEC flow monitoring MIB, building on the existing common infrastruature, the security implementation integrates the value-added features for the security products")
# Sub-identifier branches for notifications and the IKE phase-1 / phase-2
# object groups of this module.
jnxJsIpSecVpnNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 0))
jnxJsIpSecVpnPhaseOne = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 1))
jnxJsIpSecVpnPhaseTwo = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2))
class JnxJsIpSecVpnType(TextualConvention, Integer32):
    """Textual convention: how an IPsec VPN tunnel is triggered (policy/route)."""
    description = "The type of the remote peer gateway (endpoint). It can be one of the following two types: - policyBased : tunnels requires a policy with action 'tunnel' to trigger IPSEC VPN. The device receives traffic and matches it with policy that has action 'tunnel', it performs the encryption/decryption and authentication options negotiated for this VPN phase 2 negotiation. - routeBased : requires a tunnel interface a route directing traffic to protected networks to exit the system using that tunnel interface. The tunnel interface is bound to a Phase 2 VPN configuration that specifies all the tunnel parameters. "
    status = 'current'
    # Restrict the underlying Integer32 to the two enumerated values below.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("policyBased", 1), ("routeBased", 2))
# --- Phase-2 tunnel table --------------------------------------------------
# One conceptual row per active IPsec Phase-2 tunnel.  The row type augments
# jnxIpSecTunnelMonEntry from JUNIPER-IPSEC-FLOW-MON-MIB, so it reuses that
# entry's index structure via registerAugmentions/setIndexNames below.
jnxJsIpSecTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2, 1), )
if mibBuilder.loadTexts: jnxJsIpSecTunnelTable.setStatus('current')
if mibBuilder.loadTexts: jnxJsIpSecTunnelTable.setDescription('The IPsec Phase-2 Tunnel Table. There is one entry in this table for each active IPsec Phase-2 Tunnel. If the tunnel is terminated, then the entry is no longer available after the table has been refreshed. ')
jnxJsIpSecTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2, 1, 1), )
jnxIpSecTunnelMonEntry.registerAugmentions(("JUNIPER-JS-IPSEC-VPN-MIB", "jnxJsIpSecTunnelEntry"))
jnxJsIpSecTunnelEntry.setIndexNames(*jnxIpSecTunnelMonEntry.getIndexNames())
if mibBuilder.loadTexts: jnxJsIpSecTunnelEntry.setStatus('current')
if mibBuilder.loadTexts: jnxJsIpSecTunnelEntry.setDescription('Each entry contains the attributes associated with an active IPsec Phase-2 Tunnel.')
# Column 1: policy name (only meaningful for policy-based VPNs).
jnxJsIpSecTunPolicyName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsIpSecTunPolicyName.setStatus('current')
if mibBuilder.loadTexts: jnxJsIpSecTunPolicyName.setDescription('The policy name assoicated with this tunnel if the this IPSEC VPN is policy based. Otherwise, this attribute is not applicable.')
# Column 2: tunnel trigger type (policyBased / routeBased).
jnxJsIpSecVpnTunType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2, 1, 1, 2), JnxJsIpSecVpnType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsIpSecVpnTunType.setStatus('current')
if mibBuilder.loadTexts: jnxJsIpSecVpnTunType.setDescription('This attribute indicates the IPSEC VPN tunnel is policy based or route based.')
# Column 3: configured VPN-monitoring state (disable/enable).
jnxJsIpSecTunCfgMonState = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsIpSecTunCfgMonState.setStatus('current')
if mibBuilder.loadTexts: jnxJsIpSecTunCfgMonState.setDescription('The user configuration states whether to monitor the IPSec tunnel to be alive or not. ')
# Column 4: observed tunnel state (up/down/vpnMonitoringDisabled).
jnxJsIpSecTunState = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 5, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("vpnMonitoringDisabled", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsIpSecTunState.setStatus('current')
if mibBuilder.loadTexts: jnxJsIpSecTunState.setDescription('This attribute indicates whether the IPSec Tunnel is up or down, determined by icmp ping if the jnxJsIpSecTunCfgMonState is enable. Down: VPN monitor detects the tunnel is down Up: VPN monitor detects the tunnel is up. vpnMonitoringDisabled: user has disabled VPN tunnel monitoring.')
# Export the module's symbols so other compiled MIBs can import them.
mibBuilder.exportSymbols("JUNIPER-JS-IPSEC-VPN-MIB", jnxJsIpSecVpnPhaseOne=jnxJsIpSecVpnPhaseOne, jnxJsIpSecTunnelEntry=jnxJsIpSecTunnelEntry, jnxJsIpSecTunCfgMonState=jnxJsIpSecTunCfgMonState, jnxJsIpSecTunState=jnxJsIpSecTunState, JnxJsIpSecVpnType=JnxJsIpSecVpnType, jnxJsIpSecTunnelTable=jnxJsIpSecTunnelTable, jnxJsIpSecVpnMib=jnxJsIpSecVpnMib, jnxJsIpSecVpnPhaseTwo=jnxJsIpSecVpnPhaseTwo, PYSNMP_MODULE_ID=jnxJsIpSecVpnMib, jnxJsIpSecTunPolicyName=jnxJsIpSecTunPolicyName, jnxJsIpSecVpnTunType=jnxJsIpSecVpnTunType, jnxJsIpSecVpnNotifications=jnxJsIpSecVpnNotifications)
| 135.745455 | 628 | 0.79199 |
7103a533118d685e45bf20536aaa6183d73d3d11 | 10,719 | py | Python | src/feature_engineering/temporal_features.py | PacktPublishing/Modern-Time-Series-Forecasting-with-Python- | 391ae9c8c8c5b2fba20a8ada8e48e68eb46f118a | [
"MIT"
] | 10 | 2021-08-09T11:06:28.000Z | 2022-03-07T14:47:36.000Z | src/feature_engineering/temporal_features.py | PacktPublishing/Modern-Time-Series-Forecasting-with-Python- | 391ae9c8c8c5b2fba20a8ada8e48e68eb46f118a | [
"MIT"
] | null | null | null | src/feature_engineering/temporal_features.py | PacktPublishing/Modern-Time-Series-Forecasting-with-Python- | 391ae9c8c8c5b2fba20a8ada8e48e68eb46f118a | [
"MIT"
] | null | null | null | import re
import warnings
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
# adapted from gluonts
def time_features_from_frequency_str(freq_str: str) -> List[str]:
    """
    Returns a list of time features that will be appropriate for the given frequency string.

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.

    Returns
    -------
    List[str]
        Attribute names (as consumed by ``add_temporal_features``) that are
        informative at the given frequency; yearly data gets an empty list.

    Raises
    ------
    RuntimeError
        If ``freq_str`` does not map to one of the supported granularities.
    """
    # Feature sets are cumulative: each finer granularity extends the coarser one.
    monthly_features = [
        "Month",
        "Quarter",
        "Is_quarter_end",
        "Is_quarter_start",
        "Is_year_end",
        "Is_year_start",
    ]
    weekly_features = monthly_features + ["Is_month_start", "Week"]
    # BUG FIX: the original table wrote '"Week" "Day"' (adjacent string
    # literals), which Python silently concatenates into a single bogus
    # "WeekDay" feature; "Week" and "Day" are meant to be separate features
    # (compare the weekly list above and the "Week" special-case in
    # add_temporal_features).
    daily_features = monthly_features + [
        "Is_month_start",
        "Week",
        "Day",
        "Dayofweek",
        "Dayofyear",
    ]
    hourly_features = daily_features + ["Hour"]
    minutely_features = hourly_features + ["Minute"]

    features_by_offsets = {
        offsets.YearBegin: [],
        offsets.YearEnd: [],
        offsets.MonthBegin: monthly_features,
        offsets.MonthEnd: monthly_features,
        offsets.Week: weekly_features,
        offsets.Day: daily_features,
        offsets.BusinessDay: daily_features,
        offsets.Hour: hourly_features,
        offsets.Minute: minutely_features,
    }

    # Resolve the alias string to a pandas DateOffset and match by type.
    offset = to_offset(freq_str)

    for offset_type, feature in features_by_offsets.items():
        if isinstance(offset, offset_type):
            return feature

    supported_freq_msg = f"""
    Unsupported frequency {freq_str}
    The following frequencies are supported:
        Y, YS - yearly
            alias: A
        M, MS - monthly
        W - weekly
        D - daily
        B - business days
        H - hourly
        T - minutely
            alias: min
    """
    raise RuntimeError(supported_freq_msg)
# adapted from fastai
def make_date(df: pd.DataFrame, date_field: str) -> pd.DataFrame:
    """Make sure `df[date_field]` is of the right date type.

    Converts the column to a datetime64 dtype in place (no-op if it already
    is one, including timezone-aware columns) and returns the same dataframe.

    BUG FIX: this module-level function was decorated with ``@classmethod``,
    which turned it into an uncallable descriptor; the decorator is removed.
    """
    field_dtype = df[date_field].dtype
    # Timezone-aware columns report a DatetimeTZDtype; treat them as datetimes.
    if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        field_dtype = np.datetime64
    if not np.issubdtype(field_dtype, np.datetime64):
        # `infer_datetime_format=True` was dropped: it is deprecated (and a
        # no-op) in pandas >= 2.0.
        df[date_field] = pd.to_datetime(df[date_field])
    return df
# adapted from fastai
def add_temporal_features(
    df: pd.DataFrame,
    field_name: str,
    frequency: str,
    add_elapsed: bool = True,
    prefix: Optional[str] = None,
    drop: bool = True,
    use_32_bit: bool = False,
) -> Tuple[pd.DataFrame, List]:
    """Adds columns relevant to a date in the column `field_name` of `df`.

    Args:
        df (pd.DataFrame): Dataframe to which the features need to be added
        field_name (str): The date column which should be encoded using temporal features
        frequency (str): The frequency of the date column so that only relevant features are added.
            If frequency is "Weekly", then temporal features like hour, minutes, etc. doesn't make sense.
        add_elapsed (bool, optional): Add time elapsed (seconds since the Unix
            epoch) as a monotonically increasing function. Defaults to True.
        prefix (str, optional): Prefix to the newly created columns. If left None, will use the field name. Defaults to None.
        drop (bool, optional): Flag to drop the date column after feature creation. Defaults to True.
        use_32_bit (bool, optional): Flag to use float32 or int32 to reduce memory. Defaults to False.

    Returns:
        Tuple[pd.DataFrame, List]: Returns a tuple of the new dataframe and a list of features which were added
    """
    field = df[field_name]
    # Strip a trailing "date"/"Date" from the field name for the column prefix.
    prefix = (re.sub("[Dd]ate$", "", field_name) if prefix is None else prefix) + "_"
    attr = time_features_from_frequency_str(frequency)
    _32_bit_dtype = "int32"
    added_features = []
    for n in attr:
        if n == "Week":
            # Handled separately below because pandas removed `Series.dt.week`.
            continue
        feature = getattr(field.dt, n.lower())
        df[prefix + n] = feature.astype(_32_bit_dtype) if use_32_bit else feature
        added_features.append(prefix + n)
    # Pandas removed `dt.week` in v1.1.10
    if "Week" in attr:
        week = (
            field.dt.isocalendar().week
            if hasattr(field.dt, "isocalendar")
            else field.dt.week
        )
        # BUG FIX: previously inserted at the hard-coded column position 3
        # (`df.insert(3, ...)`), which is arbitrary and fails on narrow
        # frames; the column is now appended like every other feature.
        df[prefix + "Week"] = week.astype(_32_bit_dtype) if use_32_bit else week
        added_features.append(prefix + "Week")
    if add_elapsed:
        mask = ~field.isna()
        # Seconds since the epoch; NaT rows become None (object dtype).
        df[prefix + "Elapsed"] = np.where(
            mask, field.values.astype(np.int64) // 10 ** 9, None
        )
        if use_32_bit:
            if df[prefix + "Elapsed"].isnull().sum() == 0:
                df[prefix + "Elapsed"] = df[prefix + "Elapsed"].astype("int32")
            else:
                # int32 cannot hold missing values; fall back to float32.
                df[prefix + "Elapsed"] = df[prefix + "Elapsed"].astype("float32")
        added_features.append(prefix + "Elapsed")
    if drop:
        df.drop(field_name, axis=1, inplace=True)
    return df, added_features
def _calculate_fourier_terms(
    seasonal_cycle: np.ndarray, max_cycle: int, n_fourier_terms: int
):
    """Calculates Fourier Terms given the seasonal cycle and max_cycle.

    Returns an array of shape ``(len(seasonal_cycle), 2 * n_fourier_terms)``
    whose first ``n_fourier_terms`` columns are the sin terms (orders 1..n)
    and the remaining columns the matching cos terms.
    """
    sin_X = np.empty((len(seasonal_cycle), n_fourier_terms), dtype="float64")
    cos_X = np.empty((len(seasonal_cycle), n_fourier_terms), dtype="float64")
    for i in range(1, n_fourier_terms + 1):
        sin_X[:, i - 1] = np.sin((2 * np.pi * seasonal_cycle * i) / max_cycle)
        cos_X[:, i - 1] = np.cos((2 * np.pi * seasonal_cycle * i) / max_cycle)
    return np.hstack([sin_X, cos_X])


def add_fourier_features(
    df: pd.DataFrame,
    column_to_encode: str,
    max_value: Optional[int] = None,
    n_fourier_terms: int = 1,
    use_32_bit: bool = False,
) -> Tuple[pd.DataFrame, List]:
    """Adds Fourier Terms for the specified seasonal cycle column, like month, week, hour, etc.

    Args:
        df (pd.DataFrame): The dataframe which has the seasonal cycles which have to be encoded
        column_to_encode (str): The column name which has the seasonal cycle
        max_value (int): The maximum value the seasonal cycle can attain, e.g. 12 for month.
            If not given, it is inferred from the data (with a warning), which is only
            accurate when the data covers at least one full cycle. Defaults to None.
        n_fourier_terms (int): Number of fourier terms to be added. Defaults to 1
        use_32_bit (bool, optional): Flag to use float32 to reduce memory. Defaults to False.

    Returns:
        Tuple[pd.DataFrame, List]: The mutated dataframe and the list of added feature
            names (sin terms of order 1..n followed by the matching cos terms).
    """
    assert (
        column_to_encode in df.columns
    ), "`column_to_encode` should be a valid column name in the dataframe"
    assert is_numeric_dtype(
        df[column_to_encode]
    ), "`column_to_encode` should have numeric values."
    if max_value is None:
        max_value = df[column_to_encode].max()
        # BUG FIX: the original did `raise warnings.warn(...)`; `warn()`
        # returns None, so the `raise` crashed with a TypeError and this
        # branch was unusable.  The `{}` placeholder was also never filled.
        warnings.warn(
            "Inferring max cycle as {} from the data. This may not be accurate "
            "if data is less than a single seasonal cycle.".format(max_value)
        )
    fourier_features = _calculate_fourier_terms(
        df[column_to_encode].astype(int).values,
        max_cycle=max_value,
        n_fourier_terms=n_fourier_terms,
    )
    # Column names mirror the layout of `fourier_features`: all sin terms
    # first, then all cos terms.
    feature_names = [
        f"{column_to_encode}_sin_{i}" for i in range(1, n_fourier_terms + 1)
    ] + [f"{column_to_encode}_cos_{i}" for i in range(1, n_fourier_terms + 1)]
    df[feature_names] = fourier_features
    if use_32_bit:
        df[feature_names] = df[feature_names].astype("float32")
    return df, feature_names
def bulk_add_fourier_features(
    df: pd.DataFrame,
    columns_to_encode: List[str],
    max_values: List[int],
    n_fourier_terms: int = 1,
    use_32_bit: bool = False,
) -> Tuple[pd.DataFrame, List]:
    """Adds Fourier terms for all the specified seasonal cycle columns, like month, week, hour, etc.

    This is a thin convenience wrapper that applies ``add_fourier_features``
    to each (column, max_value) pair and collects the generated feature names.

    Args:
        df (pd.DataFrame): The dataframe which has the seasonal cycles which have to be encoded
        columns_to_encode (List[str]): The column names which have the seasonal cycles
        max_values (List[int]): The maximum values each seasonal cycle can attain, in the
            same order as ``columns_to_encode`` (e.g. 12 for month).
        n_fourier_terms (int): Number of Fourier terms to be added. Defaults to 1
        use_32_bit (bool, optional): Flag to use float32 to reduce memory. Defaults to False.

    Returns:
        Tuple[pd.DataFrame, List]: The augmented dataframe and the list of added feature names
    """
    assert len(columns_to_encode) == len(
        max_values
    ), "`columns_to_encode` and `max_values` should be of same length."
    added_features: List = []
    for column, cycle_max in zip(columns_to_encode, max_values):
        df, new_features = add_fourier_features(
            df,
            column,
            cycle_max,
            n_fourier_terms=n_fourier_terms,
            use_32_bit=use_32_bit,
        )
        added_features.extend(new_features)
    return df, added_features
| 36.090909 | 126 | 0.613397 |
295a6963171895c3ea340fe04913499c33c32bc3 | 8,939 | py | Python | torchmin/trustregion/base.py | macio232/pytorch-minimize | 5724f8540b14c4a4427ea978075bfe5e78838602 | [
"MIT"
] | null | null | null | torchmin/trustregion/base.py | macio232/pytorch-minimize | 5724f8540b14c4a4427ea978075bfe5e78838602 | [
"MIT"
] | null | null | null | torchmin/trustregion/base.py | macio232/pytorch-minimize | 5724f8540b14c4a4427ea978075bfe5e78838602 | [
"MIT"
] | null | null | null | """
Trust-region optimization.
Code ported from SciPy to PyTorch
Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
All rights reserved.
"""
from abc import ABC, abstractmethod
import torch
from torch.linalg import norm
from scipy.optimize.optimize import OptimizeResult, _status_message
from ..function import ScalarFunction
from ..optim.minimizer import Minimizer
#: Human-readable termination messages, indexed by the solver's ``warnflag``
#: status code (0: success, 1: max iterations reached, 2: model failed to
#: predict improvement, 3: linear-algebra failure such as a non-psd Hessian).
status_messages = (
    _status_message['success'],
    _status_message['maxiter'],
    'A bad approximation caused failure to predict improvement.',
    'A linalg error occurred, such as a non-psd Hessian.',
)
class BaseQuadraticSubproblem(ABC):
    """
    Base/abstract class defining the quadratic model for trust-region
    minimization.

    Child classes must implement the ``solve`` method and the ``hess_prod``
    property.
    """
    def __init__(self, x, closure):
        # Evaluate the objective once at ``x``. The closure hands back the
        # function value, gradient, a Hessian-vector operator and the full
        # Hessian (which of the last two is used depends on ``hess_prod``).
        fval, grad, hessp, hess = closure(x)
        self._x = x
        self._f = fval
        self._g = grad
        # Keep whichever Hessian representation the subclass asked for.
        self._h = hessp if self.hess_prod else hess
        self._g_mag = None
        self._cauchy_point = None
        self._newton_point = None
        # pre-allocated buffer reused by get_boundaries_intersections
        self._tab = x.new_empty(2)

    def __call__(self, p):
        # Quadratic model value: f + g·p + ½ p·Hp
        curvature = p.dot(self.hessp(p))
        return self.fun + self.jac.dot(p) + 0.5 * curvature

    @property
    def fun(self):
        """Value of objective function at current iteration."""
        return self._f

    @property
    def jac(self):
        """Value of Jacobian of objective function at current iteration."""
        return self._g

    @property
    def hess(self):
        """Value of Hessian of objective function at current iteration."""
        # Only available when the full Hessian was requested (hess_prod False).
        if self.hess_prod:
            raise Exception('class {} does not have '
                            'method `hess`'.format(type(self)))
        return self._h

    def hessp(self, p):
        """Hessian-vector product at the current iterate for a vector ``p``.

        Note: ``self._h`` is either a Tensor or a LinearOperator. In either
        case, it has a method ``mv()``.
        """
        return self._h.mv(p)

    @property
    def jac_mag(self):
        """Magnitude of jacobian of objective function at current iteration."""
        # computed lazily and cached on first access
        if self._g_mag is None:
            self._g_mag = norm(self.jac)
        return self._g_mag

    def get_boundaries_intersections(self, z, d, trust_radius):
        """
        Solve the scalar quadratic equation ||z + t d|| == trust_radius.
        This is like a line-sphere intersection.
        Return the two values of t, sorted from low to high.
        """
        quad = d.dot(d)
        lin = 2 * z.dot(d)
        const = z.dot(z) - trust_radius**2
        sqrt_discriminant = torch.sqrt(lin * lin - 4 * quad * const)

        # Mathematically equivalent to the textbook quadratic formula
        #   ta = (-b - sqrt_discriminant) / (2*a)
        #   tb = (-b + sqrt_discriminant) / (2*a)
        # but with smaller round-off error (avoids catastrophic cancellation).
        aux = lin + torch.copysign(sqrt_discriminant, lin)
        self._tab[0] = -aux / (2 * quad)
        self._tab[1] = -2 * const / aux
        return self._tab.sort()[0]

    @abstractmethod
    def solve(self, trust_radius):
        pass

    @property
    @abstractmethod
    def hess_prod(self):
        """A property that must be set by every sub-class indicating whether
        to use full hessian matrix or hessian-vector products."""
        pass
def _minimize_trust_region(fun, x0, subproblem=None, initial_trust_radius=1.,
                           max_trust_radius=1000., eta=0.15, gtol=1e-4,
                           max_iter=None, disp=False, return_all=False,
                           callback=None):
    """
    Minimization of scalar function of one or more variables using a
    trust-region algorithm.

    Options for the trust-region algorithm are:
        initial_trust_radius : float
            Initial trust radius.
        max_trust_radius : float
            Never propose steps that are longer than this value.
        eta : float
            Trust region related acceptance stringency for proposed steps.
        gtol : float
            Gradient norm must be less than `gtol`
            before successful termination.
        max_iter : int
            Maximum number of iterations to perform.
        disp : bool
            If True, print convergence message.

    This function is called by :func:`torchmin.minimize`.
    It is not supposed to be called directly.
    """
    # --- argument validation ------------------------------------------------
    if subproblem is None:
        raise ValueError('A subproblem solving strategy is required for '
                         'trust-region methods')
    if not (0 <= eta < 0.25):
        raise Exception('invalid acceptance stringency')
    if max_trust_radius <= 0:
        raise Exception('the max trust radius must be positive')
    if initial_trust_radius <= 0:
        raise ValueError('the initial trust radius must be positive')
    if initial_trust_radius >= max_trust_radius:
        raise ValueError('the initial trust radius must be less than the '
                         'max trust radius')

    # Input check/pre-process
    disp = int(disp)
    if max_iter is None:
        max_iter = x0.numel() * 200

    # Construct scalar objective function. Whether Hessian-vector products
    # or the full Hessian are evaluated is dictated by the subproblem class.
    hessp = subproblem.hess_prod
    sf = ScalarFunction(fun, x0.shape, hessp=hessp, hess=not hessp)
    closure = sf.closure

    # init the search status
    warnflag = 1  # maximum iterations flag
    k = 0

    # initialize the search
    trust_radius = torch.as_tensor(initial_trust_radius,
                                   dtype=x0.dtype, device=x0.device)
    x = x0.detach().flatten()
    if return_all:
        allvecs = [x]

    # initial subproblem (quadratic model around the starting point)
    m = subproblem(x, closure)

    # search for the function min
    # NOTE(review): despite the comment below, the gradient test only runs at
    # the *end* of each iteration, so at least one step is always attempted --
    # confirm whether a pre-loop `jac_mag < gtol` check was intended.
    # do not even start if the gradient is small enough
    while k < max_iter:

        # Solve the sub-problem.
        # This gives us the proposed step relative to the current position
        # and it tells us whether the proposed step
        # has reached the trust region boundary or not.
        try:
            p, hits_boundary = m.solve(trust_radius)
        except RuntimeError as exc:
            # TODO: catch general linalg error like np.linalg.linalg.LinAlgError
            if 'singular' in exc.args[0]:
                warnflag = 3
                break
            else:
                raise

        # calculate the predicted value at the proposed point
        predicted_value = m(p)

        # define the local approximation at the proposed point
        x_proposed = x + p
        m_proposed = subproblem(x_proposed, closure)

        # evaluate the ratio defined in equation (4.4)
        actual_reduction = m.fun - m_proposed.fun
        predicted_reduction = m.fun - predicted_value
        if predicted_reduction <= 0:
            # model predicts no improvement; the approximation is unusable
            warnflag = 2
            break
        rho = actual_reduction / predicted_reduction

        # update the trust radius according to the actual/predicted ratio
        if rho < 0.25:
            trust_radius = trust_radius.mul(0.25)
        elif rho > 0.75 and hits_boundary:
            trust_radius = torch.clamp(2*trust_radius, max=max_trust_radius)

        # if the ratio is high enough then accept the proposed step
        if rho > eta:
            x = x_proposed
            m = m_proposed
        elif isinstance(sf, Minimizer):
            # if we are using a Minimizer as our ScalarFunction then we
            # need to re-compute the previous state because it was
            # overwritten during the call `subproblem(x_proposed, closure)`
            m = subproblem(x, closure)

        # append the best guess, call back, increment the iteration count
        if return_all:
            allvecs.append(x.clone())
        if callback is not None:
            callback(x.clone())
        k += 1

        # verbosity check
        if disp > 1:
            print('iter %d - fval: %0.4f' % (k, m.fun))

        # check if the gradient is small enough to stop
        if m.jac_mag < gtol:
            warnflag = 0
            break

    # print some stuff if requested
    if disp:
        msg = status_messages[warnflag]
        if warnflag != 0:
            msg = 'Warning: ' + msg
        print(msg)
        print("         Current function value: %f" % m.fun)
        print("         Iterations: %d" % k)
        print("         Function evaluations: %d" % sf.nfev)
        # print("         Gradient evaluations: %d" % sf.ngev)
        # print("         Hessian evaluations: %d" % (sf.nhev + nhessp[0]))

    result = OptimizeResult(x=x.view_as(x0), fun=m.fun, grad=m.jac.view_as(x0),
                            success=(warnflag == 0), status=warnflag,
                            nfev=sf.nfev, nit=k, message=status_messages[warnflag])

    if not subproblem.hess_prod:
        # `2 * x0.shape` concatenates the shape with itself, i.e. the Hessian
        # is reshaped to (…x0.shape, …x0.shape).
        result['hess'] = m.hess.view(2 * x0.shape)

    if return_all:
        result['allvecs'] = allvecs

    # BUG FIX: the function previously ended with a corrupted statement
    # (`return result | 33.732075 | 83 | 0.601633`) which would raise a
    # TypeError at runtime. Return the result object directly.
    return result
db49114d078cd832c4195978f43613f9d10a79be | 5,369 | py | Python | 6. Mitosis Simulation/mitosis_sim.py | soumitradev/codingtrain-challenges-python | 7487f69405f106629a64b71e255a61d98623c65d | [
"MIT"
] | 1 | 2021-09-06T05:18:15.000Z | 2021-09-06T05:18:15.000Z | 6. Mitosis Simulation/mitosis_sim.py | PseudoCodeNerd/codingtrain-challenges-python | 7487f69405f106629a64b71e255a61d98623c65d | [
"MIT"
] | null | null | null | 6. Mitosis Simulation/mitosis_sim.py | PseudoCodeNerd/codingtrain-challenges-python | 7487f69405f106629a64b71e255a61d98623c65d | [
"MIT"
] | 1 | 2020-08-04T13:19:22.000Z | 2020-08-04T13:19:22.000Z | # desc: In this coding challenge (originally done at The Coding Train), I attempt to code a mitosis simulation in pygame.
# A part of Coding Train's Challenges implementation in Python series.
# @author: PseudoCodeNerd
# Importing libraries
import pygame, sys, random, math
from pygame import gfxdraw
# Setting screen dimensions
WINDOW_HEIGHT = 600
WINDOW_WIDTH = 900
# Set FPS
FPS = 60
# Definition of Cell object, contains all properties of a single cell.
class Cell:
    """A single cell: a coloured circle that wanders around the board and can
    divide into two children via :py:meth:`mitosis`."""

    def __init__(self, surface):
        self.surface = surface
        # Random purple-ish colour and a random spawn position.
        self.color = (random.randint(155, 255), 0, random.randint(155, 255), 200)
        self.x = random.randint(50, 850)
        self.y = random.randint(50, 550)
        self.r = WINDOW_WIDTH // 20

    def move(self):
        """
        Randomly move a cell on the screen, prevent it from going outside.
        """
        # 50/50 chance of drifting towards the top-left or bottom-right.
        direction = -1 if random.random() < 0.5 else 1
        step_x = random.uniform(0, 2)
        step_y = random.uniform(0, 2)
        self.x = math.floor(self.x + direction * step_x)
        self.y = math.floor(self.y + direction * step_y)
        # Respawn at a random on-screen position when drifting out of bounds.
        if not 0 <= self.x < WINDOW_WIDTH - self.r:
            self.x = random.randint(50, 850)
        if not 0 <= self.y < WINDOW_HEIGHT - self.r:
            self.y = random.randint(50, 550)

    def clicked(self, m_x, m_y):
        """
        Checks if mouse clicked on a cell object.

        params
            m_x: x_coord of mouse click
            m_y : y_coord of mouse click
        return
            boolean: if click on cell object or not
        """
        dist = math.sqrt((self.x - m_x) ** 2 + (self.y - m_y) ** 2)
        return dist <= self.r

    def show(self):
        """
        Render a Cell on the screeen.
        """
        pygame.draw.circle(self.surface, self.color, (self.x, self.y), self.r)

    def mitosis(self, surface):
        """
        Creates two children cells from the parent cell (read up Mitosis please)

        params
            surface for the children cells
        """
        # Spawn the children close to the parent's location.
        child_a = Cell(surface)
        child_a.x = self.x + self.r // 2
        child_a.y = self.y + (2 * self.r // 2)
        child_a.color = (random.randint(155, 255), 0, random.randint(155, 255), 200)
        child_a.r = math.ceil(self.r * 0.9)
        child_b = Cell(surface)
        child_b.x = self.x + self.r // 2
        child_b.y = self.y
        child_b.color = (random.randint(155, 255), 0, random.randint(155, 255), 200)
        child_b.r = math.ceil(self.r * 0.96)
        return child_a, child_b
# Definition of Cells object. Cells represent all the individual cells on a screen in a list.
# Contains helper functions for mitosis.
class Cells:
    """Container for all individual cells currently on the board.

    Holds the list of live cells and provides the mitosis helper that
    replaces a clicked cell with its two children.
    """

    def __init__(self, Cell_1):
        """Initialize the container with a single parent cell.

        params
            Cell_1 : the initial (parent) cell
        """
        # BUG FIX: the original body referenced the module-level global
        # `cell` instead of the `Cell_1` parameter, so the argument was
        # silently ignored (it only worked because a global named `cell`
        # happened to exist at call time).
        self.cell_lis = [Cell_1]
        self.Cell = Cell_1  # kept for backward compatibility

    def num_cells(self):
        """
        get number of cells on the board
        """
        return len(self.cell_lis)

    def get_cell_lis(self):
        """
        get list of all cells on the board
        """
        return self.cell_lis

    def cell_clicked(self, surface, m_coords):
        """
        Perform mitosis

        params
            surface : board surface where cells reside
            m_coords : mouse-click coordinates
        """
        # Loop backwards so removing the parent does not shift the indices
        # of cells that are still to be checked.
        for i in range(len(self.cell_lis) - 1, -1, -1):
            # call `clicked` to perform mitosis if mouse click on a cell
            if self.cell_lis[i].clicked(m_coords[0], m_coords[1]):
                # Get children cells and add them to the board
                cell_a, cell_b = self.cell_lis[i].mitosis(surface)
                self.cell_lis.append(cell_a)
                self.cell_lis.append(cell_b)
                # Remove the parent cell by index (avoids an O(n) equality
                # scan that `list.remove` would perform).
                del self.cell_lis[i]
# Initialize pygame (must happen before any display/font calls)
pygame.init()
# Create screen for showing simulation and set font for cell counter
surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Mitosis Simulation")
text = pygame.font.SysFont("Tahoma", 18)
# Initialize parent cell and Cells object
cell = Cell(surface)
# NOTE(review): rebinding `Cells` to an instance shadows the class name, so
# no further Cells objects can be created after this point — works here but
# is fragile; consider renaming the instance.
Cells = Cells(cell)
# Main game loop: runs once per frame until the window is closed
while True:
    # Check for events
    for event in pygame.event.get():
        # Left mouse click: any cell under the cursor undergoes mitosis
        if event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                Cells.cell_clicked(surface, pygame.mouse.get_pos())
        # Window close: shut pygame down and exit the process
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    s = Cells.num_cells()
    # Clear the previous frame (black background)
    surface.fill((0, 0, 0))
    # Render the live-cell counter in the top-left corner
    counter = text.render("No. of Cells: {0}".format(s), 1, (255,255,255))
    surface.blit(counter, (5, 10))
    # Get *updated* cell_lis with every iteration (mitosis may have changed it)
    cell_lis = Cells.get_cell_lis()
    # Draw every cell, then advance its random walk
    for cell in cell_lis:
        cell.show()
        cell.move()
    # Flip the finished frame onto the screen
    pygame.display.update()
    # NOTE(review): a new Clock is created every frame; a single Clock created
    # before the loop would be the conventional pygame pattern.
    pygame.time.Clock().tick(FPS)
| 31.769231 | 121 | 0.597132 |
cc6dcce50d5065d04d0ebe7738b0e20d7de30b35 | 6,809 | py | Python | httprunner/report.py | wishchen/HttpRunnerManager | 1e72baa8a84ca70124af908ac683bf88b869d48a | [
"MIT"
] | null | null | null | httprunner/report.py | wishchen/HttpRunnerManager | 1e72baa8a84ca70124af908ac683bf88b869d48a | [
"MIT"
] | null | null | null | httprunner/report.py | wishchen/HttpRunnerManager | 1e72baa8a84ca70124af908ac683bf88b869d48a | [
"MIT"
] | null | null | null | # encoding: utf-8
import io
import os
import platform
import time
import unittest
from base64 import b64encode
from collections import Iterable, OrderedDict
from datetime import datetime
from httprunner import logger
from httprunner.__about__ import __version__
from httprunner.compat import basestring, bytes, json, numeric_types
from jinja2 import Template, escape
from requests.structures import CaseInsensitiveDict
def get_platform():
    """Return a dict describing the runtime environment: the HttpRunner
    version, the Python implementation/version, and the OS platform."""
    python_version = "{} {}".format(
        platform.python_implementation(),
        platform.python_version(),
    )
    return {
        "httprunner_version": __version__,
        "python_version": python_version,
        "platform": platform.platform(),
    }
def get_summary(result):
    """Build a serialisable summary dict from a unittest result object.

    The summary contains the overall success flag, per-outcome counts,
    platform information and (when available) timing plus per-test records.
    """
    stat = {
        'testsRun': result.testsRun,
        'failures': len(result.failures),
        'errors': len(result.errors),
        'skipped': len(result.skipped),
        'expectedFailures': len(result.expectedFailures),
        'unexpectedSuccesses': len(result.unexpectedSuccesses)
    }
    # Successes are whatever is left once all other outcomes are accounted for.
    stat["successes"] = (
        stat["testsRun"]
        - stat["failures"]
        - stat["errors"]
        - stat["skipped"]
        - stat["expectedFailures"]
        - stat["unexpectedSuccesses"]
    )
    summary = {
        "success": result.wasSuccessful(),
        "stat": stat,
        "platform": get_platform(),
    }
    if getattr(result, "records", None):
        summary["time"] = {
            'start_at': datetime.fromtimestamp(result.start_at),
            'duration': result.duration
        }
        summary["records"] = result.records
    else:
        summary["records"] = []
    return summary
def render_html_report(summary, html_report_name=None, html_report_template=None):
    """Render *summary* into an HTML file and return the report path.

    If html_report_name is not specified, use current datetime.
    If html_report_template is not specified, use default report template.

    Side effects: mutates *summary* in place (adds "html_report_name" and
    stringifies request/response bodies in each record) and creates the
    report directory/file on disk.
    """
    if not html_report_template:
        # Fall back to the template bundled next to this module.
        html_report_template = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            "templates",
            "default_report_template.html"
        )
        logger.log_debug("No html report template specified, use default.")
    else:
        logger.log_info("render with html report template: {}".format(html_report_template))

    logger.log_info("Start to render Html report ...")
    logger.log_debug("render data: {}".format(summary))

    report_dir_path = os.path.join(os.getcwd(), "reports")
    start_datetime = summary["time"]["start_at"].strftime('%Y-%m-%d-%H-%M-%S')
    if html_report_name:
        # Named reports live in their own sub-directory; the timestamp is
        # appended so successive runs do not overwrite each other.
        summary["html_report_name"] = html_report_name
        report_dir_path = os.path.join(report_dir_path, html_report_name)
        html_report_name += "-{}.html".format(start_datetime)
    else:
        summary["html_report_name"] = ""
        html_report_name = "{}.html".format(start_datetime)

    if not os.path.isdir(report_dir_path):
        os.makedirs(report_dir_path)

    # Convert raw request/response bodies to displayable strings in place.
    for record in summary.get("records"):
        meta_data = record['meta_data']
        stringify_body(meta_data, 'request')
        stringify_body(meta_data, 'response')

    with io.open(html_report_template, "r", encoding='utf-8') as fp_r:
        template_content = fp_r.read()
        report_path = os.path.join(report_dir_path, html_report_name)
        with io.open(report_path, 'w', encoding='utf-8') as fp_w:
            # Jinja2 renders the summary dict into the HTML template.
            rendered_content = Template(template_content).render(summary)
            fp_w.write(rendered_content)

    logger.log_info("Generated Html report: {}".format(report_path))

    return report_path
def stringify_body(meta_data, request_or_response):
    """Normalise a request or response body into a displayable string.

    Mutates ``meta_data['<side>_body']`` in place, where ``<side>`` is either
    "request" or "response". Dicts/lists become JSON, images become base64
    data-URIs, other bytes are decoded as UTF-8 where possible.
    """
    headers = meta_data['{}_headers'.format(request_or_response)]
    body = meta_data.get('{}_body'.format(request_or_response))

    # NOTE: branch order matters — CaseInsensitiveDict is dict-like and must
    # be handled before the generic dict/list case.
    if isinstance(body, CaseInsensitiveDict):
        body = json.dumps(dict(body), ensure_ascii=False)

    elif isinstance(body, (dict, list)):
        body = json.dumps(body, indent=2, ensure_ascii=False)

    elif isinstance(body, bytes):
        content_type = headers.get("Content-Type", "")
        try:
            if "image" in content_type:
                # Embed images inline as a base64 data-URI.
                meta_data["response_data_type"] = "image"
                body = "data:{};base64,{}".format(
                    content_type,
                    b64encode(body).decode('utf-8')
                )
            else:
                body = body.decode("utf-8")
        except UnicodeDecodeError:
            # Undecodable binary payloads are left as raw bytes.
            pass

    elif not isinstance(body, (basestring, numeric_types, Iterable)):
        # class instance, e.g. MultipartEncoder()
        body = repr(body)

    meta_data['{}_body'.format(request_or_response)] = body
class HtmlTestResult(unittest.TextTestResult):
    """A html result class that can generate formatted html results.

    Collects one record per executed test (name, status, attachment and
    request/response meta data) for later rendering. Used by TextTestRunner.
    """
    def __init__(self, stream, descriptions, verbosity):
        super(HtmlTestResult, self).__init__(stream, descriptions, verbosity)
        self.records = []

    def _record_test(self, test, status, attachment=''):
        # Capture everything the HTML template needs for a single test case.
        record = {
            'name': test.shortDescription(),
            'status': status,
            'attachment': attachment,
            "meta_data": test.meta_data
        }
        self.records.append(record)

    def startTestRun(self):
        # Remember when the whole run started so `duration` can be derived.
        self.start_at = time.time()

    def startTest(self, test):
        """ add start test time """
        super(HtmlTestResult, self).startTest(test)
        logger.color_print(test.shortDescription(), "yellow")

    def addSuccess(self, test):
        super(HtmlTestResult, self).addSuccess(test)
        self._record_test(test, 'success')
        print("")

    def addError(self, test, err):
        super(HtmlTestResult, self).addError(test, err)
        self._record_test(test, 'error', self._exc_info_to_string(err, test))
        print("")

    def addFailure(self, test, err):
        super(HtmlTestResult, self).addFailure(test, err)
        self._record_test(test, 'failure', self._exc_info_to_string(err, test))
        print("")

    def addSkip(self, test, reason):
        super(HtmlTestResult, self).addSkip(test, reason)
        self._record_test(test, 'skipped', reason)
        print("")

    def addExpectedFailure(self, test, err):
        super(HtmlTestResult, self).addExpectedFailure(test, err)
        self._record_test(test, 'ExpectedFailure', self._exc_info_to_string(err, test))
        print("")

    def addUnexpectedSuccess(self, test):
        super(HtmlTestResult, self).addUnexpectedSuccess(test)
        self._record_test(test, 'UnexpectedSuccess')
        print("")

    @property
    def duration(self):
        # Seconds elapsed since startTestRun was called.
        elapsed = time.time() - self.start_at
        return elapsed
| 34.388889 | 92 | 0.639595 |
8d31826c059a76cdf550207a0652939350a22f2f | 35,279 | py | Python | x690/types.py | exhuma/x690 | 42c1c62b15ba9883204af96bfa7d8f88da469dd1 | [
"MIT"
] | null | null | null | x690/types.py | exhuma/x690 | 42c1c62b15ba9883204af96bfa7d8f88da469dd1 | [
"MIT"
] | null | null | null | x690/types.py | exhuma/x690 | 42c1c62b15ba9883204af96bfa7d8f88da469dd1 | [
"MIT"
] | 2 | 2021-07-09T10:11:28.000Z | 2021-07-15T06:40:49.000Z | # Type-Hinting is done in a stub file
"""
Overview
========
This module contains the encoding/decoding logic for data types as defined in
:term:`X.690`.
Each type is made available via a registry dictionary on :py:class:`~.X690Type` and
can be retrieved via :py:meth:`~.X690Type.get`.
Additionally, given a :py:class:`bytes` object, the :py:func:`~.decode`
function can be used to parse the bytes object and return a typed instance
from it. See :py:func:`~.decode` for details about it's behaviour!
.. note::
The individual type classes in this module do not contain any additional
documentation. The bulk of this module is documented in :py:class:`~.X690Type`.
For the rest, the type classes simply define the type identifier tag.
Supporting Additional Classes
=============================
Just by subclassing :py:class:`~.X690Type` and setting correct ``TAG`` and
``TYPECLASS`` values, most of the basic functionality will be covered by the
superclass. X690Type detection, and addition to the registry is automatic.
Subclassing is enough.
By default, a new type which does not override any methods will have it's value
reported as bytes objects. You may want to override at least
:py:meth:`~.X690Type.decode_raw` to convert the raw-bytes into your own data-type.
Example
-------
Let's assume you want to decode/encode a "Person" object with a first-name,
last-name and age. Let's also assume it will be an application-specific type of
a "constructed" nature with our application-local tag 1. Let's further assume
that the value will be a UTF-8 encoded JSON string inside the x690 stream.
We specify the metadata as class-level variables ``TYPECLASS``, ``NATURE`` and
``TAG``. The decoding is handled by implementing a static-method
``decode_raw`` which gets the data-object containing the value and a slice
defining at which position the data is located. The encoding is handled by
implementing the instance-method ``encode_raw``. The instance contains the
Python value in ``self.pyvalue``.
So we can implement this as follows (including a named-tuple as our local
type):
.. code-block:: python
from typing import NamedTuple
from x690.types import X690Type
from json import loads, dumps
class Person(NamedTuple):
first_name: str
last_name: str
age: int
class PersonType(X690Type[Person]):
TYPECLASS = TypeClass.APPLICATION
NATURE = [TypeNature.CONSTRUCTED]
TAG = 1
@staticmethod
def decode_raw(data: bytes, slc: slice = slice(None)) -> Person:
values = loads(data[slc].decode("utf8"))
return Person(
values["first_name"], values["last_name"], values["age"]
)
def encode_raw(self) -> bytes:
return dumps(self.pyvalue._asdict()).encode("utf8")
"""
# pylint: disable=abstract-method, missing-class-docstring, too-few-public-methods
from datetime import datetime
from itertools import zip_longest
from textwrap import indent
from typing import (
Any,
Dict,
Generic,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import t61codec # type: ignore
from .exc import IncompleteDecoding, UnexpectedType, X690Error
from .util import (
INDENT_STRING,
TypeClass,
TypeInfo,
TypeNature,
encode_length,
get_value_slice,
visible_octets,
wrap,
)
# The pure-Python type wrapped by an X690Type subclass (e.g. int for Integer).
TWrappedPyType = TypeVar("TWrappedPyType", bound=Any)
# The x690 type expected by callers of `decode` (used with `enforce_type`).
TPopType = TypeVar("TPopType", bound=Any)
# A concrete X690Type subclass; used so alternate constructors return the
# subclass type rather than the base class.
TConcreteType = TypeVar("TConcreteType", bound="X690Type[Any]")
class _SENTINEL_UNINITIALISED:  # pylint: disable=invalid-name
    """
    Helper for specific sentinel values

    A dedicated class is used (instead of ``None``) so that ``None`` remains
    a valid wrapped value for x690 types.
    """


#: sentinel value for uninitialised objects (used for lazy decoding)
UNINITIALISED = _SENTINEL_UNINITIALISED()
def decode(
    data: bytes,
    start_index: int = 0,
    enforce_type: Optional[Type[TPopType]] = None,
    strict: bool = False,
) -> Tuple[TPopType, int]:
    """
    Convert a X.690 bytes object into a Python instance, and the location of
    the next object.

    Given a :py:class:`bytes` object and any start-index, inspects and parses
    the octets starting at the given index (as many as required) to determine
    variable type (and corresponding Python class), and length. That class is
    then used to parse the object located in ``data`` at the given index. The
    location of the start of the next (subsequent) object is also determined.

    The return value is a tuple with the decoded object and the start-index
    of the next object.

    :param data: The data-block containing one or more x690-encoded objects.
    :param start_index: Offset in *data* where the object's header starts.
    :param enforce_type: If given, raise ``UnexpectedType`` unless the decoded
        object is an instance of this type.
    :param strict: If True, raise ``IncompleteDecoding`` when undecoded bytes
        remain after the object.

    Example::

        >>> data = b'\\x02\\x01\\x05\\x11'
        >>> decode(data)
        (Integer(5), 3)

        >>> data = b'some-skippable-bytes\\x02\\x01\\x05\\x11'
        >>> decode(data, 20)
        (Integer(5), 23)
    """
    if start_index >= len(data):
        raise IndexError(
            f"Attempting to read from position {start_index} "
            f"on data with length {len(data)}"
        )
    # NOTE(review): redundant — start_index already defaults to 0 and a
    # falsy value would have passed through the length check above unchanged.
    start_index = start_index or 0
    # The first octet identifies the type; unknown type-IDs fall back to
    # UnknownType instead of failing hard.
    type_ = TypeInfo.from_bytes(data[start_index])
    try:
        cls = X690Type.get(type_.cls, type_.tag, type_.nature)
    except KeyError:
        cls = UnknownType

    data_slice, next_tlv = get_value_slice(data, start_index)
    output = cls.from_bytes(data, data_slice)
    if cls is UnknownType:
        # Keep the raw (unparsed) tag around for introspection.
        output.tag = data[start_index]  # type: ignore

    if enforce_type and not isinstance(output, enforce_type):
        raise UnexpectedType(
            f"Unexpected decode result. Expected instance of type "
            f"{enforce_type} but got {type(output)} instead"
        )

    # NOTE(review): ``len(data) - 1`` tolerates exactly one trailing byte
    # without raising — looks like an off-by-one (``< len(data)`` would flag
    # any remainder); confirm whether this is intentional.
    if strict and next_tlv < len(data) - 1:
        remainder = data[next_tlv:]
        raise IncompleteDecoding(
            f"Strict decoding still had {len(remainder)} remaining bytes!",
            remainder=remainder,
        )

    return output, next_tlv  # type: ignore
class X690Type(Generic[TWrappedPyType]):
    """
    The superclass for all supported types.

    Subclasses are automatically added to an internal registry (keyed by
    type-class, tag and nature) via ``__init_subclass__`` and can be looked
    up with :py:meth:`get`. Values are decoded lazily: an instance created
    via :py:meth:`from_bytes` keeps the raw bytes and only decodes them when
    :py:attr:`value` is first accessed.
    """

    __slots__ = ["pyvalue", "_raw_bytes"]

    # Registry of all known subclasses, keyed by (typeclass, tag, nature).
    __registry: Dict[Tuple[str, int, TypeNature], Type["X690Type[Any]"]] = {}

    #: The x690 type-class (universal, application or context)
    TYPECLASS: TypeClass = TypeClass.UNIVERSAL

    #: The x690 "private/constructed" information
    NATURE = [TypeNature.CONSTRUCTED]

    #: The x690 identifier for the type
    TAG: int = -1

    #: The decoded (or to-be encoded) Python value
    pyvalue: Union[TWrappedPyType, _SENTINEL_UNINITIALISED]

    #: The byte representation of "pyvalue" without metadata-header
    _raw_bytes: bytes

    #: The location of the value within "raw_bytes"
    bounds: slice = slice(None)

    def __init_subclass__(cls: Type["X690Type[Any]"]) -> None:
        # Auto-register every subclass so X690Type.get() can find it.
        for nature in cls.NATURE:
            X690Type.__registry[(cls.TYPECLASS, cls.TAG, nature)] = cls

    @property
    def value(self) -> TWrappedPyType:
        """
        Returns the value as a pure Python type
        """
        # Lazy decoding: only parse the raw bytes if no Python value was set.
        if not isinstance(self.pyvalue, _SENTINEL_UNINITIALISED):
            return self.pyvalue
        return self.decode_raw(self.raw_bytes, self.bounds)

    @staticmethod
    def decode_raw(data: bytes, slc: slice = slice(None)) -> TWrappedPyType:
        """
        Converts the raw byte-value (without type & length header) into a
        pure Python type

        >>> Integer.decode_raw(b"\\x05")
        5

        :param data: A data-block containing the byte-information
        :param slc: A slice of the data-block that contains the exact
            raw-bytes.
        :return: The value that should be wrapped by the current x690 type.
        """
        # Default implementation: return the raw bytes unchanged.
        return data[slc]  # type: ignore

    @staticmethod
    def get(
        typeclass: str, typeid: int, nature: TypeNature = TypeNature.CONSTRUCTED
    ) -> Type["X690Type[Any]"]:
        """
        Retrieve a Python class by x690 type information

        Classes can be registered by subclassing :py:class:`x690.types.X690Type`

        :raises KeyError: if no class is registered for the given key.
        """
        cls = X690Type.__registry[(typeclass, typeid, nature)]
        return cls

    @staticmethod
    def all() -> List[Type["X690Type[Any]"]]:
        """
        Returns all registered classes
        """
        return list(X690Type.__registry.values())

    @classmethod
    def validate(cls, data: bytes) -> None:
        """
        Given a bytes object, checks if the given class *cls* supports decoding
        this object. If not, raises a ValueError.
        """
        # Compare the header octet against this class's type metadata.
        tinfo = TypeInfo.from_bytes(data[0])
        if tinfo.cls != cls.TYPECLASS or tinfo.tag != cls.TAG:
            raise ValueError(
                "Invalid type header! "
                "Expected a %s class with tag "
                "ID 0x%02x, but got a %s class with "
                "tag ID 0x%02x" % (cls.TYPECLASS, cls.TAG, tinfo.cls, data[0])
            )

    @classmethod
    def decode(
        cls: Type[TConcreteType], data: bytes
    ) -> TConcreteType:  # pragma: no cover
        """
        This method takes a bytes object which contains the raw content octets
        of the object. That means, the octets *without* the type information
        and length.

        This function must be overridden by the concrete subclasses.
        """
        slc = get_value_slice(data).bounds
        output = cls.decode_raw(data, slc)
        return cls(output)

    @classmethod
    def from_bytes(
        cls: Type[TConcreteType], data: bytes, slc: slice = slice(None)
    ) -> TConcreteType:
        """
        Creates a new :py:class:`x690.types.X690Type` instance from raw-bytes
        (without type nor length bytes)

        >>> Integer.from_bytes(b"\\x01")
        Integer(1)
        >>> OctetString.from_bytes(b"hello-world")
        OctetString(b'hello-world')
        >>> Boolean.from_bytes(b"\\x00")
        Boolean(False)
        """
        # The no-arg constructor leaves pyvalue as the UNINITIALISED sentinel,
        # which enables lazy decoding of the stored raw bytes.
        try:
            instance = cls()
        except TypeError as exc:
            raise X690Error(
                "Custom types must have a no-arg constructor allowing "
                "x690.types.UNINITIALISED as value. Custom type %r does not "
                "support this!" % cls
            ) from exc
        instance.raw_bytes = data
        instance.bounds = slc
        return instance

    def __init__(
        self,
        value: Union[TWrappedPyType, _SENTINEL_UNINITIALISED] = UNINITIALISED,
    ) -> None:
        self.pyvalue = value
        self._raw_bytes = b""

    @property
    def raw_bytes(self) -> bytes:
        # Lazily encode the Python value on first access and cache the result.
        if self._raw_bytes != b"":
            return self._raw_bytes
        if self.pyvalue is UNINITIALISED:
            return b""
        self._raw_bytes = self.encode_raw()
        return self._raw_bytes

    @raw_bytes.setter
    def raw_bytes(self, value: bytes) -> None:
        self._raw_bytes = value

    def __bytes__(self) -> bytes:  # pragma: no cover
        """
        Convert this instance into a bytes object. This must be implemented by
        subclasses.
        """
        # Assemble the full TLV triplet: type header + length + value octets.
        value = self.raw_bytes[self.bounds] or self.encode_raw()
        tinfo = TypeInfo(self.TYPECLASS, self.NATURE[0], self.TAG)
        return bytes(tinfo) + encode_length(len(value)) + value

    def __repr__(self) -> str:
        repr_value = repr(self.value)
        return "%s(%s)" % (self.__class__.__name__, repr_value)

    @property
    def length(self) -> int:
        """
        Return the x690 byte-length of this instance
        """
        return len(self.raw_bytes[self.bounds])

    def encode_raw(self) -> bytes:
        """
        Convert this instance into raw x690 bytes (excluding the type and
        length header)

        >>> import x690.types as t
        >>> Integer(5).encode_raw()
        b'\\x05'
        >>> Boolean(True).encode_raw()
        b'\\x01'
        >>> X690Type(t.UNINITIALISED).encode_raw()
        b''
        """
        if isinstance(self.pyvalue, _SENTINEL_UNINITIALISED):
            return b""
        # Default implementation assumes pyvalue is already a bytes object.
        return self.pyvalue

    def pythonize(self) -> TWrappedPyType:
        """
        Convert this instance to an appropriate pure Python object.
        """
        return self.value

    def pretty(self, depth: int = 0) -> str:  # pragma: no cover
        """
        Returns a readable representation (possibly multiline) of the value.

        The value is indented by *depth* levels of indentation

        By default this simply returns the string representation. But more
        complex values may override this.
        """
        return indent(str(self), INDENT_STRING * depth)
class UnknownType(X690Type[bytes]):
    """
    A fallback type for anything not in X.690.

    Instances of this class contain the raw information as parsed from the
    bytes as the following attributes:

    * ``value``: The value without leading metadata (as bytes value)
    * ``tag``: The *unparsed* "tag". This is the type ID as defined in the
      reference document. See :py:class:`~puresnmp.x690.util.TypeInfo` for
      details.
    * ``typeinfo``: unused (derived from *tag* and only here for consistency
      with ``__repr__`` of this class).
    """

    TAG = 0x99

    def __init__(self, value: bytes = b"", tag: int = -1) -> None:
        # An empty value maps to the "uninitialised" sentinel so that lazy
        # decoding keeps working.
        super().__init__(value or UNINITIALISED)
        self.tag = tag

    def __repr__(self) -> str:
        typeinfo = TypeInfo.from_bytes(self.tag)
        classname = self.__class__.__name__
        tinfo = f"{typeinfo.cls}/{typeinfo.nature}/{typeinfo.tag}"
        return f"<{classname} {self.tag} {self.value!r} {tinfo}>"

    def __eq__(self, other: object) -> bool:
        # Two unknown objects are equal when both payload and tag match.
        if not isinstance(other, UnknownType):
            return False
        return self.value == other.value and self.tag == other.tag

    def pretty(self, depth: int = 0) -> str:
        """
        Returns a prettified string with *depth* levels of indentation

        See :py:meth:`~.X690Type.pretty`
        """
        hexdump = wrap(visible_octets(self.value), str(type(self)), depth)
        body = hexdump.splitlines()
        if len(body) > 15:
            # Keep only the first 10 and last 5 lines, replacing the middle
            # with a "snipped" marker padded to the same width.
            line_width = len(body[0])
            sniptext = ("<%d more lines>" % (len(body) - 10 - 5)).center(
                line_width - 2
            )
            body = body[:10] + ["┊%s┊" % sniptext] + body[-5:]
        typeinfo = TypeInfo.from_bytes(self.tag)
        lines = [
            "Unknown X690Type",
            f" │ Tag: {self.tag}",
            " │ X690Type Info:",
            f" │ │ Class: {typeinfo.cls}",
            f" │ │ Nature: {typeinfo.nature}",
            f" │ │ Tag: {typeinfo.tag}",
        ] + body
        return indent(
            "\n".join(lines),
            INDENT_STRING * depth,
        )
class Boolean(X690Type[bool]):
TAG = 0x01
NATURE = [TypeNature.PRIMITIVE]
@staticmethod
def decode_raw(data: bytes, slc: slice = slice(None)) -> bool:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
return data[slc] != b"\x00"
@classmethod
def validate(cls, data: bytes) -> None:
"""
Overrides :py:meth:`.X690Type.validate`
"""
super().validate(data)
if data[1] != 1:
raise ValueError(
"Unexpected Boolean value. Length should be 1,"
" it was %d" % data[1]
)
def encode_raw(self) -> bytes:
"""
Overrides :py:meth:`.X690Type.encode_raw`
"""
return b"\x01" if self.pyvalue else b"\x00"
def __eq__(self, other: object) -> bool:
return isinstance(other, Boolean) and self.value == other.value
class Null(X690Type[None]):
TAG = 0x05
NATURE = [TypeNature.PRIMITIVE]
@classmethod
def validate(cls, data: bytes) -> None:
"""
Overrides :py:meth:`.X690Type.validate`
"""
super().validate(data)
if data[1] != 0:
raise ValueError(
"Unexpected NULL value. Length should be 0, it "
"was %d" % data[1]
)
@staticmethod
def decode_raw(data: bytes, slc: slice = slice(None)) -> None:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
# pylint: disable=unused-argument
return None
def encode_raw(self) -> bytes:
"""
Overrides :py:meth:`.X690Type.encode_raw`
>>> Null().encode_raw()
b'\\x00'
"""
# pylint: disable=no-self-use
return b"\x00"
def __bytes__(self) -> bytes:
return b"\x05\x00"
def __eq__(self, other: object) -> bool:
return isinstance(other, Null) and self.value == other.value
def __repr__(self) -> str:
return "Null()"
def __bool__(self) -> bool:
return False
def __nonzero__(self) -> bool: # pragma: no cover
return False
class OctetString(X690Type[bytes]):
TAG = 0x04
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
def __init__(
self, value: Union[str, bytes, _SENTINEL_UNINITIALISED] = b""
) -> None:
if isinstance(value, str):
value = value.encode("ascii")
# The custom init allows us to pass in str instances instead of only
# bytes. We still need to pass down "None" if need to detect
# "not-yet-decoded" values
if not value:
value = UNINITIALISED
super().__init__(value)
def __eq__(self, other: object) -> bool:
return isinstance(other, OctetString) and self.value == other.value
def pretty(self, depth: int = 0) -> str:
"""
Returns a prettified string with *depth* levels of indentation
See :py:meth:`~.X690Type.pretty`
"""
if self.value == b"":
return repr(self)
try:
# We try to decode embedded X.690 items. If we can't, we display
# the value raw
embedded: X690Type[Any] = decode(self.value)[0]
return wrap(embedded.pretty(0), f"Embedded in {type(self)}", depth)
except: # pylint: disable=bare-except
wrapped = wrap(visible_octets(self.value), str(type(self)), depth)
return wrapped
class Sequence(X690Type[List[X690Type[Any]]]):
"""
Represents an X.690 sequence type. Instances of this class are iterable and
indexable.
"""
TAG = 0x10
@staticmethod
def decode_raw(
data: bytes, slc: slice = slice(None)
) -> List[X690Type[Any]]:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
start_index = slc.start or 0
if not data[slc] or start_index > len(data):
return []
item: X690Type[Any]
item, next_pos = decode(data, start_index)
items: List[X690Type[Any]] = [item]
end = slc.stop or len(data)
while next_pos < end:
item, next_pos = decode(data, next_pos)
items.append(item)
return items
def encode_raw(self) -> bytes:
"""
Overrides :py:meth:`.X690Type.encode_raw`
"""
if isinstance(self.pyvalue, _SENTINEL_UNINITIALISED):
return b""
items = [bytes(item) for item in self.pyvalue]
output = b"".join(items)
return output
def __eq__(self, other: object) -> bool:
if not isinstance(other, Sequence):
return False
return self.raw_bytes[self.bounds] == other.raw_bytes[other.bounds]
def __repr__(self) -> str:
item_repr = list(self)
return "Sequence(%r)" % item_repr
def __len__(self) -> int:
return len(self.value)
def __iter__(self) -> Iterator[X690Type[Any]]:
yield from self.value
def __getitem__(self, idx: int) -> X690Type[Any]:
return self.value[idx]
def pythonize(self) -> List[X690Type[Any]]:
"""
Overrides :py:meth:`~.X690Type.pythonize`
"""
return [obj.pythonize() for obj in self]
def pretty(self, depth: int = 0) -> str: # pragma: no cover
"""
Returns a prettified string with *depth* levels of indentation
See :py:meth:`~.X690Type.pretty`
"""
lines = [f"{self.__class__.__name__} with {len(self.value)} items:"]
for item in self.value:
prettified_item = item.pretty(depth)
bullet = INDENT_STRING * depth + "⁃ "
for line in prettified_item.splitlines():
lines.append(bullet + line)
bullet = " "
return "\n".join(lines)
class Integer(X690Type[int]):
SIGNED = True
TAG = 0x02
NATURE = [TypeNature.PRIMITIVE]
@classmethod
def decode_raw(cls, data: bytes, slc: slice = slice(None)) -> int:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
data = data[slc]
return int.from_bytes(data, "big", signed=cls.SIGNED)
def encode_raw(self) -> bytes:
"""
Overrides :py:meth:`.X690Type.encode_raw`
"""
if isinstance(self.pyvalue, _SENTINEL_UNINITIALISED):
return b""
octets = [self.pyvalue & 0b11111111]
# Append remaining octets for long integers.
remainder = self.pyvalue
while remainder not in (0, -1):
remainder = remainder >> 8
octets.append(remainder & 0b11111111)
if remainder == 0 and octets[-1] == 0b10000000:
octets.append(0)
octets.reverse()
# remove leading octet if there is a string of 9 zeros or ones
while len(octets) > 1 and (
(octets[0] == 0 and octets[1] & 0b10000000 == 0)
or (octets[0] == 0b11111111 and octets[1] & 0b10000000 != 0)
):
del octets[0]
return bytes(octets)
def __eq__(self, other: object) -> bool:
return isinstance(other, Integer) and self.value == other.value
class ObjectIdentifier(X690Type[str]):
"""
Represents an OID.
Instances of this class support containment checks to determine if one OID
is a sub-item of another::
>>> ObjectIdentifier("1.2.3.4.5") in ObjectIdentifier("1.2.3")
True
>>> ObjectIdentifier("1.2.4.5.6") in ObjectIdentifier("1.2.3")
False
"""
TAG = 0x06
NATURE = [TypeNature.PRIMITIVE]
def __init__(
self, value: Union[str, _SENTINEL_UNINITIALISED] = UNINITIALISED
) -> None:
if (
not isinstance(value, _SENTINEL_UNINITIALISED)
and value
and value.startswith(".")
):
value = value[1:]
super().__init__(value)
@property
def nodes(self) -> Tuple[int, ...]:
"""
Returns the numerical nodes for this instance as tuple
>>> ObjectIdentifier("1.2.3").nodes
(1, 2, 3)
>>> ObjectIdentifier().nodes
()
"""
if not self.value:
return tuple()
return tuple(int(n) for n in self.value.split("."))
@staticmethod
def decode_large_value(current_char: int, stream: Iterator[int]) -> int:
"""
If we encounter a value larger than 127, we have to consume from the
stram until we encounter a value below 127 and recombine them.
See: https://msdn.microsoft.com/en-us/library/bb540809(v=vs.85).aspx
"""
buffer = []
while current_char > 127:
buffer.append(current_char ^ 0b10000000)
current_char = next(stream)
total = current_char
for i, digit in enumerate(reversed(buffer)):
total += digit * 128 ** (i + 1)
return total
@staticmethod
def encode_large_value(value: int) -> List[int]:
"""
Inverse function of :py:meth:`~.ObjectIdentifier.decode_large_value`
"""
if value <= 127:
return [value]
output = [value & 0b1111111]
value = value >> 7
while value:
output.append(value & 0b1111111 | 0b10000000)
value = value >> 7
output.reverse()
return output
@staticmethod
def decode_raw(data: bytes, slc: slice = slice(None)) -> str:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
# Special case for "empty" object identifiers which should be returned
# as "0"
data = data[slc]
if not data:
return ""
# unpack the first byte into first and second sub-identifiers.
data0 = data[0]
first, second = data0 // 40, data0 % 40
output = [first, second]
remaining = iter(data[1:])
for node in remaining:
# Each node can only contain values from 0-127. Other values need
# to be combined.
if node > 127:
collapsed_value = ObjectIdentifier.decode_large_value(
node, remaining
)
output.append(collapsed_value)
continue
output.append(node)
instance = ".".join([str(n) for n in output])
return instance
def collapse_identifiers(self) -> Tuple[int, ...]:
"""
Meld the first two octets into one octet as defined by x.690
In x.690 ObjectIdentifiers are a sequence of numbers. In the
byte-representation the first two of those numbers are stored in the
first byte.
This function takes a "human-readable" OID tuple and returns a new
tuple with the first two elements merged (collapsed) together.
>>> ObjectIdentifier("1.3.6.1.4.1").collapse_identifiers()
(43, 6, 1, 4, 1)
>>> ObjectIdentifier().collapse_identifiers()
()
"""
# pylint: disable=no-self-use
identifiers = self.nodes
if len(identifiers) == 0:
return tuple()
if len(identifiers) > 1:
# The first two bytes are collapsed according to X.690
# See https://en.wikipedia.org/wiki/X.690#BER_encoding
first, second, rest = (
identifiers[0],
identifiers[1],
identifiers[2:],
)
first_output = (40 * first) + second
else:
first_output = identifiers[0]
rest = tuple()
# Values above 127 need a special encoding. They get split up into
# multiple positions.
exploded_high_values = []
for char in rest:
if char > 127:
exploded_high_values.extend(
ObjectIdentifier.encode_large_value(char)
)
else:
exploded_high_values.append(char)
collapsed_identifiers = [first_output]
for subidentifier in rest:
collapsed_identifiers.extend(
ObjectIdentifier.encode_large_value(subidentifier)
)
return tuple(collapsed_identifiers)
def encode_raw(self) -> bytes:
"""
Overrides :py:meth:`.X690Type.encode_raw`
"""
if isinstance(self.pyvalue, _SENTINEL_UNINITIALISED):
return b""
collapsed_identifiers = self.collapse_identifiers()
if collapsed_identifiers == ():
return b""
try:
output = bytes(collapsed_identifiers)
except ValueError as exc:
raise ValueError(
"Unable to collapse %r. First two octets are too large!"
% (self.nodes,)
) from exc
return output
def __int__(self) -> int:
nodes = self.nodes
if len(nodes) != 1:
raise ValueError(
"Only ObjectIdentifier with one node can be "
"converted to int. %r is not convertable. It has %d nodes."
% (self, len(self))
)
return nodes[0]
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return "ObjectIdentifier(%r)" % (self.value,)
def __eq__(self, other: object) -> bool:
return isinstance(other, ObjectIdentifier) and self.value == other.value
def __len__(self) -> int:
return len(self.nodes)
def __contains__(self, other: "ObjectIdentifier") -> bool:
"""
Check if one OID is a child of another.
TODO: This has been written in the middle of the night! It's messy...
"""
# pylint: disable=invalid-name
a, b = other.nodes, self.nodes
# if both have the same amount of identifiers, check for equality
if len(a) == len(b):
return a == b
# if "self" is longer than "other", self cannot be "in" other
if len(b) > len(a):
return False
# For all other cases:
# 1. zero-fill
# 2. drop identical items from the front (leaving us with "tail")
# 3. compare both tails
zipped = zip_longest(a, b, fillvalue=None)
tail: List[Tuple[int, int]] = []
for tmp_a, tmp_b in zipped:
if tmp_a == tmp_b and not tail:
continue
tail.append((tmp_a, tmp_b))
# if we only have Nones in "b", we know that "a" was longer and that it
# is a direct subtree of "b" (no diverging nodes). Otherwise we would
# have te divergence in "b", and we can say that "b is contained in a"
_, unzipped_b = zip(*tail)
if all([x is None for x in unzipped_b]):
return True
# In all other cases we end up with an unmatching tail and know that "b
# is not contained in a".
return False
def __lt__(self, other: "ObjectIdentifier") -> bool:
return self.nodes < other.nodes
def __hash__(self) -> int:
return hash(self.value)
def __add__(self, other: "ObjectIdentifier") -> "ObjectIdentifier":
nodes = ".".join([self.value, other.value])
return ObjectIdentifier(nodes)
@overload
def __getitem__(self, index: int) -> int: # pragma: no cover
...
@overload
def __getitem__(
self, index: slice
) -> "ObjectIdentifier": # pragma: no cover
...
def __getitem__(
self, index: Union[int, slice]
) -> Union["ObjectIdentifier", int]:
if isinstance(index, int):
return self.nodes[index]
output = self.nodes[index]
return ObjectIdentifier(".".join([str(n) for n in output]))
def parentof(self, other: "ObjectIdentifier") -> bool:
"""
Convenience method to check whether this OID is a parent of another OID
"""
return other in self
def childof(self, other: "ObjectIdentifier") -> bool:
"""
Convenience method to check whether this OID is a child of another OID
"""
return self in other
class ObjectDescriptor(X690Type[str]):
TAG = 0x07
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class External(X690Type[bytes]):
TAG = 0x08
class Real(X690Type[float]):
TAG = 0x09
NATURE = [TypeNature.PRIMITIVE]
class Enumerated(X690Type[List[Any]]):
TAG = 0x0A
NATURE = [TypeNature.PRIMITIVE]
class EmbeddedPdv(X690Type[bytes]):
TAG = 0x0B
class Utf8String(X690Type[str]):
TAG = 0x0C
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class RelativeOid(X690Type[str]):
TAG = 0x0D
NATURE = [TypeNature.PRIMITIVE]
class Set(X690Type[bytes]):
TAG = 0x11
class NumericString(X690Type[str]):
TAG = 0x12
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class PrintableString(X690Type[str]):
TAG = 0x13
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class T61String(X690Type[str]):
TAG = 0x14
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
__INITIALISED = False
def __init__(self, value: Union[str, bytes] = "") -> None:
if isinstance(value, str):
super().__init__(value or UNINITIALISED)
else:
super().__init__(T61String.decode_raw(value))
def __eq__(self, other: object) -> bool:
return isinstance(other, T61String) and self.value == other.value
@staticmethod
def decode_raw(data: bytes, slc: slice = slice(None, None)) -> str:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
data = data[slc]
if not T61String.__INITIALISED:
t61codec.register()
T61String.__INITIALISED = True
return data.decode("t61")
def encode_raw(self) -> bytes:
"""
Overrides :py:meth:`.X690Type.encode_raw`
"""
if not T61String.__INITIALISED: # pragma: no cover
t61codec.register()
T61String.__INITIALISED = True
if isinstance(self.pyvalue, _SENTINEL_UNINITIALISED):
return b""
return self.pyvalue.encode("t61")
class VideotexString(X690Type[str]):
TAG = 0x15
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class IA5String(X690Type[str]):
TAG = 0x16
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class UtcTime(X690Type[datetime]):
TAG = 0x17
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class GeneralizedTime(X690Type[datetime]):
TAG = 0x18
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class GraphicString(X690Type[str]):
# NOTE: As per x.690, this should inherit from OctetString. However, this
# library serves as an abstraction layer between X.690 and Python.
# For this reason, it defines this as a "str" type. To keep the
# correct behaviours, we can still "borrow" the implementation from
# OctetString if needed
TAG = 0x19
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
@staticmethod
def decode_raw(data: bytes, slc: slice = slice(None)) -> str:
"""
Converts the raw byte-value (without type & length header) into a
pure Python type
Overrides :py:meth:`~.X690Type.decode_raw`
"""
data = data[slc]
return data.decode("ascii")
class VisibleString(X690Type[str]):
TAG = 0x1A
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class GeneralString(X690Type[str]):
TAG = 0x1B
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class UniversalString(X690Type[str]):
TAG = 0x1C
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class CharacterString(X690Type[str]):
TAG = 0x1D
class BmpString(X690Type[str]):
TAG = 0x1E
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
class EOC(X690Type[bytes]):
TAG = 0x00
NATURE = [TypeNature.PRIMITIVE]
class BitString(X690Type[str]):
TAG = 0x03
NATURE = [TypeNature.PRIMITIVE, TypeNature.CONSTRUCTED]
| 30.465458 | 83 | 0.600074 |
6d97037a4ee3c679a9cb47bae08b341225f14d6b | 4,302 | py | Python | homeassistant/components/whois/sensor.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 4 | 2020-08-10T20:02:24.000Z | 2022-01-31T02:14:22.000Z | homeassistant/components/whois/sensor.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/whois/sensor.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 4 | 2017-01-10T04:17:33.000Z | 2021-09-02T16:37:24.000Z | """Get WHOIS information for a given host."""
from datetime import timedelta
import logging
import voluptuous as vol
import whois
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_DOMAIN, CONF_NAME, TIME_DAYS
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Whois"
ATTR_EXPIRES = "expires"
ATTR_NAME_SERVERS = "name_servers"
ATTR_REGISTRAR = "registrar"
ATTR_UPDATED = "updated"
SCAN_INTERVAL = timedelta(hours=24)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the WHOIS sensor."""
domain = config.get(CONF_DOMAIN)
name = config.get(CONF_NAME)
try:
if "expiration_date" in whois.whois(domain):
add_entities([WhoisSensor(name, domain)], True)
else:
_LOGGER.error(
"WHOIS lookup for %s didn't contain an expiration date", domain
)
return
except whois.BaseException as ex: # pylint: disable=broad-except
_LOGGER.error("Exception %s occurred during WHOIS lookup for %s", ex, domain)
return
class WhoisSensor(SensorEntity):
"""Implementation of a WHOIS sensor."""
def __init__(self, name, domain):
"""Initialize the sensor."""
self.whois = whois.whois
self._name = name
self._domain = domain
self._state = None
self._attributes = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to represent this sensor."""
return "mdi:calendar-clock"
@property
def unit_of_measurement(self):
"""Return the unit of measurement to present the value in."""
return TIME_DAYS
@property
def state(self):
"""Return the expiration days for hostname."""
return self._state
@property
def extra_state_attributes(self):
"""Get the more info attributes."""
return self._attributes
def _empty_state_and_attributes(self):
"""Empty the state and attributes on an error."""
self._state = None
self._attributes = None
def update(self):
"""Get the current WHOIS data for the domain."""
try:
response = self.whois(self._domain)
except whois.BaseException as ex: # pylint: disable=broad-except
_LOGGER.error("Exception %s occurred during WHOIS lookup", ex)
self._empty_state_and_attributes()
return
if response:
if "expiration_date" not in response:
_LOGGER.error(
"Failed to find expiration_date in whois lookup response. "
"Did find: %s",
", ".join(response.keys()),
)
self._empty_state_and_attributes()
return
if not response["expiration_date"]:
_LOGGER.error("Whois response contains empty expiration_date")
self._empty_state_and_attributes()
return
attrs = {}
expiration_date = response["expiration_date"]
if isinstance(expiration_date, list):
attrs[ATTR_EXPIRES] = expiration_date[0].isoformat()
else:
attrs[ATTR_EXPIRES] = expiration_date.isoformat()
if "nameservers" in response:
attrs[ATTR_NAME_SERVERS] = " ".join(response["nameservers"])
if "updated_date" in response:
update_date = response["updated_date"]
if isinstance(update_date, list):
attrs[ATTR_UPDATED] = update_date[0].isoformat()
else:
attrs[ATTR_UPDATED] = update_date.isoformat()
if "registrar" in response:
attrs[ATTR_REGISTRAR] = response["registrar"]
time_delta = expiration_date - expiration_date.now()
self._attributes = attrs
self._state = time_delta.days
| 30.510638 | 85 | 0.612041 |
0bddcb791c2c5dd9c7684be215f3a15b2b20d0b2 | 3,459 | py | Python | Scripts/JudgeSystem.py | MaowMan/pyjudge | ac3ab2d1da796326cec251feb3f4b558a3939c70 | [
"MIT"
] | null | null | null | Scripts/JudgeSystem.py | MaowMan/pyjudge | ac3ab2d1da796326cec251feb3f4b558a3939c70 | [
"MIT"
] | null | null | null | Scripts/JudgeSystem.py | MaowMan/pyjudge | ac3ab2d1da796326cec251feb3f4b558a3939c70 | [
"MIT"
] | null | null | null | import json as json
import csv as csv
from Scripts.JudgeBase import JudgeObj,JudgeError
from Scripts.JudgeMain import JudgeMain
class JudgeSystem(JudgeObj):
def __init__(self):
super(JudgeSystem,self).__init__()
self.SetupAsset()
self.CheckFileExists()
self.MainProcess()
self.StoreResult()
def SetupAsset(self):
with open(self.MainConfig["Environmentfolder"]+"Info.json","r") as reader:
self.Info=json.loads(reader.read())
self.Problems=JudgeProblems(self.Info["Problems"])
self.Students=JudgeStudents(self.Info["Students"])
def CheckFileExists(self):
for Problem in self:
for Testcase in Problem:
try:
with open("{}{}-{}.in".format(self.MainConfig["Environmentfolder"],Problem.Name,Testcase.Seq),"r") as reader:
pass
except(FileNotFoundError):
raise JudgeError("{}{}-{}.in not found".format(self.MainConfig["Environmentfolder"],Problem.Name,Testcase.Seq))
try:
with open("{}{}-{}.out".format(self.MainConfig["Environmentfolder"],Problem.Name,Testcase.Seq),"r") as reader:
pass
except(FileNotFoundError):
raise JudgeError("{}{}-{}.out not found".format(self.MainConfig["Environmentfolder"],Problem.Name,Testcase.Seq))
def MainProcess(self):
for Student in self.Students:
for Problem in self.Problems:
for Testcase in Problem:
Student.Load(JudgeMain(Student,Problem,Testcase).Result)
def StoreResult(self):
with open("{}{}.csv".format(self.MainConfig["Environmentfolder"],self.Info["Name"]),"w",newline="") as csvfile:
writer=csv.writer(csvfile)
writer.writerow(self.Problems.Firstrow())
for Student in self.Students:
writer.writerow(Student.Flush())
print("Program Ended")
class JudgeProblems(JudgeObj):
def __init__(self,Data):
super(JudgeProblems,self).__init__()
self.Container=[]
for Problem in Data:
self.Container.append(JudgeProblem(Problem))
def Firstrow(self):
result=["Id","Name","ResultScore"]
for Problem in self:
for Testcase in Problem:
result+=["{}-{}_Status".format(Problem.Name,Testcase.Seq),"{}-{}_Score".format(Problem.Name,Testcase.Seq)]
return result
class JudgeProblem(JudgeObj):
def __init__(self,Data):
super(JudgeProblem,self).__init__()
self.Name=Data["Name"]
for Testcase in Data["Testcases"]:
self.Container.append(JudgeTestcase(Testcase))
class JudgeTestcase(JudgeObj):
def __init__(self,Data):
self.Score=Data["Score"]
self.Seq=Data["Seq"]
self.Timelimit=Data["Timelimit"]
class JudgeStudents(JudgeObj):
def __init__(self,Data):
super(JudgeStudents,self).__init__()
Cache=0
for Student in Data:
self.Container.append(JudgeStudent(Cache,Student))
class JudgeStudent(JudgeObj):
def __init__(self,Id,Name):
self.Id=Id
self.Name=Name
self.Score=0
self.Result=[]
def Flush(self):
return ([self.Id,self.Name,self.Score]+self.Result)
def Load(self,data):
self.Score+=data[1]
self.Result+=[data[0].replace("\n",""),data[1]] | 39.306818 | 132 | 0.610581 |
9197dcce40c6ca3ebf0646b94e3479331fdbcde8 | 1,585 | py | Python | anki/importing/anki1.py | bdunnette/omeka2anki | 6280b4a17f83a930cc053273fb4f36f691c49b83 | [
"MIT"
] | 2 | 2015-03-21T16:26:07.000Z | 2021-05-24T11:40:04.000Z | src/anki/importing/anki1.py | jlitven/vexer | bc3b836771795acbba64f492b5bfb731adf91674 | [
"MIT"
] | null | null | null | src/anki/importing/anki1.py | jlitven/vexer | bc3b836771795acbba64f492b5bfb731adf91674 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import traceback, os, re
from anki.lang import _
from anki.upgrade import Upgrader
from anki.importing.anki2 import Anki2Importer
class Anki1Importer(Anki2Importer):
dupeOnSchemaChange = True
def run(self):
u = Upgrader()
# check
res = u.check(self.file)
if res == "invalid":
self.log.append(_(
"File is invalid. Please restore from backup."))
raise Exception("invalidFile")
# upgrade
if res != "ok":
self.log.append(
"Problems fixed during upgrade:\n***\n%s\n***\n" % res)
try:
deck = u.upgrade()
except:
traceback.print_exc()
self.log.append(traceback.format_exc())
return
# save the conf for later
conf = deck.decks.confForDid(1)
# merge
deck.close()
mdir = re.sub(r"\.anki2?$", ".media", self.file)
self.deckPrefix = re.sub(r"\.anki$", "", os.path.basename(self.file))
self.file = deck.path
Anki2Importer.run(self, mdir)
# set imported deck to saved conf
id = self.col.decks.confId(self.deckPrefix)
conf['id'] = id
conf['name'] = self.deckPrefix
conf['usn'] = self.col.usn()
self.col.decks.updateConf(conf)
did = self.col.decks.id(self.deckPrefix)
d = self.col.decks.get(did)
self.col.decks.setConf(d, id)
| 32.346939 | 78 | 0.569716 |
64969bcfc56531b521e3a9d750a632a175b0583a | 6,728 | py | Python | nodechecker/nodechecker/lib/python/nodechecker/context.py | open-infinity/health-monitoring | 47819485f9a7ffae1b326af09b23a159e4aeb190 | [
"Apache-2.0"
] | null | null | null | nodechecker/nodechecker/lib/python/nodechecker/context.py | open-infinity/health-monitoring | 47819485f9a7ffae1b326af09b23a159e4aeb190 | [
"Apache-2.0"
] | null | null | null | nodechecker/nodechecker/lib/python/nodechecker/context.py | open-infinity/health-monitoring | 47819485f9a7ffae1b326af09b23a159e4aeb190 | [
"Apache-2.0"
] | null | null | null | import os
import node
import util
import threading
import filereader
import logging
import logging.handlers
import notification.parser
import notification.manager
class Context(object):
def __init__(self, options=None, conf=None, node_manager_obj=None):
# ---------------- Default values ----------------
# Parameters
self.BIG_TIME_DIFF = 1000000
self.RRD_HTTP_SERVER_PORT = 8181
self.NODE_CREATION_TIMEOUT = 30
self.MAX_BYTES_LOGFILE = 5000000
self.MAX_CMT_CONF_WAIT = 600
self.CMT_CONF_WAIT = 10
# Runtime collections
self.node_list = []
self.active_node_list = []
self.dead_node_set = set()
self.new_dead_node_set = set()
self.master_list = []
# Runtime classes
#self.udp_listener = None
self.ntf_reader = None
self.nodelist_reader = None
self.ntf_manager = None
self.node_manager = None
self.conf = None
self.this_node = None
#self.timer_dead_node = None
#self.timer_delayed_dead_node = None
#self.timer_heartbeat = None
self.logger = None
self.resource_lock = None
# State variables
#role = "SLAVE"
self.mode = "RUN"
self.my_master = None
# Configuration variables
self.heartbeat_period = 1
self.rrd_scan_period = 1
self.dead_node_timeout = 1
self.heartbeats_received = 0
self.min_time_diff = self.BIG_TIME_DIFF
self.log_level = ""
self.log_file = ""
self.node_list_file = ""
self.active_node_list_file = ""
# ---------------- End of default values ----------------
# Extract properties from configuration file or command line
self.this_node = node.Node()
self.conf = conf
if self.conf:
self.node_list_file = os.path.join(conf.hm_root, 'nodechecker', "etc", "nodelist.conf")
self.active_node_list_file = os.path.join(conf.hm_root, 'nodechecker', "etc", "active_nodelist.conf")
self.nodelist_reader = filereader.FileReader(self.node_list_file)
self.node_manager = node_manager_obj
self.heartbeat_period = int(conf.node_heartbeat_period)
self.rrd_scan_period = int(conf.node_rrd_scan_period)
self.dead_node_timeout = int(conf.node_dead_node_timeout)
self.node_master_timeout = int(conf.node_master_timeout)
if options and options.log_level:
self.log_level = options.log_level
else:
self.log_level = conf.node_log_level
if options and options.log_file:
self.log_file = options.log_file
else:
self.log_file = conf.node_log_file
if options and options.port:
self.this_node.port = int(options.port)
else:
self.this_node.port = int(conf.node_udp_port)
if options and options.ip_address:
self.this_node.ip_address = options.ip_address
elif conf.node_ip_address == "auto":
self.this_node.ip_address = util.get_ip_address()
else:
self.this_node.ip_address = conf.node_ip_address
if options and options.ip_address_public:
self.this_node.ip_address_public = options.ip_address_public
# can't know which ip address to use automatically. this must be configured in config file
#elif conf.node_ip_address_public == "auto":
# node.ip_address_public = os.environ["OI_PUBLIC_IP"]
else:
self.this_node.ip_address_public = conf.node_ip_address_public
if options and options.instance_id:
self.this_node.instance_id = options.instance_id
#elif conf.node_instance_id == "auto":
# node.instance_id = os.environ["OI_INSTANCE_ID"]
else:
self.this_node.instance_id = conf.node_instance_id
if options and options.cluster_id:
self.this_node.cluster_id = options.cluster_id
#elif conf.node_cluster_id == "auto":
# node.cluster_id = os.environ["OI_CLUSTER_ID"]
else:
self.this_node.cluster_id = conf.node_cluster_id
if options and options.machine_id:
self.this_node.machine_id = options.machine_id
#elif conf.node_machine_id == "auto":
# node.machine_id = os.environ["OI_MACHINE_ID"]
else:
self.this_node.machine_id = conf.node_machine_id
if options and options.cloud_zone:
self.this_node.cloud_zone = options.cloud_zone
#elif conf.node_cloud_zone == "auto":
# node.cloud_zone = os.environ["OI_CLOUD_ZONE"]
else:
self.this_node.cloud_zone = conf.node_cloud_zone
if options and options.mode:
self.mode = options.mode
else:
self.mode = conf.node_mode
# Construct the rest
if self.nodelist_reader:
self.this_node.group_name = self.nodelist_reader.read_attribute(self.this_node.ip_address, 'GROUP_NAME')
self.this_node.machine_type = self.nodelist_reader.read_attribute(self.this_node.ip_address, 'MACHINE_TYPE')
self.this_node.hostname = self.nodelist_reader.read_attribute(self.this_node.ip_address, 'HOST_NAME')
self.resource_lock = threading.RLock()
self.ntf_reader = notification.parser.NotificationParser(self.this_node, conf)
self.ntf_manager = notification.manager.NotificationManager(self.this_node, conf)
self.construct_logger()
def construct_logger(self):
    """Creates the 'nodechecker' logger with a size-rotated file handler.

    The log file lives under ``conf.hm_root/conf.nodechecker_home`` and is
    rotated once it reaches ``MAX_BYTES_LOGFILE`` bytes, keeping 5 backups.
    Verbosity: ``log_level == "debug"`` -> DEBUG, anything else -> INFO.
    """
    self.logger = logging.getLogger('nodechecker')
    log_file_path = os.path.join(
        self.conf.hm_root, self.conf.nodechecker_home, self.log_file)
    handler = logging.handlers.RotatingFileHandler(
        log_file_path, maxBytes=self.MAX_BYTES_LOGFILE, backupCount=5)
    # format() instead of '+'-concatenation so a non-string log_level
    # cannot raise TypeError here.
    print("****************************************log level {}".format(
        self.log_level))
    if self.log_level == "debug":
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s [%(levelname)s] [%(name)s] "
                                  " [%(funcName)s] %(message)s")
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
| 38.666667 | 124 | 0.608799 |
5fe1198fcbf6e0f31ce863c3d1a20abba2fe6eb6 | 410 | py | Python | exercicios/exercicio082.py | Helton-Rubens/Python-3 | eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7 | [
"MIT"
] | null | null | null | exercicios/exercicio082.py | Helton-Rubens/Python-3 | eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7 | [
"MIT"
] | null | null | null | exercicios/exercicio082.py | Helton-Rubens/Python-3 | eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7 | [
"MIT"
# Reads integers until the user answers "n(ão)", then reports which of the
# entered numbers are even (par) and which are odd (impar).
lista = []
par = []
impar = []
while True:
    lista.append(int(input('Digite um número: ')))
    dec = str(input('Quer continuar? ')).strip().lower()
    # dec[:1] instead of dec[0]: an empty answer no longer raises
    # IndexError, it simply keeps the loop going.
    if dec[:1] == 'n':
        break
for i in lista:
    if i % 2 == 0:
        par.append(i)
    else:
        impar.append(i)
print(f'''Você digitou os números: {lista}
Os números pares digitados foram: {par}
Os números impares digitados foram: {impar}''')
| 24.117647 | 56 | 0.592683 |
a4122d5f707ff6099836f06d9a64b21452434659 | 902 | py | Python | src/SocialNetwork_API/models/user_types.py | HoangNguyenHuy/SocialNetwork | 495062d9b85cfdaa3df41002c2c4a88ab4b53446 | [
"MIT"
] | null | null | null | src/SocialNetwork_API/models/user_types.py | HoangNguyenHuy/SocialNetwork | 495062d9b85cfdaa3df41002c2c4a88ab4b53446 | [
"MIT"
] | null | null | null | src/SocialNetwork_API/models/user_types.py | HoangNguyenHuy/SocialNetwork | 495062d9b85cfdaa3df41002c2c4a88ab4b53446 | [
"MIT"
] | null | null | null | from django.db import models
class TinyIntegerField(models.SmallIntegerField):
    """SmallIntegerField stored as a native TINYINT on MySQL backends."""

    def db_type(self, connection):
        # Only MySQL has a 1-byte integer column type; every other backend
        # falls back to the parent field's column type.
        engine = connection.settings_dict['ENGINE']
        if engine != 'django.db.backends.mysql':
            return super(TinyIntegerField, self).db_type(connection)
        return "tinyint"
class PositiveTinyIntegerField(models.PositiveSmallIntegerField):
    """PositiveSmallIntegerField stored as TINYINT UNSIGNED on MySQL."""

    def db_type(self, connection):
        # MySQL gets the compact unsigned 1-byte type; other backends use
        # whatever the parent field maps to.
        engine = connection.settings_dict['ENGINE']
        if engine != 'django.db.backends.mysql':
            return super(PositiveTinyIntegerField, self).db_type(connection)
        return "tinyint unsigned"
class NormalTextField(models.TextField):
    """TextField pinned to MySQL's plain TEXT column type (not LONGTEXT)."""

    def db_type(self, connection):
        # Force the 64KB TEXT type on MySQL; defer to the parent elsewhere.
        engine = connection.settings_dict['ENGINE']
        if engine != 'django.db.backends.mysql':
            return super(NormalTextField, self).db_type(connection)
        return "text"
| 34.692308 | 77 | 0.662971 |
ba19d773555568b2e4dd3c8d9d918a270e0a3efe | 53,012 | py | Python | tensorflow/contrib/layers/python/layers/feature_column.py | gameon67/tensorflow | cf831df71d4d2e0ac55bef4efdcda4962c456290 | [
"Apache-2.0"
] | 2 | 2020-06-30T05:52:37.000Z | 2021-01-21T04:16:39.000Z | tensorflow/contrib/layers/python/layers/feature_column.py | Dinesh-3/tensorflow | be647ad9512f7d2b891494ef8abbbde46e2e0663 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/layers/python/layers/feature_column.py | Dinesh-3/tensorflow | be647ad9512f7d2b891494ef8abbbde46e2e0663 | [
"Apache-2.0"
] | 2 | 2020-06-24T11:07:08.000Z | 2020-08-09T00:02:58.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. For example "country:US" is a feature which is in "country" feature
column and has a feature value ("US").
Supported feature types are:
* _SparseColumn: also known as categorical features.
* _RealValuedColumn: also known as continuous features.
Supported transformations on above features are:
* Bucketization: also known as binning.
* Crossing: also known as composition or union.
* Embedding.
Typical usage example:
```python
# Define features and transformations
country = sparse_column_with_keys(column_name="native_country",
keys=["US", "BRA", ...])
country_emb = embedding_column(sparse_id_column=country, dimension=3,
combiner="sum")
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
occupation_x_country = crossed_column(columns=[occupation, country],
hash_bucket_size=10000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
my_features = [occupation_emb, age_buckets, country_emb]
# Building model via layers
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=my_features)
second_layer = fully_connected(first_layer, ...)
# Building model via tf.learn.estimators
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=my_wide_features,
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.lookup import lookup_ops as contrib_lookup_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
class _FeatureColumn(object):
  """Represents a feature column abstraction.

  To distinguish the concept of a feature family and a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. For example "country:US" is a feature which is in "country" feature
  column and has a feature value ("US").

  This class is an abstract class. User should not create one instance of this.
  Following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
  instances.
  """
  # Python 2 style ABC declaration; subclasses must implement the abstract
  # properties/methods below.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def name(self):
    """Returns the name of column or transformed column."""
    pass

  @abc.abstractproperty
  def config(self):
    """Returns configuration of the base feature for `tf.parse_example`."""
    pass

  @abc.abstractproperty
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    pass

  @abc.abstractmethod
  def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    raise NotImplementedError("Transform is not implemented for {}.".format(
        self))

  # NOTE(review): parameter is spelled `weight_collection` here but concrete
  # subclasses use `weight_collections` — confirm which is intended.
  @abc.abstractmethod
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collection=None,
                         trainable=True):
    """Returns a Tensor as an input to the first layer of neural network."""
    raise ValueError("Calling an abstract method.")

  @abc.abstractmethod
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    raise ValueError("Calling an abstract method.")
class _SparseColumn(_FeatureColumn,
                    collections.namedtuple("_SparseColumn",
                                           ["column_name", "is_integerized",
                                            "bucket_size", "lookup_config",
                                            "weight_column", "combiner",
                                            "dtype"])):
  """Represents a sparse feature column also known as categorical features.

  Instances of this class are immutable. A sparse column means features are
  sparse and dictionary returned by InputBuilder contains a
  ("column_name", SparseTensor) pair.
  One and only one of bucket_size or lookup_config should be set. If
  is_integerized is True then bucket_size should be set.

  Attributes:
    column_name: A string defining sparse column name.
    is_integerized: A bool if True means type of feature is an integer.
      Integerized means we can use the feature itself as id.
    bucket_size: An int that is > 1. The number of buckets.
    lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
      configuration.
    weight_column: A string defining a sparse column name which represents
      weight or value of the corresponding sparse feature. Please check
      weighted_sparse_column for more information.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features, such as `tf.string` or `tf.int64`.

  Raises:
    TypeError: if lookup_config is not a _SparseIdLookupConfig.
    ValueError: if above expectations about input fails.
  """

  def __new__(cls,
              column_name,
              is_integerized=False,
              bucket_size=None,
              lookup_config=None,
              weight_column=None,
              combiner="sum",
              dtype=dtypes.string):
    # Validate that the arguments describe exactly one id-mapping strategy
    # (hash bucket vs. vocabulary lookup) before building the tuple.
    if is_integerized and bucket_size is None:
      raise ValueError("bucket_size should be set if is_integerized=True. "
                       "column_name: {}".format(column_name))
    if is_integerized and not dtype.is_integer:
      raise ValueError("dtype should be an integer if is_integerized is True. "
                       "Column {}.".format(column_name))
    if bucket_size is None and lookup_config is None:
      raise ValueError("one of bucket_size or lookup_config should be "
                       "set. column_name: {}".format(column_name))
    if bucket_size is not None and lookup_config:
      raise ValueError("one and only one of bucket_size or lookup_config "
                       "should be set. column_name: {}".format(column_name))
    if bucket_size is not None and bucket_size < 2:
      raise ValueError("bucket_size should be at least 2. "
                       "column_name: {}".format(column_name))
    if ((lookup_config) and
        (not isinstance(lookup_config, _SparseIdLookupConfig))):
      raise TypeError(
          "lookup_config should be an instance of _SparseIdLookupConfig. "
          "Given one is in type {} for column_name {}".format(
              type(lookup_config), column_name))
    if (lookup_config and lookup_config.vocabulary_file and
        lookup_config.vocab_size is None):
      raise ValueError("vocab_size should be defined. "
                       "column_name: {}".format(column_name))

    return super(_SparseColumn, cls).__new__(cls, column_name, is_integerized,
                                             bucket_size, lookup_config,
                                             weight_column, combiner, dtype)

  @property
  def name(self):
    return self.column_name

  @property
  def length(self):
    """Returns vocabulary or hash_bucket size."""
    if self.bucket_size is not None:
      return self.bucket_size
    return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets

  @property
  def config(self):
    return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    # Raw sparse ids cannot feed a DNN directly; they must be embedded first.
    raise ValueError("Column {} is not supported in DNN. "
                     "Please use embedding_column.".format(self))

  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    # Linear model path: one zero-initialized weight row per id, reduced per
    # example with this column's combiner.
    return _create_embedding_lookup(
        input_tensor=input_tensor,
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner,
        trainable=trainable,
        name=self.name + "_weights")
class _SparseColumnIntegerized(_SparseColumn):
  """See `sparse_column_with_integerized_feature`."""

  def __new__(cls,
              column_name,
              bucket_size,
              combiner="sum",
              dtype=dtypes.int64):
    if not dtype.is_integer:
      raise ValueError("dtype should be an integer. Given {}".format(
          column_name))

    return super(_SparseColumnIntegerized, cls).__new__(cls,
                                                        column_name,
                                                        is_integerized=True,
                                                        bucket_size=bucket_size,
                                                        combiner=combiner,
                                                        dtype=dtype)

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    # id = value % bucket_size, so out-of-range integer features are wrapped
    # into [0, bucket_size) instead of failing.
    sparse_id_values = math_ops.mod(columns_to_tensors[self.name].values,
                                    self.bucket_size)
    columns_to_tensors[self] = ops.SparseTensor(
        columns_to_tensors[self.name].indices, sparse_id_values,
        columns_to_tensors[self.name].shape)
def sparse_column_with_integerized_feature(column_name,
                                           bucket_size,
                                           combiner="sum",
                                           dtype=dtypes.int64):
  """Builds a _SparseColumn for features that already are integer ids.

  No vocabulary lookup or hashing is performed; the feature value itself is
  the id: output_id = input_feature (taken modulo `bucket_size` at transform
  time).

  Args:
    column_name: A string defining sparse column name.
    bucket_size: An int that is > 1; features are expected to fall in
      [0, bucket_size).
    combiner: How to reduce a multivalent column: "sum" (no normalization,
      the default), "mean" (l1 normalization) or "sqrtn" (l2 normalization).
      See `tf.embedding_lookup_sparse`.
    dtype: Integer type of the features. Defaults to dtypes.int64.

  Returns:
    An integerized _SparseColumn definition.

  Raises:
    ValueError: bucket_size is not greater than 1.
    ValueError: dtype is not integer.
  """
  return _SparseColumnIntegerized(
      column_name, bucket_size, combiner=combiner, dtype=dtype)
class _SparseColumnHashed(_SparseColumn):
  """See `sparse_column_with_hash_bucket`."""

  def __new__(cls, column_name, hash_bucket_size, combiner="sum"):

    return super(_SparseColumnHashed, cls).__new__(cls,
                                                   column_name,
                                                   bucket_size=hash_bucket_size,
                                                   combiner=combiner,
                                                   dtype=dtypes.string)

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    # id = fingerprint(string) % bucket_size, computed in-graph.
    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        columns_to_tensors[self.name].values,
        self.bucket_size,
        name=self.name + "_lookup")
    columns_to_tensors[self] = ops.SparseTensor(
        columns_to_tensors[self.name].indices, sparse_id_values,
        columns_to_tensors[self.name].shape)
def sparse_column_with_hash_bucket(column_name,
                                   hash_bucket_size,
                                   combiner="sum"):
  """Builds a _SparseColumn whose ids come from hashing the feature string.

  Use this for string features without a vocabulary file:

    output_id = Hash(input_feature_string) % hash_bucket_size

  Args:
    column_name: A string defining sparse column name.
    hash_bucket_size: An int that is > 1; the number of hash buckets.
    combiner: How to reduce a multivalent column: "sum" (no normalization,
      the default), "mean" (l1 normalization) or "sqrtn" (l2 normalization).
      See `tf.embedding_lookup_sparse`.

  Returns:
    A _SparseColumn with hashed bucket configuration.

  Raises:
    ValueError: hash_bucket_size is not greater than 2.
  """
  return _SparseColumnHashed(column_name, hash_bucket_size, combiner=combiner)
class _SparseColumnKeys(_SparseColumn):
  """See `sparse_column_with_keys`."""

  def __new__(cls,
              column_name,
              keys,
              default_value=-1,
              combiner="sum"):

    return super(_SparseColumnKeys, cls).__new__(
        cls,
        column_name,
        combiner=combiner,
        lookup_config=_SparseIdLookupConfig(keys=keys,
                                            vocab_size=len(keys),
                                            default_value=default_value),
        dtype=dtypes.string)

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    # Maps each string to its index in `keys`; out-of-vocabulary strings map
    # to default_value.
    columns_to_tensors[self] = contrib_lookup_ops.string_to_index(
        tensor=columns_to_tensors[self.name],
        mapping=list(self.lookup_config.keys),
        default_value=self.lookup_config.default_value,
        name=self.name + "_lookup")
def sparse_column_with_keys(column_name,
                            keys,
                            default_value=-1,
                            combiner="sum"):
  """Builds a _SparseColumn backed by an in-memory vocabulary.

  Lookup logic:

    lookup_id = index of feature in `keys` if present, else `default_value`

  Args:
    column_name: A string defining sparse column name.
    keys: A string list defining vocabulary.
    default_value: The value used for out-of-vocabulary feature values
      (default -1).
    combiner: How to reduce a multivalent column: "sum" (no normalization,
      the default), "mean" (l1 normalization) or "sqrtn" (l2 normalization).
      See `tf.embedding_lookup_sparse`.

  Returns:
    A _SparseColumnKeys with keys configuration.
  """
  # Freeze the vocabulary so the resulting column stays hashable/immutable.
  vocabulary = tuple(keys)
  return _SparseColumnKeys(column_name,
                           vocabulary,
                           default_value=default_value,
                           combiner=combiner)
class _EmbeddingColumn(_FeatureColumn, collections.namedtuple(
    "_EmbeddingColumn",
    ["sparse_id_column", "dimension", "combiner", "initializer"])):
  """Represents an embedding column.

  Args:
    sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
      functions.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of this can be thought as example level normalizations on the column:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
  """

  def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="mean",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified.")
    if initializer is None:
      # Default init: truncated normal scaled by 1/sqrt(vocab size).
      stddev = 1 / math.sqrt(sparse_id_column.length)
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(mean=0.0,
                                                          stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer)

  @property
  def name(self):
    return self.sparse_id_column.name + "_embedding"

  @property
  def length(self):
    """Returns id size."""
    return self.sparse_id_column.length

  @property
  def config(self):
    return _get_feature_config(self.sparse_id_column)

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    fields_values = []
    # pylint: disable=protected-access
    for k, v in self._asdict().items():
      if k == "initializer":
        # Excludes initializer from the key since we don't support allowing
        # users to specify different initializers for the same embedding column.
        # Special treatment is needed since the default str form of a
        # function contains its address, which could introduce non-determinism
        # in sorting.
        continue
      fields_values.append("{}={}".format(k, v))
    # pylint: enable=protected-access

    # This is effectively the same format as str(self), except with our special
    # treatment.
    return "%s(%s)" % (type(self).__name__, ", ".join(fields_values))

  def insert_transformed_feature(self, columns_to_tensors):
    # Delegate id computation to the wrapped sparse column, then alias its
    # result under this column's key.
    self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]

  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    output, _ = _create_embedding_lookup(
        input_tensor=input_tensor,
        vocab_size=self.length,
        dimension=self.dimension,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=self.initializer,
        combiner=self.combiner,
        trainable=trainable,
        name=self.name + "_weights")
    return output

  # pylint: disable=unused-argument
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    # Embeddings are a DNN-side transform; linear models should use the
    # underlying sparse column directly.
    raise ValueError("Column {} is not supported in linear models. "
                     "Please use sparse_column.".format(self))
def embedding_column(sparse_id_column,
                     dimension,
                     combiner="mean",
                     initializer=None):
  """Builds an _EmbeddingColumn wrapping a sparse id column.

  Args:
    sparse_id_column: A _SparseColumn created by one of the
      `sparse_column_with_*` functions. Its own `combiner` is ignored here.
    dimension: An integer specifying dimension of the embedding.
    combiner: How to reduce multiple entries in a single row: "sum" (no
      normalization), "mean" (l1, the default) or "sqrtn" (l2). See
      `tf.embedding_lookup_sparse`.
    initializer: Optional variable initializer for the embedding weights.
      When omitted, a truncated normal with mean 0.0 and stddev
      1/sqrt(sparse_id_column.length) is used.

  Returns:
    An _EmbeddingColumn.
  """
  return _EmbeddingColumn(sparse_id_column, dimension,
                          combiner=combiner, initializer=initializer)
class _HashedEmbeddingColumn(collections.namedtuple(
    "_HashedEmbeddingColumn", ["column_name", "size", "dimension", "combiner",
                               "initializer"]), _EmbeddingColumn):
  """See `hashed_embedding_column`."""

  def __new__(cls,
              column_name,
              size,
              dimension,
              combiner="mean",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified.")
    if initializer is None:
      # Fixed default stddev (there is no vocab size to scale by here).
      stddev = 0.1
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(mean=0.0,
                                                          stddev=stddev)
    return super(_HashedEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                      dimension, combiner,
                                                      initializer)

  @property
  def name(self):
    return self.column_name + "_embedding"

  @property
  def config(self):
    return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}

  def insert_transformed_feature(self, columns_to_tensors):
    # No pre-transform needed: hashing happens inside the embedding lookup.
    columns_to_tensors[self] = columns_to_tensors[self.column_name]

  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    # Same heuristic for the number of shards as _max_size_embedding_partitioner
    # (keep each float32 shard under ~64MB).
    max_shard_bytes = (64 << 20) - 1
    shards = self.size * 4.0 / max_shard_bytes
    shards = max(1, int(math.ceil(shards)))

    embeddings = partitioned_variables.create_partitioned_variables(
        shape=[self.size],
        slicing=[shards],
        initializer=self.initializer,
        dtype=dtypes.float32,
        collections=_add_variable_collection(weight_collections),
        name=self.name + "_weights",
        reuse=False,
        trainable=trainable)

    return embedding_ops.hashed_embedding_lookup_sparse(
        embeddings, input_tensor, self.dimension, name=self.name + "_lookup")
def hashed_embedding_column(column_name,
                            size,
                            dimension,
                            combiner="mean",
                            initializer=None):
  """Builds an embedding column that shares parameters via hashing.

  The i-th embedding component of a value v is found by retrieving an
  embedding weight whose index is a fingerprint of the pair (v, i), so the
  total parameter count is `size` regardless of vocabulary.

  Args:
    column_name: A string defining sparse column name.
    size: An integer specifying the number of parameters in the embedding
      layer.
    dimension: An integer specifying dimension of the embedding.
    combiner: How to reduce multiple entries in a single row: "sum" (no
      normalization), "mean" (l1, the default) or "sqrtn" (l2). See
      `tf.embedding_lookup_sparse`.
    initializer: Optional variable initializer; defaults to a truncated
      normal with mean 0 and standard deviation 0.1.

  Returns:
    A _HashedEmbeddingColumn.

  Raises:
    ValueError: if dimension or size is not a positive integer; or if
      combiner is not supported.
  """
  # Validate the cheap scalar arguments before constructing anything.
  if dimension < 1 or size < 1:
    raise ValueError("Dimension and size must be greater than 0.")
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")

  return _HashedEmbeddingColumn(column_name, size, dimension, combiner,
                                initializer)
class _RealValuedColumn(_FeatureColumn, collections.namedtuple(
    "_RealValuedColumn",
    ["column_name", "dimension", "default_value", "dtype"])):
  """Represents a real valued feature column also known as continuous features.

  Instances of this class are immutable. A real valued column means features are
  dense. It means dictionary returned by InputBuilder contains a
  ("column_name", Tensor) pair. Tensor shape should be (batch_size, 1).
  """

  def __new__(cls, column_name, dimension, default_value, dtype):
    if default_value is not None:
      # Stored as a tuple so the namedtuple stays hashable.
      default_value = tuple(default_value)
    return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
                                                 default_value, dtype)

  @property
  def name(self):
    return self.column_name

  @property
  def config(self):
    default_value = self.default_value
    if default_value is not None:
      # parse_example expects a list, not the stored tuple.
      default_value = list(default_value)
    return {self.column_name: parsing_ops.FixedLenFeature(
        [self.dimension], self.dtype, default_value)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    # No transformation is needed for _RealValuedColumn except reshaping.
    input_tensor = columns_to_tensors[self.name]
    # Keep a static batch size when known; -1 lets reshape infer it.
    batch_size = input_tensor.get_shape().as_list()[0]
    batch_size = int(batch_size) if batch_size else -1
    flattened_shape = [batch_size, self.dimension]
    columns_to_tensors[self] = array_ops.reshape(
        math_ops.to_float(input_tensor), flattened_shape)

  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    # Dense features feed the network directly.
    return input_tensor

  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    def _weight(name):
      return variable_scope.get_variable(
          name,
          shape=[self.dimension, num_outputs],
          initializer=array_ops.zeros_initializer,
          collections=_add_variable_collection(weight_collections))

    if self.name:
      with variable_scope.variable_op_scope([input_tensor], None, self.name):
        weight = _weight("weight")
    else:
      # Old behavior to support a subset of old checkpoints.
      weight = _weight("_weight")

    # The _RealValuedColumn has the shape of [batch_size, column.dimension].
    log_odds_by_dim = math_ops.matmul(input_tensor, weight)
    return log_odds_by_dim, [weight]
def real_valued_column(column_name,
                       dimension=1,
                       default_value=None,
                       dtype=dtypes.float32):
  """Creates a _RealValuedColumn.

  Args:
    column_name: A string defining real valued column name.
    dimension: An integer specifying dimension of the real valued column.
      The default is 1. The Tensor representing the _RealValuedColumn
      will have the shape of [batch_size, dimension].
    default_value: A single value compatible with dtype or a list of values
      compatible with dtype which the column takes on if data is missing. If
      None, then tf.parse_example will fail if an example does not contain
      this column. If a single value is provided, the same value will be
      applied as the default value for every dimension. If a list of values
      is provided, the length of the list should be equal to the value of
      `dimension`.
    dtype: defines the type of values. Default value is tf.float32.

  Returns:
    A _RealValuedColumn.

  Raises:
    TypeError: if dimension is not an int
    ValueError: if dimension is not a positive integer
    TypeError: if default_value is a list but its length is not equal to the
      value of `dimension`.
    TypeError: if default_value is not compatible with dtype.
    ValueError: if dtype is not convertable to tf.float32.
  """
  if not isinstance(dimension, int):
    raise TypeError("dimension must be an integer")
  if dimension < 1:
    raise ValueError("dimension must be greater than 0")
  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError("dtype is not convertible to tf.float32. Given {}".format(
        dtype))

  if default_value is None:
    return _RealValuedColumn(column_name, dimension, default_value, dtype)

  # A scalar int default is replicated across all dimensions, converted to
  # float first when the column dtype is floating.
  if isinstance(default_value, int):
    if dtype.is_integer:
      return _RealValuedColumn(column_name, dimension,
                               [default_value] * dimension, dtype)
    if dtype.is_floating:
      return _RealValuedColumn(column_name, dimension,
                               [float(default_value)] * dimension, dtype)

  # A scalar float default is only valid for floating dtypes.
  if isinstance(default_value, float):
    if dtype.is_floating and (not dtype.is_integer):
      return _RealValuedColumn(column_name, dimension,
                               [default_value] * dimension, dtype)

  if isinstance(default_value, list):
    if len(default_value) != dimension:
      raise ValueError("The length of default_value is not equal to the "
                       "value of dimension. default_value is {}.".format(
                           default_value))
    # Classify the list once: all-int lists fit integer dtypes directly and
    # floating dtypes after conversion; int/float mixtures fit only floating
    # dtypes.
    is_list_all_int = all(isinstance(v, int) for v in default_value)
    is_list_all_float = all(isinstance(v, (float, int)) for v in default_value)
    if is_list_all_int:
      if dtype.is_integer:
        return _RealValuedColumn(column_name, dimension, default_value, dtype)
      elif dtype.is_floating:
        return _RealValuedColumn(column_name, dimension,
                                 [float(v) for v in default_value], dtype)
    if is_list_all_float:
      if dtype.is_floating and (not dtype.is_integer):
        return _RealValuedColumn(column_name, dimension,
                                 [float(v) for v in default_value], dtype)

  # Reached for any default_value/dtype combination not handled above
  # (e.g. a float default with an integer dtype, or a non-numeric value).
  raise TypeError("default_value is not compatible with dtype. "
                  "default_value is {}.".format(default_value))
class _BucketizedColumn(_FeatureColumn, collections.namedtuple(
    "_BucketizedColumn", ["source_column", "boundaries"])):
  """Represents a bucketization transformation also known as binning.

  Instances of this class are immutable. Values in `source_column` will be
  bucketized based on `boundaries`.

  For example, if the inputs are:
      boundaries = [0, 10, 100]
      source_column = [[-5], [150], [10], [0], [4], [19]]
  then the bucketized feature will be:
      output = [[0], [3], [2], [1], [1], [2]]

  Attributes:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A list of floats specifying the boundaries. It has to be
      sorted. [a, b, c] defines following buckets:
      (-inf., a), [a, b), [b, c), [c, inf.)

  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """

  def __new__(cls, source_column, boundaries):
    if not isinstance(source_column, _RealValuedColumn):
      raise TypeError(
          "source_column should be an instance of _RealValuedColumn.")
    if not isinstance(boundaries, list) or not boundaries:
      raise ValueError("boundaries must be a list and it should not be empty.")
    # We allow bucket boundaries to be monotonically increasing
    # (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
    # de-duplicate.
    sanitized_boundaries = []
    for i in range(len(boundaries) - 1):
      if boundaries[i] == boundaries[i + 1]:
        continue
      elif boundaries[i] < boundaries[i + 1]:
        sanitized_boundaries.append(boundaries[i])
      else:
        # A strictly decreasing adjacent pair means the input was not sorted.
        raise ValueError("boundaries must be a sorted list")
    # The loop stops one short, so the final boundary is always appended here.
    sanitized_boundaries.append(boundaries[len(boundaries) - 1])
    # Store boundaries as a tuple so the namedtuple stays immutable/hashable.
    return super(_BucketizedColumn, cls).__new__(cls, source_column,
                                                 tuple(sanitized_boundaries))

  @property
  def name(self):
    # Derived from the source column so the transformed feature is
    # identifiable in logs and feature dictionaries.
    return self.source_column.name + "_BUCKETIZED"

  @property
  def length(self):
    """Returns total number of buckets."""
    # n boundaries split the real line into n + 1 intervals.
    return len(self.boundaries) + 1

  @property
  def config(self):
    # Parsing configuration is that of the underlying dense column; the
    # bucketization itself happens after parsing.
    return self.source_column.config

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    """Inserts the bucketized tensor for this column into `columns_to_tensors`."""
    # Bucketize the source column.
    if self.source_column not in columns_to_tensors:
      self.source_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = bucketization_op.bucketize(
        columns_to_tensors[self.source_column],
        boundaries=list(self.boundaries))

  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    """Returns a dense one-hot representation for use as a DNN input layer."""
    # One-hot encode each bucket index, then flatten the per-dimension
    # encodings into a single [batch, length * dimension] tensor.
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor), self.length, 1., 0.),
        [-1, self.length * self.source_column.dimension])

  def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor)[0]
    if dimension > 1:
      # i1: each batch index repeated `dimension` times.
      # i2: the dimension index [0..dimension) tiled across the batch.
      i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(
          math_ops.range(0, batch_size), 1), [1, dimension]), [-1])
      i2 = array_ops.tile(math_ops.range(0, dimension), [batch_size])
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(input_tensor, [-1]) + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32)
      bucket_indices = array_ops.reshape(input_tensor, [-1])
    indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
    shape = math_ops.to_int64(array_ops.pack([batch_size, dimension]))
    sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
    return sparse_id_values

  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    return _create_embedding_lookup(
        input_tensor=self.to_sparse_tensor(input_tensor),
        vocab_size=self.length * self.source_column.dimension,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner="sum",
        trainable=trainable,
        name=self.name + "_weights")
def bucketized_column(source_column, boundaries):
  """Constructs a `_BucketizedColumn` that bins `source_column` values.

  Args:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A list of floats specifying the boundaries. It has to be
      sorted.

  Returns:
    A _BucketizedColumn.

  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """
  column = _BucketizedColumn(source_column, boundaries)
  return column
class _CrossedColumn(_FeatureColumn, collections.namedtuple(
    "_CrossedColumn", ["columns", "hash_bucket_size", "combiner"])):
  """Represents a cross transformation also known as composition or union.

  Instances of this class are immutable. It crosses given `columns`. Crossed
  column output will be hashed to hash_bucket_size.
  Conceptually, transformation can be thought as:
    Hash(cartesian product of features in columns) % `hash_bucket_size`

  For example, if the columns are

      SparseTensor referred by first column: shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      SparseTensor referred by second column: : shape = [2, 1]
      [0, 0]: "d"
      [1, 1]: "e"

  then crossed feature will look like:

      shape = [2, 2]
      [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
      [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
      [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size

  Attributes:
    columns: An iterable of _FeatureColumn. Items can be an instance of
      _SparseColumn, _CrossedColumn, or _BucketizedColumn.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of this can be thought as example level normalizations on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.

  Raises:
    TypeError: if all items in columns are not an instance of _SparseColumn,
      _CrossedColumn, or _BucketizedColumn or
      hash_bucket_size is not an int.
    ValueError: if hash_bucket_size is not > 1 or
      len(columns) is not > 1.
  """

  @staticmethod
  def _is_crossable(column):
    # Only sparse-like columns (sparse, crossed, bucketized) may take part
    # in a cross.
    return isinstance(column,
                      (_SparseColumn, _CrossedColumn, _BucketizedColumn))

  def __new__(cls, columns, hash_bucket_size, combiner="sum"):
    for column in columns:
      if not _CrossedColumn._is_crossable(column):
        raise TypeError("columns should be a set of "
                        "_SparseColumn, _CrossedColumn, or _BucketizedColumn. "
                        "Column is {}".format(column))
    if len(columns) < 2:
      raise ValueError("columns should contain at least 2 elements.")
    if not isinstance(hash_bucket_size, int):
      raise TypeError("hash_bucket_size should be an int.")
    if hash_bucket_size < 2:
      raise ValueError("hash_bucket_size should be at least 2.")
    # Sort by name so that logically-equal crosses compare (and hash) equal
    # regardless of the order in which the caller listed the columns.
    sorted_columns = sorted([column for column in columns],
                            key=lambda column: column.name)
    return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
                                              hash_bucket_size, combiner)

  @property
  def name(self):
    sorted_names = sorted([column.name for column in self.columns])
    return "_X_".join(sorted_names)

  @property
  def config(self):
    # The parse config of a cross is the union of its components' configs.
    config = {}
    for column in self.columns:
      config.update(_get_feature_config(column))
    return config

  @property
  def length(self):
    """Returns total number of buckets."""
    return self.hash_bucket_size

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles cross transformation."""

    def _collect_leaf_level_columns(cross):
      """Collects base columns contained in the cross."""
      leaf_level_columns = []
      for c in cross.columns:
        if isinstance(c, _CrossedColumn):
          # Recurse into nested crosses so only leaves remain.
          leaf_level_columns.extend(_collect_leaf_level_columns(c))
        else:
          leaf_level_columns.append(c)
      return leaf_level_columns

    feature_tensors = []
    for c in _collect_leaf_level_columns(self):
      if isinstance(c, _SparseColumn):
        feature_tensors.append(columns_to_tensors[c.name])
      else:
        # Make sure the component's own transformation has been applied.
        if c not in columns_to_tensors:
          c.insert_transformed_feature(columns_to_tensors)
        if isinstance(c, _BucketizedColumn):
          feature_tensors.append(c.to_sparse_tensor(columns_to_tensors[c]))
        else:
          feature_tensors.append(columns_to_tensors[c])
    columns_to_tensors[self] = sparse_feature_cross_op.sparse_feature_cross(
        feature_tensors,
        hashed_output=True,
        num_buckets=self.hash_bucket_size)

  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    raise ValueError("Column {} is not supported in DNN. "
                     "Please use embedding_column.".format(self))

  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and the created weight Variable."""
    return _create_embedding_lookup(
        input_tensor=input_tensor,
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner,
        trainable=trainable,
        name=self.name + "_weights")
def crossed_column(columns, hash_bucket_size, combiner="sum"):
  """Constructs a `_CrossedColumn` that crosses the given feature columns.

  Args:
    columns: An iterable of _FeatureColumn. Items can be an instance of
      _SparseColumn, _CrossedColumn, or _BucketizedColumn.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A combiner string, supports sum, mean, sqrtn.

  Returns:
    A _CrossedColumn.

  Raises:
    TypeError: if any item in columns is not an instance of _SparseColumn,
      _CrossedColumn, or _BucketizedColumn, or
      hash_bucket_size is not an int.
    ValueError: if hash_bucket_size is not > 1 or
      len(columns) is not > 1.
  """
  crossed = _CrossedColumn(columns, hash_bucket_size, combiner=combiner)
  return crossed
def _get_feature_config(feature_column):
  """Returns configuration for the base feature defined in feature_column."""
  # Reject anything that is not a _FeatureColumn at all.
  if not isinstance(feature_column, _FeatureColumn):
    raise TypeError(
        "feature_columns should only contain instances of _FeatureColumn. "
        "Given column is {}".format(feature_column))
  supported_types = (_SparseColumn, _EmbeddingColumn, _RealValuedColumn,
                     _BucketizedColumn, _CrossedColumn)
  if not isinstance(feature_column, supported_types):
    raise TypeError("Not supported _FeatureColumn type. "
                    "Given column is {}".format(feature_column))
  return feature_column.config
def create_feature_spec_for_parsing(feature_columns):
  """Builds a parsing spec (for `tf.parse_example`) from feature_columns.

  Typical usage example:

  ```python
  # Define features and transformations
  country = sparse_column_with_vocabulary_file("country", VOCAB_FILE)
  age = real_valued_column("age")
  click_bucket = bucketized_column(real_valued_column("historical_click_ratio"),
                                   boundaries=[i/10. for i in range(10)])
  country_x_click = crossed_column([country, click_bucket], 10)

  feature_columns = set([age, click_bucket, country_x_click])
  batch_examples = tf.parse_example(
      serialized_examples,
      create_feature_spec_for_parsing(feature_columns))
  ```

  For the above example, create_feature_spec_for_parsing would return the dict:
  {"age": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
   "historical_click_ratio": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
   "country": parsing_ops.VarLenFeature(tf.string)}

  Args:
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.

  Returns:
    A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
  """
  parse_spec = {}
  for feature_column in feature_columns:
    parse_spec.update(_get_feature_config(feature_column))
  return parse_spec
def make_place_holder_tensors_for_base_features(feature_columns):
  """Returns placeholder tensors for inference.

  Args:
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.

  Returns:
    A dict mapping feature keys to SparseTensors (sparse columns) or
    placeholder Tensors (dense columns).
  """
  # Get dict mapping features to FixedLenFeature or VarLenFeature values.
  parse_spec = create_feature_spec_for_parsing(feature_columns)
  placeholders = {}
  for name, feature in parse_spec.items():
    if isinstance(feature, parsing_ops.VarLenFeature):
      # Sparse placeholder for sparse tensors.
      placeholders[name] = array_ops.sparse_placeholder(
          feature.dtype,
          name="Placeholder_" + name)
    else:
      # Simple placeholder for dense tensors.
      placeholders[name] = array_ops.placeholder(
          feature.dtype,
          shape=(None, feature.shape[0]),
          name="Placeholder_" + name)
  return placeholders
class _SparseIdLookupConfig(collections.namedtuple("_SparseIdLookupConfig",
                                                   ["vocabulary_file", "keys",
                                                    "num_oov_buckets",
                                                    "vocab_size",
                                                    "default_value"])):
  """Defines lookup configuration for a sparse feature.

  An immutable object defines lookup table configuration used by
  tf.feature_to_id_v2.

  Attributes:
    vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
      with keys.
    keys: A 1-D string iterable that specifies the mapping of strings to
      indices. It means a feature in keys will map to it's index in keys.
    num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
      vocabulary features will be ignored.
    vocab_size: Number of the elements in the vocabulary.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
  """

  # __new__ exists only to supply default values for the namedtuple fields.
  def __new__(cls,
              vocabulary_file=None,
              keys=None,
              num_oov_buckets=0,
              vocab_size=None,
              default_value=-1):

    return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys,
                                                     num_oov_buckets,
                                                     vocab_size, default_value)
def _add_variable_collection(weight_collections):
  """Ensures `ops.GraphKeys.VARIABLES` is among the given collections.

  Falsy inputs (None or empty) are returned unchanged.
  """
  if not weight_collections:
    return weight_collections
  merged = set(weight_collections)
  merged.add(ops.GraphKeys.VARIABLES)
  return list(merged)
def _max_size_embedding_partitioner(max_shard_bytes=(64 << 20) - 1):
"""Partitioner based on max size.
Args:
max_shard_bytes: max shard bytes.
Returns:
partitioner
"""
# max_shard_bytes defaults to ~64MB to keep below open sourced proto buffer
# size limit.
# TODO(zakaria): b/28274688 might cause low performance if there are too many
# partitions. Consider higher size, possily based on ps shards if the bug is
# not fixed.
# TODO(zakaria): Use a better heuristic based on vocab size and upper/lower
# bound. Partitioning only at over 16M vicab_size is suboptimal for most
# cases.
def partitioner(vocab_size, embed_dim):
total_size = 1.0 * vocab_size * embed_dim * 4 # 4 bytes for float32
shards = total_size / max_shard_bytes
shards = min(vocab_size, max(1, int(math.ceil(shards))))
return [shards, 1]
return partitioner
def _create_embedding_lookup(input_tensor, vocab_size, dimension,
                             weight_collections, initializer, combiner,
                             trainable, name):
  """Creates embedding variable and does a lookup.

  Args:
    input_tensor: A tensor which should contain sparse id to look up.
    vocab_size: An integer specifying the vocabulary size.
    dimension: An integer specifying the embedding vector dimension.
    weight_collections: List of graph collections to which weights are added.
    initializer: A variable initializer function to be used in embedding
      variable initialization.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: A string specifying the name of the embedding variable.

  Returns:
    A Tensor with shape [batch_size, dimension] and embedding Variable.

  Raises:
    ValueError: If initializer is None or not callable.
  """
  # Decide how to shard the [vocab_size, dimension] variable so each shard
  # stays under the partitioner's byte limit.
  slicing = _max_size_embedding_partitioner()(vocab_size, dimension)
  logging.info("Slicing=%s for name=%s, vocab_size=%d, embed_dim=%d",
               str(slicing), name, vocab_size, dimension)
  if not initializer:
    raise ValueError("initializer must be defined.")
  if not callable(initializer):
    raise ValueError("initializer must be callable.")
  embeddings = partitioned_variables.create_partitioned_variables(
      shape=[vocab_size, dimension],
      slicing=slicing,
      initializer=initializer,
      dtype=dtypes.float32,
      collections=weight_collections,
      name=name,
      reuse=False,
      trainable=trainable)
  # safe_embedding_lookup_sparse maps rows with no ids to default_id=0 and
  # applies the requested combiner across multivalent rows.
  return embedding_ops.safe_embedding_lookup_sparse(
      embeddings,
      input_tensor,
      default_id=0,
      combiner=combiner,
      name=name), embeddings
| 39.414126 | 80 | 0.664982 |
9f14f84fd6ba2ae57c89bbec0c01047a04b774f3 | 574 | py | Python | scripts/india_nhm/districts/__init__.py | SenthamizhanV/data | 9ba7b94ba4163a7dc2a7c31ba4ac7a9e69908ee0 | [
"Apache-2.0"
] | 23 | 2017-04-12T13:45:34.000Z | 2020-04-30T22:41:37.000Z | scripts/india_nhm/districts/__init__.py | SenthamizhanV/data | 9ba7b94ba4163a7dc2a7c31ba4ac7a9e69908ee0 | [
"Apache-2.0"
] | 1 | 2021-01-11T18:19:37.000Z | 2021-01-11T19:27:25.000Z | scripts/india_nhm/districts/__init__.py | SenthamizhanV/data | 9ba7b94ba4163a7dc2a7c31ba4ac7a9e69908ee0 | [
"Apache-2.0"
] | 7 | 2020-11-21T20:45:06.000Z | 2021-05-28T12:29:52.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 44.153846 | 74 | 0.764808 |
0c55b6cf996bbba1aac370287cdc09446420c4a4 | 1,037 | py | Python | app/user_content.py | mpalaiokostas/dash-app-template | f149f9968f2d6fdbf65f8842232fb04423bbe3a4 | [
"MIT"
] | null | null | null | app/user_content.py | mpalaiokostas/dash-app-template | f149f9968f2d6fdbf65f8842232fb04423bbe3a4 | [
"MIT"
] | null | null | null | app/user_content.py | mpalaiokostas/dash-app-template | f149f9968f2d6fdbf65f8842232fb04423bbe3a4 | [
"MIT"
] | null | null | null | from app.view_components import Tab
from app.pages import page_about, page_example_static, page_example_dynamic
class UserContent:
def get_user_settings(self):
return {
"app_title": "example title",
"app_subtitle": "example subtitle",
"contact_email": "dev@dev.com",
}
def load_user_tabs(self):
return [
Tab(
title="Static page",
id="tab-static-page",
html_code=page_example_static.get_code(),
callbacks=page_example_static.get_callbacks,
),
Tab(
title="Dynamic page",
id="tab-dynamic-page",
html_code=page_example_dynamic.get_code(),
callbacks=page_example_dynamic.get_callbacks,
),
Tab(
title="About",
id="tab-about",
html_code=page_about.get_code(),
callbacks=page_about.get_callbacks,
),
]
| 30.5 | 75 | 0.527483 |
2e7f8a4aaae9a74fedaf20a9e461503952cb5c20 | 1,612 | py | Python | djangoxpay/models.py | ajharry69/django-xpay | ebfe09ffa615293dbea6b6d8e1ffd65f8d283d15 | [
"Apache-2.0"
] | null | null | null | djangoxpay/models.py | ajharry69/django-xpay | ebfe09ffa615293dbea6b6d8e1ffd65f8d283d15 | [
"Apache-2.0"
] | 5 | 2021-03-30T14:04:24.000Z | 2021-09-22T19:21:25.000Z | djangoxpay/models.py | ajharry69/django-xpay | ebfe09ffa615293dbea6b6d8e1ffd65f8d283d15 | [
"Apache-2.0"
] | 1 | 2020-11-30T04:46:48.000Z | 2020-11-30T04:46:48.000Z | import json
class BaseModel:
def __str__(self):
return json.dumps(self.json)
def __repr__(self):
return self.__str__()
@property
def json(self):
return self.__dict__
class Money(BaseModel):
    """A monetary value stored as an integer amount of the currency's lowest unit."""

    DEFAULT_CURRENCY = 'usd'
    # Supported ISO 4217 currency codes, upper-cased.
    CURRENCIES = [code.upper() for code in [
        DEFAULT_CURRENCY, 'aed', 'afn', 'all', 'amd', 'ang', 'aoa', 'ars', 'aud', 'awg', 'azn', 'bam', 'bbd', 'bdt',
        'bgn', 'bif', 'bmd', 'bnd', 'bob', 'brl', 'bsd', 'bwp', 'bzd', 'cad', 'cdf', 'chf', 'clp', 'cny', 'cop', 'crc',
        'cve', 'czk', 'djf', 'dkk', 'dop', 'dzd', 'egp', 'etb', 'eur', 'fjd', 'fkp', 'gbp', 'gel', 'gip', 'gmd', 'gnf',
        'gtq', 'gyd', 'hkd', 'hnl', 'hrk', 'htg', 'huf', 'idr', 'ils', 'inr', 'isk', 'jmd', 'jpy', 'kes', 'kgs', 'khr',
        'kmf', 'krw', 'kyd', 'kzt', 'lak', 'lbp', 'lkr', 'lrd', 'lsl', 'mad', 'mdl', 'mga', 'mkd', 'mmk', 'mnt', 'mop',
        'mro', 'mur', 'mvr', 'mwk', 'mxn', 'myr', 'mzn', 'nad', 'ngn', 'nio', 'nok', 'npr', 'nzd', 'pab', 'pen', 'pgk',
        'php', 'pkr', 'pln', 'pyg', 'qar', 'ron', 'rsd', 'rub', 'rwf', 'sar', 'sbd', 'scr', 'sek', 'sgd', 'shp', 'sll',
        'sos', 'srd', 'std', 'szl', 'thb', 'tjs', 'top', 'try', 'ttd', 'twd', 'tzs', 'uah', 'ugx', 'uyu', 'uzs', 'vnd',
        'vuv', 'wst', 'xaf', 'xcd', 'xof', 'xpf', 'yer', 'zar', 'zmw', 'eek', 'lvl', 'svc', 'vef', 'ltl',
    ]]

    def __init__(self, amount, currency: str = 'USD', lowest_value: bool = False):
        """Normalise `amount` to an integer count of the currency's lowest unit.

        Args:
            amount: numeric value; major units unless `lowest_value` is True.
            currency: ISO currency code; stored as given.
            lowest_value: if True, `amount` is already in the lowest unit
                (e.g. cents) and is not scaled.
        """
        scale = 1 if lowest_value else 100  # x 100 converts amount to cents
        self.amount = round(float(amount) * scale)
        self.currency = currency
| 48.848485 | 119 | 0.483871 |
cdf99fd140140f1ab17dba2d46f39753a325c761 | 23,689 | py | Python | synapse/event_auth.py | LouisMT/synapse | 1241156c82644d5609f45659607a356af5d8fe08 | [
"Apache-2.0"
] | null | null | null | synapse/event_auth.py | LouisMT/synapse | 1241156c82644d5609f45659607a356af5d8fe08 | [
"Apache-2.0"
] | null | null | null | synapse/event_auth.py | LouisMT/synapse | 1241156c82644d5609f45659607a356af5d8fe08 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
from unpaddedbase64 import decode_base64
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, SynapseError, EventSizeError
from synapse.types import UserID, get_domain_from_id
logger = logging.getLogger(__name__)
def check(event, auth_events, do_sig_check=True, do_size_check=True):
    """ Checks if this event is correctly authed.

    Args:
        event: the event being checked.
        auth_events (dict: event-key -> event): the existing room state.
        do_sig_check (bool): whether to verify the event's signatures.
        do_size_check (bool): whether to enforce the per-field size limits.

    Raises:
        AuthError if the checks fail

    Returns:
        if the auth checks pass.
    """
    if do_size_check:
        _check_size_limits(event)

    if not hasattr(event, "room_id"):
        raise AuthError(500, "Event has no room_id: %s" % event)

    if do_sig_check:
        sender_domain = get_domain_from_id(event.sender)
        event_id_domain = get_domain_from_id(event.event_id)

        is_invite_via_3pid = (
            event.type == EventTypes.Member
            and event.membership == Membership.INVITE
            and "third_party_invite" in event.content
        )

        # Check the sender's domain has signed the event
        if not event.signatures.get(sender_domain):
            # We allow invites via 3pid to have a sender from a different
            # HS, as the sender must match the sender of the original
            # 3pid invite. This is checked further down with the
            # other dedicated membership checks.
            if not is_invite_via_3pid:
                raise AuthError(403, "Event not signed by sender's server")

        # Check the event_id's domain has signed the event
        if not event.signatures.get(event_id_domain):
            raise AuthError(403, "Event not signed by sending server")

    if auth_events is None:
        # Oh, we don't know what the state of the room was, so we
        # are trusting that this is allowed (at least for now)
        # NOTE(review): logger.warn is deprecated in favour of logger.warning.
        logger.warn("Trusting event: %s", event.event_id)
        return

    if event.type == EventTypes.Create:
        # The create event must originate from the same server as the room id.
        sender_domain = get_domain_from_id(event.sender)
        room_id_domain = get_domain_from_id(event.room_id)
        if room_id_domain != sender_domain:
            raise AuthError(
                403,
                "Creation event's room_id domain does not match sender's"
            )
        # FIXME
        logger.debug("Allowing! %s", event)
        return

    creation_event = auth_events.get((EventTypes.Create, ""), None)

    if not creation_event:
        raise SynapseError(
            403,
            "Room %r does not exist" % (event.room_id,)
        )

    creating_domain = get_domain_from_id(event.room_id)
    originating_domain = get_domain_from_id(event.sender)
    if creating_domain != originating_domain:
        if not _can_federate(event, auth_events):
            raise AuthError(
                403,
                "This room has been marked as unfederatable."
            )

    # FIXME: Temp hack
    if event.type == EventTypes.Aliases:
        if not event.is_state():
            raise AuthError(
                403,
                "Alias event must be a state event",
            )
        if not event.state_key:
            raise AuthError(
                403,
                "Alias event must have non-empty state_key"
            )
        # Only the server named by the state_key may set its own aliases.
        sender_domain = get_domain_from_id(event.sender)
        if event.state_key != sender_domain:
            raise AuthError(
                403,
                "Alias event's state_key does not match sender's domain"
            )
        logger.debug("Allowing! %s", event)
        return

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(
            "Auth events: %s",
            [a.event_id for a in auth_events.values()]
        )

    if event.type == EventTypes.Member:
        _is_membership_change_allowed(event, auth_events)
        logger.debug("Allowing! %s", event)
        return

    _check_event_sender_in_room(event, auth_events)

    # Special case to allow m.room.third_party_invite events wherever
    # a user is allowed to issue invites. Fixes
    # https://github.com/vector-im/vector-web/issues/1208 hopefully
    if event.type == EventTypes.ThirdPartyInvite:
        user_level = get_user_power_level(event.user_id, auth_events)
        invite_level = _get_named_level(auth_events, "invite", 0)

        if user_level < invite_level:
            raise AuthError(
                403, (
                    "You cannot issue a third party invite for %s." %
                    (event.content.display_name,)
                )
            )
        else:
            logger.debug("Allowing! %s", event)
            return

    _can_send_event(event, auth_events)

    if event.type == EventTypes.PowerLevels:
        _check_power_levels(event, auth_events)

    if event.type == EventTypes.Redaction:
        check_redaction(event, auth_events)

    logger.debug("Allowing! %s", event)
def _check_size_limits(event):
    """Check that the event conforms to the spec's size limits.

    Raises:
        EventSizeError: if any limit is exceeded.
    """
    def reject(field):
        raise EventSizeError("%s too large" % (field,))

    if len(event.user_id) > 255:
        reject("user_id")
    if len(event.room_id) > 255:
        reject("room_id")
    if event.is_state() and len(event.state_key) > 255:
        reject("state_key")
    if len(event.type) > 255:
        reject("type")
    if len(event.event_id) > 255:
        reject("event_id")
    # The whole event, serialised as canonical JSON, is capped at 64 KiB.
    if len(encode_canonical_json(event.get_pdu_json())) > 65536:
        reject("event")
def _can_federate(event, auth_events):
    """Return True unless the room's create event set "m.federate" to False."""
    create_event = auth_events.get((EventTypes.Create, ""))
    return create_event.content.get("m.federate", True) is True
def _is_membership_change_allowed(event, auth_events):
    """Check that an m.room.member event is allowed by the auth rules.

    Args:
        event: the membership event being checked. Its state_key names the
            user whose membership is changing ("the target").
        auth_events (dict: event-key -> event): the existing room state.

    Raises:
        AuthError: if the membership change is not allowed.
    """
    membership = event.content["membership"]

    # Check if this is the room creator joining:
    if len(event.prev_events) == 1 and Membership.JOIN == membership:
        # Get room creation event:
        key = (EventTypes.Create, "", )
        create = auth_events.get(key)
        if create and event.prev_events[0][0] == create.event_id:
            if create.content["creator"] == event.state_key:
                # The creator's first join is always allowed.
                return

    target_user_id = event.state_key

    # A remote target is only acceptable if the room federates.
    creating_domain = get_domain_from_id(event.room_id)
    target_domain = get_domain_from_id(target_user_id)
    if creating_domain != target_domain:
        if not _can_federate(event, auth_events):
            raise AuthError(
                403,
                "This room has been marked as unfederatable."
            )

    # get info about the caller
    key = (EventTypes.Member, event.user_id, )
    caller = auth_events.get(key)

    caller_in_room = caller and caller.membership == Membership.JOIN
    caller_invited = caller and caller.membership == Membership.INVITE

    # get info about the target
    key = (EventTypes.Member, target_user_id, )
    target = auth_events.get(key)

    target_in_room = target and target.membership == Membership.JOIN
    target_banned = target and target.membership == Membership.BAN

    key = (EventTypes.JoinRules, "", )
    join_rule_event = auth_events.get(key)
    if join_rule_event:
        join_rule = join_rule_event.content.get(
            "join_rule", JoinRules.INVITE
        )
    else:
        join_rule = JoinRules.INVITE

    user_level = get_user_power_level(event.user_id, auth_events)
    target_level = get_user_power_level(
        target_user_id, auth_events
    )

    # FIXME (erikj): What should we do here as the default?
    ban_level = _get_named_level(auth_events, "ban", 50)

    logger.debug(
        "_is_membership_change_allowed: %s",
        {
            "caller_in_room": caller_in_room,
            "caller_invited": caller_invited,
            "target_banned": target_banned,
            "target_in_room": target_in_room,
            "membership": membership,
            "join_rule": join_rule,
            "target_user_id": target_user_id,
            "event.user_id": event.user_id,
        }
    )

    if Membership.INVITE == membership and "third_party_invite" in event.content:
        # 3pid invites bypass the normal invite checks, but the signed token
        # must verify against the original m.room.third_party_invite event.
        if not _verify_third_party_invite(event, auth_events):
            raise AuthError(403, "You are not invited to this room.")
        if target_banned:
            raise AuthError(
                403, "%s is banned from the room" % (target_user_id,)
            )
        return

    if Membership.JOIN != membership:
        # Anyone may reject their own invite by leaving.
        if (caller_invited
                and Membership.LEAVE == membership
                and target_user_id == event.user_id):
            return

        if not caller_in_room:  # caller isn't joined
            raise AuthError(
                403,
                "%s not in room %s." % (event.user_id, event.room_id,)
            )

    if Membership.INVITE == membership:
        # TODO (erikj): We should probably handle this more intelligently
        # PRIVATE join rules.

        # Invites are valid iff caller is in the room and target isn't.
        if target_banned:
            raise AuthError(
                403, "%s is banned from the room" % (target_user_id,)
            )
        elif target_in_room:  # the target is already in the room.
            raise AuthError(403, "%s is already in the room." %
                                 target_user_id)
        else:
            invite_level = _get_named_level(auth_events, "invite", 0)

            if user_level < invite_level:
                raise AuthError(
                    403, "You cannot invite user %s." % target_user_id
                )
    elif Membership.JOIN == membership:
        # Joins are valid iff caller == target and they were:
        # invited: They are accepting the invitation
        # joined: It's a NOOP
        if event.user_id != target_user_id:
            raise AuthError(403, "Cannot force another user to join.")
        elif target_banned:
            raise AuthError(403, "You are banned from this room")
        elif join_rule == JoinRules.PUBLIC:
            pass
        elif join_rule == JoinRules.INVITE:
            if not caller_in_room and not caller_invited:
                raise AuthError(403, "You are not invited to this room.")
        else:
            # TODO (erikj): may_join list
            # TODO (erikj): private rooms
            raise AuthError(403, "You are not allowed to join this room")
    elif Membership.LEAVE == membership:
        # TODO (erikj): Implement kicks.
        if target_banned and user_level < ban_level:
            # Unbanning (ban -> leave) requires the ban power level.
            raise AuthError(
                403, "You cannot unban user %s." % (target_user_id,)
            )
        elif target_user_id != event.user_id:
            # Kicking requires the kick level and outranking the target.
            kick_level = _get_named_level(auth_events, "kick", 50)

            if user_level < kick_level or user_level <= target_level:
                raise AuthError(
                    403, "You cannot kick user %s." % target_user_id
                )
    elif Membership.BAN == membership:
        if user_level < ban_level or user_level <= target_level:
            raise AuthError(403, "You don't have permission to ban")
    else:
        raise AuthError(500, "Unknown membership %s" % membership)
def _check_event_sender_in_room(event, auth_events):
    """Check that the event's sender is currently joined to the room.

    Raises:
        AuthError: if the sender is not in the room.
    """
    member_event = auth_events.get((EventTypes.Member, event.user_id, ))
    return _check_joined_room(member_event, event.user_id, event.room_id)
def _check_joined_room(member, user_id, room_id):
    """Raise AuthError unless `member` shows `user_id` joined to `room_id`."""
    if not member or member.membership != Membership.JOIN:
        raise AuthError(403, "User %s not in room %s (%s)" % (
            user_id, room_id, repr(member)
        ))
def get_send_level(etype, state_key, power_levels_event):
    """Get the power level required to send an event of a given type

    The federation spec [1] refers to this as "Required Power Level".

    https://matrix.org/docs/spec/server_server/unstable.html#definitions

    Args:
        etype (str): type of event
        state_key (str|None): state_key of state event, or None if it is not
            a state event.
        power_levels_event (synapse.events.EventBase|None): power levels event
            in force at this point in the room
    Returns:
        int: power level required to send this event.
    """
    content = power_levels_event.content if power_levels_event else {}

    # A per-event-type override takes precedence over the defaults.
    level = content.get("events", {}).get(etype)

    if level is None:
        # No override: state events fall back to state_default, everything
        # else to events_default.
        if state_key is not None:
            level = content.get("state_default", 50)
        else:
            level = content.get("events_default", 0)

    return int(level)
def _can_send_event(event, auth_events):
    """Check that the sender's power level allows them to send this event.

    Raises:
        AuthError: if the sender's level is below the required send level, or
            if they set state keyed on another user's id.
    """
    power_levels_event = _get_power_level_event(auth_events)

    send_level = get_send_level(
        event.type, event.get("state_key"), power_levels_event,
    )
    user_level = get_user_power_level(event.user_id, auth_events)

    if user_level < send_level:
        raise AuthError(
            403,
            "You don't have permission to post that to the room. " +
            "user_level (%d) < send_level (%d)" % (user_level, send_level)
        )

    # A user may only set state keyed on a user id if it is their own id.
    if hasattr(event, "state_key") and event.state_key.startswith("@"):
        if event.state_key != event.user_id:
            raise AuthError(
                403,
                "You are not allowed to set others state"
            )

    return True
def check_redaction(event, auth_events):
    """Check whether the event sender is allowed to redact the target event.

    Returns:
        True if the sender is allowed to redact the target event only if the
        target event was created by them.
        False if the sender is allowed to redact the target event with no
        further checks.
    Raises:
        AuthError if the event sender is definitely not allowed to redact
        the target event.
    """
    user_level = get_user_power_level(event.user_id, auth_events)
    redact_level = _get_named_level(auth_events, "redact", 50)
    # Sender at or above the room's redact level may redact anything.
    if user_level >= redact_level:
        return False
    # Same-origin redactions need a further "created by them" check upstream.
    redacter_domain = get_domain_from_id(event.event_id)
    redactee_domain = get_domain_from_id(event.redacts)
    if redacter_domain == redactee_domain:
        return True
    raise AuthError(
        403,
        "You don't have permission to redact events"
    )
def _check_power_levels(event, auth_events):
    """Check that the sender may make the changes in a power-levels event.

    First validates that every entry in the event's "users" dict maps a
    well-formed user id to an integer level. Then, if a power-levels event
    is already in force, compares each changed level against the sender's
    own power level.

    Args:
        event: the new m.room.power_levels event being authed.
        auth_events (dict[(str, str), synapse.events.EventBase]): state in
            force at this point in the room.

    Raises:
        SynapseError: if the event content is malformed.
        AuthError: if the sender lacks permission for one of the changes.
    """
    user_list = event.content.get("users", {})
    # Validate users
    for k, v in user_list.items():
        try:
            UserID.from_string(k)
        except Exception:
            raise SynapseError(400, "Not a valid user_id: %s" % (k,))
        try:
            int(v)
        except Exception:
            raise SynapseError(400, "Not a valid power level: %s" % (v,))
    key = (event.type, event.state_key, )
    current_state = auth_events.get(key)
    # No power-levels event in force yet: nothing to compare against.
    if not current_state:
        return
    user_level = get_user_power_level(event.user_id, auth_events)
    # Check other levels:
    levels_to_check = [
        ("users_default", None),
        ("events_default", None),
        ("state_default", None),
        ("ban", None),
        ("redact", None),
        ("kick", None),
        ("invite", None),
    ]
    # Every user mentioned in either the old or the new "users" dict.
    old_list = current_state.content.get("users", {})
    for user in set(list(old_list) + list(user_list)):
        levels_to_check.append(
            (user, "users")
        )
    # Every event type mentioned in either the old or new "events" dict.
    old_list = current_state.content.get("events", {})
    new_list = event.content.get("events", {})
    for ev_id in set(list(old_list) + list(new_list)):
        levels_to_check.append(
            (ev_id, "events")
        )
    old_state = current_state.content
    new_state = event.content
    # `level_dir` is the sub-dict ("users"/"events") holding the level, or
    # None for top-level keys.  (Renamed from `dir`, which shadowed the
    # builtin.)
    for level_to_check, level_dir in levels_to_check:
        old_loc = old_state
        new_loc = new_state
        if level_dir:
            old_loc = old_loc.get(level_dir, {})
            new_loc = new_loc.get(level_dir, {})
        if level_to_check in old_loc:
            old_level = int(old_loc[level_to_check])
        else:
            old_level = None
        if level_to_check in new_loc:
            new_level = int(new_loc[level_to_check])
        else:
            new_level = None
        # Unchanged levels need no permission check.
        if new_level is not None and old_level is not None:
            if new_level == old_level:
                continue
        # May not demote another user whose level equals your own.
        if level_dir == "users" and level_to_check != event.user_id:
            if old_level == user_level:
                raise AuthError(
                    403,
                    "You don't have permission to remove ops level equal "
                    "to your own"
                )
        # Check if the old and new levels are greater than the user level
        # (if defined)
        old_level_too_big = old_level is not None and old_level > user_level
        new_level_too_big = new_level is not None and new_level > user_level
        if old_level_too_big or new_level_too_big:
            raise AuthError(
                403,
                "You don't have permission to add ops level greater "
                "than your own"
            )
def _get_power_level_event(auth_events):
    """Return the room's power-levels event from *auth_events*, or None."""
    key = (EventTypes.PowerLevels, "")
    return auth_events.get(key)
def get_user_power_level(user_id, auth_events):
    """Get a user's power level

    Args:
        user_id (str): user's id to look up in power_levels
        auth_events (dict[(str, str), synapse.events.EventBase]):
            state in force at this point in the room (or rather, a subset of
            it including at least the create event and power levels event.

    Returns:
        int: the user's power level in this room.
    """
    power_level_event = _get_power_level_event(auth_events)
    if power_level_event:
        level = power_level_event.content.get("users", {}).get(user_id)
        if level is None:
            # No explicit entry for this user: fall back to the room default.
            # (Previously this used `if not level:`, which wrongly replaced
            # an explicitly assigned level of 0 with users_default.)
            level = power_level_event.content.get("users_default", 0)
        if level is None:
            return 0
        else:
            return int(level)
    else:
        # if there is no power levels event, the creator gets 100 and everyone
        # else gets 0.
        # some things which call this don't pass the create event: hack around
        # that.
        key = (EventTypes.Create, "", )
        create_event = auth_events.get(key)
        if (create_event is not None and
                create_event.content["creator"] == user_id):
            return 100
        else:
            return 0
def _get_named_level(auth_events, name, default):
    """Return int(power_levels.content[name]), or *default* if unavailable."""
    power_level_event = _get_power_level_event(auth_events)
    if not power_level_event:
        return default
    value = power_level_event.content.get(name, None)
    return default if value is None else int(value)
def _verify_third_party_invite(event, auth_events):
    """
    Validates that the invite event is authorized by a previous third-party
    invite.

    Checks that the public key, and keyserver, match those in the third party
    invite, and that the invite event has a signature issued using that
    public key.

    Args:
        event: The m.room.member join event being validated.
        auth_events: All relevant previous context events which may be used
            for authorization decisions.

    Return:
        True if the event fulfills the expectations of a previous third party
        invite event.
    """
    # The membership event must carry the signed third-party-invite bundle.
    if "third_party_invite" not in event.content:
        return False
    if "signed" not in event.content["third_party_invite"]:
        return False
    signed = event.content["third_party_invite"]["signed"]
    for key in {"mxid", "token"}:
        if key not in signed:
            return False
    token = signed["token"]
    # Look up the original m.room.third_party_invite event by its token.
    invite_event = auth_events.get(
        (EventTypes.ThirdPartyInvite, token,)
    )
    if not invite_event:
        return False
    # The inviter and invitee recorded in the bundle must match this event.
    if invite_event.sender != event.sender:
        return False
    if event.user_id != invite_event.user_id:
        return False
    if signed["mxid"] != event.state_key:
        return False
    if signed["token"] != token:
        return False
    # Try each public key advertised by the invite until one verifies the
    # signature on the signed bundle.
    for public_key_object in get_public_keys(invite_event):
        public_key = public_key_object["public_key"]
        try:
            for server, signature_block in signed["signatures"].items():
                for key_name, encoded_signature in signature_block.items():
                    # Only ed25519 keys are supported.
                    if not key_name.startswith("ed25519:"):
                        continue
                    verify_key = decode_verify_key_bytes(
                        key_name,
                        decode_base64(public_key)
                    )
                    verify_signed_json(signed, server, verify_key)
                    # We got the public key from the invite, so we know that the
                    # correct server signed the signed bundle.
                    # The caller is responsible for checking that the signing
                    # server has not revoked that public key.
                    return True
        except (KeyError, SignatureVerifyException,):
            # Malformed signature block or bad signature: try the next key.
            continue
    return False
def get_public_keys(invite_event):
    """Collect the public key objects advertised by a third-party invite.

    The legacy single "public_key" field (with its optional
    "key_validity_url") is returned first, followed by any entries in the
    newer "public_keys" list.
    """
    content = invite_event.content
    keys = []
    if "public_key" in content:
        entry = {
            "public_key": content["public_key"],
        }
        if "key_validity_url" in content:
            entry["key_validity_url"] = content["key_validity_url"]
        keys.append(entry)
    keys.extend(content.get("public_keys", []))
    return keys
def auth_types_for_event(event):
    """Given an event, return a list of (EventType, StateKey) that may be
    needed to auth the event. The returned list may be a superset of what
    would actually be required depending on the full state of the room.

    Used to limit the number of events to fetch from the database to
    actually auth the event.
    """
    # The create event auths itself.
    if event.type == EventTypes.Create:
        return []
    auth_types = [
        (EventTypes.PowerLevels, "", ),
        (EventTypes.Member, event.user_id, ),
        (EventTypes.Create, "", ),
    ]
    if event.type != EventTypes.Member:
        return auth_types
    # Membership events may additionally need join rules, the target's
    # membership, and (for invites) the third-party-invite event.
    membership = event.content["membership"]
    if membership in (Membership.JOIN, Membership.INVITE):
        auth_types.append((EventTypes.JoinRules, "", ))
        auth_types.append((EventTypes.Member, event.state_key, ))
    if membership == Membership.INVITE and "third_party_invite" in event.content:
        auth_types.append((
            EventTypes.ThirdPartyInvite,
            event.content["third_party_invite"]["signed"]["token"],
        ))
    return auth_types
| 33.506365 | 85 | 0.621428 |
3d909e30f9a6eb605e50a4baffed001cbb473cf3 | 6,813 | py | Python | bin/dynatrace_dcrum_cas.py | Dynatrace/DCRUM-Splunk-Application | ae6f5f766750bfc56d2c31d75256320341b50f35 | [
"BSD-3-Clause"
] | 2 | 2016-06-20T02:02:34.000Z | 2021-12-15T12:07:51.000Z | bin/dynatrace_dcrum_cas.py | Dynatrace/DCRUM-Splunk-Application | ae6f5f766750bfc56d2c31d75256320341b50f35 | [
"BSD-3-Clause"
] | null | null | null | bin/dynatrace_dcrum_cas.py | Dynatrace/DCRUM-Splunk-Application | ae6f5f766750bfc56d2c31d75256320341b50f35 | [
"BSD-3-Clause"
] | 2 | 2020-01-20T04:36:55.000Z | 2021-03-24T08:00:11.000Z | #!/usr/bin/env python
#
# Copyright 2013 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import logging
import logging.handlers
from dynatrace.dcrum import splunk
from splunklib.modularinput import *
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
def isgoodipv4(s):
    """Return True if *s* is a dotted-quad IPv4 address (four ints 0-255)."""
    parts = s.split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        try:
            value = int(part)
        except ValueError:
            # Non-numeric octet.
            return False
        if not 0 <= value < 256:
            return False
    return True
class MyScript(Script):
    """Splunk modular input that streams events from Dynatrace DC-RUM CAS
    hosts (one CASProcessor per configured input stanza)."""
    def get_scheme(self):
        """Describe this modular input's arguments for the Splunk UI."""
        scheme = Scheme("Dynatrace DC-RUM CAS Input")
        scheme.description = "Streams events from Dynatrace DC-RUM CAS's."
        scheme.use_external_validation = True
        scheme.use_single_instance = True
        ip_argument = Argument("ip")
        ip_argument.data_type = Argument.data_type_string
        ip_argument.description = "CAS's IP address"
        ip_argument.required_on_create = True
        scheme.add_argument(ip_argument)
        port_argument = Argument("port")
        port_argument.data_type = Argument.data_type_number
        port_argument.description = "CAS's TCP port number"
        port_argument.required_on_create = True
        scheme.add_argument(port_argument)
        secure_argument = Argument("secure")
        secure_argument.data_type = Argument.data_type_boolean
        secure_argument.description = "Is connection over SSL (0|1)?"
        secure_argument.required_on_create = True
        scheme.add_argument(secure_argument)
        login_argument = Argument("login")
        login_argument.data_type = Argument.data_type_string
        login_argument.description = "Login to CAS"
        login_argument.required_on_create = False
        scheme.add_argument(login_argument)
        password_argument = Argument("password")
        password_argument.data_type = Argument.data_type_string
        password_argument.description = "And corresponding password"
        password_argument.required_on_create = False
        scheme.add_argument(password_argument)
        defaultReport_argument = Argument("defaultReport")
        defaultReport_argument.data_type = Argument.data_type_boolean
        defaultReport_argument.description ="Enable default report?"
        defaultReport_argument.required_on_create = True
        scheme.add_argument(defaultReport_argument)
        customReports_argument = Argument("customReports")
        customReports_argument.data_type = Argument.data_type_boolean
        customReports_argument.description ="Enable custom reports?"
        customReports_argument.required_on_create = True
        scheme.add_argument(customReports_argument)
        custom_reports_list_argument = Argument("customReportsList")
        custom_reports_list_argument.data_type = Argument.data_type_string
        custom_reports_list_argument.description ="Enter report names (separated by comma)"
        custom_reports_list_argument.required_on_create = False
        scheme.add_argument(custom_reports_list_argument)
        return scheme
    def validate_input(self, validation_definition):
        """Validate user-supplied parameters before the input is accepted.

        Raises ValueError with a user-facing message on bad values.  Note the
        try/except pattern below also re-raises our own ValueError with the
        same message, which keeps int() parse failures and range failures
        reporting identically.
        """
        ip = validation_definition.parameters["ip"]
        # if not isgoodipv4(ip): raise ValueError("%s is not valid IP address" % ip)
        port = validation_definition.parameters["port"]
        try:
            if not 0<=int(port)<65000: raise ValueError("%s is not valid TCP port number" % port)
        except ValueError: raise ValueError("%s is not valid TCP port number" % port)
        secure = validation_definition.parameters["secure"]
        try:
            if not 0<=int(secure)<=1: raise ValueError("\"secure\" flag should be 0 or 1")
        except ValueError: raise ValueError("\"secure\" flag should be 0 or 1 (%s)" % secure)
        defaultReport = validation_definition.parameters["defaultReport"]
        try:
            if not 0<=int(defaultReport)<=1: raise ValueError("\"defaultReport\" flag should be 0 or 1")
        # Bug fix: this message previously interpolated `secure` by mistake.
        except ValueError: raise ValueError("\"defaultReport\" flag should be 0 or 1 (%s)" % defaultReport)
        customReports = validation_definition.parameters["customReports"]
        try:
            if not 0<=int(customReports)<=1: raise ValueError("\"customReports\" flag should be 0 or 1")
        # Bug fix: this message previously interpolated `secure` by mistake.
        except ValueError: raise ValueError("\"customReports\" flag should be 0 or 1 (%s)" % customReports)
    def stream_events(self, inputs, ew):
        """Build one CASProcessor per configured stanza and run them all."""
        self.create_logger()
        manager = splunk.CASManager()
        for input_name, input_item in inputs.inputs.iteritems():
            ip = input_item["ip"]
            sec = bool(int(input_item["secure"]))
            port = int(input_item["port"])
            login = input_item["login"]
            password = input_item["password"]
            defaultReportVal = "Splunk_Analysis_by_Tier"
            defaultReport = bool(int(input_item["defaultReport"]))
            customReports = bool(int(input_item["customReports"]))
            reportsList = []
            if customReports:
                customReportsList = input_item["customReportsList"]
                # Splitting reports by ;
                reportsList = customReportsList.split(";")
            if defaultReport:
                reportsList.append(defaultReportVal)
            # Make list distinct
            reportsList = list(set(reportsList))
            cas = splunk.CASProcessor(input_name, ip, port, sec, login, password, reportsList)
            cas.use_with_splunk(ew)
            manager.add_cas(cas)
        manager.run()
    def create_logger(self):
        """Configure the 'dcrum.cas.input' logger with a rotating file
        handler (processor.log next to this script) and a console handler."""
        fmt = '%(levelname)s %(asctime)-15s %(message)s'
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "processor.log")
        logger = logging.getLogger('dcrum.cas.input')
        logger.setLevel(logging.INFO)
        # Rotate at ~10 MB, keeping 20 backups.
        file_handler = logging.handlers.RotatingFileHandler(path, 10485760, 20)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter(fmt))
        logger.addHandler(file_handler)
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter(fmt))
        logger.addHandler(console)
# Script entry point: hand control to the Splunk modular-input framework and
# exit with its status code.
if __name__ == "__main__":
    sys.exit(MyScript().run(sys.argv))
| 38.710227 | 104 | 0.678702 |
371cc22025ebd61e5209d2547aa8d0bfc4d1a488 | 910 | py | Python | packages/std/nodes/std___Button0/widgets/std___Button0___main_widget___METACODE.py | lidong1266/Ryven-Switch | 68d1f71e81d6564196f44ca49d5903f06db6a4d9 | [
"MIT"
] | 18 | 2021-01-18T09:52:41.000Z | 2022-03-22T10:48:44.000Z | packages/std/nodes/std___Button0/widgets/std___Button0___main_widget___METACODE.py | xamofb-xsk/Ryven | 8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1 | [
"MIT"
] | null | null | null | packages/std/nodes/std___Button0/widgets/std___Button0___main_widget___METACODE.py | xamofb-xsk/Ryven | 8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1 | [
"MIT"
] | 3 | 2021-01-18T09:49:42.000Z | 2022-03-22T10:48:47.000Z | from NIWENV import *
# from PySide2.QtWidgets import ...
# from PySide2.QtCore import ...
# from PySide2.QtGui import ...
from PySide2.QtWidgets import QPushButton
# NOTE: %CLASS% is a template placeholder — Ryven's code generation
# substitutes the real widget class name, so this file is metacode and is
# not importable as-is.
class %CLASS%(QPushButton, MWB):
    def __init__(self, params):
        MWB.__init__(self, params)
        QPushButton.__init__(self)
        # Dark flat-button styling with a highlight while pressed.
        self.setStyleSheet('''
            QPushButton {
                background-color: #36383B;
                padding-top: 5px;
                padding-bottom: 5px;
                padding-left: 22px;
                padding-right: 22px;
                border: 1px solid #666666;
                border-radius: 5px;
            }
            QPushButton:pressed {
                background-color: #bcbbf2;
            }
        ''')
        # Forward button clicks to the owning node instance (via the M wrapper).
        self.clicked.connect(M(self.parent_node_instance.button_clicked))
    def get_data(self):
        # No persistent widget state to serialize.
        return {}
    def set_data(self, data):
        # Nothing to restore.
        pass
    def remove_event(self):
        # No cleanup needed when the node is removed.
        pass
| 22.195122 | 73 | 0.571429 |
84ca4ba9ef3eb1880cc9368c0b3eebf69485c7c5 | 4,552 | py | Python | spammsand/water_to_duals/plot_z_15_water.py | FreeON/spammpack | fdb1cd8be99078e75dc627b00247f7c017c9579c | [
"BSD-3-Clause"
] | 2 | 2021-07-14T20:27:25.000Z | 2021-09-29T03:03:00.000Z | spammsand/water_to_duals/plot_z_15_water.py | FreeON/spammpack | fdb1cd8be99078e75dc627b00247f7c017c9579c | [
"BSD-3-Clause"
] | 1 | 2015-07-27T04:37:16.000Z | 2015-07-27T15:11:04.000Z | spammsand/water_to_duals/plot_z_15_water.py | FreeON/spammpack | fdb1cd8be99078e75dc627b00247f7c017c9579c | [
"BSD-3-Clause"
] | 3 | 2015-08-19T22:52:57.000Z | 2021-09-29T03:02:59.000Z | # Recorded script from Mayavi2
from numpy import array
# Reuse the engine provided by the mayavi2 UI when run as a recorded script;
# otherwise create and start a standalone Engine.
try:
    engine = mayavi.engine
except NameError:
    from mayavi.api import Engine
    engine = Engine()
    engine.start()
# Make sure there is at least one scene to draw into.
if len(engine.scenes) == 0:
    engine.new_scene()
# -------------------------------------------
from mayavi.modules.iso_surface import IsoSurface
#vtk_file_reader2 = engine.open(u'/home/m/spammsand/spammsand/water_to_duals/z_15.vtk')
vtk_file_reader2 = engine.open(u'/home/matcha/Desktop/RESEARCH/spammsand_may_21_2015/spammsand/water_to_duals/z_14.vtk')
iso_surface2 = IsoSurface()
engine.add_module(iso_surface2, obj=None)
iso_surface2.actor.mapper.scalar_mode = 'use_field_data'
#iso_surface2.actor.property.specular_color = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
#iso_surface2.actor.property.diffuse_color = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
#iso_surface2.actor.property.ambient_color = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
#iso_surface2.actor.property.color = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
iso_surface2.actor.property.specular_color = (1.0, 1.0, 1.0)
iso_surface2.actor.property.diffuse_color = (1.0, 1.0, 1.0)
iso_surface2.actor.property.ambient_color = (1.0, 1.0, 1.0)
iso_surface2.actor.property.color = (1.0, 1.0, 1.0)
iso_surface2.actor.property.opacity = 0.3
iso_surface2.contour.contours[0:1] = [0.01]
scene = engine.scenes[0]
#from mayavi.modules.axes import Axes
#axes = Axes()
#engine.add_module(axes, obj=None)
from mayavi.modules.outline import Outline
outline1 = Outline()
engine.add_module(outline1, obj=None)
outline1.actor.mapper.scalar_range = array([ 0., 1.])
outline1.outline_mode = 'full'
outline1.actor.property.specular_color = (0.0, 0.0, 0.0)
outline1.actor.property.diffuse_color = (0.0, 0.0, 0.0)
outline1.actor.property.ambient_color = (0.0, 0.0, 0.0)
outline1.actor.property.color = (0.0, 0.0, 0.0)
outline1.actor.property.line_width = 4.
outline1.actor.property.line_width = 4.
#scene.scene.background = (0.7529411764705882, 0.7529411764705882, 0.7529411764705882)
scene.scene.background = (1.0, 1.0, 1.0)
scene.scene.jpeg_quality = 100
# Label the axes i/j/k (matrix indices) with no numeric tick labels.
from mayavi.modules.axes import Axes
axes = Axes()
engine.add_module(axes, obj=None)
axes.axes.x_label = 'i'
axes.axes.y_label = 'j'
axes.axes.z_label = 'k'
axes.axes.label_format = ''
axes.property.display_location = 'background'
scene.scene.isometric_view()
# Lighting: magenta key light (light 0) and green fill light (light 3);
# lights 1 and 2 are switched off.
camera_light = engine.scenes[0].scene.light_manager.lights[0]
camera_light.activate = True
camera_light.azimuth = -20.0
camera_light.elevation = 35.0
camera_light.color = (1.0, 0.0, 1.0)
camera_light1 = engine.scenes[0].scene.light_manager.lights[1]
camera_light1.activate = False
camera_light2= engine.scenes[0].scene.light_manager.lights[2]
camera_light2.activate = False
camera_light3 = engine.scenes[0].scene.light_manager.lights[3]
camera_light3.activate = True
camera_light3.elevation = -2.0
camera_light3.azimuth = -10.0
camera_light3.color = (0.0, 1.0, 0.0)
# First viewpoint: render and save scene 1.
scene.scene.camera.position = [16403.433503518248, 16131.551555667136, 24041.649958155223]
scene.scene.camera.focal_point = [4609.0, 4609.0, 4609.0]
scene.scene.camera.view_angle = 30.0
scene.scene.camera.view_up = [-0.53662464383091613, -0.54176353604187599, 0.64693605761987416]
scene.scene.camera.clipping_range = [9848.773230415045, 45230.169453467308]
# Alternate camera settings from an earlier run, kept for reference.
#scene.scene.camera.position = [5760.0510962263688, 8264.1602192001847, 8166.9237003172129]
#scene.scene.camera.focal_point = [1545.0000000000143, 1544.9999999999961, 1544.9999999999879]
#scene.scene.camera.view_angle = 30.0
#scene.scene.camera.view_up = [-0.31733327151558843, -0.55718686597616895, 0.7673606656409151]
#scene.scene.camera.clipping_range = [5008.7568762710653, 17059.25614172122]
scene.scene.camera.compute_view_plane_normal()
scene.scene.render()
scene.scene.save(u'/home/matcha/Desktop/RESEARCH/spammsand_may_21_2015/spammsand/stabilized_paper_1/z_water_to_duals_scn1.png')
#!,size=(1024,1024))
# Second viewpoint: render and save scene 2.
scene.scene.camera.position = [5138.678723236244, 5354.3588072568082, 30077.993327509143]
scene.scene.camera.focal_point = [4609.0, 4609.0, 4609.0]
scene.scene.camera.view_angle = 30.0
scene.scene.camera.view_up = [-0.72145710637153859, -0.69156169586610861, 0.035242935133142715]
scene.scene.camera.clipping_range = [15607.760545286075, 37980.790949069298]
scene.scene.camera.compute_view_plane_normal()
scene.scene.render()
scene.scene.save(u'/home/matcha/Desktop/RESEARCH/spammsand_may_21_2015/spammsand/stabilized_paper_1/z_water_to_duals_scn2.png')
| 37.933333 | 127 | 0.773506 |
0adde8d08724793173dcbdcb409fa1b0e0393bde | 2,312 | py | Python | uno_ai/cards.py | unixpickle/uno-ai | 3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7 | [
"BSD-2-Clause"
] | 21 | 2019-02-08T06:30:43.000Z | 2021-05-02T18:57:15.000Z | uno_ai/cards.py | unixpickle/uno-ai | 3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7 | [
"BSD-2-Clause"
] | 1 | 2019-02-24T10:53:27.000Z | 2019-08-06T15:27:58.000Z | uno_ai/cards.py | unixpickle/uno-ai | 3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7 | [
"BSD-2-Clause"
] | 3 | 2019-02-08T01:47:48.000Z | 2019-08-08T22:00:15.000Z | from enum import Enum
CARD_VEC_SIZE = 10 + 6 + 4
def full_deck():
"""
Create a complete Uno deck.
"""
deck = []
for color in [Color.RED, Color.ORANGE, Color.GREEN, Color.BLUE]:
for _ in range(2):
for number in range(1, 10):
deck.append(Card(CardType.NUMERAL, color=color, number=number))
for card_type in [CardType.SKIP, CardType.REVERSE, CardType.DRAW_TWO]:
deck.append(Card(card_type, color=color))
deck.append(Card(CardType.NUMERAL, color=color, number=0))
deck.append(Card(CardType.WILD))
deck.append(Card(CardType.WILD_DRAW))
return deck
class CardType(Enum):
    """
    The type of a card.
    """
    NUMERAL = 0     # numbered card (Card.number holds the digit)
    SKIP = 1
    REVERSE = 2
    DRAW_TWO = 3
    WILD = 4
    WILD_DRAW = 5   # wild +4
class Color(Enum):
    """
    The color of a card.
    """
    RED = 0
    ORANGE = 1
    GREEN = 2
    BLUE = 3
    def __str__(self):
        # Lowercase member name, e.g. Color.RED -> "red" (used by Card.__str__).
        return self.name.lower()
class Card:
    """
    A card in the deck.
    """
    def __init__(self, card_type, color=None, number=None):
        # Wild cards may carry no color; only numerals carry a number.
        self.card_type = card_type
        self.color = color
        self.number = number
    def vector(self):
        """
        Convert the card into a vector.
        """
        # One-hot slots: [0:10] number, [10:14] color, [14:20] card type.
        vec = [0.0] * CARD_VEC_SIZE
        hot_indices = [14 + self.card_type.value]
        if self.number is not None:
            hot_indices.append(self.number)
        if self.color is not None:
            hot_indices.append(10 + self.color.value)
        for idx in hot_indices:
            vec[idx] = 1.0
        return vec
    def __str__(self):
        named_actions = {
            CardType.SKIP: 'skip',
            CardType.REVERSE: 'reverse',
            CardType.DRAW_TWO: 'draw two',
        }
        if self.card_type == CardType.NUMERAL:
            return '%s %d' % (self.color, self.number)
        if self.card_type in named_actions:
            return '%s %s' % (self.color, named_actions[self.card_type])
        if self.card_type == CardType.WILD:
            label = 'wild card'
        elif self.card_type == CardType.WILD_DRAW:
            label = 'wild +4'
        else:
            raise RuntimeError('unknown type')
        # Wilds show their chosen color only once one has been assigned.
        return label if self.color is None else '%s (%s)' % (label, self.color)
89bfe13a04d4cb952a34b12e7f84e6616bfa60b6 | 2,425 | py | Python | scripts/md_table_generator.py | hackjustu/Project-Markdown-Table-Generator | db82430eca7bb0fe892e2325fea708b992d4db65 | [
"MIT"
] | 1 | 2017-01-17T04:00:21.000Z | 2017-01-17T04:00:21.000Z | scripts/md_table_generator.py | hackjustu/Project-Markdown-Table-Generator | db82430eca7bb0fe892e2325fea708b992d4db65 | [
"MIT"
] | null | null | null | scripts/md_table_generator.py | hackjustu/Project-Markdown-Table-Generator | db82430eca7bb0fe892e2325fea708b992d4db65 | [
"MIT"
] | 1 | 2016-11-14T11:26:13.000Z | 2016-11-14T11:26:13.000Z | #!/usr/bin/python
import os
import re
import sys
import json
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir + "/lib")
import markdown2
# Compose a markdown inline-style link
def compose_markdown_link(name, url):
    # "[name](url)" is markdown's inline-link syntax.
    return "[{0}]({1})".format(name, url)
# Compose a group of html image tags
def compose_html_image_tags(stack):
    # One <img> per technology icon; each tag keeps its trailing space.
    return "".join(
        "<img src='./resource/icons/" + tech + ".png' height='35px'> "
        for tech in stack
    )
# ----------- Starts here -----------
# NOTE: this script uses Python 2 syntax (print statements, dict.has_key,
# dict.iteritems) and must be run under Python 2.
with open(current_dir + '/projects.json') as data_file:
    projects = json.load(data_file)
if projects['projects'] == []:
    print 'No Data!'
    sys.exit()
project_categories = ["full stack", "mobile", "big data"]
# markdown2 module requires no spaces between '|' and '\n' each line
base_table = "| Projects | Teams | Description |Stacks |\n" + \
             "| :-------------: |:-------------:| :----: |:-----:|\n"
# One table per category, each starting from the shared header.
tables = {}
for category in project_categories:
    tables[category] = base_table
# Append one markdown row per project to every category table it belongs to.
for project in projects['projects']:
    categories = project['category']
    project_with_url = compose_markdown_link(project['name'], project['project_url'])
    tech_stacks = compose_html_image_tags(project['stack'])
    row = "|" + project_with_url + \
          "|" + project['team'] + \
          "|" + project['description'] + \
          "|" + tech_stacks + \
          "|" + '\n'
    for category in categories:
        # Unknown categories (not in project_categories) are silently skipped.
        if tables.has_key(category):
            tables[category] = tables[category] + row
output_files = []
md_file_name = "OutStandingProjects.md"
md_file = open(current_dir + "/../" + md_file_name, 'w')
# Emit each non-empty table as a markdown section plus a standalone HTML file.
for key, value in tables.iteritems():
    if (value != base_table):
        # generate markdown table
        md_file.write("## " + key.upper() + "\n\n")
        md_file.write(value.encode('utf8') + "\n\n")
        # generate html table
        html_file_name = key.replace (" ", "_") + ".html"
        html_file = open(current_dir + "/../" + html_file_name, 'w')
        html_table = markdown2.markdown(value.encode('utf8'), extras=["tables"])
        html_file.write(html_table.encode('utf8'))
        html_file.close()
        output_files.append(html_file_name)
md_file.close()
output_files.append(md_file_name)
print "** Please check out the output files:"
for file in output_files:
    print "- " + file
| 28.197674 | 85 | 0.613196 |
f45e7125488d3d377dab69d1efbd3971dcd0c4ad | 1,018 | py | Python | paper_util_scripts/segmentation.py | ehsanasgari/dimotif | 6a969edc5324a48eee359d8137ec0e410bf78dd7 | [
"Apache-2.0"
] | 16 | 2018-08-13T11:15:11.000Z | 2022-01-01T13:11:44.000Z | paper_util_scripts/segmentation.py | ehsanasgari/dimotif | 6a969edc5324a48eee359d8137ec0e410bf78dd7 | [
"Apache-2.0"
] | 1 | 2018-11-02T15:39:29.000Z | 2018-11-16T10:59:31.000Z | paper_util_scripts/segmentation.py | ehsanasgari/dimotif | 6a969edc5324a48eee359d8137ec0e410bf78dd7 | [
"Apache-2.0"
] | 4 | 2019-04-13T07:44:30.000Z | 2021-03-16T01:53:36.000Z | import sys
sys.path.append('../')
from utility.file_utility import FileUtility
import collections
import pandas as pd
import tqdm
import itertools
import numpy as np
from make_representations.cpe_efficient import train_cpe
from multiprocessing import Pool
#############################################################
# Simple script for learning segmentation steps from a fasta file
# Output: the file containing merging steps (i.e., "path_to_mergings"),
# can be used instead of Swiss-Prot merging steps
#############################################################
# Inputs
seq_dict=FileUtility.read_fasta_sequences_ids('sequences.fasta')
max_symbols=10000  # passed to train_cpe as the merge/symbol budget — TODO confirm semantics
min_freq_for_merging=10  # forwarded as train_cpe's min_frequency
# Output
path_to_mergings='ppe_mergings.txt'
path_to_merging_freqs='ppe_freq.txt'
#############################################################
# Sort the sequence ids so the training corpus order is deterministic.
SID=list(seq_dict.keys())
SID.sort()
# Values appear to be indexable with the sequence at position 0 —
# TODO confirm against FileUtility.read_fasta_sequences_ids.
seqs=[seq_dict[seqID][0] for seqID in SID]
train_cpe(seqs,path_to_mergings, max_symbols, path_to_merging_freqs, min_frequency=min_freq_for_merging)
| 30.848485 | 104 | 0.670923 |
0473deb0fb6fa7eef6656e107d8b215a6363d73b | 7,281 | py | Python | boss/api/deployment/preset/node.py | cham11ng/boss | 71e67cf2c4411787d319e2bd842fd93402aeaef3 | [
"MIT"
] | 25 | 2017-10-23T09:22:06.000Z | 2021-09-15T11:04:51.000Z | boss/api/deployment/preset/node.py | cham11ng/boss | 71e67cf2c4411787d319e2bd842fd93402aeaef3 | [
"MIT"
] | 37 | 2017-10-18T15:40:18.000Z | 2021-12-19T12:59:29.000Z | boss/api/deployment/preset/node.py | cham11ng/boss | 71e67cf2c4411787d319e2bd842fd93402aeaef3 | [
"MIT"
] | 17 | 2017-10-19T08:39:09.000Z | 2021-11-01T09:35:05.000Z | # -*- coding: utf-8 -*-
'''
Node Application Deployment preset.
This would be useful for deploying node js projects to the remote server.
Here the source is built locally and uploaded to the server, then the application service
is started on restarted on the remote server.
'''
from datetime import datetime
from fabric.api import task, cd
from boss.util import remote_info
from boss.api import shell, notif, runner, fs, git
from boss.config import get as get_config
from boss.core.output import halt, info
from boss.core.constants import known_scripts, notification_types
from .. import buildman
@task
def builds():
    ''' Display the build history. '''
    # Fetch the recorded build history and render it as a list.
    buildman.display_list(buildman.load_history())
@task
def rollback(id=None):
    ''' Zero-Downtime deployment rollback for the frontend. '''
    # NOTE(review): docstring says "frontend" but this preset deploys a node
    # backend service (cf. deploy()'s docstring) — likely copied; confirm.
    buildman.rollback(id)
    # Reload the service after build has been rollbacked.
    reload_service()
@task(alias='info')
def buildinfo(id=None):
    ''' Print the build information. '''
    # Delegate to buildman; id=None selects buildman.display's default build.
    buildman.display(id)
@task
def setup():
    ''' Setup remote host for deployment. '''
    # Run buildman's remote setup verbosely (quiet=False).
    buildman.setup_remote(quiet=False)
def upload_included_files(files, remote_path):
    ''' Upload the local files if they were to be included. '''
    for filename in files:
        # Only files that actually exist locally are pushed to the remote.
        if fs.exists(filename, remote=False):
            fs.upload(filename, remote_path)
@task
def build():
    ''' Build the code locally. '''
    # Resolve the configuration and target stage, then delegate to buildman.
    cfg = get_config()
    target_stage = shell.get_stage()
    buildman.build(target_stage, cfg)
@task
def deploy():
    ''' Zero-Downtime deployment for the backend.

    Builds locally, uploads the compressed build, extracts it into a new
    timestamped release directory, flips the `current` symlink, installs
    remote dependencies, starts/reloads the service and records the build
    in the history. Notifications are sent at start and finish.
    '''
    config = get_config()
    stage = shell.get_stage()
    # True only before the very first deployment (remote not yet set up).
    is_first_deployment = not buildman.is_remote_setup()
    branch = git.current_branch(remote=False)
    commit = git.last_commit(remote=False, short=True)
    info('Deploying <{branch}:{commit}> to the {stage} server'.format(
        branch=branch,
        commit=commit,
        stage=stage
    ))
    tmp_path = fs.get_temp_filename()
    build_dir = buildman.resolve_local_build_dir()
    included_files = config['deployment']['include_files']
    deployer_user = shell.get_user()
    notif.send(notification_types.DEPLOYMENT_STARTED, {
        'user': deployer_user,
        'commit': commit,
        'branch': branch,
        'stage': stage
    })
    runner.run_script_safely(known_scripts.PRE_DEPLOY)
    (release_dir, current_path) = buildman.setup_remote()
    # Build id doubles as the release directory's timestamp.
    timestamp = datetime.utcnow()
    build_id = timestamp.strftime('%Y%m%d%H%M%S')
    build_name = buildman.get_build_name(build_id)
    build_compressed = build_name + '.tar.gz'
    release_path = release_dir + '/' + build_name
    dist_path = build_name + '/dist'
    buildman.build(stage, config)
    info('Compressing the build')
    fs.tar_archive(build_compressed, build_dir, remote=False)
    info('Uploading the build {} to {}'.format(build_compressed, tmp_path))
    fs.upload(build_compressed, tmp_path)
    # Remove the compressed build from the local directory.
    fs.rm(build_compressed, remote=False)
    # Once, the build is uploaded to the remote,
    # set things up in the remote server.
    with cd(release_dir):
        remote_info('Extracting the build {}'.format(build_compressed))
        # Create a new directory for the build in the remote.
        fs.mkdir(dist_path, nested=True)
        # Extract the build.
        fs.tar_extract(tmp_path, dist_path)
        # Remove the uploaded archived from the temp path.
        fs.rm_rf(tmp_path)
        # Upload the files to be included eg: package.json file
        # to the remote build location.
        upload_included_files(included_files, release_path)
        remote_info('Pointing the current symlink to the latest build')
        fs.update_symlink(release_path, current_path)
    # Change directory to the release path.
    with cd(current_path):
        install_remote_dependencies()
    # Start or restart the application service.
    start_or_reload_service(is_first_deployment)
    # Save build history
    buildman.record_history({
        'id': build_id,
        'path': release_path,
        'branch': branch,
        'commit': commit,
        'stage': stage,
        'createdBy': deployer_user,
        'timestamp': timestamp.strftime(buildman.TS_FORMAT)
    })
    runner.run_script_safely(known_scripts.POST_DEPLOY)
    # Send deployment finished notification.
    notif.send(notification_types.DEPLOYMENT_FINISHED, {
        'user': deployer_user,
        'branch': branch,
        'commit': commit,
        'stage': stage
    })
    remote_info('Deployment Completed')
def install_remote_dependencies():
    '''
    Install the application dependencies on the remote host.

    Always runs the PRE_INSTALL hook first and the POST_INSTALL hook last.
    In between, a remote-specific INSTALL_REMOTE script (wrapped in its own
    pre/post hooks) is preferred; otherwise the generic INSTALL script runs.
    '''
    remote_info('Installing dependencies on the remote')
    runner.run_script_safely(known_scripts.PRE_INSTALL)

    has_remote_install = runner.is_script_defined(known_scripts.INSTALL_REMOTE)

    if not has_remote_install:
        # No remote-specific installer configured; fall back to the
        # generic install script.
        runner.run_script_safely(known_scripts.INSTALL)
    else:
        runner.run_script_safely(known_scripts.PRE_INSTALL_REMOTE)
        runner.run_script(known_scripts.INSTALL_REMOTE)
        runner.run_script_safely(known_scripts.POST_INSTALL_REMOTE)

    runner.run_script_safely(known_scripts.POST_INSTALL)
def start_or_reload_service(has_started=False):
    '''
    Start or reload the application service.

    Preference order: a combined START_OR_RELOAD script, then (only when
    the app has already been deployed at least once) a RELOAD script,
    then a plain START script. Does nothing if none are configured.
    '''
    defined = runner.is_script_defined
    with cd(buildman.get_deploy_dir()):
        if defined(known_scripts.START_OR_RELOAD):
            remote_info('Starting/Reloading the service.')
            runner.run_script(known_scripts.START_OR_RELOAD)
        elif has_started and defined(known_scripts.RELOAD):
            remote_info('Reloading the service.')
            runner.run_script_safely(known_scripts.RELOAD)
        elif defined(known_scripts.START):
            remote_info('Starting the service.')
            runner.run_script(known_scripts.START)
def reload_service():
    '''
    Reload the application service.

    Runs the RELOAD script (if configured) from the deploy directory;
    run_script_safely swallows failures, so a missing script is not fatal.
    '''
    with cd(buildman.get_deploy_dir()):
        remote_info('Reloading the service.')
        runner.run_script_safely(known_scripts.RELOAD)


def stop_service():
    '''
    Stop the application service.

    Runs the STOP script (if configured) from the deploy directory;
    failures are swallowed by run_script_safely (best-effort stop).
    '''
    with cd(buildman.get_deploy_dir()):
        remote_info('Stopping the service.')
        runner.run_script_safely(known_scripts.STOP)
@task(alias='reload')
def restart():
    '''
    Restart the service.

    Delegates to start_or_reload_service with has_started=True, so a
    RELOAD script is preferred when no combined START_OR_RELOAD script
    is defined.
    '''
    start_or_reload_service(True)


@task
def stop():
    ''' Stop the service (best-effort; see stop_service). '''
    stop_service()


@task
def status():
    '''
    Get the status of the service.

    Runs the STATUS_CHECK script inside the currently deployed release
    directory; run_script raises on an undefined/failed script.
    '''
    with cd(buildman.get_current_path()):
        runner.run_script(known_scripts.STATUS_CHECK)
@task
def run(script):
    '''
    Run a custom script defined in the deploy config.

    The working directory is switched to the currently deployed release
    before execution; an unknown or failing script aborts the task.
    '''
    with cd(buildman.get_current_path()):
        try:
            runner.run_script(script)
        except RuntimeError as err:
            # Abort the fabric task, surfacing the underlying message.
            halt(str(err))
@task(alias='list')
def services():
    '''
    List the services running for the application.

    Runs the LIST_SERVICES script inside the current release directory.
    '''
    with cd(buildman.get_current_path()):
        runner.run_script(known_scripts.LIST_SERVICES)
| 28.892857 | 89 | 0.687406 |
8095b07a541d997e0bfd625379d33eb2a72bbe57 | 5,156 | py | Python | IRIS_data_download/IRIS_download_support/obspy/io/gse2/paz.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/io/gse2/paz.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/io/gse2/paz.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------
# Filename: paz.py
# Purpose: Python routines for reading GSE poles and zero files
# Author: Moritz Beyreuther
# Email: moritz.beyreuther@geophysik.uni-muenchen.de
#
# Copyright (C) 2008-2012 Moritz Beyreuther
# --------------------------------------------------------------------
"""
Python routines for reading GSE pole and zero (PAZ) files.
The read in PAZ information can be used with
:mod:`~obspy.signal` for instrument correction.
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import native_str
import doctest
import numpy as np
from obspy.core import AttribDict
def read_paz(paz_file):
    '''
    Read GSE PAZ / Calibration file format and returns poles, zeros and the
    seismometer_gain.

    Do not use this function in connection with the ObsPy instrument
    simulation, the A0_normalization_factor might be set wrongly. Use
    :func:`~obspy.io.gse2.libgse2.attach_paz` instead.

    :param paz_file: path to a GSE PAZ file, or an open file-like object.
    :returns: tuple ``(poles, zeros, seismometer_gain)``; poles and zeros
        are lists of complex numbers, seismometer_gain is a float in
        [muVolt/nm/s].
    :raises NameError: if the header is not a ``CAL1`` line of type ``PAZ``.

    >>> import io
    >>> f = io.StringIO(
    ... """CAL1 RJOB   LE-3D    Z  M24    PAZ 010824 0001
    ... 2
    ... -4.39823 4.48709
    ... -4.39823 -4.48709
    ... 3
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.4""")
    >>> p, z, k = read_paz(f)
    >>> print('%.4f %.4f %.4f' % (p[0].real, z[0].real, k))
    -4.3982 0.0000 0.4000
    '''
    poles = []
    zeros = []

    if isinstance(paz_file, (str, native_str)):
        with open(paz_file, 'rt') as fh:
            paz = fh.readlines()
    else:
        paz = paz_file.readlines()

    # Header sanity checks: fixed-column GSE CAL1 line, calibration type PAZ.
    if paz[0][0:4] != 'CAL1':
        raise NameError("Unknown GSE PAZ format %s" % paz[0][0:4])
    if paz[0][31:34] != 'PAZ':
        raise NameError("%s type is not known" % paz[0][31:34])

    ind = 1
    npoles = int(paz[ind])
    for i in range(npoles):
        try:
            # Preferred layout: whitespace-separated "real imag" pair.
            poles.append(complex(*[float(n)
                                   for n in paz[i + 1 + ind].split()]))
        except ValueError:
            # Fallback: fixed-width columns (8 chars real, rest imag).
            poles.append(complex(float(paz[i + 1 + ind][:8]),
                                 float(paz[i + 1 + ind][8:])))
    # Advance past the pole-count line plus the pole lines. Computed from
    # npoles rather than the loop variable so a file declaring zero poles
    # no longer raises NameError (previously: `ind += i + 2`).
    ind += npoles + 1

    nzeros = int(paz[ind])
    for i in range(nzeros):
        try:
            zeros.append(complex(*[float(n)
                                   for n in paz[i + 1 + ind].split()]))
        except ValueError:
            zeros.append(complex(float(paz[i + 1 + ind][:8]),
                                 float(paz[i + 1 + ind][8:])))
    # Same fix as above for an empty zeros block.
    ind += nzeros + 1

    # in the observatory this is the seismometer gain [muVolt/nm/s]
    # the A0_normalization_factor is hardcoded to 1.0
    seismometer_gain = float(paz[ind])
    return poles, zeros, seismometer_gain
def attach_paz(tr, paz_file):
    '''
    Attach a ``paz`` AttribDict (poles, zeros and gains) read from a GSE2
    PAZ file to an ObsPy trace.

    Experimental: relies on GSE2 PAZ conventions valid for the
    geophysical observatory in Fuerstenfeldbruck, which might be wrong in
    other cases. The A0_normalization_factor is fixed at 1.0.

    :param tr: An ObsPy trace object containing the calib and gse2 calper
        attributes
    :param paz_file: path to pazfile or file pointer

    >>> from obspy.core import Trace
    >>> import io
    >>> tr = Trace(header={'calib': .094856, 'gse2': {'calper': 1}})
    >>> f = io.StringIO(
    ... """CAL1 RJOB   LE-3D    Z  M24    PAZ 010824 0001
    ... 2
    ... -4.39823 4.48709
    ... -4.39823 -4.48709
    ... 3
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.4""")
    >>> attach_paz(tr, f)
    >>> print(round(tr.stats.paz.sensitivity / 10E3) * 10E3)
    671140000.0
    '''
    poles, zeros, seismometer_gain = read_paz(paz_file)
    # Drop the first zero at the origin to undo the integration step that
    # the GSE PAZ representation includes.
    try:
        zeros.remove(complex(0, 0j))
    except ValueError:
        raise Exception("Could not remove (0,0j) zero to undo GSE integration")
    # ftp://www.orfeus-eu.org/pub/software/conversion/GSE_UTI/gse2001.pdf
    # page 3
    calibration = tr.stats.calib * 2 * np.pi / tr.stats.gse2.calper
    paz = AttribDict()
    # convert seismometer gain from [muVolt/nm/s] to [Volt/m/s]
    paz.seismometer_gain = seismometer_gain * 1e3
    # convert digitizer gain [count/muVolt] to [count/Volt]
    paz.digitizer_gain = 1e6 / calibration
    paz.poles = poles
    paz.zeros = zeros
    paz.sensitivity = paz.digitizer_gain * paz.seismometer_gain
    # A0_normalization_factor convention for gse2 paz in Observatory in FFB
    paz.gain = 1.0
    tr.stats.paz = paz
if __name__ == '__main__':
    # Execute the embedded doctests when run as a script.
    doctest.testmod(exclude_empty=True)
| 32.024845 | 79 | 0.592126 |
232cb9ff9444dfdc468f4a21bd1f91cb5a8d8d0e | 3,353 | py | Python | stix_shifter/stix_transmission/stix_transmission.py | dalebowie/stix-shifter | 6f35d491ec41ae147d571367fe06e2bfd3d6c418 | [
"Apache-2.0"
] | null | null | null | stix_shifter/stix_transmission/stix_transmission.py | dalebowie/stix-shifter | 6f35d491ec41ae147d571367fe06e2bfd3d6c418 | [
"Apache-2.0"
] | 2 | 2021-02-01T13:43:54.000Z | 2021-02-02T20:51:06.000Z | stix_shifter/stix_transmission/stix_transmission.py | dalebowie/stix-shifter | 6f35d491ec41ae147d571367fe06e2bfd3d6c418 | [
"Apache-2.0"
] | 1 | 2021-12-29T09:31:49.000Z | 2021-12-29T09:31:49.000Z | import importlib
from stix_shifter_utils.utils.error_response import ErrorResponder
# String identifiers for the transmission operations exposed by this module.
# They are not referenced inside this file -- presumably consumed by
# callers/CLI; confirm before removing.
RESULTS = 'results'
QUERY = 'query'
DELETE = 'delete'
STATUS = 'status'
PING = 'ping'
IS_ASYNC = 'is_async'
class StixTransmission:
    """
    Facade over a datasource connector's transmission entry point.

    The connector module is resolved dynamically from ``module`` (or forced
    to 'proxy' when a proxy host is configured in the connection options).
    A construction failure is stored in ``init_error`` instead of raised;
    every public method then returns an error dict built by ErrorResponder,
    so callers always receive a response object rather than an exception.
    """

    init_error = None

    def __init__(self, module, connection, configuration):
        # 'module' may carry a suffix after ':'; only the base name selects
        # the connector package.
        module = module.split(':')[0]
        if connection.get('options', {}).get('proxy_host'):
            # Route everything through the proxy connector when a proxy
            # host is configured.
            module = 'proxy'
        try:
            connector_module = importlib.import_module("stix_shifter_modules." + module + ".entry_point")
            self.entry_point = connector_module.EntryPoint(connection, configuration)
        except Exception as e:
            self.init_error = e

    def _invoke(self, action):
        """
        Run ``action`` (a zero-argument callable), converting any failure --
        including a stored construction error -- into an error dict via
        ErrorResponder. Centralizes the try/except block that every public
        method previously duplicated verbatim.
        """
        try:
            if self.init_error is not None:
                raise Exception(self.init_error)
            return action()
        except Exception as ex:
            return_obj = dict()
            ErrorResponder.fill_error(return_obj, error=ex)
            return return_obj

    def query(self, query):
        """Create and send a query to the correct datasource."""
        return self._invoke(lambda: self.entry_point.create_query_connection(query))

    def status(self, search_id):
        """Ask the datasource for the status of the given query."""
        return self._invoke(lambda: self.entry_point.create_status_connection(search_id))

    def results(self, search_id, offset, length):
        """Fetch a slice of results for the given query."""
        return self._invoke(
            lambda: self.entry_point.create_results_connection(search_id, offset, length))

    def delete(self, search_id):
        """Ask the datasource to terminate the given query."""
        return self._invoke(lambda: self.entry_point.delete_query_connection(search_id))

    def ping(self):
        """Confirm we are connected and authenticated to the datasource."""
        return self._invoke(lambda: self.entry_point.ping_connection())

    def is_async(self):
        """Report whether the connector module operates asynchronously."""
        return self._invoke(lambda: self.entry_point.is_async())
| 36.445652 | 112 | 0.631077 |
bc114bddb14dc32178659de2bd8d0feb056e737c | 10,398 | py | Python | codes/models/classifiers/cifar_resnet_branched.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 12 | 2020-12-13T12:45:03.000Z | 2022-03-29T09:58:15.000Z | codes/models/classifiers/cifar_resnet_branched.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 1 | 2020-12-31T01:12:45.000Z | 2021-03-31T11:43:52.000Z | codes/models/classifiers/cifar_resnet_branched.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 3 | 2020-12-14T06:04:04.000Z | 2020-12-26T19:11:41.000Z | """resnet in pytorch
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385v1
"""
import torch
import torch.nn as nn
import torch.distributed as dist
from models.switched_conv.switched_conv_hard_routing import SwitchNorm, RouteTop1
from trainer.networks import register_model
class BasicBlock(nn.Module):
    """Two-conv residual block for ResNet-18/34.

    Residual branch: 3x3 conv / BN / ReLU / 3x3 conv / BN. The shortcut is
    the identity unless the spatial size or channel count changes, in which
    case a strided 1x1 projection (conv + BN) is used. Branch outputs are
    summed and passed through a final ReLU.
    """

    # Output channel multiplier; basic blocks do not widen (contrast with
    # BottleNeck's factor of 4).
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        width = out_channels * BasicBlock.expansion

        # Main residual branch.
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3,
                      stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, width, kernel_size=3,
                      padding=1, bias=False),
            nn.BatchNorm2d(width),
        )

        # Identity shortcut, replaced by a 1x1 projection whenever the
        # output dimension differs from the input.
        if stride == 1 and in_channels == width:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, width, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(width),
            )

    def forward(self, x):
        summed = self.residual_function(x) + self.shortcut(x)
        return nn.ReLU(inplace=True)(summed)
class BottleNeck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 and deeper).

    The final 1x1 conv widens to ``out_channels * expansion`` (4x); the
    shortcut becomes a strided 1x1 projection whenever the output shape
    differs from the input. Branch outputs are summed, then ReLU'd.
    """

    expansion = 4

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        width = out_channels * BottleNeck.expansion

        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, stride=stride,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, width, kernel_size=1, bias=False),
            nn.BatchNorm2d(width),
        )

        if stride == 1 and in_channels == width:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, width, stride=stride,
                          kernel_size=1, bias=False),
                nn.BatchNorm2d(width),
            )

    def forward(self, x):
        summed = self.residual_function(x) + self.shortcut(x)
        return nn.ReLU(inplace=True)(summed)
class ResNetTail(nn.Module):
    """Tail of a ResNet: stages conv4/conv5, global average pooling and a
    linear classifier head.

    Expects 64-channel input feature maps (``in_channels`` starts at 64)
    and emits ``num_classes`` logits. ``num_block[2]`` / ``num_block[3]``
    give the block counts of the two stages.
    """

    def __init__(self, block, num_block, num_classes=100):
        super().__init__()
        self.in_channels = 64
        self.conv4_x = self._make_layer(block, 128, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 256, num_block[3], 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256 * block.expansion, num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first applies ``stride``."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        features = self.conv5_x(self.conv4_x(x))
        pooled = self.avg_pool(features)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(flat)
class DropoutNorm(SwitchNorm):
    """SwitchNorm variant that randomly suppresses frequently-winning
    routing switches during training.

    A ring buffer accumulates, per switch, how often that switch "wins"
    the argmax over recent batches. Once the buffer has filled once, each
    switch is dropped -- its logit forced to -1e20, i.e. effectively -inf
    for any downstream softmax -- with probability proportional to its
    historical win rate times ``dropout_rate``, pushing usage toward
    uniformity. Eval mode is a no-op.
    """

    def __init__(self, group_size, dropout_rate, accumulator_size=256, eps=1e-6):
        # NOTE(review): SwitchNorm.__init__ also receives accumulator_size;
        # presumably it keeps its own accumulator state -- confirm there is
        # no unintended duplication with the buffers registered below.
        super().__init__(group_size, accumulator_size)
        self.accumulator_desired_size = accumulator_size
        self.group_size = group_size
        self.dropout_rate = dropout_rate
        # Ring-buffer write cursor and a filled-once flag; registered as
        # buffers so they persist through state_dict save/load.
        self.register_buffer("accumulator_index", torch.zeros(1, dtype=torch.long, device='cpu'))
        self.register_buffer("accumulator_filled", torch.zeros(1, dtype=torch.long, device='cpu'))
        # One row per recorded batch, one column per switch.
        self.register_buffer("accumulator", torch.zeros(accumulator_size, group_size))
        self.eps = eps  # stored but never referenced in this subclass

    def add_norm_to_buffer(self, x):
        """Record the per-switch mean of ``x`` in the ring buffer."""
        # Average over batch and all trailing (spatial) dims, keeping only
        # the switch dimension.
        flatten_dims = [0] + [k+2 for k in range(len(x.shape)-2)]
        flat = x.mean(dim=flatten_dims)
        self.accumulator[self.accumulator_index] = flat.detach().clone()
        self.accumulator_index += 1
        if self.accumulator_index >= self.accumulator_desired_size:
            # Wrap the cursor; after the first full pass, mark the buffer
            # filled so forward() starts applying dropout.
            self.accumulator_index *= 0
            if self.accumulator_filled <= 0:
                self.accumulator_filled += 1

    # Input into forward is a switching tensor of shape (batch,groups,<misc>)
    def forward(self, x: torch.Tensor):
        assert len(x.shape) >= 2
        if not self.training:
            return x
        # Only accumulate the "winning" switch slots.
        mask = torch.nn.functional.one_hot(x.argmax(dim=1), num_classes=x.shape[1])
        if len(x.shape) > 2:
            mask = mask.permute(0, 3, 1, 2)  # TODO: Make this more extensible.
        xtop = torch.ones_like(x)
        xtop[mask != 1] = 0
        # Push the accumulator to the right device on the first iteration.
        if self.accumulator.device != xtop.device:
            self.accumulator = self.accumulator.to(xtop.device)
        self.add_norm_to_buffer(xtop)
        # Reduce across all distributed entities, if needed
        if dist.is_available() and dist.is_initialized():
            dist.all_reduce(self.accumulator, op=dist.ReduceOp.SUM)
            self.accumulator /= dist.get_world_size()
        # Compute the dropout probabilities. This module is a no-op before the accumulator is initialized.
        if self.accumulator_filled > 0:
            with torch.no_grad():
                probs = torch.mean(self.accumulator, dim=0) * self.dropout_rate
                bs, br = x.shape[:2]
                drop = torch.rand((bs, br), device=x.device) > probs.unsqueeze(0)
                # Ensure that there is always at least one switch left un-dropped out
                fix_blank = (drop.sum(dim=1, keepdim=True) == 0).repeat(1, br)
                drop = drop.logical_or(fix_blank)
            # -1e20 removes a slot from any downstream softmax; kept outside
            # no_grad so gradients still flow through the surviving slots.
            x_dropped = drop * x + ~drop * -1e20
            x = x_dropped
        return x
class HardRoutingGate(nn.Module):
    """Routing gate with a noise warm-up fade.

    For the first ``fade_steps`` forward calls the incoming logits are
    linearly cross-faded with Gaussian noise (pure noise at step 0, pure
    logits at the end) so early training cannot collapse onto one route.
    The logits then pass through DropoutNorm (usage-proportional switch
    dropout) and a softmax; RouteTop1 (project op -- presumably a hard
    top-1 selection, confirm) produces the final routing decision.
    """

    def __init__(self, breadth, fade_steps=10000, dropout_rate=.8):
        super().__init__()
        self.norm = DropoutNorm(breadth, dropout_rate, accumulator_size=128)
        self.fade_steps = fade_steps
        # Persisted as a buffer so the fade-in survives checkpointing.
        self.register_buffer("last_step", torch.zeros(1, dtype=torch.long, device='cpu'))

    def forward(self, x):
        step = self.last_step
        if step < self.fade_steps:
            # Noise weight shrinks toward 0 while the signal weight grows
            # toward 1 as step approaches fade_steps.
            x = (torch.randn_like(x) * (self.fade_steps - step) / self.fade_steps
                 + x * step / self.fade_steps)
            self.last_step = step + 1
        soft = nn.functional.softmax(self.norm(x), dim=1)
        return RouteTop1.apply(soft)
class ResNet(nn.Module):
    """Branched CIFAR ResNet with hard-routed classifier tails.

    A shared trunk (conv1..conv3_x) feeds ``num_tails`` independent
    ResNetTail branches, each emitting a 256-dim "key". A separate
    selector tail produces per-tail scores; after gating through
    HardRoutingGate the winning tail's key (scaled by the gate output)
    reaches the final linear classifier.
    """

    def __init__(self, block, num_block, num_classes=100, num_tails=8):
        super().__init__()
        self.in_channels = 32
        # Stem: 3x3 conv to 32 channels at full resolution.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True))
        self.conv2_x = self._make_layer(block, 32, num_block[0], 1)
        self.conv3_x = self._make_layer(block, 64, num_block[1], 2)
        # Each tail maps trunk features to a 256-dim key vector.
        self.tails = nn.ModuleList([ResNetTail(block, num_block, 256) for _ in range(num_tails)])
        # The selector tail emits one raw score per tail.
        self.selector = ResNetTail(block, num_block, num_tails)
        self.selector_gate = nn.Linear(256, 1)
        # NOTE(review): dropout_rate=2 exceeds the parameter's default of
        # .8; DropoutNorm multiplies usage means by this factor -- confirm
        # this is intentional.
        self.gate = HardRoutingGate(num_tails, dropout_rate=2)
        self.final_linear = nn.Linear(256, num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        # First block applies `stride`; the remainder keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def get_debug_values(self, step, __):
        # Histogram of which tail won the routing in the most recent
        # forward pass (latest_masks is set in forward(); calling this
        # before any forward raises AttributeError).
        logs = {'histogram_switch_usage': self.latest_masks}
        return logs

    def forward(self, x, coarse_label, return_selector=False):
        # NOTE(review): coarse_label is unused (see the commented-out code
        # at the bottom) -- confirm before relying on it.
        output = self.conv1(x)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        keys = []
        for t in self.tails:
            keys.append(t(output))
        keys = torch.stack(keys, dim=1)             # (batch, num_tails, 256)
        query = self.selector(output).unsqueeze(2)  # (batch, num_tails, 1)
        # Per-tail gate score from the query-modulated key.
        selector = self.selector_gate(query * keys).squeeze(-1)
        selector = self.gate(selector)
        # NOTE(review): repeat(1,8) hard-codes num_tails == 8; this breaks
        # for any other num_tails value -- confirm.
        self.latest_masks = (selector.max(dim=1, keepdim=True)[0].repeat(1,8) == selector).float().argmax(dim=1)
        # Weight keys by the gate output, classify, and sum across tails.
        values = self.final_linear(selector.unsqueeze(-1) * keys)
        if return_selector:
            return values.sum(dim=1), selector
        else:
            return values.sum(dim=1)
        #bs = output.shape[0]
        #return (tailouts[coarse_label] * torch.eye(n=bs, device=x.device).view(bs,bs,1)).sum(dim=1)
@register_model
def register_cifar_resnet18_branched(opt_net, opt):
    """Registry entry point: build the branched CIFAR ResNet-18.

    opt_net / opt are accepted for registry-signature compatibility but
    are not used here -- presumably the registry passes them to every
    builder; confirm against trainer.networks.register_model.
    """
    return ResNet(BasicBlock, [2, 2, 2, 2])
# Plain (non-registered) factory functions for the deeper variants.
# NOTE(review): unlike register_cifar_resnet18_branched these are not
# decorated with @register_model -- confirm whether they are reachable.

def resnet34():
    """Return a ResNet-34 model (BasicBlock, layer config [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def resnet50():
    """Return a ResNet-50 model (BottleNeck, layer config [3, 4, 6, 3])."""
    return ResNet(BottleNeck, [3, 4, 6, 3])


def resnet101():
    """Return a ResNet-101 model (BottleNeck, layer config [3, 4, 23, 3])."""
    return ResNet(BottleNeck, [3, 4, 23, 3])


def resnet152():
    """Return a ResNet-152 model (BottleNeck, layer config [3, 8, 36, 3])."""
    return ResNet(BottleNeck, [3, 8, 36, 3])
if __name__ == '__main__':
    # Ad-hoc smoke test: ten forward/backward passes on random CIFAR-sized
    # batches, printing the switch-usage histogram and output shape.
    model = ResNet(BasicBlock, [2,2,2,2])
    for j in range(10):
        v = model(torch.randn(256,3,32,32), None)
        print(model.get_debug_values(0, None))
        print(v.shape)
        l = nn.MSELoss()(v, torch.randn_like(v))
        l.backward()
| 37.003559 | 118 | 0.635026 |
08558f3d08b639588a600822dbd7f26554c54c86 | 4,876 | py | Python | g2d_pyg.py | Kekko01/My-Hitori | 3d34bb4035b1a8ac31d8e8f30faef09bb2de0acb | [
"MIT"
] | 1 | 2021-12-03T13:27:43.000Z | 2021-12-03T13:27:43.000Z | g2d_pyg.py | Kekko01/My-Hitori | 3d34bb4035b1a8ac31d8e8f30faef09bb2de0acb | [
"MIT"
] | null | null | null | g2d_pyg.py | Kekko01/My-Hitori | 3d34bb4035b1a8ac31d8e8f30faef09bb2de0acb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
@author Michele Tomaiuolo - http://www.ce.unipr.it/people/tomamic
@license This software is free - http://www.gnu.org/licenses/gpl.html
'''
from tkinter import Tk, messagebox, simpledialog
import subprocess, sys
# Import pygame, installing it on the fly if missing. Only ImportError is
# caught: the previous bare "except:" also swallowed KeyboardInterrupt and
# masked unrelated failures raised inside pygame's own import.
# NOTE: running pip at import time is a side effect kept for compatibility
# with existing classroom usage of this module.
try:
    import pygame
except ImportError:
    subprocess.call([sys.executable, "-m", "pip", "install", "pygame"])
    import pygame
# Hidden Tk root: hosts the message/prompt dialogs and is used to query
# the screen size.
_tkmain = Tk()
_tkmain.wm_withdraw()  # to hide the main window
_ws, _hs = _tkmain.winfo_screenwidth(), _tkmain.winfo_screenheight()
# Park the (hidden) root near the screen center so dialogs open there.
_tkmain.geometry("100x100+%d+%d" % (_ws//2, _hs//2))
# Module-level state shared by the drawing / event functions below.
_canvas = None            # pygame display surface, set by init_canvas()
_tick = None              # per-frame callback, set by main_loop()
_color = (127, 127, 127)  # current drawing color (RGB)
_mouse_pos = (0, 0)       # last mouse position sampled in main_loop()
_keys, _prev_keys = set(), set()  # keys down this frame / previous frame
_mouse_codes = ["LeftButton", "MiddleButton", "RightButton"]
def init_canvas(size: (int, int)):
    '''Create the pygame window with the given (width, height) and clear it.'''
    global _canvas
    pygame.init()
    _canvas = pygame.display.set_mode(size)
    clear_canvas()


def set_color(color: (int, int, int)) -> None:
    '''Set the current drawing color as an (r, g, b) tuple.'''
    global _color
    _color = color


def clear_canvas() -> None:
    '''Fill the whole canvas with white.'''
    _canvas.fill((255, 255, 255))


def update_canvas() -> None:
    '''Flush pending drawing operations to the screen.'''
    pygame.display.update()


def draw_line(pt1: (int, int), pt2: (int, int)) -> None:
    '''Draw a line between two points in the current color.'''
    pygame.draw.line(_canvas, _color, pt1, pt2)


def fill_circle(center: (int, int), radius: int) -> None:
    '''Draw a filled circle in the current color.'''
    pygame.draw.circle(_canvas, _color, center, radius)


def fill_rect(rectangle: (int, int, int, int)) -> None:
    '''Draw a filled (x, y, w, h) rectangle in the current color.'''
    pygame.draw.rect(_canvas, _color, rectangle)
def draw_text(txt: str, pos: (int, int), size: int) -> None:
    '''Render txt at pos (top-left corner) with the given font size,
    in the current color.'''
    font = pygame.font.SysFont('freesansbold', size)
    surface = font.render(txt, True, _color)
    _canvas.blit(surface, pos)
def draw_text_centered(txt: str, pos: (int, int), size: int) -> None:
    '''Render txt with the given font size, centered on pos, in the
    current color.'''
    font = pygame.font.SysFont('freesansbold', size)
    rendered = font.render(txt, True, _color)
    width, height = rendered.get_size()
    corner = (pos[0] - width // 2, pos[1] - height // 2)
    _canvas.blit(rendered, corner)
def load_image(url: str) -> pygame.Surface:
    '''Load an image file from a path into a pygame Surface.'''
    return pygame.image.load(url)


def draw_image(image: pygame.Surface, pos: (int, int)) -> None:
    '''Blit the whole image with its top-left corner at pos.'''
    _canvas.blit(image, pos)
def draw_image_clip(image: pygame.Surface, src: (int, int, int, int), dst: (int, int, int, int)) -> None:
    '''Blit the src=(x, y, w, h) region of image into the dst rectangle,
    smooth-scaling it when the two sizes differ.'''
    src_x, src_y, src_w, src_h = src
    dst_x, dst_y, dst_w, dst_h = dst
    if src_w == dst_w and src_h == dst_h:
        # Fast path: no scaling needed, blit the clipped area directly.
        _canvas.blit(image, dst, area=src)
        return
    # Copy the clip onto a transparent scratch surface, scale, then blit.
    scratch = pygame.Surface((src_w, src_h), pygame.SRCALPHA)
    scratch.blit(image, (0, 0), area=src)
    resized = pygame.transform.smoothscale(scratch, (dst_w, dst_h))
    _canvas.blit(resized, (dst_x, dst_y))
def load_audio(url: str) -> pygame.mixer.Sound:
    '''Load a sound file from a path.'''
    return pygame.mixer.Sound(url)


def play_audio(audio: pygame.mixer.Sound, loop=False) -> None:
    '''Play a sound once, or repeat forever when loop is True.'''
    audio.play(-1 if loop else 0)


def pause_audio(audio: pygame.mixer.Sound) -> None:
    '''Stop the sound. NOTE(review): despite the name this stops playback
    entirely (Sound.stop has no resume) -- confirm intent.'''
    audio.stop()
def alert(message: str) -> None:
    '''Show a modal info dialog. The canvas is refreshed first so the
    latest frame is visible behind the dialog.'''
    if _canvas:
        update_canvas()
    messagebox.showinfo("", message)


def confirm(message: str) -> bool:
    '''Show an OK/Cancel dialog; return True on OK.'''
    if _canvas:
        update_canvas()
    return messagebox.askokcancel("", message)


def prompt(message: str) -> str:
    '''Ask the user for a line of text; returns None when cancelled.'''
    if _canvas:
        update_canvas()
    return simpledialog.askstring("", message, parent=_tkmain)
def mouse_position() -> (int, int):
    '''Return the mouse position sampled at the start of the current tick.'''
    return _mouse_pos
def web_key(key: int) -> str:
    '''Translate a pygame key constant into a web-style key name
    (KeyA, Digit1, ArrowUp, Spacebar, Enter, ...); other names are
    returned capitalized but otherwise unchanged.'''
    name = pygame.key.name(key)
    name = name[0].upper() + name[1:]
    if len(name) == 1:
        # Single characters get a Key/Digit prefix, mimicking DOM
        # KeyboardEvent codes; anything else passes through.
        if name.isalpha():
            return "Key" + name
        if name.isdigit():
            return "Digit" + name
        return name
    renames = {"Up": "ArrowUp", "Down": "ArrowDown",
               "Right": "ArrowRight", "Left": "ArrowLeft",
               "Space": "Spacebar", "Return": "Enter"}
    return renames.get(name, name)
def key_pressed(key: str) -> bool:
    '''True only on the frame in which key went down (edge detection
    against the previous frame's key set).'''
    return key in _keys and key not in _prev_keys


def key_released(key: str) -> bool:
    '''True only on the frame in which key went up.'''
    return key in _prev_keys and key not in _keys
def main_loop(tick=None, fps=30) -> None:
    '''Run the event/draw loop until the window is closed.

    tick, if given, is called once per frame after input events have been
    processed; fps caps the frame rate. On exit the program terminates
    via close_canvas().
    '''
    global _mouse_pos, _tick, _prev_keys
    _tick = tick
    clock = pygame.time.Clock()
    update_canvas()
    running = True
    while running:
        for e in pygame.event.get():
            # print(e)
            if e.type == pygame.QUIT:
                running = False
                break
            elif e.type == pygame.KEYDOWN:
                _keys.add(web_key(e.key))
            elif e.type == pygame.KEYUP:
                _keys.discard(web_key(e.key))
            elif (e.type == pygame.MOUSEBUTTONDOWN and
                  1 <= e.button <= 3):
                # Buttons 1-3 map to LeftButton/MiddleButton/RightButton.
                _keys.add(_mouse_codes[e.button - 1])
            elif (e.type == pygame.MOUSEBUTTONUP and
                  1 <= e.button <= 3):
                _keys.discard(_mouse_codes[e.button - 1])
        if _tick:
            # Sample the mouse once per frame so mouse_position() is
            # stable within a tick.
            _mouse_pos = pygame.mouse.get_pos()
            _tick()
        # Snapshot the key set so key_pressed/key_released can detect
        # edges on the next frame.
        _prev_keys = _keys.copy()
        update_canvas()
        clock.tick(fps)
    close_canvas()
def close_canvas() -> None:
    '''Shut pygame down and terminate the program (calls sys.exit).'''
    pygame.quit()
    sys.exit()
| 30.098765 | 105 | 0.613823 |
ab31318c000be6a2a7a0ee7b78d929997061b300 | 9,525 | py | Python | stackinawsgi/wsgi/app.py | BenjamenMeyer/stackInAWSGI | 8ac6be173bb08addc09214ba7dc9f91727d0221a | [
"Apache-2.0"
] | 2 | 2016-08-12T19:11:13.000Z | 2017-11-19T20:52:47.000Z | stackinawsgi/wsgi/app.py | BenjamenMeyer/stackInAWSGI | 8ac6be173bb08addc09214ba7dc9f91727d0221a | [
"Apache-2.0"
] | 16 | 2016-05-22T05:11:12.000Z | 2016-07-14T00:57:07.000Z | stackinawsgi/wsgi/app.py | BenjamenMeyer/stackInAWSGI | 8ac6be173bb08addc09214ba7dc9f91727d0221a | [
"Apache-2.0"
] | 1 | 2016-05-15T19:01:35.000Z | 2016-05-15T19:01:35.000Z | """
Stack-In-A-WSGI Application
"""
from __future__ import absolute_import
import logging
from collections import Iterable
from .request import Request
from .response import Response
from stackinawsgi.session.service import StackInAWsgiSessionManager
from stackinawsgi.admin.admin import StackInAWsgiAdmin
from stackinabox.services.service import StackInABoxService
from stackinabox.stack import StackInABox
logger = logging.getLogger(__name__)
class App(object):
"""
A WSGI Application for running StackInABox under a WSGI host
"""
# List of well-known status codes
status_values = {
# Official Status Codes
100: "Continue",
101: "Switching Protocols",
102: "Processing",
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
206: "Partial Content",
207: "Multi-Status Response",
208: "Already Reported",
226: "IM Used",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
306: "Switch Proxy",
307: "Temporary Redirect",
308: "Permanent Redirect",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict",
410: "Gone",
411: "Length Required",
412: "Precondition Failed",
413: "Payload Too Large",
414: "URI Too Long",
415: "Unsupported Media Type",
416: "Range Not Satisfiable",
417: "Expectation Failed",
418: "I'm a teapot",
421: "Misdirected Request",
422: "Unprocessable Entity",
423: "Locked",
424: "Failed Dependency",
426: "Upgrade Required",
428: "Precondition Required",
429: "Too Many Requests",
431: "Requested Header Fields Too Large",
451: "Unavailable for Legal Reasons",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Timeout",
505: "HTTP Version Not Supported",
506: "Variant Also Negotiates",
507: "Insufficient Storage",
508: "Loop Detected",
510: "Not Extended",
511: "Network Authentication Required",
# Unofficial Status Codes:
103: "Checkpoint",
420: "Method Failure",
450: "Blocked by Windows Parental Control (MS)",
498: "Invalid Token",
# 499: "Token Required", (re-defined)
509: "Bandwidth Limit Exceeded",
530: "Site Frozen",
440: "Login Timeout",
449: "Retry With",
# 451 - Redirect (re-defined)
444: "No Response",
495: "SSL Certificate Error",
496: "SSL Certificate Required",
497: "HTTP Request Sent to HTTPS Port",
499: "Client Closed Request",
520: "Unknown Error",
521: "Web Server Is Down",
522: "Connection Timed Out",
523: "Origin Is Unreachable",
524: "A Timeout Occurred",
525: "SSL Handshake Failed",
526: "Invalid SSL Certificate",
# The below codes are specific cases for the infrastructure
# supported here and should not conflict with anything above.
# StackInABox Status Codes
595: "Route Not Handled",
596: "Unhandled Exception",
597: "URI Is For Service That Is Unknown",
# StackInAWSGI Status Codes
593: "Session ID Missing from URI",
594: "Invalid Session ID"
}
def __init__(self, services=None):
"""
Create the WSGI Application
:param list services: list of :obj:`StackInABoxService`s to load into
StackInABox.
"""
self.stackinabox = StackInABox()
self.stack_service = StackInAWsgiSessionManager()
self.admin_service = StackInAWsgiAdmin(
self.stack_service,
'http://localhost/stackinabox/'
)
self.stackinabox.register(self.admin_service)
self.stackinabox.register(self.stack_service)
def __check_service(service_object):
"""
Simple wrapper to check whether an object provide by the caller is
a StackInABoxService by creating an instance
"""
svc = service_object()
if not isinstance(svc, StackInABoxService):
raise TypeError(
"Service is not a Stack-In-A-Box Service"
)
# if the caller does not provide any services then just log it
# to keep from user confusion
if services is not None:
# Allow the caller to provide either an iterable of services to
# to provide to the session or to provide a single service object
if isinstance(services, Iterable):
# for each service verify it is a StackInABoxService
for service in services:
__check_service(service)
self.RegisterWithStackInABox(service)
else:
# if it's not an iterable - e.g a single object - then
# just check the variable itself
__check_service(services)
self.RegisterWithStackInABox(services)
else:
logger.debug(
"No services registered on initialization"
)
def RegisterWithStackInABox(self, service):
"""
Add a :obj:`StackInABoxService` to the StackInABox instance
:param :obj:`StackInABoxService` service: the service to register with
StackInABox
"""
self.stack_service.register_service(service)
def ResetStackInABox(self, session_uuid):
"""
Reset StackInABox to its default state
"""
self.stack_service.reset_session(session_uuid)
def StackInABoxHoldOnto(self, name, obj):
"""
Add something into the StackInABox KV store
:param text_type name: name of the value for the KV store
:param any obj: a value to associate in the KV store
"""
self.stackinabox.into_hold(name, obj)
def StackInABoxHoldOut(self, name):
"""
Retrieve a value from the KV store
:param text_type name: name of the value for the KV store
:returns: the value if the KV store associated with the given name
"""
return self.stackinabox.from_hold(name)
def StackInABoxUriUpdate(self, uri):
"""
Update StackInABox to use a new URI value.
"""
self.stackinabox.base_url = uri
self.admin_service.base_uri = uri
def CallStackInABox(self, request, response):
"""
Call into StackInABox with the given request and response objects.
:param :obj:`Request` request: the :obj:`Request` object to use for
as input
:param :obj:`Response` response: the :obj:`Response` object to use
for the output
"""
# Parse the URL and determine where it's going
# /stackinabox/<session>/<service>/<normal user path>
# /admin for StackInAWSGI administrative functionality
result = self.stackinabox.call(
request.method,
request,
request.url,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
def response_for_status(cls, status):
"""
Generate a status string for the status code
:param int status: the status code to look-up
:returns: string for the value or an appropriate Unknown value
"""
if status in cls.status_values:
return cls.status_values[status]
elif status >= 100 and status < 200:
return "Unknown Informational Status"
elif status >= 200 and status < 300:
return "Unknown Success Status"
elif status >= 300 and status < 400:
return "Unknown Redirection Status"
elif status >= 400 and status < 500:
return "Unknown Client Error"
elif status >= 500 and status < 600:
return "Unknown Server Error"
else:
return "Unknown Status"
def __call__(self, environ, start_response):
"""
Callable entry per the PEP-3333 WSGI spec
:param dict environ: the environment dictionary from the WSGI stack
:param callable start_response: the start_response callable for the
WSGI stack
:returns: generator for the response body
"""
logger.debug('Instance ID: {0}'.format(id(self)))
logger.debug('Environment: {0}'.format(environ))
request = Request(environ)
response = Response()
self.CallStackInABox(request, response)
start_response(
"{0} {1}".format(
response.status,
self.response_for_status(
response.status
)
),
[(k, v) for k, v in response.headers.items()]
)
yield response.body
| 32.070707 | 78 | 0.586667 |
dacddbf78233076710e85402b717f7264adbcce0 | 479 | py | Python | libraries/botbuilder-ai/botbuilder/ai/about.py | victor-kironde/botbuilder-python | e893d9b036d7cf33cf9c9afd1405450c354cdbcd | [
"MIT"
] | 1 | 2020-09-05T11:05:53.000Z | 2020-09-05T11:05:53.000Z | libraries/botbuilder-ai/botbuilder/ai/about.py | victor-kironde/botbuilder-python | e893d9b036d7cf33cf9c9afd1405450c354cdbcd | [
"MIT"
] | 13 | 2020-09-05T11:06:05.000Z | 2020-10-29T05:01:19.000Z | libraries/botbuilder-ai/botbuilder/ai/about.py | admdev8/botbuilder-python | 13190c4e3c0f3586515a079ed5b9e92771b04261 | [
"MIT"
] | 1 | 2020-10-01T07:34:07.000Z | 2020-10-01T07:34:07.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
__title__ = "botbuilder-ai"
__version__ = (
os.environ["packageVersion"] if "packageVersion" in os.environ else "4.10.0"
)
__uri__ = "https://www.github.com/Microsoft/botbuilder-python"
__author__ = "Microsoft"
__description__ = "Microsoft Bot Framework Bot Builder"
__summary__ = "Microsoft Bot Framework Bot Builder SDK for Python."
__license__ = "MIT"
| 31.933333 | 81 | 0.736952 |
8b345fd7ca7eb452c913cf4bca6e626a6cfeaa9e | 4,652 | py | Python | samples/python/wip/python_django/13.core-bot/dialogs/booking_dialog.py | Aliacf21/BotBuilder-Samples | be48548edafd4efdc074f5a59ef2bb3af735ad9a | [
"MIT"
] | 1,998 | 2019-05-07T06:33:22.000Z | 2022-03-31T12:59:15.000Z | samples/python/wip/python_django/13.core-bot/dialogs/booking_dialog.py | Aliacf21/BotBuilder-Samples | be48548edafd4efdc074f5a59ef2bb3af735ad9a | [
"MIT"
] | 1,526 | 2020-09-05T18:57:14.000Z | 2020-12-03T01:45:40.000Z | samples/python/wip/python_django/13.core-bot/dialogs/booking_dialog.py | stevkan/BotBuilder-Samples | 75a21b412d8873906bed3460f7c5f0940a067d58 | [
"MIT"
] | 2,820 | 2016-09-21T03:47:43.000Z | 2019-05-03T15:12:46.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Flight booking dialog."""
from botbuilder.dialogs import WaterfallDialog, WaterfallStepContext, DialogTurnResult
from botbuilder.dialogs.prompts import ConfirmPrompt, TextPrompt, PromptOptions
from botbuilder.core import MessageFactory
from datatypes_date_time.timex import Timex
from .cancel_and_help_dialog import CancelAndHelpDialog
from .date_resolver_dialog import DateResolverDialog
class BookingDialog(CancelAndHelpDialog):
    """Flight booking implementation.

    Drives a waterfall that collects destination, origin and travel date,
    delegating ambiguous dates to ``DateResolverDialog``.
    """

    def __init__(self, dialog_id: str = None):
        super(BookingDialog, self).__init__(dialog_id or BookingDialog.__name__)

        self.add_dialog(TextPrompt(TextPrompt.__name__))
        # self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__))
        self.add_dialog(DateResolverDialog(DateResolverDialog.__name__))

        steps = [
            self.destination_step,
            self.origin_step,
            self.travel_date_step,
            # self.confirm_step,
            self.final_step,
        ]
        self.add_dialog(WaterfallDialog(WaterfallDialog.__name__, steps))

        self.initial_dialog_id = WaterfallDialog.__name__

    async def destination_step(
        self, step_context: WaterfallStepContext
    ) -> DialogTurnResult:
        """Prompt for destination."""
        booking_details = step_context.options

        # Already known: skip straight to the next step.
        if booking_details.destination is not None:
            return await step_context.next(booking_details.destination)

        options = PromptOptions(
            prompt=MessageFactory.text("To what city would you like to travel?")
        )
        return await step_context.prompt(TextPrompt.__name__, options)

    async def origin_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        """Prompt for origin city."""
        booking_details = step_context.options

        # Capture the response to the previous step's prompt
        booking_details.destination = step_context.result

        if booking_details.origin is not None:
            return await step_context.next(booking_details.origin)

        options = PromptOptions(
            prompt=MessageFactory.text("From what city will you be travelling?")
        )
        return await step_context.prompt(TextPrompt.__name__, options)

    async def travel_date_step(
        self, step_context: WaterfallStepContext
    ) -> DialogTurnResult:
        """Prompt for travel date.
        This will use the DATE_RESOLVER_DIALOG."""
        booking_details = step_context.options

        # Capture the results of the previous step
        booking_details.origin = step_context.result

        travel_date = booking_details.travel_date
        if travel_date and not self.is_ambiguous(travel_date):
            return await step_context.next(travel_date)

        # Missing or ambiguous date: hand off to the resolver dialog.
        return await step_context.begin_dialog(
            DateResolverDialog.__name__, travel_date)

    async def confirm_step(
        self, step_context: WaterfallStepContext
    ) -> DialogTurnResult:
        """Confirm the information the user has provided."""
        booking_details = step_context.options

        # Capture the results of the previous step
        booking_details.travel_date = step_context.result
        msg = (
            f"Please confirm, I have you traveling to: { booking_details.destination }"
            f" from: { booking_details.origin } on: { booking_details.travel_date}."
        )

        # Offer a YES/NO prompt.
        return await step_context.prompt(
            ConfirmPrompt.__name__, PromptOptions(prompt=MessageFactory.text(msg))
        )

    async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        """Complete the interaction and end the dialog."""
        if not step_context.result:
            return await step_context.end_dialog()

        booking_details = step_context.options
        booking_details.travel_date = step_context.result
        return await step_context.end_dialog(booking_details)

    def is_ambiguous(self, timex: str) -> bool:
        """Return True when the TIMEX expression has no definite date."""
        return "definite" not in Timex(timex).types
| 38.766667 | 88 | 0.655202 |
15e8bbd2852cae98927c7aff9d6c450d857e8291 | 1,229 | py | Python | migrations/versions/5488bd12618c_article_migration.py | Washikokevv27/Blog-post | 1f908d837a36ba26918dfcba04139f180f036305 | [
"MIT"
] | 1 | 2021-05-19T12:59:09.000Z | 2021-05-19T12:59:09.000Z | migrations/versions/5488bd12618c_article_migration.py | HASSAN1A/Blog | 383badf16baf8acdfa819de5237f10b9c572872e | [
"MIT"
] | null | null | null | migrations/versions/5488bd12618c_article_migration.py | HASSAN1A/Blog | 383badf16baf8acdfa819de5237f10b9c572872e | [
"MIT"
] | null | null | null | """Article Migration
Revision ID: 5488bd12618c
Revises: d30e7ee7a8d0
Create Date: 2020-10-30 18:14:15.115744
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5488bd12618c'       # this migration's unique id
down_revision = 'd30e7ee7a8d0'  # parent revision this one applies on top of
branch_labels = None            # no named branches
depends_on = None               # no cross-branch dependency
def upgrade():
    """Create the ``articles`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    article_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('artile_title', sa.String(), nullable=True),
        sa.Column('article_body', sa.String(), nullable=True),
        sa.Column('article_tag', sa.String(), nullable=True),
        sa.Column('article_cover_path', sa.String(), nullable=True),
        sa.Column('posted', sa.DateTime(), nullable=True),
        sa.Column('article_upvotes', sa.Integer(), nullable=True),
        sa.Column('article_downvotes', sa.Integer(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
    ]
    # NOTE(review): 'artile_title' looks like a typo for 'article_title',
    # but it is part of the deployed schema - renaming requires a new
    # migration, not an edit here.
    op.create_table(
        'articles',
        *article_columns,
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``articles`` table (reverse of :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('articles')
    # ### end Alembic commands ###
| 29.97561 | 65 | 0.681041 |
9997ab526d789da4691eed9ed1543279b2f3c803 | 74,817 | py | Python | zimsoap/client.py | emillion/zimsoap | d1ea2eb4d50f263c9a16e5549af03f1eff3e295e | [
"Apache-2.0"
] | 11 | 2015-02-15T23:52:56.000Z | 2021-05-06T18:05:24.000Z | zimsoap/client.py | emillion/zimsoap | d1ea2eb4d50f263c9a16e5549af03f1eff3e295e | [
"Apache-2.0"
] | 25 | 2015-01-14T11:27:51.000Z | 2016-09-07T14:06:52.000Z | zimsoap/client.py | emillion/zimsoap | d1ea2eb4d50f263c9a16e5549af03f1eff3e295e | [
"Apache-2.0"
] | 10 | 2015-08-12T14:45:17.000Z | 2021-12-08T23:40:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" Zimbra SOAP client pythonic abstraction
core classes for SOAP clients, there are also REST clients here, but used only
for pre-authentification.
"""
import datetime
try:
from urllib2 import HTTPCookieProcessor, build_opener, HTTPError
except ImportError:
from urllib.request import HTTPCookieProcessor, build_opener, HTTPError
import time
import re
import warnings
from six.moves import http_cookiejar, urllib
from six import text_type, binary_type
import pythonzimbra
import pythonzimbra.tools.auth
from pythonzimbra.communication import Communication
from zimsoap import utils
from zimsoap import zobjects
class RESTClient:
    """ Abstract class defining a REST client for the few operations the
    SOAP API cannot perform, such as admin preauth.

    Subclasses set ``TOKEN_COOKIE`` and ``isadmin``.
    """
    class NoPreauthKeyProvided(Exception):
        pass

    class RESTBackendError(Exception):
        def __init__(self, e):
            self.parent = e
            self.msg = 'Zimbra issued HTTP error : '+e.msg
            Exception.__init__(self, self.msg)

    def __init__(self, server_host, server_port=None, preauth_key=None):
        # Port is optional: omit it from the URL when not given.
        if server_port:
            base = 'https://{0}:{1}/service/preauth?'.format(
                server_host, server_port)
        else:
            base = 'https://{0}/service/preauth?'.format(server_host)
        self.preauth_url = base

        self.set_preauth_key(preauth_key)

    def set_preauth_key(self, preauth_key):
        """Register the domain preauth key used to sign token requests."""
        self.preauth_key = preauth_key

    def get_preauth_token(self, account_name, expires=0):
        """Obtain an auth token cookie for ``account_name`` via preauth.

        :raises: NoPreauthKeyProvided when no key was configured,
                 RESTBackendError on an HTTP error from Zimbra.
        """
        if not self.preauth_key:
            raise self.NoPreauthKeyProvided

        timestamp = int(time.time())*1000
        preauth_str = utils.build_preauth_str(self.preauth_key, account_name,
                                              timestamp, expires,
                                              admin=self.isadmin)

        args = urllib.parse.urlencode({
            'account': account_name,
            'by': 'name',
            'timestamp': timestamp,
            'expires': expires*1000,
            'admin': "1" if self.isadmin else "0",
            'preauth': preauth_str
        })

        cookie_jar = http_cookiejar.CookieJar()
        browser = build_opener(HTTPCookieProcessor(cookie_jar))

        try:
            url = browser.open(self.preauth_url+args)
            url.read()
            token = ""
            # The token comes back as a cookie; pick the one we expect.
            for cookie in cookie_jar:
                if cookie.name == self.TOKEN_COOKIE:
                    token = cookie.value
            url.close()
            browser.close()
            return token
        except HTTPError as e:
            raise self.RESTBackendError(e)
class AdminRESTClient(RESTClient):
    """REST preauth client bound to the admin interface (port 7071)."""
    TOKEN_COOKIE = 'ZM_ADMIN_AUTH_TOKEN'

    def __init__(self, server_host, server_port=7071, preauth_key=None):
        self.isadmin = True
        # Explicit base-class call (RESTClient is an old-style class under
        # Python 2, so super() is not usable here).
        RESTClient.__init__(self, server_host, server_port, preauth_key)
class AccountRESTClient(RESTClient):
    """REST preauth client for regular user accounts."""
    TOKEN_COOKIE = 'ZM_AUTH_TOKEN'

    def __init__(self, *args, **kwargs):
        self.isadmin = False
        RESTClient.__init__(self, *args, **kwargs)
class MailRESTClient(RESTClient):
    """REST preauth client for the mail interface."""
    TOKEN_COOKIE = 'ZM_MAIL_AUTH_TOKEN'

    def __init__(self, *args, **kwargs):
        self.isadmin = False
        RESTClient.__init__(self, *args, **kwargs)
class ZimSOAPException(Exception):
    """Base class for all ZimSOAP-specific errors."""
class ShouldAuthenticateFirst(ZimSOAPException):
    """ Raised when an operation requiring authentication is attempted
    before authentication has been performed.
    """
class DomainHasNoPreAuthKey(ZimSOAPException):
    """ Error fired when the domain has no preauth key configured.
    """
    def __init__(self, domain):
        """
        :param domain: the zobjects.Domain lacking a zimbraPreAuthKey
        """
        self.msg = '"{0}" has no preauth key, make one first, see {1}'.format(
            domain.name,
            'http://wiki.zimbra.com/wiki/Preauth'
            '#Preparing_a_domain_for_preauth'
        )
        # Pass the message up to Exception so str(e) is informative
        # (previously Exception.__init__() was called with no arguments,
        # leaving str(e) empty) - consistent with RESTBackendError.
        Exception.__init__(self, self.msg)
class ZimbraSoapServerError(ZimSOAPException):
    """Raised when the Zimbra server answers with a SOAP Fault."""
    r_soap_text = re.compile(r'<soap:Text>(.*)</soap:Text>')

    def __init__(self, request, response):
        self.request = request
        self.response = response

        # Pull the interesting bits out of the SOAP Fault envelope.
        fault = response.get_response()['Fault']
        error = fault['Detail']['Error']
        self.msg = fault['Reason']['Text']
        self.code = error['Code']
        self.trace_url = error['Trace']

    def __str__(self):
        return '{0}: {1}'.format(self.code, self.msg)
class ZimbraSoapUnexpectedResponse(ZimSOAPException):
    """Raised when the server's answer cannot be interpreted."""
    def __init__(self, request, response, msg=''):
        self.request = request
        self.response = response
        self.msg = msg

    def __str__(self):
        # Fall back to a generic message when no detail was supplied.
        return self.msg or 'Unexpected Response from Zimbra Server'
class ZimbraAbstractClient(object):
    """ Factorized abstract code for SOAP API access.

    Provides common ground for zimbraAdmin and zimbraAccount.
    Subclasses must define NAMESPACE, LOCATION and REST_PREAUTH.
    """
    def __init__(self, server_host, server_port, *args, **kwargs):
        loc = 'https://%s:%s/%s' % (server_host, server_port, self.LOCATION)
        self.com = Communication(loc)
        self._server_host = server_host
        self._server_port = server_port

        self._session = ZimbraAPISession(self)

    def request(self, name, content={}, namespace=None):
        """ Do a SOAP request and returns the result.

        Simple wrapper arround pythonzimbra functions

        :param name: ex: 'Auth' for performing an 'AuthRequest'
        :param content: a dict formatted pythonzimbra-style for request
        :param namespace: (optional), the namespace, if different from the
                          client's
        :returns: a dict with response
        :raises: ZimbraSoapServerError when the server answers with a Fault,
                 ZimbraSoapUnexpectedResponse when the expected response tag
                 is missing.
        """
        if not namespace:
            namespace = self.NAMESPACE

        req_name = name+'Request'
        resp_name = name+'Response'
        req = pythonzimbra.request_xml.RequestXml()
        resp = pythonzimbra.response_xml.ResponseXml()

        if self._session.is_logged_in():
            req.set_auth_token(self._session.authToken)

        req.add_request(req_name, content, namespace)
        try:
            self.com.send_request(req, resp)
        except HTTPError as e:
            # NOTE(review): stdlib HTTPError has no ``req``/``resp``
            # attributes; this branch presumably expects a library-specific
            # error object - confirm before relying on it.
            if resp:
                raise ZimbraSoapServerError(e.req, e.resp)
            else:
                raise

        try:
            resp_content = resp.get_response()
            return resp_content[resp_name]
        except KeyError:
            if 'Fault' in resp_content:
                raise ZimbraSoapServerError(req, resp)
            raise ZimbraSoapUnexpectedResponse(
                req, resp, 'Cannot find {} in response "{}"'.format(
                    resp_name, resp.get_response()))
        # (an unreachable trailing ``return resp_content`` was removed: both
        # paths above either return or raise)

    def request_single(self, name, content={}):
        """ Simple wrapper arround request to extract a single response

        :returns: the first tag in the response body
        """
        resp = self.request(name, content)

        # We stop on the first non-attribute (attributes are unicode/str)
        # If it's a list, we only return the first one.

        for i in resp.values():
            if type(i) == list:
                return i[0]
            elif type(i) == dict:
                return i

        return None

    def request_list(self, name, content={}):
        """ Simple wrapper arround request to extract a list of response

        :returns: the list of tags with same name or empty list
        """
        resp = self.request(name, content)

        # We stop on the first non-attribute (attributes are unicode/str)
        # If it's a list, we only return the first one.

        for i in resp.values():
            if type(i) == list:
                return i
            elif type(i) == dict:
                return [i]

        return []

    def login(self, user, password):
        """ Authenticate against the server and open a session. """
        self._session.login(user, password)

    def login_with_authToken(self, authToken, lifetime=None):
        """ Open a session from an existing authentication token.

        :param lifetime: optional token lifetime, used as session end date
        """
        self._session.import_session(authToken)
        if lifetime:
            self._session.set_end_date(int(lifetime))

    def get_logged_in_by(self, login, parent_zc, duration=0):
        """Use another client to get logged in via preauth mechanism by an
        already logged in admin.

        It required the domain of the admin user to have preAuthKey
        The preauth key cannot be created by API, do it with zmprov :
            zmprov gdpak <domain>
        """
        domain_name = zobjects.Account(name=login).get_domain()
        preauth_key = parent_zc.get_domain(domain_name)['zimbraPreAuthKey']

        rc = self.REST_PREAUTH(
            self._server_host, parent_zc._server_port, preauth_key=preauth_key)

        authToken = rc.get_preauth_token(login)

        self.login_with_authToken(authToken)

    def delegated_login(self, login, admin_zc, duration=0):
        """Use another client to get logged in via delegated_auth mechanism by
        an already logged in admin.

        :param admin_zc: An already logged-in admin client
        :type admin_zc: ZimbraAdminClient
        :param login: the user login (or email) you want to log as
        :param duration: token duration in seconds (0 lets the server decide)
        """
        # a duration of zero is interpretted literaly by the API...
        selector = zobjects.Account(name=login).to_selector()
        delegate_args = {'account': selector}
        if duration:
            # BUG FIX: this was ``delegate_args['duration': duration]`` - a
            # dict lookup with a slice object, which raises TypeError at
            # runtime. A plain assignment is what was intended.
            delegate_args['duration'] = duration
        resp = admin_zc.request('DelegateAuth', delegate_args)

        lifetime = resp['lifetime']
        authToken = resp['authToken']

        self.login_account = login
        self.login_with_authToken(authToken, lifetime)

    def is_session_valid(self):
        # some classes may need to overload it
        return self._session.is_session_valid()

    def get_host(self):
        """ :returns: the server hostname this client targets """
        return self._server_host
class ZimbraAccountClient(ZimbraAbstractClient):
    """ Specialized Soap client to access zimbraAccount webservice.

    API ref is
    http://files.zimbra.com/docs/soap_api/8.0.4/soap-docs-804/api-reference/zimbraAccount/service-summary.html
    """
    NAMESPACE = 'urn:zimbraAccount'
    LOCATION = 'service/soap'
    REST_PREAUTH = AccountRESTClient

    def __init__(self, server_host, server_port='443', *args, **kwargs):
        # Same as the abstract client; only the default port differs.
        super(ZimbraAccountClient, self).__init__(
            server_host, server_port,
            *args, **kwargs)

    # Share
    def get_share_info(self, grantee_type=None, grantee_id=None,
                       grantee_name=None, owner=None, owner_type='name'):
        """ Return the shares visible to the current account.

        :param grantee_type: optional grantee type filter
        :param grantee_id: optional grantee id filter
        :param grantee_name: optional grantee name filter
        :param owner: optional share-owner filter
        :param owner_type: selector type for ``owner`` ('name' by default)
        :returns: list of dict representing shares informations
        """
        params = {}
        # Build the (optional) grantee sub-dict incrementally, creating it
        # on first use.
        if grantee_type:
            if 'grantee' not in params.keys():
                params['grantee'] = {}
            params['grantee'].update({'type': grantee_type})
        if grantee_id:
            if 'grantee' not in params.keys():
                params['grantee'] = {}
            params['grantee'].update({'id': grantee_id})
        if grantee_name:
            if 'grantee' not in params.keys():
                params['grantee'] = {}
            params['grantee'].update({'name': grantee_name})
        if owner:
            params['owner'] = {'by': owner_type, '_content': owner}

        try:
            resp = self.request('GetShareInfo', params)
        # if user never logged in, no mailbox was created
        except ZimbraSoapServerError as e:
            if 'mailbox not found for account' in str(e):
                return []
            else:
                raise e
        # Normalize: single share comes back as a dict, several as a list.
        if resp and isinstance(resp['share'], list):
            return resp['share']
        elif resp and isinstance(resp['share'], dict):
            return [resp['share']]
        else:
            return []

    # Signature

    def create_signature(self, name, content, contenttype="text/html"):
        """
        :param: name verbose name of the signature
        :param: content content of the signature, in html or plain-text
        :param: contenttype can be "text/html" (default) or "text/plain"
        :returns: a zobjects.Signature object
        """
        s = zobjects.Signature(name=name)
        s.set_content(content, contenttype)

        resp = self.request('CreateSignature', {'signature': s.to_creator()})
        return zobjects.Signature.from_dict(resp['signature'])

    def get_signatures(self):
        """ Get all signatures for the current user

        :returns: a list of zobjects.Signature
        """
        signatures = self.request_list('GetSignatures')

        return [zobjects.Signature.from_dict(i) for i in signatures]

    def get_signature(self, signature):
        """Retrieve one signature, discriminated by name or id.

        Note that signature name is not case sensitive.

        :param: a zobjects.Signature describing the signature
               like "Signature(name='my-sig')"

        :returns: a zobjects.Signature object, filled with the signature if no
                 signature is matching, returns None.
        """
        resp = self.request_list('GetSignatures')

        # GetSignature does not allow to filter the results, so we do it by
        # hand...
        if resp and (len(resp) > 0):
            for sig_dict in resp:
                sig = zobjects.Signature.from_dict(sig_dict)
                if hasattr(signature, 'id'):
                    its_this_one = (sig.id == signature.id)
                elif hasattr(signature, 'name'):
                    # Name comparison is case-insensitive.
                    its_this_one = (sig.name.upper() == signature.name.upper())
                else:
                    raise ValueError('should mention one of id,name')
                if its_this_one:
                    return sig
        else:
            return None

    def delete_signature(self, signature):
        """ Delete a signature by name or id

        :param: signature a Signature object with name or id defined
        """
        self.request('DeleteSignature', {'signature': signature.to_selector()})

    def modify_signature(self, signature):
        """ Modify an existing signature

        Can modify the content, contenttype and name. An unset attribute will
        not delete the attribute but leave it untouched.

        :param: signature a zobject.Signature object, with modified
               content/contentype/name, the id should be present and
               valid, the name does not allows to identify the
               signature for that operation.
        """
        # if no content is specified, just use a selector (id/name)
        dic = signature.to_creator(for_modify=True)

        self.request('ModifySignature', {'signature': dic})

    def get_preferences(self):
        """ Gets all the preferences of the current user

        :returns: a dict presenting the preferences by name, values are
                 typed to str/bool/int/float regarding their content.
        """
        pref_list = self.request('GetPrefs')['pref']

        out = {}
        for pref in pref_list:
            out[pref['name']] = utils.auto_type(pref['_content'])

        return out

    def get_preference(self, pref_name):
        """ Gets a single named preference

        :returns: the value, typed to str/bool/int/float regarding its content.
        """
        resp = self.request_single('GetPrefs', {'pref': {'name': pref_name}})
        return utils.auto_type(resp['_content'])

    def create_identity(self, name, attrs=[]):
        """ Create an Identity

        :param: name identity name
        :param: attrs list of dict of attributes (zimsoap format)
        :returns: a zobjects.Identity object
        """
        params = {
            'name': name,
            'a': attrs
        }
        resp = self.request('CreateIdentity', {'identity': params})
        return zobjects.Identity.from_dict(resp['identity'])

    def get_identities(self, identity=None, attrs=None):
        """ Get identities matching name and attrs
        of the user, as a list

        :param: zobjects.Identity or identity name (string)
        :param: attrs dict of attributes to return only identities matching
        :returns: list of zobjects.Identity
        """
        resp = self.request('GetIdentities')

        if 'identity' in resp:
            identities = resp['identity']
            # A single identity comes back as a bare dict: normalize.
            if type(identities) != list:
                identities = [identities]

            if identity or attrs:
                wanted_identities = []

                for u_identity in [
                        zobjects.Identity.from_dict(i) for i in identities]:
                    if identity:
                        # Name match short-circuits: at most one identity
                        # can match a given name.
                        if isinstance(identity, zobjects.Identity):
                            if u_identity.name == identity.name:
                                return [u_identity]
                        else:
                            if u_identity.name == identity:
                                return [u_identity]

                    elif attrs:
                        for attr, value in attrs.items():
                            if (attr in u_identity._a_tags and
                                    u_identity._a_tags[attr] == value):
                                wanted_identities.append(u_identity)
                return wanted_identities
            else:
                return [zobjects.Identity.from_dict(i) for i in identities]
        else:
            return []

    def modify_identity(self, identity, **kwargs):
        """ Modify some attributes of an identity or its name.

        :param: identity a zobjects.Identity with `id` set (mandatory). Also
               set items you want to modify/set and/or the `name` attribute to
               rename the identity.
               Can also take the name in string and then attributes to modify
        :returns: zobjects.Identity object
        """
        if isinstance(identity, zobjects.Identity):
            self.request('ModifyIdentity', {'identity': identity._full_data})
            return self.get_identities(identity=identity.name)[0]
        else:
            # Called with a plain name: build the attribute list from kwargs.
            attrs = []
            for attr, value in kwargs.items():
                attrs.append({
                    'name': attr,
                    '_content': value
                })
            self.request('ModifyIdentity', {
                'identity': {
                    'name': identity,
                    'a': attrs
                }
            })
            return self.get_identities(identity=identity)[0]

    def delete_identity(self, identity):
        """ Delete an identity from its name or id

        :param: a zobjects.Identity object with name or id defined or a string
               of the identity's name
        """
        if isinstance(identity, zobjects.Identity):
            self.request(
                'DeleteIdentity', {'identity': identity.to_selector()})
        else:
            self.request('DeleteIdentity', {'identity': {'name': identity}})

    # Whitelists and Blacklists

    def get_white_black_lists(self):
        # Returns the raw GetWhiteBlackList response dict.
        return self.request('GetWhiteBlackList')

    def add_to_blacklist(self, values):
        """ Add each address in ``values`` to the blacklist ('+' op). """
        param = {'blackList': {'addr': []}}
        for value in values:
            param['blackList']['addr'].append({'op': '+', '_content': value})

        self.request('ModifyWhiteBlackList', param)

    def remove_from_blacklist(self, values):
        """ Remove each address in ``values`` from the blacklist ('-' op). """
        param = {'blackList': {'addr': []}}
        for value in values:
            param['blackList']['addr'].append({'op': '-', '_content': value})

        self.request('ModifyWhiteBlackList', param)

    def add_to_whitelist(self, values):
        """ Add each address in ``values`` to the whitelist ('+' op). """
        param = {'whiteList': {'addr': []}}
        for value in values:
            param['whiteList']['addr'].append({'op': '+', '_content': value})

        self.request('ModifyWhiteBlackList', param)

    def remove_from_whitelist(self, values):
        """ Remove each address in ``values`` from the whitelist ('-' op). """
        param = {'whiteList': {'addr': []}}
        for value in values:
            param['whiteList']['addr'].append({'op': '-', '_content': value})

        self.request('ModifyWhiteBlackList', param)
class ZimbraAdminClient(ZimbraAbstractClient):
""" Specialized Soap client to access zimbraAdmin webservice, handling auth.
API ref is
http://files.zimbra.com/docs/soap_api/8.0.4/soap-docs-804/api-reference/zimbraAdmin/service-summary.html
"""
NAMESPACE = 'urn:zimbraAdmin'
LOCATION = 'service/admin/soap'
REST_PREAUTH = AdminRESTClient
def __init__(self, server_host, server_port='7071',
*args, **kwargs):
super(ZimbraAdminClient, self).__init__(
server_host, server_port,
*args, **kwargs)
def get_quota_usage(self, domain=None, all_servers=None,
limit=None, offset=None, sort_by=None,
sort_ascending=None, refresh=None):
content = {}
if domain:
content['domain'] = domain
if all_servers:
content['allServers'] = all_servers
if limit:
content['limit'] = limit
if sort_by:
content['sortBy'] = sort_by
if sort_ascending:
content['sortAscending'] = sort_ascending
if refresh:
content['refresh'] = refresh
resp = self.request_list('GetQuotaUsage', content)
return resp
def get_all_config(self):
resp = self.request_list('GetAllConfig')
config = {}
for attr in resp:
# If there is multiple attributes with the same name
if attr['n'] in config:
if isinstance(config[attr['n']], str):
config[attr['n']] = [config[attr['n']], attr['_content']]
else:
config[attr['n']].append(attr['_content'])
else:
config[attr['n']] = attr['_content']
return config
def get_config(self, attr):
resp = self.request_list('GetConfig', {'a': {'n': attr}})
if len(resp) > 1:
config = {attr: []}
for a in resp:
config[attr].append(a['_content'])
elif len(resp) == 1:
config = {attr: resp[0]['_content']}
else:
raise KeyError('{} not found'.format(attr))
return config
def modify_config(self, attr, value):
self.request('ModifyConfig', {
'a': {
'n': attr,
'_content': value
}})
if attr[0] == '-' or attr[0] == '+':
attr = attr[1::]
return self.get_config(attr)
    def _get_or_fetch_id(self, zobj, fetch_func):
        """ Returns the ID of a Zobject wether it's already known or not

        If zobj.id is not known (frequent if zobj is a selector), fetches first
        the object and then returns its ID.

        :type zobj:       a zobject subclass
        :type fetch_func: the function to fetch the zobj from server if its id
                          is undefined.
        :returns:         the object id
        :raises: ValueError when even the fetched object carries no id.
        """
        try:
            return zobj.id
        except AttributeError:
            # zobj is only a selector: fetch the full object, which should
            # carry an id; give up if even that one has none.
            try:
                return fetch_func(zobj).id
            except AttributeError:
                raise ValueError('Unqualified Resource')
def get_all_domains(self):
resp = self.request_list('GetAllDomains')
return [zobjects.Domain.from_dict(d) for d in resp]
def get_all_accounts(self, domain=None, server=None,
include_system_accounts=False,
include_admin_accounts=True,
include_virtual_accounts=True):
selectors = {}
if domain:
selectors['domain'] = domain.to_selector()
if server:
selectors['server'] = server.to_selector()
dict_accounts = self.request_list('GetAllAccounts', selectors)
accounts = []
for i in dict_accounts:
account = zobjects.Account.from_dict(i)
if not (
not include_system_accounts and account.is_system() or
not include_admin_accounts and account.is_admin() or
not include_virtual_accounts and account.is_virtual()
):
accounts.append(account)
return accounts
# Calendar resources
def get_all_calendar_resources(self, domain=None, server=None,):
selectors = {}
if domain:
selectors['domain'] = domain.to_selector()
if server:
selectors['server'] = server.to_selector()
dict_calres = self.request_list('GetAllCalendarResources', selectors)
resources = []
for i in dict_calres:
calres = zobjects.CalendarResource.from_dict(i)
resources.append(calres)
return resources
def get_calendar_resource(self, cal_resource):
""" Fetches an calendar resource with all its attributes.
:param account: a CalendarResource, with either id or
name attribute set.
:returns: a CalendarResource object, filled.
"""
selector = cal_resource.to_selector()
resp = self.request_single('GetCalendarResource',
{'calresource': selector})
return zobjects.CalendarResource.from_dict(resp)
def create_calendar_resource(self, name, password=None, attrs={}):
"""
:param: attrs a dict of attributes, must specify the displayName and
zimbraCalResType
"""
args = {
'name': name,
'a': [{'n': k, '_content': v} for k, v in attrs.items()]
}
if password:
args['password'] = password
resp = self.request_single('CreateCalendarResource', args)
return zobjects.CalendarResource.from_dict(resp)
def delete_calendar_resource(self, calresource):
self.request('DeleteCalendarResource', {
'id': self._get_or_fetch_id(calresource,
self.get_calendar_resource),
})
def modify_calendar_resource(self, calres, attrs):
"""
:param calres: a zobjects.CalendarResource
:param attrs: a dictionary of attributes to set ({key:value,...})
"""
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
self.request('ModifyCalendarResource', {
'id': self._get_or_fetch_id(
calres, self.get_calendar_resource),
'a': attrs
})
def rename_calendar_resource(self, r_description, new_r_name):
"""
:param r_description : a CalendarResource specifying either :
- id: the ressource ID
- r_description: the name of the ressource
:param new_r_name: new name of the list
:return: a zobjects.CalendarResource
"""
resp = self.request('RenameCalendarResource', {
'id': self._get_or_fetch_id(r_description,
self.get_calendar_resource),
'newName': new_r_name
})
return zobjects.CalendarResource.from_dict(resp['calresource'])
# Mailbox stats
def get_mailbox_stats(self):
""" Get global stats about mailboxes
Parses <stats numMboxes="6" totalSize="141077"/>
:returns: dict with stats
"""
resp = self.request_single('GetMailboxStats')
ret = {}
for k, v in resp.items():
ret[k] = int(v)
return ret
def count_account(self, domain):
""" Count the number of accounts for a given domain, sorted by cos
:returns: a list of pairs <ClassOfService object>,count
"""
selector = domain.to_selector()
cos_list = self.request_list('CountAccount', {'domain': selector})
ret = []
for i in cos_list:
count = int(i['_content'])
ret.append((zobjects.ClassOfService.from_dict(i), count))
return list(ret)
def get_all_mailboxes(self):
resp = self.request_list('GetAllMailboxes')
return [zobjects.Mailbox.from_dict(i) for i in resp]
def get_account_mailbox(self, account_id):
""" Returns a Mailbox corresponding to an account. Usefull to get the
size (attribute 's'), and the mailbox ID, returns nothing appart from
that.
"""
selector = zobjects.Mailbox(id=account_id).to_selector()
resp = self.request_single('GetMailbox', {'mbox': selector})
return zobjects.Mailbox.from_dict(resp)
def get_account_cos(self, account):
""" Fetch the cos for a given account
Quite different from the original request which returns COS + various
URL + COS + zimbraMailHost... But all other informations are accessible
through get_account.
:type account: zobjects.Account
:rtype: zobjects.COS
"""
resp = self.request(
'GetAccountInfo', {'account': account.to_selector()})
return zobjects.COS.from_dict(resp['cos'])
    def create_domain(self, name):
        """ Create a new domain.

        :param name: A string, NOT a zObject
        :return: a zobjects.Domain
        """
        args = {'name': name}
        resp = self.request_single('CreateDomain', args)
        return zobjects.Domain.from_dict(resp)
    def delete_domain(self, domain):
        """ Delete a domain (fails server-side if it still has accounts).

        :param domain: a zobjects.Domain (id fetched if not set)
        """
        self.request('DeleteDomain', {
            'id': self._get_or_fetch_id(domain, self.get_domain)
        })
    def delete_domain_forced(self, domain):
        """ Delete a domain after removing everything it contains:
        account aliases, accounts, calendar resources and distribution
        lists. The order matters: aliases must go before their accounts.

        :param domain: a zobjects.Domain with the `name` attribute set
        """
        # Remove aliases and accounts
        # we take all accounts because there might be an alias
        # for an account of an other domain
        accounts = self.get_all_accounts()
        for a in accounts:
            if 'zimbraMailAlias' in a._a_tags:
                aliases = a._a_tags['zimbraMailAlias']
                # Zimbra returns a bare string when there is a single alias,
                # a list otherwise.
                if isinstance(aliases, list):
                    for alias in aliases:
                        if alias.split('@')[1] == domain.name:
                            self.remove_account_alias(a, alias)
                else:
                    if aliases.split('@')[1] == domain.name:
                        self.remove_account_alias(a, aliases)
            if a.name.split('@')[1] == domain.name:
                self.delete_account(a)
        # Remove resources
        resources = self.get_all_calendar_resources(domain=domain)
        for r in resources:
            self.delete_calendar_resource(r)
        # Remove distribution lists
        dls = self.get_all_distribution_lists(domain)
        for dl in dls:
            self.delete_distribution_list(dl)
        self.request('DeleteDomain', {
            'id': self._get_or_fetch_id(domain, self.get_domain)
        })
    def get_domain(self, domain):
        """ Fetch a domain with all its attributes.

        :param domain: a zobjects.Domain used as a selector (id or name)
        :rtype: zobjects.Domain
        """
        selector = domain.to_selector()
        resp = self.request_single('GetDomain', {'domain': selector})
        return zobjects.Domain.from_dict(resp)
def modify_domain(self, domain, attrs):
"""
:type domain: a zobjects.Domain
:param attrs: attributes to modify
:type attrs dict
"""
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
self.request('ModifyDomain', {
'id': self._get_or_fetch_id(domain, self.get_domain),
'a': attrs
})
    def add_distribution_list_alias(self, distribution_list, alias):
        """ Add an email alias to a distribution list.

        :param distribution_list: a distribution list object to be used as
            a selector
        :param alias: email alias address
        :returns: None (the API itself returns nothing)
        """
        self.request('AddDistributionListAlias', {
            'id': self._get_or_fetch_id(
                distribution_list, self.get_distribution_list
            ),
            'alias': alias,
        })
    def remove_distribution_list_alias(self, distribution_list, alias):
        """ Remove an email alias from a distribution list.

        :param distribution_list: an distribution list object to be used as
            a selector
        :param alias: email alias address
        :returns: None (the API itself returns nothing)
        """
        self.request('RemoveDistributionListAlias', {
            'id': self._get_or_fetch_id(
                distribution_list, self.get_distribution_list
            ),
            'alias': alias,
        })
    def get_all_distribution_lists(self, domain=None):
        """ List all distribution lists, optionally restricted to a domain.

        :param domain: a zobjects.Domain to limit the search, or None for all
        :returns: list of zobjects.DistributionList
        """
        if domain:
            selectors = {'domain': domain.to_selector()}
        else:
            selectors = {}
        got = self.request_list('GetAllDistributionLists', selectors)
        return [zobjects.DistributionList.from_dict(i) for i in got]
    def get_distribution_list(self, dl_description):
        """ Fetch a distribution list with its attributes and members.

        :param: dl_description : a DistributionList specifying either :
                   - id: the account_id
                   - name: the name of the list
        :returns: the DistributionList
        """
        selector = dl_description.to_selector()
        resp = self.request_single('GetDistributionList', {'dl': selector})
        dl = zobjects.DistributionList.from_dict(resp)
        return dl
    def create_distribution_list(self, name, dynamic=0):
        """ Create a new distribution list.

        :param name: A string, NOT a zObject
        :param dynamic: 0 for a static list, 1 for a dynamic one
        :return: a zobjects.DistributionList
        """
        # 'dynamic' is serialized as a string for the XML payload.
        args = {'name': name, 'dynamic': str(dynamic)}
        resp = self.request_single('CreateDistributionList', args)
        return zobjects.DistributionList.from_dict(resp)
    def modify_distribution_list(self, dl_description, attrs):
        """ Set attributes on an existing distribution list.

        :param dl_description : a DistributionList specifying either :
                   - id: the dl_list_id
                   - dl_description: the name of the list
        :param attrs : a dictionary of attributes to set ({key:value,...})
        """
        # Convert the mapping into Zimbra attribute nodes.
        attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
        self.request('ModifyDistributionList', {
            'id': self._get_or_fetch_id(dl_description,
                                        self.get_distribution_list),
            'a': attrs
        })
    def rename_distribution_list(self, dl_description, new_dl_name):
        """ Rename a distribution list.

        :param dl_description : a DistributionList specifying either :
                   - id: the dl_list_id
                   - dl_description: the name of the list
        :param new_dl_name: new name of the list
        :return: a zobjects.DistributionList
        """
        resp = self.request('RenameDistributionList', {
            'id': self._get_or_fetch_id(dl_description,
                                        self.get_distribution_list),
            'newName': new_dl_name
        })
        return zobjects.DistributionList.from_dict(resp['dl'])
    def delete_distribution_list(self, dl):
        """ Delete a distribution list.

        :param dl: a zobjects.DistributionList used as selector
        """
        self.request('DeleteDistributionList', {
            'id': self._get_or_fetch_id(dl, self.get_distribution_list)
        })
    def add_distribution_list_member(self, distribution_list, members):
        """ Adds members to the distribution list

        :type distribution_list: zobjects.DistributionList
        :param members: list of email addresses you want to add
        :type members: list of str
        :returns: the raw SOAP response dict
        """
        # Each member address becomes its own <dlm> text node.
        members = [{'_content': v} for v in members]
        resp = self.request_single('AddDistributionListMember', {
            'id': self._get_or_fetch_id(distribution_list,
                                        self.get_distribution_list),
            'dlm': members
        })
        return resp
    def remove_distribution_list_member(self, distribution_list, members):
        """ Removes members from the distribution list

        :type distribution_list: zobjects.DistributionList
        :param members: list of email addresses you want to remove
        :type members: list of str
        :returns: the raw SOAP response dict
        """
        members = [{'_content': v} for v in members]
        resp = self.request_single('RemoveDistributionListMember', {
            'id': self._get_or_fetch_id(distribution_list,
                                        self.get_distribution_list),
            'dlm': members
        })
        return resp
    def get_account(self, account):
        """ Fetches an account with all its attributes.

        :param account: an account object, with either id or name attribute set
        :returns: a zobjects.Account object, filled.
        """
        selector = account.to_selector()
        resp = self.request_single('GetAccount', {'account': selector})
        return zobjects.Account.from_dict(resp)
    def rename_account(self, account, new_name):
        """ Rename an account.

        :param account: a zobjects.Account
        :param new_name: a string of new account name
        """
        self.request('RenameAccount', {
            'id': self._get_or_fetch_id(account, self.get_account),
            'newName': new_name
        })
    def modify_account(self, account, attrs):
        """ Set attributes on an existing account.

        :param account: a zobjects.Account
        :param attrs : a dictionary of attributes to set ({key:value,...})
        """
        attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
        self.request('ModifyAccount', {
            'id': self._get_or_fetch_id(account, self.get_account),
            'a': attrs
        })
    def set_password(self, account, password):
        """ Set a new password for an account.

        :param account: a zobjects.Account; its `id` attribute must be set
            (no lazy fetch here, unlike other account methods)
        :param password: new password to set
        """
        self.request('SetPassword', {
            'id': account.id,
            'newPassword': password
        })
def create_account(self, email, password=None, attrs={}):
"""
:param email: Full email with domain eg: login@domain.com
:param password: Password for local auth
:param attrs: a dictionary of attributes to set ({key:value,...})
:returns: the created zobjects.Account
"""
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
params = {'name': email, 'a': attrs}
if password:
params['password'] = password
resp = self.request_single('CreateAccount', params)
return zobjects.Account.from_dict(resp)
    def delete_account(self, account):
        """ Delete an account.

        :param account: an account object to be used as a selector
        """
        self.request('DeleteAccount', {
            'id': self._get_or_fetch_id(account, self.get_account),
        })
    def add_account_alias(self, account, alias):
        """ Add an email alias to an account.

        :param account: an account object to be used as a selector
        :param alias: email alias address
        :returns: None (the API itself returns nothing)
        """
        self.request('AddAccountAlias', {
            'id': self._get_or_fetch_id(account, self.get_account),
            'alias': alias,
        })
    def remove_account_alias(self, account, alias):
        """ Remove an email alias from an account.

        :param account: an account object to be used as a selector
        :param alias: email alias address
        :returns: None (the API itself returns nothing)
        """
        self.request('RemoveAccountAlias', {
            'id': self._get_or_fetch_id(account, self.get_account),
            'alias': alias,
        })
    def mk_auth_token(self, account, admin=False, duration=0):
        """ Builds an authentification token, using preauth mechanism.

        See http://wiki.zimbra.com/wiki/Preauth

        :param duration: in seconds defaults to 0, which means "use account
            default"
        :param account: an account object to be used as a selector
        :param admin: whether to build an admin token
        :returns: the auth string
        :raises DomainHasNoPreAuthKey: if zimbraPreAuthKey is not set on
            the account's domain
        """
        domain = account.get_domain()
        try:
            preauth_key = self.get_domain(domain)['zimbraPreAuthKey']
        except KeyError:
            raise DomainHasNoPreAuthKey(domain)
        # Zimbra preauth wants milliseconds.
        timestamp = int(time.time())*1000
        expires = duration*1000
        return utils.build_preauth_str(preauth_key, account.name, timestamp,
                                       expires, admin)
    def delegate_auth(self, account):
        """ Uses the DelegateAuthRequest to provide a ZimbraAccountClient
        already logged with the provided account.

        It's the mechanism used with the "view email" button in admin console.

        .. deprecated:: use delegated_login() on the child client instead.

        :param account: an account object used as selector
        :returns: a logged-in ZimbraAccountClient
        """
        warnings.warn("delegate_auth() on parent client is deprecated,"
                      " use delegated_login() on child client instead",
                      DeprecationWarning)
        selector = account.to_selector()
        resp = self.request('DelegateAuth', {'account': selector})
        lifetime = resp['lifetime']
        authToken = resp['authToken']
        # Build a fresh account-level client on the same server and import
        # the delegated token into it.
        zc = ZimbraAccountClient(self._server_host)
        zc.login_with_authToken(authToken, lifetime)
        return zc
    def get_account_authToken(self, account=None, account_name=''):
        """ Use the DelegateAuthRequest to provide a token and its lifetime
        for the provided account.

        If account is provided we use it,
        else we retrieve the account from the provided account_name.

        :returns: (authToken, lifetime) tuple, lifetime as int seconds
        """
        if account is None:
            account = self.get_account(zobjects.Account(name=account_name))
        selector = account.to_selector()
        resp = self.request('DelegateAuth', {'account': selector})
        authToken = resp['authToken']
        lifetime = int(resp['lifetime'])
        return authToken, lifetime
    def delegated_login(self, *args, **kwargs):
        """ Disabled on this client: the zimbraAdmin service cannot be
        logged into via delegated auth. """
        raise NotImplementedError(
            'zimbraAdmin do not support to get logged-in by delegated auth')
    def search_directory(self, **kwargs):
        """
        SearchAccount is deprecated, using SearchDirectory

        :param query: Query string - should be an LDAP-style filter
            string (RFC 2254)
        :param limit: The maximum number of accounts to return
            (0 is default and means all)
        :param offset: The starting offset (0, 25, etc)
        :param domain: The domain name to limit the search to
        :param applyCos: applyCos - Flag whether or not to apply the COS
            policy to account. Specify 0 (false) if only requesting attrs that
            aren't inherited from COS
        :param applyConfig: whether or not to apply the global config attrs to
            account. specify 0 (false) if only requesting attrs that aren't
            inherited from global config
        :param sortBy: Name of attribute to sort on. Default is the account
            name.
        :param types: Comma-separated list of types to return. Legal values
            are: accounts|distributionlists|aliases|resources|domains|coses
            (default is accounts)
        :param sortAscending: Whether to sort in ascending order. Default is
            1 (true)
        :param countOnly: Whether response should be count only. Default is
            0 (false)
        :param attrs: Comma-seperated list of attrs to return ("displayName",
            "zimbraId", "zimbraAccountStatus")
        :return: dict of list of "account" "alias" "dl" "calresource" "domain"
            "cos"
        """
        search_response = self.request('SearchDirectory', kwargs)
        result = {}
        # Map each response key to the constructor of the matching zobject.
        items = {
            "account": zobjects.Account.from_dict,
            "domain": zobjects.Domain.from_dict,
            "dl": zobjects.DistributionList.from_dict,
            "cos": zobjects.COS.from_dict,
            "calresource": zobjects.CalendarResource.from_dict
            # "alias": TODO,
        }
        for obj_type, func in items.items():
            if obj_type in search_response:
                # Zimbra returns a dict for a single hit, a list otherwise.
                if isinstance(search_response[obj_type], list):
                    result[obj_type] = [
                        func(v) for v in search_response[obj_type]]
                else:
                    result[obj_type] = func(search_response[obj_type])
        return result
class ZimbraMailClient(ZimbraAbstractClient):
    """ Specialized Soap client to access zimbraMail webservice.

    API ref is
    http://files.zimbra.com/docs/soap_api/8.0.4/soap-docs-804/api-reference/zimbraMail/service-summary.html
    """
    # SOAP namespace and endpoint path used for all requests of this client.
    NAMESPACE = 'urn:zimbraMail'
    LOCATION = 'service/soap'
    # REST client class used for preauth-based REST access.
    REST_PREAUTH = MailRESTClient
    def __init__(self, server_host, server_port='443', *args, **kwargs):
        # Delegates connection setup entirely to the abstract base class.
        super(ZimbraMailClient, self).__init__(
            server_host, server_port,
            *args, **kwargs)
    def _return_comma_list(self, l):
        """ get a list and return a string with comma separated list values
        Examples ['to', 'ta'] will return 'to,ta'.

        Strings and ints are passed through unchanged; anything else that
        is not a list raises TypeError.
        """
        if isinstance(l, (text_type, int)):
            return l
        if not isinstance(l, list):
            raise TypeError(l, ' should be a list of integers, \
not {0}'.format(type(l)))
        str_ids = ','.join(str(i) for i in l)
        return str_ids
    def is_session_valid(self):
        """ Check the current auth token against the server.

        :returns: True if the token is accepted, False otherwise
        """
        # zimbraMail do not have by itself an Auth request, so create a
        # zimbraAccount client for that check.
        zac = ZimbraAccountClient(self._server_host, self._server_port)
        zac._session.import_session(self._session.authToken)
        return zac.is_session_valid()
    def login(self, user, password):
        """ Authenticate against the server with user/password.

        :param user: account name (user@domain)
        :param password: clear-text password
        """
        # !!! We need to authenticate with the 'urn:zimbraAccount' namespace
        self._session.login(user, password, 'urn:zimbraAccount')
# Permissions
def get_permissions(self, rights=[]):
"""
:param rights: list of rights. Possible values : 'sendAs',
'sendOnBehalfOf'
:return: dict with key ace with a list of rights
"""
aces = []
if rights:
for right in rights:
ace = self.request(
'GetPermission',
{'ace': {
'right': {'_content': right}}})
if 'ace' in ace.keys() and isinstance(ace, list):
aces.extend(ace['ace'])
elif 'ace' in ace.keys() and isinstance(ace, dict):
aces.append(ace['ace'])
return {'ace': aces}
else:
ace = self.request('GetPermission', {})
if 'ace' in ace.keys() and isinstance(ace['ace'], list):
return ace
elif 'ace' in ace.keys() and isinstance(ace['ace'], dict):
return ace
else:
return {'ace': []}
def grant_permission(self, right, zid=None, grantee_name=None, gt='usr'):
params = {'ace': {
'gt': gt,
'right': right
}}
if grantee_name:
params['ace']['d'] = grantee_name
elif zid:
params['ace']['zid'] = zid
else:
raise TypeError('at least zid or grantee_name should be set')
return self.request('GrantPermission', params)
def revoke_permission(self, right, zid=None, grantee_name=None, gt='usr'):
params = {'ace': {
'gt': gt,
'right': right
}}
if grantee_name:
params['ace']['d'] = grantee_name
elif zid:
params['ace']['zid'] = zid
else:
raise TypeError('missing zid or grantee_name')
self.request('RevokePermission', params)
# Ranking action
def reset_ranking(self):
"""Reset the contact ranking table for the account
"""
self.request('RankingAction', {'action': {'op': 'reset'}})
def delete_ranking(self, email):
"""Delete a specific address in the auto-completion of the users
:param email: the address to remove
"""
self.request('RankingAction', {'action': {'op': 'reset',
'email': email
}})
    # Task
    def create_task(self, subject, desc):
        """Create a task

        :param subject: the task's subject
        :param desc: the task's content in plain-text
        :returns: the task's id
        """
        # The zobjects.Task helper builds the full CreateTask payload.
        task = zobjects.Task()
        task_creator = task.to_creator(subject, desc)
        resp = self.request('CreateTask', task_creator)
        task_id = resp['calItemId']
        return task_id
    def get_task(self, task_id):
        """Retrieve one task, discriminated by id.

        :param: task_id: the task id

        :returns: a zobjects.Task object ;
                  if no task is matching, returns None.
        """
        task = self.request_single('GetTask', {'id': task_id})
        if task:
            return zobjects.Task.from_dict(task)
        else:
            return None
    # Contact
    def create_contact(self, attrs, members=None, folder_id=None, tags=None):
        """Create a contact

        Does not include VCARD nor group membership yet

        XML example :
        <cn l="7> ## ContactSpec
            <a n="lastName">MARTIN</a>
            <a n="firstName">Pierre</a>
            <a n="email">pmartin@example.com</a>
        </cn>
        Which would be in zimsoap : attrs = { 'lastname': 'MARTIN',
                                              'firstname': 'Pierre',
                                              'email': 'pmartin@example.com' }
        folder_id = 7

        :param folder_id: a string of the ID's folder where to create
            contact. Default '7'
        :param tags: comma-separated list of tag names
        :param attrs: a dictionary of attributes to set ({key:value,...}). At
            least one attr is required
        :returns: the created zobjects.Contact
        """
        cn = {}
        if folder_id:
            cn['l'] = str(folder_id)
        if tags:
            # Accept either a list of tags or an already-joined string.
            tags = self._return_comma_list(tags)
            cn['tn'] = tags
        if members:
            cn['m'] = members
        attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
        cn['a'] = attrs
        resp = self.request_single('CreateContact', {'cn': cn})
        return zobjects.Contact.from_dict(resp)
    def get_contacts(self, ids=None, **kwargs):
        """ Get all contacts for the current user

        :param l: string of a folder id
        :param ids: An coma separated list of contact's ID to look for

        :returns: a list of zobjects.Contact
        """
        params = {}
        if ids:
            ids = self._return_comma_list(ids)
            params['cn'] = {'id': ids}
        for key, value in kwargs.items():
            # 'a'/'ma' select by attribute name; everything else is passed
            # through verbatim as a request parameter.
            if key in ['a', 'ma']:
                params[key] = {'n': value}
            else:
                params[key] = value
        contacts = self.request_list('GetContacts', params)
        return [zobjects.Contact.from_dict(i) for i in contacts]
    def modify_contact(self, contact_id, attrs=None, members=None, tags=None):
        """ Modify an existing contact.

        :param contact_id: zimbra id of the targetd contact
        :param attrs : a dictionary of attributes to set ({key:value,...})
        :param members: list of dict representing contacts and
            operation (+|-|reset)
        :param tags: comma-separated list of tag names
        :returns: the modified zobjects.Contact
        """
        cn = {}
        if tags:
            tags = self._return_comma_list(tags)
            cn['tn'] = tags
        if members:
            cn['m'] = members
        if attrs:
            attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
            cn['a'] = attrs
        cn['id'] = contact_id
        resp = self.request_single('ModifyContact', {'cn': cn})
        return zobjects.Contact.from_dict(resp)
    def delete_contacts(self, ids):
        """ Delete selected contacts for the current user

        :param ids: list of ids
        """
        str_ids = self._return_comma_list(ids)
        self.request('ContactAction', {'action': {'op': 'delete',
                                                  'id': str_ids}})
def create_group(self, attrs, members, folder_id=None, tags=None):
"""Create a contact group
XML example :
<cn l="7> ## ContactSpec
<a n="lastName">MARTIN</a>
<a n="firstName">Pierre</a>
<a n="email">pmartin@example.com</a>
</cn>
Which would be in zimsoap : attrs = { 'lastname': 'MARTIN',
'firstname': 'Pierre',
'email': 'pmartin@example.com' }
folder_id = 7
:param folder_id: a string of the ID's folder where to create
contact. Default '7'
:param tags: comma-separated list of tag names
:param members: list of dict. Members with their type. Example
{'type': 'I', 'value': 'manual_addresse@example.com'}.
:param attrs: a dictionary of attributes to set ({key:value,...}). At
least one attr is required
:returns: the created zobjects.Contact
"""
cn = {}
cn['m'] = members
if folder_id:
cn['l'] = str(folder_id)
if tags:
cn['tn'] = tags
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
attrs.append({'n': 'type', '_content': 'group'})
cn['a'] = attrs
resp = self.request_single('CreateContact', {'cn': cn})
return zobjects.Contact.from_dict(resp)
    # Folder
    def create_folder(self, name, parent_id='1'):
        """ Create a folder.

        :param name: folder name
        :param parent_id: id of the parent folder; '1' is the mailbox root
        :returns: the created folder dict from the SOAP response
        """
        params = {'folder': {
            'name': name,
            'l': parent_id
        }}
        return self.request('CreateFolder', params)['folder']
    def create_mountpoint(self, **kwargs):
        """ Create mountpoint according to attributes definied in soap
        documentation.

        :returns: the created mountpoint ('link') dict from the response
        """
        params = {'link': kwargs}
        return self.request('CreateMountpoint', params)['link']
    def delete_folders(self, paths=None, folder_ids=None, f_type='folder'):
        """ Delete folders by ids or by paths.

        :param folder_ids: list of ids
        :param paths: list of folder's paths
        :param f_type: response key used when resolving paths;
            'folder' for regular folders, 'link' for mountpoints
        """
        if folder_ids:
            f_ids = folder_ids
        elif paths:
            # Resolve each path to its folder id first.
            f_ids = []
            for path in paths:
                folder = self.get_folder(path=path)
                f_ids.append(folder[f_type]['id'])
        comma_ids = self._return_comma_list(f_ids)
        params = {'action': {
            'id': comma_ids,
            'op': 'delete'
        }}
        self.request('FolderAction', params)
    def delete_mountpoints(self, paths=None, folder_ids=None):
        """ Delete mountpoints by ids or by paths.

        :param folder_ids: list of ids
        :param paths: list of folder's paths
        """
        self.delete_folders(paths=paths, folder_ids=folder_ids, f_type='link')
    def get_mountpoint(self, mp_id=None, path=None, uuid=None):
        """ Fetch a mountpoint; thin alias over get_folder. """
        return self.get_folder(f_id=mp_id, path=path, uuid=uuid)
def get_folder(self, f_id=None, path=None, uuid=None):
request = {'folder': {}}
if f_id:
request['folder']['l'] = str(f_id)
if uuid:
request['folder']['uuid'] = str(uuid)
if path:
request['folder']['path'] = str(path)
return self.request('GetFolder', request)
def get_folder_grant(self, **kwargs):
folder = self.get_folder(**kwargs)
if 'acl' in folder['folder']:
return folder['folder']['acl']
else:
return None
    def modify_folder_grant(
        self,
        folder_ids,
        perm,
        zid=None,
        grantee_name=None,
        gt='usr',
        flags=None
    ):
        """ Grant (or revoke, with perm='none') rights on folders.

        :param folder_ids: list of ids
        :param perm: permission to grant to the user on folder(s);
            'none' revokes the grant instead
        :param zid: id of user to grant rights
        :param grantee_name: email address of user to grant rights
        :param flags: folder's flags
        """
        f_ids = self._return_comma_list(folder_ids)
        params = {'action': {
            'id': f_ids,
            'op': 'grant',
            'grant': {'perm': perm, 'gt': gt}
        }}
        if perm == 'none':
            # Revocation uses the '!grant' op and addresses the grantee by
            # zid directly on the action node.
            params['action']['op'] = '!grant'
            params['action']['zid'] = zid
            # Remove key to raise Zimsoap exception if no zid provided
            if not zid:
                params['action'].pop('zid', None)
        if grantee_name:
            params['action']['grant']['d'] = grantee_name
        elif zid:
            params['action']['grant']['zid'] = zid
        else:
            raise TypeError('missing zid or grantee_name')
        self.request('FolderAction', params)
    def modify_folders(
        self, folder_ids, color=None, flags=None, parent_folder=None,
        name=None, num_days=None, rgb=None, tags=None, view=None
    ):
        """ Update properties of one or several folders.

        :param folder_ids: list of ids
        :param color: color numeric; range 0-127; defaults to 0 if not present;
            client can display only 0-7
        :param flags: flags
        :param parent_folder: id of new location folder
        :param name: new name for the folder
        :param tags: list of tag names
        :param view: list of tag view

        NOTE(review): num_days and rgb are accepted but never forwarded to
        the request — confirm whether they should be wired up or dropped.
        """
        f_ids = self._return_comma_list(folder_ids)
        params = {'action': {
            'id': f_ids,
            'op': 'update',
        }}
        if color:
            params['action']['color'] = color
        if flags:
            params['action']['f'] = flags
        if parent_folder:
            params['action']['l'] = parent_folder
        if name:
            params['action']['name'] = name
        if tags:
            tn = self._return_comma_list(tags)
            params['action']['tn'] = tn
        if view:
            params['action']['view'] = view
        self.request('FolderAction', params)
    # Conversation
    def get_conversation(self, conv_id, **kwargs):
        """ Fetch a conversation by id; extra kwargs are request params.

        :returns: the raw GetConv SOAP response dict
        """
        content = {'c': kwargs}
        # Zimbra expects a numeric conversation id here.
        content['c']['id'] = int(conv_id)
        return self.request('GetConv', content)
    def delete_conversations(self, ids):
        """ Delete selected conversations

        :params ids: list of ids
        """
        str_ids = self._return_comma_list(ids)
        self.request('ConvAction', {'action': {'op': 'delete',
                                               'id': str_ids
                                               }})
    def move_conversations(self, ids, folder):
        """ Move selected conversations to an other folder

        :params ids: list of ids
        :params folder: folder id
        """
        str_ids = self._return_comma_list(ids)
        self.request('ConvAction', {'action': {'op': 'move',
                                               'id': str_ids,
                                               'l': str(folder)}})
    # Messages
    def add_message(self, msg_content, folder, **kwargs):
        """ Inject a message

        :params string msg_content: The entire message's content.
        :params string folder: Folder pathname (starts with '/') or folder ID
        :returns: the raw AddMsg SOAP response dict
        """
        content = {'m': kwargs}
        content['m']['l'] = str(folder)
        content['m']['content'] = {'_content': msg_content}
        return self.request('AddMsg', content)
    def get_message(self, msg_id, **kwargs):
        """ Fetch a message by id; extra kwargs become request params.

        :returns: the raw GetMsg SOAP response dict
        """
        content = {'m': kwargs}
        content['m']['id'] = str(msg_id)
        return self.request('GetMsg', content)
    def move_messages(self, ids, folder_id):
        """ Move selected messages to an other folder

        :param ids: list of message's ids to move
        :param folder_id: folder's id where to move messages
        """
        str_ids = self._return_comma_list(ids)
        params = {'action': {
            'id': str_ids,
            'op': 'move',
            'l': folder_id
        }}
        self.request('MsgAction', params)
    def update_messages_flag(self, ids, flag):
        """ Set flags on selected messages.

        List of flags :
        u -> unread                 f -> flagged
        a -> has attachment         s -> sent by me
        r -> replied                w -> forwarded
        d -> draft                  x -> deleted
        n -> notification sent

        by default a message priority is "normal" otherwise:
        ! -> priority high          ? -> priority low
        """
        str_ids = self._return_comma_list(ids)
        params = {'action': {
            'id': str_ids,
            'op': 'update',
            'f': flag
        }}
        self.request('MsgAction', params)
    def delete_messages(self, ids):
        """ Delete selected messages for the current user

        :param ids: list of ids
        :returns: the raw MsgAction SOAP response dict
        """
        str_ids = self._return_comma_list(ids)
        return self.request('MsgAction', {'action': {'op': 'delete',
                                                     'id': str_ids}})
    # Search
    def search(self, query, **kwargs):
        """ Search object in account

        :param query: Zimbra search-syntax query string (e.g. 'in:inbox')
        :returns: a dic where value c contains the list of results (if there
        is any). Example : {
            'more': '0',
            'offset': '0',
            'sortBy': 'dateDesc',
            'c': [
                {
                    'id': '-261',
                    'm': {'id': '261',
                          's': '2556',
                          'l': '2'},
                    'u': '0', 'd': '1450714720000',
                    'sf': '1450714720000',
                    'e': {'t': 'f',
                          'd': 'kokopu',
                          'a': 'kokopu@zimbratest3.example.com'},
                    'n': '1',
                    'fr': {'_content': 'Hello there !'},
                    'su': {'_content': 'The subject is cool'}
                }
            ]
        """
        content = kwargs
        content['query'] = {'_content': query}
        return self.request('Search', content)
    # DataSource
    def create_data_source(self, data_source, dest_folder):
        """ Create data source from a dict

        A destination folder named `dest_folder` is created first and its id
        is injected as the 'l' attribute of every source in the dict.

        data_source example =
        {
            'pop3': {
                'leaveOnServer': "(0|1)", 'id': 'data-source-id',
                'name': 'data-source-name',
                'isEnabled': '(0|1)', 'importOnly': '(0|1)',
                'host': 'data-source-server', 'port': 'data-source-port',
                'connectionType': '(cleartext|ssl|tls|tls_is_available)',
                'username': 'data-source-username',
                'password': 'data-source-password',
                'emailAddress': 'data-source-address',
                'useAddressForForwardReply': '(0|1)',
                'defaultSignature': 'default-signature-id',
                'forwardReplySignature': 'forward-reply-signature-id',
                'fromDisplay': 'data-source-from-display',
                'replyToAddress': 'data-source-replyto-address',
                'replyToDisplay': 'data-source-replyto-display',
                'importClass': 'data-import-class',
                'failingSince': 'data-source-failing-since'
            }
        }
        """
        folder = self.create_folder(dest_folder)
        for type_source, source_config in data_source.items():
            data_source[type_source]['l'] = folder['id']
        return self.request('CreateDataSource', data_source)
def get_data_sources(self, types=[], source_addresses=[], source_id=None):
all_data_sources = self.request('GetDataSources')
data_sources = {}
if types and source_addresses:
for t in types:
data_sources = {t: []}
if t in all_data_sources and isinstance(all_data_sources[t],
list):
for data_source in all_data_sources[t]:
if data_source['emailAddress'] in source_addresses:
data_sources[t].append(data_source)
elif t in all_data_sources and isinstance(all_data_sources[t],
dict):
if all_data_sources[t]['emailAddress'] in source_addresses:
data_sources[t].append(all_data_sources[t])
elif types and not source_addresses:
for t in types:
data_sources = {t: []}
if t in all_data_sources and isinstance(all_data_sources[t],
list):
for data_source in all_data_sources[t]:
data_sources[t].append(data_source)
elif t in all_data_sources and isinstance(all_data_sources[t],
dict):
data_sources[t].append(all_data_sources[t])
elif source_addresses and not types:
for t in all_data_sources.keys():
if isinstance(all_data_sources[t], list):
for data_source in all_data_sources[t]:
if data_source['emailAddress'] in source_addresses:
try:
data_sources[t].append(data_source)
except KeyError:
data_sources = {t: []}
data_sources[t].append(data_source)
elif isinstance(all_data_sources[t], dict):
if all_data_sources[t]['emailAddress'] in source_addresses:
try:
data_sources[t].append(all_data_sources[t])
except KeyError:
data_sources = {t: []}
data_sources[t].append(all_data_sources[t])
elif source_id:
for t in all_data_sources.keys():
data_sources = {t: []}
if isinstance(all_data_sources[t], list):
for data_source in all_data_sources[t]:
if data_source['id'] == source_id:
data_sources[t].append(data_source)
elif isinstance(all_data_sources[t], dict):
if all_data_sources[t]['id'] == source_id:
data_sources[t].append(all_data_sources[t])
else:
return all_data_sources
return data_sources
    def modify_data_source(self, data_source):
        """ Modify data source from a dict

        data_source example =
        {
            'pop3': {
                'leaveOnServer': "(0|1)", 'id': 'data-source-id',
                'name': 'data-source-name', 'l': 'data-source-folder-id',
                'isEnabled': '(0|1)', 'importOnly': '(0|1)',
                'host': 'data-source-server', 'port': 'data-source-port',
                'connectionType': '(cleartext|ssl|tls|tls_is_available)',
                'username': 'data-source-username',
                'password': 'data-source-password',
                'emailAddress': 'data-source-address',
                'useAddressForForwardReply': '(0|1)',
                'defaultSignature': 'default-signature-id',
                'forwardReplySignature': 'forward-reply-signature-id',
                'fromDisplay': 'data-source-from-display',
                'replyToAddress': 'data-source-replyto-address',
                'replyToDisplay': 'data-source-replyto-display',
                'importClass': 'data-import-class',
                'failingSince': 'data-source-failing-since'
            }
        }
        """
        return self.request('ModifyDataSource', data_source)
    def delete_data_source(self, data_source):
        """
        Delete data source with it's name or ID.
        data_source = { 'imap': {'name': 'data-source-name'}}
        or
        data_source = { 'pop3': {'id': 'data-source-id'}}

        Also deletes the folder the source was attached to.
        """
        source_type = [k for k in data_source.keys()][0]
        # Look the source up by id to find its destination folder ('l'),
        # which is removed before the source itself.
        complete_source = self.get_data_sources(
            source_id=data_source[source_type]['id'])
        folder_id = complete_source[source_type][0]['l']
        self.delete_folders(folder_ids=[folder_id])
        return self.request('DeleteDataSource', data_source)
    # Filter
    def add_filter_rule(
            self, name, condition, filters, actions, active=1, way='in'):
        """ Prepend a new filter rule to the account's rule list.

        :param: name filter name
        :param: condition allof or anyof
        :param: filters dict of filters
        :param: actions dict of actions
        :param: way string describing if filter is for 'in' or 'out' messages
        :returns: list of user's zobjects.FilterRule
        :raises ZimSOAPException: if a rule with the same name exists
        """
        filters['condition'] = condition
        new_rule = {
            'name': name,
            'active': active,
            'filterTests': filters,
            'filterActions': actions
        }
        new_rules = [zobjects.FilterRule.from_dict(new_rule)]
        prev_rules = self.get_filter_rules(way=way)
        # if there is already some rules
        if prev_rules:
            for rule in prev_rules:
                # don't add rule if it already exist
                if rule.name == new_rules[0].name:
                    raise ZimSOAPException(
                        'filter %s already exists' % rule.name)
            # New rule goes first so it takes precedence.
            new_rules = new_rules + prev_rules
        content = {
            'filterRules': {
                'filterRule': [r._full_data for r in new_rules]
            }
        }
        if way == 'in':
            self.request('ModifyFilterRules', content)
        elif way == 'out':
            self.request('ModifyOutgoingFilterRules', content)
        return new_rules
def get_filter_rule(self, _filter, way='in'):
""" Return the filter rule
:param: _filter a zobjects.FilterRule or the filter name
:param: way string discribing if filter is for 'in' or 'out' messages
:returns: a zobjects.FilterRule"""
if isinstance(_filter, zobjects.FilterRule):
_filter = _filter.name
for f in self.get_filter_rules(way=way):
if f.name == _filter:
return f
return None
def get_filter_rules(self, way='in'):
"""
:param: way string discribing if filter is for 'in' or 'out' messages
:returns: list of zobjects.FilterRule
"""
try:
if way == 'in':
filters = self.request(
'GetFilterRules')['filterRules']['filterRule']
elif way == 'out':
filters = self.request(
'GetOutgoingFilterRules')['filterRules']['filterRule']
# Zimbra return a dict if there is only one instance
if isinstance(filters, dict):
filters = [filters]
return [zobjects.FilterRule.from_dict(f) for f in filters]
except KeyError:
return []
    def apply_filter_rule(self, _filter, query='in:inbox', way='in'):
        """ Apply one filter rule to existing messages.

        :param: _filter _filter a zobjects.FilterRule or the filter name
        :param: query on what will the filter be applied
        :param: way string describing if filter is for 'in' or 'out' messages
        :returns: list of impacted message's ids
        """
        if isinstance(_filter, zobjects.FilterRule):
            _filter = _filter.name
        content = {
            'filterRules': {
                'filterRule': {'name': _filter}
            },
            'query': {'_content': query}
        }
        if way == 'in':
            ids = self.request('ApplyFilterRules', content)
        elif way == 'out':
            ids = self.request('ApplyOutgoingFilterRules', content)
        if ids:
            # The response packs all ids in one comma-separated string.
            return [int(m) for m in ids['m']['ids'].split(',')]
        else:
            return []
    def delete_filter_rule(self, _filter, way='in'):
        """ delete a filter rule

        :param: _filter a zobjects.FilterRule or the filter name
        :param: way string describing if filter is for 'in' or 'out' messages
        :returns: a list of zobjects.FilterRule (the remaining rules)
        """
        updated_rules = []
        rules = self.get_filter_rules(way=way)
        if isinstance(_filter, zobjects.FilterRule):
            _filter = _filter.name
        if rules:
            # Keep every rule except the one being deleted.
            for rule in rules:
                if not rule.name == _filter:
                    updated_rules.append(rule)
        # Only push a modification if something was actually removed.
        if rules != updated_rules:
            content = {
                'filterRules': {
                    'filterRule': [f._full_data for f in updated_rules]
                }
            }
            if way == 'in':
                self.request('ModifyFilterRules', content)
            elif way == 'out':
                self.request('ModifyOutgoingFilterRules', content)
        return updated_rules
class ZimbraAPISession:
    """Handle the login, the session expiration and the generation of the
    authentification header.
    """
    def __init__(self, client):
        # The SOAP client used to issue Auth requests.
        self.client = client
        self.authToken = None
    def set_end_date(self, lifetime):
        """Computes and store an absolute end_date session according to the
        lifetime of the session

        :param lifetime: session lifetime in seconds
        """
        self.end_date = (datetime.datetime.now() +
                         datetime.timedelta(0, lifetime))
    def login(self, username, password, namespace=None):
        """ Performs the login against zimbra
        (sends AuthRequest, receives AuthResponse).

        :param namespace: if specified, the namespace used for authetication
                          (if the client namespace is not suitable for
                          authentication).
        """
        if namespace is None:
            namespace = self.client.NAMESPACE
        data = self.client.request(
            'Auth',
            {
                'account': zobjects.Account(name=username).to_selector(),
                'password': {'_content': password}
            },
            namespace)
        self.authToken = data['authToken']
        lifetime = int(data['lifetime'])
        # Ensure the token is a plain string before storing.
        self.authToken = str(self.authToken)
        self.set_end_date(lifetime)
    def import_session(self, auth_token):
        """ Adopt an externally obtained auth token (e.g. delegated auth).

        :param auth_token: the token string (bytes or text)
        """
        if not isinstance(auth_token, (binary_type, text_type)):
            raise TypeError('auth_token should be a string, not {0}'.format(
                type(auth_token)))
        self.authToken = auth_token
    def is_logged_in(self, force_check=False):
        """ Return True if a token is held and, when known, not expired.

        NOTE(review): force_check is accepted but unused — confirm intent.
        """
        if not self.authToken:
            return False
        # if it's logged-in by preauth, we can't know the exp. date for sure
        try:
            return self.end_date >= datetime.datetime.now()
        except AttributeError:
            # No end_date recorded (imported session): assume still valid.
            return True
    def is_session_valid(self):
        """ Ask the server whether the held token is still accepted. """
        try:
            self.client.request('Auth',
                                {'authToken': {'_content': self.authToken}})
            return True
        except ZimbraSoapServerError:
            return False
| 35.026685 | 110 | 0.562132 |
b2926af38f31b85a065bca603ac64f3c5007da22 | 10,152 | py | Python | utils/_plot.py | Giyn/DP-Star | 5c2e85f3f1def528679f52eeec17ea88f25cc705 | [
"MIT"
] | 2 | 2021-04-20T06:16:17.000Z | 2021-04-20T07:38:08.000Z | utils/_plot.py | Giyn/DP-Star | 5c2e85f3f1def528679f52eeec17ea88f25cc705 | [
"MIT"
] | null | null | null | utils/_plot.py | Giyn/DP-Star | 5c2e85f3f1def528679f52eeec17ea88f25cc705 | [
"MIT"
] | 5 | 2021-04-20T06:15:52.000Z | 2021-05-26T14:52:48.000Z | """
-------------------------------------
# -*- coding: utf-8 -*-
# @Author : nomalocaris
# @File : _plot.py
# @Software: PyCharm
-------------------------------------
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FormatStrFormatter
from tqdm import tqdm
from config import *
from utils import ProgressBar
# Global matplotlib defaults applied to every figure this module produces.
plt.rcParams['savefig.dpi'] = 600              # high-resolution exports
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['axes.unicode_minus'] = False     # render minus signs as ASCII '-'
# Shared 10-step viridis palette used by the 3-D chart helpers below.
colors = plt.cm.viridis(np.linspace(0, 1, 10))
def plot_scatter(points, traj_type=None, epsilon=None, fig_size=(6, 6), color='black', size=3):
    """Scatter-plot trajectory points and save the figure to disk.

    Args:
        points   : trajectory points, e.g. [[23.14811, 113.30516],
                   [23.14902, 113.30486]] (latitude, longitude pairs)
        traj_type: 'raw' saves under the raw-trajectory file name;
                   any other value saves under the epsilon-specific name
        epsilon  : privacy budget, used only in the output file name
        fig_size : figure size
        color    : point color
        size     : point size

    Returns:
        None
    """
    plt.figure(figsize=fig_size)
    # Fixed axis limits so figures from different runs are directly comparable.
    plt.xlim(21.1, 26.1)
    plt.ylim(109.8, 115.2)
    plt.scatter(x=[p[0] for p in points], y=[p[1] for p in points], color=color, s=size)
    plt.xlabel('latitude')
    # Fix: the original set the y label twice in a row; once is enough.
    plt.ylabel('longitude')
    if not os.path.exists('trajs_visualization'):
        os.mkdir('trajs_visualization')
    if traj_type == 'raw':
        plt.savefig(f'trajs_visualization/{USE_DATA}_Trajectories.png')
    else:
        plt.savefig(f'trajs_visualization/{USE_DATA}_{epsilon}.png')
    plt.show()
def plot_trajs(trajs, fig_size=(6, 6), color="mediumpurple", size=5, title='', is_plot_line=False,
               od_only=False, offset=None):
    """Plot whole trajectories (or only their origin/destination points).

    Args:
        trajs       : list of trajectories, each a sequence of [lat, lon] points
        fig_size    : figure size
        color       : point/line color for trajectory points (origins when od_only)
        size        : point size
        title       : figure title
        is_plot_line: also draw connecting lines between points
        od_only     : plot only the first (origin) and last (destination) point;
                      destinations are drawn in "yellowgreen"
        offset      : [lat_jitter, lon_jitter] — each coordinate is perturbed
                      uniformly in +/- this amount to reduce over-plotting

    Returns:
        None
    """
    # Mutable-default guard: a fresh zero offset per call.
    if offset is None:
        offset = [0, 0]
    p = ProgressBar(len(trajs), 'draw a trajectory graph')
    plt.figure(figsize=fig_size)
    for i in range(len(trajs)):
        p.update(i)
        traj = np.array(trajs[i])
        if od_only:
            # Keep only origin and destination.
            traj = [traj[0], traj[-1]]
        # Apply independent random jitter to every coordinate.
        x = [x[0] + np.random.uniform(-offset[0], offset[0]) for x in traj]
        y = [y[1] + np.random.uniform(-offset[1], offset[1]) for y in traj]
        if od_only:
            if is_plot_line:
                # NOTE(review): plotting a single point with plt.plot draws
                # nothing visible without a marker — presumably legacy code.
                plt.plot(x[0], y[0], c=color)
                plt.plot(x[1], y[1], c="yellowgreen")
            plt.scatter(x[0], y[0], c=color, s=size)
            plt.scatter(x[1], y[1], c="yellowgreen", s=size)
        else:
            if is_plot_line:
                plt.plot(x, y, c=color)
            plt.scatter(x, y, c=color, s=size)
    plt.title(title)
    plt.show()
def line_chart(dp_stp_list: list, dp_star_list: list, metric: str):
    """Draw a DP-STP vs DP-Star comparison line chart for one metric.

    Args:
        dp_stp_list : metric values for DP-STP, one per privacy budget
        dp_star_list: metric values for DP-Star, one per privacy budget
        metric      : metric name used as the y-axis label (e.g. 'RE')

    Returns:
        None
    """
    # The four privacy budgets evaluated, shown on the x axis.
    epsilons = [0.1, 0.5, 1.0, 2.0]
    plt.plot(epsilons, dp_stp_list, marker='*', ms=10, label='DP-STP')
    plt.plot(epsilons, dp_star_list, marker='o', mec='r', mfc='w', label='DP-Star')
    plt.legend()
    plt.xticks(epsilons, rotation=1)
    plt.margins(0)
    plt.subplots_adjust(bottom=0.10)
    plt.xlabel(r'$\varepsilon$')
    plt.ylabel(metric)
    plt.show()
def three_dimension_piece(data, z_label):
    """Draw a 3-D sliced bar chart comparing DP-STP and DP-Star.

    Args:
        data   : {epsilon_str: {'DP-STP': [v_GL, v_BK, v_GT],
                                'DP-Star': [v_GL, v_BK, v_GT]}}
                 — one metric value per dataset (GL/BK/GT) and algorithm
        z_label: metric name shown on the z axis; also used in the file name

    Returns:
        None
    """
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    # One transparency level per epsilon slice (more opaque = larger epsilon).
    alphas = [0.5, 0.6, 0.8, 0.9]
    y_ticks_labels = [str(i) for i in [0.1, 0.5, 1.0, 2.0]]
    y_ticks = [index + 0.2 for index in range(len(y_ticks_labels))]
    x_ticks_labels = ['GL', 'BK', 'GT']
    x = np.arange(len(x_ticks_labels))  # the label locations
    width = 0.1  # the width of the bars
    # epsilon 0.1 to 2.0: draw one 2-D bar group per epsilon at depth k.
    # NOTE(review): loop variable ``c`` is unused — bar colors are fixed to
    # colors[2] / colors[8] so the two algorithms stay distinguishable.
    for a, c, k, yl in zip(alphas, colors, y_ticks, y_ticks_labels):
        ax.bar(x - width / 2, data[yl]['DP-STP'], width=width, zs=k, zdir='y', color=colors[2],
               alpha=a)
        ax.bar(x + width / 2, data[yl]['DP-Star'], width=width, zs=k, zdir='y', color=colors[8],
               alpha=a)
    ax.view_init(elev=20, azim=-130)
    ax.set_box_aspect(aspect=(3, 2, 1.8))
    ax.set_xticks(x)
    ax.set_xticklabels(x_ticks_labels)
    ax.set_ylabel(r'$\varepsilon$')
    ax.set_yticks(y_ticks)
    ax.set_yticklabels(y_ticks_labels)
    ax.set_zlabel(z_label)
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.05f'))
    # On the y axis let's only label the discrete values that we have data for.
    # (Duplicate of the set_yticks call above; harmless.)
    ax.set_yticks(y_ticks)
    if not os.path.exists('three_dimension'):
        os.mkdir('three_dimension')
    filename = f"three_dimension/{z_label}_piece.eps"
    plt.savefig(filename, bbox_inches='tight', pad_inches=0, format='eps')
    plt.show()
def three_dimension_bar(data, metric):
    """Draw a full 3-D bar chart (epsilon x dataset) for one metric.

    Args:
        data   : {epsilon_str: {dataset_name: {'DP-STP': value,
                                               'DP-Star': value}}}
        metric : metric name shown on the z axis; also used in file names

    Returns:
        None
    """
    # setup the figure and axes
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(111, projection='3d')
    z_label = metric
    ax1.view_init(elev=19, azim=117)
    width = depth = 0.3
    yticks_labels = ['Geolife', 'Brinkhoff', 'GZTaxi']
    yticks = np.arange(len(yticks_labels)) + depth / 2  # the label locations
    xticks_labels = [str(i) for i in [0.1, 0.5, 1.0, 2.0]]
    xticks = [index + width / 2 for index in range(len(xticks_labels))]
    # Map epsilon / dataset labels to their grid coordinates.
    xticks_dict = dict(zip(xticks_labels, xticks))
    yticks_dict = dict(zip(yticks_labels, yticks))
    _x = np.array(xticks)
    _y = np.array(yticks)
    _xx, _yy = np.meshgrid(_x, _y)
    x, y = _xx.ravel(), _yy.ravel()
    top = x + y
    bottom = np.zeros_like(top)
    # if metric == 'KT':
    #     Z = np.zeros((3, 4))
    #     ax1.plot_surface(_xx, _yy, Z, color='pink', alpha=0.6)
    for each_epsilon in data.keys():
        for each_dataset in data[each_epsilon]:
            # Locate this (epsilon, dataset) cell in the flattened grid.
            index_ = np.where((x == xticks_dict[each_epsilon]) & (y == yticks_dict[each_dataset]))
            x_tmp = x[index_]
            y_tmp = y[index_]
            # DP-STP (left half of the cell)
            top_stp = data[each_epsilon][each_dataset]["DP-STP"]
            ax1.bar3d(x_tmp - width / 2, y_tmp - depth / 2, bottom, width / 2, depth,
                      [top_stp] * len(x_tmp), shade=True, color=colors[8])
            # DP-Star (right half of the cell)
            top_star = data[each_epsilon][each_dataset]["DP-Star"]
            ax1.bar3d(x_tmp, y_tmp - depth / 2, bottom, width / 2, depth, top_star,
                      shade=True,
                      color=colors[2])
    ax1.set_box_aspect(aspect=(3, 2, 2))
    ax1.set_ylabel(r'Dataset')
    ax1.set_yticks(yticks)
    ax1.set_yticklabels(yticks_labels)
    ax1.set_zlabel(z_label)
    ax1.zaxis.set_major_formatter(FormatStrFormatter('%.05f'))
    ax1.set_xlabel(r'$\varepsilon$')
    ax1.set_xticks(xticks)
    ax1.set_xticklabels(xticks_labels)
    if not os.path.exists('three_dimension'):
        os.mkdir('three_dimension')
    plt.savefig(f"three_dimension/{metric}_bar.pdf", bbox_inches='tight', pad_inches=0)
    plt.savefig(f"three_dimension/{metric}_bar.eps", bbox_inches='tight', pad_inches=0)
    plt.show()
if __name__ == '__main__':
    # Manual visualisation entry point: load every sanitised trajectory for
    # one privacy budget and scatter-plot all points. Swap the commented
    # lines to plot the raw data or a different epsilon.
    # file_list = os.listdir(f'../data/{USE_DATA}/Trajectories/')
    file_list = os.listdir(f'../data/{USE_DATA}/SD/sd_final_epsilon_0.1/')
    # file_list = os.listdir(f'../data/{USE_DATA}/SD/sd_final_epsilon_0.5/')
    # file_list = os.listdir(f'../data/{USE_DATA}/SD/sd_final_epsilon_1.0/')
    # file_list = os.listdir(f'../data/{USE_DATA}/SD/sd_final_epsilon_2.0/')
    point_list = []
    for file in tqdm(file_list):
        with open(f'../data/{USE_DATA}/SD/sd_final_epsilon_0.1/' + file, 'r', encoding='utf-8') as traj_file:
            for i in traj_file.readlines():
                # Each line is "lat,lon" — parse into a float pair.
                point_list.append(list(map(lambda x: float(x.strip()), i.split(','))))
    plot_scatter(point_list)
| 30.95122 | 109 | 0.547872 |
fdbf28b8d6a01d39e569050de2b13998024b4181 | 1,309 | py | Python | app/ch16_mongodb/final/pypi_org/infrastructure/cookie_auth.py | tbensonwest/data-driven-web-apps-with-flask | be025c1c0190419019924f7516f49b3b8452cdf8 | [
"MIT"
] | 2 | 2020-04-29T15:45:19.000Z | 2020-11-02T18:18:24.000Z | app/ch16_mongodb/final/pypi_org/infrastructure/cookie_auth.py | tbensonwest/data-driven-web-apps-with-flask | be025c1c0190419019924f7516f49b3b8452cdf8 | [
"MIT"
] | null | null | null | app/ch16_mongodb/final/pypi_org/infrastructure/cookie_auth.py | tbensonwest/data-driven-web-apps-with-flask | be025c1c0190419019924f7516f49b3b8452cdf8 | [
"MIT"
] | null | null | null | import hashlib
from datetime import timedelta
from typing import Optional
import bson
from flask import Request
from flask import Response
from pypi_org.bin.load_data import try_int
auth_cookie_name = 'pypi_demo_user'
def set_auth(response: Response, user_id: int):
    """Attach the signed auth cookie for *user_id* to *response*.

    The cookie value is "<user_id>:<salted sha512 of user_id>" so it can be
    verified later by get_user_id_via_auth_cookie().
    """
    cookie_value = "{}:{}".format(user_id, __hash_text(str(user_id)))
    response.set_cookie(auth_cookie_name, cookie_value)
def __hash_text(text: str) -> str:
    """Return the salted SHA-512 hex digest used to sign cookie values."""
    salted = 'salty__' + text + '__text'
    return hashlib.sha512(salted.encode('utf-8')).hexdigest()
def __add_cookie_callback(_, response: Response, name: str, value: str):
    # Callback helper: set *name*=*value* on the response with a 30-day TTL.
    # The first positional argument (the triggering sender) is ignored.
    response.set_cookie(name, value, max_age=timedelta(days=30))
def get_user_id_via_auth_cookie(request: Request) -> Optional[bson.ObjectId]:
    """Validate the auth cookie on *request* and return the user's ObjectId.

    Returns None when the cookie is missing, malformed, fails the hash
    check (tampering), or does not parse as a BSON ObjectId.
    """
    if auth_cookie_name not in request.cookies:
        return None
    val = request.cookies[auth_cookie_name]
    parts = val.split(':')
    if len(parts) != 2:
        return None
    user_id = parts[0]
    hash_val = parts[1]
    hash_val_check = __hash_text(user_id)
    if hash_val != hash_val_check:
        print("Warning: Hash mismatch, invalid cookie value")
        return None
    try:
        return bson.ObjectId(user_id)
    except Exception:
        # Fix: the original used a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt. bson raises InvalidId/TypeError for
        # malformed ids; Exception covers those without masking signals.
        return None
def logout(response: Response):
    # Clear the auth cookie so subsequent requests are anonymous.
    response.delete_cookie(auth_cookie_name)
| 24.698113 | 77 | 0.705882 |
3073a1d9016100f3b50d7f04a2d06214ed2cbe66 | 451 | py | Python | common/common.py | selcukusta/sentiment-analysis-for-comments | d1262f57dba900f59f2e0d100350c0f0f5902ce1 | [
"MIT"
] | 7 | 2020-07-21T11:51:08.000Z | 2022-01-06T21:52:04.000Z | common/common.py | selcukusta/sentiment-analysis-for-comments | d1262f57dba900f59f2e0d100350c0f0f5902ce1 | [
"MIT"
] | null | null | null | common/common.py | selcukusta/sentiment-analysis-for-comments | d1262f57dba900f59f2e0d100350c0f0f5902ce1 | [
"MIT"
] | 2 | 2021-03-20T19:19:56.000Z | 2021-04-22T20:52:33.000Z | class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def colored_print(print_type: str, text: str):
    """Print *text* wrapped in the given ANSI colour escape sequence.

    Args:
        print_type: an ANSI escape string, typically one of the ``bcolors``
            constants (e.g. ``bcolors.FAIL``). The original annotation said
            ``bcolors``, but the values passed are its ``str`` attributes.
        text: the message to print; the colour is reset afterwards.
    """
    print(f"{print_type}{text}{bcolors.ENDC}")
if __name__ == '__main__':
    # Quick manual demo: print one message per severity style.
    colored_print(bcolors.FAIL, "Epic Fail!")
    colored_print(bcolors.WARNING, "Are you sure you want to continue?")
| 23.736842 | 72 | 0.618625 |
95f88ad38083e73415b66601edcc50020146c5b9 | 109 | py | Python | F_Machine_learning/1_Unsupervised-Learning/solutions/ex3_1.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 7 | 2019-07-03T07:41:55.000Z | 2022-02-06T20:25:37.000Z | F_Machine_learning/1_Unsupervised-Learning/solutions/ex3_1.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 9 | 2019-03-14T15:15:09.000Z | 2019-08-01T14:18:21.000Z | F_Machine_learning/1_Unsupervised-Learning/solutions/ex3_1.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 11 | 2019-03-12T10:43:11.000Z | 2021-10-05T12:15:00.000Z | # resize according to smallest image
# NOTE(review): exercise solution snippet — assumes ``imgs``,
# ``smallest_index``, ``im1`` and ``im2`` are defined by the surrounding
# notebook; confirm before running standalone.
sz = imgs[smallest_index].size  # presumably a PIL (width, height) tuple — verify
im1 = im1.resize(sz)
im2 = im2.resize(sz)
d88a56cebfac19b7a8b12b470628cf318884529e | 2,144 | py | Python | model/cv_score.py | phykn/xai_tree | 66f5cb4ea77686364478b1f16f937678b2e544a8 | [
"Apache-2.0"
] | 1 | 2022-02-06T17:49:26.000Z | 2022-02-06T17:49:26.000Z | model/cv_score.py | phykn/xai_tree | 66f5cb4ea77686364478b1f16f937678b2e544a8 | [
"Apache-2.0"
] | null | null | null | model/cv_score.py | phykn/xai_tree | 66f5cb4ea77686364478b1f16f937678b2e544a8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from typing import Dict, Any, Optional
from .metric import *
def cv_score(
model_func,
datasets,
params={},
random_state: int=42,
n_jobs: Optional[int]=None,
) -> Dict[str, Any]:
'''
model: Tree Model Function
datasets: List of dataset.
dataset = {
'x_train': ndarray,
'y_train': ndarray,
'x_valid': ndarray,
'y_valid': ndarray,
}
'''
models = []
oob_true = []
oob_pred = []
for dataset in datasets:
x_train = dataset['x_train']
y_train = dataset['y_train'].flatten()
x_valid = dataset['x_valid']
y_valid = dataset['y_valid'].flatten()
output = model_func(
x = x_train,
y = y_train,
params = params,
random_state = random_state,
n_jobs = n_jobs
)
model = output['model']
if output['type'] == 'reg':
pred = model.predict(x_valid)
elif output['type'] == 'clf':
pred = model.predict_proba(x_valid)[:, 1]
else:
raise ValueError
models.append(model)
oob_true.append(y_valid)
oob_pred.append(pred)
oob_true = np.concatenate(oob_true, axis=0)
oob_pred = np.concatenate(oob_pred, axis=0)
score = {}
if output['type'] == 'reg':
score['r2'] = r2_score(oob_true, oob_pred)
score['mae'] = mae_score(oob_true, oob_pred)
score['mse'] = mse_score(oob_true, oob_pred)
score['rmse'] = rmse_score(oob_true, oob_pred)
score['mape'] = mape_score(oob_true, oob_pred)
elif output['type'] == 'clf':
score['auc'] = auc_score(oob_true, oob_pred)
score['logloss'] = logloss_score(oob_true, oob_pred)
score['accuracy'] = accuracy_score(oob_true, oob_pred)
else:
raise ValueError
return {
'name': output['name'],
'type': output['type'],
'models': models,
'oob_true': oob_true,
'oob_pred': oob_pred,
'score': score
}
| 27.844156 | 63 | 0.533116 |
24e687837adb5c95d64da007e81604a813cc3320 | 7,150 | py | Python | scale_chart.py | cbaumgardner/guitar_mapper | f4a983e0fc8180bac5b7cfa410e977353135d59b | [
"MIT"
] | null | null | null | scale_chart.py | cbaumgardner/guitar_mapper | f4a983e0fc8180bac5b7cfa410e977353135d59b | [
"MIT"
] | 1 | 2022-03-25T18:52:13.000Z | 2022-03-25T18:52:13.000Z | scale_chart.py | cbaumgardner/guitar_mapper | f4a983e0fc8180bac5b7cfa410e977353135d59b | [
"MIT"
] | 1 | 2022-03-04T22:14:02.000Z | 2022-03-04T22:14:02.000Z | import math
# These should probably go in a config file or something
# Each entry's "scale_steps" lists the semitone intervals between successive
# scale degrees (2 = whole step, 1 = half step, 3 = minor third).
scale_types = [{"type": "Major (Ionian)", "scale_steps": [2, 2, 1, 2, 2, 2, 1]},
               {"type": "Minor (Aeolian)", "scale_steps": [2, 1, 2, 2, 1, 2, 2]},
               {"type": "Major Pentatonic", "scale_steps": [2, 2, 3, 2, 3]},
               {"type": "Minor Pentatonic", "scale_steps": [3, 2, 2, 3, 2]},
               {"type": "Dorian", "scale_steps": [2, 1, 2, 2, 2, 1, 2]},
               {"type": "Phrygian", "scale_steps": [1, 2, 2, 2, 1, 2, 2]},
               {"type": "Lydian", "scale_steps": [2, 2, 2, 1, 2, 2, 1]},
               {"type": "Mixolydian", "scale_steps": [2, 2, 1, 2, 2, 1, 2]},
               {"type": "Locrian", "scale_steps": [1, 2, 2, 1, 2, 2, 2]}]
# The twelve chromatic pitches starting at A; "key_display" is the
# user-facing label including the enharmonic equivalent.
key_notes = [{"key": "A", "key_display": "A"},
             {"key": "A#", "key_display": "A♯ / B♭"},
             {"key": "B", "key_display": "B"},
             {"key": "C", "key_display": "C"},
             {"key": "C#", "key_display": "C♯ / D♭"},
             {"key": "D", "key_display": "D"},
             {"key": "D#", "key_display": "D♯ / E♭"},
             {"key": "E", "key_display": "E"},
             {"key": "F", "key_display": "F"},
             {"key": "F#", "key_display": "F♯ / G♭"},
             {"key": "G", "key_display": "G"},
             {"key": "G#", "key_display": "G♯ / A♭"}]
# Plain chromatic note names, in order (index arithmetic wraps mod 12).
notes = [d["key"] for d in key_notes]
# Fret numbers that carry inlay markers on a standard guitar neck.
inlay_frets = [3, 5, 7, 9, 12, 15, 17, 19, 21]
class FretBoard:
    """Builds an HTML representation of a guitar neck with the notes of a
    chosen key/scale highlighted.

    ``neck`` maps string number (1 = high E ... 6 = low E) to the HTML
    accumulated for that string by :meth:`create_fretboard`.
    """
    # TODO change scale_types to use id, maybe put in database
    def __init__(self, key_note="A", scale_type="Major (Ionian)", fret_num=22):
        # Number of frets rendered on the neck.
        self.fret_num = fret_num
        # Legacy character width of one fret from the old ASCII renderer;
        # still used by add_inlay_markers().
        self.fret_width = 7
        # Standard tuning, string 1 (high E) through string 6 (low E).
        self.tuning = {
            1: "E",
            2: "B",
            3: "G",
            4: "D",
            5: "A",
            6: "E"
        }
        # Per-string HTML output, filled in by create_fretboard().
        self.neck = {
            1: "",
            2: "",
            3: "",
            4: "",
            5: "",
            6: ""
        }
        # Notes of the selected scale, computed by compile_scale().
        self.scale = []
        self.scale_type = scale_type
        if key_note in notes:
            self.key_note = key_note
        else:
            raise RuntimeError("Invalid root note")

    def create_fretboard(self):
        """Compile the fretboard: resolve the scale, render each string's
        open-note label, then append one HTML tab per (string, fret)."""
        self.scale = self.compile_scale()
        for string in self.neck:
            self.neck[string] = self.get_tuning_html(self.tuning[string])
        current_fret = 0
        while current_fret < self.fret_num:
            for string in self.neck:
                # determine current note (+1 because current_fret == 0 means
                # the first fret, one semitone above the open string)
                open_note = self.tuning[string]
                open_note_index = notes.index(open_note)
                current_note = notes[(open_note_index + current_fret + 1
                                      ) % len(notes)]
                # add to string
                string_so_far = self.neck[string]
                updated_neck = string_so_far + self.create_string_tab(current_fret, current_note)
                # + self.fret
                self.neck[string] = updated_neck
            current_fret += 1

    def create_string_tab(self, fret, note):
        """ Checks if note is in scale and creates the string drawing """
        is_note = False
        is_root = False
        is_inlay = False
        if note in self.scale:
            if note == self.scale[0]:
                # string_tab = (self.empty_tab * (math.trunc(self.fret_width / 2))) + self.root_tab + (
                #     self.empty_tab * (math.trunc(self.fret_width / 2)))
                # string_tab = '<span class="string-tab root note"></span>'
                # classes = ' root note fret-' + str(fret)
                is_note = True
                is_root = True
            else:
                # string_tab = (self.empty_tab * (math.trunc(self.fret_width / 2))) + self.note_tab + (
                #     self.empty_tab * (math.trunc(self.fret_width / 2)))
                # string_tab = '<span class="string-tab note"></span>'
                # classes = ' note fret-' + str(fret)
                is_note = True
        # else:
        #     # string_tab = self.empty_tab * self.fret_width
        #     string_tab = '<span class="string-tab"></span>'
        #     classes = ' fret-' + str(fret)
        # frets were off by one, probably should make fret start at 1 instead of 0
        if fret + 1 in inlay_frets:
            # classes = classes + ' inlay'
            is_inlay = True
        return self.get_string_tab_html(note, is_note, is_root, is_inlay)
        # return '<span class="string-tab' + classes + '"></span>'

    def compile_scale(self):
        """Determine the notes in the configured scale for the given key.

        Walks the semitone steps of ``self.scale_type`` starting from the
        root; the returned list begins with the root note.
        """
        scale_dict = [x for x in scale_types if x["type"] == self.scale_type][0]
        scale_steps = scale_dict["scale_steps"]
        scale = [self.key_note]
        i = 0
        while i < len(scale_steps):
            current_note_index = notes.index(scale[i])
            next_step = scale_steps[i]
            next_note = notes[(current_note_index + next_step) % len(notes)]
            scale.append(next_note)
            i += 1
        return scale

    def add_inlay_markers(self):
        """Adds bullets below inlay frets.

        Legacy ASCII renderer helper (currently disabled in draw_neck):
        stores the marker row under pseudo-string key 7 of ``self.neck``.
        """
        inlay_list = [" "] * ((self.fret_width * self.fret_num) + (self.fret_num + 1))
        for inlay in inlay_frets:
            position = math.trunc(self.fret_width * (inlay - 0.5)) + inlay
            inlay_list[position] = "•"
        inlay_markers = "".join(inlay_list)
        self.neck[7] = inlay_markers

    def get_tuning_html(self, note):
        """Render the open-string tuning label, highlighting scale notes."""
        # Should probably change this to use flags like Corey's
        html = '<span class="string-tuning'
        if note in self.scale:
            html = html + ' note'
            # return '<span class="string-tuning note root">' + note + '</span>'
            if note == self.scale[0]:
                html = html + ' root'
                # return '<span class="string-tuning note">' + note + '</span>'
        html = html + '">'
        if note in self.scale:
            html = html + '<span class="note-indicator">' + note + '</span>'
        else:
            html = html + note
        html = html + '</span>'
        return html

    @staticmethod
    def get_string_tab_html(note, is_note, is_root, is_inlay):
        """Render one fret cell as HTML with note/root/inlay CSS classes."""
        html = '<span class="string-tab'
        if is_note:
            html = html + ' note'
        if is_root:
            html = html + ' root'
        if is_inlay:
            html = html + ' inlay'
        html = html + '">'
        if is_note:
            html = html + '<span class="note-indicator">' + note + '</span>'
        else:
            html = html + ' '
        html = html + '</span>'
        return html

    def draw_neck(self):
        """ Does the actual drawing of the neck """
        # draw the tuning of each string
        # for string in self.neck:
        #     self.neck[string] = self.get_tuning_html(self.tuning[string])
        self.create_fretboard()
        # self.add_inlay_markers()
        # TODO make this logging
        # print(*[str(v) for k, v in self.neck.items()], sep='\n')
        return self.neck
| 36.85567 | 103 | 0.496224 |
a50e7a27226ac85dba68190d642d3db9877f5cda | 16,183 | py | Python | ci/autotest.py | ycheng-aa/gated_launch_backend | cbb9e7e530ab28d5914276e9607ebfcf84be6433 | [
"MIT"
] | null | null | null | ci/autotest.py | ycheng-aa/gated_launch_backend | cbb9e7e530ab28d5914276e9607ebfcf84be6433 | [
"MIT"
] | null | null | null | ci/autotest.py | ycheng-aa/gated_launch_backend | cbb9e7e530ab28d5914276e9607ebfcf84be6433 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*-coding:utf-8-*-
import sys
import requests
import json
sys.path.append("./")
from gated_launch_backend.settings_test import ZHONGCE_SIT_URL, JIRA_SIT_URL
def get_reponse_res(response):
if response.status_code == 200:
res = json.loads(response.text)
else:
res = ""
return res
def login():
# 测试登录是否成功
try:
url = "%sapi/v1/login/" % ZHONGCE_SIT_URL
print(url)
data = json.dumps(
{"username": "root",
"password": "123456aa",
"idCode": "bmapp"})
response = requests.post(url, data=data, headers={"Content-Type": "application/json"})
res = get_reponse_res(response)
if res:
if res['status'] == 200:
print("login successful")
print(res['data'])
token = res['data']['token']
return token
else:
msg = res['msg']
print("login failed :%s" % msg)
sys.exit(1)
else:
response.raise_for_status()
except Exception as e:
print("login exception:%s" % e)
sys.exit(1)
def create_app():
    """Create the test app 'autotest_app1' and return (app_id, app_name);
    exits the process on failure."""
    try:
        url = "%sapi/v1/apps/" % ZHONGCE_SIT_URL
        print(url)
        data = json.dumps(
            {"name": "autotest_app1",
             "desc": "test for autotest app",
             "image": "T1NUhvB5C_1RCvBVdK",
             "types": [1]})
        response = requests.post(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['name'] == "autotest_app1":
                print("create app successful")
                print(res['data'])
                app_id = res['data']['id']
                app_name = res['data']['name']
                return app_id, app_name
            else:
                msg = res['msg']
                print("create app failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("create app exception:%s" % e)
        sys.exit(1)
def get_app_detail(app_id, app_name):
    """Fetch the detail of the freshly created app and verify its name;
    exits the process on failure."""
    try:
        url = "%sapi/v1/apps/%s/" % (ZHONGCE_SIT_URL, app_id)
        print(url)
        response = requests.get(url, headers=HEADERS)
        res = get_reponse_res(response)
        if not res:
            # Non-200 HTTP reply: surface it as an HTTPError.
            response.raise_for_status()
        elif res['status'] == 200 and res['data']['name'] == app_name:
            print("get app detail successful")
            print(res['data'])
        else:
            print("get app detail failed :%s" % res['msg'])
            sys.exit(1)
    except Exception as e:
        print("get app detail exception:%s" % e)
        sys.exit(1)
def get_app_owner_group_id(app_id, app_name):
    """Return the id of the app's owner user group; exits the process on
    failure."""
    try:
        url = "%sapi/v1/userGroups/" % ZHONGCE_SIT_URL
        print(url)
        params = {"appId": app_id, "type": "owner"}
        response = requests.get(url, params=params, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['results'][0]['appName'] == app_name:
                print("get app owner group id successful")
                print(res['data'])
                app_owner_group_id = res['data']['results'][0]['id']
                return app_owner_group_id
            else:
                msg = res['msg']
                print("get app owner group id failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("get app owner group id exception:%s" % e)
        sys.exit(1)
def add_auto_test_user_to_app_owner(app_owner_group_id):
    """Add the automation account 'root' to the app's owner group; exits
    the process on failure."""
    try:
        url = "%sapi/v1/userGroups/%s/members/" % (ZHONGCE_SIT_URL, app_owner_group_id)
        print(url)
        data = json.dumps({"account": "root"})
        response = requests.post(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['account'] == "root":
                print("add auto test user to app owner group successful")
                print(res['data'])
            else:
                msg = res['msg']
                print("add auto test user to app owner group failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("add auto test user to app owner group exception:%s" % e)
        sys.exit(1)
def create_task(app_id):
    """Create a gated-launch task under the given app and return
    (task_id, task_name); exits the process on failure."""
    try:
        url = "%sapi/v1/tasks/" % ZHONGCE_SIT_URL
        print(url)
        data = json.dumps(
            {"name": "任务管理自动化测试",
             "appId": app_id,
             "startDate": "2017-10-31",
             "endDate": "2017-11-08",
             "innerStrategyList": [{"id": 1, "pushContent": "innerStrategy1"},
                                   {"id": 1, "pushContent": "innerStrategy2"},
                                   {"id": 1, "pushContent": "innerStrategy3"},
                                   {"id": 1, "pushContent": "innerStrategy4"}],
             "outerStrategyList": [1, 2],
             "imageId": "T1NUhvB5C_1RCvBVdK",
             "versionDesc": "自动化测试任务",
             "awardRule": "autotest",
             "contact": "autotest"
             })
        response = requests.post(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['name'] == "任务管理自动化测试":
                print("create task successful")
                print(res['data'])
                task_id = res['data']['id']
                task_name = res['data']['name']
                return task_id, task_name
            else:
                msg = res['msg']
                print("create task failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("create task exception:%s" % e)
        sys.exit(1)
def get_task_detail(task_id, task_name):
    """Fetch the new task's detail and verify its name matches; exits the
    process on failure."""
    try:
        url = "%sapi/v1/tasks/%s/" % (ZHONGCE_SIT_URL, task_id)
        print(url)
        response = requests.get(url, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['name'] == task_name:
                print("get task detail successful")
                print(res['data'])
            else:
                msg = res['msg']
                print("get task detail failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("get task detail exception: %s" % (e))
        sys.exit(1)
def update_task(task_id, app_id=None):
    """Update the task's detail (versionDesc) and verify the change took;
    exits the process on failure.

    Args:
        task_id: id of the task to update.
        app_id: owning app id. When omitted, falls back to the module-level
            ``app_id`` global set in the ``__main__`` section — the original
            implementation read that global implicitly, which this keeps
            backward compatible while letting callers pass it explicitly.
    """
    if app_id is None:
        app_id = globals()["app_id"]
    try:
        url = "%sapi/v1/tasks/%s/" % (ZHONGCE_SIT_URL, task_id)
        print(url)
        data = json.dumps(
            {"name": "任务管理自动化测试",
             "appId": app_id,
             "startDate": "2017-10-31",
             "endDate": "2017-11-08",
             "innerStrategyList": [{"id": 1, "pushContent": "innerStrategy1"},
                                   {"id": 1, "pushContent": "innerStrategy2"},
                                   {"id": 1, "pushContent": "innerStrategy3"},
                                   {"id": 1, "pushContent": "innerStrategy4"}],
             "outerStrategyList": [1, 2],
             "imageId": "T1NUhvB5C_1RCvBVdK",
             "versionDesc": "test for put request",
             "awardRule": "autotest",
             "contact": "autotest"
             })
        response = requests.put(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['versionDesc'] == "test for put request":
                print("update task successful")
                print(res['data'])
            else:
                msg = res['msg']
                print("update task failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("update task exception: %s" % e)
        sys.exit(1)
def create_issue_zhongce(app_id, task_id):
    """Create a plain Zhongce-platform issue under the task and return its
    id; exits the process on failure."""
    try:
        url = "%sapi/v1/issues/" % ZHONGCE_SIT_URL
        print(url)
        data = json.dumps(
            {"appId": app_id,
             "taskId": task_id,
             "title": "autotest",
             "detail": "test for autotest"
             })
        response = requests.post(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['title'] == "autotest":
                print("create issue successful")
                print(res['data'])
                issue_id_zhongce = res['data']['id']
                return issue_id_zhongce
            else:
                msg = res['msg']
                print("create issue failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("create issue exception: %s" % e)
        sys.exit(1)
def get_issue_detail(issue_id):
    """Fetch an issue's detail (expects the autotest title) and return the
    data dict; exits the process on failure."""
    try:
        url = "%sapi/v1/issues/%s/" % (ZHONGCE_SIT_URL, issue_id)
        print(url)
        response = requests.get(url, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['title'] == "autotest":
                print("get issue detail successful")
                print(res['data'])
                return res['data']
            else:
                msg = res['msg']
                print("get issue detail failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("get issue detail exception: %s" % e)
        sys.exit(1)
def update_issue_detail(issue_id, app_id, task_id):
    """Update an issue's detail text and verify the change took; exits the
    process on failure."""
    try:
        url = "%sapi/v1/issues/%s/" % (ZHONGCE_SIT_URL, issue_id)
        print(url)
        data = json.dumps(
            {"appId": app_id,
             "taskId": task_id,
             "title": "autotest",
             "detail": "test for autotest update issue api"
             })
        response = requests.put(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['detail'] == "test for autotest update issue api":
                print("update issue successful")
                print(res['data'])
            else:
                msg = res['msg']
                print("update issue failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("update issue exception: %s" % (e))
        sys.exit(1)
def issue_to_jira(issue_id):
    """Convert a Zhongce issue into a jira ticket and return the jira id;
    exits the process on failure."""
    try:
        url = "%sapi/v1/issueToJira/?issueId=%s" % (ZHONGCE_SIT_URL, issue_id)
        print(url)
        response = requests.get(url, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['issueId'] == issue_id:
                print("issue to jira successful")
                print(res['data'])
                jira_id = res['data']['jiraId']
                return jira_id
            else:
                msg = res['msg']
                print("issue to jira failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("issue to jira exception: %s" % e)
        sys.exit(1)
def create_issue_weixin(app_id, task_id):
    """Simulate an issue forwarded from the WeChat mini-program (carries an
    extra serialized "other" payload and an image) and return its id;
    exits the process on failure."""
    try:
        url = "%sapi/v1/issues/" % ZHONGCE_SIT_URL
        print(url)
        data = json.dumps(
            {"appId": app_id,
             "taskId": task_id,
             "title": "autotest",
             "detail": "test for autotest create weixin issue",
             "reportSource": "四大区运营",
             "score": "非常严重",
             "other": "{\"phoneNumber\":\"15921372876\",\"order\":\"1234568\",\"phoneType\":\"find v5\",\"version\":\"0928gray\",\"square\":\"通州万达\",\"occurrenceTime\":\"2017-09-01T09:01:00.000+0800\",\"area\":\"ALL\",\"phoneBrand\":\"Vivo\",\"severity\":\"非常严重\",\"businessType\":\"停车\"}",
             "images": ["T1ZQYvB5xT1RCvBVdK"]
             })
        response = requests.post(url, data=data, headers=HEADERS)
        res = get_reponse_res(response)
        if res:
            if res['status'] == 200 and res['data']['title'] == "autotest":
                print("create weixin issue successful")
                print(res['data'])
                issue_id_weixin = res['data']['id']
                return issue_id_weixin
            else:
                msg = res['msg']
                print("create weixin issue failed :%s" % msg)
                sys.exit(1)
        else:
            response.raise_for_status()
    except Exception as e:
        print("create weixin issue exception: %s" % (e))
        sys.exit(1)
def update_jira_status(jira_id):
    """Set the jira issue's status on the SIT jira service and verify it.

    Fixes two defects of the original:
      * the body was JSON-decoded before the HTTP status was checked, so a
        non-200 reply with a non-JSON body crashed with JSONDecodeError
        instead of the intended HTTPError;
      * a 200 reply whose status field did not match fell through
        ``raise_for_status()`` (a no-op on 200) and was silently ignored —
        now it is reported and the script exits like the other steps.
    """
    try:
        url = "%sapi/v1/CcStatus/%s/" % (JIRA_SIT_URL, jira_id)
        print(url)
        data = json.dumps({"status": "处理中"})
        headers = {"Content-Type": "application/json"}
        response = requests.put(url, data=data, headers=headers)
        if response.status_code != 200:
            response.raise_for_status()
        res = json.loads(response.text)
        if res['data']['status'] == "处理中":
            print("update jira status successful")
            print(res['data'])
        else:
            print("update jira status failed :%s" % res)
            sys.exit(1)
    except Exception as e:
        print("update jira status exception: %s" % (e))
        sys.exit(1)
def delete_app(app_id):
    # Delete the test app; the backend cascades the delete to its
    # associated tasks and issues, so this is the whole cleanup.
    try:
        url = "%sapi/v1/apps/%s/" % (ZHONGCE_SIT_URL, app_id)
        print(url)
        response = requests.delete(url, headers=HEADERS)
        res = get_reponse_res(response)
        if not res:
            # No parseable body: surface the HTTP error (caught below).
            response.raise_for_status()
        elif res['status'] == 200:
            print("delete app successful")
        else:
            print("delete app failed :%s" % res['msg'])
            sys.exit(1)
    except Exception as e:
        print("delete app exception: %s" % (e))
        sys.exit(1)
# End-to-end smoke test: create an app and a task, exercise both issue
# workflows (platform-born and WeChat-forwarded), mirror issues to JIRA,
# update the JIRA status, then remove all created data.
if __name__ == "__main__":
    print("----------------start zhongce autotest---------------------")
    token = login()
    # Every subsequent request authenticates with the freshly obtained token.
    HEADERS = {"Content-Type": "application/json", "Authorization": "token %s" % token}
    app_id, app_name = create_app()
    print("1.创建app成功")
    get_app_detail(app_id, app_name)
    print("2.查询app详情成功")
    app_owner_group_id = get_app_owner_group_id(app_id, app_name)
    print("3.获取app的owner组id成功")
    add_auto_test_user_to_app_owner(app_owner_group_id)
    print("4.将自动化测试的用户加入app的owner组成功")
    task_id, task_name = create_task(app_id)
    print("5.创建任务成功")
    get_task_detail(task_id, task_name)
    print("6.查询任务详情成功")
    update_task(task_id)
    print("7.更新任务成功")
    # Tests for issues born on the crowd-testing platform itself
    print("---------------8.开始关于众测平台本身问题的测试-------------")
    issue_id_zhongce = create_issue_zhongce(app_id, task_id)
    print("8.1模拟创建众测平台本身的issue成功")
    get_issue_detail(issue_id_zhongce)
    print("8.2查询众测平台issue详情成功")
    update_issue_detail(issue_id_zhongce, app_id, task_id)
    print("8.3更新众测平台issue详情成功")
    jira_id_zhongce = issue_to_jira(issue_id_zhongce)
    print("8.4众测平台的issue转成jira成功")
    # Tests for issues forwarded from the WeChat mini-program
    print("--------------9.开始关于从微信小程序转过来的问题的测试-------------")
    issue_id_weixin = create_issue_weixin(app_id, task_id)
    print("9.1模拟创建微信转众测的issue成功")
    issue_weixin_detail = get_issue_detail(issue_id_weixin)
    print("9.2查询微信转众测的issue的详情成功")
    jira_id_weixin = issue_to_jira(issue_id_weixin)
    print("9.3将微信转众测的issue转成jira成功")
    update_jira_status(jira_id_weixin)
    print("9.4更新jira中问题的状态成功!")
    delete_app(app_id)
    print("10.清除相关数据成功")
| 34.802151 | 290 | 0.524748 |
32855b489fb49b74229d7c0202d0b31cc63c04fe | 106 | py | Python | sns-lambda-cdk/src/lambda/handler.py | enochtsai/serverless-patterns | c3b519304b8d4fdeff5ce6efb51239d35073fac3 | [
"MIT-0"
] | 883 | 2021-03-17T20:45:34.000Z | 2022-03-30T03:21:48.000Z | sns-lambda-cdk/src/lambda/handler.py | enochtsai/serverless-patterns | c3b519304b8d4fdeff5ce6efb51239d35073fac3 | [
"MIT-0"
] | 318 | 2021-03-31T16:47:35.000Z | 2022-03-31T12:55:12.000Z | sns-lambda-cdk/src/lambda/handler.py | enochtsai/serverless-patterns | c3b519304b8d4fdeff5ce6efb51239d35073fac3 | [
"MIT-0"
] | 218 | 2021-03-31T20:38:22.000Z | 2022-03-31T08:56:28.000Z | import json
def main(event, context):
    """AWS Lambda entry point: log the invocation and the raw event.

    Args:
        event: the JSON-serializable payload delivered by the trigger (SNS).
        context: the Lambda runtime context object (unused).
    Returns:
        None -- the handler is fire-and-forget.
    """
    print("lambda invoked")
    print(json.dumps(event))
    # The final line was corrupted by extraction residue; restored to a bare return.
    return
0a01e0705ad2074c11165d81c402732a9364d6b4 | 63 | py | Python | magicclass/ext/pyvista/__init__.py | hanjinliu/magic-class | 2a9d8af3d385ec3870ebcade9f2dbc03115bed22 | [
"BSD-3-Clause"
] | 15 | 2021-09-07T10:18:59.000Z | 2022-03-23T14:55:45.000Z | magicclass/ext/pyvista/__init__.py | hanjinliu/magic-class | 2a9d8af3d385ec3870ebcade9f2dbc03115bed22 | [
"BSD-3-Clause"
] | 12 | 2021-09-10T08:54:43.000Z | 2022-03-31T02:43:50.000Z | magicclass/ext/pyvista/__init__.py | hanjinliu/magic-class | 2a9d8af3d385ec3870ebcade9f2dbc03115bed22 | [
"BSD-3-Clause"
] | 1 | 2022-02-13T15:51:51.000Z | 2022-02-13T15:51:51.000Z | from .widgets import PyVistaCanvas
__all__ = ["PyVistaCanvas"] | 21 | 34 | 0.793651 |
b58b189effa056ccfca53bd4aff6cf976a9dfa2d | 1,488 | py | Python | 22_all_paranthesis.py | rahlk/LeetCode | 92ea94a801d12e1cc350972b876d35a2b0f50996 | [
"MIT"
] | 1 | 2018-02-19T21:51:46.000Z | 2018-02-19T21:51:46.000Z | 22_all_paranthesis.py | rahlk/LeetCode | 92ea94a801d12e1cc350972b876d35a2b0f50996 | [
"MIT"
] | null | null | null | 22_all_paranthesis.py | rahlk/LeetCode | 92ea94a801d12e1cc350972b876d35a2b0f50996 | [
"MIT"
] | null | null | null | from pdb import set_trace
class AllParanthesis:
def solution_recur(self, n):
"""
A recursive solution
:type n: int
:rtype: List[str]
"""
def paranthesize(string, left, right, N, combinations):
if left == N and right == N:
combinations.append(string)
else:
if left < N:
paranthesize(string+"(", left+1, right, N, combinations)
if right < left:
paranthesize(
string + ")", left, right + 1, N, combinations)
return combinations
return paranthesize(string="", left=0, right=0, N=n, combinations=[])
def solution_dprog(self, n):
"""
My dynamic programing solution
:type n: int
:rtype: List[str]
"""
combinations = [[] for _ in xrange(n + 1)]
combinations[0] = [""]
combinations[1] = ["()"]
if n == 0:
return combinations[n]
for i in xrange(1, n + 1):
for string in combinations[i - 1]:
for idx, s in enumerate(string):
new = string[:idx] + "()" + string[idx:]
if not new in combinations[i]:
combinations[i].append(new)
return combinations[n]
if __name__ == "__main__":
    # Demo run: print the 5 valid combinations for n == 3.
    all_paranthesis = AllParanthesis()
    sol = all_paranthesis.solution_recur(3)
    # Bug fix: `print sol` is Python-2-only syntax (a SyntaxError on
    # Python 3); the call form behaves identically on both versions.
    print(sol)
| 28.075472 | 77 | 0.494624 |
d16cb772dc07bd35a639615b4c751bc456dea938 | 8,300 | py | Python | 点餐系统mysql/DataBase.py | yxg995995/yxg-code | 7dd23f3dfb13d68ebc44bf7d5a344fb661c33136 | [
"MIT"
] | 1 | 2021-03-15T02:07:50.000Z | 2021-03-15T02:07:50.000Z | 点餐系统mysql/DataBase.py | yxg995995/yxg-code | 7dd23f3dfb13d68ebc44bf7d5a344fb661c33136 | [
"MIT"
] | null | null | null | 点餐系统mysql/DataBase.py | yxg995995/yxg-code | 7dd23f3dfb13d68ebc44bf7d5a344fb661c33136 | [
"MIT"
] | 1 | 2021-03-15T02:07:55.000Z | 2021-03-15T02:07:55.000Z | from pymysql import *
import datetime
class DataBase:
    """pymysql-backed data-access layer for the ``xinxi`` ordering system.

    A single long-lived connection is opened at construction time and shared
    by every method; call :meth:`close` when finished.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded here; move them to
        # configuration / environment variables before shipping.
        self.db_name = 'xinxi'
        self.db = connect('localhost', 'yxg', 'yxg579521..', self.db_name)

    # ---------------------------------------------------------------- users
    def insertUser(self, username, password, phone):
        """Insert a new customer row into ``user``."""
        c = self.db.cursor()
        c.execute('INSERT into user values (%s,%s,%s)', (username, password, phone))
        self.db.commit()

    def find_user(self, username):
        """Return True when *username* already exists in ``user``."""
        c = self.db.cursor()
        c.execute('select username from user where username=%s', (username,))
        return c.fetchone() is not None

    def find_phone(self, phone):
        """Return True when *phone* is already registered in ``user``."""
        c = self.db.cursor()
        c.execute('select phone from user where phone=%s', (phone,))
        return c.fetchone() is not None

    def Verify(self, username, password):
        """Return True when the username/password pair matches a ``user`` row."""
        c = self.db.cursor()
        c.execute('select username,password from user where username=%s and password=%s',
                  (username, password))
        return c.fetchone() is not None

    def getUser_password(self, username):
        """Return the password row for *username*, or None.

        Bug fix: the query previously read ``from username`` -- a table that
        does not exist; the data lives in ``user``.
        """
        c = self.db.cursor()
        c.execute('select password from user where username=%s', (username,))
        result = c.fetchone()
        self.db.commit()
        return result

    # --------------------------------------------------------------- admins
    def insertAdmin(self, username, password, email):
        """Insert a new administrator row into ``admin``."""
        c = self.db.cursor()
        c.execute('INSERT into admin values (%s,%s,%s)', (username, password, email))
        self.db.commit()

    def find_admin(self, username):
        """Return True when *username* already exists in ``admin``."""
        c = self.db.cursor()
        c.execute('select adminName from admin where adminName=%s', (username,))
        return c.fetchone() is not None

    def find_email(self, email):
        """Return True when *email* is already registered in ``admin``."""
        c = self.db.cursor()
        c.execute('select email from admin where email=%s', (email,))
        return c.fetchone() is not None

    def Verify_admin(self, username, password):
        """Return True when the admin username/password pair matches."""
        c = self.db.cursor()
        c.execute('select adminName,password from admin where adminName=%s and password=%s',
                  (username, password))
        return c.fetchone() is not None

    def getAdmin_password(self, username):
        """Return the password row for an admin, or None.

        Bug fix: the query previously read a non-existent ``username`` table
        with a ``username`` column; admins are stored in ``admin`` keyed by
        ``adminName`` (see Verify_admin / find_admin).
        """
        c = self.db.cursor()
        c.execute('select password from admin where adminName=%s', (username,))
        result = c.fetchone()
        self.db.commit()
        return result

    # ----------------------------------------------------------------- menu
    def insert_menu(self, data):
        """Bulk-insert rows (5 columns each) into ``menu``."""
        c = self.db.cursor()
        c.executemany('insert into menu values (%s,%s,%s,%s,%s)', data)
        self.db.commit()

    def is_empty(self, id):
        """Return True when dining table *id* is free ('空')."""
        c = self.db.cursor()
        c.execute('select status from xinxi.table where id=%s', (id,))
        result = c.fetchone()
        self.db.commit()
        return result[0] == '空'

    def set_full(self, id):
        """Mark dining table *id* as reserved ('已订')."""
        c = self.db.cursor()
        c.execute("update xinxi.table set status='已订' where id=%s", (id,))
        self.db.commit()

    def insert(self, tablename, data):
        """Insert one row of *data* into *tablename*.

        WARNING: *tablename* is interpolated into the SQL string (identifiers
        cannot be bound as parameters), so only call this with trusted,
        hard-coded table names. The values themselves are bound safely.
        (This method used to be defined twice with identical bodies; the
        duplicate definition has been removed.)
        """
        c = self.db.cursor()
        placeholders = ','.join(['%s'] * len(data))
        query = 'INSERT into ' + tablename + ' values(' + placeholders + ')'
        c.execute(query, tuple(data))
        self.db.commit()

    def get_mesg(self, Class):
        """Return [pic, name, price] lists for every menu row of *Class*."""
        c = self.db.cursor()
        c.execute('select pic,name,price from menu where class=%s', (Class,))
        return [list(row) for row in c.fetchall()]

    # ---------------------------------------------------------------- orders
    def insert_order(self, data):
        """Replace the current (scratch) order with *data* rows."""
        self.delete_order()
        c = self.db.cursor()
        c.executemany('insert into xinxi.order values(%s,%s,%s,%s)', data)
        self.db.commit()

    def delete_order(self):
        """Empty the scratch ``order`` table."""
        c = self.db.cursor()
        c.execute('truncate table xinxi.order')
        self.db.commit()

    def insert_parameters(self, data):
        """Replace the single ``parameters`` row with *data* (5 list-literal strings)."""
        self.delete_parameters()
        c = self.db.cursor()
        c.execute('insert into xinxi.parameters values(%s,%s,%s,%s,%s)', data)
        self.db.commit()

    def delete_parameters(self):
        """Empty the ``parameters`` table."""
        c = self.db.cursor()
        c.execute('truncate table xinxi.parameters')
        self.db.commit()

    def get_parameters(self):
        """Return the stored parameter row parsed into a list of lists.

        NOTE(review): the stored strings are parsed with ``eval``; they are
        written only by set_parameters / insert_parameters, but
        ``ast.literal_eval`` would be the safer choice.
        """
        c = self.db.cursor()
        c.execute('select * from xinxi.parameters ')
        result = c.fetchall()
        return [list(eval(i)) for i in result[0]]

    def set_parameters(self):
        """Reset the parameter row to its all-zero defaults."""
        self.delete_parameters()
        self.insert_parameters(['[0,0,0,0]', '[0,0,0,0,0]', '[0,0,0,0,0,0]',
                                '[0,0,0,0,0]', '[0,0,0,0,0,0]'])

    def get_quantityAndprice(self):
        """Return [total_quantity, total_price] of the current order."""
        c = self.db.cursor()
        c.execute('SELECT sum(quantity),sum(price) from xinxi.order;')
        result = c.fetchone()
        self.db.commit()
        return [int(i) for i in result]

    def get_order(self):
        """Return every row of the current order."""
        c = self.db.cursor()
        c.execute('select * from xinxi.order')
        result = c.fetchall()
        self.db.commit()
        return result

    def insert_allOrder(self, username):
        """Archive the current order rows into ``all_order``, each stamped
        with *username* and the current timestamp."""
        c = self.db.cursor()
        c.execute('select * from xinxi.order')
        result = c.fetchall()
        date = datetime.datetime.now()
        rows = [[username, date] + list(row) for row in result]
        c.executemany('insert into xinxi.all_order values(%s,%s,%s,%s,%s,%s)', rows)
        self.db.commit()

    # ---------------------------------------------------------------- tables
    def get_tableid(self):
        """Return every dining-table id as a string."""
        c = self.db.cursor()
        c.execute('select id from xinxi.table')
        result = c.fetchall()
        self.db.commit()
        return [str(row[0]) for row in result]

    def update_table(self, id):
        """Mark dining table *id* as free ('空')."""
        c = self.db.cursor()
        c.execute("update xinxi.table set status='空' where id=%s", (id,))
        self.db.commit()

    def setTable_empty(self):
        """Mark every dining table as free."""
        for table_id in self.get_tableid():
            # int() replaces the previous eval() on these numeric id strings.
            self.update_table(int(table_id))

    def get_menuName(self):
        """Return the name of every menu item."""
        c = self.db.cursor()
        c.execute('select name from xinxi.menu')
        result = c.fetchall()
        self.db.commit()
        return [row[0] for row in result]

    def get_menu(self, name):
        """Return the full menu row for *name* as a list."""
        c = self.db.cursor()
        c.execute('select * from xinxi.menu where name=%s', (name,))
        result = c.fetchall()
        self.db.commit()
        return list(result[0])

    def update_menu(self, data):
        """Update one menu row; *data* is (name, class, pic, price,
        introduction, lookup_name) where the final value selects the row."""
        c = self.db.cursor()
        c.execute('''update xinxi.menu
               set name=%s,class=%s,pic=%s,price=%s,introduction=%s
               where name=%s''', data)
        self.db.commit()

    def delete_menu(self, name):
        """Delete the menu row called *name*."""
        c = self.db.cursor()
        c.execute('DELETE FROM xinxi.menu where name=%s', (name,))
        self.db.commit()

    # ----------------------------------------------------------------- misc
    def get_user(self):
        """Return every registered username."""
        c = self.db.cursor()
        c.execute('select username from user ')
        result = c.fetchall()
        self.db.commit()
        return [row[0] for row in result]

    def delete_user(self, name):
        """Delete the ``user`` row called *name*."""
        c = self.db.cursor()
        c.execute('DELETE FROM xinxi.user where username=%s', (name,))
        self.db.commit()

    def getSUM(self):
        """Return the grand total of the ``sum`` column across all archived orders."""
        c = self.db.cursor()
        c.execute('select sum(sum) from xinxi.all_order')
        result = c.fetchone()
        return result[0]

    def close(self):
        """Close the shared database connection."""
        self.db.close()
import pandas as pd
import numpy as np
# Quick manual check: print the grand total of all archived orders.
if __name__=='__main__':
    db=DataBase()
    data=db.getSUM()
    print(data)
| 29.020979 | 105 | 0.531446 |
c1f07d6225a87a1350dc6f552821658e8cadbc73 | 2,381 | py | Python | azure-mgmt-keyvault/setup.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | azure-mgmt-keyvault/setup.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | azure-mgmt-keyvault/setup.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from setuptools import setup
from io import open
import re
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't);
# if that attribute exists, an old monolithic 'azure' install is present
# and must be removed before this namespaced package can work.
try:
    import azure
    try:
        ver = azure.__version__
        raise Exception(
            'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        pass
except ImportError:
    pass

# Version extraction inspired from 'requests': read VERSION = '...' out of
# the package's version.py instead of importing it (avoids import-time deps).
with open('azure/mgmt/keyvault/version.py', 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

# README + changelog together form the PyPI long description.
with open('README.rst', encoding='utf-8') as f:
    readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
    history = f.read()

setup(
    name='azure-mgmt-keyvault',
    version=version,
    description='Microsoft Azure KeyVault Apps Resource Management Client Library for Python',
    long_description=readme + '\n\n' + history,
    license='MIT License',
    author='Microsoft Corporation',
    author_email='ptvshelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=[
        'azure',
        'azure.mgmt',
        'azure.mgmt.keyvault',
        'azure.mgmt.keyvault.models',
        'azure.mgmt.keyvault.operations',
    ],
    install_requires=[
        'msrestazure~=0.4.6',
        'azure-common~=1.1.4',
        'azure-mgmt-nspkg',
    ],
)
| 31.746667 | 94 | 0.582948 |
7971f8d3a1028115b28df8bdc3c0c23f12822107 | 1,656 | py | Python | musmanim/text.py | mscuthbert/musmanim | 3ce0502d44b715e08f6ddca375c3a84d0a4078cf | [
"BSD-3-Clause"
] | null | null | null | musmanim/text.py | mscuthbert/musmanim | 3ce0502d44b715e08f6ddca375c3a84d0a4078cf | [
"BSD-3-Clause"
] | null | null | null | musmanim/text.py | mscuthbert/musmanim | 3ce0502d44b715e08f6ddca375c3a84d0a4078cf | [
"BSD-3-Clause"
] | null | null | null | import manim as m
MAXIMA = ''
LONGA = ''
BREVE = ''
SEMIBREVE = ''
MINIM = ''
class Text(m.Text):
    """manim Text with the project's default font and color.

    The color is applied via ``set_color`` after construction rather than
    passed through to ``m.Text`` -- presumably so the whole mobject
    (including submobjects) is recolored; confirm against the manim
    version in use.
    """
    def __init__(self, t, font='Adobe Garamond Pro', color=m.WHITE, **kwargs):
        super().__init__(t, font=font, **kwargs)
        self.set_color(color)
class Bravura(Text):
    """Text rendered in the Bravura Text music font (SMuFL glyphs).

    Defaults to a large font size and purple, the style used for the
    notation glyphs in this module.
    """
    def __init__(self, t='', font_size=144, color=m.PURPLE_C, **kwargs):
        super().__init__(t,
                         color=color,
                         font_size=font_size,
                         font='Bravura Text',
                         **kwargs)
def time_signature(numerator: int = 4, denominator: int = 4, **kwargs) -> m.VGroup:
    """Build a Bravura time-signature mobject.

    The numerator row is stacked on top of the denominator row; extra
    keyword arguments are forwarded to every :class:`Bravura` glyph.

    Generalized: any non-negative integer is supported (the original
    handled at most two digits per number); behavior for one- and
    two-digit values is unchanged.
    """
    def digit_row(value: int):
        # SMuFL maps the time-signature digits 0-9 to U+E080..U+E089
        # (0xE080 == 57472, the constant used by the original lambda).
        glyphs = [Bravura(chr(0xE080 + int(ch)), **kwargs) for ch in str(value)]
        if len(glyphs) == 1:
            # A single digit is returned bare, exactly as before.
            return glyphs[0]
        return m.VGroup(*glyphs).arrange(m.RIGHT, buff=0)

    return m.VGroup(digit_row(numerator), digit_row(denominator)).arrange(m.DOWN, buff=0)
fe3950fcbe073529860af34dadf5ef07e3051cb3 | 14,367 | py | Python | cvpr_clvision_challenge-master/submissions_archive/20200323_EWS/submissions/multi-task-nc/code_snapshot/train_test.py | aobject/NYU-AI-Project-02 | e25465345c85a774410b6dd7fccf53f93ad9318e | [
"CC-BY-4.0"
] | 2 | 2020-11-10T10:01:47.000Z | 2021-09-10T06:47:31.000Z | cvpr_clvision_challenge-master/submissions_archive/20200323_EWS/submissions/multi-task-nc/code_snapshot/train_test.py | aobject/NYU-AI-Project-02 | e25465345c85a774410b6dd7fccf53f93ad9318e | [
"CC-BY-4.0"
] | null | null | null | cvpr_clvision_challenge-master/submissions_archive/20200323_EWS/submissions/multi-task-nc/code_snapshot/train_test.py | aobject/NYU-AI-Project-02 | e25465345c85a774410b6dd7fccf53f93ad9318e | [
"CC-BY-4.0"
] | 3 | 2020-09-10T22:44:35.000Z | 2020-12-08T15:51:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2019. Vincenzo Lomonaco, Massimo Caccia, Pau Rodriguez, #
# Lorenzo Pellegrini. All rights reserved. #
# Copyrights licensed under the CC BY 4.0 License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 8-11-2019 #
# Author: Vincenzo Lomonaco #
# E-mail: vincenzo.lomonaco@unibo.it #
# Website: vincenzolomonaco.com #
################################################################################
"""
General useful functions for machine learning with Pytorch.
"""
# Python 2-3 compatible
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
import torch
from torch.autograd import Variable
from .common import pad_data, shuffle_in_unison, check_ext_mem, check_ram_usage
def train_net(optimizer, model, criterion, mb_size, x, y, t,
              train_ep, preproc=None, use_cuda=True, mask=None):
    """
    Train a Pytorch model from pre-loaded tensors.

    Args:
        optimizer (object): the pytorch optimizer.
        model (object): the pytorch model to train.
        criterion (func): loss function.
        mb_size (int): mini-batch size.
        x (tensor): train data.
        y (tensor): train labels.
        t (int): task label.
        train_ep (int): number of training epochs.
        preproc (func): optional preprocessing applied to ``x``.
        use_cuda (bool): if we want to use gpu or cpu.
        mask (bool): unused; kept for interface compatibility.
    Returns:
        ave_loss (float): per-sample average loss over the last epoch.
        acc (float): average accuracy over the last epoch.
        stats (dict): dictionary of several stats collected.
    """
    stats = {"ram": [], "disk": []}

    if preproc:
        x = preproc(x)

    # Pad so every mini-batch is full, then shuffle data and labels together.
    (train_x, train_y), it_x_ep = pad_data(
        [x, y], mb_size
    )
    shuffle_in_unison(
        [train_x, train_y], 0, in_place=True
    )

    model = maybe_cuda(model, use_cuda=use_cuda)
    acc = None
    ave_loss = 0

    train_x = torch.from_numpy(train_x).type(torch.FloatTensor)
    train_y = torch.from_numpy(train_y).type(torch.LongTensor)

    for ep in range(train_ep):
        # Record external-memory and RAM usage once per epoch.
        stats['disk'].append(check_ext_mem("cl_ext_mem"))
        stats['ram'].append(check_ram_usage())

        model.active_perc_list = []
        model.train()

        print("training ep: ", ep)
        correct_cnt, cum_loss = 0, 0
        for it in range(it_x_ep):

            start = it * mb_size
            end = (it + 1) * mb_size

            optimizer.zero_grad()

            x_mb = maybe_cuda(train_x[start:end], use_cuda=use_cuda)
            y_mb = maybe_cuda(train_y[start:end], use_cuda=use_cuda)
            logits = model(x_mb)

            _, pred_label = torch.max(logits, 1)
            correct_cnt += (pred_label == y_mb).sum()

            loss = criterion(logits, y_mb)
            # Bug fix: the loss used to be divided in place every iteration
            # (`ave_loss /= ...` on an accumulator that kept growing), so the
            # reported/returned value was neither a sum nor a mean. Keep a
            # plain running sum and derive the per-sample mean from it.
            cum_loss += loss.item()

            loss.backward()
            optimizer.step()

            acc = correct_cnt.item() / ((it + 1) * y_mb.size(0))
            ave_loss = cum_loss / ((it + 1) * y_mb.size(0))

            if it % 100 == 0:
                print(
                    '==>>> it: {}, avg. loss: {:.6f}, '
                    'running train acc: {:.3f}'
                    .format(it, ave_loss, acc)
                )

    return ave_loss, acc, stats
def preprocess_imgs(img_batch, scale=True, norm=True, channel_first=True):
    """
    Pre-process a batch of channels-last RGB images the way the pytorch
    pre-trained models expect them.

    Args:
        img_batch (tensor): batch of images, shape (N, H, W, 3).
        scale (bool): divide by 255 so pixel values land in [0, 1].
        norm (bool): subtract the ImageNet per-channel mean and divide by
            the per-channel std, in place on the (possibly scaled) batch.
        channel_first (bool): move channels ahead of height/width,
            i.e. (N, H, W, 3) -> (N, 3, H, W) (the caffe/pytorch layout).
    Returns:
        tensor: pre-processed batch.
    """
    imagenet_stats = ((0.485, 0.229), (0.456, 0.224), (0.406, 0.225))

    if scale:
        # Convert to float in [0, 1] (produces a new array).
        img_batch = img_batch / 255

    if norm:
        # Normalize each RGB channel with its ImageNet mean/std.
        for ch, (mean, std) in enumerate(imagenet_stats):
            img_batch[:, :, :, ch] = (img_batch[:, :, :, ch] - mean) / std

    if channel_first:
        img_batch = np.transpose(img_batch, (0, 3, 1, 2))

    return img_batch
def maybe_cuda(what, use_cuda=True, **kw):
    """
    Return *what* moved to the GPU when requested and available, otherwise
    return it unchanged.

    Args:
        what (object): any object exposing ``.cuda()`` (tensor or module).
        use_cuda (bool): set to False to force CPU.
    Returns:
        object: the same object, possibly moved to the GPU.
    """
    gpu_requested = use_cuda is not False
    if gpu_requested and torch.cuda.is_available():
        return what.cuda()
    return what
def test_multitask(
        model, test_set, mb_size, preproc=None, use_cuda=True, multi_heads=[], verbose=True):
    """
    Test a model considering that the test set is composed of multiple tests
    one for each task.
    Args:
        model (nn.Module): the pytorch model to test.
        test_set (list): list of ((x, y), t) test tuples.
        mb_size (int): mini-batch size.
        preproc (func): image preprocess function.
        use_cuda (bool): if we want to use gpu or cpu.
        multi_heads (list): ordered list of "heads" (fc layers) to be used
            for each task. NOTE: mutable default argument -- harmless here
            because it is only read, never mutated.
        verbose (bool): print per-task and average accuracies.
    Returns:
        stats (dict): collected stats of the test including average and
            per task accuracies.
        preds (list): flat list of predicted labels over all tasks.
    """
    model.eval()
    acc_x_task = []
    stats = {'accs': [], 'acc': []}
    preds = []
    for (x, y), t in test_set:
        if preproc:
            x = preproc(x)
        if multi_heads != [] and len(multi_heads) > t:
            # we can use the stored head: swap it in as the model's fc layer
            if verbose:
                print("Using head: ", t)
            model.fc = multi_heads[t]
        model = maybe_cuda(model, use_cuda=use_cuda)
        acc = None
        test_x = torch.from_numpy(x).type(torch.FloatTensor)
        test_y = torch.from_numpy(y).type(torch.LongTensor)
        correct_cnt, ave_loss = 0, 0
        with torch.no_grad():
            # +1 so the final partial mini-batch is also evaluated.
            iters = test_y.size(0) // mb_size + 1
            for it in range(iters):
                start = it * mb_size
                end = (it + 1) * mb_size
                x_mb = maybe_cuda(test_x[start:end], use_cuda=use_cuda)
                y_mb = maybe_cuda(test_y[start:end], use_cuda=use_cuda)
                logits = model(x_mb)
                _, pred_label = torch.max(logits, 1)
                correct_cnt += (pred_label == y_mb).sum()
                preds += list(pred_label.data.cpu().numpy())
            acc = correct_cnt.item() / test_y.shape[0]
        if verbose:
            print('TEST Acc. Task {}==>>> acc: {:.3f}'.format(t, acc))
        acc_x_task.append(acc)
        stats['accs'].append(acc)
    stats['acc'].append(np.mean(acc_x_task))
    if verbose:
        print("------------------------------------------")
        print("Avg. acc:", stats['acc'])
        print("------------------------------------------")
    # reset the head for the next batch
    if multi_heads:
        if verbose:
            print("classifier reset...")
        # NOTE(review): this Linear layer is created but never attached to
        # the model, so the "reset" is a no-op. If the intent is to replace
        # the head, this should presumably be `model.fc = ...` -- confirm.
        classifier = torch.nn.Linear(512, 50)
    return stats, preds
# Start Modification
def train_net_ewc(optimizer, model, criterion, mb_size, x, y, t, fisher_dict, optpar_dict, ewc_lambda,
                  train_ep, preproc=None, use_cuda=True, mask=None):
    """
    Train a Pytorch model from pre-loaded tensors, regularized with EWC
    (Elastic Weight Consolidation) so earlier tasks are not forgotten.

    Args:
        optimizer (object): the pytorch optimizer.
        model (object): the pytorch model to train.
        criterion (func): loss function.
        mb_size (int): mini-batch size. we use 32.
        x (tensor): train data.
        y (tensor): train labels.
        t (int): task label; tasks 0..t-1 contribute EWC penalties.
        fisher_dict (dict): per-task, per-parameter Fisher information.
        optpar_dict (dict): per-task, per-parameter optimized values.
        ewc_lambda (float): weight of the EWC penalty term.
        train_ep (int): number of training epochs.
        preproc (func): optional preprocessing applied to ``x``.
        use_cuda (bool): if we want to use gpu or cpu.
        mask (bool): unused; kept for interface compatibility.
    Returns:
        ave_loss (float): per-sample average cross-entropy loss over the
            last epoch (EWC penalty excluded, matching original behavior).
        acc (float): average accuracy over the last epoch.
        stats (dict): dictionary of several stats collected.
    """
    stats = {"ram": [], "disk": []}

    if preproc:
        x = preproc(x)

    (train_x, train_y), it_x_ep = pad_data(
        [x, y], mb_size
    )
    shuffle_in_unison(
        [train_x, train_y], 0, in_place=True
    )

    model = maybe_cuda(model, use_cuda=use_cuda)
    acc = None
    ave_loss = 0

    train_x = torch.from_numpy(train_x).type(torch.FloatTensor)
    train_y = torch.from_numpy(train_y).type(torch.LongTensor)

    for ep in range(train_ep):
        stats['disk'].append(check_ext_mem("cl_ext_mem"))
        stats['ram'].append(check_ram_usage())

        model.active_perc_list = []
        model.train()

        print("training ep: ", ep)
        correct_cnt, cum_loss = 0, 0
        for it in range(it_x_ep):

            start = it * mb_size
            end = (it + 1) * mb_size

            optimizer.zero_grad()

            x_mb = maybe_cuda(train_x[start:end], use_cuda=use_cuda)
            y_mb = maybe_cuda(train_y[start:end], use_cuda=use_cuda)
            logits = model(x_mb)

            _, pred_label = torch.max(logits, 1)
            correct_cnt += (pred_label == y_mb).sum()

            loss = criterion(logits, y_mb)
            # Bug fix: the running loss used to be divided in place every
            # iteration, corrupting the accumulator; keep a plain sum.
            # The EWC penalty below is deliberately NOT part of the reported
            # loss (same as the original, which recorded loss.item() first).
            cum_loss += loss.item()

            # EWC quadratic penalty: for each previous task, pull every
            # parameter towards the value it had at the end of that task,
            # weighted by its Fisher information (parameter importance).
            for task in range(t):
                for name, param in model.named_parameters():
                    fisher = fisher_dict[task][name]
                    optpar = optpar_dict[task][name]
                    loss += (fisher * (optpar - param).pow(2)).sum() * ewc_lambda

            loss.backward()
            optimizer.step()

            acc = correct_cnt.item() / ((it + 1) * y_mb.size(0))
            ave_loss = cum_loss / ((it + 1) * y_mb.size(0))

            if it % 100 == 0:
                print(
                    '==>>> it: {}, avg. loss: {:.6f}, '
                    'running train acc: {:.3f}'
                    .format(it, ave_loss, acc)
                )

    return ave_loss, acc, stats
# Compute the diagonal Fisher information and snapshot the parameters at the
# end of a task (the quantities consumed by the EWC penalty in train_net_ewc).
def on_task_update(t, x, y, fisher_dict, optpar_dict, model, optimizer, criterion, mb_size, use_cuda=True, mask=None, preproc=None):
    """
    INPUT:
        t: integer representing the task number
        x: current x_train values
        y: current true y_train values (aka target values)
    OUTPUT:
        The new values are added to the fisher and optpar dictionaries,
        keyed by parameter name:
        fisher_dict[t]
        optpar_dict[t]
    """
    cur_ep = 0
    cur_train_t = t
    if preproc:
        x = preproc(x)
    (train_x, train_y), it_x_ep = pad_data(
        [x, y], mb_size
    )
    model = maybe_cuda(model, use_cuda=use_cuda)
    acc = None
    train_x = torch.from_numpy(train_x).type(torch.FloatTensor)
    train_y = torch.from_numpy(train_y).type(torch.LongTensor)
    model.active_perc_list = []
    model.train()
    # Forward/backward over the task data to populate parameter gradients
    # (no optimizer.step(): the weights themselves are not updated here).
    print("Updating Fisher values and old parameters")
    correct_cnt, ave_loss = 0, 0
    for it in range(it_x_ep):
        start = it * mb_size
        end = (it + 1) * mb_size
        # NOTE(review): zero_grad() runs every iteration, so after this loop
        # param.grad reflects ONLY the last mini-batch. If the Fisher
        # estimate is meant to average over the whole task data, gradients
        # would need to be accumulated instead -- confirm the intent.
        optimizer.zero_grad()
        x_mb = maybe_cuda(train_x[start:end], use_cuda=use_cuda)
        y_mb = maybe_cuda(train_y[start:end], use_cuda=use_cuda)
        logits = model(x_mb)
        _, pred_label = torch.max(logits, 1)
        correct_cnt += (pred_label == y_mb).sum()
        loss = criterion(logits, y_mb)
        loss.backward()
    fisher_dict[t] = {}
    optpar_dict[t] = {}
    # Update optpar_dict and fisher_dict for EWC: per parameter, snapshot the
    # optimized value and the squared gradient (diagonal Fisher approximation).
    for name, param in model.named_parameters():
        optpar_dict[t][name] = param.data.clone()
        fisher_dict[t][name] = param.grad.data.clone().pow(2)
    # End Modification
| 32.80137 | 186 | 0.54486 |
27b4a9b872f95dee2cb75bf9ba721769dd95803f | 3,853 | py | Python | seed/depends/auth/types.py | h4wldev/seed | 2febcb39edb6086128022e40d8734b0e3f93ebb1 | [
"MIT"
] | 3 | 2020-12-24T12:01:13.000Z | 2021-06-01T06:23:41.000Z | seed/depends/auth/types.py | h4wldev/seed | 2febcb39edb6086128022e40d8734b0e3f93ebb1 | [
"MIT"
] | null | null | null | seed/depends/auth/types.py | h4wldev/seed | 2febcb39edb6086128022e40d8734b0e3f93ebb1 | [
"MIT"
] | null | null | null | import arrow
import jwt
import uuid
import orjson
from typing import Any, Dict, Union, Optional
from seed.depends.redis import RedisContextManager
from seed.utils.convert import units_to_seconds
from seed.utils.crypto import AESCipher
from seed.setting import setting
class JWTTokenType:
    """Constants naming the two JWT token kinds used by this module."""
    # Short-lived token attached to regular API requests.
    ACCESS_TOKEN: str = 'access'
    # Long-lived token used to mint new access tokens.
    REFRESH_TOKEN: str = 'refresh'
class JWTToken(JWTTokenType):
    """A decoded JWT plus its Redis-backed validity record.

    Each subject has a Redis hash ``token:<subject>`` mapping token type
    ('access'/'refresh') to the UUID (jti) of the currently valid token;
    a token verifies only while its jti matches that stored value.
    """
    # Shared cipher used to encrypt/decrypt the 'secrets' claim.
    aes_cipher: AESCipher = AESCipher()
    def __init__(
        self,
        credential: str,
        algorithm: str = None,
        claims: Optional[Dict[str, Any]] = None
    ) -> None:
        """Parse *credential* (or reuse pre-decoded *claims*) into attributes."""
        self.credential: str = credential
        self.algorithm: str = algorithm or setting.jwt.algorithm
        # Decode only when the caller did not supply claims (create() does).
        self.claims: Dict[str, Any] = claims or self.decode(
            credential=credential,
            algorithm=self.algorithm
        )
        self.id: str = self.claims['jti']
        self.subject: str = self.claims['sub']
        self.payload: Dict[str, Any] = self.claims['payload']
        # NOTE(review): create() stores orjson-dumped secrets before
        # encryption, but no orjson.loads appears here -- this yields a dict
        # only if AESCipher.decrypt deserializes internally; confirm.
        self.secrets: Dict[str, Any] = self.aes_cipher.decrypt(
            self.claims['secrets']
        )
        self.token_type: str = self.claims['type']
        self.expires_in: int = self.claims['exp_in']
        # Timestamps converted to the configured timezone.
        self.expires: 'Arrow' = arrow.get(self.claims['exp']).to(setting.timezone)
        self.created_at: 'Arrow' = arrow.get(self.claims['iat']).to(setting.timezone)
        self.redis_name: str = f'token:{self.subject}'
    def verify(self) -> bool:
        """Return True iff this token's jti is the one currently stored in Redis."""
        with RedisContextManager() as r:
            stored_uuid: str = r.hget(
                name=self.redis_name,
                key=self.token_type,
            )
        # hget returns bytes (or None when absent); compare against the jti.
        return stored_uuid is not None and \
            self.id == stored_uuid.decode()
    @classmethod
    def create(
        cls,
        subject: str,
        payload: Dict[str, Any] = {},
        secrets: Dict[str, Any] = {},
        token_type: Optional[str] = 'access',
        expires: Union[int, str] = None,
        algorithm: Optional[str] = None
    ) -> 'JWTToken':
        """Mint a new token for *subject*, register its jti in Redis and
        return it as a JWTToken instance.

        NOTE: ``payload``/``secrets`` use mutable default arguments; they
        are only read here, so the pitfall is dormant.

        ``expires`` may be seconds (int) or a unit string understood by
        ``units_to_seconds``; defaults to the per-type setting value.
        """
        token_type: str = token_type or JWTTokenType.ACCESS_TOKEN
        algorithm: str = algorithm or setting.jwt.algorithm
        expires: Union[int, str] = expires or (
            setting.jwt.get(f'{token_type}_token_expires', None)
        )
        uuid_: str = str(uuid.uuid4())
        now: int = arrow.now(setting.timezone).int_timestamp
        claims: Dict[str, Any] = {
            'sub': subject,
            'iat': now,
            'nbf': now,
            'jti': uuid_,
            'type': token_type,
            'payload': payload,
            # Secrets are serialized then encrypted so they never appear in
            # the (merely base64-encoded) JWT body in clear text.
            'secrets': cls.aes_cipher.encrypt(
                orjson.dumps(secrets).decode('utf-8')
            ),
        }
        if expires is not None:
            if isinstance(expires, str):
                expires = units_to_seconds(expires)
            claims['exp'] = now + expires
            claims['exp_in'] = expires
        with RedisContextManager() as r:
            # Record this jti as the currently valid token of its type.
            r.hset(
                name=f'token:{subject}',
                key=token_type,
                value=uuid_,
            )
            if token_type == JWTTokenType.REFRESH_TOKEN:  # pragma: no cover
                # Expire the whole hash together with the refresh token.
                r.expire(
                    name=f'token:{subject}',
                    time=claims['exp_in'],
                )
        return cls(
            credential=jwt.encode(
                claims,
                setting.secret_key.jwt_secret_key,
                algorithm=algorithm,
                headers={'typ': 'JWT', 'alg': algorithm}
            ),
            algorithm=algorithm,
            claims=claims,
        )
    @staticmethod
    def decode(
        credential: str,
        algorithm: str = 'HS256'
    ) -> Dict[str, Any]:
        """Decode *credential* with the app secret and return its claims.

        Raises jwt exceptions (e.g. ExpiredSignatureError) on invalid tokens.
        """
        return jwt.decode(
            credential,
            setting.secret_key.jwt_secret_key,
            algorithms=algorithm
        )
| 29.189394 | 85 | 0.547885 |
eb9782511692dc93e5b2d0890f4a7897ee7564d6 | 2,608 | py | Python | Wrapping/Python/Testing/Import_Hdf5_Dataset.py | mhitzem/SIMPL | cd8a58f8d955d232ea039121cc5286cc9545c7a6 | [
"NRL"
] | 3 | 2018-01-18T18:27:02.000Z | 2021-06-13T06:10:52.000Z | Wrapping/Python/Testing/Import_Hdf5_Dataset.py | mhitzem/SIMPL | cd8a58f8d955d232ea039121cc5286cc9545c7a6 | [
"NRL"
] | 211 | 2016-07-27T12:18:16.000Z | 2021-11-02T13:42:11.000Z | Wrapping/Python/Testing/Import_Hdf5_Dataset.py | mhitzem/SIMPL | cd8a58f8d955d232ea039121cc5286cc9545c7a6 | [
"NRL"
] | 23 | 2016-02-15T21:23:47.000Z | 2021-08-11T15:35:24.000Z | # Pipeline: Import Hdf5 Data Array Test
import simpl
import simplpy
import simpl_helpers as sh
import simpl_test_dirs as sd
def createInputFile():
    """Build the DREAM3D .h5 input file consumed by import_hdf5_dataset_test."""
    # Data Container Array built directly through simpl
    container_array = simpl.DataContainerArray()

    err = simplpy.create_data_container(container_array, 'DataContainer')
    assert err == 0

    # Cell attribute matrix shaped 143 x 1 x 1
    table = sh.CreateDynamicTableData([[143, 1, 1]])
    am_path = simpl.DataArrayPath('DataContainer', 'CellAttributeMatrix', '')
    err = simplpy.create_attribute_matrix(container_array, am_path, simpl.AttributeMatrix.Type.Cell, table)
    assert err == 0

    # Load the dummy float array (3 components per tuple) from the CSV fixture
    err = simplpy.import_asci_data_array(container_array,
                                         simpl.DataArrayPath('DataContainer', 'CellAttributeMatrix', 'DummyDBL'),
                                         simpl.NumericTypes.Float, 3, 1,
                                         sd.GetBuildDirectory() + '/Data/SIMPL/VertexCoordinates.csv',
                                         simpl.DelimiterTypes.Comma)
    assert err == 0, f'ImportAsciiDataArray ErrorCondition: {err}'

    # Persist to the .h5 file the import test reads back
    err = sh.WriteDREAM3DFile(sd.GetBuildDirectory() + '/Data/Output/CoreFilters/ImportHDF5.h5', container_array)
    assert err == 0
def import_hdf5_dataset_test():
    """Read the previously written HDF5 dataset back via ImportHDF5Dataset."""
    container_array = simpl.DataContainerArray()

    err = simplpy.create_data_container(container_array, 'DataContainer')
    assert err == 0

    # Cell attribute matrix shaped 143 x 1 x 1, matching createInputFile()
    table = sh.CreateDynamicTableData([[143, 1, 1]])
    am_path = simpl.DataArrayPath('DataContainer', 'CellAttributeMatrix', '')
    err = simplpy.create_attribute_matrix(container_array, am_path, simpl.AttributeMatrix.Type.Cell, table)
    assert err == 0

    # Import the DummyDBL dataset (3 components per tuple) from the .h5 file
    import_list = simpl.DatasetImportInfoList(
        [['DataContainers/DataContainer/CellAttributeMatrix/DummyDBL', '3, 1']])
    err = simplpy.import_hdf5_dataset(container_array,
                                      sd.GetBuildDirectory() + '/Data/Output/CoreFilters/ImportHDF5.h5',
                                      import_list,
                                      simpl.DataArrayPath('DataContainer', 'CellAttributeMatrix', ''))
    assert err == 0, f'ImportHdf5Dataset ErrorCondition: {err}'

    err = sh.WriteDREAM3DFile(sd.GetBuildDirectory() + '/Data/Output/CoreFilters/ImportHDF5.dream3d', container_array)
    assert err == 0
if __name__ == '__main__':
    # Build the input .h5 first, then exercise the ImportHDF5Dataset filter.
    createInputFile()
    import_hdf5_dataset_test()
| 41.396825 | 131 | 0.678681 |
d4b3889e30d98a731546d71a722440ee1d136207 | 1,409 | py | Python | delta/utils/postprocess/postprocess_utils.py | hchang000/delta | 89320bd538e360d939c50d9f303e81554f6ce7ac | [
"Apache-2.0"
] | 1 | 2019-07-15T11:42:38.000Z | 2019-07-15T11:42:38.000Z | delta/utils/postprocess/postprocess_utils.py | hchang000/delta | 89320bd538e360d939c50d9f303e81554f6ce7ac | [
"Apache-2.0"
] | null | null | null | delta/utils/postprocess/postprocess_utils.py | hchang000/delta | 89320bd538e360d939c50d9f303e81554f6ce7ac | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' postprocess utils '''
from absl import logging
def ids_to_sentences(ids, vocab_file_path):
  """
  transform array of numbers to array of tags/words
  ids: [[1,2],[3,4]...]
  """
  # TODO import error
  from delta.data.preprocess.utils import load_vocab_dict

  vocab_dict = load_vocab_dict(vocab_file_path)
  id_to_vocab = {int(v): k for k, v in vocab_dict.items()}

  decoded = []
  for id_seq in ids:
    tokens = []
    for token_id in id_seq:
      if token_id in id_to_vocab:
        tokens.append(id_to_vocab[token_id])
      else:
        # Unknown ids are logged and skipped, not substituted.
        logging.error("label not in vocabs")
    decoded.append(tokens)
  assert len(decoded) == len(ids)
  return decoded
| 33.547619 | 80 | 0.677786 |
cdfaed5bdf302c41e5a4d87390b51f62555fe7b2 | 1,864 | py | Python | cool/checks.py | SmallCream/django-cool | 63b136da7ce39135c9f900e8161288f8fc8893a4 | [
"BSD-3-Clause"
] | 11 | 2020-05-19T09:52:35.000Z | 2022-02-25T10:39:56.000Z | cool/checks.py | SmallCream/django-cool | 63b136da7ce39135c9f900e8161288f8fc8893a4 | [
"BSD-3-Clause"
] | null | null | null | cool/checks.py | SmallCream/django-cool | 63b136da7ce39135c9f900e8161288f8fc8893a4 | [
"BSD-3-Clause"
] | 1 | 2020-12-24T08:14:58.000Z | 2020-12-24T08:14:58.000Z | # encoding: utf-8
from django.apps import apps
from django.core.checks import Error, Tags, register
from django.core.exceptions import FieldError
from django.db.models import F
from cool.core.utils import construct_search
def register_checks():
    # Hook the model checks into Django's system-check framework.
    register(Tags.models)(check_models)
def check_models(app_configs, **kwargs):
    """Run Django Cool model checks for the given app configs (all apps when None)."""
    configs = apps.get_app_configs() if app_configs is None else app_configs
    errors = []
    for app_config in configs:
        for model in app_config.get_models():
            errors.extend(check_model(model))
    return errors
def check_model(model):
    """Collect every Django Cool check result for a single model."""
    return list(_check_get_search_fields(model))
def _check_get_search_fields(model):
    """Validate that every entry of model.get_search_fields() is a usable lookup."""
    if not hasattr(model, 'get_search_fields'):
        return []

    # Ensure that autocomplete_search_fields returns a valid list of filters
    # for a QuerySet on that model
    bad_lookups = []
    manager = model._default_manager
    for lookup in model.get_search_fields():
        try:
            # Constructing the QuerySet alone validates the lookup;
            # no database query is executed during the check phase.
            manager.filter(**{construct_search(manager, lookup): F('pk')})
        except FieldError:
            bad_lookups.append(lookup)

    if not bad_lookups:
        return []
    return [
        Error(
            "Model {app}.{model} returned bad entries for get_search_fields: {failures}".format(
                app=model._meta.app_label,
                model=model._meta.model_name,
                failures=",".join(bad_lookups)
            ),
            hint="A QuerySet for {model} could not be constructed. Fix "
                 "the get_search_fields on it to return valid lookups.".format(model=model._meta.model_name),
            id='cool.E001'
        )
    ]
bfa09f73eda0ac4c013daff5ae107acb282633f3 | 322 | py | Python | leetcode/0191_number_of_1_bits.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | 3 | 2018-05-10T09:56:49.000Z | 2020-11-07T18:09:42.000Z | leetcode/0191_number_of_1_bits.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | null | null | null | leetcode/0191_number_of_1_bits.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
class Solution:
    def hammingWeight(self, n):
        """Return the number of set bits in *n* (Brian Kernighan's method)."""
        bits = 0
        while n:
            # Clearing the lowest set bit each pass gives O(popcount) iterations.
            n &= n - 1
            bits += 1
        return bits
if __name__ == '__main__':
    # Smoke tests: 11 = 0b1011 (three 1-bits), 128 = 0b10000000 (one 1-bit).
    solution = Solution()
    assert 3 == solution.hammingWeight(11)
    assert 1 == solution.hammingWeight(128)
| 17.888889 | 43 | 0.540373 |
84564bbefa9ef115d44514ada0cc997aedcaa414 | 6,834 | py | Python | asnake/client/web_client.py | archivalGrysbok/ArchivesSnake | 941298b91871a9aa2eb5abbe47b79be6d47518ef | [
"Apache-2.0"
] | 58 | 2017-08-17T16:09:23.000Z | 2022-03-18T00:00:43.000Z | asnake/client/web_client.py | archivalGrysbok/ArchivesSnake | 941298b91871a9aa2eb5abbe47b79be6d47518ef | [
"Apache-2.0"
] | 31 | 2018-02-20T05:40:40.000Z | 2021-09-15T12:35:34.000Z | asnake/client/web_client.py | archivalGrysbok/ArchivesSnake | 941298b91871a9aa2eb5abbe47b79be6d47518ef | [
"Apache-2.0"
] | 14 | 2018-01-22T19:09:54.000Z | 2020-11-02T16:46:40.000Z | from requests import Session
from urllib.parse import quote
from numbers import Number
from collections.abc import Sequence, Mapping
import json
import asnake.configurator as conf
import asnake.logging as logging
log = None # module-level logger; initialized lazily on first client init
class ASnakeAuthError(Exception):
    '''Raised when authentication against ArchivesSpace fails.'''
class ASnakeWeirdReturnError(Exception):
    '''Raised when a paged endpoint returns a shape get_paged cannot handle.'''
def listlike_seq(seq):
    '''Determine if a thing is a list-like (sequence of values) sequence that's not string-like.'''
    if not isinstance(seq, Sequence):
        return False
    return not isinstance(seq, (str, bytes, Mapping))
def http_meth_factory(meth):
    '''Utility method for producing HTTP proxy methods for ASnakeProxyMethods mixin class.

    Urls are prefixed with the value of baseurl from the client's ASnakeConfig. Arguments are
    passed unaltered to the matching requests.Session method.'''
    def http_method(self, url, *args, **kwargs):
        # aspace uses the PHP convention where array-typed form values use names with '[]' appended
        if 'params' in kwargs:
            converted = {}
            for key, value in kwargs['params'].items():
                if listlike_seq(value) and key[-2:] != '[]':
                    converted[key + '[]'] = value
                else:
                    converted[key] = value
            kwargs['params'] = converted
        full_url = "/".join([self.config['baseurl'].rstrip("/"), url.lstrip("/")])

        result = getattr(self.session, meth)(full_url, *args, **kwargs)
        if result.status_code == 403 and self.config['retry_with_auth']:
            # Session token likely expired: re-authenticate once and retry.
            self.authorize()
            result = getattr(self.session, meth)(full_url, *args, **kwargs)
        log.debug("proxied http method", method=meth.upper(), url=full_url, status=result.status_code)
        return result
    return http_method
class ASnakeProxyMethods(type):
    '''Metaclass to set up proxy methods for all requests-supported HTTP methods'''

    def __init__(cls, name, parents, dct):
        # Attach one proxy method per HTTP verb to the class being created.
        for meth in ('get', 'post', 'head', 'put', 'delete', 'options',):
            fn = http_meth_factory(meth)
            fn.__name__ = meth
            fn.__doc__ = '''Proxied :meth:`requests.Session.{}` method from :class:`requests.Session`'''.format(meth)

            setattr(cls, meth, fn)
class ASnakeClient(metaclass=ASnakeProxyMethods):
    '''ArchivesSnake Web Client'''

    def __init__(self, **config):
        global log

        # Configuration: an explicit config_file wins, then kwargs overlay it.
        if 'config_file' in config:
            self.config = conf.ASnakeConfig(config['config_file'])
        else:
            self.config = conf.ASnakeConfig()
        self.config.update(config)

        # Only a subset of logging config can be supported in config
        # For more complex setups (configuring output format, say),
        # configure logs in Python code prior to loading
        #
        # Properties supported are:
        # filename, filemode, level, and default_config
        # Default config can be any of the default configurations exposed in logging
        if not log:
            if not logging.already_configured and 'logging_config' in self.config:
                if 'default_config' in self.config['logging_config']:
                    default_logging_config = logging.configurations.get(
                        self.config['logging_config']['default_config'])
                    del self.config['logging_config']['default_config']
                else:
                    default_logging_config = None

                logging.setup_logging(config = default_logging_config,
                                      **self.config['logging_config'])
            # Module-level logger is created lazily on first client construction.
            log = logging.get_logger(__name__)

        if not hasattr(self, 'session'): self.session = Session()
        self.session.headers.update({'Accept': 'application/json',
                                     'User-Agent': 'ArchivesSnake/0.1'})
        log.debug("client created")

    def authorize(self, username=None, password=None):
        '''Authorizes the client against the configured archivesspace instance.

        Parses the JSON response, and stores the returned session token in the session.headers for future requests.
        Asks for a "non-expiring" session, which isn't truly immortal, just long-lived.'''
        username = username or self.config['username']
        password = password or self.config['password']

        log.debug("authorizing against ArchivesSpace", user=username)

        resp = self.session.post(
            "/".join([self.config['baseurl'].rstrip("/"), 'users/{username}/login']).format(username=quote(username)),
            data={"password": password, "expiring": False}
        )

        if resp.status_code != 200:
            log.debug("authorization failure", status=resp.status_code)
            raise ASnakeAuthError("Failed to authorize ASnake with status: {}".format(resp.status_code))
        else:
            # All subsequent proxied requests carry this token header.
            session_token = json.loads(resp.text)['session']
            self.session.headers['X-ArchivesSpace-Session'] = session_token
            log.debug("authorization success", session_token=session_token)
            return session_token

    def get_paged(self, url, *args, page_size=100, **kwargs):
        '''get list of json objects from urls of paged items'''
        params = {}
        if "params" in kwargs:
            params.update(**kwargs['params'])
            del kwargs['params']
        # special-cased bc all_ids doesn't work on repositories index route
        if "all_ids" in params and url in {"/repositories", "repositories"}:
            del params['all_ids']
        params.update(page_size=page_size, page=1)

        current_page = self.get(url, params=params, **kwargs)
        current_json = current_page.json()

        # Regular paged object
        if hasattr(current_json, 'keys') and \
           {'results', 'this_page', 'last_page'} <= set(current_json.keys()):
            while current_json['this_page'] <= current_json['last_page']:
                for obj in current_json['results']:
                    yield obj
                if current_json['this_page'] == current_json['last_page']: break
                params['page'] += 1
                current_page = self.get(url, params=params)
                current_json = current_page.json()
        # routes that just return a list, or ids, i.e. queries with all_ids param
        elif isinstance(current_json, list):
            # e.g. repositories
            if len(current_json) >= 1:
                if hasattr(current_json[0], 'keys'):
                    for obj in current_json:
                        yield obj
                elif isinstance(current_json[0], Number):
                    # List of ids: fetch each object individually.
                    for i in current_json:
                        yield self.get("/".join([url, str(i)])).json()
                else:
                    raise ASnakeWeirdReturnError("get_paged doesn't know how to handle {}".format(current_json))
        else:
            raise ASnakeWeirdReturnError("get_paged doesn't know how to handle {}".format(current_json))
| 44.090323 | 124 | 0.622769 |
000c8eb8c516d61dae92dd64b94fa008adee6c29 | 346 | py | Python | zengo/migrations/0004_zendeskuser_alias.py | ableeck/django-zengo | 33f3795215dac4ac2121d26fc702a24adb1748f2 | [
"MIT"
] | 10 | 2019-02-11T19:13:41.000Z | 2021-12-10T21:23:51.000Z | zengo/migrations/0004_zendeskuser_alias.py | myles/django-zengo | d896b931139a65c497196b9669313f1dcfd560c9 | [
"MIT"
] | 4 | 2019-01-03T00:02:31.000Z | 2020-11-11T01:31:06.000Z | zengo/migrations/0004_zendeskuser_alias.py | myles/django-zengo | d896b931139a65c497196b9669313f1dcfd560c9 | [
"MIT"
] | 3 | 2019-02-28T15:58:24.000Z | 2020-06-09T02:45:42.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a free-form, nullable `alias` text field to ZendeskUser.

    dependencies = [
        ("zengo", "0003_relax_url_maxlength"),
    ]

    operations = [
        migrations.AddField(
            model_name="zendeskuser",
            name="alias",
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 20.352941 | 58 | 0.586705 |
7e07c2a45defffb3508b42d0d49e96d296747109 | 89 | py | Python | test.py | Anu1996rag/PythonLogging | 3231c76c680a20df27182e99ffcc116f95b5f864 | [
"MIT"
] | null | null | null | test.py | Anu1996rag/PythonLogging | 3231c76c680a20df27182e99ffcc116f95b5f864 | [
"MIT"
] | null | null | null | test.py | Anu1996rag/PythonLogging | 3231c76c680a20df27182e99ffcc116f95b5f864 | [
"MIT"
] | null | null | null | import logging
import logger
logger.loggingInfo('debug')
logging.warning('test file')
| 12.714286 | 29 | 0.775281 |
907f17e20aafdb53463fec7550fe7cd7b2def3f4 | 700 | py | Python | algo/implementation/easy/acm-icpc.py | rivergillis/hackerrank-practice | 16b1d448c011f22b202b1ccadac09c71f646aa5e | [
"MIT"
] | null | null | null | algo/implementation/easy/acm-icpc.py | rivergillis/hackerrank-practice | 16b1d448c011f22b202b1ccadac09c71f646aa5e | [
"MIT"
] | null | null | null | algo/implementation/easy/acm-icpc.py | rivergillis/hackerrank-practice | 16b1d448c011f22b202b1ccadac09c71f646aa5e | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/acm-icpc-team
import itertools
# Read the number of attendees (n) and topics (m), then each attendee's
# known-topics bitstring.
n, m = input().strip().split(' ')
n, m = [int(n), int(m)]
topic = []
for topic_i in range(n):
    topic_t = str(input().strip())
    topic.append(topic_t)

# Parse each bitstring once into an int so pair scoring is a single OR.
topic_bin = [int(t, base=2) for t in topic]

# Single pass over all pairs: track the best topic count and how many pairs
# achieve it. (Previously two identical O(n^2) passes over combinations were
# made; this halves the work with identical output.)
best_num = 0
num_groups = 0
for x, y in itertools.combinations(topic_bin, r=2):
    num_topics = bin(x | y).count('1')
    if num_topics > best_num:
        best_num = num_topics
        num_groups = 1
    elif num_topics == best_num:
        num_groups += 1

print(best_num)
print(num_groups)
| 21.212121 | 53 | 0.627143 |
6d20f51546746302a752a5ef55ca9495321502e1 | 568 | py | Python | braXAI/sub_correction.py | MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks | 089226dce6d318247111ea60c2cc15c247b430d2 | [
"MIT"
] | null | null | null | braXAI/sub_correction.py | MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks | 089226dce6d318247111ea60c2cc15c247b430d2 | [
"MIT"
] | null | null | null | braXAI/sub_correction.py | MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks | 089226dce6d318247111ea60c2cc15c247b430d2 | [
"MIT"
] | null | null | null | import numpy as np
import os
import glob
classes=["bogus", "real"]
y=np.load("y_test.npy")
pr=np.load("preds.npy")
prob=np.load("preds_proba.npy")
#index = {1:0, 2:1, 4:2, 5:3, 6:4, 8:5, 13:6}
index = {0:0, 1:1}
for clss in classes:
#l=glob.glob("trainingdata\\saliency\\"+clss+"\\*.png")
l=glob.glob("keras-vis\\"+clss+"\\sub\\*.png")
#l=glob.glob("keras-vis\\saliency\\"+clss+"\\*.png")
for loc in range(len(l)):
i=int(l[loc].split("\\")[-1][:-4])
if y[i]!=pr[i] or prob[i,index[pr[i]]]<0.8:
os.remove(l[loc])
| 25.818182 | 59 | 0.551056 |
71da772a55c131ccce113a7aa24bfd1c8c608e2f | 983 | py | Python | fixture/application.py | sergeytsarev90/mantis_python | ca266ec64a67dfae400ccca438ba0b2c11998c2a | [
"Apache-2.0"
] | null | null | null | fixture/application.py | sergeytsarev90/mantis_python | ca266ec64a67dfae400ccca438ba0b2c11998c2a | [
"Apache-2.0"
] | null | null | null | fixture/application.py | sergeytsarev90/mantis_python | ca266ec64a67dfae400ccca438ba0b2c11998c2a | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from fixture.session import SH
from fixture.projects import PH
from fixture.james import JS
from fixture.mail import MH
from fixture.signup import SGH
from fixture.soap import SoapH
class Applicaton:
    """Test fixture wrapping a Selenium WebDriver plus per-area helper objects."""

    def __init__(self, browser, config):
        """Start the requested browser ('chrome' or 'ie') and wire up helpers.

        :param browser: browser key selecting the WebDriver implementation
        :param config: parsed test configuration dict (must contain web.BaseUrl)
        :raises ValueError: for an unsupported browser key
        """
        if browser == "chrome":
            self.wd = webdriver.Chrome()
        elif browser == "ie":
            self.wd = webdriver.Ie()
        else:
            # Fixed typo in the error message ("Unrecognazed").
            raise ValueError("Unrecognized browser %s" % browser)
        self.wd.implicitly_wait(1)
        self.session = SH(self)
        self.projects = PH(self)
        self.james = JS(self)
        self.config = config
        self.mail = MH(self)
        self.signup = SGH(self)
        self.soap = SoapH(self)
        # NOTE(review): this sets a *class* attribute from instance init —
        # presumably so helpers can read Applicaton.base_url; confirm intent.
        Applicaton.base_url = config['web']['BaseUrl']

    def is_valid(self):
        """Return True if the underlying WebDriver session is still alive."""
        try:
            self.wd.current_url
            return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate instead of being swallowed.
            return False

    def destroy(self):
        """Quit the browser and release the WebDriver session."""
        self.wd.quit()
d784be5327df29c1ae4a024bf33983fd58db0ffc | 984 | py | Python | packages/watchmen-model/src/watchmen_model/admin/pipeline_action_read.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | packages/watchmen-model/src/watchmen_model/admin/pipeline_action_read.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | packages/watchmen-model/src/watchmen_model/admin/pipeline_action_read.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | from watchmen_model.common import construct_parameter_joint
from .pipeline_action import AggregateArithmeticHolder, FindBy, FromFactor, FromTopic, MemoryWriter, ReadTopicActionType
class ReadTopicAction(FromTopic, MemoryWriter, FindBy):
    # Base class for pipeline actions that read data out of a topic.
    type: ReadTopicActionType

    def __setattr__(self, name, value):
        # Coerce raw values assigned to `by` into a parameter joint object.
        if name == 'by':
            super().__setattr__(name, construct_parameter_joint(value))
        else:
            super().__setattr__(name, value)
class ReadRowAction(ReadTopicAction):
    # Reads a single row from the source topic.
    type: ReadTopicActionType = ReadTopicActionType.READ_ROW
class ReadRowsAction(ReadTopicAction):
    # Reads multiple rows from the source topic.
    type: ReadTopicActionType = ReadTopicActionType.READ_ROWS
class ReadFactorAction(FromFactor, ReadTopicAction, AggregateArithmeticHolder):
    # Reads a single factor value, optionally aggregated arithmetically.
    type: ReadTopicActionType = ReadTopicActionType.READ_FACTOR
class ReadFactorsAction(FromFactor, ReadTopicAction):
    # Reads multiple factor values from the source topic.
    type: ReadTopicActionType = ReadTopicActionType.READ_FACTORS
class ExistsAction(ReadTopicAction):
    # Checks whether any row matches the `by` condition.
    type: ReadTopicActionType = ReadTopicActionType.EXISTS
| 29.818182 | 120 | 0.831301 |
1631cd2b62929bfccacfe56ece80a75f7a05b12c | 705 | py | Python | src/area.py | JonasProg/Team15 | c8067c0fb7a837af10fc415c0ada2e639a0192f2 | [
"MIT"
] | 1 | 2021-03-22T08:29:57.000Z | 2021-03-22T08:29:57.000Z | src/area.py | JonasProg/Team15 | c8067c0fb7a837af10fc415c0ada2e639a0192f2 | [
"MIT"
] | 1 | 2021-04-14T11:58:32.000Z | 2021-04-14T11:58:32.000Z | src/area.py | JonasProg/Team15 | c8067c0fb7a837af10fc415c0ada2e639a0192f2 | [
"MIT"
] | null | null | null | from math import pi
def area_circle(r):
    '''Area of circle.

    Calculate the area of a circle based on the radius.

    Args:
        r (float): radius of circle

    Returns:
        float: area of circle

    Raises:
        ValueError: if the radius is negative
    '''
    if r >= 0:
        return pi * r * r
    raise ValueError("The radius must be >= 0.")
def area_square(length):
    """Calculates the area of a square.

    Calculates the area of a square based on the length of side.

    Args:
        length (float) : length is the length of side of a square.

    Returns:
        float: area of a square.

    Raises:
        ValueError: if the side length is negative.
    """
    if length < 0:
        raise ValueError("The length of side must be >= 0.")
    return length * length
7fd825434e7fdbe4051dc5bab35c9df54d7efd61 | 3,203 | py | Python | Python/snake-game.py | pretam591/All_Program_helper | 83f52bce53bdcd0b115753ecda610d21aa0ddd2a | [
"MIT"
] | 16 | 2021-10-03T11:15:49.000Z | 2021-10-31T04:40:24.000Z | Python/snake-game.py | pretam591/All_Program_helper | 83f52bce53bdcd0b115753ecda610d21aa0ddd2a | [
"MIT"
] | 232 | 2021-10-02T14:51:43.000Z | 2021-11-14T08:23:27.000Z | Python/snake-game.py | pretam591/All_Program_helper | 83f52bce53bdcd0b115753ecda610d21aa0ddd2a | [
"MIT"
] | 166 | 2021-10-02T13:56:34.000Z | 2021-10-31T17:56:34.000Z | import pygame as pg
import random
pg.init()
white = (255,255,255)
red = (255,0,0)
black = (0,0,0)
screenWidth = 900
screenHeight = 600
gameWindow = pg.display.set_mode((screenWidth,screenHeight))
pg.display.set_caption('Snake Game')
pg.display.update()
clock = pg.time.Clock()
font = pg.font.SysFont(None,55)
def textScreen(text, color, x, y):
    """Render *text* in *color* onto the game window at (x, y)."""
    # Fixed: pygame fonts render via .render() (was `font.reader`), and the
    # blitted surface variable was misspelled (`sctreenText`), so this
    # function previously raised AttributeError/NameError when called.
    screenText = font.render(text, True, color)
    gameWindow.blit(screenText, [x, y])
def plotSnake(gameWindow,color,snakeList,snakeSize):
    # Draw one square segment per (x, y) coordinate in the snake body.
    for x,y in snakeList:
        pg.draw.rect(gameWindow, color, [x,y,snakeSize,snakeSize])
def gameLoop():
    """Main game loop: input handling, movement, food, scoring, game over.

    Fixes applied to the original:
    - `snakeList - []` (NameError) -> `snakeList = []`
    - four `evemt` typos -> `event`
    - the "down" key handler tested `K_RIGHT` twice -> now `K_DOWN`
    - `pg.dispaly.update()` -> `pg.display.update()`
    """
    exitGame = False
    gameOver = False

    # Snake head position and current velocity.
    snakeX = 45
    snakeY = 55
    velocityX = 0
    velocityY = 0

    snakeList = []
    snakeLength = 1

    # First food position (kept below the y=40 score bar).
    foodX = random.randint(20, screenWidth - 20)
    foodY = random.randint(60, screenHeight - 20)
    score = 0

    initialVelocity = 4
    snakeSize = 30
    fps = 60

    while not exitGame:
        if gameOver:
            gameWindow.fill(white)
            textScreen('Game Over! Press Enter To Continue', red, 100, 250)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    exitGame = True
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_RETURN:
                        # Restart by re-entering the loop recursively.
                        gameLoop()
        else:
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    exitGame = True
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_RIGHT:
                        velocityX = initialVelocity
                        velocityY = 0
                    if event.key == pg.K_LEFT:
                        velocityX = -initialVelocity
                        velocityY = 0
                    if event.key == pg.K_UP:
                        velocityY = -initialVelocity
                        velocityX = 0
                    if event.key == pg.K_DOWN:
                        velocityY = initialVelocity
                        velocityX = 0

            snakeX = snakeX + velocityX
            snakeY = snakeY + velocityY

            # Eat food when the head is within 10px; grow and respawn food.
            if abs(snakeX - foodX) < 10 and abs(snakeY - foodY) < 10:
                score += 1
                foodX = random.randint(20, screenWidth - 30)
                foodY = random.randint(60, screenHeight - 30)
                snakeLength += 5

            gameWindow.fill(white)
            textScreen("Score : " + str(score * 10), red, 5, 5)
            pg.draw.rect(gameWindow, red, [foodX, foodY, snakeSize, snakeSize])
            pg.draw.line(gameWindow, red, (0, 40), (900, 40), 5)

            # Append the new head; trim the tail to the current length.
            head = []
            head.append(snakeX)
            head.append(snakeY)
            snakeList.append(head)
            if len(snakeList) > snakeLength:
                del snakeList[0]

            # Dying conditions: self collision or leaving the playfield.
            if head in snakeList[:-1]:
                gameOver = True
            if snakeX < 0 or snakeX > screenWidth - 20 or snakeY < 50 or snakeY > screenHeight - 20:
                gameOver = True

            plotSnake(gameWindow, black, snakeList, snakeSize)

        pg.display.update()
        clock.tick(fps)

    pg.quit()
    quit()
gameLoop()  # fixed: was `gameloop()` — a NameError; the function is gameLoop
| 27.376068 | 96 | 0.519825 |
206219d7d63d892b4c50c520d70589ec4cf51b72 | 1,516 | py | Python | ding/rl_utils/ppg.py | uuid0000/DI-engine | cc2713fa01e5288bae21cfeb595729d665e092d1 | [
"Apache-2.0"
] | 1 | 2021-07-13T02:56:34.000Z | 2021-07-13T02:56:34.000Z | ding/rl_utils/ppg.py | uuid0000/DI-engine | cc2713fa01e5288bae21cfeb595729d665e092d1 | [
"Apache-2.0"
] | null | null | null | ding/rl_utils/ppg.py | uuid0000/DI-engine | cc2713fa01e5288bae21cfeb595729d665e092d1 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from torch.distributions import Independent, Normal
# Input fields consumed by ppg_joint_error, and the pair of losses it returns.
ppg_data = namedtuple('ppg_data', ['logit_new', 'logit_old', 'action', 'value_new', 'value_old', 'return_', 'weight'])
ppg_joint_loss = namedtuple('ppg_joint_loss', ['auxiliary_loss', 'behavioral_cloning_loss'])
def ppg_joint_error(
        data: namedtuple,
        clip_ratio: float = 0.2,
        use_value_clip: bool = True,
) -> Tuple[namedtuple, namedtuple]:
    """
    Compute the PPG joint-phase losses from a ``ppg_data`` tuple:
    an (optionally clipped) value-function auxiliary loss and a
    behavioral-cloning loss between the old and new policies.
    """
    logit_new, logit_old, action, value_new, value_old, return_, weight = data
    if weight is None:
        weight = torch.ones_like(return_)
    # auxiliary_loss
    if use_value_clip:
        # PPO-style value clipping: penalize the worse of the clipped and
        # unclipped squared errors.
        value_clip = value_old + (value_new - value_old).clamp(-clip_ratio, clip_ratio)
        v1 = (return_ - value_new).pow(2)
        v2 = (return_ - value_clip).pow(2)
        auxiliary_loss = 0.5 * (torch.max(v1, v2) * weight).mean()
    else:
        auxiliary_loss = 0.5 * ((return_ - value_new).pow(2) * weight).mean()

    dist_new = torch.distributions.categorical.Categorical(logits=logit_new)
    dist_old = torch.distributions.categorical.Categorical(logits=logit_old)
    logp_new = dist_new.log_prob(action)
    logp_old = dist_old.log_prob(action)

    # behavioral cloning loss
    # NOTE(review): `approx_kl` is computed but never used or returned —
    # confirm whether it should be logged, returned, or removed.
    approx_kl = (logp_old - logp_new).mean()
    # NOTE(review): F.kl_div expects the *target* as probabilities unless
    # log_target=True, but `logp_old` here is a log-probability — confirm intent.
    behavioral_cloning_loss = F.kl_div(logp_new, logp_old, reduction='batchmean')
    return ppg_joint_loss(auxiliary_loss, behavioral_cloning_loss)
| 37.9 | 118 | 0.709763 |
67eedabd3052f100c954eee175f24dbe682c8c33 | 1,185 | py | Python | examples/batch_parse_file_list.py | JohannesLiu/Apollo-Cyber-Parser | ef7e99ea29b2586a72c4cf0b74c18370102be65c | [
"MIT"
] | 2 | 2022-03-11T07:45:59.000Z | 2022-03-31T17:38:04.000Z | examples/batch_parse_file_list.py | JohannesLiu/Apollo-Cyber-Parser | ef7e99ea29b2586a72c4cf0b74c18370102be65c | [
"MIT"
] | 1 | 2022-03-20T12:22:49.000Z | 2022-03-31T17:47:06.000Z | examples/batch_parse_file_list.py | JohannesLiu/Apollo-Cyber-Parser | ef7e99ea29b2586a72c4cf0b74c18370102be65c | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append("/apollo/cyber")
os.chdir("/apollo/cyber")
from cyber_py import cyber
from cyber_py import record
from modules.control.proto import control_cmd_pb2
def file_name(file_dir):
    # Walk *file_dir* and append each '.00000' record file path to `L`.
    # NOTE(review): `L` is never defined in this module — calling this
    # function raises NameError. It appears unused; confirm before relying on it.
    for root, dirs, files in os.walk(file_dir):
        for file in files:
            if os.path.splitext(file)[1] == '.00000':
                L.append(os.path.join(root, file))
def listdir(path, list_name):
    """Recursively append every '.00000' file path under *path* to *list_name*."""
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            listdir(entry_path, list_name)
            continue
        if os.path.splitext(entry)[1] == '.00000':
            list_name.append(entry_path)
def listdir_top(path, list_name):
    # NOTE(review): builds file_path but never appends to list_name or
    # recurses — this function is currently a no-op apart from listing the
    # directory. Confirm whether it is dead code or unfinished.
    for file in os.listdir(path):
        file_path = os.path.join(path, file)
def listdir_bottom(path, list_name):
    """Append the '.00000' files directly inside *path* (non-recursive)."""
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.splitext(entry)[1] == '.00000':
            list_name.append(full_path)
path="/apollo/data/cyber_record/trail-10-20-2021-07-52-34/"
path_list=[]
# Recursively gather every '.00000' record segment, then print them sorted.
listdir(path, path_list)
path_list.sort()
for path in path_list:
    print(path)
315cc1f685e80511cd31d64e785a1cd4125bb5f8 | 590 | py | Python | msgboard/migrations/0002_test_data.py | nemerna/beyond-tutorial | f9a2c0e14fa3f110f11d8b52a4f7fb2a3f480ac1 | [
"MIT"
] | null | null | null | msgboard/migrations/0002_test_data.py | nemerna/beyond-tutorial | f9a2c0e14fa3f110f11d8b52a4f7fb2a3f480ac1 | [
"MIT"
] | 2 | 2022-02-01T01:41:28.000Z | 2022-02-01T02:46:58.000Z | msgboard/migrations/0002_test_data.py | nemerna/beyond-tutorial | f9a2c0e14fa3f110f11d8b52a4f7fb2a3f480ac1 | [
"MIT"
] | null | null | null | from django.db import migrations, transaction
class Migration(migrations.Migration):
dependencies = [
('msgboard', '0001_initial'),
]
def generate_data(apps, schema_editor):
from msgboard.models import Message
test_data = [
('Test User1', 'A simple test message'),
('Test User2', 'Another simple test message'),
]
with transaction.atomic():
for author, text in test_data:
Message(author=author, text=text).save()
operations = [
migrations.RunPython(generate_data),
]
| 31.052632 | 58 | 0.601695 |
0330cc959c5e5108c02381e1a459bb77c5cadf70 | 8,406 | py | Python | pelion_systest_lib/cloud/libraries/enrollment.py | AnotherButler/e2e-edge-test-suite | 05d01922bc74d9ea4564a7561342ea428977ebff | [
"Apache-2.0"
] | null | null | null | pelion_systest_lib/cloud/libraries/enrollment.py | AnotherButler/e2e-edge-test-suite | 05d01922bc74d9ea4564a7561342ea428977ebff | [
"Apache-2.0"
] | 1 | 2021-07-30T20:43:56.000Z | 2021-08-06T19:40:24.000Z | pelion_systest_lib/cloud/libraries/enrollment.py | AnotherButler/e2e-edge-test-suite | 05d01922bc74d9ea4564a7561342ea428977ebff | [
"Apache-2.0"
] | 2 | 2021-07-29T15:47:25.000Z | 2022-03-07T08:38:20.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2020-2021, Pelion and affiliates.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
This module is for cloud's Enrollment API functions
"""
import pelion_systest_lib.tools as utils
class EnrollmentAPI:
"""
A class that provides Enrollment service related functionality, device and certificate
https://www.pelion.com/docs/device-management/current/service-api-references/enrollment-api.html
https://www.pelion.com/docs/device-management/current/service-api-references/certificate-enrollment.html
"""
    def __init__(self, rest_api):
        """
        Initializes the Enrollment API library
        :param rest_api: RestAPI object
        """
        # REST API version prefix used in every endpoint path.
        self.api_version = 'v3'
        # Low-level HTTP client providing get/post/delete helpers.
        self.cloud_api = rest_api
def get_device_enrollments(self, query_params=None, api_key=None, expected_status_code=None):
"""
Get device enrollments
:param query_params: e.g.{'limit': '1000', 'include': 'total_count'}
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: GET /device-enrollments response
"""
api_url = '/{}/device-enrollments'.format(self.api_version)
r = self.cloud_api.get(api_url, api_key, params=query_params, expected_status_code=expected_status_code)
return r
def get_device_enrollment(self, enrollment_id, api_key=None, expected_status_code=None):
"""
Get device enrollment
:param enrollment_id: Enrollment id
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: GET /device-enrollments/{enrollment_id} response
"""
api_url = '/{}/device-enrollments/{}'.format(self.api_version, enrollment_id)
r = self.cloud_api.get(api_url, api_key, expected_status_code=expected_status_code)
return r
def create_device_enrollment(self, enrollment_data=None, api_key=None, expected_status_code=None):
"""
Create device enrollment
:param enrollment_data: Enrollment payload
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: POST /device-enrollments response
"""
api_url = '/{}/device-enrollments'.format(self.api_version)
payload = {"enrollment_identity": utils.build_random_enrollment_identity()}
if enrollment_data is not None:
payload = enrollment_data
r = self.cloud_api.post(api_url, api_key, payload, expected_status_code=expected_status_code)
return r
def delete_device_enrollment(self, enrollment_id, api_key=None, expected_status_code=None):
"""
Delete device enrollment
:param enrollment_id: Enrollment id
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: DELETE /device-enrollments/{enrollment_id} response
"""
api_url = '/{}/device-enrollments/{}'.format(self.api_version, enrollment_id)
r = self.cloud_api.delete(api_url, api_key, expected_status_code=expected_status_code)
return r
def create_bulk_enrollment_upload(self, enrollment_identities, api_key=None, expected_status_code=None):
"""
Bulk enrollment upload
:param enrollment_identities: Enrollment csv
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: POST /device-enrollments-bulk-uploads response
"""
api_url = '/{}/device-enrollments-bulk-uploads'.format(self.api_version)
files = {'enrollment_identities': enrollment_identities}
r = self.cloud_api.post(api_url, api_key, files=files, expected_status_code=expected_status_code)
return r
def get_bulk_enrollment_upload(self, enrollment_id, api_key=None, expected_status_code=None):
"""
Get bulk enrollment upload
:param enrollment_id: Enrollment id
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: GET /device-enrollments-bulk-uploads/{enrollment_id} response
"""
api_url = '/{}/device-enrollments-bulk-uploads/{}'.format(self.api_version, enrollment_id)
r = self.cloud_api.get(api_url, api_key, expected_status_code=expected_status_code)
return r
def get_bulk_enrollment_delete(self, enrollment_id, api_key=None, expected_status_code=None):
"""
Get bulk enrollment delete
:param enrollment_id: Enrollment id
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: GET /device-enrollments-bulk-deletes/{enrollment_id} response
"""
api_url = '/{}/device-enrollments-bulk-deletes/{}'.format(self.api_version, enrollment_id)
r = self.cloud_api.get(api_url, api_key, expected_status_code=expected_status_code)
return r
def delete_bulk_enrollment(self, enrollment_identities, api_key=None, expected_status_code=None):
"""
Bulk enrollment delete
:param enrollment_identities: Enrollment csv
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: POST /device-enrollments-bulk-deletes response
"""
api_url = '/{}/device-enrollments-bulk-deletes'.format(self.api_version)
files = {'enrollment_identities': enrollment_identities}
r = self.cloud_api.post(api_url, api_key, files=files, expected_status_code=expected_status_code)
return r
def get_certificate_enrollments(self, query_params=None, api_key=None, expected_status_code=None):
"""
Get certificate enrollments
:param query_params: e.g.{'limit': '1000', 'include': 'total_count'}
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: GET /certificate-enrollments response
"""
api_url = '/{}/certificate-enrollments'.format(self.api_version)
r = self.cloud_api.get(api_url, api_key, params=query_params, expected_status_code=expected_status_code)
return r
def get_certificate_enrollment(self, certificate_enrollment_id, api_key=None, expected_status_code=None):
"""
Get certificate enrollment
:param certificate_enrollment_id: Certificate enrollment id
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: GET /certificate-enrollments/{enrollment_id} response
"""
api_url = '/{}/certificate-enrollments/{}'.format(self.api_version, certificate_enrollment_id)
r = self.cloud_api.get(api_url, api_key, expected_status_code=expected_status_code)
return r
def create_certificate_renewal_request(self, device_id, certificate_name, api_key=None, expected_status_code=None):
"""
Request certificate renewal
:param device_id: Device id
:param certificate_name: Certificate name
:param api_key: Authentication key
:param expected_status_code: Asserts the result in the function
:return: POST /devices/{device-id}/certificates/{certificate-name}/renew response
"""
api_url = '/{}/devices/{}/certificates/{}/renew'.format(self.api_version, device_id, certificate_name)
r = self.cloud_api.post(api_url, api_key, expected_status_code=expected_status_code)
return r
| 44.242105 | 119 | 0.689745 |
b552733e393bda9b78ea87b63c4b6d83f9f8b889 | 108 | py | Python | git_magic/grandmaster.py | StephanRempel/git_magic | e3feeccc18f195cd9b6f989148f64beefe38bd6c | [
"Apache-2.0"
] | null | null | null | git_magic/grandmaster.py | StephanRempel/git_magic | e3feeccc18f195cd9b6f989148f64beefe38bd6c | [
"Apache-2.0"
] | 2 | 2021-05-20T23:19:19.000Z | 2022-02-26T17:07:03.000Z | git_magic/grandmaster.py | StephanRempel/git_magic | e3feeccc18f195cd9b6f989148f64beefe38bd6c | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 07_grandmaster.ipynb (unless otherwise specified).
__all__ = [] | 36 | 94 | 0.759259 |
c85dac5e330527716c2635f90141cb647ad5b352 | 2,304 | py | Python | utils.py | leyiweb/Pytorch-LRP | c4808bff5e61c03a185d177bcbcd7850d1b33f97 | [
"BSD-3-Clause"
] | null | null | null | utils.py | leyiweb/Pytorch-LRP | c4808bff5e61c03a185d177bcbcd7850d1b33f97 | [
"BSD-3-Clause"
] | null | null | null | utils.py | leyiweb/Pytorch-LRP | c4808bff5e61c03a185d177bcbcd7850d1b33f97 | [
"BSD-3-Clause"
] | null | null | null | import torch
from scipy.ndimage import zoom
from sklearn.model_selection import train_test_split
import numpy as np
def pprint(*args):
    """Print every argument followed by a newline, then a trailing blank line."""
    lines = ["{}\n".format(arg) for arg in args]
    print(*lines, "\n")
class Flatten(torch.nn.Module):
    """Module that keeps the batch dimension and collapses all others into one."""

    def __init__(self):
        super().__init__()

    def forward(self, in_tensor):
        # (batch, d1, d2, ...) -> (batch, d1*d2*...)
        batch_size = in_tensor.size(0)
        return in_tensor.view(batch_size, -1)
def load_data():
    """
    Load the 1.5T data table and build normalized train/test datasets using a
    patient-wise split (30 test patients per class, fixed random seed 0).
    """
    # Function-level import so the dataset package is only loaded when needed
    # (translated from the original Chinese comment).
    import jrieke.datasets as datasets
    df = datasets.load_data_table_15T()
    # Patient-wise train-test-split.
    # Select a number of patients for each class, put all their images in the test set
    # and all other images in the train set. This is the split that is used in the paper to produce the heatmaps.
    test_patients_per_class = 30
    patients_AD = df[df['DX'] == 'Dementia']['PTID'].unique()
    patients_CN = df[df['DX'] == 'CN']['PTID'].unique()
    patients_AD_train, patients_AD_test = train_test_split(patients_AD, test_size=test_patients_per_class,
                                                           random_state=0)
    patients_CN_train, patients_CN_test = train_test_split(patients_CN, test_size=test_patients_per_class,
                                                           random_state=0)
    patients_train = np.concatenate([patients_AD_train, patients_CN_train])
    patients_test = np.concatenate([patients_AD_test, patients_CN_test])
    return datasets.build_datasets(df, patients_train, patients_test, normalize=True)
def scale_mask(mask, shape):
    """
    Rescale a 3D label mask towards `shape` by zooming each label separately.

    Each label is isolated as a binary volume, zoomed by a fixed factor of 1.5
    with cubic interpolation, re-binarized, and pasted centered into a zero
    array of the target shape.

    :param mask: 3D integer label mask (numpy array).
    :param shape: Target shape tuple; assumed >= the zoomed shape per axis —
                  TODO confirm with callers (zoom factor is hard-coded to 1.5).
    :return: Rescaled mask of shape `shape` (or `mask` itself if shapes match).
    """
    if shape == mask.shape:
        print("No rescaling necessary.")
        return mask
    nmm_map = np.zeros(shape)
    print("Rescaling mask")
    for lbl_idx in np.unique(mask):
        # Isolate the current label as a binary volume.
        nmm_map_lbl = mask.copy()
        nmm_map_lbl[lbl_idx != nmm_map_lbl] = 0
        nmm_map_lbl[lbl_idx == nmm_map_lbl] = 1
        zoomed_lbl = zoom(nmm_map_lbl, 1.5, order=3)
        zoomed_lbl[zoomed_lbl != 1] = 0
        # Center the zoomed label inside the target volume. Using explicit end
        # indices fixes the original `[pad_left:-pad_right]` slicing, which
        # produced an empty slice (and a broadcast ValueError) whenever
        # pad_right was 0, i.e. the zoomed volume exactly fit the target.
        remain_diff = np.array(nmm_map.shape) - np.array(zoomed_lbl.shape)
        start = np.array(np.ceil(remain_diff / 2), dtype=int)
        end = start + np.array(zoomed_lbl.shape)
        nmm_map[start[0]:end[0], start[1]:end[1], start[2]:end[2]] += zoomed_lbl * lbl_idx
    return nmm_map
| 36 | 120 | 0.661024 |
3e8b8af002ab425f948a8e87f5f5af38bff8816e | 646 | py | Python | setup.py | giorgiovisani/lime_stability | dbee493b9d865d3f9fa36caeabbc4e82d4d13c2a | [
"BSD-2-Clause"
] | 18 | 2020-02-21T11:18:34.000Z | 2022-02-25T20:07:37.000Z | setup.py | giorgiovisani/lime_stability | dbee493b9d865d3f9fa36caeabbc4e82d4d13c2a | [
"BSD-2-Clause"
] | null | null | null | setup.py | giorgiovisani/lime_stability | dbee493b9d865d3f9fa36caeabbc4e82d4d13c2a | [
"BSD-2-Clause"
] | 5 | 2020-02-17T07:32:37.000Z | 2022-02-01T07:30:14.000Z | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='lime_stability',
version='0.1.1',
author="Giorgio Visani",
author_email="giorgio.visani2@unibo.it",
description="A package to evaluate Lime stability",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/giorgiovisani/lime_stability.git",
packages=['lime_stability'],
install_requires=[
'lime',
'statsmodels',
'statistics',
'numpy',
'scikit-learn>=0.18'
],
license='BSD',
zip_safe=False)
| 25.84 | 62 | 0.650155 |
1c71c05bcaf00fc2bce8c94830ad89b90c5ddc81 | 7,562 | py | Python | test/functional/wallet_dump.py | xcoin-project/xcoin | a709553b4e10d5592e1095e3ba63d1a995917c1a | [
"MIT"
] | null | null | null | test/functional/wallet_dump.py | xcoin-project/xcoin | a709553b4e10d5592e1095e3ba63d1a995917c1a | [
"MIT"
] | null | null | null | test/functional/wallet_dump.py | xcoin-project/xcoin | a709553b4e10d5592e1095e3ba63d1a995917c1a | [
"MIT"
] | 1 | 2021-05-27T10:42:18.000Z | 2021-05-27T10:42:18.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_legacy_addr = 0
found_p2sh_segwit_addr = 0
found_bech32_addr = 0
found_script_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_date_label, comment = line.split("#")
key_date_label = key_date_label.split(" ")
# key = key_date_label[0]
date = key_date_label[1]
keytype = key_date_label[2]
imported_key = date == '1970-01-01T00:00:01Z'
if imported_key:
# Imported keys have multiple addresses, no label (keypath) and timestamp
# Skip them
continue
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdseed=1":
# ensure the old master is still available
assert (hd_master_addr_old == addr)
elif keytype == "hdseed=1":
# ensure we have generated a new hd master key
assert (hd_master_addr_old != addr)
hd_master_addr_ret = addr
elif keytype == "script=1":
# scripts don't have keypaths
keypath = None
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=":
if addr.startswith('m') or addr.startswith('n'):
# P2PKH address
found_legacy_addr += 1
elif addr.startswith('Q'):
# P2SH-segwit address
found_p2sh_segwit_addr += 1
elif addr.startswith('rpym1'):
found_bech32_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
# count scripts
for script_addr in script_addrs:
if script_addr == addr.rstrip() and keytype == "script=1":
found_script_addr += 1
break
return found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-addresstype=legacy"]]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
def run_test(self):
wallet_unenc_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
wallet_enc_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")
# generate 30 addresses to compare against the dump
# - 10 legacy P2PKH
# - 10 P2SH-segwit
# - 10 bech32
test_addr_count = 10
addrs = []
for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
for i in range(0, test_addr_count):
addr = self.nodes[0].getnewaddress(address_type=address_type)
vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
addrs.append(vaddr)
# Test scripts dump by adding a 1-of-1 multisig address
multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]
# Refill the keypool. getnewaddress() refills the keypool *before* taking a key from
# the keypool, so the final call to getnewaddress leaves the keypool with one key below
# its capacity
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
result = self.nodes[0].dumpwallet(wallet_unenc_dump)
assert_equal(result['filename'], wallet_unenc_dump)
found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(wallet_unenc_dump, addrs, [multisig_addr], None)
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1) # all scripts must be in the dump
assert_equal(found_addr_chg, 0) # 0 blocks where mined
assert_equal(found_addr_rsv, 90 * 2) # 90 keys plus 100% internal keys
# encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(wallet_enc_dump)
found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc)
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1)
assert_equal(found_addr_chg, 90 * 2) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 90 * 2)
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
# Restart node with new wallet, and test importwallet
self.stop_node(0)
self.start_node(0, ['-wallet=w2'])
# Make sure the address is not IsMine before import
result = self.nodes[0].getaddressinfo(multisig_addr)
assert not result['ismine']
self.nodes[0].importwallet(wallet_unenc_dump)
# Now check IsMine is true
result = self.nodes[0].getaddressinfo(multisig_addr)
assert result['ismine']
if __name__ == '__main__':
WalletDumpTest().main()
| 43.710983 | 146 | 0.604866 |
8ec09292fbf0f676e9b98fbf78ac6f1d2e183365 | 7,252 | py | Python | pyxrd/phases/controllers/edit_phase_controller.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 27 | 2018-06-15T15:28:18.000Z | 2022-03-10T12:23:50.000Z | pyxrd/phases/controllers/edit_phase_controller.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 22 | 2018-06-14T08:29:16.000Z | 2021-07-05T13:33:44.000Z | pyxrd/phases/controllers/edit_phase_controller.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 8 | 2019-04-13T13:03:51.000Z | 2021-06-19T09:29:11.000Z | # coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
import logging
logger = logging.getLogger(__name__)
from mvc import Controller
from mvc.adapters.dummy_adapter import DummyAdapter
from pyxrd.generic.views import ChildObjectListStoreView
from pyxrd.generic.views.combobox_tools import add_combo_text_column
from pyxrd.generic.controllers import BaseController
from pyxrd.generic.controllers.objectliststore_controllers import wrap_list_property_to_treemodel
from pyxrd.probabilities.controllers import EditProbabilitiesController
from pyxrd.probabilities.views import EditProbabilitiesView
from pyxrd.phases.controllers import (
EditCSDSTypeController, ComponentsController
)
from pyxrd.phases.views import EditCSDSDistributionView
class EditPhaseController(BaseController):
    """
    Controller for the phase edit view.

    Wires the phase model to the CSDS-distribution, probabilities and
    components sub-views, and keeps widget sensitivity in sync with the
    model's 'based_on' / 'inherit_*' state.
    """

    # Sub-view / sub-controller slots, created lazily in register_view and
    # the reset_* helpers below.
    probabilities_view = None
    probabilities_controller = None
    csds_view = None
    csds_controller = None
    components_view = None
    components_controller = None

    # Properties routed through custom_handler instead of default adapters.
    widget_handlers = {
        'custom': 'custom_handler',
    }

    @property
    def phases_treemodel(self):
        # Tree model over the project's phase list; None when the phase is
        # not attached to a project.
        if self.model.project is not None:
            return wrap_list_property_to_treemodel(
                self.model.project, type(self.model.project).phases)
        else:
            return None

    def register_view(self, view):
        """Create and attach the CSDS, probabilities and components sub-views."""
        BaseController.register_view(self, view)
        self.csds_view = EditCSDSDistributionView(parent=self.view)
        self.view.set_csds_view(self.csds_view)
        # Probabilities only exist for multi-component phases (G > 1).
        if self.model.G > 1:
            self.probabilities_view = EditProbabilitiesView(parent=self.view)
            self.view.set_probabilities_view(self.probabilities_view)
        else:
            self.view.remove_probabilities()
        self.components_view = ChildObjectListStoreView(parent=self.view)
        # Components cannot be added/removed from this view; hide the buttons.
        self.components_view["button_add_object"].set_visible(False)
        self.components_view["button_add_object"].set_no_show_all(True)
        self.components_view["button_del_object"].set_visible(False)
        self.components_view["button_del_object"].set_no_show_all(True)
        self.view.set_components_view(self.components_view)

    @staticmethod
    def custom_handler(self, prop, widget): # TODO split out these 4 properties in their own adapters
        # Dispatch the four specially-handled properties; always returns a
        # DummyAdapter so the MVC framework treats them as handled.
        if prop.label in ("CSDS_distribution", "components", "probabilities", "based_on"):
            if prop.label == "CSDS_distribution":
                self.reset_csds_controller()
            elif prop.label == "components":
                self.reset_components_controller()
            elif prop.label == "probabilities":
                self.reset_probabilities_controller()
            elif prop.label == "based_on" and self.phases_treemodel is not None:
                combo = self.view["phase_based_on"]
                combo.set_model(self.phases_treemodel)
                combo.connect('changed', self.on_based_on_changed)
                def phase_renderer(celllayout, cell, model, itr, user_data=None):
                    # Grey out phases that cannot serve as a base: different
                    # R or G, or phases whose inheritance chain leads back here.
                    phase = model.get_user_data(itr)
                    if phase: # FIXME an error can occur here if the phase list is cleared and the view is still open
                        cell.set_sensitive(phase.R == self.model.R and phase.G == self.model.G and phase.get_based_on_root() != self.model)
                add_combo_text_column(combo, data_func=phase_renderer, text_col=self.phases_treemodel.c_name)
                # Pre-select the phase this model is currently based on.
                for row in self.phases_treemodel:
                    if self.phases_treemodel.get_user_data(row.iter) == self.model.based_on:
                        combo.set_active_iter (row.iter)
                        break
            return DummyAdapter(controller=self, prop=prop)

    def reset_csds_controller(self):
        # Create the CSDS controller on first use, re-point it afterwards.
        if self.csds_controller is None:
            self.csds_controller = EditCSDSTypeController(
                model=self.model, view=self.csds_view, parent=self)
        else:
            self.csds_controller.model = self.model

    def reset_components_controller(self):
        self.components_controller = ComponentsController(
            model=self.model, view=self.components_view, parent=self)

    def reset_probabilities_controller(self):
        if self.probabilities_controller is None:
            if self.model.G > 1: # probabilities only exist for multi-component phases
                self.probabilities_controller = EditProbabilitiesController(
                    model=self.model.probabilities,
                    view=self.probabilities_view, parent=self)
        else:
            self.probabilities_controller.model = self.model.probabilities

    def register_adapters(self):
        self.update_sensitivities()

    def update_sensitivities(self):
        """Enable/disable widgets depending on 'based_on' and inherit flags."""
        can_inherit = (self.model.based_on is not None)
        for name in ("sigma_star", "display_color"):
            widget_name = "container_%s" % name
            self.view[widget_name].set_sensitive(not (can_inherit and getattr(self.model, "inherit_%s" % name)))
            self.view[widget_name].set_visible(not (can_inherit and getattr(self.model, "inherit_%s" % name)))
            self.view["phase_inherit_%s" % name].set_sensitive(can_inherit)
        for name in ("CSDS_distribution",):
            sensitive = not (can_inherit and getattr(self.model, "inherit_%s" % name))
            self.view["phase_inherit_%s" % name].set_sensitive(can_inherit)
            self.view.set_csds_sensitive(sensitive)
            self.reset_csds_controller()

    # ------------------------------------------------------------
    #      Notifications of observable properties
    # ------------------------------------------------------------
    @Controller.observe("inherit_display_color", assign=True)
    @Controller.observe("inherit_sigma_star", assign=True)
    @Controller.observe("inherit_CSDS_distribution", assign=True)
    def notif_change_inherit(self, model, prop_name, info):
        self.update_sensitivities()
        return

    @Controller.observe("probabilities", assign=True)
    def notif_change_probabilities(self, model, prop_name, info):
        self.reset_probabilities_controller()
        return

    @Controller.observe("name", assign=True)
    def notif_name_changed(self, model, prop_name, info):
        # Propagate the rename into the project's phase tree model.
        self.phases_treemodel.on_item_changed(self.model)
        return

    # ------------------------------------------------------------
    #      GTK Signal handlers
    # ------------------------------------------------------------
    def on_based_on_changed(self, combo, user_data=None):
        itr = combo.get_active_iter()
        if itr is not None:
            val = combo.get_model().get_user_data(itr)
            # cannot be based on itself == not based on anything
            # cannot be based on a model with a different # of components
            if val != self.model and val.get_based_on_root() != self.model and val.G == self.model.G:
                self.model.based_on = val
                self.update_sensitivities()
                return
        # Invalid or cleared selection: reset the combo and the model link.
        combo.set_active(-1)
        self.update_sensitivities()
        self.model.based_on = None
| 41.919075 | 139 | 0.651682 |
843c2f9af2eb22d602b384d493c4608db9aac1e8 | 4,818 | py | Python | account/views.py | johnnfujita/social_django | 009ccc6d4c9480f1ed12b1095e8645ef73dca1f6 | [
"MIT"
] | null | null | null | account/views.py | johnnfujita/social_django | 009ccc6d4c9480f1ed12b1095e8645ef73dca1f6 | [
"MIT"
] | null | null | null | account/views.py | johnnfujita/social_django | 009ccc6d4c9480f1ed12b1095e8645ef73dca1f6 | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.http import require_POST
from common.decorators import ajax_required
from django.contrib.auth import authenticate, login
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
# Create your views here.
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from .models import Profile, Contact
from actions.utils import create_action
from actions.models import Action
def user_login(request):
    """Authenticate a user via LoginForm; renders the login page on GET."""
    if request.method != "POST":
        return render(request, 'account/login.html', {'form': LoginForm()})
    form = LoginForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        user = authenticate(request, username=cd['username'],
                            password=cd['password'])
        if user is None:
            return HttpResponse('Invalid Login')
        if user.is_active:
            login(request, user)
            return HttpResponse('Authenticated successfully')
        return HttpResponse('Disabled Account')
    # Invalid form: re-render with the bound form so errors are shown.
    return render(request, 'account/login.html', {'form': form})
@login_required
def dashboard(request):
    """Show the 10 most recent actions, restricted to followed users if any."""
    feed = Action.objects.exclude(user=request.user)
    followed_ids = request.user.following.values_list('id', flat=True)
    if followed_ids:
        feed = feed.filter(user_id__in=followed_ids)
    feed = feed.select_related('user', 'user__profile').prefetch_related('target')[:10]
    context = {'section': 'dashboard', 'actions': feed}
    return render(request, 'account/dashboard.html', context)
def register(request):
    """
    Register a new user.

    On a valid POST: create the user (hashing the chosen password), attach an
    empty Profile, record an activity action, and render the done page.
    Otherwise render the registration form.
    """
    if request.method == "POST":
        user_form = UserRegistrationForm(request.POST)
        if user_form.is_valid():
            new_user = user_form.save(commit=False)
            # set_password() hashes the password instead of storing it raw.
            new_user.set_password(
                user_form.cleaned_data['password']
            )
            new_user.save()
            Profile.objects.create(user=new_user)
            # Fixed grammar of the user-visible activity message
            # (was "has create an account").
            create_action(new_user, 'has created an account')
            return render(request, 'account/register_done.html', {'new_user': new_user})
    else:
        user_form = UserRegistrationForm()
    return render(request, 'account/register.html', {'user_form': user_form})
@login_required
def edit(request):
    """Edit the current user's account and profile; flashes a status message."""
    if request.method == "POST":
        user_form = UserEditForm(instance=request.user, data=request.POST)
        profile_form = ProfileEditForm(instance=request.user.profile,
                                       data=request.POST,
                                       files=request.FILES)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, "Profile updated successfully")
        else:
            messages.error(request, 'Error updating your profile')
    else:
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(instance=request.user.profile)
    context = {'user_form': user_form, 'profile_form': profile_form}
    return render(request, 'account/edit.html', context)
@login_required
def user_list(request):
    """List all active users."""
    active_users = User.objects.filter(is_active=True)
    context = {'section': 'people', 'users': active_users}
    return render(request, 'account/user/list.html', context)
@login_required
def user_detail(request, username):
    """Show the detail page for one active user; 404 if missing or inactive."""
    user = get_object_or_404(User, username=username, is_active=True)
    context = {'section': 'people', 'user': user}
    return render(request, 'account/user/detail.html', context)
@ajax_required
@require_POST
@login_required
def user_follow(request):
    """
    AJAX endpoint to follow/unfollow a user.

    Expects POST params 'id' (target user pk) and 'action' ('follow' creates
    the relation; anything else removes it). Returns {'status': 'ok'} or
    {'status': 'error'}.
    """
    user_id = request.POST.get('id')
    action = request.POST.get('action')
    if user_id and action:
        try:
            user = User.objects.get(id=user_id)
            if action == 'follow':
                Contact.objects.get_or_create(
                    user_from=request.user,
                    user_to=user
                )
                create_action(request.user, 'is following', user)
            else:
                Contact.objects.filter(user_from=request.user,
                                       user_to=user).delete()
            return JsonResponse({'status': 'ok'})
        except User.DoesNotExist:
            # Only a missing target user is an expected failure; the original
            # bare `except:` silently swallowed every exception, hiding bugs.
            return JsonResponse({'status': 'error'})
return JsonResponse({'status': 'error'}) | 36.5 | 93 | 0.600042 |
3dd10a015ff7b29247fef3aefa4e0c48b19e83f2 | 11,069 | py | Python | plugins/trustedcoin/qt.py | opendime/electrum | b436042c89dc852790bc95287fae18bfe2158031 | [
"MIT"
] | 4 | 2016-10-13T16:11:23.000Z | 2021-07-02T23:02:46.000Z | plugins/trustedcoin/qt.py | opendime/electrum | b436042c89dc852790bc95287fae18bfe2158031 | [
"MIT"
] | null | null | null | plugins/trustedcoin/qt.py | opendime/electrum | b436042c89dc852790bc95287fae18bfe2158031 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
from decimal import Decimal
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_gui.qt.util import *
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum_gui.qt.amountedit import AmountEdit
from electrum_gui.qt.main_window import StatusBarButton
from electrum.i18n import _
from electrum.plugins import hook
from trustedcoin import TrustedCoinPlugin, server
def need_server(wallet, tx):
    """Return True if signing `tx` requires the TrustedCoin server key (x3/)."""
    from electrum.account import BIP32_Account
    # The server is needed when any extended-pubkey input derives from xpub3.
    long_id, short_id = wallet.get_user_id()
    xpub3 = wallet.master_public_keys['x3/']
    for x_pubkey in tx.inputs_to_sign():
        if x_pubkey[0:2] != 'ff':
            continue
        xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
        if xpub == xpub3:
            return True
    return False
class Plugin(TrustedCoinPlugin):
    @hook
    def on_new_window(self, window):
        """
        Hook: add the TrustedCoin status-bar button to a new wallet window and
        start fetching billing info in the background.
        """
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if wallet.can_sign_without_server():
            # NOTE(review): "is was restored" is a typo in this translatable
            # string; fixing it would change the translation key.
            msg = ' '.join([
                _('This wallet is was restored from seed, and it contains two master private keys.'),
                _('Therefore, two-factor authentication is disabled.')
            ])
            action = lambda: window.show_message(msg)
        else:
            action = partial(self.settings_dialog, window)
        button = StatusBarButton(QIcon(":icons/trustedcoin.png"),
                                 _("TrustedCoin"), action)
        window.statusBar().addPermanentWidget(button)
        # Fetch billing info off the GUI thread.
        t = Thread(target=self.request_billing_info, args=(wallet,))
        t.setDaemon(True)
        t.start()
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
@hook
def sign_tx(self, window, tx):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if not wallet.can_sign_without_server():
self.print_error("twofactor:sign_tx")
auth_code = None
if need_server(wallet, tx):
auth_code = self.auth_dialog(window)
else:
self.print_error("twofactor: xpub3 not needed")
window.wallet.auth_code = auth_code
def waiting_dialog(self, window, on_finished=None):
task = partial(self.request_billing_info, window.wallet)
return WaitingDialog(window, 'Getting billing information...', task,
on_finished)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if not wallet.can_sign_without_server():
if wallet.billing_info is None:
# request billing info before forming the transaction
waiting_dialog(self, window).wait()
if wallet.billing_info is None:
window.show_message('Could not contact server')
return True
return False
def settings_dialog(self, window):
self.waiting_dialog(window, partial(self.show_settings_dialog, window))
    def show_settings_dialog(self, window, success):
        """
        Show the TrustedCoin information/billing dialog.

        :param window: wallet window the dialog is modal to
        :param success: result of the preceding billing-info fetch; when
                        False only an error message is shown
        """
        if not success:
            window.show_message(_('Server not reachable.'))
            return
        wallet = window.wallet
        d = WindowModalDialog(window, _("TrustedCoin Information"))
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()
        logo = QLabel()
        logo.setPixmap(QPixmap(":icons/trustedcoin.png"))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
              + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)
        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)
        vbox.addLayout(hbox)
        vbox.addStretch(10)
        msg = _('TrustedCoin charges a fee per co-signed transaction. You may pay on each transaction (an extra output will be added to your transaction), or you may purchase prepaid transaction using this dialog.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addStretch(10)
        grid = QGridLayout()
        vbox.addLayout(grid)
        # Pricing table: row 0 is the pay-per-tx price; one row per prepaid
        # bundle follows, each with a Buy button.
        price_per_tx = wallet.price_per_tx
        v = price_per_tx.get(1)
        grid.addWidget(QLabel(_("Price per transaction (not prepaid):")), 0, 0)
        grid.addWidget(QLabel(window.format_amount(v) + ' ' + window.base_unit()), 0, 1)
        i = 1
        if 10 not in price_per_tx:
            price_per_tx[10] = 10 * price_per_tx.get(1)
        for k, v in sorted(price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Price for %d prepaid transactions:"%k), i, 0)
            grid.addWidget(QLabel("%d x "%k + window.format_amount(v/k) + ' ' + window.base_unit()), i, 1)
            b = QPushButton(_("Buy"))
            # Bind k and v as defaults so each button buys its own bundle.
            b.clicked.connect(lambda b, k=k, v=v: self.on_buy(window, k, v, d))
            grid.addWidget(b, i, 2)
            i += 1
        n = wallet.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)
        # transfer button (not implemented)
        #def on_transfer():
        #    server.transfer_credit(self.user_id, recipient, otp, signature_callback)
        #    pass
        #b = QPushButton(_("Transfer"))
        #b.clicked.connect(on_transfer)
        #grid.addWidget(b, 1, 2)
        #grid.addWidget(QLabel(_("Next Billing Address:")), i, 0)
        #grid.addWidget(QLabel(self.billing_info['billing_address']), i, 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
def on_buy(self, window, k, v, d):
d.close()
if window.pluginsdialog:
window.pluginsdialog.close()
wallet = window.wallet
uri = "bitcoin:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
wallet.is_billing = True
window.pay_to_URI(uri)
window.payto_e.setFrozen(True)
window.message_e.setFrozen(True)
window.amount_e.setFrozen(True)
    def accept_terms_of_use(self, window):
        """Show the Terms of Service page and collect the user's e-mail.

        The ToS text is fetched from the TrustedCoin server in a background
        thread and delivered to the UI thread via the ``twofactor:TOS``
        signal. The wizard's Next button is relabelled "Accept" and only
        enabled once the e-mail field matches a minimal address pattern.

        :returns: the e-mail address entered by the user (as ``str``)
        """
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Terms of Service")))
        tos_e = QTextEdit()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)
        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)
        next_button = window.next_button
        prior_button_text = next_button.text()
        next_button.setText(_('Accept'))
        def request_TOS():
            # Runs in a worker thread: fetch the text, stash it on self,
            # then notify the UI thread to display it.
            tos = server.get_terms_of_service()
            self.TOS = tos
            window.emit(SIGNAL('twofactor:TOS'))
        def on_result():
            tos_e.setText(self.TOS)
        def set_enabled():
            # Enable "Accept" only for something that looks like an address.
            next_button.setEnabled(re.match(regexp,email_e.text()) is not None)
        window.connect(window, SIGNAL('twofactor:TOS'), on_result)
        t = Thread(target=request_TOS)
        t.setDaemon(True)
        t.start()
        # Very loose e-mail check: non-empty local part, '@', dotted domain.
        regexp = r"[^@]+@[^@]+\.[^@]+"
        email_e.textChanged.connect(set_enabled)
        email_e.setFocus(True)
        # Blocks until the user accepts (or raises on cancel).
        window.set_main_layout(vbox, next_enabled=False)
        next_button.setText(prior_button_text)
        return str(email_e.text())
def setup_google_auth(self, window, _id, otp_secret):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel("This wallet is already registered, but it was never authenticated. To finalize your registration, please enter your Google Authenticator Code. If you do not have this code, delete the wallet file and start a new registration")
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
def set_enabled():
window.next_button.setEnabled(len(pw.text()) == 6)
pw.textChanged.connect(set_enabled)
while True:
if not window.set_main_layout(vbox, next_enabled=False,
raise_on_cancel=False):
return False
otp = pw.get_amount()
try:
server.auth(_id, otp)
return True
except:
window.show_message(_('Incorrect password'))
pw.setText('')
| 37.522034 | 254 | 0.624266 |
eae87542fcc95cbfdb8130b783f3843be562a8a0 | 3,476 | py | Python | ig_clone_api/photos/migrations/0001_initial.py | whosgriffith/ig-clone-api | 83b79ed62e21c654d0945decaaf6571e19c8c12a | [
"MIT"
] | null | null | null | ig_clone_api/photos/migrations/0001_initial.py | whosgriffith/ig-clone-api | 83b79ed62e21c654d0945decaaf6571e19c8c12a | [
"MIT"
] | null | null | null | ig_clone_api/photos/migrations/0001_initial.py | whosgriffith/ig-clone-api | 83b79ed62e21c654d0945decaaf6571e19c8c12a | [
"MIT"
] | null | null | null | # Generated by Django 3.1.13 on 2021-08-19 01:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the photos app: Photo, Like and Comment models."""

    # First migration of this app.
    initial = True

    dependencies = [
        # The user FK targets whichever model is configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='Created at')),
                # NOTE(review): 'modified' uses auto_now_add (set once at
                # creation) although the help text says "last modified" —
                # auto_now=True is likely intended; the migration reflects
                # the model as generated and must not be edited in place.
                ('modified', models.DateTimeField(auto_now_add=True, help_text='Date time of the last time the object was modified.', verbose_name='Last modified at')),
                ('image', models.ImageField(upload_to='photos/', verbose_name='photo')),
                ('description', models.CharField(blank=True, max_length=255, verbose_name='photo description')),
                ('total_likes', models.PositiveIntegerField()),
                ('total_comments', models.PositiveIntegerField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='Created at')),
                ('modified', models.DateTimeField(auto_now_add=True, help_text='Date time of the last time the object was modified.', verbose_name='Last modified at')),
                ('photo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.photo')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='Created at')),
                ('modified', models.DateTimeField(auto_now_add=True, help_text='Date time of the last time the object was modified.', verbose_name='Last modified at')),
                ('comment', models.CharField(max_length=255, verbose_name='comment')),
                ('photo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.photo')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
    ]
| 51.880597 | 168 | 0.603567 |
74d88933ee816de5dafbd836b99627fe8ea0cf2f | 25,592 | py | Python | src/zeep/xsd/elements/indicators.py | steven-deprez/python-zeep | 6acadb67cde018ef409d5a1d265920f774ed03cf | [
"MIT"
] | null | null | null | src/zeep/xsd/elements/indicators.py | steven-deprez/python-zeep | 6acadb67cde018ef409d5a1d265920f774ed03cf | [
"MIT"
] | null | null | null | src/zeep/xsd/elements/indicators.py | steven-deprez/python-zeep | 6acadb67cde018ef409d5a1d265920f774ed03cf | [
"MIT"
] | null | null | null | """
zeep.xsd.elements.indicators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Indicators are a collection of elements. There are four available, these are
All, Choice, Group and Sequence.
Indicator -> OrderIndicator -> All
-> Choice
-> Sequence
-> Group
"""
import copy
import operator
import typing
from collections import OrderedDict, defaultdict, deque
from cached_property import threaded_cached_property
from lxml import etree
from zeep.exceptions import UnexpectedElementError, ValidationError
from zeep.xsd.const import NotSet, SkipValue
from zeep.xsd.elements import Any, Element
from zeep.xsd.elements.base import Base
from zeep.xsd.utils import (
NamePrefixGenerator,
UniqueNameGenerator,
create_prefixed_name,
max_occurs_iter,
)
__all__ = ["All", "Choice", "Group", "Sequence"]
class Indicator(Base):
    """Shared behaviour for the indicator classes (All/Choice/Sequence/Group).

    Subclasses must provide :attr:`elements` and :meth:`clone`.
    """

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__, super().__repr__())

    @property
    def default_value(self):
        # Collect each child's default, preserving declaration order.
        values = OrderedDict(
            (child_name, child.default_value) for child_name, child in self.elements
        )
        # Unbounded indicators wrap their defaults in a list-item key.
        return {"_value_1": values} if self.accepts_multiple else values

    @property
    def elements(self):
        raise NotImplementedError()

    def clone(self, name, min_occurs=1, max_occurs=1):
        raise NotImplementedError()
class OrderIndicator(Indicator, list):
    """Base class for All, Choice and Sequence classes.

    An OrderIndicator *is* a list of its child elements/indicators; the
    occurrence bounds control how many times the whole group may repeat.
    """

    name = None

    def __init__(self, elements=None, min_occurs=1, max_occurs=1):
        self.min_occurs = min_occurs
        self.max_occurs = max_occurs
        super().__init__()
        if elements is not None:
            self.extend(elements)

    def clone(self, name, min_occurs=1, max_occurs=1):
        # ``name`` is accepted for interface compatibility but not used here.
        return self.__class__(
            elements=list(self), min_occurs=min_occurs, max_occurs=max_occurs
        )

    @threaded_cached_property
    def elements(self):
        """List of tuples containing the element name and the element"""
        result = []
        for name, elm in self.elements_nested:
            if name is None:
                # Nested single-occurrence indicators are flattened into us.
                result.extend(elm.elements)
            else:
                result.append((name, elm))
        return result

    @threaded_cached_property
    def elements_nested(self):
        """List of tuples containing the element name and the element"""
        result = []  # type: typing.List[typing.Tuple[typing.Optional[str], typing.Any]]
        generator = NamePrefixGenerator()
        generator_2 = UniqueNameGenerator()

        for elm in self:
            if isinstance(elm, (All, Choice, Group, Sequence)):
                if elm.accepts_multiple:
                    # Repeating sub-groups get a generated ``_value_N`` name.
                    result.append((generator.get_name(), elm))
                else:
                    # Reserve the sub-element names so later siblings cannot
                    # clash, then keep the indicator itself unnamed.
                    for sub_name, sub_elm in elm.elements:
                        sub_name = generator_2.create_name(sub_name)
                    result.append((None, elm))
            elif isinstance(elm, (Any, Choice)):
                result.append((generator.get_name(), elm))
            else:
                name = generator_2.create_name(elm.attr_name)
                result.append((name, elm))
        return result

    def accept(self, values):
        """Return the number of values which are accepted by this choice.

        If not all required elements are available then 0 is returned.
        """
        if not self.accepts_multiple:
            values = [values]

        results = set()
        for value in values:
            num = 0
            for name, element in self.elements_nested:
                if isinstance(element, Element):
                    if element.name in value and value[element.name] is not None:
                        num += 1
                else:
                    num += element.accept(value)
            results.add(num)
        return max(results)

    def parse_args(self, args, index=0):
        # If the sequence contains an choice element then we can't convert
        # the args to kwargs since Choice elements don't work with position
        # arguments
        for name, elm in self.elements_nested:
            if isinstance(elm, Choice):
                raise TypeError("Choice elements only work with keyword arguments")

        result = {}
        for name, element in self.elements:
            if index >= len(args):
                break
            result[name] = args[index]
            index += 1

        return result, args, index

    def parse_kwargs(self, kwargs, name, available_kwargs):
        """Apply the given kwarg to the element.

        The available_kwargs is modified in-place. Returns a dict with the
        result.

        :param kwargs: The kwargs
        :type kwargs: dict
        :param name: The name as which this type is registered in the parent
        :type name: str
        :param available_kwargs: The kwargs keys which are still available,
         modified in place
        :type available_kwargs: set
        :rtype: dict
        """
        if self.accepts_multiple:
            assert name

        if name:
            if name not in available_kwargs:
                return {}

            assert self.accepts_multiple

            # Make sure we have a list, lame lame
            item_kwargs = kwargs.get(name)
            if not isinstance(item_kwargs, list):
                item_kwargs = [item_kwargs]

            result = []
            for item_value in max_occurs_iter(self.max_occurs, item_kwargs):
                try:
                    # Re-bind item_kwargs to the set of keys still unconsumed
                    # for this item.
                    item_kwargs = set(item_value.keys())
                except AttributeError:
                    raise TypeError(
                        "A list of dicts is expected for unbounded Sequences"
                    )

                subresult = OrderedDict()
                for item_name, element in self.elements:
                    value = element.parse_kwargs(item_value, item_name, item_kwargs)
                    if value is not None:
                        subresult.update(value)

                if item_kwargs:
                    raise TypeError(
                        ("%s() got an unexpected keyword argument %r.")
                        % (self, list(item_kwargs)[0])
                    )

                result.append(subresult)

            result = {name: result}

            # All items consumed
            if not any(filter(None, item_kwargs)):
                available_kwargs.remove(name)

            return result

        else:
            assert not self.accepts_multiple
            result = OrderedDict()
            for elm_name, element in self.elements_nested:
                sub_result = element.parse_kwargs(kwargs, elm_name, available_kwargs)
                if sub_result:
                    result.update(sub_result)

            return result

    def resolve(self):
        # Resolve each child in place (children may be lazy references).
        for i, elm in enumerate(self):
            self[i] = elm.resolve()
        return self

    def render(self, parent, value, render_path):
        """Create subelements in the given parent object."""
        if not isinstance(value, list):
            values = [value]
        else:
            values = value

        self.validate(values, render_path)

        for value in max_occurs_iter(self.max_occurs, values):
            for name, element in self.elements_nested:
                if name:
                    if name in value:
                        element_value = value[name]
                        child_path = render_path + [name]
                    else:
                        element_value = NotSet
                        child_path = render_path
                else:
                    element_value = value
                    child_path = render_path

                # SkipValue is an explicit "omit this element" marker.
                if element_value is SkipValue:
                    continue

                if element_value is not None or not element.is_optional:
                    element.render(parent, element_value, child_path)

    def validate(self, value, render_path):
        for item in value:
            if item is NotSet:
                raise ValidationError("No value set", path=render_path)

    def signature(self, schema=None, standalone=True):
        parts = []
        for name, element in self.elements_nested:
            if isinstance(element, Indicator):
                parts.append(element.signature(schema, standalone=False))
            else:
                value = element.signature(schema, standalone=False)
                parts.append("%s: %s" % (name, value))

        part = ", ".join(parts)

        if self.accepts_multiple:
            return "[%s]" % (part,)
        return part
class All(OrderIndicator):
    """Allows the elements in the group to appear (or not appear) in any order
    in the containing element.
    """

    def __init__(self, elements=None, min_occurs=1, max_occurs=1, consume_other=False):
        super().__init__(elements, min_occurs, max_occurs)
        # When True, any leftover (unexpected) elements are collected under
        # the ``_raw_elements`` key instead of being left in the queue.
        self._consume_other = consume_other

    def parse_xmlelements(self, xmlelements, schema, name=None, context=None):
        """Consume matching xmlelements

        :param xmlelements: Dequeue of XML element objects
        :type xmlelements: collections.deque of lxml.etree._Element
        :param schema: The parent XML schema
        :type schema: zeep.xsd.Schema
        :param name: The name of the parent element
        :type name: str
        :param context: Optional parsing context (for inline schemas)
        :type context: zeep.xsd.context.XmlParserContext
        :rtype: dict or None
        """
        result = OrderedDict()
        expected_tags = {element.qname for __, element in self.elements}
        consumed_tags = set()

        # Bucket the matching elements by tag; order within a tag is kept.
        values = defaultdict(deque)  # type: typing.Dict[str, etree._Element]
        for i, elm in enumerate(xmlelements):
            if elm.tag in expected_tags:
                consumed_tags.add(i)
                values[elm.tag].append(elm)

        # Remove the consumed tags from the xmlelements
        for i in sorted(consumed_tags, reverse=True):
            del xmlelements[i]

        for name, element in self.elements:
            sub_elements = values.get(element.qname)
            if sub_elements:
                result[name] = element.parse_xmlelements(
                    sub_elements, schema, context=context
                )

        if self._consume_other and xmlelements:
            result["_raw_elements"] = list(xmlelements)
            xmlelements.clear()
        return result
class Choice(OrderIndicator):
    """Permits one and only one of the elements contained in the group."""

    def parse_args(self, args, index=0):
        # A choice cannot be mapped onto positional arguments since the
        # chosen branch is ambiguous.
        if args:
            raise TypeError("Choice elements only work with keyword arguments")

    @property
    def is_optional(self):
        return True

    @property
    def default_value(self):
        return OrderedDict()

    def parse_xmlelements(self, xmlelements, schema, name=None, context=None):
        """Consume matching xmlelements

        :param xmlelements: Dequeue of XML element objects
        :type xmlelements: collections.deque of lxml.etree._Element
        :param schema: The parent XML schema
        :type schema: zeep.xsd.Schema
        :param name: The name of the parent element
        :type name: str
        :param context: Optional parsing context (for inline schemas)
        :type context: zeep.xsd.context.XmlParserContext
        :rtype: dict or None
        """
        result = []

        for _unused in max_occurs_iter(self.max_occurs):
            if not xmlelements:
                break

            # Choose out of multiple: try each branch against a copy of the
            # queue and remember how many elements it would consume.
            options = []
            for element_name, element in self.elements_nested:

                local_xmlelements = copy.copy(xmlelements)

                try:
                    sub_result = element.parse_xmlelements(
                        xmlelements=local_xmlelements,
                        schema=schema,
                        name=element_name,
                        context=context,
                    )
                except UnexpectedElementError:
                    continue

                if isinstance(element, Element):
                    sub_result = {element_name: sub_result}

                num_consumed = len(xmlelements) - len(local_xmlelements)
                if num_consumed:
                    options.append((num_consumed, sub_result))

            if not options:
                xmlelements = []
                break

            # Sort on least left: prefer the branch consuming the most input.
            options = sorted(options, key=operator.itemgetter(0), reverse=True)
            if options:
                result.append(options[0][1])
                # Actually consume the winning branch's elements.
                for i in range(options[0][0]):
                    xmlelements.popleft()
            else:
                break

        if self.accepts_multiple:
            result = {name: result}
        else:
            result = result[0] if result else {}
        return result

    def parse_kwargs(self, kwargs, name, available_kwargs):
        """Processes the kwargs for this choice element.

        Returns a dict containing the values found.

        This handles two distinct initialization methods:

        1. Passing the choice elements directly to the kwargs (unnested)
        2. Passing the choice elements into the `name` kwarg (_value_1) (nested).
           This case is required when multiple choice elements are given.

        :param name: Name of the choice element (_value_1)
        :type name: str
        :param element: Choice element object
        :type element: zeep.xsd.Choice
        :param kwargs: dict (or list of dicts) of kwargs for initialization
        :type kwargs: list / dict
        """
        if name and name in available_kwargs:
            assert self.accepts_multiple

            values = kwargs[name] or []
            available_kwargs.remove(name)
            result = []

            if isinstance(values, dict):
                values = [values]

            # TODO: Use most greedy choice instead of first matching
            for value in values:
                for element in self:
                    if isinstance(element, OrderIndicator):
                        choice_value = value[name] if name in value else value
                        if element.accept(choice_value):
                            result.append(choice_value)
                            break
                    else:
                        if isinstance(element, Any):
                            result.append(value)
                            break
                        elif element.name in value:
                            choice_value = value.get(element.name)
                            result.append({element.name: choice_value})
                            break
                else:
                    raise TypeError(
                        "No complete xsd:Sequence found for the xsd:Choice %r.\n"
                        "The signature is: %s" % (name, self.signature())
                    )

            if not self.accepts_multiple:
                result = result[0] if result else None
        else:
            # Direct use-case isn't supported when maxOccurs > 1
            if self.accepts_multiple:
                return {}

            result = {}

            # When choice elements are specified directly in the kwargs
            found = False
            for name, choice in self.elements_nested:
                # Work on a copy so a rejected branch doesn't consume kwargs.
                temp_kwargs = copy.copy(available_kwargs)
                subresult = choice.parse_kwargs(kwargs, name, temp_kwargs)
                if subresult:
                    if not any(subresult.values()):
                        available_kwargs.intersection_update(temp_kwargs)
                        result.update(subresult)
                    elif not found:
                        available_kwargs.intersection_update(temp_kwargs)
                        result.update(subresult)
                        found = True
            if found:
                # Fill unchosen branches with None so the object shape is stable.
                for choice_name, choice in self.elements:
                    result.setdefault(choice_name, None)
            else:
                result = {}

        if name and self.accepts_multiple:
            result = {name: result}
        return result

    def render(self, parent, value, render_path):
        """Render the value to the parent element tree node.

        This is a bit more complex then the order render methods since we need
        to search for the best matching choice element.
        """
        if not self.accepts_multiple:
            value = [value]

        self.validate(value, render_path)

        for item in value:
            result = self._find_element_to_render(item)
            if result:
                element, choice_value = result
                element.render(parent, choice_value, render_path)

    def validate(self, value, render_path):
        found = 0
        for item in value:
            result = self._find_element_to_render(item)
            if result:
                found += 1

        if not found and not self.is_optional:
            raise ValidationError("Missing choice values", path=render_path)

    def accept(self, values):
        """Return the number of values which are accepted by this choice.

        If not all required elements are available then 0 is returned.
        """
        nums = set()
        for name, element in self.elements_nested:
            if isinstance(element, Element):
                if self.accepts_multiple:
                    if all(name in item and item[name] for item in values):
                        nums.add(1)
                else:
                    if name in values and values[name]:
                        nums.add(1)
            else:
                num = element.accept(values)
                nums.add(num)
        return max(nums) if nums else 0

    def _find_element_to_render(self, value):
        """Return a tuple (element, value) for the best matching choice.

        This is used to decide which choice child is best suitable for
        rendering the available data.
        """
        matches = []

        for name, element in self.elements_nested:
            if isinstance(element, Element):
                if element.name in value:
                    try:
                        choice_value = value[element.name]
                    except KeyError:
                        choice_value = value

                    if choice_value is not None:
                        matches.append((1, element, choice_value))
            else:
                if name is not None:
                    try:
                        choice_value = value[name]
                    except (KeyError, TypeError):
                        choice_value = value
                else:
                    choice_value = value

                score = element.accept(choice_value)
                if score:
                    matches.append((score, element, choice_value))

        # Highest score wins; on a tie, declaration order decides.
        if matches:
            matches = sorted(matches, key=operator.itemgetter(0), reverse=True)
            return matches[0][1:]

    def signature(self, schema=None, standalone=True):
        parts = []
        for name, element in self.elements_nested:
            if isinstance(element, OrderIndicator):
                parts.append("{%s}" % (element.signature(schema, standalone=False)))
            else:
                parts.append(
                    "{%s: %s}" % (name, element.signature(schema, standalone=False))
                )
        part = "(%s)" % " | ".join(parts)
        if self.accepts_multiple:
            return "%s[]" % (part,)
        return part
class Sequence(OrderIndicator):
    """Requires the elements in the group to appear in the specified sequence
    within the containing element.
    """

    def parse_xmlelements(self, xmlelements, schema, name=None, context=None):
        """Consume matching xmlelements

        :param xmlelements: Dequeue of XML element objects
        :type xmlelements: collections.deque of lxml.etree._Element
        :param schema: The parent XML schema
        :type schema: zeep.xsd.Schema
        :param name: The name of the parent element
        :type name: str
        :param context: Optional parsing context (for inline schemas)
        :type context: zeep.xsd.context.XmlParserContext
        :rtype: dict or None
        """
        result = []

        if self.accepts_multiple:
            assert name

        for _unused in max_occurs_iter(self.max_occurs):
            if not xmlelements:
                break

            item_result = OrderedDict()
            for elm_name, element in self.elements:
                try:
                    item_subresult = element.parse_xmlelements(
                        xmlelements, schema, elm_name, context=context
                    )
                except UnexpectedElementError:
                    # In lenient mode an unexpected element just yields None.
                    if schema.settings.strict:
                        raise
                    item_subresult = None

                # Unwrap if allowed
                if isinstance(element, OrderIndicator):
                    item_result.update(item_subresult)
                else:
                    item_result[elm_name] = item_subresult

                if not xmlelements:
                    break
            if item_result:
                result.append(item_result)

        if not self.accepts_multiple:
            return result[0] if result else None

        return {name: result}
class Group(Indicator):
    """Groups a set of element declarations so that they can be incorporated as
    a group into complex type definitions.

    A Group wraps a single child indicator and mostly delegates to it.
    """

    def __init__(self, name, child, max_occurs=1, min_occurs=1):
        super().__init__()
        self.child = child
        self.qname = name
        self.name = name.localname if name else None
        self.max_occurs = max_occurs
        self.min_occurs = min_occurs

    def __str__(self):
        return self.signature()

    def __iter__(self, *args, **kwargs):
        # Iterating a group iterates the wrapped child indicator.
        for item in self.child:
            yield item

    @threaded_cached_property
    def elements(self):
        if self.accepts_multiple:
            return [("_value_1", self.child)]
        return self.child.elements

    def clone(self, name, min_occurs=1, max_occurs=1):
        # Clones are anonymous; the original qname is intentionally dropped.
        return self.__class__(
            name=None, child=self.child, min_occurs=min_occurs, max_occurs=max_occurs
        )

    def accept(self, values):
        """Return the number of values which are accepted by this choice.

        If not all required elements are available then 0 is returned.
        """
        return self.child.accept(values)

    def parse_args(self, args, index=0):
        return self.child.parse_args(args, index)

    def parse_kwargs(self, kwargs, name, available_kwargs):
        if self.accepts_multiple:
            if name not in kwargs:
                return {}

            available_kwargs.remove(name)
            item_kwargs = kwargs[name]

            result = []
            sub_name = "_value_1" if self.child.accepts_multiple else None
            for sub_kwargs in max_occurs_iter(self.max_occurs, item_kwargs):
                available_sub_kwargs = set(sub_kwargs.keys())
                subresult = self.child.parse_kwargs(
                    sub_kwargs, sub_name, available_sub_kwargs
                )

                # Anything left unconsumed is an unknown keyword.
                if available_sub_kwargs:
                    raise TypeError(
                        ("%s() got an unexpected keyword argument %r.")
                        % (self, list(available_sub_kwargs)[0])
                    )

                if subresult:
                    result.append(subresult)
            if result:
                result = {name: result}
        else:
            result = self.child.parse_kwargs(kwargs, name, available_kwargs)
        return result

    def parse_xmlelements(self, xmlelements, schema, name=None, context=None):
        """Consume matching xmlelements

        :param xmlelements: Dequeue of XML element objects
        :type xmlelements: collections.deque of lxml.etree._Element
        :param schema: The parent XML schema
        :type schema: zeep.xsd.Schema
        :param name: The name of the parent element
        :type name: str
        :param context: Optional parsing context (for inline schemas)
        :type context: zeep.xsd.context.XmlParserContext
        :rtype: dict or None
        """
        result = []

        for _unused in max_occurs_iter(self.max_occurs):
            result.append(
                self.child.parse_xmlelements(xmlelements, schema, name, context=context)
            )
            if not xmlelements:
                break
        if not self.accepts_multiple and result:
            return result[0]

        return {name: result}

    def render(self, parent, value, render_path):
        if not isinstance(value, list):
            values = [value]
        else:
            values = value

        for value in values:
            self.child.render(parent, value, render_path)

    def resolve(self):
        self.child = self.child.resolve()
        return self

    def signature(self, schema=None, standalone=True):
        name = create_prefixed_name(self.qname, schema)
        if standalone:
            return "%s(%s)" % (name, self.child.signature(schema, standalone=False))
        else:
            return self.child.signature(schema, standalone=False)
| 33.71805 | 88 | 0.561113 |
3112b6b77370d70d21764e6c386f647e1befbbb9 | 499 | py | Python | searching/linear_search.py | bharathikannann/Python-Data-Structures-and-Algorithms | 61aca26940e486a00cd293fae36c1391b7ef3864 | [
"MIT"
] | 3 | 2021-08-23T21:40:01.000Z | 2022-02-24T10:55:25.000Z | searching/linear_search.py | bharathikannann/Python-Data-Structures-and-Algorithms | 61aca26940e486a00cd293fae36c1391b7ef3864 | [
"MIT"
] | null | null | null | searching/linear_search.py | bharathikannann/Python-Data-Structures-and-Algorithms | 61aca26940e486a00cd293fae36c1391b7ef3864 | [
"MIT"
] | null | null | null | # Linear Search
def linear_search(arr: list, target: int) -> int:
    """Return the index of *target* in *arr*, or -1 if it is not present.

    :param arr: collection of items to scan
    :param target: element to be found
    """
    return next(
        (index for index, item in enumerate(arr) if item == target),
        -1,
    )
'''
Example
>>>linear_search([1,2,3,4],1)
0
>>>linear_search([1,2,3,4],2)
1
>>>linear_search([1,2,3,4],3)
2
>>>linear_search([1,2,3,4],4)
3
>>>linear_search([1,2,3,4],5)
-1
'''
if __name__ == '__main__':
    # Small demo when the module is run as a script.
    print("Linear Search")
    demo_result = linear_search([1, 2, 3, 4], 2)
    print(demo_result)
6b54e8a2c02a8150e697eb5972decb9257f0fc63 | 82 | py | Python | chronicle/__init__.py | nkashy1/chronicle | e1a3b18c2f295c86ad25ffc174eeb8816903ebfb | [
"MIT"
] | null | null | null | chronicle/__init__.py | nkashy1/chronicle | e1a3b18c2f295c86ad25ffc174eeb8816903ebfb | [
"MIT"
] | 2 | 2015-02-09T03:45:12.000Z | 2015-02-09T06:13:14.000Z | chronicle/__init__.py | nkashy1/chronicle | e1a3b18c2f295c86ad25ffc174eeb8816903ebfb | [
"MIT"
] | null | null | null | from messenger import Messenger
from responder import Responder
import responders | 20.5 | 31 | 0.878049 |
bebe5ce0ecf7d903c4a6f9ac7ebd95c6e9f449ee | 1,270 | py | Python | beehave/display/camera.py | maximx1/beehave | 6ffa3aa1bea56ce71927fb97b8c281d53cbc0b58 | [
"MIT"
] | null | null | null | beehave/display/camera.py | maximx1/beehave | 6ffa3aa1bea56ce71927fb97b8c281d53cbc0b58 | [
"MIT"
] | null | null | null | beehave/display/camera.py | maximx1/beehave | 6ffa3aa1bea56ce71927fb97b8c281d53cbc0b58 | [
"MIT"
] | null | null | null | from pygame.rect import Rect
class Camera(object):
    """Scrolling viewport that offsets world-space rects into screen space."""

    def __init__(self, screen_width, screen_height, width, height):
        # ``width``/``height`` describe the full scrollable area; the
        # screen dimensions are handled by the CameraMan helper.
        self.camera_man = CameraMan(screen_width, screen_height)
        self.state = Rect(0, 0, width, height)

    def apply(self, target):
        # Shift the target's rect by the current camera offset for drawing.
        return target.rect.move(self.state.topleft)

    def update(self, target):
        # Recompute the camera rect so it follows the target.
        self.state = self.camera_man.update_camera_view(self.state, target.rect)
class CameraMan():
    """Computes the camera offset that keeps a target centred on screen."""

    def __init__(self, screen_width, screen_height):
        self.screen_width = screen_width
        self.screen_height = screen_height
        self.half_width = int(screen_width / 2)
        self.half_height = int(screen_height / 2)

    def update_camera_view(self, camera, target_rect):
        """Return a new camera Rect centred on *target_rect*, clamped to the world."""
        target_left = target_rect[0]
        target_top = target_rect[1]
        view_width = camera[2]
        view_height = camera[3]

        # Offset that would put the target in the middle of the screen.
        offset_x = -target_left + self.half_width
        offset_y = -target_top + self.half_height

        # Horizontal clamp: left edge first, then right edge.
        offset_x = min(0, offset_x)
        offset_x = max(-(camera.width - self.screen_width), offset_x)

        # Vertical clamp: bottom edge first, then top edge.
        offset_y = max(-(camera.height - self.screen_height), offset_y)
        offset_y = min(0, offset_y)

        return Rect(offset_x, offset_y, view_width, view_height)
| 40.967742 | 94 | 0.603937 |
8860a01668aa027d28e2f3c6c2f67264b2e78f20 | 1,840 | py | Python | azure/setup.py | CharaD7/azure-sdk-for-python | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | azure/setup.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | azure/setup.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from setuptools import setup
# Upgrading from the monolithic azure 0.x package is not supported.
# azure v0.x exposed a __version__ attribute (newer meta-packages don't),
# so its presence identifies an old install.
try:
    import azure
except ImportError:
    pass
else:
    ver = getattr(azure, '__version__', None)
    if ver is not None:
        raise Exception(
            'Upgrading from azure=={} is not supported. '.format(ver) +
            'Uninstall it with "pip uninstall azure" before installing ' +
            'this version.'
        )
# Read the long description up front with a context manager so the file
# handle is closed promptly instead of leaking until interpreter shutdown
# (the previous `open(...).read()` never closed it).
with open('README.rst', 'r') as _readme:
    _long_description = _readme.read()

setup(
    name='azure',
    version='2.0.0rc6',
    description='Microsoft Azure Client Libraries for Python',
    long_description=_long_description,
    license='MIT License',
    author='Microsoft Corporation',
    author_email='ptvshelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    install_requires=[
        'azure-mgmt==0.30.0rc6',
        'azure-batch==1.0.0',
        'azure-servicebus==0.20.3',
        'azure-storage==0.33.0',
        'azure-servicemanagement-legacy==0.20.4',
    ],
)
| 32.857143 | 75 | 0.570652 |
49d8374817c832cab34eec61db7000d0aa59b201 | 8,280 | py | Python | cmdb-usage/biz/handlers/ri_usage_handler.py | zjj1002/aws-cloud-cmdb-system | 47982007688e5db1272435891cb654ab11d0d60a | [
"Apache-2.0"
] | null | null | null | cmdb-usage/biz/handlers/ri_usage_handler.py | zjj1002/aws-cloud-cmdb-system | 47982007688e5db1272435891cb654ab11d0d60a | [
"Apache-2.0"
] | 1 | 2022-01-04T13:53:16.000Z | 2022-01-04T13:53:16.000Z | cmdb-usage/biz/handlers/ri_usage_handler.py | zjj1002/aws-cloud-cmdb-system | 47982007688e5db1272435891cb654ab11d0d60a | [
"Apache-2.0"
] | null | null | null | import csv
import decimal
from datetime import datetime, timedelta
from websdk.consts import const
from libs.base_handler import BaseHandler
from libs.db_context import DBContext
from models.db import AWSRiUsageReport, AwsTaskQueue, AWSRiDateDB
from models.db import model_to_dict
from tornado.web import RequestHandler
class RiUsageTodayHanlder(BaseHandler):
    """Paged listing (or CSV export) of today's RI usage report."""

    def get(self, *args, **kwargs):
        # Paging / filter parameters from the query string.
        pageNum = int(self.get_argument('pageNum', default='1', strip=True))
        pageSize = int(self.get_argument('pageSize', default='10', strip=True))
        key = self.get_argument('key', default=None, strip=True)
        export_csv = self.get_argument('export_csv', default="0", strip=True)
        # Today's date window: [00:00:00, 23:59:59].
        d = datetime.now().strftime('%Y-%m-%d')
        d_start = d + ' 00:00:00'
        d_end = d + ' 23:59:59'
        if not 5 <= pageSize <= 100:
            return self.write(dict(code=400, msg='pageSize只能介于5和100之间。'))
        if not 0 < pageNum:
            # NOTE(review): this validates pageNum but the message says
            # "pageSize" — looks like a copy/paste slip in the message text.
            return self.write(dict(code=400, msg='pageSize只能介于5和100之间。'))
        with DBContext('r', const.DEFAULT_DB_KEY) as session:
            data = session\
                .query(AWSRiUsageReport)\
                .filter(AWSRiUsageReport.date >= d_start)\
                .filter(AWSRiUsageReport.date <= d_end)
            if key is not None:
                # Fuzzy match on platform name.
                data = data.filter(AWSRiUsageReport.platform.like("%" + key + "%"))
            data = data.all()
            data_num = session.query(AWSRiDateDB).all()
            usage_list = [model_to_dict(e) for e in data]
            usage_date_numlist = [model_to_dict(e) for e in data_num]
            for i in usage_date_numlist:
                # Normalize decimals to fixed 5-digit precision strings.
                i["total_ri"] = str(decimal.Decimal(i["total_ri"]).quantize(decimal.Decimal('0.00000')))
            total_ri_num = 0
            rotal_running = 0
            for usage in usage_list:
                usage["total_running"] = int(decimal.Decimal(usage["total_running"]).quantize(decimal.Decimal('0.00000')))
                usage["total_ri"] = int(decimal.Decimal(usage["total_ri"]).quantize(decimal.Decimal('0.00000')))
                usage["coverage_rate"] = str(decimal.Decimal(usage["coverage_rate"]).quantize(decimal.Decimal('0.00000')))
                usage["end"] = {}
                total_ri_num += usage["total_ri"]
                rotal_running += usage["total_running"]
            # Attach per-expiry-date RI counts to each matching usage row
            # (matched on family + size + platform).
            for ri in usage_date_numlist:
                for usage in usage_list:
                    if ri["family"] == usage["family"] and ri["size"] == usage["size"] and ri["platform"] == usage["platform"]:
                        if ri["end"] in usage["end"].keys():
                            usage["end"][ri["end"]] += ri["total_ri"]
                        else:
                            usage["end"].update({ri["end"]: ri["total_ri"]})
            total = len(usage_list)
            pageTotal = (total + pageSize if total % pageSize >= 0 else 0) // pageSize
            pageNum = min([pageNum, pageTotal])
            _pn = pageNum - 1
            # NOTE(review): the "+ 1" makes each page contain pageSize + 1
            # rows (the first row of the next page repeats) — likely off-by-one.
            ec2_data = usage_list[_pn * pageSize: pageNum * pageSize + 1]
            if export_csv == "1":
                # Dump the current page to a CSV file, then stream it back
                # as an attachment in 4 KiB chunks.
                filename = "ri_report.csv"
                data_dict = ec2_data
                headers = [list(i.keys()) for i in data_dict][0]
                rows = [list(i.values()) for i in data_dict]
                with open(filename, "w", encoding="utf8", newline="") as csvfile:
                    writer = csv.writer(csvfile)
                    writer.writerow(headers)
                    writer.writerows(rows)
                self.set_header('Content-Type', 'application/octet-stream')
                self.set_header('Content-Disposition', 'attachment; filename=' + filename)
                buf_size = 4096
                with open(filename, 'rb') as f:
                    while True:
                        data = f.read(buf_size)
                        if not data:
                            break
                        self.write(data)
                self.finish()
            else:
                return self.write(dict(code=0,
                                       msg='获取成功',
                                       count=total,
                                       pageTotal=pageTotal,
                                       data=ec2_data))
class RiUsageHistoryHanlder(BaseHandler):
    """History of RI usage for one instance family/size/platform."""

    def get(self, *args, **kwargs):
        # Default window: the last 365 days.
        end_day = datetime.now()
        start_day = end_day - timedelta(days=365)
        end_day = end_day.strftime("%Y-%m-%d")
        start_day = start_day.strftime("%Y-%m-%d")
        # NOTE(review): both lines read the 'key' argument — presumably they
        # were meant to read 'start_day' / 'end_day'; as written, one query
        # parameter overrides both bounds. Confirm intended parameter names.
        start_day = self.get_argument('key', default=start_day, strip=True)
        end_day = self.get_argument('key', default=end_day, strip=True)
        family = self.get_argument('family', default="c5", strip=True)
        size = self.get_argument('size', default="large", strip=True)
        platform = self.get_argument('platform', default="Linux", strip=True)
        with DBContext('r', const.DEFAULT_DB_KEY) as session:
            data = session \
                .query(AWSRiUsageReport) \
                .filter(AWSRiUsageReport.date >= start_day) \
                .filter(AWSRiUsageReport.date < end_day) \
                .filter(AWSRiUsageReport.family == family) \
                .filter(AWSRiUsageReport.size == size) \
                .filter(AWSRiUsageReport.platform == platform) \
                .all()
            histories = [model_to_dict(e) for e in data]
            for history in histories:
                # Normalize decimals to fixed 5-digit precision strings.
                history["total_running"] = str(decimal.Decimal(history["total_running"]).quantize(decimal.Decimal('0.00000')))
                history["total_ri"] = str(decimal.Decimal(history["total_ri"]).quantize(decimal.Decimal('0.00000')))
                history["coverage_rate"] = str(decimal.Decimal(history["coverage_rate"]).quantize(decimal.Decimal('0.00000')))
            return self.write(dict(code=0, msg='获取成功', count=len(histories), data=histories))
class UsageAddRiByDayHanlder(BaseHandler):
    """Enqueue a background task that (re)loads the ri_usage data."""

    def get(self, *args, **kwargs):
        pass

    def post(self, *args, **kwargs):
        # Insert a pending (status=0) task row; a background worker
        # processing AwsTaskQueue picks it up later.
        task = AwsTaskQueue(task_name="add_ri_usage",
                            date=datetime.now(),
                            status=0)
        with DBContext('wr', const.DEFAULT_DB_KEY) as session:
            session.add(task)
            session.commit()
        return self.write(dict(code=0, msg='任务添加成功,后台执行添加ri_usage数据库', ))
class RiUsage30DayHanlder(BaseHandler):
    """Per-instance-type RI coverage-rate series over the last 30 days."""

    def get(self, *args, **kwargs):
        end_day = datetime.now()
        start_day = end_day - timedelta(days=30)
        end_day = end_day.strftime("%Y-%m-%d")
        start_day = start_day.strftime("%Y-%m-%d")
        with DBContext('r', const.DEFAULT_DB_KEY) as session:
            data = session \
                .query(AWSRiUsageReport) \
                .filter(AWSRiUsageReport.date >= start_day) \
                .filter(AWSRiUsageReport.date < end_day).all()
            histories = [model_to_dict(e) for e in data]
            for history in histories:
                # Normalize decimals to fixed 5-digit precision strings.
                history["total_running"] = str(decimal.Decimal(history["total_running"]).quantize(decimal.Decimal('0.00000')))
                history["total_ri"] = str(decimal.Decimal(history["total_ri"]).quantize(decimal.Decimal('0.00000')))
                history["coverage_rate"] = str(decimal.Decimal(history["coverage_rate"]).quantize(decimal.Decimal('0.00000')))
            # Build the list of day datetimes covering the window.
            date_list = []
            begin_date = datetime.strptime(start_day, "%Y-%m-%d")
            end_date = datetime.strptime(end_day, '%Y-%m-%d')
            while begin_date < end_date:
                date_str = begin_date
                date_list.append(date_str)
                begin_date += timedelta(days=1)
            # Shape: {"family.size.platform": {date: coverage_rate or None}}.
            data_dict = {}
            for i in histories:
                name = i["family"] + "." + i["size"] + "." + i["platform"]
                if name in data_dict.keys():
                    data_dict[name].update({i["date"]: i["coverage_rate"]})
                else:
                    data_dict.update({name: {}})
                    # Pre-fill every day with None so gaps are explicit.
                    # NOTE(review): keys here are str(datetime) while the keys
                    # written from DB rows are i["date"] — confirm both render
                    # to the same string format, otherwise days appear twice.
                    for date in date_list:
                        data_dict[name].update({str(date): None})
                    data_dict[name].update({i["date"]: i["coverage_rate"]})
            return self.write(dict(code=0, msg='获取成功', count=len(histories), data=data_dict))
# URL routing table for the RI-usage endpoints defined above.
aws_ri_usage_urls = [
    (r"/v1/ri-usage/today/", RiUsageTodayHanlder),
    (r"/v1/ri-usage/history/", RiUsageHistoryHanlder),
    (r"/v1/ri-usage/add/byday/", UsageAddRiByDayHanlder),
    (r"/v1/ri-usage/30day/", RiUsage30DayHanlder),
]
| 46.256983 | 123 | 0.577536 |
695def4b5e93f3e70a466eba8c6e6c61a37ebb9b | 5,386 | py | Python | socceraction/xpoints.py | maartensijmkens/socceraction | 357efe00b50ce071703005bf2a7dfd5bf60d765f | [
"MIT"
] | null | null | null | socceraction/xpoints.py | maartensijmkens/socceraction | 357efe00b50ce071703005bf2a7dfd5bf60d765f | [
"MIT"
] | null | null | null | socceraction/xpoints.py | maartensijmkens/socceraction | 357efe00b50ce071703005bf2a7dfd5bf60d765f | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from tqdm import tqdm
class ExpectedPoints:
    """Expected-points (xP) model.

    Estimates P(win) and P(draw) per (timeframe, clipped score difference)
    cell from historical games, then converts them to expected league points.
    """

    def __init__(self, D: int = 2, T=8):
        # D: score differences are clipped to [0, D].
        # T: number of timeframes (6 fifteen-minute slices of regular time
        # plus one per extra period — see get_timeframe).
        self.D = D
        self.xwin = np.full((T, D + 1), np.nan)
        self.xdraw = np.full((T, D + 1), np.nan)

    def fit(self, actions: pd.DataFrame, games: pd.DataFrame):
        """Estimate the win/draw probability matrices from historical games."""
        self.score_progression = self.get_score_progression(actions, games)
        sp = self.score_progression
        win_matrix = np.zeros_like(self.xwin)
        draw_matrix = np.zeros_like(self.xdraw)
        count_matrix = np.zeros_like(self.xwin)
        for i, game in games.iterrows():
            game_id = game['game_id']
            home_score = game['home_score']
            away_score = game['away_score']
            diff = home_score - away_score
            for t in range(8):
                # Last recorded score at or before timeframe t.
                cur_score = sp[(sp.game_id == game_id) & (sp.apply(self.get_timeframe, axis=1) <= t)].tail(1)
                cur_home_score, cur_away_score = \
                    cur_score.iloc[0][['home_score', 'away_score']]
                cur_diff = int(cur_home_score - cur_away_score)
                d = np.clip(abs(cur_diff), 0, self.D)
                # Counts are doubled so a tied intermediate score can
                # contribute "half a win" (the += 1 case below).
                if diff * cur_diff > 0:
                    win_matrix[t, d] += 2
                if diff == 0:
                    draw_matrix[t, d] += 2
                if cur_diff == 0 and diff != 0:
                    win_matrix[t, d] += 1
                count_matrix[t, d] += 2
        # Leave cells with no observations as NaN, then fill them below.
        np.divide(win_matrix, count_matrix, out=self.xwin, where=(count_matrix != 0))
        np.divide(draw_matrix, count_matrix, out=self.xdraw, where=(count_matrix != 0))
        self.fill_gaps(self.xwin)
        self.fill_gaps(self.xdraw)

    def fill_gaps(self, matrix):
        # replace nans with closest value with the same score diff
        for t, row in enumerate(matrix):
            for s, cell in enumerate(row):
                if np.isnan(cell):
                    d = np.argwhere(np.logical_not(np.isnan(matrix[:, s]))).flatten()
                    # NOTE(review): argmin(d - t) selects the smallest index
                    # in d, not the one nearest to t — for "closest" this
                    # presumably should be argmin(abs(d - t)); confirm intent.
                    f = np.argmin(d - t)
                    matrix[t, s] = matrix[d[f], s]

    def predict(self, t: int, diff: int):
        """ Predict the expected points (xP) for the given score difference and time """
        d = np.clip(abs(diff), 0, self.D)
        # a: xP when leading/level (diff >= 0); b: xP when trailing.
        a = 3 * self.xwin[t, d] + self.xdraw[t, d]
        b = 3 * (1 - self.xwin[t, d] - self.xdraw[t, d]) + self.xdraw[t, d]
        return np.where(diff >= 0, a, b)

    def get_timeframe(self, action):
        # Map an action to a timeframe index: 0-5 are 15-minute slices of
        # the two regular periods, 6+ index the extra periods.
        period_id = action['period_id']
        time_seconds = action['time_seconds']
        if period_id <= 2:
            t = 3 * (period_id - 1) + np.clip(time_seconds // (15 * 60), 0, 2)
        else:
            t = 6 + (period_id - 3)
        return int(t)

    def get_score_diff(self, action: pd.DataFrame):
        """Score difference from the acting team's perspective at action time."""
        sp = self.score_progression
        game_id, team_id, period_id, time_seconds = \
            action[['game_id', 'team_id', 'period_id', 'time_seconds']]
        # Last scoring event at or before this action.
        current_score = sp[
            (sp.game_id == game_id) &
            (((sp.period_id == period_id) & (sp.time_seconds <= time_seconds))
             | (sp.period_id < period_id))
        ].tail(1)
        home_team_id, away_team_id, home_score, away_score = \
            current_score.iloc[0][['home_team_id', 'away_team_id', 'home_score', 'away_score']]
        assert (team_id in [home_team_id, away_team_id])
        diff = int(home_score - away_score)
        if home_team_id == team_id:
            return diff
        elif away_team_id == team_id:
            return -diff

    def get_score_progression(self, actions: pd.DataFrame, games: pd.DataFrame):
        """One row per scoring event (plus kickoff) with the running score."""
        score_progression = []
        cols = ['game_id', 'home_team_id', 'away_team_id', 'period_id', 'time_seconds', 'home_score', 'away_score']
        for game_id, game_actions in actions.groupby(['game_id']):
            game = games[games.game_id == game_id]
            home_team = game.iloc[0]['home_team_id']
            away_team = game.iloc[0]['away_team_id']
            home_score = game.iloc[0]['home_score']
            away_score = game.iloc[0]['away_score']
            shots = game_actions[
                (game_actions.type_name == "shot")
                | (game_actions.type_name == "shot_penalty")
                | (game_actions.type_name == "shot_freekick")
            ]
            # period_id < 5 excludes penalty shoot-out goals.
            goals = shots[(shots.result_name == "success") & (shots.period_id < 5)]
            owngoals = game_actions[game_actions.result_name == "owngoal"]
            allgoals = pd.concat([goals, owngoals]).sort_values(["period_id", "time_seconds"])
            cur_score = [0, 0]
            teams = (home_team, away_team)
            # Kickoff row: 0-0 at the start of period 1.
            score_progression.append([game_id, home_team, away_team, 1, 0] + cur_score)
            for _, goal in allgoals.iterrows():
                if (goal['result_name'] == "success"):
                    t = teams.index(goal['team_id'])
                if (goal['result_name'] == "owngoal"):
                    # An own goal counts for the opposing team.
                    t = (teams.index(goal['team_id']) + 1) % 2
                cur_score[t] += 1
                score_progression.append([game_id, home_team, away_team, goal['period_id'], goal['time_seconds']] + cur_score)
            # Sanity check: replayed score must match the recorded final score.
            assert (cur_score[0] == home_score and cur_score[1] == away_score)
        return pd.DataFrame(score_progression, columns=cols).sort_values(["game_id", "period_id", "time_seconds"])
a60b3dcd5864f0f9704b45b335b1a1971037604e | 2,079 | py | Python | src/grc_minify_db.py | Belethors-General-Mods/grc-temp-repo | 99f2e41e53ae2d2247446d2197950db4ecf7ed94 | [
"MIT"
] | null | null | null | src/grc_minify_db.py | Belethors-General-Mods/grc-temp-repo | 99f2e41e53ae2d2247446d2197950db4ecf7ed94 | [
"MIT"
] | null | null | null | src/grc_minify_db.py | Belethors-General-Mods/grc-temp-repo | 99f2e41e53ae2d2247446d2197950db4ecf7ed94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Create a minified, stripped-down database copy."""
from argparse import ArgumentParser
from typing import Any, Dict, List
from grc_mod import Mod
import yaml
from zenlog import log
# Top-level keys copied into each minified mod entry.
WANTED_KEYS = [
    'deprecated',
    'gems_category',
    'modid',
    'name',
    'tags',
    # oldrim and sse are special-cased
    # 'oldrim',
    # 'sse',
]

# Keys kept from each mod's 'oldrim' sub-dict.
OLDRIM_WANTED_KEYS = [
    'is_oldrim',
]

# Keys kept from each mod's 'sse' sub-dict.
SSE_WANTED_KEYS = [
    'console_compat',
    'is_sse',
]
def main() -> None:
    """Load, strip, minify, and save the database."""

    def strip_mod(mod: Dict[str, Any]) -> Dict[str, Any]:
        # Keep only the whitelisted top-level keys plus filtered
        # oldrim/sse sub-dicts.
        slim: Dict[str, Any] = {k: v for k, v in mod.items() if k in WANTED_KEYS}
        slim['oldrim'] = {k: v for k, v in mod['oldrim'].items()
                          if k in OLDRIM_WANTED_KEYS}
        slim['sse'] = {k: v for k, v in mod['sse'].items()
                       if k in SSE_WANTED_KEYS}
        return slim

    parser = ArgumentParser(description='Minify and strip the YAML database')
    parser.add_argument('input', type=str)
    parser.add_argument('output', type=str)
    args = parser.parse_args()

    log.info('Opening database...')
    with open(args.input, 'r', encoding='utf8') as in_h:
        raw_yaml = in_h.read()
    log.info(f'Opened database (size: {len(raw_yaml.encode("utf8"))})')

    log.info('Reading database...')
    database: List[Dict[str, Any]] = yaml.safe_load(raw_yaml)

    log.info('Stripping database...')
    database_mini: List[Dict[str, Any]] = [strip_mod(mod) for mod in database]

    log.info('Converting databse back to YAML...')
    yaml_str: str = yaml.dump(database_mini)
    log.info(f'New size: {len(yaml_str.encode("utf8"))}')

    log.info(f'Saving database to {args.output}')
    with open(args.output, 'w', encoding='utf8') as out_h:
        out_h.write(yaml_str)
    log.info('Done')
try:
main()
except Exception as exc: # pylint: disable=W0703
log.critical(str(exc))
| 26.316456 | 79 | 0.623377 |
13e427439cd4619ef576a19dba283399675477b8 | 374 | py | Python | tests/test_utils.py | arrigonialberto86/mixture_nets | 9965ac9c7f378eb7d7e6277e609574344602152b | [
"MIT"
] | null | null | null | tests/test_utils.py | arrigonialberto86/mixture_nets | 9965ac9c7f378eb7d7e6277e609574344602152b | [
"MIT"
] | null | null | null | tests/test_utils.py | arrigonialberto86/mixture_nets | 9965ac9c7f378eb7d7e6277e609574344602152b | [
"MIT"
] | null | null | null | import unittest
from mixture_net.utils import slice_parameter_vectors
import numpy as np
class TestUtils(unittest.TestCase):
    """Unit tests for mixture_net.utils helpers."""

    def test_slice_fn(self):
        # One mixture component, 3 parameters: alpha/mu/gamma each receive
        # one slice of the flat parameter vector.
        parameters = np.array([[1, 2, 3]])
        components, no_parameters = 1, 3
        alpha, mu, gamma = slice_parameter_vectors(parameters, components, no_parameters)
        self.assertEqual(alpha.shape[0], 1)
| 31.166667 | 89 | 0.716578 |
415551877ea2112133af84216baa2237b1aca41e | 1,476 | py | Python | src/kgtests/src/cleaning/test_relation_vocabulary.py | HermannKroll/KGExtractionToolbox | c17a55dd1fa098f5033b7765ed0f80d3abb44cb7 | [
"MIT"
] | 6 | 2021-09-17T09:49:59.000Z | 2021-12-06T10:07:01.000Z | src/kgtests/src/cleaning/test_relation_vocabulary.py | HermannKroll/KGExtractionToolbox | c17a55dd1fa098f5033b7765ed0f80d3abb44cb7 | [
"MIT"
] | null | null | null | src/kgtests/src/cleaning/test_relation_vocabulary.py | HermannKroll/KGExtractionToolbox | c17a55dd1fa098f5033b7765ed0f80d3abb44cb7 | [
"MIT"
] | 1 | 2021-09-18T17:56:12.000Z | 2021-09-18T17:56:12.000Z | import unittest
from kgextractiontoolbox.cleaning.relation_vocabulary import RelationVocabulary
from kgtests import util
class RelationVocabularyTest(unittest.TestCase):
    """Tests loading and querying the pharmaceutical relation vocabulary."""

    def setUp(self) -> None:
        # Fresh vocabulary loaded from the test resource before each test.
        self.vocab = RelationVocabulary()
        self.vocab.load_from_json(util.get_test_resource_filepath('cleaning/pharm_relation_vocab.json'))

    def test_relation_count(self):
        self.assertEqual(10, len(self.vocab.relation_dict))

    def test_relation_treats(self):
        treats_syn = self.vocab.get_relation_synonyms('treats')
        self.assertEqual(8, len(treats_syn))
        for s in ["prevent*", "use", "improv*", "promot*", "sensiti*", "aid", "treat*", "*therap*"]:
            self.assertIn(s, treats_syn)

    def test_relation_inhibits(self):
        inhibits_syn = self.vocab.get_relation_synonyms('inhibits')
        self.assertEqual(4, len(inhibits_syn))
        for s in ["disrupt*", "suppres*", "inhibit*", "disturb*"]:
            self.assertIn(s, inhibits_syn)

    def test_broken_relation_vocab(self):
        # Malformed vocabulary files must raise ValueError on load.
        vocab2 = RelationVocabulary()
        with self.assertRaises(ValueError):
            vocab2.load_from_json(util.get_test_resource_filepath('cleaning/pharm_relation_vocab_broken.json'))

    def test_broken_relation_vocab_2(self):
        vocab2 = RelationVocabulary()
        with self.assertRaises(ValueError):
            vocab2.load_from_json(util.get_test_resource_filepath('cleaning/pharm_relation_vocab_broken_2.json'))
| 39.891892 | 113 | 0.70935 |
68c32c9c7cf6889e8e8720b72c7bf01d90ae4dce | 8,772 | py | Python | lingvodoc/utils/doc_parser.py | ankan2013/lingvodoc | 08eb70906104e703f5465538067775e6b1cafd0e | [
"Apache-2.0"
] | 5 | 2017-03-30T18:02:11.000Z | 2021-07-20T16:02:34.000Z | lingvodoc/utils/doc_parser.py | ankan2013/lingvodoc | 08eb70906104e703f5465538067775e6b1cafd0e | [
"Apache-2.0"
] | 15 | 2016-02-24T13:16:59.000Z | 2021-09-03T11:47:15.000Z | lingvodoc/utils/doc_parser.py | ankan2013/lingvodoc | 08eb70906104e703f5465538067775e6b1cafd0e | [
"Apache-2.0"
] | 22 | 2015-09-25T07:13:40.000Z | 2021-08-04T18:08:26.000Z | import json
import re
from uniparser_erzya import ErzyaAnalyzer
from uniparser_meadow_mari import MeadowMariAnalyzer
from uniparser_udmurt import UdmurtAnalyzer
from uniparser_moksha import MokshaAnalyzer
from uniparser_komi_zyrian import KomiZyrianAnalyzer
from nltk.tokenize import RegexpTokenizer
import csv
import os
import tempfile
import bs4
import requests
import io
def print_to_str(*args, **kwargs):
    """Like print(), but return the rendered text instead of writing stdout."""
    buffer = io.StringIO()
    print(*args, file=buffer, **kwargs)
    try:
        return buffer.getvalue()
    finally:
        buffer.close()
# Module-level counter giving every generated <span> a unique id.
# NOTE(review): module-global mutable state — not safe for concurrent use.
span_id_counter = 0


def generate_html_wrap(word, ana_tag_list):
    """Wrap *word* in nested "result" <span>s built from its <ana> analyses.

    Returns the bare word unchanged when any analysis has all-empty
    attributes (i.e. the word is effectively unparsed).
    """
    json_list = list()
    for ana_tag in ana_tag_list:
        attr_dict = dict()
        is_unparsed = True
        for attr in ana_tag.attrs:
            attr_dict[attr] = ana_tag[attr]
            if (ana_tag[attr]):
                is_unparsed = False
        if (is_unparsed):
            return word
        json_list.append(attr_dict)
    global span_id_counter
    span_id_counter += 1
    wrap = "<span class=\"unverified\"" + " id=" + str(span_id_counter) + ">"
    for attr_json in json_list:
        span_id_counter += 1
        # ensure_ascii=False keeps non-Latin analysis values readable.
        encoded_attrs = ((json.dumps(attr_json, ensure_ascii=False)).encode('utf8')).decode()
        wrap += "<span class=\"result\"" + " id=" + str(span_id_counter) + ">" + encoded_attrs + "</span>"
    wrap += word + "</span>"
    return wrap
def insert_parser_output_to_text(text, parser_output):
    """Splice the parser's <w> annotations back into the original text.

    Walks the <w> tags in document order, finds each word's next occurrence
    in *text*, replaces it with the HTML produced by generate_html_wrap(),
    and keeps everything between matches verbatim.
    """
    soup = bs4.BeautifulSoup(parser_output, 'html.parser')
    w_tag_list = soup("w")
    search_start_index = 0
    result_list = []
    for w_tag in w_tag_list:
        # The word form is the tag's last child; preceding children are <ana>.
        word = w_tag.contents[-1]
        match_index = text.find(word, search_start_index)
        result_list.append(text[search_start_index:match_index])
        if (len(w_tag.contents) > 1):
            result_list.append(generate_html_wrap(word, w_tag.contents[0:-1]))
        search_start_index = match_index + len(word)
    result_list.append(text[search_start_index:])
    result = "".join(result_list)
    return result
def timarkh_uniparser(dedoc_output, lang, disambiguate=False):
    """Run a uniparser analyzer over *dedoc_output* and annotate it.

    :param dedoc_output: HTML-ish text produced by dedoc.
    :param lang: one of 'udm', 'erzya', 'moksha', 'komi_zyryan',
        'meadow_mari'.
    :param disambiguate: when True, request the analyzer's CG-based
        disambiguation. Fixes the timarkh_udm/timarkh_meadow_mari wrappers
        below, which call this function with disambiguate=True and
        previously raised TypeError; the default False keeps other call
        sites on the library's default behaviour.
    :raises ValueError: for an unknown *lang* (previously an obscure
        UnboundLocalError on the analyzer variable).
    :return: *dedoc_output* with analyzed words wrapped in HTML spans.
    """
    wordlist = list()
    # Tokenize on words, keeping hyphenated compounds as single tokens;
    # markup tags and stray "nbsp" are blanked out first.
    tokenizer = RegexpTokenizer(r'(?!\w+(?:-\w+)+)\w+|\w+(?:-\w+)+')
    for word in tokenizer.tokenize(re.sub(r"(<.*?>)|nbsp", " ", dedoc_output)):
        wordlist.append(word)
    if lang == 'udm':
        analyzer = UdmurtAnalyzer(mode='strict')
    elif lang == 'erzya':
        analyzer = ErzyaAnalyzer(mode='strict')
    elif lang == 'moksha':
        analyzer = MokshaAnalyzer(mode='strict')
    elif lang == 'komi_zyryan':
        analyzer = KomiZyrianAnalyzer(mode='strict')
    elif lang == 'meadow_mari':
        analyzer = MeadowMariAnalyzer(mode='strict')
    else:
        raise ValueError('Unsupported language: {}'.format(lang))
    if disambiguate:
        parser_output = analyzer.analyze_words(wordlist, format="xml",
                                               disambiguate=True)
    else:
        # Omit the keyword to preserve the library default for old callers.
        parser_output = analyzer.analyze_words(wordlist, format="xml")
    parser_output_str = print_to_str(parser_output)
    return insert_parser_output_to_text(dedoc_output, parser_output_str)
def apertium_parser(dedoc_output, apertium_path, lang):
    """Annotate *dedoc_output* ('tat' or 'kaz') via the Apertium pipeline.

    Runs two Apertium modes (biltrans + morph) over the tag-stripped text,
    merges their outputs into <w>/<ana> markup via the nested reformat(),
    then splices the result back into the original text.
    """
    def reformat(biltrans_filename, morph_filename):
        # Apertium stream elements that carry no lexical content.
        skip_list = ["guio", "cm", "sent", "lpar", "rpar", "lquot", "rquot"]
        biltrans_file = open(biltrans_filename, "r", encoding="UTF-8")
        morph_file = open(morph_filename, "r", encoding="UTF-8")
        biltrans = biltrans_file.read()
        morph = morph_file.read()
        # Apertium wraps each analyzed unit in ^...$.
        biltrans_elements = re.findall(r"\^(.+?)\$", biltrans)
        morph_elements = re.findall(r"\^(.+?)\$", morph)
        parsed = ""

        def is_conform(lex_1, lex_2):
            # True when one lexeme is a (case-insensitive) substring of the other.
            if lex_1.lower().find(lex_2.lower()) == -1 and lex_2.lower().find(lex_1.lower()) == -1:
                return False
            else:
                return True

        def biltrans_lex(i):
            # Lexeme portion before the first grammatical tag.
            return biltrans_elements[i][0: biltrans_elements[i].find("<")].lower()

        def gr(elem):
            # Grammatical tags (<...>) of the source side of the element.
            gr_list = list()
            for match in re.findall(r"<(\w+)>", elem.split("/")[0]):
                gr_list.append(match)
            return gr_list

        def add_gr(gr_list):
            # Render gr="POS,tag1,tag2" — first tag upper-cased as the POS.
            # NOTE(review): the parameter is immediately shadowed by the
            # enclosing loop's biltrans element; gr_list as passed is unused.
            to_add = " gr=\""
            gr_list = gr(biltrans_elements[i])
            if gr_list:
                k = 0
                N = len(gr_list)
                for gr_elem in gr_list:
                    if k == 0:
                        to_add += gr_elem.upper()
                        if N > 1:
                            to_add += ","
                    elif k < N - 1:
                        to_add += gr_elem + ","
                    else:
                        to_add += gr_elem
                    k += 1
            to_add += "\""
            return to_add

        def trans(elem):
            # Unique Russian translations from the target side(s).
            trans_list = list()
            for match in re.findall(r"/(\w+)<", biltrans_elements[i]):
                if match not in trans_list:
                    trans_list.append(match)
            return trans_list

        def add_variant(lex, gr, trans, parts=" parts=\"\"", gloss=" gloss=\"\""):
            return "<ana " + lex + gr + parts + gloss + trans + " ></ana>"

        i = -1
        for morph_element in morph_elements:
            i += 1
            if len(morph_element) == 0:
                continue
            new = ""
            new += "<w>"
            # '*' marks an unanalyzed word — emit an empty variant.
            if morph_element.find("*") != -1:
                new += add_variant(" lex=\"\"", " gr=\"\"", " trans_ru=\"\"")
                continue
            continue_flag = False
            for skip_elem in skip_list:
                if morph_element.find(skip_elem) != -1:
                    continue_flag = True
                    break
            if continue_flag:
                continue
            morph_slash_split_list = morph_element.split("/")
            orig = morph_slash_split_list[0]
            lex = morph_slash_split_list[1][:morph_slash_split_list[1].find("<")]
            # Re-align against the biltrans stream: the two outputs may be
            # offset by a few elements.
            offset = 0
            match_found = True
            while not is_conform(lex, biltrans_lex(i + offset)):
                if offset > 2:
                    match_found = False
                    break
                offset += 1
            if not match_found:
                continue
            i += offset
            new_lex = "lex=" + "\"" + biltrans_lex(i).lower() + "\""
            new_gr = add_gr(gr(biltrans_elements[i]))
            trans_list = trans(biltrans_elements[i])
            if not trans_list and new_gr == " gr=\"\"":
                new += add_variant(" lex=\"\"", " gr=\"\"", " trans_ru=\"\"")
                continue
            # One <ana> variant per translation.
            if trans_list:
                for trans_elem in trans_list:
                    new += add_variant(new_lex, new_gr, " trans_ru=\"" + trans_elem.lower() + "\"")
            else:
                new += add_variant(new_lex, new_gr, " trans_ru=\"\"")
            new += orig
            new += "</w>"
            new += "\n"
            parsed += new
        biltrans_file.close()
        morph_file.close()
        return parsed

    biltrans_file_id, biltrans_filename = tempfile.mkstemp()
    morph_file_id, morph_filename = tempfile.mkstemp()
    dedoc_output_without_tags = re.sub(r"(<.*?>)|nbsp", " ", dedoc_output)
    s1 = 0
    s2 = 0
    # NOTE(review): the document text is interpolated into a shell command
    # — quotes/backticks/$ in the input reach the shell (command injection
    # risk and breakage). Prefer subprocess.run with shell=False and piping
    # via stdin.
    if lang == 'tat':
        s1 = os.system("echo \"" + dedoc_output_without_tags + "\" | apertium -d " + apertium_path + "/apertium-tat-rus tat-rus-biltrans >> " + biltrans_filename)
        s2 = os.system("echo \"" + dedoc_output_without_tags + "\" | apertium -d " + apertium_path + "/apertium-tat-rus tat-rus-morph >> " + morph_filename)
    if lang == 'kaz':
        s1 = os.system("echo \"" + dedoc_output_without_tags + "\" | apertium -d " + apertium_path + "/apertium-kaz-rus kaz-rus-biltrans >> " + biltrans_filename)
        s2 = os.system("echo \"" + dedoc_output_without_tags + "\" | apertium -d " + apertium_path + "/apertium-kaz-rus kaz-rus-morph >> " + morph_filename)
    if s1 != 0 or s2 != 0:
        raise ValueError("An error occured during Apertium parser process running")
    parser_output = reformat(biltrans_filename, morph_filename)
    os.remove(biltrans_filename)
    os.remove(morph_filename)
    return insert_parser_output_to_text(dedoc_output, parser_output)
# Thin per-language entry points over the generic parsers above.
def timarkh_udm(dedoc_output):
    # NOTE(review): timarkh_uniparser() does not declare a `disambiguate`
    # parameter — this call raises TypeError unless its signature is extended.
    return timarkh_uniparser(dedoc_output, 'udm', disambiguate=True)


def timarkh_erzya(dedoc_output):
    return timarkh_uniparser(dedoc_output, 'erzya')


def timarkh_moksha(dedoc_output):
    return timarkh_uniparser(dedoc_output, 'moksha')


def timarkh_komi_zyryan(dedoc_output):
    return timarkh_uniparser(dedoc_output, 'komi_zyryan')


def timarkh_meadow_mari(dedoc_output):
    # NOTE(review): same `disambiguate` signature mismatch as timarkh_udm.
    return timarkh_uniparser(dedoc_output, 'meadow_mari', disambiguate=True)


def apertium_tat_rus(dedoc_output, apertium_path):
    return apertium_parser(dedoc_output, apertium_path, 'tat')


def apertium_kaz_rus(dedoc_output, apertium_path):
    return apertium_parser(dedoc_output, apertium_path, 'kaz')
| 33.738462 | 163 | 0.582535 |
324feda40b79846cbdf9f9fee67aa79794e2a32c | 1,484 | py | Python | config/urls.py | halitcelik/usedtobeforest.com | fb529675a0f164bc9b81e6796fa02aca10cd7b8a | [
"MIT"
] | 2 | 2021-08-02T20:57:31.000Z | 2021-08-03T09:12:52.000Z | config/urls.py | halitcelik/usedtobeforest.com | fb529675a0f164bc9b81e6796fa02aca10cd7b8a | [
"MIT"
] | 1 | 2022-03-31T07:20:09.000Z | 2022-03-31T07:20:09.000Z | config/urls.py | halitcelik/usedtobeforest | fb529675a0f164bc9b81e6796fa02aca10cd7b8a | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
urlpatterns = [
path("", include("usedtobeforest.main.urls")),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("usedtobeforest.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 34.511628 | 85 | 0.641509 |
7ffcdb2e32073688426a34bc699c2cfc3eb603a3 | 12,550 | py | Python | trove-11.0.0/trove/guestagent/strategies/replication/experimental/postgresql_impl.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 1 | 2020-04-08T07:42:19.000Z | 2020-04-08T07:42:19.000Z | trove/guestagent/strategies/replication/experimental/postgresql_impl.py | ttcong/trove | 1db2dc63fdd5409eafccebe79ff2900d0535ed13 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | trove/guestagent/strategies/replication/experimental/postgresql_impl.py | ttcong/trove | 1db2dc63fdd5409eafccebe79ff2900d0535ed13 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2014 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from oslo_log import log as logging
from oslo_utils import netutils
from trove.common import cfg
from trove.common.db.postgresql import models
from trove.common import exception
from trove.common.i18n import _
from trove.common import stream_codecs
from trove.common import utils
from trove.guestagent.backup.backupagent import BackupAgent
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.strategies import backup
from trove.guestagent.strategies.replication import base
AGENT = BackupAgent()
CONF = cfg.CONF
REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.experimental' \
'.postgresql_impl'
LOG = logging.getLogger(__name__)
TRIGGER_FILE = '/tmp/postgresql.trigger'
REPL_USER = 'replicator'
SLAVE_STANDBY_OVERRIDE = 'SlaveStandbyOverride'
class PostgresqlReplicationStreaming(base.Replication):
def __init__(self, *args, **kwargs):
super(PostgresqlReplicationStreaming, self).__init__(*args, **kwargs)
@property
def repl_backup_runner(self):
return backup.get_backup_strategy('PgBaseBackup',
REPL_BACKUP_NAMESPACE)
@property
def repl_incr_backup_runner(self):
return backup.get_backup_strategy('PgBaseBackupIncremental',
REPL_BACKUP_NAMESPACE)
@property
def repl_backup_extra_opts(self):
return CONF.backup_runner_options.get('PgBaseBackup', '')
def get_master_ref(self, service, snapshot_info):
master_ref = {
'host': netutils.get_my_ipv4(),
'port': cfg.get_configuration_property('postgresql_port')
}
return master_ref
def backup_required_for_replication(self):
return True
    def snapshot_for_replication(self, context, service,
                                 location, snapshot_info):
        """Create (or reuse) the base backup a new replica restores from.

        Returns (snapshot_id, log_position) where log_position carries the
        replication user's credentials for the replica.
        """
        snapshot_id = snapshot_info['id']
        replica_number = snapshot_info.get('replica_number', 1)

        LOG.debug("Acquiring backup for replica number %d.", replica_number)
        # Only create a backup if it's the first replica
        if replica_number == 1:
            AGENT.execute_backup(
                context, snapshot_info, runner=self.repl_backup_runner,
                extra_opts=self.repl_backup_extra_opts,
                incremental_runner=self.repl_incr_backup_runner)
        else:
            LOG.info("Using existing backup created for previous replica.")

        # Replicas need the replication user's credentials to connect.
        repl_user_info = self._get_or_create_replication_user(service)

        log_position = {
            'replication_user': repl_user_info
        }

        return snapshot_id, log_position
    def _get_or_create_replication_user(self, service):
        """There are three scenarios we need to deal with here:
        - This is a fresh master, with no replicator user created.
          Generate a new u/p
        - We are attaching a new slave and need to give it the login creds
          Send the creds we have stored in PGDATA/.replpass
        - This is a failed-over-to slave, who will have the replicator user
          but not the credentials file. Recreate the repl user in this case
        """
        LOG.debug("Checking for replicator user")
        # The password is cached in PGDATA/.replpass between calls.
        pwfile = os.path.join(service.pgsql_data_dir, ".replpass")
        admin = service.build_admin()
        if admin.user_exists(REPL_USER):
            if operating_system.exists(pwfile, as_root=True):
                LOG.debug("Found existing .replpass, returning pw")
                pw = operating_system.read_file(pwfile, as_root=True)
            else:
                # Failed-over slave: user exists but password is unknown,
                # so drop and recreate it with a fresh password.
                LOG.debug("Found user but not .replpass, recreate")
                u = models.PostgreSQLUser(REPL_USER)
                admin._drop_user(context=None, user=u)
                pw = self._create_replication_user(service, admin, pwfile)
        else:
            LOG.debug("Found no replicator user, create one")
            pw = self._create_replication_user(service, admin, pwfile)

        repl_user_info = {
            'name': REPL_USER,
            'password': pw
        }
        return repl_user_info
    def _create_replication_user(self, service, admin, pwfile):
        """Create the replication user. Unfortunately, to be able to
        run pg_rewind, we need SUPERUSER, not just REPLICATION privilege
        """
        pw = utils.generate_random_password()
        # Persist the password so it can be handed to future replicas;
        # owned by and readable only by the postgres user.
        operating_system.write_file(pwfile, pw, as_root=True)
        operating_system.chown(pwfile, user=service.pgsql_owner,
                               group=service.pgsql_owner, as_root=True)
        operating_system.chmod(pwfile, FileMode.SET_USR_RWX(),
                               as_root=True)

        repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw)
        admin._create_user(context=None, user=repl_user)
        admin.alter_user(None, repl_user, True,
                         'REPLICATION', 'SUPERUSER', 'LOGIN')

        return pw
    def enable_as_master(self, service, master_config, for_failover=False):
        """For a server to be a master in postgres, we need to enable
        the replication user in pg_hba and ensure that WAL logging is
        at the appropriate level (use the same settings as backups)
        """
        LOG.debug("Enabling as master, with cfg: %s ", master_config)
        self._get_or_create_replication_user(service)
        # NOTE(review): 0.0.0.0/0 accepts replication connections from any
        # host (md5 password-protected) — confirm this scope is intended.
        hba_entry = "host replication replicator 0.0.0.0/0 md5 \n"

        # Append via a copy under /tmp, then move it back into place with
        # the restrictive permissions pg_hba.conf requires.
        tmp_hba = '/tmp/pg_hba'
        operating_system.copy(service.pgsql_hba_config, tmp_hba,
                              force=True, as_root=True)
        operating_system.chmod(tmp_hba, FileMode.SET_ALL_RWX(),
                               as_root=True)
        with open(tmp_hba, 'a+') as hba_file:
            hba_file.write(hba_entry)
        operating_system.copy(tmp_hba, service.pgsql_hba_config,
                              force=True, as_root=True)
        operating_system.chmod(service.pgsql_hba_config,
                               FileMode.SET_USR_RWX(),
                               as_root=True)
        operating_system.remove(tmp_hba, as_root=True)
        # Reload (not restart) so pg_hba changes take effect.
        service.reload_configuration()
    def enable_as_slave(self, service, snapshot, slave_config):
        """Adds appropriate config options to postgresql.conf, and writes out
        the recovery.conf file used to set up replication
        """
        LOG.debug("Got slave_config: %s", str(slave_config))
        self._write_standby_recovery_file(service, snapshot, sslmode='prefer')
        self.enable_hot_standby(service)
        # Ensure the WAL arch is empty before restoring
        service.recreate_wal_archive_dir()
    def detach_slave(self, service, for_failover):
        """Touch trigger file in to disable recovery mode

        PostgreSQL promotes a standby when the trigger_file named in
        recovery.conf appears; create it, then poll until the server
        reports it has left recovery.
        """
        LOG.info("Detaching slave, use trigger to disable recovery mode")
        operating_system.write_file(TRIGGER_FILE, '')
        operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner,
                               group=service.pgsql_owner, as_root=True)
        def _wait_for_failover():
            """Wait until slave has switched out of recovery mode"""
            return not service.pg_is_in_recovery()
        try:
            utils.poll_until(_wait_for_failover, time_out=120)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout occurred waiting for slave to exit "
                                 "recovery mode"))
    def cleanup_source_on_replica_detach(self, admin_service, replica_info):
        # Intentionally a no-op: with streaming replication nothing is left
        # on the source that must be torn down when a replica detaches.
        pass
    def _rewind_against_master(self, service):
        """Call pg_rewind to resync datadir against state of new master
        We should already have a recovery.conf file in PGDATA
        """
        # Pull the master's connection string out of the existing
        # recovery.conf; pg_rewind needs it as --source-server.
        rconf = operating_system.read_file(
            service.pgsql_recovery_config,
            codec=stream_codecs.KeyValueCodec(line_terminator='\n'),
            as_root=True)
        conninfo = rconf['primary_conninfo'].strip()
        # The recovery.conf file we want should already be there, but pg_rewind
        # will delete it, so copy it out first
        rec = service.pgsql_recovery_config
        tmprec = "/tmp/recovery.conf.bak"
        operating_system.move(rec, tmprec, as_root=True)
        cmd_full = " ".join(["pg_rewind",
                             '--target-pgdata=' + service.pgsql_data_dir,
                             '--source-server=' + conninfo])
        # pg_rewind must run as the postgres OS user against the local datadir.
        out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
                                 "-c", "%s" % cmd_full, check_exit_code=0)
        LOG.debug("Got stdout %(out)s and stderr %(err)s from pg_rewind",
                  {'out': str(out), 'err': str(err)})
        # Put recovery.conf back so the instance restarts as a standby.
        operating_system.move(tmprec, rec, as_root=True)
    def demote_master(self, service):
        """In order to demote a master we need to shutdown the server and call
        pg_rewind against the new master to enable a proper timeline
        switch.

        After the rewind the instance comes back up as a standby (the
        recovery.conf restored by _rewind_against_master points it at the
        new master).
        """
        service.stop_db()
        self._rewind_against_master(service)
        service.start_db()
    def connect_to_master(self, service, snapshot):
        """All that is required in postgresql to connect to a slave is to
        restart with a recovery.conf file in the data dir, which contains
        the connection information for the master.
        """
        # NOTE(review): assert is stripped when Python runs with -O; consider
        # raising explicitly if the recovery file is truly mandatory here.
        assert operating_system.exists(service.pgsql_recovery_config,
                                       as_root=True)
        service.restart()
    def _remove_recovery_file(self, service):
        # Deleting recovery.conf stops the instance starting as a standby.
        operating_system.remove(service.pgsql_recovery_config, as_root=True)
def _write_standby_recovery_file(self, service, snapshot,
sslmode='prefer'):
LOG.info("Snapshot data received: %s", str(snapshot))
logging_config = snapshot['log_position']
conninfo_params = \
{'host': snapshot['master']['host'],
'port': snapshot['master']['port'],
'repl_user': logging_config['replication_user']['name'],
'password': logging_config['replication_user']['password'],
'sslmode': sslmode}
conninfo = 'host=%(host)s ' \
'port=%(port)s ' \
'dbname=os_admin ' \
'user=%(repl_user)s ' \
'password=%(password)s ' \
'sslmode=%(sslmode)s ' % conninfo_params
recovery_conf = "standby_mode = 'on'\n"
recovery_conf += "primary_conninfo = '" + conninfo + "'\n"
recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n"
recovery_conf += "recovery_target_timeline='latest'\n"
operating_system.write_file(service.pgsql_recovery_config,
recovery_conf,
codec=stream_codecs.IdentityCodec(),
as_root=True)
operating_system.chown(service.pgsql_recovery_config,
user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
def enable_hot_standby(self, service):
# Only support pg version > 9.6, wal_level set to replica, and
# remove parameter "checkpoint_segments".
opts = {'hot_standby': 'on',
'wal_level': 'replica',
'wal_log_hints': 'on'}
service.configuration_manager.\
apply_system_override(opts, SLAVE_STANDBY_OVERRIDE)
def get_replica_context(self, service):
LOG.debug("Calling get_replica_context")
repl_user_info = self._get_or_create_replication_user(service)
log_position = {
'replication_user': repl_user_info
}
return {
'master': self.get_master_ref(None, None),
'log_position': log_position
}
| 40.879479 | 79 | 0.630199 |
2f20084d8a5f01b674cfbcf6cd88ca1b7742b74e | 760 | py | Python | voice.py | akshay6820/DeepWay | 1497473f246fb92c164d42d1112dbca860a7629a | [
"MIT"
] | 140 | 2018-06-23T18:14:07.000Z | 2022-02-21T16:56:03.000Z | voice.py | akshay6820/DeepWay | 1497473f246fb92c164d42d1112dbca860a7629a | [
"MIT"
] | 1 | 2022-03-18T13:40:36.000Z | 2022-03-18T13:40:36.000Z | voice.py | akshay6820/DeepWay | 1497473f246fb92c164d42d1112dbca860a7629a | [
"MIT"
] | 42 | 2018-06-25T22:18:29.000Z | 2021-12-11T08:05:10.000Z | from pygame import mixer
class Voice:
    """Spoken navigation cues played through pygame's mixer.

    Each public method loads the matching pre-recorded mp3 and starts
    playback; ``mixer.music.play`` returns immediately (asynchronous).
    The six original methods repeated the same init/load/play triple, so
    it is factored into the private :meth:`_play` helper.
    """
    def _play(self, filename):
        # (Re)initialise the mixer, load the clip and start playback.
        # mixer.init() is idempotent, so calling it per clip is safe.
        mixer.init()
        mixer.music.load(filename)
        mixer.music.play()
    def right(self):
        """Announce a turn to the right."""
        self._play('right.mp3')
    def left(self):
        """Announce a turn to the left."""
        self._play('left.mp3')
    def stop_left(self):
        """Announce: stop drifting left."""
        self._play('stop_left.mp3')
    def stop_right(self):
        """Announce: stop drifting right."""
        self._play('stop_right.mp3')
    def peopleOnRight(self):
        """Announce a person detected on the right."""
        self._play('face_right.mp3')
    def peopleOnLeft(self):
        """Announce a person detected on the left."""
        self._play('face_left.mp3')
| 22.352941 | 43 | 0.543421 |
08915f2075e2aea475135409f90dd601f370c186 | 614 | py | Python | test_sample.py | chrislopez28/kebabs | 15085141ca5764c6ee00bbca5f404383cfdf2f66 | [
"MIT"
] | null | null | null | test_sample.py | chrislopez28/kebabs | 15085141ca5764c6ee00bbca5f404383cfdf2f66 | [
"MIT"
] | null | null | null | test_sample.py | chrislopez28/kebabs | 15085141ca5764c6ee00bbca5f404383cfdf2f66 | [
"MIT"
] | null | null | null | import pytest
from .kebabs import kebab_case
def test_camelcase():
    # camelCase words are split at the capital and lower-cased.
    result = kebab_case("camelCase")
    assert result == "camel-case"
def test_sentence():
    # Punctuation is dropped and every space becomes a hyphen.
    sentence = ("Why, sometimes I've believed as many as six impossible "
                "things before breakfast.")
    expected = ("why-sometimes-ive-believed-as-many-as-six-impossible-"
                "things-before-breakfast")
    assert kebab_case(sentence) == expected
def test_snakecase():
    # Underscores are swapped for hyphens.
    result = kebab_case("snake_case")
    assert result == "snake-case"
def test_pascalcase():
    # PascalCase splits on each capital, including the leading one.
    result = kebab_case("PascalCase")
    assert result == "pascal-case"
def test_spaces():
    # Interior spaces become hyphens.
    result = kebab_case("Spaces in the middle")
    assert result == "spaces-in-the-middle"
| 24.56 | 172 | 0.685668 |
b4718e14113ec0b70192ae9463fe34a7f1325e72 | 2,730 | gyp | Python | node_modules/lmdb-store/binding.gyp | nfaltir/project-website | b2518eb8736501583c5f0210b6927eae317e7362 | [
"MIT"
] | null | null | null | node_modules/lmdb-store/binding.gyp | nfaltir/project-website | b2518eb8736501583c5f0210b6927eae317e7362 | [
"MIT"
] | null | null | null | node_modules/lmdb-store/binding.gyp | nfaltir/project-website | b2518eb8736501583c5f0210b6927eae317e7362 | [
"MIT"
] | 1 | 2021-11-01T00:57:56.000Z | 2021-11-01T00:57:56.000Z | {
"variables": {
"os_linux_compiler%": "gcc",
"use_robust%": "false",
"use_data_v1%": "false",
"enable_fast_api_calls%": "false",
"enable_pointer_compression%": "false",
"target%": "",
"build_v8_with_gn": "false"
},
"targets": [
{
"target_name": "lmdb-store",
"win_delay_load_hook": "false",
"sources": [
"dependencies/lmdb/libraries/liblmdb/midl.c",
"dependencies/lmdb/libraries/liblmdb/chacha8.c",
"dependencies/lz4/lib/lz4.h",
"dependencies/lz4/lib/lz4.c",
"src/node-lmdb.cpp",
"src/env.cpp",
"src/compression.cpp",
"src/ordered-binary.cpp",
"src/misc.cpp",
"src/txn.cpp",
"src/dbi.cpp",
"src/cursor.cpp",
"src/windows.c"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"dependencies/lz4/lib"
],
"defines": ["MDB_MAXKEYSIZE=1978"],
"conditions": [
["OS=='linux'", {
"variables": {
"gcc_version" : "<!(<(os_linux_compiler) -dumpversion | cut -d '.' -f 1)",
},
"cflags_cc": [
"-fPIC",
"-Wno-strict-aliasing",
"-Wno-unused-result",
"-Wno-cast-function-type",
"-fvisibility=hidden",
"-fvisibility-inlines-hidden",
],
"conditions": [
["gcc_version>=7", {
"cflags": [
"-Wimplicit-fallthrough=2",
],
}],
],
"ldflags": [
"-fPIC",
"-fvisibility=hidden"
],
"cflags": [
"-fPIC",
"-fvisibility=hidden",
"-O3"
],
}],
["OS=='win'", {
"libraries": ["ntdll.lib"]
}],
["use_data_v1=='true'", {
"sources": [
"dependencies/lmdb-data-v1/libraries/liblmdb/mdb.c"
],
"include_dirs": [
"dependencies/lmdb-data-v1/libraries/liblmdb",
],
}, {
"sources": [
"dependencies/lmdb/libraries/liblmdb/mdb.c"
],
"include_dirs": [
"dependencies/lmdb/libraries/liblmdb",
],
}],
["enable_pointer_compression=='true'", {
"defines": ["V8_COMPRESS_POINTERS", "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE"],
}],
["enable_fast_api_calls=='true'", {
"defines": ["ENABLE_FAST_API=1"],
}],
["use_robust=='true'", {
"defines": ["MDB_USE_ROBUST"],
}],
],
}
]
}
| 28.4375 | 87 | 0.4337 |
3788236965d12f541c7f365043039f574b4ca93c | 14,346 | py | Python | backend/venv/Lib/site-packages/git/diff.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | backend/venv/Lib/site-packages/git/diff.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | backend/venv/Lib/site-packages/git/diff.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | # diff.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import re
from gitdb.util import hex_to_bin
from .objects.blob import Blob
from .objects.util import mode_str_to_int
from git.compat import (
defenc,
PY3
)
__all__ = ('Diffable', 'DiffIndex', 'Diff')
class Diffable(object):
    """Common interface for all object that can be diffed against another object of compatible type.
    :note:
        Subclasses require a repo member as it is the case for Object instances, for practical
        reasons we do not derive from Object."""
    __slots__ = tuple()
    # standin indicating you want to diff against the index
    class Index(object):
        pass
    def _process_diff_args(self, args):
        """
        :return:
            possibly altered version of the given args list.
            Method is called right before git command execution.
            Subclasses can use it to alter the behaviour of the superclass"""
        return args
    def diff(self, other=Index, paths=None, create_patch=False, **kwargs):
        """Creates diffs between two items being trees, trees and index or an
        index and the working tree. It will detect renames automatically.
        :param other:
            Is the item to compare us with.
            If None, we will be compared to the working tree.
            If Treeish, it will be compared against the respective tree
            If Index ( type ), it will be compared against the index.
            It defaults to Index to assure the method will not by-default fail
            on bare repositories.
        :param paths:
            is a list of paths or a single path to limit the diff to.
            It will only include at least one of the given path or paths.
        :param create_patch:
            If True, the returned Diff contains a detailed patch that if applied
            makes the self to other. Patches are somewhat costly as blobs have to be read
            and diffed.
        :param kwargs:
            Additional arguments passed to git-diff, such as
            R=True to swap both sides of the diff.
        :return: git.DiffIndex
        :note:
            On a bare repository, 'other' needs to be provided as Index or
            as Tree/Commit, or a git command error will occur"""
        args = list()
        args.append("--abbrev=40")  # we need full shas
        args.append("--full-index")  # get full index paths, not only filenames
        args.append("-M")  # check for renames, in both formats
        if create_patch:
            args.append("-p")
        else:
            args.append("--raw")
        # in any way, assure we don't see colored output,
        # fixes https://github.com/gitpython-developers/GitPython/issues/172
        args.append('--no-color')
        # a single path is allowed for convenience - normalize to a list
        if paths is not None and not isinstance(paths, (tuple, list)):
            paths = [paths]
        if other is not None and other is not self.Index:
            args.insert(0, other)
        if other is self.Index:
            args.insert(0, "--cached")
        args.insert(0, self)
        # paths is list here or None
        if paths:
            # '--' separates revisions from path limiters on the command line
            args.append("--")
            args.extend(paths)
        # END paths handling
        kwargs['as_process'] = True
        proc = self.repo.git.diff(*self._process_diff_args(args), **kwargs)
        # raw format is cheap to parse; the patch parser is only needed when
        # the caller asked for full patch text
        diff_method = Diff._index_from_raw_format
        if create_patch:
            diff_method = Diff._index_from_patch_format
        index = diff_method(self.repo, proc.stdout)
        proc.wait()
        return index
class DiffIndex(list):
    """A list of :class:`Diff` instances that can be filtered by the kind
    of change each diff represents.

    The class improves the diff handling convenience"""
    # change type invariant identifying possible ways a blob can have changed
    # A = Added, D = Deleted, R = Renamed, M = modified
    change_type = ("A", "D", "R", "M")
    def iter_change_type(self, change_type):
        """
        :param change_type:
            Member of DiffIndex.change_type, namely:
            * 'A' for added paths
            * 'D' for deleted paths
            * 'R' for renamed paths
            * 'M' for paths with modified data
        :return:
            iterator yielding Diff instances that match the given change_type"""
        if change_type not in self.change_type:
            raise ValueError("Invalid change type: %s" % change_type)
        # Map each change code to the predicate that recognises it.
        predicates = {
            "A": lambda d: d.new_file,
            "D": lambda d: d.deleted_file,
            "R": lambda d: d.renamed,
            "M": lambda d: d.a_blob and d.b_blob and d.a_blob != d.b_blob,
        }
        is_match = predicates[change_type]
        for candidate in self:
            if is_match(candidate):
                yield candidate
class Diff(object):
    """A Diff contains diff information between two Trees.
    It contains two sides a and b of the diff, members are prefixed with
    "a" and "b" respectively to indicate that.
    Diffs keep information about the changed blob objects, the file mode, renames,
    deletions and new files.
    There are a few cases where None has to be expected as member variable value:
    ``New File``::
        a_mode is None
        a_blob is None
    ``Deleted File``::
        b_mode is None
        b_blob is None
    ``Working Tree Blobs``
        When comparing to working trees, the working tree blob will have a null hexsha
        as a corresponding object does not yet exist. The mode will be null as well.
        But the path will be available though.
        If it is listed in a diff the working tree version of the file must
        be different to the version in the index or tree, and hence has been modified."""
    # precompiled regex
    # Matches one 'diff --git' header in patch output and captures the
    # paths plus optional rename/mode/blob-id details; the literal layout
    # is free-form because of re.VERBOSE.
    re_header = re.compile(r"""
                            ^diff[ ]--git
                            [ ](?:a/)?(?P<a_path>.+?)[ ](?:b/)?(?P<b_path>.+?)\n
                            (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
                            ^rename[ ]from[ ](?P<rename_from>\S+)\n
                            ^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))?
                            (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
                            ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
                            (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
                            (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
                            (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
                            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
                            """.encode('ascii'), re.VERBOSE | re.MULTILINE)
    # can be used for comparisons
    NULL_HEX_SHA = "0" * 40
    NULL_BIN_SHA = b"\0" * 20
    __slots__ = ("a_blob", "b_blob", "a_mode", "b_mode", "new_file", "deleted_file",
                 "rename_from", "rename_to", "diff")
    def __init__(self, repo, a_path, b_path, a_blob_id, b_blob_id, a_mode,
                 b_mode, new_file, deleted_file, rename_from,
                 rename_to, diff):
        # Modes arrive as octal strings from git; convert to int once here.
        self.a_mode = a_mode
        self.b_mode = b_mode
        if self.a_mode:
            self.a_mode = mode_str_to_int(self.a_mode)
        if self.b_mode:
            self.b_mode = mode_str_to_int(self.b_mode)
        if a_blob_id is None:
            self.a_blob = None
        else:
            # a blob without a mode would be invalid
            assert self.a_mode
            self.a_blob = Blob(repo, hex_to_bin(a_blob_id), mode=self.a_mode, path=a_path)
        if b_blob_id is None:
            self.b_blob = None
        else:
            assert self.b_mode
            self.b_blob = Blob(repo, hex_to_bin(b_blob_id), mode=self.b_mode, path=b_path)
        self.new_file = new_file
        self.deleted_file = deleted_file
        # be clear and use None instead of empty strings
        self.rename_from = rename_from or None
        self.rename_to = rename_to or None
        self.diff = diff
    def __eq__(self, other):
        # equality is member-wise over all slots
        for name in self.__slots__:
            if getattr(self, name) != getattr(other, name):
                return False
        # END for each name
        return True
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash(tuple(getattr(self, n) for n in self.__slots__))
    def __str__(self):
        # headline is the path of whichever side of the diff exists
        h = "%s"
        if self.a_blob:
            h %= self.a_blob.path
        elif self.b_blob:
            h %= self.b_blob.path
        msg = ''
        l = None  # temp line
        ll = 0  # line length
        for b, n in zip((self.a_blob, self.b_blob), ('lhs', 'rhs')):
            if b:
                l = "\n%s: %o | %s" % (n, b.mode, b.hexsha)
            else:
                l = "\n%s: None" % n
            # END if blob is not None
            ll = max(len(l), ll)
            msg += l
        # END for each blob
        # add headline
        h += '\n' + '=' * ll
        if self.deleted_file:
            msg += '\nfile deleted in rhs'
        if self.new_file:
            msg += '\nfile added in rhs'
        if self.rename_from:
            msg += '\nfile renamed from %r' % self.rename_from
        if self.rename_to:
            msg += '\nfile renamed to %r' % self.rename_to
        if self.diff:
            msg += '\n---'
            try:
                msg += self.diff.decode(defenc)
            except UnicodeDecodeError:
                msg += 'OMITTED BINARY DATA'
            # end handle encoding
            msg += '\n---'
        # END diff info
        # Python2 silliness: have to assure we convert our likely to be unicode object to a string with the
        # right encoding. Otherwise it tries to convert it using ascii, which may fail ungracefully
        res = h + msg
        if not PY3:
            res = res.encode(defenc)
        # end
        return res
    @property
    def renamed(self):
        """:returns: True if the blob of our diff has been renamed"""
        return self.rename_from != self.rename_to
    @classmethod
    def _index_from_patch_format(cls, repo, stream):
        """Create a new DiffIndex from the given text which must be in patch format
        :param repo: is the repository we are operating on - it is required
        :param stream: result of 'git diff' as a stream (supporting file protocol)
        :return: git.DiffIndex """
        # for now, we have to bake the stream
        text = stream.read()
        index = DiffIndex()
        previous_header = None
        for header in cls.re_header.finditer(text):
            a_path, b_path, similarity_index, rename_from, rename_to, \
                old_mode, new_mode, new_file_mode, deleted_file_mode, \
                a_blob_id, b_blob_id, b_mode = header.groups()
            new_file, deleted_file = bool(new_file_mode), bool(deleted_file_mode)
            # Our only means to find the actual text is to see what has not been matched by our regex,
            # and then retro-actively assign it to our index
            if previous_header is not None:
                index[-1].diff = text[previous_header.end():header.start()]
            # end assign actual diff
            # Make sure the mode is set if the path is set. Otherwise the resulting blob is invalid
            # We just use the one mode we should have parsed
            a_mode = old_mode or deleted_file_mode or (a_path and (b_mode or new_mode or new_file_mode))
            b_mode = b_mode or new_mode or new_file_mode or (b_path and a_mode)
            index.append(Diff(repo,
                              a_path and a_path.decode(defenc),
                              b_path and b_path.decode(defenc),
                              a_blob_id and a_blob_id.decode(defenc),
                              b_blob_id and b_blob_id.decode(defenc),
                              a_mode and a_mode.decode(defenc),
                              b_mode and b_mode.decode(defenc),
                              new_file, deleted_file,
                              rename_from and rename_from.decode(defenc),
                              rename_to and rename_to.decode(defenc),
                              None))
            previous_header = header
        # end for each header we parse
        if index:
            # the final diff body runs from the last header to end-of-text
            index[-1].diff = text[header.end():]
        # end assign last diff
        return index
    @classmethod
    def _index_from_raw_format(cls, repo, stream):
        """Create a new DiffIndex from the given stream which must be in raw format.
        :return: git.DiffIndex"""
        # handles lines like
        # :100644 100644 687099101... 37c5e30c8... M    .gitignore
        index = DiffIndex()
        for line in stream.readlines():
            line = line.decode(defenc)
            if not line.startswith(":"):
                continue
            # END its not a valid diff line
            old_mode, new_mode, a_blob_id, b_blob_id, change_type, path = line[1:].split(None, 5)
            path = path.strip()
            a_path = path
            b_path = path
            deleted_file = False
            new_file = False
            rename_from = None
            rename_to = None
            # NOTE: We cannot conclude from the existence of a blob to change type
            # as diffs with the working do not have blobs yet
            if change_type == 'D':
                b_blob_id = None
                deleted_file = True
            elif change_type == 'A':
                a_blob_id = None
                new_file = True
            elif change_type[0] == 'R':  # parses RXXX, where XXX is a confidence value
                # rename lines carry both paths, tab-separated
                a_path, b_path = path.split('\t', 1)
                rename_from, rename_to = a_path, b_path
            # END add/remove handling
            diff = Diff(repo, a_path, b_path, a_blob_id, b_blob_id, old_mode, new_mode,
                        new_file, deleted_file, rename_from, rename_to, '')
            index.append(diff)
        # END for each line
        return index
| 36.690537 | 107 | 0.560226 |
0152ef6939f87644703740c8d4248a52c7e4b643 | 15,023 | py | Python | elmclient/_app.py | jjrevuel/ELM-Python-Client | 3585a77b13cfb791988a1ae394bcf7d89beca749 | [
"MIT"
] | null | null | null | elmclient/_app.py | jjrevuel/ELM-Python-Client | 3585a77b13cfb791988a1ae394bcf7d89beca749 | [
"MIT"
] | null | null | null | elmclient/_app.py | jjrevuel/ELM-Python-Client | 3585a77b13cfb791988a1ae394bcf7d89beca749 | [
"MIT"
] | null | null | null | ##
## © Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: MIT
##
import logging
import urllib
import requests.exceptions
from . import rdfxml
from . import oslcqueryapi
from . import utils
from . import httpops
logger = logging.getLogger(__name__)
#################################################################################################
# a generic jazz application
class _App( httpops.HttpOperations_Mixin ):
'A generic Jazz application'
domain = 'UNSPECIFIED APP DOMAIN'
project_class = None
artifact_formats = [] # For RR
reportablerest_baseurl = "publish"
def __init__(self, server, contextroot, jts=None):
super().__init__()
logger.info( f'Creating app {contextroot} {server=}' )
self.contextroot = contextroot
self.baseurl = urllib.parse.urljoin(server.baseurl, contextroot) + "/"
self.jts = jts
self.server = server
self.project_areas_xml = None
self._projects = None
self.headers = {}
self.cmServiceProviders = 'oslc_config:cmServiceProviders'
self.iid=None # app has a dummy (empty) iid
self.hooks = []
self.default_query_resource = None
def retrieve_cm_service_provider_xml(self):
cm_service_provider_uri = rdfxml.xmlrdf_get_resource_uri(self.rootservices_xml,
self.cmServiceProviders)
rdf = self.execute_get_rdf_xml(cm_service_provider_uri)
return rdf
def retrieve_oslc_catalog_xml(self):
oslccataloguri = rdfxml.xmlrdf_get_resource_uri(self.rootservices_xml, self.serviceproviders)
if oslccataloguri is None:
return None
return self.execute_get_rdf_xml(oslccataloguri)
# get local headers
def _get_headers(self, headers=None):
logger.info( f"app_gh" )
result = {'X-Requested-With': 'XMLHttpRequest', 'Referer': self.reluri('web'),'OSLC-Core-Version':'2.0'}
if self.headers:
result.update(self.headers)
result.update(self._get_oslc_headers())
if headers:
result.update(headers)
logger.info( f"app_gh {result}" )
return result
# get a request with local headers
def _get_request(self, verb, reluri='', *, params=None, headers=None, data=None):
fullheaders = self._get_headers()
if headers is not None:
fullheaders.update(headers)
sortedparams = None if params is None else {k:params[k] for k in sorted(params.keys())}
request = httpops.HttpRequest( self.server._session, verb, self.reluri(reluri), params=sortedparams, headers=fullheaders, data=data)
return request
def _get_oslc_headers(self, headers=None):
result = {
'Accept': 'application/rdf+xml'
, 'Referer': self.reluri('web')
, 'OSLC-Core-Version': '2.0'
}
if headers:
result.update(headers)
return result
def find_projectname_from_uri(self,name_or_uri):
self._load_projects()
if self.is_project_uri(name_or_uri):
# find the project
if name_or_uri in self._projects:
return self._projects[name_or_uri]['name']
else:
return name_or_uri
def is_project_uri(self, uri):
if uri.startswith(self.baseurl) and '/process/project-areas/' in uri:
return True
return False
# return an absolute URL for a url relative to this app
# NOTE if reluri has a leading / this will be relative to the serverhostname:port
# i.e. the app context root will be removed.
# So if you want an app-relative URL don't use a leading /
def reluri(self, *reluris):
url = self.baseurl
for reluri in reluris:
url = urllib.parse.urljoin(url,reluri)
return url
# load the projects from the project areas XML - doesn't create any project classes, this is done later when finding a project to open
def _load_projects(self,include_archived=False,force=False):
if self.project_class is None:
raise Exception(f"projectClass has not been set on {self}!")
if self._projects is not None and not force:
return
logger.info( "Loading projects")
self._projects = {}
uri = rdfxml.xmlrdf_get_resource_uri(self.rootservices_xml, 'jp06:projectAreas')
params = {}
if include_archived:
params['includeArchived'] = 'true'
self.project_areas_xml = self.execute_get_xml(uri, params=params)
for projectel in rdfxml.xml_find_elements(self.project_areas_xml,".//jp06:project-area" ):
projectu = rdfxml.xmlrdf_get_resource_text(projectel,".//jp06:url")
projectname = rdfxml.xmlrdf_get_resource_uri(projectel,attrib='jp06:name')
is_optin = False
singlemode = False
if self.supports_configs:
en = rdfxml.xmlrdf_get_resource_text(projectel,'.//jp:configuration-management-enabled')
is_optin = ( rdfxml.xmlrdf_get_resource_text(projectel,'.//jp:configuration-management-enabled') == "true" )
singlemode = ( rdfxml.xmlrdf_get_resource_text(projectel,'.//jp:configuration-management-mode') == "SINGLE" )
logger.info( f"{projectname=} {projectu=} {is_optin=} {singlemode=}" )
self._projects[projectu] = {'name':projectname, 'project': None, 'projectu': projectu, 'is_optin': is_optin, 'singlemode': singlemode }
self._projects[projectname] = projectu
# get an instance for a specific project
def find_project(self, projectname_or_uri, include_archived=False):
logger.info( f"Find project {projectname_or_uri}")
self._load_projects()
if self.is_project_uri(projectname_or_uri):
if projectname_or_uri in self._projects:
res = self._projects[projectname_or_uri]['project']
if res is None:
# create the project instance
res = self.project_class(self._projects[projectname_or_uri]['name'], self._projects[projectname_or_uri]['projectu'], self, is_optin=self._projects[projectname_or_uri]['is_optin'],singlemode=self._projects[projectname_or_uri]['singlemode'])
else:
res = None
else:
# must be a name
projectu = self._projects.get(projectname_or_uri)
if projectu is None:
res = None
else:
res = self._projects[projectu]['project']
if res is None:
# create the project instance
res = self.project_class(self._projects[projectu]['name'], self._projects[projectu]['projectu'], self, is_optin=self._projects[projectu]['is_optin'],singlemode=self._projects[projectu]['singlemode'])
logger.info( f'Project {projectname_or_uri} found {projectu} {res}' )
return res
def report_type_system( self ):
qcdetails = self.get_query_capability_uris()
report = "<HTML><BODY>\n"
report += f"<H1>Type system report for application {self.domain}</H1>\n"
report += "<H2>Application Queryable Resource Types, short name and URI</H2>\n"
rows = []
for k in sorted(qcdetails.keys()):
shortname = k.split('#')[-1]
shortname += " (default)" if self.default_query_resource is not None and k==rdfxml.tag_to_uri(self.default_query_resource) else ""
rows.append( [shortname,k,qcdetails[k]])
# print in a nice table with equal length columns
report += utils.print_in_html(rows,['Short Name', 'URI', 'Query Capability URI'])
rows = []
for prefix in sorted(rdfxml.RDF_DEFAULT_PREFIX.keys()):
rows.append([prefix,rdfxml.RDF_DEFAULT_PREFIX[prefix]] )
report += "<H2>Prefixes</H2>\n"
report += utils.print_in_html(rows,['Prefix', 'URI'])
report += "</BODY></HTML>\n"
return report
def get_query_capability_uri(self,resource_type=None,context=None):
context = context or self
resource_type = resource_type or context.default_query_resource
return self.get_query_capability_uri_from_xml(capabilitiesxml=context.retrieve_cm_service_provider_xml(), resource_type=resource_type,context=context)
def get_query_capability_uris(self,resource_type=None,context=None):
context = context or self
resource_type = resource_type or context.default_query_resource
return self.get_query_capability_uris_from_xml(capabilitiesxml=context.retrieve_cm_service_provider_xml(),context=context)
def get_query_capability_uri_from_xml(self,capabilitiesxml,resource_type,context):
logger.info( f"get_query_capability_uri_from_xml {self=} {resource_type=} {capabilitiesxml=}" )
if resource_type is None:
raise Exception( "You must provide a resource type" )
# ensure we have a URI for the resource type
resource_type_u = rdfxml.tag_to_uri(resource_type)
# get list of [resourcetype,uri]
qcs = self.get_query_capability_uris_from_xml(capabilitiesxml=capabilitiesxml,context=context)
if resource_type_u.startswith( 'http' ):
# looking for a complete precise URI
if resource_type_u in qcs:
return qcs[resource_type_u]
raise Exception( f"Resource type {resource_type} not found" )
# didn't specify a URI - find the first match at the end of the resouce type
for k,v in qcs.items():
if k.endswith(resource_type):
return v
raise Exception( f"Query capability {resource_type} {resource_type_u} not found!" )
# returns a dictionary of resource type to query capability URI
# this is used when the XML doesn't have references off to other URLs (like GCM does)
def get_query_capability_uris_from_xml(self,capabilitiesxml,context):
logger.info( f"get_query_capability_uris_from_xml {self=} {capabilitiesxml=}" )
qcs = {}
#<oslc:QueryCapability>
# <oslc:resourceType rdf:resource="http://open-services.net/ns/cm#ChangeRequest"/>
# <oslc:queryBase rdf:resource="https://jazz.ibm.com:9443/ccm/oslc/contexts/_2H-_4OpoEemSicvc8AFfxQ/workitems"/>
# find a queryBase and it's the containing tag that has the info
for qcx in rdfxml.xml_find_elements(capabilitiesxml,'.//oslc:queryBase/..'):
for qcrtx in rdfxml.xml_find_elements( qcx, 'oslc:resourceType'):
qcs[rdfxml.xmlrdf_get_resource_uri(qcrtx)] = rdfxml.xmlrdf_get_resource_uri(qcx, "oslc:queryBase")
logger.debug( f"{rdfxml.xmlrdf_get_resource_uri(qcrtx)=}" )
return qcs
def get_factory_uri_from_xml(self,factoriesxml,resource_type,context):
logger.info( f"get_factory_uri_from_xml {self=} {resource_type=} {factoriesxml=}" )
if resource_type is None:
raise Exception( "You must provide a resource type" )
# ensure we have a URI for the resource type
resource_type_u = rdfxml.tag_to_uri(resource_type)
# get list of [resourcetype,uri]
qcs = self.get_factory_uris_from_xml(factoriesxml=factoriesxml,context=context)
if resource_type_u.startswith( 'http' ):
# looking for a complete precise URI
if resource_type_u in qcs:
return qcs[resource_type_u]
raise Exception( f"Factory for resource type {resource_type} not found" )
# didn't specify a URI - find the first match at the end of the resouce type
for k,v in qcs.items():
if k.endswith(resource_type):
return v
raise Exception( f"QFactory {resource_type} {resource_type_u} not found!" )
# returns a dictionary of resource type to factory URI
# this is used when the XML doesn't have references off to other URLs (like GCM does)
def get_factory_uris_from_xml(self,factoriesxml,context):
logger.info( f"get_factory_uris_from_xml {self=} {factoriesxml=}" )
qcs = {}
#<oslc:QueryCapability>
# <oslc:resourceType rdf:resource="http://open-services.net/ns/cm#ChangeRequest"/>
# <oslc:queryBase rdf:resource="https://jazz.ibm.com:9443/ccm/oslc/contexts/_2H-_4OpoEemSicvc8AFfxQ/workitems"/>
# find a queryBase and it's the containing tag that has the info
for qcx in rdfxml.xml_find_elements(factoriesxml,'.//oslc:CreationFactory'):
for qcrtx in rdfxml.xml_find_elements( qcx, 'oslc:resourceType'):
qcs[rdfxml.xmlrdf_get_resource_uri(qcrtx)] = rdfxml.xmlrdf_get_resource_uri(qcx, "oslc:creation")
logger.debug( f"{rdfxml.xmlrdf_get_resource_uri(qcrtx)=}" )
return qcs
def is_user_uri(self, uri):
logger.info( f"{self=} {self.jts=}" )
if uri and uri.startswith(self.jts.baseurl) and '/users/' in uri:
return True
return False
def user_uritoname_resolver(self, uri):
if self.is_user_uri(uri):
res = uri[uri.rfind("/") + 1:]
return res
raise Exception(f"Bad user uri {uri}")
def is_user_name(self, name):
logger.info( f"Checking name {name}" )
if not name or name.startswith( "http:") or name.startswith( "https:"):
return False
res = self.user_nametouri_resolver( name,raiseifinvalid=False)
if res is not None:
return True
return False
def user_nametouri_resolver(self, name, raiseifinvalid=True):
logger.info( f"Converting name {name}" )
if not raiseifinvalid or self.is_user_name(name):
user_uri = self.jts.baseurl+f"users/{name}"
# check it using whoami
try:
res = self.execute_get(user_uri)
except requests.exceptions.HTTPError as e:
res = None
if res:
return user_uri
else:
if raiseifinvalid:
raise Exception( f"User {name} is not known on this server" )
return None
raise Exception(f"Bad user name {name}")
def resolve_project_nametouri(self, name, raiseifinvalid=True):
    """Resolve a project *name* to its URI, or None if unknown.

    NOTE(review): raiseifinvalid is accepted for interface symmetry with
    the user resolvers but is currently unused here — confirm intended.
    """
    # find project for name
    self._load_projects()
    result = self._projects.get(name)
    # Fixed: this was a stray debug print() to stdout; use the module
    # logger like every other method in this class.
    logger.debug( f"resolve_project_nametouri {name} {result}" )
    return result
#################################################################################################
class JTSApp(_App):
    'The JTS application'
    # Class-level app metadata consumed by the generic _App machinery.
    domain = 'jts'
    project_class = None
    supports_configs = False
    supports_components = False

    def __init__(self, server, contextroot, jts=None):
        # JTS is the Jazz Team Server itself, so it acts as its own jts
        # reference: the jts parameter is ignored and self is passed instead.
        super().__init__(server, contextroot, jts=self)

    def find_project(self, projectname):
        # JTS is an admin application; it has no project areas to look up.
        raise Exception("JTS does not have projects!")
| 46.082822 | 259 | 0.640485 |
ede33d12c4adae5a6651c12dc293ea5089c6ac4f | 764 | py | Python | algorithms/sorting/comb_sort.py | proudzhu/algorithms | 636a4519832849dcb6516f2ef78428872b3bb81c | [
"BSD-3-Clause"
] | 2,545 | 2015-01-01T17:11:49.000Z | 2022-03-26T04:51:28.000Z | algorithms/sorting/comb_sort.py | proudzhu/algorithms | 636a4519832849dcb6516f2ef78428872b3bb81c | [
"BSD-3-Clause"
] | 90 | 2015-03-15T11:34:35.000Z | 2018-10-27T13:47:53.000Z | algorithms/sorting/comb_sort.py | zhangbo2008/python_algorithm2 | c53669703b957a079f100c12711f86f5fc2f9389 | [
"BSD-3-Clause"
] | 787 | 2015-01-01T19:24:44.000Z | 2022-02-21T01:24:55.000Z | """
Comb Sort
---------
Improves on bubble sort by using a gap sequence to remove turtles.
Time Complexity: O(n**2)
Space Complexity: O(1) Auxiliary
Stable: Yes
Psuedo code: http://en.wikipedia.org/wiki/Comb_sort
"""
def sort(seq):
    """Sort a list of integers in ascending order (in place) and return it.

    Comb sort: bubble sort with a gap that shrinks by a factor of 1.25
    each pass, which quickly moves small values ("turtles") away from the
    end of the list.

    :param seq: A list of integers
    :rtype: A list of sorted integers
    """
    gap = len(seq)
    swapped = True
    while gap > 1 or swapped:
        # Shrink the gap, never below 1 (gap == 1 degenerates to bubble sort).
        gap = max(1, int(gap / 1.25))
        swapped = False
        for left in range(len(seq) - gap):
            right = left + gap
            if seq[left] > seq[right]:
                seq[left], seq[right] = seq[right], seq[left]
                swapped = True
    return seq
| 20.648649 | 75 | 0.548429 |
708ad55aaeb66ede7c5b8f680571f2d636c3d52b | 3,899 | py | Python | homeassistant/components/config/automation.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 1 | 2021-04-26T14:28:48.000Z | 2021-04-26T14:28:48.000Z | homeassistant/components/config/automation.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | null | null | null | homeassistant/components/config/automation.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | null | null | null | """Provide configuration end points for Automations."""
from collections import OrderedDict
import uuid
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.automation import (
get_debug_traces,
get_debug_traces_for_automation,
)
from homeassistant.components.automation.config import (
DOMAIN,
PLATFORM_SCHEMA,
async_validate_config_item,
)
from homeassistant.config import AUTOMATION_CONFIG_PATH
from homeassistant.const import CONF_ID, SERVICE_RELOAD
from homeassistant.helpers import config_validation as cv, entity_registry
from . import ACTION_DELETE, EditIdBasedConfigView
async def async_setup(hass):
    """Set up the Automation config API."""
    # Register the websocket commands defined at the bottom of this module.
    websocket_api.async_register_command(hass, websocket_automation_trace_get)
    websocket_api.async_register_command(hass, websocket_automation_trace_list)

    async def hook(action, config_key):
        """post_write_hook for Config View that reloads automations."""
        # Reload all automations so the edited config takes effect.
        await hass.services.async_call(DOMAIN, SERVICE_RELOAD)

        if action != ACTION_DELETE:
            return

        # On delete, also remove the orphaned entity registry entry.
        ent_reg = await entity_registry.async_get_registry(hass)

        entity_id = ent_reg.async_get_entity_id(DOMAIN, DOMAIN, config_key)

        if entity_id is None:
            return

        ent_reg.async_remove(entity_id)

    # Expose the HTTP edit view for automation configuration.
    hass.http.register_view(
        EditAutomationConfigView(
            DOMAIN,
            "config",
            AUTOMATION_CONFIG_PATH,
            cv.string,
            PLATFORM_SCHEMA,
            post_write_hook=hook,
            data_validator=async_validate_config_item,
        )
    )
    return True
class EditAutomationConfigView(EditIdBasedConfigView):
    """Edit automation config."""

    def _write_value(self, hass, data, config_key, new_value):
        """Set value.

        Finds the automation with id == config_key in *data* (the list of
        automation configs) and replaces it with *new_value* merged over
        the existing entry; appends a new entry if no id matches.
        """
        index = None
        for index, cur_value in enumerate(data):
            # When people copy paste their automations to the config file,
            # they sometimes forget to add IDs. Fix it here.
            if CONF_ID not in cur_value:
                cur_value[CONF_ID] = uuid.uuid4().hex

            elif cur_value[CONF_ID] == config_key:
                break
        else:
            # for/else: no entry matched config_key, so create a fresh one
            # and append it; index points at the new slot.
            cur_value = OrderedDict()
            cur_value[CONF_ID] = config_key
            index = len(data)
            data.append(cur_value)

        # Iterate through some keys that we want to have ordered in the output
        updated_value = OrderedDict()
        for key in ("id", "alias", "description", "trigger", "condition", "action"):
            if key in cur_value:
                updated_value[key] = cur_value[key]
            if key in new_value:
                updated_value[key] = new_value[key]

        # We cover all current fields above, but just in case we start
        # supporting more fields in the future.
        updated_value.update(cur_value)
        updated_value.update(new_value)
        data[index] = updated_value
@websocket_api.websocket_command(
    {vol.Required("type"): "automation/trace/get", vol.Optional("automation_id"): str}
)
@websocket_api.async_response
async def websocket_automation_trace_get(hass, connection, msg):
    """Get automation traces."""
    automation_id = msg.get("automation_id")
    if automation_id:
        # A specific automation was requested: return only its traces.
        traces = {automation_id: get_debug_traces_for_automation(hass, automation_id)}
    else:
        # No id supplied (or empty): return traces for every automation.
        traces = get_debug_traces(hass)
    connection.send_result(msg["id"], traces)
@websocket_api.websocket_command({vol.Required("type"): "automation/trace/list"})
@websocket_api.async_response
async def websocket_automation_trace_list(hass, connection, msg):
    """Summarize automation traces."""
    # summary=True returns condensed trace info for every automation.
    summaries = get_debug_traces(hass, summary=True)
    connection.send_result(msg["id"], summaries)
| 32.491667 | 86 | 0.685817 |
feea42e5e3e727390a823a74cf65b747d68f74de | 6,734 | py | Python | tests/test_decoders.py | BigChief45/httpx | cee1fccaca1701c39901c2c9663fcaa081075b2d | [
"BSD-3-Clause"
] | null | null | null | tests/test_decoders.py | BigChief45/httpx | cee1fccaca1701c39901c2c9663fcaa081075b2d | [
"BSD-3-Clause"
] | null | null | null | tests/test_decoders.py | BigChief45/httpx | cee1fccaca1701c39901c2c9663fcaa081075b2d | [
"BSD-3-Clause"
] | null | null | null | import zlib
import brotli
import pytest
import httpx
from httpx.content_streams import AsyncIteratorStream
from httpx.decoders import (
BrotliDecoder,
DeflateDecoder,
GZipDecoder,
IdentityDecoder,
LineDecoder,
TextDecoder,
)
def test_deflate():
    """A deflate-encoded body is transparently decompressed."""
    payload = b"test 123"
    # Negative wbits => raw deflate stream (no zlib header).
    compressobj = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    blob = compressobj.compress(payload) + compressobj.flush()

    response = httpx.Response(
        200, headers=[(b"Content-Encoding", b"deflate")], content=blob
    )
    assert response.content == payload
def test_gzip():
    """A gzip-encoded body is transparently decompressed."""
    payload = b"test 123"
    # wbits | 16 => gzip container.
    compressobj = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
    blob = compressobj.compress(payload) + compressobj.flush()

    response = httpx.Response(
        200, headers=[(b"Content-Encoding", b"gzip")], content=blob
    )
    assert response.content == payload
def test_brotli():
    """A brotli-encoded body is transparently decompressed."""
    payload = b"test 123"
    response = httpx.Response(
        200,
        headers=[(b"Content-Encoding", b"br")],
        content=brotli.compress(payload),
    )
    assert response.content == payload
def test_multi():
    """Stacked encodings (deflate, then gzip) are decoded in order."""
    payload = b"test 123"

    deflater = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    stage_one = deflater.compress(payload) + deflater.flush()

    gzipper = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
    stage_two = gzipper.compress(stage_one) + gzipper.flush()

    response = httpx.Response(
        200, headers=[(b"Content-Encoding", b"deflate, gzip")], content=stage_two
    )
    assert response.content == payload
def test_multi_with_identity():
    """'identity' in a multi-encoding header is a no-op in either position."""
    payload = b"test 123"
    blob = brotli.compress(payload)

    for header_value in (b"br, identity", b"identity, br"):
        response = httpx.Response(
            200, headers=[(b"Content-Encoding", header_value)], content=blob
        )
        assert response.content == payload
@pytest.mark.asyncio
async def test_streaming():
    """Decoding works incrementally over a streamed response body."""
    payload = b"test 123"
    compressobj = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)

    async def compress(data):
        yield compressobj.compress(data)
        yield compressobj.flush()

    stream = AsyncIteratorStream(aiterator=compress(payload))
    response = httpx.Response(
        200, headers=[(b"Content-Encoding", b"gzip")], stream=stream
    )
    # A streamed response must not eagerly materialize its body.
    assert not hasattr(response, "body")
    assert await response.read() == payload
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br", b"identity"))
def test_empty_content(header_value):
    """Every supported encoding copes with a zero-length body."""
    response = httpx.Response(
        200, headers=[(b"Content-Encoding", header_value)], content=b""
    )
    assert response.content == b""
@pytest.mark.parametrize(
    "decoder", (BrotliDecoder, DeflateDecoder, GZipDecoder, IdentityDecoder)
)
def test_decoders_empty_cases(decoder):
    """Every decoder class tolerates empty input and an immediate flush."""
    codec = decoder()
    assert codec.decode(b"") == b""
    assert codec.flush() == b""
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br"))
def test_decoding_errors(header_value):
    """Corrupt compressed data raises httpx.DecodingError on access."""
    # Drop the first three bytes of a valid brotli stream to corrupt it;
    # the mangled data is invalid for all three encodings under test.
    corrupt = brotli.compress(b"test 123")[3:]
    with pytest.raises(httpx.DecodingError):
        response = httpx.Response(
            200, headers=[(b"Content-Encoding", header_value)], content=corrupt
        )
        response.content
@pytest.mark.parametrize(
    ["data", "encoding"],
    [
        # Plain ASCII split across two chunks.
        ((b"Hello,", b" world!"), "ascii"),
        # UTF-8 multi-byte characters split across chunk boundaries.
        ((b"\xe3\x83", b"\x88\xe3\x83\xa9", b"\xe3", b"\x83\x99\xe3\x83\xab"), "utf-8"),
        # Repeated shift-jis payloads at small and large total sizes.
        ((b"\x83g\x83\x89\x83x\x83\x8b",) * 64, "shift-jis"),
        ((b"\x83g\x83\x89\x83x\x83\x8b",) * 600, "shift-jis"),
        (
            (b"\xcb\xee\xf0\xe5\xec \xe8\xef\xf1\xf3\xec \xe4\xee\xeb\xee\xf0",) * 64,
            "MacCyrillic",
        ),
        (
            (b"\xa5\xa6\xa5\xa7\xa5\xd6\xa4\xce\xb9\xf1\xba\xdd\xb2\xbd",) * 512,
            "euc-jp",
        ),
    ],
)
@pytest.mark.asyncio
async def test_text_decoder(data, encoding):
    """Chunked bodies with no declared charset decode to the expected text.

    The response carries no Content-Type charset, so `.text` must equal the
    concatenated chunks decoded with *encoding*.
    """
    async def iterator():
        nonlocal data
        for chunk in data:
            yield chunk

    stream = AsyncIteratorStream(aiterator=iterator())
    response = httpx.Response(200, stream=stream)
    await response.read()
    assert response.text == (b"".join(data)).decode(encoding)
@pytest.mark.asyncio
async def test_text_decoder_known_encoding():
    """An explicit charset in Content-Type drives decoding of chunked bytes."""
    async def chunks():
        # shift-jis bytes for "トラベル", split mid-character.
        yield b"\x83g"
        yield b"\x83"
        yield b"\x89\x83x\x83\x8b"

    response = httpx.Response(
        200,
        headers=[(b"Content-Type", b"text/html; charset=shift-jis")],
        stream=AsyncIteratorStream(aiterator=chunks()),
    )
    await response.read()
    assert response.text == "トラベル"
def test_text_decoder_empty_cases():
    """TextDecoder handles flush-without-input and empty decode input."""
    assert TextDecoder().flush() == ""

    codec = TextDecoder()
    assert codec.decode(b"") == ""
    assert codec.flush() == ""
def test_line_decoder_nl():
    """'\\n'-terminated text is split into '\\n'-terminated lines; a trailing
    partial line is only returned by flush()."""
    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\n\nb\nc") == ["a\n", "\n", "b\n"]
    assert decoder.flush() == ["c"]

    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\n\nb\nc\n") == ["a\n", "\n", "b\n", "c\n"]
    assert decoder.flush() == []
def test_line_decoder_cr():
    """Bare '\\r' newlines are normalized to '\\n'. A trailing '\\r' is held
    back until flush(), since it could be the start of an '\\r\\n' pair."""
    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\r\rb\rc") == ["a\n", "\n", "b\n"]
    assert decoder.flush() == ["c"]

    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\r\rb\rc\r") == ["a\n", "\n", "b\n"]
    assert decoder.flush() == ["c\n"]
def test_line_decoder_crnl():
    """'\\r\\n' pairs count as a single newline, even when the pair is split
    across decode() calls."""
    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\r\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
    assert decoder.flush() == ["c"]

    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\r\n\r\nb\r\nc\r\n") == ["a\n", "\n", "b\n", "c\n"]
    assert decoder.flush() == []

    # The '\r' and '\n' of one '\r\n' pair arriving in separate chunks must
    # still be coalesced into a single newline.
    decoder = LineDecoder()
    assert decoder.decode("") == []
    assert decoder.decode("a\r") == []
    assert decoder.decode("\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
    assert decoder.flush() == ["c"]
def test_invalid_content_encoding_header():
    """An unrecognized Content-Encoding falls back to the raw body."""
    payload = b"test 123"
    response = httpx.Response(
        200, headers=[(b"Content-Encoding", b"invalid-header")], content=payload
    )
    assert response.content == payload
| 30.197309 | 88 | 0.643303 |
26cc95f54c50f26fee3dfca93bdef090447d46fd | 10,032 | py | Python | config/settings/base.py | majaeseong/votesystem | 624fadca0251a81c0417f3a3a23f3d6c38b1cf33 | [
"MIT"
] | null | null | null | config/settings/base.py | majaeseong/votesystem | 624fadca0251a81c0417f3a3a23f3d6c38b1cf33 | [
"MIT"
] | null | null | null | config/settings/base.py | majaeseong/votesystem | 624fadca0251a81c0417f3a3a23f3d6c38b1cf33 | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3  # (votesystem/config/settings/base.py - 3 = votesystem/)
APPS_DIR = ROOT_DIR.path('votesystem')

env = environ.Env()

# Opt-in: only read the .env file when DJANGO_READ_DOT_ENV_FILE is set.
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
    # OS environment variables take precedence over variables from .env
    env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'Asia/Seoul'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # The DATABASE_URL env var overrides the local default postgres DB.
    'default': env.db('DATABASE_URL', default='postgres:///votesystem'),
}
# Wrap every HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
]
LOCAL_APPS = [
'votesystem.users.apps.UsersAppConfig',
# Your stuff: custom apps go here
'votesystem.vote.apps.VoteConfig'
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'votesystem.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Daniel Roy Greenfeld""", 'daniel-roy-greenfeld@example.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'votesystem.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'votesystem.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
| 39.652174 | 98 | 0.630981 |
666ec01090864090f3f94ece30526d30bf83343c | 13,850 | py | Python | ai_economist/foundation/scenarios/one_step_economy/one_step_economy.py | lucifer2288/ai-economist | 444948f454babc3c7934cf503c1d4abfeae7f1f2 | [
"BSD-3-Clause"
] | null | null | null | ai_economist/foundation/scenarios/one_step_economy/one_step_economy.py | lucifer2288/ai-economist | 444948f454babc3c7934cf503c1d4abfeae7f1f2 | [
"BSD-3-Clause"
] | null | null | null | ai_economist/foundation/scenarios/one_step_economy/one_step_economy.py | lucifer2288/ai-economist | 444948f454babc3c7934cf503c1d4abfeae7f1f2 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from ai_economist.foundation.base.base_env import BaseEnvironment, scenario_registry
from ai_economist.foundation.scenarios.utils import rewards, social_metrics
@scenario_registry.add
class OneStepEconomy(BaseEnvironment):
"""
A simple model featuring one "step" of setting taxes and earning income.
As described in https://arxiv.org/abs/2108.02755:
A simplified version of simple_wood_and_stone scenario where both the planner
and the agents each make a single decision: the planner setting taxes and the
agents choosing labor. Each agent chooses an amount of labor that optimizes
its post-tax utility, and this optimal labor depends on its skill and the tax
rates, and it does not depend on the labor choices of other agents. Before
the agents act, the planner sets the marginal tax rates in order to optimize
social welfare.
Note:
This scenario is intended to be used with the 'PeriodicBracketTax' and
'SimpleLabor' components.
It should use an episode length of 2. In the first step, taxes are set by
the planner via 'PeriodicBracketTax'. In the second, agents select how much
to work/earn via 'SimpleLabor'.
Args:
agent_reward_type (str): The type of utility function used to compute each
agent's reward. Defaults to "coin_minus_labor_cost".
isoelastic_eta (float): The shape parameter of the isoelastic function used
in the "isoelastic_coin_minus_labor" utility function.
labor_exponent (float): The labor exponent parameter used in the
"coin_minus_labor_cost" utility function.
labor_cost (float): The coefficient used to weight the cost of labor.
planner_reward_type (str): The type of social welfare function (SWF) used to
compute the planner's reward. Defaults to "inv_income_weighted_utility".
mixing_weight_gini_vs_coin (float): Must be between 0 and 1 (inclusive).
Controls the weighting of equality and productivity when using SWF
"coin_eq_times_productivity", where a value of 0 (default) yields equal
weighting, and 1 only considers productivity.
"""
# Scenario identity and requirements consumed by the foundation registry.
name = "one-step-economy"
agent_subclasses = ["BasicMobileAgent", "BasicPlanner"]
required_entities = ["Coin"]
def __init__(
    self,
    *base_env_args,
    agent_reward_type="coin_minus_labor_cost",
    isoelastic_eta=0.23,
    labor_exponent=2.0,
    labor_cost=1.0,
    planner_reward_type="inv_income_weighted_utility",
    mixing_weight_gini_vs_coin=0,
    **base_env_kwargs
):
    """See the class docstring for the meaning of each keyword argument.

    Positional and extra keyword arguments are forwarded to BaseEnvironment.
    """
    super().__init__(*base_env_args, **base_env_kwargs)
    self.num_agents = len(self.world.agents)
    # Utility / social-welfare configuration. Values are validated where
    # they are used, in get_current_optimization_metrics().
    self.labor_cost = labor_cost
    self.agent_reward_type = agent_reward_type
    self.isoelastic_eta = isoelastic_eta
    self.labor_exponent = labor_exponent
    self.planner_reward_type = planner_reward_type
    self.mixing_weight_gini_vs_coin = mixing_weight_gini_vs_coin
    # The planner starts each episode with no coin.
    self.planner_starting_coin = 0
    # Per-agent utility baseline; rewards are computed as deltas from this.
    self.curr_optimization_metrics = {str(a.idx): 0 for a in self.all_agents}
def reset_agent_states(self):
    """
    Part 2/2 of scenario reset: reset the state of the agents themselves.

    Empties every agent's inventory, escrow, and endogenous quantities, and
    gives the planner its starting coin.
    """
    self.world.clear_agent_locs()
    for agent in self.world.agents:
        # Zero out every tracked quantity for a clean episode start.
        for field in ("inventory", "escrow", "endogenous"):
            agent.state[field] = {key: 0 for key in agent.state[field]}

    self.world.planner.inventory["Coin"] = self.planner_starting_coin
def generate_observations(self):
    """
    Generate the observations associated with this scenario.

    Returns a dict of {agent.idx: agent_obs_dict}. Mobile agents receive an
    empty observation dict here (components supply their observations);
    the planner observes normalized per-capita productivity and equality.
    """
    # Mobile agents get empty scenario-level observations.
    obs = {str(agent.idx): {} for agent in self.world.agents}

    endowments = np.array(
        [agent.total_endowment("Coin") for agent in self.world.agents]
    )
    productivity = social_metrics.get_productivity(endowments)
    obs[self.world.planner.idx] = {
        # Scaled by 1000 to keep the value in a small numeric range.
        "normalized_per_capita_productivity": productivity / self.num_agents / 1000,
        "equality": social_metrics.get_equality(endowments),
    }
    return obs
def compute_reward(self):
    """
    Apply the reward function(s) associated with this scenario to get the rewards
    from this step.

    Returns:
        rew (dict): A dictionary of {agent.idx: reward} with an entry for each
            agent in the environment (including the planner), holding the
            scalar reward earned this timestep.

    Rewards are computed as the marginal utility (agents) or marginal social
    welfare (planner) experienced on this timestep. Ignoring discounting,
    this means that agents' (planner's) objective is to maximize the utility
    (social welfare) associated with the terminal state of the episode.
    """
    curr_optimization_metrics = self.get_current_optimization_metrics(
        self.world.agents,
        isoelastic_eta=float(self.isoelastic_eta),
        labor_exponent=float(self.labor_exponent),
        labor_coefficient=float(self.labor_cost),
    )
    # Reward = change in utility/SWF since the previous step's baseline.
    planner_agents_rew = {
        k: v - self.curr_optimization_metrics[k]
        for k, v in curr_optimization_metrics.items()
    }
    # Store the new baseline for the next step's marginal computation.
    self.curr_optimization_metrics = curr_optimization_metrics
    return planner_agents_rew
# Optional methods for customization
# ----------------------------------
def additional_reset_steps(self):
    """
    Extra scenario-specific steps that should be performed at the end of the reset
    cycle.

    For each reset cycle...
        First, reset_starting_layout() and reset_agent_states() will be called.

        Second, <component>.reset() will be called for each registered component.

        Lastly, this method will be called to allow for any final customization of
        the reset cycle.
    """
    # Re-seed the utility baseline so the first compute_reward() of the new
    # episode measures change relative to the freshly reset state.
    self.curr_optimization_metrics = self.get_current_optimization_metrics(
        self.world.agents,
        isoelastic_eta=float(self.isoelastic_eta),
        labor_exponent=float(self.labor_exponent),
        labor_coefficient=float(self.labor_cost),
    )
def scenario_metrics(self):
    """
    Generate scenario-level metrics (collected along with component metrics
    in the 'metrics' property). Returns a flat {metric_key: scalar} dict.

    Summarizes social/economic indicators, average agent endowments and
    endogenous quantities, utilities, and the planner's endowments.
    """
    metrics = dict()

    # Social/economic indicators.
    endowments = np.array(
        [agent.total_endowment("Coin") for agent in self.world.agents]
    )
    incomes = np.array(
        [agent.state["production"] for agent in self.world.agents]
    )
    metrics["social/productivity"] = social_metrics.get_productivity(endowments)
    metrics["social/equality"] = social_metrics.get_equality(endowments)

    utilities = np.array(
        [self.curr_optimization_metrics[agent.idx] for agent in self.world.agents]
    )
    metrics[
        "social_welfare/coin_eq_times_productivity"
    ] = rewards.coin_eq_times_productivity(
        coin_endowments=endowments, equality_weight=1.0
    )
    # Weighted by pretax incomes rather than endowments.
    metrics[
        "social_welfare/inv_income_weighted_utility"
    ] = rewards.inv_income_weighted_utility(
        coin_endowments=incomes, utilities=utilities
    )

    # Collect per-agent endowments, endogenous quantities, and utilities.
    endow_lists = {}
    endogenous_lists = {}
    agent_utils = []
    for agent in self.world.agents:
        for resource in agent.inventory.keys():
            endow_lists.setdefault(resource, []).append(
                agent.inventory[resource] + agent.escrow[resource]
            )
        for endogenous, quantity in agent.endogenous.items():
            endogenous_lists.setdefault(endogenous, []).append(quantity)
        agent_utils.append(self.curr_optimization_metrics[agent.idx])

    for resource, quantities in endow_lists.items():
        metrics[f"endow/avg_agent/{resource}"] = np.mean(quantities)
    for endogenous, quantities in endogenous_lists.items():
        metrics[f"endogenous/avg_agent/{endogenous}"] = np.mean(quantities)
    metrics["util/avg_agent"] = np.mean(agent_utils)

    # Planner endowments and utility.
    for resource, quantity in self.world.planner.inventory.items():
        metrics[f"endow/p/{resource}"] = quantity
    metrics["util/p"] = self.curr_optimization_metrics[self.world.planner.idx]

    return metrics
def get_current_optimization_metrics(
    self, agents, isoelastic_eta=0.23, labor_exponent=2.0, labor_coefficient=0.1
):
    """
    Compute optimization metrics based on the current state. Used to compute reward.

    Args:
        agents: The (mobile) agents to score; the planner is scored as well.
        isoelastic_eta (float): Curvature of the isoelastic utility, in [0, 1].
        labor_exponent (float): Exponent (> 1) for the convex labor-cost term.
        labor_coefficient (float): Scale of the labor disutility term.

    Returns:
        curr_optimization_metric (dict): A dictionary of {agent.idx: metric}
            with an entry for each agent (including the planner) in the env.

    Raises:
        NotImplementedError: If the configured agent or planner reward type
            is not one of the supported options.
    """
    curr_optimization_metric = {}
    coin_endowments = np.array([agent.total_endowment("Coin") for agent in agents])
    pretax_incomes = np.array([agent.state["production"] for agent in agents])

    # Optimization metric for agents:
    for agent in agents:
        if self.agent_reward_type == "isoelastic_coin_minus_labor":
            assert 0.0 <= isoelastic_eta <= 1.0
            curr_optimization_metric[
                agent.idx
            ] = rewards.isoelastic_coin_minus_labor(
                coin_endowment=agent.total_endowment("Coin"),
                total_labor=agent.state["endogenous"]["Labor"],
                isoelastic_eta=isoelastic_eta,
                labor_coefficient=labor_coefficient,
            )
        elif self.agent_reward_type == "coin_minus_labor_cost":
            assert labor_exponent > 1.0
            curr_optimization_metric[agent.idx] = rewards.coin_minus_labor_cost(
                coin_endowment=agent.total_endowment("Coin"),
                total_labor=agent.state["endogenous"]["Labor"],
                labor_exponent=labor_exponent,
                labor_coefficient=labor_coefficient,
            )
        else:
            # Bug fix: an unknown agent reward type was previously skipped
            # silently, which caused a KeyError later when planner utilities
            # were gathered from curr_optimization_metric. Fail fast instead.
            raise NotImplementedError(
                "No valid agent reward selected: {}".format(self.agent_reward_type)
            )

    # Optimization metric for the planner:
    if self.planner_reward_type == "coin_eq_times_productivity":
        curr_optimization_metric[
            self.world.planner.idx
        ] = rewards.coin_eq_times_productivity(
            coin_endowments=coin_endowments,
            equality_weight=1 - self.mixing_weight_gini_vs_coin,
        )
    elif self.planner_reward_type == "inv_income_weighted_utility":
        curr_optimization_metric[
            self.world.planner.idx
        ] = rewards.inv_income_weighted_utility(
            # NOTE: the weights are pretax incomes (not coin endowments) —
            # this substitution is deliberate in the original implementation.
            coin_endowments=pretax_incomes,
            utilities=np.array(
                [curr_optimization_metric[agent.idx] for agent in agents]
            ),
        )
    else:
        # Raise with a descriptive message instead of print() + bare raise.
        raise NotImplementedError(
            "No valid planner reward selected: {}".format(self.planner_reward_type)
        )
    return curr_optimization_metric
| 43.829114 | 88 | 0.650758 |
56a953f724ccf18c0df8ed83e745cda93561ef9d | 5,647 | py | Python | sdk/aqualink_sdk/api/aggregate_api.py | aqualinkorg/aqualink-sdk | dad972d1dd5b74e8216bdc30521a8b76f7844733 | [
"MIT"
] | 1 | 2022-02-06T23:05:37.000Z | 2022-02-06T23:05:37.000Z | sdk/aqualink_sdk/api/aggregate_api.py | aqualinkorg/aqualink-sdk | dad972d1dd5b74e8216bdc30521a8b76f7844733 | [
"MIT"
] | 3 | 2022-02-07T06:13:31.000Z | 2022-03-11T12:43:39.000Z | sdk/aqualink_sdk/api/aggregate_api.py | aqualinkorg/aqualink-sdk | dad972d1dd5b74e8216bdc30521a8b76f7844733 | [
"MIT"
] | null | null | null | """
Aqualink API documentation
The Aqualink public API documentation # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from pprint import pprint
import pandas as pd
from aqualink_sdk.api_client import ApiClient
from aqualink_sdk.api import time_series_api
# Default per-metric aggregation functions, used when the caller does not
# supply an explicit mapping. Values are pandas GroupBy.agg function names.
default_mapping = {
    "min_temperature": "min",
    "precipitation": "sum",
    "alert_level": "max",
    "bottom_temperature": "mean",
}
def aggregate_data(
    timeseries_data, aggregate_frequency, aggregate_mapping=None
):
    """Aggregate raw time-series observations into coarser time buckets.

    Args:
        timeseries_data: API response whose ``_data_store`` maps
            metric name -> sonde type -> {"data": [{"timestamp", "value"}, ...]}.
        aggregate_frequency (str): pandas offset alias (e.g. "1H", "1D")
            defining the bucket size.
        aggregate_mapping (dict): {metric_name: agg function name} accepted
            by ``DataFrame.agg``. Defaults to the module-level
            ``default_mapping``.

    Returns:
        dict: Same nested layout as ``_data_store`` with per-bucket values.
    """
    # Avoid using the module-level dict as a mutable default argument.
    if aggregate_mapping is None:
        aggregate_mapping = default_mapping

    data = timeseries_data._data_store
    new_data = {}
    for metric_name, metric_data in data.items():
        for sonde_type_name, sonde_data in metric_data.items():
            dataframe = pd.DataFrame(sonde_data["data"])
            # Index on parsed timestamps so we can group by time buckets.
            dataframe["timestamp_index"] = pd.to_datetime(dataframe["timestamp"])
            dataframe.set_index("timestamp_index", inplace=True)
            aggregated = dataframe.groupby(
                pd.Grouper(freq=aggregate_frequency)
            ).agg({"value": aggregate_mapping[metric_name]})
            # Convert bucket timestamps back to the API's string format.
            aggregated["timestamp"] = aggregated.index.map(
                lambda ts: ts.isoformat(timespec="milliseconds").replace("+00:00", "Z")
            )
            aggregated.dropna(inplace=True)
            # Bug fix: accumulate per sonde type instead of rebinding the
            # whole metric entry, so a metric observed by several sonde
            # types keeps all of them (the original kept only the last).
            new_data.setdefault(metric_name, {})[sonde_type_name] = {
                "data": aggregated.to_dict("records")
            }
    return new_data
def aggregate_data_for_csv(
    timeseries_data, aggregate_frequency, aggregate_mapping=None
):
    """Aggregate time-series data into one wide DataFrame suitable for CSV.

    Each metric/sonde pair becomes a ``{metric}_{sonde}`` column; rows are
    the aggregation time buckets (outer-joined across columns) with a
    leading ``timestamp`` string column.

    Args:
        timeseries_data: API response whose ``_data_store`` maps
            metric name -> sonde type -> {"data": [{"timestamp", "value"}, ...]}.
        aggregate_frequency (str): pandas offset alias defining bucket size.
        aggregate_mapping (dict): {metric_name: agg function name}.
            Defaults to the module-level ``default_mapping``.

    Returns:
        pandas.DataFrame: Wide table with ``timestamp`` first.

    Raises:
        ValueError: If the response contains no data at all (the original
            code crashed with ``AttributeError`` on ``None`` in that case).
    """
    # Avoid using the module-level dict as a mutable default argument.
    if aggregate_mapping is None:
        aggregate_mapping = default_mapping

    data = timeseries_data._data_store
    combined = None
    for metric_name, metric_data in data.items():
        for sonde_type_name, sonde_data in metric_data.items():
            dataframe = pd.DataFrame(sonde_data["data"])
            dataframe["timestamp_index"] = pd.to_datetime(dataframe["timestamp"])
            dataframe.set_index("timestamp_index", inplace=True)
            aggregated = dataframe.groupby(
                pd.Grouper(freq=aggregate_frequency)
            ).agg({"value": aggregate_mapping[metric_name]})
            aggregated.dropna(inplace=True)
            # Unique column name per metric/sonde pair so every series can
            # share the same time-bucket rows.
            aggregated.rename(
                columns={"value": f"{metric_name}_{sonde_type_name}"}, inplace=True
            )
            combined = (
                aggregated
                if combined is None
                else combined.join(aggregated, how="outer")
            )
    if combined is None:
        raise ValueError("timeseries_data contains no data to aggregate")
    # Convert the bucket timestamps back to the API's string format.
    combined["timestamp"] = combined.index.map(
        lambda ts: ts.isoformat(timespec="milliseconds").replace("+00:00", "Z")
    )
    # Keep timestamp as the first column for the CSV layout.
    combined.insert(0, "timestamp", combined.pop("timestamp"))
    return combined
class AggregateApi(object):
    """Aggregation convenience API, built on top of the TimeSeriesApi."""

    def __init__(self, api_client=None):
        # Fall back to a default client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.ts_api = time_series_api.TimeSeriesApi(api_client)

    def get_aggregate_site_data(
        self,
        site_id,
        metrics,
        start,
        end,
        aggregate_frequency,
        aggregate_mapping,
        csv_output,
        **kwargs,
    ):
        """Returns specified aggregated time series data for a specified site  # noqa: E501

        Args:
            site_id (float): Site identifier.
            metrics ([str]): Metric names; each must appear in
                ``aggregate_mapping``.
            start (str): ISO start timestamp.
            end (str): ISO end timestamp.
            aggregate_frequency (str): pandas offset alias for bucketing.
            aggregate_mapping ({str: str}): Metric -> aggregation function.
            csv_output (str): If truthy, also write the result to this path.

        Returns:
            InlineResponse200

        Raises:
            ValueError: If frequency/mapping are missing or a requested
                metric has no aggregation mapping.
        """
        # Validate inputs BEFORE issuing the (potentially slow) API request;
        # the original validated only after fetching the data.
        if not (aggregate_frequency and aggregate_mapping):
            raise ValueError(
                "Missing parameter aggregate_frequency or aggregate_mapping"
            )
        # Every requested metric needs a mapping (resolves the old TODO).
        missing = [m for m in metrics if m not in aggregate_mapping]
        if missing:
            raise ValueError(
                "No aggregation mapping for metrics: {}".format(missing)
            )
        # Hourly pre-aggregation is incompatible with custom aggregation.
        kwargs["hourly"] = False
        api_response = self.ts_api.time_series_controller_find_site_data(
            site_id, metrics, start, end, async_req=False, **kwargs
        )
        if csv_output:
            dataframe = aggregate_data_for_csv(
                api_response, aggregate_frequency, aggregate_mapping
            )
            dataframe.to_csv(csv_output, encoding="utf-8", index=False)
        data = aggregate_data(api_response, aggregate_frequency, aggregate_mapping)
        api_response._data_store = data
        return api_response
| 33.023392 | 91 | 0.613069 |
dbfb70d19a4f0ac906a6f145ad3108b6f08b7a18 | 415 | py | Python | src/stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/types/links.py | Dataforsyningen/skraafoto_stac_public | fa9cb7e3516cfacf730d24bb56ae50e2e0e5bae1 | [
"MIT"
] | null | null | null | src/stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/types/links.py | Dataforsyningen/skraafoto_stac_public | fa9cb7e3516cfacf730d24bb56ae50e2e0e5bae1 | [
"MIT"
] | 15 | 2022-01-12T08:52:22.000Z | 2022-02-02T14:41:47.000Z | src/stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/types/links.py | Dataforsyningen/skraafoto_stac_public | fa9cb7e3516cfacf730d24bb56ae50e2e0e5bae1 | [
"MIT"
] | null | null | null | from typing import Dict
import attr
from stac_fastapi.types.links import BaseHrefBuilder
@attr.s
class ApiTokenHrefBuilder(BaseHrefBuilder):
    """Href builder that injects the API `token` query param into all hrefs."""

    # Access token forwarded on every generated link (skipped when falsy).
    token: str = attr.ib()

    def build(self, path: str = None, query: Dict[str, str] = None):
        """Delegate to the base builder, adding `token` to the query params."""
        params = query or {}
        if self.token:
            params["token"] = self.token
        return super().build(path=path, query=params)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.