repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bernardopires/django-tenant-schemas | examples/tenant_tutorial/tenant_tutorial/middleware.py | 13 | 1257 | from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import connection
from django.http import Http404
from tenant_schemas.utils import get_tenant_model, remove_www_and_dev, get_public_schema_name
from django.db import utils
class TenantTutorialMiddleware(object):
    """Old-style Django middleware that switches the DB connection to the
    tenant schema matching the request's hostname.

    Falls back to the public-schema URLconf when the tenant table cannot be
    queried (e.g. before migrations have run) or when browsing from
    localhost without a matching tenant.
    """

    def process_request(self, request):
        # Start from the public schema so the tenant lookup below queries
        # the shared tenant table.
        connection.set_schema_to_public()
        hostname_without_port = remove_www_and_dev(request.get_host().split(':')[0])
        TenantModel = get_tenant_model()
        try:
            request.tenant = TenantModel.objects.get(domain_url=hostname_without_port)
        except utils.DatabaseError:
            # Tenant table missing or unreadable: serve the public URLconf.
            request.urlconf = settings.PUBLIC_SCHEMA_URLCONF
            return
        except TenantModel.DoesNotExist:
            if hostname_without_port not in ("127.0.0.1", "localhost"):
                raise Http404
            # Local development host without a tenant record.
            request.urlconf = settings.PUBLIC_SCHEMA_URLCONF
            return
        connection.set_tenant(request.tenant)
        # Content type cache can leak across schemas; reset it per request.
        ContentType.objects.clear_cache()
        if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF') and request.tenant.schema_name == get_public_schema_name():
            request.urlconf = settings.PUBLIC_SCHEMA_URLCONF
| mit | d45f364b8fccc99493320dd6f8292453 | 38.28125 | 113 | 0.677009 | 4.218121 | false | false | false | false |
neherlab/treetime | treetime/aa_models.py | 1 | 4591 | from __future__ import division, print_function, absolute_import, absolute_import
import numpy as np
from .seq_utils import alphabets
def JTT92(mu=1.0):
    """Return a GTR substitution model over the 20-letter amino-acid
    alphabet ('aa_nogap') built from the JTT92 frequencies/rates below.

    :param mu: overall substitution rate used to scale the model.
    :return: a ``GTR`` instance with equilibrium frequencies ``pis`` and
        symmetrized rate matrix ``W``.
    """
    from .gtr import GTR
    # stationary concentrations:
    pis = np.array([
        0.07674789,
        0.05169087,
        0.04264509,
        0.05154407,
        0.01980301,
        0.04075195,
        0.06182989,
        0.07315199,
        0.02294399,
        0.05376110,
        0.09190390,
        0.05867583,
        0.02382594,
        0.04012589,
        0.05090097,
        0.06876503,
        0.05856501,
        0.01426057,
        0.03210196,
        0.06600504])

    # attempt matrix (FIXME)
    # NOTE(review): presumably the JTT92 rate matrix with diagonal entries
    # set to minus the row sums — confirm against the published tables.
    Q = np.array([
        [-1.247831,0.044229,0.041179,0.061769,0.042704,0.043467,0.08007,0.136501,0.02059,0.027453,0.022877,0.02669,0.041179,0.011439,0.14794,0.288253,0.362223,0.006863,0.008388,0.227247 ],
        [0.029789,-1.025965,0.023112,0.008218,0.058038,0.159218,0.014895,0.070364,0.168463,0.011299,0.019517,0.33179,0.022599,0.002568,0.038007,0.051874,0.032871,0.064714,0.010272,0.008731 ],
        [0.022881,0.019068,-1.280568,0.223727,0.014407,0.03644,0.024576,0.034322,0.165676,0.019915,0.005085,0.11144,0.012712,0.004237,0.006356,0.213134,0.098304,0.00339,0.029661,0.00678 ],
        [0.041484,0.008194,0.270413,-1.044903,0.005121,0.025095,0.392816,0.066579,0.05736,0.005634,0.003585,0.013316,0.007682,0.002049,0.007682,0.030217,0.019462,0.002049,0.023559,0.015877 ],
        [0.011019,0.022234,0.00669,0.001968,-0.56571,0.001771,0.000984,0.011609,0.013577,0.003345,0.004526,0.001377,0.0061,0.015348,0.002755,0.043878,0.008264,0.022628,0.041124,0.012199 ],
        [0.02308,0.125524,0.034823,0.019841,0.003644,-1.04415,0.130788,0.010528,0.241735,0.003644,0.029154,0.118235,0.017411,0.00162,0.066406,0.021461,0.020651,0.007288,0.009718,0.008098 ],
        [0.064507,0.017816,0.035632,0.471205,0.003072,0.198435,-0.944343,0.073107,0.015973,0.007372,0.005529,0.111197,0.011058,0.003072,0.011058,0.01843,0.019659,0.006143,0.0043,0.027646 ],
        [0.130105,0.099578,0.058874,0.09449,0.042884,0.018898,0.086495,-0.647831,0.016717,0.004361,0.004361,0.019625,0.010176,0.003634,0.017444,0.146096,0.023986,0.039976,0.005815,0.034162 ],
        [0.006155,0.074775,0.089138,0.025533,0.01573,0.1361,0.005927,0.005243,-1.135695,0.003648,0.012767,0.010259,0.007523,0.009119,0.026217,0.016642,0.010487,0.001824,0.130629,0.002508 ],
        [0.01923,0.011752,0.025106,0.005876,0.009081,0.004808,0.00641,0.003205,0.008547,-1.273602,0.122326,0.011218,0.25587,0.047542,0.005342,0.021367,0.130873,0.004808,0.017094,0.513342 ],
        [0.027395,0.0347,0.010958,0.006392,0.021003,0.065748,0.008219,0.005479,0.051137,0.209115,-0.668139,0.012784,0.354309,0.226465,0.093143,0.053877,0.022829,0.047485,0.021916,0.16437 ],
        [0.020405,0.376625,0.153332,0.015158,0.004081,0.170239,0.105525,0.015741,0.026235,0.012243,0.008162,-0.900734,0.037896,0.002332,0.012243,0.027401,0.06005,0.00583,0.004664,0.008162 ],
        [0.012784,0.010416,0.007102,0.003551,0.007339,0.01018,0.004261,0.003314,0.007812,0.113397,0.091854,0.015388,-1.182051,0.01018,0.003788,0.006865,0.053503,0.005682,0.004261,0.076466 ],
        [0.00598,0.001993,0.003987,0.001595,0.031098,0.001595,0.001993,0.001993,0.015948,0.035484,0.098877,0.001595,0.017144,-0.637182,0.006778,0.03668,0.004784,0.021131,0.213701,0.024719 ],
        [0.098117,0.037426,0.007586,0.007586,0.007081,0.082944,0.009104,0.012138,0.058162,0.005058,0.051587,0.010621,0.008092,0.008598,-0.727675,0.144141,0.059679,0.003035,0.005058,0.011632 ],
        [0.258271,0.069009,0.343678,0.040312,0.152366,0.036213,0.020498,0.137334,0.049878,0.02733,0.040312,0.032113,0.019814,0.06286,0.194728,-1.447863,0.325913,0.023914,0.043045,0.025964 ],
        [0.276406,0.037242,0.135003,0.022112,0.02444,0.029677,0.018621,0.019203,0.026768,0.142567,0.014548,0.059936,0.131511,0.006983,0.068665,0.27757,-1.335389,0.006983,0.01222,0.065174 ],
        [0.001275,0.017854,0.001134,0.000567,0.016295,0.002551,0.001417,0.007793,0.001134,0.001275,0.007368,0.001417,0.003401,0.00751,0.00085,0.004959,0.0017,-0.312785,0.010061,0.003542 ],
        [0.003509,0.006379,0.022328,0.014673,0.066664,0.007655,0.002233,0.002552,0.182769,0.010207,0.007655,0.002552,0.005741,0.170967,0.00319,0.020095,0.006698,0.022647,-0.605978,0.005103 ],
        [0.195438,0.011149,0.010493,0.020331,0.040662,0.013117,0.029512,0.030824,0.007214,0.630254,0.11805,0.009182,0.211834,0.040662,0.015084,0.024922,0.073453,0.016396,0.010493,-1.241722]
        ])
    # Symmetrize the rates: W_ij = Q_ij * sqrt(pi_j / pi_i), as required by
    # the reversible GTR parameterization used by assign_rates().
    Spis = np.sqrt(pis[None, :] / pis[:,None])
    W = Q * Spis

    gtr = GTR(alphabet=alphabets['aa_nogap'])
    gtr.assign_rates(mu=mu, pi=pis, W=W)
    return gtr
| mit | 563da98f80ce796da6e942ecd3d08a2f | 74.262295 | 188 | 0.701808 | 1.823272 | false | false | false | false |
neherlab/treetime | treetime/node_interpolator.py | 1 | 14693 | import numpy as np
from . import config as ttconf
from .distribution import Distribution
from .utils import clip
from .config import FFT_FWHM_GRID_SIZE
def _convolution_integrand(t_val, f, g,
                           inverse_time=None, return_log=False):
    '''
    Evaluates int_tau f(t+tau)*g(tau) or int_tau f(t-tau)g(tau) if inverse time is TRUE

    Parameters
    -----------

     t_val : double
        Time point

     f : Interpolation object
        First multiplier in convolution

     g : Interpolation object
        Second multiplier in convolution

     inverse_time : bool, None
        time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise,
        f(t+tau)*g(tau)

     return_log : bool
        If True, the logarithm will be returned


    Returns
    -------

     FG : Distribution
        The function to be integrated as Distribution object (interpolator).
        If the supports of f and g do not overlap at t_val, a scalar is
        returned instead: ttconf.BIG_NUMBER (log scale) or 0.0.
    '''
    if inverse_time is None:
        raise Exception("Inverse time argument must be set!")

    # determine integration boundaries (the overlap of both supports):
    if inverse_time:
        ## tau>g.xmin and t-tau<f.xmax
        tau_min = max(t_val - f.xmax, g.xmin)
        ## tau<g.xmax and t-tau>f.xmin
        tau_max = min(t_val - f.xmin, g.xmax)
    else:
        ## tau>g.xmin and t+tau>f.xmin
        tau_min = max(f.xmin-t_val, g.xmin)
        ## tau<g.xmax and t+tau<f.xmax
        tau_max = min(f.xmax-t_val, g.xmax)
        #print(tau_min, tau_max)

    if tau_max <= tau_min:
        if return_log:
            return ttconf.BIG_NUMBER
        else:
            return 0.0  # functions do not overlap

    else:
        # create the tau-grid for the interpolation object in the overlap region;
        # combine the grids of both inputs so no feature of either is missed
        if inverse_time:
            tau = np.concatenate((g.x, t_val-f.x,[tau_min,tau_max]))
        else:
            tau = np.concatenate((g.x, f.x-t_val,[tau_min,tau_max]))
        tau = np.unique(clip(tau, tau_min-ttconf.TINY_NUMBER, tau_max+ttconf.TINY_NUMBER))
        if len(tau)<10:
            # fall back to a regular grid when the overlap contains too few points
            tau = np.linspace(tau_min, tau_max, 10)

        if inverse_time: # add negative logarithms
            tnode = t_val - tau
            fg = f(tnode) + g(tau)
        else:
            fg = f(t_val + tau) + g(tau)

        # create the interpolation object on this grid
        FG = Distribution(tau, fg, is_log=True, min_width = np.max([f.min_width, g.min_width]),
                          kind='linear', assume_sorted=True)
        return FG
def _max_of_integrand(t_val, f, g, inverse_time=None, return_log=False):
    '''
    Evaluates max_tau f(t+tau)*g(tau) or max_tau f(t-tau)g(tau) if inverse time is TRUE

    Parameters
    -----------

     t_val : double
        Time point

     f : Interpolation object
        First multiplier in convolution

     g : Interpolation object
        Second multiplier in convolution

     inverse_time : bool, None
        time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise,
        f(t+tau)*g(tau)

     return_log : bool
        If True, the logarithm will be returned

    Returns
    -------

     res : list
        [peak value of the integrand, tau position of the peak]
    '''
    # the integrand is always built on a log scale here
    integrand = _convolution_integrand(t_val, f, g, inverse_time, return_log=True)

    if integrand == ttconf.BIG_NUMBER:
        # supports do not overlap: no maximum to report
        peak_val, peak_pos = ttconf.BIG_NUMBER, 0
    else:
        # negative-log representation: the minimum of y is the peak
        idx = integrand.y.argmin()
        peak_pos = integrand.x[idx]
        peak_val = integrand.y.min()

    if return_log:
        return [peak_val, peak_pos]
    return [np.exp(peak_val), peak_pos]
def _evaluate_convolution(t_val, f, g, n_integral = 100, inverse_time=None, return_log=False):
    """
    Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau at the single
    time point t_val, using n_integral points for the numerical quadrature.
    Returns a (value, -1) tuple; the -1 placeholder mirrors the layout of
    _max_of_integrand's return value.
    """
    integrand = _convolution_integrand(t_val, f, g, inverse_time, return_log)

    # When the supports of f and g do not overlap, the integrand comes back
    # as a scalar sentinel instead of a Distribution.
    no_overlap = (integrand == ttconf.BIG_NUMBER) if return_log else (integrand == 0.0)
    if no_overlap:
        neg_log = ttconf.BIG_NUMBER  # we integrate log functions
    else:
        neg_log = -integrand.integrate(a=integrand.xmin, b=integrand.xmax,
                                       n=n_integral, return_log=True)

    if return_log:
        return neg_log, -1
    return np.exp(-neg_log), -1
class NodeInterpolator (Distribution):
    """
    Node's position distribution function. This class extends the distribution
    class and implements the convolution constructors (a direct quadrature
    version and an FFT-based version).
    """

    @classmethod
    def convolve_fft(cls, node_interp, branch_interp, fft_grid_size=FFT_FWHM_GRID_SIZE, inverse_time=True):
        """Convolve a node distribution with a branch-length distribution via FFT.

        The grid spacing ``dt`` is derived from the narrower of the two input
        distributions; outside the numerically trustworthy FFT range the tails
        of the result are extrapolated exponentially.

        Raises
        ------
        ValueError
            If the branch distribution is much narrower than the node
            distribution (unexpected regime).
        ArithmeticError
            If the FFT produced too few valid grid points to extrapolate.
            (Previously this dropped into an interactive ipdb session — a
            leftover debugging breakpoint that would hang production runs.)
        """
        dt = max(branch_interp.one_mutation*0.005, min(node_interp.fwhm, branch_interp.fwhm)/fft_grid_size)
        ratio = node_interp.fwhm/branch_interp.fwhm

        if ratio < 1/fft_grid_size and 4*dt > node_interp.fwhm:
            ## node distribution is much narrower than the branch distribution, proceed as if node distribution is
            ## a delta distribution
            log_scale_node_interp = node_interp.integrate(return_log=True, a=node_interp.xmin,b=node_interp.xmax,n=max(100, len(node_interp.x))) #probability of node distribution
            if inverse_time:
                x = branch_interp.x + node_interp._peak_pos
                dist = Distribution(x, branch_interp(x - node_interp._peak_pos) - log_scale_node_interp, min_width=max(node_interp.min_width, branch_interp.min_width), is_log=True)
            else:
                x = - branch_interp.x + node_interp._peak_pos
                # NOTE(review): branch_interp is evaluated on its own grid here,
                # not on a shifted grid as in the inverse_time branch — confirm
                # this asymmetry is intended.
                dist = Distribution(x, branch_interp(branch_interp.x) - log_scale_node_interp, min_width=max(node_interp.min_width, branch_interp.min_width), is_log=True)
            return dist
        elif ratio > fft_grid_size and 4*dt > branch_interp.fwhm:
            raise ValueError("ERROR: Unexpected behavior: branch distribution is much narrower than the node distribution.")
        else:
            b_effsupport = branch_interp.effective_support
            n_effsupport = node_interp.effective_support
            tmax = 2*max(b_effsupport[1]-b_effsupport[0], n_effsupport[1]-n_effsupport[0])

            Tb = np.arange(b_effsupport[0], b_effsupport[0] + tmax + dt, dt)
            if inverse_time:
                Tn = np.arange(n_effsupport[0], n_effsupport[0] + tmax + dt, dt)
                Tmin = node_interp.xmin
                Tmax = ttconf.MAX_BRANCH_LENGTH
            else:
                Tn = np.arange(n_effsupport[1] - tmax, n_effsupport[1] + dt, dt)
                Tmin = -ttconf.MAX_BRANCH_LENGTH
                Tmax = node_interp.xmax

            raw_len = len(Tb)
            fft_len = 2*raw_len

            fftb = branch_interp.fft(Tb, n=fft_len)
            fftn = node_interp.fft(Tn, n=fft_len, inverse_time=inverse_time)

            if inverse_time:
                fft_res = np.fft.irfft(fftb*fftn, fft_len)[:raw_len]
                Tres = Tn + Tb[0]
            else:
                fft_res = np.fft.irfft(fftb*fftn, fft_len)[::-1]
                fft_res = fft_res[raw_len:]
                Tres = Tn - Tb[0]

            # determine region in which we can trust the FFT convolution and avoid
            # inaccuracies due to machine precision. 1e-13 seems robust
            ind = fft_res>fft_res.max()*1e-13
            res = -np.log(fft_res[ind]) + branch_interp.peak_val + node_interp.peak_val - np.log(dt)
            Tres_cropped = Tres[ind]

            # extrapolate the tails exponentially: use margin last data points
            margin = np.minimum(3, Tres_cropped.shape[0]//3)
            if margin<1 or len(res)==0:
                # fail loudly instead of stalling in a debugger (the original
                # code executed `import ipdb; ipdb.set_trace()` here)
                raise ArithmeticError("NodeInterpolator.convolve_fft: FFT convolution yielded "
                                      "too few valid grid points to extrapolate the tails.")

            left_slope = (res[margin]-res[0])/(Tres_cropped[margin]-Tres_cropped[0])
            right_slope = (res[-1]-res[-margin-1])/(Tres_cropped[-1]-Tres_cropped[-margin-1])

            # only extrapolate on the left when the slope is negative and we are not on the boundary
            if Tmin<Tres_cropped[0] and left_slope<0:
                Tleft = np.linspace(Tmin, Tres_cropped[0],10)[:-1]
                res_left = res[0] + left_slope*(Tleft - Tres_cropped[0])
            else:
                Tleft, res_left = [], []

            # only extrapolate on the right when the slope is positive and we are not on the boundary
            if Tres_cropped[-1]<Tmax and right_slope>0:
                Tright = np.linspace(Tres_cropped[-1], Tmax,10)[1:]
                res_right = res[-1] + right_slope*(Tright - Tres_cropped[-1])
            else: #otherwise
                Tright, res_right = [], []

            # instantiate the new interpolation object and return
            return cls(np.concatenate((Tleft,Tres_cropped,Tright)),
                       np.concatenate((res_left, res, res_right)),
                       is_log=True, kind='linear', assume_sorted=True)

    @classmethod
    def convolve(cls, node_interp, branch_interp, max_or_integral='integral',
                 n_grid_points = ttconf.NODE_GRID_SIZE, n_integral=ttconf.N_INTEGRAL,
                 inverse_time=True, rel_tol=0.05, yc=10):
        r'''
        calculate H(t) = \int_tau f(t-tau)g(tau) if inverse_time=True
                  H(t) = \int_tau f(t+tau)g(tau) if inverse_time=False

        This function determines the time points of the grid of the result to
        ensure an accurate approximation.
        '''
        if max_or_integral not in ['max', 'integral']:
            raise Exception("Max_or_integral expected to be 'max' or 'integral', got "
                            + str(max_or_integral) + " instead.")

        def conv_in_point(time_point):
            # evaluate either the quadrature or the max of the integrand at one t
            if max_or_integral == 'integral': # compute integral of the convolution
                return _evaluate_convolution(time_point, node_interp, branch_interp,
                                             n_integral=n_integral, return_log=True,
                                             inverse_time = inverse_time)
            else: # compute max of the convolution
                return _max_of_integrand(time_point, node_interp, branch_interp,
                                         return_log=True, inverse_time = inverse_time)

        # estimate peak and width
        joint_fwhm = (node_interp.fwhm + branch_interp.fwhm)
        min_fwhm = min(node_interp.fwhm, branch_interp.fwhm)
        # determine support of the resulting convolution
        # in order to be positive, the flipped support of f, shifted by t and g need to overlap
        if inverse_time:
            new_peak_pos = node_interp.peak_pos + branch_interp.peak_pos
            tmin = node_interp.xmin+branch_interp.xmin
            tmax = node_interp.xmax+branch_interp.xmax
        else:
            new_peak_pos = node_interp.peak_pos - branch_interp.peak_pos
            tmin = node_interp.xmin - branch_interp.xmax
            tmax = node_interp.xmax - branch_interp.xmin

        # make initial node grid consisting of linearly spaced points around
        # the center and quadratically spaced points at either end
        n = n_grid_points//3
        center_width = 3*joint_fwhm
        grid_center = new_peak_pos + np.linspace(-1, 1, n)*center_width

        # add the right and left grid if it is needed
        right_range = (tmax - grid_center[-1])
        if right_range>4*center_width:
            grid_right = grid_center[-1] + right_range*(np.linspace(0, 1, n)**2.0)
        elif right_range>0: # use linear grid the right_range is comparable to center_width
            grid_right = grid_center[-1] + right_range*np.linspace(0,1, int(min(n,1+0.5*n*right_range/center_width)))
        else:
            grid_right =[]

        left_range = grid_center[0]-tmin
        if left_range>4*center_width:
            grid_left = tmin + left_range*(np.linspace(0, 1, n)**2.0)
        elif left_range>0:
            grid_left = tmin + left_range*np.linspace(0,1, int(min(n,1+0.5*n*left_range/center_width)))
        else:
            grid_left =[]

        # extra-fine grids at the very boundaries of the support
        if tmin>-1:
            grid_zero_left = tmin + (tmax-tmin)*np.linspace(0,0.01,11)**2
        else:
            grid_zero_left = [tmin]
        if tmax<1:
            grid_zero_right = tmax - (tmax-tmin)*np.linspace(0,0.01,11)**2
        else:
            grid_zero_right = [tmax]

        # make grid and calculate convolution
        t_grid_0 = np.unique(np.concatenate([grid_zero_left, grid_left[:-1], grid_center, grid_right[1:], grid_zero_right]))
        t_grid_0 = t_grid_0[(t_grid_0 > tmin-ttconf.TINY_NUMBER) & (t_grid_0 < tmax+ttconf.TINY_NUMBER)]

        # res_0 - the values of the convolution (integral or max)
        # t_0   - the value, at which the res_0 achieves maximum
        #         (when determining the maximum of the integrand, otherwise meaningless)
        res_0, t_0 = np.array([conv_in_point(t_val) for t_val in t_grid_0]).T

        # refine grid as necessary and add new points
        # calculate interpolation error at all internal points [2:-2] bc end points are sometime off scale
        interp_error = np.abs(res_0[3:-1]+res_0[1:-3]-2*res_0[2:-2])
        # determine the number of extra points needed, criterion depends on distance from peak dy
        dy = (res_0[2:-2]-res_0.min())
        dx = np.diff(t_grid_0)
        refine_factor = np.minimum(np.minimum(np.array(np.floor(np.sqrt(interp_error/(rel_tol*(1+(dy/yc)**4)))), dtype=int),
                                              np.array(100*(dx[1:-2]+dx[2:-1])/min_fwhm, dtype=int)), 10)

        insert_point_idx = np.zeros(interp_error.shape[0]+1, dtype=int)
        insert_point_idx[1:] = refine_factor
        insert_point_idx[:-1] += refine_factor
        # add additional points if there are any to add
        if np.sum(insert_point_idx):
            add_x = np.concatenate([np.linspace(t1,t2,n+2)[1:-1] for t1,t2,n in
                                    zip(t_grid_0[1:-2], t_grid_0[2:-1], insert_point_idx) if n>0])
            # calculate convolution at these points
            add_y, add_t = np.array([conv_in_point(t_val) for t_val in add_x]).T

            t_grid_0 = np.concatenate((t_grid_0, add_x))
            res_0 = np.concatenate ((res_0, add_y))
            t_0 = np.concatenate ((t_0, add_t))

        # instantiate the new interpolation object and return
        res_y = cls(t_grid_0, res_0, is_log=True, kind='linear')

        # the interpolation object, which is used to store the value of the
        # grid, which maximizes the convolution (for 'max' option),
        # or flat -1 distribution (for 'integral' option)
        # this grid is the optimal branch length
        res_t = Distribution(t_grid_0, t_0, is_log=True,
                             min_width=node_interp.min_width, kind='linear')

        return res_y, res_t
| mit | 90b33addf019e918f22af8d3c76d1316 | 40.041899 | 182 | 0.580957 | 3.436959 | false | false | false | false |
alerta/alerta-contrib | plugins/dingtalk/alerta_ding.py | 1 | 1353 | from dingtalkchatbot.chatbot import DingtalkChatbot
import time
import json
import sys
import os
import logging
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
LOG = logging.getLogger('alerta.plugins.ding')

# Plugin settings: environment variables take precedence over the app config.
DING_WEBHOOK_URL = os.environ.get('DING_WEBHOOK_URL') or app.config.get('DING_WEBHOOK_URL')
DASHBOARD_URL = os.environ.get('DASHBOARD_URL') or app.config.get('DASHBOARD_URL', '')
class ServiceIntegration(PluginBase):
    """Alerta plugin that forwards received alerts to a DingTalk group robot."""

    def __init__(self, name=None):
        super().__init__(name)

    def pre_receive(self, alert):
        # Incoming alerts are passed through unmodified.
        return alert

    def _prepare_payload(self, alert):
        """Render *alert* as a DingTalk markdown text snippet.

        Fixes two defects of the original implementation:
        * the format string began with '{}**' (unbalanced markdown bold);
          it now reads '**{}**' so the severity renders bold, and
        * a LOG.debug call was placed after the return statement and could
          never execute; it now runs before returning.
        """
        payload = "**{}** **{}**\n`{}` ```{}```".format(
            alert.severity,
            alert.environment,
            alert.event,
            alert.value,
        )
        LOG.debug('DingTalk: %s', alert)
        return payload

    def post_receive(self, alert):
        # Do not re-notify for repeated occurrences of the same alert.
        if alert.repeat:
            return
        ding = DingtalkChatbot(DING_WEBHOOK_URL)
        message = self._prepare_payload(alert)
        LOG.debug('DingTalk: %s', message)
        ding.send_text(msg='Received Alert {}'.format(message))

    def status_change(self, alert, status, text):
        # Status transitions are not forwarded to DingTalk.
        return
| mit | 054f7eb30fc50caca5b9dd0dba20b00c | 21.55 | 91 | 0.625277 | 3.442748 | false | false | false | false |
alerta/alerta-contrib | plugins/dingtalk/dingtalkchatbot/chatbot.py | 1 | 14512 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
# create time: 07/01/2018 11:35
__author__ = 'Devin -- http://zhangchuzhao.site'
import json
import time
import logging
import requests
try:
    # json.decoder.JSONDecodeError exists on Python >= 3.5; older versions
    # raised ValueError (its base class) instead, so fall back to that.
    JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
    JSONDecodeError = ValueError
def is_not_null_and_blank_str(content):
    """Return True when *content* is a non-empty, non-whitespace-only string.

    >>> is_not_null_and_blank_str('')
    False
    >>> is_not_null_and_blank_str(' ')
    False
    >>> is_not_null_and_blank_str('  ')
    False
    >>> is_not_null_and_blank_str('123')
    True
    """
    return bool(content and content.strip())
class DingtalkChatbot(object):
    """
    DingTalk group custom robot (each robot may send at most 20 messages per
    minute). Supports the text, link, markdown, ActionCard and FeedCard
    message types.
    """

    def __init__(self, webhook):
        """
        Initialize the robot.
        :param webhook: webhook URL of the DingTalk group custom robot
        """
        super(DingtalkChatbot, self).__init__()
        self.headers = {'Content-Type': 'application/json; charset=utf-8'}
        self.webhook = webhook
        self.times = 0                  # number of messages sent (rate limiting)
        self.start_time = time.time()   # start of the current rate-limit window

    def send_text(self, msg, is_at_all=False, at_mobiles=[], at_dingtalk_ids=[]):
        """
        Send a 'text' type message.
        :param msg: message content
        :param is_at_all: True to @ everyone in the group (optional)
        :param at_mobiles: mobile numbers of members to @ (optional)
        :param at_dingtalk_ids: dingtalkIds of members to @ (optional)
        :return: the API response
        """
        data = {"msgtype": "text", "at": {}}
        if is_not_null_and_blank_str(msg):
            data["text"] = {"content": msg}
        else:
            logging.error("text类型,消息内容不能为空!")
            raise ValueError("text类型,消息内容不能为空!")

        if is_at_all:
            data["at"]["isAtAll"] = is_at_all

        if at_mobiles:
            at_mobiles = list(map(str, at_mobiles))
            data["at"]["atMobiles"] = at_mobiles

        if at_dingtalk_ids:
            at_dingtalk_ids = list(map(str, at_dingtalk_ids))
            data["at"]["atDingtalkIds"] = at_dingtalk_ids

        logging.debug('text类型:%s' % data)
        return self.post(data)

    def send_image(self, pic_url):
        """
        Send an 'image' type message (sticker).
        :param pic_url: URL of the picture
        :return: the API response
        """
        if is_not_null_and_blank_str(pic_url):
            data = {
                "msgtype": "image",
                "image": {
                    "picURL": pic_url
                }
            }
            logging.debug('image类型:%s' % data)
            return self.post(data)
        else:
            logging.error("image类型中图片链接不能为空!")
            raise ValueError("image类型中图片链接不能为空!")

    def send_link(self, title, text, message_url, pic_url=''):
        """
        Send a 'link' type message.
        :param title: message title
        :param text: message content (truncated automatically when too long)
        :param message_url: URL opened when the message is clicked
        :param pic_url: picture URL (optional)
        :return: the API response
        """
        if is_not_null_and_blank_str(title) and is_not_null_and_blank_str(text) and is_not_null_and_blank_str(message_url):
            data = {
                "msgtype": "link",
                "link": {
                    "text": text,
                    "title": title,
                    "picUrl": pic_url,
                    "messageUrl": message_url
                }
            }
            logging.debug('link类型:%s' % data)
            return self.post(data)
        else:
            logging.error("link类型中消息标题或内容或链接不能为空!")
            raise ValueError("link类型中消息标题或内容或链接不能为空!")

    def send_markdown(self, title, text, is_at_all=False, at_mobiles=[], at_dingtalk_ids=[]):
        """
        Send a 'markdown' type message.
        :param title: title shown in the conversation preview
        :param text: markdown-formatted message content
        :param is_at_all: True to @ everyone in the group (optional)
        :param at_mobiles: mobile numbers to @ (the text should contain the @<mobile>) (optional)
        :param at_dingtalk_ids: dingtalkIds of members to @ (optional)
        :return: the API response
        """
        if is_not_null_and_blank_str(title) and is_not_null_and_blank_str(text):
            data = {
                "msgtype": "markdown",
                "markdown": {
                    "title": title,
                    "text": text
                },
                "at": {}
            }
            if is_at_all:
                data["at"]["isAtAll"] = is_at_all

            if at_mobiles:
                at_mobiles = list(map(str, at_mobiles))
                data["at"]["atMobiles"] = at_mobiles

            if at_dingtalk_ids:
                at_dingtalk_ids = list(map(str, at_dingtalk_ids))
                data["at"]["atDingtalkIds"] = at_dingtalk_ids

            logging.debug("markdown类型:%s" % data)
            return self.post(data)
        else:
            logging.error("markdown类型中消息标题或内容不能为空!")
            raise ValueError("markdown类型中消息标题或内容不能为空!")

    def send_action_card(self, action_card):
        """
        Send an 'ActionCard' type message.
        :param action_card: an ActionCard instance (whole-card or per-button jump)
        :return: the API response
        """
        if isinstance(action_card, ActionCard):
            data = action_card.get_data()
            logging.debug("ActionCard类型:%s" % data)
            return self.post(data)
        else:
            logging.error("ActionCard类型:传入的实例类型不正确!")
            raise TypeError("ActionCard类型:传入的实例类型不正确!")

    def send_feed_card(self, links):
        """
        Send a 'FeedCard' type message.
        :param links: the card entries (list of FeedLink/CardItem instances or dicts)
        :return: the API response
        """
        link_data_list = []
        for link in links:
            if isinstance(link, FeedLink) or isinstance(link, CardItem):
                link_data_list.append(link.get_data())
        if link_data_list:
            # compatibility: accept (1) FeedLink/CardItem instances, (2) raw dicts
            links = link_data_list
        data = {"msgtype": "feedCard", "feedCard": {"links": links}}
        logging.debug("FeedCard类型:%s" % data)
        return self.post(data)

    def post(self, data):
        """
        POST the message (UTF-8 encoded JSON) to the webhook.
        :param data: message payload (dict)
        :return: the API response (dict)
        """
        self.times += 1
        # DingTalk limits each robot to 20 messages per minute; sleep when the
        # limit is hit inside the current one-minute window.
        if self.times % 20 == 0:
            if time.time() - self.start_time < 60:
                logging.debug('钉钉官方限制每个机器人每分钟最多发送20条,当前消息发送频率已达到限制条件,休眠一分钟')
                time.sleep(60)
            self.start_time = time.time()

        post_data = json.dumps(data)
        try:
            response = requests.post(self.webhook, headers=self.headers, data=post_data)
        except requests.exceptions.HTTPError as exc:
            logging.error("消息发送失败, HTTP error: %d, reason: %s" % (exc.response.status_code, exc.response.reason))
            raise
        except requests.exceptions.ConnectionError:
            logging.error("消息发送失败,HTTP connection error!")
            raise
        except requests.exceptions.Timeout:
            logging.error("消息发送失败,Timeout error!")
            raise
        except requests.exceptions.RequestException:
            logging.error("消息发送失败, Request Exception!")
            raise
        else:
            try:
                result = response.json()
            except JSONDecodeError:
                logging.error("服务器响应异常,状态码:%s,响应内容:%s" % (response.status_code, response.text))
                return {'errcode': 500, 'errmsg': '服务器响应异常'}
            else:
                logging.debug('发送结果:%s' % result)
                if result['errcode']:
                    # notify the group about the failure itself
                    error_data = {"msgtype": "text", "text": {"content": "钉钉机器人消息发送失败,原因:%s" % result['errmsg']}, "at": {"isAtAll": True}}
                    logging.error("消息发送失败,自动通知:%s" % error_data)
                    requests.post(self.webhook, headers=self.headers, data=json.dumps(error_data))
                return result
class ActionCard(object):
    """
    'ActionCard' message format (whole-card jump or per-button jump).
    """

    def __init__(self, title, text, btns, btn_orientation=0, hide_avatar=0):
        """
        Initialize the ActionCard.
        :param title: title shown in the conversation preview
        :param text: markdown-formatted message content
        :param btns: button list: (1) one button -> whole-card-jump ActionCard;
                     (2) more than one -> per-button-jump ActionCard
        :param btn_orientation: 0 = buttons stacked vertically, 1 = horizontally (optional)
        :param hide_avatar: 0 = show the sender avatar, 1 = hide it (optional)
        """
        super(ActionCard, self).__init__()
        self.title = title
        self.text = text
        self.btn_orientation = btn_orientation
        self.hide_avatar = hide_avatar
        btn_list = []
        for btn in btns:
            if isinstance(btn, CardItem):
                btn_list.append(btn.get_data())
        if btn_list:
            btns = btn_list  # compatibility: accept CardItem instances or raw dicts
        self.btns = btns

    def get_data(self):
        """
        Build the ActionCard payload dict.
        :return: the ActionCard payload for the DingTalk API
        """
        if is_not_null_and_blank_str(self.title) and is_not_null_and_blank_str(self.text) and len(self.btns):
            if len(self.btns) == 1:
                # whole-card-jump ActionCard
                data = {
                    "msgtype": "actionCard",
                    "actionCard": {
                        "title": self.title,
                        "text": self.text,
                        "hideAvatar": self.hide_avatar,
                        "btnOrientation": self.btn_orientation,
                        "singleTitle": self.btns[0]["title"],
                        "singleURL": self.btns[0]["actionURL"]
                    }
                }
                return data
            else:
                # per-button-jump ActionCard
                data = {
                    "msgtype": "actionCard",
                    "actionCard": {
                        "title": self.title,
                        "text": self.text,
                        "hideAvatar": self.hide_avatar,
                        "btnOrientation": self.btn_orientation,
                        "btns": self.btns
                    }
                }
                return data
        else:
            logging.error("ActionCard类型,消息标题或内容或按钮数量不能为空!")
            raise ValueError("ActionCard类型,消息标题或内容或按钮数量不能为空!")
class FeedLink(object):
    """
    A single entry of a 'FeedCard' type message.
    """

    def __init__(self, title, message_url, pic_url):
        """
        Initialize one feed entry.
        :param title: entry text
        :param message_url: URL opened when the entry is clicked
        :param pic_url: URL opened when the entry's picture is clicked
        """
        super(FeedLink, self).__init__()
        self.title = title
        self.message_url = message_url
        self.pic_url = pic_url

    def get_data(self):
        """
        Build this entry's payload dict.
        :return: the entry data for the DingTalk API
        """
        fields = (self.title, self.message_url, self.pic_url)
        if not all(is_not_null_and_blank_str(field) for field in fields):
            logging.error("FeedCard类型单条消息文本、消息链接、图片链接不能为空!")
            raise ValueError("FeedCard类型单条消息文本、消息链接、图片链接不能为空!")
        return {
            "title": self.title,
            "messageURL": self.message_url,
            "picURL": self.pic_url
        }
class CardItem(object):
    """
    Sub-control shared by ActionCard (buttons) and FeedCard (entries).
    """

    def __init__(self, title, url, pic_url=None):
        """
        Initialize the CardItem.
        @param title: name of the sub-control
        @param url: URL triggered when the sub-control is clicked
        @param pic_url: picture URL for FeedCard; not used by ActionCard, so it
                        defaults to None
        """
        super(CardItem, self).__init__()
        self.title = title
        self.url = url
        self.pic_url = pic_url

    def get_data(self):
        """
        Build this sub-control's payload dict.
        @return: the sub-control data for the DingTalk API
        """
        has_title = is_not_null_and_blank_str(self.title)
        has_url = is_not_null_and_blank_str(self.url)
        if has_title and has_url:
            if is_not_null_and_blank_str(self.pic_url):
                # FeedCard entry
                return {
                    "title": self.title,
                    "messageURL": self.url,
                    "picURL": self.pic_url
                }
            # ActionCard button
            return {
                "title": self.title,
                "actionURL": self.url
            }
        logging.error("CardItem是ActionCard的子控件时,title、url不能为空;是FeedCard的子控件时,title、url、pic_url不能为空!")
        raise ValueError("CardItem是ActionCard的子控件时,title、url不能为空;是FeedCard的子控件时,title、url、pic_url不能为空!")
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| mit | 3a45e77e683c5b010323d1732387696e | 31.733509 | 141 | 0.516282 | 2.778499 | false | false | false | false |
sfu-natlang/glm-parser | src/feature/ner_fgen.py | 1 | 15522 | import feature_vector
import feature_generator_base
import debug.debug
from data.file_io import fileRead, fileWrite
from ner_PER_list import ner_per_list
from ner_LOC_list import ner_loc_list
import string
import copy
__version__ = '1.1'

# Gazetteer lists used by the list_*_isPresent feature helpers below;
# imported from the bundled PER (person) and LOC (location) name lists.
per_list = ner_per_list
loc_list = ner_loc_list
class FeatureGenerator(feature_generator_base.FeatureGeneratorBase):
"""
This is the feature generator for the NER Tagger
"""
name = "NERTaggerFeatureGenerator"
def __init__(self):
feature_generator_base.FeatureGeneratorBase.__init__(self)
self.care_list.append("FORM")
self.care_list.append("POSTAG")
self.care_list.append("CHUNK")
self.care_list.append("NER")
return
    def load_to_sentence(self, sentence):
        """Attach this feature generator to *sentence*.

        Pads FORM/POSTAG/CHUNK with two boundary pseudo-tokens on each side
        and NER with two leading pseudo-tags, then precomputes and caches the
        gold global feature vector. Any previously attached fgen is unloaded
        first so the padding is not applied twice.
        """
        if sentence.fgen is not None:
            sentence.fgen.unload_from_sentence(sentence)

        sentence.column_list["FORM"].insert(0, '_B_-1')
        sentence.column_list["FORM"].insert(0, '_B_-2')  # first two 'words' are B_-2 B_-1
        sentence.column_list["FORM"].append('_B_+1')
        sentence.column_list["FORM"].append('_B_+2')  # last two 'words' are B_+1 B_+2

        sentence.column_list["POSTAG"].insert(0, 'B_-1')
        sentence.column_list["POSTAG"].insert(0, 'B_-2')
        sentence.column_list["POSTAG"].append('B_+1')
        sentence.column_list["POSTAG"].append('B_+2')

        sentence.column_list["CHUNK"].insert(0, 'B_-1')
        sentence.column_list["CHUNK"].insert(0, 'B_-2')
        sentence.column_list["CHUNK"].append('B_+1')
        sentence.column_list["CHUNK"].append('B_+2')

        # NER only receives leading boundary tags (no trailing ones)
        sentence.column_list["NER"].insert(0, 'B_-1')
        sentence.column_list["NER"].insert(0, 'B_-2')

        gold_global_vector = self.get_feature_vector(sentence)
        sentence.gold_global_vector = gold_global_vector

        sentence.fgen = self

        return
def unload_from_sentence(self, sentence):
if sentence.fgen is None:
return
if sentence.fgen.name != "NERTaggerFeatureGenerator":
sentence.fgen.unload_from_sentence(sentence)
return
del sentence.column_list["FORM"][0]
del sentence.column_list["FORM"][0]
del sentence.column_list["FORM"][len(sentence.column_list["FORM"]) - 1]
del sentence.column_list["FORM"][len(sentence.column_list["FORM"]) - 1]
del sentence.column_list["POSTAG"][0]
del sentence.column_list["POSTAG"][0]
del sentence.column_list["POSTAG"][len(sentence.column_list["POSTAG"]) - 1]
del sentence.column_list["POSTAG"][len(sentence.column_list["POSTAG"]) - 1]
del sentence.column_list["CHUNK"][0]
del sentence.column_list["CHUNK"][0]
del sentence.column_list["CHUNK"][len(sentence.column_list["CHUNK"]) - 1]
del sentence.column_list["CHUNK"][len(sentence.column_list["CHUNK"]) - 1]
del sentence.column_list["NER"][0]
del sentence.column_list["NER"][0]
sentence.fgen = None
return
def update_sentence_with_output(self, sentence, output):
if sentence.fgen != self:
raise RuntimeError("FGEN [ERROR]: update_sentence_with_output " +
"can only update sentence with this fgen as feature generator")
sentence.column_list["NER"] = copy.deepcopy(output)
return
    # function to check if the word contains a digit / dollar /exclamatory /hyphen
    def contains_digits(self, s):
        """Return True if s contains at least one digit character.

        NOTE(review): this class contains identical duplicate definitions of
        contains_digits and contains_hyphen further down; they are byte-for-
        byte the same, so behaviour is unaffected, but the duplicates should
        eventually be removed.
        """
        return any(char.isdigit() for char in s)

    def contains_dollar(self, s):
        """Return True if s contains a '$' character."""
        return any(char == "$" for char in s)

    def contains_exclamatory(self, s):
        """Return True if s contains a '!' character."""
        return any(char == "!" for char in s)

    def contains_hyphen(self, s):
        """Return True if s contains a '-' character."""
        return any(char == "-" for char in s)

    # function to check if word contains upper case characters
    def contains_upper(self, s):
        """Return True if s contains at least one uppercase character."""
        return any(char.isupper() for char in s)

    # function to check if word contains any punctuations
    def contains_punc(self, s):
        """Return True if s contains any ASCII punctuation character."""
        return any(char in string.punctuation for char in s)
# fucntion to check if word starts with capital letter
def starts_capital(self, s):
if s[0].isupper():
return 1
else:
return 0
# function to check if the word's final character is a period
def ends_with_period(self, s):
    """Return 1 when the last character of *s* is '.', else 0.

    Bug fix: the original tested ``s[:-1] == '.'`` (everything *except*
    the last character), which was only true for two-character tokens
    like '.x'.  Per the function name, the final character is what
    matters; ``s[-1:]`` is used so the empty string safely yields 0.
    """
    if s[-1:] == '.':
        return 1
    else:
        return 0
# 1 when every character of the token is upper case (empty string -> 1).
def all_upper(self, s):
    """Return 1 when *s* consists entirely of upper-case characters."""
    return 0 if any(not ch.isupper() for ch in s) else 1
# --- more character predicates --------------------------------------------
# NOTE(review): contains_hyphen, contains_digits and contains_upper are
# re-defined below with bodies identical to the earlier definitions; in a
# Python class body the later definition silently wins, so the duplicates
# are harmless dead code.
def contains_internal_period(self, s):
    # True when *s* contains at least one '.' character (anywhere, despite
    # the "internal" in the name).
    return any(char == '.' for char in s)
def has_internal_apostrophe(self, s):
    # True when *s* contains at least one apostrophe.
    return any(char == "'" for char in s)
def contains_hyphen(self, s):
    # Duplicate of the earlier contains_hyphen.
    return any(char == "-" for char in s)
def contains_amphasand(self, s):
    # True when *s* contains at least one '&' (sic: ampersand).
    return any(char == "&" for char in s)
# True when the token mixes upper- and lower-case characters.
def contains_upper_lower(self, s):
    return (any(char.isupper() for char in s) and any(char.islower() for char in s))
# True when *any* character is alphanumeric (not all, despite the name).
def contains_alphanumeric(self, s):
    return any(char.isalnum() for char in s)
# 1 when every character is a digit (ints, not bools; empty string -> 1).
def contains_all_num(self, s):
    for x in s:
        if not x.isdigit():
            return 0
    return 1
def contains_digits(self, s):
    # Duplicate of the earlier contains_digits.
    return any(char.isdigit() for char in s)
# 1 when the token is exactly a period followed by a single letter (".X").
def containsPInitial(self, s):
    if len(s) == 2:
        if s[0] == '.' and s[1].isalpha():
            return 1
    return 0
def contains_hyphen(self, s):
    # Duplicate definition (later one wins).
    return any(char == "-" for char in s)
def contains_all_upper(self, s):
    # NOTE(review): the comprehension filter is s.isdigit() -- the WHOLE
    # string -- not char.isdigit(), so this returns len(s) when *s* is all
    # digits and 0 otherwise.  It has nothing to do with upper case despite
    # its name; probably a bug -- confirm intent before relying on it.
    x = [char for char in s if s.isdigit()]
    return len(x)
def contains_upper(self, s):
    # Duplicate of the earlier contains_upper.
    return any(char.isupper() for char in s)
# function to replace all digits in a word with 'D'
def lexical_f(self, s):
    # str.replace is global, so each digit seen in the original token has
    # every occurrence rewritten; the net effect is digit -> 'D'.
    for i in s:
        if i.isdigit():
            s = s.replace(i, "D")
    return s
def list_PER_isPresent(self, s):
    # 1 when the token appears in the person-name gazetteer `per_list`,
    # except for the possessive clitic "'s"; 0 otherwise.
    # NOTE(review): per_list is a module-level name defined outside this
    # chunk -- presumably loaded via loading_list; confirm it is in scope.
    y = '\'s'
    if s != y:
        if s in per_list:
            return 1
    return 0
# function to generate ortho features
def ortho_feature_alphanumeric(self, s):
    # Maps digits to 'D' and letters to 'A' to form an orthographic shape.
    # NOTE: str.replace is global, so an input that itself contains 'D'
    # after a digit is double-substituted ("1D" -> "DD" -> "AA"); this is
    # therefore NOT a pure per-character mapping.
    for i in s:
        if i.isdigit():
            s = s.replace(i, "D")
        elif i.isalpha():
            s = s.replace(i, "A")
    return s
# 1 when every character is lower case (empty string -> 1).
def all_lower(self, s):
    for i in s:
        if not i.islower():
            return 0
    return 1
def possesive_feature(self, s):
    # 1 when the token is exactly the possessive clitic "'s", else 0.
    y = '\'s'
    if s == y:
        # print y
        return 1
    return 0
# Binary search over a sorted gazetteer list.
def binarySearch(self, item, alist):
    """Return True when *item* occurs in *alist* (must be sorted ascending)."""
    lo = 0
    hi = len(alist) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = alist[mid]
        if probe == item:
            return True
        if item < probe:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
def loading_list(self, list_file):
    """Load a gazetteer file and return the second whitespace-separated
    field of every line.

    Improvements over the original:
    - uses a context manager so the file handle is closed even when an
      exception is raised mid-read (the original leaked it on error);
    - skips blank or one-field lines instead of crashing with IndexError
      (gazetteer files commonly end with an empty line).
    Well-formed files produce exactly the same list as before.
    """
    entries = []
    with open(list_file, 'r') as fh:
        for line in fh:
            fields = line.split()
            if len(fields) > 1:
                entries.append(fields[1])
    return entries
# Function to get the ner feature of a word
def current_tag_feature(self,
                        sentence,
                        index,
                        prev_tag,
                        prev_backpointer):
    """Build the local (decode-time) feature list for the token at *index*.

    Features are emitted as plain tuples -- no tag appended and not
    stringified -- so the decoder can combine them with candidate tags
    itself.  `prev_tag`/`prev_backpointer` are the NER tags of the two
    preceding positions on the current search path -- presumably supplied
    by a Viterbi-style decoder (confirm against the caller).

    Assumes the sentence was padded with two sentinel tokens at each end
    at load time, so wordlist[i-2] .. wordlist[i+2] are always in range.
    """
    wordlist = sentence.column_list["FORM"]
    ner_list = sentence.column_list["NER"]  # read but unused here
    pos_list = sentence.column_list["POSTAG"]
    chunk_list = sentence.column_list["CHUNK"]
    fv = []
    # NOTE(review): the current word is lower-cased here, but
    # get_feature_vector (training side) uses the raw token -- the
    # casing features are therefore asymmetric between train and decode.
    word = wordlist[index].lower()
    i = index
    # All-caps features over a +/-2 token window (feature 1 can rarely
    # fire since `word` was just lower-cased).
    if self.all_upper(word):
        fv.append((1, 'cur_upper'))
    if self.all_upper(wordlist[i-1]):
        fv.append((2, 'prev_upper'))
    if self.all_upper(wordlist[i-2]):
        fv.append((3, 'prev_prev_upper'))
    if self.all_upper(wordlist[i+1]):
        fv.append((4, 'after_upper'))
    if self.all_upper(wordlist[i+2]):
        fv.append((5, 'after_after_upper'))
    # Tag-history and lexical n-gram context features.
    fv.append((6, 'previous_2_tag', prev_tag, prev_backpointer))
    fv.append((7, 'previous+cur', prev_tag, prev_backpointer, word))
    fv.append((8, 'cur', word))
    fv.append((9, 'prev', wordlist[i-1]))
    fv.append((10, 'prev_prev', wordlist[i-2]))
    fv.append((11, 'after', wordlist[i+1]))
    fv.append((12, 'after_after', wordlist[i+2]))
    fv.append((13, wordlist[i-2], wordlist[i-1], word, wordlist[i+1], wordlist[i+2]))
    # Initial-capital features over the same window.
    if self.starts_capital(word):
        fv.append((14, 'cur_cap'))
    if self.starts_capital(wordlist[i-1]):
        fv.append((15, 'prev_cap'))
    if self.starts_capital(wordlist[i-2]):
        fv.append((16, 'prev_prev_cap'))
    if self.starts_capital(wordlist[i+1]):
        fv.append((17, 'after_cap'))
    if self.starts_capital(wordlist[i+2]):
        fv.append((18, 'after_after_cap'))
    # Prefix/suffix features of length 1..4, plus word length and position.
    fv.append((19, word[:1]))
    fv.append((20, word[-1:]))
    fv.append((21, word[:2]))
    fv.append((22, word[-2:]))
    fv.append((23, word[:3]))
    fv.append((24, word[-3:]))
    fv.append((25, word[:4]))
    fv.append((26, word[-4:]))
    fv.append((29, len(word)))  # NOTE: ids 29 and 30 are reused below.
    fv.append((30, i))
    # Possessive clitic context.
    if self.possesive_feature(word):
        fv.append((36, prev_tag))
        fv.append((37, pos_list[i-1]))
    # POS and chunk of the current token.
    fv.append((27, pos_list[i]))
    fv.append((28, chunk_list[i]))
    # Digit/punctuation shape features.
    if self.contains_all_num(word):
        fv.append((29, 'all_digits'))
    if self.contains_digits(word):
        if self.contains_hyphen(word):
            fv.append((30, 'digits_punc'))
        elif self.contains_internal_period(word):
            fv.append((31, 'digits_punc'))
        elif self.has_internal_apostrophe(word):
            fv.append((32, 'digits_punc'))
        elif self.contains_internal_period(word):
            # NOTE(review): unreachable -- same condition as two branches up.
            fv.append((33, 'digits_punc'))
        elif self.contains_amphasand(word):
            fv.append((34, 'digits_punc'))
    if self.contains_alphanumeric(word) and self.contains_hyphen(word):
        fv.append((35, 'alphanum_hyphen'))
    # Gazetteer lookups; per_list/loc_list are module-level names defined
    # outside this chunk -- presumably sorted (binarySearch requires it).
    if self.binarySearch(word, per_list):
        fv.append((38, 'PER_list'))
    if self.binarySearch(word, loc_list):
        fv.append((39, 'LOC_list'))
    # Bigram features around the current token.
    fv.append((40, wordlist[i-1], word))
    fv.append((41, word, wordlist[i+1]))
    if self.all_lower(word):
        fv.append((42, 'islower'))
    if self.contains_hyphen(word):
        fv.append((43, 'hyphen'))
    return fv
# Function to get sentence feature
def get_feature_vector(self, sentence, output=None):
    """Return the global (training-time) feature vector for a sentence.

    Emits the same feature templates as current_tag_feature, but each
    feature is stringified with the token's NER tag appended, producing
    the perceptron's sparse feature strings.  When *output* is given it
    is used as the tag sequence instead of the gold NER column.

    NOTE(review): the loop starts at index 3 although only two sentinel
    tokens are prepended at load time -- confirm the first real token is
    deliberately skipped.  Also, `word` is NOT lower-cased here, unlike
    in current_tag_feature.
    """
    wordlist = sentence.column_list["FORM"]
    pos_list = sentence.column_list["POSTAG"]
    chunk_list = sentence.column_list["CHUNK"]
    if output is None:
        ner_list = sentence.column_list["NER"]
    else:
        ner_list = output
    fv = []
    for i in range(3, len(wordlist) - 2):
        word = wordlist[i]
        tag = ner_list[i]
        # All-caps features over a +/-2 token window.
        if self.all_upper(word):
            fv.append(str((1, 'cur_upper', tag)))
        if self.all_upper(wordlist[i-1]):
            fv.append(str((2, 'prev_upper', tag)))
        if self.all_upper(wordlist[i-2]):
            fv.append(str((3, 'prev_prev_upper', tag)))
        if self.all_upper(wordlist[i+1]):
            fv.append(str((4, 'after_upper', tag)))
        if self.all_upper(wordlist[i+2]):
            fv.append(str((5, 'after_after_upper', tag)))
        # Tag-history and lexical n-gram context features.
        fv.append(str((6, 'previous_2_tag', ner_list[i-1], ner_list[i-2], tag)))
        fv.append(str((7, 'previous+cur', ner_list[i-1], ner_list[i-2], word, tag)))
        fv.append(str((8, 'cur', word, tag)))
        fv.append(str((9, 'prev', wordlist[i-1], tag)))
        fv.append(str((10, 'prev_prev', wordlist[i-2], tag)))
        fv.append(str((11, 'after', wordlist[i+1], tag)))
        fv.append(str((12, 'after_after', wordlist[i+2], tag)))
        fv.append(str((13, wordlist[i-2], wordlist[i-1], word, wordlist[i+1], wordlist[i+2], tag)))
        # Initial-capital features over the same window.
        if self.starts_capital(word):
            fv.append(str((14, 'cur_cap', tag)))
        if self.starts_capital(wordlist[i-1]):
            fv.append(str((15, 'prev_cap', tag)))
        if self.starts_capital(wordlist[i-2]):
            fv.append(str((16, 'prev_prev_cap', tag)))
        if self.starts_capital(wordlist[i+1]):
            fv.append(str((17, 'after_cap', tag)))
        if self.starts_capital(wordlist[i+2]):
            fv.append(str((18, 'after_after_cap', tag)))
        # Prefix/suffix features of length 1..4, plus length and position.
        fv.append(str((19, word[:1], tag)))
        fv.append(str((20, word[-1:], tag)))
        fv.append(str((21, word[:2], tag)))
        fv.append(str((22, word[-2:], tag)))
        fv.append(str((23, word[:3], tag)))
        fv.append(str((24, word[-3:], tag)))
        fv.append(str((25, word[:4], tag)))
        fv.append(str((26, word[-4:], tag)))
        fv.append(str((29, len(word), tag)))  # NOTE: ids 29/30 reused below.
        fv.append(str((30, i, tag)))
        # Possessive clitic context.
        if self.possesive_feature(word):
            fv.append(str((36, ner_list[i-1], tag)))
            fv.append(str((37, pos_list[i-1], tag)))
        # POS and chunk of the current token.
        fv.append(str((27, pos_list[i], tag)))
        fv.append(str((28, chunk_list[i], tag)))
        # Digit/punctuation shape features.
        if self.contains_all_num(word):
            fv.append(str((29, 'all_digits', tag)))
        if self.contains_digits(word):
            if self.contains_hyphen(word):
                fv.append(str((30, 'digits_punc', tag)))
            elif self.contains_internal_period(word):
                fv.append(str((31, 'digits_punc', tag)))
            elif self.has_internal_apostrophe(word):
                fv.append(str((32, 'digits_punc', tag)))
            elif self.contains_internal_period(word):
                # NOTE(review): unreachable -- duplicates the branch above.
                fv.append(str((33, 'digits_punc', tag)))
            elif self.contains_amphasand(word):
                fv.append(str((34, 'digits_punc', tag)))
        if self.contains_alphanumeric(word) and self.contains_hyphen(word):
            fv.append(str((35, 'alphanum_hyphen', tag)))
        # Gazetteer lookups (per_list/loc_list defined outside this chunk).
        if self.binarySearch(word, per_list):
            fv.append(str((38, 'PER_list', tag)))
        if self.binarySearch(word, loc_list):
            fv.append(str((39, 'LOC_list', tag)))
        # Bigram features around the current token.
        fv.append(str((40, wordlist[i-1], word, tag)))
        fv.append(str((41, word, wordlist[i+1], tag)))
        if self.all_lower(word):
            fv.append(str((42, 'islower', tag)))
        if self.contains_hyphen(word):
            fv.append(str((43, 'hyphen', tag)))
    return fv
| mit | 971cc2410aff995a10d65eafab318e4f | 35.43662 | 103 | 0.541425 | 3.478709 | false | false | false | false |
alerta/alerta-contrib | plugins/prometheus/alerta_prometheus.py | 1 | 7937 | import datetime
import logging
import os
import requests
import json
from typing import Any
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.models.alert import Alert
from alerta.plugins import PluginBase
LOG = logging.getLogger('alerta.plugins.prometheus')
DEFAULT_ALERTMANAGER_API_URL = 'http://localhost:9093'
ALERTMANAGER_API_URL = os.environ.get('ALERTMANAGER_API_URL') or app.config.get('ALERTMANAGER_API_URL', None)
ALERTMANAGER_USERNAME = os.environ.get('ALERTMANAGER_USERNAME') or app.config.get('ALERTMANAGER_USERNAME', None)
ALERTMANAGER_PASSWORD = os.environ.get('ALERTMANAGER_PASSWORD') or app.config.get('ALERTMANAGER_PASSWORD', None)
ALERTMANAGER_SILENCE_DAYS = os.environ.get('ALERTMANAGER_SILENCE_DAYS') or app.config.get('ALERTMANAGER_SILENCE_DAYS', 1)
ALERTMANAGER_SILENCE_FROM_ACK = os.environ.get('ALERTMANAGER_SILENCE_FROM_ACK') or app.config.get('ALERTMANAGER_SILENCE_FROM_ACK', False)
ALERTMANAGER_USE_EXTERNALURL_FOR_SILENCES = os.environ.get('ALERTMANAGER_USE_EXTERNALURL_FOR_SILENCES') or app.config.get('ALERTMANAGER_USE_EXTERNALURL_FOR_SILENCES',False)
class AlertmanagerSilence(PluginBase):
    """Alerta plugin that keeps Prometheus Alertmanager silences in sync.

    Ack'ing an alert in Alerta creates a matching silence in Alertmanager
    (when ALERTMANAGER_SILENCE_FROM_ACK is enabled); un-ack'ing or
    re-opening removes it; a `close` action expires the alert in
    Alertmanager itself.  The silence id is stored in the alert's
    `silenceId` attribute between calls.
    """

    def __init__(self, name=None):
        # HTTP basic-auth credentials for the Alertmanager API, or None
        # when no username is configured.
        self.auth = (ALERTMANAGER_USERNAME, ALERTMANAGER_PASSWORD) if ALERTMANAGER_USERNAME else None
        super(AlertmanagerSilence, self).__init__(name)

    def pre_receive(self, alert):
        # No-op: incoming alerts pass through unchanged.
        return alert

    def post_receive(self, alert):
        # No-op: nothing to do after an alert is stored.
        return

    def status_change(self, alert, status, text):
        '''
        If a silence exists for an open or closed alert we probably want to remove it
        '''
        if status in ('open', 'closed'):
            silenceId = alert.attributes.get('silenceId', None)
            if silenceId:
                LOG.debug('Alertmanager: Remove silence for alertname=%s instance=%s', alert.event, alert.resource)
                # Prefer the configured API URL; fall back to the URL the
                # alert itself reported (Alertmanager's externalUrl).
                base_url = ALERTMANAGER_API_URL or alert.attributes.get('externalUrl', DEFAULT_ALERTMANAGER_API_URL)
                url = base_url + '/api/v1/silence/%s' % silenceId
                try:
                    r = requests.delete(url, auth=self.auth, timeout=2)
                except Exception as e:
                    raise RuntimeError("Alertmanager: ERROR - %s" % e)
                LOG.debug('Alertmanager: %s - %s', r.status_code, r.text)
                try:
                    # Forget the silence id so a later unack does not try
                    # to delete it again.
                    alert.attributes['silenceId'] = None
                except Exception as e:
                    raise RuntimeError("Alertmanager: ERROR - %s" % e)
                LOG.debug('Alertmanager: Removed silenceId %s from attributes', silenceId)
            if status == 'closed':
                LOG.warning("Status is now closed")
        return alert

    def take_action(self, alert: Alert, action: str, text: str, **kwargs) -> Any:
        '''
        Set silence in alertmanager.
        '''
        # Only alerts that originated from Prometheus are mirrored back.
        if alert.event_type != 'prometheusAlert':
            return alert
        base_url = ALERTMANAGER_API_URL or alert.attributes.get('externalUrl', DEFAULT_ALERTMANAGER_API_URL)
        if action == 'close':
            LOG.warning("Got a close action so trying to close this in alertmanager too")
            url = base_url + '/api/v1/alerts'
            raw_data_string = alert.raw_data
            raw_data = json.loads(raw_data_string)
            # set the endsAt to now so alertmanager will consider it expired or whatever
            raw_data["endsAt"] = (datetime.datetime.utcnow() - datetime.timedelta(minutes=5)).replace(microsecond=0).isoformat() + ".000Z"
            LOG.debug("Raw data type: {}, Raw data contents: {}".format(type(raw_data), raw_data))
            data = [raw_data]
            try:
                r = requests.post(url, json=data, auth=self.auth, timeout=2)
            except Exception as e:
                raise RuntimeError("Alertmanager: ERROR - %s" % e)
            LOG.debug('Alertmanager response was: %s - %s', r.status_code, r.text)
        elif action == 'ack' and ALERTMANAGER_SILENCE_FROM_ACK:
            # Silence duration: fixed number of days when configured,
            # otherwise the alert's own timeout (overridable via kwargs).
            if not ALERTMANAGER_SILENCE_DAYS:
                silence_seconds = kwargs.get('timeout', alert.timeout)
            else:
                try:
                    silence_days = int(ALERTMANAGER_SILENCE_DAYS)
                except Exception as e:
                    LOG.error(
                        "Alertmanager: Could not parse 'ALERTMANAGER_SILENCE_DAYS': %s", e)
                    raise RuntimeError(
                        "Could not parse 'ALERTMANAGER_SILENCE_DAYS': %s" % e)
                silence_seconds = silence_days * 86400
            LOG.debug('Alertmanager: Add silence for alertname=%s instance=%s timeout=%s',
                      alert.event, alert.resource, str(silence_seconds))
            data = {
                "matchers": [
                    {
                        "name": "alertname",
                        "value": alert.event
                    },
                    {
                        "name": "instance",
                        "value": alert.resource
                    }
                ],
                "startsAt": datetime.datetime.utcnow().replace(microsecond=0).isoformat() + ".000Z",
                "endsAt": (datetime.datetime.utcnow() + datetime.timedelta(seconds=silence_seconds))
                .replace(microsecond=0).isoformat() + ".000Z",
                "createdBy": "alerta",
                "comment": text if text != '' else "silenced by alerta"
            }
            # if alertmanager is clustered behind a load balancer that mirrors requests we should prefer to create one silence
            # rather than many
            if ALERTMANAGER_USE_EXTERNALURL_FOR_SILENCES:
                base_url = alert.attributes.get('externalUrl', DEFAULT_ALERTMANAGER_API_URL) or ALERTMANAGER_API_URL
            else:
                base_url = ALERTMANAGER_API_URL or alert.attributes.get('externalUrl', DEFAULT_ALERTMANAGER_API_URL)
            url = base_url + '/api/v1/silences'
            try:
                r = requests.post(url, json=data, auth=self.auth, timeout=2)
            except Exception as e:
                raise RuntimeError("Alertmanager: ERROR - %s" % e)
            LOG.debug('Alertmanager: %s - %s', r.status_code, r.text)
            # example r={"status":"success","data":{"silenceId":8}}
            try:
                data = r.json().get('data', [])
                if data:
                    silenceId = data['silenceId']
                    alert.attributes['silenceId'] = silenceId
                else:
                    silenceId = alert.attributes.get('silenceId', "unknown")
                text = text + ' (silenced in Alertmanager)'
            except Exception as e:
                raise RuntimeError("Alertmanager: ERROR - %s" % e)
            LOG.debug('Alertmanager: Added silenceId %s to attributes', silenceId)
        elif action == 'unack':
            LOG.debug('Alertmanager: Remove silence for alertname=%s instance=%s', alert.event, alert.resource)
            silenceId = alert.attributes.get('silenceId', None)
            if silenceId:
                base_url = ALERTMANAGER_API_URL or alert.attributes.get('externalUrl', DEFAULT_ALERTMANAGER_API_URL)
                url = base_url + '/api/v1/silence/%s' % silenceId
                try:
                    r = requests.delete(url, auth=self.auth, timeout=2)
                except Exception as e:
                    raise RuntimeError("Alertmanager: ERROR - %s" % e)
                LOG.debug('Alertmanager: %s - %s', r.status_code, r.text)
                try:
                    alert.attributes['silenceId'] = None
                except Exception as e:
                    raise RuntimeError("Alertmanager: ERROR - %s" % e)
                LOG.debug('Alertmanager: Removed silenceId %s from attributes', silenceId)
        return alert
| mit | f9fcd9bbe1cd159ef974960a449ce100 | 44.096591 | 173 | 0.577422 | 3.966517 | false | false | false | false |
alerta/alerta-contrib | plugins/opsgenie/alerta_opsgenie.py | 1 | 6138 | import logging
import os
import re
import requests
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
LOG = logging.getLogger('alerta.plugins.opsgenie')
LOG.info('Initializing')
OPSGENIE_EVENTS_CREATE_URL = 'https://api.opsgenie.com/v2/alerts'
OPSGENIE_EVENTS_CLOSE_URL = 'https://api.opsgenie.com/v2/alerts/%s/close?identifierType=alias'
OPSGENIE_EVENTS_ACK_URL = 'https://api.opsgenie.com/v2/alerts/%s/acknowledge?identifierType=alias'
OPSGENIE_SERVICE_KEY = os.environ.get('OPSGENIE_SERVICE_KEY') or app.config['OPSGENIE_SERVICE_KEY']
OPSGENIE_TEAMS = os.environ.get('OPSGENIE_TEAMS', '') # comma separated list of teams
OPSGENIE_SEND_WARN = os.environ.get('OPSGENIE_SEND_WARN') or app.config.get('OPSGENIE_SEND_WARN', False)
SERVICE_KEY_MATCHERS = os.environ.get('SERVICE_KEY_MATCHERS') or app.config['SERVICE_KEY_MATCHERS']
DASHBOARD_URL = os.environ.get('DASHBOARD_URL') or app.config.get('DASHBOARD_URL', '')
LOG.info('Initialized: %s key, %s matchers' % (OPSGENIE_SERVICE_KEY, SERVICE_KEY_MATCHERS))
# when using with OpsGenie Edge connector setting a known source is useful
OPSGENIE_ALERT_SOURCE = os.environ.get('OPSGENIE_ALERT_SOURCE') or app.config.get('OPSGENIE_ALERT_SOURCE', 'Alerta')
class TriggerEvent(PluginBase):
    """Alerta plugin that mirrors alerts into OpsGenie.

    Creates an OpsGenie alert when a new alert is received, and
    acknowledges/closes the OpsGenie alert when the Alerta status changes.
    The Alerta alert id is used as the OpsGenie alias.
    """

    def opsgenie_service_key(self, resource):
        """Return the OpsGenie API key to use for *resource*.

        Walks SERVICE_KEY_MATCHERS (a list of {'regex', 'api_key'}
        mappings) and returns the first key whose regex matches the
        resource; falls back to the global OPSGENIE_SERVICE_KEY.
        """
        if not SERVICE_KEY_MATCHERS:
            LOG.debug('No matchers defined! Default service key: %s' % (OPSGENIE_SERVICE_KEY))
            return OPSGENIE_SERVICE_KEY
        for mapping in SERVICE_KEY_MATCHERS:
            if re.match(mapping['regex'], resource):
                LOG.debug('Matched regex: %s, service key: %s' % (mapping['regex'], mapping['api_key']))
                return mapping['api_key']
        LOG.debug('No regex match! Default service key: %s' % (OPSGENIE_SERVICE_KEY))
        return OPSGENIE_SERVICE_KEY

    def opsgenie_close_alert(self, alert, why):
        """Close the OpsGenie alert aliased to *alert.id*; return the response."""
        headers = {
            "Authorization": 'GenieKey ' + self.opsgenie_service_key(alert.resource)
        }
        closeUrl = OPSGENIE_EVENTS_CLOSE_URL % alert.id
        LOG.debug('OpsGenie close %s: %s %s' % (why, alert.id, closeUrl))
        try:
            r = requests.post(closeUrl, json={}, headers=headers, timeout=2)
        except Exception as e:
            raise RuntimeError("OpsGenie connection error: %s" % e)
        return r

    def opsgenie_ack_alert(self, alert, why):
        """Acknowledge the OpsGenie alert aliased to *alert.id*; return the response."""
        headers = {
            "Authorization": 'GenieKey ' + self.opsgenie_service_key(alert.resource)
        }
        ackUrl = OPSGENIE_EVENTS_ACK_URL % alert.id
        LOG.debug('OpsGenie ack %s: %s %s' % (why, alert.id, ackUrl))
        try:
            r = requests.post(ackUrl, json={}, headers=headers, timeout=2)
        except Exception as e:
            raise RuntimeError("OpsGenie connection error: %s" % e)
        return r

    def pre_receive(self, alert):
        # No-op: incoming alerts pass through unchanged.
        return alert

    def post_receive(self, alert):
        """Create (or close) the corresponding OpsGenie alert."""
        LOG.debug('Alert receive %s: %s' % (alert.id, alert.get_body(history=False)))
        if alert.repeat:
            LOG.debug('Alert repeating; ignored')
            return
        # If alerta has cleared or status is closed, send the close to opsgenie
        if (alert.severity in ['cleared', 'normal', 'ok']) or (alert.status == 'closed'):
            self.opsgenie_close_alert(alert, 'CREATE-CLOSE')
        elif (alert.severity in ['warning', 'informational']) and not OPSGENIE_SEND_WARN:
            LOG.info('Just informational or warning not sending to OpsGenie')
        else:
            headers = {
                "Authorization": 'GenieKey ' + self.opsgenie_service_key(alert.resource)
            }
            # Send all alert data as details to opsgenie
            body = alert.get_body(history=False)
            details = {}
            details['web_url'] = '%s/#/alert/%s' % (DASHBOARD_URL, alert.id)
            details['service'] = alert.service[0]
            details['origin'] = body['origin']
            details['event'] = body['event']
            details['group'] = body['group']
            details['trendIndication'] = body['trendIndication']
            details['severity'] = body['severity']
            details['previousSeverity'] = body['previousSeverity']
            details['duplicateCount'] = body['duplicateCount']
            payload = {
                "alias": alert.id,
                "message": "[ %s ]: %s: %s" % (alert.environment, alert.severity, alert.text),
                "entity": alert.environment,
                "responders": self.get_opsgenie_teams(),
                "tags": [alert.environment, alert.resource, alert.service[0], alert.event],
                "source": "{}".format(OPSGENIE_ALERT_SOURCE),
                "details": details
            }
            LOG.debug('OpsGenie CREATE payload: %s' % payload)
            try:
                r = requests.post(OPSGENIE_EVENTS_CREATE_URL, json=payload, headers=headers, timeout=2)
            except Exception as e:
                raise RuntimeError("OpsGenie connection error: %s" % e)
            LOG.debug('OpsGenie response: %s - %s' % (r.status_code, r.text))

    # generate list of responders from OPSGENIE_TEAMS env var
    def get_opsgenie_teams(self):
        """Parse OPSGENIE_TEAMS (comma-separated) into OpsGenie responder dicts."""
        teams = OPSGENIE_TEAMS.replace(' ', '')  # remove whitespace
        if len(teams) == 0:
            return []  # no teams specified
        teams = teams.split(',')
        return [{"name": team, "type": "team"} for team in teams]

    def status_change(self, alert, status, text):
        """Mirror Alerta status transitions ('ack'/'closed') to OpsGenie."""
        LOG.debug('Alert change %s to %s: %s' % (alert.id, status, alert.get_body(history=False)))
        if status not in ['ack', 'assign', 'closed']:
            LOG.debug('Not sending status change to opsgenie: %s to %s' % (alert.id, status))
            return
        r = None
        if status == 'closed':
            r = self.opsgenie_close_alert(alert, 'STATUS-CLOSE')
        elif status == 'ack':
            r = self.opsgenie_ack_alert(alert, 'STATUS-ACK')
        # Bug fix: 'assign' passes the guard above but maps to no OpsGenie
        # call, so the original crashed here with UnboundLocalError on `r`.
        if r is not None:
            LOG.debug('OpsGenie response: %s - %s' % (r.status_code, r.text))
| mit | 860304e7cf04c09ff1ad3e59effbe0a7 | 41.625 | 116 | 0.609482 | 3.454136 | false | false | false | false |
alerta/alerta-contrib | integrations/supervisor/evlistener.py | 1 | 2703 | #!/usr/bin/env python
import sys
import json
import platform
from alertaclient.api import Client
class Listener(object):
    """Minimal supervisord event-listener protocol helper over stdin/stdout."""

    def wait(self):
        """Block until supervisord sends an event.

        Returns a (headers, body) pair of dicts parsed from the
        space-separated ``key:value`` tokens of the header line and the
        ``len``-byte payload that follows it.
        """
        header_line = sys.stdin.readline()
        headers = {key: value for key, value in
                   (token.split(':') for token in header_line.split())}
        payload = sys.stdin.read(int(headers['len']))
        body = {key: value for key, value in
                (token.split(':') for token in payload.split())}
        return headers, body

    def send_cmd(self, s):
        """Write a protocol response to stdout and flush immediately."""
        sys.stdout.write(s)
        sys.stdout.flush()

    def log_stderr(self, s):
        """Write a diagnostic message to stderr and flush immediately."""
        sys.stderr.write(s)
        sys.stderr.flush()
def main():
    """Run the supervisord event-listener loop forever.

    TICK_* events are forwarded to Alerta as heartbeats; every other
    event (the PROCESS_STATE_* family) is sent as an alert whose severity
    is derived from the new process state.  The READY/RESULT handshake
    follows the supervisord eventlistener protocol.
    """
    api = Client()
    listener = Listener()
    while True:
        # Tell supervisord we are ready for the next event.
        listener.send_cmd('READY\n')
        headers, body = listener.wait()
        event = headers['eventname']
        if event.startswith('TICK'):
            try:
                origin = '{}/{}'.format('supervisord', platform.uname()[1])
                api.heartbeat(origin, tags=[headers['ver'], event])
            except Exception as e:
                # NOTE(review): log_stderr calls sys.stderr.write, which
                # expects a str -- passing the exception object `e` would
                # raise TypeError; confirm and consider str(e).
                listener.log_stderr(e)
                listener.send_cmd('RESULT 4\nFAIL')
            else:
                listener.send_cmd('RESULT 2\nOK')
        else:
            # Map the process-state event name onto an Alerta severity.
            if event.endswith('FATAL'):
                severity = 'critical'
            elif event.endswith('BACKOFF'):
                severity = 'warning'
            elif event.endswith('EXITED'):
                severity = 'minor'
            else:
                severity = 'normal'
            try:
                api.send_alert(
                    resource='%s:%s' % (platform.uname()[1], body['processname']),
                    environment='Production',
                    service=['supervisord'],
                    event=event,
                    correlate=[
                        'PROCESS_STATE_STARTING',
                        'PROCESS_STATE_RUNNING',
                        'PROCESS_STATE_BACKOFF',
                        'PROCESS_STATE_STOPPING',
                        'PROCESS_STATE_EXITED',
                        'PROCESS_STATE_STOPPED',
                        'PROCESS_STATE_FATAL',
                        'PROCESS_STATE_UNKNOWN'
                    ],
                    value='serial=%s' % headers['serial'],
                    severity=severity,
                    origin=headers['server'],
                    text='State changed from %s to %s.' % (body['from_state'], event),
                    raw_data='%s\n\n%s' % (json.dumps(headers), json.dumps(body))
                )
            except Exception as e:
                # See the NOTE above about log_stderr receiving a non-str.
                listener.log_stderr(e)
                listener.send_cmd('RESULT 4\nFAIL')
            else:
                listener.send_cmd('RESULT 2\nOK')


if __name__ == '__main__':
    main()
cantools/cantools | cantools/database/can/c_source.py | 2 | 53130 | import re
import time
from decimal import Decimal
from ...version import __version__
HEADER_FMT = '''\
/**
* The MIT License (MIT)
*
* Copyright (c) 2018-2019 Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* This file was generated by cantools version {version} {date}.
*/
#ifndef {include_guard}
#define {include_guard}
#ifdef __cplusplus
extern "C" {{
#endif
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#ifndef EINVAL
# define EINVAL 22
#endif
/* Frame ids. */
{frame_id_defines}
/* Frame lengths in bytes. */
{frame_length_defines}
/* Extended or standard frame types. */
{is_extended_frame_defines}
/* Frame cycle times in milliseconds. */
{frame_cycle_time_defines}
/* Signal choices. */
{choices_defines}
{structs}
{declarations}
#ifdef __cplusplus
}}
#endif
#endif
'''
SOURCE_FMT = '''\
/**
* The MIT License (MIT)
*
* Copyright (c) 2018-2019 Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* This file was generated by cantools version {version} {date}.
*/
#include <string.h>
#include "{header}"
{helpers}\
{definitions}\
'''
FUZZER_SOURCE_FMT = '''\
/**
* The MIT License (MIT)
*
* Copyright (c) 2018-2019 Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* This file was generated by cantools version {version} {date}.
*/
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include "{header}"
static void assert_first_pack(int res)
{{
if (res < 0) {{
printf("First pack failed with %ld.\\n", res);
__builtin_trap();
}}
}}
static void assert_second_unpack(int res)
{{
if (res < 0) {{
printf("Second unpack failed with %ld.\\n", res);
__builtin_trap();
}}
}}
static void assert_second_unpack_data(const void *unpacked_p,
const void *unpacked2_p,
size_t size)
{{
if (memcmp(unpacked_p, unpacked2_p, size) != 0) {{
printf("Second unpacked data does not match first unpacked data.\\n");
__builtin_trap();
}}
}}
static void assert_second_pack(int res, int res2)
{{
if (res != res2) {{
printf("Second pack result %ld does not match first pack "
"result %ld.\\n",
res,
res2);
__builtin_trap();
}}
}}
static void assert_second_pack_data(const uint8_t *packed_p,
const uint8_t *packed2_p,
int size)
{{
int i;
if (memcmp(packed_p, packed2_p, size) != 0) {{
for (i = 0; i < size; i++) {{
printf("[%04ld]: 0x%02x 0x%02x\\n", i, packed_p[i], packed2_p[i]);
}}
__builtin_trap();
}}
}}
{tests}
int LLVMFuzzerTestOneInput(const uint8_t *data_p, size_t size)
{{
{llvm_body}
return (0);
}}
'''
FUZZER_MAKEFILE_FMT = '''\
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2019 Erik Moqvist
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# This file was generated by cantools version {version} {date}.
#
CC = clang
EXE = fuzzer
C_SOURCES = \\
\t{source} \\
\t{fuzzer_source}
CFLAGS = \\
\t-fprofile-instr-generate \\
\t-fcoverage-mapping \\
\t-I. \\
\t-g -fsanitize=address,fuzzer \\
\t-fsanitize=signed-integer-overflow \\
\t-fno-sanitize-recover=all
EXECUTION_TIME ?= 5
all:
\t$(CC) $(CFLAGS) $(C_SOURCES) -o $(EXE)
\trm -f $(EXE).profraw
\tLLVM_PROFILE_FILE="$(EXE).profraw" \\
\t ./$(EXE) \\
\t -max_total_time=$(EXECUTION_TIME)
\tllvm-profdata merge -sparse $(EXE).profraw -o $(EXE).profdata
\tllvm-cov show ./$(EXE) -instr-profile=$(EXE).profdata
\tllvm-cov report ./$(EXE) -instr-profile=$(EXE).profdata
'''
TEST_FMT = '''
static void test_{name}(
const uint8_t *packed_p,
size_t size)
{{
int res;
int res2;
uint8_t packed[size];
uint8_t packed2[size];
struct {name}_t unpacked;
struct {name}_t unpacked2;
memset(&unpacked, 0, sizeof(unpacked));
res = {name}_unpack(
&unpacked,
packed_p,
size);
if (res >= 0) {{
res = {name}_pack(
&packed[0],
&unpacked,
sizeof(packed));
assert_first_pack(res);
memset(&unpacked2, 0, sizeof(unpacked2));
res2 = {name}_unpack(
&unpacked2,
&packed[0],
res);
assert_second_unpack(res2);
assert_second_unpack_data(&unpacked,
&unpacked2,
sizeof(unpacked));
res2 = {name}_pack(
&packed2[0],
&unpacked,
sizeof(packed2));
assert_second_pack(res, res2);
assert_second_pack_data(&packed[0], &packed2[0], res);
}}
}}\
'''
STRUCT_FMT = '''\
/**
* Signals in message {database_message_name}.
*
{comment}\
* All signal values are as on the CAN bus.
*/
struct {database_name}_{message_name}_t {{
{members}
}};
'''
DECLARATION_PACK_FMT = '''\
/**
* Pack message {database_message_name}.
*
* @param[out] dst_p Buffer to pack the message into.
* @param[in] src_p Data to pack.
* @param[in] size Size of dst_p.
*
* @return Size of packed data, or negative error code.
*/
int {database_name}_{message_name}_pack(
uint8_t *dst_p,
const struct {database_name}_{message_name}_t *src_p,
size_t size);
'''
DECLARATION_UNPACK_FMT = '''\
/**
* Unpack message {database_message_name}.
*
* @param[out] dst_p Object to unpack the message into.
* @param[in] src_p Message to unpack.
* @param[in] size Size of src_p.
*
* @return zero(0) or negative error code.
*/
int {database_name}_{message_name}_unpack(
struct {database_name}_{message_name}_t *dst_p,
const uint8_t *src_p,
size_t size);
'''
SIGNAL_DECLARATION_ENCODE_FMT = '''\
/**
* Encode given signal by applying scaling and offset.
*
* @param[in] value Signal to encode.
*
* @return Encoded signal.
*/
{type_name} {database_name}_{message_name}_{signal_name}_encode({floating_point_type} value);
'''
SIGNAL_DECLARATION_DECODE_FMT = '''\
/**
* Decode given signal by applying scaling and offset.
*
* @param[in] value Signal to decode.
*
* @return Decoded signal.
*/
{floating_point_type} {database_name}_{message_name}_{signal_name}_decode({type_name} value);
'''
SIGNAL_DECLARATION_IS_IN_RANGE_FMT = '''\
/**
* Check that given signal is in allowed range.
*
* @param[in] value Signal to check.
*
* @return true if in range, false otherwise.
*/
bool {database_name}_{message_name}_{signal_name}_is_in_range({type_name} value);
'''
PACK_HELPER_LEFT_SHIFT_FMT = '''\
static inline uint8_t pack_left_shift_u{length}(
{var_type} value,
uint8_t shift,
uint8_t mask)
{{
return (uint8_t)((uint8_t)(value << shift) & mask);
}}
'''
PACK_HELPER_RIGHT_SHIFT_FMT = '''\
static inline uint8_t pack_right_shift_u{length}(
{var_type} value,
uint8_t shift,
uint8_t mask)
{{
return (uint8_t)((uint8_t)(value >> shift) & mask);
}}
'''
UNPACK_HELPER_LEFT_SHIFT_FMT = '''\
static inline {var_type} unpack_left_shift_u{length}(
uint8_t value,
uint8_t shift,
uint8_t mask)
{{
return ({var_type})(({var_type})(value & mask) << shift);
}}
'''
UNPACK_HELPER_RIGHT_SHIFT_FMT = '''\
static inline {var_type} unpack_right_shift_u{length}(
uint8_t value,
uint8_t shift,
uint8_t mask)
{{
return ({var_type})(({var_type})(value & mask) >> shift);
}}
'''
DEFINITION_PACK_FMT = '''\
int {database_name}_{message_name}_pack(
uint8_t *dst_p,
const struct {database_name}_{message_name}_t *src_p,
size_t size)
{{
{pack_unused}\
{pack_variables}\
if (size < {message_length}u) {{
return (-EINVAL);
}}
memset(&dst_p[0], 0, {message_length});
{pack_body}
return ({message_length});
}}
'''
DEFINITION_UNPACK_FMT = '''\
int {database_name}_{message_name}_unpack(
struct {database_name}_{message_name}_t *dst_p,
const uint8_t *src_p,
size_t size)
{{
{unpack_unused}\
{unpack_variables}\
if (size < {message_length}u) {{
return (-EINVAL);
}}
{unpack_body}
return (0);
}}
'''
SIGNAL_DEFINITION_ENCODE_FMT = '''\
{type_name} {database_name}_{message_name}_{signal_name}_encode({floating_point_type} value)
{{
return ({type_name})({encode});
}}
'''
SIGNAL_DEFINITION_DECODE_FMT = '''\
{floating_point_type} {database_name}_{message_name}_{signal_name}_decode({type_name} value)
{{
return ({decode});
}}
'''
SIGNAL_DEFINITION_IS_IN_RANGE_FMT = '''\
bool {database_name}_{message_name}_{signal_name}_is_in_range({type_name} value)
{{
{unused}\
return ({check});
}}
'''
EMPTY_DEFINITION_FMT = '''\
int {database_name}_{message_name}_pack(
uint8_t *dst_p,
const struct {database_name}_{message_name}_t *src_p,
size_t size)
{{
(void)dst_p;
(void)src_p;
(void)size;
return (0);
}}
int {database_name}_{message_name}_unpack(
struct {database_name}_{message_name}_t *dst_p,
const uint8_t *src_p,
size_t size)
{{
(void)dst_p;
(void)src_p;
(void)size;
return (0);
}}
'''
SIGN_EXTENSION_FMT = '''
if (({name} & (1{suffix} << {shift})) != 0{suffix}) {{
{name} |= 0x{mask:x}{suffix};
}}
'''
SIGNAL_MEMBER_FMT = '''\
/**
{comment}\
* Range: {range}
* Scale: {scale}
* Offset: {offset}
*/
{type_name} {name}{length};\
'''
class Signal(object):
    """Wrapper around a database signal adding C-code-generation helpers.

    Provides the snake_case name, the C integer/float type to use, literal
    suffixes, de-duplicated choice names and byte-segment iteration for the
    generated shift/mask code. Unknown attributes are delegated to the
    wrapped signal object.
    """
    def __init__(self, signal):
        self._signal = signal
        # Cached snake_case name used throughout the generated C identifiers.
        self.snake_name = camel_to_snake_case(self.name)
    def __getattr__(self, name):
        # Delegate everything not defined here to the wrapped signal.
        return getattr(self._signal, name)
    @property
    def unit(self):
        """The signal unit, or '-' when the database defines none."""
        return _get(self._signal.unit, '-')
    @property
    def type_length(self):
        """Bit width of the C integer type holding the signal (8/16/32/64)."""
        if self.length <= 8:
            return 8
        elif self.length <= 16:
            return 16
        elif self.length <= 32:
            return 32
        else:
            return 64
    @property
    def type_name(self):
        """C type name for the signal, e.g. 'uint16_t', 'int8_t' or 'float'."""
        if self.is_float:
            if self.length == 32:
                type_name = 'float'
            else:
                type_name = 'double'
        else:
            type_name = 'int{}_t'.format(self.type_length)
            if not self.is_signed:
                type_name = 'u' + type_name
        return type_name
    @property
    def type_suffix(self):
        """C literal suffix matching type_name ('u', 'll', 'ull', 'f' or '')."""
        try:
            return {
                'uint8_t': 'u',
                'uint16_t': 'u',
                'uint32_t': 'u',
                'int64_t': 'll',
                'uint64_t': 'ull',
                'float': 'f'
            }[self.type_name]
        except KeyError:
            # int8/16/32 and double need no suffix.
            return ''
    @property
    def conversion_type_suffix(self):
        """C literal suffix for the unsigned conversion type of this width."""
        try:
            return {
                8: 'u',
                16: 'u',
                32: 'u',
                64: 'ull'
            }[self.type_length]
        except KeyError:
            return ''
    @property
    def unique_choices(self):
        """Make duplicated choice names unique by first appending its value
        and then underscores until unique.
        """
        # value -> upper-case snake_case name for each choice.
        items = {
            value: camel_to_snake_case(str(name)).upper()
            for value, name in self.choices.items()
        }
        names = list(items.values())
        duplicated_names = [
            name
            for name in set(names)
            if names.count(name) > 1
        ]
        # Names that are already unique are kept as-is.
        unique_choices = {
            value: name
            for value, name in items.items()
            if names.count(name) == 1
        }
        for value, name in items.items():
            if name in duplicated_names:
                # First disambiguate with the value, then with underscores.
                name += _canonical('_{}'.format(value))
                while name in unique_choices.values():
                    name += '_'
                unique_choices[value] = name
        return unique_choices
    @property
    def minimum_type_value(self):
        """Smallest value representable by type_name (None for floats)."""
        if self.type_name == 'int8_t':
            return -128
        elif self.type_name == 'int16_t':
            return -32768
        elif self.type_name == 'int32_t':
            return -2147483648
        elif self.type_name == 'int64_t':
            return -9223372036854775808
        elif self.type_name[0] == 'u':
            return 0
        else:
            return None
    @property
    def maximum_type_value(self):
        """Largest value representable by type_name (None for floats)."""
        if self.type_name == 'int8_t':
            return 127
        elif self.type_name == 'int16_t':
            return 32767
        elif self.type_name == 'int32_t':
            return 2147483647
        elif self.type_name == 'int64_t':
            return 9223372036854775807
        elif self.type_name == 'uint8_t':
            return 255
        elif self.type_name == 'uint16_t':
            return 65535
        elif self.type_name == 'uint32_t':
            return 4294967295
        elif self.type_name == 'uint64_t':
            return 18446744073709551615
        else:
            return None
    @property
    def minimum_value(self):
        """Smallest raw value given the signal's bit length (None for floats)."""
        if self.is_float:
            return None
        elif self.is_signed:
            return -(2 ** (self.length - 1))
        else:
            return 0
    @property
    def maximum_value(self):
        """Largest raw value given the signal's bit length (None for floats)."""
        if self.is_float:
            return None
        elif self.is_signed:
            return ((2 ** (self.length - 1)) - 1)
        else:
            return ((2 ** self.length) - 1)
    def segments(self, invert_shift):
        """Yield (byte_index, shift, shift_direction, mask) per touched byte.

        Walks the signal's bits byte by byte, honoring byte_order, and
        produces the arguments for the pack/unpack shift helpers. With
        invert_shift=True the shift direction is reversed (unpack uses the
        inverse of the pack shifts).
        """
        index, pos = divmod(self.start, 8)
        left = self.length
        while left > 0:
            if self.byte_order == 'big_endian':
                if left >= (pos + 1):
                    # Signal fills this byte down to bit 0.
                    length = (pos + 1)
                    pos = 7
                    shift = -(left - length)
                    mask = ((1 << length) - 1)
                else:
                    # Remainder fits inside this byte.
                    length = left
                    shift = (pos - length + 1)
                    mask = ((1 << length) - 1)
                    mask <<= (pos - length + 1)
            else:
                shift = (left - self.length) + pos
                if left >= (8 - pos):
                    # Signal fills this byte up to bit 7.
                    length = (8 - pos)
                    mask = ((1 << length) - 1)
                    mask <<= pos
                    pos = 0
                else:
                    length = left
                    mask = ((1 << length) - 1)
                    mask <<= pos
            if invert_shift:
                if shift < 0:
                    shift = -shift
                    shift_direction = 'left'
                else:
                    shift_direction = 'right'
            else:
                if shift < 0:
                    shift = -shift
                    shift_direction = 'right'
                else:
                    shift_direction = 'left'
            yield index, shift, shift_direction, mask
            left -= length
            index += 1
class Message(object):
    """Wrapper around a database message adding C-code-generation helpers.

    Exposes the snake_case message name and wraps every signal in a
    :class:`Signal`. Unknown attributes are delegated to the wrapped
    message object.
    """
    def __init__(self, message):
        self._message = message
        self.snake_name = camel_to_snake_case(self.name)
        self.signals = [Signal(wrapped) for wrapped in message.signals]
    def __getattr__(self, name):
        # Fall back to the wrapped message for anything not defined here.
        return getattr(self._message, name)
    def get_signal_by_name(self, name):
        """Return the wrapped signal called *name*, or None if absent."""
        return next((candidate for candidate in self.signals
                     if candidate.name == name),
                    None)
def _canonical(value):
"""Replace anything but 'a-z', 'A-Z' and '0-9' with '_'.
"""
return re.sub(r'[^a-zA-Z0-9]', '_', value)
def camel_to_snake_case(value):
    """Convert *value* to snake_case, mapping non-alphanumerics to '_'."""
    # Break 'XxYyy' boundaries, collapse underscore runs, then break
    # 'x9Z' style boundaries before lower-casing.
    result = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', value)
    result = re.sub(r'(_+)', '_', result)
    result = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', result)
    return _canonical(result.lower())
def _strip_blank_lines(lines):
try:
while lines[0] == '':
lines = lines[1:]
while lines[-1] == '':
lines = lines[:-1]
except IndexError:
pass
return lines
def _get(value, default):
if value is None:
value = default
return value
def _format_comment(comment):
if comment:
return '\n'.join([
' * ' + line.rstrip()
for line in comment.splitlines()
]) + '\n *\n'
else:
return ''
def _format_decimal(value, is_float=False, use_float=False):
f_append = 'f' if use_float else ''
if int(value) == value:
value = int(value)
if is_float:
return f'{value}.0{f_append}'
else:
return str(value)
else:
return f'{value}{f_append}'
def _format_range(signal):
    """Describe the signal range as 'raw..raw (physical..physical unit)'.

    Missing bounds are left out; '-' is returned when neither bound is set.
    """
    minimum = signal.decimal.minimum
    maximum = signal.decimal.maximum
    scale = signal.decimal.scale
    offset = signal.decimal.offset

    def as_raw(physical):
        # Convert a physical bound back to its raw literal.
        return _format_decimal((physical - offset) / scale)

    if minimum is not None and maximum is not None:
        return f'{as_raw(minimum)}..{as_raw(maximum)} ({minimum}..{maximum} {signal.unit})'
    if minimum is not None:
        return f'{as_raw(minimum)}.. ({minimum}.. {signal.unit})'
    if maximum is not None:
        return f'..{as_raw(maximum)} (..{maximum} {signal.unit})'
    return '-'
def _generate_signal(signal, bit_fields):
    """Render the documented struct member for one signal.

    Bit-field widths are only emitted for integer signals and only when
    bit_fields is enabled.
    """
    if signal.is_float or not bit_fields:
        length = ''
    else:
        length = ' : {}'.format(signal.length)

    return SIGNAL_MEMBER_FMT.format(comment=_format_comment(signal.comment),
                                    range=_format_range(signal),
                                    scale=_get(signal.scale, '-'),
                                    offset=_get(signal.offset, '-'),
                                    type_name=signal.type_name,
                                    name=signal.snake_name,
                                    length=length)
def _format_pack_code_mux(message,
                          mux,
                          body_lines_per_index,
                          variable_lines,
                          helper_kinds):
    """Format pack code for one multiplexed subtree of the signal tree.

    Packs the multiplexer signal itself into the shared body, then emits a
    C switch on its value with one case per multiplexer id. Returns the
    switch lines, indented one level.
    """
    signal_name, multiplexed_signals = list(mux.items())[0]
    # The multiplexer selector is always packed, regardless of its value.
    _format_pack_code_signal(message,
                             signal_name,
                             body_lines_per_index,
                             variable_lines,
                             helper_kinds)
    multiplexed_signals_per_id = sorted(list(multiplexed_signals.items()))
    signal_name = camel_to_snake_case(signal_name)
    lines = [
        '',
        'switch (src_p->{}) {{'.format(signal_name)
    ]
    for multiplexer_id, multiplexed_signals in multiplexed_signals_per_id:
        body_lines = _format_pack_code_level(message,
                                             multiplexed_signals,
                                             variable_lines,
                                             helper_kinds)
        lines.append('')
        lines.append('case {}:'.format(multiplexer_id))
        if body_lines:
            # Drop the surrounding blank lines added by the level formatter.
            lines.extend(body_lines[1:-1])
        lines.append(' break;')
    lines.extend([
        '',
        'default:',
        ' break;',
        '}'])
    # Indent the whole switch and strip trailing whitespace per line.
    return [(' ' + line).rstrip() for line in lines]
def _format_pack_code_signal(message,
                             signal_name,
                             body_lines,
                             variable_lines,
                             helper_kinds):
    """Append pack statements for one signal to *body_lines*.

    Float and signed signals are first converted into a local unsigned
    variable (declared via *variable_lines*); every touched byte then gets
    one pack_*_shift call. Each (direction, width) pair used is recorded in
    *helper_kinds* so the matching helper gets generated.
    """
    signal = message.get_signal_by_name(signal_name)
    if signal.is_float or signal.is_signed:
        variable = ' uint{}_t {};'.format(signal.type_length,
                                          signal.snake_name)
        if signal.is_float:
            # Bit-copy the float into the unsigned variable.
            conversion = ' memcpy(&{0}, &src_p->{0}, sizeof({0}));'.format(
                signal.snake_name)
        else:
            conversion = ' {0} = (uint{1}_t)src_p->{0};'.format(
                signal.snake_name,
                signal.type_length)
        variable_lines.append(variable)
        body_lines.append(conversion)
    for index, shift, shift_direction, mask in signal.segments(invert_shift=False):
        if signal.is_float or signal.is_signed:
            # Pack from the local conversion variable.
            fmt = ' dst_p[{}] |= pack_{}_shift_u{}({}, {}u, 0x{:02x}u);'
        else:
            # Pack straight from the struct member.
            fmt = ' dst_p[{}] |= pack_{}_shift_u{}(src_p->{}, {}u, 0x{:02x}u);'
        line = fmt.format(index,
                          shift_direction,
                          signal.type_length,
                          signal.snake_name,
                          shift,
                          mask)
        body_lines.append(line)
        helper_kinds.add((shift_direction, signal.type_length))
def _format_pack_code_level(message,
                            signal_names,
                            variable_lines,
                            helper_kinds):
    """Format one pack level in a signal tree.

    Plain signal names are packed directly; dict entries are multiplexed
    subtrees and expand to switch statements appended after the plain
    signals. A non-empty result is framed by blank lines.
    """
    body_lines = []
    muxes_lines = []

    for signal_name in signal_names:
        if isinstance(signal_name, dict):
            muxes_lines += _format_pack_code_mux(message,
                                                 signal_name,
                                                 body_lines,
                                                 variable_lines,
                                                 helper_kinds)
        else:
            _format_pack_code_signal(message,
                                     signal_name,
                                     body_lines,
                                     variable_lines,
                                     helper_kinds)

    body_lines += muxes_lines

    if body_lines:
        return [''] + body_lines + ['']
    return body_lines
def _format_pack_code(message, helper_kinds):
    """Return (variable declarations, body) strings for the pack function."""
    variable_lines = []
    body_lines = _format_pack_code_level(message,
                                         message.signal_tree,
                                         variable_lines,
                                         helper_kinds)

    if variable_lines:
        # De-duplicate and order the declarations, then leave a blank gap.
        variable_lines = sorted(set(variable_lines)) + ['', '']

    return '\n'.join(variable_lines), '\n'.join(body_lines)
def _format_unpack_code_mux(message,
                            mux,
                            body_lines_per_index,
                            variable_lines,
                            helper_kinds,
                            node_name):
    """Format unpack code for one multiplexed subtree of the signal tree.

    Unpacks the multiplexer signal into the shared body first, then emits a
    C switch on the already-unpacked value with one case per multiplexer
    id. Returns the switch lines, indented one level.
    """
    signal_name, multiplexed_signals = list(mux.items())[0]
    _format_unpack_code_signal(message,
                               signal_name,
                               body_lines_per_index,
                               variable_lines,
                               helper_kinds)
    multiplexed_signals_per_id = sorted(list(multiplexed_signals.items()))
    signal_name = camel_to_snake_case(signal_name)
    lines = [
        'switch (dst_p->{}) {{'.format(signal_name)
    ]
    for multiplexer_id, multiplexed_signals in multiplexed_signals_per_id:
        body_lines = _format_unpack_code_level(message,
                                               multiplexed_signals,
                                               variable_lines,
                                               helper_kinds,
                                               node_name)
        lines.append('')
        lines.append('case {}:'.format(multiplexer_id))
        # Trim the blank framing lines before inlining the case body.
        lines.extend(_strip_blank_lines(body_lines))
        lines.append(' break;')
    lines.extend([
        '',
        'default:',
        ' break;',
        '}'])
    # Indent the whole switch and strip trailing whitespace per line.
    return [(' ' + line).rstrip() for line in lines]
def _format_unpack_code_signal(message,
                               signal_name,
                               body_lines,
                               variable_lines,
                               helper_kinds):
    """Append unpack statements for one signal to *body_lines*.

    Float and signed signals are assembled into a local unsigned variable
    (declared via *variable_lines*) and converted afterwards; signed values
    additionally get sign extension. Each (direction, width) pair used is
    recorded in *helper_kinds*.
    """
    signal = message.get_signal_by_name(signal_name)
    conversion_type_name = 'uint{}_t'.format(signal.type_length)
    if signal.is_float or signal.is_signed:
        variable = ' {} {};'.format(conversion_type_name, signal.snake_name)
        variable_lines.append(variable)
    segments = signal.segments(invert_shift=True)
    for i, (index, shift, shift_direction, mask) in enumerate(segments):
        if signal.is_float or signal.is_signed:
            # Accumulate into the local conversion variable.
            fmt = ' {} {} unpack_{}_shift_u{}(src_p[{}], {}u, 0x{:02x}u);'
        else:
            # Accumulate straight into the struct member.
            fmt = ' dst_p->{} {} unpack_{}_shift_u{}(src_p[{}], {}u, 0x{:02x}u);'
        line = fmt.format(signal.snake_name,
                          '=' if i == 0 else '|=',  # first segment assigns
                          shift_direction,
                          signal.type_length,
                          index,
                          shift,
                          mask)
        body_lines.append(line)
        helper_kinds.add((shift_direction, signal.type_length))
    if signal.is_float:
        # Bit-copy the assembled unsigned value into the float member.
        conversion = ' memcpy(&dst_p->{0}, &{0}, sizeof(dst_p->{0}));'.format(
            signal.snake_name)
        body_lines.append(conversion)
    elif signal.is_signed:
        # Mask of the bits above the signal within its conversion type.
        mask = ((1 << (signal.type_length - signal.length)) - 1)
        if mask != 0:
            mask <<= signal.length
            formatted = SIGN_EXTENSION_FMT.format(name=signal.snake_name,
                                                  shift=signal.length - 1,
                                                  mask=mask,
                                                  suffix=signal.conversion_type_suffix)
            body_lines.extend(formatted.splitlines())
        conversion = ' dst_p->{0} = (int{1}_t){0};'.format(signal.snake_name,
                                                           signal.type_length)
        body_lines.append(conversion)
def _format_unpack_code_level(message,
                              signal_names,
                              variable_lines,
                              helper_kinds,
                              node_name):
    """Format one unpack level in a signal tree.

    Plain signals the node does not receive are skipped entirely; dict
    entries are multiplexed subtrees whose switch statements are appended
    after the plain signals, separated by blank lines.
    """
    body_lines = []
    muxes_lines = []
    for signal_name in signal_names:
        if isinstance(signal_name, dict):
            mux_lines = _format_unpack_code_mux(message,
                                                signal_name,
                                                body_lines,
                                                variable_lines,
                                                helper_kinds,
                                                node_name)
            if muxes_lines:
                # Blank line between consecutive switch statements.
                muxes_lines.append('')
            muxes_lines += mux_lines
        else:
            # Only unpack signals this node actually receives.
            if not _is_receiver(message.get_signal_by_name(signal_name), node_name):
                continue
            _format_unpack_code_signal(message,
                                       signal_name,
                                       body_lines,
                                       variable_lines,
                                       helper_kinds)
    if body_lines:
        if body_lines[-1] != '':
            body_lines.append('')
    if muxes_lines:
        muxes_lines.append('')
    body_lines = body_lines + muxes_lines
    if body_lines:
        # Leading blank line frames the generated block.
        body_lines = [''] + body_lines
    return body_lines
def _format_unpack_code(message, helper_kinds, node_name):
    """Return (variable declarations, body) strings for the unpack function."""
    variable_lines = []
    body_lines = _format_unpack_code_level(message,
                                           message.signal_tree,
                                           variable_lines,
                                           helper_kinds,
                                           node_name)

    if variable_lines:
        # De-duplicate and order the declarations, then leave a blank gap.
        variable_lines = sorted(set(variable_lines)) + ['', '']

    return '\n'.join(variable_lines), '\n'.join(body_lines)
def _generate_struct(message, bit_fields):
    """Return (comment, member definitions) for one message struct.

    Empty messages get a documented dummy member so the C struct stays
    valid.
    """
    members = [_generate_signal(signal, bit_fields)
               for signal in message.signals]

    if not members:
        members = [
            ' /**\n'
            ' * Dummy signal in empty message.\n'
            ' */\n'
            ' uint8_t dummy;'
        ]

    if message.comment is None:
        comment = ''
    else:
        comment = ' * {}\n *\n'.format(message.comment)

    return comment, members
def _format_choices(signal, signal_name):
choices = []
for value, name in sorted(signal.unique_choices.items()):
if signal.is_signed:
fmt = '{signal_name}_{name}_CHOICE ({value})'
else:
fmt = '{signal_name}_{name}_CHOICE ({value}u)'
choices.append(fmt.format(signal_name=signal_name.upper(),
name=str(name),
value=value))
return choices
def _generate_encode_decode(message, use_float):
    """Return one (encode, decode) C expression pair per signal.

    encode maps physical -> raw, decode maps raw -> physical. Trivial
    scale/offset combinations produce simplified expressions.
    """
    fp_type = _get_floating_point_type(use_float)
    encode_decode = []

    for signal in message.signals:
        scale = signal.decimal.scale
        offset = signal.decimal.offset
        scale_literal = _format_decimal(scale, is_float=True, use_float=use_float)
        offset_literal = _format_decimal(offset, is_float=True, use_float=use_float)

        if offset == 0 and scale == 1:
            # Identity transfer function.
            encoding = 'value'
            decoding = '({})value'.format(fp_type)
        elif offset != 0 and scale != 1:
            encoding = '(value - {}) / {}'.format(offset_literal, scale_literal)
            decoding = '(({})value * {}) + {}'.format(fp_type,
                                                      scale_literal,
                                                      offset_literal)
        elif offset != 0:
            # Scale is 1; only the offset applies.
            encoding = 'value - {}'.format(offset_literal)
            decoding = '({})value + {}'.format(fp_type, offset_literal)
        else:
            # Offset is 0; only the scale applies.
            encoding = 'value / {}'.format(scale_literal)
            decoding = '({})value * {}'.format(fp_type, scale_literal)

        encode_decode.append((encoding, decoding))

    return encode_decode
def _generate_is_in_range(message):
    """Generate range checks for all signals in given message.

    Returns one C boolean expression per signal. Bounds come from the
    database (converted to raw values) or, failing that, from the signal's
    bit-length limits; bounds that already coincide with the C type's
    limits are omitted, and a signal with no effective bounds checks as
    'true'.
    """
    checks = []
    for signal in message.signals:
        scale = signal.decimal.scale
        offset = (signal.decimal.offset / scale)
        minimum = signal.decimal.minimum
        maximum = signal.decimal.maximum
        # Convert physical database bounds to raw values.
        if minimum is not None:
            minimum = (minimum / scale - offset)
        if maximum is not None:
            maximum = (maximum / scale - offset)
        # Fall back to bit-length bounds when tighter than the C type.
        if minimum is None and signal.minimum_value is not None:
            if signal.minimum_value > signal.minimum_type_value:
                minimum = signal.minimum_value
        if maximum is None and signal.maximum_value is not None:
            if signal.maximum_value < signal.maximum_type_value:
                maximum = signal.maximum_value
        suffix = signal.type_suffix
        check = []
        if minimum is not None:
            if not signal.is_float:
                minimum = Decimal(int(minimum))
            minimum_type_value = signal.minimum_type_value
            # Skip the check when the bound equals the type's own minimum.
            if (minimum_type_value is None) or (minimum > minimum_type_value):
                minimum = _format_decimal(minimum, signal.is_float)
                check.append('(value >= {}{})'.format(minimum, suffix))
        if maximum is not None:
            if not signal.is_float:
                maximum = Decimal(int(maximum))
            maximum_type_value = signal.maximum_type_value
            # Skip the check when the bound equals the type's own maximum.
            if (maximum_type_value is None) or (maximum < maximum_type_value):
                maximum = _format_decimal(maximum, signal.is_float)
                check.append('(value <= {}{})'.format(maximum, suffix))
        if not check:
            check = ['true']
        elif len(check) == 1:
            # Single condition needs no surrounding parentheses.
            check = [check[0][1:-1]]
        check = ' && '.join(check)
        checks.append(check)
    return checks
def _generate_frame_id_defines(database_name, messages, node_name):
return '\n'.join([
'#define {}_{}_FRAME_ID (0x{:02x}u)'.format(
database_name.upper(),
message.snake_name.upper(),
message.frame_id)
for message in messages if _is_sender_or_receiver(message, node_name)
])
def _generate_frame_length_defines(database_name, messages, node_name):
    """One '#define <DB>_<MSG>_LENGTH (<n>u)' per relevant message."""
    lines = []
    for message in messages:
        if _is_sender_or_receiver(message, node_name):
            lines.append('#define {}_{}_LENGTH ({}u)'.format(
                database_name.upper(),
                message.snake_name.upper(),
                message.length))
    return '\n'.join(lines)
def _generate_frame_cycle_time_defines(database_name, messages, node_name):
    """One '#define <DB>_<MSG>_CYCLE_TIME_MS (<ms>u)' per periodic message."""
    lines = []
    for message in messages:
        # Only messages with a cycle time, visible to the node, qualify.
        if message.cycle_time is None:
            continue
        if not _is_sender_or_receiver(message, node_name):
            continue
        lines.append('#define {}_{}_CYCLE_TIME_MS ({}u)'.format(
            database_name.upper(),
            message.snake_name.upper(),
            message.cycle_time))
    return '\n'.join(lines)
def _generate_is_extended_frame_defines(database_name, messages, node_name):
    """One '#define <DB>_<MSG>_IS_EXTENDED (0|1)' per relevant message."""
    lines = []
    for message in messages:
        if _is_sender_or_receiver(message, node_name):
            lines.append('#define {}_{}_IS_EXTENDED ({})'.format(
                database_name.upper(),
                message.snake_name.upper(),
                int(message.is_extended_frame)))
    return '\n'.join(lines)
def _generate_choices_defines(database_name, messages, node_name):
    """CHOICE #define blocks for every named-value signal visible to the node."""
    blocks = []

    for message in messages:
        is_sender = _is_sender(message, node_name)

        for signal in message.signals:
            if signal.choices is None:
                continue
            # Skip signals this node neither sends nor receives.
            if not is_sender and not _is_receiver(signal, node_name):
                continue

            lines = [
                '#define {}_{}_{}'.format(database_name.upper(),
                                          message.snake_name.upper(),
                                          choice)
                for choice in _format_choices(signal, signal.snake_name)
            ]
            blocks.append('\n'.join(lines))

    return '\n\n'.join(blocks)
def _generate_structs(database_name, messages, bit_fields, node_name):
    """Struct definitions for every message the node sends or receives."""
    structs = []

    for message in messages:
        if not _is_sender_or_receiver(message, node_name):
            continue
        comment, members = _generate_struct(message, bit_fields)
        structs.append(STRUCT_FMT.format(comment=comment,
                                         database_message_name=message.name,
                                         message_name=message.snake_name,
                                         database_name=database_name,
                                         members='\n\n'.join(members)))

    return '\n'.join(structs)
def _is_sender(message, node_name):
return node_name is None or node_name in message.senders
def _is_receiver(signal, node_name):
return node_name is None or node_name in signal.receivers
def _is_sender_or_receiver(message, node_name):
    """True when the node sends the message or receives any of its signals."""
    return _is_sender(message, node_name) or any(
        _is_receiver(signal, node_name) for signal in message.signals)
def _get_floating_point_type(use_float):
return 'float' if use_float else 'double'
def _generate_declarations(database_name, messages, floating_point_numbers, use_float, node_name):
    """Header declarations (pack/unpack/encode/decode/is_in_range) per message.

    Pack and encode declarations are emitted only for messages the node
    sends; unpack and decode only for signals it receives. With node_name
    None, everything is declared.
    """
    declarations = []
    for message in messages:
        signal_declarations = []
        is_sender = _is_sender(message, node_name)
        is_receiver = node_name is None
        for signal in message.signals:
            if _is_receiver(signal, node_name):
                # Any received signal makes the message's unpack required.
                is_receiver = True
            signal_declaration = ''
            if floating_point_numbers:
                if is_sender:
                    signal_declaration += SIGNAL_DECLARATION_ENCODE_FMT.format(
                        database_name=database_name,
                        message_name=message.snake_name,
                        signal_name=signal.snake_name,
                        type_name=signal.type_name,
                        floating_point_type=_get_floating_point_type(use_float))
                if node_name is None or _is_receiver(signal, node_name):
                    signal_declaration += SIGNAL_DECLARATION_DECODE_FMT.format(
                        database_name=database_name,
                        message_name=message.snake_name,
                        signal_name=signal.snake_name,
                        type_name=signal.type_name,
                        floating_point_type=_get_floating_point_type(use_float))
            if is_sender or _is_receiver(signal, node_name):
                signal_declaration += SIGNAL_DECLARATION_IS_IN_RANGE_FMT.format(
                    database_name=database_name,
                    message_name=message.snake_name,
                    signal_name=signal.snake_name,
                    type_name=signal.type_name)
            signal_declarations.append(signal_declaration)
        declaration = ""
        if is_sender:
            declaration += DECLARATION_PACK_FMT.format(database_name=database_name,
                                                       database_message_name=message.name,
                                                       message_name=message.snake_name)
        if is_receiver:
            declaration += DECLARATION_UNPACK_FMT.format(database_name=database_name,
                                                         database_message_name=message.name,
                                                         message_name=message.snake_name)
        if signal_declarations:
            declaration += '\n' + '\n'.join(signal_declarations)
        if declaration:
            declarations.append(declaration)
    return '\n'.join(declarations)
def _generate_definitions(database_name, messages, floating_point_numbers, use_float, node_name):
    """Source definitions per message, plus the helper kinds they require.

    Returns (definitions_string, (pack_helper_kinds, unpack_helper_kinds)),
    the helper-kind sets being (direction, width) pairs consumed by
    _generate_helpers(). Zero-length messages always get the no-op
    pack/unpack pair from EMPTY_DEFINITION_FMT.
    """
    definitions = []
    pack_helper_kinds = set()
    unpack_helper_kinds = set()
    for message in messages:
        signal_definitions = []
        is_sender = _is_sender(message, node_name)
        is_receiver = node_name is None
        # encode/decode expressions and range checks are generated per
        # signal, in the same order as message.signals.
        for signal, (encode, decode), check in zip(message.signals,
                                                   _generate_encode_decode(message, use_float),
                                                   _generate_is_in_range(message)):
            if _is_receiver(signal, node_name):
                is_receiver = True
            if check == 'true':
                # Unconditionally-true check never reads its argument.
                unused = ' (void)value;\n\n'
            else:
                unused = ''
            signal_definition = ''
            if floating_point_numbers:
                if is_sender:
                    signal_definition += SIGNAL_DEFINITION_ENCODE_FMT.format(
                        database_name=database_name,
                        message_name=message.snake_name,
                        signal_name=signal.snake_name,
                        type_name=signal.type_name,
                        encode=encode,
                        floating_point_type=_get_floating_point_type(use_float))
                if node_name is None or _is_receiver(signal, node_name):
                    signal_definition += SIGNAL_DEFINITION_DECODE_FMT.format(
                        database_name=database_name,
                        message_name=message.snake_name,
                        signal_name=signal.snake_name,
                        type_name=signal.type_name,
                        decode=decode,
                        floating_point_type=_get_floating_point_type(use_float))
            if is_sender or _is_receiver(signal, node_name):
                signal_definition += SIGNAL_DEFINITION_IS_IN_RANGE_FMT.format(
                    database_name=database_name,
                    message_name=message.snake_name,
                    signal_name=signal.snake_name,
                    type_name=signal.type_name,
                    unused=unused,
                    check=check)
            signal_definitions.append(signal_definition)
        if message.length > 0:
            pack_variables, pack_body = _format_pack_code(message,
                                                          pack_helper_kinds)
            unpack_variables, unpack_body = _format_unpack_code(message,
                                                                unpack_helper_kinds,
                                                                node_name)
            pack_unused = ''
            unpack_unused = ''
            # Silence unused-parameter warnings when the body is empty.
            if not pack_body:
                pack_unused += ' (void)src_p;\n\n'
            if not unpack_body:
                unpack_unused += ' (void)dst_p;\n'
                unpack_unused += ' (void)src_p;\n\n'
            definition = ""
            if is_sender:
                definition += DEFINITION_PACK_FMT.format(database_name=database_name,
                                                         database_message_name=message.name,
                                                         message_name=message.snake_name,
                                                         message_length=message.length,
                                                         pack_unused=pack_unused,
                                                         pack_variables=pack_variables,
                                                         pack_body=pack_body)
            if is_receiver:
                definition += DEFINITION_UNPACK_FMT.format(database_name=database_name,
                                                           database_message_name=message.name,
                                                           message_name=message.snake_name,
                                                           message_length=message.length,
                                                           unpack_unused=unpack_unused,
                                                           unpack_variables=unpack_variables,
                                                           unpack_body=unpack_body)
        else:
            definition = EMPTY_DEFINITION_FMT.format(database_name=database_name,
                                                     message_name=message.snake_name)
        if signal_definitions:
            definition += '\n' + '\n'.join(signal_definitions)
        if definition:
            definitions.append(definition)
    return '\n'.join(definitions), (pack_helper_kinds, unpack_helper_kinds)
def _generate_helpers_kind(kinds, left_format, right_format):
formats = {
'left': left_format,
'right': right_format
}
helpers = []
for shift_direction, length in sorted(kinds):
var_type = 'uint{}_t'.format(length)
helper = formats[shift_direction].format(length=length,
var_type=var_type)
helpers.append(helper)
return helpers
def _generate_helpers(kinds):
    """Expand (pack_kinds, unpack_kinds) into the static helper definitions."""
    helpers = _generate_helpers_kind(kinds[0],
                                     PACK_HELPER_LEFT_SHIFT_FMT,
                                     PACK_HELPER_RIGHT_SHIFT_FMT)
    helpers += _generate_helpers_kind(kinds[1],
                                      UNPACK_HELPER_LEFT_SHIFT_FMT,
                                      UNPACK_HELPER_RIGHT_SHIFT_FMT)

    if helpers:
        # Trailing empty entry yields a terminating newline on join.
        helpers.append('')

    return '\n'.join(helpers)
def _generate_fuzzer_source(database_name,
                            messages,
                            date,
                            header_name,
                            source_name,
                            fuzzer_source_name):
    """Return (fuzzer C source, fuzzer makefile) text for all messages."""
    tests = []
    calls = []

    for message in messages:
        name = '{}_{}'.format(database_name,
                              camel_to_snake_case(message.name))
        tests.append(TEST_FMT.format(name=name))
        calls.append(' test_{}(data_p, size);'.format(name))

    source = FUZZER_SOURCE_FMT.format(version=__version__,
                                      date=date,
                                      header=header_name,
                                      tests='\n'.join(tests),
                                      llvm_body='\n'.join(calls))
    makefile = FUZZER_MAKEFILE_FMT.format(version=__version__,
                                          date=date,
                                          source=source_name,
                                          fuzzer_source=fuzzer_source_name)

    return source, makefile
def generate(database,
             database_name,
             header_name,
             source_name,
             fuzzer_source_name,
             floating_point_numbers=True,
             bit_fields=False,
             use_float=False,
             node_name=None):
    """Generate C source code from given CAN database `database`.

    `database_name` is used as a prefix for all defines, data
    structures and functions.

    `header_name` is the file name of the C header file, which is
    included by the C source file.

    `source_name` is the file name of the C source file, which is
    needed by the fuzzer makefile.

    `fuzzer_source_name` is the file name of the C source file, which
    is needed by the fuzzer makefile.

    Set `floating_point_numbers` to ``True`` to allow floating point
    numbers in the generated code.

    Set `bit_fields` to ``True`` to generate bit fields in structs.

    Set `use_float` to ``True`` to prefer the `float` type instead
    of the `double` type for floating point numbers.

    `node_name` specifies the node for which message packers will be generated.
    For all other messages, unpackers will be generated. If `node_name` is not
    provided, both packers and unpackers will be generated.

    This function returns a tuple of the C header and source files as
    strings, followed by the fuzzer source and fuzzer makefile.
    """
    date = time.ctime()
    # Wrap database messages with the code-generation helpers.
    messages = [Message(message) for message in database.messages]
    include_guard = '{}_H'.format(database_name.upper())
    # Header #define sections.
    frame_id_defines = _generate_frame_id_defines(database_name, messages, node_name)
    frame_length_defines = _generate_frame_length_defines(database_name,
                                                          messages,
                                                          node_name)
    is_extended_frame_defines = _generate_is_extended_frame_defines(
        database_name,
        messages,
        node_name)
    frame_cycle_time_defines = _generate_frame_cycle_time_defines(
        database_name,
        messages,
        node_name)
    choices_defines = _generate_choices_defines(database_name, messages, node_name)
    # Struct types and function declarations for the header.
    structs = _generate_structs(database_name, messages, bit_fields, node_name)
    declarations = _generate_declarations(database_name,
                                          messages,
                                          floating_point_numbers,
                                          use_float,
                                          node_name)
    # Function definitions plus the shift-helper kinds they require.
    definitions, helper_kinds = _generate_definitions(database_name,
                                                      messages,
                                                      floating_point_numbers,
                                                      use_float,
                                                      node_name)
    helpers = _generate_helpers(helper_kinds)
    header = HEADER_FMT.format(version=__version__,
                               date=date,
                               include_guard=include_guard,
                               frame_id_defines=frame_id_defines,
                               frame_length_defines=frame_length_defines,
                               is_extended_frame_defines=is_extended_frame_defines,
                               frame_cycle_time_defines=frame_cycle_time_defines,
                               choices_defines=choices_defines,
                               structs=structs,
                               declarations=declarations)
    source = SOURCE_FMT.format(version=__version__,
                               date=date,
                               header=header_name,
                               helpers=helpers,
                               definitions=definitions)
    fuzzer_source, fuzzer_makefile = _generate_fuzzer_source(
        database_name,
        messages,
        date,
        header_name,
        source_name,
        fuzzer_source_name)
    return header, source, fuzzer_source, fuzzer_makefile
| mit | 63fe3810aa88ee181e04d7a5fe078866 | 30.326651 | 98 | 0.522341 | 4.16151 | false | false | false | false |
jepegit/cellpy | cellpy/readers/instruments/pec_csv.py | 1 | 16574 | """pec csv-type data files"""
import logging
import os
import warnings
from datetime import datetime
import numpy as np
import pandas as pd
from dateutil.parser import parse
from cellpy.parameters.internal_settings import get_headers_normal
from cellpy.readers.core import Data, FileID, humanize_bytes
from cellpy.readers.instruments.base import BaseLoader
# Mapping from cellpy's normal-header keys to the PEC csv column names.
# The time/voltage/current entries are defaults only: DataLoader rewrites
# them once the actual units are read from the file header.
pec_headers_normal = {
    "step_index_txt": "Step",
    "cycle_index_txt": "Cycle",
    "test_time_txt": "Total_Time_Seconds",  # This might change
    "step_time_txt": "Step_Time_Seconds",  # This might change
    "datetime_txt": "Real_Time",
    "voltage_txt": "Voltage_mV",  # This might change
    "current_txt": "Current_mA",  # This might change
    "charge_capacity_txt": "Charge_Capacity_mAh",
    "discharge_capacity_txt": "Discharge_Capacity_mAh",
    "charge_energy_txt": "Charge_Capacity_mWh",
    "discharge_energy_txt": "Discharge_Capacity_mWh",
    "internal_resistance_txt": "Internal_Resistance_1_mOhm",
    "test_id_txt": "Test",
}
# TODO: better reading of first part of the file (comments and headers)
# 1. find the units
# 2. convert cycle and step numbers so that they start with 1 and not 0
# 3. find user-defined variables
class DataLoader(BaseLoader):
    """Main loading class for PEC csv-type raw data files."""
    instrument_name = "pec_csv"
    raw_ext = "csv"
    def __init__(self, *args, **kwargs):
        self.headers_normal = (
            get_headers_normal()
        ) # should consider to move this to the Loader class
        self.current_chunk = 0 # use this to set chunks to load
        # Parsed file sections; populated by the loader methods.
        self.pec_data = None
        self.pec_log = None
        self.pec_settings = None
        self.variable_header_keywords = [
            "Voltage (V)",
            "Current (A)",
        ] # The unit of these will be read from file
        self.fake_header_length = [
            "#RESULTS CHECK\n",
            "#END RESULTS CHECK\n",
        ] # Ignores number of delimiters in between
        self.pec_file_delimiter = ","
        # Path of the file being loaded; set before the parsing helpers run.
        self.filename = None
        self.number_of_header_lines = None # Number of header lines is not constant
        self.cellpy_headers = (
            get_headers_normal()
        ) # should consider to move this to the Loader class
# @staticmethod
# def _get_pec_units():
# pec_units = dict()
# pec_units["voltage"] = 0.001 # V
# pec_units["current"] = 0.001 # A
# pec_units["charge"] = 0.001 # Ah
# pec_units["mass"] = 0.001 # g
# pec_units["energy"] = 0.001 # Wh
# return pec_units
    def _get_pec_units(self): # Fetches units from a csv file
        """Read unit conversion factors from the csv column headers.

        Inspects the first data row's column names for the variable
        keywords ('Voltage (V)', 'Current (A)'), extracts their SI prefix
        and updates both the returned factor dict and the module-level
        pec_headers_normal column names. Requires self.filename and
        self.number_of_header_lines to be set.
        """
        # Mapping SI prefixes to their conversion values.
        prefix = {"µ": 10**-6, "m": 10**-3, "": 1}
        # Fixed (non-variable) unit factors: Ah, g, Wh.
        pec_units = {"charge": 0.001, "mass": 0.001, "energy": 0.001} # Ah # g # Wh
        # Variable keywords without any prefixes, used as search terms.
        header = self.variable_header_keywords
        data = pd.read_csv(self.filename, skiprows=self.number_of_header_lines, nrows=1)
        # Search each column name for a prefixed variant of each keyword.
        for item in data.keys():
            for unit in header:
                # x is the (negative) offset of '(' from the keyword's end,
                # so unit[:x + 1] is the keyword text up to and including '('.
                x = unit.find("(") - len(unit)
                if unit[: x + 1] in item:
                    # The character at the same offset in the column name is
                    # the prefix (or '(' when there is none).
                    y = item[x].replace("(", "")
                    # Store the conversion factor and rename the header to
                    # include the actual unit. NOTE(review): an unknown
                    # prefix makes prefix.get(y) return None silently.
                    if header.index(unit) == 0:
                        pec_units["voltage"] = prefix.get(y)
                        pec_headers_normal["voltage_txt"] = f"Voltage_{y}V"
                    elif header.index(unit) == 1:
                        pec_units["current"] = prefix.get(y)
                        pec_headers_normal["current_txt"] = f"Current_{y}A"
        return pec_units
def _get_pec_times(self):
# Mapping units to their conversion values
logging.debug("retrieve pec units")
units = {
"(Hours in hh:mm:ss.xxx)": self.timestamp_to_seconds,
"(Decimal Hours)": 3600,
"(Minutes)": 60,
"(Seconds)": 1,
}
data = pd.read_csv(self.filename, skiprows=self.number_of_header_lines, nrows=0)
pec_times = dict()
# Adds the time variables and their units to the pec_times dictonary return value
# Also updates the column headers in pec_headers_normal with the correct name
for item in data.keys():
for unit in units:
if unit in item:
x = item.find("(")
var = item[: x - 1].lower().replace(" ", "_")
its_unit = item[x:]
pec_times[var] = units.get(its_unit)
if var == "total_time":
pec_headers_normal[
"test_time_txt"
] = f'Total_Time_{its_unit[1:-1].replace(" ", "_")}'
if var == "step_time":
pec_headers_normal[
"step_time_txt"
] = f'Step_Time_{its_unit[1:-1].replace(" ", "_")}'
return pec_times
@staticmethod
def get_raw_units():
    """Unit labels used by the instrument for the raw data.

    The labels are interpreted relative to SI units elsewhere in
    cellpy (e.g. a current recorded in mA would use the label "mA").

    Returns:
        dict mapping quantity name (current, charge, mass, voltage,
        energy, time) to its unit label (str).
    """
    return {
        "current": "A",
        "charge": "Ah",
        "mass": "mg",
        "voltage": "V",
        "energy": "Wh",
        "time": "s",
    }
@staticmethod
def _raw_units_for_internal_calculations():
    """Numeric unit fractions (w.r.t. SI) used when scaling raw PEC data.

    Returns:
        dict mapping quantity name to its conversion factor (float);
        e.g. mass is recorded in mg, hence the 0.001 factor.
    """
    factors = {
        "current": 1.0,
        "charge": 1.0,
        "mass": 0.001,
        "voltage": 1.0,
        "energy": 1.0,
        "time": 1.0,
    }
    return factors
def get_raw_limits(self):
    """Epsilon values used when classifying step types.

    The raw limits decide whether current/voltage/charge is considered
    stable (e.g. a galvanostatic step should have a stable, non-zero
    current).  The values below are initial guesses for PEC data.

    Returns:
        dict of raw limits
    """
    warnings.warn("raw limits have not been subject for testing yet")
    return {
        "current_hard": 0.1,  # There is a bug in PEC
        "current_soft": 1.0,
        "stable_current_hard": 2.0,
        "stable_current_soft": 4.0,
        "stable_voltage_hard": 2.0,
        "stable_voltage_soft": 4.0,
        "stable_charge_hard": 2.0,
        "stable_charge_soft": 5.0,
        "ir_change": 0.00001,
    }
def loader(self, file_name, bad_steps=None, **kwargs):
if not os.path.isfile(file_name):
self.logger.info("Missing file_\n %s" % file_name)
return None
self.filename = file_name
self.number_of_header_lines = self._find_header_length()
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
logging.debug(txt)
data = Data()
fid = FileID(file_name)
# div parameters and information (probably load this last)
test_no = 1
data.cell_no = int(test_no)
data.loaded_from = file_name
# some overall prms
data.channel_index = None
data.channel_number = None
data.creator = None
data.item_ID = None
data.schedule_file_name = None
data.test_ID = None
data.test_name = None
data.raw_data_files.append(fid)
# --------- read raw-data (normal-data) -------------------------
self._load_pec_data(file_name, bad_steps)
data.start_datetime = self.pec_settings["start_time"]
length_of_test = self.pec_data.shape[0]
logging.debug(f"length of test: {length_of_test}")
logging.debug("renaming columns")
self._rename_headers()
self._convert_units()
# cycle indices should not be 0
if 0 in self.pec_data["cycle_index"]:
self.pec_data["cycle_index"] += 1
data.raw = self.pec_data
data.raw_data_files_length.append(length_of_test)
return data
def _load_pec_data(self, file_name, bad_steps):
number_of_header_lines = self.number_of_header_lines
# ----------------- reading the data ---------------------
df = pd.read_csv(file_name, skiprows=number_of_header_lines)
# get rid of unnamed columns
df = df.loc[:, ~df.columns.str.contains("^Unnamed")]
# get rid of spaces, parenthesis, and the deg-sign
new_column_headers = {
c: c.replace(" ", "_")
.replace("(", "")
.replace(")", "")
.replace("°", "")
.replace(r"%", "pct")
for c in df.columns
}
df.rename(columns=new_column_headers, inplace=True)
# add missing columns
df.insert(0, self.headers_normal.data_point_txt, range(len(df)))
df[self.headers_normal.sub_step_index_txt] = 0
df[self.headers_normal.sub_step_time_txt] = 0
self.pec_data = df
# ---------------- reading the parameters ---------------
with open(file_name, "r") as ofile:
counter = 0
lines = []
for line in ofile:
counter += 1
if counter > number_of_header_lines:
break
lines.append(line)
self._extract_variables(lines)
def _extract_variables(self, lines):
header_comments = dict()
comment_loop = False
for line_number, line in enumerate(lines):
if line.startswith("#"):
if not comment_loop:
comment_loop = True
else:
comment_loop = False
else:
if not comment_loop:
parts = line.split(",")
variable = parts[0].strip()
variable = variable.strip(":")
variable = variable.replace(" ", "_")
try:
value = parts[1].strip()
except IndexError:
value = None
if not value:
value = np.nan
header_comments[variable] = value
logging.debug(" Headers Dict ")
logging.debug(header_comments)
headers = dict()
start_time = parse(header_comments["Start_Time"])
end_time = parse(header_comments["End_Time"])
headers["start_time"] = start_time
headers["end_time"] = end_time
# headers["test_regime_name"] = header_comments["TestRegime_Name"]
self.pec_settings = headers
def _rename_headers(self):
logging.debug("Trying to rename the columns")
# logging.debug("Current columns:")
# logging.debug(self.pec_data.columns)
# logging.debug("Rename to:")
# logging.debug(self.headers_normal)
for key in pec_headers_normal:
self._rename_header(key, pec_headers_normal[key])
# logging.debug("New cols:")
# logging.debug(self.pec_data.columns)
def _convert_units(self):
logging.debug("Trying to convert all data into correct units")
logging.debug("- dtypes")
self.pec_data[self.headers_normal.datetime_txt] = pd.to_datetime(
self.pec_data[self.headers_normal.datetime_txt]
)
self.pec_data["Position_Start_Time"] = pd.to_datetime(
self.pec_data["Position_Start_Time"]
)
self.pec_data["Rack"] = self.pec_data["Rack"].astype("category")
logging.debug("- cellpy units")
pec_units = self._get_pec_units()
pec_times = self._get_pec_times()
raw_units = self._raw_units_for_internal_calculations()
self._rename_headers() # Had to run this again after fixing the headers, might be a better way to fix this
_v = pec_units["voltage"] / raw_units["voltage"]
_i = pec_units["current"] / raw_units["current"]
_c = pec_units["charge"] / raw_units["charge"]
_w = pec_units["energy"] / raw_units["energy"]
# Check if time is given in a units proportional to seconds or in a hh:mm:ss.xxx format
# Convert all hh:mm:ss.xxx formats to seconds using self.timestamp_to_seconds()
relevant_times = ["total_time", "step_time"]
for x in relevant_times:
if isinstance(pec_times[x], (int, float)):
if x == relevant_times[0]:
_tt = pec_times["total_time"] / raw_units["time"]
self.pec_data[self.headers_normal.test_time_txt] *= _tt
elif x == relevant_times[1]:
_st = pec_times["step_time"] / raw_units["time"]
self.pec_data[self.headers_normal.step_time_txt] *= _st
elif callable(pec_times[x]):
# EDIT jepe 18.06.2020: change to .apply(func) instead of for-loop
# (now the column is of float64 type and behaves properly)
if x == relevant_times[0]:
# col = self.pec_data[self.headers_normal.test_time_txt]
hdr = self.headers_normal.test_time_txt
elif x == relevant_times[1]:
# col = self.pec_data[self.headers_normal.step_time_txt]
hdr = self.headers_normal.test_time_txt
self.pec_data[hdr] = self.pec_data[hdr].apply(pec_times[x])
# for i in range(len(col)):
# col[i] = pec_times[x](col[i])
v_txt = self.headers_normal.voltage_txt
i_txt = self.headers_normal.current_txt
self.pec_data[v_txt] *= _v
self.pec_data[i_txt] *= _i
self.pec_data[self.headers_normal.charge_capacity_txt] *= _c
self.pec_data[self.headers_normal.discharge_capacity_txt] *= _c
self.pec_data[self.headers_normal.charge_energy_txt] *= _w
self.pec_data[self.headers_normal.discharge_energy_txt] *= _w
def _rename_header(self, h_old, h_new):
try:
self.pec_data.rename(
columns={h_new: self.cellpy_headers[h_old]}, inplace=True
)
except KeyError as e:
logging.info(f"Problem during conversion to cellpy-format ({e})")
def _find_header_length(self):
skiprows = 0
resultscheck = False # Ignore number of delimiters inside RESULTS CHECK
with open(self.filename, "r") as header:
for line in header:
if line in self.fake_header_length:
resultscheck = not resultscheck
if (
line.count(self.pec_file_delimiter) > 1 and not resultscheck
): # End when there are >2 columns
break
skiprows += 1
return skiprows
@staticmethod
def timestamp_to_seconds(timestamp):  # Changes hh:mm:s.xxx time format to seconds
    """Convert an "hh:mm:ss.xxx" duration string into seconds (float).

    strptime cannot represent 24 hours or more, so whole days are
    split off and accumulated manually before parsing the remainder.
    """
    seconds = 0
    hours = int(timestamp[:2])
    if hours >= 24:
        days, hours = divmod(hours, 24)
        seconds += days * 3600 * 24
        timestamp = str(hours) + timestamp[2:]
    midnight = datetime.strptime("00:00:00.000", "%H:%M:%S.%f")
    parsed = datetime.strptime(timestamp, "%H:%M:%S.%f")
    seconds += (parsed - midnight).total_seconds()
    return seconds
if __name__ == "__main__":
pass
| mit | 0759a320b5fa8cafc98c3d53d4b8a376 | 37.096552 | 115 | 0.557507 | 3.797434 | false | false | false | false |
miguelgrinberg/python-socketio | examples/server/tornado/app.py | 1 | 2432 | import os
import tornado.ioloop
from tornado.options import define, options, parse_command_line
import tornado.web
import socketio
define("port", default=5000, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
sio = socketio.AsyncServer(async_mode='tornado')
async def background_task():
"""Example of how to send server generated events to clients."""
count = 0
while True:
await sio.sleep(10)
count += 1
await sio.emit('my_response', {'data': 'Server generated event'})
class MainHandler(tornado.web.RequestHandler):
    """Serves the demo page for the Socket.IO example application."""

    def get(self):
        # Render the client page that opens the Socket.IO connection.
        self.render("app.html")
@sio.event
async def my_event(sid, message):
await sio.emit('my_response', {'data': message['data']}, room=sid)
@sio.event
async def my_broadcast_event(sid, message):
await sio.emit('my_response', {'data': message['data']})
@sio.event
async def join(sid, message):
sio.enter_room(sid, message['room'])
await sio.emit('my_response', {'data': 'Entered room: ' + message['room']},
room=sid)
@sio.event
async def leave(sid, message):
sio.leave_room(sid, message['room'])
await sio.emit('my_response', {'data': 'Left room: ' + message['room']},
room=sid)
@sio.event
async def close_room(sid, message):
await sio.emit('my_response',
{'data': 'Room ' + message['room'] + ' is closing.'},
room=message['room'])
await sio.close_room(message['room'])
@sio.event
async def my_room_event(sid, message):
await sio.emit('my_response', {'data': message['data']},
room=message['room'])
@sio.event
async def disconnect_request(sid):
await sio.disconnect(sid)
@sio.event
async def connect(sid, environ):
await sio.emit('my_response', {'data': 'Connected', 'count': 0}, room=sid)
@sio.event
def disconnect(sid):
print('Client disconnected')
def main():
    """Parse command-line options and start the Tornado/Socket.IO server."""
    parse_command_line()
    routes = [
        (r"/", MainHandler),
        (r"/socket.io/", socketio.get_tornado_handler(sio)),
    ]
    here = os.path.dirname(__file__)
    app = tornado.web.Application(
        routes,
        template_path=os.path.join(here, "templates"),
        static_path=os.path.join(here, "static"),
        debug=options.debug,
    )
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
| mit | d0190ff9cccf533f9a5bda72d6dc46dd | 23.816327 | 79 | 0.618421 | 3.524638 | false | false | false | false |
qiniu/python-sdk | qiniu/utils.py | 1 | 4783 | # -*- coding: utf-8 -*-
from hashlib import sha1
from base64 import urlsafe_b64encode, urlsafe_b64decode
from datetime import datetime
from .compat import b, s
try:
import zlib
binascii = zlib
except ImportError:
zlib = None
import binascii
_BLOCK_SIZE = 1024 * 1024 * 4
def urlsafe_base64_encode(data):
    """URL-safe base64 encoding.

    Encodes the given data with the URL-safe base64 alphabet.
    Spec: https://developer.qiniu.com/kodo/manual/1231/appendix#1

    Args:
        data: the data to encode, usually a string

    Returns:
        the encoded string
    """
    return s(urlsafe_b64encode(b(data)))
def urlsafe_base64_decode(data):
    """URL-safe base64 decoding.

    Decodes data that was encoded with the URL-safe base64 alphabet.

    Args:
        data: the data to decode, usually a string

    Returns:
        the decoded bytes
    """
    return urlsafe_b64decode(s(data))
def file_crc32(filePath):
    """Compute the CRC32 checksum of a file's contents.

    The file is read in blocks so arbitrarily large files can be
    checksummed without loading them into memory.

    Args:
        filePath: path of the file to checksum

    Returns:
        the CRC32 checksum as an unsigned 32-bit integer
    """
    checksum = 0
    with open(filePath, 'rb') as f:
        for chunk in _file_iter(f, _BLOCK_SIZE):
            checksum = binascii.crc32(chunk, checksum) & 0xFFFFFFFF
    return checksum
def crc32(data):
    """Compute the CRC32 checksum of the given data.

    Args:
        data: the data (str or bytes) to checksum

    Returns:
        the CRC32 checksum as an unsigned 32-bit integer
    """
    checksum = binascii.crc32(b(data))
    return checksum & 0xffffffff
def _file_iter(input_stream, size, offset=0):
    """Yield successive chunks read from a binary stream.

    Reading starts at ``offset``; chunks of at most ``size`` bytes are
    yielded until the stream is exhausted, after which the stream is
    rewound to position 0.

    Args:
        input_stream: binary stream to read from
        size: maximum chunk size in bytes
        offset: position to seek to before reading

    Raises:
        IOError: if reading from the stream fails
    """
    input_stream.seek(offset)
    chunk = input_stream.read(size)
    while chunk:
        yield chunk
        chunk = input_stream.read(size)
    input_stream.seek(0)
def _sha1(data):
    """Return the binary SHA-1 digest of ``data``.

    Args:
        data: bytes to hash

    Returns:
        the 20-byte SHA-1 digest
    """
    hasher = sha1()
    hasher.update(data)
    return hasher.digest()
def etag_stream(input_stream):
    """Compute the qiniu etag of a binary stream.

    Etag spec: https://developer.qiniu.com/kodo/manual/1231/appendix#3

    Args:
        input_stream: binary stream to hash

    Returns:
        the etag of the stream contents
    """
    block_hashes = [_sha1(chunk) for chunk in _file_iter(input_stream, _BLOCK_SIZE)]
    if not block_hashes:
        # empty stream: etag is defined as the single-block hash of b''
        block_hashes = [_sha1(b'')]
    if len(block_hashes) == 1:
        # single block: 0x16 prefix + sha1 of the block
        payload = b'\x16' + block_hashes[0]
    else:
        # multiple blocks: 0x96 prefix + sha1 of the concatenated block hashes
        payload = b'\x96' + _sha1(b('').join(block_hashes))
    return urlsafe_base64_encode(payload)
def etag(filePath):
    """Compute the qiniu etag of a file.

    Args:
        filePath: path of the file to hash

    Returns:
        the etag of the file contents
    """
    with open(filePath, 'rb') as f:
        return etag_stream(f)
def entry(bucket, key):
    """Encode a (bucket, key) pair in the format used by the qiniu API.

    Entry spec: https://developer.qiniu.com/kodo/api/1276/data-format

    Args:
        bucket: name of the bucket to operate on
        key: name of the file to operate on (may be None)

    Returns:
        the encoded entry understood by the qiniu API
    """
    if key is None:
        text = '{0}'.format(bucket)
    else:
        text = '{0}:{1}'.format(bucket, key)
    return urlsafe_base64_encode(text)
def rfc_from_timestamp(timestamp):
    """Convert a Unix timestamp into an HTTP (RFC 1123) date string.

    Args:
        timestamp: integer Unix timestamp in seconds

    Returns:
        a string such as ``'Thu, 01 Jan 1970 00:00:00 GMT'``
    """
    moment = datetime.utcfromtimestamp(timestamp)
    return moment.strftime('%a, %d %b %Y %H:%M:%S GMT')
def _valid_header_key_char(ch):
    """Return True if ``ch`` is a valid HTTP header-field "token" character.

    Mirrors Go net/textproto's isTokenTable (RFC 7230 token charset:
    "!#$%&'*+-.^_`|~", digits and letters).  Non-ASCII characters are
    rejected up front via the ord() range check.

    Bug fix: the original table listed "\\" (backslash) -- a
    mistranscription of Go's '\'' literal.  The apostrophe is a valid
    token character; backslash is not.
    """
    is_token_table = [
        "!", "#", "$", "%", "&", "'", "*", "+", "-", ".",
        "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
        "A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
        "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
        "U", "V", "W", "X", "Y", "Z",
        "^", "_", "`",
        "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
        "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
        "u", "v", "w", "x", "y", "z",
        "|", "~"]
    return 0 <= ord(ch) < 128 and ch in is_token_table
def canonical_mime_header_key(field_name):
    """Return the canonical MIME form of a header key.

    Canonicalization upper-cases the first letter and every letter
    following a hyphen ("accept-encoding" -> "Accept-Encoding").  If
    the name contains any character that is not a valid HTTP token
    character, it is returned unchanged.
    """
    if any(not _valid_header_key_char(ch) for ch in field_name):
        return field_name
    canonical = []
    make_upper = True
    for ch in field_name:
        if make_upper and "a" <= ch <= "z":
            canonical.append(ch.upper())
        elif not make_upper and "A" <= ch <= "Z":
            canonical.append(ch.lower())
        else:
            canonical.append(ch)
        make_upper = ch == "-"
    return "".join(canonical)
| mit | 7a584c3d6c17e054ef8459177c6a1095 | 19.5 | 77 | 0.529008 | 2.551662 | false | false | false | false |
meejah/txtorcon | txtorcon/onion.py | 1 | 53398 | import os
import re
import six
import base64
import hashlib
import functools
import warnings
from os.path import isabs, abspath
from zope.interface import Interface, Attribute, implementer
from twisted.internet import defer
from twisted.python import log
from txtorcon.util import find_keywords, version_at_least
from txtorcon.util import _is_non_public_numeric_address
from txtorcon.util import available_tcp_port
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
class HiddenServiceClientAuth(object):
    """
    Encapsulates a single client-authorization, as parsed from a
    HiddenServiceDir's "client_keys" file if you have stealth or basic
    authentication turned on.

    :param name: the name you gave it in the HiddenServiceAuthorizeClient line
    :param cookie: random password
    :param key: RSA private key, or None if this was basic auth
    """

    def __init__(self, name, cookie, key=None):
        self.name = name
        self.cookie = cookie
        # 'stealth' auth carries an RSA key; 'basic' auth has none
        if key:
            self.key = _parse_rsa_blob(key)
        else:
            self.key = None
class IOnionService(Interface):
"""
Encapsulates a single, ephemeral onion service.
If this instance happens to be a filesystem-based service (instead
of ephemeral), it shall implement IFilesystemOnionService as well
(which is a subclass of this).
If this object happens to represent an authenticated service, it
shall implement IAuthenticatedOnionClients ONLY (not this
interface too; IAuthenticatedOnionClients returns *lists* of
IOnionClient instances which are a subclass of
IOnionService; see :class:`txtorcon.IAuthenticatedOnionClients`).
For non-authenticated services, there will be one of these per
directory (i.e. HiddenServiceDir) if using non-ephemeral services,
or one per ADD_ONION for ephemeral hidden services.
For authenticated services, there is an instance implementing this
interface for each "client" of the authenticated service. In the
"basic" case, the .onion URI happens to be the same for each one
(with a different authethentication token) whereas for a "stealth"
sevice the .onion URI is different.
"""
hostname = Attribute("hostname, including .onion") # XXX *with* .onion? or not?
private_key = Attribute("Private key blob (bytes)")
ports = Attribute("list of str; the ports lines like 'public_port host:local_port'")
class IFilesystemOnionService(IOnionService):
"""
Encapsulates a single filesystem-based service.
Note this is a subclass of IOnionService; it just adds two
attributes that ephemeral services lack: hidden_service_directory
and group_readable.
"""
hidden_service_directory = Attribute('The directory where private data is kept')
group_readable = Attribute("set HiddenServiceGroupReadable if true")
class IAuthenticatedOnionClients(Interface):
"""
This encapsulates both 'stealth' and 'basic' authenticated Onion
services, whether ephemeral or not.
Each client has an arbitrary (ASCII, no spaces) name. You may
access the clients with `get_client`, which will all be
:class:`txtorcon.IOnionClient` instances.
"""
def get_permanent_id(self):
"""
:return: the service's permanent id, in hex
(For authenticated services, this is not the same as the
.onion URI of any of the clients). The Permanent ID is the
base32 encoding of the first 10 bytes of the SHA1 hash of the
public-key of the service.
"""
def client_names(self):
"""
:return: list of str instances, one for each client
"""
def get_client(self, name):
"""
:return: object implementing IOnionClient for the named client
"""
def add_client(self, name):
"""
probably should return a Deferred?
"""
def del_client(self, name):
"""
probably should return a Deferred?
"""
class IOnionClient(IOnionService):
"""
A single client from a 'parent' IAuthenticatedOnionClients. We do
this because hidden services can have different URLs and/or
auth_tokens on a per-client basis. So, the only way to access
*anything* from an authenticated onion service is to list the
cleints -- which gives you one IOnionClient per client.
Note that this inherits from :class:`txtorcon.IOnionService` and
adds only those attributes required for authentication. For
'stealth' authentication, the hostnames of each client will be
unique; for 'basic' authentication the hostname is the same. The
auth_tokens are always unique -- these are given to clients to
include using the Tor option `HidServAuth`
"""
auth_token = Attribute('Some secret bytes')
name = Attribute('str') # XXX required? probably.
parent = Attribute('the IAuthenticatedOnionClients instance who owns me')
# from the IOnionService base interface, inherits:
# hostname
# private_key
# ports
def _canonical_hsdir(hsdir):
    """
    Internal helper.

    :return: 'hsdir' as an absolute path.  A relative path is resolved
        against the current working directory (and a warning is
        issued); an absolute path is returned unmodified.
    """
    if isabs(hsdir):
        return hsdir
    resolved = abspath(hsdir)
    warnings.warn(
        "Onions service directory ({}) is relative and has"
        " been resolved to '{}'".format(hsdir, resolved)
    )
    return resolved
@implementer(IOnionService)
@implementer(IFilesystemOnionService)
class FilesystemOnionService(object):
"""
An Onion service whose keys are stored on disk.
"""
@staticmethod
@defer.inlineCallbacks
def create(reactor, config, hsdir, ports,
version=3,
group_readable=False,
progress=None,
await_all_uploads=None):
"""
returns a new FilesystemOnionService after adding it to the
provided config and ensuring at least one of its descriptors
is uploaded.
:param config: a :class:`txtorcon.TorConfig` instance
:param ports: a list of ports to make available; any of these
can be 2-tuples of (remote, local) if you want to expose a
particular port locally (otherwise, an available one is
chosen)
:param hsdir: the directory in which to store private keys
:param version: 2 or 3, which kind of service to create
:param group_readable: if True, the Tor option
`HiddenServiceDirGroupReadable` is set to 1 for this service
:param progress: a callable taking (percent, tag, description)
that is called periodically to report progress.
:param await_all_uploads: if True, the Deferred only fires
after ALL descriptor uploads have completed (otherwise, it
fires when at least one has completed).
See also :meth:`txtorcon.Tor.create_onion_service` (which
ultimately calls this).
"""
# if hsdir is relative, it's "least surprising" (IMO) to make
# it into a absolute path here -- otherwise, it's relative to
# whatever Tor's cwd is.
hsdir = _canonical_hsdir(hsdir)
processed_ports = yield _validate_ports(reactor, ports)
fhs = FilesystemOnionService(config, hsdir, processed_ports, version=version, group_readable=group_readable)
config.HiddenServices.append(fhs)
# we .save() down below, after setting HS_DESC listener
# XXX I *hate* this version checking crap. Can we discover a
# different way if this Tor supports proper HS_DESC stuff? I
# think part of the problem here is that "some" Tors have
# HS_DESC event, but it's not .. sufficient?
uploaded = [None]
if not version_at_least(config.tor_protocol.version, 0, 2, 7, 2):
if progress:
progress(
102, "wait_desctiptor",
"Adding an onion service to Tor requires at least version"
)
progress(
103, "wait_desctiptor",
"0.2.7.2 so that HS_DESC events work properly and we can"
)
progress(
104, "wait_desctiptor",
"detect our desctiptor being uploaded."
)
progress(
105, "wait_desctiptor",
"Your version is '{}'".format(config.tor_protocol.version),
)
progress(
106, "wait_desctiptor",
"So, we'll just declare it done right now..."
)
uploaded[0] = defer.succeed(None)
else:
# XXX actually, there's some versions of Tor when v3
# filesystem services could be added but they didn't send
# HS_DESC updates -- did any of these actually get
# released?!
uploaded[0] = _await_descriptor_upload(config.tor_protocol, fhs, progress, await_all_uploads)
yield config.save()
yield uploaded[0]
defer.returnValue(fhs)
def __init__(self, config, thedir, ports, version=3, group_readable=0):
"""
Do not instantiate directly; use
:func:`txtorcon.onion.FilesystemOnionService.create`
"""
self._config = config
self._dir = os.path.realpath(thedir)
_validate_ports_low_level(ports)
from .torconfig import _ListWrapper
self._ports = _ListWrapper(
ports,
functools.partial(config.mark_unsaved, 'HiddenServices'),
)
self._version = version
self._group_readable = group_readable
self._hostname = None
self._private_key = None
@property
def hostname(self):
if self._hostname is None:
try:
with open(os.path.join(self._dir, 'hostname'), 'r') as f:
self._hostname = f.read().strip()
except IOError:
# not clear under what circumstances this happens
# (i.e. we can create a new onion, but somehow not
# read the hostname file) but ... safety?
self._hostname = None
return self._hostname
@property
def private_key(self):
# XXX there's also a file called 'hs_ed25519_public_key' but I
# think we can just ignore that? .. or do we need a v3-only
# accessor for .public_key() as well?
if self._private_key is None:
if self.version == 2:
try:
with open(os.path.join(self._dir, 'private_key'), 'r') as f:
self._private_key = f.read().strip()
except IOError:
# not clear under what circumstances this happens
# (i.e. we can create a new onion, but somehow not
# read the private_key file) but ... safety?
self._private_key = None
elif self.version == 3:
# XXX see tor bug #20699 -- would be Really Nice to
# not have to deal with binary data here (well, more
# for ADD_ONION, but still)
try:
with open(os.path.join(self._dir, 'hs_ed25519_secret_key'), 'rb') as f:
self._private_key = f.read().strip()
except IOError:
# not clear under what circumstances this happens
# (i.e. we can create a new onion, but somehow not
# read the private key file) but ... safety?
self._private_key = None
else:
raise RuntimeError(
"Don't know how to load private_key for version={} "
"Onion service".format(self.version)
)
return self._private_key
@property
def ports(self):
return self._ports
@ports.setter
def ports(self, ports):
# XXX FIXME need to update Tor's notion of config and/or
# reject this request after we *have* updated Tor..."or
# something"
from .torconfig import _ListWrapper
self._ports = _ListWrapper(
ports,
functools.partial(self._config.mark_unsaved, 'HiddenServices'),
)
self._config.mark_unsaved('HiddenServices')
@property
def directory(self):
return self._dir
@directory.setter
def directory(self, d):
self._dir = d
self._config.mark_unsaved('HiddenServices')
@property
def dir(self):
# deprecated
return self.directory
@dir.setter
def dir(self, d):
# deprecated
self.directory = d
@property
def group_readable(self):
return self._group_readable
@property
def version(self):
return self._version
@version.setter
def version(self, v):
self._version = v
self._config.mark_unsaved('HiddenServices')
def config_attributes(self):
"""
Helper method used by TorConfig when generating a torrc file and
SETCONF commands
"""
rtn = [('HiddenServiceDir', str(self._dir))]
if self._config._supports['HiddenServiceDirGroupReadable'] \
and self.group_readable:
rtn.append(('HiddenServiceDirGroupReadable', str(1)))
for x in self.ports:
rtn.append(('HiddenServicePort', str(x)))
if self.version:
rtn.append(('HiddenServiceVersion', str(self.version)))
return rtn
@defer.inlineCallbacks
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads):
"""
Internal helper.
:param tor_protocol: ITorControlProtocol instance
:param onion: IOnionService instance
:param progress: a progess callback, or None
:returns: a Deferred that fires once we've detected at least one
descriptor upload for the service (as detected by listening for
HS_DESC events)
"""
# For v3 services, Tor attempts to upload to 16 services; we'll
# assume that for now but also cap it (we want to show some
# progress for "attempting uploads" but we need to decide how
# much) .. so we leave 50% of the "progress" for attempts, and the
# other 50% for "are we done" (which is either "one thing
# uploaded" or "all the things uploaded")
attempted_uploads = set()
confirmed_uploads = set()
failed_uploads = set()
uploaded = defer.Deferred()
await_all = False if await_all_uploads is None else await_all_uploads
def translate_progress(tag, description):
if progress:
done = len(confirmed_uploads) + len(failed_uploads)
done_endpoint = float(len(attempted_uploads)) if await_all else 1.0
done_pct = 0 if not attempted_uploads else float(done) / done_endpoint
started_pct = float(min(16, len(attempted_uploads))) / 16.0
try:
progress(
(done_pct * 50.0) + (started_pct * 50.0),
tag,
description,
)
except Exception:
log.err()
def hostname_matches(hostname):
if IAuthenticatedOnionClients.providedBy(onion):
return hostname[:-6] == onion.get_permanent_id()
else:
# provides IOnionService
return onion.hostname == hostname
def hs_desc(evt):
"""
From control-spec:
"650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir
[SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica]
"""
args = evt.split()
subtype = args[0]
if subtype == 'UPLOAD':
if hostname_matches('{}.onion'.format(args[1])):
attempted_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Upload to {} started".format(args[3])
)
elif subtype == 'UPLOADED':
# we only need ONE successful upload to happen for the
# HS to be reachable.
# unused? addr = args[1]
# XXX FIXME I think tor is sending the onion-address
# properly with these now, so we can use those
# (i.e. instead of matching to "attempted_uploads")
if args[3] in attempted_uploads:
confirmed_uploads.add(args[3])
log.msg("Uploaded '{}' to '{}'".format(args[1], args[3]))
translate_progress(
"wait_descriptor",
"Successful upload to {}".format(args[3])
)
if not uploaded.called:
if await_all:
if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads):
uploaded.callback(onion)
else:
uploaded.callback(onion)
elif subtype == 'FAILED':
if hostname_matches('{}.onion'.format(args[1])):
failed_uploads.add(args[3])
translate_progress(
"wait_descriptor",
"Failed upload to {}".format(args[3])
)
if failed_uploads == attempted_uploads:
msg = "Failed to upload '{}' to: {}".format(
args[1],
', '.join(failed_uploads),
)
uploaded.errback(RuntimeError(msg))
# the first 'yield' should be the add_event_listener so that a
# caller can do "d = _await_descriptor_upload()", then add the
# service.
yield tor_protocol.add_event_listener('HS_DESC', hs_desc)
yield uploaded
yield tor_protocol.remove_event_listener('HS_DESC', hs_desc)
# ensure we show "100%" at the end
if progress:
if await_all_uploads:
msg = "Completed descriptor uploads"
else:
msg = "At least one descriptor uploaded"
try:
progress(100.0, "wait_descriptor", msg)
except Exception:
log.err()
@defer.inlineCallbacks
def _add_ephemeral_service(config, onion, progress, version, auth=None, await_all_uploads=None):
    """
    Internal Helper.

    This uses ADD_ONION to add the given service to Tor. The Deferred
    this returns will callback when the ADD_ONION call has succeeded,
    *and* when at least one descriptor has been uploaded to a Hidden
    Service Directory.

    :param config: a TorConfig instance

    :param onion: an EphemeralOnionService instance

    :param progress: a callable taking 3 arguments (percent, tag,
        description) that is called some number of times to tell you of
        progress.

    :param version: 2 or 3, which kind of service to create

    :param auth: if not None, create an authenticated service ("basic"
        is the only kind supported currently so a AuthBasic instance
        should be passed)

    :param await_all_uploads: passed through to
        _await_descriptor_upload; if true-ish, wait for every attempted
        descriptor upload instead of just the first success.
    """
    if onion not in config.EphemeralOnionServices:
        config.EphemeralOnionServices.append(onion)

    # we have to keep this as a Deferred for now so that the HS_DESC
    # listener gets added *before* we issue ADD_ONION below -- otherwise
    # we could miss the upload events for our own service
    assert version in (2, 3)
    uploaded_d = _await_descriptor_upload(config.tor_protocol, onion, progress, await_all_uploads)

    # we allow a key to be passed that *doesn't* start with
    # "RSA1024:" because having to escape the ":" for endpoint
    # string syntax (which uses ":" as delimiters) is annoying
    # XXX rethink ^^? what do we do when the type is upgraded?
    # maybe just a magic-character that's different from ":", or
    # force people to escape them?
    if onion.private_key:
        if onion.private_key is not DISCARD and ':' not in onion.private_key:
            # normalize a "bare" key blob by prefixing the key type Tor
            # expects for this service version
            if version == 2:
                if not onion.private_key.startswith("RSA1024:"):
                    onion._private_key = "RSA1024:" + onion.private_key
            elif version == 3:
                if not onion.private_key.startswith("ED25519-V3:"):
                    onion._private_key = "ED25519-V3:" + onion.private_key

    # okay, we're set up to listen, and now we issue the ADD_ONION
    # command. this will set ._hostname and ._private_key properly
    keystring = 'NEW:BEST'
    if onion.private_key not in (None, DISCARD):
        keystring = onion.private_key
    elif version == 3:
        keystring = 'NEW:ED25519-V3'
    if version == 3:
        if 'V3' not in keystring:
            raise ValueError(
                "version=3 but private key isn't 'ED25519-V3'"
            )

    # hmm, is it better to validate keyblob args in the create
    # methods? "Feels nicer" to see it here when building ADD_ONION
    # though?
    # (a newline in the blob would also let extra control-port commands
    # be smuggled into the ADD_ONION line, so this doubles as a guard)
    if '\r' in keystring or '\n' in keystring:
        raise ValueError(
            "No newline or return characters allowed in key blobs"
        )

    cmd = 'ADD_ONION {}'.format(keystring)
    for port in onion._ports:
        # each entry is "<virtport> <target>"; ADD_ONION wants
        # "Port=<virtport>,<target>"
        cmd += ' Port={},{}'.format(*port.split(' ', 1))
    flags = []
    if onion._detach:
        flags.append('Detach')
    if onion.private_key is DISCARD:
        flags.append('DiscardPK')
    if auth is not None:
        assert isinstance(auth, AuthBasic)  # don't support AuthStealth yet
        if isinstance(auth, AuthBasic):
            flags.append('BasicAuth')
    if onion._single_hop:
        flags.append('NonAnonymous')  # depends on some Tor options, too
    if flags:
        cmd += ' Flags={}'.format(','.join(flags))

    if auth is not None:
        for client_name in auth.client_names():
            keyblob = auth.keyblob_for(client_name)
            if keyblob is None:
                # no key supplied: Tor generates one and echoes it back
                # in the ClientAuth= reply lines handled further below
                cmd += ' ClientAuth={}'.format(client_name)
            else:
                cmd += ' ClientAuth={}:{}'.format(client_name, keyblob)
                onion._add_client(client_name, keyblob)

    raw_res = yield config.tor_protocol.queue_command(cmd)
    res = find_keywords(raw_res.split('\n'))
    try:
        onion._hostname = res['ServiceID'] + '.onion'
        if onion.private_key is DISCARD:
            onion._private_key = None
        else:
            # if we specified a private key, it's not echoed back
            if not onion.private_key:
                onion._private_key = res['PrivateKey'].strip()
    except KeyError:
        raise RuntimeError(
            "Expected ADD_ONION to return ServiceID= and PrivateKey= args."
            "Got: {}".format(res)
        )

    if auth is not None:
        # collect any Tor-generated client credentials from the reply
        for line in raw_res.split('\n'):
            if line.startswith("ClientAuth="):
                name, blob = line[11:].split(':', 1)
                onion._add_client(name, blob)

    log.msg("{}: waiting for descriptor uploads.".format(onion.hostname))
    yield uploaded_d
class _AuthCommon(object):
    """
    Shared implementation for the authentication variants.

    ``clients`` may mix bare client names (no key material yet) with
    ``(name, keyblob)`` 2-tuples; both forms are normalized into a
    single name -> keyblob-or-None mapping.
    """
    def __init__(self, clients):
        entries = dict()
        for entry in clients:
            if isinstance(entry, tuple):
                name, blob = entry
            else:
                name, blob = entry, None
            entries[name] = blob
        self._clients = entries
        # Tor's config/control syntax is space-delimited, so a space in
        # a client name can never round-trip correctly.
        if any(' ' in nm for nm in self._clients):
            raise ValueError("Client names can't have spaces")

    def client_names(self):
        """All known client names (a dict key view)."""
        return self._clients.keys()

    def keyblob_for(self, client_name):
        """The keyblob for ``client_name`` (None if not yet known)."""
        return self._clients[client_name]
class AuthBasic(_AuthCommon):
    """
    Authentication details for 'basic' auth.

    Construct with a list of clients; each entry is either a bare name
    or a ``(name, keyblob)`` 2-tuple (see :class:`_AuthCommon`).
    """
    # Tor's name for this scheme; used when building config /
    # ADD_ONION arguments (e.g. HiddenServiceAuthorizeClient).
    auth_type = 'basic'
    # note that _AuthCommon.__init__ takes 'clients'
class AuthStealth(_AuthCommon):
    """
    Authentication details for 'stealth' auth.

    Ephemeral services reject this kind (see
    ``EphemeralAuthenticatedOnionService.create``); use a
    filesystem-based service for stealth authentication.
    """
    # Tor's name for this scheme; used when building config arguments.
    auth_type = 'stealth'
    # note that _AuthCommon.__init__ takes 'clients'
# Sentinel: pass as ``private_key=DISCARD`` when creating an ephemeral
# service to send Tor the DiscardPK flag -- Tor generates a key but
# never returns it, and the service's .private_key ends up None.
DISCARD = object()
@implementer(IAuthenticatedOnionClients)
class EphemeralAuthenticatedOnionService(object):
    """
    An onion service with either 'stealth' or 'basic' authentication
    and keys stored in memory only (Tor doesn't store the private keys
    anywhere and erases them when shutting down).

    Use the async class-method ``create`` to make instances of this.
    """

    @staticmethod
    @defer.inlineCallbacks
    def create(reactor, config, ports,
               detach=False,
               private_key=None,  # or DISCARD or a key
               version=None,
               progress=None,
               auth=None,  # an AuthBasic (or AuthStealth) instance
               await_all_uploads=None,
               single_hop=False):
        """
        returns a new EphemeralAuthenticatedOnionService after adding it
        to the provided config and ensuring at least one of its
        descriptors is uploaded.

        :param config: a :class:`txtorcon.TorConfig` instance

        :param ports: a list of ports to make available; any of these
            can be 2-tuples of (remote, local) if you want to expose a
            particular port locally (otherwise, an available one is
            chosen)

        :param private_key: None, `DISCARD`, or a private key blob

        :param detach: if True, tell Tor to NOT associate this control
            connection with the lifetime of the created service

        :param version: 2 or 3, which kind of service to create

        :param progress: a callable taking (percent, tag, description)
            that is called periodically to report progress.

        :param auth: an :class:`txtorcon.AuthBasic` instance naming the
            authorized clients (AuthStealth is rejected: Tor doesn't
            support ephemeral stealth-auth).

        :param await_all_uploads: if True, the Deferred only fires
            after ALL descriptor uploads have completed (otherwise, it
            fires when at least one has completed).

        :param single_hop: if True, pass the `NonAnonymous` flag. Note
            that Tor options `HiddenServiceSingleHopMode`,
            `HiddenServiceNonAnonymousMode` must be set to `1` and there
            must be no `SOCKSPort` configured for this to actually work.

        See also :meth:`txtorcon.Tor.create_onion_service` (which
        ultimately calls this).
        """
        if not isinstance(auth, (AuthBasic, AuthStealth)):
            raise ValueError(
                "'auth' should be an AuthBasic or AuthStealth instance"
            )
        if isinstance(auth, AuthStealth):
            raise ValueError(
                "Tor does not yet support ephemeral stealth-auth"
            )
        version = 2 if version is None else version
        assert version in (2, 3)
        processed_ports = yield _validate_ports(reactor, ports)

        onion = EphemeralAuthenticatedOnionService(
            config, processed_ports,
            private_key=private_key,
            detach=detach,
            version=version,
            single_hop=single_hop,
        )
        # issues ADD_ONION and waits for descriptor upload(s)
        yield _add_ephemeral_service(config, onion, progress, version, auth, await_all_uploads)

        defer.returnValue(onion)

    def __init__(self, config, ports, hostname=None, private_key=None, auth=None, version=3,
                 detach=False, single_hop=None):
        """
        Users should create instances of this class by using the async
        method :meth:`txtorcon.EphemeralAuthenticatedOnionService.create`
        """
        # NOTE: 'auth' is accepted for signature compatibility but is
        # not used here -- clients are registered via _add_client() as
        # Tor replies to ADD_ONION. (The previous default was a mutable
        # list, a classic shared-default bug; None is equivalent since
        # the value is never read.)
        _validate_ports_low_level(ports)

        self._config = config
        self._ports = ports
        self._hostname = hostname
        self._private_key = private_key
        self._version = version
        self._detach = detach
        # name -> EphemeralAuthenticatedOnionServiceClient
        self._clients = dict()
        self._single_hop = single_hop

    def get_permanent_id(self):
        """
        IAuthenticatedOnionClients API
        """
        assert '\n' not in self._private_key
        # why are we sometimes putting e.g. "RSA1024:xxxx" and
        # sometimes not? Should be one or the other
        if ':' in self._private_key:
            blob = self._private_key.split(':')[1].encode('ascii')
        else:
            blob = self._private_key.encode('ascii')
        # re-wrap the bare blob as PEM so `cryptography` can load it
        keydata = b'-----BEGIN RSA PRIVATE KEY-----\n' + blob + b'\n-----END RSA PRIVATE KEY-----\n'
        # keydata = b'-----BEGIN PRIVATE KEY-----\n' + blob + b'\n-----END PRIVATE KEY-----\n'
        # XXX Hmm, it seems PyPy 5.8.0 *on travis* fails this (on my
        # system it works fine). Only relevant difference I can see is
        # openssl version 1.0.1f vs 1.0.1t
        private_key = serialization.load_pem_private_key(
            keydata,
            password=None,
            backend=default_backend(),
        )
        return _compute_permanent_id(private_key)

    def client_names(self):
        """Names of all currently-known authenticated clients."""
        return self._clients.keys()

    def get_client(self, name):
        """Return the client object registered under ``name``."""
        return self._clients[name]

    def _add_client(self, name, auth_token):
        # called while processing the ADD_ONION command/reply
        self._clients[name] = EphemeralAuthenticatedOnionServiceClient(
            parent=self,
            name=name,
            token=auth_token,
        )

    @property
    def hostname(self):
        return self._hostname

    @property
    def ports(self):
        return set(self._ports)

    @property
    def version(self):
        return self._version

    @property
    def private_key(self):
        return self._private_key

    @defer.inlineCallbacks
    def remove(self):
        """
        Issues a DEL_ONION call to our tor, removing this service.
        """
        cmd = 'DEL_ONION {}'.format(self._hostname[:-len('.onion')])
        res = yield self._config.tor_protocol.queue_command(cmd)
        if res.strip() != "OK":
            raise RuntimeError("Failed to remove service")
@implementer(IOnionService)
class EphemeralOnionService(object):
    """
    An Onion service whose keys live in memory and are not persisted
    by Tor.

    It is up to the application developer to retrieve and store the
    private key if this service is ever to be brought online again.
    """

    @staticmethod
    @defer.inlineCallbacks
    def create(reactor, config, ports,
               detach=False,
               private_key=None,  # or DISCARD
               version=None,
               progress=None,
               await_all_uploads=None,
               single_hop=False):
        """
        returns a new EphemeralOnionService after adding it to the
        provided config and ensuring at least one of its descriptors
        is uploaded.

        :param config: a :class:`txtorcon.TorConfig` instance

        :param ports: a list of ports to make available; any of these
            can be 2-tuples of (remote, local) if you want to expose a
            particular port locally (otherwise, an available one is
            chosen)

        :param private_key: None, `DISCARD`, or a private key blob

        :param detach: if True, tell Tor to NOT associate this control
            connection with the lifetime of the created service

        :param version: 2 or 3, which kind of service to create

        :param progress: a callable taking (percent, tag, description)
            that is called periodically to report progress.

        :param await_all_uploads: if True, the Deferred only fires
            after ALL descriptor uploads have completed (otherwise, it
            fires when at least one has completed).

        :param single_hop: if True, pass the `NonAnonymous` flag. Note
            that Tor options `HiddenServiceSingleHopMode`,
            `HiddenServiceNonAnonymousMode` must be set to `1` and there
            must be no `SOCKSPort` configured for this to actually work.

        See also :meth:`txtorcon.Tor.create_onion_service` (which
        ultimately calls this).
        """
        version = 2 if version is None else version
        assert version in (2, 3)
        processed_ports = yield _validate_ports(reactor, ports)

        onion = EphemeralOnionService(
            config, processed_ports,
            hostname=None,
            private_key=private_key,
            detach=detach,
            version=version,
            await_all_uploads=await_all_uploads,
            single_hop=single_hop,
        )

        # issues ADD_ONION (auth=None: unauthenticated service) and
        # waits for descriptor upload(s)
        yield _add_ephemeral_service(config, onion, progress, version, None, await_all_uploads)

        defer.returnValue(onion)

    def __init__(self, config, ports, hostname=None, private_key=None, version=3,
                 detach=False, await_all_uploads=None, single_hop=None, **kwarg):
        """
        Users should create instances of this class by using the async
        method :meth:`txtorcon.EphemeralOnionService.create`
        """
        # prior to 17.0.0, this took an argument called "ver" instead
        # of "version". So, we will silently upgrade that.
        if "ver" in kwarg:
            version = int(kwarg.pop("ver"))
        # any other kwargs are illegal
        if len(kwarg):
            raise ValueError(
                "Unknown kwargs: {}".format(", ".join(kwarg.keys()))
            )
        _validate_ports_low_level(ports)

        self._config = config
        self._ports = ports
        self._hostname = hostname
        self._private_key = private_key
        self._version = version
        self._detach = detach
        # NOTE(review): await_all_uploads is accepted but not stored;
        # it only matters in .create() -- confirm that's intentional.
        self._single_hop = single_hop

    # not putting an "add_to_tor" method here; that class is now
    # deprecated and you add one of these by using .create()

    @defer.inlineCallbacks
    def remove(self):
        """
        Issues a DEL_ONION call to our tor, removing this service.
        """
        cmd = 'DEL_ONION {}'.format(self._hostname[:-len('.onion')])
        res = yield self._config.tor_protocol.queue_command(cmd)
        if res.strip() != "OK":
            raise RuntimeError("Failed to remove service")

    @property
    def ports(self):
        # a (new) set of "<virtport> <target>" strings
        return set(self._ports)

    @property
    def version(self):
        return self._version

    @property
    def hostname(self):
        return self._hostname

    @property
    def private_key(self):
        return self._private_key
@implementer(IOnionClient)
class EphemeralAuthenticatedOnionServiceClient(object):
    """
    One authenticated client of an EphemeralAuthenticatedOnionService.

    Instances are created internally (via the parent service's
    ``_add_client``) while processing Tor's ADD_ONION reply; user code
    obtains them through the parent service's client accessors.
    """

    def __init__(self, parent, name, token):
        # the owning EphemeralAuthenticatedOnionService
        self._parent = parent
        # this client's name (as given in ClientAuth=)
        self._name = name
        # the authentication cookie Tor issued (or we supplied)
        self._auth_token = token

    @property
    def parent(self):
        """The service this client belongs to."""
        return self._parent

    @property
    def name(self):
        """This client's name."""
        return self._name

    @property
    def auth_token(self):
        """The authentication token for this client."""
        return self._auth_token

    @property
    def hostname(self):
        """The .onion hostname, delegated to the parent service."""
        return self._parent.hostname

    @property
    def ports(self):
        """A fresh set of the parent service's port-mapping strings."""
        return set(self._parent.ports)

    @property
    def version(self):
        """Onion-service version, delegated to the parent service."""
        return self._parent.version
@implementer(IOnionClient)
class FilesystemAuthenticatedOnionServiceClient(object):
    """
    One authenticated client of a FilesystemAuthenticatedOnionService.

    Instances are created by the parent service (while parsing its
    on-disk "hostname" file, or via ``add_client``); user code obtains
    them through the parent's client accessors.
    """

    def __init__(self, parent, name, hostname, ports, token):
        # the owning FilesystemAuthenticatedOnionService
        self._parent = parent
        self._name = name
        self._ports = ports
        # public attributes, read directly by callers
        self.hostname = hostname
        self.auth_token = token
        self.ephemeral = False
        # TODO: should private_key / group_readable also be plain
        # attributes here? (kept as delegating properties below)

    @property
    def name(self):
        """This client's name."""
        return self._name

    @property
    def parent(self):
        """The service this client belongs to."""
        return self._parent

    @property
    def ports(self):
        """This client's port-mapping strings."""
        return self._ports

    @property
    def private_key(self):
        # looked up from the parent's parsed "client_keys" file
        return self._parent._private_key(self._name).key

    @property
    def group_readable(self):
        """Delegated to the parent service."""
        return self._parent.group_readable

    @property
    def authorize_client(self):
        """The "<name> <token>" string for HiddenServiceAuthorizeClient."""
        return '{} {}'.format(self._name, self.auth_token)

    @property
    def hidden_service_directory(self):
        """Delegated to the parent service."""
        return self._parent.hidden_service_directory

    @property
    def version(self):
        """Onion-service version, delegated to the parent service."""
        return self._parent.version
def _compute_permanent_id(private_key):
    """
    Internal helper. Return an authenticated service's permanent ID
    given an RSA private key object.

    The permanent ID is the base32 encoding of the first 10 bytes (80
    bits) of the SHA1 digest of the DER-encoded public key (per tor's
    rend-spec "permanent-id" derivation).
    """
    pub = private_key.public_key()
    # serialize the public key as PEM (PKCS#1) ...
    p = pub.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.PKCS1
    )
    # ... then strip the BEGIN/END marker lines and base64-decode the
    # body to recover the raw DER bytes
    z = ''.join(p.decode('ascii').strip().split('\n')[1:-1])
    b = base64.b64decode(z)
    h1 = hashlib.new('sha1')
    h1.update(b)
    # first 80 bits of the digest, lowercase base32
    permanent_id = h1.digest()[:10]
    return base64.b32encode(permanent_id).lower().decode('ascii')
@implementer(IAuthenticatedOnionClients)
class FilesystemAuthenticatedOnionService(object):
    """
    An Onion service whose keys are stored on disk by Tor and which
    does authentication.
    """

    @staticmethod
    @defer.inlineCallbacks
    def create(reactor, config, hsdir, ports,
               auth=None,
               version=3,
               group_readable=False,
               progress=None,
               await_all_uploads=None):
        """
        returns a new FilesystemAuthenticatedOnionService after adding it
        to the provided config and ensuring at least one of its
        descriptors is uploaded.

        :param config: a :class:`txtorcon.TorConfig` instance

        :param hsdir: the HiddenServiceDir in which Tor keeps this
            service's keys (made absolute if relative)

        :param ports: a list of ports to make available; any of these
            can be 2-tuples of (remote, local) if you want to expose a
            particular port locally (otherwise, an available one is
            chosen)

        :param auth: an instance of :class:`txtorcon.AuthBasic` or
            :class:`txtorcon.AuthStealth`

        :param version: 2 or 3, which kind of service to create

        :param group_readable: if True, the Tor option
            `HiddenServiceDirGroupReadable` is set to 1 for this service

        :param progress: a callable taking (percent, tag, description)
            that is called periodically to report progress.

        :param await_all_uploads: if True, the Deferred only fires
            after ALL descriptor uploads have completed (otherwise, it
            fires when at least one has completed).
        """
        # if hsdir is relative, it's "least surprising" (IMO) to make
        # it into a relative path here -- otherwise, it's relative to
        # whatever Tor's cwd is. Issue similar warning to Tor?
        hsdir = _canonical_hsdir(hsdir)
        processed_ports = yield _validate_ports(reactor, ports)

        fhs = FilesystemAuthenticatedOnionService(
            config, hsdir, processed_ports, auth,
            version=version,
            group_readable=group_readable,
        )
        config.HiddenServices.append(fhs)

        def translate_progress(pct, tag, description):
            # XXX fixme actually translate..
            if progress:
                progress(pct, tag, description)

        # most of this code same as non-authenticated version; can we share?

        # we .save() down below, after setting HS_DESC listener
        uploaded = [None]
        if not version_at_least(config.tor_protocol.version, 0, 2, 7, 2):
            # NOTE(review): "wait_desctiptor" below is a typo of the
            # "wait_descriptor" tag used elsewhere; left as-is since
            # progress consumers might match on the literal tag.
            # (Also note the percent values deliberately exceed 100.)
            translate_progress(
                102, "wait_desctiptor",
                "Adding an onion service to Tor requires at least version"
            )
            translate_progress(
                103, "wait_desctiptor",
                "0.2.7.2 so that HS_DESC events work properly and we can"
            )
            translate_progress(
                104, "wait_desctiptor",
                "detect our desctiptor being uploaded."
            )
            translate_progress(
                105, "wait_desctiptor",
                "Your version is '{}'".format(config.tor_protocol.version),
            )
            translate_progress(
                106, "wait_desctiptor",
                "So, we'll just declare it done right now..."
            )
            uploaded[0] = defer.succeed(None)
        else:
            # XXX actually, there's some versions of Tor when v3
            # filesystem services could be added but they didn't send
            # HS_DESC updates -- did any of these actually get
            # released?!
            uploaded[0] = _await_descriptor_upload(config.tor_protocol, fhs, progress, await_all_uploads)

        yield config.save()
        yield uploaded[0]

        defer.returnValue(fhs)

    def __init__(self, config, thedir, ports, auth, version=3, group_readable=0):
        """
        Users normally use the async :meth:`create` instead of
        instantiating this directly.
        """
        # XXX do we need version here? probably...
        self._config = config
        self._dir = thedir
        self._ports = ports
        # NOTE(review): `assert` disappears under `python -O`; a raise
        # would be more robust here -- confirm before changing.
        assert auth is not None, "Must provide an auth= instance"
        if not isinstance(auth, (AuthBasic, AuthStealth)):
            raise ValueError("auth= must be one of AuthBasic or AuthStealth")
        self._auth = auth
        # dict: name -> IAuthenticatedOnionClient; lazily filled in by
        # _parse_hostname() the first time clients are accessed
        self._clients = None
        self._expected_clients = auth.client_names()
        self._version = version
        self._group_readable = group_readable
        # lazily filled in by _parse_client_keys()
        self._client_keys = None

    @property
    def hidden_service_directory(self):
        return self._dir

    @property
    def group_readable(self):
        return self._group_readable

    @property
    def ports(self):
        return self._ports

    @property
    def version(self):
        return self._version

    # basically everything in HiddenService, except the only API we
    # provide is "clients" because there's a separate .onion hostname
    # and authentication token per client.

    def get_permanent_id(self):
        """
        IAuthenticatedOnionClients API
        """
        # reads the RSA key Tor wrote into our HiddenServiceDir
        with open(os.path.join(self._dir, "private_key"), "rb") as f:
            private_key = serialization.load_pem_private_key(
                f.read(),
                password=None,
                backend=default_backend(),
            )
        return _compute_permanent_id(private_key)

    def client_names(self):
        """
        IAuthenticatedOnionClients API
        """
        if self._clients is None:
            self._parse_hostname()
        return self._clients.keys()

    def get_client(self, name):
        """
        IAuthenticatedOnionClients API
        """
        if self._clients is None:
            self._parse_hostname()
        try:
            return self._clients[name]
        except KeyError:
            raise KeyError("No such client '{}'".format(name))

    def add_client(self, name, hostname, ports, token):
        # register a new client and reflect it into the Tor config
        if self._clients is None:
            self._parse_hostname()
        client = FilesystemAuthenticatedOnionServiceClient(
            parent=self,
            name=name,
            hostname=hostname,
            ports=ports, token=token,
        )
        self._clients[client.name] = client
        self._config.HiddenServices.append(client)

    def _private_key(self, name):
        # per-client key parsed from the "client_keys" file (lazy)
        if self._client_keys is None:
            self._parse_client_keys()
        return self._client_keys[name]

    def _parse_client_keys(self):
        # load and cache the on-disk "client_keys" file (may not exist)
        try:
            with open(os.path.join(self._dir, 'client_keys'), 'r') as f:
                keys = _parse_client_keys(f)
        except IOError:
            keys = []
        self._client_keys = {}
        for auth in keys:
            self._client_keys[auth.name] = auth

    def _parse_hostname(self):
        # parse the on-disk "hostname" file into per-client objects;
        # for an authenticated service Tor writes one line per client
        clients = {}
        try:
            with open(os.path.join(self._dir, 'hostname')) as f:
                for idx, line in enumerate(f.readlines()):
                    # lines are like: hex.onion hex # client: name
                    m = re.match("(.*) (.*) # client: (.*)", line)
                    hostname, cookie, name = m.groups()
                    # -> for auth'd services we end up with multiple
                    # -> HiddenService instances now (because different
                    # -> hostnames)
                    clients[name] = FilesystemAuthenticatedOnionServiceClient(
                        self, name, hostname,
                        ports=self._ports,
                        token=cookie,
                    )
        except IOError:
            # Tor hasn't written the file yet: no clients known
            self._clients = dict()
            return
        self._clients = clients
        if self._expected_clients:
            for expected in self._expected_clients:
                if expected not in self._clients:
                    raise RuntimeError(
                        "Didn't find expected client '{}'".format(expected)
                    )

    def config_attributes(self):
        """
        Helper method used by TorConfig when generating a torrc file and
        SETCONF commands
        """
        rtn = [('HiddenServiceDir', str(self._dir))]
        if self._config._supports['HiddenServiceDirGroupReadable'] \
           and self.group_readable:
            rtn.append(('HiddenServiceDirGroupReadable', str(1)))
        for port in self.ports:
            rtn.append(('HiddenServicePort', str(port)))
        if self._version:
            rtn.append(('HiddenServiceVersion', str(self._version)))
        # before the hostname file is parsed we only know the clients
        # the user *expects*; afterwards we use the parsed names
        if self._clients:
            rtn.append((
                'HiddenServiceAuthorizeClient',
                "{} {}".format(self._auth.auth_type, ','.join(self.client_names()))
            ))
        else:
            rtn.append((
                'HiddenServiceAuthorizeClient',
                "{} {}".format(self._auth.auth_type, ','.join(self._expected_clients))
            ))
        return rtn
@defer.inlineCallbacks
def _validate_ports(reactor, ports):
    """
    Internal helper for Onion services. Validates an incoming list of
    port mappings and returns a list of strings suitable for passing
    to other onion-services functions.

    Accepts 3 different ways of specifying ports:

      - list of ints: each int is the public port, local port random
      - list of 2-tuples of ints: (public, local) ports.
      - list of strings like "80 127.0.0.1:1234"

    This is async in case it needs to ask for a random, unallocated
    local port.
    """
    if not isinstance(ports, (list, tuple)):
        raise ValueError("'ports' must be a list of strings, ints or 2-tuples")

    processed_ports = []
    for port in ports:
        if isinstance(port, (set, list, tuple)):
            # 2-tuple (or similar) of (remote, local)
            if len(port) != 2:
                raise ValueError(
                    "'ports' must contain a single int or a 2-tuple of ints"
                )
            remote, local = port
            try:
                remote = int(remote)
            except ValueError:
                raise ValueError(
                    "'ports' has a tuple with a non-integer "
                    "component: {}".format(port)
                )
            try:
                local = int(local)
            except ValueError:
                # local side isn't an int: must be unix:/path or IP:port
                if local.startswith('unix:/'):
                    pass
                else:
                    if ':' not in local:
                        raise ValueError(
                            "local port must be either an integer"
                            " or start with unix:/ or be an IP:port"
                        )
                    # NOTE(review): this rebinds the loop variable
                    # 'port'; harmless today (only 'local' is used
                    # below) but confusing -- consider renaming.
                    ip, port = local.split(':')
                    if not _is_non_public_numeric_address(ip):
                        # warn only; we don't reject non-local targets
                        log.msg(
                            "'{}' used as onion port doesn't appear to be a "
                            "local, numeric address".format(ip)
                        )
                processed_ports.append(
                    "{} {}".format(remote, local)
                )
            else:
                processed_ports.append(
                    "{} 127.0.0.1:{}".format(remote, local)
                )
        elif isinstance(port, (six.text_type, str)):
            # already a "remote local" string: just validate it
            _validate_single_port_string(port)
            processed_ports.append(port)
        else:
            # bare int: pick an unused local port ourselves
            try:
                remote = int(port)
            except (ValueError, TypeError):
                raise ValueError(
                    "'ports' has a non-integer entry: {}".format(port)
                )
            local = yield available_tcp_port(reactor)
            processed_ports.append(
                "{} 127.0.0.1:{}".format(remote, local)
            )
    defer.returnValue(processed_ports)
def _validate_ports_low_level(ports):
    """
    Internal helper.

    Validates the 'ports' argument to EphemeralOnionService or
    EphemeralAuthenticatedOnionService returning None on success or
    raising ValueError otherwise.

    This only accepts the "list of strings" variants; some
    higher-level APIs also allow lists of ints or lists of 2-tuples,
    but those must be converted to strings before they get here.
    """
    if not isinstance(ports, (list, tuple)):
        raise ValueError("'ports' must be a list of strings")
    # generator (not a list-comp) so we stop at the first bad entry
    if any(not isinstance(x, (six.text_type, str)) for x in ports):
        raise ValueError("'ports' must be a list of strings")
    for port in ports:
        _validate_single_port_string(port)
def _validate_single_port_string(port):
    """
    Check a single onion-service port mapping string of the form
    "80 127.0.0.1:4321", raising ValueError if it is malformed.
    """
    pieces = port.split(' ')
    if len(pieces) != 2:
        raise ValueError(
            "Port '{}' should have exactly one space in it".format(port)
        )
    external, internal = pieces
    try:
        external = int(external)
    except ValueError:
        raise ValueError(
            "Port '{}' external port isn't an int".format(port)
        )
    if ':' not in internal:
        raise ValueError(
            "Port '{}' local address should be 'IP:port'".format(port)
        )
    if internal.startswith('unix:'):
        # unix-socket targets aren't checked further
        return
    ip, _ = internal.split(':')
    if ip == 'localhost' or _is_non_public_numeric_address(ip):
        return
    raise ValueError(
        "Port '{}' internal IP '{}' should be a local "
        "address".format(port, ip)
    )
def _parse_rsa_blob(lines):
    '''
    Internal helper: collapse the lines of an RSA key (dropping the
    first and last, i.e. the BEGIN/END markers) into Tor's
    "RSA1024:<blob>" form.
    '''
    key_body = ''.join(lines[1:-1])
    return 'RSA1024:{}'.format(key_body)
def _parse_client_keys(stream):
    '''
    This parses a hidden-service "client_keys" file, either stealth or
    basic (they're the same, except "stealth" includes a
    "client-key"). Returns a list of HiddenServiceClientAuth() instances.

    Note that the key does NOT include the "----BEGIN ---" markers,
    nor *any* embedded whitespace. It is *just* the key blob.
    '''

    def parse_error(data):
        # transition callback used for any unexpected line
        raise RuntimeError("Parse error at: " + data)

    class ParserState(object):
        # accumulates (name, cookie, key-lines) triples as the FSM
        # walks the file, flushing a HiddenServiceClientAuth whenever
        # a new "client-name" line starts the next record
        def __init__(self):
            self.keys = []
            self.reset()

        def reset(self):
            self.name = None
            self.cookie = None
            self.key = []

        def create_key(self):
            # flush the in-progress record (no-op before the first one)
            if self.name is not None:
                self.keys.append(HiddenServiceClientAuth(self.name, self.cookie, self.key))
            self.reset()

        def set_name(self, name):
            self.create_key()
            self.name = name.split()[1]

        def set_cookie(self, cookie):
            self.cookie = cookie.split()[1]
            # strip base64 padding from the cookie
            if self.cookie.endswith('=='):
                self.cookie = self.cookie[:-2]

        def add_key_line(self, line):
            self.key.append(line)

    from txtorcon.spaghetti import FSM, State, Transition
    init = State('init')
    got_name = State('got_name')
    got_cookie = State('got_cookie')
    reading_key = State('got_key')

    parser_state = ParserState()

    # initial state; we want "client-name" or it's an error
    init.add_transitions([
        Transition(got_name, lambda line: line.startswith('client-name '), parser_state.set_name),
        Transition(init, lambda line: not line.startswith('client-name '), parse_error),
    ])

    # next up is "descriptor-cookie" or it's an error
    got_name.add_transitions([
        Transition(got_cookie, lambda line: line.startswith('descriptor-cookie '), parser_state.set_cookie),
        Transition(init, lambda line: not line.startswith('descriptor-cookie '), parse_error),
    ])

    # the "interesting bit": there's either a client-name if we're a
    # "basic" file, or an RSA key (with "client-key" before it)
    got_cookie.add_transitions([
        Transition(reading_key, lambda line: line.startswith('client-key'), None),
        Transition(got_name, lambda line: line.startswith('client-name '), parser_state.set_name),
    ])

    # if we're reading an RSA key, we accumulate it in current_key.key
    # until we hit a line starting with "client-name"
    reading_key.add_transitions([
        Transition(reading_key, lambda line: not line.startswith('client-name'), parser_state.add_key_line),
        Transition(got_name, lambda line: line.startswith('client-name '), parser_state.set_name),
    ])

    # create our FSM and parse the data
    fsm = FSM([init, got_name, got_cookie, reading_key])
    for line in stream.readlines():
        fsm.process(line.strip())

    parser_state.create_key()  # make sure we get the "last" one
    return parser_state.keys
| mit | fd57a0ab1489127f54c0a317304c6346 | 33.832355 | 116 | 0.592232 | 4.291754 | false | false | false | false |
meejah/txtorcon | txtorcon/torinfo.py | 1 | 9645 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
from twisted.internet import defer
from txtorcon.interface import ITorControlProtocol
class MagicContainer(object):
    """
    This merely contains 1 or more methods or further MagicContainer
    instances; see _do_setup in TorInfo.

    Once _setup_complete() is called, this behaves differently so that
    one can get nicer access to GETINFO things from TorInfo --
    specifically dir() and so forth pretend that there are only
    methods/attributes that pertain to actual Tor GETINFO keys.

    See TorInfo.
    """

    def __init__(self, n):
        # our dotted-name segment (used by __repr__ and dump)
        self._txtorcon_name = n
        # name -> MagicContainer or ConfigMethod
        self.attrs = {}
        # while False, attribute access is completely normal; flipped
        # to True by _setup_complete() after the tree is built
        self._setup = False

    def _setup_complete(self):
        self._setup = True

    def _add_attribute(self, n, v):
        self.attrs[n] = v

    def __repr__(self):
        # object.__getattribute__ bypasses our own __getattribute__
        # magic below (which would hide _txtorcon_name after setup)
        return object.__getattribute__(self, '_txtorcon_name')

    def __getitem__(self, idx):
        # positional access to contained attributes (insertion order)
        return list(object.__getattribute__(self, 'attrs').items())[idx][1]

    def __len__(self):
        return len(object.__getattribute__(self, 'attrs'))

    def __dir__(self):
        # pretend our only attributes are the GETINFO-derived ones
        return list(object.__getattribute__(self, 'attrs').keys())

    def __getattribute__(self, name):
        # NOTE: every attribute access in this method must go through
        # super()/object.__getattribute__ to avoid infinite recursion
        sup = super(MagicContainer, self)
        if sup.__getattribute__('_setup') is False:
            return sup.__getattribute__(name)

        attrs = sup.__getattribute__('attrs')
        if name == '__members__':
            return list(attrs.keys())
        else:
            if name.startswith('__'):
                # dunders keep their normal behavior
                return sup.__getattribute__(name)
            try:
                return attrs[name]
            except KeyError:
                # 'dump' stays reachable even after setup
                if name in ['dump']:
                    return object.__getattribute__(self, name)
                raise AttributeError(name)

    def dump(self, prefix):
        # recursively dump contained attributes with a dotted prefix
        prefix = prefix + '.' + object.__getattribute__(self, '_txtorcon_name')
        for x in list(object.__getattribute__(self, 'attrs').values()):
            x.dump(prefix)
class ConfigMethod(object):
    """
    A callable leaf of the TorInfo attribute tree, wrapping one
    GETINFO key. Calling it issues "GETINFO <key>" (appending
    "/<arg>" when takes_arg is True) and returns a Deferred firing
    with the stripped value string.
    """

    def __init__(self, info_key, protocol, takes_arg=False):
        # the GETINFO key, e.g. "traffic/read" or "ip-to-country"
        self.info_key = info_key
        self.proto = protocol
        # True when the Tor key ends with '/*' (takes one argument)
        self.takes_arg = takes_arg

    def dump(self, prefix):
        # NOTE(review): 'prefix' is unused and the returned string is
        # discarded by MagicContainer.dump -- looks like a print was
        # removed; confirm intended behavior.
        n = self.info_key.replace('/', '.')
        n = n.replace('-', '_')
        s = '%s(%s)' % (n, 'arg' if self.takes_arg else '')
        return s

    def __call__(self, *args):
        # enforce the arity implied by takes_arg before talking to Tor
        if self.takes_arg:
            if len(args) != 1:
                raise TypeError(
                    '"%s" takes exactly one argument' % self.info_key
                )
            req = '%s/%s' % (self.info_key, str(args[0]))
        else:
            if len(args) != 0:
                raise TypeError('"%s" takes no arguments' % self.info_key)
            req = self.info_key

        def stripper(key, arg):
            # strip "keyname="
            # sometimes keyname= is followed by a newline, so final .strip()
            return arg.strip()[len(key) + 1:].strip()

        d = self.proto.get_info_raw(req)
        d.addCallback(functools.partial(stripper, req))
        return d

    def __str__(self):
        arg = ''
        if self.takes_arg:
            arg = 'arg'
        return '%s(%s)' % (self.info_key.replace('-', '_'), arg)
class TorInfo(object):
    """Implements some attribute magic over top of TorControlProtocol so
    that all the available GETINFO values are gettable in a little
    easier fashion. Dashes are replaced by underscores (since dashes
    aren't valid in method/attribute names for Python). Some of the
    magic methods will take a single string argument if the
    corresponding Tor GETINFO would take one (in 'GETINFO info/names'
    it will end with '/*', and the same in torspec). In either case,
    the method returns a Deferred which will callback with the
    requested value, always a string.

    For example (see also examples/tor_info.py)::

        proto = TorControlProtocol()
        #...
        def cb(arg):
            print arg
        info = TorInfo(proto)
        info.traffic.written().addCallback(cb)
        info.ip_to_country('8.8.8.8').addCallback(cb)

    For interactive use -- or even checking things progammatically --
    TorInfo pretends it only has attributes that coorespond to valid
    GETINFO calls. So for example, dir(info) will only return all the
    currently valid top-level things. In the above example this might
    be ['traffic', 'ip_to_country'] (of course in practice this is a
    much longer list). And "dir(info.traffic)" might return ['read',
    'written']

    For something similar to this for configuration (GETCONF, SETCONF)
    see TorConfig which is quite a lot more complicated (internally)
    since you can set config items.

    NOTE that 'GETINFO config/*' is not supported as it's the only
    case that's not a leaf, but theoretically a method.
    """

    def __init__(self, control, errback=None):
        # While _setup is False, attribute access behaves normally; once
        # _do_setup has populated self.attrs and _setup flips to True,
        # __getattribute__ below only exposes the discovered GETINFO tree.
        self._setup = False
        self.attrs = {}
        '''After _setup is True, these are all we show as attributes.'''

        self.protocol = ITorControlProtocol(control)
        self.errback = errback

        self.post_bootstrap = defer.Deferred()
        if self.protocol.post_bootstrap:
            self.protocol.post_bootstrap.addCallback(self.bootstrap)
        else:
            self.bootstrap()

    def _add_attribute(self, n, v):
        # Store directly in the dict; must not go through the (magic)
        # attribute machinery.
        self.attrs[n] = v

    # iterator protocol

    def __getitem__(self, idx):
        # Only valid after setup; uses object.__getattribute__ to bypass
        # the attribute magic below.
        sup = super(TorInfo, self)
        if sup.__getattribute__('_setup') is True:
            return list(object.__getattribute__(self, 'attrs').items())[idx][1]
        raise TypeError("No __getitem__ until we've setup.")

    def __len__(self):
        sup = super(TorInfo, self)
        if sup.__getattribute__('_setup') is True:
            return len(object.__getattribute__(self, 'attrs'))
        raise TypeError("No length until we're setup.")

    # change our attribute behavior based on the value of _setup

    def __dir__(self):
        # After setup, only the discovered GETINFO names are advertised.
        sup = super(TorInfo, self)
        if sup.__getattribute__('_setup') is True:
            return list(sup.__getattribute__('attrs').keys())
        return list(sup.__getattribute__('__dict__').keys())

    def __getattribute__(self, name):
        sup = super(TorInfo, self)
        if sup.__getattribute__('_setup') is False:
            # Normal attribute behavior until bootstrap completes.
            return sup.__getattribute__(name)

        attrs = sup.__getattribute__('attrs')
        # are there other "special" attributes we need to consider..?
        if name == '__members__':
            return list(attrs.keys())
        if name == '__class__':
            return sup.__class__
        else:
            try:
                return attrs[name]
            except KeyError:
                # 'dump' is the one real method still reachable post-setup.
                if name == 'dump':
                    return object.__getattribute__(self, name)
                raise AttributeError(name)

    def bootstrap(self, *args):
        """Query Tor for all valid GETINFO keys and build the attribute tree.

        Returns the Deferred for the whole chain; fires with self via
        _setup_complete.
        """
        d = self.protocol.get_info_raw("info/names")
        d.addCallback(self._do_setup)
        if self.errback:
            d.addErrback(self.errback)
        d.addCallback(self._setup_complete)
        return d

    def dump(self):
        # Bypass __getattribute__ since it hides 'attrs' after setup.
        for x in object.__getattribute__(self, 'attrs').values():
            x.dump('')

    def _do_setup(self, data):
        """Parse 'info/names' output into nested MagicContainers and
        ConfigMethod leaves."""
        # FIXME figure out why network-status doesn't work (get
        # nothing back from Tor it seems, although stem does get an
        # answer). this is a space-separated list of ~2500 OR id's;
        # could it be that LineReceiver can't handle it?

        added_magic = []
        for line in data.split('\n'):
            if line == "info/names=" or line.strip() == '':
                continue

            (name, documentation) = line.split(' ', 1)
            # FIXME think about this -- this is the only case where
            # there's something that's a directory
            # (i.e. MagicContainer) AND needs to be a ConfigMethod as
            # well...but doesn't really seem very useful. Somewhat
            # simpler to not support this case for now...
            if name == 'config/*':
                continue

            if name.endswith('/*'):
                # this takes an arg, so make a method
                bits = name[:-2].split('/')
                takes_arg = True
            else:
                bits = name.split('/')
                takes_arg = False

            # Walk/create intermediate containers for e.g. 'ns/id/*'.
            mine = self
            for bit in bits[:-1]:
                bit = bit.replace('-', '_')
                if bit in mine.attrs:
                    mine = mine.attrs[bit]
                    if not isinstance(mine, MagicContainer):
                        raise RuntimeError(
                            "Already had something: %s for %s" % (bit, name)
                        )
                else:
                    c = MagicContainer(bit)
                    added_magic.append(c)
                    mine._add_attribute(bit, c)
                    mine = c
            n = bits[-1].replace('-', '_')
            if n in mine.attrs:
                raise RuntimeError(
                    "Already had something: %s for %s" % (n, name)
                )
            mine._add_attribute(n, ConfigMethod('/'.join(bits),
                                                self.protocol, takes_arg))

        for c in added_magic:
            c._setup_complete()
        return None

    def _setup_complete(self, *args):
        # Grab post_bootstrap BEFORE flipping _setup: afterwards
        # __getattribute__ only resolves names present in self.attrs.
        pb = self.post_bootstrap
        self._setup = True
        pb.callback(self)
| mit | 9c4cd161cfc133163774c02024ba4e1e | 32.489583 | 79 | 0.555314 | 4.332884 | false | false | false | false |
meejah/txtorcon | txtorcon/stream.py | 1 | 11697 | # -*- coding: utf-8 -*-
"""
Contains an implementation of a :class:`Stream abstraction used by
:class:`TorState to represent all streams in Tor's state. There is
also an interface called :class:`interface.IStreamListener` for
listening for stream updates (see also
:meth:`TorState.add_stream_listener`) and the interface called
:class:interface.IStreamAttacher` used by :class:`TorState` as a way
to attach streams to circuits "by hand"
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from twisted.python import log
from twisted.internet import defer
from txtorcon.interface import ICircuitContainer, IStreamListener
from txtorcon.util import find_keywords, maybe_ip_addr
class Stream(object):
    """
    Represents an active stream in Tor's state (:class:`txtorcon.TorState`).

    :ivar circuit:
        Streams will generally be attached to circuits pretty
        quickly. If they are attached, circuit will be a
        :class:`txtorcon.Circuit` instance or None if this stream
        isn't yet attached to a circuit.

    :ivar state:
        Tor's idea of the stream's state, one of:
          - NEW: New request to connect
          - NEWRESOLVE: New request to resolve an address
          - REMAP: Address re-mapped to another
          - SENTCONNECT: Sent a connect cell along a circuit
          - SENTRESOLVE: Sent a resolve cell along a circuit
          - SUCCEEDED: Received a reply; stream established
          - FAILED: Stream failed and not retriable
          - CLOSED: Stream closed
          - DETACHED: Detached from circuit; still retriable

    :ivar target_host:
        Something like www.example.com -- the host the stream is destined for.

    :ivar target_port:
        The port the stream will exit to.

    :ivar target_addr:
        Target address, looked up (usually) by Tor (e.g. 127.0.0.1).

    :ivar id:
        The ID of this stream, a number (or None if unset).
    """

    def __init__(self, circuitcontainer, addrmap=None):
        """
        :param circuitcontainer: an object which implements
            :class:`interface.ICircuitContainer`
        :param addrmap: optional address map used to translate IP
            addresses back to hostnames in update().
        """

        self.circuit_container = ICircuitContainer(circuitcontainer)

        # FIXME: Sphinx doesn't seem to understand these variable
        # docstrings, so consolidate with above if Sphinx is the
        # answer -- actually it does, so long as the :ivar: things
        # are never mentioned it seems.

        self.id = None
        """An int, Tor's ID for this :class:`txtorcon.Circuit`"""

        self.state = None
        """A string, Tor's idea of the state of this
        :class:`txtorcon.Stream`"""

        self.target_host = None
        """Usually a hostname, but sometimes an IP address (e.g. when
        we query existing state from Tor)"""

        self.target_addr = None
        """If available, the IP address we're connecting to (if None,
        see target_host instead)."""

        self.target_port = 0
        """The port we're connecting to."""

        self.circuit = None
        """If we've attached to a :class:`txtorcon.Circuit`, this will
        be an instance of :class:`txtorcon.Circuit` (otherwise None)."""

        self.listeners = []
        """A list of all connected
        :class:`txtorcon.interface.IStreamListener` instances."""

        self.source_addr = None
        """If available, the address from which this Stream originated
        (e.g. local process, etc). See get_process() also."""

        self.source_port = 0
        """If available, the port from which this Stream
        originated. See get_process() also."""

        self.flags = {}
        """All flags from last update to this Stream. str->str"""

        self._closing_deferred = None
        """Internal. Holds Deferred that will callback when this
        stream is CLOSED, FAILED (or DETACHED??)"""

        self._addrmap = addrmap

    def listen(self, listen):
        """
        Attach an :class:`txtorcon.interface.IStreamListener` to this stream.

        See also :meth:`txtorcon.TorState.add_stream_listener` to
        listen to all streams.

        :param listen: something that knows
            :class:`txtorcon.interface.IStreamListener`
        """

        listener = IStreamListener(listen)
        if listener not in self.listeners:
            self.listeners.append(listener)

    def unlisten(self, listener):
        """Remove a previously-attached listener."""
        self.listeners.remove(listener)

    def close(self, **kw):
        """
        This asks Tor to close the underlying stream object. See
        :meth:`txtorcon.interface.ITorControlProtocol.close_stream`
        for details.

        Although Tor currently takes no flags, it allows you to; any
        keyword arguments are passed through as flags.

        NOTE that the callback delivered from this method only
        callbacks after the underlying stream is really destroyed
        (*not* just when the CLOSESTREAM command has successfully
        completed).
        """

        self._closing_deferred = defer.Deferred()

        def close_command_is_queued(*args):
            # Wait for the stream to actually close (see
            # maybe_call_closing_deferred) rather than for command success.
            return self._closing_deferred
        d = self.circuit_container.close_stream(self, **kw)
        d.addCallback(close_command_is_queued)
        return self._closing_deferred

    def _create_flags(self, kw):
        """
        this clones the kw dict, adding a lower-case version of every key
        (duplicated in circuit.py; consider putting in util?)
        """

        flags = {}
        for k in kw.keys():
            flags[k] = kw[k]
            flags[k.lower()] = flags[k]
        return flags

    def update(self, args):
        """Process one STREAM event's arguments from Tor, updating state
        and notifying listeners."""
        if self.id is None:
            self.id = int(args[0])
        else:
            if self.id != int(args[0]):
                raise RuntimeError("Update for wrong stream.")

        kw = find_keywords(args)
        self.flags = kw

        if 'SOURCE_ADDR' in kw:
            # rfind: addr may itself contain colons (e.g. IPv6).
            last_colon = kw['SOURCE_ADDR'].rfind(':')
            self.source_addr = kw['SOURCE_ADDR'][:last_colon]
            if self.source_addr != '(Tor_internal)':
                self.source_addr = maybe_ip_addr(self.source_addr)
            self.source_port = int(kw['SOURCE_ADDR'][last_colon + 1:])

        self.state = args[1]
        # XXX why not using the state-machine stuff? ;)
        if self.state in ['NEW', 'NEWRESOLVE', 'SUCCEEDED']:
            if self.target_host is None:
                last_colon = args[3].rfind(':')
                self.target_host = args[3][:last_colon]
                self.target_port = int(args[3][last_colon + 1:])
                # target_host is often an IP address (newer tors? did
                # this change?) so we attempt to look it up in our
                # AddrMap and make it a name no matter what.
                if self._addrmap:
                    try:
                        h = self._addrmap.find(self.target_host)
                        self.target_host = h.name
                    except KeyError:
                        pass
            self.target_port = int(self.target_port)
            if self.state == 'NEW':
                if self.circuit is not None:
                    log.err(RuntimeError("Weird: circuit valid in NEW"))
                self._notify('stream_new', self)
            else:
                self._notify('stream_succeeded', self)

        elif self.state == 'REMAP':
            self.target_addr = maybe_ip_addr(args[3][:args[3].rfind(':')])

        elif self.state == 'CLOSED':
            if self.circuit:
                self.circuit.streams.remove(self)
                self.circuit = None
            self.maybe_call_closing_deferred()
            flags = self._create_flags(kw)
            self._notify('stream_closed', self, **flags)

        elif self.state == 'FAILED':
            if self.circuit:
                self.circuit.streams.remove(self)
                self.circuit = None
            self.maybe_call_closing_deferred()
            # build lower-case version of all flags
            flags = self._create_flags(kw)
            self._notify('stream_failed', self, **flags)

        elif self.state == 'SENTCONNECT':
            pass  # print 'SENTCONNECT',self,args

        elif self.state == 'DETACHED':
            if self.circuit:
                self.circuit.streams.remove(self)
                self.circuit = None
            # FIXME does this count as closed?
            # self.maybe_call_closing_deferred()
            flags = self._create_flags(kw)
            self._notify('stream_detach', self, **flags)

        elif self.state in ['NEWRESOLVE', 'SENTRESOLVE']:
            pass  # print self.state, self, args

        else:
            raise RuntimeError("Unknown state: %s" % self.state)

        # see if we attached to a circuit. I believe this only happens
        # on a SENTCONNECT or REMAP. DETACHED is excluded so we don't
        # immediately re-add the circuit we just detached from
        if self.state not in ['CLOSED', 'FAILED', 'DETACHED']:
            cid = int(args[2])
            if cid == 0:
                # Circuit ID 0 means "not attached to any circuit".
                if self.circuit and self in self.circuit.streams:
                    self.circuit.streams.remove(self)
                self.circuit = None
            else:
                if self.circuit is None:
                    self.circuit = self.circuit_container.find_circuit(cid)
                    if self not in self.circuit.streams:
                        self.circuit.streams.append(self)
                        self._notify('stream_attach', self, self.circuit)
                else:
                    # XXX I am seeing this from torexitscan (*and*
                    # from my TorNS thing, so I think it's some kind
                    # of 'consistent' behavior out of Tor) so ... this
                    # is probably when we're doing stream-attachment
                    # stuff? maybe the stream gets assigned a circuit
                    # 'provisionally' and then it's changed?
                    # ...yup, looks like it!
                    if self.circuit.id != cid:
                        # Can happen (e.g. two SENTCONNECTs in a row with
                        # different circuit ids) if something else is also
                        # attaching streams; log rather than crash.
                        log.err(
                            RuntimeError(
                                'Circuit ID changed from %d to %d state=%s.' %
                                (self.circuit.id, cid, self.state)
                            )
                        )

    def _notify(self, func, *args, **kw):
        """
        Internal helper. Calls the IStreamListener function 'func' with
        the given args, guarding around errors.
        """
        for x in self.listeners:
            try:
                getattr(x, func)(*args, **kw)
            except Exception:
                # One broken listener must not prevent others running.
                log.err()

    def maybe_call_closing_deferred(self):
        """
        Used internally to callback on the _closing_deferred if it
        exists.
        """

        if self._closing_deferred:
            self._closing_deferred.callback(self)
            self._closing_deferred = None

    def __str__(self):
        c = ''
        if self.circuit:
            c = 'on %d ' % self.circuit.id
        return "<Stream %s %d %s%s -> %s port %d>" % (self.state,
                                                      self.id,
                                                      c,
                                                      self.target_host,
                                                      str(self.target_addr),
                                                      self.target_port)
| mit | eea8f2ce5f426a32fbcd2ba90641c6cf | 36.251592 | 159 | 0.558519 | 4.371076 | false | false | false | false |
tdryer/hangups | examples/sync_recent_conversations.py | 3 | 1429 | """Example of using hangups to get recent conversations."""
import hangups
from common import run_example
async def sync_recent_conversations(client, _):
    """Print the 20 most recent conversations and their participants.

    Requests at most one event per conversation (we only need metadata),
    restricted to the inbox, and lists conversations newest-first.
    """
    response = await client.sync_recent_conversations(
        hangups.hangouts_pb2.SyncRecentConversationsRequest(
            request_header=client.get_request_header(),
            max_conversations=20,
            max_events_per_conversation=1,
            sync_filter=[hangups.hangouts_pb2.SYNC_FILTER_INBOX],
        )
    )

    def sort_timestamp(state):
        # Most recently active conversations have the largest timestamps.
        return state.conversation.self_conversation_state.sort_timestamp

    ordered_states = sorted(
        response.conversation_state, key=sort_timestamp, reverse=True
    )

    for state in ordered_states:
        conversation = state.conversation
        conv_name = (repr(conversation.name) if conversation.name
                     else 'Unnamed Hangout')
        print(' - {} ({})'.format(conv_name, state.conversation_id.id))
        for participant in conversation.participant_data:
            name = (repr(participant.fallback_name)
                    if participant.fallback_name else 'No fallback name')
            print(' - {} ({})'.format(name, participant.id.gaia_id))
if __name__ == '__main__':
    # The shared example runner handles authentication and arguments.
    run_example(sync_recent_conversations)
| mit | 759292b804599ce026ebb3a11f52b924 | 33.02381 | 76 | 0.636809 | 3.770449 | false | false | false | false |
tdryer/hangups | setup.py | 1 | 2156 | from setuptools import setup
import os
import sys
# hangups relies on Python 3.6+ features (e.g. async/await improvements);
# refuse to install on older interpreters.
if sys.version_info < (3, 6):
    raise RuntimeError("hangups requires Python 3.6+")

# Find __version__ without an import that requires dependencies to be
# installed. Use context managers so the file handles are closed promptly
# (the previous open(...).read() forms leaked the handles until GC).
_version_path = os.path.join(os.path.dirname(__file__), 'hangups/version.py')
with open(_version_path) as _version_file:
    exec(_version_file.read())

with open('README.rst') as _readme_file:
    readme = _readme_file.read()

# Dependencies should be specified as a specific version or version range that
# is unlikely to break compatibility in the future. This is required to prevent
# hangups from breaking when new versions of dependencies are released,
# especially for end-users (non-developers) who use pip to install hangups.
install_requires = [
    'ConfigArgParse>=0.11.0,<2',
    'aiohttp>=3.7,<4',
    'async-timeout>=2,<5',
    'appdirs>=1.4,<1.5',
    'readlike>=0.1.2,<0.2',
    'requests>=2.6.0,<3',  # uses semantic versioning (after 2.6)
    'ReParser==1.4.3',
    'protobuf>=3.1.0,<4',
    'urwid>=1.3.1,<2.2',
    'MechanicalSoup>=0.6.0,<0.13',
]

setup(
    name='hangups',
    version=__version__,
    description=('the first third-party instant messaging client for Google '
                 'Hangouts'),
    long_description=readme,
    url='https://github.com/tdryer/hangups',
    author='Tom Dryer',
    author_email='tomdryer.com@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Communications :: Chat',
        'Environment :: Console :: Curses',
    ],
    packages=['hangups', 'hangups.ui'],
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'hangups=hangups.ui.__main__:main',
        ],
    },
)
| mit | bc715f3f8941f2ef196cf3f135acc601 | 29.8 | 79 | 0.616883 | 3.61139 | false | false | false | false |
tdryer/hangups | hangups/ui/__main__.py | 1 | 48593 | """Reference chat client for hangups."""
import appdirs
import asyncio
import configargparse
import contextlib
import logging
import os
import sys
import urwid
import readlike
from bisect import bisect
import hangups
from hangups.ui.emoticon import replace_emoticons
from hangups.ui import notifier
from hangups.ui.utils import get_conv_name, add_color_to_scheme
# hangups used to require a fork of urwid called hangups-urwid which may still
# be installed and create a conflict with the 'urwid' package name. See #198.
if urwid.__version__ == '1.2.2-dev':
    sys.exit('error: hangups-urwid package is installed\n\n'
             'Please uninstall hangups-urwid and urwid, and reinstall '
             'hangups.')

LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

# Each scheme is a set of urwid palette entries of the form
# (attribute name, foreground, background).
COL_SCHEMES = {
    # Very basic scheme with no colour
    'default': {
        ('active_tab', '', ''),
        ('inactive_tab', 'standout', ''),
        ('msg_date', '', ''),
        ('msg_sender', '', ''),
        ('msg_self', '', ''),
        ('msg_text', '', ''),
        ('msg_text_self', '', ''),
        ('msg_watermark', '', ''),
        ('msg_selected', 'standout', ''),
        ('status_line', 'standout', ''),
        ('tab_background', 'standout', ''),
    },
    'solarized-dark': {
        ('active_tab', 'light gray', 'light blue'),
        ('inactive_tab', 'underline', 'light green'),
        ('msg_date', 'dark cyan', ''),
        ('msg_sender', 'dark blue', ''),
        ('msg_text_self', '', ''),
        ('msg_self', 'dark green', ''),
        ('msg_text', '', ''),
        ('msg_watermark', 'light gray', ''),
        ('msg_selected', 'standout', ''),
        ('status_line', 'standout', ''),
        ('tab_background', 'black,standout,underline', 'light green'),
    },
}

# Attribute names whose colours may be overridden from user configuration.
COL_SCHEME_NAMES = (
    'active_tab', 'inactive_tab', 'msg_date', 'msg_sender', 'msg_self',
    'msg_text', 'msg_text_self', 'status_line', 'tab_background'
)

# Generic notification used when the user wants no message content leaked.
DISCREET_NOTIFICATION = notifier.Notification(
    'hangups', 'Conversation', 'New message'
)
class HangupsDisconnected(Exception):
    """Raised when hangups is disconnected.

    Used to unwind the main event loop cleanly once the client's
    connection has ended.
    """
class ChatUI:
    """User interface for hangups.

    Owns the hangups Client, the urwid MainLoop, and the coroutine queue
    that bridges urwid callbacks to asyncio. Constructing this object runs
    the whole UI until disconnect.
    """

    def __init__(self, refresh_token_path, keybindings, palette,
                 palette_colors, datetimefmt, notifier_,
                 discreet_notifications, manual_login, keep_emoticons):
        """Start the user interface.

        Args:
            refresh_token_path: path to the cached OAuth refresh token.
            keybindings: dict mapping action names to key strings.
            palette: urwid palette entries for display attributes.
            palette_colors: number of colours the terminal supports.
            datetimefmt: timestamp format settings for messages.
            notifier_: notifier backend for desktop notifications.
            discreet_notifications: if True, hide message content in
                notifications.
            manual_login: if True, prompt for credentials interactively.
            keep_emoticons: if True, do not replace emoticons with emoji.
        """
        self._keys = keybindings
        self._datetimefmt = datetimefmt
        self._notifier = notifier_
        self._discreet_notifications = discreet_notifications
        self._keep_emoticons = keep_emoticons

        set_terminal_title('hangups')

        # These are populated by on_connect when it's called.
        self._conv_widgets = {}  # {conversation_id: ConversationWidget}
        self._tabbed_window = None  # TabbedWindowWidget
        self._conv_list = None  # hangups.ConversationList
        self._user_list = None  # hangups.UserList
        self._coroutine_queue = CoroutineQueue()
        self._exception = None

        # TODO Add urwid widget for getting auth.
        try:
            cookies = hangups.auth.get_auth_stdin(
                refresh_token_path, manual_login
            )
        except hangups.GoogleAuthError as e:
            sys.exit('Login failed ({})'.format(e))

        self._client = hangups.Client(cookies)
        self._client.on_connect.add_observer(self._on_connect)

        loop = asyncio.get_event_loop()
        loop.set_exception_handler(self._exception_handler)
        try:
            self._urwid_loop = urwid.MainLoop(
                LoadingWidget(), palette, handle_mouse=False,
                input_filter=self._input_filter,
                event_loop=urwid.AsyncioEventLoop(loop=loop)
            )
        except urwid.AttrSpecError as e:
            # Fail gracefully for invalid colour options.
            sys.exit(e)
        self._urwid_loop.screen.set_terminal_properties(colors=palette_colors)
        self._urwid_loop.start()

        coros = [self._connect(), self._coroutine_queue.consume()]

        # Enable bracketed paste mode after the terminal has been switched
        # to the alternate screen (after MainLoop.start()) to work around
        # bug 729533 in VTE.
        with bracketed_paste_mode():
            try:
                # Run all the coros, until they all complete or one raises an
                # exception. In the normal case, HangupsDisconnected will be
                # raised.
                loop.run_until_complete(asyncio.gather(*coros))
            except HangupsDisconnected:
                pass
            finally:
                # Clean up urwid.
                self._urwid_loop.stop()
                # Cancel all of the coros, and wait for them to shut down.
                task = asyncio.gather(*coros, return_exceptions=True)
                task.cancel()
                try:
                    loop.run_until_complete(task)
                except asyncio.CancelledError:
                    # In Python 3.7, asyncio.gather no longer swallows
                    # CancelledError, so we need to ignore it.
                    pass
                loop.close()

        # If an exception was stored, raise it now. This is used for
        # exceptions originating in urwid callbacks.
        if self._exception:
            raise self._exception  # pylint: disable=raising-bad-type

    async def _connect(self):
        # Runs until the connection ends; raising HangupsDisconnected
        # unwinds run_until_complete above for a clean shutdown.
        await self._client.connect()
        raise HangupsDisconnected()

    def _exception_handler(self, _loop, context):
        """Handle exceptions from the asyncio loop."""
        # Start a graceful shutdown.
        self._coroutine_queue.put(self._client.disconnect())

        # Store the exception to be re-raised later. If the context doesn't
        # contain an exception, create one containing the error message.
        default_exception = Exception(context.get('message'))
        self._exception = context.get('exception', default_exception)

    def _input_filter(self, keys, _):
        """Handle global keybindings."""
        if keys == [self._keys['menu']]:
            if self._urwid_loop.widget == self._tabbed_window:
                self._show_menu()
            else:
                self._hide_menu()
        elif keys == [self._keys['quit']]:
            self._coroutine_queue.put(self._client.disconnect())
        else:
            # Unhandled keys are passed through to the focused widget.
            return keys

    def _show_menu(self):
        """Show the overlay menu."""
        # If the current widget in the TabbedWindowWidget has a menu,
        # overlay it on the TabbedWindowWidget.
        current_widget = self._tabbed_window.get_current_widget()
        if hasattr(current_widget, 'get_menu_widget'):
            menu_widget = current_widget.get_menu_widget(self._hide_menu)
            overlay = urwid.Overlay(menu_widget, self._tabbed_window,
                                    align='center', width=('relative', 80),
                                    valign='middle', height=('relative', 80))
            self._urwid_loop.widget = overlay

    def _hide_menu(self):
        """Hide the overlay menu."""
        self._urwid_loop.widget = self._tabbed_window

    def get_conv_widget(self, conv_id):
        """Return an existing or new ConversationWidget."""
        if conv_id not in self._conv_widgets:
            set_title_cb = (lambda widget, title:
                            self._tabbed_window.set_tab(widget, title=title))
            widget = ConversationWidget(
                self._client, self._coroutine_queue,
                self._conv_list.get(conv_id), set_title_cb, self._keys,
                self._datetimefmt, self._keep_emoticons
            )
            self._conv_widgets[conv_id] = widget
        return self._conv_widgets[conv_id]

    def add_conversation_tab(self, conv_id, switch=False):
        """Add conversation tab if not present, and optionally switch to it."""
        conv_widget = self.get_conv_widget(conv_id)
        self._tabbed_window.set_tab(conv_widget, switch=switch,
                                    title=conv_widget.title)

    def on_select_conversation(self, conv_id):
        """Called when the user selects a new conversation to listen to."""
        # switch to new or existing tab for the conversation
        self.add_conversation_tab(conv_id, switch=True)

    async def _on_connect(self):
        """Handle connecting for the first time."""
        self._user_list, self._conv_list = (
            await hangups.build_user_conversation_list(self._client)
        )
        self._conv_list.on_event.add_observer(self._on_event)

        # show the conversation menu
        conv_picker = ConversationPickerWidget(self._conv_list,
                                               self.on_select_conversation,
                                               self._keys)
        self._tabbed_window = TabbedWindowWidget(self._keys)
        self._tabbed_window.set_tab(conv_picker, switch=True,
                                    title='Conversations')
        self._urwid_loop.widget = self._tabbed_window

    def _on_event(self, conv_event):
        """Open conversation tab for new messages & pass events to notifier."""
        conv = self._conv_list.get(conv_event.conversation_id)
        user = conv.get_user(conv_event.user_id)
        # Only notify for chat messages from other users in conversations
        # that are not muted.
        show_notification = all((
            isinstance(conv_event, hangups.ChatMessageEvent),
            not user.is_self,
            not conv.is_quiet,
        ))
        if show_notification:
            self.add_conversation_tab(conv_event.conversation_id)
            if self._discreet_notifications:
                notification = DISCREET_NOTIFICATION
            else:
                notification = notifier.Notification(
                    user.full_name, get_conv_name(conv), conv_event.text
                )
            self._notifier.send(notification)
class CoroutineQueue:
    """FIFO hand-off point between urwid callbacks and asyncio.

    Urwid invokes plain callbacks for user input, but some responses to
    input must run as coroutines. Scheduling them with
    asyncio.ensure_future would log exceptions instead of propagating
    them; instead, callbacks enqueue coroutines here and a single
    consumer coroutine awaits them in order, so any exception surfaces
    from the consume method.
    """

    def __init__(self):
        self._queue = asyncio.Queue()

    def put(self, coro):
        """Enqueue *coro* to be awaited by the consumer."""
        # Deliberately no logging: this runs on every keypress and would
        # flood the log otherwise.
        assert asyncio.iscoroutine(coro)
        self._queue.put_nowait(coro)

    async def consume(self):
        """Await queued coroutines one at a time, forever."""
        while True:
            queued = await self._queue.get()
            assert asyncio.iscoroutine(queued)
            await queued
class WidgetBase(urwid.WidgetWrap):
    """Base for UI Widgets

    This class overrides the property definition for the method ``keypress``
    in ``urwid.WidgetWrap``. Using a method that overrides the property saves
    many pylint suppressions in the subclasses below.

    Args:
        target: urwid.Widget instance to wrap
    """

    def keypress(self, size, key):
        """Forward the call to the wrapped widget."""
        # pylint:disable=not-callable, useless-super-delegation
        return super().keypress(size, key)
class LoadingWidget(WidgetBase):
    """Full-screen placeholder shown while the client is connecting."""

    def __init__(self):
        # Filler centers the message vertically; the Text centers it
        # horizontally.
        message = urwid.Text('Connecting...', align='center')
        super().__init__(urwid.Filler(message))
class RenameConversationDialog(WidgetBase):
    """Small form for renaming a conversation.

    Presents an edit box pre-filled with the current conversation name
    plus Save/Cancel buttons. Saving queues the rename coroutine and then
    calls *on_save*; cancelling simply calls *on_cancel*.
    """

    def __init__(self, coroutine_queue, conversation, on_cancel, on_save,
                 keybindings):
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation
        name_edit = urwid.Edit(edit_text=get_conv_name(conversation))
        save_button = urwid.Button(
            'Save',
            on_press=lambda _: self._rename(name_edit.edit_text, on_save)
        )
        cancel_button = urwid.Button('Cancel', on_press=lambda _: on_cancel())
        walker = urwid.SimpleFocusListWalker([
            urwid.Text('Rename conversation:'),
            name_edit,
            save_button,
            cancel_button,
        ])
        super().__init__(ListBox(keybindings, walker))

    def _rename(self, name, callback):
        """Queue the rename request, then notify completion via callback."""
        self._coroutine_queue.put(self._conversation.rename(name))
        callback()
class ConversationMenu(WidgetBase):
    """Menu for conversation actions."""

    def __init__(self, coroutine_queue, conversation, close_callback,
                 keybindings):
        # NOTE: the lambdas below reference frame/list_box which are only
        # assigned later in this method; this is safe because the lambdas
        # are not invoked until after __init__ has completed.
        rename_dialog = RenameConversationDialog(
            coroutine_queue, conversation,
            lambda: frame.contents.__setitem__('body', (list_box, None)),
            close_callback, keybindings
        )
        items = [
            urwid.Text(
                'Conversation name: {}'.format(get_conv_name(conversation))
            ),
            urwid.Button(
                'Change Conversation Name',
                # Swap the frame body to the rename dialog in place.
                on_press=lambda _: frame.contents.__setitem__(
                    'body', (rename_dialog, None)
                )
            ),
            urwid.Divider('-'),
            urwid.Button('Back', on_press=lambda _: close_callback()),
        ]
        list_walker = urwid.SimpleFocusListWalker(items)
        list_box = ListBox(keybindings, list_walker)
        frame = urwid.Frame(list_box)
        padding = urwid.Padding(frame, left=1, right=1)
        line_box = urwid.LineBox(padding, title='Conversation Menu')
        super().__init__(line_box)
class ConversationButton(WidgetBase):
    """Button labelled with a conversation's name and unread count."""

    def __init__(self, conversation, on_press):
        self._conversation = conversation
        conversation.on_event.add_observer(self._on_event)
        # Watermark notifications must refresh the label too: marking
        # messages as read produces no conversation event.
        conversation.on_watermark_notification.add_observer(self._on_event)
        self._button = urwid.Button(self._get_label(), on_press=on_press,
                                    user_data=conversation.id_)
        super().__init__(self._button)

    def _get_label(self):
        """Return label text derived from the conversation's state."""
        return get_conv_name(self._conversation, show_unread=True)

    def _on_event(self, _):
        """Refresh the label whenever the conversation updates."""
        self._button.set_label(self._get_label())

    @property
    def last_modified(self):
        """Conversation's last-modified timestamp, used as a sort key."""
        return self._conversation.last_modified
class ConversationListWalker(urwid.SimpleFocusListWalker):
    """Walker holding one ConversationButton per conversation.

    Buttons stay ordered so the most recently modified conversation is
    first.
    """
    # pylint: disable=abstract-method

    def __init__(self, conversation_list, on_select):
        self._conversation_list = conversation_list
        self._conversation_list.on_event.add_observer(self._on_event)
        self._on_press = lambda button, conv_id: on_select(conv_id)
        by_recency = sorted(conversation_list.get_all(), reverse=True,
                            key=lambda conv: conv.last_modified)
        super().__init__([
            ConversationButton(conv, on_press=self._on_press)
            for conv in by_recency
        ])

    def _on_event(self, _):
        """Re-sort buttons by recency after every conversation event."""
        # TODO: handle adding new conversations
        self.sort(key=lambda button: button.last_modified, reverse=True)
class ListBox(WidgetBase):
    """urwid.ListBox wrapper honouring the user's scroll keybindings."""

    # Ordered (binding name, urwid key) pairs; the first matching binding
    # wins, mirroring an if/elif chain.
    _SCROLL_ACTIONS = (
        ('down', 'down'),
        ('up', 'up'),
        ('page_up', 'page up'),
        ('page_down', 'page down'),
    )

    def __init__(self, keybindings, list_walker):
        self._keybindings = keybindings
        super().__init__(urwid.ListBox(list_walker))

    def keypress(self, size, key):
        """Translate configured scroll keys into urwid's native ones."""
        key = super().keypress(size, key)
        for binding_name, urwid_key in self._SCROLL_ACTIONS:
            if key == self._keybindings[binding_name]:
                super().keypress(size, urwid_key)
                return None
        return key
class ConversationPickerWidget(WidgetBase):
    """Scrollable conversation list; choosing one invokes *on_select*."""

    def __init__(self, conversation_list, on_select, keybindings):
        walker = ConversationListWalker(conversation_list, on_select)
        padded = urwid.Padding(ListBox(keybindings, walker), left=2, right=2)
        super().__init__(padded)
class ReturnableEdit(urwid.Edit):
    """Edit widget that clears itself and calls a function on return."""

    def __init__(self, on_return, keybindings, caption=None):
        super().__init__(caption=caption, multiline=True)
        self._on_return = on_return
        self._keys = keybindings
        # True while a bracketed paste is in progress; newlines in pasted
        # text must be inserted literally rather than submitting the text.
        self._paste_mode = False

    def keypress(self, size, key):
        """Handle submit-on-enter, paste tracking and readline editing."""
        if key == 'begin paste':
            self._paste_mode = True
        elif key == 'end paste':
            self._paste_mode = False
        elif key == 'enter' and not self._paste_mode:
            # Submit the text and reset for the next message.
            self._on_return(self.get_edit_text())
            self.set_edit_text('')
        elif key not in self._keys.values() and key in readlike.keys():
            # Readline-style editing, unless the key is reserved by a
            # user keybinding.
            text, pos = readlike.edit(self.edit_text, self.edit_pos, key)
            self.set_edit_text(text)
            self.set_edit_pos(pos)
        else:
            return super().keypress(size, key)
class StatusLineWidget(WidgetBase):
    """Widget for showing status messages.

    If the client is disconnected, show a reconnecting message. If a
    temporary message is showing, show the temporary message. If someone
    is typing, show a typing message. (Priority is in that order; see
    _update.)
    """

    # Seconds before a temporary message set via show_message() expires.
    _MESSAGE_DELAY_SECS = 10

    def __init__(self, client, conversation):
        self._typing_statuses = {}  # {user_id: typing status constant}
        self._conversation = conversation
        self._conversation.on_event.add_observer(self._on_event)
        self._conversation.on_typing.add_observer(self._on_typing)
        self._widget = urwid.Text('', align='center')
        self._is_connected = True
        self._message = None  # current temporary message, if any
        self._message_handle = None  # timer handle to clear the message
        client.on_disconnect.add_observer(self._on_disconnect)
        client.on_reconnect.add_observer(self._on_reconnect)
        super().__init__(urwid.AttrMap(self._widget, 'status_line'))

    def show_message(self, message_str):
        """Show a temporary message."""
        # Restart the expiry timer if a message is already showing.
        if self._message_handle is not None:
            self._message_handle.cancel()
        self._message_handle = asyncio.get_event_loop().call_later(
            self._MESSAGE_DELAY_SECS, self._clear_message
        )
        self._message = message_str
        self._update()

    def _clear_message(self):
        """Clear the temporary message."""
        self._message = None
        self._message_handle = None
        self._update()

    def _on_disconnect(self):
        """Show reconnecting message when disconnected."""
        self._is_connected = False
        self._update()

    def _on_reconnect(self):
        """Hide reconnecting message when reconnected."""
        self._is_connected = True
        self._update()

    def _on_event(self, conv_event):
        """Make users stop typing when they send a message."""
        if isinstance(conv_event, hangups.ChatMessageEvent):
            self._typing_statuses[conv_event.user_id] = (
                hangups.TYPING_TYPE_STOPPED
            )
            self._update()

    def _on_typing(self, typing_message):
        """Handle typing updates."""
        self._typing_statuses[typing_message.user_id] = typing_message.status
        self._update()

    def _update(self):
        """Update status text."""
        typing_users = [self._conversation.get_user(user_id)
                        for user_id, status in self._typing_statuses.items()
                        if status == hangups.TYPING_TYPE_STARTED]
        # Never show ourselves as typing.
        displayed_names = [user.first_name for user in typing_users
                           if not user.is_self]
        if displayed_names:
            typing_message = '{} {} typing...'.format(
                ', '.join(sorted(displayed_names)),
                'is' if len(displayed_names) == 1 else 'are'
            )
        else:
            typing_message = ''
        if not self._is_connected:
            self._widget.set_text("RECONNECTING...")
        elif self._message is not None:
            self._widget.set_text(self._message)
        else:
            self._widget.set_text(typing_message)
class MessageWidget(WidgetBase):
    """Widget for displaying a single message in a conversation."""
    def __init__(self, timestamp, text, datetimefmt, user=None,
                 show_date=False, watermark_users=None):
        # Save the timestamp as an attribute for sorting.
        self.timestamp = timestamp
        # Build the urwid markup list: (attribute, text) pairs.
        text = [
            ('msg_date', self._get_date_str(timestamp, datetimefmt,
                                            show_date=show_date) + ' '),
            ('msg_text_self' if user is not None and user.is_self
             else 'msg_text', text)
        ]
        if user is not None:
            # Insert the sender name between the date and the message body.
            text.insert(1, ('msg_self' if user.is_self else 'msg_sender',
                            user.first_name + ': '))
        if watermark_users is not None and bool(watermark_users):
            # Append a "Seen by ..." line listing users whose read
            # watermark is at this event.
            sorted_users = sorted([x.first_name for x in watermark_users])
            watermark = "\n[ Seen by {}. ]".format(', '.join(sorted_users))
            text.append(('msg_watermark', watermark))
        self._widget = urwid.SelectableIcon(text, cursor_position=0)
        super().__init__(urwid.AttrMap(
            self._widget, '', {
                # If the widget is focused, map every other display attribute
                # to 'msg_selected' so the entire message is highlighted.
                None: 'msg_selected',
                'msg_date': 'msg_selected',
                'msg_text_self': 'msg_selected',
                'msg_text': 'msg_selected',
                'msg_self': 'msg_selected',
                'msg_sender': 'msg_selected',
                'msg_watermark': 'msg_selected',
            }
        ))
    @staticmethod
    def _get_date_str(timestamp, datetimefmt, show_date=False):
        """Convert UTC datetime into user interface string."""
        fmt = ''
        if show_date:
            # Prefix a full date line when the day changed.
            fmt += '\n'+datetimefmt.get('date', '')+'\n'
        fmt += datetimefmt.get('time', '')
        # Render in the local timezone.
        return timestamp.astimezone(tz=None).strftime(fmt)
    def __lt__(self, other):
        # Order messages chronologically.
        return self.timestamp < other.timestamp
    @staticmethod
    def from_conversation_event(conversation, conv_event, prev_conv_event,
                                datetimefmt, watermark_users=None):
        """Return MessageWidget representing a ConversationEvent.
        Returns None if the ConversationEvent does not have a widget
        representation.
        """
        user = conversation.get_user(conv_event.user_id)
        # Check whether the previous event occurred on the same day as this
        # event.
        if prev_conv_event is not None:
            is_new_day = (conv_event.timestamp.astimezone(tz=None).date() !=
                          prev_conv_event.timestamp.astimezone(tz=None).date())
        else:
            is_new_day = False
        if isinstance(conv_event, hangups.ChatMessageEvent):
            return MessageWidget(conv_event.timestamp, conv_event.text,
                                 datetimefmt, user, show_date=is_new_day,
                                 watermark_users=watermark_users)
        elif isinstance(conv_event, hangups.RenameEvent):
            if conv_event.new_name == '':
                text = ('{} cleared the conversation name'
                        .format(user.first_name))
            else:
                text = ('{} renamed the conversation to {}'
                        .format(user.first_name, conv_event.new_name))
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day,
                                 watermark_users=watermark_users)
        elif isinstance(conv_event, hangups.MembershipChangeEvent):
            event_users = [conversation.get_user(user_id) for user_id
                           in conv_event.participant_ids]
            names = ', '.join([user.full_name for user in event_users])
            if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:
                text = ('{} added {} to the conversation'
                        .format(user.first_name, names))
            else:  # LEAVE
                text = ('{} left the conversation'.format(names))
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day,
                                 watermark_users=watermark_users)
        elif isinstance(conv_event, hangups.HangoutEvent):
            text = {
                hangups.HANGOUT_EVENT_TYPE_START: (
                    'A Hangout call is starting.'
                ),
                hangups.HANGOUT_EVENT_TYPE_END: (
                    'A Hangout call ended.'
                ),
                hangups.HANGOUT_EVENT_TYPE_ONGOING: (
                    'A Hangout call is ongoing.'
                ),
            }.get(conv_event.event_type, 'Unknown Hangout call event.')
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day,
                                 watermark_users=watermark_users)
        elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent):
            status_on = hangups.GROUP_LINK_SHARING_STATUS_ON
            status_text = ('on' if conv_event.new_status == status_on
                           else 'off')
            text = '{} turned {} joining by link.'.format(user.first_name,
                                                          status_text)
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day,
                                 watermark_users=watermark_users)
        else:
            # conv_event is a generic hangups.ConversationEvent.
            text = 'Unknown conversation event'
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day,
                                 watermark_users=watermark_users)
class ConversationEventListWalker(urwid.ListWalker):
    """ListWalker for ConversationEvents.
    The position may be an event ID or POSITION_LOADING.
    """
    # Sentinel position representing the "Loading..." indicator above the
    # oldest loaded event.
    POSITION_LOADING = 'loading'
    # How many of the newest events to scan linearly before falling back to
    # bisection when locating a watermark.
    WATERMARK_FAST_SEARCH_ITEMS = 10
    def __init__(self, coroutine_queue, conversation, datetimefmt):
        self._coroutine_queue = coroutine_queue  # CoroutineQueue
        self._conversation = conversation  # Conversation
        self._is_scrolling = False  # Whether the user is trying to scroll up
        self._is_loading = False  # Whether we're currently loading more events
        self._first_loaded = False  # Whether the first event is loaded
        self._datetimefmt = datetimefmt
        self._watermarked_events = {}  # Users watermarked at a given event
        # Focus starts on the newest (last) event's ID, or POSITION_LOADING
        # if no events are loaded yet.
        self._focus_position = (conversation.events[-1].id_
                                if conversation.events
                                else self.POSITION_LOADING)
        self._conversation.on_event.add_observer(self._handle_event)
        self._conversation.on_watermark_notification.add_observer(
            self._on_watermark_notification
        )
        super().__init__()
    def _handle_event(self, conv_event):
        """Handle updating and scrolling when a new event is added.
        Automatically scroll down to show the new text if the bottom is
        showing. This allows the user to scroll up to read previous messages
        while new messages are arriving.
        """
        if not self._is_scrolling:
            self.set_focus(conv_event.id_)
        else:
            self._modified()
    async def _load(self):
        """Load more events for this conversation."""
        try:
            conv_events = await self._conversation.get_events(
                self._conversation.events[0].id_
            )
        except (IndexError, hangups.NetworkError):
            conv_events = []
        if not conv_events:
            # An empty result means the conversation's very first event has
            # been reached.
            self._first_loaded = True
        if self._focus_position == self.POSITION_LOADING and conv_events:
            # If the loading indicator is still focused, and we loaded more
            # events, set focus on the first new event so the loaded
            # indicator is replaced.
            self.set_focus(conv_events[-1].id_)
        else:
            # Otherwise, still need to invalidate in case the loading
            # indicator is showing but not focused.
            self._modified()
        # Loading events can also update the watermarks.
        self._refresh_watermarked_events()
        self._is_loading = False
    def __getitem__(self, position):
        """Return widget at position or raise IndexError."""
        if position == self.POSITION_LOADING:
            if self._first_loaded:
                # TODO: Show the full date the conversation was created.
                return urwid.Text('No more messages', align='center')
            else:
                # Don't try to load while we're already loading.
                if not self._is_loading and not self._first_loaded:
                    self._is_loading = True
                    self._coroutine_queue.put(self._load())
                return urwid.Text('Loading...', align='center')
        try:
            # When creating the widget, also pass the previous event so a
            # timestamp can be shown if this event occurred on a different day.
            # Get the previous event, or None if it isn't loaded or doesn't
            # exist.
            prev_position = self._get_position(position, prev=True)
            if prev_position == self.POSITION_LOADING:
                prev_event = None
            else:
                prev_event = self._conversation.get_event(prev_position)
            return MessageWidget.from_conversation_event(
                self._conversation, self._conversation.get_event(position),
                prev_event, self._datetimefmt,
                watermark_users=self._watermarked_events.get(position, None)
            )
        except KeyError:
            raise IndexError('Invalid position: {}'.format(position))
    @staticmethod
    def _find_watermark_event(timestamps, timestamp):
        """Return index of the newest timestamp <= the watermark, or -1."""
        # Look back through the most recent events first.
        back_idx = ConversationEventListWalker.WATERMARK_FAST_SEARCH_ITEMS
        for i, t in list(enumerate(reversed(timestamps[-back_idx:]))):
            if t <= timestamp:
                return len(timestamps) - i - 1
        # Bisect the rest.
        return bisect(timestamps[:-back_idx], timestamp) - 1
    def _refresh_watermarked_events(self):
        """Recompute which users' read watermarks fall on which event."""
        self._watermarked_events.clear()
        timestamps = [x.timestamp for x in self._conversation.events]
        for user_id in self._conversation.watermarks:
            user = self._conversation.get_user(user_id)
            # Ignore the current user.
            if user.is_self:
                continue
            # Skip searching if the watermark's event was not loaded yet.
            timestamp = self._conversation.watermarks[user_id]
            if timestamp < timestamps[0]:
                continue
            event_idx = ConversationEventListWalker._find_watermark_event(
                timestamps, timestamp
            )
            if event_idx >= 0:
                event_pos = self._conversation.events[event_idx].id_
                if event_pos not in self._watermarked_events:
                    self._watermarked_events[event_pos] = set()
                self._watermarked_events[event_pos].add(user)
    def _on_watermark_notification(self, _):
        """Update watermarks for this conversation."""
        self._refresh_watermarked_events()
        self._modified()
    def _get_position(self, position, prev=False):
        """Return the next/previous position or raise IndexError."""
        if position == self.POSITION_LOADING:
            if prev:
                raise IndexError('Reached last position')
            else:
                return self._conversation.events[0].id_
        else:
            ev = self._conversation.next_event(position, prev=prev)
            if ev is None:
                if prev:
                    return self.POSITION_LOADING
                else:
                    raise IndexError('Reached first position')
            else:
                return ev.id_
    def next_position(self, position):
        """Return the position below position or raise IndexError."""
        return self._get_position(position)
    def prev_position(self, position):
        """Return the position above position or raise IndexError."""
        return self._get_position(position, prev=True)
    def set_focus(self, position):
        """Set the focus to position or raise IndexError."""
        self._focus_position = position
        self._modified()
        # If we set focus to anywhere but the last position, the user is
        # scrolling up:
        try:
            self.next_position(position)
        except IndexError:
            self._is_scrolling = False
        else:
            self._is_scrolling = True
    def get_focus(self):
        """Return (widget, position) tuple."""
        return (self[self._focus_position], self._focus_position)
class ConversationWidget(WidgetBase):
    """Widget for interacting with a conversation."""
    def __init__(self, client, coroutine_queue, conversation, set_title_cb,
                 keybindings, datetimefmt, keep_emoticons):
        self._client = client
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation
        self._conversation.on_event.add_observer(self._on_event)
        self._conversation.on_watermark_notification.add_observer(
            self._on_watermark_notification
        )
        self._keys = keybindings
        self._keep_emoticons = keep_emoticons
        self.title = ''
        # Callback used to report title changes to the tab container.
        self._set_title_cb = set_title_cb
        self._set_title()
        self._list_walker = ConversationEventListWalker(
            coroutine_queue, conversation, datetimefmt
        )
        self._list_box = ListBox(keybindings, self._list_walker)
        self._status_widget = StatusLineWidget(client, conversation)
        # Layout: message list on top, status line, then the input line.
        self._widget = urwid.Pile([
            ('weight', 1, self._list_box),
            ('pack', self._status_widget),
            ('pack', ReturnableEdit(self._on_return, keybindings,
                                    caption='Send message: ')),
        ])
        # focus the edit widget by default
        self._widget.focus_position = 2
        # Display any old ConversationEvents already attached to the
        # conversation.
        for event in self._conversation.events:
            self._on_event(event)
        super().__init__(self._widget)
    def get_menu_widget(self, close_callback):
        """Return the menu widget associated with this widget."""
        return ConversationMenu(
            self._coroutine_queue, self._conversation, close_callback,
            self._keys
        )
    def keypress(self, size, key):
        """Handle marking messages as read and keeping client active."""
        # Set the client as active.
        self._coroutine_queue.put(self._client.set_active())
        # Mark the newest event as read.
        self._coroutine_queue.put(self._conversation.update_read_timestamp())
        return super().keypress(size, key)
    def _set_title(self):
        """Update this conversation's tab title."""
        self.title = get_conv_name(self._conversation, show_unread=True,
                                   truncate=True)
        self._set_title_cb(self, self.title)
    def _on_return(self, text):
        """Called when the user presses return on the send message widget."""
        # Ignore if the user hasn't typed a message.
        if not text:
            return
        elif text.startswith('/image') and len(text.split(' ')) == 2:
            # Temporary UI for testing image uploads
            filename = text.split(' ')[1]
            try:
                image_file = open(filename, 'rb')
            except FileNotFoundError:
                message = 'Failed to find image {}'.format(filename)
                self._status_widget.show_message(message)
                return
            # The image is sent without accompanying text.
            text = ''
        else:
            image_file = None
        if not self._keep_emoticons:
            text = replace_emoticons(text)
        segments = hangups.ChatMessageSegment.from_str(text)
        self._coroutine_queue.put(
            self._handle_send_message(
                self._conversation.send_message(
                    segments, image_file=image_file
                )
            )
        )
    async def _handle_send_message(self, coro):
        """Handle showing an error if a message fails to send."""
        try:
            await coro
        except hangups.NetworkError:
            self._status_widget.show_message('Failed to send message')
    def _on_watermark_notification(self, _):
        """Handle watermark changes for this conversation."""
        # Update the unread count in the title.
        self._set_title()
    def _on_event(self, _):
        """Display a new conversation message."""
        # Update the title in case unread count or conversation name changed.
        self._set_title()
class TabbedWindowWidget(WidgetBase):
    """A widget that displays a list of widgets via a tab bar."""
    def __init__(self, keybindings):
        self._widgets = []  # [urwid.Widget]
        self._widget_title = {}  # {urwid.Widget: str}
        self._tab_index = None  # int
        self._keys = keybindings
        self._tabs = urwid.Text('')
        self._frame = urwid.Frame(None)
        super().__init__(urwid.Pile([
            ('pack', urwid.AttrMap(self._tabs, 'tab_background')),
            ('weight', 1, self._frame),
        ]))
    def get_current_widget(self):
        """Return the widget in the current tab."""
        return self._widgets[self._tab_index]
    def _update_tabs(self):
        """Update tab display."""
        text = []
        for num, widget in enumerate(self._widgets):
            # Highlight only the currently selected tab.
            palette = ('active_tab' if num == self._tab_index
                       else 'inactive_tab')
            text += [
                (palette, ' {} '.format(self._widget_title[widget])),
                ('tab_background', ' '),
            ]
        self._tabs.set_text(text)
        # Show the selected tab's widget in the frame body.
        self._frame.contents['body'] = (self._widgets[self._tab_index], None)
    def keypress(self, size, key):
        """Handle keypresses for changing tabs."""
        key = super().keypress(size, key)
        num_tabs = len(self._widgets)
        if key == self._keys['prev_tab']:
            # Modulo wraps from the first tab back to the last.
            self._tab_index = (self._tab_index - 1) % num_tabs
            self._update_tabs()
        elif key == self._keys['next_tab']:
            self._tab_index = (self._tab_index + 1) % num_tabs
            self._update_tabs()
        elif key == self._keys['close_tab']:
            # Don't allow closing the Conversations tab
            if self._tab_index > 0:
                curr_tab = self._widgets[self._tab_index]
                self._widgets.remove(curr_tab)
                del self._widget_title[curr_tab]
                self._tab_index -= 1
                self._update_tabs()
        else:
            # Unhandled keys propagate to the focused widget.
            return key
    def set_tab(self, widget, switch=False, title=None):
        """Add or modify a tab.
        If widget is not a tab, it will be added. If switch is True, switch to
        this tab. If title is given, set the tab's title.
        """
        if widget not in self._widgets:
            self._widgets.append(widget)
            self._widget_title[widget] = ''
        if switch:
            self._tab_index = self._widgets.index(widget)
        if title:
            self._widget_title[widget] = title
        self._update_tabs()
def set_terminal_title(title):
    """Set the terminal emulator's window title via an xterm escape code."""
    sys.stdout.write(f"\x1b]2;{title}\x07")
class _BracketedPasteMode:
    """Context manager that enables terminal bracketed paste mode.

    Writes the xterm enable sequence on entry and always writes the
    disable sequence on exit, even when the body raises.
    """
    def __enter__(self):
        sys.stdout.write('\x1b[?2004h')
        return None
    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout.write('\x1b[?2004l')
        return False
def bracketed_paste_mode():
    """Return a context manager for enabling/disabling bracketed paste mode."""
    return _BracketedPasteMode()
def dir_maker(path):
    """Create the parent directory of *path* if it does not already exist.

    Exits the process with an error message if the directory cannot be
    created. A path with no directory component is a no-op.
    """
    directory = os.path.dirname(path)
    if not directory:
        # Nothing to create for a bare filename.
        return
    try:
        # exist_ok avoids the check-then-create race (TOCTOU) that the
        # previous isdir()+makedirs() sequence had.
        os.makedirs(directory, exist_ok=True)
    except OSError as e:
        sys.exit('Failed to create directory: {}'.format(e))
# Mapping from the --notification-type CLI choice to the Notifier
# implementation that will be instantiated.
NOTIFIER_TYPES = {
    'none': notifier.Notifier,
    'default': notifier.DefaultNotifier,
    'bell': notifier.BellNotifier,
    'dbus': notifier.DbusNotifier,
    'apple': notifier.AppleNotifier,
}
def get_notifier(notification_type, disable_notifications):
    """Return the Notifier instance selected by the CLI options.

    The deprecated disable flag overrides the requested type with the
    no-op base Notifier.
    """
    notifier_cls = (notifier.Notifier if disable_notifications
                    else NOTIFIER_TYPES[notification_type])
    return notifier_cls()
def main():
    """Main entry point."""
    # Build default paths for files.
    dirs = appdirs.AppDirs('hangups', 'hangups')
    default_log_path = os.path.join(dirs.user_log_dir, 'hangups.log')
    default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt')
    default_config_path = 'hangups.conf'
    user_config_path = os.path.join(dirs.user_config_dir, 'hangups.conf')
    # Create a default empty config file if does not exist.
    dir_maker(user_config_path)
    if not os.path.isfile(user_config_path):
        with open(user_config_path, 'a', encoding="utf-8") as cfg:
            cfg.write("")
    # The config file in the working directory takes precedence over the
    # one in the user config directory.
    parser = configargparse.ArgumentParser(
        prog='hangups', default_config_files=[default_config_path,
                                              user_config_path],
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        add_help=False,  # Disable help so we can add it to the correct group.
    )
    general_group = parser.add_argument_group('General')
    general_group.add('-h', '--help', action='help',
                      help='show this help message and exit')
    general_group.add('--token-path', default=default_token_path,
                      help='path used to store OAuth refresh token')
    general_group.add('--date-format', default='< %y-%m-%d >',
                      help='date format string')
    general_group.add('--time-format', default='(%I:%M:%S %p)',
                      help='time format string')
    general_group.add('-c', '--config', help='configuration file path',
                      is_config_file=True, default=user_config_path)
    general_group.add('-v', '--version', action='version',
                      version='hangups {}'.format(hangups.__version__))
    general_group.add('-d', '--debug', action='store_true',
                      help='log detailed debugging messages')
    general_group.add('--manual-login', action='store_true',
                      help='enable manual login method')
    general_group.add('--log', default=default_log_path, help='log file path')
    general_group.add('--keep-emoticons', action='store_true',
                      help='do not replace emoticons with corresponding emoji')
    key_group = parser.add_argument_group('Keybindings')
    key_group.add('--key-next-tab', default='ctrl d',
                  help='keybinding for next tab')
    key_group.add('--key-prev-tab', default='ctrl u',
                  help='keybinding for previous tab')
    key_group.add('--key-close-tab', default='ctrl w',
                  help='keybinding for close tab')
    key_group.add('--key-quit', default='ctrl e',
                  help='keybinding for quitting')
    key_group.add('--key-menu', default='ctrl n',
                  help='keybinding for context menu')
    key_group.add('--key-up', default='k',
                  help='keybinding for alternate up key')
    key_group.add('--key-down', default='j',
                  help='keybinding for alternate down key')
    key_group.add('--key-page-up', default='ctrl b',
                  help='keybinding for alternate page up')
    key_group.add('--key-page-down', default='ctrl f',
                  help='keybinding for alternate page down')
    notification_group = parser.add_argument_group('Notifications')
    # deprecated in favor of --notification-type=none:
    notification_group.add('-n', '--disable-notifications',
                           action='store_true',
                           help=configargparse.SUPPRESS)
    notification_group.add('-D', '--discreet-notifications',
                           action='store_true',
                           help='hide message details in notifications')
    notification_group.add('--notification-type',
                           choices=sorted(NOTIFIER_TYPES.keys()),
                           default='default',
                           help='type of notifications to create')
    # add color scheme options
    col_group = parser.add_argument_group('Colors')
    col_group.add('--col-scheme', choices=COL_SCHEMES.keys(),
                  default='default', help='colour scheme to use')
    col_group.add('--col-palette-colors', choices=('16', '88', '256'),
                  default=16, help='Amount of available colors')
    for name in COL_SCHEME_NAMES:
        col_group.add('--col-' + name.replace('_', '-') + '-fg',
                      help=name + ' foreground color')
        col_group.add('--col-' + name.replace('_', '-') + '-bg',
                      help=name + ' background color')
    args = parser.parse_args()
    # Create all necessary directories.
    for path in [args.log, args.token_path]:
        dir_maker(path)
    logging.basicConfig(filename=args.log,
                        level=logging.DEBUG if args.debug else logging.WARNING,
                        format=LOG_FORMAT)
    # urwid makes asyncio's debugging logs VERY noisy, so adjust the log level:
    logging.getLogger('asyncio').setLevel(logging.WARNING)
    datetimefmt = {'date': args.date_format,
                   'time': args.time_format}
    # setup color scheme
    palette_colors = int(args.col_palette_colors)
    col_scheme = COL_SCHEMES[args.col_scheme]
    # Apply any per-name foreground/background overrides from the CLI.
    for name in COL_SCHEME_NAMES:
        col_scheme = add_color_to_scheme(col_scheme, name,
                                         getattr(args, 'col_' + name + '_fg'),
                                         getattr(args, 'col_' + name + '_bg'),
                                         palette_colors)
    keybindings = {
        'next_tab': args.key_next_tab,
        'prev_tab': args.key_prev_tab,
        'close_tab': args.key_close_tab,
        'quit': args.key_quit,
        'menu': args.key_menu,
        'up': args.key_up,
        'down': args.key_down,
        'page_up': args.key_page_up,
        'page_down': args.key_page_down,
    }
    notifier_ = get_notifier(
        args.notification_type, args.disable_notifications
    )
    try:
        # ChatUI runs the interface until the user quits.
        ChatUI(
            args.token_path, keybindings, col_scheme, palette_colors,
            datetimefmt, notifier_, args.discreet_notifications,
            args.manual_login, args.keep_emoticons
        )
    except KeyboardInterrupt:
        sys.exit('Caught KeyboardInterrupt, exiting abnormally')
if __name__ == '__main__':
    main()
| mit | b080acbabc3086f4ee7ded3812026b64 | 38.667755 | 79 | 0.579013 | 4.198099 | false | false | false | false |
"""Tests for the simple observer implementation."""
import asyncio
import pytest
from hangups import event
def coroutine_test(coro):
    """Decorator to run a coroutine function on its own event loop.

    A fresh event loop is created per call and is always closed afterwards
    (the previous implementation leaked it), and the coroutine's result is
    returned to the caller instead of being discarded.
    """
    def wrapper(*args, **kwargs):
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(coro(*args, **kwargs))
        finally:
            loop.close()
    return wrapper
@coroutine_test
async def test_event():
    """Observers receive only the events fired while they are registered."""
    fired = []
    async def first(arg):
        fired.append('a' + arg)
    async def second(arg):
        fired.append('b' + arg)
    ev = event.Event('MyEvent')
    ev.add_observer(first)
    await ev.fire('1')
    ev.add_observer(second)
    await ev.fire('2')
    ev.remove_observer(first)
    await ev.fire('3')
    ev.remove_observer(second)
    await ev.fire('4')
    assert fired == ['a1', 'a2', 'b2', 'b3']
@coroutine_test
async def test_function_observer():
    """A plain (non-coroutine) callable works as an observer."""
    received = []
    ev = event.Event('MyEvent')
    ev.add_observer(lambda arg: received.append('a' + arg))
    await ev.fire('1')
    assert received == ['a1']
@coroutine_test
async def test_coroutine_observer():
    """A coroutine function works as an observer and is awaited."""
    received = []
    async def observer(arg):
        received.append('a' + arg)
    ev = event.Event('MyEvent')
    ev.add_observer(observer)
    await ev.fire('1')
    assert received == ['a1']
def test_already_added():
    """Adding the same observer twice raises ValueError."""
    def observer(arg):
        print('A: got {}'.format(arg))
    ev = event.Event('MyEvent')
    ev.add_observer(observer)
    with pytest.raises(ValueError):
        ev.add_observer(observer)
def test_remove_nonexistent():
    """Removing an observer that was never added raises ValueError."""
    ev = event.Event('MyEvent')
    with pytest.raises(ValueError):
        ev.remove_observer(lambda a: print('A: got {}'.format(a)))
| mit | b8a7518ecc9c02988abe8c7cd8619b37 | 21.055556 | 77 | 0.594458 | 3.254098 | false | true | false | false |
import subprocess
import unittest.mock
from hangups.ui import notifier
# Canonical notification used by every test below.
NOTIFICATION = notifier.Notification(
    'John Cleese', 'Cheese Shop', 'How about a little red Leicester?'
)
# Patch subprocess.check_output to fake a gdbus reply carrying id 7.
MOCK_DBUS = unittest.mock.patch(
    'subprocess.check_output', autospec=True, return_value=b'(uint32 7,)\n'
)
# Patch subprocess.check_output to fake a silent osascript invocation.
MOCK_APPLE = unittest.mock.patch(
    'subprocess.check_output', autospec=True, return_value=b''
)
def test_bell_notifier(capsys):
    """BellNotifier rings the terminal bell on stdout."""
    notifier.BellNotifier().send(NOTIFICATION)
    out, err = capsys.readouterr()
    assert (out, err) == ('\a', '')
def test_dbus_notifier():
    """DbusNotifier shells out to gdbus with the expected argument list."""
    expected_argv = [
        'gdbus', 'call', '--session',
        '--dest', 'org.freedesktop.Notifications',
        '--object-path', '/org/freedesktop/Notifications',
        '--method', 'org.freedesktop.Notifications.Notify',
        'hangups', '0', '', 'John Cleese', 'How about a little red Leicester?',
        '[]', '{}', ' -1'
    ]
    with MOCK_DBUS as check_output:
        notifier.DbusNotifier().send(NOTIFICATION)
    check_output.assert_called_once_with(expected_argv,
                                         stderr=subprocess.STDOUT)
def test_dbus_notifier_replaces_id():
    """The second notification reuses the id returned by the first call."""
    sender = notifier.DbusNotifier()
    with MOCK_DBUS as check_output:
        for expected_id in ('0', '7'):
            sender.send(NOTIFICATION)
            assert check_output.call_args[0][0][10] == expected_id
def test_dbus_notifier_escaping():
    """Backslashes and quotes are escaped before being passed to gdbus."""
    hostile = notifier.Notification(
        '<b>title</b> \\ \' "', None, '<b>message</b> \\ \' "'
    )
    escaped = [
        '<b>title</b> \\\\ \\u0027 \\u0022',
        '<b>message</b> \\\\ \\u0027 \\u0022',
    ]
    with MOCK_DBUS as check_output:
        notifier.DbusNotifier().send(hostile)
    assert check_output.call_args[0][0][12:14] == escaped
def test_apple_notifier():
    """AppleNotifier shells out to osascript with a display script."""
    expected_argv = [
        'osascript', '-e',
        'display notification "How about a little red Leicester?" '
        'with title "John Cleese" subtitle "Cheese Shop"'
    ]
    with MOCK_APPLE as check_output:
        notifier.AppleNotifier().send(NOTIFICATION)
    check_output.assert_called_once_with(expected_argv,
                                         stderr=subprocess.STDOUT)
def test_apple_notifier_escaping():
    """Double quotes in every field are escaped in the osascript source."""
    hostile = notifier.Notification(
        'title "', 'subtitle "', 'message "'
    )
    with MOCK_APPLE as check_output:
        notifier.AppleNotifier().send(hostile)
    script = check_output.call_args[0][0][2]
    assert script == (
        'display notification "message \\"" '
        'with title "title \\"" subtitle "subtitle \\""'
    )
'with title "title \\"" subtitle "subtitle \\""'
)
def test_default_notifier():
default_notifier = notifier.DefaultNotifier()
# pylint: disable=protected-access
mock_send = unittest.mock.patch.object(
default_notifier._notifier, 'send', autospec=True
)
with mock_send as send:
default_notifier.send(NOTIFICATION)
send.assert_called_once_with(NOTIFICATION)
| mit | 9d98bfa75599b126eedac1202d3a8487 | 31.609195 | 79 | 0.63976 | 3.451338 | false | true | false | false |
import logging
from .default_permissions_endpoint import _DefaultPermissionsEndpoint
from .dqw_endpoint import _DataQualityWarningEndpoint
from .endpoint import api, Endpoint
from .exceptions import MissingRequiredFieldError
from .permissions_endpoint import _PermissionsEndpoint
from .. import RequestFactory, DatabaseItem, TableItem, PaginationItem, Resource
logger = logging.getLogger("tableau.endpoint.databases")
class Databases(Endpoint):
    """REST endpoint for database assets on a site (requires API >= 3.5).

    Provides CRUD operations plus permissions, default table permissions,
    and data-quality warning management for databases.
    """

    def __init__(self, parent_srv):
        super(Databases, self).__init__(parent_srv)
        # Helper endpoints resolve this endpoint's base URL lazily so the
        # site id is read at request time.
        self._permissions = _PermissionsEndpoint(parent_srv, lambda: self.baseurl)
        self._default_permissions = _DefaultPermissionsEndpoint(parent_srv, lambda: self.baseurl)
        self._data_quality_warnings = _DataQualityWarningEndpoint(parent_srv, Resource.Database)

    @property
    def baseurl(self):
        """Base URL for database requests on the signed-in site."""
        return "{0}/sites/{1}/databases".format(self.parent_srv.baseurl, self.parent_srv.site_id)

    @api(version="3.5")
    def get(self, req_options=None):
        """Return all databases on the site plus a PaginationItem."""
        logger.info("Querying all databases on site")
        url = self.baseurl
        server_response = self.get_request(url, req_options)
        pagination_item = PaginationItem.from_response(server_response.content, self.parent_srv.namespace)
        all_database_items = DatabaseItem.from_response(server_response.content, self.parent_srv.namespace)
        return all_database_items, pagination_item

    # Get 1 database
    @api(version="3.5")
    def get_by_id(self, database_id):
        """Return the single database identified by *database_id*.

        Raises ValueError when *database_id* is falsy.
        """
        if not database_id:
            error = "database ID undefined."
            raise ValueError(error)
        logger.info("Querying single database (ID: {0})".format(database_id))
        url = "{0}/{1}".format(self.baseurl, database_id)
        server_response = self.get_request(url)
        return DatabaseItem.from_response(server_response.content, self.parent_srv.namespace)[0]

    @api(version="3.5")
    def delete(self, database_id):
        """Delete the database identified by *database_id*."""
        if not database_id:
            error = "Database ID undefined."
            raise ValueError(error)
        url = "{0}/{1}".format(self.baseurl, database_id)
        self.delete_request(url)
        logger.info("Deleted single database (ID: {0})".format(database_id))

    @api(version="3.5")
    def update(self, database_item):
        """Persist changes to *database_item* and return the updated item.

        Raises MissingRequiredFieldError when the item has no ID (i.e. it
        was not retrieved from the server first).
        """
        if not database_item.id:
            error = "Database item missing ID."
            raise MissingRequiredFieldError(error)
        url = "{0}/{1}".format(self.baseurl, database_item.id)
        update_req = RequestFactory.Database.update_req(database_item)
        server_response = self.put_request(url, update_req)
        logger.info("Updated database item (ID: {0})".format(database_item.id))
        updated_database = DatabaseItem.from_response(server_response.content, self.parent_srv.namespace)[0]
        return updated_database

    # Not Implemented Yet
    @api(version="99")
    def populate_tables(self, database_item):
        """Lazily attach the database's tables to *database_item*."""
        if not database_item.id:
            error = "database item missing ID. database must be retrieved from server first."
            raise MissingRequiredFieldError(error)

        def table_fetcher():
            # Deferred fetch: only hits the server when tables are accessed.
            return self._get_tables_for_database(database_item)

        database_item._set_tables(table_fetcher)
        # Fixed: the format string previously lacked the closing ')'.
        logger.info("Populated tables for database (ID: {0})".format(database_item.id))

    def _get_tables_for_database(self, database_item):
        """Fetch and return the TableItems belonging to *database_item*."""
        url = "{0}/{1}/tables".format(self.baseurl, database_item.id)
        server_response = self.get_request(url)
        tables = TableItem.from_response(server_response.content, self.parent_srv.namespace)
        return tables

    @api(version="3.5")
    def populate_permissions(self, item):
        """Populate explicit permissions on *item*."""
        self._permissions.populate(item)

    @api(version="3.5")
    def update_permission(self, item, rules):
        """Deprecated alias for :meth:`update_permissions`."""
        import warnings

        warnings.warn(
            "Server.databases.update_permission is deprecated, "
            "please use Server.databases.update_permissions instead.",
            DeprecationWarning,
        )
        return self._permissions.update(item, rules)

    @api(version="3.5")
    def update_permissions(self, item, rules):
        """Apply the permission *rules* to *item*."""
        return self._permissions.update(item, rules)

    @api(version="3.5")
    def delete_permission(self, item, rules):
        """Remove the permission *rules* from *item*."""
        self._permissions.delete(item, rules)

    @api(version="3.5")
    def populate_table_default_permissions(self, item):
        """Populate the default permissions for tables under *item*."""
        self._default_permissions.populate_default_permissions(item, Resource.Table)

    @api(version="3.5")
    def update_table_default_permissions(self, item):
        """Update the default permissions for tables under *item*."""
        return self._default_permissions.update_default_permissions(item, Resource.Table)

    @api(version="3.5")
    def delete_table_default_permissions(self, item):
        """Delete the default permissions for tables under *item*."""
        self._default_permissions.delete_default_permission(item, Resource.Table)

    @api(version="3.5")
    def populate_dqw(self, item):
        """Populate data-quality warnings on *item*."""
        self._data_quality_warnings.populate(item)

    @api(version="3.5")
    def update_dqw(self, item, warning):
        """Update an existing data-quality warning on *item*."""
        return self._data_quality_warnings.update(item, warning)

    @api(version="3.5")
    def add_dqw(self, item, warning):
        """Add a data-quality warning to *item*."""
        return self._data_quality_warnings.add(item, warning)

    @api(version="3.5")
    def delete_dqw(self, item):
        """Remove all data-quality warnings from *item*."""
        self._data_quality_warnings.clear(item)
| mit | 2379da72b491dc2a2f9a0025e949bac7 | 38.303704 | 108 | 0.671504 | 3.881492 | false | false | false | false |
####
# This script demonstrates how to set the refresh schedule for
# a workbook or datasource.
#
# To run the script, you must have installed Python 3.7 or later.
####
import argparse
import logging
import tableauserverclient as TSC
def usage(args):
    """Parse *args* (sys.argv style list) and return the options namespace."""
    arg_parser = argparse.ArgumentParser(description="Set refresh schedule for a workbook or datasource.")
    # Common options; please keep those in sync across all samples
    arg_parser.add_argument("--server", "-s", required=True, help="server address")
    arg_parser.add_argument("--site", "-S", help="site name")
    arg_parser.add_argument(
        "--token-name", "-p", required=True, help="name of the personal access token used to sign into the server"
    )
    arg_parser.add_argument(
        "--token-value", "-v", required=True, help="value of the personal access token used to sign into the server"
    )
    arg_parser.add_argument(
        "--logging-level",
        "-l",
        choices=["debug", "info", "error"],
        default="error",
        help="desired logging level (set to error by default)",
    )
    # Options specific to this sample: exactly one schedulable target.
    target_group = arg_parser.add_mutually_exclusive_group(required=True)
    target_group.add_argument("--workbook", "-w")
    target_group.add_argument("--datasource", "-d")
    target_group.add_argument("--flow", "-f")
    arg_parser.add_argument("schedule")
    return arg_parser.parse_args(args)
def make_filter(**kwargs):
    """Build a RequestOptions filter from Field-name keyword arguments.

    Each keyword must be the name of a ``TSC.RequestOptions.Field`` attribute;
    its value is matched with the Equals operator.
    """
    options = TSC.RequestOptions()
    for field_name, field_value in kwargs.items():
        field = getattr(TSC.RequestOptions.Field, field_name)
        options.filter.add(TSC.Filter(field, TSC.RequestOptions.Operator.Equals, field_value))
    return options
def get_workbook_by_name(server, name):
    """Return the single workbook named *name* (fails unless exactly one matches)."""
    matches, _ = server.workbooks.get(make_filter(Name=name))
    assert len(matches) == 1
    return matches.pop()
def get_datasource_by_name(server, name):
    """Return the single datasource named *name* (fails unless exactly one matches)."""
    matches, _ = server.datasources.get(make_filter(Name=name))
    assert len(matches) == 1
    return matches.pop()
def get_flow_by_name(server, name):
    """Return the single flow named *name* (fails unless exactly one matches)."""
    matches, _ = server.flows.get(make_filter(Name=name))
    assert len(matches) == 1
    return matches.pop()
def get_schedule_by_name(server, name):
    """Return the single schedule named *name* (fails unless exactly one matches)."""
    matching = []
    for schedule in TSC.Pager(server.schedules):
        if schedule.name == name:
            matching.append(schedule)
    assert len(matching) == 1
    return matching.pop()
def assign_to_schedule(server, workbook_or_datasource, schedule):
    """Attach *workbook_or_datasource* to the refresh *schedule* on the server."""
    target_schedule_id = schedule.id
    server.schedules.add_to_schedule(target_schedule_id, workbook_or_datasource)
def run(args):
    """Sign in, resolve the target item and schedule, and link them.

    *args* is the namespace produced by usage(); exactly one of
    workbook/datasource/flow is expected to be set (enforced by argparse).
    """
    # Set logging level based on user input, or error by default
    logging_level = getattr(logging, args.logging_level.upper())
    logging.basicConfig(level=logging_level)
    # Step 1: Sign in to server.
    tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
    server = TSC.Server(args.server, use_server_version=True)
    with server.auth.sign_in(tableau_auth):
        # Step 2: Resolve the item to schedule by its name.
        if args.workbook:
            item = get_workbook_by_name(server, args.workbook)
        elif args.datasource:
            item = get_datasource_by_name(server, args.datasource)
        elif args.flow:
            item = get_flow_by_name(server, args.flow)
        else:
            # Defensive fallback; argparse's required group should prevent this.
            print("A scheduleable item must be included")
            return
        # Step 3: Resolve the schedule and attach the item to it.
        schedule = get_schedule_by_name(server, args.schedule)
        assign_to_schedule(server, item, schedule)
def main():
    """Script entry point: parse CLI arguments and execute the sample."""
    import sys

    run(usage(sys.argv[1:]))
# Allow running this sample directly from the command line.
if __name__ == "__main__":
    main()
| mit | 1850edbe771a4c38cbafaa3337d108b7 | 30.451327 | 116 | 0.664603 | 3.648871 | false | false | false | false |
tableau/server-client-python | tableauserverclient/_version.py | 1 | 18449 | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information.

    The returned dict contains the raw $Format$ placeholders unless this
    file was produced by 'git archive', which substitutes real values.
    """
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose) are assigned by get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = "v"
    # NOTE: this is the literal *string* "None" (a versioneer quirk), which
    # is truthy and therefore enables the parentdir fallback in get_versions().
    cfg.parentdir_prefix = "None"
    cfg.versionfile_source = "tableauserverclient/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow by get_versions() to fall through to the next
    version-discovery strategy.
    """
# Template registry kept for versioneer compatibility; unused at runtime here.
LONG_VERSION_PY = {}  # type: ignore
# Registry of VCS handler callables, filled in by @register_vcs_handler.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS.

    Registers the decorated function under HANDLERS[vcs][method] and
    returns it unchanged.
    """

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Tries each candidate executable name in *commands* in order (e.g. "git"
    then "git.cmd" on Windows) with *args*.  Returns a (stdout, returncode)
    tuple on success, (None, None) when no candidate could be launched, or
    (None, returncode) when the process exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen(
                [c] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-loop fell through: no candidate could be started at all.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string.  Up to two extra directory levels
    above *root* are also searched for a basename starting with
    *parentdir_prefix*; the remainder of that name is taken as the version.
    """
    tried = []
    current = root
    for _ in range(3):
        name = os.path.basename(current)
        if name.startswith(parentdir_prefix):
            return {
                "version": name[len(parentdir_prefix) :],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(current)
        current = os.path.dirname(current)  # step up one directory level
    if verbose:
        print("Tried directories %s but none started with prefix %s" % (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return the next local-version separator: '.' once a '+' exists, else '+'."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    TAG[+DISTANCE.gHEX[.dirty]] -- a dirty tagged build becomes
    TAG+0.gHEX.dirty.  With no tag at all (git-describe yielded bare HEX)
    the result is 0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        sep = "." if "+" in tag else "+"  # inlined plus_or_dot()
        version += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- no -dirty marker.

    With no tag at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" marks a dirty tree (it sorts before the corresponding
    clean build).  With no tag: 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        sep = "." if "+" in tag else "+"  # inlined plus_or_dot()
        version += "%sg%s" % (sep, pieces["short"])
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] -- ".dev0" marks a dirty tree.

    With no tag: 0.postDISTANCE[.dev0].
    """
    suffix = ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        suffix += ".dev0"
    tag = pieces["closest-tag"]
    if tag:
        # A clean, exactly-tagged build is just the tag itself.
        return tag + suffix if (pieces["distance"] or pieces["dirty"]) else tag
    # exception #1: no tags
    return "0" + suffix
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty --always'.

    With no tag the bare short HEX is used (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty --always -long'.

    The distance/hash part is unconditional; with no tag the bare short HEX
    is used (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render(pieces, style):
    """Render the given version pieces into the requested style.

    Raises ValueError for an unrecognized style name; a falsy style or
    "default" selects "pep440".
    """
    if pieces["error"]:
        # Parsing failed earlier; propagate the error information verbatim.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    chosen = style if style else "default"
    if chosen == "default":
        chosen = "pep440"  # the default style

    if chosen == "pep440":
        rendered = render_pep440(pieces)
    elif chosen == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif chosen == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif chosen == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif chosen == "git-describe":
        rendered = render_git_describe(pieces)
    elif chosen == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % chosen)

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so.

    Strategies, in order: expanded git-archive keywords; 'git describe' on a
    checked-out tree; the parent-directory-name fallback.  Each strategy
    signals "not applicable" by raising NotThisMethod.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ at all (frozen interpreter): nothing else to try.
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
| mit | 6e9246c48021c7deada821eba7872819 | 32.482759 | 105 | 0.5764 | 3.902898 | false | false | false | false |
tableau/server-client-python | tableauserverclient/server/endpoint/subscriptions_endpoint.py | 1 | 3224 | import logging
from .endpoint import Endpoint, api
from .exceptions import MissingRequiredFieldError
from .. import RequestFactory, SubscriptionItem, PaginationItem
logger = logging.getLogger("tableau.endpoint.subscriptions")
from typing import List, Optional, TYPE_CHECKING, Tuple
if TYPE_CHECKING:
from ..request_options import RequestOptions
class Subscriptions(Endpoint):
    """REST endpoint for listing, creating, updating and deleting subscriptions."""

    @property
    def baseurl(self) -> str:
        # All subscription routes are scoped to the signed-in site.
        return "{0}/sites/{1}/subscriptions".format(self.parent_srv.baseurl, self.parent_srv.site_id)

    @api(version="2.3")
    def get(self, req_options: Optional["RequestOptions"] = None) -> Tuple[List[SubscriptionItem], PaginationItem]:
        """Return one page of the site's subscriptions plus pagination info."""
        logger.info("Querying all subscriptions for the site")
        url = self.baseurl
        server_response = self.get_request(url, req_options)
        pagination_item = PaginationItem.from_response(server_response.content, self.parent_srv.namespace)
        all_subscriptions = SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)
        return all_subscriptions, pagination_item

    @api(version="2.3")
    def get_by_id(self, subscription_id: str) -> SubscriptionItem:
        """Return the subscription with the given LUID.

        Raises ValueError when *subscription_id* is falsy.
        """
        if not subscription_id:
            error = "No Subscription ID provided"
            raise ValueError(error)
        logger.info("Querying a single subscription by id ({})".format(subscription_id))
        url = "{}/{}".format(self.baseurl, subscription_id)
        server_response = self.get_request(url)
        return SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)[0]

    @api(version="2.3")
    def create(self, subscription_item: SubscriptionItem) -> SubscriptionItem:
        """Create *subscription_item* on the server; returns the stored copy.

        Raises ValueError when *subscription_item* is falsy.
        """
        if not subscription_item:
            # Bug fix: the message previously misspelled "Susbcription".
            error = "No Subscription provided"
            raise ValueError(error)
        logger.info("Creating a subscription ({})".format(subscription_item))
        url = self.baseurl
        create_req = RequestFactory.Subscription.create_req(subscription_item)
        server_response = self.post_request(url, create_req)
        return SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)[0]

    @api(version="2.3")
    def delete(self, subscription_id: str) -> None:
        """Delete the subscription with the given LUID.

        Raises ValueError when *subscription_id* is falsy.
        """
        if not subscription_id:
            error = "Subscription ID undefined."
            raise ValueError(error)
        url = "{0}/{1}".format(self.baseurl, subscription_id)
        self.delete_request(url)
        logger.info("Deleted subscription (ID: {0})".format(subscription_id))

    @api(version="2.3")
    def update(self, subscription_item: SubscriptionItem) -> SubscriptionItem:
        """Push local changes on *subscription_item* back to the server.

        The item must carry the server-assigned id (i.e. have been fetched
        first); otherwise MissingRequiredFieldError is raised.
        """
        if not subscription_item.id:
            error = "Subscription item missing ID. Subscription must be retrieved from server first."
            raise MissingRequiredFieldError(error)
        url = "{0}/{1}".format(self.baseurl, subscription_item.id)
        update_req = RequestFactory.Subscription.update_req(subscription_item)
        server_response = self.put_request(url, update_req)
        logger.info("Updated subscription item (ID: {0})".format(subscription_item.id))
        return SubscriptionItem.from_response(server_response.content, self.parent_srv.namespace)[0]
tableau/server-client-python | test/test_fileuploads.py | 1 | 2568 | import os
import unittest
import requests_mock
from tableauserverclient.server import Server
from ._utils import asset
# Canned XML response fixtures used by the upload tests below.
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), "assets")
FILEUPLOAD_INITIALIZE = os.path.join(TEST_ASSET_DIR, "fileupload_initialize.xml")
FILEUPLOAD_APPEND = os.path.join(TEST_ASSET_DIR, "fileupload_append.xml")
class FileuploadsTests(unittest.TestCase):
    """Unit tests for the chunked file-upload endpoint (no real server; HTTP is mocked)."""

    def setUp(self):
        self.server = Server("http://test", False)
        # Fake sign in: set the private fields a real sign-in would populate.
        self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67"
        self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM"
        self.baseurl = "{}/sites/{}/fileUploads".format(self.server.baseurl, self.server.site_id)

    def test_read_chunks_file_path(self):
        """_read_chunks accepts a filesystem path and yields non-empty chunks."""
        file_path = asset("SampleWB.twbx")
        chunks = self.server.fileuploads._read_chunks(file_path)
        for chunk in chunks:
            self.assertIsNotNone(chunk)

    def test_read_chunks_file_object(self):
        """_read_chunks also accepts an already-open binary file object."""
        with open(asset("SampleWB.twbx"), "rb") as f:
            chunks = self.server.fileuploads._read_chunks(f)
            for chunk in chunks:
                self.assertIsNotNone(chunk)

    def test_upload_chunks_file_path(self):
        """upload() initializes a session, PUTs chunks, and returns the upload id."""
        file_path = asset("SampleWB.twbx")
        upload_id = "7720:170fe6b1c1c7422dadff20f944d58a52-1:0"
        with open(FILEUPLOAD_INITIALIZE, "rb") as f:
            initialize_response_xml = f.read().decode("utf-8")
        with open(FILEUPLOAD_APPEND, "rb") as f:
            append_response_xml = f.read().decode("utf-8")
        with requests_mock.mock() as m:
            m.post(self.baseurl, text=initialize_response_xml)
            m.put(self.baseurl + "/" + upload_id, text=append_response_xml)
            actual = self.server.fileuploads.upload(file_path)
        self.assertEqual(upload_id, actual)

    def test_upload_chunks_file_object(self):
        """Same as above, but starting from an open file object instead of a path."""
        upload_id = "7720:170fe6b1c1c7422dadff20f944d58a52-1:0"
        with open(asset("SampleWB.twbx"), "rb") as file_content:
            with open(FILEUPLOAD_INITIALIZE, "rb") as f:
                initialize_response_xml = f.read().decode("utf-8")
            with open(FILEUPLOAD_APPEND, "rb") as f:
                append_response_xml = f.read().decode("utf-8")
            with requests_mock.mock() as m:
                m.post(self.baseurl, text=initialize_response_xml)
                m.put(self.baseurl + "/" + upload_id, text=append_response_xml)
                actual = self.server.fileuploads.upload(file_content)
        self.assertEqual(upload_id, actual)
| mit | f588eaf32961b6a2abcb262735a77ddf | 39.125 | 97 | 0.63824 | 3.309278 | false | true | false | false |
tableau/server-client-python | test/test_flow.py | 1 | 7281 | import unittest
import requests_mock
import tableauserverclient as TSC
from tableauserverclient.datetime_helpers import format_datetime
from ._utils import read_xml_asset, asset
GET_XML = "flow_get.xml"
POPULATE_CONNECTIONS_XML = "flow_populate_connections.xml"
POPULATE_PERMISSIONS_XML = "flow_populate_permissions.xml"
UPDATE_XML = "flow_update.xml"
REFRESH_XML = "flow_refresh.xml"
class FlowTests(unittest.TestCase):
    """Unit tests for the flows endpoint (HTTP mocked with canned XML responses)."""

    def setUp(self) -> None:
        self.server = TSC.Server("http://test", False)
        # Fake signin: set the private fields a real sign-in would populate.
        self.server._site_id = "dad65087-b08b-4603-af4e-2887b8aafc67"
        self.server._auth_token = "j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM"
        self.server.version = "3.5"
        self.baseurl = self.server.flows.baseurl

    def test_get(self) -> None:
        """get() parses every field of each flow in the paged response."""
        response_xml = read_xml_asset(GET_XML)
        with requests_mock.mock() as m:
            m.get(self.baseurl, text=response_xml)
            all_flows, pagination_item = self.server.flows.get()
        self.assertEqual(5, pagination_item.total_available)
        self.assertEqual("587daa37-b84d-4400-a9a2-aa90e0be7837", all_flows[0].id)
        self.assertEqual("http://tableauserver/#/flows/1", all_flows[0].webpage_url)
        self.assertEqual("2019-06-16T21:43:28Z", format_datetime(all_flows[0].created_at))
        self.assertEqual("2019-06-16T21:43:28Z", format_datetime(all_flows[0].updated_at))
        self.assertEqual("Default", all_flows[0].project_name)
        self.assertEqual("FlowOne", all_flows[0].name)
        self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", all_flows[0].project_id)
        self.assertEqual("7ebb3f20-0fd2-4f27-a2f6-c539470999e2", all_flows[0].owner_id)
        self.assertEqual({"i_love_tags"}, all_flows[0].tags)
        self.assertEqual("Descriptive", all_flows[0].description)
        self.assertEqual("5c36be69-eb30-461b-b66e-3e2a8e27cc35", all_flows[1].id)
        self.assertEqual("http://tableauserver/#/flows/4", all_flows[1].webpage_url)
        self.assertEqual("2019-06-18T03:08:19Z", format_datetime(all_flows[1].created_at))
        self.assertEqual("2019-06-18T03:08:19Z", format_datetime(all_flows[1].updated_at))
        self.assertEqual("Default", all_flows[1].project_name)
        self.assertEqual("FlowTwo", all_flows[1].name)
        self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", all_flows[1].project_id)
        self.assertEqual("9127d03f-d996-405f-b392-631b25183a0f", all_flows[1].owner_id)

    def test_update(self) -> None:
        """update() PUTs the item and returns the server's updated copy."""
        response_xml = read_xml_asset(UPDATE_XML)
        with requests_mock.mock() as m:
            m.put(self.baseurl + "/587daa37-b84d-4400-a9a2-aa90e0be7837", text=response_xml)
            single_datasource = TSC.FlowItem("test", "aa23f4ac-906f-11e9-86fb-3f0f71412e77")
            single_datasource.owner_id = "7ebb3f20-0fd2-4f27-a2f6-c539470999e2"
            single_datasource._id = "587daa37-b84d-4400-a9a2-aa90e0be7837"
            single_datasource.description = "So fun to see"
            single_datasource = self.server.flows.update(single_datasource)
        self.assertEqual("587daa37-b84d-4400-a9a2-aa90e0be7837", single_datasource.id)
        self.assertEqual("aa23f4ac-906f-11e9-86fb-3f0f71412e77", single_datasource.project_id)
        self.assertEqual("7ebb3f20-0fd2-4f27-a2f6-c539470999e2", single_datasource.owner_id)
        self.assertEqual("So fun to see", single_datasource.description)

    def test_populate_connections(self) -> None:
        """populate_connections() attaches all three parsed connections to the item."""
        response_xml = read_xml_asset(POPULATE_CONNECTIONS_XML)
        with requests_mock.mock() as m:
            m.get(self.baseurl + "/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections", text=response_xml)
            single_datasource = TSC.FlowItem("test", "aa23f4ac-906f-11e9-86fb-3f0f71412e77")
            single_datasource.owner_id = "dd2239f6-ddf1-4107-981a-4cf94e415794"
            single_datasource._id = "9dbd2263-16b5-46e1-9c43-a76bb8ab65fb"
            self.server.flows.populate_connections(single_datasource)
            self.assertEqual("9dbd2263-16b5-46e1-9c43-a76bb8ab65fb", single_datasource.id)
            connections = single_datasource.connections
        self.assertTrue(connections)
        conn1, conn2, conn3 = connections
        self.assertEqual("405c1e4b-60c9-499f-9c47-a4ef1af69359", conn1.id)
        self.assertEqual("excel-direct", conn1.connection_type)
        self.assertEqual("", conn1.server_address)
        self.assertEqual("", conn1.username)
        self.assertEqual(False, conn1.embed_password)
        self.assertEqual("b47f41b1-2c47-41a3-8b17-a38ebe8b340c", conn2.id)
        self.assertEqual("sqlserver", conn2.connection_type)
        self.assertEqual("test.database.com", conn2.server_address)
        self.assertEqual("bob", conn2.username)
        self.assertEqual(False, conn2.embed_password)
        self.assertEqual("4f4a3b78-0554-43a7-b327-9605e9df9dd2", conn3.id)
        self.assertEqual("tableau-server-site", conn3.connection_type)
        self.assertEqual("http://tableauserver", conn3.server_address)
        self.assertEqual("sally", conn3.username)
        self.assertEqual(True, conn3.embed_password)

    def test_populate_permissions(self) -> None:
        """populate_permissions() parses grantee and capability pairs."""
        with open(asset(POPULATE_PERMISSIONS_XML), "rb") as f:
            response_xml = f.read().decode("utf-8")
        with requests_mock.mock() as m:
            m.get(self.baseurl + "/0448d2ed-590d-4fa0-b272-a2a8a24555b5/permissions", text=response_xml)
            single_datasource = TSC.FlowItem("test")
            single_datasource._id = "0448d2ed-590d-4fa0-b272-a2a8a24555b5"
            self.server.flows.populate_permissions(single_datasource)
            permissions = single_datasource.permissions
            self.assertEqual(permissions[0].grantee.tag_name, "group")
            self.assertEqual(permissions[0].grantee.id, "aa42f384-906f-11e9-86fc-bb24278874b9")
            self.assertDictEqual(
                permissions[0].capabilities,
                {
                    TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow,
                    TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow,
                },
            )

    def test_refresh(self) -> None:
        """refresh() POSTs a run request and returns the async job with its flow run."""
        with open(asset(REFRESH_XML), "rb") as f:
            response_xml = f.read().decode("utf-8")
        with requests_mock.mock() as m:
            m.post(self.baseurl + "/92967d2d-c7e2-46d0-8847-4802df58f484/run", text=response_xml)
            flow_item = TSC.FlowItem("test")
            flow_item._id = "92967d2d-c7e2-46d0-8847-4802df58f484"
            refresh_job = self.server.flows.refresh(flow_item)
            self.assertEqual(refresh_job.id, "d1b2ccd0-6dfa-444a-aee4-723dbd6b7c9d")
            self.assertEqual(refresh_job.mode, "Asynchronous")
            self.assertEqual(refresh_job.type, "RunFlow")
            self.assertEqual(format_datetime(refresh_job.created_at), "2018-05-22T13:00:29Z")
            self.assertIsInstance(refresh_job.flow_run, TSC.FlowRunItem)
            self.assertEqual(refresh_job.flow_run.id, "e0c3067f-2333-4eee-8028-e0a56ca496f6")
            self.assertEqual(refresh_job.flow_run.flow_id, "92967d2d-c7e2-46d0-8847-4802df58f484")
            self.assertEqual(format_datetime(refresh_job.flow_run.started_at), "2018-05-22T13:00:29Z")
tableau/server-client-python | tableauserverclient/models/property_decorators.py | 1 | 4709 | import datetime
import re
from functools import wraps
from ..datetime_helpers import parse_datetime
def property_is_enum(enum_type):
    """Return a setter decorator rejecting values not named on *enum_type*.

    ``None`` is always accepted; any other value must be the name of an
    attribute on the enum class, otherwise ``ValueError`` is raised.
    """

    def decorator(func):
        @wraps(func)
        def checked_setter(self, value):
            # Membership is tested by attribute name, so the string member
            # names received from the REST API validate against the class.
            is_valid = value is None or hasattr(enum_type, value)
            if not is_valid:
                raise ValueError(
                    "Invalid value: {0}. {1} must be of type {2}.".format(value, func.__name__, enum_type.__name__)
                )
            return func(self, value)

        return checked_setter

    return decorator
def property_is_boolean(func):
    """Setter decorator that only accepts genuine ``bool`` values."""

    @wraps(func)
    def checked_setter(self, value):
        # Reject ints and truthy strings alike - only real booleans pass.
        if isinstance(value, bool):
            return func(self, value)
        raise ValueError("Boolean expected for {0} flag.".format(func.__name__))

    return checked_setter
def property_not_nullable(func):
    """Setter decorator that rejects ``None`` (other falsy values are allowed)."""

    @wraps(func)
    def checked_setter(self, value):
        if value is None:
            raise ValueError("{0} must be defined.".format(func.__name__))
        return func(self, value)

    return checked_setter
def property_not_empty(func):
    """Setter decorator that rejects falsy values ("" , None, 0, [] ...)."""

    @wraps(func)
    def checked_setter(self, value):
        if value:
            return func(self, value)
        raise ValueError("{0} must not be empty.".format(func.__name__))

    return checked_setter
def property_is_valid_time(func):
    """Setter decorator accepting any time-like object.

    Duck-typed: the value passes if it exposes at least one of ``hour``,
    ``minute`` or ``second`` (``datetime.time`` and ``datetime.datetime``
    both qualify).
    """

    @wraps(func)
    def checked_setter(self, value):
        if hasattr(value, "hour") or hasattr(value, "minute") or hasattr(value, "second"):
            return func(self, value)
        raise ValueError("Invalid time object defined.")

    return checked_setter
def property_is_int(range, allowed=None):
    """Build a setter decorator constraining an int-valued property.

    ``range`` is an inclusive ``(min, max)`` tuple, or ``None`` to accept any
    int.  ``allowed`` lists sentinel values permitted outside the range --
    e.g. revisions allow 2-10000 but use -1 to mean 'unlimited'.
    """
    sentinels = () if allowed is None else allowed  # empty tuple: fast no-op test

    def decorator(func):
        @wraps(func)
        def checked_setter(self, value):
            message = "Invalid property defined: '{}'. Integer value expected.".format(value)
            if range is None:
                if not isinstance(value, int):
                    raise ValueError(message)
            else:
                lower, upper = range
                out_of_range = value < lower or value > upper
                if out_of_range and value not in sentinels:
                    raise ValueError(message)
            return func(self, value)

        return checked_setter

    return decorator
def property_matches(regex_to_match, error):
    """Build a setter decorator validating string values against a regex.

    *error* is the ValueError message raised when *regex_to_match* does not
    match (the pattern is compiled once, at decoration time).
    """
    pattern = re.compile(regex_to_match)

    def wrapper(func):
        @wraps(func)
        def checked_setter(self, value):
            if pattern.match(value) is None:
                raise ValueError(error)
            return func(self, value)

        return checked_setter

    return wrapper
def property_is_datetime(func):
    """Setter decorator that coerces ISO-8601 strings into datetimes.

    ``datetime`` instances pass straight through; strings such as
    ``2016-08-18T18:25:36Z`` are parsed (assumed UTC, so the result is
    timezone-aware); any other type raises ``ValueError``.
    """

    @wraps(func)
    def checked_setter(self, value):
        if isinstance(value, datetime.datetime):
            return func(self, value)
        if isinstance(value, str):
            return func(self, parse_datetime(value))
        raise ValueError(
            "Cannot convert {} into a datetime, cannot update {}".format(value.__class__.__name__, func.__name__)
        )

    return checked_setter
def property_is_data_acceleration_config(func):
    """Decorator for property setters: validate a data-acceleration config.

    The assigned value must be a dict containing exactly the four keys
    ``acceleration_enabled``, ``accelerate_now``, ``last_updated_at`` and
    ``acceleration_status``; anything else raises ValueError with a
    message describing what was expected.
    """
    # The complete, required key set -- the check below demands exactly these.
    required_keys = (
        "acceleration_enabled",
        "accelerate_now",
        "last_updated_at",
        "acceleration_status",
    )

    @wraps(func)
    def wrapper(self, value):
        if not isinstance(value, dict):
            # Fixed: original message had a stray ')' at the end.
            raise ValueError(
                "{} is not type 'dict', cannot update {}".format(value.__class__.__name__, func.__name__)
            )
        # Exactly the four known keys: no extras, none missing.
        if len(value) != len(required_keys) or not all(attr in value for attr in required_keys):
            # Fixed: the original error claimed "2 keys" and listed only
            # two of them, although the check above requires all four.
            error = "{} should have {} keys ".format(func.__name__, len(required_keys))
            error += ", ".join("'{}'".format(key) for key in required_keys)
            error += "; instead you have {}".format(value.keys())
            raise ValueError(error)
        return func(self, value)

    return wrapper
| mit | 5e90e2275e13bece184ea506d49894f8 | 27.889571 | 120 | 0.589722 | 4.280909 | false | false | false | false |
tableau/server-client-python | tableauserverclient/models/metric_item.py | 1 | 5102 | import xml.etree.ElementTree as ET
from ..datetime_helpers import parse_datetime
from .property_decorators import property_is_boolean, property_is_datetime
from .tag_item import TagItem
from typing import List, Optional, TYPE_CHECKING, Set
if TYPE_CHECKING:
from datetime import datetime
class MetricItem(object):
def __init__(self, name: Optional[str] = None):
self._id: Optional[str] = None
self._name: Optional[str] = name
self._description: Optional[str] = None
self._webpage_url: Optional[str] = None
self._created_at: Optional["datetime"] = None
self._updated_at: Optional["datetime"] = None
self._suspended: Optional[bool] = None
self._project_id: Optional[str] = None
self._project_name: Optional[str] = None
self._owner_id: Optional[str] = None
self._view_id: Optional[str] = None
self._initial_tags: Set[str] = set()
self.tags: Set[str] = set()
@property
def id(self) -> Optional[str]:
return self._id
@id.setter
def id(self, value: Optional[str]) -> None:
self._id = value
@property
def name(self) -> Optional[str]:
return self._name
@name.setter
def name(self, value: Optional[str]) -> None:
self._name = value
@property
def description(self) -> Optional[str]:
return self._description
@description.setter
def description(self, value: Optional[str]) -> None:
self._description = value
@property
def webpage_url(self) -> Optional[str]:
return self._webpage_url
@property
def created_at(self) -> Optional["datetime"]:
return self._created_at
@created_at.setter
@property_is_datetime
def created_at(self, value: "datetime") -> None:
self._created_at = value
@property
def updated_at(self) -> Optional["datetime"]:
return self._updated_at
@updated_at.setter
@property_is_datetime
def updated_at(self, value: "datetime") -> None:
self._updated_at = value
@property
def suspended(self) -> Optional[bool]:
return self._suspended
@suspended.setter
@property_is_boolean
def suspended(self, value: bool) -> None:
self._suspended = value
@property
def project_id(self) -> Optional[str]:
return self._project_id
@project_id.setter
def project_id(self, value: Optional[str]) -> None:
self._project_id = value
@property
def project_name(self) -> Optional[str]:
return self._project_name
@project_name.setter
def project_name(self, value: Optional[str]) -> None:
self._project_name = value
@property
def owner_id(self) -> Optional[str]:
return self._owner_id
@owner_id.setter
def owner_id(self, value: Optional[str]) -> None:
self._owner_id = value
@property
def view_id(self) -> Optional[str]:
return self._view_id
@view_id.setter
def view_id(self, value: Optional[str]) -> None:
self._view_id = value
def __repr__(self):
return "<MetricItem# name={_name} id={_id} owner_id={_owner_id}>".format(**vars(self))
@classmethod
def from_response(
cls,
resp: bytes,
ns,
) -> List["MetricItem"]:
all_metric_items = list()
parsed_response = ET.fromstring(resp)
all_metric_xml = parsed_response.findall(".//t:metric", namespaces=ns)
for metric_xml in all_metric_xml:
metric_item = cls()
metric_item._id = metric_xml.get("id", None)
metric_item._name = metric_xml.get("name", None)
metric_item._description = metric_xml.get("description", None)
metric_item._webpage_url = metric_xml.get("webpageUrl", None)
metric_item._created_at = parse_datetime(metric_xml.get("createdAt", None))
metric_item._updated_at = parse_datetime(metric_xml.get("updatedAt", None))
metric_item._suspended = string_to_bool(metric_xml.get("suspended", ""))
for owner in metric_xml.findall(".//t:owner", namespaces=ns):
metric_item._owner_id = owner.get("id", None)
for project in metric_xml.findall(".//t:project", namespaces=ns):
metric_item._project_id = project.get("id", None)
metric_item._project_name = project.get("name", None)
for view in metric_xml.findall(".//t:underlyingView", namespaces=ns):
metric_item._view_id = view.get("id", None)
tags = set()
tags_elem = metric_xml.find(".//t:tags", namespaces=ns)
if tags_elem is not None:
all_tags = TagItem.from_xml_element(tags_elem, ns)
tags = all_tags
metric_item.tags = tags
metric_item._initial_tags = tags
all_metric_items.append(metric_item)
return all_metric_items
def string_to_bool(s: str) -> bool:
    """Convert a string-encoded boolean to a bool.

    Only the case-insensitive literal ``"true"`` maps to True; every
    other string (including ``""``) maps to False.
    """
    return s.lower() == "true"
| mit | 90b2dba860cae12a336bca0849d84186 | 30.8875 | 94 | 0.600941 | 3.787676 | false | false | false | false |
tableau/server-client-python | test/test_user_model.py | 1 | 5671 | import logging
import unittest
from unittest.mock import *
from typing import List
import io
import pytest
import tableauserverclient as TSC
class UserModelTests(unittest.TestCase):
    """Validation tests for UserItem construction and property setters."""

    def test_invalid_name(self):
        # A name is required both at construction time and on re-assignment.
        self.assertRaises(ValueError, TSC.UserItem, None, TSC.UserItem.Roles.Publisher)
        self.assertRaises(ValueError, TSC.UserItem, "", TSC.UserItem.Roles.Publisher)
        user = TSC.UserItem("me", TSC.UserItem.Roles.Publisher)
        with self.assertRaises(ValueError):
            user.name = None
        with self.assertRaises(ValueError):
            user.name = ""

    def test_invalid_auth_setting(self):
        # Only recognized auth settings may be assigned.
        user = TSC.UserItem("me", TSC.UserItem.Roles.Publisher)
        with self.assertRaises(ValueError):
            user.auth_setting = "Hello"

    def test_invalid_site_role(self):
        # Only recognized site roles may be assigned.
        user = TSC.UserItem("me", TSC.UserItem.Roles.Publisher)
        with self.assertRaises(ValueError):
            user.site_role = "Hello"
class UserDataTest(unittest.TestCase):
logger = logging.getLogger("UserDataTest")
role_inputs = [
["creator", "system", "yes", "SiteAdministrator"],
["None", "system", "no", "SiteAdministrator"],
["explorer", "SysTEm", "no", "SiteAdministrator"],
["creator", "site", "yes", "SiteAdministratorCreator"],
["explorer", "site", "yes", "SiteAdministratorExplorer"],
["creator", "SITE", "no", "SiteAdministratorCreator"],
["creator", "none", "yes", "Creator"],
["explorer", "none", "yes", "ExplorerCanPublish"],
["viewer", "None", "no", "Viewer"],
["explorer", "no", "yes", "ExplorerCanPublish"],
["EXPLORER", "noNO", "yes", "ExplorerCanPublish"],
["explorer", "no", "no", "Explorer"],
["unlicensed", "none", "no", "Unlicensed"],
["Chef", "none", "yes", "Unlicensed"],
["yes", "yes", "yes", "Unlicensed"],
]
valid_import_content = [
"username, pword, fname, creator, site, yes, email",
"username, pword, fname, explorer, none, no, email",
"",
"u",
"p",
]
valid_username_content = ["jfitzgerald@tableau.com"]
usernames = [
"valid",
"valid@email.com",
"domain/valid",
"domain/valid@tmail.com",
"va!@#$%^&*()lid",
"in@v@lid",
"in valid",
"",
]
def test_validate_usernames(self):
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[0])
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[1])
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[2])
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[3])
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[4])
with self.assertRaises(AttributeError):
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[5])
with self.assertRaises(AttributeError):
TSC.UserItem.validate_username_or_throw(UserDataTest.usernames[6])
def test_evaluate_role(self):
for line in UserDataTest.role_inputs:
actual = TSC.UserItem.CSVImport._evaluate_site_role(line[0], line[1], line[2])
assert actual == line[3], line + [actual]
def test_get_user_detail_empty_line(self):
test_line = ""
test_user = TSC.UserItem.CSVImport.create_user_from_line(test_line)
assert test_user is None
def test_get_user_detail_standard(self):
test_line = "username, pword, fname, license, admin, pub, email"
test_user: TSC.UserItem = TSC.UserItem.CSVImport.create_user_from_line(test_line)
assert test_user.name == "username", test_user.name
assert test_user.fullname == "fname", test_user.fullname
assert test_user.site_role == "Unlicensed", test_user.site_role
assert test_user.email == "email", test_user.email
def test_get_user_details_only_username(self):
test_line = "username"
test_user: TSC.UserItem = TSC.UserItem.CSVImport.create_user_from_line(test_line)
def test_populate_user_details_only_some(self):
values = "username, , , creator, admin"
user = TSC.UserItem.CSVImport.create_user_from_line(values)
assert user.name == "username"
def test_validate_user_detail_standard(self):
test_line = "username, pword, fname, creator, site, 1, email"
TSC.UserItem.CSVImport._validate_import_line_or_throw(test_line, UserDataTest.logger)
TSC.UserItem.CSVImport.create_user_from_line(test_line)
# for file handling
def _mock_file_content(self, content: List[str]) -> io.TextIOWrapper:
# the empty string represents EOF
# the tests run through the file twice, first to validate then to fetch
mock = MagicMock(io.TextIOWrapper)
content.append("") # EOF
mock.readline.side_effect = content
mock.name = "file-mock"
return mock
def test_validate_import_file(self):
test_data = self._mock_file_content(UserDataTest.valid_import_content)
valid, invalid = TSC.UserItem.CSVImport.validate_file_for_import(test_data, UserDataTest.logger)
assert valid == 2, "Expected two lines to be parsed, got {}".format(valid)
assert invalid == [], "Expected no failures, got {}".format(invalid)
def test_validate_usernames_file(self):
test_data = self._mock_file_content(UserDataTest.usernames)
valid, invalid = TSC.UserItem.CSVImport.validate_file_for_import(test_data, UserDataTest.logger)
assert valid == 5, "Exactly 5 of the lines were valid, counted {}".format(valid + invalid)
| mit | b7292b7acb8db4a3c48d12f4df07a946 | 39.798561 | 104 | 0.637454 | 3.537742 | false | true | false | false |
tableau/server-client-python | samples/create_project.py | 1 | 3902 | ####
# This script demonstrates how to use the Tableau Server Client
# to create new projects, both at the root level and how to nest them using
# parent_id.
#
#
# To run the script, you must have installed Python 3.7 or later.
####
import argparse
import logging
import sys
import tableauserverclient as TSC
def create_project(server, project_item, samples=False):
    """Create *project_item* on *server* and return the created project.

    If the create call fails with a ServerResponseError, the project is
    assumed to already exist and is looked up by name instead.

    :param server: a signed-in TSC.Server instance
    :param project_item: the TSC.ProjectItem to create
    :param samples: when True, ask the server to publish sample content
        into the new project
    """
    try:
        project_item = server.projects.create(project_item, samples)
        print("Created a new project called: %s" % project_item.name)
        return project_item
    except TSC.ServerResponseError:
        # NOTE(review): any server-side failure is treated as "already
        # exists"; the lookup below assumes a project with this name is
        # actually present -- confirm for other failure modes.
        print("We have already created this project: %s" % project_item.name)
        project_items = server.projects.filter(name=project_item.name)
        return project_items[0]
def main():
parser = argparse.ArgumentParser(description="Create new projects.")
# Common options; please keep those in sync across all samples
parser.add_argument("--server", "-s", required=True, help="server address")
parser.add_argument("--site", "-S", help="site name")
parser.add_argument(
"--token-name", "-p", required=True, help="name of the personal access token used to sign into the server"
)
parser.add_argument(
"--token-value", "-v", required=True, help="value of the personal access token used to sign into the server"
)
parser.add_argument(
"--logging-level",
"-l",
choices=["debug", "info", "error"],
default="error",
help="desired logging level (set to error by default)",
)
# Options specific to this sample
# This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, http_options={"verify": False})
server.use_server_version()
with server.auth.sign_in(tableau_auth):
# Use highest Server REST API version available
server.use_server_version()
# Without parent_id specified, projects are created at the top level.
top_level_project = TSC.ProjectItem(name="Top Level Project")
top_level_project = create_project(server, top_level_project)
# Specifying parent_id creates a nested projects.
child_project = TSC.ProjectItem(name="Child Project", parent_id=top_level_project.id)
child_project = create_project(server, child_project, samples=True)
# Projects can be nested at any level.
grand_child_project = TSC.ProjectItem(name="Grand Child Project", parent_id=child_project.id)
grand_child_project = create_project(server, grand_child_project)
# Projects can be updated
changed_project = server.projects.update(grand_child_project, samples=True)
server.projects.populate_workbook_default_permissions(changed_project),
server.projects.populate_flow_default_permissions(changed_project),
server.projects.populate_lens_default_permissions(changed_project), # uses same as workbook
server.projects.populate_datasource_default_permissions(changed_project),
server.projects.populate_permissions(changed_project)
# Projects have default permissions set for the object types they contain
print("Permissions from project {}:".format(changed_project.id))
print(changed_project.permissions)
print(
changed_project.default_workbook_permissions,
changed_project.default_datasource_permissions,
changed_project.default_lens_permissions,
changed_project.default_flow_permissions,
)
if __name__ == "__main__":
main()
| mit | 4e900064d358662807992dce4b05eb76 | 40.073684 | 116 | 0.688365 | 4.077325 | false | false | false | false |
tableau/server-client-python | tableauserverclient/server/request_options.py | 1 | 7010 | from ..models.property_decorators import property_is_int
import logging
logger = logging.getLogger("tableau.request_options")
class RequestOptionsBase(object):
    """Base for option objects that serialize themselves into URL query params."""

    # This method is used if server api version is below 3.7 (2020.1)
    def apply_query_params(self, url):
        """Return *url* with this object's query parameters appended.

        Any query string already present on *url* is preserved by
        re-appending it after the new parameters. Propagates
        NotImplementedError when the subclass lacks get_query_params.
        """
        try:
            params = self.get_query_params()
            params_list = ["{}={}".format(k, v) for (k, v) in params.items()]
            logger.debug("Applying options to request: <%s(%s)>", self.__class__.__name__, ",".join(params_list))
            if "?" in url:
                # NOTE(review): assumes at most one "?" in the URL; the
                # two-target unpacking raises if more appear -- confirm
                # callers never pass such URLs.
                url, existing_params = url.split("?")
                params_list.append(existing_params)
            return "{0}?{1}".format(url, "&".join(params_list))
        except NotImplementedError:
            raise

    def get_query_params(self):
        # Subclasses must return a dict of query-parameter name -> value.
        raise NotImplementedError()
class RequestOptions(RequestOptionsBase):
class Operator:
Equals = "eq"
GreaterThan = "gt"
GreaterThanOrEqual = "gte"
LessThan = "lt"
LessThanOrEqual = "lte"
In = "in"
Has = "has"
class Field:
Args = "args"
CompletedAt = "completedAt"
CreatedAt = "createdAt"
DomainName = "domainName"
DomainNickname = "domainNickname"
HitsTotal = "hitsTotal"
IsLocal = "isLocal"
JobType = "jobType"
LastLogin = "lastLogin"
MinimumSiteRole = "minimumSiteRole"
Name = "name"
Notes = "notes"
OwnerDomain = "ownerDomain"
OwnerEmail = "ownerEmail"
OwnerName = "ownerName"
ParentProjectId = "parentProjectId"
Progress = "progress"
ProjectName = "projectName"
PublishSamples = "publishSamples"
SiteRole = "siteRole"
StartedAt = "startedAt"
Status = "status"
Subtitle = "subtitle"
Tags = "tags"
Title = "title"
TopLevelProject = "topLevelProject"
Type = "type"
UpdatedAt = "updatedAt"
UserCount = "userCount"
class Direction:
Desc = "desc"
Asc = "asc"
def __init__(self, pagenumber=1, pagesize=100):
self.pagenumber = pagenumber
self.pagesize = pagesize
self.sort = set()
self.filter = set()
# This is private until we expand all of our parsers to handle the extra fields
self._all_fields = False
def page_size(self, page_size):
self.pagesize = page_size
return self
def page_number(self, page_number):
self.pagenumber = page_number
return self
def get_query_params(self):
params = {}
if self.pagenumber:
params["pageNumber"] = self.pagenumber
if self.pagesize:
params["pageSize"] = self.pagesize
if len(self.sort) > 0:
sort_options = (str(sort_item) for sort_item in self.sort)
ordered_sort_options = sorted(sort_options)
params["sort"] = ",".join(ordered_sort_options)
if len(self.filter) > 0:
filter_options = (str(filter_item) for filter_item in self.filter)
ordered_filter_options = sorted(filter_options)
params["filter"] = ",".join(ordered_filter_options)
if self._all_fields:
params["fields"] = "_all_"
return params
class _FilterOptionsBase(RequestOptionsBase):
    """Collects view filters and emits them as ``vf_<name>`` query params.

    Subclasses implement ``get_query_params`` and call
    ``_append_view_filters`` from it to merge the recorded filters in.
    """

    def __init__(self):
        self.view_filters = []

    def get_query_params(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def vf(self, name, value):
        """Record one view filter; returns self so calls can be chained."""
        self.view_filters.append((name, value))
        return self

    def _append_view_filters(self, params):
        """Merge recorded filters into *params* under vf_-prefixed keys."""
        for filter_name, filter_value in self.view_filters:
            params["vf_" + filter_name] = filter_value
class CSVRequestOptions(_FilterOptionsBase):
    """Options for CSV view exports: view filters plus a cache max-age."""

    def __init__(self, maxage=-1):
        super(CSVRequestOptions, self).__init__()
        self.max_age = maxage

    @property
    def max_age(self):
        # Cache max-age; range 0-240 with -1 as the "not set" sentinel.
        # Units are defined by the REST API (presumably minutes; confirm).
        return self._max_age

    @max_age.setter
    @property_is_int(range=(0, 240), allowed=[-1])
    def max_age(self, value):
        self._max_age = value

    def get_query_params(self):
        params = {}
        if self.max_age != -1:
            # Only send maxAge when explicitly set (-1 means omit).
            params["maxAge"] = self.max_age
        self._append_view_filters(params)
        return params
class ExcelRequestOptions(RequestOptionsBase):
def __init__(self, maxage: int = -1) -> None:
super().__init__()
self.max_age = maxage
@property
def max_age(self) -> int:
return self._max_age
@max_age.setter
@property_is_int(range=(0, 240), allowed=[-1])
def max_age(self, value: int) -> None:
self._max_age = value
def get_query_params(self):
params = {}
if self.max_age != -1:
params["maxAge"] = self.max_age
return params
class ImageRequestOptions(_FilterOptionsBase):
# if 'high' isn't specified, the REST API endpoint returns an image with standard resolution
class Resolution:
High = "high"
def __init__(self, imageresolution=None, maxage=-1):
super(ImageRequestOptions, self).__init__()
self.image_resolution = imageresolution
self.max_age = maxage
@property
def max_age(self):
return self._max_age
@max_age.setter
@property_is_int(range=(0, 240), allowed=[-1])
def max_age(self, value):
self._max_age = value
def get_query_params(self):
params = {}
if self.image_resolution:
params["resolution"] = self.image_resolution
if self.max_age != -1:
params["maxAge"] = self.max_age
self._append_view_filters(params)
return params
class PDFRequestOptions(_FilterOptionsBase):
class PageType:
A3 = "a3"
A4 = "a4"
A5 = "a5"
B4 = "b4"
B5 = "b5"
Executive = "executive"
Folio = "folio"
Ledger = "ledger"
Legal = "legal"
Letter = "letter"
Note = "note"
Quarto = "quarto"
Tabloid = "tabloid"
Unspecified = "unspecified"
class Orientation:
Portrait = "portrait"
Landscape = "landscape"
def __init__(self, page_type=None, orientation=None, maxage=-1):
super(PDFRequestOptions, self).__init__()
self.page_type = page_type
self.orientation = orientation
self.max_age = maxage
@property
def max_age(self):
return self._max_age
@max_age.setter
@property_is_int(range=(0, 240), allowed=[-1])
def max_age(self, value):
self._max_age = value
def get_query_params(self):
params = {}
if self.page_type:
params["type"] = self.page_type
if self.orientation:
params["orientation"] = self.orientation
if self.max_age != -1:
params["maxAge"] = self.max_age
self._append_view_filters(params)
return params
| mit | 54761aed8771d23c067396934d5bb9c2 | 27.04 | 113 | 0.571612 | 3.785097 | false | false | false | false |
tableau/server-client-python | tableauserverclient/server/server.py | 1 | 7383 | import logging
import warnings
import requests
import urllib3
from defusedxml.ElementTree import fromstring, ParseError
from packaging.version import Version
from .endpoint import (
Sites,
Views,
Users,
Groups,
Workbooks,
Datasources,
Projects,
Auth,
Schedules,
ServerInfo,
Tasks,
Subscriptions,
Jobs,
Metadata,
Databases,
Tables,
Flows,
Webhooks,
DataAccelerationReport,
Favorites,
DataAlerts,
Fileuploads,
FlowRuns,
Metrics,
)
from .endpoint.exceptions import (
ServerInfoEndpointNotFoundError,
EndpointUnavailableError,
)
from .exceptions import NotSignedInError
from ..namespace import Namespace
_PRODUCT_TO_REST_VERSION = {
"10.0": "2.3",
"9.3": "2.2",
"9.2": "2.1",
"9.1": "2.0",
"9.0": "2.0",
}
minimum_supported_server_version = "2.3"
default_server_version = "2.3"
class Server(object):
class PublishMode:
Append = "Append"
Overwrite = "Overwrite"
CreateNew = "CreateNew"
def __init__(self, server_address, use_server_version=False, http_options=None, session_factory=None):
self._auth_token = None
self._site_id = None
self._user_id = None
self._server_address: str = server_address
self._session_factory = session_factory or requests.session
self.auth = Auth(self)
self.views = Views(self)
self.users = Users(self)
self.sites = Sites(self)
self.groups = Groups(self)
self.jobs = Jobs(self)
self.workbooks = Workbooks(self)
self.datasources = Datasources(self)
self.favorites = Favorites(self)
self.flows = Flows(self)
self.projects = Projects(self)
self.schedules = Schedules(self)
self.server_info = ServerInfo(self)
self.tasks = Tasks(self)
self.subscriptions = Subscriptions(self)
self.metadata = Metadata(self)
self.databases = Databases(self)
self.tables = Tables(self)
self.webhooks = Webhooks(self)
self.data_acceleration_report = DataAccelerationReport(self)
self.data_alerts = DataAlerts(self)
self.fileuploads = Fileuploads(self)
self._namespace = Namespace()
self.flow_runs = FlowRuns(self)
self.metrics = Metrics(self)
self._session = self._session_factory()
self._http_options = dict() # must set this before making a server call
if http_options:
self.add_http_options(http_options)
self.validate_server_connection()
self.version = default_server_version
if use_server_version:
self.use_server_version() # this makes a server call
def validate_server_connection(self):
try:
if not self._server_address.startswith("http://") and not self._server_address.startswith("https://"):
self._server_address = "http://" + self._server_address
self._session.prepare_request(
requests.Request("GET", url=self._server_address, params=self._http_options)
)
except Exception as req_ex:
raise ValueError("Invalid server initialization", req_ex)
def __repr__(self):
return "<TableauServerClient> [Connection: {}, {}]".format(self.baseurl, self.server_info.serverInfo)
def add_http_options(self, options_dict: dict):
try:
self._http_options.update(options_dict)
if "verify" in options_dict.keys() and self._http_options.get("verify") is False:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# would be nice if you could turn them back on
except BaseException as be:
print(be)
# expected errors on invalid input:
# 'set' object has no attribute 'keys', 'list' object has no attribute 'keys'
# TypeError: cannot convert dictionary update sequence element #0 to a sequence (input is a tuple)
raise ValueError("Invalid http options given: {}".format(options_dict))
def clear_http_options(self):
self._http_options = dict()
def _clear_auth(self):
self._site_id = None
self._user_id = None
self._auth_token = None
self._session = self._session_factory()
def _set_auth(self, site_id, user_id, auth_token):
self._site_id = site_id
self._user_id = user_id
self._auth_token = auth_token
def _get_legacy_version(self):
response = self._session.get(self.server_address + "/auth?format=xml")
try:
info_xml = fromstring(response.content)
except ParseError as parseError:
logging.getLogger("TSC.server").info(
"Could not read server version info. The server may not be running or configured."
)
return self.version
prod_version = info_xml.find(".//product_version").text
version = _PRODUCT_TO_REST_VERSION.get(prod_version, "2.1") # 2.1
return version
def _determine_highest_version(self):
try:
old_version = self.version
self.version = "2.4"
version = self.server_info.get().rest_api_version
except ServerInfoEndpointNotFoundError:
version = self._get_legacy_version()
except BaseException:
version = self._get_legacy_version()
self.version = old_version
return version
def use_server_version(self):
self.version = self._determine_highest_version()
def use_highest_version(self):
self.use_server_version()
warnings.warn("use use_server_version instead", DeprecationWarning)
def check_at_least_version(self, target: str):
server_version = Version(self.version or "0.0")
target_version = Version(target)
return server_version >= target_version
def assert_at_least_version(self, comparison: str, reason: str):
if not self.check_at_least_version(comparison):
error = "{} is not available in API version {}. Requires {}".format(reason, self.version, comparison)
raise EndpointUnavailableError(error)
@property
def baseurl(self):
return "{0}/api/{1}".format(self._server_address, str(self.version))
@property
def namespace(self):
return self._namespace()
@property
def auth_token(self):
if self._auth_token is None:
error = "Missing authentication token. You must sign in first."
raise NotSignedInError(error)
return self._auth_token
@property
def site_id(self):
if self._site_id is None:
error = "Missing site ID. You must sign in first."
raise NotSignedInError(error)
return self._site_id
@property
def user_id(self):
if self._user_id is None:
error = "Missing user ID. You must sign in first."
raise NotSignedInError(error)
return self._user_id
@property
def server_address(self):
return self._server_address
@property
def http_options(self):
return self._http_options
@property
def session(self):
return self._session
def is_signed_in(self):
return self._auth_token is not None
| mit | 1894116c1a1a5ff872f15678b57175e7 | 30.961039 | 114 | 0.616822 | 3.988655 | false | false | false | false |
materialsvirtuallab/monty | monty/serialization.py | 1 | 4391 | """
This module implements serialization support for common formats such as json
and yaml.
"""
import json
import os
try:
from ruamel.yaml import YAML
except ImportError:
YAML = None # type: ignore
from monty.io import zopen
from monty.json import MontyDecoder, MontyEncoder
from monty.msgpack import default, object_hook
try:
import msgpack
except ImportError:
msgpack = None
def loadfn(fn, *args, fmt=None, **kwargs):
    r"""
    Loads json/yaml/msgpack directly from a filename instead of a
    File-like object. File may also be a BZ2 (".BZ2") or GZIP (".GZ", ".Z")
    compressed file.
    For YAML, ruamel.yaml must be installed. The file type is automatically
    detected from the file extension (case insensitive).
    YAML is assumed if the filename contains ".yaml" or ".yml".
    Msgpack is assumed if the filename contains ".mpk".
    JSON is otherwise assumed.

    Args:
        fn (str/Path): filename or pathlib.Path.
        *args: Any of the args supported by json/yaml.load.
        fmt (string): If specified, the fmt specified would be used instead
            of autodetection from filename. Supported formats right now are
            "json", "yaml" or "mpk".
        **kwargs: Any of the kwargs supported by json/yaml.load.

    Returns:
        (object) Result of json/yaml/msgpack.load.
    """
    if fmt is None:
        # Autodetect the format from the (case-insensitive) file extension.
        basename = os.path.basename(fn).lower()
        if ".mpk" in basename:
            fmt = "mpk"
        elif any(ext in basename for ext in (".yaml", ".yml")):
            fmt = "yaml"
        else:
            fmt = "json"
    if fmt == "mpk":
        if msgpack is None:
            raise RuntimeError("Loading of message pack files is not possible as msgpack-python is not installed.")
        if "object_hook" not in kwargs:
            # Default to monty's hook unless the caller supplied one.
            kwargs["object_hook"] = object_hook
        with zopen(fn, "rb") as fp:
            return msgpack.load(fp, *args, **kwargs)  # pylint: disable=E1101
    else:
        # Text formats; zopen transparently handles bz2/gzip compression.
        with zopen(fn, "rt") as fp:
            if fmt == "yaml":
                if YAML is None:
                    raise RuntimeError("Loading of YAML files requires ruamel.yaml.")
                yaml = YAML()
                return yaml.load(fp, *args, **kwargs)
            if fmt == "json":
                if "cls" not in kwargs:
                    # Default to MontyDecoder unless the caller supplied one.
                    kwargs["cls"] = MontyDecoder
                return json.load(fp, *args, **kwargs)
            raise TypeError(f"Invalid format: {fmt}")
def dumpfn(obj, fn, *args, fmt=None, **kwargs):
    r"""
    Dump to a json/yaml directly by filename instead of a
    File-like object. File may also be a BZ2 (".BZ2") or GZIP (".GZ", ".Z")
    compressed file.
    For YAML, ruamel.yaml must be installed. The file type is automatically
    detected from the file extension (case insensitive). YAML is assumed if the
    filename contains ".yaml" or ".yml".
    Msgpack is assumed if the filename contains ".mpk".
    JSON is otherwise assumed.

    Args:
        obj (object): Object to dump.
        fn (str/Path): filename or pathlib.Path.
        *args: Any of the args supported by json/yaml.dump.
        fmt (string): If specified, the fmt specified would be used instead
            of autodetection from filename. Supported formats right now are
            "json", "yaml" or "mpk".
        **kwargs: Any of the kwargs supported by json/yaml.dump.

    Returns:
        None. The serialized object is written to ``fn``.
    """
    if fmt is None:
        # Autodetect the format from the (case-insensitive) file extension.
        basename = os.path.basename(fn).lower()
        if ".mpk" in basename:
            fmt = "mpk"
        elif any(ext in basename for ext in (".yaml", ".yml")):
            fmt = "yaml"
        else:
            fmt = "json"
    if fmt == "mpk":
        if msgpack is None:
            raise RuntimeError("Loading of message pack files is not possible as msgpack-python is not installed.")
        if "default" not in kwargs:
            # Use monty's msgpack serializer unless the caller supplied one.
            kwargs["default"] = default
        with zopen(fn, "wb") as fp:
            msgpack.dump(obj, fp, *args, **kwargs)  # pylint: disable=E1101
    else:
        # Text formats; zopen transparently handles bz2/gzip compression.
        with zopen(fn, "wt") as fp:
            if fmt == "yaml":
                if YAML is None:
                    raise RuntimeError("Loading of YAML files requires ruamel.yaml.")
                yaml = YAML()
                yaml.dump(obj, fp, *args, **kwargs)
            elif fmt == "json":
                if "cls" not in kwargs:
                    # Default to MontyEncoder unless the caller supplied one.
                    kwargs["cls"] = MontyEncoder
                fp.write(json.dumps(obj, *args, **kwargs))
            else:
                raise TypeError(f"Invalid format: {fmt}")
| mit | 15cce3e7fcb2d0d7e243527396246c9c | 34.128 | 115 | 0.580733 | 4.024748 | false | false | false | false |
earwig/mwparserfromhell | src/mwparserfromhell/smart_list/smart_list.py | 2 | 5535 | # Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
# Copyright (C) 2019-2020 Yuri Astrakhan <YuriAstrakhan@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from weakref import ref
from .list_proxy import ListProxy
from .utils import _SliceNormalizerMixIn, inheritdoc
class SmartList(_SliceNormalizerMixIn, list):
    """Implements the ``list`` interface with special handling of sublists.
    When a sublist is created (by ``list[i:j]``), any changes made to this
    list (such as the addition, removal, or replacement of elements) will be
    reflected in the sublist, or vice-versa, to the greatest degree possible.
    This is implemented by having sublists - instances of the
    :class:`.ListProxy` type - dynamically determine their elements by storing
    their slice info and retrieving that slice from the parent. Methods that
    change the size of the list also change the slice info. For example::
        >>> parent = SmartList([0, 1, 2, 3])
        >>> parent
        [0, 1, 2, 3]
        >>> child = parent[2:]
        >>> child
        [2, 3]
        >>> child.append(4)
        >>> child
        [2, 3, 4]
        >>> parent
        [0, 1, 2, 3, 4]
    """
    def __init__(self, iterable=None):
        if iterable:
            super().__init__(iterable)
        else:
            super().__init__()
        # Maps id(weakref-to-child) -> (weakref-to-child, [start, stop, step]).
        # The sliceinfo list is shared with the ListProxy, so adjusting it here
        # immediately moves the child's window into this list.
        self._children = {}
    def __getitem__(self, key):
        # Integer indexing behaves exactly like a plain list; only slices
        # produce live ListProxy children.
        if not isinstance(key, slice):
            return super().__getitem__(key)
        key = self._normalize_slice(key, clamp=False)
        sliceinfo = [key.start, key.stop, key.step]
        child = ListProxy(self, sliceinfo)
        # Register a weakref with a callback so the bookkeeping entry is
        # removed automatically when the proxy is garbage-collected.
        child_ref = ref(child, self._delete_child)
        self._children[id(child_ref)] = (child_ref, sliceinfo)
        return child
    def __setitem__(self, key, item):
        if not isinstance(key, slice):
            super().__setitem__(key, item)
            return
        item = list(item)
        super().__setitem__(key, item)
        key = self._normalize_slice(key, clamp=True)
        # diff = number of elements inserted minus number replaced; nonzero
        # only when the slice assignment changed the list's length.
        diff = len(item) + (key.start - key.stop) // key.step
        if not diff:
            return
        # Shift the recorded bounds of every child whose window lies after
        # the edited region so it keeps tracking the same elements.
        for child, (start, stop, _step) in self._children.values():
            if start > key.stop:
                self._children[id(child)][1][0] += diff
            if stop is not None and stop >= key.stop:
                self._children[id(child)][1][1] += diff
    def __delitem__(self, key):
        super().__delitem__(key)
        # Normalize an integer index to an equivalent one-element slice so a
        # single code path can adjust the children below.
        if isinstance(key, slice):
            key = self._normalize_slice(key, clamp=True)
        else:
            key = slice(key, key + 1, 1)
        diff = (key.stop - key.start) // key.step
        # Shrink the recorded bounds of children positioned after the
        # deleted region.
        for child, (start, stop, _step) in self._children.values():
            if start > key.start:
                self._children[id(child)][1][0] -= diff
            if stop is not None and stop >= key.stop:
                self._children[id(child)][1][1] -= diff
    def __add__(self, other):
        return SmartList(list(self) + other)
    def __radd__(self, other):
        return SmartList(other + list(self))
    def __iadd__(self, other):
        self.extend(other)
        return self
    def _delete_child(self, child_ref):
        """Remove a child reference that is about to be garbage-collected."""
        del self._children[id(child_ref)]
    def _detach_children(self):
        """Remove all children and give them independent parent copies."""
        # After detaching, each proxy points at a frozen snapshot (a plain
        # list), so further changes to self no longer affect it.
        children = [val[0] for val in self._children.values()]
        for child in children:
            child()._parent = list(self)
        self._children.clear()
    # The mutators below are implemented via slice assignment/deletion so
    # that the child-bookkeeping in __setitem__/__delitem__ applies.
    @inheritdoc
    def append(self, item):
        head = len(self)
        self[head:head] = [item]
    @inheritdoc
    def extend(self, item):
        head = len(self)
        self[head:head] = item
    @inheritdoc
    def insert(self, index, item):
        self[index:index] = [item]
    @inheritdoc
    def pop(self, index=None):
        if index is None:
            index = len(self) - 1
        item = self[index]
        del self[index]
        return item
    @inheritdoc
    def remove(self, item):
        del self[self.index(item)]
    @inheritdoc
    def reverse(self):
        # Reordering cannot be expressed through slice-info updates, so the
        # children are detached (frozen) first.
        self._detach_children()
        super().reverse()
    @inheritdoc
    def sort(self, key=None, reverse=None):
        # Same as reverse(): sorting invalidates the children's windows.
        self._detach_children()
        kwargs = {}
        if key is not None:
            kwargs["key"] = key
        if reverse is not None:
            kwargs["reverse"] = reverse
        super().sort(**kwargs)
| mit | 9c65dafe19794565bb082b02fd3c3d76 | 34.031646 | 79 | 0.605781 | 4.019608 | false | false | false | false |
materialsvirtuallab/monty | monty/collections.py | 1 | 6532 | """
Useful collection classes, e.g., tree, frozendict, etc.
"""
import collections
def tree():
    """
    Build a "tree": a ``defaultdict`` whose missing entries are themselves
    trees, so arbitrarily deep key paths can be assigned with no setup.
    Usage:
        x = tree()
        x['a']['b']['c'] = 1
    Returns:
        A tree.
    """
    return collections.defaultdict(tree)
class frozendict(dict):
    """
    An immutable wrapper around ``dict`` that raises ``KeyError`` on any
    attempt to rebind a key after construction. The lowercase naming
    deliberately mirrors Python's built-in "frozenset" (violating PEP8).
    """
    def __init__(self, *args, **kwargs):
        """Initialize with the same arguments accepted by ``dict``."""
        super().__init__(*args, **kwargs)
    def __setitem__(self, key, val):
        """Always fail: item assignment is forbidden."""
        raise KeyError(f"Cannot overwrite existing key: {str(key)}")
    def update(self, *args, **kwargs):
        """Always fail: bulk updates are forbidden."""
        raise KeyError(f"Cannot update a {self.__class__.__name__}")
class Namespace(dict):
    """A ``dict`` whose keys are write-once: a key may be added, but never
    reassigned afterwards."""
    def __init__(self, *args, **kwargs):
        """Accept the same arguments as ``dict``; each entry is added once."""
        self.update(*args, **kwargs)
    def __setitem__(self, key, val):
        """Insert ``key`` -> ``val``, failing if ``key`` already exists."""
        if key in self:
            raise KeyError(f"Cannot overwrite existent key: {str(key)}")
        super().__setitem__(key, val)
    def update(self, *args, **kwargs):
        """Add entries one by one so the write-once rule is enforced."""
        for key, value in dict(*args, **kwargs).items():
            self[key] = value
class AttrDict(dict):
    """
    A ``dict`` whose entries are also reachable as attributes, so ``d.foo``
    and ``d["foo"]`` name the same value.
    Example:
        >>> d = AttrDict(foo=1, bar=2)
        >>> assert d["foo"] == d.foo
        >>> d.bar = "hello"
        >>> assert d.bar == "hello"
    """
    def __init__(self, *args, **kwargs):
        """Accept the same arguments as ``dict``."""
        super().__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself; this is what
        # makes attribute access and item access interchangeable.
        self.__dict__ = self
    def copy(self):
        """
        :return: A new AttrDict with the same contents.
        """
        return self.__class__(**super().copy())
class FrozenAttrDict(frozendict):
    """
    An immutable dictionary whose entries can also be read as attributes:
    ``d.foo`` is equivalent to ``d["foo"]``. Any mutation attempt raises.
    """
    def __init__(self, *args, **kwargs):
        """Accept the same arguments as ``dict``."""
        super().__init__(*args, **kwargs)
    def __getattribute__(self, name):
        """Prefer real attributes; fall back to dictionary keys."""
        try:
            return super().__getattribute__(name)
        except AttributeError:
            try:
                return self[name]
            except KeyError as err:
                raise AttributeError(str(err))
    def __setattr__(self, name, value):
        """Reject attribute assignment to keep the instance frozen."""
        raise KeyError(f"You cannot modify attribute {name} of {self.__class__.__name__}")
class MongoDict:
    """
    A read-only, dict-like view that exposes (possibly nested) entries as
    attributes. Nested mappings are wrapped on access, so documents pulled
    from e.g. a MongoDB database can be explored interactively; IPython tab
    completion is supported via ``__dir__``.
    >>> m = MongoDict({'a': {'b': 1}, 'x': 2})
    >>> assert m.a.b == 1 and m.x == 2
    >>> assert "a" in m and "b" in m.a
    >>> m["a"]
    {'b': 1}
    .. note::
        Cannot inherit from ABC collections.Mapping because otherwise
        dict.keys and dict.items will pollute the namespace.
        e.g MongoDict({"keys": 1}).keys would be the ABC dict method.
    """
    def __init__(self, *args, **kwargs):
        """Accept the same arguments as ``dict``."""
        # Write straight into the instance dict: __setattr__ is disabled.
        self.__dict__["_mongo_dict_"] = dict(*args, **kwargs)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return str(self._mongo_dict_)
    def __setattr__(self, name, value):
        raise NotImplementedError(f"You cannot modify attribute {name} of {self.__class__.__name__}")
    def __getattribute__(self, name):
        try:
            return super().__getattribute__(name)
        except AttributeError:
            try:
                entry = self._mongo_dict_[name]
                if isinstance(entry, collections.abc.Mapping):
                    # Wrap nested mappings so attribute chains keep working.
                    entry = self.__class__(entry)
                return entry
            except Exception as exc:
                raise AttributeError(str(exc))
    def __getitem__(self, slice_):
        return self._mongo_dict_.__getitem__(slice_)
    def __iter__(self):
        return iter(self._mongo_dict_)
    def __len__(self):
        return len(self._mongo_dict_)
    def __dir__(self):
        """
        For Ipython tab completion.
        See http://ipython.org/ipython-doc/dev/config/integrating.html
        """
        return sorted(k for k in self._mongo_dict_ if not callable(k))
def dict2namedtuple(*args, **kwargs):
    """
    Build a :class:`namedtuple` instance from dict-style arguments.
    Example:
        >>> t = dict2namedtuple(foo=1, bar="hello")
        >>> assert t.foo == 1 and t.bar == "hello"
        >>> t = dict2namedtuple([("foo", 1), ("bar", "hello")])
        >>> assert t[0] == t.foo and t[1] == t.bar
    .. warning::
        - The positional order of the fields is not deterministic when
          kwargs are used; always access the result by attribute.
        - A temporary dict is built to instantiate the tuple, so avoid this
          helper where memory and performance are critical.
    """
    data = collections.OrderedDict(*args)
    data.update(**kwargs)
    cls = collections.namedtuple("dict2namedtuple", list(data))
    return cls(**data)
| mit | 596ef942b758b216b64227faad6b9790 | 28.690909 | 101 | 0.574556 | 4.203346 | false | false | false | false |
materialsvirtuallab/monty | monty/string.py | 1 | 3098 | """
Useful additional string functions.
"""
import sys
def remove_non_ascii(s):
    """
    Strip every non-ASCII character from a string. Useful when downstream
    tooling only supports ASCII.
    Args:
        s (str): Input string
    Returns:
        The input with all code points >= 128 removed.
    """
    return "".join(filter(lambda ch: ord(ch) < 128, s))
def unicode2str(s):
    """
    Force a unicode object to a byte string under Python 2; a no-op under
    Python 3.
    Args:
        s (str/unicode): Input string / unicode.
    Returns:
        str in Python 2. Unchanged otherwise.
    """
    if sys.version_info.major < 3:
        return s.encode("utf-8")
    return s
def is_string(s):
    """True if s behaves like a string (duck typing test)."""
    # A string is anything that can be concatenated with a str literal.
    try:
        _ = s + " "
    except TypeError:
        return False
    return True
def list_strings(arg):
    """
    Normalize a string or list of strings to a list of strings.
    :Examples:
    >>> list_strings('A single string')
    ['A single string']
    >>> list_strings(['A single string in a list'])
    ['A single string in a list']
    >>> list_strings(['A','list','of','strings'])
    ['A', 'list', 'of', 'strings']
    """
    return [arg] if is_string(arg) else arg
def marquee(text="", width=78, mark="*"):
    """
    Center ``text`` inside a banner of ``mark`` characters.
    Args:
        text (str): Input string
        width (int): Width of final output string.
        mark (str): Character used to fill string.
    :Examples:
    >>> marquee('A test', width=40)
    '**************** A test ****************'
    >>> marquee('A test', width=40, mark='-')
    '---------------- A test ----------------'
    marquee('A test',40, ' ')
    '                 A test                 '
    """
    if not text:
        # No text: just a solid bar of the requested width.
        return (mark * width)[:width]
    fill_count = max((width - len(text) - 2) // len(mark) // 2, 0)
    fill = mark * fill_count
    return " ".join([fill, text, fill])
def boxed(msg, ch="=", pad=5):
    """
    Return ``msg`` surrounded by a box drawn with ``ch``.
    Args:
        msg: Input string.
        ch: Character used to form the box.
        pad: Number of characters ch added before and after msg.
    >>> print(boxed("hello", ch="*", pad=2))
    ***********
    ** hello **
    ***********
    """
    if pad > 0:
        edge = ch * pad
        msg = edge + " " + msg.strip() + " " + edge
    bar = ch * len(msg)
    return f"{bar}\n{msg}\n{bar}"
def make_banner(s, width=78, mark="*"):
    """
    :param s: String
    :param width: Width of banner. Defaults to 78.
    :param mark: The mark used to create the banner.
    :return: A three-line banner: bar, marquee'd text, bar.
    """
    banner = marquee(s, width=width, mark=mark)
    bar = mark * len(banner)
    return f"\n{bar}\n{banner}\n{bar}"
def indent(lines, amount, ch=" "):
    """
    Prefix every line in ``lines`` with ``amount`` copies of ``ch``.
    """
    pad = ch * amount
    return "\n".join(pad + line for line in lines.split("\n"))
earwig/mwparserfromhell | tests/test_text.py | 1 | 2363 | # Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the Text node.
"""
import pytest
from mwparserfromhell.nodes import Text
def test_str():
    """test Text.__str__()"""
    plain = Text("foobar")
    assert str(plain) == "foobar"
    accented = Text("fóóbar")
    assert str(accented) == "fóóbar"
def test_children():
    """test Text.__children__()"""
    # A Text node is a leaf: its child generator yields nothing.
    gen = Text("foobar").__children__()
    with pytest.raises(StopIteration):
        next(gen)
def test_strip():
    """test Text.__strip__()"""
    node = Text("foobar")
    assert node.__strip__() is node
def test_showtree():
    """test Text.__showtree__()"""
    written = []
    # ASCII, Latin-1, and astral-plane text exercise the escaping paths.
    for text in ("foobar", "fóóbar", "𐌲𐌿𐍄"):
        Text(text).__showtree__(written.append, None, None)
    expected = ["foobar", r"f\xf3\xf3bar", "\\U00010332\\U0001033f\\U00010344"]
    assert written == expected
def test_value():
    """test getter/setter for the value attribute"""
    node = Text("foobar")
    assert node.value == "foobar"
    assert isinstance(node.value, str)
    node.value = "héhéhé"
    assert node.value == "héhéhé"
    assert isinstance(node.value, str)
materialsvirtuallab/monty | monty/os/__init__.py | 1 | 1232 | """
Os functions, e.g., cd, makedirs_p.
"""
import errno
import os
from contextlib import contextmanager
# Module metadata.
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "1/24/14"
@contextmanager
def cd(path):
    """
    A Fabric-inspired context manager that temporarily switches the working
    directory to ``path`` and restores the original directory on exit, even
    if the body raises. E.g.,
    with cd("/my/path/"):
        do_something()
    Args:
        path: Path to cd to.
    """
    original = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original)
def makedirs_p(path, **kwargs):
    """
    Wrapper for os.makedirs in the fashion of "mkdir -p": an already
    existing directory is not an error. The existence check is performed
    in a thread-safe way (try first, inspect the failure afterwards).
    Args:
        path: path of the directory to create
        kwargs: standard kwargs for os.makedirs
    """
    try:
        os.makedirs(path, **kwargs)
    except OSError as exc:
        # Re-raise unless the failure is exactly "already exists as a dir".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
materialsvirtuallab/monty | tasks.py | 1 | 4101 | #!/usr/bin/env python
"""
Deployment file to facilitate releases of monty.
"""
import datetime
import glob
import json
import os
import re
import requests
from invoke import task
from monty import __version__ as ver
from monty.os import cd
# Module metadata.
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Apr 29, 2012"
# Calendar-versioned release string, e.g. "2022.1.7".
# NOTE(review): "%-m"/"%-d" (no zero padding) is a glibc extension and is
# not supported on Windows strftime — confirm release is done on Unix.
NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d")
@task
def make_doc(ctx):
    """Regenerate the Sphinx API docs and publish the HTML into docs/.
    The generated monty*.rst pages are post-processed to fold each
    "Subpackages" block into the parent page and to drop test modules.
    """
    with cd("docs_rst"):
        ctx.run("sphinx-apidoc --separate -d 6 -o . -f ../monty")
        for f in glob.glob("*.rst"):
            # Only rewrite the generated per-module pages.
            if f.startswith("monty") and f.endswith("rst"):
                newoutput = []  # lines kept for the rewritten file
                suboutput = []  # buffered lines of the current subpackage block
                subpackage = False
                with open(f) as fid:
                    for line in fid:
                        clean = line.strip()
                        if clean == "Subpackages":
                            subpackage = True
                        if not subpackage and not clean.endswith("tests"):
                            newoutput.append(line)
                        else:
                            if not clean.endswith("tests"):
                                suboutput.append(line)
                            if clean.startswith("monty") and not clean.endswith("tests"):
                                # Flush the buffered block inline and resume
                                # normal copying.
                                newoutput.extend(suboutput)
                                subpackage = False
                                suboutput = []
                with open(f, "w") as fid:
                    fid.write("".join(newoutput))
        ctx.run("make html")
        # ctx.run("cp _static/* ../docs/html/_static")
    with cd("docs"):
        # Flatten the Sphinx build output into docs/ for GitHub Pages.
        ctx.run("cp -r html/* .")
        ctx.run("rm -r html")
        ctx.run("rm -r doctrees")
        ctx.run("rm -r _sources")
        # This makes sure monty.org works to redirect to the GitHub page
        # ctx.run("echo \"monty.org\" > CNAME")
        # Avoid the use of jekyll so that _dir works as intended.
        ctx.run("touch .nojekyll")
@task
def update_doc(ctx):
    """Pull the latest sources, rebuild the docs, and push the result."""
    ctx.run("git pull", warn=True)
    make_doc(ctx)
    ctx.run("git add .", warn=True)
    ctx.run('git commit -a -m "Update dev docs"', warn=True)
    ctx.run("git push", warn=True)
@task
def test(ctx):
    """Run the test suite with pytest."""
    ctx.run("pytest")
@task
def setver(ctx):
    """Rewrite the version=... line in setup.py (via sed) to the currently
    imported monty __version__."""
    ctx.run(f'sed s/version=.*,/version=\\"{ver}\\",/ setup.py > newsetup')
    ctx.run("mv newsetup setup.py")
@task
def release_github(ctx):
    """Create a GitHub release tagged v<NEW_VER>, using the newest
    changelog section as the release notes. Requires the
    GITHUB_RELEASES_TOKEN environment variable."""
    with open("docs_rst/changelog.rst") as f:
        changelog = f.read()
    # Sections are separated by runs of dashes; index 1 is the newest entry.
    sections = re.split(r"\-+", changelog)
    desc = sections[1].strip()
    # Drop the final line of the section before reposting it as the body.
    desc = "\n".join(desc.split("\n")[:-1]).strip()
    payload = {
        "tag_name": "v" + NEW_VER,
        "target_commitish": "master",
        "name": "v" + NEW_VER,
        "body": desc,
        "draft": False,
        "prerelease": False,
    }
    response = requests.post(
        "https://api.github.com/repos/materialsvirtuallab/monty/releases",
        data=json.dumps(payload),
        headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]},
    )
    print(response.text)
@task
def commit(ctx):
    """Commit all changes with a release message and push."""
    ctx.run('git commit -a -m "v%s release"' % NEW_VER, warn=True)
    ctx.run("git push", warn=True)
@task
def set_ver(ctx):
    """Stamp NEW_VER into monty/__init__.py (__version__) and setup.py
    (version=...) in place."""
    edits = [
        ("monty/__init__.py", r"__version__ = .*\n", '__version__ = "%s"\n' % NEW_VER),
        ("setup.py", r"version=([^,]+),", 'version="%s",' % NEW_VER),
    ]
    for fname, pattern, replacement in edits:
        with open(fname) as fh:
            text = fh.read()
        with open(fname, "w") as fh:
            fh.write(re.sub(pattern, replacement, text))
@task
def release(ctx):
    """Full release pipeline: stamp version, test, rebuild docs, commit,
    create the GitHub release, then build and upload to PyPI via twine."""
    set_ver(ctx)
    test(ctx)
    update_doc(ctx)
    commit(ctx)
    release_github(ctx)
    ctx.run("python setup.py sdist bdist_wheel", warn=True)
    ctx.run("twine upload --skip-existing dist/*.whl", warn=True)
    ctx.run("twine upload --skip-existing dist/*.tar.gz", warn=True)
materialsvirtuallab/monty | monty/subprocess.py | 1 | 2634 | """
Calling shell processes.
"""
import shlex
import threading
import traceback
from subprocess import PIPE, Popen
from .string import is_string
# Module metadata.
# NOTE(review): __author__ looks truncated compared to __maintainer__
# ("Giantomassi") — confirm.
__author__ = "Matteo Giantomass"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo@gmail.com"
__date__ = "10/26/14"
class Command:
    """
    Enables to run subprocess commands in a different thread with TIMEOUT
    option.
    Based on jcollado's solution:
    http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    and
    https://gist.github.com/kirpit/1306188
    .. attribute:: retcode
        Return code of the subprocess
    .. attribute:: killed
        True if subprocess has been killed due to the timeout
    .. attribute:: output
        stdout of the subprocess
    .. attribute:: error
        stderr of the subprocess
    Example:
        com = Command("sleep 1").run(timeout=2)
        print(com.retcode, com.killed, com.output, com.output)
    """
    def __init__(self, command):
        """
        :param command: Command to execute
        """
        # A string command is tokenized shell-style; a list is used as-is.
        if is_string(command):
            command = shlex.split(command)
        self.command = command
        self.process = None
        self.retcode = None
        # NOTE(review): initialized as str, but communicate() returns bytes
        # unless the caller passes text/universal_newlines kwargs — confirm
        # callers handle both.
        self.output, self.error = "", ""
        self.killed = False
    def __str__(self):
        return f"command: {self.command}, retcode: {self.retcode}"
    def run(self, timeout=None, **kwargs):
        """
        Run a command in a separated thread and wait timeout seconds.
        kwargs are keyword arguments passed to Popen.
        Return: self
        """
        def target(**kw):
            # Runs in the worker thread; results are stored on self.
            try:
                # print('Thread started')
                with Popen(self.command, **kw) as self.process:
                    self.output, self.error = self.process.communicate()
                self.retcode = self.process.returncode
                # print('Thread stopped')
            except Exception:
                # Any failure (including Popen errors) is captured as a
                # traceback string with retcode -1.
                self.error = traceback.format_exc()
                self.retcode = -1
        # default stdout and stderr
        if "stdout" not in kwargs:
            kwargs["stdout"] = PIPE
        if "stderr" not in kwargs:
            kwargs["stderr"] = PIPE
        # thread
        thread = threading.Thread(target=target, kwargs=kwargs)
        thread.start()
        # join(timeout) returns when the thread finishes or the timeout
        # elapses, whichever comes first.
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the child and wait for the thread to wind down.
            # NOTE(review): if Popen itself failed, self.process may still be
            # None here and terminate() would raise — confirm acceptable.
            # print("Terminating process")
            self.process.terminate()
            self.killed = True
            thread.join()
        return self
| mit | d9272a47371302c4f6a3dfa88e46866b | 24.823529 | 90 | 0.577828 | 4.2969 | false | false | false | false |
materialsvirtuallab/monty | monty/itertools.py | 1 | 2755 | """
Additional tools for iteration.
"""
import itertools
# numpy is an optional dependency; iterator_from_slice needs np.arange for
# bounded (possibly float-stepped) slices.
try:
    import numpy as np
except ImportError:
    np = None  # type: ignore
def chunks(items, n):
    """
    Yield successive tuples of at most ``n`` elements from a list-like
    object; the final chunk may be shorter.
    >>> import pprint
    >>> pprint.pprint(list(chunks(range(1, 25), 10)))
    [(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
     (11, 12, 13, 14, 15, 16, 17, 18, 19, 20),
     (21, 22, 23, 24)]
    """
    stream = iter(items)
    while True:
        piece = tuple(itertools.islice(stream, n))
        if not piece:
            return
        yield piece
def iterator_from_slice(s):
    """
    Construct an iterator from a slice object ``s``.
    Bounded slices are realized with ``np.arange`` (which supports float
    steps).
    .. note::
        The function returns an infinite iterator if s.stop is None
    """
    start = 0 if s.start is None else s.start
    step = 1 if s.step is None else s.step
    if s.stop is None:
        # Unbounded slice -> infinite arithmetic progression.
        return itertools.count(start=start, step=step)
    return iter(np.arange(start, s.stop, step))
def iuptri(items, diago=True, with_inds=False):
    """
    Generate the upper triangle of the matrix (items x items).
    Args:
        items: Iterable object with elements [e0, e1, ...]
        diago: False if diagonal matrix elements should be excluded
        with_inds: If True, (i,j) (e_i, e_j) is returned else (e_i, e_j)
    >>> for (ij, mate) in iuptri([0,1], with_inds=True):
    ...     print("ij:", ij, "mate:", mate)
    ij: (0, 0) mate: (0, 0)
    ij: (0, 1) mate: (0, 1)
    ij: (1, 1) mate: (1, 1)
    """
    for row, first in enumerate(items):
        for col, second in enumerate(items):
            # Skip the strict lower triangle (or the diagonal too when
            # diago is False).
            if (col < row) if diago else (col <= row):
                continue
            yield ((row, col), (first, second)) if with_inds else (first, second)
def ilotri(items, diago=True, with_inds=False):
    """
    Generate the lower triangle of the matrix (items x items).
    Args:
        items: Iterable object with elements [e0, e1, ...]
        diago: False if diagonal matrix elements should be excluded
        with_inds: If True, (i,j) (e_i, e_j) is returned else (e_i, e_j)
    >>> for (ij, mate) in ilotri([0,1], with_inds=True):
    ...     print("ij:", ij, "mate:", mate)
    ij: (0, 0) mate: (0, 0)
    ij: (1, 0) mate: (1, 0)
    ij: (1, 1) mate: (1, 1)
    """
    for row, first in enumerate(items):
        for col, second in enumerate(items):
            # Skip the strict upper triangle (or the diagonal too when
            # diago is False).
            if (col > row) if diago else (col >= row):
                continue
            yield ((row, col), (first, second)) if with_inds else (first, second)
earwig/mwparserfromhell | src/mwparserfromhell/nodes/extras/parameter.py | 1 | 2892 | # Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from ...string_mixin import StringMixIn
from ...utils import parse_anything
__all__ = ["Parameter"]
class Parameter(StringMixIn):
    """One parameter of a template.
    In ``{{foo|bar|spam=eggs}}`` there are two parameters: one with name
    ``"1"``, value ``"bar"``, and ``showkey=False``; and one with name
    ``"spam"``, value ``"eggs"``, and ``showkey=True``.
    """
    def __init__(self, name, value, showkey=True):
        super().__init__()
        # Assignment order matters: the showkey setter validates against
        # the already-set name.
        self.name = name
        self.value = value
        self.showkey = showkey
    def __str__(self):
        if not self.showkey:
            return str(self.value)
        return str(self.name) + "=" + str(self.value)
    @staticmethod
    def can_hide_key(key):
        """Return whether or not the given key can be hidden."""
        # Only purely numeric keys (no leading zero) may be implicit.
        return re.match(r"[1-9][0-9]*$", str(key).strip())
    @property
    def name(self):
        """The name of the parameter as a :class:`.Wikicode` object."""
        return self._name
    @name.setter
    def name(self, newval):
        self._name = parse_anything(newval)
    @property
    def value(self):
        """The value of the parameter as a :class:`.Wikicode` object."""
        return self._value
    @value.setter
    def value(self, newval):
        self._value = parse_anything(newval)
    @property
    def showkey(self):
        """Whether to show the parameter's key (i.e., its "name")."""
        return self._showkey
    @showkey.setter
    def showkey(self, newval):
        flag = bool(newval)
        if not flag and not self.can_hide_key(self.name):
            raise ValueError("parameter key {!r} cannot be hidden".format(self.name))
        self._showkey = flag
materialsvirtuallab/monty | monty/logging.py | 1 | 2224 | #!/usr/bin/env python
"""
Logging tools
"""
import argparse
import datetime
import functools
import logging
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def logged(level=logging.DEBUG):
    """
    Useful logging decorator. If a method is logged, the beginning and end of
    the method call will be logged at a pre-specified level, including the
    call's args and kwargs.
    Args:
        level: Level to log method at. Defaults to DEBUG.
    Returns:
        A decorator that wraps a callable with entry/exit logging.
    """
    def wrap(f):
        _logger = logging.getLogger(f"{f.__module__}.{f.__name__}")
        # functools.wraps preserves the wrapped callable's metadata
        # (__name__, __doc__, etc.), which the original version discarded.
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            _logger.log(
                level,
                f"Called at {datetime.datetime.now()} with args = {args} and kwargs = {kwargs}",
            )
            data = f(*args, **kwargs)
            _logger.log(
                level,
                f"Done at {datetime.datetime.now()} with args = {args} and kwargs = {kwargs}",
            )
            return data
        return wrapped_f
    return wrap
def enable_logging(main):
    """
    Decorator for simple main functions: initializes the logging module and
    adds a ``--loglevel`` command-line option that selects the log level.
    Useful when the main function calls libraries that use logging.
    Args:
        main:
            main function.
    """
    @functools.wraps(main)
    def wrapper(*args, **kwargs):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--loglevel",
            default="ERROR",
            type=str,
            help="Set the loglevel. Possible values: CRITICAL, ERROR (default)," "WARNING, INFO, DEBUG",
        )
        options = parser.parse_args()
        # The user may have typed --loglevel=debug; logging's level names
        # are upper case attributes of the logging module.
        numeric_level = getattr(logging, options.loglevel.upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError(f"Invalid log level: {options.loglevel}")
        logging.basicConfig(level=numeric_level)
        return main(*args, **kwargs)
    return wrapper
getnikola/nikola | nikola/plugins/task/page_index.py | 1 | 4438 | # -*- coding: utf-8 -*-
# Copyright © 2012-2022 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Render the page index."""
from nikola.plugin_categories import Taxonomy
class PageIndex(Taxonomy):
    """Taxonomy that files each page under its destination folder, producing
    per-folder index pages."""
    name = "classify_page_index"
    classification_name = "page_index_folder"
    overview_page_variable_name = "page_folder"
    more_than_one_classifications_per_post = False
    has_hierarchy = True
    include_posts_from_subhierarchies = False
    show_list_as_index = False
    template_for_single_list = "list.tmpl"
    template_for_classification_overview = None
    always_disable_rss = True
    always_disable_atom = True
    apply_to_posts = False
    apply_to_pages = True
    omit_empty_classifications = True
    path_handler_docstrings = {
        'page_index_folder_index': None,
        'page_index_folder': None,
        'page_index_folder_atom': None,
        'page_index_folder_rss': None,
    }
    def is_enabled(self, lang=None):
        """Return True if this taxonomy is enabled, or False otherwise."""
        return self.site.config["PAGE_INDEX"]
    def classify(self, post, lang):
        """Classify the given post for the given language."""
        dest = post.destination_path(lang, sep='/')
        if post.has_pretty_url(lang):
            # Pretty URLs end in /index.html; strip it before taking the
            # parent folder.
            suffix = '/index.html'
            if dest.endswith(suffix):
                dest = dest[:-len(suffix)]
        cut = dest.rfind('/')
        if cut < 0:
            return ['']
        return [dest[:cut]]
    def get_classification_friendly_name(self, dirname, lang, only_last_component=False):
        """Extract a friendly name from the classification."""
        return dirname
    def get_path(self, hierarchy, lang, dest_type='page'):
        """Return a path for the given classification."""
        return hierarchy, 'always'
    def extract_hierarchy(self, dirname):
        """Given a classification, return a list of parts in the hierarchy."""
        if not dirname:
            return []
        return dirname.split('/')
    def recombine_classification_from_hierarchy(self, hierarchy):
        """Given a list of parts in the hierarchy, return the classification string."""
        return '/'.join(hierarchy)
    def provide_context_and_uptodate(self, dirname, lang, node=None):
        """Provide data for the context and the uptodate list for the list of the given classifiation."""
        is_root = dirname == ''
        context = {
            "title": self.site.config['BLOG_TITLE'](lang),
            "pagekind": ["list", "front_page", "page_index"] if is_root else ["list", "page_index"],
            "kind": "page_index_folder",
            "classification": dirname,
            "has_no_feeds": True,
        }
        kw = {
            "translations": self.site.config['TRANSLATIONS'],
            "filters": self.site.config['FILTERS'],
        }
        kw.update(context)
        return context, kw
    def should_generate_classification_page(self, dirname, post_list, lang):
        """Only generates list of posts for classification if this function returns True."""
        # Skip the folder index when a page already claims the index file.
        index_path = dirname + '/' + self.site.config['INDEX_FILE']
        return all(
            post.destination_path(lang, sep='/') != index_path
            for post in post_list
        )
| mit | b47adaf9ab6a2b40a5c20a7d8939d0f8 | 38.972973 | 106 | 0.659004 | 4.154494 | false | false | false | false |
machow/siuba | docs/conf.py | 1 | 3376 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copied from https://github.com/spatialaudio/nbsphinx/blob/0.2.7/doc/conf.py
# Select nbsphinx and, if needed, add a math extension (mathjax or pngmath):
# Sphinx extensions; nbsphinx renders the notebook-based docs.
extensions = [
    'nbsphinx',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    #'sphinx.ext.mathjax',
    'sphinx.ext.autosectionlabel',
    #'sphinx_togglebutton',
]
# Exclude build directory and Jupyter backup files:
exclude_patterns = ['_build', '**.ipynb_checkpoints', '**.swp', 'draft*', 'scripts', '.*swp', '.~*.ipynb']
# Default language for syntax highlighting in reST and Markdown cells
highlight_language = 'none'
autosectionlabel_maxdepth = 2
# -- These set defaults that can be overridden through notebook metadata --
# See http://nbsphinx.readthedocs.org/en/latest/allow-errors.html
# and http://nbsphinx.readthedocs.org/en/latest/timeout.html for more details.
# If True, the build process is continued even if an exception occurs:
#nbsphinx_allow_errors = True
# Controls when a cell will time out (defaults to 30; use -1 for no timeout):
#nbsphinx_timeout = 60
# Default Pygments lexer for syntax highlighting in code cells
#nbsphinx_codecell_lexer = 'ipython3'
# -- The settings below this line are not specific to nbsphinx ------------
master_doc = 'index'
project = 'siuba'
author = 'Michael Chow'
copyright = '2019, ' + author
linkcheck_ignore = [r'http://localhost:\d+/']
# -- Get version information from Git -------------------------------------
# The release string comes from `git describe`; it falls back to a
# placeholder if git (or the repo) is unavailable.
# TODO: use mock to pull version info
try:
    from subprocess import check_output
    release = check_output(['git', 'describe', '--tags', '--always'])
    release = release.decode().strip()
except Exception:
    release = '<unknown>'
# -- Options for HTML output ----------------------------------------------
import alabaster
html_title = project + ' version ' + release
html_theme = 'alabaster'
#html_theme = "sphinx_rtd_theme"
html_theme_options = {
    #"description": "",
    "github_user": "machow",
    "github_repo": "siuba",
    "github_banner": True,
    "github_button": False,
    "fixed_sidebar": True,
    "sidebar_width": "260px"
}
html_static_path = ['_static']
html_js_files = ['custom.js']
# -- nbsphinx customization ---------------------------------------------------
# Allow jupytext .Rmd files to be treated as notebooks.
import jupytext
nbsphinx_custom_formats = {
    '.Rmd': lambda s: jupytext.reads(s, '.Rmd'),
}
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_epilog = r"""
{% set docname = env.doc2path(env.docname, base='docs') %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
Edit page on github `here`__.
Interactive version:
:raw-html:`<a href="https://mybinder.org/v2/gh/machow/siuba/{{ env.config.release }}?filepath={{ docname }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/machow/siuba/blob/
{{ env.config.release }}/{{ docname }}
"""
# -- Options for LaTeX output ---------------------------------------------
#latex_elements = {
# 'papersize': 'a4paper',
# 'preamble': r"""
#\usepackage{lmodern} % heavier typewriter font
#""",
#}
#
#latex_documents = [
# (master_doc, 'nbsphinx.tex', project, author, 'howto'),
#]
#
#latex_show_urls = 'footnote'
| mit | d3de219830005e90c8045a17156d3f91 | 25.582677 | 223 | 0.625889 | 3.40666 | false | false | false | false |
getnikola/nikola | tests/test_slugify.py | 1 | 2219 | """Test slugify."""
import pytest
import nikola.utils
@pytest.mark.parametrize(
    "title, language, expected_slug",
    [
        pytest.param("hello", "en", "hello", id="ASCII"),
        pytest.param("hello-world", "en", "hello-world", id="ASCII with dashes"),
        pytest.param("hello world", "en", "hello-world", id="ASCII two words"),
        pytest.param("Hello World", "en", "hello-world", id="ASCII uppercase"),
        pytest.param(
            "The quick brown fox jumps over the lazy dog!-123.456",
            "en",
            "the-quick-brown-fox-jumps-over-the-lazy-dog-123456",
            id="ASCII with fancy characters",
        ),
        pytest.param(
            "zażółćgęśląjaźń",
            "pl",
            "zazolcgeslajazn",
            id="Polish diacritical characters",
        ),
        pytest.param(
            "zażółć-gęślą-jaźń",
            "pl",
            "zazolc-gesla-jazn",
            id="Polish diacritical characters and dashes",
        ),
        pytest.param(
            "Zażółć gęślą jaźń!-123.456",
            "pl",
            "zazolc-gesla-jazn-123456",
            id="Polish diacritical characters and fancy characters",
        ),
    ],
)
def test_slugify(title, language, expected_slug):
    """Titles are slugified into ASCII-safe slugs and always plain ``str``."""
    slug = nikola.utils.slugify(title, lang=language)
    assert slug == expected_slug
    assert isinstance(slug, str)
@pytest.mark.parametrize(
    "title, expected_slug",
    [
        pytest.param(
            u"Zażółć gęślą jaźń!-123.456", u"Zażółć gęślą jaźń!-123.456", id="polish"
        ),
        # Even when slugify is disarmed, characters that are illegal in file
        # names (e.g. <>:"/\|?* and control characters) get replaced by "-".
        pytest.param(
            u'Zażółć gęślą jaźń!-123.456 "Hello World"?#H<e>l/l\\o:W\'o\rr*l\td|!\n',
            u"Zażółć gęślą jaźń!-123.456 -Hello World---H-e-l-l-o-W-o-r-l-d-!-",
            id="polish with banned characters",
        ),
    ],
)
def test_disarmed(disarm_slugify, title, expected_slug):
    """With slugify disarmed, titles pass through except for banned characters."""
    o = nikola.utils.slugify(title, lang="pl")
    assert o == expected_slug
    assert isinstance(o, str)
@pytest.fixture
def disarm_slugify():
    """Disable slugification (``nikola.utils.USE_SLUGIFY``) for one test."""
    nikola.utils.USE_SLUGIFY = False
    try:
        yield
    finally:
        # Restore the default even if the test body raises.
        nikola.utils.USE_SLUGIFY = True
| mit | 63b6bcb06347c6dd395d9f0670b041fe | 28.534247 | 85 | 0.560297 | 2.833114 | false | true | false | false |
postlund/pyatv | pyatv/protocols/raop/__init__.py | 1 | 20847 | """Support for audio streaming using Remote Audio Output Protocol (RAOP)."""
import asyncio
import io
import logging
import math
from os import path
from typing import Any, Dict, Generator, Mapping, Optional, Set, Tuple, Union, cast
from pyatv import const, exceptions
from pyatv.auth.hap_pairing import AuthenticationType, parse_credentials
from pyatv.const import (
DeviceModel,
FeatureName,
FeatureState,
PairingRequirement,
Protocol,
)
from pyatv.core import (
AbstractPushUpdater,
Core,
MutableService,
ProtocolStateDispatcher,
SetupData,
StateMessage,
UpdatedState,
mdns,
)
from pyatv.core.scan import (
ScanHandlerDeviceInfoName,
ScanHandlerReturn,
device_info_name_from_unique_short_name,
)
from pyatv.helpers import get_unique_id
from pyatv.interface import (
Audio,
BaseConfig,
BaseService,
DeviceInfo,
FeatureInfo,
Features,
Metadata,
PairingHandler,
Playing,
PushUpdater,
RemoteControl,
Stream,
)
from pyatv.protocols.airplay import service_info as airplay_service_info
from pyatv.protocols.airplay.pairing import AirPlayPairingHandler
from pyatv.protocols.airplay.utils import AirPlayFlags, parse_features
from pyatv.protocols.raop.audio_source import AudioSource, open_source
from pyatv.protocols.raop.raop import (
PlaybackInfo,
RaopClient,
RaopContext,
RaopListener,
)
from pyatv.support import map_range
from pyatv.support.collections import dict_merge
from pyatv.support.device_info import lookup_model
from pyatv.support.http import ClientSessionManager, HttpConnection, http_connect
from pyatv.support.metadata import EMPTY_METADATA, AudioMetadata, get_metadata
from pyatv.support.rtsp import RtspSession
_LOGGER = logging.getLogger(__name__)
# Volume level (percent) assumed before the receiver or another protocol
# reports one (see RaopAudio.volume).
INITIAL_VOLUME = 33.0  # Percent
# RAOP expresses volume in dBFS; [DBFS_MIN, DBFS_MAX] is mapped linearly onto
# [PERCENTAGE_MIN, PERCENTAGE_MAX], and anything below DBFS_MIN is treated as muted.
DBFS_MIN = -30.0
DBFS_MAX = 0.0
PERCENTAGE_MIN = 0.0
PERCENTAGE_MAX = 100.0
class RaopPushUpdater(AbstractPushUpdater):
    """Push update support for the RAOP protocol."""

    def __init__(self, metadata: Metadata, state_dispatcher: ProtocolStateDispatcher):
        """Initialize a new RaopPushUpdater instance."""
        super().__init__(state_dispatcher)
        self.metadata = metadata
        self._activated = False

    @property
    def active(self) -> bool:
        """Return if push updater has been started."""
        return self._activated

    def start(self, initial_delay: int = 0) -> None:
        """Begin to listen to updates.

        If an error occurs, start must be called again.
        """
        if self.listener is None:
            raise exceptions.NoAsyncListenerError()
        self._activated = True
        # Push one update immediately so the listener sees the current state
        asyncio.ensure_future(self.state_updated())

    def stop(self) -> None:
        """No longer forward updates to listener."""
        self._activated = False

    async def state_updated(self):
        """State was updated, call listener."""
        try:
            self.post_update(await self.metadata.playing())
        except Exception as ex:  # pylint: disable=broad-except
            # Best effort: a failed metadata fetch must not propagate
            _LOGGER.debug("Playstatus error occurred: %s", ex)
class RaopPlaybackManager:
    """Manage current play state for RAOP.

    Owns the connection/RTSP/RAOP session objects for one receiver and
    ensures only one stream at a time can use them (acquire/teardown).
    """

    def __init__(self, address: str, port: int) -> None:
        """Initialize a new RaopPlaybackManager instance."""
        self.playback_info: Optional[PlaybackInfo] = None
        self._is_acquired: bool = False
        self._address: str = address
        self._port: int = port
        self._context: RaopContext = RaopContext()
        self._connection: Optional[HttpConnection] = None
        self._rtsp: Optional[RtspSession] = None
        self._raop: Optional[RaopClient] = None

    @property
    def context(self) -> RaopContext:
        """Return RTSP context if a session is active."""
        return self._context

    @property
    def raop(self) -> Optional[RaopClient]:
        """Return RAOP client if a session is active."""
        return self._raop

    def acquire(self) -> None:
        """Acquire playback manager for playback.

        Raises:
            InvalidStateError: If a stream is already in progress.
        """
        if self._is_acquired:
            raise exceptions.InvalidStateError("already streaming to device")
        self._is_acquired = True

    async def setup(self) -> Tuple[RaopClient, RtspSession, RaopContext]:
        """Set up a session or return active if it exists."""
        if self._raop and self._rtsp and self._context:
            return self._raop, self._rtsp, self._context

        self._connection = await http_connect(self._address, self._port)
        self._rtsp = RtspSession(self._connection)
        self._raop = RaopClient(self._rtsp, self._context)
        return self._raop, self._rtsp, self._context

    async def teardown(self) -> None:
        """Tear down and disconnect current session."""
        if self._raop:
            self._raop.close()
        if self._connection:
            # Close the underlying connection; previously it was only
            # dereferenced here, leaking the open socket.
            self._connection.close()
        self._raop = None
        self._context.reset()
        self._rtsp = None
        self._connection = None
        self._is_acquired = False
class RaopMetadata(Metadata):
    """Metadata interface implementation for RAOP."""

    def __init__(self, playback_manager: RaopPlaybackManager) -> None:
        """Initialize a new RaopMetadata instance."""
        self._playback_manager = playback_manager

    async def playing(self) -> Playing:
        """Return what is currently playing."""
        info = self._playback_manager.playback_info
        if info is None:
            # Nothing is being streamed at the moment
            return Playing(
                device_state=const.DeviceState.Idle, media_type=const.MediaType.Unknown
            )

        meta = info.metadata
        if meta.duration:
            total_time = int(meta.duration)
        else:
            total_time = None
        return Playing(
            device_state=const.DeviceState.Playing,
            media_type=const.MediaType.Music,
            title=meta.title,
            artist=meta.artist,
            album=meta.album,
            position=int(info.position),
            total_time=total_time,
        )
class RaopFeatures(Features):
    """Implementation of supported feature functionality."""

    def __init__(self, playback_manager: RaopPlaybackManager) -> None:
        """Initialize a new RaopFeatures instance."""
        self.playback_manager = playback_manager

    def get_feature(  # pylint: disable=too-many-return-statements
        self, feature_name: FeatureName
    ) -> FeatureInfo:
        """Return current state of a feature."""
        if feature_name == FeatureName.StreamFile:
            return FeatureInfo(FeatureState.Available)

        info = self.playback_manager.playback_info
        metadata = info.metadata if info else EMPTY_METADATA

        if feature_name == FeatureName.Title:
            return self._availability(metadata.title)
        if feature_name == FeatureName.Artist:
            return self._availability(metadata.artist)
        if feature_name == FeatureName.Album:
            return self._availability(metadata.album)
        if feature_name in (FeatureName.Position, FeatureName.TotalTime):
            return self._availability(metadata.duration)

        # As far as known, volume controls are always supported
        if feature_name in (
            FeatureName.SetVolume,
            FeatureName.Volume,
            FeatureName.VolumeDown,
            FeatureName.VolumeUp,
        ):
            return FeatureInfo(FeatureState.Available)

        # Stop/pause only make sense while a stream is active
        if feature_name in (FeatureName.Stop, FeatureName.Pause):
            if self.playback_manager.raop is not None:
                return FeatureInfo(FeatureState.Available)
            return FeatureInfo(FeatureState.Unavailable)

        return FeatureInfo(FeatureState.Unavailable)

    @staticmethod
    def _availability(value):
        state = FeatureState.Available if value else FeatureState.Unavailable
        return FeatureInfo(state)
class RaopAudio(Audio):
    """Implementation of audio functionality."""
    def __init__(
        self,
        playback_manager: RaopPlaybackManager,
        state_dispatcher: ProtocolStateDispatcher,
    ):
        """Initialize a new RaopAudio instance."""
        self.playback_manager = playback_manager
        self.state_dispatcher = state_dispatcher
        self.state_dispatcher.listen_to(UpdatedState.Volume, self._volume_changed)
    # Intercept volume changes by other protocols and update accordingly. We
    # blindly trust any volume we see here as it's a much better guess than we have.
    def _volume_changed(self, message: StateMessage) -> None:
        """State of something changed."""
        volume = cast(float, message.value)
        _LOGGER.debug("Protocol %s changed volume to %f", message.protocol.name, volume)
        # Stored as dBFS, which is what the RAOP context uses internally
        self.playback_manager.context.volume = RaopAudio._pct_to_dbfs(volume)
    @property
    def has_changed_volume(self) -> bool:
        """Return whether volume has changed from default or not."""
        return self.playback_manager.context.volume is not None
    @property
    def volume(self) -> float:
        """Return current volume level (percent, 0.0-100.0)."""
        vol = self.playback_manager.context.volume
        if vol is None:
            return INITIAL_VOLUME
        # AirPlay uses -144.0 as "muted", but we treat everything below -30.0 as
        # muted to be a bit defensive
        if vol < DBFS_MIN:
            return PERCENTAGE_MIN
        # Map dBFS to percentage
        return map_range(vol, DBFS_MIN, DBFS_MAX, PERCENTAGE_MIN, PERCENTAGE_MAX)
    async def set_volume(self, level: float) -> None:
        """Change current volume level (percent, 0.0-100.0)."""
        raop = self.playback_manager.raop
        dbfs_volume = RaopAudio._pct_to_dbfs(level)
        if raop:
            # Active stream: forward the new volume to the RAOP client
            await raop.set_volume(dbfs_volume)
        else:
            # No active stream: remember the level for the next stream
            self.playback_manager.context.volume = dbfs_volume
        # Let other protocols know about the change
        self.state_dispatcher.dispatch(UpdatedState.Volume, self.volume)
    async def volume_up(self) -> None:
        """Increase volume by one step (5%, capped at 100%)."""
        await self.set_volume(min(self.volume + 5.0, 100.0))
    async def volume_down(self) -> None:
        """Decrease volume by one step (5%, floored at 0%)."""
        await self.set_volume(max(self.volume - 5.0, 0.0))
    @staticmethod
    def _pct_to_dbfs(level: float) -> float:
        """Convert a percentage volume to the dBFS value AirPlay expects."""
        # AirPlay uses -144.0 as muted volume, so re-map 0.0 to that
        if math.isclose(level, 0.0):
            return -144.0
        # Map percentage to dBFS
        return map_range(level, PERCENTAGE_MIN, PERCENTAGE_MAX, DBFS_MIN, DBFS_MAX)
class RaopStream(Stream):
    """Implementation of stream functionality."""

    def __init__(
        self,
        core: Core,
        listener: RaopListener,
        audio: RaopAudio,
        playback_manager: RaopPlaybackManager,
    ) -> None:
        """Initialize a new RaopStream instance."""
        self.core = core
        self.listener = listener
        self.audio = audio
        self.playback_manager = playback_manager

    async def stream_file(self, file: Union[str, io.BufferedReader], **kwargs) -> None:
        """Stream local or remote file to device.

        Supports either local file paths or a HTTP(s) address.

        INCUBATING METHOD - MIGHT CHANGE IN THE FUTURE!
        """
        # Fails if another stream is already in progress
        self.playback_manager.acquire()
        audio_file: Optional[AudioSource] = None
        # Take exclusive control of these interfaces while streaming
        takeover_release = self.core.takeover(
            Audio, Metadata, PushUpdater, RemoteControl
        )
        try:
            client, _, context = await self.playback_manager.setup()
            client.credentials = parse_credentials(self.core.service.credentials)
            client.password = self.core.service.password
            client.listener = self.listener
            await client.initialize(self.core.service.properties)

            # Try to load metadata and pass it along if it succeeds
            metadata: AudioMetadata = EMPTY_METADATA
            try:
                # Source must support seeking to read metadata (or point to file)
                if (isinstance(file, io.BufferedReader) and file.seekable()) or (
                    isinstance(file, str) and path.exists(file)
                ):
                    metadata = await get_metadata(file)
                else:
                    _LOGGER.debug(
                        "Seeking not supported by source, not loading metadata"
                    )
            except Exception as ex:
                _LOGGER.exception("Failed to extract metadata from %s: %s", file, ex)

            # After initialize has been called, all the audio properties will be
            # initialized and can be used in the miniaudio wrapper
            audio_file = await open_source(
                file,
                context.sample_rate,
                context.channels,
                context.bytes_per_channel,
            )

            # If the user didn't change volume level prior to streaming, try to extract
            # volume level from device (if supported). Otherwise set the default level
            # in pyatv.
            if not self.audio.has_changed_volume and "initialVolume" in client.info:
                initial_volume = client.info["initialVolume"]
                if not isinstance(initial_volume, float):
                    # Fixed: second literal previously lacked the "f" prefix, so
                    # "{type(initial_volume)}" was emitted verbatim in the message
                    raise exceptions.ProtocolError(
                        f"initial volume {initial_volume} has "
                        f"incorrect type {type(initial_volume)}",
                    )
                context.volume = initial_volume
            else:
                await self.audio.set_volume(self.audio.volume)

            await client.send_audio(audio_file, metadata)
        finally:
            # Release interfaces, close the source and tear down the session
            # regardless of how streaming ended
            takeover_release()
            if audio_file:
                await audio_file.close()
            await self.playback_manager.teardown()
class RaopRemoteControl(RemoteControl):
    """Implementation of remote control functionality."""

    def __init__(self, audio: RaopAudio, playback_manager: RaopPlaybackManager):
        """Initialize a new RaopRemoteControl instance."""
        self.audio = audio
        self.playback_manager = playback_manager

    # At the moment, pause will stop playback until it is properly implemented. This
    # gives a better experience in Home Assistant.
    async def pause(self) -> None:
        """Press key pause."""
        if self.playback_manager.raop:
            self.playback_manager.raop.stop()

    async def stop(self) -> None:
        """Press key stop."""
        if self.playback_manager.raop:
            self.playback_manager.raop.stop()

    async def volume_up(self) -> None:
        """Press key volume up."""
        # Delegate to RaopAudio so the step logic (min(volume + 5, 100)) lives
        # in one place instead of being duplicated here.
        await self.audio.volume_up()

    async def volume_down(self) -> None:
        """Press key volume down."""
        await self.audio.volume_down()
def raop_name_from_service_name(service_name: str) -> str:
    """Extract the device name from an RAOP mDNS service name.

    Service names have the form ``<id>@<name>``; everything after the first
    ``@`` is the name. Names without an ``@`` are returned unchanged.
    """
    identifier, separator, remainder = service_name.partition("@")
    return remainder if separator else identifier
def raop_service_handler(
    mdns_service: mdns.Service, response: mdns.Response
) -> Optional[ScanHandlerReturn]:
    """Parse and return a new RAOP service."""
    service = MutableService(
        get_unique_id(mdns_service.type, mdns_service.name, mdns_service.properties),
        Protocol.RAOP,
        mdns_service.port,
        mdns_service.properties,
    )
    return raop_name_from_service_name(mdns_service.name), service
def scan() -> Mapping[str, ScanHandlerDeviceInfoName]:
    """Return handlers used for scanning."""
    return {
        "_raop._tcp.local": (raop_service_handler, raop_name_from_service_name),
        # No service is created for _airport._tcp.local, but its properties
        # still contribute device information (see "wama" in device_info)
        "_airport._tcp.local": (
            lambda service, response: None,
            device_info_name_from_unique_short_name,
        ),
    }
def device_info(service_type: str, properties: Mapping[str, Any]) -> Dict[str, Any]:
    """Return device information from zeroconf properties."""
    devinfo: Dict[str, Any] = {}
    # "am" holds the raw model string, e.g. mapped to a known DeviceModel
    if "am" in properties:
        model = lookup_model(properties["am"])
        devinfo[DeviceInfo.RAW_MODEL] = properties["am"]
        if model != DeviceModel.Unknown:
            devinfo[DeviceInfo.MODEL] = model
    if "ov" in properties:
        devinfo[DeviceInfo.VERSION] = properties["ov"]
    # This comes from _airport._tcp.local and belongs to AirPort Expresses
    if "wama" in properties:
        # "wama" is a comma separated key=value list whose first value is the
        # MAC address without its "macaddress=" key, so prepend it before parsing
        props: Mapping[str, str] = dict(
            cast(Tuple[str, str], prop.split("=", maxsplit=1))
            for prop in ("macaddress=" + properties["wama"]).split(",")
        )
        if DeviceInfo.MAC not in devinfo:
            devinfo[DeviceInfo.MAC] = props["macaddress"].replace("-", ":").upper()
        if "syVs" in props:
            devinfo[DeviceInfo.VERSION] = props["syVs"]
    return devinfo
async def service_info(
    service: MutableService,
    devinfo: DeviceInfo,
    services: Mapping[Protocol, BaseService],
) -> None:
    """Update service with additional information (pairing requirements)."""
    airplay_service = services.get(Protocol.AirPlay)
    # "acl" == "1" means access control is enabled on the AirPlay service
    if airplay_service and airplay_service.properties.get("acl", "0") == "1":
        # Access control might say that pairing is not possible, e.g. only devices
        # belonging to the same home (not supported by pyatv)
        service.pairing = PairingRequirement.Disabled
    else:
        # Same behavior as for AirPlay expected, so re-using that here
        await airplay_service_info(service, devinfo, services)
def setup(  # pylint: disable=too-many-locals
    core: Core,
) -> Generator[SetupData, None, None]:
    """Set up a new RAOP service."""
    # One playback manager per receiver; shared by all interface objects below
    playback_manager = RaopPlaybackManager(str(core.config.address), core.service.port)
    metadata = RaopMetadata(playback_manager)
    push_updater = RaopPushUpdater(metadata, core.state_dispatcher)
    # Closure-based listener: bridges RAOP client callbacks to the playback
    # manager and push updater defined above
    class RaopStateListener(RaopListener):
        """Listener for RAOP state changes."""
        def playing(self, playback_info: PlaybackInfo) -> None:
            """Media started playing with metadata."""
            playback_manager.playback_info = playback_info
            self._trigger()
        def stopped(self) -> None:
            """Media stopped playing."""
            playback_manager.playback_info = None
            self._trigger()
        @staticmethod
        def _trigger():
            """Trigger push update."""
            if push_updater.active:
                asyncio.ensure_future(push_updater.state_updated())
    raop_listener = RaopStateListener()
    raop_audio = RaopAudio(playback_manager, core.state_dispatcher)
    interfaces = {
        Stream: RaopStream(core, raop_listener, raop_audio, playback_manager),
        Features: RaopFeatures(playback_manager),
        PushUpdater: push_updater,
        Metadata: metadata,
        Audio: raop_audio,
        RemoteControl: RaopRemoteControl(raop_audio, playback_manager),
    }
    # RAOP has no persistent connection; sessions are set up per stream
    async def _connect() -> bool:
        return True
    def _close() -> Set[asyncio.Task]:
        return set()
    def _device_info() -> Dict[str, Any]:
        devinfo: Dict[str, Any] = {}
        # Merge device info from all zeroconf service types we scan for
        for service_type in scan():
            properties = core.config.properties.get(service_type)
            if properties:
                dict_merge(devinfo, device_info(service_type, properties))
        return devinfo
    yield SetupData(
        Protocol.RAOP,
        _connect,
        _close,
        _device_info,
        interfaces,
        set(
            [
                FeatureName.StreamFile,
                FeatureName.PushUpdates,
                FeatureName.Artist,
                FeatureName.Album,
                FeatureName.Title,
                FeatureName.Position,
                FeatureName.TotalTime,
                FeatureName.SetVolume,
                FeatureName.Volume,
                FeatureName.VolumeUp,
                FeatureName.VolumeDown,
                FeatureName.Stop,
                FeatureName.Pause,
            ]
        ),
    )
def pair(
    config: BaseConfig,
    service: BaseService,
    session_manager: ClientSessionManager,
    loop: asyncio.AbstractEventLoop,
    **kwargs
) -> PairingHandler:
    """Return pairing handler for protocol."""
    # "ft" advertises AirPlay feature flags; without it we cannot tell
    # whether (or how) pairing is supported
    features = service.properties.get("ft")
    if not features:
        # TODO: Better handle cases like these (provide API)
        raise exceptions.NotSupportedError("pairing not required")
    flags = parse_features(features)
    if AirPlayFlags.SupportsLegacyPairing not in flags:
        raise exceptions.NotSupportedError("legacy pairing not supported")
    return AirPlayPairingHandler(
        config, service, session_manager, AuthenticationType.Legacy, **kwargs
    )
| mit | bfc1cad2dc8bd8f809402065269e1fd7 | 33.861204 | 88 | 0.629491 | 4.130573 | false | false | false | false |
pre-commit/pre-commit | pre_commit/languages/dotnet.py | 1 | 2616 | from __future__ import annotations
import contextlib
import os.path
from typing import Generator
from typing import Sequence
import pre_commit.constants as C
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.envcontext import Var
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
ENVIRONMENT_DIR = 'dotnetenv'
# Subdirectory of the environment where dotnet tools are installed
BIN_DIR = 'bin'
# Re-use the generic implementations; dotnet accepts only the default
# language version and no additional dependencies (see install_environment)
get_default_version = helpers.basic_get_default_version
health_check = helpers.basic_health_check
def get_env_patch(venv: str) -> PatchesT:
    """Build the PATH patch that puts the environment's bin dir first."""
    bin_path = os.path.join(venv, BIN_DIR)
    return (
        ('PATH', (bin_path, os.pathsep, Var('PATH'))),
    )
@contextlib.contextmanager
def in_env(prefix: Prefix) -> Generator[None, None, None]:
    """Run with the dotnet environment's bin directory on PATH."""
    envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT))
    with envcontext(get_env_patch(envdir)):
        yield
def install_environment(
    prefix: Prefix,
    version: str,
    additional_dependencies: Sequence[str],
) -> None:
    """Pack the repo as a nupkg and install its tool(s) into the env's bin dir."""
    helpers.assert_version_default('dotnet', version)
    helpers.assert_no_additional_deps('dotnet', additional_dependencies)
    envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
    with clean_path_on_failure(envdir):
        build_dir = 'pre-commit-build'
        # Build & pack nupkg file
        helpers.run_setup_cmd(
            prefix,
            (
                'dotnet', 'pack',
                '--configuration', 'Release',
                '--output', build_dir,
            ),
        )
        # Determine tool from the packaged file <tool_name>.<version>.nupkg
        # NOTE(review): split('.')[0] truncates package ids that themselves
        # contain dots (e.g. "Some.Tool") -- confirm whether dotted ids need
        # to be supported here.
        build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))
        for output in build_outputs:
            tool_name = output.split('.')[0]
            # Install to bin dir
            helpers.run_setup_cmd(
                prefix,
                (
                    'dotnet', 'tool', 'install',
                    '--tool-path', os.path.join(envdir, BIN_DIR),
                    '--add-source', build_dir,
                    tool_name,
                ),
            )
        # Clean the git dir, ignoring the environment dir
        clean_cmd = ('git', 'clean', '-ffxd', '-e', f'{ENVIRONMENT_DIR}-*')
        helpers.run_setup_cmd(prefix, clean_cmd)
def run_hook(
    hook: Hook,
    file_args: Sequence[str],
    color: bool,
) -> tuple[int, bytes]:
    """Run the hook with the dotnet environment's bin dir on PATH."""
    with in_env(hook.prefix):
        return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
| mit | 36d76866c0812fe12aa1c35dc4f025df | 29.068966 | 78 | 0.61315 | 3.780347 | false | false | false | false |
pre-commit/pre-commit | pre_commit/languages/all.py | 1 | 6488 | from __future__ import annotations
from typing import Callable
from typing import NamedTuple
from typing import Sequence
from pre_commit.hook import Hook
from pre_commit.languages import conda
from pre_commit.languages import coursier
from pre_commit.languages import dart
from pre_commit.languages import docker
from pre_commit.languages import docker_image
from pre_commit.languages import dotnet
from pre_commit.languages import fail
from pre_commit.languages import golang
from pre_commit.languages import lua
from pre_commit.languages import node
from pre_commit.languages import perl
from pre_commit.languages import pygrep
from pre_commit.languages import python
from pre_commit.languages import r
from pre_commit.languages import ruby
from pre_commit.languages import rust
from pre_commit.languages import script
from pre_commit.languages import swift
from pre_commit.languages import system
from pre_commit.prefix import Prefix
class Language(NamedTuple):
    """Static description of how one hook language is installed and run."""
    name: str
    # Use `None` for no installation / environment
    ENVIRONMENT_DIR: str | None
    # return a value to replace `'default` for `language_version`
    get_default_version: Callable[[], str]
    # return whether the environment is healthy (or should be rebuilt)
    health_check: Callable[[Prefix, str], str | None]
    # install a repository for the given language and language_version
    install_environment: Callable[[Prefix, str, Sequence[str]], None]
    # execute a hook and return the exit code and output
    run_hook: Callable[[Hook, Sequence[str], bool], tuple[int, bytes]]
# TODO: back to modules + Protocol: https://github.com/python/mypy/issues/5018
languages = {
# BEGIN GENERATED (testing/gen-languages-all)
'conda': Language(name='conda', ENVIRONMENT_DIR=conda.ENVIRONMENT_DIR, get_default_version=conda.get_default_version, health_check=conda.health_check, install_environment=conda.install_environment, run_hook=conda.run_hook), # noqa: E501
'coursier': Language(name='coursier', ENVIRONMENT_DIR=coursier.ENVIRONMENT_DIR, get_default_version=coursier.get_default_version, health_check=coursier.health_check, install_environment=coursier.install_environment, run_hook=coursier.run_hook), # noqa: E501
'dart': Language(name='dart', ENVIRONMENT_DIR=dart.ENVIRONMENT_DIR, get_default_version=dart.get_default_version, health_check=dart.health_check, install_environment=dart.install_environment, run_hook=dart.run_hook), # noqa: E501
'docker': Language(name='docker', ENVIRONMENT_DIR=docker.ENVIRONMENT_DIR, get_default_version=docker.get_default_version, health_check=docker.health_check, install_environment=docker.install_environment, run_hook=docker.run_hook), # noqa: E501
'docker_image': Language(name='docker_image', ENVIRONMENT_DIR=docker_image.ENVIRONMENT_DIR, get_default_version=docker_image.get_default_version, health_check=docker_image.health_check, install_environment=docker_image.install_environment, run_hook=docker_image.run_hook), # noqa: E501
'dotnet': Language(name='dotnet', ENVIRONMENT_DIR=dotnet.ENVIRONMENT_DIR, get_default_version=dotnet.get_default_version, health_check=dotnet.health_check, install_environment=dotnet.install_environment, run_hook=dotnet.run_hook), # noqa: E501
'fail': Language(name='fail', ENVIRONMENT_DIR=fail.ENVIRONMENT_DIR, get_default_version=fail.get_default_version, health_check=fail.health_check, install_environment=fail.install_environment, run_hook=fail.run_hook), # noqa: E501
'golang': Language(name='golang', ENVIRONMENT_DIR=golang.ENVIRONMENT_DIR, get_default_version=golang.get_default_version, health_check=golang.health_check, install_environment=golang.install_environment, run_hook=golang.run_hook), # noqa: E501
'lua': Language(name='lua', ENVIRONMENT_DIR=lua.ENVIRONMENT_DIR, get_default_version=lua.get_default_version, health_check=lua.health_check, install_environment=lua.install_environment, run_hook=lua.run_hook), # noqa: E501
'node': Language(name='node', ENVIRONMENT_DIR=node.ENVIRONMENT_DIR, get_default_version=node.get_default_version, health_check=node.health_check, install_environment=node.install_environment, run_hook=node.run_hook), # noqa: E501
'perl': Language(name='perl', ENVIRONMENT_DIR=perl.ENVIRONMENT_DIR, get_default_version=perl.get_default_version, health_check=perl.health_check, install_environment=perl.install_environment, run_hook=perl.run_hook), # noqa: E501
'pygrep': Language(name='pygrep', ENVIRONMENT_DIR=pygrep.ENVIRONMENT_DIR, get_default_version=pygrep.get_default_version, health_check=pygrep.health_check, install_environment=pygrep.install_environment, run_hook=pygrep.run_hook), # noqa: E501
'python': Language(name='python', ENVIRONMENT_DIR=python.ENVIRONMENT_DIR, get_default_version=python.get_default_version, health_check=python.health_check, install_environment=python.install_environment, run_hook=python.run_hook), # noqa: E501
'r': Language(name='r', ENVIRONMENT_DIR=r.ENVIRONMENT_DIR, get_default_version=r.get_default_version, health_check=r.health_check, install_environment=r.install_environment, run_hook=r.run_hook), # noqa: E501
'ruby': Language(name='ruby', ENVIRONMENT_DIR=ruby.ENVIRONMENT_DIR, get_default_version=ruby.get_default_version, health_check=ruby.health_check, install_environment=ruby.install_environment, run_hook=ruby.run_hook), # noqa: E501
'rust': Language(name='rust', ENVIRONMENT_DIR=rust.ENVIRONMENT_DIR, get_default_version=rust.get_default_version, health_check=rust.health_check, install_environment=rust.install_environment, run_hook=rust.run_hook), # noqa: E501
'script': Language(name='script', ENVIRONMENT_DIR=script.ENVIRONMENT_DIR, get_default_version=script.get_default_version, health_check=script.health_check, install_environment=script.install_environment, run_hook=script.run_hook), # noqa: E501
'swift': Language(name='swift', ENVIRONMENT_DIR=swift.ENVIRONMENT_DIR, get_default_version=swift.get_default_version, health_check=swift.health_check, install_environment=swift.install_environment, run_hook=swift.run_hook), # noqa: E501
'system': Language(name='system', ENVIRONMENT_DIR=system.ENVIRONMENT_DIR, get_default_version=system.get_default_version, health_check=system.health_check, install_environment=system.install_environment, run_hook=system.run_hook), # noqa: E501
# END GENERATED
}
# TODO: fully deprecate `python_venv`
# Alias kept so configs that still say `python_venv` keep working.
languages['python_venv'] = languages['python']
# Stable, sorted list of all supported language names.
all_languages = sorted(languages)
| mit | 7604d7832ba80a6fe130fb6e02321649 | 91.685714 | 290 | 0.779131 | 3.303462 | false | false | false | false |
pre-commit/pre-commit | pre_commit/envcontext.py | 1 | 1612 | from __future__ import annotations
import contextlib
import enum
import os
from typing import Generator
from typing import MutableMapping
from typing import NamedTuple
from typing import Tuple
from typing import Union
_Unset = enum.Enum('_Unset', 'UNSET')
# Sentinel meaning "remove this variable inside the context"
UNSET = _Unset.UNSET


class Var(NamedTuple):
    """Reference to the *previous* value of an environment variable."""
    name: str
    default: str = ''


SubstitutionT = Tuple[Union[str, Var], ...]
ValueT = Union[str, _Unset, SubstitutionT]
PatchesT = Tuple[Tuple[str, ValueT], ...]


def format_env(parts: SubstitutionT, env: MutableMapping[str, str]) -> str:
    """Render a substitution template: literals pass through, Vars are looked
    up in *env* (falling back to their default)."""
    rendered = []
    for part in parts:
        if isinstance(part, Var):
            rendered.append(env.get(part.name, part.default))
        else:
            rendered.append(part)
    return ''.join(rendered)


@contextlib.contextmanager
def envcontext(
    patch: PatchesT,
    _env: MutableMapping[str, str] | None = None,
) -> Generator[None, None, None]:
    """Temporarily modify `os.environ` (or `_env`) according to `patch`.

    Each entry of `patch` is a `(key, value)` pair where `value` is:
        - a string: `env[key] == value` inside the context.
        - UNSET: `key not in env` inside the context.
        - a template (tuple of strings and Var): rendered against the
          environment as it was *before* any patches were applied.

    The original environment is restored on exit, even on error.
    """
    env = os.environ if _env is None else _env
    saved = dict(env)
    for key, value in patch:
        if value is UNSET:
            env.pop(key, None)
        elif isinstance(value, tuple):
            env[key] = format_env(value, saved)
        else:
            env[key] = value
    try:
        yield
    finally:
        env.clear()
        env.update(saved)
| mit | 587b1bd18c0e49900c9820d9b27157ef | 24.587302 | 78 | 0.617866 | 3.740139 | false | false | false | false |
tiangolo/fastapi | tests/test_tutorial/test_events/test_tutorial001.py | 1 | 2763 | from fastapi.testclient import TestClient
from docs_src.events.tutorial001 import app
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__item_id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_events():
    """Entering the TestClient context runs startup events; verify the schema
    and the item populated at startup are both served."""
    with TestClient(app) as running_client:
        schema_response = running_client.get("/openapi.json")
        assert schema_response.status_code == 200, schema_response.text
        assert schema_response.json() == openapi_schema
        item_response = running_client.get("/items/foo")
        assert item_response.status_code == 200, item_response.text
        assert item_response.json() == {"name": "Fighters"}
| mit | edfeac9bd118b5662e1357c01996bd70 | 33.974684 | 86 | 0.354687 | 5.079044 | false | true | false | false |
tiangolo/fastapi | tests/test_exception_handlers.py | 1 | 1918 | import pytest
from fastapi import FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.testclient import TestClient
from starlette.responses import JSONResponse
def http_exception_handler(request, exception):
    """Replace the default HTTPException response with a fixed JSON body."""
    payload = {"exception": "http-exception"}
    return JSONResponse(payload)


def request_validation_exception_handler(request, exception):
    """Replace the default validation-error response with a fixed JSON body."""
    payload = {"exception": "request-validation"}
    return JSONResponse(payload)


def server_error_exception_handler(request, exception):
    """Turn any unhandled exception into a 500 response with a fixed JSON body."""
    payload = {"exception": "server-error"}
    return JSONResponse(payload, status_code=500)
# Register the custom handlers at app construction time; each maps an
# exception type to the handler invoked when a route raises it.
app = FastAPI(
    exception_handlers={
        HTTPException: http_exception_handler,
        RequestValidationError: request_validation_exception_handler,
        Exception: server_error_exception_handler,
    }
)
client = TestClient(app)
# Raises a plain HTTPException so the custom handler can intercept it.
@app.get("/http-exception")
def route_with_http_exception():
    raise HTTPException(status_code=400)
# Never reached with a non-int param: validation fails first and the custom
# validation handler responds.
@app.get("/request-validation/{param}/")
def route_with_request_validation_exception(param: int):
    pass  # pragma: no cover
# Raises an arbitrary exception to exercise the catch-all Exception handler.
@app.get("/server-error")
def route_with_server_error():
    raise RuntimeError("Oops!")
def test_override_http_exception():
    """The custom HTTPException handler replaces the default 400 response."""
    resp = client.get("/http-exception")
    assert resp.status_code == 200
    assert resp.json() == {"exception": "http-exception"}


def test_override_request_validation_exception():
    """The custom validation handler replaces the default 422 response."""
    resp = client.get("/request-validation/invalid")
    assert resp.status_code == 200
    assert resp.json() == {"exception": "request-validation"}


def test_override_server_error_exception_raises():
    """With raise_server_exceptions (the default) the original error propagates."""
    with pytest.raises(RuntimeError):
        client.get("/server-error")


def test_override_server_error_exception_response():
    """With raise_server_exceptions=False the custom 500 handler responds."""
    non_raising_client = TestClient(app, raise_server_exceptions=False)
    resp = non_raising_client.get("/server-error")
    assert resp.status_code == 500
    assert resp.json() == {"exception": "server-error"}
| mit | d644596c5022059943847dc83c84a6c5 | 27.626866 | 79 | 0.729406 | 4.037895 | false | true | false | false |
tiangolo/fastapi | tests/test_starlette_urlconvertors.py | 1 | 1371 | from fastapi import FastAPI, Path
from fastapi.testclient import TestClient
app = FastAPI()
# Each route uses a Starlette path convertor (":int", ":float", ":path") to
# control how the {param} segment is matched and parsed before validation.
@app.get("/int/{param:int}")
def int_convertor(param: int = Path()):
    """Echo an integer parsed by the ":int" path convertor."""
    return {"int": param}
@app.get("/float/{param:float}")
def float_convertor(param: float = Path()):
    """Echo a float parsed by the ":float" path convertor."""
    return {"float": param}
@app.get("/path/{param:path}")
def path_convertor(param: str = Path()):
    """Echo the raw URL remainder (slashes included) captured by ":path"."""
    return {"path": param}
client = TestClient(app)
def test_route_converters_int():
    """The :int convertor parses the path segment into an integer."""
    resp = client.get("/int/5")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"int": 5}
    assert app.url_path_for("int_convertor", param=5) == "/int/5"  # type: ignore


def test_route_converters_float():
    """The :float convertor parses the path segment into a float."""
    resp = client.get("/float/25.5")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"float": 25.5}
    assert app.url_path_for("float_convertor", param=25.5) == "/float/25.5"  # type: ignore


def test_route_converters_path():
    """The :path convertor captures the rest of the URL, slashes included."""
    resp = client.get("/path/some/example")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"path": "some/example"}


def test_url_path_for_path_convertor():
    """url_path_for round-trips a path-convertor parameter."""
    expected = "/path/some/example"
    assert app.url_path_for("path_convertor", param="some/example") == expected
| mit | 4189c767b3b3756144a13e925e1d670e | 25.882353 | 91 | 0.654267 | 3.32767 | false | true | false | false |
tiangolo/fastapi | tests/test_tuples.py | 1 | 9118 | from typing import List, Tuple
from fastapi import FastAPI, Form
from fastapi.testclient import TestClient
from pydantic import BaseModel
app = FastAPI()
class ItemGroup(BaseModel):
    # Each item is a fixed-size pair of strings (serialized as a 2-element
    # JSON array).
    items: List[Tuple[str, str]]
class Coordinate(BaseModel):
    # A 2D point.
    x: float
    y: float
@app.post("/model-with-tuple/")
def post_model_with_tuple(item_group: ItemGroup):
    """Echo a model whose field is a list of 2-string tuples."""
    return item_group
@app.post("/tuple-of-models/")
def post_tuple_of_models(square: Tuple[Coordinate, Coordinate]):
    """Echo a fixed-size pair of Coordinate models from the JSON body."""
    return square
@app.post("/tuple-form/")
def hello(values: Tuple[int, int] = Form()):
    """Echo a pair of integers submitted as repeated form values."""
    return values
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/model-with-tuple/": {
"post": {
"summary": "Post Model With Tuple",
"operationId": "post_model_with_tuple_model_with_tuple__post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/ItemGroup"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/tuple-of-models/": {
"post": {
"summary": "Post Tuple Of Models",
"operationId": "post_tuple_of_models_tuple_of_models__post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"title": "Square",
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [
{"$ref": "#/components/schemas/Coordinate"},
{"$ref": "#/components/schemas/Coordinate"},
],
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/tuple-form/": {
"post": {
"summary": "Hello",
"operationId": "hello_tuple_form__post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"$ref": "#/components/schemas/Body_hello_tuple_form__post"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_hello_tuple_form__post": {
"title": "Body_hello_tuple_form__post",
"required": ["values"],
"type": "object",
"properties": {
"values": {
"title": "Values",
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [{"type": "integer"}, {"type": "integer"}],
}
},
},
"Coordinate": {
"title": "Coordinate",
"required": ["x", "y"],
"type": "object",
"properties": {
"x": {"title": "X", "type": "number"},
"y": {"title": "Y", "type": "number"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ItemGroup": {
"title": "ItemGroup",
"required": ["items"],
"type": "object",
"properties": {
"items": {
"title": "Items",
"type": "array",
"items": {
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [{"type": "string"}, {"type": "string"}],
},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_openapi_schema():
    """The generated OpenAPI document matches the expected schema."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema


def test_model_with_tuple_valid():
    """A list of exactly-2-element string tuples round-trips."""
    payload = {"items": [["foo", "bar"], ["baz", "whatelse"]]}
    resp = client.post("/model-with-tuple/", json=payload)
    assert resp.status_code == 200, resp.text
    assert resp.json() == payload


def test_model_with_tuple_invalid():
    """Tuples with too many or too few elements are rejected with 422."""
    too_long = {"items": [["foo", "bar"], ["baz", "whatelse", "too", "much"]]}
    resp = client.post("/model-with-tuple/", json=too_long)
    assert resp.status_code == 422, resp.text

    too_short = {"items": [["foo", "bar"], ["baz"]]}
    resp = client.post("/model-with-tuple/", json=too_short)
    assert resp.status_code == 422, resp.text


def test_tuple_with_model_valid():
    """Exactly two Coordinate objects round-trip."""
    payload = [{"x": 1, "y": 2}, {"x": 3, "y": 4}]
    resp = client.post("/tuple-of-models/", json=payload)
    assert resp.status_code == 200, resp.text
    assert resp.json() == payload


def test_tuple_with_model_invalid():
    """More or fewer than two Coordinate objects are rejected with 422."""
    resp = client.post(
        "/tuple-of-models/",
        json=[{"x": 1, "y": 2}, {"x": 3, "y": 4}, {"x": 5, "y": 6}],
    )
    assert resp.status_code == 422, resp.text

    resp = client.post("/tuple-of-models/", json=[{"x": 1, "y": 2}])
    assert resp.status_code == 422, resp.text


def test_tuple_form_valid():
    """Two form values are coerced to a pair of ints."""
    resp = client.post("/tuple-form/", data={"values": ("1", "2")})
    assert resp.status_code == 200, resp.text
    assert resp.json() == [1, 2]


def test_tuple_form_invalid():
    """A form submission with the wrong number of values is rejected."""
    resp = client.post("/tuple-form/", data={"values": ("1", "2", "3")})
    assert resp.status_code == 422, resp.text
    resp = client.post("/tuple-form/", data={"values": ("1")})
    assert resp.status_code == 422, resp.text
| mit | fa1dac293853dc03a5d15029e222dceb | 33.407547 | 90 | 0.375082 | 4.824339 | false | false | false | false |
tiangolo/fastapi | tests/test_tutorial/test_bigger_applications/test_main.py | 1 | 17262 | import pytest
from fastapi.testclient import TestClient
from docs_src.bigger_applications.app.main import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/": {
"get": {
"tags": ["users"],
"summary": "Read Users",
"operationId": "read_users_users__get",
"parameters": [
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/users/me": {
"get": {
"tags": ["users"],
"summary": "Read User Me",
"operationId": "read_user_me_users_me_get",
"parameters": [
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/users/{username}": {
"get": {
"tags": ["users"],
"summary": "Read User",
"operationId": "read_user_users__username__get",
"parameters": [
{
"required": True,
"schema": {"title": "Username", "type": "string"},
"name": "username",
"in": "path",
},
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/items/": {
"get": {
"tags": ["items"],
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
},
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"404": {"description": "Not found"},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/items/{item_id}": {
"get": {
"tags": ["items"],
"summary": "Read Item",
"operationId": "read_item_items__item_id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
},
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
},
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"404": {"description": "Not found"},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
"put": {
"tags": ["items", "custom"],
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
},
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
},
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"404": {"description": "Not found"},
"403": {"description": "Operation forbidden"},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
},
"/admin/": {
"post": {
"tags": ["admin"],
"summary": "Update Admin",
"operationId": "update_admin_admin__post",
"parameters": [
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
},
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"418": {"description": "I'm a teapot"},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/": {
"get": {
"summary": "Root",
"operationId": "root__get",
"parameters": [
{
"required": True,
"schema": {"title": "Token", "type": "string"},
"name": "token",
"in": "query",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
no_jessica = {
"detail": [
{
"loc": ["query", "token"],
"msg": "field required",
"type": "value_error.missing",
},
]
}
@pytest.mark.parametrize(
"path,expected_status,expected_response,headers",
[
(
"/users?token=jessica",
200,
[{"username": "Rick"}, {"username": "Morty"}],
{},
),
("/users", 422, no_jessica, {}),
("/users/foo?token=jessica", 200, {"username": "foo"}, {}),
("/users/foo", 422, no_jessica, {}),
("/users/me?token=jessica", 200, {"username": "fakecurrentuser"}, {}),
("/users/me", 422, no_jessica, {}),
(
"/users?token=monica",
400,
{"detail": "No Jessica token provided"},
{},
),
(
"/items?token=jessica",
200,
{"plumbus": {"name": "Plumbus"}, "gun": {"name": "Portal Gun"}},
{"X-Token": "fake-super-secret-token"},
),
("/items", 422, no_jessica, {"X-Token": "fake-super-secret-token"}),
(
"/items/plumbus?token=jessica",
200,
{"name": "Plumbus", "item_id": "plumbus"},
{"X-Token": "fake-super-secret-token"},
),
(
"/items/bar?token=jessica",
404,
{"detail": "Item not found"},
{"X-Token": "fake-super-secret-token"},
),
("/items/plumbus", 422, no_jessica, {"X-Token": "fake-super-secret-token"}),
(
"/items?token=jessica",
400,
{"detail": "X-Token header invalid"},
{"X-Token": "invalid"},
),
(
"/items/bar?token=jessica",
400,
{"detail": "X-Token header invalid"},
{"X-Token": "invalid"},
),
(
"/items?token=jessica",
422,
{
"detail": [
{
"loc": ["header", "x-token"],
"msg": "field required",
"type": "value_error.missing",
}
]
},
{},
),
(
"/items/plumbus?token=jessica",
422,
{
"detail": [
{
"loc": ["header", "x-token"],
"msg": "field required",
"type": "value_error.missing",
}
]
},
{},
),
("/?token=jessica", 200, {"message": "Hello Bigger Applications!"}, {}),
("/", 422, no_jessica, {}),
("/openapi.json", 200, openapi_schema, {}),
],
)
def test_get_path(path, expected_status, expected_response, headers):
response = client.get(path, headers=headers)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_put_no_header():
    """Missing both the query token and X-Token header -> two validation errors."""
    resp = client.put("/items/foo")
    assert resp.status_code == 422, resp.text
    assert resp.json() == {
        "detail": [
            {
                "loc": ["query", "token"],
                "msg": "field required",
                "type": "value_error.missing",
            },
            {
                "loc": ["header", "x-token"],
                "msg": "field required",
                "type": "value_error.missing",
            },
        ]
    }


def test_put_invalid_header():
    """A wrong X-Token header is rejected with 400."""
    resp = client.put("/items/foo", headers={"X-Token": "invalid"})
    assert resp.status_code == 400, resp.text
    assert resp.json() == {"detail": "X-Token header invalid"}


def test_put():
    """A valid token and header allow updating the one writable item."""
    resp = client.put(
        "/items/plumbus?token=jessica",
        headers={"X-Token": "fake-super-secret-token"},
    )
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"item_id": "plumbus", "name": "The great Plumbus"}


def test_put_forbidden():
    """Updating any item other than plumbus is forbidden (403)."""
    resp = client.put(
        "/items/bar?token=jessica",
        headers={"X-Token": "fake-super-secret-token"},
    )
    assert resp.status_code == 403, resp.text
    assert resp.json() == {"detail": "You can only update the item: plumbus"}


def test_admin():
    """The admin router accepts a valid token and header."""
    resp = client.post(
        "/admin/?token=jessica",
        headers={"X-Token": "fake-super-secret-token"},
    )
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"message": "Admin getting schwifty"}


def test_admin_invalid_header():
    """The admin router rejects a wrong X-Token header with 400."""
    resp = client.post("/admin/", headers={"X-Token": "invalid"})
    assert resp.status_code == 400, resp.text
    assert resp.json() == {"detail": "X-Token header invalid"}
| mit | 0f2f05d5d38f80e68cbebcaa15dc0a29 | 34.156823 | 86 | 0.322964 | 5.190018 | false | false | false | false |
tiangolo/fastapi | tests/test_additional_responses_custom_model_in_callback.py | 1 | 4810 | from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel, HttpUrl
from starlette.responses import JSONResponse
class CustomModel(BaseModel):
a: int
app = FastAPI()
callback_router = APIRouter(default_response_class=JSONResponse)
@callback_router.get(
"{$callback_url}/callback/", responses={400: {"model": CustomModel}}
)
def callback_route():
pass # pragma: no cover
@app.post("/", callbacks=callback_router.routes)
def main_route(callback_url: HttpUrl):
pass # pragma: no cover
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Main Route",
"operationId": "main_route__post",
"parameters": [
{
"required": True,
"schema": {
"title": "Callback Url",
"maxLength": 2083,
"minLength": 1,
"type": "string",
"format": "uri",
},
"name": "callback_url",
"in": "query",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"callbacks": {
"callback_route": {
"{$callback_url}/callback/": {
"get": {
"summary": "Callback Route",
"operationId": "callback_route__callback_url__callback__get",
"responses": {
"400": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CustomModel"
}
}
},
"description": "Bad Request",
},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
},
}
}
}
},
}
}
},
"components": {
"schemas": {
"CustomModel": {
"title": "CustomModel",
"required": ["a"],
"type": "object",
"properties": {"a": {"title": "A", "type": "integer"}},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
client = TestClient(app)
def test_openapi_schema():
    """The callback route and its custom 400 model appear in the schema."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
| mit | 129ddd849b8e9bbc0f681cd8e7f1b882 | 33.855072 | 94 | 0.330561 | 5.632319 | false | false | false | false |
tiangolo/fastapi | tests/test_security_api_key_header_optional.py | 1 | 2023 | from typing import Optional
from fastapi import Depends, FastAPI, Security
from fastapi.security import APIKeyHeader
from fastapi.testclient import TestClient
from pydantic import BaseModel
app = FastAPI()
api_key = APIKeyHeader(name="key", auto_error=False)
class User(BaseModel):
    """Minimal user model carrying only the username."""

    username: str


def get_current_user(oauth_header: Optional[str] = Security(api_key)):
    """Build a User from the API-key header, or None when the header is absent.

    With auto_error=False on the scheme, a missing header yields None here
    instead of an automatic 403.
    """
    if oauth_header is None:
        return None
    return User(username=oauth_header)


@app.get("/users/me")
def read_current_user(current_user: Optional[User] = Depends(get_current_user)):
    """Return the authenticated user, or a hint message when unauthenticated."""
    return (
        current_user
        if current_user is not None
        else {"msg": "Create an account first"}
    )


client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"APIKeyHeader": []}],
}
}
},
"components": {
"securitySchemes": {
"APIKeyHeader": {"type": "apiKey", "name": "key", "in": "header"}
}
},
}
def test_openapi_schema():
    """The OpenAPI document advertises the APIKeyHeader security scheme."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema


def test_security_api_key():
    """A request carrying the "key" header is treated as authenticated."""
    resp = client.get("/users/me", headers={"key": "secret"})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"username": "secret"}


def test_security_api_key_no_key():
    """Without the header (auto_error=False) the endpoint still responds 200."""
    resp = client.get("/users/me")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"msg": "Create an account first"}
| mit | cdeabbdb03e8c29438453d43fffa0d57 | 26.337838 | 80 | 0.579832 | 3.746296 | false | true | false | false |
tiangolo/fastapi | fastapi/dependencies/utils.py | 1 | 27462 | import dataclasses
import inspect
from contextlib import contextmanager
from copy import deepcopy
from typing import (
Any,
Callable,
Coroutine,
Dict,
ForwardRef,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import anyio
from fastapi import params
from fastapi.concurrency import (
AsyncExitStack,
asynccontextmanager,
contextmanager_in_threadpool,
)
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.logger import logger
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import create_response_field, get_path_param_names
from pydantic import BaseModel, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.fields import (
SHAPE_FROZENSET,
SHAPE_LIST,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_SINGLETON,
SHAPE_TUPLE,
SHAPE_TUPLE_ELLIPSIS,
FieldInfo,
ModelField,
Required,
Undefined,
)
from pydantic.schema import get_annotation_from_field_info
from pydantic.typing import evaluate_forwardref
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import HTTPConnection, Request
from starlette.responses import Response
from starlette.websockets import WebSocket
# Pydantic field shapes that represent a sequence of values (used to decide
# whether a query/header parameter may repeat).
sequence_shapes = {
    SHAPE_LIST,
    SHAPE_SET,
    SHAPE_FROZENSET,
    SHAPE_TUPLE,
    SHAPE_SEQUENCE,
    SHAPE_TUPLE_ELLIPSIS,
}
# Concrete builtin types treated as sequences for scalar-field detection.
sequence_types = (list, set, tuple)
# Maps a sequence shape to the concrete container used when re-assembling
# repeated request values.
sequence_shape_to_type = {
    SHAPE_LIST: list,
    SHAPE_SET: set,
    SHAPE_TUPLE: tuple,
    SHAPE_SEQUENCE: list,
    SHAPE_TUPLE_ELLIPSIS: list,
}
# User-facing message when "python-multipart" is missing entirely.
multipart_not_installed_error = (
    'Form data requires "python-multipart" to be installed. \n'
    'You can install "python-multipart" with: \n\n'
    "pip install python-multipart\n"
)
# User-facing message when the similarly-named "multipart" package shadows
# the required "python-multipart" distribution.
multipart_incorrect_install_error = (
    'Form data requires "python-multipart" to be installed. '
    'It seems you installed "multipart" instead. \n'
    'You can remove "multipart" with: \n\n'
    "pip uninstall multipart\n\n"
    'And then install "python-multipart" with: \n\n'
    "pip install python-multipart\n"
)
def check_file_field(field: ModelField) -> None:
    # For Form(...) fields, verify at route-definition time that the
    # "python-multipart" package needed to parse form bodies is importable,
    # and that it is the right distribution (not the unrelated "multipart"
    # package, which exposes the same top-level module name).  Raises
    # RuntimeError with install instructions otherwise.
    field_info = field.field_info
    if isinstance(field_info, params.Form):
        try:
            # __version__ is available in both multiparts, and can be mocked
            from multipart import __version__  # type: ignore
            assert __version__
            try:
                # parse_options_header is only available in the right multipart
                from multipart.multipart import parse_options_header  # type: ignore
                assert parse_options_header
            except ImportError:
                # Wrong distribution installed; RuntimeError is not caught by
                # the outer ImportError handler below.
                logger.error(multipart_incorrect_install_error)
                raise RuntimeError(multipart_incorrect_install_error)
        except ImportError:
            logger.error(multipart_not_installed_error)
            raise RuntimeError(multipart_not_installed_error)
def get_param_sub_dependant(
    *, param: inspect.Parameter, path: str, security_scopes: Optional[List[str]] = None
) -> Dependant:
    """Build the sub-dependant for a function parameter declared with Depends(...).

    A bare Depends() (no callable) means "use the parameter's type annotation
    as the dependency callable".
    """
    depends: params.Depends = param.default
    dependency = depends.dependency or param.annotation
    return get_sub_dependant(
        depends=depends,
        dependency=dependency,
        path=path,
        name=param.name,
        security_scopes=security_scopes,
    )
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
    """Build a sub-dependant for a Depends(...) given without a parameter.

    With no annotated parameter to fall back on, the dependency callable
    itself is mandatory.
    """
    dependency = depends.dependency
    assert callable(
        dependency
    ), "A parameter-less dependency must have a callable dependency"
    return get_sub_dependant(depends=depends, dependency=dependency, path=path)
def get_sub_dependant(
    *,
    depends: params.Depends,
    dependency: Callable[..., Any],
    path: str,
    name: Optional[str] = None,
    security_scopes: Optional[List[str]] = None,
) -> Dependant:
    """Build a Dependant for *dependency*, accumulating OAuth2 security scopes.

    When *depends* is a Security(...) marker its scopes are appended to the
    (shared, caller-visible) security_scopes list; when the dependency itself
    is a security scheme, a SecurityRequirement is attached to the result.
    """
    scopes: List[str] = security_scopes or []
    if isinstance(depends, params.Security):
        # Security(...) contributes its own scopes to the accumulated list.
        scopes.extend(depends.scopes)
    security_requirement = None
    if isinstance(dependency, SecurityBase):
        # Only OAuth2/OpenIdConnect schemes actually carry scopes.
        requirement_scopes = (
            scopes if isinstance(dependency, (OAuth2, OpenIdConnect)) else []
        )
        security_requirement = SecurityRequirement(
            security_scheme=dependency, scopes=requirement_scopes
        )
    sub_dependant = get_dependant(
        path=path,
        call=dependency,
        name=name,
        security_scopes=scopes,
        use_cache=depends.use_cache,
    )
    if security_requirement:
        sub_dependant.security_requirements.append(security_requirement)
    return sub_dependant
CacheKey = Tuple[Optional[Callable[..., Any]], Tuple[str, ...]]


def get_flat_dependant(
    dependant: Dependant,
    *,
    skip_repeats: bool = False,
    visited: Optional[List[CacheKey]] = None,
) -> Dependant:
    """Collapse a dependency tree into a single flat Dependant.

    All parameter lists and security requirements of every (transitive)
    sub-dependency are merged into one Dependant.  With skip_repeats=True,
    sub-dependencies already seen (by cache_key) are merged only once.
    """
    if visited is None:
        visited = []
    visited.append(dependant.cache_key)

    flat = Dependant(
        path_params=dependant.path_params.copy(),
        query_params=dependant.query_params.copy(),
        header_params=dependant.header_params.copy(),
        cookie_params=dependant.cookie_params.copy(),
        body_params=dependant.body_params.copy(),
        security_schemes=dependant.security_requirements.copy(),
        use_cache=dependant.use_cache,
        path=dependant.path,
    )
    for sub in dependant.dependencies:
        if skip_repeats and sub.cache_key in visited:
            continue
        flat_sub = get_flat_dependant(sub, skip_repeats=skip_repeats, visited=visited)
        # Merge every parameter list of the flattened sub-dependency.
        for attr in (
            "path_params",
            "query_params",
            "header_params",
            "cookie_params",
            "body_params",
            "security_requirements",
        ):
            getattr(flat, attr).extend(getattr(flat_sub, attr))
    return flat
def get_flat_params(dependant: Dependant) -> List[ModelField]:
    """Return every path/query/header/cookie parameter of the flattened tree."""
    flat = get_flat_dependant(dependant, skip_repeats=True)
    return [
        *flat.path_params,
        *flat.query_params,
        *flat.header_params,
        *flat.cookie_params,
    ]
def is_scalar_field(field: ModelField) -> bool:
    """True when *field* is a single scalar value (usable in path/query/etc.).

    Rules out sequences, dicts, pydantic models, dataclasses, anything marked
    as a request body, and compound fields with non-scalar sub-fields.
    """
    if field.shape != SHAPE_SINGLETON:
        return False
    if lenient_issubclass(field.type_, BaseModel):
        return False
    if lenient_issubclass(field.type_, sequence_types + (dict,)):
        return False
    if dataclasses.is_dataclass(field.type_):
        return False
    if isinstance(field.field_info, params.Body):
        return False
    # e.g. Union members must each be scalar themselves.
    if field.sub_fields and not all(is_scalar_field(f) for f in field.sub_fields):
        return False
    return True
def is_scalar_sequence_field(field: ModelField) -> bool:
    """True when *field* is a sequence (list/set/tuple/...) of scalar values."""
    if field.shape in sequence_shapes and not lenient_issubclass(
        field.type_, BaseModel
    ):
        sub_fields = field.sub_fields
        if sub_fields is None:
            return True
        return all(is_scalar_field(sub) for sub in sub_fields)
    # Bare sequence annotations (e.g. `list`) have SHAPE_SINGLETON but a
    # sequence type.
    return lenient_issubclass(field.type_, sequence_types)
def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:
signature = inspect.signature(call)
globalns = getattr(call, "__globals__", {})
typed_params = [
inspect.Parameter(
name=param.name,
kind=param.kind,
default=param.default,
annotation=get_typed_annotation(param, globalns),
)
for param in signature.parameters.values()
]
typed_signature = inspect.Signature(typed_params)
return typed_signature
def get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any:
    """Resolve *param*'s annotation; a string annotation is evaluated as a
    forward reference against *globalns*, anything else passes through."""
    annotation = param.annotation
    if not isinstance(annotation, str):
        return annotation
    return evaluate_forwardref(ForwardRef(annotation), globalns, globalns)
def get_dependant(
    *,
    path: str,
    call: Callable[..., Any],
    name: Optional[str] = None,
    security_scopes: Optional[List[str]] = None,
    use_cache: bool = True,
) -> Dependant:
    """Analyze *call*'s signature and classify every parameter.

    Builds a Dependant whose path/query/header/cookie/body parameter lists
    and sub-dependency list describe how to invoke *call* for requests
    matched on *path*.
    """
    path_param_names = get_path_param_names(path)
    endpoint_signature = get_typed_signature(call)
    signature_params = endpoint_signature.parameters
    dependant = Dependant(
        call=call,
        name=name,
        path=path,
        security_scopes=security_scopes,
        use_cache=use_cache,
    )
    for param_name, param in signature_params.items():
        if isinstance(param.default, params.Depends):
            # Depends(...) default -> recurse into a sub-dependency.
            sub_dependant = get_param_sub_dependant(
                param=param, path=path, security_scopes=security_scopes
            )
            dependant.dependencies.append(sub_dependant)
            continue
        if add_non_field_param_to_dependency(param=param, dependant=dependant):
            # Framework types (Request, Response, ...) are injected directly
            # and are not request parameters.
            continue
        # Default classification: query parameter.
        param_field = get_param_field(
            param=param, default_field_info=params.Query, param_name=param_name
        )
        if param_name in path_param_names:
            assert is_scalar_field(
                field=param_field
            ), "Path params must be of one of the supported types"
            # A plain default is ignored for path params (they are always
            # required) unless metadata was given explicitly via Path(...).
            ignore_default = not isinstance(param.default, params.Path)
            param_field = get_param_field(
                param=param,
                param_name=param_name,
                default_field_info=params.Path,
                force_type=params.ParamTypes.path,
                ignore_default=ignore_default,
            )
            add_param_to_fields(field=param_field, dependant=dependant)
        elif is_scalar_field(field=param_field):
            add_param_to_fields(field=param_field, dependant=dependant)
        elif isinstance(
            param.default, (params.Query, params.Header)
        ) and is_scalar_sequence_field(param_field):
            add_param_to_fields(field=param_field, dependant=dependant)
        else:
            # Anything non-scalar must come from the request body.
            field_info = param_field.field_info
            assert isinstance(
                field_info, params.Body
            ), f"Param: {param_field.name} can only be a request body, using Body()"
            dependant.body_params.append(param_field)
    return dependant
def add_non_field_param_to_dependency(
    *, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
    """If *param* is annotated with one of the special injectable framework
    types, record its parameter name on *dependant* and return True.
    Returns None for ordinary parameters.

    The order below matters: Request and WebSocket must be tested before
    their common base HTTPConnection.
    """
    special_params = (
        (Request, "request_param_name"),
        (WebSocket, "websocket_param_name"),
        (HTTPConnection, "http_connection_param_name"),
        (Response, "response_param_name"),
        (BackgroundTasks, "background_tasks_param_name"),
        (SecurityScopes, "security_scopes_param_name"),
    )
    for special_type, attr_name in special_params:
        if lenient_issubclass(param.annotation, special_type):
            setattr(dependant, attr_name, param.name)
            return True
    return None
def get_param_field(
    *,
    param: inspect.Parameter,
    param_name: str,
    default_field_info: Type[params.Param] = params.Param,
    force_type: Optional[params.ParamTypes] = None,
    ignore_default: bool = False,
) -> ModelField:
    """Build a pydantic ModelField for one endpoint parameter.

    *default_field_info* supplies the parameter location when the default
    value is not already FieldInfo metadata; *force_type* overrides the
    location (used for path params); *ignore_default* makes the field
    required regardless of any declared default.
    """
    default_value: Any = Undefined
    had_schema = False
    if not param.default == param.empty and ignore_default is False:
        default_value = param.default
    if isinstance(default_value, FieldInfo):
        # The default is already Query()/Path()/Body()/File()/... metadata;
        # unwrap its inner default and fill in a missing location.
        had_schema = True
        field_info = default_value
        default_value = field_info.default
        if (
            isinstance(field_info, params.Param)
            and getattr(field_info, "in_", None) is None
        ):
            field_info.in_ = default_field_info.in_
        if force_type:
            field_info.in_ = force_type  # type: ignore
    else:
        field_info = default_field_info(default=default_value)
    required = True
    if default_value is Required or ignore_default:
        required = True
        default_value = None
    elif default_value is not Undefined:
        required = False
    annotation: Any = Any
    if not param.annotation == param.empty:
        annotation = param.annotation
    annotation = get_annotation_from_field_info(annotation, field_info, param_name)
    if not field_info.alias and getattr(field_info, "convert_underscores", None):
        # Header params: snake_case Python names map to dashed header names.
        alias = param.name.replace("_", "-")
    else:
        alias = field_info.alias or param.name
    field = create_response_field(
        name=param.name,
        type_=annotation,
        default=default_value,
        alias=alias,
        required=required,
        field_info=field_info,
    )
    if not had_schema and not is_scalar_field(field=field):
        # Non-scalar params without explicit metadata become body params.
        field.field_info = params.Body(field_info.default)
    if not had_schema and lenient_issubclass(field.type_, UploadFile):
        field.field_info = params.File(field_info.default)
    return field
def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None:
    """Append *field* to the dependant list matching its `in_` location."""
    field_info = cast(params.Param, field.field_info)
    location = field_info.in_
    destinations = {
        params.ParamTypes.path: dependant.path_params,
        params.ParamTypes.query: dependant.query_params,
        params.ParamTypes.header: dependant.header_params,
        params.ParamTypes.cookie: dependant.cookie_params,
    }
    assert (
        location in destinations
    ), f"non-body parameters must be in path, query, header or cookie: {field.name}"
    destinations[location].append(field)
def is_coroutine_callable(call: Callable[..., Any]) -> bool:
    """True when invoking *call* yields a coroutine: an async def function,
    or an instance whose __call__ is async. Classes are never coroutine
    callables (calling them constructs an instance)."""
    if inspect.isroutine(call):
        return inspect.iscoroutinefunction(call)
    if inspect.isclass(call):
        return False
    return inspect.iscoroutinefunction(getattr(call, "__call__", None))  # noqa: B004
def is_async_gen_callable(call: Callable[..., Any]) -> bool:
    """True when *call* (or its __call__) is an async generator function."""
    if inspect.isasyncgenfunction(call):
        return True
    return inspect.isasyncgenfunction(getattr(call, "__call__", None))  # noqa: B004
def is_gen_callable(call: Callable[..., Any]) -> bool:
    """True when *call* (or its __call__) is a plain generator function."""
    if inspect.isgeneratorfunction(call):
        return True
    return inspect.isgeneratorfunction(getattr(call, "__call__", None))  # noqa: B004
async def solve_generator(
    *, call: Callable[..., Any], stack: AsyncExitStack, sub_values: Dict[str, Any]
) -> Any:
    """Run a (sync or async) generator dependency, returning its yielded
    value and registering its teardown on *stack*.

    Precondition: *call* must satisfy is_gen_callable or
    is_async_gen_callable; otherwise ``cm`` is unbound and this raises
    UnboundLocalError.
    """
    if is_gen_callable(call):
        # Sync generators run in the threadpool so they cannot block the loop.
        cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))
    elif is_async_gen_callable(call):
        cm = asynccontextmanager(call)(**sub_values)
    return await stack.enter_async_context(cm)
async def solve_dependencies(
    *,
    request: Union[Request, WebSocket],
    dependant: Dependant,
    body: Optional[Union[Dict[str, Any], FormData]] = None,
    background_tasks: Optional[BackgroundTasks] = None,
    response: Optional[Response] = None,
    dependency_overrides_provider: Optional[Any] = None,
    dependency_cache: Optional[Dict[Tuple[Callable[..., Any], Tuple[str]], Any]] = None,
) -> Tuple[
    Dict[str, Any],
    List[ErrorWrapper],
    Optional[BackgroundTasks],
    Response,
    Dict[Tuple[Callable[..., Any], Tuple[str]], Any],
]:
    """Recursively resolve every dependency of *dependant* for one request.

    Returns (values, errors, background_tasks, response, dependency_cache):
    ``values`` maps parameter names to solved values; validation failures
    are collected in ``errors`` instead of being raised.
    """
    values: Dict[str, Any] = {}
    errors: List[ErrorWrapper] = []
    if response is None:
        # Placeholder response sub-dependencies may mutate (headers, status
        # code); the real status is decided later by the route handler.
        response = Response()
        del response.headers["content-length"]
        response.status_code = None  # type: ignore
    dependency_cache = dependency_cache or {}
    sub_dependant: Dependant
    for sub_dependant in dependant.dependencies:
        sub_dependant.call = cast(Callable[..., Any], sub_dependant.call)
        sub_dependant.cache_key = cast(
            Tuple[Callable[..., Any], Tuple[str]], sub_dependant.cache_key
        )
        call = sub_dependant.call
        use_sub_dependant = sub_dependant
        if (
            dependency_overrides_provider
            and dependency_overrides_provider.dependency_overrides
        ):
            # Testing hook: swap the dependency callable for its override
            # and re-analyze it, keeping the original path/scopes.
            original_call = sub_dependant.call
            call = getattr(
                dependency_overrides_provider, "dependency_overrides", {}
            ).get(original_call, original_call)
            use_path: str = sub_dependant.path  # type: ignore
            use_sub_dependant = get_dependant(
                path=use_path,
                call=call,
                name=sub_dependant.name,
                security_scopes=sub_dependant.security_scopes,
            )
        # Depth-first: solve the sub-dependency's own dependencies first.
        solved_result = await solve_dependencies(
            request=request,
            dependant=use_sub_dependant,
            body=body,
            background_tasks=background_tasks,
            response=response,
            dependency_overrides_provider=dependency_overrides_provider,
            dependency_cache=dependency_cache,
        )
        (
            sub_values,
            sub_errors,
            background_tasks,
            _,  # the subdependency returns the same response we have
            sub_dependency_cache,
        ) = solved_result
        dependency_cache.update(sub_dependency_cache)
        if sub_errors:
            errors.extend(sub_errors)
            continue
        if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
            # Reuse a value already solved for this (callable, scopes) pair.
            solved = dependency_cache[sub_dependant.cache_key]
        elif is_gen_callable(call) or is_async_gen_callable(call):
            # Generator dependencies run inside the request's AsyncExitStack
            # so their teardown executes after the response is sent.
            stack = request.scope.get("fastapi_astack")
            assert isinstance(stack, AsyncExitStack)
            solved = await solve_generator(
                call=call, stack=stack, sub_values=sub_values
            )
        elif is_coroutine_callable(call):
            solved = await call(**sub_values)
        else:
            # Plain sync callables go to the threadpool to avoid blocking
            # the event loop.
            solved = await run_in_threadpool(call, **sub_values)
        if sub_dependant.name is not None:
            values[sub_dependant.name] = solved
        if sub_dependant.cache_key not in dependency_cache:
            dependency_cache[sub_dependant.cache_key] = solved
    # Extract this dependant's own parameters from the request.
    path_values, path_errors = request_params_to_args(
        dependant.path_params, request.path_params
    )
    query_values, query_errors = request_params_to_args(
        dependant.query_params, request.query_params
    )
    header_values, header_errors = request_params_to_args(
        dependant.header_params, request.headers
    )
    cookie_values, cookie_errors = request_params_to_args(
        dependant.cookie_params, request.cookies
    )
    values.update(path_values)
    values.update(query_values)
    values.update(header_values)
    values.update(cookie_values)
    errors += path_errors + query_errors + header_errors + cookie_errors
    if dependant.body_params:
        (
            body_values,
            body_errors,
        ) = await request_body_to_args(  # body_params checked above
            required_params=dependant.body_params, received_body=body
        )
        values.update(body_values)
        errors.extend(body_errors)
    # Inject the framework objects the endpoint asked for by annotation.
    if dependant.http_connection_param_name:
        values[dependant.http_connection_param_name] = request
    if dependant.request_param_name and isinstance(request, Request):
        values[dependant.request_param_name] = request
    elif dependant.websocket_param_name and isinstance(request, WebSocket):
        values[dependant.websocket_param_name] = request
    if dependant.background_tasks_param_name:
        if background_tasks is None:
            background_tasks = BackgroundTasks()
        values[dependant.background_tasks_param_name] = background_tasks
    if dependant.response_param_name:
        values[dependant.response_param_name] = response
    if dependant.security_scopes_param_name:
        values[dependant.security_scopes_param_name] = SecurityScopes(
            scopes=dependant.security_scopes
        )
    return values, errors, background_tasks, response, dependency_cache
def request_params_to_args(
    required_params: Sequence[ModelField],
    received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
    """Extract and validate *required_params* from a params mapping.

    Returns (values, errors); validation failures are collected rather
    than raised.
    """
    values = {}
    errors = []
    for field in required_params:
        if is_scalar_sequence_field(field) and isinstance(
            received_params, (QueryParams, Headers)
        ):
            # Multi-value sources (?x=1&x=2, repeated headers) need
            # getlist() so every submitted value is kept.
            value = received_params.getlist(field.alias) or field.default
        else:
            value = received_params.get(field.alias)
        field_info = field.field_info
        assert isinstance(
            field_info, params.Param
        ), "Params must be subclasses of Param"
        if value is None:
            if field.required:
                errors.append(
                    ErrorWrapper(
                        MissingError(), loc=(field_info.in_.value, field.alias)
                    )
                )
            else:
                # deepcopy so mutable defaults are not shared across requests.
                values[field.name] = deepcopy(field.default)
            continue
        v_, errors_ = field.validate(
            value, values, loc=(field_info.in_.value, field.alias)
        )
        if isinstance(errors_, ErrorWrapper):
            errors.append(errors_)
        elif isinstance(errors_, list):
            errors.extend(errors_)
        else:
            values[field.name] = v_
    return values, errors
async def request_body_to_args(
    required_params: List[ModelField],
    received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
    """Extract and validate body fields from a parsed JSON dict or form.

    Returns (values, errors); validation failures are collected rather
    than raised.
    """
    values = {}
    errors = []
    if required_params:
        field = required_params[0]
        field_info = field.field_info
        embed = getattr(field_info, "embed", None)
        field_alias_omitted = len(required_params) == 1 and not embed
        if field_alias_omitted:
            # A single non-embedded body param consumes the whole body, so
            # wrap it under its alias to unify the extraction loop below.
            received_body = {field.alias: received_body}
        for field in required_params:
            loc: Tuple[str, ...]
            if field_alias_omitted:
                loc = ("body",)
            else:
                loc = ("body", field.alias)
            value: Optional[Any] = None
            if received_body is not None:
                if (
                    field.shape in sequence_shapes or field.type_ in sequence_types
                ) and isinstance(received_body, FormData):
                    # Repeated form keys: keep every submitted value.
                    value = received_body.getlist(field.alias)
                else:
                    try:
                        value = received_body.get(field.alias)
                    except AttributeError:
                        errors.append(get_missing_field_error(loc))
                        continue
            if (
                value is None
                or (isinstance(field_info, params.Form) and value == "")
                or (
                    isinstance(field_info, params.Form)
                    and field.shape in sequence_shapes
                    and len(value) == 0
                )
            ):
                # Missing values -- and empty form submissions -- count as
                # absent: error if required, else fall back to the default.
                if field.required:
                    errors.append(get_missing_field_error(loc))
                else:
                    # deepcopy so mutable defaults are not shared per-request.
                    values[field.name] = deepcopy(field.default)
                continue
            if (
                isinstance(field_info, params.File)
                and lenient_issubclass(field.type_, bytes)
                and isinstance(value, UploadFile)
            ):
                # bytes-typed file param: read the upload eagerly.
                value = await value.read()
            elif (
                field.shape in sequence_shapes
                and isinstance(field_info, params.File)
                and lenient_issubclass(field.type_, bytes)
                and isinstance(value, sequence_types)
            ):
                # Multiple bytes-typed uploads: read them all concurrently.
                results: List[Union[bytes, str]] = []
                async def process_fn(
                    fn: Callable[[], Coroutine[Any, Any, Any]]
                ) -> None:
                    result = await fn()
                    results.append(result)
                async with anyio.create_task_group() as tg:
                    for sub_value in value:
                        tg.start_soon(process_fn, sub_value.read)
                value = sequence_shape_to_type[field.shape](results)
            v_, errors_ = field.validate(value, values, loc=loc)
            if isinstance(errors_, ErrorWrapper):
                errors.append(errors_)
            elif isinstance(errors_, list):
                errors.extend(errors_)
            else:
                values[field.name] = v_
    return values, errors
def get_missing_field_error(loc: Tuple[str, ...]) -> ErrorWrapper:
    """Build the standard "field required" error for location *loc*."""
    return ErrorWrapper(MissingError(), loc=loc)
def get_body_field(*, dependant: Dependant, name: str) -> Optional[ModelField]:
    """Build the single synthetic "body" field for a route.

    Returns None when the route has no body params, the lone body param
    itself when it is unique and not embedded, or a generated BodyModel
    field combining all body params otherwise.
    """
    flat_dependant = get_flat_dependant(dependant)
    if not flat_dependant.body_params:
        return None
    first_param = flat_dependant.body_params[0]
    field_info = first_param.field_info
    embed = getattr(field_info, "embed", None)
    body_param_names_set = {param.name for param in flat_dependant.body_params}
    if len(body_param_names_set) == 1 and not embed:
        check_file_field(first_param)
        return first_param
    # If one field requires to embed, all have to be embedded
    # in case a sub-dependency is evaluated with a single unique body field
    # That is combined (embedded) with other body fields
    for param in flat_dependant.body_params:
        setattr(param.field_info, "embed", True)  # noqa: B010
    model_name = "Body_" + name
    BodyModel: Type[BaseModel] = create_model(model_name)
    for f in flat_dependant.body_params:
        BodyModel.__fields__[f.name] = f
    required = any(True for f in flat_dependant.body_params if f.required)
    BodyFieldInfo_kwargs: Dict[str, Any] = {"default": None}
    # File fields dominate Form fields, which dominate plain Body, when
    # choosing the combined field's metadata class.
    if any(isinstance(f.field_info, params.File) for f in flat_dependant.body_params):
        BodyFieldInfo: Type[params.Body] = params.File
    elif any(isinstance(f.field_info, params.Form) for f in flat_dependant.body_params):
        BodyFieldInfo = params.Form
    else:
        BodyFieldInfo = params.Body
    body_param_media_types = [
        f.field_info.media_type
        for f in flat_dependant.body_params
        if isinstance(f.field_info, params.Body)
    ]
    # Only propagate a media type when all body params agree on one.
    if len(set(body_param_media_types)) == 1:
        BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
    final_field = create_response_field(
        name="body",
        type_=BodyModel,
        required=required,
        alias="body",
        field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
    )
    check_file_field(final_field)
    return final_field
| mit | 85e0818738971f58b518c88c6b145dc2 | 35.277411 | 88 | 0.630071 | 3.99099 | false | false | false | false |
alphagov/backdrop | features/support/tests/test_stagecraft.py | 1 | 4100 | import requests
from requests.exceptions import ConnectionError
from nose.tools import assert_raises
from hamcrest import assert_that, is_
from ..stagecraft import StagecraftService, \
create_or_update_stagecraft_service, stop_stagecraft_service_if_running
class StubContext(object):
    """Minimal stand-in for a behave-style context object.

    Non-underscore attributes are stored in an internal dict so that
    membership tests (``"x" in ctx``) see exactly the values set on it.
    """

    def __init__(self):
        self._params = {}

    def __getattr__(self, key):
        # Only invoked for names not found normally, i.e. stored params;
        # a missing key raises KeyError like a plain dict lookup.
        return self._params[key]

    def __setattr__(self, key, value):
        # Private names live on the instance itself; everything else is a
        # context parameter. The condition is evaluated before either
        # target, so assigning _params in __init__ does not recurse.
        target = self.__dict__ if key.startswith('_') else self._params
        target[key] = value

    def __contains__(self, key):
        return key in self._params
def test_create_stagecraft_service():
    """A freshly created service is running, and stop() halts it."""
    context = StubContext()
    created = create_or_update_stagecraft_service(context, 2012, {})
    assert_that(context.mock_stagecraft_service.running(), is_(True))
    created.stop()
    assert_that(context.mock_stagecraft_service.stopped(), is_(True))
def test_update_stagecraft_service():
    """Re-creating on the same port updates routes on the same instance."""
    context = StubContext()
    first = create_or_update_stagecraft_service(context, 8089, {})
    assert_that(requests.get('http://localhost:8089/example').status_code,
                is_(404))
    second = create_or_update_stagecraft_service(
        context, 8089,
        {('GET', u'example'): {u'foo': u'bar'}})
    assert_that(requests.get('http://localhost:8089/example').status_code,
                is_(200))
    assert_that(first, is_(second))
    first.stop()
def test_stop_stagecraft_if_running():
    """stop_stagecraft_service_if_running halts a running service."""
    context = StubContext()
    running_service = create_or_update_stagecraft_service(context, 8089, {})
    stop_stagecraft_service_if_running(context)
    assert_that(running_service.running(), is_(False))
class TestStagecraftService(object):
    """Integration tests for StagecraftService's lifecycle and routing.

    Each test spins up a real HTTP stub on localhost:8089, so the order of
    start/stop calls within each test is significant.
    """

    def create_service(self):
        # Shared fixture: one GET route returning a JSON body.
        return StagecraftService(8089, {
            ('GET', u'example'): {u'foo': u'bar'}
        })

    def test_service_catches_calls(self):
        service = self.create_service()
        service.start()
        response = requests.get('http://localhost:8089/example')
        service.stop()
        assert_that(response.json(), is_({u'foo': u'bar'}))

    def test_calls_fail_if_service_is_not_started(self):
        self.create_service()
        assert_raises(ConnectionError, requests.get, ('http://localhost:8089/example'))

    def test_calls_fail_after_service_is_stopped(self):
        service = self.create_service()
        service.start()
        service.stop()
        assert_raises(ConnectionError, requests.get, ('http://localhost:8089/example'))

    def test_calls_succeed_after_service_is_restarted(self):
        service = self.create_service()
        service.restart()
        response = requests.get('http://localhost:8089/example')
        service.stop()
        assert_that(response.status_code, is_(200))

    def test_running_returns_true_if_the_service_is_running(self):
        service = self.create_service()
        assert_that(service.running(), is_(False))
        service.start()
        assert_that(service.running(), is_(True))
        service.stop()
        assert_that(service.running(), is_(False))

    def test_stopped_returns_true_if_the_service_is_not_running(self):
        service = self.create_service()
        assert_that(service.stopped(), is_(True))
        service.start()
        assert_that(service.stopped(), is_(False))
        service.stop()
        assert_that(service.stopped(), is_(True))

    def test_new_routes_can_be_added_to_a_running_service(self):
        service = self.create_service()
        service.start()
        # Routes added after start are served immediately.
        service.add_routes({
            ('GET', u'foobar'): {u'bar': u'foo'}})
        response = requests.get('http://localhost:8089/foobar')
        service.stop()
        assert_that(response.json(), is_({u'bar': u'foo'}))

    def test_all_routes_can_be_removed_from_a_running_service(self):
        service = self.create_service()
        service.start()
        # reset() drops every registered route while keeping the server up.
        service.reset()
        response = requests.get('http://localhost:8089/example')
        service.stop()
        assert_that(response.status_code, is_(404))
| mit | 4c892553e1345461320cbda3878e4b97 | 29.597015 | 87 | 0.634634 | 3.717135 | false | true | false | false |
alphagov/backdrop | backdrop/transformers/tasks/mapping.py | 1 | 1830 | import re
from .util import group_by
def compile_mappings(mappings):
    """Compile every regex pattern in *mappings*.

    Args:
        mappings: dict mapping a name to a list of regex pattern strings.

    Returns:
        dict with the same keys, each value a list of compiled patterns.
    """
    # .iteritems() was Python-2-only and map() is lazy on Python 3; the
    # comprehension produces real lists on both interpreters.
    return {mapping: [re.compile(pattern) for pattern in patterns]
            for mapping, patterns in mappings.items()}
def match_mapping(values, mappings):
    """Return the first mapping name whose regexes all match *values*.

    Args:
        values: sequence of strings, positionally paired with each
            mapping's regex list.
        mappings: dict mapping a name to a list of compiled regexes.

    Returns:
        The matching mapping name, or None when nothing matches.
    """
    for mapping, regexes in mappings.items():  # .iteritems() was py2-only
        # The original used a two-argument map() whose lambda shadowed the
        # `re` module name; zip() pairs values with regexes explicitly.
        if all(regex.search(value) for value, regex in zip(values, regexes)):
            return mapping
    return None
def map_data(grouped_data, mappings, mapped_attribute, other_mapping, value_attribute):
    """Aggregate grouped rows into per-(period, mapping) totals.

    Args:
        grouped_data: dict keyed by (start_at, end_at, *mapping_values)
            tuples, each value a list of row dicts.
        mappings: dict of mapping name -> compiled regex list.
        mapped_attribute: output key that receives the mapping name.
        other_mapping: fallback mapping name (rows match nothing and
            other_mapping is None are dropped).
        value_attribute: key whose values are summed per group.

    Returns:
        list of aggregated row dicts.
    """
    mapped_data = {}
    for grouped_values, data in grouped_data.items():  # .iteritems() was py2-only
        start_at = grouped_values[0]
        end_at = grouped_values[1]
        mapping_values = grouped_values[2:]
        mapping = match_mapping(mapping_values, mappings) or other_mapping
        if mapping is None:
            continue
        period_mapping_key = (start_at, end_at, mapping)
        # sum() replaces the Python-2-only builtin reduce() for the total.
        summed_value = sum(datum[value_attribute] for datum in data)
        if period_mapping_key in mapped_data:
            mapped_data[period_mapping_key][value_attribute] += summed_value
        else:
            mapped_data[period_mapping_key] = {
                "_start_at": start_at,
                "_end_at": end_at,
                mapped_attribute: mapping,
                value_attribute: summed_value,
            }
    # list() because dict.values() is a view (not a list) on Python 3.
    return list(mapped_data.values())
def compute(data, options):
    """Group *data* by period plus the configured mapping keys, then map
    and aggregate it according to *options*."""
    group_keys = ['_start_at', '_end_at'] + options['mapping-keys']
    grouped = group_by(group_keys, data)
    compiled_mappings = compile_mappings(options['mappings'])
    return map_data(
        grouped,
        compiled_mappings,
        options['mapped-attribute'],
        options.get('other-mapping', None),
        options['value-attribute'])
| mit | 6402a99cdb204317aa73fea41bb26ad8 | 28.516129 | 87 | 0.590164 | 4.14966 | false | false | false | false |
alphagov/backdrop | backdrop/contrib/evl_volumetrics.py | 1 | 2681 | from datetime import datetime
import re
from backdrop.core.timeutils import as_utc
def extract_column_header(sheet):
    """Return the header row (row index 3) of an EVL volumetrics sheet."""
    header_row_index = 3
    return sheet[header_row_index]
def extract_transaction_rows(sheet):
    """Return (header_row, transaction_rows) for an EVL volumetrics sheet.

    Each transaction row is [channel, service] plus the original row's
    cells from column 2 onward, for every known transaction row index.
    """
    # Row index -> [channel, service] label for that row of the sheet.
    TRANSACTION_INDEXES = {
        4: ["Assisted Digital", "Relicensing"],
        5: ["Assisted Digital", "Relicensing"],
        7: ["Assisted Digital", "SORN"],
        10: ["Fully Digital", "Relicensing"],
        11: ["Fully Digital", "Relicensing"],
        12: ["Fully Digital", "Relicensing"],
        13: ["Fully Digital", "Relicensing"],
        15: ["Fully Digital", "SORN"],
        16: ["Fully Digital", "SORN"],
        17: ["Fully Digital", "SORN"],
        18: ["Fully Digital", "SORN"],
        21: ["Manual", "Relicensing"],
        22: ["Manual", "Relicensing"],
        23: ["Manual", "Relicensing"],
        25: ["Manual", "SORN"],
        26: ["Manual", "SORN"],
        27: ["Manual", "SORN"],
        28: ["Manual", "SORN"],
        29: ["Manual", "SORN"],
        30: ["Manual", "SORN"],
        31: ["Manual", "SORN"],
    }

    # List comprehension instead of map(): on Python 3 map() returns a
    # lazy iterator, while callers expect a list of rows.
    transaction_rows = [
        channel_service + sheet[index][2:]
        for index, channel_service in TRANSACTION_INDEXES.items()
    ]
    return extract_column_header(sheet), transaction_rows
def create_transaction_data(header, row):
    """Expand one transaction row into per-month records.

    Pairs each "Mon YYYY" header cell with the row's volume cell and emits
    [iso_date, service, channel, transaction, volume] records. Empty or
    "-" volumes become 0.
    """
    CHANNEL_INDEX = 0
    SERVICE_INDEX = 1
    TRANSACTION_NAME_INDEX = 2
    DATES_START_INDEX = 3
    SERVICES = {
        "Relicensing": "tax-disc",
        "SORN": "sorn"
    }
    # list() is required on Python 3, where zip() returns a non-sliceable
    # iterator; the original `zip(...)[3:]` raised TypeError there.
    volumes = list(zip(header, row))[DATES_START_INDEX:]

    def transaction_data(date_volume):
        date, volume = date_volume
        if volume == "" or volume == "-":
            volume = 0
        date = as_utc(datetime.strptime(date, "%b %Y"))
        service = SERVICES[row[SERVICE_INDEX]]
        channel = row[CHANNEL_INDEX].lower().replace(" ", "-")
        transaction = row[TRANSACTION_NAME_INDEX]
        return [date.isoformat(), service, channel, transaction, volume]

    # Comprehension instead of map() so a list is returned on Python 3.
    return [transaction_data(date_volume) for date_volume in volumes]
def remove_summary_columns(sheet):
    """Drop non-date (summary) columns from every row of the sheet.

    Keeps the first three columns plus every column whose header cell
    looks like "Mon YYYY".
    """
    DATES_START_INDEX = 3
    # Raw string: "\s"/"\d" in a plain literal are invalid escape
    # sequences and a SyntaxWarning on modern Python.
    DATE_REGEXP = re.compile(r"[A-Z][a-z]{2}\s\d{4}")
    header = extract_column_header(sheet)

    # Comprehension replaces the Python-2-only builtin reduce().
    date_indexes = [i for i in range(DATES_START_INDEX, len(header))
                    if DATE_REGEXP.match(header[i])]

    def remove_columns_from_row(row):
        return row[:DATES_START_INDEX] + [row[i] for i in date_indexes]

    # Comprehension instead of map() so a list is returned on Python 3.
    return [remove_columns_from_row(row) for row in sheet]
| mit | 9803a8de7ad16e93cb7bf04e05f33cf0 | 29.123596 | 72 | 0.564342 | 3.584225 | false | false | false | false |
chromaway/ngcccbase | ngcccbase/p2ptrade/protocol_objects.py | 4 | 4722 | import time
from coloredcoinlib import IncompatibleTypesError
from ngcccbase.txcons import RawTxSpec
from utils import make_random_id
from utils import CommonEqualityMixin
class EOffer(CommonEqualityMixin):
    """An exchange offer.

    A is the offer side's ColorValue; B is the replying side's ColorValue.
    """

    def __init__(self, oid, A, B):
        # A falsy oid gets a freshly generated random id.
        self.oid = oid or make_random_id()
        self.A = A
        self.B = B
        self.expires = None

    def expired(self):
        """True when the offer has no expiry set or it already passed."""
        return self.expired_shift(0)

    def expired_shift(self, shift):
        """Like expired(), but evaluated at now + *shift* seconds."""
        if not self.expires:
            return True
        return self.expires < time.time() + shift

    def refresh(self, delta):
        """Push the expiry *delta* seconds into the future."""
        self.expires = time.time() + delta

    def get_data(self):
        """Serializable representation of the offer."""
        return {"oid": self.oid, "A": self.A, "B": self.B}

    def matches(self, offer):
        """True when the two offers are mirror images (A <=x=> B)."""
        return self.A == offer.B and offer.A == self.B

    def is_same_as_mine(self, my_offer):
        """True when both sides equal *my_offer*'s sides."""
        return self.A == my_offer.A and self.B == my_offer.B

    @classmethod
    def from_data(cls, data):
        """Reconstruct an offer from its get_data() representation."""
        return cls(data["oid"], data["A"], data["B"])
class MyEOffer(EOffer):
    """An offer created by this wallet; posted automatically by default."""

    def __init__(self, oid, A, B):
        EOffer.__init__(self, oid, A, B)
        self.auto_post = True
class ETxSpec(CommonEqualityMixin):
    """Specification of an exchange transaction: its inputs, targets, and
    the local UTXOs backing it."""

    def __init__(self, inputs, targets, my_utxo_list):
        self.inputs = inputs
        self.targets = targets
        self.my_utxo_list = my_utxo_list

    def get_data(self):
        """Serializable form; the local UTXO list is never shared."""
        return {"inputs": self.inputs, "targets": self.targets}

    @classmethod
    def from_data(cls, data):
        """Build a spec from peer-supplied data (no local UTXO list)."""
        return cls(data['inputs'], data['targets'], None)
class EProposal(CommonEqualityMixin):
    """Base class for exchange proposals tied to an offer."""

    def __init__(self, pid, ewctrl, offer):
        self.pid = pid
        self.ewctrl = ewctrl
        self.offer = offer

    def get_data(self):
        """Serializable form: proposal id plus the underlying offer."""
        return {"pid": self.pid, "offer": self.offer.get_data()}
class MyEProposal(EProposal):
    """A proposal we create locally from a peer's offer matched against
    one of our own offers."""

    def __init__(self, ewctrl, orig_offer, my_offer):
        super(MyEProposal, self).__init__(make_random_id(),
                                          ewctrl, orig_offer)
        self.my_offer = my_offer
        if not orig_offer.matches(my_offer):
            raise Exception("Offers are incongruent!")
        # Build the tx spec with sides swapped: we supply what the peer
        # asked for (offer.B) and receive what they give (offer.A).
        self.etx_spec = ewctrl.make_etx_spec(self.offer.B, self.offer.A)
        self.etx_data = None

    def get_data(self):
        res = super(MyEProposal, self).get_data()
        # After the reply has been processed we share the signed raw tx;
        # before that, only the tx spec.
        if self.etx_data:
            res["etx_data"] = self.etx_data
        else:
            res["etx_spec"] = self.etx_spec.get_data()
        return res

    def process_reply(self, reply_ep):
        # NOTE(review): .decode('hex') is a Python-2-only codec; Python 3
        # would need bytes.fromhex() -- confirm the target interpreter.
        rtxs = RawTxSpec.from_tx_data(self.ewctrl.model,
                                      reply_ep.etx_data.decode('hex'))
        if self.ewctrl.check_tx(rtxs, self.etx_spec):
            # Counterparty's tx checks out: sign our inputs and broadcast.
            rtxs.sign(self.etx_spec.my_utxo_list)
            self.ewctrl.publish_tx(rtxs, self.my_offer)
            self.etx_data = rtxs.get_hex_tx_data()
        else:
            raise Exception('P2ptrade reply tx check failed!')
class MyReplyEProposal(EProposal):
    """Our reply to a peer's proposal: shares the peer's pid and carries
    the transaction built for their offer."""

    def __init__(self, ewctrl, foreign_ep, my_offer):
        super(MyReplyEProposal, self).__init__(foreign_ep.pid,
                                               ewctrl,
                                               foreign_ep.offer)
        self.my_offer = my_offer
        # Build the reply transaction satisfying both sides of the offer.
        self.tx = self.ewctrl.make_reply_tx(foreign_ep.etx_spec,
                                            my_offer.A,
                                            my_offer.B)

    def get_data(self):
        data = super(MyReplyEProposal, self).get_data()
        data['etx_data'] = self.tx.get_hex_tx_data()
        return data

    def process_reply(self, reply_ep):
        # FIXME how is ever valid to call this function???
        # NOTE(review): .decode('hex') is Python-2-only (bytes.fromhex on 3).
        rtxs = RawTxSpec.from_tx_data(self.ewctrl.model,
                                      reply_ep.etx_data.decode('hex'))
        self.ewctrl.publish_tx(rtxs, self.my_offer)  # TODO: ???
class ForeignEProposal(EProposal):
    """A proposal received from a peer, parsed from its wire-format dict."""

    def __init__(self, ewctrl, ep_data):
        offer = EOffer.from_data(ep_data['offer'])
        super(ForeignEProposal, self).__init__(ep_data['pid'], ewctrl, offer)
        if 'etx_spec' in ep_data:
            self.etx_spec = ETxSpec.from_data(ep_data['etx_spec'])
        else:
            self.etx_spec = None
        self.etx_data = ep_data.get('etx_data', None)

    def accept(self, my_offer):
        """Accept this proposal against *my_offer*, producing a reply.

        Raises when the offers do not line up or no tx spec was supplied.
        """
        if not self.offer.is_same_as_mine(my_offer):
            raise Exception("Incompatible offer!")  # pragma: no cover
        if not self.etx_spec:
            raise Exception("Need etx_spec!")  # pragma: no cover
        return MyReplyEProposal(self.ewctrl, self, my_offer)
| mit | bfebc5eb6a8163b99caaea18c265fe7b | 31.791667 | 78 | 0.55845 | 3.441691 | false | false | false | false |
chromaway/ngcccbase | ui/wallet.py | 4 | 7007 | from ngcccbase.pwallet import PersistentWallet
from ngcccbase.wallet_controller import WalletController
from ngcccbase.asset import AssetDefinition
from ngcccbase.p2ptrade.ewctrl import EWalletController
from ngcccbase.p2ptrade.agent import EAgent
from ngcccbase.p2ptrade.comm import HTTPComm, ThreadedComm
from ngcccbase.p2ptrade.protocol_objects import MyEOffer
from ngcccbase.utxo_fetcher import AsyncUTXOFetcher
import time
import argparse
import threading
from decimal import Decimal
class TimedAsyncTask(threading.Thread):
    """Background thread that invokes *task* every *sleep_time* seconds.

    Call stop() to request termination; the loop exits after the current
    sleep interval.
    """

    def __init__(self, task, sleep_time):
        super(TimedAsyncTask, self).__init__()
        # The original stored the event as self._stop, which shadows
        # threading.Thread._stop() on Python 3 and breaks thread cleanup
        # (the Thread internals call _stop() when the thread finishes).
        self._stop_event = threading.Event()
        self.sleep_time = sleep_time
        self.task = task

    def run(self):
        """Run *task* repeatedly until stop() is called."""
        while not self._stop_event.is_set():
            self.task()
            time.sleep(self.sleep_time)

    def stop(self):
        """Signal the loop to terminate after its current iteration."""
        self._stop_event.set()
class Wallet(object):
thread_comm = None
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument("--wallet", dest="wallet_path")
parser.add_argument("--testnet", action='store_true')
parsed_args = vars(parser.parse_args())
self.wallet = PersistentWallet(parsed_args.get('wallet_path'),
parsed_args.get('testnet'))
self.wallet.init_model()
self.model = self.wallet.get_model()
self.controller = WalletController(self.wallet.get_model())
self.async_utxo_fetcher = AsyncUTXOFetcher(
self.model, self.wallet.wallet_config.get('utxo_fetcher', {}))
self.update_connected_thread = TimedAsyncTask(self.update_connected, 2.5)
self.update_connected_thread.start()
self.update_connected()
def connected(self):
return self.is_connected
def update_connected(self):
try:
for moniker in self.get_all_monikers():
asset = self.get_asset_definition(moniker)
address = self.get_some_address(asset)
total_balance = self.get_total_balance(asset)
self.is_connected = self.async_utxo_fetcher.interface.connected()
except:
raise
self.is_connected = False
def get_asset_definition(self, moniker):
if isinstance(moniker, AssetDefinition):
return moniker
adm = self.wallet.get_model().get_asset_definition_manager()
asset = adm.get_asset_by_moniker(moniker)
if asset:
return asset
else:
raise Exception("Asset '%s' not found!" % moniker)
def get_asset_definition_by_color_set(self, color_set):
adm = self.wallet.get_model().get_asset_definition_manager()
for asset in adm.get_all_assets():
if color_set in asset.get_color_set().get_data():
return asset
raise Exception("Asset not found!")
def add_asset(self, params):
self.controller.add_asset_definition({
"monikers": [params['moniker']],
"color_set": [params['color_desc']],
"unit": params['unit']
})
if len(self.get_all_addresses(params['moniker'])) == 0:
self.get_new_address(params['moniker'])
def get_all_asset(self):
return self.wallet.wallet_config['asset_definitions']
def issue(self, params):
self.controller.issue_coins(
params['moniker'], params['coloring_scheme'],
params['units'], params['atoms'])
if len(self.get_all_addresses(params['moniker'])) == 0:
self.get_new_address(params['moniker'])
def get_all_monikers(self):
monikers = [asset.get_monikers()[0] for asset in
self.model.get_asset_definition_manager().get_all_assets()]
monikers.remove('bitcoin')
monikers = ['bitcoin'] + monikers
return monikers
def get_available_balance(self, color):
return self.controller.get_available_balance(
self.get_asset_definition(color))
def get_total_balance(self, color):
return self.controller.get_total_balance(
self.get_asset_definition(color))
def get_unconfirmed_balance(self, color):
return self.controller.get_unconfirmed_balance(
self.get_asset_definition(color))
def get_all_addresses(self, color):
return [addr.get_color_address() for addr in
self.controller.get_all_addresses(self.get_asset_definition(color))]
def get_received_by_address(self, color):
asset = self.get_asset_definition(color)
return self.controller.get_received_by_address(asset)
def get_some_address(self, color):
wam = self.model.get_address_manager()
cs = self.get_asset_definition(color).get_color_set()
ar = wam.get_some_address(cs)
return ar.get_color_address()
def get_new_address(self, color):
return self.controller. \
get_new_address(self.get_asset_definition(color)).get_color_address()
def scan(self):
self.controller.scan_utxos()
def send_coins(self, items):
if isinstance(items, dict):
items = [items]
for item in items:
self.controller.send_coins(
item['asset'] if 'asset' in item \
else self.get_asset_definition(item['moniker']),
[item['address']],
[item['value']])
def p2ptrade_init(self):
ewctrl = EWalletController(self.model, self.controller)
config = {"offer_expiry_interval": 30, "ep_expiry_interval": 30}
self.thread_comm = ThreadedComm(
config, 'http://p2ptrade.btx.udoidio.info/messages'
)
self.p2p_agent = EAgent(ewctrl, config, self.thread_comm)
self.thread_comm.start()
def p2ptrade_stop(self):
if self.thread_comm is not None:
self.thread_comm.stop()
def p2ptrade_make_offer(self, we_sell, params):
asset = self.get_asset_definition(params['moniker'])
value = asset.parse_value(params['value'])
bitcoin = self.get_asset_definition('bitcoin')
price = bitcoin.parse_value(params['price'])
total = int(Decimal(value)/Decimal(asset.unit)*Decimal(price))
color_desc = asset.get_color_set().color_desc_list[0]
sell_side = {"color_spec": color_desc, "value": value}
buy_side = {"color_spec": "", "value": total}
if we_sell:
return MyEOffer(None, sell_side, buy_side)
else:
return MyEOffer(None, buy_side, sell_side)
def p2ptrade_make_mirror_offer(self, offer):
data = offer.get_data()
return MyEOffer(None, data['B'], data['A'])
    def stop_all(self):
        """Shut down all background machinery before the wallet exits.

        The connectedness poller is stopped and joined first, then the
        async UTXO fetcher, then the p2ptrade messaging thread, and
        finally the tx-database verifier thread when one exists.
        """
        self.update_connected_thread.stop()
        self.update_connected_thread.join()
        self.async_utxo_fetcher.stop()
        self.p2ptrade_stop()
        # txdb only carries a 'vbs' thread in some configurations
        if hasattr(self.model.txdb, 'vbs'):
            self.model.txdb.vbs.stop()
wallet = Wallet()
| mit | 17e64c7365c816f9a2b2c93d3b584bbe | 34.933333 | 81 | 0.618096 | 3.647579 | false | false | false | false |
chromaway/ngcccbase | coloredcoinlib/txspec.py | 4 | 5703 | """ Transaction specification language """
from blockchain import CTxIn
from colorvalue import ColorValue, SimpleColorValue
class InvalidColorIdError(Exception):
    """Raised when an operation is handed a color id it cannot resolve."""
class ZeroSelectError(Exception):
    """Exception type for zero-value coin selections."""
class ColorTarget(object):
    """A payment target: a destination address paired with a colorvalue.

    Thin wrapper that forwards value/color queries to the wrapped
    colorvalue object.
    """

    def __init__(self, address, colorvalue):
        self.address = address
        self.colorvalue = colorvalue

    def get_colordef(self):
        """Return the color definition of the wrapped value."""
        return self.colorvalue.get_colordef()

    def get_color_id(self):
        """Return the numeric color id of the wrapped value."""
        return self.colorvalue.get_color_id()

    def is_uncolored(self):
        """True when the wrapped value is plain (uncolored) bitcoin."""
        return self.colorvalue.is_uncolored()

    def get_address(self):
        """Return the destination address."""
        return self.address

    def get_value(self):
        """Return the value in the asset's own units."""
        return self.colorvalue.get_value()

    def get_satoshi(self):
        """Return the value expressed in satoshi."""
        return self.colorvalue.get_satoshi()

    def __repr__(self):
        return "%s: %s" % (self.address, self.colorvalue)

    @classmethod
    def sum(cls, targets):
        """Sum the colorvalues of *targets*; zero uncolored value if empty."""
        if not targets:
            from colordef import UNCOLORED_MARKER  # circular import
            return SimpleColorValue(colordef=UNCOLORED_MARKER, value=0)
        value_cls = targets[0].colorvalue.__class__
        return value_cls.sum([t.colorvalue for t in targets])
class OperationalTxSpec(object):
    """Abstract transaction specification that is ready to be operated on:
    it knows its targets, how to select coins, and the fee policy."""

    def get_targets(self):
        """Return the list of ColorTargets this transaction pays."""
        raise Exception('not implemented')  # pragma: no cover

    def select_coins(self, colorvalue, use_fee_estimator=None):
        """Return UTXOs whose colordef matches <colorvalue> and whose
        colorvalues sum to at least <colorvalue>.  For uncolored coins
        the sum must additionally cover a fee when <use_fee_estimator>
        (usually a composed tx spec) is supplied."""
        raise Exception('not implemented')  # pragma: no cover

    def get_change_addr(self, color_def):
        """Return an address usable as change for <color_def>."""
        raise Exception('not implemented')  # pragma: no cover

    def get_required_fee(self, tx_size):
        """Return a ColorValue giving the fee for a tx of <tx_size> bytes."""
        raise Exception('not implemented')  # pragma: no cover

    def get_dust_threshold(self):
        """Return a ColorValue for the smallest non-dust satoshi amount."""
        raise Exception('not implemented')  # pragma: no cover

    def is_monocolor(self):
        """True when every target shares the first target's colordef
        (compared by identity)."""
        targets = self.get_targets()
        first_colordef = targets[0].get_colordef()
        return all(t.get_colordef() is first_colordef for t in targets[1:])

    def make_composed_tx_spec(self):
        """Create an empty ComposedTxSpec bound to this spec."""
        return ComposedTxSpec(self)
class ComposedTxSpec(object):
    """specification of a transaction which is already composed,
    but isn't signed yet"""

    class TxIn(CTxIn):
        """Transaction input; all behaviour is inherited from CTxIn."""
        pass

    class TxOut(object):
        """Plain transaction output: a satoshi value and a destination address."""
        # __slots__ keeps the many per-tx output objects small.
        __slots__ = ['value', 'target_addr']

        def __init__(self, value, target_addr):
            self.value = value
            self.target_addr = target_addr

    class FeeChangeTxOut(TxOut):
        """Marker subclass identifying the change output."""
        pass

    def __init__(self, operational_tx_spec=None):
        # operational_tx_spec supplies the fee policy used by
        # estimate_required_fee(); it may be omitted for deserialized txs.
        self.txins = []
        self.txouts = []
        self.operational_tx_spec = operational_tx_spec

    def add_txin(self, txin):
        """Append a single input; must be an instance of self.TxIn."""
        assert isinstance(txin, self.TxIn)
        self.txins.append(txin)

    def add_txout(self, txout=None, value=None, target_addr=None,
                  target=None, is_fee_change=False):
        """Append an output, either ready-made (<txout>) or built from
        <value>/<target_addr>/<target>.

        When <value> is omitted it is taken from <target>, which must be
        uncolored; a ColorValue passed as <value> is likewise unwrapped
        only when uncolored.  <is_fee_change> selects the FeeChangeTxOut
        marker class.  NOTE(review): falsy values (0) take the same path
        as None here, so a zero-value output cannot be expressed -- confirm
        whether that is intended.
        """
        if not txout:
            if not value:
                if target and target.is_uncolored():
                    value = target.get_value()
                else:
                    raise Exception("Error in ComposedTxSpec.add_txout: no\
 value is provided and target is not uncolored!")
            if isinstance(value, ColorValue):
                if value.is_uncolored():
                    value = value.get_value()
                else:
                    raise Exception("Error in ComposedTxSpec.add_txout: no\
 value isn't uncolored!")
            if not target_addr:
                target_addr = target.get_address()
            cls = self.FeeChangeTxOut if is_fee_change else self.TxOut
            txout = cls(value, target_addr)
        self.txouts.append(txout)

    def add_txouts(self, txouts):
        """Append a mixed list of ColorTargets and ready-made TxOuts."""
        for txout in txouts:
            if isinstance(txout, ColorTarget):
                self.add_txout(target=txout)
            elif isinstance(txout, self.TxOut):
                self.add_txout(txout=txout)
            else:
                raise Exception('Wrong txout instance!')

    def add_txins(self, txins):
        """Append every input in <txins>."""
        for txin in txins:
            self.add_txin(txin)

    def get_txins(self):
        return self.txins

    def get_txouts(self):
        return self.txouts

    def estimate_size(self, extra_txins=0, extra_txouts=0, extra_bytes=0):
        """Rough serialized size in bytes: per-input (181) and per-output
        (34) estimates -- presumably standard pay-to-pubkey-hash sizes --
        plus a fixed 10-byte overhead."""
        return (181 * (len(self.txins) + extra_txins) +
                34 * (len(self.txouts) + extra_txouts) +
                10 + extra_bytes)

    def estimate_required_fee(self, extra_txins=0, extra_txouts=1, extra_bytes=0):
        """Fee for the estimated size; the default extra_txouts=1 assumes
        one more output (the change) is still to be added."""
        return self.operational_tx_spec.get_required_fee(
            self.estimate_size(extra_txins=extra_txins,
                               extra_txouts=extra_txouts,
                               extra_bytes=extra_bytes))

    def get_fee(self):
        """Implicit miner fee: total input value minus total output value."""
        sum_txins = sum([inp.value for inp in self.txins])
        sum_txouts = sum([out.value for out in self.txouts])
        return sum_txins - sum_txouts
| mit | 271c7455fa01b52ee160ffe5dd277102 | 31.775862 | 82 | 0.60512 | 3.890177 | false | false | false | false |
chromaway/ngcccbase | ngcccbase/txcons.py | 3 | 15270 | """
txcons.py
Transaction Constructors for the blockchain.
"""
from collections import defaultdict
from asset import AssetTarget
from coloredcoinlib import (ColorSet, ColorTarget, SimpleColorValue,
ComposedTxSpec, OperationalTxSpec,
UNCOLORED_MARKER, OBColorDefinition,
InvalidColorIdError, ZeroSelectError)
from binascii import hexlify
import pycoin_txcons
import io
import math
class InsufficientFundsError(Exception):
    """Raised when the available coins cannot cover a requested amount."""
class InvalidTargetError(Exception):
    """Raised when a tx spec receives a malformed or missing target."""
class InvalidTransformationError(Exception):
    """Raised when a tx spec cannot be transformed to the requested kind."""
class BasicTxSpec(object):
    """The simplest colored-coin transaction description: a list of
    AssetTargets plus the wallet model.

    It has not yet been constructed, composed or signed -- those steps
    happen in other classes.  Only single-asset transactions can be
    converted onward.
    """

    def __init__(self, model):
        """Create an empty spec backed by wallet model <model>."""
        self.model = model
        self.targets = []

    def add_target(self, asset_target):
        """Append an AssetTarget (recipient address + asset value)."""
        if not isinstance(asset_target, AssetTarget):
            raise InvalidTargetError("Not an asset target!")
        self.targets.append(asset_target)

    def is_monoasset(self):
        """True when every target references the same asset.

        Raises InvalidTargetError if no targets were added.
        """
        if not self.targets:
            raise InvalidTargetError('Basic txs is empty!')
        first_asset = self.targets[0].get_asset()
        return not any(t.get_asset() != first_asset for t in self.targets)

    def is_monocolor(self):
        """True when all targets share one asset and that asset carries
        exactly one color id."""
        if not self.is_monoasset():
            return False
        color_ids = self.targets[0].get_asset().get_color_set().color_id_set
        return len(color_ids) == 1

    def make_operational_tx_spec(self, asset):
        """Convert this spec into a SimpleOperationalTxSpec for <asset>.

        Only monocolor specs are supported; anything else raises
        InvalidTransformationError.
        """
        if not self.is_monocolor():
            raise InvalidTransformationError('Tx spec type not supported!')
        op_tx_spec = SimpleOperationalTxSpec(self.model, asset)
        color_id = list(asset.get_color_set().color_id_set)[0]
        color_def = self.model.get_color_def(color_id)
        for target in self.targets:
            colorvalue = SimpleColorValue(colordef=color_def,
                                          value=target.get_value())
            op_tx_spec.add_target(ColorTarget(target.get_address(), colorvalue))
        return op_tx_spec
class BaseOperationalTxSpec(OperationalTxSpec):
def get_required_fee(self, tx_size):
"""Given a transaction that is of size <tx_size>,
return the transaction fee in Satoshi that needs to be
paid out to miners.
"""
base_fee = 11000.0
fee_value = math.ceil((tx_size * base_fee) / 1000)
return SimpleColorValue(colordef=UNCOLORED_MARKER,
value=fee_value)
def get_dust_threshold(self):
return SimpleColorValue(colordef=UNCOLORED_MARKER, value=600)
def _select_enough_coins(self, colordef, utxo_list, required_sum_fn):
ssum = SimpleColorValue(colordef=colordef, value=0)
selection = []
required_sum = None
for utxo in utxo_list:
ssum += SimpleColorValue.sum(utxo.colorvalues)
selection.append(utxo)
required_sum = required_sum_fn(utxo_list)
if ssum >= required_sum:
return selection, ssum
raise InsufficientFundsError('Not enough coins: %s requested, %s found!'
% (required_sum, ssum))
def _validate_select_coins_parameters(self, colorvalue, use_fee_estimator):
fee = None
if use_fee_estimator:
fee = use_fee_estimator.estimate_required_fee()
if not fee and colorvalue.get_value() < 0:
raise Exception("Cannot select negative coins!")
elif fee and (colorvalue + fee).get_value() < 0:
raise Exception("Cannot select negative coins!")
colordef = colorvalue.get_colordef()
if colordef != UNCOLORED_MARKER and use_fee_estimator:
msg = "Fee estimator can only be used with uncolored coins!"
raise Exception(msg)
class SimpleOperationalTxSpec(BaseOperationalTxSpec):
    """Subclass of OperationalTxSpec which uses wallet model.
    Represents a transaction that's ready to be composed
    and then signed. The parent is an abstract class.
    """

    def __init__(self, model, asset):
        """Initialize a transaction that uses a wallet model
        <model> and transfers asset/color <asset>.
        """
        super(SimpleOperationalTxSpec, self).__init__()
        self.model = model
        self.targets = []
        self.asset = asset

    def add_target(self, color_target):
        """Add a ColorTarget <color_target> to the transaction
        """
        if not isinstance(color_target, ColorTarget):
            raise InvalidTargetError("Target is not an instance of ColorTarget!")
        self.targets.append(color_target)

    def get_targets(self):
        """Get a list of (receiving address, color_id, colorvalue)
        triplets representing all the targets for this tx.
        """
        return self.targets

    def get_change_addr(self, color_def):
        """Get an address associated with color definition <color_def>
        that is in the current wallet for receiving change.

        Raises InvalidColorIdError when <color_def>'s id belongs to
        neither the uncolored marker nor this spec's asset.
        """
        am = self.model.get_asset_definition_manager()
        wam = self.model.get_address_manager()
        color_id = color_def.color_id
        asset = am.get_asset_by_color_id(color_id)
        color_set = None
        # uncolored change goes to a bitcoin (color id 0) address;
        # colored change must match the asset's own color set
        if color_def == UNCOLORED_MARKER:
            color_set = ColorSet.from_color_ids(self.model.get_color_map(), [0])
        elif asset.get_color_set().has_color_id(color_id):
            color_set = asset.get_color_set()
        if color_set is None:
            raise InvalidColorIdError('Wrong color id!')
        aw = wam.get_change_address(color_set)
        return aw.get_address()

    def select_coins(self, colorvalue, use_fee_estimator=None):
        """Return a list of utxos and sum that corresponds to
        the colored coins identified by <color_def> of amount <colorvalue>
        that we'll be spending from our wallet.

        The required-sum callback grows the fee with the number of
        inputs handed to it (extra_txins), so the fee tracks the
        transaction size as coins are added.
        """
        self._validate_select_coins_parameters(colorvalue, use_fee_estimator)

        def required_sum_fn(selection):
            if use_fee_estimator:
                return colorvalue + use_fee_estimator.estimate_required_fee(
                    extra_txins=len(selection))
            else:
                return colorvalue

        required_sum_0 = required_sum_fn([])
        if required_sum_0.get_value() == 0:
            # no coins need to be selected
            return [], required_sum_0
        colordef = colorvalue.get_colordef()
        color_id = colordef.get_color_id()
        cq = self.model.make_coin_query({"color_id_set": set([color_id])})
        utxo_list = cq.get_result()
        return self._select_enough_coins(colordef, utxo_list, required_sum_fn)
class RawTxSpec(object):
    """Represents a transaction which can be serialized.
    """
    def __init__(self, model, pycoin_tx, composed_tx_spec=None):
        self.model = model
        self.pycoin_tx = pycoin_tx
        self.composed_tx_spec = composed_tx_spec
        self.update_tx_data()
        self.intent = None

    def get_intent(self):
        return self.intent

    def get_hex_txhash(self):
        """Return the tx hash as hex in conventional (byte-reversed) order.

        NOTE(review): str.encode('hex') is Python-2-only; this method
        would need binascii.hexlify under Python 3.
        """
        the_hash = self.pycoin_tx.hash()
        return the_hash[::-1].encode('hex')

    def update_tx_data(self):
        """Updates serialized form of transaction.
        """
        s = io.BytesIO()
        self.pycoin_tx.stream(s)
        self.tx_data = s.getvalue()

    @classmethod
    def from_composed_tx_spec(cls, model, composed_tx_spec):
        """Build a RawTxSpec by constructing a standard (unsigned) tx
        from <composed_tx_spec>."""
        testnet = model.is_testnet()
        tx = pycoin_txcons.construct_standard_tx(composed_tx_spec, testnet)
        return cls(model, tx, composed_tx_spec)

    @classmethod
    def from_tx_data(cls, model, tx_data):
        """Deserialize <tx_data> and reconstruct its composed tx spec."""
        pycoin_tx = pycoin_txcons.deserialize(tx_data)
        composed_tx_spec = pycoin_txcons.reconstruct_composed_tx_spec(
            model, pycoin_tx)
        return cls(model, pycoin_tx, composed_tx_spec)

    def sign(self, utxo_list):
        """Sign the tx against the given UTXOs and refresh tx_data."""
        pycoin_txcons.sign_tx(
            self.pycoin_tx, utxo_list, self.model.is_testnet())
        self.update_tx_data()

    def get_tx_data(self):
        """Returns the signed transaction data.
        """
        return self.tx_data

    def get_hex_tx_data(self):
        """Returns the hex version of the signed transaction data.
        """
        return hexlify(self.tx_data).decode("utf8")

    def get_input_addresses(self):
        """Return the addresses that own this tx's inputs, looked up
        from the referenced output of each input's source transaction."""
        ccc = self.model.ccc
        bs = self.model.get_blockchain_state()
        inputs = [ti.get_outpoint() for ti in self.composed_tx_spec.txins]
        raw_addrs = [bs.get_tx(tx).outputs[n].raw_address for tx, n in inputs]
        return [ccc.raw_to_address(raw) for raw in raw_addrs]
def compose_uncolored_tx(tx_spec):
    """ compose a simple bitcoin transaction

    Adds the target outputs, selects enough coins to cover targets plus
    the estimated fee, and appends a change output unless the change
    would be dust (in which case it is implicitly left to the miners).
    """
    composed_tx_spec = tx_spec.make_composed_tx_spec()
    targets = tx_spec.get_targets()
    composed_tx_spec.add_txouts(targets)
    ttotal = ColorTarget.sum(targets)
    # the composed spec doubles as the fee estimator during selection
    sel_utxos, sum_sel_coins = tx_spec.select_coins(ttotal, composed_tx_spec)
    composed_tx_spec.add_txins(sel_utxos)
    fee = composed_tx_spec.estimate_required_fee()
    change = sum_sel_coins - ttotal - fee
    # give ourselves the change
    if change > tx_spec.get_dust_threshold():
        composed_tx_spec.add_txout(value=change,
                                   target_addr=tx_spec.get_change_addr(UNCOLORED_MARKER),
                                   is_fee_change=True)
    return composed_tx_spec
class TransactionSpecTransformer(object):
    """An object that can transform one type of transaction into another.
    Essentially has the ability to take a transaction, compose it
    and sign it by returning the appropriate objects.

    The general flow of transaction types is this:
    BasicTxSpec -> SimpleOperationalTxSpec -> ComposedTxSpec -> SignedTxSpec
    "basic" -> "operational" -> "composed" -> "signed"
    """

    def __init__(self, model, config):
        """Create a transaction transformer object for wallet_model <model>
        and a wallet configuration <config>
        """
        self.model = model
        self.testnet = config.get('testnet', False)

    def get_tx_composer(self, op_tx_spec):
        """Returns a function which is able to convert a given operational
        tx spec <op_tx_spec> into a composed tx spec
        """
        if op_tx_spec.is_monocolor():
            color_def = op_tx_spec.get_targets()[0].get_colordef()
            if color_def == UNCOLORED_MARKER:
                return compose_uncolored_tx
            else:
                return color_def.compose_tx_spec
        else:
            # grab the first color def and hope that its compose_tx_spec
            # will be able to handle it. if transaction has incompatible
            # colors, compose_tx_spec will throw an exception
            for target in op_tx_spec.get_targets():
                tgt_color_def = target.get_colordef()
                if tgt_color_def is UNCOLORED_MARKER:
                    continue
                else:
                    return tgt_color_def.compose_tx_spec
            return None

    def classify_tx_spec(self, tx_spec):
        """For a transaction <tx_spec>, returns a string that represents
        the type of transaction (basic, operational, composed, signed)
        that it is.
        """
        if isinstance(tx_spec, BasicTxSpec):
            return 'basic'
        elif isinstance(tx_spec, OperationalTxSpec):
            return 'operational'
        elif isinstance(tx_spec, ComposedTxSpec):
            return 'composed'
        elif isinstance(tx_spec, RawTxSpec):
            return 'signed'
        else:
            return None

    def transform_basic(self, tx_spec, target_spec_kind):
        """Takes a basic transaction <tx_spec> and returns a transaction
        of type <target_spec_kind> which is one of (operational,
        composed, signed).

        Only monocolor basic specs can be transformed.
        """
        if target_spec_kind in ['operational', 'composed', 'signed']:
            if tx_spec.is_monocolor():
                asset = tx_spec.targets[0].get_asset()
                operational_ts = tx_spec.make_operational_tx_spec(asset)
                # recurse: each step advances one stage in the chain
                return self.transform(operational_ts, target_spec_kind)
        msg = 'Do not know how to transform tx spec!'
        raise InvalidTransformationError(msg)

    def transform_operational(self, tx_spec, target_spec_kind):
        """Takes an operational transaction <tx_spec> and returns a
        transaction of type <target_spec_kind> which is one of
        (composed, signed).
        """
        if target_spec_kind in ['composed', 'signed']:
            composer = self.get_tx_composer(tx_spec)
            if composer:
                composed = composer(tx_spec)
                return self.transform(composed, target_spec_kind)
        msg = 'Do not know how to transform tx spec!'
        raise InvalidTransformationError(msg)

    def transform_composed(self, tx_spec, target_spec_kind):
        """Takes a SimpleComposedTxSpec <tx_spec> and returns
        a signed transaction. For now, <target_spec_kind> must
        equal "signed" or will throw an exception.
        """
        if target_spec_kind in ['signed']:
            rtxs = RawTxSpec.from_composed_tx_spec(self.model, tx_spec)
            rtxs.sign(tx_spec.get_txins())
            return rtxs
        msg = 'Do not know how to transform tx spec!'
        raise InvalidTransformationError(msg)

    def transform_signed(self, tx_spec, target_spec_kind):
        """This method is not yet implemented.
        """
        msg = 'Do not know how to transform tx spec!'
        raise InvalidTransformationError(msg)

    def transform(self, tx_spec, target_spec_kind):
        """Transform a transaction <tx_spec> into another type
        of transaction defined by <target_spec_kind> and returns it.

        Dispatches on the detected kind; a no-op when the spec is
        already of the requested kind.
        """
        spec_kind = self.classify_tx_spec(tx_spec)
        if spec_kind is None:
            raise InvalidTransformationError('Spec kind is not recognized!')
        if spec_kind == target_spec_kind:
            return tx_spec
        if spec_kind == 'basic':
            return self.transform_basic(tx_spec, target_spec_kind)
        elif spec_kind == 'operational':
            return self.transform_operational(tx_spec, target_spec_kind)
        elif spec_kind == 'composed':
            return self.transform_composed(tx_spec, target_spec_kind)
        elif spec_kind == 'signed':
            return self.transform_signed(tx_spec, target_spec_kind)
| mit | 1154a06f78a6b79f1fb789889bc2e904 | 37.560606 | 89 | 0.614604 | 3.967264 | false | false | false | false |
rlworkgroup/metaworld | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_open_v2.py | 1 | 4519 | import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerDrawerOpenEnvV2(SawyerXYZEnv):
    """Sawyer task: pull a drawer open by its handle."""

    def __init__(self):
        # workspace bounds for the hand and the drawer's random placement
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.9, 0.0)
        obj_high = (0.1, 0.9, 0.0)

        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )

        self.init_config = {
            'obj_init_angle': np.array([0.3, ], dtype=np.float32),
            'obj_init_pos': np.array([0., 0.9, 0.0], dtype=np.float32),
            'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
        }
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']

        goal_low = self.hand_low
        goal_high = self.hand_high

        self._random_reset_space = Box(
            np.array(obj_low),
            np.array(obj_high),
        )
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
        # distance the handle must travel from closed to open; it also
        # sets the goal offset in reset_model and the reward margin
        self.maxDist = 0.2
        self.target_reward = 1000 * self.maxDist + 1000 * 2

    @property
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_drawer.xml')

    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the reward and the standard metaworld info dict."""
        (
            reward,
            gripper_error,
            gripped,
            handle_error,
            caging_reward,
            opening_reward
        ) = self.compute_reward(action, obs)

        info = {
            'success': float(handle_error <= 0.03),
            'near_object': float(gripper_error <= 0.03),
            'grasp_success': float(gripped > 0),
            'grasp_reward': caging_reward,
            'in_place_reward': opening_reward,
            'obj_to_target': handle_error,
            'unscaled_reward': reward,
        }

        return reward, info

    def _get_id_main_object(self):
        return self.unwrapped.model.geom_name2id('objGeom')

    def _get_pos_objects(self):
        # handle position: drawer body plus a fixed offset toward the front
        return self.get_body_com('drawer_link') + np.array([.0, -.16, .0])

    def _get_quat_objects(self):
        return self.sim.data.get_body_xquat('drawer_link')

    def reset_model(self):
        self._reset_hand()
        self.prev_obs = self._get_curr_obs_combined_no_goal()

        # Compute nightstand position
        self.obj_init_pos = self._get_state_rand_vec() if self.random_init \
            else self.init_config['obj_init_pos']
        # Set mujoco body to computed position
        self.sim.model.body_pos[self.model.body_name2id(
            'drawer'
        )] = self.obj_init_pos
        # Set _target_pos to current drawer position (closed) minus an offset
        self._target_pos = self.obj_init_pos + np.array([.0, -.16 - self.maxDist, .09])

        return self._get_obs()

    def compute_reward(self, action, obs):
        """Reward = 5 * (caging term + opening term).

        Caging rewards the gripper for approaching the handle (with XY
        error emphasized); opening rewards moving the handle toward the
        target position.
        """
        gripper = obs[:3]
        handle = obs[4:7]

        handle_error = np.linalg.norm(handle - self._target_pos)

        reward_for_opening = reward_utils.tolerance(
            handle_error,
            bounds=(0, 0.02),
            margin=self.maxDist,
            sigmoid='long_tail'
        )

        handle_pos_init = self._target_pos + np.array([.0, self.maxDist, .0])
        # Emphasize XY error so that gripper is able to drop down and cage
        # handle without running into it. By doing this, we are assuming
        # that the reward in the Z direction is small enough that the agent
        # will be willing to explore raising a finger above the handle, hook it,
        # and drop back down to re-gain Z reward
        scale = np.array([3., 3., 1.])
        gripper_error = (handle - gripper) * scale
        gripper_error_init = (handle_pos_init - self.init_tcp) * scale

        reward_for_caging = reward_utils.tolerance(
            np.linalg.norm(gripper_error),
            bounds=(0, 0.01),
            margin=np.linalg.norm(gripper_error_init),
            sigmoid='long_tail'
        )

        reward = reward_for_caging + reward_for_opening
        reward *= 5.0

        return (
            reward,
            np.linalg.norm(handle - gripper),
            obs[3],
            handle_error,
            reward_for_caging,
            reward_for_opening
        )
| mit | 2d0921ad76e4bdafa25c1bee9c52c43c | 31.746377 | 93 | 0.561407 | 3.339985 | false | false | false | false |
rlworkgroup/metaworld | metaworld/policies/sawyer_drawer_open_v1_policy.py | 1 | 1555 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerDrawerOpenV1Policy(Policy):

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        parsed = {
            'hand_pos': obs[:3],
            'drwr_pos': obs[3:6],
            'unused_info': obs[6:],
        }
        return parsed

    def get_action(self, obs):
        """Three-stage scripted controller: align above the handle,
        descend onto it, then pull with a stronger gain."""
        parsed = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })

        # NOTE this policy looks different from the others because it must
        # modify its p constant part-way through the task
        hand = parsed['hand_pos']
        drawer = parsed['drwr_pos']

        if np.linalg.norm(hand[:2] - drawer[:2]) > 0.06:
            # align end effector's Z axis with drawer handle's Z axis
            target = drawer + np.array([0., 0., 0.3])
            gain = 4.
        elif abs(hand[2] - drawer[2]) > 0.04:
            # drop down to touch drawer handle
            target = drawer
            gain = 4.
        else:
            # push toward a point just behind the drawer handle,
            # with a larger p value to apply more force
            target = drawer + np.array([0., -0.06, 0.])
            gain = 50.

        action['delta_pos'] = move(parsed['hand_pos'], target, p=gain)
        # keep gripper open
        action['grab_effort'] = -1.

        return action.array
| mit | 62e6dd9318c873caaf1c20c43e273642 | 31.395833 | 74 | 0.550482 | 3.351293 | false | false | false | false |
rlworkgroup/metaworld | metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press_side.py | 1 | 3890 | import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerHandlePressSideEnv(SawyerXYZEnv):
    """Sawyer task: press a handle down (sideways box variant)."""

    def __init__(self):
        # workspace bounds for the hand and the box's random placement
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.35, 0.65, 0.05)
        obj_high = (-0.25, 0.75, 0.05)

        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )

        self.init_config = {
            'obj_init_pos': np.array([-0.3, 0.7, 0.05]),
            'hand_init_pos': np.array((0, 0.6, 0.2),),
        }
        self.goal = np.array([-0.2, 0.7, 0.14])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']

        goal_low = self.hand_low
        goal_high = self.hand_high

        self._random_reset_space = Box(
            np.array(obj_low),
            np.array(obj_high),
        )
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        return full_v1_path_for('sawyer_xyz/sawyer_handle_press_sideway.xml')

    @_assert_task_is_set
    def step(self, action):
        """Step the sim and report reward plus press-distance metrics."""
        ob = super().step(action)
        reward, reachDist, pressDist = self.compute_reward(action, ob)

        info = {
            'reachDist': reachDist,
            'goalDist': pressDist,
            'epRew': reward,
            'pickRew': None,
            'success': float(pressDist <= 0.04)
        }

        return ob, reward, False, info

    @property
    def _target_site_config(self):
        return []

    def _get_pos_objects(self):
        return self.data.site_xpos[self.model.site_name2id('handleStart')]

    def _set_obj_xyz(self, pos):
        # qpos/qvel index 9 is the handle joint in this model
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']

        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self.obj_init_pos = goal_pos
            # handle goal sits at a fixed offset from the box position
            button_pos = goal_pos.copy()
            button_pos[0] += 0.1
            button_pos[2] += 0.09
            self._target_pos = button_pos

        self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
        self.sim.model.body_pos[self.model.body_name2id('handle')] = self._target_pos
        self._set_obj_xyz(0)
        self._target_pos = self._get_site_pos('goalPress')
        # full press travel (z only); used as the reward margin below
        self.maxDist = np.abs(self.data.site_xpos[self.model.site_name2id('handleStart')][-1] - self._target_pos[-1])
        self.target_reward = 1000*self.maxDist + 1000*2

        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
        self.init_fingerCOM = (rightFinger + leftFinger)/2
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        """Reward = -reachDist + shaped press reward.

        The press reward only kicks in once the finger is within 5 cm of
        the handle; c1..c3 are shaping constants for the exponential
        bonus terms near the goal.
        """
        del actions
        objPos = obs[3:6]

        leftFinger = self._get_site_pos('leftEndEffector')
        fingerCOM = leftFinger

        pressGoal = self._target_pos[-1]

        pressDist = np.abs(objPos[-1] - pressGoal)
        reachDist = np.linalg.norm(objPos - fingerCOM)

        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        if reachDist < 0.05:
            pressRew = 1000*(self.maxDist - pressDist) + c1*(np.exp(-(pressDist**2)/c2) + np.exp(-(pressDist**2)/c3))
        else:
            pressRew = 0
        pressRew = max(pressRew, 0)
        reward = -reachDist + pressRew

        return [reward, reachDist, pressDist]
| mit | db9d5ac66b2bbfeb9ae4dcc1b788e9d9 | 30.12 | 117 | 0.562982 | 3.119487 | false | false | false | false |
rlworkgroup/metaworld | metaworld/policies/sawyer_button_press_topdown_wall_v2_policy.py | 1 | 1069 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerButtonPressTopdownWallV2Policy(Policy):

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Name the slices of the flat observation vector."""
        components = {
            'hand_pos': obs[:3],
            'hand_closed': obs[3],
            'button_pos': obs[4:7],
            'unused_info': obs[7:],
        }
        return components

    def get_action(self, obs):
        """Drive the open gripper toward the current desired position."""
        parsed = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })

        action['delta_pos'] = move(parsed['hand_pos'],
                                   to_xyz=self._desired_pos(parsed), p=25.)
        action['grab_effort'] = -1.

        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Hover above the button until roughly XY-aligned, then press."""
        hand = o_d['hand_pos']
        button = o_d['button_pos'] + np.array([.0, -.06, .0])

        if np.linalg.norm(hand[:2] - button[:2]) > 0.04:
            return button + np.array([0., 0., 0.1])
        return button
| mit | d9822c083a8d14942e076a9fe25ba4c2 | 25.725 | 89 | 0.540692 | 3.299383 | false | false | false | false |
rlworkgroup/metaworld | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_back_v2.py | 1 | 8128 | import numpy as np
from gym.spaces import Box
from scipy.spatial.transform import Rotation
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerPushBackEnvV2(SawyerXYZEnv):
    """Sawyer task: grasp a puck and push it back toward a target."""
    # geometry constants used by the caging/placement rewards
    OBJ_RADIUS = 0.007
    TARGET_RADIUS = 0.05

    def __init__(self):
        # workspace bounds and random-reset ranges for object and goal
        goal_low = (-0.1, 0.6, 0.0199)
        goal_high = (0.1, 0.7, 0.0201)
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.8, 0.02)
        obj_high = (0.1, 0.85, 0.02)

        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )

        self.init_config = {
            'obj_init_pos':np.array([0, 0.8, 0.02]),
            'obj_init_angle': 0.3,
            'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
        }
        self.goal = np.array([0., 0.6, 0.02])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']

        self._random_reset_space = Box(
            np.hstack((obj_low, goal_low)),
            np.hstack((obj_high, goal_high)),
        )
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_push_back_v2.xml')

    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the reward and the standard metaworld info dict."""
        obj = obs[4:7]
        (
            reward,
            tcp_to_obj,
            tcp_opened,
            target_to_obj,
            object_grasped,
            in_place
        ) = self.compute_reward(action, obs)

        success = float(target_to_obj <= 0.07)
        near_object = float(tcp_to_obj <= 0.03)
        # grasp counts only if touching, gripper not fully closed, and the
        # object has been lifted above its initial height
        grasp_success = float(self.touching_object and (tcp_opened > 0) and \
            (obj[2] - 0.02 > self.obj_init_pos[2]))

        info = {
            'success': success,
            'near_object': near_object,
            'grasp_success': grasp_success,
            'grasp_reward': object_grasped,
            'in_place_reward': in_place,
            'obj_to_target': target_to_obj,
            'unscaled_reward': reward,
        }
        return reward, info

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def _get_quat_objects(self):
        return Rotation.from_matrix(
            self.data.get_geom_xmat('objGeom')
        ).as_quat()

    def adjust_initObjPos(self, orig_init_pos):
        # This is to account for meshes for the geom and object are not aligned
        # If this is not done, the object could be initialized in an extreme position
        diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
        adjustedPos = orig_init_pos[:2] + diff

        # The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
        return [adjustedPos[0], adjustedPos[1],self.data.get_geom_xpos('objGeom')[-1]]

    def reset_model(self):
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
        self.obj_init_angle = self.init_config['obj_init_angle']

        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
            # resample until object and target are at least 15 cm apart
            while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
                goal_pos = self._get_state_rand_vec()
                self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
            self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))

        self._set_obj_xyz(self.obj_init_pos)
        return self._get_obs()

    def _gripper_caging_reward(self, action, obj_position, obj_radius):
        """Reward in [0, 1] for caging (and then gripping) the object.

        Combines: (a) a Y-axis term for each gripper pad relative to the
        object, (b) an XZ alignment term for the tcp, multiplied via a
        hamacher product; the gripping bonus only applies once caging is
        nearly complete (> 0.95).
        """
        pad_success_margin = 0.05
        grip_success_margin = obj_radius + 0.003
        x_z_success_margin = 0.01

        tcp = self.tcp_center
        left_pad = self.get_body_com('leftpad')
        right_pad = self.get_body_com('rightpad')
        delta_object_y_left_pad = left_pad[1] - obj_position[1]
        delta_object_y_right_pad = obj_position[1] - right_pad[1]
        right_caging_margin = abs(abs(obj_position[1] - self.init_right_pad[1]) - pad_success_margin)
        left_caging_margin = abs(abs(obj_position[1] - self.init_left_pad[1]) - pad_success_margin)

        right_caging = reward_utils.tolerance(delta_object_y_right_pad,
                                bounds=(obj_radius, pad_success_margin),
                                margin=right_caging_margin,
                                sigmoid='long_tail',
                                )
        left_caging = reward_utils.tolerance(delta_object_y_left_pad,
                                bounds=(obj_radius, pad_success_margin),
                                margin=left_caging_margin,
                                sigmoid='long_tail',
                                )

        right_gripping = reward_utils.tolerance(delta_object_y_right_pad,
                                bounds=(obj_radius, grip_success_margin),
                                margin=right_caging_margin,
                                sigmoid='long_tail',
                                )
        left_gripping = reward_utils.tolerance(delta_object_y_left_pad,
                                bounds=(obj_radius, grip_success_margin),
                                margin=left_caging_margin,
                                sigmoid='long_tail',
                                )

        assert right_caging >= 0 and right_caging <= 1
        assert left_caging >= 0 and left_caging <= 1

        y_caging = reward_utils.hamacher_product(right_caging, left_caging)
        y_gripping = reward_utils.hamacher_product(right_gripping, left_gripping)

        assert y_caging >= 0 and y_caging <= 1

        # XZ alignment: project both tcp and object onto the XZ plane
        tcp_xz = tcp + np.array([0., -tcp[1], 0.])
        obj_position_x_z = np.copy(obj_position) + np.array([0., -obj_position[1], 0.])
        tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)
        init_obj_x_z = self.obj_init_pos + np.array([0., -self.obj_init_pos[1], 0.])
        init_tcp_x_z = self.init_tcp + np.array([0., -self.init_tcp[1], 0.])

        tcp_obj_x_z_margin = np.linalg.norm(init_obj_x_z - init_tcp_x_z, ord=2) - x_z_success_margin
        x_z_caging = reward_utils.tolerance(tcp_obj_norm_x_z,
                                bounds=(0, x_z_success_margin),
                                margin=tcp_obj_x_z_margin,
                                sigmoid='long_tail',)

        assert right_caging >= 0 and right_caging <= 1

        gripper_closed = min(max(0, action[-1]), 1)
        assert gripper_closed >= 0 and gripper_closed <= 1
        caging = reward_utils.hamacher_product(y_caging, x_z_caging)
        assert caging >= 0 and caging <= 1

        if caging > 0.95:
            gripping = y_gripping
        else:
            gripping = 0.
        assert gripping >= 0 and gripping <= 1

        caging_and_gripping = (caging + gripping) / 2
        assert caging_and_gripping >= 0 and caging_and_gripping <= 1

        return caging_and_gripping

    def compute_reward(self, action, obs):
        """Shaped reward combining grasp quality and placement progress;
        jumps to the max (10) once the object is inside TARGET_RADIUS."""
        obj = obs[4:7]
        tcp_opened = obs[3]
        tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
        target_to_obj = np.linalg.norm(obj - self._target_pos)
        target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)

        in_place = reward_utils.tolerance(
            target_to_obj,
            bounds=(0, self.TARGET_RADIUS),
            margin=target_to_obj_init,
            sigmoid='long_tail',
        )

        object_grasped = self._gripper_caging_reward(action, obj, self.OBJ_RADIUS)

        reward = reward_utils.hamacher_product(object_grasped, in_place)

        # bonus once the object is held and has moved toward the target
        if (tcp_to_obj < 0.01) and (0 < tcp_opened < 0.55) and \
                (target_to_obj_init - target_to_obj > 0.01):
            reward += 1. + 5. * in_place
        if target_to_obj < self.TARGET_RADIUS:
            reward = 10.
        return (
            reward,
            tcp_to_obj,
            tcp_opened,
            target_to_obj,
            object_grasped,
            in_place
        )
| mit | 183f55d9340a093f49b44ede51ded5ca | 36.981308 | 104 | 0.563484 | 3.187451 | false | false | false | false |
rlworkgroup/metaworld | metaworld/policies/sawyer_handle_press_side_v2_policy.py | 1 | 1056 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerHandlePressSideV2Policy(Policy):
    """Scripted policy: align above the handle, then press it down."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Observation layout is fixed by the environment; keys must not change.
        return {
            'hand_pos': obs[:3],
            'gripper': obs[3],
            'handle_pos': obs[4:7],
            'unused_info': obs[7:],
        }

    def get_action(self, obs):
        parsed = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })
        target = self._desired_pos(parsed)
        action['delta_pos'] = move(parsed['hand_pos'], to_xyz=target, p=25.)
        action['grab_effort'] = 1.
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        hand = o_d['hand_pos']
        handle = o_d['handle_pos']

        xy_error = np.linalg.norm(hand[:2] - handle[:2])
        if xy_error > 0.02:
            # Hover above the handle until horizontally aligned.
            return handle + np.array([0., 0., 0.2])
        # Aligned: drive well below the handle to press it.
        return handle + np.array([.0, .0, -.5])
| mit | 5311e0bd4f2f1c89514743e0cc1fd148 | 25.4 | 89 | 0.536932 | 3.28972 | false | false | false | false |
rlworkgroup/metaworld | metaworld/policies/sawyer_pick_place_wall_v2_policy.py | 1 | 2569 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move, assert_fully_parsed
class SawyerPickPlaceWallV2Policy(Policy):
    """Scripted policy: grab the puck, carry it over the wall, place at goal."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Observation layout is fixed by the environment; keys must not change.
        return {
            'hand_pos': obs[:3],
            'unused_1': obs[3],
            'puck_pos': obs[4:7],
            'unused_2': obs[7:-3],
            'goal_pos': obs[-3:],
        }

    def get_action(self, obs):
        parsed = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })
        action['delta_pos'] = move(
            parsed['hand_pos'], to_xyz=self.desired_pos(parsed), p=10.)
        action['grab_effort'] = self.grab_effort(parsed)
        return action.array

    @staticmethod
    def desired_pos(o_d):
        hand = o_d['hand_pos']
        puck = o_d['puck_pos'] + np.array([-0.005, 0, 0])
        goal = o_d['goal_pos']

        # Stage 1: hover above the puck until horizontally aligned.
        if np.linalg.norm(hand[:2] - puck[:2]) > 0.015:
            return puck + np.array([0., 0., 0.1])
        # Stage 2: descend onto the puck while it is still on the table.
        if abs(hand[2] - puck[2]) > 0.04 and puck[-1] < 0.03:
            return puck + np.array([0., 0., 0.03])

        # Stage 3: carry the puck to the goal, clearing the wall.
        over_wall_xy = (-0.15 <= hand[0] <= 0.35 and
                        0.60 <= hand[1] <= 0.80)
        if over_wall_xy and hand[2] < 0.25:
            # Directly above the wall but too low: go straight up.
            return hand + [0, 0, 1]
        if over_wall_xy and hand[2] < 0.35:
            # High enough to clear the wall: translate toward the goal.
            return np.array([goal[0], goal[1], hand[2]])
        if abs(hand[2] - goal[2]) > 0.04:
            # Match the goal's height before the final approach.
            return np.array([hand[0], hand[1], goal[2]])
        return goal

    @staticmethod
    def grab_effort(o_d):
        hand = o_d['hand_pos']
        puck = o_d['puck_pos']

        misaligned = np.linalg.norm(hand[:2] - puck[:2]) > 0.015
        far_above = abs(hand[2] - puck[2]) > 0.1
        if misaligned or far_above:
            return 0.
        # While descending onto the puck, begin closing the gripper.
        return 0.9
| mit | 7a9525c2c0b623034708145b4427f8fc | 35.183099 | 103 | 0.51849 | 3.195274 | false | false | false | false |
rlworkgroup/metaworld | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py | 1 | 5849 | import numpy as np
from gym.spaces import Box
from scipy.spatial.transform import Rotation
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerPushEnvV2(SawyerXYZEnv):
    """
    Motivation for V2:
        V1 was very difficult to solve because the observation didn't say where
        to move after reaching the puck.
    Changelog from V1 to V2:
        - (7/7/20) Removed 3 element vector. Replaced with 3 element position
            of the goal (for consistency with other environments)
        - (6/15/20) Added a 3 element vector to the observation. This vector
            points from the end effector to the goal coordinate.
            i.e. (self._target_pos - pos_hand)
        - (6/15/20) Separated reach-push-pick-place into 3 separate envs.
    """
    # Object counts as "at target" within this distance.
    TARGET_RADIUS = 0.05

    def __init__(self):
        # Workspace bounds for the hand, and sampling ranges for the object
        # and goal positions.
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.6, 0.02)
        obj_high = (0.1, 0.7, 0.02)
        goal_low = (-0.1, 0.8, 0.01)
        goal_high = (0.1, 0.9, 0.02)

        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )

        self.init_config = {
            'obj_init_angle': .3,
            'obj_init_pos': np.array([0., 0.6, 0.02]),
            'hand_init_pos': np.array([0., 0.6, 0.2]),
        }

        self.goal = np.array([0.1, 0.8, 0.02])

        self.obj_init_angle = self.init_config['obj_init_angle']
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']

        self.action_space = Box(
            np.array([-1, -1, -1, -1]),
            np.array([+1, +1, +1, +1]),
        )

        # Concatenated sampling space: first 3 dims are the object position,
        # last 3 are the goal position.
        self._random_reset_space = Box(
            np.hstack((obj_low, goal_low)),
            np.hstack((obj_high, goal_high)),
        )
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

        self.num_resets = 0

    @property
    def model_name(self):
        # Path to the MuJoCo XML model for this task.
        return full_v2_path_for('sawyer_xyz/sawyer_push_v2.xml')

    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the reward plus a dict of success/diagnostic metrics."""
        obj = obs[4:7]
        (
            reward,
            tcp_to_obj,
            tcp_opened,
            target_to_obj,
            object_grasped,
            in_place
        ) = self.compute_reward(action, obs)

        info = {
            'success': float(target_to_obj <= self.TARGET_RADIUS),
            'near_object': float(tcp_to_obj <= 0.03),
            # Grasp counts only when touching the object, the gripper is at
            # least slightly open, and the object is above its initial height.
            'grasp_success': float(
                self.touching_main_object and
                (tcp_opened > 0) and
                (obj[2] - 0.02 > self.obj_init_pos[2])
            ),
            'grasp_reward': object_grasped,
            'in_place_reward': in_place,
            'obj_to_target': target_to_obj,
            'unscaled_reward': reward,
        }

        return reward, info

    def _get_quat_objects(self):
        # Object orientation as a quaternion, derived from the geom's
        # rotation matrix.
        return Rotation.from_matrix(
            self.data.get_geom_xmat('objGeom')
        ).as_quat()

    def _get_pos_objects(self):
        return self.get_body_com('obj')

    def fix_extreme_obj_pos(self, orig_init_pos):
        # This is to account for meshes for the geom and object are not
        # aligned. If this is not done, the object could be initialized in an
        # extreme position
        # NOTE(review): both terms of `diff` are identical, so `diff` is
        # always zero here -- verify whether one side was meant to be the
        # geom position rather than the body com.
        diff = self.get_body_com('obj')[:2] - \
            self.get_body_com('obj')[:2]
        adjusted_pos = orig_init_pos[:2] + diff
        # The convention we follow is that body_com[2] is always 0,
        # and geom_pos[2] is the object height
        return [
            adjusted_pos[0],
            adjusted_pos[1],
            self.get_body_com('obj')[-1]
        ]

    def reset_model(self):
        """Reset hand, object, and goal; returns the initial observation."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = np.array(self.fix_extreme_obj_pos(self.init_config['obj_init_pos']))
        self.obj_init_angle = self.init_config['obj_init_angle']

        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self._target_pos = goal_pos[3:]
            # Resample until object and goal are at least 0.15 apart in XY.
            while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
                goal_pos = self._get_state_rand_vec()
                self._target_pos = goal_pos[3:]
            # Pin both object and goal to the object's initial height.
            self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
            self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))

        self._set_obj_xyz(self.obj_init_pos)
        self.num_resets += 1

        return self._get_obs()

    def compute_reward(self, action, obs):
        """Shaped reward built from a grasp term and an in-place term.

        Returns a 6-tuple: (reward, tcp_to_obj, tcp_opened, target_to_obj,
        object_grasped, in_place).
        """
        obj = obs[4:7]
        tcp_opened = obs[3]
        tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
        target_to_obj = np.linalg.norm(obj - self._target_pos)
        target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)

        in_place = reward_utils.tolerance(
            target_to_obj,
            bounds=(0, self.TARGET_RADIUS),
            margin=target_to_obj_init,
            sigmoid='long_tail',
        )

        object_grasped = self._gripper_caging_reward(
            action,
            obj,
            object_reach_radius=0.01,
            obj_radius=0.015,
            pad_success_thresh=0.05,
            xz_thresh=0.005,
            high_density=True
        )
        reward = 2 * object_grasped

        # Bonus once the gripper is on the (slightly open) object.
        if tcp_to_obj < 0.02 and tcp_opened > 0:
            reward += 1. + reward + 5. * in_place
        # Success overrides the shaped reward.
        if target_to_obj < self.TARGET_RADIUS:
            reward = 10.
        return (
            reward,
            tcp_to_obj,
            tcp_opened,
            target_to_obj,
            object_grasped,
            in_place
        )
| mit | d7786bd68cf9b9c3272791a1cad02f84 | 32.232955 | 96 | 0.535647 | 3.30452 | false | false | false | false |
rlworkgroup/metaworld | tests/metaworld/envs/mujoco/sawyer_xyz/test_obs_space_hand.py | 1 | 1831 | import numpy as np
import pytest
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
from metaworld.envs.mujoco.env_dict import ALL_V2_ENVIRONMENTS
from metaworld.policies.policy import Policy, move
from metaworld.policies.action import Action
class SawyerRandomReachPolicy(Policy):
    """Test helper policy that steers the hand toward a fixed 3D target."""

    def __init__(self, target):
        self._target = target

    @staticmethod
    def _parse_obs(obs):
        # Only the hand position matters for pure reaching.
        return {'hand_pos': obs[:3], 'unused_info': obs[3:]}

    def get_action(self, obs):
        parsed = self._parse_obs(obs)
        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })
        action['delta_pos'] = move(parsed['hand_pos'], to_xyz=self._target, p=25.)
        action['grab_effort'] = 0.  # keep the gripper open
        return action.array
def sample_spherical(num_points, radius=1.0):
    """Sample points from the surface of a sphere centered at the origin.

    Args:
        num_points (int): number of points to sample
        radius (float): radius of the sphere

    Returns:
        (np.ndarray): points array of shape (num_points, 3)
    """
    # Normalizing standard Gaussian draws yields a uniform direction.
    points = np.random.randn(3, num_points)
    norms = np.linalg.norm(points, axis=0)
    return (points / norms).T * radius
@pytest.mark.parametrize('target', sample_spherical(100, 10.0))
def test_reaching_limit(target):
    """Drive the hand toward a far-away target; it must stay in bounds."""
    env = ALL_V2_ENVIRONMENTS['reach-v2']()
    env._partially_observable = False
    env._freeze_rand_vec = False
    env._set_task_called = True
    policy = SawyerRandomReachPolicy(target)

    env.reset()
    env.reset_model()
    prev_obs = env.reset()

    for _ in range(env.max_path_length):
        obs = env.step(policy.get_action(prev_obs))[0]
        # Stop early once the hand has effectively stopped moving.
        if np.linalg.norm(obs[:3] - prev_obs[:3]) < 0.001:
            break
        prev_obs = obs

    # Wherever it ended up, the hand must remain inside the declared space.
    assert SawyerXYZEnv._HAND_SPACE.contains(obs[:3])
| mit | 7b0d68e69ccdeed216422de0ac00f3d9 | 26.328358 | 79 | 0.627526 | 3.287253 | false | false | false | false |
rlworkgroup/metaworld | metaworld/policies/sawyer_pick_place_v2_policy.py | 1 | 1963 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerPickPlaceV2Policy(Policy):
    """Scripted policy: pick the puck up, then carry it to the goal."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Observation layout is fixed by the environment; keys must not change.
        return {
            'hand_pos': obs[:3],
            'gripper_distance_apart': obs[3],
            'puck_pos': obs[4:7],
            'puck_rot': obs[7:11],
            'goal_pos': obs[-3:],
            'unused_info_curr_obs': obs[11:18],
            '_prev_obs': obs[18:36]
        }

    def get_action(self, obs):
        parsed = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })
        action['delta_pos'] = move(
            parsed['hand_pos'], to_xyz=self._desired_pos(parsed), p=10.)
        action['grab_effort'] = self._grab_effort(parsed)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        hand = o_d['hand_pos']
        puck = o_d['puck_pos'] + np.array([-0.005, 0, 0])
        goal = o_d['goal_pos']
        gripper_gap = o_d['gripper_distance_apart']

        if np.linalg.norm(hand[:2] - puck[:2]) > 0.02:
            # Stage 1: hover above the puck until horizontally aligned.
            return puck + np.array([0., 0., 0.1])
        if abs(hand[2] - puck[2]) > 0.05 and puck[-1] < 0.04:
            # Stage 2: descend onto the puck.
            return puck + np.array([0., 0., 0.03])
        if gripper_gap > 0.73:
            # Stage 3: hold position until the gripper has closed enough.
            return hand
        # Stage 4: carry the puck to the goal.
        return goal

    @staticmethod
    def _grab_effort(o_d):
        hand = o_d['hand_pos']
        puck = o_d['puck_pos']

        if np.linalg.norm(hand - puck) < 0.07:
            return 1.
        return 0.
rlworkgroup/metaworld | tests/metaworld/envs/mujoco/sawyer_xyz/test_scripted_policies.py | 1 | 14279 | import pytest
from metaworld.envs.mujoco.env_dict import ALL_V1_ENVIRONMENTS, ALL_V2_ENVIRONMENTS
from metaworld.policies import *
from tests.metaworld.envs.mujoco.sawyer_xyz.utils import trajectory_summary
test_cases_old_nonoise = [
# This should contain configs where a V2 policy is running in a V1 env.
# name, policy, action noise pct, success rate
['bin-picking-v1', SawyerBinPickingV2Policy(), .0, .50],
['handle-press-side-v1', SawyerHandlePressSideV2Policy(), .0, .05],
['lever-pull-v1', SawyerLeverPullV2Policy(), .0, .0],
['peg-insert-side-v1', SawyerPegInsertionSideV2Policy(), .0, .0],
['plate-slide-back-side-v1', SawyerPlateSlideBackSideV2Policy(), .0, 1.],
['window-open-v1', SawyerWindowOpenV2Policy(), .0, 0.85],
['window-close-v1', SawyerWindowCloseV2Policy(), .0, 0.37],
]
test_cases_old_noisy = [
# This should contain configs where a V2 policy is running in a V1 env.
# name, policy, action noise pct, success rate
['bin-picking-v1', SawyerBinPickingV2Policy(), .1, .40],
['handle-press-side-v1', SawyerHandlePressSideV2Policy(), .1, .77],
['lever-pull-v1', SawyerLeverPullV2Policy(), .1, .0],
['peg-insert-side-v1', SawyerPegInsertionSideV2Policy(), .1, .0],
['plate-slide-back-side-v1', SawyerPlateSlideBackSideV2Policy(), .1, 0.30],
['window-open-v1', SawyerWindowOpenV2Policy(), .1, 0.81],
['window-close-v1', SawyerWindowCloseV2Policy(), .1, 0.37],
]
test_cases_latest_nonoise = [
# name, policy, action noise pct, success rate
['assembly-v1', SawyerAssemblyV1Policy(), .0, 1.],
['assembly-v2', SawyerAssemblyV2Policy(), .0, 1.],
['basketball-v1', SawyerBasketballV1Policy(), .0, .98],
['basketball-v2', SawyerBasketballV2Policy(), .0, .98],
['bin-picking-v2', SawyerBinPickingV2Policy(), .0, .98],
['box-close-v1', SawyerBoxCloseV1Policy(), .0, .85],
['box-close-v2', SawyerBoxCloseV2Policy(), .0, .90],
['button-press-topdown-v1', SawyerButtonPressTopdownV1Policy(), .0, 1.],
['button-press-topdown-v2', SawyerButtonPressTopdownV2Policy(), .0, .95],
['button-press-topdown-wall-v1', SawyerButtonPressTopdownWallV1Policy(), .0, 1.],
['button-press-topdown-wall-v2', SawyerButtonPressTopdownWallV2Policy(), .0, .95],
['button-press-v1', SawyerButtonPressV1Policy(), .0, 1.],
['button-press-v2', SawyerButtonPressV2Policy(), .0, 1.],
['button-press-wall-v1', SawyerButtonPressWallV1Policy(), .0, 1.],
['button-press-wall-v2', SawyerButtonPressWallV2Policy(), .0, .93],
['coffee-button-v1', SawyerCoffeeButtonV1Policy(), .0, 1.],
['coffee-button-v2', SawyerCoffeeButtonV2Policy(), .0, 1.],
['coffee-pull-v1', SawyerCoffeePullV1Policy(), .0, .96],
['coffee-pull-v2', SawyerCoffeePullV2Policy(), .0, .94],
['coffee-push-v1', SawyerCoffeePushV1Policy(), .0, .93],
['coffee-push-v2', SawyerCoffeePushV2Policy(), .0, .93],
['dial-turn-v1', SawyerDialTurnV1Policy(), .0, 0.96],
['dial-turn-v2', SawyerDialTurnV2Policy(), .0, 0.96],
['disassemble-v1', SawyerDisassembleV1Policy(), .0, .96],
['disassemble-v2', SawyerDisassembleV2Policy(), .0, .92],
['door-close-v1', SawyerDoorCloseV1Policy(), .0, .99],
['door-close-v2', SawyerDoorCloseV2Policy(), .0, .99],
['door-lock-v1', SawyerDoorLockV1Policy(), .0, 1.],
['door-lock-v2', SawyerDoorLockV2Policy(), .0, 1.],
['door-open-v1', SawyerDoorOpenV1Policy(), .0, .98],
['door-open-v2', SawyerDoorOpenV2Policy(), .0, .94],
['door-unlock-v1', SawyerDoorUnlockV1Policy(), .0, 1.],
['door-unlock-v2', SawyerDoorUnlockV2Policy(), .0, 1.],
['drawer-close-v1', SawyerDrawerCloseV1Policy(), .0, .99],
['drawer-close-v2', SawyerDrawerCloseV2Policy(), .0, .99],
['drawer-open-v1', SawyerDrawerOpenV1Policy(), .0, .99],
['drawer-open-v2', SawyerDrawerOpenV2Policy(), .0, .99],
['faucet-close-v1', SawyerFaucetCloseV1Policy(), .0, 1.],
['faucet-close-v2', SawyerFaucetCloseV2Policy(), .0, 1.],
['faucet-open-v1', SawyerFaucetOpenV1Policy(), .0, 1.],
['faucet-open-v2', SawyerFaucetOpenV2Policy(), .0, 1.],
['hammer-v1', SawyerHammerV1Policy(), .0, 1.],
['hammer-v2', SawyerHammerV2Policy(), .0, 1.],
['hand-insert-v1', SawyerHandInsertV1Policy(), .0, 0.96],
['hand-insert-v2', SawyerHandInsertV2Policy(), .0, 0.96],
['handle-press-side-v2', SawyerHandlePressSideV2Policy(), .0, .99],
['handle-press-v1', SawyerHandlePressV1Policy(), .0, 1.],
['handle-press-v2', SawyerHandlePressV2Policy(), .0, 1.],
['handle-pull-v1', SawyerHandlePullV1Policy(), .0, 1.],
['handle-pull-v2', SawyerHandlePullV2Policy(), .0, 0.93],
['handle-pull-side-v1', SawyerHandlePullSideV1Policy(), .0, .92],
['handle-pull-side-v2', SawyerHandlePullSideV2Policy(), .0, 1.],
['peg-insert-side-v2', SawyerPegInsertionSideV2Policy(), .0, .89],
['lever-pull-v2', SawyerLeverPullV2Policy(), .0, .94],
['peg-unplug-side-v1', SawyerPegUnplugSideV1Policy(), .0, .99],
['peg-unplug-side-v2', SawyerPegUnplugSideV2Policy(), .0, .99],
['pick-out-of-hole-v1', SawyerPickOutOfHoleV1Policy(), .0, 1.],
['pick-out-of-hole-v2', SawyerPickOutOfHoleV2Policy(), .0, 1.],
['pick-place-v2', SawyerPickPlaceV2Policy(), .0, .95],
['pick-place-wall-v2', SawyerPickPlaceWallV2Policy(), .0, .95],
['plate-slide-back-side-v2', SawyerPlateSlideBackSideV2Policy(), .0, 1.],
['plate-slide-back-v1', SawyerPlateSlideBackV1Policy(), .0, 1.],
['plate-slide-back-v2', SawyerPlateSlideBackV2Policy(), .0, 1.],
['plate-slide-side-v1', SawyerPlateSlideSideV1Policy(), .0, 1.],
['plate-slide-side-v2', SawyerPlateSlideSideV2Policy(), .0, 1.],
['plate-slide-v1', SawyerPlateSlideV1Policy(), .0, 1.],
['plate-slide-v2', SawyerPlateSlideV2Policy(), .0, 1.],
['reach-v2', SawyerReachV2Policy(), .0, .99],
['reach-wall-v2', SawyerReachWallV2Policy(), 0.0, .98],
['push-back-v1', SawyerPushBackV1Policy(), .0, .97],
['push-back-v2', SawyerPushBackV2Policy(), .0, .97],
['push-v2', SawyerPushV2Policy(), .0, .97],
['push-wall-v2', SawyerPushWallV2Policy(), .0, .97],
['shelf-place-v1', SawyerShelfPlaceV1Policy(), .0, .96],
['shelf-place-v2', SawyerShelfPlaceV2Policy(), .0, .96],
['soccer-v1', SawyerSoccerV1Policy(), .0, .88],
['soccer-v2', SawyerSoccerV2Policy(), .0, .88],
['stick-pull-v1', SawyerStickPullV1Policy(), .0, 0.95],
['stick-pull-v2', SawyerStickPullV2Policy(), .0, 0.96],
['stick-push-v1', SawyerStickPushV1Policy(), .0, 0.98],
['stick-push-v2', SawyerStickPushV2Policy(), .0, 0.98],
['sweep-into-v1', SawyerSweepIntoV1Policy(), .0, 1.],
['sweep-into-v2', SawyerSweepIntoV2Policy(), .0, 0.98],
['sweep-v1', SawyerSweepV1Policy(), .0, 1.],
['sweep-v2', SawyerSweepV2Policy(), .0, 0.99],
['window-close-v2', SawyerWindowCloseV2Policy(), 0., .98],
['window-open-v2', SawyerWindowOpenV2Policy(), 0., .94],
]
test_cases_latest_noisy = [
# name, policy, action noise pct, success rate
['assembly-v1', SawyerAssemblyV1Policy(), .1, .69],
['assembly-v2', SawyerAssemblyV2Policy(), .1, .70],
['basketball-v1', SawyerBasketballV1Policy(), .1, .97],
['basketball-v2', SawyerBasketballV2Policy(), .1, .96],
['bin-picking-v2', SawyerBinPickingV2Policy(), .1, .96],
['box-close-v1', SawyerBoxCloseV1Policy(), .1, .84],
['box-close-v2', SawyerBoxCloseV2Policy(), .1, .82],
['button-press-topdown-v1', SawyerButtonPressTopdownV1Policy(), .1, .98],
['button-press-topdown-v2', SawyerButtonPressTopdownV2Policy(), .1, .93],
['button-press-topdown-wall-v1', SawyerButtonPressTopdownWallV1Policy(), .1, .99],
['button-press-topdown-wall-v2', SawyerButtonPressTopdownWallV2Policy(), .1, .95],
['button-press-v1', SawyerButtonPressV1Policy(), .1, .98],
['button-press-v2', SawyerButtonPressV2Policy(), .1, .98],
['button-press-wall-v1', SawyerButtonPressWallV1Policy(), .1, .94],
['button-press-wall-v2', SawyerButtonPressWallV2Policy(), .1, .92],
['coffee-button-v1', SawyerCoffeeButtonV1Policy(), .1, .99],
['coffee-button-v2', SawyerCoffeeButtonV2Policy(), .1, .99],
['coffee-pull-v1', SawyerCoffeePullV1Policy(), .1, .95],
['coffee-pull-v2', SawyerCoffeePullV2Policy(), .1, .82],
['coffee-push-v1', SawyerCoffeePushV1Policy(), .1, .86],
['coffee-push-v2', SawyerCoffeePushV2Policy(), .1, .88],
['dial-turn-v1', SawyerDialTurnV1Policy(), .1, 0.84],
['dial-turn-v2', SawyerDialTurnV2Policy(), .1, 0.84],
['disassemble-v1', SawyerDisassembleV1Policy(), .1, .91],
['disassemble-v2', SawyerDisassembleV2Policy(), .1, .88],
['door-close-v1', SawyerDoorCloseV1Policy(), .1, .99],
['door-close-v2', SawyerDoorCloseV2Policy(), .1, .97],
['door-lock-v1', SawyerDoorLockV1Policy(), .1, 1.],
['door-lock-v2', SawyerDoorLockV2Policy(), .1, .96],
['door-open-v1', SawyerDoorOpenV1Policy(), .1, .93],
['door-open-v2', SawyerDoorOpenV2Policy(), .1, .92],
['door-unlock-v1', SawyerDoorUnlockV1Policy(), .1, .96],
['door-unlock-v2', SawyerDoorUnlockV2Policy(), .1, .97],
['drawer-close-v1', SawyerDrawerCloseV1Policy(), .1, .64],
['drawer-close-v2', SawyerDrawerCloseV2Policy(), .1, .99],
['drawer-open-v1', SawyerDrawerOpenV1Policy(), .1, .97],
['drawer-open-v2', SawyerDrawerOpenV2Policy(), .1, .97],
['faucet-close-v1', SawyerFaucetCloseV1Policy(), .1, .93],
['faucet-close-v2', SawyerFaucetCloseV2Policy(), .1, 1.],
['faucet-open-v1', SawyerFaucetOpenV1Policy(), .1, .99],
['faucet-open-v2', SawyerFaucetOpenV2Policy(), .1, .99],
['hammer-v1', SawyerHammerV1Policy(), .1, .97],
['hammer-v2', SawyerHammerV2Policy(), .1, .96],
['hand-insert-v1', SawyerHandInsertV1Policy(), .1, 0.95],
['hand-insert-v2', SawyerHandInsertV2Policy(), .1, 0.86],
['handle-press-side-v2', SawyerHandlePressSideV2Policy(), .1, .98],
['handle-press-v1', SawyerHandlePressV1Policy(), .1, 1.],
['handle-press-v2', SawyerHandlePressV2Policy(), .1, 1.],
['handle-pull-v1', SawyerHandlePullV1Policy(), .1, 1.],
['handle-pull-v2', SawyerHandlePullV2Policy(), .1, .99],
['handle-pull-side-v1', SawyerHandlePullSideV1Policy(), .1, .75],
['handle-pull-side-v2', SawyerHandlePullSideV2Policy(), .1, .71],
['peg-insert-side-v2', SawyerPegInsertionSideV2Policy(), .1, .87],
['lever-pull-v2', SawyerLeverPullV2Policy(), .1, .90],
['peg-unplug-side-v1', SawyerPegUnplugSideV1Policy(), .1, .97],
['peg-unplug-side-v2', SawyerPegUnplugSideV2Policy(), .1, .80],
['pick-out-of-hole-v1', SawyerPickOutOfHoleV1Policy(), .1, .87],
['pick-out-of-hole-v2', SawyerPickOutOfHoleV2Policy(), .1, .89],
['pick-place-v2', SawyerPickPlaceV2Policy(), .1, .83],
['pick-place-wall-v2', SawyerPickPlaceWallV2Policy(), .1, .83],
['plate-slide-back-side-v2', SawyerPlateSlideBackSideV2Policy(), .1, .95],
['plate-slide-back-v1', SawyerPlateSlideBackV1Policy(), .1, .95],
['plate-slide-back-v2', SawyerPlateSlideBackV2Policy(), .1, .94],
['plate-slide-side-v1', SawyerPlateSlideSideV1Policy(), .1, .76],
['plate-slide-side-v2', SawyerPlateSlideSideV2Policy(), .1, .78],
['plate-slide-v1', SawyerPlateSlideV1Policy(), .1, .97],
['plate-slide-v2', SawyerPlateSlideV2Policy(), .1, .97],
['reach-v2', SawyerReachV2Policy(), .1, .98],
['reach-wall-v2', SawyerReachWallV2Policy(), .1, .96],
['push-back-v1', SawyerPushBackV1Policy(), .1, .90],
['push-back-v2', SawyerPushBackV2Policy(), .0, .91],
['push-v2', SawyerPushV2Policy(), .1, .88],
['push-wall-v2', SawyerPushWallV2Policy(), .1, .82],
['shelf-place-v1', SawyerShelfPlaceV1Policy(), .1, .90],
['shelf-place-v2', SawyerShelfPlaceV2Policy(), .1, .89],
['soccer-v1', SawyerSoccerV1Policy(), .1, .91],
['soccer-v2', SawyerSoccerV2Policy(), .1, .81],
['stick-pull-v1', SawyerStickPullV1Policy(), .1, 0.81],
['stick-pull-v2', SawyerStickPullV2Policy(), .1, 0.81],
['stick-push-v1', SawyerStickPushV1Policy(), .1, 0.95],
['stick-push-v2', SawyerStickPushV2Policy(), .1, 0.95],
['sweep-into-v1', SawyerSweepIntoV1Policy(), .1, 1.],
['sweep-into-v2', SawyerSweepIntoV2Policy(), .1, 0.86],
['sweep-v1', SawyerSweepV1Policy(), .1, 1.],
['sweep-v2', SawyerSweepV2Policy(), .0, 0.99],
['window-close-v2', SawyerWindowCloseV2Policy(), .1, .95],
['window-open-v2', SawyerWindowOpenV2Policy(), .1, .93],
]
# Combine the configuration tables into one parameter list for the
# parameterized test below. Only the latest+noisy cases actually run; the
# older/no-noise cases are retained but marked skipped.
_skipped_rows = (
    test_cases_old_nonoise + test_cases_old_noisy + test_cases_latest_nonoise
)
test_cases = [
    pytest.param(*row, marks=pytest.mark.skip) for row in _skipped_rows
]
test_cases += [
    pytest.param(*row, marks=pytest.mark.basic)
    for row in test_cases_latest_noisy
]

ALL_ENVS = {**ALL_V1_ENVIRONMENTS, **ALL_V2_ENVIRONMENTS}
@pytest.fixture(scope='function')
def env(request):
    """Instantiate the env named by the (indirect) parameter, test-ready."""
    environment = ALL_ENVS[request.param]()
    # Expose goals and randomize resets so scripted policies can be run
    # without the benchmark's task-sampling machinery.
    environment._partially_observable = False
    environment._freeze_rand_vec = False
    environment._set_task_called = True
    return environment
@pytest.mark.parametrize(
    'env,policy,act_noise_pct,expected_success_rate',
    test_cases,
    indirect=['env']
)
def test_scripted_policy(env, policy, act_noise_pct, expected_success_rate, iters=100):
    """Tests whether a given policy solves an environment in a stateless manner

    Args:
        env (metaworld.envs.MujocoEnv): Environment to test
        policy (metaworld.policies.policy.Policy): Policy that's supposed to
            succeed in env
        act_noise_pct (np.ndarray): Decimal value(s) indicating std deviation of
            the noise as a % of action space
        expected_success_rate (float): Decimal value indicating % of runs that
            must be successful
        iters (int): How many times the policy should be tested
    """
    # Statelessness guard: the policy must carry no instance attributes.
    assert len(vars(policy)) == 0, \
        '{} has state variable(s)'.format(policy.__class__.__name__)

    successes = sum(
        float(trajectory_summary(env, policy, act_noise_pct, render=False)[0])
        for _ in range(iters)
    )
    print(successes)
    assert successes >= expected_success_rate * iters
| mit | aabc5411de81ac4b9bc1236daf9f85f1 | 53.708812 | 91 | 0.654037 | 2.564936 | false | true | false | false |
adamewing/bamsurgeon | bin/bamsurgeon/mutation.py | 1 | 11184 | #!/usr/bin/env python
from bamsurgeon.common import *
from collections import OrderedDict as od
import subprocess
import logging
FORMAT = '%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def countBaseAtPos(bamfile, chrom, pos, mutid='null'):
    """ return list of bases at position chrom,pos

    Runs ``samtools mpileup`` restricted to the single position and collects
    the A/C/G/T calls from the pileup string (column 5).  Returns [] when
    there is no coverage.  ``mutid`` is unused here but kept for call-site
    compatibility.
    """
    locstr = chrom + ":" + str(pos) + "-" + str(pos)
    args = ['samtools', 'mpileup', bamfile, '-Q', '0', '-r', locstr]

    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains stdout/stderr before waiting, avoiding the pipe
    # deadlock that wait() followed by readlines() can cause when the child
    # fills its output buffer.
    pout, _ = p.communicate()

    pileup = None

    for line in pout.decode().splitlines():
        c = line.strip().split()
        if len(c) > 5:
            pileup = c[4].upper()
        else:
            logger.info(" mpileup failed, no coverage for base: " + chrom + ":" + str(pos))
            return []

    # Keep only real base calls; indel/strand markers in the pileup string
    # (e.g. '+', '-', '^', '$', '*') are skipped.
    return [b for b in (pileup or '') if b in ('A', 'T', 'C', 'G')]
def makeins(read, start, ins, debug=False):
    """Return read.seq with insertion sequence ``ins`` applied at reference
    coordinate ``start``.

    ``read`` is a pysam-style aligned read; the modified sequence is trimmed
    back to the original read length.  Returns the unmodified sequence when
    the insertion cannot be applied.  ``debug`` is unused here; diagnostic
    output is controlled by the module logger's level instead.
    """
    # An insertion must fit within a single read (with some slack).
    if len(read.seq) < len(ins) + 2:
        logger.warning("INDELs (ins) must be less than one read length, skipped read: %s" % read.query_name)
        return read.seq

    logger.debug("DEBUG: INS: read.pos: %d" % read.pos)
    logger.debug("DEBUG: INS: start: %d" % start)
    logger.debug("DEBUG: INS: ins: %s" % ins)
    logger.debug("DEBUG: INS: cigar: %s" % read.cigarstring)
    logger.debug("DEBUG: is_reverse: %s" % read.is_reverse)

    orig_len = len(read.seq)
    pos_in_read = None

    # Map the reference coordinate onto a query (read) offset.
    for (qpos, rpos) in read.get_aligned_pairs():
        if rpos == start:
            pos_in_read = qpos

    if pos_in_read is None:
        logger.warning("ref position %d not covered in read %s" % (start, read.query_name))
        return read.seq

    newseq = read.seq

    if pos_in_read > 0: # insertion start in read span
        logger.debug("DEBUG: INS: pos_in_read: %d" % pos_in_read)
        if not read.is_reverse:
            left = read.seq[:pos_in_read]
            right = read.seq[pos_in_read:]

            newseq = left + ins + right
            # Trim back to the original read length.
            newseq = newseq[:orig_len]

        else:
            # Reverse-strand read: apply the insertion on the
            # reverse-complemented sequence, then flip back.
            pos_in_read = len(read.seq) - pos_in_read
            rcseq = rc(read.seq)
            left = rcseq[:pos_in_read]
            right = rcseq[pos_in_read:]

            newseq = left + rc(ins) + right
            newseq = rc(newseq[:orig_len])

    logger.debug("DEBUG: INS: orig seq: %s" % read.seq)
    logger.debug("DEBUG: INS: newseq: %s" % newseq)

    return newseq
def makedel(read, chrom, start, end, ref, debug=False):
    """Return read.seq with the reference interval ``start``..``end`` deleted.

    ``read`` is a pysam-style aligned read and ``ref`` a FASTA accessor used
    to fill in the sequence exposed by the deletion, so the result keeps the
    original read length.  Returns the unmodified sequence when the deletion
    cannot be applied.  ``debug`` only gates the final debug log line.
    """
    # A deletion must be shorter than a single read (with some slack).
    if len(read.seq) < end-start-2:
        logger.warning("INDELs (del) must be less than one read length, skipped read: %s" % read.query_name)
        return read.seq

    logger.debug("DEBUG: DEL: read.pos: %d" % read.pos)
    logger.debug("DEBUG: DEL: start: %d" % start)
    # NOTE(review): label says "ins" but this logs the deletion end --
    # likely a copy-paste artifact in the log message.
    logger.debug("DEBUG: DEL: ins: %d" % end)
    logger.debug("DEBUG: DEL: cigar: %s" % read.cigarstring)
    logger.debug("DEBUG: DEL: orig seq: %s" % read.seq)

    orig_len = len(read.seq)
    start_in_read = None
    end_in_read = None

    # Map the deletion's reference coordinates onto read offsets.
    for (qpos, rpos) in read.get_aligned_pairs():
        if rpos == start:
            start_in_read = qpos
        if rpos == end:
            end_in_read = qpos

    # Deletion boundary falls outside the aligned span: extrapolate the
    # offset (negative before the read, past orig_len after it).
    if start_in_read is None and read.get_reference_positions()[0] > start:
        start_in_read = start-read.get_reference_positions()[0]

    if end_in_read is None and read.get_reference_positions()[-1] < end:
        end_in_read = orig_len + (end-read.get_reference_positions()[-1])

    if start_in_read is None:
        logger.warning("ref position %d not covered in read %s" % (start, read.query_name))
        return read.seq

    if end_in_read is None:
        logger.warning("ref position %d not covered in read %s" % (end, read.query_name))
        return read.seq

    logger.debug("DEBUG: DEL: start_in_read: %d" % start_in_read)
    logger.debug("DEBUG: DEL: end_in_read: %d" % end_in_read)

    if start_in_read < 0: # deletion begins to the left of the read
        logger.debug("DEBUG: DEL: del begins to left of read.")
        assert end_in_read < orig_len
        right = read.seq[end_in_read:]
        # Pad on the left with reference sequence to preserve read length.
        left = ref.fetch(chrom, start-(len(read.seq) - len(right)), start)

    elif end_in_read > orig_len: # deletion ends to the right of the read
        logger.debug("DEBUG: DEL: del ends to right of read.")
        assert start_in_read > 0
        left = read.seq[:start_in_read]
        # Pad on the right with reference sequence to preserve read length.
        right = ref.fetch(chrom, end, end+(len(read.seq) - len(left)))

    else:
        logger.debug("DEBUG: DEL: del starts and ends within read.")
        assert end_in_read <= orig_len and start_in_read >= 0 # deletion contained within the read
        left = read.seq[:start_in_read]
        right = read.seq[end_in_read:]
        # Extend with reference sequence past the read's end to restore the
        # original read length.
        right += ref.fetch(chrom, read.pos+len(read.seq), read.pos+len(read.seq)+(len(read.seq)-len(left)-len(right)))

    if debug:
        logger.debug("DEBUG: DEL: newseq: %s" % (left + right))
    return left + right
def find_mate(read, bam):
    ''' AlignmentFile.mate() can return a non-primary alignment, so use this function instead '''
    mate_start = read.next_reference_start
    for candidate in bam.fetch(read.next_reference_name, mate_start, mate_start + 1):
        if candidate.query_name != read.query_name:
            continue
        if candidate.reference_start != mate_start:
            continue
        # Primary alignments only: not secondary, not supplementary (0x800).
        if candidate.is_secondary or (candidate.flag & 2048) == 2048:
            continue
        # The mate is the record from the opposite end of the pair.
        if candidate.is_read1 != read.is_read1:
            return candidate
    return None
def mutate(args, log, bamfile, bammate, chrom, mutstart, mutend, mutpos_list, avoid=None, mutid_list=None, is_snv=False, mutbase_list=None, is_insertion=False, is_deletion=False, ins_seq=None, reffile=None, indel_start=None, indel_end=None):
    '''Collect reads over chrom:mutstart-mutend and build mutated copies of them.

    Returns a 6-tuple (abort, hasSNP, maxfrac, outreads, mutreads, mutmates):
      abort    -- True if the mutation should be skipped (avoidlist hit,
                  missing mate with --requirepaired, excessive depth, or
                  failed pileup)
      hasSNP   -- True when a likely germline SNP was detected nearby
      maxfrac  -- highest minor-allele fraction observed in the region
                  (None if no position could be piled up)
      outreads -- extended read name -> original pysam alignment
      mutreads -- extended read name -> mutated read sequence
      mutmates -- extended read name -> mate alignment (or None)
    '''
    assert mutend > mutstart, "mutation start must occur before mutation end"

    hasSNP = False

    outreads = od()
    mutreads = od()
    mutmates = od()

    region = 'haplo_' + chrom + '_' + str(mutstart) + '_' + str(mutend)

    maxfrac = None

    for pcol in bamfile.pileup(reference=chrom, start=mutstart-1, end=mutend+1, max_depth=int(args.maxdepth), ignore_overlaps=False):
        if pcol.pos:
            if args.ignorepileup and (pcol.pos < mutstart-1 or pcol.pos > mutend+1):
                continue

            # NOTE(review): refbase is fetched but not used below -- confirm
            # whether it was intended for the SNP check.
            refbase = reffile.fetch(chrom, pcol.pos-1, pcol.pos)
            basepile = ''
            for pread in pcol.pileups:
                if avoid is not None and pread.alignment.qname in avoid:
                    logger.warning(region + " dropped mutation due to read in --avoidlist " + pread.alignment.qname)
                    return True, False, maxfrac, {}, {}, {}

                # only consider primary alignments (not secondary, not supplementary/flag 2048)
                if pread.query_position is not None and not pread.alignment.is_secondary and bin(pread.alignment.flag & 2048) != bin(2048):
                    # NOTE(review): this accumulated basepile is overwritten by
                    # countBaseAtPos() below; also the -1 offset here differs
                    # from the un-offset query_position used for mutation.
                    basepile += pread.alignment.seq[pread.query_position-1]
                    pairname = 'F' # read is first in pair
                    if pread.alignment.is_read2:
                        pairname = 'S' # read is second in pair
                    if not pread.alignment.is_paired:
                        pairname = 'U' # read is unpaired

                    extqname = ','.join((pread.alignment.qname,str(pread.alignment.pos),pairname))

                    if pcol.pos+1 in mutpos_list:
                        if not pread.alignment.is_secondary and bin(pread.alignment.flag & 2048) != bin(2048) and not pread.alignment.mate_is_unmapped:
                            outreads[extqname] = pread.alignment
                            mutid = mutid_list[mutpos_list.index(pcol.pos+1)]

                            if is_snv:
                                if extqname not in mutreads:
                                    mutreads[extqname] = pread.alignment.seq

                                mutbase = mutbase_list[mutpos_list.index(pcol.pos+1)]
                                mutbases = list(mutreads[extqname])
                                mutbases[pread.query_position] = mutbase
                                mutread = ''.join(mutbases)
                                mutreads[extqname] = mutread

                            if is_insertion:
                                mutreads[extqname] = makeins(pread.alignment, indel_start, ins_seq)

                            if is_deletion:
                                mutreads[extqname] = makedel(pread.alignment, chrom, indel_start, indel_end, reffile)

                            mate = None
                            if not args.single:
                                try:
                                    mate = find_mate(pread.alignment, bammate)
                                except ValueError:
                                    raise ValueError('cannot find mate reference chrom for read %s, is this a single-ended BAM?' % pread.alignment.qname)

                                if mate is None:
                                    logger.warning(mutid + " warning: no mate for " + pread.alignment.qname)
                                    if args.requirepaired:
                                        logger.warning(mutid + " skipped mutation due to --requirepaired")
                                        # bugfix: was a 5-tuple (missing maxfrac);
                                        # all other exits return 6 values.
                                        return True, False, maxfrac, {}, {}, {}

                            if extqname not in mutmates:
                                mutmates[extqname] = mate

                            log.write(" ".join(('read',extqname,mutreads[extqname],"\n")))

                        if len(mutreads) > int(args.maxdepth):
                            logger.warning("depth at site is greater than cutoff, aborting mutation")
                            return True, False, maxfrac, {}, {}, {}

            # make sure region doesn't have any changes that are likely SNPs
            # (trying to avoid messing with haplotypes)
            maxfrac = 0.0
            hasSNP = False

            basepile = countBaseAtPos(args.bamFileName,chrom,pcol.pos,mutid=region)
            if basepile:
                majb = majorbase(basepile)
                minb = minorbase(basepile)

                frac = float(minb[1])/(float(majb[1])+float(minb[1]))
                if minb[0] == majb[0]:
                    frac = 0.0
                if frac > maxfrac:
                    maxfrac = frac
                if frac > float(args.snvfrac):
                    logger.warning(region + " dropped for proximity to SNP, nearby SNP MAF: " + str(frac) + " (max snv frac: " + args.snvfrac + ")")
                    hasSNP = True
            else:
                logger.warning(region + " could not pileup for region: " + chrom + ":" + str(pcol.pos))
                if not args.ignorepileup:
                    hasSNP = True

    if maxfrac is None:
        logger.warning("could not pile up over region: %s" % region)
        return True, False, maxfrac, {}, {}, {}

    return False, hasSNP, maxfrac, outreads, mutreads, mutmates # todo: convert to class
| mit | 8cd69ad873e39db4471e19eb4e0bf2c1 | 38.800712 | 241 | 0.54873 | 3.660884 | false | false | false | false |
cablehead/python-consul | consul/aio.py | 1 | 2424 | from __future__ import absolute_import
import sys
import asyncio
import warnings
import aiohttp
from consul import base
__all__ = ['Consul']
PY_341 = sys.version_info >= (3, 4, 1)
class HTTPClient(base.HTTPClient):
    """Asyncio adapter for python consul using aiohttp library"""

    def __init__(self, *args, loop=None, **kwargs):
        # Defaults to the current event loop when none is supplied; the
        # aiohttp session is created eagerly and reused for all requests.
        super(HTTPClient, self).__init__(*args, **kwargs)
        self._loop = loop or asyncio.get_event_loop()
        connector = aiohttp.TCPConnector(loop=self._loop,
                                         verify_ssl=self.verify)
        self._session = aiohttp.ClientSession(connector=connector)

    @asyncio.coroutine
    def _request(self, callback, method, uri, data=None):
        """Issue an HTTP request and pass a base.Response to *callback*.

        HTTP status 599 is translated into base.Timeout.
        """
        resp = yield from self._session.request(method, uri, data=data)
        body = yield from resp.text(encoding='utf-8')
        if resp.status == 599:
            raise base.Timeout
        r = base.Response(resp.status, resp.headers, body)
        return callback(r)

    # python prior 3.4.1 does not play nice with __del__ method
    if PY_341:  # pragma: no branch
        def __del__(self):
            # Warn (and clean up) if the session was never explicitly closed.
            if not self._session.closed:
                warnings.warn("Unclosed connector in aio.Consul.HTTPClient",
                              ResourceWarning)
                self.close()

    def get(self, callback, path, params=None):
        """GET *path* and invoke *callback* with the response."""
        uri = self.uri(path, params)
        return self._request(callback, 'GET', uri)

    def put(self, callback, path, params=None, data=''):
        """PUT *data* to *path* and invoke *callback* with the response."""
        uri = self.uri(path, params)
        return self._request(callback, 'PUT', uri, data=data)

    def delete(self, callback, path, params=None):
        """DELETE *path* and invoke *callback* with the response."""
        uri = self.uri(path, params)
        return self._request(callback, 'DELETE', uri)

    def post(self, callback, path, params=None, data=''):
        """POST *data* to *path* and invoke *callback* with the response."""
        uri = self.uri(path, params)
        return self._request(callback, 'POST', uri, data=data)

    def close(self):
        # Close the underlying aiohttp session (and its connector).
        self._session.close()
class Consul(base.Consul):
    """Asyncio-flavoured Consul client backed by the aiohttp HTTPClient."""

    def __init__(self, *args, loop=None, **kwargs):
        # Remember the loop before base.__init__ triggers self.connect().
        self._loop = loop or asyncio.get_event_loop()
        super().__init__(*args, **kwargs)

    def connect(self, host, port, scheme, verify=True, cert=None):
        """Build the aiohttp HTTP client used by the base implementation."""
        # bugfix: the caller-supplied client certificate was previously
        # discarded (cert=None was hard-coded); forward it instead.
        return HTTPClient(host, port, scheme, loop=self._loop,
                          verify=verify, cert=cert)

    def close(self):
        """Close all opened http connections"""
        self.http.close()
| mit | e4228141f3a29da8d57319959542a954 | 32.205479 | 76 | 0.600248 | 3.8784 | false | false | false | false |
adamewing/bamsurgeon | bin/addsv.py | 1 | 46537 | #!/usr/bin/env python
#from __future__ import print_function
import re
import os
import sys
import random
import subprocess
import argparse
import pysam
import bamsurgeon.replace_reads as rr
import bamsurgeon.asmregion as ar
import bamsurgeon.mutableseq as ms
import bamsurgeon.aligners as aligners
import bamsurgeon.makevcf as makevcf
from bamsurgeon.common import *
from uuid import uuid4
from shutil import move
from collections import defaultdict as dd
from concurrent.futures import ProcessPoolExecutor
import logging
FORMAT = '%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_reads(bam_file, chrom, start, end, svfrac):
    ''' Yield a deterministic, roughly svfrac-sized subset of reads whose
    pair maps entirely within chrom:start-end.

    Duplicates, secondary/supplementary records, unmapped reads and reads
    with unmapped mates are excluded. Read selection is driven by a hash
    of the read name so both mates of a template are picked together.
    '''
    bam = pysam.AlignmentFile(bam_file)

    for read in bam.fetch(chrom, start, end):
        read_stop = read.reference_start + read.query_length
        mate_stop = read.next_reference_start + read.query_length

        usable = not (read.is_duplicate or read.is_secondary or
                      read.is_supplementary or read.is_unmapped or
                      read.mate_is_unmapped)

        # both ends of the pair must lie fully inside the window
        contained = (read.next_reference_name == chrom and
                     start <= read.reference_start and read_stop <= end and
                     start <= read.next_reference_start and mate_stop <= end)

        if usable and contained:
            # stable pseudo-random draw per template name
            if read_hash_fraction(read.query_name) <= svfrac:
                yield read

    bam.close()
def runwgsim(contig, newseq, pemean, pesd, tmpdir, nsimreads, mutid='null', err_rate=0.0, seed=None, trn_contig=None, rename=True):
    ''' wrapper function for wgsim, could swap out to support other reads simulators (future work?)

    Writes *newseq* to a temporary FASTA in *tmpdir*, then runs wgsim to
    simulate *nsimreads* read pairs with outer distance mean *pemean*,
    SD *pesd* and base error rate *err_rate*. Read length is taken from
    the longest quality string attached to *contig* (and *trn_contig*,
    if given). Returns the paths of the two generated FASTQ files.

    NOTE(review): the *rename* parameter is accepted but never used in
    this body -- presumably a leftover from an earlier version; confirm
    before removing at a call site.
    '''

    basefn = tmpdir + '/' + mutid + ".wgsimtmp." + str(uuid4())
    fasta = basefn + ".fasta"
    fq1 = basefn + ".1.fq"
    fq2 = basefn + ".2.fq"

    fout = open(fasta,'w')
    fout.write(">" + mutid + "\n" + newseq + "\n")
    fout.close()

    # combined original contig length, used only for logging below
    ctg_len = len(contig)
    if trn_contig: ctg_len += len(trn_contig)

    # # adjustment factor for length of new contig vs. old contig
    logger.info("%s old ctg len: %d" % (mutid, ctg_len))
    logger.info("%s new ctg len: %d" % (mutid, len(newseq)))
    logger.info("%s num. sim. reads: %d" % (mutid, nsimreads))
    logger.info("%s PE mean outer distance: %f" % (mutid, pemean))
    logger.info("%s PE outer distance SD: %f" % (mutid, pesd))
    logger.info("%s error rate: %f" % (mutid, err_rate))

    rquals = contig.rquals
    mquals = contig.mquals

    if trn_contig:
        rquals += trn_contig.rquals
        mquals += trn_contig.mquals

    # length of quality score comes from original read, used here to set length of read
    maxqlen = 0
    for qual in (rquals + mquals):
        if len(qual) > maxqlen:
            maxqlen = len(qual)

    wgsim_args = ['wgsim','-e', str(err_rate),'-d',str(pemean),'-s',str(pesd),'-N',str(nsimreads),'-1',str(maxqlen),'-2', str(maxqlen),'-r','0','-R','0',fasta,fq1,fq2]

    seed = 1 if seed == 0 else seed # Fix for wgsim thinking 0 is no seed
    if seed is not None: wgsim_args += ['-S', str(seed)]

    logger.info(str(wgsim_args))
    subprocess.check_call(wgsim_args)

    os.remove(fasta)
    return (fq1,fq2)
def singleseqfa(file, mutid='null'):
    '''Return the sequence of the first record in FASTA file *file*.

    Sequence lines are concatenated with surrounding whitespace stripped.
    If the file contains more than one record a warning is logged and only
    the first record's sequence is returned (as the warning has always
    claimed).
    '''
    with open(file, 'r') as fasta:
        header = None
        seq = ''
        for line in fasta:
            line = line.strip()
            if line.startswith('>'):
                if header is not None:
                    logger.warning("%s multiple entries found in %s only using the first" % (mutid, file))
                    # bugfix: previously sequence lines from later records
                    # were appended as well, contradicting the warning.
                    break
                header = line.lstrip('>')
            else:
                seq += line
    return seq
def load_inslib(infa):
    '''Read FASTA file *infa* into a dict mapping sequence id -> sequence.'''
    library = {}
    current_id = ''
    current_seq = ''

    with open(infa, 'r') as fasta:
        for raw in fasta:
            if raw.startswith('>'):
                # flush the record collected so far before starting a new one
                if current_seq != '':
                    library[current_id] = current_seq
                current_id = raw.lstrip('>').strip()
                current_seq = ''
            else:
                assert current_id != ''
                current_seq += raw.strip()

    # flush the final record (unless its id was already stored)
    if current_id not in library and current_seq != '':
        library[current_id] = current_seq

    return library
def align(qryseq, refseq):
    '''Ungapped-align *qryseq* against *refseq* using exonerate.

    Writes both sequences to temporary FASTA files, runs exonerate with a
    custom --ryo SUMMARY line, and returns the whitespace-split fields of
    the best-scoring SUMMARY record:
    ['SUMMARY', score, qstart, qend, tstart, tend], or [] when exonerate
    reports no alignment.
    '''
    rnd = str(uuid4())
    tgtfa = 'tmp.' + rnd + '.tgt.fa'
    qryfa = 'tmp.' + rnd + '.qry.fa'

    with open(tgtfa, 'w') as tgt:
        tgt.write('>ref' + '\n' + refseq + '\n')
    with open(qryfa, 'w') as qry:
        qry.write('>qry' + '\n' + qryseq + '\n')

    cmd = ['exonerate', '--bestn', '1', '-m', 'ungapped', '--showalignment','0', '--ryo', 'SUMMARY\t%s\t%qab\t%qae\t%tab\t%tae\n', qryfa, tgtfa]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # bugfix: communicate() drains both pipes and reaps the child. The
    # previous code read only stdout (risking deadlock if exonerate filled
    # the stderr pipe) and never wait()ed, leaving a zombie process.
    stdout, _ = p.communicate()

    best = []
    topscore = 0

    for pline in stdout.decode().splitlines():
        if pline.startswith('SUMMARY'):
            c = pline.strip().split()
            if int(c[1]) > topscore:
                topscore = int(c[1])
                best = c

    os.remove(tgtfa)
    os.remove(qryfa)

    return best
def discordant_fraction(bamfile, chrom, start, end):
    '''Fraction of reads over chrom:start-end that are not in a proper pair.

    Returns 0.0 when no reads overlap the interval.
    '''
    total = 0
    discordant = 0

    bam = pysam.AlignmentFile(bamfile)
    for read in bam.fetch(chrom, start, end):
        total += 1
        if not read.is_proper_pair:
            discordant += 1

    return float(discordant) / float(total) if total > 0 else 0.0
def trim_contig(mutid, chrom, start, end, contig, reffile):
    '''Trim *contig* to its best ungapped alignment against the reference.

    Aligns contig.seq against reference chrom:start-end with align(); on
    failure (fewer than 6 alignment fields) returns a list of nine Nones.
    On success, trims the contig to the aligned query interval, marks it
    reverse-complemented when the target coordinates are inverted, and
    returns (contig, refseq, alignstats, refstart, refend, qrystart,
    qryend, tgtstart, tgtend) with refstart <= refend.
    '''
    # trim contig to get best ungapped aligned region to ref.

    refseq = reffile.fetch(chrom,start,end)
    alignstats = align(contig.seq, refseq)

    if len(alignstats) < 6:
        logger.warning("%s alignstats: %s" % (mutid, str(alignstats)))
        logger.warning("%s No good alignment between mutated contig and original, aborting mutation!" % mutid)
        return [None] * 9

    # fields 2-3 are query (contig) coords, 4-5 are target (ref) coords
    qrystart, qryend = map(int, alignstats[2:4])
    tgtstart, tgtend = map(int, alignstats[4:6])

    refseq = refseq[tgtstart:tgtend]

    logger.info("%s alignment result: %s" % (mutid, str(alignstats)))

    contig.trimseq(qrystart, qryend)
    logger.info("%s trimmed contig length: %d" % (mutid, contig.len))

    if tgtstart > tgtend: # detect reverse complemented contig
        contig.rc = True

    # convert target-relative coords back to absolute reference coords
    refstart = start + tgtstart
    refend = start + tgtend

    if refstart > refend:
        refstart, refend = refend, refstart

    return contig, refseq, alignstats, refstart, refend, qrystart, qryend, tgtstart, tgtend
def locate_contig_pos(refstart, refend, user_start, user_end, contig_len, maxlibsize):
    '''Map the user-requested breakpoints onto contig coordinates.

    Returns (contig_start, contig_end); either element is None when the
    corresponding user breakpoint lies within maxlibsize of the contig
    edge, i.e. the contig does not safely cover it.
    '''
    left_margin = user_start - refstart
    right_margin = refend - user_end

    contig_start = left_margin if left_margin > maxlibsize else None
    contig_end = contig_len - right_margin if right_margin > maxlibsize else None

    return contig_start, contig_end
def add_donor_reads(args, mutid, tmpbamfn, bdup_chrom, bdup_left_bnd, bdup_right_bnd, bdup_svfrac):
    '''Merge coverage-normalised donor reads into a BIGDUP temporary BAM.

    Copies every read from *tmpbamfn* into a new BAM, then appends reads
    from args.donorbam over bdup_chrom:bdup_left_bnd-bdup_right_bnd,
    downsampled so that the added coverage approximates
    (original coverage * bdup_svfrac). Donor read names get a
    '_donor_<mutid>' suffix. Returns the path of the merged BAM.
    '''
    tmpbam = pysam.AlignmentFile(tmpbamfn)
    outbamfn = '%s/%s.%s.bigdup.merged.bam' % (args.tmpdir, mutid, str(uuid4()))
    outbam = pysam.AlignmentFile(outbamfn, 'wb', template=tmpbam)

    for read in tmpbam.fetch(until_eof=True):
        outbam.write(read)

    tmpbam.close()  # bugfix: input handle was never closed

    # Calculate donor norm factor
    with pysam.AlignmentFile(args.donorbam) as donorbam:
        cover_donor = donorbam.count(contig=bdup_chrom, start=bdup_left_bnd, end=bdup_right_bnd) / float(bdup_right_bnd-bdup_left_bnd)

    with pysam.AlignmentFile(args.bamFileName) as origbam:
        cover_orig = origbam.count(contig=bdup_chrom, start=bdup_left_bnd, end=bdup_right_bnd) / float(bdup_right_bnd-bdup_left_bnd)

    donor_norm_factor = cover_orig * bdup_svfrac / cover_donor

    if donor_norm_factor > 1.0:
        logger.warning('%s: donor_norm_factor %f > 1.0. This means donor bam has less coverage than required.' % (mutid, donor_norm_factor))

    logger.info('%s: BIGDUP donor coverage normalisation factor: %f' % (mutid, donor_norm_factor))

    logger.info('%s: fetch donor reads from %s-%d-%d' % (mutid, bdup_chrom, bdup_left_bnd, bdup_right_bnd))

    nreads = 0
    for read in get_reads(args.donorbam, bdup_chrom, bdup_left_bnd, bdup_right_bnd, donor_norm_factor):
        read.query_name = read.query_name + '_donor_' + mutid
        outbam.write(read)
        nreads += 1

    outbam.close()

    logger.info('%s: using %d donor reads from %s' % (mutid, nreads, args.donorbam))

    return outbamfn
def merge_multi_trn(args, alignopts, pair, chrom, start, end, vaf):
    '''Randomly interleave the reads of two per-breakend BAMs into one BAM.

    Each read name is assigned at random to source 0 or 1; the merged BAM
    keeps a read only from its assigned source, so a template appears once
    even when both inputs contain it. The two input BAMs are deleted
    afterwards and the merged BAM path is returned.

    NOTE(review): alignopts, chrom, start, end and vaf are currently
    unused here -- kept for call-site compatibility; confirm before
    removing.
    '''
    assert len(pair) == 2
    mutid = os.path.basename(pair[0]).split('.')[0]
    outbamfn = '%s/%s.%s.merged.bam' % (args.tmpdir, mutid, str(uuid4()))

    bams = [pysam.AlignmentFile(bam) for bam in pair]
    outbam = pysam.AlignmentFile(outbamfn, 'wb', template=bams[0])

    readbins = {} # randomly assorted reads into bam sources 0 and 1

    for bam in bams:
        for read in bam.fetch(until_eof=True):
            readbins[read.query_name] = random.choice([0,1])
        bam.close()

    bams = [pysam.AlignmentFile(bam) for bam in pair]
    for i, bam in enumerate(bams):
        for read in bam.fetch(until_eof=True):
            if readbins[read.query_name] == i:
                outbam.write(read)
        bam.close()  # bugfix: second set of handles was never closed

    outbam.close()

    # cleanup
    for fn in pair:
        os.remove(fn)

    return outbamfn
def makemut(args, bedline, alignopts):
    '''Spike one structural variant (one BED line) into the input BAM.

    Workflow: check depth at the requested breakpoints, assemble local
    contigs with ar.asm(), trim them against the reference, apply the
    requested mutation(s) (INS/INV/DEL/DUP/TRN/BIGDEL/BIGINV/BIGDUP) to
    the contig sequence, simulate reads from the mutated contig with
    wgsim, and remap them.

    Returns (mutated_bam_path, exclude_file_path, mutinfo_dict), or
    (None, None, None) when the site is skipped for any reason.
    '''
    bedline = bedline.strip()

    if args.seed is not None: random.seed(args.seed + int(bedline.split()[1]))

    mutid = '_'.join(map(str, bedline.split()[:4]))

    bamfile = pysam.AlignmentFile(args.bamFileName)
    reffile = pysam.Fastafile(args.refFasta)
    logfn = '_'.join(map(os.path.basename, bedline.split()[:4])) + ".log"
    logfile = open('addsv_logs_' + os.path.basename(args.outBamFile) + '/' + os.path.basename(args.outBamFile) + '_' + logfn, 'w')

    mutinfo = {}

    # optional CNV file
    cnv = None
    if (args.cnvfile):
        cnv = pysam.Tabixfile(args.cnvfile, 'r')

    # temporary file to hold mutated reads
    outbam_mutsfile = args.tmpdir + '/' + '.'.join((mutid, str(uuid4()), "muts.bam"))

    c = bedline.split()
    chrom = c[0]
    start = int(c[1])
    end = int(c[2])
    araw = c[3:] # INV, DEL, INS, DUP, TRN

    # desired start/end
    user_start = start
    user_end = end

    # Check if has sufficient depth
    user_start_depth = bamfile.count(chrom, user_start-1, user_start)
    user_end_depth = bamfile.count(chrom, user_end-1, user_end)
    if user_start_depth < args.mindepth or user_end_depth < args.mindepth:
        logger.warning('%s skipping due to insufficient depth %d %d' % (mutid, user_start_depth, user_end_depth))
        return None, None, None
    elif user_start_depth > args.maxdepth or user_end_depth > args.maxdepth:
        logger.warning('%s skipping due to excessive depth %d %d' % (mutid, user_start_depth, user_end_depth))
        return None, None, None

    # translocation specific
    trn_chrom = None
    trn_start = None
    trn_end = None

    is_transloc = c[3] in ('TRN', 'BIGDEL', 'BIGINV', 'BIGDUP')

    if is_transloc:
        araw = [c[3]]
        if len(c) > 7:
            araw += c[7:]

        start -= int(args.minctglen)
        end += int(args.minctglen)
        if start < 0: start = 0

        trn_chrom = c[4]
        user_trn_start = int(c[5])
        user_trn_end = int(c[6])

        # Check for sufficient depth
        user_trn_start_depth = bamfile.count(trn_chrom, user_trn_start-1, user_trn_start)
        user_trn_end_depth = bamfile.count(trn_chrom, user_trn_end-1, user_trn_end)
        if user_trn_start_depth < args.mindepth or user_trn_end_depth < args.mindepth:
            logger.warning('%s skipping due to insufficient depth %d %d' % (mutid, user_trn_start_depth, user_trn_end_depth))
            return None, None, None
        elif user_trn_start_depth > args.maxdepth or user_trn_end_depth > args.maxdepth:
            logger.warning('%s skipping due to excessive depth %d %d' % (mutid, user_trn_start_depth, user_trn_end_depth))
            return None, None, None

        trn_start = int(c[5]) - int(args.minctglen)
        trn_end = int(c[6]) + int(args.minctglen)
        if trn_start < 0: trn_start = 0

    actions = map(lambda x: x.strip(),' '.join(araw).split(';'))

    svfrac = float(args.svfrac) # default, can be overridden by cnv file or per-variant

    cn = 1.0

    trn_left_flip = False
    trn_right_flip = False

    if cnv: # CNV file is present
        if chrom in cnv.contigs:
            for cnregion in cnv.fetch(chrom,start,end):
                cn = float(cnregion.strip().split()[3]) # expect chrom,start,end,CN
                logger.info("INFO" + mutid + "\t" + ' '.join(("copy number in sv region:",chrom,str(start),str(end),"=",str(cn))) + "\n")
                svfrac = svfrac/float(cn)
                assert svfrac <= 1.0, 'copy number from %s must be at least 1: %s' % (args.cnvfile, cnregion.strip())
                logger.info("INFO" + mutid + "\tadjusted default MAF: " + str(svfrac) + "\n")

    logger.info("%s interval: %s" % (mutid, bedline))
    logger.info("%s length: %d" % (mutid, (end-start)))

    # modify start and end if interval is too short
    minctglen = int(args.minctglen)

    # adjust if minctglen is too short
    if minctglen < 3*int(args.maxlibsize):
        minctglen = 3*int(args.maxlibsize)

    # NOTE(review): the guard below is commented out upstream, so the
    # adjustment is applied unconditionally (adj may be negative).
    #if end-start < minctglen:
    adj = minctglen - (end-start)
    start = int(start - adj/2)
    end = int(end + adj/2)

    #logger.info("%s note: interval size was too short, adjusted: %s:%d-%d" % (mutid, chrom, start, end))

    dfrac = discordant_fraction(args.bamFileName, chrom, start, end)
    logger.info("%s discordant fraction: %f" % (mutid, dfrac))

    if dfrac > args.maxdfrac:
        logger.warning("%s discordant fraction %f > %f aborting mutation!\n" % (mutid, dfrac, args.maxdfrac))
        return None, None, None

    contigs = ar.asm(chrom, start, end, args.bamFileName, reffile, int(args.kmersize), args.tmpdir, mutid=mutid, debug=args.debug)

    if len(contigs) == 0:
        logger.warning("%s generated no contigs, skipping site." % mutid)
        return None, None, None

    trn_contigs = None
    if is_transloc:
        logger.info("%s assemble translocation end: %s:%d-%d" % (mutid, trn_chrom, trn_start, trn_end))
        trn_contigs = ar.asm(trn_chrom, trn_start, trn_end, args.bamFileName, reffile, int(args.kmersize), args.tmpdir, mutid=mutid, debug=args.debug)

    maxcontig = sorted(contigs)[-1]

    trn_maxcontig = None

    rename_reads = True

    if is_transloc:
        if len(trn_contigs) == 0:
            logger.warning("%s translocation partner generated no contigs, skipping site." % mutid)
            return None, None, None

        trn_maxcontig = sorted(trn_contigs)[-1]

    if re.search('N', maxcontig.seq):
        if args.allowN:
            logger.warning("%s contig has ambiguous base (N), replaced with 'A'" % mutid)
            maxcontig.seq = re.sub('N', 'A', maxcontig.seq)
        else:
            logger.warning("%s contig dropped due to ambiguous base (N), aborting mutation." % mutid)
            return None, None, None

    if is_transloc and re.search('N', trn_maxcontig.seq):
        if args.allowN:
            logger.warning("%s contig has ambiguous base (N), replaced with 'A'" % mutid)
            trn_maxcontig.seq = re.sub('N', 'A', trn_maxcontig.seq)
        else:
            logger.warning("%s contig dropped due to ambiguous base (N), aborting mutation." % mutid)
            return None, None, None

    if maxcontig is None:
        logger.warning("%s maxcontig has length 0, aborting mutation!" % mutid)
        return None, None, None

    if is_transloc and trn_maxcontig is None:
        logger.warning("%s transloc maxcontig has length 0, aborting mutation!" % mutid)
        return None, None, None

    logger.info("%s best contig length: %d" % (mutid, sorted(contigs)[-1].len))

    if is_transloc:
        logger.info("%s best transloc contig length: %d" % (mutid, sorted(trn_contigs)[-1].len))

    # trim contig to get best ungapped aligned region to ref.
    maxcontig, refseq, alignstats, refstart, refend, qrystart, qryend, tgtstart, tgtend = trim_contig(mutid, chrom, start, end, maxcontig, reffile)

    if maxcontig is None:
        logger.warning("%s best contig did not have sufficent match to reference, aborting mutation." % mutid)
        return None, None, None

    logger.info("%s start: %d, end: %d, tgtstart: %d, tgtend: %d, refstart: %d, refend: %d" % (mutid, start, end, tgtstart, tgtend, refstart, refend))

    if is_transloc:
        trn_maxcontig, trn_refseq, trn_alignstats, trn_refstart, trn_refend, trn_qrystart, trn_qryend, trn_tgtstart, trn_tgtend = trim_contig(mutid, trn_chrom, trn_start, trn_end, trn_maxcontig, reffile)

        if trn_maxcontig is None:
            logger.warning("%s best contig for translocation partner did not have sufficent match to reference, aborting mutation." % mutid)
            return None, None, None

        logger.info("%s trn_start: %d, trn_end: %d, trn_tgtstart: %d, trn_tgtend:%d , trn_refstart: %d, trn_refend: %d" % (mutid, trn_start, trn_end, trn_tgtstart, trn_tgtend, trn_refstart, trn_refend))

    # is there anough room to make mutations?
    if maxcontig.len < 3*int(args.maxlibsize):
        logger.warning("%s best contig too short to make mutation!" % mutid)
        return None, None, None

    if is_transloc and trn_maxcontig.len < 3*int(args.maxlibsize):
        logger.warning("%s best transloc contig too short to make mutation!" % mutid)
        return None, None, None

    # make mutation in the largest contig
    mutseq = ms.MutableSeq(maxcontig.seq)

    if maxcontig.rc:
        mutseq = ms.MutableSeq(rc(maxcontig.seq))

    trn_mutseq = None

    if is_transloc:
        if trn_maxcontig.rc:
            trn_mutseq = ms.MutableSeq(rc(trn_maxcontig.seq))
        else:
            trn_mutseq = ms.MutableSeq(trn_maxcontig.seq)

    # support for multiple mutations
    for actionstr in actions:
        a = actionstr.split()
        action = a[0]

        logger.info("%s action: %s %s" % (mutid, actionstr, action))

        insseqfile = None
        insseq = ''
        tsdlen = 0  # target site duplication length
        ndups = 0   # number of tandem dups
        dsize = 0.0 # deletion size fraction
        dlen = 0
        ins_motif = None

        if action == 'INS':
            assert len(a) > 1 # insertion syntax: INS <file.fa> [optional TSDlen]
            insseqfile = a[1]
            if not (os.path.exists(insseqfile) or insseqfile == 'RND' or insseqfile.startswith('INSLIB:')): # not a file... is it a sequence? (support indel ins.)
                assert re.search('^[ATGCatgc]*$',insseqfile), "cannot determine SV type: %s" % insseqfile # make sure it's a sequence
                insseq = insseqfile.upper()
                insseqfile = None
            if len(a) > 2: # field 5 for insertion is TSD Length
                tsdlen = int(a[2])

            if len(a) > 3: # field 6 for insertion is motif, format = 'NNNN^NNNN where ^ is cut site
                ins_motif = a[3]
                assert '^' in ins_motif, 'insertion motif specification requires cut site defined by ^'

            if len(a) > 4: # field 7 is VAF
                svfrac = float(a[4])/cn

        if action == 'DUP':
            if len(a) > 1:
                ndups = int(a[1])
            else:
                ndups = 1

            if len(a) > 2: # VAF
                svfrac = float(a[2])/cn

        if action == 'DEL':
            dsize = 1.0

            if len(a) > 1: # VAF
                svfrac = float(a[1])/cn

        if action in ('TRN', 'BIGDEL', 'BIGINV', 'BIGDUP'):
            if len(a) > 1: # translocation end orientation ++ / +- / -+ / --
                trn_left_flip = a[1][0] == '-'
                trn_right_flip = a[1][1] == '-'

            if len(a) > 2:
                svfrac = float(a[2])/cn

        if action == 'INV':
            if len(a) > 1:
                svfrac = float(a[1])/cn

        logger.info("%s final VAF accounting for copy number %f: %f" % (mutid, cn, svfrac))

        logfile.write(">" + chrom + ":" + str(refstart) + "-" + str(refend) + " BEFORE\n" + str(mutseq) + "\n")

        contig_start = None
        contig_end = None
        trn_contig_start = None
        trn_contig_end = None

        exact_success = True

        contig_start, contig_end = locate_contig_pos(refstart, refend, user_start, user_end, mutseq.length(), int(args.maxlibsize))

        if contig_start is None:
            logger.warning('%s contig does not cover user start' % mutid)
            exact_success = False
            #print refstart, refend, user_start, user_end, int(args.maxlibsize)

        if contig_end is None:
            logger.warning('%s contig does not cover user end' % mutid)
            exact_success = False
            #print refstart, refend, user_start, user_end, int(args.maxlibsize)

        if is_transloc:
            trn_contig_start, trn_contig_end = locate_contig_pos(trn_refstart, trn_refend, user_trn_start, user_trn_end, trn_mutseq.length(), int(args.maxlibsize))

            if trn_contig_start is None:
                logger.warning('%s contig does not cover user translocation start' % mutid)
                exact_success = False

            if trn_contig_end is None:
                logger.warning('%s contig does not cover user translocation end' % mutid)
                exact_success = False

        if args.require_exact and not exact_success:
            # bugfix: mutid was missing from the format string, so the
            # literal '%s' ended up in the log.
            logger.warning('%s dropped mutation due to --require_exact' % mutid)
            return None, None, None

        if action == 'INS':
            inspoint = int(mutseq.length()/2)
            if None not in (contig_start, contig_end):
                inspoint = int((contig_start+contig_end)/2)

            if ins_motif is not None:
                inspoint = mutseq.find_site(ins_motif, left_trim=int(args.maxlibsize), right_trim=int(args.maxlibsize))

                if inspoint < int(args.maxlibsize) or inspoint > mutseq.length() - int(args.maxlibsize):
                    logger.info("%s picked midpoint, no cutsite found" % mutid)
                    inspoint = int(mutseq.length()/2)

            if insseqfile: # seq in file
                if insseqfile == 'RND':
                    assert args.inslib is not None # insertion library needs to exist
                    insseqfile = random.choice(list(args.inslib.keys()))
                    logger.info("%s chose sequence from insertion library: %s" % (mutid, insseqfile))
                    mutseq.insertion(inspoint, args.inslib[insseqfile], tsdlen)

                elif insseqfile.startswith('INSLIB:'):
                    assert args.inslib is not None # insertion library needs to exist
                    insseqfile = insseqfile.split(':')[1]
                    logger.info("%s specify sequence from insertion library: %s" % (mutid, insseqfile))
                    assert insseqfile in args.inslib, '%s not found in insertion library' % insseqfile
                    mutseq.insertion(inspoint, args.inslib[insseqfile], tsdlen)

                else:
                    mutseq.insertion(inspoint, singleseqfa(insseqfile, mutid=mutid), tsdlen)

            else: # seq is input
                mutseq.insertion(inspoint, insseq, tsdlen)

            ins_len = len(mutseq.seq) - len(maxcontig.seq)

            mutinfo[mutid] = "\t".join(('ins',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(inspoint),str(insseqfile),str(tsdlen),str(ins_len),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'INV':
            invstart = int(args.maxlibsize)
            invend = mutseq.length() - invstart

            if None not in (contig_start, contig_end):
                invstart = contig_start
                invend = contig_end

            mutseq.inversion(invstart,invend)

            mutinfo[mutid] = "\t".join(('inv',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(invstart),str(invend),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'DEL':
            delstart = int(args.maxlibsize)
            delend = mutseq.length() - delstart

            if None not in (contig_start, contig_end):
                delstart = contig_start
                delend = contig_end

            mutseq.deletion(delstart,delend)

            mutinfo[mutid] = "\t".join(('del',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(delstart),str(delend),str(dlen),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'DUP':
            dupstart = int(args.maxlibsize)
            dupend = mutseq.length() - dupstart

            if None not in (contig_start, contig_end):
                dupstart = contig_start
                dupend = contig_end

            mutseq.duplication(dupstart,dupend,ndups)

            mutinfo[mutid] = "\t".join(('dup',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(dupstart),str(dupend),str(ndups),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'TRN':
            trnpoint_1 = int(mutseq.length()/2)
            trnpoint_2 = int(trn_mutseq.length()/2)

            if None not in (contig_start, contig_end):
                trnpoint_1 = int((contig_start + contig_end)/2)

            if None not in (trn_contig_start, trn_contig_end):
                trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)

            mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2, flip1=trn_left_flip, flip2=trn_right_flip)

            mutinfo[mutid] = "\t".join(('trn',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(trn_left_flip),str(trn_right_flip),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'BIGDEL':
            trnpoint_1 = int(mutseq.length()/2)
            trnpoint_2 = int(trn_mutseq.length()/2)

            if None not in (contig_start, contig_end):
                trnpoint_1 = int((contig_start + contig_end)/2)

            if None not in (trn_contig_start, trn_contig_end):
                trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)

            mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2)

            mutinfo[mutid] = "\t".join(('bigdel',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'BIGINV':
            trnpoint_1 = int(mutseq.length()/2)
            trnpoint_2 = int(trn_mutseq.length()/2)

            if None not in (contig_start, contig_end):
                trnpoint_1 = int((contig_start + contig_end)/2)

            if None not in (trn_contig_start, trn_contig_end):
                trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)

            mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2, flip1=trn_left_flip, flip2=trn_right_flip)

            mutinfo[mutid] = "\t".join(('biginv',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

        elif action == 'BIGDUP':
            trnpoint_1 = int(mutseq.length()/2)
            trnpoint_2 = int(trn_mutseq.length()/2)

            if None not in (contig_start, contig_end):
                trnpoint_1 = int((contig_start + contig_end)/2)

            if None not in (trn_contig_start, trn_contig_end):
                trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)

            mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2)

            mutinfo[mutid] = "\t".join(('bigdup',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
            logfile.write(mutinfo[mutid] + "\n")

            rename_reads = False

        else:
            raise ValueError("ERROR " + mutid + "\t: mutation not one of: INS,INV,DEL,DUP,TRN,BIGDEL,BIGINV,BIGDUP\n")

    logfile.write(">" + chrom + ":" + str(refstart) + "-" + str(refend) +" AFTER\n" + str(mutseq) + "\n")

    pemean, pesd = float(args.ismean), float(args.issd)
    logger.info("%s set paired end mean distance: %f" % (mutid, pemean))
    logger.info("%s set paired end distance stddev: %f" % (mutid, pesd))

    exclfile = args.tmpdir + '/' + '.'.join((mutid, 'exclude', str(uuid4()), 'txt'))
    exclude = open(exclfile, 'w')

    if is_transloc:
        buffer = int(float(args.ismean))

        region_1_start, region_1_end = (refstart + trnpoint_1 - buffer, refend) if trn_left_flip else (refstart, refstart + trnpoint_1 + buffer)
        region_2_start, region_2_end = (trn_refstart + trnpoint_2 - buffer, trn_refend) if not trn_right_flip else (trn_refstart, trn_refstart + trnpoint_2 + buffer)

        region_1_reads = get_reads(args.bamFileName, chrom, region_1_start, region_1_end, float(svfrac))
        region_2_reads = get_reads(args.bamFileName, trn_chrom, region_2_start, region_2_end, float(svfrac))

        excl_reads_names = set([read.query_name for read in region_1_reads] + [read.query_name for read in region_2_reads])
        nsimreads = len(excl_reads_names)

        # add additional excluded reads if bigdel(s) present
        if action == 'BIGDEL':
            bigdel_region_reads = get_reads(args.bamFileName, chrom, region_1_start, region_2_end, float(svfrac))
            excl_reads_names = set([read.query_name for read in bigdel_region_reads])

    else:
        region_reads = get_reads(args.bamFileName, chrom, refstart, refend, float(svfrac))
        excl_reads_names = set([read.query_name for read in region_reads])

        # scale the number of simulated reads by the mutated/original length ratio
        reads_ratio = len(mutseq.seq) / len(maxcontig.seq)
        nsimreads = int(len(excl_reads_names) * reads_ratio)

    for name in excl_reads_names:
        exclude.write(name + "\n")

    exclude.close()

    # simulate reads
    (fq1, fq2) = runwgsim(maxcontig, mutseq.seq, pemean, pesd, args.tmpdir, nsimreads, err_rate=float(args.simerr), mutid=mutid, seed=args.seed, trn_contig=trn_maxcontig, rename=rename_reads)

    outreads = aligners.remap_fastq(args.aligner, fq1, fq2, args.refFasta, outbam_mutsfile, alignopts, mutid=mutid, threads=int(args.alignerthreads))

    if outreads == 0:
        logger.warning("%s outbam %s has no mapped reads!" % (mutid, outbam_mutsfile))
        # Remove content from logfile in order to skip this mutation in the final VCF file
        logfile.seek(0)
        logfile.truncate()
        return None, None, None

    if action == 'BIGDUP':
        bdup_left_bnd = min(region_1_start, region_2_start, region_1_end, region_2_end)
        bdup_right_bnd = max(region_1_start, region_2_start, region_1_end, region_2_end)

        prev_outbam_mutsfile = outbam_mutsfile
        outbam_mutsfile = add_donor_reads(args, mutid, outbam_mutsfile, chrom, bdup_left_bnd, bdup_right_bnd, float(svfrac))

        os.remove(prev_outbam_mutsfile)
        os.remove(prev_outbam_mutsfile + '.bai')

    logger.info("%s temporary bam: %s" % (mutid, outbam_mutsfile))

    bamfile.close()
    logfile.close()  # bugfix: flush the per-mutation log to disk for downstream VCF creation

    return outbam_mutsfile, exclfile, mutinfo
def main(args):
    """Spike the structural variants described in args.varFileName into args.bamFileName.

    Each variant line is (optionally rewritten into breakpoint/translocation form and)
    submitted to makemut() in its own worker process. Per-mutation BAMs are merged,
    the displaced original reads are collected into an exclude list, and the spiked
    reads are swapped into a copy of the input BAM. Finally a VCF describing the
    successfully added variants is written next to the output BAM.
    """
    logger.info("starting %s called with args: %s" % (sys.argv[0], ' '.join(sys.argv)))
    tmpbams = [] # temporary BAMs, each holds the realigned reads for one mutation
    exclfns = [] # 'exclude' files store reads to be removed from the original BAM due to deletions

    # the input alignment must be indexed so that regions can be fetched
    if (args.bamFileName.endswith('.bam') and not os.path.exists(args.bamFileName + '.bai')) or \
       (args.bamFileName.endswith('.cram') and not os.path.exists(args.bamFileName + '.crai')):
        logger.error("input file must be indexed, not .bai or .crai file found for %s" % args.bamFileName)
        sys.exit(1)

    alignopts = {}
    if args.alignopts is not None:
        alignopts = dict([o.split(':') for o in args.alignopts.split(',')])

    aligners.checkoptions(args.aligner, alignopts, None, sv=True)

    # load insertion library if present
    try:
        if args.inslib is not None:
            logger.info("loading insertion library from %s" % args.inslib)
            args.inslib = load_inslib(args.inslib)
    except Exception:
        logger.error("failed to load insertion library %s" % args.inslib)
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)

    results = []
    pool = ProcessPoolExecutor(max_workers=int(args.procs))

    nmuts = 0

    if not os.path.exists(args.tmpdir):
        os.mkdir(args.tmpdir)
        logger.info("created tmp directory: %s" % args.tmpdir)

    if not os.path.exists('addsv_logs_' + os.path.basename(args.outBamFile)):
        os.mkdir('addsv_logs_' + os.path.basename(args.outBamFile))
        logger.info("created log directory: addsv_logs_%s" % os.path.basename(args.outBamFile))

    assert os.path.exists('addsv_logs_' + os.path.basename(args.outBamFile)), "could not create output directory!"
    assert os.path.exists(args.tmpdir), "could not create temporary directory!"

    # mutid --> (chrom, start, end, svfrac); needed to pair up BIGINV half-translocations later
    biginvs = {}

    with open(args.varFileName, 'r') as varfile:
        for bedline in varfile:
            bedline = bedline.strip()
            multi_part = []  # BIGINV expands into two translocation sub-mutations

            if re.search('^#', bedline):
                continue

            if args.maxmuts and nmuts >= int(args.maxmuts):
                break

            mut_type = bedline.split()[3]
            mut_len = int(bedline.split()[2]) - int(bedline.split()[1])

            # long DEL/DUP/INV are handled through the breakpoint-based BIG* machinery
            if mut_type in ('DEL', 'DUP', 'INV') and mut_len > 10000:
                logger.warning('%s is over 10kbp long: converting to use BIG%s instead.' % (bedline, mut_type))
                mut_type = 'BIG' + mut_type

            if mut_type == 'BIGDUP' and len(bedline.split()) == 6: # convert DUP to BIGDUP
                b = bedline.split()
                bedline = ' '.join((b[:4] + [b[-1]]))

            if mut_type.startswith('BIG') and mut_len < 5000:
                mut_type = mut_type.replace('BIG', '')
                logger.warning('%s is under 5kbp, "BIG" mutation types will yield unpredictable results, converting to %s' % (bedline, mut_type))

            # rewrite bigdel coords as translocation
            if mut_type == 'BIGDEL':
                bdel_svfrac = float(args.svfrac)

                if len(bedline.split()) == 5:
                    bdel_svfrac = float(bedline.split()[-1])

                bdel_chrom, bdel_start, bdel_end = bedline.split()[:3]

                bdel_start = int(bdel_start)
                bdel_end = int(bdel_end)

                bdel_left_start = bdel_start
                bdel_left_end = bdel_start

                bdel_right_start = bdel_end
                bdel_right_end = bdel_end

                bedline = '%s %d %d BIGDEL %s %d %d %s %f' % (bdel_chrom, bdel_left_start, bdel_left_end, bdel_chrom, bdel_right_start, bdel_right_end, '++', bdel_svfrac)

            # rewrite bigdup coords as translocation
            if mut_type == 'BIGDUP':
                bdup_svfrac = float(args.svfrac)

                if len(bedline.split()) == 6:
                    bdup_svfrac = float(bedline.split()[-1])

                if args.donorbam is None:
                    logger.warning('%s: using BIGDUP requires specifying a --donorbam and none was provided, using %s' % (bedline, args.bamFileName))
                    args.donorbam = args.bamFileName

                bdup_chrom, bdup_start, bdup_end = bedline.split()[:3]

                bdup_start = int(bdup_start)
                bdup_end = int(bdup_end)

                bdup_left_start = bdup_start
                bdup_left_end = bdup_start

                bdup_right_start = bdup_end
                bdup_right_end = bdup_end

                bedline = '%s %d %d BIGDUP %s %d %d %s %f' % (bdup_chrom, bdup_right_start, bdup_right_end, bdup_chrom, bdup_left_start, bdup_left_end, '++', bdup_svfrac)

            # rewrite biginv coords as translocations (one per breakend)
            if mut_type == 'BIGINV':
                binv_svfrac = float(args.svfrac)

                if len(bedline.split()) == 5:
                    binv_svfrac = float(bedline.split()[-1])

                binv_chrom, binv_start, binv_end = bedline.split()[:3]

                binv_start = int(binv_start)
                binv_end = int(binv_end)

                binv_left_start = binv_start
                binv_left_end = binv_start

                binv_right_start = binv_end
                binv_right_end = binv_end

                # left breakpoint
                multi_part.append('%s %d %d BIGINV %s %d %d %s %f' % (binv_chrom, binv_left_start, binv_left_end, binv_chrom, binv_right_start, binv_right_end, '+-', binv_svfrac))

                # right breakpoint
                multi_part.append('%s %d %d BIGINV %s %d %d %s %f' % (binv_chrom, binv_left_start, binv_left_end, binv_chrom, binv_right_start, binv_right_end, '-+', binv_svfrac))

                binv_mutid = '_'.join(map(str, (binv_chrom, binv_left_start, binv_left_end, 'BIGINV')))
                biginvs[binv_mutid] = (binv_chrom, binv_start, binv_end, binv_svfrac)

            if len(multi_part) == 0:
                # submit each mutation as its own thread
                result = pool.submit(makemut, args, bedline, alignopts)
                results.append(result)

            else:
                for bedline in multi_part:
                    result = pool.submit(makemut, args, bedline, alignopts)
                    results.append(result)

            nmuts += 1

    ## process the results of mutation jobs
    for result in results:
        tmpbam = None
        exclfn = None

        # mutinfo is logged by the worker itself and unused here
        tmpbam, exclfn, mutinfo = result.result()

        if None not in (tmpbam, exclfn) and os.path.exists(tmpbam) and os.path.exists(exclfn):
            if bamreadcount(tmpbam) > 0:
                tmpbams.append(tmpbam)
                exclfns.append(exclfn)
            else:
                os.remove(tmpbam)
                os.remove(exclfn)

    if len(tmpbams) == 0:
        logger.error("no successful mutations")
        sys.exit(1)

    biginv_pairs = dd(list)
    new_tmpbams = []

    for tmpbamfn in tmpbams:
        mutid = os.path.basename(tmpbamfn).split('.')[0]
        if mutid.endswith('BIGINV'):
            biginv_pairs[mutid].append(tmpbamfn)
        else:
            new_tmpbams.append(tmpbamfn)

    # find translocation pairs corresponding to BIGINV, merge pairs / remove singletons
    for binv_pair in biginv_pairs.values():
        if len(binv_pair) == 2:
            logger.info('merging biginv pair and reversing unassembled interval: %s' % str(binv_pair))
            binv_mutid = os.path.basename(binv_pair[0]).split('.')[0]
            assert binv_mutid in biginvs

            binv_chrom, binv_start, binv_end, binv_svfrac = biginvs[binv_mutid]

            # BUGFIX: this previously normalised binv_left_end/binv_right_end, which
            # were stale leftovers from the parsing loop above (always the *last*
            # parsed BIGINV) and were never used afterwards anyway. Normalise the
            # coordinates that are actually passed to merge_multi_trn() instead.
            if binv_start > binv_end:
                binv_start, binv_end = binv_end, binv_start

            merged_binv = merge_multi_trn(args, alignopts, binv_pair, binv_chrom, binv_start, binv_end, binv_svfrac)
            new_tmpbams.append(merged_binv)

    tmpbams = new_tmpbams

    logger.info("tmpbams: %s" % tmpbams)
    logger.info("exclude: %s" % exclfns)

    if len(tmpbams) == 0:
        sys.exit('no tmp bams remain, nothing to do!')

    excl_merged = 'addsv.exclude.final.' + str(uuid4()) + '.txt'
    mergedtmp = 'addsv.mergetmp.final.' + str(uuid4()) + '.bam'

    logger.info("merging exclude files into %s" % excl_merged)
    exclout = open(excl_merged, 'w')
    for exclfn in exclfns:
        with open(exclfn, 'r') as excl:
            for line in excl:
                exclout.write(line)
        if not args.debug:
            os.remove(exclfn)
    exclout.close()

    if len(tmpbams) == 1:
        logger.info("only one bam: %s renaming to %s" % (tmpbams[0], mergedtmp))
        os.rename(tmpbams[0], mergedtmp)

    elif len(tmpbams) > 1:
        logger.info("merging bams into %s" % mergedtmp)
        mergebams(tmpbams, mergedtmp, debug=args.debug)

    if args.skipmerge:
        logger.info("final merge skipped, please merge manually: %s" % mergedtmp)
        logger.info("exclude file to use: %s" % excl_merged)

    else:
        if args.tagreads:
            from bamsurgeon.markreads import markreads
            tmp_tag_bam = 'tag.%s.bam' % str(uuid4())
            markreads(mergedtmp, tmp_tag_bam)
            move(tmp_tag_bam, mergedtmp)
            logger.info("tagged reads.")

        logger.info("writing to %s" % args.outBamFile)
        rr.replace_reads(args.bamFileName, mergedtmp, args.outBamFile, excludefile=excl_merged, allreads=True, keepsecondary=args.keepsecondary, seed=args.seed, quiet=True)

        if not args.debug:
            os.remove(excl_merged)
            os.remove(mergedtmp)

    logger.info("done.")

    if not args.debug:
        for tmpbam in tmpbams:
            if os.path.isfile(tmpbam):
                os.remove(tmpbam)
            if os.path.isfile(tmpbam + '.bai'):
                os.remove(tmpbam + '.bai')

    var_basename = '.'.join(os.path.basename(args.varFileName).split('.')[:-1])
    bam_basename = '.'.join(os.path.basename(args.outBamFile).split('.')[:-1])

    vcf_fn = bam_basename + '.addsv.' + var_basename + '.vcf'

    makevcf.write_vcf_sv('addsv_logs_' + os.path.basename(args.outBamFile), args.refFasta, vcf_fn)
    logger.info('vcf output written to ' + vcf_fn)
if __name__ == '__main__':
    # Command-line entry point: build the argument parser and hand off to main().
    parser = argparse.ArgumentParser(description='adds SVs to reads, outputs modified reads as .bam along with mates')
    parser.add_argument('-v', '--varfile', dest='varFileName', required=True,
                        help='whitespace-delimited target regions for SV spike-in, see manual for syntax')
    parser.add_argument('-f', '--bamfile', dest='bamFileName', required=True,
                        help='sam/bam file from which to obtain reads')
    parser.add_argument('-r', '--reference', dest='refFasta', required=True,
                        help='reference genome, fasta indexed with bwa index _and_ samtools faidx')
    parser.add_argument('-o', '--outbam', dest='outBamFile', required=True,
                        help='.bam file name for output')
    parser.add_argument('-l', '--maxlibsize', dest='maxlibsize', default=600,
                        help="maximum fragment length of seq. library")
    parser.add_argument('-k', '--kmer', dest='kmersize', default=31,
                        help="kmer size for assembly (default = 31)")
    parser.add_argument('-s', '--svfrac', dest='svfrac', default=1.0,
                        help="allele fraction of variant (default = 1.0)")
    parser.add_argument('--require_exact', default=False, action='store_true',
                        help="drop mutation if breakpoints cannot be made exactly as input")
    parser.add_argument('--mindepth', default=10, type=int,
                        help='minimum read depth in the breakend position to make mutation (default = 10)')
    parser.add_argument('--maxdepth', default=2000, type=int,
                        help='maximum read depth in the breakend position to make mutation (default = 2000)')
    parser.add_argument('--maxdfrac', default=0.1, type=float,
                        help='maximum discordant fraction (is_proper_pair / is_pair) of reads (default = 0.1)')
    parser.add_argument('--minctglen', dest='minctglen', default=4000,
                        help="minimum length for contig generation, also used to pad assembly (default=4000)")
    parser.add_argument('-n', dest='maxmuts', default=None,
                        help="maximum number of mutations to make")
    parser.add_argument('-c', '--cnvfile', dest='cnvfile', default=None,
                        help="tabix-indexed list of genome-wide absolute copy number values (e.g. 2 alleles = no change)")
    parser.add_argument('--donorbam', dest='donorbam', default=None,
                        help='bam file for donor reads if using BIGDUP mutations')
    parser.add_argument('--ismean', dest='ismean', default=300,
                        help="mean insert size (default = estimate from region)")
    parser.add_argument('--issd', dest='issd', default=70,
                        help="insert size standard deviation (default = estimate from region)")
    parser.add_argument('--simerr', dest='simerr', default=0.0,
                        help='error rate for wgsim-generated reads')
    parser.add_argument('-p', '--procs', dest='procs', default=1,
                        help="split into multiple processes (default=1)")
    parser.add_argument('--inslib', default=None,
                        help='FASTA file containing library of possible insertions, use INS RND instead of INS filename to pick one')
    parser.add_argument('--aligner', default='backtrack',
                        help='supported aligners: ' + ','.join(aligners.supported_aligners_fastq))
    parser.add_argument('--alignopts', default=None,
                        help='aligner-specific options as comma delimited list of option1:value1,option2:value2,...')
    parser.add_argument('--alignerthreads', default=1,
                        help='threads used per realignment (default = 1)')
    parser.add_argument('--tagreads', action='store_true', default=False,
                        help='add BS tag to altered reads')
    parser.add_argument('--skipmerge', action='store_true', default=False,
                        help='do not merge spike-in reads back into original BAM')
    parser.add_argument('--keepsecondary', action='store_true', default=False,
                        help='keep secondary reads in final BAM')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='output read tracking info to debug file, retain all intermediates')
    parser.add_argument('--tmpdir', default='addsv.tmp',
                        help='temporary directory (default=addsv.tmp)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed random number generation')
    parser.add_argument('--allowN', action='store_true', default=False,
                        help='allow N in contigs, replace with A and warn user (default: drop mutation)')
    args = parser.parse_args()
    main(args)
| mit | 5a5d6a2ad4a66b78d29e8e4feaf4072b | 40.001762 | 213 | 0.594409 | 3.274947 | false | false | false | false |
adamewing/bamsurgeon | scripts/postprocess.py | 1 | 10607 | #!/usr/bin/env python
import argparse
import pysam
import sys
import logging
from subprocess import call
from re import sub
from os.path import basename
from os import remove, rename
from uuid import uuid4
def getRG(tags):
    """Return the value of the first RG (read group) tag in a pysam tag list.

    Returns None when no RG tag is present.
    """
    return next((value for name, value in tags if name == 'RG'), None)
def putRG(tags, rg):
    """Return a copy of a pysam tag list with every RG tag's value replaced by *rg*.

    Non-RG tags and overall tag order are preserved.
    """
    return [(name, rg if name == 'RG' else value) for name, value in tags]
def samrec(read, bam, IDRG, newname=None):
    ''' output sam formatted record from pysam.AlignedRead

    Serialises the read by hand (QNAME FLAG RNAME POS MAPQ CIGAR RNEXT PNEXT
    TLEN SEQ QUAL [TAGS...]) rather than via pysam, remapping any RG tag value
    through the IDRG old-ID -> new-ID dict and optionally renaming the read to
    *newname*. Only a whitelisted subset of optional tags is retained.
    '''
    fields = []

    # QNAME: optionally replace the read name (used for --rename/uuid mode)
    if newname is not None:
        fields.append(newname)
    else:
        fields.append(read.qname)

    # FLAG
    fields.append(str(read.flag))

    # RNAME: an unmapped read is placed on its mate's reference when the mate maps
    if read.is_unmapped:
        if read.mate_is_unmapped:
            fields.append('*')
        else:
            fields.append(bam.getrname(read.rnext))
    else:
        fields.append(bam.getrname(read.tid))

    # POS then MAPQ: pysam is 0-based, SAM is 1-based, hence the +1 below
    if read.is_unmapped:
        if read.mate_is_unmapped:
            fields.append('0') # was 0
        else:
            fields.append(str(read.mpos+1)) #avoid OBOE, pysam is always 0-based, SAM is 1-based, BAM is 0-based.
        fields.append('0') # was 0 -- MAPQ is reported as 0 for unmapped reads
    else:
        fields.append(str(read.pos+1)) #avoid OBOE
        fields.append(str(read.mapq))

    # unmapped reads should have '*' as CIGAR string
    if read.is_unmapped:
        fields.append('*')
    else:
        fields.append(read.cigarstring)

    # RNEXT: '=' when mate is on the same reference, or when either end is unmapped
    if read.tid == read.rnext:
        fields.append('=')
    else:
        if read.is_unmapped or read.mate_is_unmapped:
            fields.append('=')
        else:
            fields.append(bam.getrname(read.rnext))

    # PNEXT: an unmapped mate is placed at this read's own position
    if read.mate_is_unmapped:
        if read.is_unmapped:
            fields.append('0') # was 0
        else:
            fields.append(str(read.pos+1)) # avoid OBOE
    else:
        fields.append(str(read.mpos+1)) # avoid OBOE

    # TLEN is only meaningful when both ends are mapped
    if read.is_unmapped or read.mate_is_unmapped:
        fields.append('0')
    else:
        fields.append(str(read.isize))

    # SEQ / QUAL
    fields.append(read.seq)
    fields.append(read.qual)

    for tag in read.tags:
        tagname, tagval = tag

        # retain only certain tags
        if tagname in ('NM', 'MD', 'AS', 'XS', 'RG'):
            tagtype = None

            # note, python has no 'char' type so tags with 'A' type will be converted to 'Z'
            if type(tagval) == type('a'):
                tagtype = 'Z'
            if type(tagval) == type(1):
                tagtype = 'i'
            if type(tagval) == type(1.0):
                tagtype = 'f'

            # RG values are remapped to the regenerated read-group IDs in the new header
            if tagname == 'RG':
                if tagval not in IDRG:
                    logging.error("ERROR: read group: " + tagval + " not found!\n")
                assert tagval in IDRG # make sure readgroup is value (i.e. in the header)
                tagval = IDRG[tagval]

            if tagtype is not None:
                fields.append(':'.join((tagname, tagtype, str(tagval))))

    return '\t'.join(fields)
def makebam(sam, fai, threads, mem):
    """Convert a SAM file to BAM and coordinate-sort it with samtools.

    The final sorted BAM is written to <name>.sorted.bam next to the input.
    Intermediates are left in place for debugging.
    """
    # Escape the dot: the old pattern '.sam$' would match any character before 'sam'.
    outbam = sub(r'\.sam$', '.bam', sam)

    cmd = ['samtools', 'view', '-bt', fai, '-o', outbam, sam]
    logging.info(sam + ' --> ' + outbam + ': ' + ' '.join(cmd) + '\n')
    call(cmd)

    outsort = sub(r'\.bam$', '.sorted.bam', outbam)
    cmd = ['samtools', 'sort', '-m', str(mem), '-@', str(threads), '-T', outsort, '-o', outsort, outbam]
    # BUGFIX: the old code did `outsort += '.bam'` here, so the log claimed the
    # output was '<name>.sorted.bam.bam' while `samtools sort -o` actually
    # writes '<name>.sorted.bam'. Log the real output path instead.
    logging.info(outbam + ' --> ' + outsort + ': ' + ' '.join(cmd) + '\n')
    call(cmd)
def main(args):
    """Postprocess a bamsurgeon output BAM into a spec-compliant, sorted BAM.

    Regenerates read-group IDs (SM/LB/CN/PU/ID), then streams every read and,
    for each primary read pair, reconciles the mate-related fields (read group,
    strand flags, mate position, unmapped flags, mate reference, TLEN) between
    the two ends before writing SAM records. The SAM is finally converted and
    sorted via makebam().
    """
    assert args.bam[0].endswith('.bam')
    assert args.fai.endswith('.fai')

    outsamfn = sub('.bam$', '.postprocessed.sam', args.bam[0])

    bam = pysam.AlignmentFile(args.bam[0])

    PURG = {}  # old PU -> new PU (uuid)
    IDRG = {}  # old RG ID -> new RG ID (uuid)

    header = bam.header

    # rewrite read groups: fresh sample name plus uuid-based PU and ID values
    if 'RG' in header:
        newSM = sub('.bam$', '', basename(args.bam[0]))
        for RG in header['RG']:
            RG['SM'] = newSM
            RG['LB'] = 'bamsurgeon'
            RG['CN'] = 'BS'
            if 'PU' in RG and RG['PU'] not in PURG:
                PU = str(uuid4())
                PURG[RG['PU']] = PU
                RG['PU'] = PU
            if 'ID' in RG and RG['ID'] not in IDRG:
                ID = str(uuid4())
                IDRG[RG['ID']] = ID
                RG['ID'] = ID

    # replace the program (PG) chain with a single bamsurgeon entry
    if 'PG' in header:
        del header['PG']

    header['PG'] = [{'ID': 'bamsurgeon', 'PN': 'bamsurgeon'}]

    # write the header only, then append records as plain text below
    outsam = pysam.AlignmentFile(outsamfn, 'wh', header=header)
    outsam.close()

    paired = {} # track read pairs

    # counters for debug
    n = 0 # number of reads
    p = 0 # number of paired reads
    u = 0 # number of unpaired reads
    w = 0 # reads written
    m = 0 # mates found

    fixed_strand = 0
    fixed_rg_pair = 0
    fixed_matepos = 0
    fixed_tlen = 0
    fixed_unmap = 0
    fixed_materef = 0

    # report progress roughly every 1% of reads; fall back to a fixed tick
    # when the BAM has no index (bam.mapped/unmapped raise ValueError then)
    tick = 100000
    try:
        tick = int((bam.mapped + bam.unmapped) * 0.01)
        if tick == 0:
            tick = 1
        logging.info("outputting status every " + str(tick) + " reads (1%) ...\n")
    except ValueError as e:
        logging.info("no index found, outputting status every " + str(tick) + " reads.\n")

    outsam = open(outsamfn, 'a')

    for read in bam.fetch(until_eof=True):
        n += 1
        if read.is_paired and not read.is_secondary:
            p += 1
            if read.qname in paired:
                # make sure paired read groups match
                rg = getRG(read.tags)
                if rg != getRG(paired[read.qname].tags):
                    read.tags = putRG(read.tags, rg)
                    paired[read.qname].tags = putRG(paired[read.qname].tags, rg)
                    assert rg == getRG(paired[read.qname].tags)
                    fixed_rg_pair += 1

                # fix strand
                if read.mate_is_reverse != paired[read.qname].is_reverse or paired[read.qname].mate_is_reverse != read.is_reverse:
                    read.mate_is_reverse = paired[read.qname].is_reverse
                    paired[read.qname].mate_is_reverse = read.is_reverse
                    assert read.mate_is_reverse == paired[read.qname].is_reverse and paired[read.qname].mate_is_reverse == read.is_reverse
                    fixed_strand += 1

                # fix mate position
                if read.pnext != paired[read.qname].pos or paired[read.qname].pnext != read.pos:
                    read.pnext = paired[read.qname].pos
                    paired[read.qname].pnext = read.pos
                    assert read.pnext == paired[read.qname].pos and paired[read.qname].pnext == read.pos
                    fixed_matepos += 1

                # fix unmapped flag
                if read.mate_is_unmapped != paired[read.qname].is_unmapped or paired[read.qname].mate_is_unmapped != read.is_unmapped:
                    read.mate_is_unmapped = paired[read.qname].is_unmapped
                    paired[read.qname].mate_is_unmapped = read.is_unmapped
                    assert read.mate_is_unmapped == paired[read.qname].is_unmapped and paired[read.qname].mate_is_unmapped == read.is_unmapped
                    fixed_unmap += 1

                # fix mate ref
                if read.tid != paired[read.qname].rnext or paired[read.qname].tid != read.rnext:
                    read.rnext = paired[read.qname].tid
                    paired[read.qname].rnext = read.tid
                    assert read.tid == paired[read.qname].rnext and paired[read.qname].tid == read.rnext
                    fixed_materef += 1

                # fix tlen (left - (right + read length) where left < right)
                if not read.is_unmapped and not paired[read.qname].is_unmapped and read.tid == paired[read.qname].tid:
                    if abs(read.tlen) != abs(paired[read.qname].tlen):
                        read.tlen = min(read.pos, read.pnext)-(max(read.pos, read.pnext)+read.rlen)
                        paired[read.qname].tlen = 0-read.tlen
                        assert abs(read.tlen) == abs(paired[read.qname].tlen)
                        fixed_tlen += 1

                newname = None
                if args.rename:
                    newname = str(uuid4())

                outsam.write(samrec(read, bam, IDRG, newname=newname) + '\n') # output read
                outsam.write(samrec(paired[read.qname], bam, IDRG, newname=newname) + '\n') # output mate

                del paired[read.qname]

                w += 1
                m += 1
            else:
                # first end of a pair seen: hold it until its mate arrives
                paired[read.qname] = read
                w += 1
        else:
            # unpaired (or secondary) reads are written straight through
            if not read.is_secondary:
                u += 1
                newname = None
                if args.rename:
                    newname = str(uuid4())
                outsam.write(samrec(read, bam, IDRG, newname=newname) + '\n')
                w += 1

        if n % tick == 0:
            logging.info('\t'.join(map(str, ('processed',n,'reads:',p,'paired',u,'unpaired',w,'written',m,'mates found.'))) + '\n')
            logging.info('\t'.join(map(str, ('fixed strand:', fixed_strand, 'fixed RG pair:', fixed_rg_pair, 'fixed mate pos:', fixed_matepos))) + '\n')
            logging.info('\t'.join(map(str, ('fixed unmapped flag:', fixed_unmap, 'fixed mate ref:', fixed_materef, 'fixed tlen:', fixed_tlen))) + '\n')

    # anything still in `paired` never met its mate and was not written
    if len(paired.keys()) > 0:
        logging.warn("found " + str(len(list(paired.keys()))) + " orphaned paired reads that were not output!\n")

    outsam.close()

    makebam(outsamfn, args.fai, args.threads, args.mem)
if __name__ == '__main__':
    # Command-line entry point: parse options and hand off to main().
    parser = argparse.ArgumentParser(description='Postprocess BAM files generated by bamsurgeon to ensure consistency and compliance with SAM spec.')
    parser.add_argument(metavar='<BAM file>', dest='bam', nargs=1, help='BAM file (from bamsurgeon output)')
    parser.add_argument('-f', '--fai', dest='fai', required=True, help='.fai index, generated with samtools faidx on reference FASTA')
    parser.add_argument('-t', '--sort-threads', dest='threads', default=1, help='threads for sorting with samtools (-@)')
    parser.add_argument('-m', '--sort-mem', dest='mem', default='4G', help='memory PER THREAD for sorting with samtools (-m)')
    parser.add_argument('--rename', action='store_true', default=False, help='rename reads to uuids')
    args = parser.parse_args()
    main(args)
| mit | 09cdc7671392ba479cc86c85df24b28e | 35.078231 | 152 | 0.537852 | 3.561786 | false | false | false | false |
wemake-services/wemake-django-template | {{cookiecutter.project_name}}/docs/conf.py | 1 | 4080 | # wemake-django-template documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 30 12:42:34 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
import tomli
# We need `server` to be importable from here:
sys.path.insert(0, os.path.abspath('..'))
# Django setup, all deps must be present to succeed:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')
django.setup()
# -- Project information -----------------------------------------------------
def _get_project_meta():
    """Read the [tool.poetry] table from the project's pyproject.toml."""
    with open('../pyproject.toml', mode='rb') as config_file:
        metadata = tomli.load(config_file)
    return metadata['tool']['poetry']
pkg_meta = _get_project_meta()
project = str(pkg_meta['name'])
author = str(pkg_meta['authors'][0])
copyright = author  # noqa: WPS125

# The short X.Y version
version = str(pkg_meta['version'])
# The full version, including alpha/beta/rc tags
release = version


# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '5.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',

    # 3rd party, order matters:
    # https://github.com/wemake-services/wemake-django-template/issues/159
    'sphinx_autodoc_typehints',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = ['.rst']

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'moreinfo.html',
        'searchbox.html',
    ],
}
| mit | 4beeaa3e9dc0b3ff52cdd5226c070929 | 30.875 | 79 | 0.699265 | 3.827392 | false | true | false | false |
scitran/core | tests/integration_tests/python/test_access_log.py | 2 | 11998 | import time
from api.web.request import AccessType
# NOTE these tests assume they are not running in parallel w/ other tests
# by relying on the last entry in the logs
def _log_count(log_db):
    """Number of entries currently in the access log collection."""
    return log_db.access_log.count({})


def _newest_log(log_db):
    """The most recently inserted access log entry."""
    return log_db.access_log.find({}).sort([('_id', -1)]).limit(1)[0]


def _assert_logged(log_db, count_before, access_type):
    """Assert exactly one new log entry of *access_type* was written by admin.

    Returns the entry so callers can make additional context assertions.
    """
    assert _log_count(log_db) == count_before + 1
    entry = _newest_log(log_db)
    assert entry['access_type'] == access_type.value
    assert entry['origin']['id'] == 'admin@user.com'
    return entry


def test_access_log_succeeds(data_builder, as_admin, log_db):
    """Every loggable action writes exactly one access log entry with the right context."""
    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()
    file_name = 'one.csv'

    # login action is logged
    api_key = as_admin.get('/users/self').json()['api_key']['key']
    before = _log_count(log_db)
    r = as_admin.post('/login', json={
        'auth_type': 'api-key',
        'code': api_key
    })
    assert r.ok
    _assert_logged(log_db, before, AccessType.user_login)

    # logout action is logged
    before = _log_count(log_db)
    r = as_admin.post('/logout')
    assert r.ok
    _assert_logged(log_db, before, AccessType.user_logout)

    # project access is logged
    before = _log_count(log_db)
    r = as_admin.get('/projects/' + project)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.view_container)
    assert entry['context']['project']['id'] == project

    # session access is logged
    before = _log_count(log_db)
    r = as_admin.get('/sessions/' + session)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.view_container)
    assert entry['context']['session']['id'] == session

    # acquisition access is logged
    before = _log_count(log_db)
    r = as_admin.get('/acquisitions/' + acquisition)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.view_container)
    assert entry['context']['acquisition']['id'] == acquisition

    # add subject info
    subject_code = 'Test subject code'
    r = as_admin.put('/sessions/' + session, json={
        'subject': {'code': subject_code}}
    )
    assert r.ok

    # subject access is logged
    before = _log_count(log_db)
    r = as_admin.get('/sessions/' + session + '/subject')
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.view_subject)
    assert entry['context']['session']['id'] == session
    assert entry['context']['subject']['label'] == subject_code

    # Upload files
    r = as_admin.post('/projects/' + project + '/files', files={
        'file': (file_name, 'test-content')
    })
    assert r.ok

    # file download is logged
    before = _log_count(log_db)
    r = as_admin.get('/projects/' + project + '/files/' + file_name)
    assert r.ok
    file_ = r.raw.read(10)
    time.sleep(1)  # give the server a moment to flush the log entry
    entry = _assert_logged(log_db, before, AccessType.download_file)
    assert entry['context']['project']['id'] == project
    assert entry['context']['file']['name'] == file_name

    # file ticket download is logged exactly once (ticket creation is not logged)
    before = _log_count(log_db)
    r = as_admin.get('/projects/' + project + '/files/' + file_name, params={'ticket': ''})
    assert r.ok
    ticket_id = r.json()['ticket']
    r = as_admin.get('/projects/' + project + '/files/' + file_name, params={'ticket': ticket_id})
    assert r.ok
    file_ = r.raw.read(10)
    time.sleep(1)
    entry = _assert_logged(log_db, before, AccessType.download_file)
    assert entry['context']['project']['id'] == project
    assert entry['context']['file']['name'] == file_name
    assert entry['context']['ticket_id'] == ticket_id

    # Upload another file
    r = as_admin.post('/sessions/' + session + '/files', files={
        'file': (file_name, 'test-content')
    })
    assert r.ok

    # container bulk download logs one entry per file
    before = _log_count(log_db)
    r = as_admin.post('/download', json={'optional': True, 'nodes': [{'level': 'project', '_id': project}]})
    assert r.ok
    ticket_id = r.json()['ticket']
    file_count = r.json()['file_cnt']
    r = as_admin.get('/download', params={'ticket': ticket_id})
    assert r.ok
    assert _log_count(log_db) == before + file_count
    for entry in log_db.access_log.find({}).sort([('_id', -1)]).limit(file_count):
        assert entry['context']['file']['name'] == file_name
        assert entry['access_type'] == AccessType.download_file.value
        assert entry['origin']['id'] == 'admin@user.com'

    # search bulk download logs one entry per file
    before = _log_count(log_db)
    r = as_admin.post('/download', params={'bulk': True},
                      json={"files": [{"container_name": "project", "container_id": project, "filename": file_name},
                                      {"container_name": "session", "container_id": session, "filename": file_name}]})
    assert r.ok
    ticket_id = r.json()['ticket']
    file_count = r.json()['file_cnt']
    r = as_admin.get('/download', params={'ticket': ticket_id})
    assert r.ok
    assert _log_count(log_db) == before + file_count
    for entry in log_db.access_log.find({}).sort([('_id', -1)]).limit(file_count):
        assert entry['context']['file']['name'] == file_name
        assert entry['access_type'] == AccessType.download_file.value
        assert entry['origin']['id'] == 'admin@user.com'

    # file info access is logged
    before = _log_count(log_db)
    r = as_admin.get('/projects/' + project + '/files/' + file_name + '/info')
    assert r.ok
    assert r.json()['name'] == file_name
    entry = _assert_logged(log_db, before, AccessType.view_file)
    assert entry['context']['project']['id'] == project
    assert entry['context']['file']['name'] == file_name

    # file delete is logged
    before = _log_count(log_db)
    r = as_admin.delete('/projects/' + project + '/files/' + file_name)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.delete_file)
    assert entry['context']['project']['id'] == project
    assert entry['context']['file']['name'] == file_name

    # acquisition delete is logged
    before = _log_count(log_db)
    r = as_admin.delete('/acquisitions/' + acquisition)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.delete_container)
    assert entry['context']['acquisition']['id'] == acquisition

    # session delete is logged
    before = _log_count(log_db)
    r = as_admin.delete('/sessions/' + session)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.delete_container)
    assert entry['context']['session']['id'] == session

    # project delete is logged
    before = _log_count(log_db)
    r = as_admin.delete('/projects/' + project)
    assert r.ok
    entry = _assert_logged(log_db, before, AccessType.delete_container)
    assert entry['context']['project']['id'] == project
assert most_recent_log['origin']['id'] == 'admin@user.com'
def test_access_log_fails(data_builder, as_admin, log_db):
    """Verify file deletion is aborted when the access-log insert fails.

    A strict mongo validator requiring a nonexistent 'foo' field is
    installed on the access_log collection so every log insert is rejected;
    the delete endpoint must then return 500 and leave the file in place.
    """
    project = data_builder.create_project()
    file_name = 'one.csv'
    # Make every insert into access_log fail validation.
    log_db.command('collMod', 'access_log', validator={'$and': [{'foo': {'$exists': True}}]}, validationLevel='strict')
    # Upload files
    r = as_admin.post('/projects/' + project + '/files', files={
        'file': (file_name, 'test-content')
    })
    assert r.ok
    ###
    # Test file delete request fails and does not delete file
    ###
    r = as_admin.delete('/projects/' + project + '/files/' + file_name)
    assert r.status_code == 500
    # Remove the failing validator so later tests (and the check below) work.
    log_db.command('collMod', 'access_log', validator={}, validationLevel='strict')
    r = as_admin.get('/projects/' + project)
    assert r.ok
    assert r.json()['files']  # the file must still be present
| mit | 307a5616e9834f4966e037c267a59916 | 30.994667 | 119 | 0.61702 | 3.369278 | false | true | false | false |
xesscorp/skidl | docs/files/a-taste-of-hierarchy/intfc_brd.py | 2 | 4256 | from skidl import *
@SubCircuit
def osc(osc1, osc2, gnd,
        crystal = Part("Device", 'Crystal', footprint='Crystal:Crystal_HC49-U_Vertical', dest=TEMPLATE),
        cap = Part("Device", 'C', value='10pf', footprint='Capacitors_SMD:C_0603', dest=TEMPLATE) ):
    '''Attach a crystal and two trim caps to the osc1 and osc2 nets.

    crystal and cap are skidl Part templates; defaults are evaluated once at
    import time, which is the intended skidl TEMPLATE idiom (instances are
    stamped out via crystal(1) / cap(2)), not shared mutable state.
    Handles both 2-pin and 4-pin crystal packages.
    '''
    xtal = crystal(1)                 # Instantiate the crystal from the template.
    num_xtal_pins = len(xtal['.*'])   # Get the number of pins on the crystal.
    if num_xtal_pins == 4:            # This handles a 4-pin crystal...
        xtal[2, 4] += gnd             # Connect the crystal ground pins.
        xtal[3, 1] += osc1, osc2      # Connect the crystal pins to the oscillator nets.
    else:                             # Otherwise assume it's a 2-pin crystal...
        xtal[1,2] += osc1, osc2       # Using a two-pin crystal.
    trim_cap = cap(2)                 # Instantiate some trimmer caps.
    trim_cap[0][1, 2] += osc1, gnd    # Connect the trimmer caps to the crystal.
    trim_cap[1][1, 2] += osc2, gnd
# Top-level netlist description for the interface board: a PIC32 MCU with
# USB, 3.3V regulation, programming headers, and an FPGA JTAG port.
# Libraries.
xess_lib = r'C:\xesscorp\KiCad\libraries\xess.lib'
pic32_lib = r'C:\xesscorp\KiCad\libraries\pic32.lib'
pickit3_lib = r'C:\xesscorp\KiCad\libraries\pickit3.lib'
# Global nets.  drive = POWER marks these as driven so ERC accepts them.
gnd = Net('GND')
gnd.drive = POWER
vusb = Net('VUSB')
vusb.drive = POWER
vdd = Net('+3.3V')
# Some common parts used as templates.
cap = Part("Device", 'C', footprint='Capacitors_SMD:C_0603', dest=TEMPLATE)
res = Part("Device", 'R', footprint='Resistors_SMD:R_0603', dest=TEMPLATE)
# Regulate +5V VUSB down to +3.3V for VDD.
vreg = Part(xess_lib, 'TPS793XX', footprint='TO_SOT_Packages_SMD:SOT-23-5')
noise_cap = cap(value='0.01uf')
vreg['IN, EN'] += vusb
vreg['GND'] += gnd
vreg['OUT'] += vdd
vreg['NR'] += noise_cap[1]  # Noise-reduction pin decoupled to ground.
noise_cap[2] += gnd
# Microcontroller.
pic32 = Part(pic32_lib, 'pic32MX2\*0F\*\*\*B-QFN28',
             footprint='Housings_DFN_QFN:QFN-28-1EP_6x6mm_Pitch0.65mm')
pic32['VSS'] += gnd
pic32['VDD'] += vdd      # Main CPU power.
pic32['VUSB3V3'] += vdd  # Power to USB transceiver.
pic32['^VBUS$'] += vusb  # Monitor power pin of USB connector.
pic32['PAD'] += gnd      # Power pad on bottom attached to ground.
# Bypass capacitors for microcontroller.
bypass = cap(3, value='0.1uf')
bypass[0][1, 2] += vdd, gnd
bypass[1][1, 2] += vdd, gnd
bypass[2][1, 2] += pic32['VCAP'], gnd
# Microcontroller MCLR circuitry:
#   Pull-up resistor to VDD.
#   Filter capacitor to delay exit of reset or eliminate glitches.
#   Series resistor to isolate capacitor from device programmer.
r_pullup = res(value='10K')
r_series = res(value='1K')
filter_cap = cap(value='0.1uf')
r_series[1, 2] += r_pullup[1], pic32['MCLR']
r_pullup[2] += vdd
filter_cap[1, 2] += r_series[1], gnd
# USB connector.
usb_conn = Part(xess_lib, 'USB-MicroB', footprint='XESS:USB-microB-1')
usb_conn['D\+, D-, VBUS, GND, NC'] += pic32['D\+, D-'], vusb, gnd, NC
# Noise filtering/isolation on the USB connector shield.
shld_cap = cap(value='4.7nf')
shld_res = res(value='1M')
shld_cap[1] += usb_conn['shield']
shld_res[1] += usb_conn['shield']
gnd += shld_cap[2], shld_res[2]
# LED with current-limiting resistor driven by microcontroller pin.
led = Part("Device", 'led', footprint='Diodes_SMD:D_0603')
led_curr_limit = res(value='1K')
led_curr_limit[1, 2] += pic32['RB4'], led['A']
led['K'] += gnd
# Crystal and trim capacitors.
# crystal = Part(xess_lib, 'XTAL4', footprint='XESS:32x25-4', dest=TEMPLATE)
# osc(pic32['OSC1'], pic32['OSC2'], gnd, crystal, cap)
osc(pic32['OSC1'], pic32['OSC2'], gnd)  # Use default crystal and trim caps.
# Port for attachment of device programmer.
prg_hdr = Part(pickit3_lib, 'pickit3_hdr', footprint='Pin_Headers:Pin_Header_Straight_1x06')
prg_hdr.ref = 'PRG'
prg_hdr['MCLR'] += pic32['MCLR']
prg_hdr['VDD'] += vdd
prg_hdr['GND'] += gnd
prg_hdr['PGC'] += pic32['PGEC1']
prg_hdr['PGD'] += pic32['PGED1']
# Port for attachment of FPGA programming pins.
port = Part('conn', 'CONN_01x06', footprint='Pin_Headers:Pin_Header_Straight_1x06')
port.ref = 'JTAG'
port[1, 2] += vusb, gnd
port[3] += pic32['SCK1']  # SCK1 output.
port[5] += pic32['RB5']   # PPS: SDI1 input.
port[4] += pic32['RB15']  # PPS: SS1 output.
port[6] += pic32['RA4']   # PPS: SDO1 output.
# Run electrical rules check and emit the netlist file.
ERC()
generate_netlist()
| mit | 238830559b964cdcf7563b9ab2a9f145 | 37.690909 | 105 | 0.639333 | 2.502058 | false | false | false | false |
xesscorp/skidl | skidl/libs/stm8_sklib.py | 1 | 8126 | from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
stm8 = SchLib(tool=SKIDL).add_parts(*[
Part(name='STM8L051F3P',dest=TEMPLATE,tool=SKIDL,keywords='STM8L Microcontroller Value Line Low Power',description='16MHz, 8K Flash, 1k RAM, 256 EEPROM, RTC, USART, I2C, SPI, ADC, TSSOP20',ref_prefix='U',num_units=1,fplist=['TSSOP*'],do_erc=True,pins=[
Pin(num='1',name='PC5/OSC32_IN/TIM2_CH1',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='PC6/OSC32_OUT/TIM2_CH2',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='SWIM/BEEP/IR_TIM/PA0',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='NRST/PA1',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='OSC_IN/PA2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='OSC_OUT/PA3',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='VSS',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='PD0/TIM3_CH2/ADC1_IN22',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='ADC1_IN18/TIM2_CH1/PB0',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='PC4/I2C_SMB/CCO/ADC1_IN4',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='ADC1_IN17/TIM3_CH1/PB1',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='ADC1_IN16/TIM2_CH2/PB2',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='RTC_ALARM/ADC1_IN15/TIM2_ETR/PB3',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='ADC1_IN14/SPI1_NSS/PB4',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='ADC1_IN13/SPI_SCK/PB5',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='ADC1_IN12/SPI1_MOSI/PB6',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='ADC1_IN11/SPI1_MISO/PB7',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='PC0/I2C_SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='PC1/I2C_SCL',func=Pin.BIDIR,do_erc=True)]),
Part(name='STM8L101F2P',dest=TEMPLATE,tool=SKIDL,keywords='STM8L Microcontroller Value Line Low Power',description='16MHz, 8K Flash, 1.5k RAM, 2k EEPROM, USART, I2C, SPI, AC, TSSOP20',ref_prefix='U',num_units=1,fplist=['TSSOP*'],do_erc=True,aliases=['STM8L101F3P'],pins=[
Pin(num='1',name='PC3/USART_TX',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='PC4/USART_CK/CCO',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='SWIM/BEEP/IR_TIM/PA0',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='NRST/PA1',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='PA2',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='PA3',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='VSS',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='PD0/TIM3_CH2/COMP1_CH3',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='COMP1_CH1/TIM2_CH1/PB0',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='PC2/USART_RX',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='COMP1_CH2/TIM3_CH1/PB1',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='COMP2_CH1/TIM2_CH2/PB2',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='COMP2_CH2/TIM2_TRIG/PB3',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='SPI1_NSS/PB4',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='SPI_SCK/PB5',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='SPI1_MOSI/PB6',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='SPI1_MISO/PB7',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='PC0/I2C_SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='PC1/I2C_SCL',func=Pin.BIDIR,do_erc=True)]),
Part(name='STM8S003F3P',dest=TEMPLATE,tool=SKIDL,keywords='STM8S Mainstream Value line 8-bit, 16MHz, 1k RAM, 128 EEPROM',description='16MHz, 8K Flash, 1k RAM, 128 EEPROM, USART, I2C, SPI, TSSOP20',ref_prefix='U',num_units=1,fplist=['TSSOP*'],do_erc=True,pins=[
Pin(num='1',name='PD4/TIM2_CH1/BEEP/UART1_CK',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='PD5/AIN5/UART1_TX',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='PD6/AIN6/UART1_RX',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='NRST',do_erc=True),
Pin(num='5',name='OSCIN/PA1',func=Pin.BIDIR,do_erc=True),
Pin(num='6',name='OSCOUT/PA2',func=Pin.BIDIR,do_erc=True),
Pin(num='7',name='VSS',func=Pin.PWROUT,do_erc=True),
Pin(num='8',name='Vcap',func=Pin.PASSIVE,do_erc=True),
Pin(num='9',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='[SPI_NSS]TIM2_CH3/PA3',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='PD3/AIN4/TIM2_CH2',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='[TIM1_BKIN]I2C_SDA/PB5',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='[ADC_ETR]I2C_SCL/PB4',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='[TIM1_CH1N]TIM1_CH3/PC3',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='[TIM1_CH2N]AIN2/TIM1_CH4/PC4',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='[TIM2_CH1]SPI_SCK/PC5',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='[TIM1_CH1]SPI_MOSI/PC6',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='[TIM1_CH2]SPI_MISO/PC7',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='PD1/SWIM',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='PD2/AIN3[TIM2_CH3]',func=Pin.BIDIR,do_erc=True)]),
Part(name='STM8S003K3T',dest=TEMPLATE,tool=SKIDL,keywords='STM8 Microcontroller Value Line',description='16MHz, 8K Flash, 1K RAM, 128 EEPROM, LQFP32 (7x7mm, 0.8mm pitch)',ref_prefix='U',num_units=1,fplist=['LQFP32*'],do_erc=True,pins=[
Pin(num='1',name='NRST',do_erc=True),
Pin(num='2',name='OSCI/PA1',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='OSCOUT/PA2',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='VSS',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Vcap',do_erc=True),
Pin(num='6',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='[SPI_NSS]TIM2_CH3/PA3',func=Pin.BIDIR,do_erc=True),
Pin(num='8',name='PF4',func=Pin.BIDIR,do_erc=True),
Pin(num='9',name='PB7',func=Pin.BIDIR,do_erc=True),
Pin(num='10',name='PB6',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='TIM1_CH3/PC3',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='PD5/UART1_TX',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='I2C_SDA/PB5',func=Pin.BIDIR,do_erc=True),
Pin(num='21',name='CLK_CCO/TIM1_CH4/PC4',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='PD6/UART1_RX',func=Pin.BIDIR,do_erc=True),
Pin(num='12',name='I2C_SCL/PB4',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='SPI_SCK/PC5',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='PD7/TLI[TIM1_CH4]',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='TIM1_ETR/AIN3/PB3',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='PI_MOSI/PC6',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='TIM1_CH3N/AIN2/PB2',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='PI_MISO/PC7',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='TIM1_CH2N/AIN1/PB1',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='PD0/TIM1_BKIN[CLK_CCO]',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TIM1_CH1N/AIN0/PB0',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='PD1/SWIM',func=Pin.BIDIR,do_erc=True),
Pin(num='17',name='PE5/SPI_NSS',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='PD2[TIM2_CH3]',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='UART1_CK/TIM1_CH1/PC1',func=Pin.BIDIR,do_erc=True),
Pin(num='28',name='PD3/ADC_ETR/TIM2_CH2',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='TIM1_CH2/PC2',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='PD4/BEEP/TIM2_CH1',func=Pin.BIDIR,do_erc=True)])])
| mit | b627596d8531d2dd2be256f6d093e271 | 79.455446 | 279 | 0.602141 | 2.252842 | false | false | true | false |
scitran/core | bin/integrity_check.py | 2 | 3067 | # Print list of rules with invalid algs
import argparse
import logging
import sys
from api import config
from api.dao import APINotFoundException
from api.jobs.gears import get_gear_by_name
# Methods should return true if all integrity checks passed
INTEGRITY_CHECKS = {
"rule_alg" : "Confirm alg keys in rules table can be resolved to gear in gear table",
"session_length" : "Confirm there are no sessions whose acquisition timestamps span more than 3 hours"
}
def rule_alg():
    """Check that every project rule's 'alg' resolves to a known gear.

    Scans all documents in the project_rules collection.  A rule fails the
    check if it has no 'alg' key or if its alg name does not match any gear.

    Returns:
        bool: True if every rule passed, False if any problem was found.
        Problems are logged as warnings.
    """
    errors = False
    for rule in config.db.project_rules.find({}):
        alg = rule.get('alg')
        if not alg:
            errors = True
            # Lazy %-style args: the message is only formatted if emitted.
            logging.warning('Rule %s has no alg.', rule['_id'])
        else:
            try:
                get_gear_by_name(alg)
            except APINotFoundException:
                errors = True
                logging.warning('Rule %s with alg %s does not match any gear in the system', rule['_id'], alg)
    return not errors
def session_length():
    """Check for sessions whose acquisition timestamps span more than 3 hours.

    Aggregates acquisitions by session, computes max - min timestamp per
    session, and flags sessions where the spread exceeds three hours.

    Returns:
        bool: True if no over-long sessions exist, False otherwise.
        Offending sessions are logged as warnings.
    """
    errors = False
    three_hours_ms = 3 * 60 * 60 * 1000  # 10800000 ms
    ms_per_minute = 60 * 1000
    pipeline = [
        {'$match': {'timestamp': {'$ne': None}}},
        {'$group': {'_id': '$session', 'min_timestamp': { '$min': '$timestamp' }, 'max_timestamp': { '$max': '$timestamp' }}},
        {'$project': {'_id': '$_id', 'diff': { '$subtract': ['$max_timestamp', '$min_timestamp']}}},
        {'$match': {'diff': {'$gt': three_hours_ms}}}
    ]
    results = config.db.command('aggregate', 'acquisitions', pipeline=pipeline)['result']
    if results:
        errors = True
        # Message corrected to "more than 3 hours" to match the $gt filter.
        logging.warning('There are %d sessions that span more than 3 hours.', len(results))
        for r in results:
            logging.warning('Session %s spans %s minutes', r['_id'], r['diff'] / ms_per_minute)
    return not errors
if __name__ == '__main__':
    # CLI entry point: build one --<check> flag per entry in INTEGRITY_CHECKS,
    # run the selected checks, and exit 1 if any check found problems.
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument("--all", help="Run all checks", action="store_true")
        for method, desc in INTEGRITY_CHECKS.iteritems():
            parser.add_argument("--"+method, help=desc, action="store_true")

        # Build the list of check names to run from the parsed flags.
        args = parser.parse_args()
        if args.all:
            methods = INTEGRITY_CHECKS.keys()
        else:
            methods = [m for m, flag in vars(args).iteritems() if flag]

        errors = False
        for method in methods:
            try:
                logging.info('Running %s...', method)
                passed = globals()[method]()
                if not passed:
                    errors = True
                    logging.warning('%s found integrity issues.', method)
                logging.info('%s complete.', method)
            except Exception:
                # Was a bare `except:`, which would also swallow SystemExit
                # and KeyboardInterrupt; narrowed to Exception.
                logging.exception('Failed to run check %s', method)

        if errors:
            logging.error('One or more checks failed')
            sys.exit(1)
        else:
            logging.info('Checks complete.')
            sys.exit(0)

    except Exception:
        logging.exception('Main method failed...')
        sys.exit(1)
| mit | af85c11ebfa5038613e115fee81e2be8 | 29.979798 | 126 | 0.558526 | 4.189891 | false | false | false | false |
scitran/core | tests/unit_tests/python/test_dataexplorer.py | 2 | 19300 | import copy
import json
import elasticsearch
import api.handlers.dataexplorerhandler as deh
class TestTransportError(elasticsearch.TransportError):
    """TransportError stand-in used as a mock side_effect in these tests."""
    def __str__(self):
        # The real TransportError formats its positional args; the tests only
        # need a fixed, recognizable message.
        return 'TestTransportError'
def test_search(as_public, as_drone, es):
    """Exercise /dataexplorer/search against a mocked elasticsearch client.

    Covers auth/validation failures, per-return_type query construction
    (session, acquisition, analysis, file), filter handling including "null"
    terms, and the size="all" / oversized-size paths.  Each case asserts the
    exact es.search() body the handler is expected to build.
    """
    # try to search w/o login
    r = as_public.post('/dataexplorer/search')
    assert r.status_code == 403
    # try to search w/o body
    r = as_drone.post('/dataexplorer/search')
    assert r.status_code == 400
    # try to search w/o return_type in body
    r = as_drone.post('/dataexplorer/search', json={})
    assert r.status_code == 400
    # try to search w/ invalid return_type
    r = as_drone.post('/dataexplorer/search', json={'return_type': 'test'})
    assert r.status_code == 400
    # try to search w/ invalid filters
    r = as_drone.post('/dataexplorer/search', json={'return_type': 'file', 'filters': 'test'})
    assert r.status_code == 400
    # session search against elastic mock
    cont_type, filter_key, filter_value, filter_range, search_str, results = 'session', 'key', 'value', 'range', 'search', 'results'
    es.search.return_value = {'aggregations': {'by_container': {'buckets': [
        {'by_top_hit': {'hits': {'hits': [results]}}},
    ]}}}
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'search_string': search_str, 'filters': [
        {'terms': {filter_key: filter_value}},
        {'range': filter_range},
    ]})
    es.search.assert_called_with(
        body={
            'size': 0,
            'query': {'bool': {
                'must': {'match': {'_all': 'search'}},
                'filter': {'bool': {'must': [
                    {'terms': {filter_key + '.raw': filter_value}},
                    {'range': filter_range},
                    {'term': {'deleted': False}}
                ]}},
            }},
            'aggs': {'by_container': {'terms':
                {'field': cont_type + '._id', 'size': 100},
                'aggs': {'by_top_hit': {'top_hits': {
                    '_source': deh.SOURCE[cont_type],
                    'size': 1
                }}}
            }}
        },
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == [results]
    # acquisition search
    cont_type = 'acquisition'
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True})
    es.search.assert_called_with(
        body={
            'size': 0,
            'query': {'bool': {
                'filter': {'bool': {'must': [
                    {'term': {'deleted': False}}
                ]}},
            }},
            'aggs': {'by_container': {'terms':
                {'field': cont_type + '._id', 'size': 100},
                'aggs': {'by_top_hit': {'top_hits': {
                    '_source': deh.SOURCE[cont_type],
                    'size': 1
                }}}
            }}
        },
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == [results]
    # analysis search
    cont_type = 'analysis'
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True})
    es.search.assert_called_with(
        body={
            'size': 0,
            'query': {'bool': {
                'filter': {'bool': {'must': [
                    {'term': {'deleted': False}}
                ]}},
            }},
            'aggs': {'by_container': {'terms':
                {'field': cont_type + '._id', 'size': 100},
                'aggs': {'by_top_hit': {'top_hits': {
                    '_source': deh.SOURCE[cont_type],
                    'size': 1
                }}}
            }}
        },
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == [results]
    # file search: the handler flattens the script field info_exists into _source
    cont_type = 'file'
    raw_file_results = [{'fields': {'info_exists': [True]}, '_source': {'file': {}}}]
    formatted_file_results = [{'_source': {'file': {'info_exists': True}}}]
    es.search.return_value = {'hits': {'hits': copy.deepcopy(raw_file_results)}}
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True})
    es.search.assert_called_with(
        body={
            '_source': deh.SOURCE[cont_type],
            'query': {'bool': {
                'filter': {'bool': {'must': [
                    {'term': {'container_type': cont_type}},
                    {'term': {'deleted': False}}
                ]}},
            }},
            'script_fields': {'info_exists': deh.INFO_EXISTS_SCRIPT},
            'size': 100},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == formatted_file_results
    # file search w/ search string and filter
    es.search.return_value = {'hits': {'hits': copy.deepcopy(raw_file_results)}}
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True, 'search_string': search_str, 'filters': [
        {'terms': {filter_key: filter_value}},
        {'range': filter_range},
    ]})
    es.search.assert_called_with(
        body={
            '_source': deh.SOURCE[cont_type],
            'query': {'bool': {
                'must': {'match': {'_all': search_str}},
                'filter': {'bool': {'must': [
                    {'term': {'container_type': cont_type}},
                    {'terms': {filter_key + '.raw': filter_value}},
                    {'range': filter_range},
                    {'term': {'deleted': False}}
                ]}}
            }},
            'script_fields': {'info_exists': deh.INFO_EXISTS_SCRIPT},
            'size': 100},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == formatted_file_results
    # Drone search without self.uid and all_data set to false
    es.search.return_value = {'hits': {'hits': copy.deepcopy(raw_file_results)}}
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': False, 'search_string': search_str, 'filters': [
        {'terms': {filter_key: filter_value}},
        {'range': filter_range},
    ]})
    es.search.assert_called_with(
        body={
            '_source': deh.SOURCE[cont_type],
            'query': {'bool': {
                'must': {'match': {'_all': search_str}},
                'filter': {'bool': {'must': [
                    {'term': {'container_type': cont_type}},
                    {'terms': {filter_key + '.raw': filter_value}},
                    {'range': filter_range},
                    {'term': {'deleted': False}}
                ]}}
            }},
            'script_fields': {'info_exists': deh.INFO_EXISTS_SCRIPT},
            'size': 100},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == formatted_file_results
    # file search w/ search null filter: "null" expands to a missing-field clause
    es.search.return_value = {'hits': {'hits': copy.deepcopy(raw_file_results)}}
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True, 'filters': [
        {'terms': {filter_key: [filter_value, "null"]}},
    ]})
    es.search.assert_called_with(
        body={
            '_source': deh.SOURCE[cont_type],
            'query': {'bool': {
                'filter': {'bool': {'must': [
                    {'term': {'container_type': cont_type}},
                    {'bool':
                        {'should':
                            [
                                {'bool':
                                    {
                                        'must': [
                                            {
                                                'bool': {
                                                    'must_not': [
                                                        {"exists": {"field":filter_key}}
                                                    ]
                                                }
                                            }
                                        ]
                                    }
                                },
                                {'terms': {filter_key + '.raw': [filter_value]}}
                            ]
                        }
                    },
                    {'term': {'deleted': False}}
                ]}}
            }},
            'script_fields': {'info_exists': deh.INFO_EXISTS_SCRIPT},
            'size': 100},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json['results'] == formatted_file_results
    # file search size=all and filters
    # file search w/ search string and filter
    es.search.return_value = {
        "hits": {
            "total": 0,
            "max_score": 0,
            "hits": []
        },
        "aggregations": {
            "count": {
                "value": 0
            }
        }
    }
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True, 'filters': [
        {'terms': {filter_key: filter_value}},
    ], 'size':"all"})
    es.search.assert_called_with(
        body={
            '_source': deh.SOURCE[cont_type],
            'query': {'bool': {
                'filter': {'bool': {'must': [
                    {'term': {'container_type': cont_type}},
                    {'terms': {filter_key + '.raw': filter_value}},
                    {'term': {'deleted': False}}
                ]}}
            }},
            'script_fields': {'info_exists': deh.INFO_EXISTS_SCRIPT},
            'size': 0},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    # file search size > 10000 is rejected
    r = as_drone.post('/dataexplorer/search', json={'return_type': cont_type, 'all_data': True, 'filters': [
        {'terms': {filter_key: filter_value}},
    ], 'size':"10000000"})
    assert r.status_code == 400
def test_get_facets(as_public, as_drone, es):
    """Exercise /dataexplorer/facets: auth check, FACET_QUERY construction,
    and the merge of the session_age sub-aggregation into by_session."""
    # try to get facets w/o login
    r = as_public.post('/dataexplorer/facets')
    assert r.status_code == 403
    # get facets w/o sending body
    subject_age = 'test'
    es.search.return_value = {'aggregations': {
        'session_age': {'subject.age': subject_age},
        'by_session': {}}}
    r = as_drone.post('/dataexplorer/facets')
    # The handler should issue FACET_QUERY with a match_all query attached.
    body = copy.deepcopy(deh.FACET_QUERY)
    body.update({'query': {'match_all': {}}})
    es.search.assert_called_with(body=body, doc_type='flywheel', index='data_explorer')
    assert r.ok
    assert r.json == {'facets': {'by_session': {'subject.age': subject_age}}}
def test_search_fields(as_public, as_drone, es):
    """Exercise /dataexplorer/search/fields: auth check and the field-name
    match query against the data_explorer_fields index."""
    # try to search fields w/o login
    r = as_public.post('/dataexplorer/search/fields')
    assert r.status_code == 403
    # search fields
    query_field, result_source = 'field', 'source'
    es.search.return_value = {'hits': {'hits': [{'_source': result_source}]}}
    r = as_drone.post('/dataexplorer/search/fields', json={'field': query_field})
    es.search.assert_called_with(
        body={'size': 15, 'query': {'match': {'name': query_field}}},
        doc_type='flywheel_field',
        index='data_explorer_fields')
    assert r.ok
    assert r.json == [result_source]
def test_index_fields(as_public, as_drone, es):
    """Exercise /dataexplorer/index/fields end to end against the es mock.

    Walks the failure modes in order (es down, missing data_explorer index,
    hard-reset delete/create/get_mapping errors), then verifies ignored
    fields are skipped and that each mapping type is flattened to the
    handler's field-type vocabulary and written to data_explorer_fields.
    """
    # try to index fields w/o login
    r = as_public.post('/dataexplorer/index/fields')
    assert r.status_code == 403
    # setup functions for later use in es.indices.exists mock; `indices`
    # simulates the set of indices that currently exist.
    indices = set()
    def es_indices_exists(index): return index in indices
    def es_indices_create(index=None, body=None): indices.add(index)
    def es_indices_delete(index): indices.remove(index)
    # try to index fields w/ es unavailable (exc @ exists)
    es.indices.exists.side_effect = TestTransportError
    r = as_drone.post('/dataexplorer/index/fields')
    assert r.status_code == 404
    es.indices.exists.side_effect = es_indices_exists
    # try to index fields before data_explorer index is available
    r = as_drone.post('/dataexplorer/index/fields')
    es.indices.exists.assert_called_with('data_explorer')
    assert r.status_code == 404
    indices.add('data_explorer')
    # try to (re)index data_explorer_fields w/ hard-reset=true (exc @ delete)
    indices.add('data_explorer_fields')
    es.indices.delete.side_effect = elasticsearch.ElasticsearchException
    r = as_drone.post('/dataexplorer/index/fields?hard-reset=true')
    es.indices.delete.assert_called_with(index='data_explorer_fields')
    assert r.status_code == 500
    es.indices.delete.side_effect = es_indices_delete
    # try to (re)index data_explorer_fields w/ hard-reset=true (exc @ create)
    es.indices.create.side_effect = elasticsearch.ElasticsearchException
    r = as_drone.post('/dataexplorer/index/fields?hard-reset=true')
    es.indices.exists.assert_called_with('data_explorer_fields')
    assert es.indices.create.called
    assert r.status_code == 500
    es.indices.create.side_effect = es_indices_create
    # try to (re)index data_explorer_fields w/ hard-reset=true (exc @ get_mapping)
    es.indices.get_mapping.side_effect = KeyError
    r = as_drone.post('/dataexplorer/index/fields?hard-reset=true')
    assert r.status_code == 404
    es.indices.get_mapping.side_effect = None
    # (re)index data_explorer_fields w/ hard-reset=true
    r = as_drone.post('/dataexplorer/index/fields?hard-reset=true')
    es.indices.create.assert_called_with(index='data_explorer_fields', body={
        'settings': {'number_of_shards': 1, 'number_of_replicas': 0, 'analysis': deh.ANALYSIS},
        'mappings': {'_default_': {'_all': {'enabled' : True}, 'dynamic_templates': deh.DYNAMIC_TEMPLATES}, 'flywheel': {}}})
    assert r.ok
    # index data_explorer_fields - test ignored fields
    ignored_fields = ['_all', 'dynamic_templates', 'analysis_reference', 'file_reference', 'parent', 'container_type', 'origin', 'permissions', '_id']
    fields = {field: None for field in ignored_fields}
    es.indices.get_mapping.return_value = {'data_explorer': {'mappings': {'flywheel': {'properties': fields}}}}
    es.index.reset_mock()
    r = as_drone.post('/dataexplorer/index/fields')
    assert not es.indices.index.called
    assert r.ok
    # index data_explorer_fields - test type "flattening"
    type_map = {
        'string': ['text', 'keyword'],
        'integer': ['long', 'integer', 'short', 'byte'],
        'float': ['double', 'float'],
        'date': ['date'],
        'boolean': ['boolean'],
        'object': ['object'],
        None: ['unrecognized'],
        # NOTE _get_field_type returns None for unrecognized field_types
    }
    # Reverse the map: es mapping type -> flattened handler type.
    type_map_r = {vi: k for k, v in type_map.iteritems() for vi in v}
    fields = {k + 'field': {'type': k} for k in type_map_r}
    es.indices.get_mapping.return_value = {'data_explorer': {'mappings': {'flywheel': {'properties': fields}}}}
    es.search.return_value = {'aggregations': {'results': {
        'sum_other_doc_count': 0,
        'buckets': [{'doc_count': 0}]}}}
    es.index.reset_mock()
    r = as_drone.post('/dataexplorer/index/fields')
    for field_name in fields:
        field_type = type_map_r[field_name.replace('field', '')]
        if field_type == 'object':
            continue
        if field_type == 'string':
            # String fields get a facet-candidacy aggregation first.
            es.search.assert_any_call(
                body={'aggs': {'results': {'terms': {'field': field_name + '.raw', 'size': 15}}}, 'size': 0},
                doc_type='flywheel',
                index='data_explorer')
        es.index.assert_any_call(
            body=json.dumps({'name': field_name, 'type': field_type, 'facet': False}),
            doc_type='flywheel_field',
            id=field_name,
            index='data_explorer_fields')
    assert r.ok
    # TODO index data_explorer_fields - test recursion
    # TODO index data_explorer_fields - test facet=True
def test_aggregate_field_values(as_public, as_drone, es):
    """Exercise /dataexplorer/search/fields/aggregate (typeahead).

    Covers auth/validation failures, unknown field names, disallowed field
    types, and the two aggregation shapes: terms aggregation for
    string/boolean fields and stats aggregation for numeric/date fields,
    each with and without a search string.
    """
    # try to get typeadhed w/o login
    r = as_public.post('/dataexplorer/search/fields/aggregate')
    assert r.status_code == 403
    # try to get typeadhed w/o body
    r = as_drone.post('/dataexplorer/search/fields/aggregate')
    assert r.status_code == 400
    # try to get typeadhed for non-existent field
    field_name, search_str, result = 'field', 'search', 'result'
    es.get.side_effect = TestTransportError
    r = as_drone.post('/dataexplorer/search/fields/aggregate', json={'field_name': field_name})
    assert r.status_code == 404
    es.get.side_effect = None
    # try to get typeadhed for a field type that's not allowed
    es.get.return_value = {'_source': {'type': 'test'}}
    r = as_drone.post('/dataexplorer/search/fields/aggregate', json={'field_name': field_name})
    assert r.status_code == 400
    # get typeahead w/o search string for string|boolean field type
    es.get.return_value = {'_source': {'type': 'string'}}
    es.search.return_value = {'aggregations': {'results': result}}
    r = as_drone.post('/dataexplorer/search/fields/aggregate', json={'field_name': field_name})
    es.search.assert_called_with(
        body={'aggs': {'results': {'terms': {'field': field_name + '.raw', 'size': 15, 'missing': 'null'}}},
              'query': {'bool': {
                  'filter': [{'term': {'deleted': False}}],
                  'must': {'match_all': {}}}},
              'size': 0},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json == result
    # get typeahead w/ search string for string|boolean field type
    r = as_drone.post('/dataexplorer/search/fields/aggregate', json={'field_name': field_name, 'search_string': search_str})
    es.search.assert_called_with(
        body={'aggs': {'results': {'terms': {'field': field_name + '.raw', 'size': 15, 'missing': 'null'}}},
              'query': {'bool': {
                  'filter': [{'term': {'deleted': False}}],
                  'must': {'match': {'field': search_str}}}},
              'size': 0},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json == result
    # get typeahead w/o search string for integer|float|date field type
    es.get.return_value = {'_source': {'type': 'integer'}}
    r = as_drone.post('/dataexplorer/search/fields/aggregate', json={'field_name': field_name})
    es.search.assert_called_with(
        body={'aggs': {'results': {'stats': {'field': field_name}}},
              'query': {'bool': {
                  'filter': [{'term': {'deleted': False}}],
                  'must': {'match_all': {}}}},
              'size': 0},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json == result
    # get typeahead w/ search string for integer|float|date field type
    r = as_drone.post('/dataexplorer/search/fields/aggregate', json={'field_name': field_name, 'search_string': search_str})
    es.search.assert_called_with(
        body={'aggs': {'results': {'stats': {'field': field_name}}},
              'query': {'bool': {
                  'filter': [{'term': {'deleted': False}}],
                  'must': {'match': {'field': search_str}}}},
              'size': 0},
        doc_type='flywheel',
        index='data_explorer')
    assert r.ok
    assert r.json == result
| mit | 92995a6b9c2c32039e55dc52514bbc94 | 39.292276 | 150 | 0.528912 | 3.694487 | false | false | false | false |
pytest-dev/pytest | testing/test_setuponly.py | 12 | 8161 | import sys
import pytest
from _pytest.config import ExitCode
from _pytest.pytester import Pytester
@pytest.fixture(params=["--setup-only", "--setup-plan", "--setup-show"], scope="module")
def mode(request):
    """Parametrize each test over the three setup-reporting CLI flags."""
    return request.param
def test_show_only_active_fixtures(
    pytester: Pytester, mode, dummy_yaml_custom_test
) -> None:
    """Setup report shows only fixtures actually used by the test (_arg0 is unused)."""
    pytester.makepyfile(
        '''
        import pytest
        @pytest.fixture
        def _arg0():
            """hidden arg0 fixture"""
        @pytest.fixture
        def arg1():
            """arg1 docstring"""
        def test_arg1(arg1):
            pass
    '''
    )
    result = pytester.runpytest(mode)
    assert result.ret == 0
    result.stdout.fnmatch_lines(
        ["*SETUP    F arg1*", "*test_arg1 (fixtures used: arg1)*", "*TEARDOWN F arg1*"]
    )
    result.stdout.no_fnmatch_line("*_arg0*")
def test_show_different_scopes(pytester: Pytester, mode) -> None:
    """Session-scoped fixtures are reported with an 'S' marker, function-scoped with 'F'."""
    p = pytester.makepyfile(
        '''
        import pytest
        @pytest.fixture
        def arg_function():
            """function scoped fixture"""
        @pytest.fixture(scope='session')
        def arg_session():
            """session scoped fixture"""
        def test_arg1(arg_session, arg_function):
            pass
    '''
    )
    result = pytester.runpytest(mode, p)
    assert result.ret == 0
    result.stdout.fnmatch_lines(
        [
            "SETUP    S arg_session*",
            "*SETUP    F arg_function*",
            "*test_arg1 (fixtures used: arg_function, arg_session)*",
            "*TEARDOWN F arg_function*",
            "TEARDOWN S arg_session*",
        ]
    )
def test_show_nested_fixtures(pytester: Pytester, mode) -> None:
    """A function-scoped fixture shadowing a same-named session fixture reports both."""
    pytester.makeconftest(
        '''
        import pytest
        @pytest.fixture(scope='session')
        def arg_same():
            """session scoped fixture"""
    '''
    )
    p = pytester.makepyfile(
        '''
        import pytest
        @pytest.fixture(scope='function')
        def arg_same(arg_same):
            """function scoped fixture"""
        def test_arg1(arg_same):
            pass
    '''
    )
    result = pytester.runpytest(mode, p)
    assert result.ret == 0
    result.stdout.fnmatch_lines(
        [
            "SETUP    S arg_same*",
            "*SETUP    F arg_same (fixtures used: arg_same)*",
            "*test_arg1 (fixtures used: arg_same)*",
            "*TEARDOWN F arg_same*",
            "TEARDOWN S arg_same*",
        ]
    )
def test_show_fixtures_with_autouse(pytester: Pytester, mode) -> None:
p = pytester.makepyfile(
'''
import pytest
@pytest.fixture
def arg_function():
"""function scoped fixture"""
@pytest.fixture(scope='session', autouse=True)
def arg_session():
"""session scoped fixture"""
def test_arg1(arg_function):
pass
'''
)
result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"SETUP S arg_session*",
"*SETUP F arg_function*",
"*test_arg1 (fixtures used: arg_function, arg_session)*",
]
)
def test_show_fixtures_with_parameters(pytester: Pytester, mode) -> None:
pytester.makeconftest(
'''
import pytest
@pytest.fixture(scope='session', params=['foo', 'bar'])
def arg_same():
"""session scoped fixture"""
'''
)
p = pytester.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
def arg_other(arg_same):
"""function scoped fixture"""
def test_arg1(arg_other):
pass
'''
)
result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"SETUP S arg_same?'foo'?",
"TEARDOWN S arg_same?'foo'?",
"SETUP S arg_same?'bar'?",
"TEARDOWN S arg_same?'bar'?",
]
)
def test_show_fixtures_with_parameter_ids(pytester: Pytester, mode) -> None:
pytester.makeconftest(
'''
import pytest
@pytest.fixture(
scope='session', params=['foo', 'bar'], ids=['spam', 'ham'])
def arg_same():
"""session scoped fixture"""
'''
)
p = pytester.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
def arg_other(arg_same):
"""function scoped fixture"""
def test_arg1(arg_other):
pass
'''
)
result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
["SETUP S arg_same?'spam'?", "SETUP S arg_same?'ham'?"]
)
def test_show_fixtures_with_parameter_ids_function(pytester: Pytester, mode) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=['foo', 'bar'], ids=lambda p: p.upper())
def foobar():
pass
def test_foobar(foobar):
pass
"""
)
result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
["*SETUP F foobar?'FOO'?", "*SETUP F foobar?'BAR'?"]
)
def test_dynamic_fixture_request(pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def dynamically_requested_fixture():
pass
@pytest.fixture()
def dependent_fixture(request):
request.getfixturevalue('dynamically_requested_fixture')
def test_dyn(dependent_fixture):
pass
"""
)
result = pytester.runpytest("--setup-only", p)
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*SETUP F dynamically_requested_fixture",
"*TEARDOWN F dynamically_requested_fixture",
]
)
def test_capturing(pytester: Pytester) -> None:
    # Output written to stdout/stderr during fixture setup must be captured
    # and then displayed when a dependent fixture fails during --setup-only.
    p = pytester.makepyfile(
        """
        import pytest, sys
        @pytest.fixture()
        def one():
            sys.stdout.write('this should be captured')
            sys.stderr.write('this should also be captured')
        @pytest.fixture()
        def two(one):
            assert 0
        def test_capturing(two):
            pass
        """
    )
    result = pytester.runpytest("--setup-only", p)
    result.stdout.fnmatch_lines(
        ["this should be captured", "this should also be captured"]
    )
def test_show_fixtures_and_execute_test(pytester: Pytester) -> None:
"""Verify that setups are shown and tests are executed."""
p = pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert True
def test_arg(arg):
assert False
"""
)
result = pytester.runpytest("--setup-show", p)
assert result.ret == 1
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)F*", "*TEARDOWN F arg*"]
)
def test_setup_show_with_KeyboardInterrupt_in_test(pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
pass
def test_arg(arg):
raise KeyboardInterrupt()
"""
)
result = pytester.runpytest("--setup-show", p, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
"*SETUP F arg*",
"*test_arg (fixtures used: arg)*",
"*TEARDOWN F arg*",
"*! KeyboardInterrupt !*",
"*= no tests ran in *",
]
)
assert result.ret == ExitCode.INTERRUPTED
def test_show_fixture_action_with_bytes(pytester: Pytester) -> None:
# Issue 7126, BytesWarning when using --setup-show with bytes parameter
test_file = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('data', [b'Hello World'])
def test_data(data):
pass
"""
)
result = pytester.run(
sys.executable, "-bb", "-m", "pytest", "--setup-show", str(test_file)
)
assert result.ret == 0
| mit | a662ac43d01762ca758bb141b793c97f | 24.663522 | 88 | 0.537557 | 3.994616 | false | true | false | false |
pytest-dev/pytest | src/_pytest/pathlib.py | 1 | 25096 | import atexit
import contextlib
import fnmatch
import importlib.util
import itertools
import os
import shutil
import sys
import uuid
import warnings
from enum import Enum
from errno import EBADF
from errno import ELOOP
from errno import ENOENT
from errno import ENOTDIR
from functools import partial
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from pathlib import Path
from pathlib import PurePath
from posixpath import sep as posix_sep
from types import ModuleType
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Set
from typing import TypeVar
from typing import Union
from _pytest.compat import assert_never
from _pytest.outcomes import skip
from _pytest.warning_types import PytestWarning
LOCK_TIMEOUT = 60 * 60 * 24 * 3
_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
# The following function, variables and comments were
# copied from cpython 3.9 Lib/pathlib.py file.
# EBADF - guard against macOS `stat` throwing EBADF
_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
_IGNORED_WINERRORS = (
21, # ERROR_NOT_READY - drive exists but is not accessible
1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
)
def _ignore_error(exception):
return (
getattr(exception, "errno", None) in _IGNORED_ERRORS
or getattr(exception, "winerror", None) in _IGNORED_WINERRORS
)
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
    """Return the path of the ``.lock`` marker file inside *path*."""
    return path.joinpath(".lock")
def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
"""Handle known read-only errors during rmtree.
The returned value is used only by our own tests.
"""
exctype, excvalue = exc[:2]
# Another process removed the file in the middle of the "rm_rf" (xdist for example).
# More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
if isinstance(excvalue, FileNotFoundError):
return False
if not isinstance(excvalue, PermissionError):
warnings.warn(
PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}")
)
return False
if func not in (os.rmdir, os.remove, os.unlink):
if func not in (os.open,):
warnings.warn(
PytestWarning(
"(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
func, path, exctype, excvalue
)
)
)
return False
# Chmod + retry.
import stat
def chmod_rw(p: str) -> None:
mode = os.stat(p).st_mode
os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)
# For files, we need to recursively go upwards in the directories to
# ensure they all are also writable.
p = Path(path)
if p.is_file():
for parent in p.parents:
chmod_rw(str(parent))
# Stop when we reach the original path passed to rm_rf.
if parent == start_path:
break
chmod_rw(str(path))
func(path)
return True
def ensure_extended_length_path(path: Path) -> Path:
"""Get the extended-length version of a path (Windows).
On Windows, by default, the maximum length of a path (MAX_PATH) is 260
characters, and operations on paths longer than that fail. But it is possible
to overcome this by converting the path to "extended-length" form before
performing the operation:
https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation
On Windows, this function returns the extended-length absolute version of path.
On other platforms it returns path unchanged.
"""
if sys.platform.startswith("win32"):
path = path.resolve()
path = Path(get_extended_length_path_str(str(path)))
return path
def get_extended_length_path_str(path: str) -> str:
    """Return *path* in Windows extended-length form.

    Paths already carrying an extended-length prefix are returned unchanged;
    UNC paths get the ``\\\\?\\UNC\\`` variant, everything else ``\\\\?\\``.
    """
    extended = "\\\\?\\"
    extended_unc = "\\\\?\\UNC\\"
    if path.startswith((extended, extended_unc)):
        # Already extended-length: nothing to do.
        return path
    if path.startswith("\\\\"):
        # UNC path: replace the leading double backslash with the UNC prefix.
        return extended_unc + path[2:]
    return extended + path
def rm_rf(path: Path) -> None:
"""Remove the path contents recursively, even if some elements
are read-only."""
path = ensure_extended_length_path(path)
onerror = partial(on_rm_rf_error, start_path=path)
shutil.rmtree(str(path), onerror=onerror)
def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
    """Yield every entry of *root* whose name starts with *prefix*, case-insensitively."""
    wanted = prefix.lower()
    return (entry for entry in root.iterdir() if entry.name.lower().startswith(wanted))
def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
    """Yield, for each path, the part of its name following *prefix*.

    :param iter: Iterator over path names.
    :param prefix: Expected prefix of the path names.
    """
    cut = len(prefix)
    return (p.name[cut:] for p in iter)
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
    """Yield the name suffixes of entries in *root* that start with *prefix*.

    Composes :func:`find_prefixed` and :func:`extract_suffixes`.
    """
    return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num) -> int:
    """Parse a numbered-dir suffix into an int; return -1 if it is not numeric."""
    try:
        value = int(maybe_num)
    except ValueError:
        value = -1
    return value
def _force_symlink(
root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
) -> None:
"""Helper to create the current symlink.
It's full of race conditions that are reasonably OK to ignore
for the context of best effort linking to the latest test run.
The presumption being that in case of much parallelism
the inaccuracy is going to be acceptable.
"""
current_symlink = root.joinpath(target)
try:
current_symlink.unlink()
except OSError:
pass
try:
current_symlink.symlink_to(link_to)
except Exception:
pass
def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path:
    """Create a directory with an increased number as suffix for the given prefix.

    :param root: Directory that holds the numbered directories.
    :param prefix: Name prefix; the new directory is named ``<prefix><N>`` where
        N is one higher than the largest existing suffix.
    :param mode: Permission bits passed to ``mkdir``.
    :raises OSError: If no directory could be created after 10 attempts.
    """
    for i in range(10):
        # try up to 10 times to create the folder
        max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
        new_number = max_existing + 1
        new_path = root.joinpath(f"{prefix}{new_number}")
        try:
            new_path.mkdir(mode=mode)
        except Exception:
            # Creation can race with other processes choosing the same number;
            # loop to recompute the highest suffix and retry.
            pass
        else:
            # Best-effort "<prefix>current" symlink pointing at the newest dir.
            _force_symlink(root, prefix + "current", new_path)
            return new_path
    else:
        raise OSError(
            "could not create numbered dir with prefix "
            "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
        )
def create_cleanup_lock(p: Path) -> Path:
"""Create a lock to prevent premature folder cleanup."""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except FileExistsError as e:
raise OSError(f"cannot create lockfile in {p}") from e
else:
pid = os.getpid()
spid = str(pid).encode()
os.write(fd, spid)
os.close(fd)
if not lock_path.is_file():
raise OSError("lock path got renamed after successful creation")
return lock_path
def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
"""Register a cleanup function for removing a lock, by default on atexit."""
pid = os.getpid()
def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None:
current_pid = os.getpid()
if current_pid != original_pid:
# fork
return
try:
lock_path.unlink()
except OSError:
pass
return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path: Path) -> None:
"""Remove a numbered directory if its lock can be obtained and it does
not seem to be in use."""
path = ensure_extended_length_path(path)
lock_path = None
try:
lock_path = create_cleanup_lock(path)
parent = path.parent
garbage = parent.joinpath(f"garbage-{uuid.uuid4()}")
path.rename(garbage)
rm_rf(garbage)
except OSError:
# known races:
# * other process did a cleanup at the same time
# * deletable folder was found
# * process cwd (Windows)
return
finally:
# If we created the lock, ensure we remove it even if we failed
# to properly remove the numbered dir.
if lock_path is not None:
try:
lock_path.unlink()
except OSError:
pass
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
    """Check if `path` is deletable based on whether the lock file is expired.

    :param path: Numbered directory candidate for deletion.
    :param consider_lock_dead_if_created_before: mtime threshold (epoch seconds);
        a lock file older than this is treated as abandoned and removed.
    :return: True if the directory may be deleted.
    """
    # Never follow a symlink out of the numbered-dir area.
    if path.is_symlink():
        return False
    lock = get_lock_path(path)
    try:
        if not lock.is_file():
            # No lock file at all: nothing is holding the directory.
            return True
    except OSError:
        # we might not have access to the lock file at all, in this case assume
        # we don't have access to the entire directory (#7491).
        return False
    try:
        lock_time = lock.stat().st_mtime
    except Exception:
        return False
    else:
        if lock_time < consider_lock_dead_if_created_before:
            # We want to ignore any errors while trying to remove the lock such as:
            # - PermissionDenied, like the file permissions have changed since the lock creation;
            # - FileNotFoundError, in case another pytest process got here first;
            # and any other cause of failure.
            with contextlib.suppress(OSError):
                lock.unlink()
                return True
    return False
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
    """Try to cleanup a folder if we can ensure it's deletable.

    :param path: Numbered directory candidate for deletion.
    :param consider_lock_dead_if_created_before: mtime threshold forwarded to
        :func:`ensure_deletable`.
    """
    if ensure_deletable(path, consider_lock_dead_if_created_before):
        maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
"""List candidates for numbered directories to be removed - follows py.path."""
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
paths, paths2 = itertools.tee(paths)
numbers = map(parse_num, extract_suffixes(paths2, prefix))
for path, number in zip(paths, numbers):
if number <= max_delete:
yield path
def cleanup_dead_symlink(root: Path):
    """Remove symlinks in *root* whose targets no longer exist."""
    for entry in root.iterdir():
        if entry.is_symlink() and not entry.resolve().exists():
            entry.unlink()
def cleanup_numbered_dir(
root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
"""Cleanup for lock driven numbered directories."""
if not root.exists():
return
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
try_cleanup(path, consider_lock_dead_if_created_before)
cleanup_dead_symlink(root)
def make_numbered_dir_with_cleanup(
root: Path,
prefix: str,
keep: int,
lock_timeout: float,
mode: int,
) -> Path:
"""Create a numbered dir with a cleanup lock and remove old ones."""
e = None
for i in range(10):
try:
p = make_numbered_dir(root, prefix, mode)
# Only lock the current dir when keep is not 0
if keep != 0:
lock_path = create_cleanup_lock(p)
register_cleanup_lock_removal(lock_path)
except Exception as exc:
e = exc
else:
consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
# Register a cleanup for program exit
atexit.register(
cleanup_numbered_dir,
root,
prefix,
keep,
consider_lock_dead_if_created_before,
)
return p
assert e is not None
raise e
def resolve_from_str(input: str, rootpath: Path) -> Path:
    """Resolve a user-supplied path string against *rootpath*.

    ``~`` and environment variables are expanded first; an absolute result is
    returned as-is, a relative one is anchored under *rootpath*.
    """
    expanded = expandvars(expanduser(input))
    if isabs(expanded):
        return Path(expanded)
    return rootpath.joinpath(expanded)
def fnmatch_ex(pattern: str, path: Union[str, "os.PathLike[str]"]) -> bool:
    """A port of FNMatcher from py.path.common which works with PurePath() instances.

    Unlike ``PurePath.match()``, which applies ``**`` per path component, this
    matches the *whole* path against the pattern, so e.g.
    "tests/foo/bar/doc/test_foo.py" matches "tests/**/doc/test*.py" here but
    not with ``PurePath.match()``.  Kept for backward compatibility with
    existing settings that rely on this logic.

    References:
    * https://bugs.python.org/issue29249
    * https://bugs.python.org/issue34731
    """
    purepath = PurePath(path)
    if sys.platform.startswith("win") and sep not in pattern and posix_sep in pattern:
        # On Windows, a pattern written with only Posix separators is
        # normalized to the native separator before matching.
        pattern = pattern.replace(posix_sep, sep)
    # Match against the bare name unless the pattern itself spans directories.
    subject = purepath.name if sep not in pattern else str(purepath)
    if purepath.is_absolute() and not os.path.isabs(pattern):
        # Let a relative pattern match the tail of an absolute path.
        pattern = f"*{os.sep}{pattern}"
    return fnmatch.fnmatch(subject, pattern)
def parts(s: str) -> Set[str]:
    """Return every ancestor prefix of *s* (including *s* itself), split on the
    OS path separator; an empty prefix (leading separator) maps to ``sep``."""
    result: Set[str] = set()
    pieces = s.split(sep)
    for end in range(1, len(pieces) + 1):
        joined = sep.join(pieces[:end])
        result.add(joined or sep)
    return result
def symlink_or_skip(src, dst, **kwargs):
    """Make a symlink from *src* to *dst*, or skip the current test if the
    platform raises ``OSError`` when creating symlinks."""
    try:
        os.symlink(str(src), str(dst), **kwargs)
    except OSError as e:
        skip(f"symlinks not supported: {e}")
class ImportMode(Enum):
"""Possible values for `mode` parameter of `import_path`."""
prepend = "prepend"
append = "append"
importlib = "importlib"
class ImportPathMismatchError(ImportError):
"""Raised on import_path() if there is a mismatch of __file__'s.
This can happen when `import_path` is called multiple times with different filenames that has
the same basename but reside in packages
(for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
"""
def import_path(
    p: Union[str, "os.PathLike[str]"],
    *,
    mode: Union[str, ImportMode] = ImportMode.prepend,
    root: Path,
) -> ModuleType:
    """Import and return a module from the given path, which can be a file (a module) or
    a directory (a package).
    The import mechanism used is controlled by the `mode` parameter:
    * `mode == ImportMode.prepend`: the directory containing the module (or package, taking
      `__init__.py` files into account) will be put at the *start* of `sys.path` before
      being imported with `__import__.
    * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
      to the end of `sys.path`, if not already in `sys.path`.
    * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
      to import the module, which avoids having to use `__import__` and muck with `sys.path`
      at all. It effectively allows having same-named test modules in different places.
    :param root:
        Used as an anchor when mode == ImportMode.importlib to obtain
        a unique name for the module being imported so it can safely be stored
        into ``sys.modules``.
    :raises ImportPathMismatchError:
        If after importing the given `path` and the module `__file__`
        are different. Only raised in `prepend` and `append` modes.
    """
    mode = ImportMode(mode)
    path = Path(p)
    if not path.exists():
        raise ImportError(path)
    if mode is ImportMode.importlib:
        # importlib mode: derive a unique dotted name from the path so modules
        # with the same basename in different directories don't collide.
        module_name = module_name_from_path(path, root)
        # Give installed meta-path finders first chance at the module.
        for meta_importer in sys.meta_path:
            spec = meta_importer.find_spec(module_name, [str(path.parent)])
            if spec is not None:
                break
        else:
            spec = importlib.util.spec_from_file_location(module_name, str(path))
        if spec is None:
            raise ImportError(f"Can't find module {module_name} at location {path}")
        mod = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = mod
        spec.loader.exec_module(mod)  # type: ignore[union-attr]
        insert_missing_modules(sys.modules, module_name)
        return mod
    # prepend/append modes: compute the package root and dotted module name
    # from the nearest ancestor without an __init__.py.
    pkg_path = resolve_package_path(path)
    if pkg_path is not None:
        pkg_root = pkg_path.parent
        names = list(path.with_suffix("").relative_to(pkg_root).parts)
        if names[-1] == "__init__":
            names.pop()
        module_name = ".".join(names)
    else:
        pkg_root = path.parent
        module_name = path.stem
    # Change sys.path permanently: restoring it at the end of this function would cause surprising
    # problems because of delayed imports: for example, a conftest.py file imported by this function
    # might have local imports, which would fail at runtime if we restored sys.path.
    if mode is ImportMode.append:
        if str(pkg_root) not in sys.path:
            sys.path.append(str(pkg_root))
    elif mode is ImportMode.prepend:
        if str(pkg_root) != sys.path[0]:
            sys.path.insert(0, str(pkg_root))
    else:
        assert_never(mode)
    importlib.import_module(module_name)
    mod = sys.modules[module_name]
    if path.name == "__init__.py":
        return mod
    # Detect importing a same-named module from a different location than
    # requested (can be suppressed via PY_IGNORE_IMPORTMISMATCH=1).
    ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
    if ignore != "1":
        module_file = mod.__file__
        if module_file is None:
            raise ImportPathMismatchError(module_name, module_file, path)
        if module_file.endswith((".pyc", ".pyo")):
            module_file = module_file[:-1]
        if module_file.endswith(os.path.sep + "__init__.py"):
            module_file = module_file[: -(len(os.path.sep + "__init__.py"))]
        try:
            is_same = _is_same(str(path), module_file)
        except FileNotFoundError:
            is_same = False
        if not is_same:
            raise ImportPathMismatchError(module_name, module_file, path)
    return mod
# Implement a special _is_same function on Windows which returns True if the two filenames
# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678).
if sys.platform.startswith("win"):
def _is_same(f1: str, f2: str) -> bool:
return Path(f1) == Path(f2) or os.path.samefile(f1, f2)
else:
def _is_same(f1: str, f2: str) -> bool:
return os.path.samefile(f1, f2)
def module_name_from_path(path: Path, root: Path) -> str:
    """
    Return a dotted module name based on the given path, anchored on root.

    For example: path="projects/src/tests/test_foo.py" and root="/projects", the
    resulting module name will be "src.tests.test_foo".
    """
    stripped = path.with_suffix("")
    try:
        path_parts = stripped.relative_to(root).parts
    except ValueError:
        # Not under *root*: fall back to the full path, dropping the anchor
        # ("/" or "d:\\" depending on the platform).
        path_parts = stripped.parts[1:]
    return ".".join(path_parts)
def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) -> None:
"""
Used by ``import_path`` to create intermediate modules when using mode=importlib.
When we want to import a module as "src.tests.test_foo" for example, we need
to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo",
otherwise "src.tests.test_foo" is not importable by ``__import__``.
"""
module_parts = module_name.split(".")
while module_name:
if module_name not in modules:
try:
# If sys.meta_path is empty, calling import_module will issue
# a warning and raise ModuleNotFoundError. To avoid the
# warning, we check sys.meta_path explicitly and raise the error
# ourselves to fall back to creating a dummy module.
if not sys.meta_path:
raise ModuleNotFoundError
importlib.import_module(module_name)
except ModuleNotFoundError:
module = ModuleType(
module_name,
doc="Empty module created by pytest's importmode=importlib.",
)
modules[module_name] = module
module_parts.pop(-1)
module_name = ".".join(module_parts)
def resolve_package_path(path: Path) -> Optional[Path]:
"""Return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Returns None if it can not be determined.
"""
result = None
for parent in itertools.chain((path,), path.parents):
if parent.is_dir():
if not parent.joinpath("__init__.py").is_file():
break
if not parent.name.isidentifier():
break
result = parent
return result
def visit(
path: Union[str, "os.PathLike[str]"], recurse: Callable[["os.DirEntry[str]"], bool]
) -> Iterator["os.DirEntry[str]"]:
"""Walk a directory recursively, in breadth-first order.
Entries at each directory level are sorted.
"""
# Skip entries with symlink loops and other brokenness, so the caller doesn't
# have to deal with it.
entries = []
for entry in os.scandir(path):
try:
entry.is_file()
except OSError as err:
if _ignore_error(err):
continue
raise
entries.append(entry)
entries.sort(key=lambda entry: entry.name)
yield from entries
for entry in entries:
if entry.is_dir() and recurse(entry):
yield from visit(entry.path, recurse)
def absolutepath(path: Union[Path, str]) -> Path:
    """Convert a path to an absolute path using os.path.abspath.

    Prefer this over Path.resolve() (see #6523).
    Prefer this over Path.absolute() (not public, doesn't normalize).
    """
    raw = str(path)
    return Path(os.path.abspath(raw))
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
    """Return the deepest common ancestor of the two paths, or None if they
    share none (e.g. one relative and one absolute)."""
    try:
        shared = os.path.commonpath((str(path1), str(path2)))
    except ValueError:
        return None
    return Path(shared)
def bestrelpath(directory: Path, dest: Path) -> str:
    """Return a string which is a relative path from directory to dest such
    that directory/bestrelpath == dest.
    The paths must be either both absolute or both relative.
    If no such path can be determined, returns dest.
    """
    assert isinstance(directory, Path)
    assert isinstance(dest, Path)
    if dest == directory:
        return os.curdir
    # Find the longest common directory.
    base = commonpath(directory, dest)
    # Can be the case on Windows for two absolute paths on different drives.
    # Can be the case for two relative paths without common prefix.
    # Can be the case for a relative path and an absolute path.
    if not base:
        return str(dest)
    reldirectory = directory.relative_to(base)
    reldest = dest.relative_to(base)
    # Compose ".." segments up to the common base, then descend to dest.
    return os.path.join(
        # Back from directory to base.
        *([os.pardir] * len(reldirectory.parts)),
        # Forward from base to dest.
        *reldest.parts,
    )
# Originates from py. path.local.copy(), with siginficant trims and adjustments.
# TODO(py38): Replace with shutil.copytree(..., symlinks=True, dirs_exist_ok=True)
def copytree(source: Path, target: Path) -> None:
"""Recursively copy a source directory to target."""
assert source.is_dir()
for entry in visit(source, recurse=lambda entry: not entry.is_symlink()):
x = Path(entry)
relpath = x.relative_to(source)
newx = target / relpath
newx.parent.mkdir(exist_ok=True)
if x.is_symlink():
newx.symlink_to(os.readlink(x))
elif x.is_file():
shutil.copyfile(x, newx)
elif x.is_dir():
newx.mkdir(exist_ok=True)
| mit | 7edb1db3344d0f33aedf4a7893058c12 | 32.550802 | 102 | 0.632571 | 3.886033 | false | false | false | false |
pytest-dev/pytest | src/_pytest/compat.py | 1 | 12820 | """Python version compatibility code."""
import enum
import functools
import inspect
import os
import sys
from inspect import Parameter
from inspect import signature
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Generic
from typing import NoReturn
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import attr
import py
# fmt: off
# Workaround for https://github.com/sphinx-doc/sphinx/issues/10351.
# If `overload` is imported from `compat` instead of from `typing`,
# Sphinx doesn't recognize it as `overload` and the API docs for
# overloaded functions look good again. But type checkers handle
# it fine.
# fmt: on
if True:
from typing import overload as overload
if TYPE_CHECKING:
from typing_extensions import Final
_T = TypeVar("_T")
_S = TypeVar("_S")
#: constant to prepare valuing pylib path replacements/lazy proxies later on
# intended for removal in pytest 8.0 or 9.0
# fmt: off
# intentional space to create a fake difference for the verification
LEGACY_PATH = py.path. local
# fmt: on
def legacy_path(path: Union[str, "os.PathLike[str]"]) -> LEGACY_PATH:
"""Internal wrapper to prepare lazy proxies for legacy_path instances"""
return LEGACY_PATH(path)
# fmt: off
# Singleton type for NOTSET, as described in:
# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
class NotSetType(enum.Enum):
token = 0
NOTSET: "Final" = NotSetType.token # noqa: E305
# fmt: on
if sys.version_info >= (3, 8):
import importlib.metadata
importlib_metadata = importlib.metadata
else:
import importlib_metadata as importlib_metadata # noqa: F401
def _format_args(func: Callable[..., Any]) -> str:
return str(signature(func))
def is_generator(func: object) -> bool:
    """Return True for a plain (non-async) generator function."""
    if not inspect.isgeneratorfunction(func):
        return False
    return not iscoroutinefunction(func)
def iscoroutinefunction(func: object) -> bool:
    """Return True if func is a coroutine function (a function defined with async
    def syntax, and doesn't contain yield), or a function decorated with
    @asyncio.coroutine.

    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
    importing asyncio directly, which in turns also initializes the "logging"
    module as a side-effect (see issue #8).
    """
    if inspect.iscoroutinefunction(func):
        return True
    # Functions decorated with @asyncio.coroutine carry this marker attribute.
    return getattr(func, "_is_coroutine", False)
def is_async_function(func: object) -> bool:
    """Return True if the given function seems to be an async function or
    an async generator."""
    return inspect.isasyncgenfunction(func) or iscoroutinefunction(func)
def getlocation(function, curdir: Optional[str] = None) -> str:
function = get_real_func(function)
fn = Path(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None:
try:
relfn = fn.relative_to(curdir)
except ValueError:
pass
else:
return "%s:%d" % (relfn, lineno + 1)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function) -> int:
"""Return number of arguments used up by mock arguments (if any)."""
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(
function: Callable[..., Any],
*,
name: str = "",
is_method: bool = False,
cls: Optional[type] = None,
) -> Tuple[str, ...]:
"""Return the names of a function's mandatory arguments.
Should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
"""
# TODO(RonnyPfannschmidt): This function should be refactored when we
# revisit fixtures. The fixture mechanism should ask the node for
# the fixture names, and not try to obtain directly from the
# function object well after collection has occurred.
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
from _pytest.outcomes import fail
fail(
f"Could not determine arguments of {function!r}: {e}",
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
# Not using `getattr` because we don't want to resolve the staticmethod.
# Not using `cls.__dict__` because we want to check the entire MRO.
cls
and not isinstance(
inspect.getattr_static(cls, name, default=None), staticmethod
)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
    """Return the names of *function*'s arguments that carry default values.

    Mirrors the start of getfuncargnames, but collects exactly the arguments
    excluded there because they have defaults.
    """
    wanted_kinds = (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
    return tuple(
        param.name
        for param in signature(function).parameters.values()
        if param.kind in wanted_kinds and param.default is not Parameter.empty
    )
_non_printable_ascii_translate_table = {
i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s: str) -> str:
    """Replace non-printable ASCII characters in *s* with ``\\xNN`` escapes."""
    return s.translate(_non_printable_ascii_translate_table)
# Types treated as "string-like" data (used with isinstance checks).
STRING_TYPES = bytes, str
def _bytes_to_ascii(val: bytes) -> str:
    """Decode *val* as ASCII, replacing undecodable bytes with ``\\xNN`` escapes."""
    return val.decode("ascii", errors="backslashreplace")
def ascii_escaped(val: Union[bytes, str]) -> str:
    r"""If val is pure ASCII, return it as an str, otherwise, escape
    bytes objects into a sequence of escaped bytes:

    b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'

    and escapes unicode objects into a sequence of escaped unicode
    ids, e.g.:

    r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'

    Note:
       The obvious "v.decode('unicode-escape')" will return
       valid UTF-8 unicode if it finds them in bytes, but we
       want to return escaped bytes for any byte, even if they match
       a UTF-8 string.
    """
    if isinstance(val, bytes):
        escaped = _bytes_to_ascii(val)
    else:
        escaped = val.encode("unicode_escape").decode("ascii")
    return _translate_non_printable(escaped)
@attr.s
class _PytestWrapper:
    """Dummy wrapper around a function object for internal use only.
    Used to correctly unwrap the underlying function object when we are
    creating fixtures, because we wrap the function object ourselves with a
    decorator to issue warnings when the fixture function is called directly.
    """

    # The wrapped function object; get_real_func() stops unwrapping here.
    obj = attr.ib()
def get_real_func(obj):
    """Get the real function object of the (possibly) wrapped object by
    functools.wraps or functools.partial."""
    start_obj = obj
    # Bounded unwrap loop guards against pathological/cyclic wrapping chains.
    for _ in range(100):
        # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
        # to trigger a warning if it gets called directly instead of by pytest: we don't
        # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
        wrapped = getattr(obj, "__pytest_wrapped__", None)
        if isinstance(wrapped, _PytestWrapper):
            obj = wrapped.obj
            break
        wrapped = getattr(obj, "__wrapped__", None)
        if wrapped is None:
            break
        obj = wrapped
    else:
        from _pytest._io.saferepr import saferepr

        raise ValueError(
            ("could not find real function of {start}\nstopped at {current}").format(
                start=saferepr(start_obj), current=saferepr(obj)
            )
        )
    if isinstance(obj, functools.partial):
        obj = obj.func
    return obj
def get_real_method(obj, holder):
    """Attempt to obtain the real function object that might be wrapping
    ``obj``, while at the same time returning a bound method to ``holder`` if
    the original object was a bound method."""
    try:
        was_bound = hasattr(obj, "__func__")
        obj = get_real_func(obj)
    except Exception:  # pragma: no cover
        return obj
    if was_bound and hasattr(obj, "__get__") and callable(obj.__get__):
        # Re-bind the unwrapped function to the holder instance.
        obj = obj.__get__(holder)
    return obj
def getimfunc(func):
    """Return the underlying function of a bound method, or *func* unchanged."""
    return getattr(func, "__func__", func)
def safe_getattr(object: Any, name: str, default: Any) -> Any:
    """Like getattr but return default upon any Exception or any OutcomeException.

    Attribute access can potentially fail for 'evil' Python objects.
    See issue #214.
    It catches OutcomeException because of #2490 (issue #580), new outcomes
    are derived from BaseException instead of Exception (for more details
    check #2707).
    """
    # Imported lazily to avoid an import cycle with _pytest.outcomes.
    from _pytest.outcomes import TEST_OUTCOME

    try:
        # getattr already swallows AttributeError via the default; the
        # except clause covers outcome exceptions raised by evil __getattr__s.
        return getattr(object, name, default)
    except TEST_OUTCOME:
        return default
def safe_isclass(obj: object) -> bool:
    """Ignore any exception via isinstance on Python 3."""
    try:
        result = inspect.isclass(obj)
    except Exception:
        return False
    return result
# Import `final` in a way that satisfies both type checkers and all supported
# runtimes:
# - under TYPE_CHECKING, pick typing/typing_extensions based on version so
#   static analysis sees the real decorator;
# - at runtime on Python >= 3.8, typing.final exists and is used directly;
# - on older runtimes fall back to a no-op decorator (final is advisory only).
if TYPE_CHECKING:
    if sys.version_info >= (3, 8):
        from typing import final as final
    else:
        from typing_extensions import final as final
elif sys.version_info >= (3, 8):
    from typing import final as final
else:

    def final(f):
        return f
if sys.version_info >= (3, 8):
    # Python 3.8+ ships functools.cached_property; re-export it directly.
    from functools import cached_property as cached_property
else:
    from typing import Type

    # Backport of functools.cached_property for Python < 3.8: compute the
    # value on first access and store it in the instance __dict__ under the
    # same name, so later lookups bypass the descriptor entirely.
    # NOTE: no class docstring on purpose -- "__doc__" is listed in
    # __slots__, and a docstring would conflict with that slot.
    class cached_property(Generic[_S, _T]):
        __slots__ = ("func", "__doc__")

        def __init__(self, func: Callable[[_S], _T]) -> None:
            self.func = func
            self.__doc__ = func.__doc__

        @overload
        def __get__(
            self, instance: None, owner: Optional[Type[_S]] = ...
        ) -> "cached_property[_S, _T]":
            ...

        @overload
        def __get__(self, instance: _S, owner: Optional[Type[_S]] = ...) -> _T:
            ...

        def __get__(self, instance, owner=None):
            # Class-level access returns the descriptor itself.
            if instance is None:
                return self
            # Non-data descriptor (no __set__): the __dict__ entry written
            # here shadows the descriptor on all subsequent lookups.
            value = instance.__dict__[self.func.__name__] = self.func(instance)
            return value
# Perform exhaustiveness checking.
#
# Consider this example:
#
# MyUnion = Union[int, str]
#
# def handle(x: MyUnion) -> int:
# if isinstance(x, int):
# return 1
# elif isinstance(x, str):
# return 2
# else:
# raise Exception('unreachable')
#
# Now suppose we add a new variant:
#
# MyUnion = Union[int, str, bytes]
#
# After doing this, we must remember ourselves to go and update the handle
# function to handle the new variant.
#
# With `assert_never` we can do better:
#
# // raise Exception('unreachable')
# return assert_never(x)
#
# Now, if we forget to handle the new variant, the type-checker will emit a
# compile-time error, instead of the runtime error we would have gotten
# previously.
#
# This also works for Enums (if you use `is` to compare) and Literals.
def assert_never(value: NoReturn) -> NoReturn:
    """Fail with an informative message; used for exhaustiveness checking.

    Raising explicitly (instead of a bare ``assert``) ensures the check is
    not stripped when Python runs with optimizations enabled (``-O``), which
    would otherwise make this function silently return ``None``.
    """
    raise AssertionError(f"Unhandled value: {value} ({type(value).__name__})")
| mit | 6e965772337c30e99b3cebfe41d59b6b | 29.743405 | 102 | 0.650234 | 3.88132 | false | false | false | false |
pytest-dev/pytest | src/_pytest/_py/path.py | 1 | 49158 | """local path implementation."""
from __future__ import annotations
import atexit
import fnmatch
import importlib.util
import io
import os
import posixpath
import sys
import uuid
import warnings
from contextlib import contextmanager
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import isabs
from os.path import isdir
from os.path import isfile
from os.path import islink
from os.path import normpath
from stat import S_ISDIR
from stat import S_ISLNK
from stat import S_ISREG
from typing import Any
from typing import Callable
from typing import cast
from typing import overload
from typing import TYPE_CHECKING
from . import error
if TYPE_CHECKING:
from typing import Literal
# Moved from local.py.
# True on Windows platforms; the "os._name" fallback targets alternative
# implementations (presumably Jython's "nt" -- TODO confirm).
iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt")
class Checkers:
    """Predicate helpers backing LocalPath.check().

    Each public method implements one keyword checker (e.g. ``dir=1``,
    ``ext=".py"``); ``_evaluate`` dispatches keyword arguments to them and
    also understands the ``not<name>`` negated spelling.
    """

    # Checkers whose outcome is determined purely by (non-)existence; used
    # as a fallback when stat-ing the path fails.
    _depend_on_existence = "exists", "link", "dir", "file"

    def __init__(self, path):
        self.path = path

    def dotfile(self):
        # Hidden (dot-prefixed) files/directories.
        return self.path.basename.startswith(".")

    def ext(self, arg):
        # Accept the extension with or without the leading dot.
        if not arg.startswith("."):
            arg = "." + arg
        return self.path.ext == arg

    def basename(self, arg):
        return self.path.basename == arg

    def basestarts(self, arg):
        return self.path.basename.startswith(arg)

    def relto(self, arg):
        return self.path.relto(arg)

    def fnmatch(self, arg):
        return self.path.fnmatch(arg)

    def endswith(self, arg):
        return str(self.path).endswith(arg)

    def _evaluate(self, kw):
        """Return True if the path satisfies every keyword checker in *kw*."""
        from .._code.source import getrawcode

        for name, value in kw.items():
            invert = False
            meth = None
            try:
                meth = getattr(self, name)
            except AttributeError:
                # "notfoo" negates the "foo" checker.
                if name[:3] == "not":
                    invert = True
                    try:
                        meth = getattr(self, name[3:])
                    except AttributeError:
                        pass
            if meth is None:
                raise TypeError(f"no {name!r} checker available for {self.path!r}")
            try:
                # Checkers taking an argument (beyond self) receive the
                # keyword's value; flag-style checkers compare as booleans.
                if getrawcode(meth).co_argcount > 1:
                    if (not meth(value)) ^ invert:
                        return False
                else:
                    if bool(value) ^ bool(meth()) ^ invert:
                        return False
            except (error.ENOENT, error.ENOTDIR, error.EBUSY):
                # EBUSY feels not entirely correct,
                # but its kind of necessary since ENOMEDIUM
                # is not accessible in python
                for name in self._depend_on_existence:
                    if name in kw:
                        if kw.get(name):
                            return False
                    name = "not" + name
                    if name in kw:
                        if not kw.get(name):
                            return False
        return True

    # Cached result of path.stat()/lstat(); populated lazily by _stat().
    _statcache: Stat

    def _stat(self) -> Stat:
        try:
            return self._statcache
        except AttributeError:
            try:
                self._statcache = self.path.stat()
            except error.ELOOP:
                # Symlink loop: fall back to lstat so link checks still work.
                self._statcache = self.path.lstat()
            return self._statcache

    def dir(self):
        return S_ISDIR(self._stat().mode)

    def file(self):
        return S_ISREG(self._stat().mode)

    def exists(self):
        return self._stat()

    def link(self):
        st = self.path.lstat()
        return S_ISLNK(st.mode)
class NeverRaised(Exception):
    """Sentinel exception type that is never raised.

    Used as the default ``ignore`` argument of LocalPath.visit() so that,
    by default, no real exception matches it.
    """

    pass
class Visitor:
    """Implements LocalPath.visit(): recursive directory traversal with
    optional filtering, recursion control, sorting and breadth-first order."""

    def __init__(self, fil, rec, ignore, bf, sort):
        if isinstance(fil, str):
            fil = FNMatcher(fil)
        if isinstance(rec, str):
            self.rec: Callable[[LocalPath], bool] = FNMatcher(rec)
        elif not hasattr(rec, "__call__") and rec:
            # Any truthy non-callable means "recurse everywhere".
            self.rec = lambda path: True
        else:
            self.rec = rec
        self.fil = fil
        self.ignore = ignore
        self.breadthfirst = bf
        # Identity function when sorting is disabled.
        self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x)

    def gen(self, path):
        """Yield entries below *path* according to the configured options."""
        try:
            entries = path.listdir()
        except self.ignore:
            return
        rec = self.rec
        dirs = self.optsort(
            [p for p in entries if p.check(dir=1) and (rec is None or rec(p))]
        )
        if not self.breadthfirst:
            # Depth-first: descend before yielding this level's entries.
            for subdir in dirs:
                for p in self.gen(subdir):
                    yield p
        for p in self.optsort(entries):
            if self.fil is None or self.fil(p):
                yield p
        if self.breadthfirst:
            for subdir in dirs:
                for p in self.gen(subdir):
                    yield p
class FNMatcher:
    """Callable predicate that matches a path object against a glob pattern."""

    def __init__(self, pattern):
        self.pattern = pattern

    def __call__(self, path):
        pattern = self.pattern

        if path.sep not in pattern and iswin32 and posixpath.sep in pattern:
            # Running on Windows, the pattern has no Windows path separators,
            # and the pattern has one or more Posix path separators. Replace
            # the Posix path separators with the Windows path separator.
            pattern = pattern.replace(posixpath.sep, path.sep)

        if path.sep not in pattern:
            # Separator-free patterns match against the basename only.
            subject = path.basename
        else:
            subject = str(path)  # path.strpath # XXX svn?
            if not os.path.isabs(pattern):
                # Relative multi-component patterns match any suffix.
                pattern = "*" + path.sep + pattern
        return fnmatch.fnmatch(subject, pattern)
def map_as_list(func, iter):
    """Apply *func* to every item of *iter* and return the results as a list."""
    return [func(item) for item in iter]
class Stat:
    """Wrapper around an os.stat() result with convenience accessors.

    Unknown attribute lookups are forwarded to the underlying stat result
    with the ``st_`` prefix added (e.g. ``.size`` -> ``st_size``).
    """

    if TYPE_CHECKING:

        @property
        def size(self) -> int:
            ...

        @property
        def mtime(self) -> float:
            ...

    def __getattr__(self, name: str) -> Any:
        # Forward e.g. ``.mode`` to ``st_mode`` on the raw stat result.
        return getattr(self._osstatresult, "st_" + name)

    def __init__(self, path, osstatresult):
        self.path = path
        self._osstatresult = osstatresult

    @property
    def owner(self):
        """Return owner (user) name of file; POSIX only."""
        if iswin32:
            raise NotImplementedError("XXX win32")
        import pwd

        entry = error.checked_call(pwd.getpwuid, self.uid)  # type:ignore[attr-defined]
        return entry[0]

    @property
    def group(self):
        """Return group name of file."""
        if iswin32:
            raise NotImplementedError("XXX win32")
        import grp

        entry = error.checked_call(grp.getgrgid, self.gid)  # type:ignore[attr-defined]
        return entry[0]

    def isdir(self):
        return S_ISDIR(self._osstatresult.st_mode)

    def isfile(self):
        return S_ISREG(self._osstatresult.st_mode)

    def islink(self):
        # NOTE(review): lstat()'s result is discarded; presumably kept for
        # its error side effect on missing paths -- confirm before removing.
        self.path.lstat()
        return S_ISLNK(self._osstatresult.st_mode)
def getuserid(user):
    """Resolve *user* (login name or numeric uid) to a numeric uid."""
    import pwd

    if isinstance(user, int):
        return user
    return pwd.getpwnam(user)[2]  # type:ignore[attr-defined]
def getgroupid(group):
    """Resolve *group* (group name or numeric gid) to a numeric gid."""
    import grp

    if isinstance(group, int):
        return group
    return grp.getgrnam(group)[2]  # type:ignore[attr-defined]
class LocalPath:
"""Object oriented interface to os.path and other local filesystem
related information.
"""
    class ImportMismatchError(ImportError):
        """raised on pyimport() if there is a mismatch of __file__'s"""

    # Platform path separator, exposed as a class attribute so the string
    # manipulation helpers can reference it uniformly.
    sep = os.sep
def __init__(self, path=None, expanduser=False):
"""Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = error.checked_call(os.getcwd)
else:
try:
path = os.fspath(path)
except TypeError:
raise ValueError(
"can only pass None, Path instances "
"or non-empty strings to LocalPath"
)
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
    # POSIX-only operations: ownership changes and hard/symbolic links.
    if sys.platform != "win32":

        def chown(self, user, group, rec=0):
            """Change ownership to the given user and group.
            user and group may be specified by a number or
            by a name. if rec is True change ownership
            recursively.
            """
            uid = getuserid(user)
            gid = getgroupid(group)
            if rec:
                # Only chown real entries, never follow/alter symlinks.
                for x in self.visit(rec=lambda x: x.check(link=0)):
                    if x.check(link=0):
                        error.checked_call(os.chown, str(x), uid, gid)
            error.checked_call(os.chown, str(self), uid, gid)

        def readlink(self) -> str:
            """Return value of a symbolic link."""
            # https://github.com/python/mypy/issues/12278
            return error.checked_call(os.readlink, self.strpath)  # type: ignore[arg-type,return-value]

        def mklinkto(self, oldname):
            """Posix style hard link to another name."""
            error.checked_call(os.link, str(oldname), str(self))

        def mksymlinkto(self, value, absolute=1):
            """Create a symbolic link with the given value (pointing to another name)."""
            if absolute:
                error.checked_call(os.symlink, str(value), self.strpath)
            else:
                base = self.common(value)
                # with posix local paths '/' is always a common base
                relsource = self.__class__(value).relto(base)
                reldest = self.relto(base)
                # Climb from the link location up to the common base, then
                # descend to the target.
                n = reldest.count(self.sep)
                target = self.sep.join(("..",) * n + (relsource,))
                error.checked_call(os.symlink, target, self.strpath)
    def __div__(self, other):
        # Join with the "/" operator: path / "child" -> joined path.
        return self.join(os.fspath(other))

    __truediv__ = __div__  # py3k
    @property
    def basename(self):
        """Basename part of path."""
        return self._getbyspec("basename")[0]

    @property
    def dirname(self):
        """Dirname part of path."""
        return self._getbyspec("dirname")[0]

    @property
    def purebasename(self):
        """Pure base name of the path (basename without the extension)."""
        return self._getbyspec("purebasename")[0]

    @property
    def ext(self):
        """Extension of the path (including the '.')."""
        return self._getbyspec("ext")[0]
    def read_binary(self):
        """Read and return a bytestring from reading the path."""
        with self.open("rb") as f:
            return f.read()

    def read_text(self, encoding):
        """Read and return a Unicode string from reading the path."""
        with self.open("r", encoding=encoding) as f:
            return f.read()

    def read(self, mode="r"):
        """Read and return the file's content using the given mode (text by
        default; pass "rb" to get bytes)."""
        with self.open(mode) as f:
            return f.read()
def readlines(self, cr=1):
"""Read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line."""
mode = "r"
if not cr:
content = self.read(mode)
return content.split("\n")
else:
f = self.open(mode)
try:
return f.readlines()
finally:
f.close()
    def load(self):
        """(deprecated) return object unpickled from self.read()"""
        # SECURITY: unpickling executes arbitrary code; only use on trusted files.
        f = self.open("rb")
        try:
            import pickle

            return error.checked_call(pickle.load, f)
        finally:
            f.close()
    def move(self, target):
        """Move this path to target."""
        if target.relto(self):
            raise error.EINVAL(target, "cannot move path into a subdirectory of itself")
        try:
            self.rename(target)
        except error.EXDEV:  # invalid cross-device link
            # rename() cannot cross filesystems; fall back to copy + remove.
            self.copy(target)
            self.remove()
    def fnmatch(self, pattern):
        """Return true if the basename/fullname matches the glob-'pattern'.

        valid pattern characters::

            *       matches everything
            ?       matches any single character
            [seq]   matches any character in seq
            [!seq]  matches any char not in seq

        If the pattern contains a path-separator then the full path
        is used for pattern matching and a '*' is prepended to the
        pattern.

        if the pattern doesn't contain a path-separator the pattern
        is only matched against the basename.
        """
        return FNMatcher(pattern)(self)
def relto(self, relpath):
"""Return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, LocalPath)):
raise TypeError(f"{relpath!r}: not a string or path object")
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
# assert strrelpath[-1] == self.sep
# assert strrelpath[-2] != self.sep
strself = self.strpath
if sys.platform == "win32" or getattr(os, "_name", None) == "nt":
if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)):
return strself[len(strrelpath) :]
elif strself.startswith(strrelpath):
return strself[len(strrelpath) :]
return ""
def ensure_dir(self, *args):
"""Ensure the path joined with args is a directory."""
return self.ensure(*args, **{"dir": True})
    def bestrelpath(self, dest):
        """Return a string which is a relative path from self
        (assumed to be a directory) to dest such that
        self.join(bestrelpath) == dest and if not such
        path can be determined return dest.
        """
        try:
            if self == dest:
                return os.curdir
            base = self.common(dest)
            if not base:  # can be the case on windows
                return str(dest)
            self2base = self.relto(base)
            reldest = dest.relto(base)
            if self2base:
                # Number of ".." hops needed to climb from self up to base.
                n = self2base.count(self.sep) + 1
            else:
                n = 0
            lst = [os.pardir] * n
            if reldest:
                lst.append(reldest)
            target = dest.sep.join(lst)
            return target
        except AttributeError:
            # dest is not path-like; fall back to its string form.
            return str(dest)
    def exists(self):
        """Return True if the path exists."""
        return self.check()

    def isdir(self):
        """Return True if the path is a directory."""
        return self.check(dir=1)

    def isfile(self):
        """Return True if the path is a regular file."""
        return self.check(file=1)
def parts(self, reverse=False):
"""Return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
lst = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
lst.append(current)
if not reverse:
lst.reverse()
return lst
def common(self, other):
"""Return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
def __add__(self, other):
"""Return new path object with 'other' added to the basename"""
return self.new(basename=self.basename + str(other))
    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
        """Yields all paths below the current one

        fil is a filter (glob pattern or callable), if not matching the
        path will not be yielded, defaulting to None (everything is
        returned)

        rec is a filter (glob pattern or callable) that controls whether
        a node is descended, defaulting to None

        ignore is an Exception class that is ignored when calling dirlist()
        on any of the paths (by default, all exceptions are reported)

        bf if True will cause a breadthfirst search instead of the
        default depthfirst. Default: False

        sort if True will sort entries within each directory level.
        """
        yield from Visitor(fil, rec, ignore, bf, sort).gen(self)
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, "__call__"):
warnings.warn(
DeprecationWarning(
"listdir(sort=callable) is deprecated and breaks on python3"
),
stacklevel=3,
)
res.sort(sort)
else:
res.sort()
    def __fspath__(self):
        # os.fspath() / PEP 519 support.
        return self.strpath

    def __hash__(self):
        s = self.strpath
        if iswin32:
            # Case-insensitive filesystems: hash the lowercased path so that
            # paths differing only in case hash equally (matches __eq__).
            s = s.lower()
        return hash(s)

    def __eq__(self, other):
        s1 = os.fspath(self)
        try:
            s2 = os.fspath(other)
        except TypeError:
            # Non-path-like objects never compare equal.
            return False
        if iswin32:
            # Case-insensitive comparison on Windows.
            s1 = s1.lower()
            try:
                s2 = s2.lower()
            except AttributeError:
                return False
        return s1 == s2

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        return os.fspath(self) < os.fspath(other)

    def __gt__(self, other):
        return os.fspath(self) > os.fspath(other)
    def samefile(self, other):
        """Return True if 'other' references the same file as 'self'."""
        other = os.fspath(other)
        if not isabs(other):
            other = abspath(other)
        if self == other:
            return True
        if not hasattr(os.path, "samefile"):
            # NOTE(review): os.path.samefile exists on all supported Python 3
            # platforms; this guard looks like a legacy fallback -- confirm
            # before removing.
            return False
        return error.checked_call(os.path.samefile, self.strpath, other)
    def remove(self, rec=1, ignore_errors=False):
        """Remove a file or directory (or a directory tree if rec=1).
        if ignore_errors is True, errors while removing directories will
        be ignored.
        """
        if self.check(dir=1, link=0):
            if rec:
                # force remove of readonly files on windows
                if iswin32:
                    self.chmod(0o700, rec=1)
                import shutil

                error.checked_call(
                    shutil.rmtree, self.strpath, ignore_errors=ignore_errors
                )
            else:
                error.checked_call(os.rmdir, self.strpath)
        else:
            # Single file (or symlink): remove it directly.
            if iswin32:
                self.chmod(0o700)
            error.checked_call(os.remove, self.strpath)
    def computehash(self, hashtype="md5", chunksize=524288):
        """Return hexdigest of hashvalue for this file."""
        try:
            try:
                import hashlib as mod
            except ImportError:
                # Pre-hashlib fallback for historic Python versions: import
                # the named hash module directly.
                if hashtype == "sha1":
                    hashtype = "sha"
                mod = __import__(hashtype)
            hash = getattr(mod, hashtype)()
        except (AttributeError, ImportError):
            raise ValueError(f"Don't know how to compute {hashtype!r} hash")
        f = self.open("rb")
        try:
            # Stream the file in chunks to bound memory use.
            while 1:
                buf = f.read(chunksize)
                if not buf:
                    return hash.hexdigest()
                hash.update(buf)
        finally:
            f.close()
    def new(self, **kw):
        """Create a modified version of this path.
        the following keyword arguments modify various path parts::

            a:/some/path/to/a/file.ext
            xx                           drive
            xxxxxxxxxxxxxxxxx            dirname
                              xxxxxxxx   basename
                              xxxx       purebasename
                                   xxx   ext
        """
        obj = object.__new__(self.__class__)
        if not kw:
            # No changes requested: return a plain copy.
            obj.strpath = self.strpath
            return obj
        drive, dirname, basename, purebasename, ext = self._getbyspec(
            "drive,dirname,basename,purebasename,ext"
        )
        if "basename" in kw:
            if "purebasename" in kw or "ext" in kw:
                # basename already includes purebasename + ext.
                raise ValueError("invalid specification %r" % kw)
        else:
            pb = kw.setdefault("purebasename", purebasename)
            try:
                ext = kw["ext"]
            except KeyError:
                pass
            else:
                # Normalize "txt" -> ".txt".
                if ext and not ext.startswith("."):
                    ext = "." + ext
            kw["basename"] = pb + ext
        if "dirname" in kw and not kw["dirname"]:
            # Empty dirname means "keep only the drive part".
            kw["dirname"] = drive
        else:
            kw.setdefault("dirname", dirname)
        kw.setdefault("sep", self.sep)
        obj.strpath = normpath("%(dirname)s%(sep)s%(basename)s" % kw)
        return obj
def _getbyspec(self, spec: str) -> list[str]:
"""See new for what 'spec' can be."""
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(","))
for name in args:
if name == "drive":
res.append(parts[0])
elif name == "dirname":
res.append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == "basename":
res.append(basename)
else:
i = basename.rfind(".")
if i == -1:
purebasename, ext = basename, ""
else:
purebasename, ext = basename[:i], basename[i:]
if name == "purebasename":
res.append(purebasename)
elif name == "ext":
res.append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
"""Return the directory path joined with any given path arguments."""
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return self.new(basename="").join(*args, **kwargs)
    def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath:
        """Return a new path by appending all 'args' as path
        components. if abs=1 is used restart from root if any
        of the args is an absolute path.
        """
        sep = self.sep
        strargs = [os.fspath(arg) for arg in args]
        strpath = self.strpath
        if abs:
            # Restart from the right-most absolute argument, if any.
            newargs: list[str] = []
            for arg in reversed(strargs):
                if isabs(arg):
                    strpath = arg
                    strargs = newargs
                    break
                newargs.insert(0, arg)
        # special case for when we have e.g. strpath == "/"
        actual_sep = "" if strpath.endswith(sep) else sep
        for arg in strargs:
            arg = arg.strip(sep)
            if iswin32:
                # allow unix style paths even on windows.
                arg = arg.strip("/")
                arg = arg.replace("/", sep)
            strpath = strpath + actual_sep + arg
            actual_sep = sep
        obj = object.__new__(self.__class__)
        obj.strpath = normpath(strpath)
        return obj
    def open(self, mode="r", ensure=False, encoding=None):
        """Return an opened file with the given mode.

        If ensure is True, create parent directories if needed.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        if encoding:
            return error.checked_call(io.open, self.strpath, mode, encoding=encoding)
        return error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
    def islink(self):
        """Return True if the path is a symbolic link."""
        return islink(self.strpath)
def check(self, **kw):
"""Check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
if not kw:
kw = {"exists": 1}
return Checkers(self)._evaluate(kw)
    # Characters that make a listdir() filter string a glob pattern rather
    # than a plain file name.
    _patternchars = set("*?[" + os.path.sep)

    def listdir(self, fil=None, sort=None):
        """List directory contents, possibly filter by the given fil func
        and possibly sorted.
        """
        if fil is None and sort is None:
            names = error.checked_call(os.listdir, self.strpath)
            return map_as_list(self._fastjoin, names)
        if isinstance(fil, str):
            if not self._patternchars.intersection(fil):
                # Plain name (no glob characters): check it directly.
                child = self._fastjoin(fil)
                if exists(child.strpath):
                    return [child]
                return []
            fil = FNMatcher(fil)
        names = error.checked_call(os.listdir, self.strpath)
        res = []
        for name in names:
            child = self._fastjoin(name)
            if fil is None or fil(child):
                res.append(child)
        self._sortlist(res, sort)
        return res
    def size(self) -> int:
        """Return size of the underlying file object (in bytes)."""
        return self.stat().size

    def mtime(self) -> float:
        """Return last modification time of the path."""
        return self.stat().mtime
    def copy(self, target, mode=False, stat=False):
        """Copy path to target.

        If mode is True, will copy permission from path to target.
        If stat is True, copy permission, last modification
        time, last access time, and flags from path to target.
        """
        if self.check(file=1):
            if target.check(dir=1):
                # Copying a file into a directory: keep the basename.
                target = target.join(self.basename)
            assert self != target
            copychunked(self, target)
            if mode:
                copymode(self.strpath, target.strpath)
            if stat:
                copystat(self, target)
        else:

            def rec(p):
                # Do not descend into symlinked directories.
                return p.check(link=0)

            for x in self.visit(rec=rec):
                relpath = x.relto(self)
                newx = target.join(relpath)
                newx.dirpath().ensure(dir=1)
                if x.check(link=1):
                    newx.mksymlinkto(x.readlink())
                    continue
                elif x.check(file=1):
                    copychunked(x, newx)
                elif x.check(dir=1):
                    newx.ensure(dir=1)
                if mode:
                    copymode(x.strpath, newx.strpath)
                if stat:
                    copystat(x, newx)
def rename(self, target):
"""Rename this path to target."""
target = os.fspath(target)
return error.checked_call(os.rename, self.strpath, target)
    def dump(self, obj, bin=1):
        """Pickle object into path location"""
        f = self.open("wb")
        import pickle

        try:
            error.checked_call(pickle.dump, obj, f, bin)
        finally:
            f.close()
def mkdir(self, *args):
"""Create & return the directory joined with args."""
p = self.join(*args)
error.checked_call(os.mkdir, os.fspath(p))
return p
    def write_binary(self, data, ensure=False):
        """Write binary data into path. If ensure is True create
        missing parent directories.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        with self.open("wb") as f:
            f.write(data)

    def write_text(self, data, encoding, ensure=False):
        """Write text data into path using the specified encoding.
        If ensure is True create missing parent directories.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        with self.open("w", encoding=encoding) as f:
            f.write(data)
    def write(self, data, mode="w", ensure=False):
        """Write data into path. If ensure is True create
        missing parent directories.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        if "b" in mode:
            if not isinstance(data, bytes):
                raise ValueError("can only process bytes")
        else:
            if not isinstance(data, str):
                if not isinstance(data, bytes):
                    # Fall back to the object's string representation.
                    data = str(data)
                else:
                    # Decode bytes for text-mode writing.
                    data = data.decode(sys.getdefaultencoding())
        f = self.open(mode)
        try:
            f.write(data)
        finally:
            f.close()
    def _ensuredirs(self):
        """Recursively create this path as a directory (like os.makedirs)."""
        parent = self.dirpath()
        if parent == self:
            # Reached the filesystem root.
            return self
        if parent.check(dir=0):
            parent._ensuredirs()
        if self.check(dir=0):
            try:
                self.mkdir()
            except error.EEXIST:
                # race condition: file/dir created by another thread/process.
                # complain if it is not a dir
                if self.check(dir=0):
                    raise
        return self
    def ensure(self, *args, **kwargs):
        """Ensure that an args-joined path exists (by default as
        a file). if you specify a keyword argument 'dir=True'
        then the path is forced to be a directory path.
        """
        p = self.join(*args)
        if kwargs.get("dir", 0):
            return p._ensuredirs()
        else:
            p.dirpath()._ensuredirs()
            if not p.check(file=1):
                # Create an empty file ("touch").
                p.open("w").close()
            return p
    @overload
    def stat(self, raising: Literal[True] = ...) -> Stat:
        ...

    @overload
    def stat(self, raising: Literal[False]) -> Stat | None:
        ...

    def stat(self, raising: bool = True) -> Stat | None:
        """Return an os.stat() result wrapped in a Stat object.

        With raising=False, return None on failure instead of propagating
        the error (KeyboardInterrupt is always re-raised).
        """
        if raising:
            return Stat(self, error.checked_call(os.stat, self.strpath))
        try:
            return Stat(self, os.stat(self.strpath))
        except KeyboardInterrupt:
            raise
        except Exception:
            return None

    def lstat(self) -> Stat:
        """Return an os.lstat() result (does not follow symlinks)."""
        return Stat(self, error.checked_call(os.lstat, self.strpath))
    def setmtime(self, mtime=None):
        """Set modification time for the given path. if 'mtime' is None
        (the default) then the file's mtime is set to current time.

        Note that the resolution for 'mtime' is platform dependent.
        """
        if mtime is None:
            return error.checked_call(os.utime, self.strpath, mtime)
        try:
            return error.checked_call(os.utime, self.strpath, (-1, mtime))
        except error.EINVAL:
            # Some platforms reject atime=-1; pass the current atime instead.
            return error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
    def chdir(self):
        """Change directory to self and return old current directory"""
        try:
            old = self.__class__()
        except error.ENOENT:
            # The current working directory no longer exists.
            old = None
        error.checked_call(os.chdir, self.strpath)
        return old
    @contextmanager
    def as_cwd(self):
        """
        Return a context manager, which changes to the path's dir during the
        managed "with" context.
        On __enter__ it returns the old dir, which might be ``None``.
        """
        old = self.chdir()
        try:
            yield old
        finally:
            # Restore the previous working directory, if there was one.
            if old is not None:
                old.chdir()
def realpath(self):
"""Return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
    def atime(self):
        """Return last access time of the path."""
        return self.stat().atime

    def __repr__(self):
        return "local(%r)" % self.strpath

    def __str__(self):
        """Return string representation of the Path."""
        return self.strpath
    def chmod(self, mode, rec=0):
        """Change permissions to the given mode. If mode is an
        integer it directly encodes the os-specific modes.
        if rec is True perform recursively.
        """
        if not isinstance(mode, int):
            raise TypeError(f"mode {mode!r} must be an integer")
        if rec:
            for x in self.visit(rec=rec):
                error.checked_call(os.chmod, str(x), mode)
        error.checked_call(os.chmod, self.strpath, mode)
    def pypkgpath(self):
        """Return the Python package path by looking for the last
        directory upwards which still contains an __init__.py.

        Return None if a pkgpath can not be determined.
        """
        pkgpath = None
        # Walk from self down towards the root; the last ancestor that keeps
        # an __init__.py (with an importable name) wins.
        for parent in self.parts(reverse=True):
            if parent.isdir():
                if not parent.join("__init__.py").exists():
                    break
                if not isimportable(parent.basename):
                    break
                pkgpath = parent
        return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
    def pyimport(self, modname=None, ensuresyspath=True):
        """Return path as an imported python module.

        If modname is None, look for the containing package
        and construct an according module name.
        The module will be put/looked up in sys.modules.

        if ensuresyspath is True then the root dir for importing
        the file (taking __init__.py files into account) will
        be prepended to sys.path if it isn't there already.
        If ensuresyspath=="append" the root dir will be appended
        if it isn't already contained in sys.path.
        if ensuresyspath is False no modification of syspath happens.

        Special value of ensuresyspath=="importlib" is intended
        purely for using in pytest, it is capable only of importing
        separate .py files outside packages, e.g. for test suite
        without any __init__.py file. It effectively allows having
        same-named test modules in different places and offers
        mild opt-in via this option. Note that it works only in
        recent versions of python.
        """
        if not self.check():
            raise error.ENOENT(self)

        if ensuresyspath == "importlib":
            # Import via importlib machinery without touching sys.path;
            # allows same-named modules in different directories.
            if modname is None:
                modname = self.purebasename
            spec = importlib.util.spec_from_file_location(modname, str(self))
            if spec is None or spec.loader is None:
                raise ImportError(
                    f"Can't find module {modname} at location {str(self)}"
                )
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
            return mod

        pkgpath = None
        if modname is None:
            # Derive the dotted module name from the package structure.
            pkgpath = self.pypkgpath()
            if pkgpath is not None:
                pkgroot = pkgpath.dirpath()
                names = self.new(ext="").relto(pkgroot).split(self.sep)
                if names[-1] == "__init__":
                    names.pop()
                modname = ".".join(names)
            else:
                pkgroot = self.dirpath()
                modname = self.purebasename

            self._ensuresyspath(ensuresyspath, pkgroot)
            __import__(modname)
            mod = sys.modules[modname]
            if self.basename == "__init__.py":
                return mod  # we don't check anything as we might
                # be in a namespace package ... too icky to check
            modfile = mod.__file__
            assert modfile is not None
            # Normalize compiled-file names back to the .py source name.
            if modfile[-4:] in (".pyc", ".pyo"):
                modfile = modfile[:-1]
            elif modfile.endswith("$py.class"):
                modfile = modfile[:-9] + ".py"
            if modfile.endswith(os.path.sep + "__init__.py"):
                if self.basename != "__init__.py":
                    modfile = modfile[:-12]
            try:
                issame = self.samefile(modfile)
            except error.ENOENT:
                issame = False
            if not issame:
                # A different file of the same module name was imported.
                ignore = os.getenv("PY_IGNORE_IMPORTMISMATCH")
                if ignore != "1":
                    raise self.ImportMismatchError(modname, modfile, self)
            return mod
        else:
            try:
                return sys.modules[modname]
            except KeyError:
                # we have a custom modname, do a pseudo-import
                import types

                mod = types.ModuleType(modname)
                mod.__file__ = str(self)
                sys.modules[modname] = mod
                try:
                    with open(str(self), "rb") as f:
                        exec(f.read(), mod.__dict__)
                except BaseException:
                    # Roll back the sys.modules entry on failure.
                    del sys.modules[modname]
                    raise
                return mod
    def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str:
        """Return stdout text from executing a system child process,
        where the 'self' path points to executable.
        The process is directly invoked and not through a system shell.

        :param argv: additional command line arguments; each is
            str()-converted before being passed to the child.
        :param popen_opts: keyword options forwarded to subprocess.Popen.
            Any caller-supplied ``stdout``/``stderr`` is discarded because
            both streams are always captured via pipes below.
        :raises RuntimeError: if the child exits with a non-zero status;
            the exception args are ``(returncode, returncode, command,
            stdout, stderr)``.
        """
        from subprocess import Popen, PIPE

        # Force-capture both streams; drop any conflicting caller options.
        popen_opts.pop("stdout", None)
        popen_opts.pop("stderr", None)
        proc = Popen(
            [str(self)] + [str(arg) for arg in argv],
            **popen_opts,
            stdout=PIPE,
            stderr=PIPE,
        )
        stdout: str | bytes
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        # communicate() yields str when the caller enabled text mode via
        # popen_opts, otherwise bytes — normalize to str for the return value.
        if isinstance(stdout, bytes):
            stdout = stdout.decode(sys.getdefaultencoding())
        if ret != 0:
            if isinstance(stderr, bytes):
                stderr = stderr.decode(sys.getdefaultencoding())
            raise RuntimeError(
                ret,
                ret,
                str(self),
                stdout,
                stderr,
            )
        return stdout
    @classmethod
    def sysfind(cls, name, checker=None, paths=None):
        """Return a path object found by looking at the systems
        underlying PATH specification. If the checker is not None
        it will be invoked to filter matching paths. If a binary
        cannot be found, None is returned
        Note: This is probably not working on plain win32 systems
        but may work on cygwin.

        :param name: executable name; if absolute it is checked directly
            and the PATH search is bypassed.
        :param checker: optional predicate called with each candidate path;
            a falsy result skips that candidate.
        :param paths: explicit list of directories to search instead of
            the PATH environment variable.
        """
        if isabs(name):
            p = local(name)
            if p.check(file=1):
                return p
        else:
            if paths is None:
                if iswin32:
                    paths = os.environ["Path"].split(";")
                    if "" not in paths and "." not in paths:
                        paths.append(".")
                    try:
                        systemroot = os.environ["SYSTEMROOT"]
                    except KeyError:
                        pass
                    else:
                        # Windows PATH entries may contain a literal
                        # %SystemRoot% placeholder — expand it ourselves.
                        paths = [
                            path.replace("%SystemRoot%", systemroot) for path in paths
                        ]
                else:
                    paths = os.environ["PATH"].split(":")
            tryadd = []
            if iswin32:
                # On Windows also try each PATHEXT suffix (.exe, .bat, ...);
                # the empty suffix is appended last so exact names win too.
                tryadd += os.environ["PATHEXT"].split(os.pathsep)
            tryadd.append("")
            for x in paths:
                for addext in tryadd:
                    p = local(x).join(name, abs=True) + addext
                    try:
                        if p.check(file=1):
                            if checker:
                                if not checker(p):
                                    continue
                            return p
                    except error.EACCES:
                        # Unreadable directories are silently skipped.
                        pass
        return None
@classmethod
def _gethomedir(cls):
try:
x = os.environ["HOME"]
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"]
except KeyError:
return None
return cls(x)
# """
# special class constructors for local filesystem paths
# """
@classmethod
def get_temproot(cls):
"""Return the system's temporary directory
(where tempfiles are usually created in)
"""
import tempfile
return local(tempfile.gettempdir())
@classmethod
def mkdtemp(cls, rootdir=None):
"""Return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
    @classmethod
    def make_numbered_dir(
        cls, prefix="session-", rootdir=None, keep=3, lock_timeout=172800
    ):  # two days
        """Return unique directory with a number greater than the current
        maximum one. The number is assumed to start directly after prefix.
        if keep is true directories with a number less than (maxnum-keep)
        will be removed. If .lock files are used (lock_timeout non-zero),
        algorithm is multi-process safe.

        :param prefix: basename prefix of the numbered directories
            (matched case-insensitively).
        :param rootdir: directory to create the numbered dirs in;
            defaults to the system temp root.
        :param keep: how many of the newest numbered dirs to retain.
        :param lock_timeout: seconds after which another process's .lock
            file is considered stale; 0 disables locking entirely.
        """
        if rootdir is None:
            rootdir = cls.get_temproot()
        nprefix = prefix.lower()

        def parse_num(path):
            """Parse the number out of a path (if it matches the prefix)"""
            nbasename = path.basename.lower()
            if nbasename.startswith(nprefix):
                try:
                    return int(nbasename[len(nprefix) :])
                except ValueError:
                    pass

        def create_lockfile(path):
            """Exclusively create lockfile. Throws when failed"""
            mypid = os.getpid()
            lockfile = path.join(".lock")
            # Prefer a symlink (atomic even on NFS); fall back to an
            # O_EXCL-created regular file containing our pid.
            if hasattr(lockfile, "mksymlinkto"):
                lockfile.mksymlinkto(str(mypid))
            else:
                fd = error.checked_call(
                    os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644
                )
                with os.fdopen(fd, "w") as f:
                    f.write(str(mypid))
            return lockfile

        def atexit_remove_lockfile(lockfile):
            """Ensure lockfile is removed at process exit"""
            mypid = os.getpid()

            def try_remove_lockfile():
                # in a fork() situation, only the last process should
                # remove the .lock, otherwise the other processes run the
                # risk of seeing their temporary dir disappear. For now
                # we remove the .lock in the parent only (i.e. we assume
                # that the children finish before the parent).
                if os.getpid() != mypid:
                    return
                try:
                    lockfile.remove()
                except error.Error:
                    pass

            atexit.register(try_remove_lockfile)

        # compute the maximum number currently in use with the prefix
        lastmax = None
        while True:
            maxnum = -1
            for path in rootdir.listdir():
                num = parse_num(path)
                if num is not None:
                    maxnum = max(maxnum, num)

            # make the new directory
            try:
                udir = rootdir.mkdir(prefix + str(maxnum + 1))
                if lock_timeout:
                    lockfile = create_lockfile(udir)
                    atexit_remove_lockfile(lockfile)
            except (error.EEXIST, error.ENOENT, error.EBUSY):
                # race condition (1): another thread/process created the dir
                #     in the meantime - try again
                # race condition (2): another thread/process spuriously acquired
                #     lock treating empty directory as candidate
                #     for removal - try again
                # race condition (3): another thread/process tried to create the lock at
                #     the same time (happened in Python 3.3 on Windows)
                # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa
                if lastmax == maxnum:
                    # no progress since the last attempt: genuine failure.
                    raise
                lastmax = maxnum
                continue
            break

        def get_mtime(path):
            """Read file modification time"""
            try:
                return path.lstat().mtime
            except error.Error:
                pass

        garbage_prefix = prefix + "garbage-"

        def is_garbage(path):
            """Check if path denotes directory scheduled for removal"""
            bn = path.basename
            return bn.startswith(garbage_prefix)

        # prune old directories
        udir_time = get_mtime(udir)
        if keep and udir_time:
            for path in rootdir.listdir():
                num = parse_num(path)
                if num is not None and num <= (maxnum - keep):
                    try:
                        # try acquiring lock to remove directory as exclusive user
                        if lock_timeout:
                            create_lockfile(path)
                    except (error.EEXIST, error.ENOENT, error.EBUSY):
                        path_time = get_mtime(path)
                        if not path_time:
                            # assume directory doesn't exist now
                            continue
                        if abs(udir_time - path_time) < lock_timeout:
                            # assume directory with lockfile exists
                            # and lock timeout hasn't expired yet
                            continue
                    # path dir locked for exclusive use
                    # and scheduled for removal to avoid another thread/process
                    # treating it as a new directory or removal candidate
                    garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4()))
                    try:
                        path.rename(garbage_path)
                        garbage_path.remove(rec=1)
                    except KeyboardInterrupt:
                        raise
                    except Exception:  # this might be error.Error, WindowsError ...
                        pass
                if is_garbage(path):
                    try:
                        path.remove(rec=1)
                    except KeyboardInterrupt:
                        raise
                    except Exception:  # this might be error.Error, WindowsError ...
                        pass

        # make link...
        try:
            username = os.environ["USER"]  # linux, et al
        except KeyError:
            try:
                username = os.environ["USERNAME"]  # windows
            except KeyError:
                username = "current"

        src = str(udir)
        dest = src[: src.rfind("-")] + "-" + username
        try:
            os.unlink(dest)
        except OSError:
            pass
        try:
            os.symlink(src, dest)
        except (OSError, AttributeError, NotImplementedError):
            pass

        return udir
def copymode(src, dest):
    """Copy permission bits from *src* to *dest*.

    Arguments are str()-converted before calling into :mod:`shutil`, for
    consistency with :func:`copystat` below, so both plain strings and
    path objects are accepted.
    """
    import shutil

    shutil.copymode(str(src), str(dest))
def copystat(src, dest):
    """Copy permission bits, last access/modification times, and flags
    (where supported) from *src* onto *dest*."""
    import shutil

    source, target = str(src), str(dest)
    shutil.copystat(source, target)
def copychunked(src, dest):
    """Copy the contents of *src* to *dest* in fixed-size binary chunks,
    so arbitrarily large files never have to fit in memory at once."""
    chunksize = 524288  # half a meg of bytes
    with src.open("rb") as fsrc:
        with dest.open("wb") as fdest:
            while True:
                chunk = fsrc.read(chunksize)
                if not chunk:
                    break
                fdest.write(chunk)
def isimportable(name):
    """Return whether *name* looks like a valid Python module name:
    it must start with a letter or underscore and otherwise contain
    only alphanumeric characters and underscores."""
    if not name or not (name[0].isalpha() or name[0] == "_"):
        return None
    remainder = name.replace("_", "")
    return not remainder or remainder.isalnum()
local = LocalPath
| mit | 18b9720980eb46abfa413330fc00d127 | 32.327458 | 103 | 0.521197 | 4.449493 | false | false | false | false |
pytest-dev/pytest | testing/test_doctest.py | 2 | 49020 | import inspect
import sys
import textwrap
from pathlib import Path
from typing import Callable
from typing import Optional
import pytest
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_main_py
from _pytest.doctest import _is_mocked
from _pytest.doctest import _is_setup_py
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
from _pytest.pytester import Pytester
class TestDoctests:
def test_collect_testtextfile(self, pytester: Pytester):
w = pytester.maketxtfile(whatever="")
checkfile = pytester.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
"""
)
for x in (pytester.path, checkfile):
# print "checking that %s returns custom items" % (x,)
items, reprec = pytester.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, reprec = pytester.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, pytester: Pytester):
path = pytester.makepyfile(whatever="#")
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, pytester: Pytester):
path = pytester.makepyfile(whatever='""">>> pass"""')
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, pytester: Pytester):
path = pytester.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
@pytest.mark.parametrize("filename", ["__init__", "whatever"])
def test_collect_module_two_doctest_no_modulelevel(
self,
pytester: Pytester,
filename: str,
) -> None:
path = pytester.makepyfile(
**{
filename: """
'# Empty'
def my_func():
">>> magic = 42 "
def useless():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""",
},
)
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, pytester: Pytester):
p = pytester.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(failed=1)
def test_importmode(self, pytester: Pytester):
p = pytester.makepyfile(
**{
"namespacepkg/innerpkg/__init__.py": "",
"namespacepkg/innerpkg/a.py": """
def some_func():
return 42
""",
"namespacepkg/innerpkg/b.py": """
from namespacepkg.innerpkg.a import some_func
def my_func():
'''
>>> my_func()
42
'''
return some_func()
""",
}
)
reprec = pytester.inline_run(p, "--doctest-modules", "--import-mode=importlib")
reprec.assertoutcome(passed=1)
def test_new_pattern(self, pytester: Pytester):
p = pytester.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, pytester: Pytester):
"""Test support for multiple --doctest-glob arguments (#1255)."""
pytester.maketxtfile(
xdoc="""
>>> 1
1
"""
)
pytester.makefile(
".foo",
test="""
>>> 1
1
""",
)
pytester.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
assert {x.name for x in pytester.path.iterdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = pytester.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
def test_encoding(self, pytester, test_string, encoding):
"""Test support for doctest_encoding ini option."""
pytester.makeini(
"""
[pytest]
doctest_encoding={}
""".format(
encoding
)
)
doctest = """
>>> "{}"
{}
""".format(
test_string, repr(test_string)
)
fn = pytester.path / "test_encoding.txt"
fn.write_text(doctest, encoding=encoding)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_doctest_unexpected_exception(self, pytester: Pytester):
pytester.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"test_doctest_unexpected_exception.txt F *",
"",
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_unexpected_exception.txt _*",
"001 >>> i = 0",
"002 >>> 0 / i",
"UNEXPECTED EXCEPTION: ZeroDivisionError*",
"Traceback (most recent call last):",
' File "*/doctest.py", line *, in __run',
" *",
*(
(" *^^^^*",)
if (3, 11, 0, "beta", 4) > sys.version_info >= (3, 11)
else ()
),
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
"ZeroDivisionError: division by zero",
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
],
consecutive=True,
)
def test_doctest_outcomes(self, pytester: Pytester):
pytester.maketxtfile(
test_skip="""
>>> 1
1
>>> import pytest
>>> pytest.skip("")
>>> 2
3
""",
test_xfail="""
>>> import pytest
>>> pytest.xfail("xfail_reason")
>>> foo
bar
""",
test_importorskip="""
>>> import pytest
>>> pytest.importorskip("doesnotexist")
>>> foo
bar
""",
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"collected 3 items",
"",
"test_importorskip.txt s *",
"test_skip.txt s *",
"test_xfail.txt x *",
"",
"*= 2 skipped, 1 xfailed in *",
]
)
def test_docstring_partial_context_around_error(self, pytester: Pytester):
"""Test that we show some context before the actual line of a failing
doctest.
"""
pytester.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
"005*text-line-3",
"006*text-line-4",
"013*text-line-11",
"014*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
# lines below should be trimmed out
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, pytester: Pytester):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
pytester.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
"003*text-line-1",
"004*text-line-2",
"006*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
def test_doctest_linedata_missing(self, pytester: Pytester):
pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""
)
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
["*hello*", "006*>>> 1/0*", "*UNEXPECTED*ZeroDivision*", "*1 failed*"]
)
def test_doctest_linedata_on_property(self, pytester: Pytester):
pytester.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_linedata_on_property.Sample.some_property _*",
"004 ",
"005 >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_linedata_on_property.py:5: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_no_linedata_on_overriden_property(self, pytester: Pytester):
pytester.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
some_property = property(some_property.__get__, None, None, some_property.__doc__)
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_no_linedata_on_overriden_property.Sample.some_property _*",
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example",
"[?][?][?] >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_no_linedata_on_overriden_property.py:None: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_unex_importerror_only_txt(self, pytester: Pytester):
pytester.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
result = pytester.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
"*UNEXPECTED*ModuleNotFoundError*",
"ModuleNotFoundError: No module named *asdal*",
]
)
def test_doctest_unex_importerror_with_module(self, pytester: Pytester):
pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
)
)
pytester.maketxtfile(
"""
>>> import hello
>>>
"""
)
result = pytester.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
"*ModuleNotFoundError: No module named *asdals*",
"*Interrupted: 1 error during collection*",
]
)
def test_doctestmodule(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> x = 1
>>> x == 1
False
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, pytester: Pytester):
p = pytester.mkpydir("hello")
p.joinpath("__init__.py").write_text(
textwrap.dedent(
"""\
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""
)
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
"004 *>>> i + 1",
"*Expected:",
"* 2",
"*Got:",
"* 1",
"*:4: DocTestFailure",
]
)
def test_txtfile_failing(self, pytester: Pytester):
p = pytester.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
result = pytester.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
"002 >>> i + 1",
"Expected:",
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure",
]
)
def test_txtfile_with_fixtures(self, pytester: Pytester):
p = pytester.maketxtfile(
"""
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
"""
)
p = pytester.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def useless():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, pytester: Pytester):
p = pytester.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = pytester.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = pytester.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, pytester: Pytester):
"""Fix internal error with docstrings containing non-ascii characters."""
pytester.makepyfile(
'''\
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
def test_ignore_import_errors_on_doctest(self, pytester: Pytester):
p = pytester.makepyfile(
"""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
"""
)
reprec = pytester.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, pytester: Pytester):
"""#713: Fix --junit-xml option when used with --doctest-modules."""
p = pytester.makepyfile(
"""
def foo():
'''
>>> 1 + 1
3
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = pytester.maketxtfile(
test_unicode_doctest="""
.. doctest::
>>> print("Hi\\n\\nByé")
Hi
...
Byé
>>> 1 / 0 # Byé
1
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
def test_unicode_doctest_module(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = pytester.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_print_unicode_value(self, pytester: Pytester):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
p = pytester.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
>>> print('\xE5\xE9\xEE\xF8\xFC')
åéîøü
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_reportinfo(self, pytester: Pytester):
"""Make sure that DoctestItem.reportinfo() returns lineno."""
p = pytester.makepyfile(
test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
"""
)
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest ignores valid setup.py files when ran
with --doctest-modules
"""
p = pytester.makepyfile(
setup="""
if __name__ == '__main__':
from setuptools import setup, find_packages
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_main_py_does_not_cause_import_errors(self, pytester: Pytester):
p = pytester.copy_example("doctest/main_py")
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 2 items*", "*1 failed, 1 passed*"])
def test_invalid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest reads setup.py files that are not used
for python packages when ran with --doctest-modules
"""
p = pytester.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
class TestLiterals:
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_unicode(self, pytester, config_mode):
"""Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
"""
if config_mode == "ini":
pytester.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_UNICODE
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_UNICODE"
pytester.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii') {comment}
'12'
""".format(
comment=comment
)
)
pytester.makepyfile(
foo="""
def foo():
'''
>>> b'12'.decode('ascii') {comment}
'12'
'''
""".format(
comment=comment
)
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_bytes(self, pytester, config_mode):
"""Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
"""
if config_mode == "ini":
pytester.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_BYTES
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_BYTES"
pytester.maketxtfile(
test_doc="""
>>> b'foo' {comment}
'foo'
""".format(
comment=comment
)
)
pytester.makepyfile(
foo="""
def foo():
'''
>>> b'foo' {comment}
'foo'
'''
""".format(
comment=comment
)
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
def test_unicode_string(self, pytester: Pytester):
"""Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
"""
pytester.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii')
'12'
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_bytes_literal(self, pytester: Pytester):
"""Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
"""
pytester.maketxtfile(
test_doc="""
>>> b'foo'
'foo'
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(failed=1)
def test_number_re(self) -> None:
_number_re = _get_checker()._number_re # type: ignore
for s in [
"1.",
"+1.",
"-1.",
".1",
"+.1",
"-.1",
"0.1",
"+0.1",
"-0.1",
"1e5",
"+1e5",
"1e+5",
"+1e+5",
"1e-5",
"+1e-5",
"-1e-5",
"1.2e3",
"-1.2e-3",
]:
print(s)
m = _number_re.match(s)
assert m is not None
assert float(m.group()) == pytest.approx(float(s))
for s in ["1", "abc"]:
print(s)
assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_number_precision(self, pytester, config_mode):
"""Test the NUMBER option."""
if config_mode == "ini":
pytester.makeini(
"""
[pytest]
doctest_optionflags = NUMBER
"""
)
comment = ""
else:
comment = "#doctest: +NUMBER"
pytester.maketxtfile(
test_doc="""
Scalars:
>>> import math
>>> math.pi {comment}
3.141592653589793
>>> math.pi {comment}
3.1416
>>> math.pi {comment}
3.14
>>> -math.pi {comment}
-3.14
>>> math.pi {comment}
3.
>>> 3. {comment}
3.0
>>> 3. {comment}
3.
>>> 3. {comment}
3.01
>>> 3. {comment}
2.99
>>> .299 {comment}
.3
>>> .301 {comment}
.3
>>> 951. {comment}
1e3
>>> 1049. {comment}
1e3
>>> -1049. {comment}
-1e3
>>> 1e3 {comment}
1e3
>>> 1e3 {comment}
1000.
Lists:
>>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
[3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
>>> [[0.333, 0.667], [0.999, 1.333]] {comment}
[[0.33, 0.667], [0.999, 1.333]]
>>> [[[0.101]]] {comment}
[[[0.1]]]
Doesn't barf on non-numbers:
>>> 'abc' {comment}
'abc'
>>> None {comment}
""".format(
comment=comment
)
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"expression,output",
[
# ints shouldn't match floats:
("3.0", "3"),
("3e0", "3"),
("1e3", "1000"),
("3", "3.0"),
# Rounding:
("3.1", "3.0"),
("3.1", "3.2"),
("3.1", "4.0"),
("8.22e5", "810000.0"),
# Only the actual output is rounded up, not the expected output:
("3.0", "2.98"),
("1e3", "999"),
# The current implementation doesn't understand that numbers inside
# strings shouldn't be treated as numbers:
pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
],
)
def test_number_non_matches(self, pytester, expression, output):
pytester.maketxtfile(
test_doc="""
>>> {expression} #doctest: +NUMBER
{output}
""".format(
expression=expression, output=output
)
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=0, failed=1)
def test_number_and_allow_unicode(self, pytester: Pytester):
pytester.maketxtfile(
test_doc="""
>>> from collections import namedtuple
>>> T = namedtuple('T', 'a b c')
>>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
T(a=0.233, b=u'str', c='bytes')
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
class TestDoctestSkips:
    """
    If all examples in a doctest are skipped due to the SKIP option, then
    the tests should be SKIPPED rather than PASSED. (#957)
    """

    @pytest.fixture(params=["text", "module"])
    def makedoctest(self, pytester, request):
        # Parametrized factory: writes the given doctest either as a plain
        # .txt file or as a module docstring, so each test below exercises
        # both doctest collection modes.
        def makeit(doctest):
            mode = request.param
            if mode == "text":
                pytester.maketxtfile(doctest)
            else:
                assert mode == "module"
                pytester.makepyfile('"""\n%s"""' % doctest)

        return makeit

    def test_one_skipped(self, pytester, makedoctest):
        # One skipped example plus one passing example -> overall PASSED.
        makedoctest(
            """
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2
            4
            """
        )
        reprec = pytester.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=1)

    def test_one_skipped_failed(self, pytester, makedoctest):
        # A skipped example must not mask a later failing example.
        makedoctest(
            """
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2
            200
            """
        )
        reprec = pytester.inline_run("--doctest-modules")
        reprec.assertoutcome(failed=1)

    def test_all_skipped(self, pytester, makedoctest):
        # Every example skipped -> the doctest is reported as SKIPPED,
        # not PASSED (the behavior #957 asks for).
        makedoctest(
            """
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2  # doctest: +SKIP
            200
            """
        )
        reprec = pytester.inline_run("--doctest-modules")
        reprec.assertoutcome(skipped=1)

    def test_vacuous_all_skipped(self, pytester, makedoctest):
        # An empty doctest produces no outcome at all.
        makedoctest("")
        reprec = pytester.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=0, skipped=0)

    def test_continue_on_failure(self, pytester: Pytester):
        # With --doctest-continue-on-failure, all failures in one doctest
        # are collected and reported together instead of stopping at the
        # first one.
        pytester.maketxtfile(
            test_something="""
               >>> i = 5
               >>> def foo():
               ...     raise ValueError('error1')
               >>> foo()
               >>> i
               >>> i + 2
               7
               >>> i + 1
               """
        )
        result = pytester.runpytest(
            "--doctest-modules", "--doctest-continue-on-failure"
        )
        result.assert_outcomes(passed=0, failed=1)
        # The lines that contains the failure are 4, 5, and 8.  The first one
        # is a stack trace and the other two are mismatches.
        result.stdout.fnmatch_lines(
            ["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
        )

    def test_skipping_wrapped_test(self, pytester):
        """
        Issue 8796: INTERNALERROR raised when skipping a decorated DocTest
        through pytest_collection_modifyitems.
        """
        pytester.makeconftest(
            """
            import pytest
            from _pytest.doctest import DoctestItem

            def pytest_collection_modifyitems(config, items):
                skip_marker = pytest.mark.skip()

                for item in items:
                    if isinstance(item, DoctestItem):
                        item.add_marker(skip_marker)
            """
        )
        pytester.makepyfile(
            """
            from contextlib import contextmanager

            @contextmanager
            def my_config_context():
                '''
                >>> import os
                '''
            """
        )
        result = pytester.runpytest("--doctest-modules")
        assert "INTERNALERROR" not in result.stdout.str()
        result.assert_outcomes(skipped=1)
class TestDoctestAutoUseFixtures:
    """Interaction of autouse fixtures (at every scope) with doctest items."""

    # All fixture scopes exercised by the parametrized tests below.
    SCOPES = ["module", "session", "class", "function"]

    def test_doctest_module_session_fixture(self, pytester: Pytester):
        """Test that session fixtures are initialized for doctest modules (#768)."""
        # session fixture which changes some global data, which will
        # be accessed by doctests in a module
        pytester.makeconftest(
            """
            import pytest
            import sys

            @pytest.fixture(autouse=True, scope='session')
            def myfixture():
                assert not hasattr(sys, 'pytest_session_data')
                sys.pytest_session_data = 1
                yield
                del sys.pytest_session_data
            """
        )
        pytester.makepyfile(
            foo="""
            import sys

            def foo():
              '''
              >>> assert sys.pytest_session_data == 1
              '''

            def bar():
              '''
              >>> assert sys.pytest_session_data == 1
              '''
            """
        )
        result = pytester.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(["*2 passed*"])

    @pytest.mark.parametrize("scope", SCOPES)
    @pytest.mark.parametrize("enable_doctest", [True, False])
    def test_fixture_scopes(self, pytester, scope, enable_doctest):
        """Test that auto-use fixtures work properly with doctest modules.

        See #1057 and #1100.
        """
        pytester.makeconftest(
            """
            import pytest

            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                return 99
            """.format(
                scope=scope
            )
        )
        pytester.makepyfile(
            test_1='''
            def test_foo():
                """
                >>> getfixture('auto') + 1
                100
                """
            def test_bar():
                assert 1
            '''
        )
        # Doctest collection adds one extra passing item (the docstring).
        params = ("--doctest-modules",) if enable_doctest else ()
        passes = 3 if enable_doctest else 2
        result = pytester.runpytest(*params)
        result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])

    @pytest.mark.parametrize("scope", SCOPES)
    @pytest.mark.parametrize("autouse", [True, False])
    @pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
    def test_fixture_module_doctest_scopes(
        self, pytester, scope, autouse, use_fixture_in_doctest
    ):
        """Test that auto-use fixtures work properly with doctest files.

        See #1057 and #1100.
        """
        pytester.makeconftest(
            """
            import pytest

            @pytest.fixture(autouse={autouse}, scope="{scope}")
            def auto(request):
                return 99
            """.format(
                scope=scope, autouse=autouse
            )
        )
        if use_fixture_in_doctest:
            pytester.maketxtfile(
                test_doc="""
                >>> getfixture('auto')
                99
            """
            )
        else:
            pytester.maketxtfile(
                test_doc="""
                >>> 1 + 1
                2
            """
            )
        result = pytester.runpytest("--doctest-modules")
        result.stdout.no_fnmatch_line("*FAILURES*")
        result.stdout.fnmatch_lines(["*=== 1 passed in *"])

    @pytest.mark.parametrize("scope", SCOPES)
    def test_auto_use_request_attributes(self, pytester, scope):
        """Check that all attributes of a request in an autouse fixture
        behave as expected when requested for a doctest item.
        """
        # For a doctest text file there is no module/class/function, so the
        # corresponding request attributes must be None.
        pytester.makeconftest(
            """
            import pytest

            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                if "{scope}" == 'module':
                    assert request.module is None
                if "{scope}" == 'class':
                    assert request.cls is None
                if "{scope}" == 'function':
                    assert request.function is None
                return 99
            """.format(
                scope=scope
            )
        )
        pytester.maketxtfile(
            test_doc="""
            >>> 1 + 1
            2
        """
        )
        result = pytester.runpytest("--doctest-modules")
        str(result.stdout.no_fnmatch_line("*FAILURES*"))
        result.stdout.fnmatch_lines(["*=== 1 passed in *"])
class TestDoctestNamespaceFixture:
    """The ``doctest_namespace`` fixture injects names into doctest globals."""

    SCOPES = ["module", "session", "class", "function"]

    @pytest.mark.parametrize("scope", SCOPES)
    def test_namespace_doctestfile(self, pytester, scope):
        """
        Check that inserting something into the namespace works in a
        simple text file doctest
        """
        pytester.makeconftest(
            """
            import pytest
            import contextlib

            @pytest.fixture(autouse=True, scope="{scope}")
            def add_contextlib(doctest_namespace):
                doctest_namespace['cl'] = contextlib
            """.format(
                scope=scope
            )
        )
        # 'cl' is not imported in the doctest itself; it must come from
        # the injected namespace.
        p = pytester.maketxtfile(
            """
            >>> print(cl.__name__)
            contextlib
            """
        )
        reprec = pytester.inline_run(p)
        reprec.assertoutcome(passed=1)

    @pytest.mark.parametrize("scope", SCOPES)
    def test_namespace_pyfile(self, pytester, scope):
        """
        Check that inserting something into the namespace works in a
        simple Python file docstring doctest
        """
        pytester.makeconftest(
            """
            import pytest
            import contextlib

            @pytest.fixture(autouse=True, scope="{scope}")
            def add_contextlib(doctest_namespace):
                doctest_namespace['cl'] = contextlib
            """.format(
                scope=scope
            )
        )
        p = pytester.makepyfile(
            """
            def foo():
                '''
                >>> print(cl.__name__)
                contextlib
                '''
            """
        )
        reprec = pytester.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
    """Behavior of the --doctest-report option (diff format of failures)."""

    def _run_doctest_report(self, pytester, format):
        # Shared fixture: a doctest whose expected output differs from the
        # printed output in exactly one cell (4 vs 5), so every diff format
        # has a single well-defined change to render.
        pytester.makepyfile(
            """
            def foo():
                '''
                >>> foo()
                   a  b
                0  1  4
                1  2  4
                2  3  6
                '''
                print('   a  b\\n'
                      '0  1  4\\n'
                      '1  2  5\\n'
                      '2  3  6')
            """
        )
        return pytester.runpytest("--doctest-modules", "--doctest-report", format)

    # The option value is case-insensitive.
    @pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
    def test_doctest_report_udiff(self, pytester, format):
        result = self._run_doctest_report(pytester, format)
        result.stdout.fnmatch_lines(
            ["     0  1  4", "    -1  2  4", "    +1  2  5", "     2  3  6"]
        )

    def test_doctest_report_cdiff(self, pytester: Pytester):
        result = self._run_doctest_report(pytester, "cdiff")
        result.stdout.fnmatch_lines(
            [
                "         a  b",
                "      0  1  4",
                "    ! 1  2  4",
                "      2  3  6",
                "    --- 1,4 ----",
                "         a  b",
                "      0  1  4",
                "    ! 1  2  5",
                "      2  3  6",
            ]
        )

    def test_doctest_report_ndiff(self, pytester: Pytester):
        result = self._run_doctest_report(pytester, "ndiff")
        result.stdout.fnmatch_lines(
            [
                "         a  b",
                "      0  1  4",
                "    - 1  2  4",
                "    ?       ^",
                "    + 1  2  5",
                "    ?       ^",
                "      2  3  6",
            ]
        )

    @pytest.mark.parametrize("format", ["none", "only_first_failure"])
    def test_doctest_report_none_or_only_first_failure(self, pytester, format):
        # Both formats show plain Expected/Got sections instead of a diff.
        result = self._run_doctest_report(pytester, format)
        result.stdout.fnmatch_lines(
            [
                "Expected:",
                "       a  b",
                "    0  1  4",
                "    1  2  4",
                "    2  3  6",
                "Got:",
                "       a  b",
                "    0  1  4",
                "    1  2  5",
                "    2  3  6",
            ]
        )

    def test_doctest_report_invalid(self, pytester: Pytester):
        # An unknown format is rejected by argparse with a usage error.
        result = self._run_doctest_report(pytester, "obviously_invalid_format")
        result.stderr.fnmatch_lines(
            [
                "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
            ]
        )
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, pytester: Pytester):
    """Importing a mock object at module level must not break doctest
    collection of that module (mock objects can misbehave under the
    unwrapping done during collection)."""
    pytest.importorskip(mock_module)
    pytester.makepyfile(
        """
        from {mock_module} import call

        class Example(object):
            '''
            >>> 1 + 1
            2
            '''
        """.format(
            mock_module=mock_module
        )
    )
    result = pytester.runpytest("--doctest-modules")
    result.stdout.fnmatch_lines(["* 1 passed *"])
class Broken:
    """An object whose attribute lookup misbehaves on purpose.

    ``__getattr__`` is supposed to raise :class:`AttributeError` for missing
    attributes; this class raises :class:`KeyError` instead, so tests can
    exercise how introspection code copes with such broken objects.
    """

    def __getattr__(self, name):
        # Deliberately the wrong exception type for a failed attribute lookup.
        message = "This should be an AttributeError"
        raise KeyError(message)
@pytest.mark.parametrize(  # pragma: no branch (lambdas are not called)
    "stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True]
)
def test_warning_on_unwrap_of_broken_object(
    stop: Optional[Callable[[object], object]]
) -> None:
    """The patched ``inspect.unwrap`` must warn (not crash) when unwrapping
    an object whose ``__getattr__`` raises an unexpected exception type,
    and the patch must be fully undone afterwards."""
    bad_instance = Broken()
    # Sanity check: unwrap is the stock implementation before patching.
    assert inspect.unwrap.__module__ == "inspect"
    with _patch_unwrap_mock_aware():
        # Inside the context manager, unwrap has been replaced.
        assert inspect.unwrap.__module__ != "inspect"
        with pytest.warns(
            pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
        ):
            with pytest.raises(KeyError):
                inspect.unwrap(bad_instance, stop=stop)  # type: ignore[arg-type]
    # The original unwrap is restored on exit.
    assert inspect.unwrap.__module__ == "inspect"
def test_is_setup_py_not_named_setup_py(tmp_path: Path) -> None:
    """A setuptools-style file not named ``setup.py`` is not detected.

    Detection is based on the file name, not the contents.
    """
    not_setup_py = tmp_path.joinpath("not_setup.py")
    # Explicit encoding for determinism across platforms/locales, matching
    # the sibling test_is_setup_py_is_a_setup_py.
    not_setup_py.write_text('from setuptools import setup; setup(name="foo")', "utf-8")
    assert not _is_setup_py(not_setup_py)
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_is_a_setup_py(tmp_path: Path, mod: str) -> None:
    """A file named ``setup.py`` calling ``setup()`` is detected as such."""
    contents = f'from {mod} import setup; setup(name="foo")'
    setup_py = tmp_path / "setup.py"
    setup_py.write_text(contents, "utf-8")
    assert _is_setup_py(setup_py)
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_different_encoding(tmp_path: Path, mod: str) -> None:
    """Detection also works for a ``setup.py`` that is not UTF-8 encoded."""
    header = "# -*- coding: cp1252 -*-\n"
    body = 'from {} import setup; setup(name="foo", description="€")\n'.format(mod)
    setup_py = tmp_path / "setup.py"
    setup_py.write_bytes((header + body).encode("cp1252"))
    assert _is_setup_py(setup_py)
@pytest.mark.parametrize(
    "name, expected", [("__main__.py", True), ("__init__.py", False)]
)
def test_is_main_py(tmp_path: Path, name: str, expected: bool) -> None:
    """Only a file literally named ``__main__.py`` is recognised."""
    candidate = tmp_path / name
    assert _is_main_py(candidate) == expected
| mit | 1c2987329e6cbc9de2744899da2c3375 | 29.662703 | 109 | 0.458254 | 4.348509 | false | true | false | false |
tweepy/tweepy | tests/config.py | 2 | 1118 | import os
import unittest
import vcr
from tweepy.api import API
from tweepy.auth import OAuth1UserHandler
# Test account identifiers; real values can be injected via the environment
# when (re)recording cassettes.
user_id = os.environ.get('TWITTER_USER_ID', '1072250532645998596')
username = os.environ.get('TWITTER_USERNAME', 'TweepyDev')

# API credentials; defaults are blank because replayed cassettes do not need
# valid secrets (the Authorization header is filtered out of recordings below).
bearer_token = os.environ.get('BEARER_TOKEN', '')
consumer_key = os.environ.get('CONSUMER_KEY', '')
consumer_secret = os.environ.get('CONSUMER_SECRET', '')
access_token = os.environ.get('ACCESS_KEY', '')
access_token_secret = os.environ.get('ACCESS_SECRET', '')

# NOTE(review): os.environ.get returns a *string* when the variable is set,
# so any non-empty value — including "0" or "false" — is truthy here.
use_replay = os.environ.get('USE_REPLAY', True)

tape = vcr.VCR(
    cassette_library_dir='cassettes',
    filter_headers=['Authorization'],
    # Either use existing cassettes, or never use recordings:
    record_mode='none' if use_replay else 'all',
)
class TweepyTestCase(unittest.TestCase):
    """Base test case that wires up an authenticated API client per test."""

    def setUp(self):
        self.auth = create_auth()
        self.api = API(self.auth)
        self.api.retry_count = 2
        # No point waiting between retries when replaying recorded cassettes.
        self.api.retry_delay = 0 if use_replay else 5
def create_auth():
    """Return an OAuth 1.0a user-context handler built from module credentials."""
    return OAuth1UserHandler(
        consumer_key, consumer_secret, access_token, access_token_secret
    )
| mit | 97058c773f64462c61c36ad2b7eb3a1f | 26.95 | 72 | 0.690519 | 3.140449 | false | true | false | false |
tweepy/tweepy | setup.py | 2 | 2523 | #!/usr/bin/env python
import re
from setuptools import find_packages, setup
VERSION_FILE = "tweepy/__init__.py"
with open(VERSION_FILE) as version_file:
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file.read(), re.MULTILINE)
if match:
version = match.group(1)
else:
raise RuntimeError(f"Unable to find version string in {VERSION_FILE}.")
with open("README.md") as readme_file:
long_description = readme_file.read()
setup(
name="tweepy",
version=version,
description="Twitter library for Python",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="Joshua Roesslein",
author_email="tweepy@googlegroups.com",
url="https://www.tweepy.org/",
project_urls={
"Documentation": "https://tweepy.readthedocs.io",
"Issue Tracker": "https://github.com/tweepy/tweepy/issues",
"Source Code": "https://github.com/tweepy/tweepy",
},
download_url="https://pypi.org/project/tweepy/",
packages=find_packages(),
install_requires=[
"oauthlib>=3.2.0,<4",
"requests>=2.27.0,<3",
"requests-oauthlib>=1.2.0,<2",
],
extras_require={
"async": [
"aiohttp>=3.7.3,<4",
"async-lru>=1.0.3,<2",
],
"docs": [
"myst-parser==0.15.2",
"readthedocs-sphinx-search==0.1.1",
"sphinx==4.2.0",
"sphinx-hoverxref==0.7b1",
"sphinx-tabs==3.2.0",
"sphinx_rtd_theme==1.0.0",
],
"dev": [
"coverage>=4.4.2",
"coveralls>=2.1.0",
"tox>=3.21.0",
],
"socks": ["requests[socks]>=2.27.0,<3"],
"test": ["vcrpy>=1.10.3"],
},
test_suite="tests",
keywords="twitter library",
python_requires=">=3.7",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
],
zip_safe=True,
)
| mit | ee208b4876f191d9ee0872d7273629ad | 30.5375 | 75 | 0.550535 | 3.475207 | false | false | false | false |
tweepy/tweepy | tweepy/streaming.py | 2 | 38949 | # Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
# Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets
from collections import namedtuple
import json
import logging
from math import inf
from platform import python_version
import ssl
import traceback
from threading import Thread
from time import sleep
from typing import NamedTuple
import requests
from requests_oauthlib import OAuth1
import urllib3
import tweepy
from tweepy.client import BaseClient, Response
from tweepy.errors import TweepyException
from tweepy.models import Status
from tweepy.tweet import Tweet
log = logging.getLogger(__name__)
# Container for one parsed payload from the v2 filtered/sample stream:
# the Tweet itself, expansion objects, partial errors, and (filtered stream
# only) the rules the Tweet matched.
StreamResponse = namedtuple(
    "StreamResponse", ("data", "includes", "errors", "matching_rules")
)
class BaseStream:
    """Shared connection/retry machinery for :class:`Stream` (v1.1) and
    :class:`StreamingClient` (v2).

    Subclasses provide the URL/auth via their own ``_connect`` and override
    the ``on_*`` callbacks to consume data.
    """

    def __init__(self, *, chunk_size=512, daemon=False, max_retries=inf,
                 proxy=None, verify=True):
        self.chunk_size = chunk_size
        self.daemon = daemon
        self.max_retries = max_retries
        self.proxies = {"https": proxy} if proxy else {}
        self.verify = verify

        self.running = False
        self.session = requests.Session()
        self.thread = None
        self.user_agent = (
            f"Python/{python_version()} "
            f"Requests/{requests.__version__} "
            f"Tweepy/{tweepy.__version__}"
        )

    def _connect(
        self, method, url, auth=None, params=None, headers=None, body=None,
        timeout=21
    ):
        """Open the streaming connection and dispatch incoming lines.

        Runs until :meth:`disconnect` is called or ``max_retries``
        consecutive errors occur.  Implements Twitter's documented backoff:
        linear for network errors, exponential for HTTP errors (with a
        larger floor for 420/429 rate-limit responses).
        """
        self.running = True

        error_count = 0
        # https://developer.twitter.com/en/docs/twitter-api/v1/tweets/filter-realtime/guides/connecting
        # https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/integrate/handling-disconnections
        # https://developer.twitter.com/en/docs/twitter-api/tweets/volume-streams/integrate/handling-disconnections
        network_error_wait = 0
        network_error_wait_step = 0.25
        network_error_wait_max = 16
        http_error_wait = http_error_wait_start = 5
        http_error_wait_max = 320
        http_429_error_wait_start = 60

        self.session.headers["User-Agent"] = self.user_agent

        try:
            while self.running and error_count <= self.max_retries:
                try:
                    with self.session.request(
                        method, url, params=params, headers=headers, data=body,
                        timeout=timeout, stream=True, auth=auth,
                        verify=self.verify, proxies=self.proxies
                    ) as resp:
                        if resp.status_code == 200:
                            # Successful connection resets all backoff state.
                            error_count = 0
                            http_error_wait = http_error_wait_start
                            network_error_wait = 0

                            self.on_connect()
                            if not self.running:
                                break

                            for line in resp.iter_lines(
                                chunk_size=self.chunk_size
                            ):
                                if line:
                                    self.on_data(line)
                                else:
                                    # Blank lines are keep-alive signals.
                                    self.on_keep_alive()
                                if not self.running:
                                    break

                            if resp.raw.closed:
                                self.on_closed(resp)
                        else:
                            self.on_request_error(resp.status_code)
                            if not self.running:
                                break

                            # The error text is logged here instead of in
                            # on_request_error to keep on_request_error
                            # backwards-compatible. In a future version, the
                            # Response should be passed to on_request_error.
                            log.error(
                                "HTTP error response text: %s", resp.text
                            )

                            error_count += 1

                            if resp.status_code in (420, 429):
                                if http_error_wait < http_429_error_wait_start:
                                    http_error_wait = http_429_error_wait_start

                            sleep(http_error_wait)

                            http_error_wait *= 2
                            if http_error_wait > http_error_wait_max:
                                http_error_wait = http_error_wait_max
                except (requests.ConnectionError, requests.Timeout,
                        requests.exceptions.ChunkedEncodingError,
                        ssl.SSLError, urllib3.exceptions.ReadTimeoutError,
                        urllib3.exceptions.ProtocolError) as exc:
                    # This is still necessary, as a SSLError can actually be
                    # thrown when using Requests
                    # If it's not time out treat it like any other exception
                    if isinstance(exc, ssl.SSLError):
                        if not (exc.args and "timed out" in str(exc.args[0])):
                            raise

                    self.on_connection_error()
                    if not self.running:
                        break

                    # The error text is logged here instead of in
                    # on_connection_error to keep on_connection_error
                    # backwards-compatible. In a future version, the error
                    # should be passed to on_connection_error.
                    log.error(
                        "Connection error: %s",
                        "".join(
                            traceback.format_exception_only(type(exc), exc)
                        ).rstrip()
                    )

                    sleep(network_error_wait)

                    network_error_wait += network_error_wait_step
                    if network_error_wait > network_error_wait_max:
                        network_error_wait = network_error_wait_max
        except Exception as exc:
            self.on_exception(exc)
        finally:
            self.session.close()

            self.running = False
            self.on_disconnect()

    def _threaded_connect(self, *args, **kwargs):
        # Run _connect in a background thread; daemon-ness is configurable
        # so the stream can (optionally) keep the interpreter alive.
        self.thread = Thread(target=self._connect, name="Tweepy Stream",
                             args=args, kwargs=kwargs, daemon=self.daemon)
        self.thread.start()
        return self.thread

    def disconnect(self):
        """Disconnect the stream"""
        self.running = False

    def on_closed(self, response):
        """This is called when the stream has been closed by Twitter.

        Parameters
        ----------
        response : requests.Response
            The Response from Twitter
        """
        log.error("Stream connection closed by Twitter")

    def on_connect(self):
        """This is called after successfully connecting to the streaming API.
        """
        log.info("Stream connected")

    def on_connection_error(self):
        """This is called when the stream connection errors or times out."""
        log.error("Stream connection has errored or timed out")

    def on_disconnect(self):
        """This is called when the stream has disconnected."""
        log.info("Stream disconnected")

    def on_exception(self, exception):
        """This is called when an unhandled exception occurs.

        Parameters
        ----------
        exception : Exception
            The unhandled exception
        """
        log.exception("Stream encountered an exception")

    def on_keep_alive(self):
        """This is called when a keep-alive signal is received."""
        log.debug("Received keep-alive signal")

    def on_request_error(self, status_code):
        """This is called when a non-200 HTTP status code is encountered.

        Parameters
        ----------
        status_code : int
            The HTTP status code encountered
        """
        log.error("Stream encountered HTTP error: %d", status_code)
class Stream(BaseStream):
    """Filter and sample realtime Tweets with Twitter API v1.1

    .. note::

        New Twitter Developer Apps created on or after April 29, 2022 `will not
        be able to gain access to v1.1 statuses/sample and v1.1
        statuses/filter`_, the Twitter API v1.1 endpoints that :class:`Stream`
        uses. Twitter API v2 can be used instead with :class:`StreamingClient`.

    Parameters
    ----------
    consumer_key : str
        Twitter API Consumer Key
    consumer_secret : str
        Twitter API Consumer Secret
    access_token: str
        Twitter API Access Token
    access_token_secret : str
        Twitter API Access Token Secret
    chunk_size : int
        The default socket.read size. Default to 512, less than half the size
        of a Tweet so that it reads Tweets with the minimal latency of 2 reads
        per Tweet. Values higher than ~1kb will increase latency by waiting for
        more data to arrive but may also increase throughput by doing fewer
        socket read calls.
    daemon : bool
        Whether or not to use a daemon thread when using a thread to run the
        stream
    max_retries : int
        Max number of times to retry connecting the stream
    proxy : str | None
        URL of the proxy to use when connecting to the stream
    verify : bool | str
        Either a boolean, in which case it controls whether to verify the
        server’s TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use.

    Attributes
    ----------
    running : bool
        Whether there's currently a stream running
    session : :class:`requests.Session`
        Requests Session used to connect to the stream
    thread : :class:`threading.Thread` | None
        Thread used to run the stream
    user_agent : str
        User agent used when connecting to the stream

    .. _will not be able to gain access to v1.1 statuses/sample and v1.1
        statuses/filter: https://twittercommunity.com/t/deprecation-announcement-removing-compliance-messages-from-statuses-filter-and-retiring-statuses-sample-from-the-twitter-api-v1-1/170500
    """

    def __init__(self, consumer_key, consumer_secret, access_token,
                 access_token_secret, **kwargs):
        """__init__( \
            consumer_key, consumer_secret, access_token, access_token_secret, \
            chunk_size=512, daemon=False, max_retries=inf, proxy=None, \
            verify=True \
        )
        """
        # OAuth 1.0a user-context credentials, used by _connect below.
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        super().__init__(**kwargs)

    def _connect(self, method, endpoint, **kwargs):
        # Sign the request with OAuth 1.0a and delegate the connection /
        # retry loop to BaseStream._connect.  The v1.1 stream sends a
        # keep-alive at least every 30s; 90s timeout allows several misses.
        auth = OAuth1(self.consumer_key, self.consumer_secret,
                      self.access_token, self.access_token_secret)
        url = f"https://stream.twitter.com/1.1/{endpoint}.json"
        super()._connect(method, url, auth=auth, timeout=90, **kwargs)

    def filter(self, *, follow=None, track=None, locations=None,
               filter_level=None, languages=None, stall_warnings=False,
               threaded=False):
        """Filter realtime Tweets

        .. deprecated:: 4.9
            `The delivery of compliance messages through the Twitter API v1.1
            endpoint this method uses has been deprecated, and they will stop
            being delivered beginning October 29, 2022.`_ Twitter API v2 can be
            used instead with :meth:`StreamingClient.filter` and/or
            :class:`Client` :ref:`batch compliance <Batch compliance>` methods.

        Parameters
        ----------
        follow : list[int | str] | None
            User IDs, indicating the users to return statuses for in the stream
        track : list[str] | None
            Keywords to track
        locations : list[float] | None
            Specifies a set of bounding boxes to track
        filter_level : str | None
            Setting this parameter to one of none, low, or medium will set the
            minimum value of the filter_level Tweet attribute required to be
            included in the stream. The default value is none, which includes
            all available Tweets.

            When displaying a stream of Tweets to end users (dashboards or live
            feeds at a presentation or conference, for example) it is suggested
            that you set this value to medium.
        languages : list[str] | None
            Setting this parameter to a comma-separated list of `BCP 47`_
            language identifiers corresponding to any of the languages listed
            on Twitter’s `advanced search`_ page will only return Tweets that
            have been detected as being written in the specified languages. For
            example, connecting with language=en will only stream Tweets
            detected to be in the English language.
        stall_warnings : bool
            Specifies whether stall warnings should be delivered
        threaded : bool
            Whether or not to use a thread to run the stream

        Raises
        ------
        TweepyException
            When the stream is already connected or when the number of location
            coordinates is not a multiple of 4

        Returns
        -------
        threading.Thread | None
            The thread if ``threaded`` is set to ``True``, else ``None``

        References
        ----------
        https://developer.twitter.com/en/docs/twitter-api/v1/tweets/filter-realtime/api-reference/post-statuses-filter

        .. _BCP 47: https://tools.ietf.org/html/bcp47
        .. _advanced search: https://twitter.com/search-advanced
        .. _The delivery of compliance messages through the Twitter API v1.1
            endpoint this method uses has been deprecated, and they will stop
            being delivered beginning October 29, 2022.: https://twittercommunity.com/t/deprecation-announcement-removing-compliance-messages-from-statuses-filter-and-retiring-statuses-sample-from-the-twitter-api-v1-1/170500
        """
        if self.running:
            raise TweepyException("Stream is already connected")

        method = "POST"
        endpoint = "statuses/filter"

        headers = {"Content-Type": "application/x-www-form-urlencoded"}

        # The endpoint expects comma-separated values in the POST body.
        body = {}
        if follow:
            body["follow"] = ','.join(map(str, follow))
        if track:
            body["track"] = ','.join(map(str, track))
        if locations and len(locations) > 0:
            if len(locations) % 4:
                raise TweepyException(
                    "Number of location coordinates should be a multiple of 4"
                )
            # Bounding boxes are lon/lat pairs; the API requires at most
            # 4 decimal places of precision.
            body["locations"] = ','.join(f"{l:.4f}" for l in locations)
        if filter_level:
            body["filter_level"] = filter_level
        if languages:
            body["language"] = ','.join(map(str, languages))
        if stall_warnings:
            body["stall_warnings"] = stall_warnings

        if threaded:
            return self._threaded_connect(method, endpoint, headers=headers,
                                          body=body)
        else:
            self._connect(method, endpoint, headers=headers, body=body)

    def sample(self, *, languages=None, stall_warnings=False, threaded=False):
        """Sample realtime Tweets

        .. deprecated:: 4.9
            `The Twitter API v1.1 endpoint this method uses is now deprecated
            and will be retired on October 29, 2022.`_ Twitter API v2 can be
            used instead with :meth:`StreamingClient.sample`.

        Parameters
        ----------
        languages : list[str] | None
            Setting this parameter to a comma-separated list of `BCP 47`_
            language identifiers corresponding to any of the languages listed
            on Twitter’s `advanced search`_ page will only return Tweets that
            have been detected as being written in the specified languages. For
            example, connecting with language=en will only stream Tweets
            detected to be in the English language.
        stall_warnings : bool
            Specifies whether stall warnings should be delivered
        threaded : bool
            Whether or not to use a thread to run the stream

        Raises
        ------
        TweepyException
            When the stream is already connected

        Returns
        -------
        threading.Thread | None
            The thread if ``threaded`` is set to ``True``, else ``None``

        References
        ----------
        https://developer.twitter.com/en/docs/twitter-api/v1/tweets/sample-realtime/api-reference/get-statuses-sample

        .. _BCP 47: https://tools.ietf.org/html/bcp47
        .. _advanced search: https://twitter.com/search-advanced
        .. _The Twitter API v1.1 endpoint this method uses is now deprecated
            and will be retired on October 29, 2022.: https://twittercommunity.com/t/deprecation-announcement-removing-compliance-messages-from-statuses-filter-and-retiring-statuses-sample-from-the-twitter-api-v1-1/170500
        """
        if self.running:
            raise TweepyException("Stream is already connected")

        method = "GET"
        endpoint = "statuses/sample"

        params = {}
        if languages:
            params["language"] = ','.join(map(str, languages))
        if stall_warnings:
            params["stall_warnings"] = "true"

        if threaded:
            return self._threaded_connect(method, endpoint, params=params)
        else:
            self._connect(method, endpoint, params=params)

    def on_data(self, raw_data):
        """This is called when raw data is received from the stream.
        This method handles sending the data to other methods based on the
        message type.

        Parameters
        ----------
        raw_data : JSON
            The raw data from the stream

        References
        ----------
        https://developer.twitter.com/en/docs/twitter-api/v1/tweets/filter-realtime/guides/streaming-message-types
        """
        data = json.loads(raw_data)

        # Dispatch on the distinguishing key of each message type; the
        # presence of in_reply_to_status_id marks a regular Tweet.
        if "in_reply_to_status_id" in data:
            status = Status.parse(None, data)
            return self.on_status(status)
        if "delete" in data:
            delete = data["delete"]["status"]
            return self.on_delete(delete["id"], delete["user_id"])
        if "disconnect" in data:
            return self.on_disconnect_message(data["disconnect"])
        if "limit" in data:
            return self.on_limit(data["limit"]["track"])
        if "scrub_geo" in data:
            return self.on_scrub_geo(data["scrub_geo"])
        if "status_withheld" in data:
            return self.on_status_withheld(data["status_withheld"])
        if "user_withheld" in data:
            return self.on_user_withheld(data["user_withheld"])
        if "warning" in data:
            return self.on_warning(data["warning"])

        log.error("Received unknown message type: %s", raw_data)

    def on_status(self, status):
        """This is called when a status is received.

        Parameters
        ----------
        status : Status
            The Status received
        """
        log.debug("Received status: %d", status.id)

    def on_delete(self, status_id, user_id):
        """This is called when a status deletion notice is received.

        Parameters
        ----------
        status_id : int
            The ID of the deleted Tweet
        user_id : int
            The ID of the author of the Tweet
        """
        log.debug("Received status deletion notice: %d", status_id)

    def on_disconnect_message(self, message):
        """This is called when a disconnect message is received.

        Parameters
        ----------
        message : JSON
            The disconnect message
        """
        log.warning("Received disconnect message: %s", message)

    def on_limit(self, track):
        """This is called when a limit notice is received.

        Parameters
        ----------
        track : int
            Total count of the number of undelivered Tweets since the
            connection was opened
        """
        log.debug("Received limit notice: %d", track)

    def on_scrub_geo(self, notice):
        """This is called when a location deletion notice is received.

        Parameters
        ----------
        notice : JSON
            The location deletion notice
        """
        log.debug("Received location deletion notice: %s", notice)

    def on_status_withheld(self, notice):
        """This is called when a status withheld content notice is received.

        Parameters
        ----------
        notice : JSON
            The status withheld content notice
        """
        log.debug("Received status withheld content notice: %s", notice)

    def on_user_withheld(self, notice):
        """This is called when a user withheld content notice is received.

        Parameters
        ----------
        notice : JSON
            The user withheld content notice
        """
        log.debug("Received user withheld content notice: %s", notice)

    def on_warning(self, warning):
        """This is called when a stall warning message is received.

        Parameters
        ----------
        warning : JSON
            The stall warning
        """
        log.warning("Received stall warning: %s", warning)
class StreamingClient(BaseClient, BaseStream):
"""Filter and sample realtime Tweets with Twitter API v2
.. versionadded:: 4.6
Parameters
----------
bearer_token : str
Twitter API Bearer Token
return_type : type[dict | requests.Response | Response]
Type to return from requests to the API
wait_on_rate_limit : bool
Whether or not to wait before retrying when a rate limit is
encountered. This applies to requests besides those that connect to a
stream (see ``max_retries``).
chunk_size : int
The default socket.read size. Default to 512, less than half the size
of a Tweet so that it reads Tweets with the minimal latency of 2 reads
per Tweet. Values higher than ~1kb will increase latency by waiting for
more data to arrive but may also increase throughput by doing fewer
socket read calls.
daemon : bool
Whether or not to use a daemon thread when using a thread to run the
stream
max_retries : int
Max number of times to retry connecting the stream
proxy : str | None
URL of the proxy to use when connecting to the stream
verify : bool | str
Either a boolean, in which case it controls whether to verify the
server’s TLS certificate, or a string, in which case it must be a path
to a CA bundle to use.
Attributes
----------
running : bool
Whether there's currently a stream running
session : :class:`requests.Session`
Requests Session used to connect to the stream
thread : :class:`threading.Thread` | None
Thread used to run the stream
user_agent : str
User agent used when connecting to the stream
"""
    def __init__(self, bearer_token, *, return_type=Response,
                 wait_on_rate_limit=False, **kwargs):
        """__init__( \
            bearer_token, *, return_type=Response, wait_on_rate_limit=False, \
            chunk_size=512, daemon=False, max_retries=inf, proxy=None, \
            verify=True \
        )
        """
        # Initialize both bases explicitly: BaseClient handles the REST
        # requests (rule management), BaseStream the streaming connection.
        BaseClient.__init__(self, bearer_token, return_type=return_type,
                            wait_on_rate_limit=wait_on_rate_limit)
        BaseStream.__init__(self, **kwargs)
    def _connect(self, method, endpoint, **kwargs):
        # v2 streams authenticate with an app-only Bearer token on the
        # session rather than per-request OAuth 1.0a signing.
        self.session.headers["Authorization"] = f"Bearer {self.bearer_token}"
        url = f"https://api.twitter.com/2/tweets/{endpoint}/stream"
        super()._connect(method, url, **kwargs)
def _process_data(self, data, data_type=None):
if data_type is StreamRule:
if isinstance(data, list):
rules = []
for rule in data:
if "tag" in rule:
rules.append(StreamRule(
value=rule["value"], id=rule["id"], tag=rule["tag"]
))
else:
rules.append(StreamRule(value=rule["value"],
id=rule["id"]))
return rules
elif data is not None:
if "tag" in data:
return StreamRule(value=data["value"], id=data["id"],
tag=data["tag"])
else:
return StreamRule(value=data["value"], id=data["id"])
else:
super()._process_data(data, data_type=data_type)
def add_rules(self, add, **params):
"""add_rules(add, *, dry_run)
Add rules to filtered stream.
Parameters
----------
add : list[StreamRule] | StreamRule
Specifies the operation you want to perform on the rules.
dry_run : bool
Set to true to test the syntax of your rule without submitting it.
This is useful if you want to check the syntax of a rule before
removing one or more of your existing rules.
Returns
-------
dict | requests.Response | Response
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules
"""
json = {"add": []}
if isinstance(add, StreamRule):
add = (add,)
for rule in add:
if rule.tag is not None:
json["add"].append({"value": rule.value, "tag": rule.tag})
else:
json["add"].append({"value": rule.value})
return self._make_request(
"POST", f"/2/tweets/search/stream/rules", params=params,
endpoint_parameters=("dry_run",), json=json, data_type=StreamRule
)
def delete_rules(self, ids, **params):
"""delete_rules(ids, *, dry_run)
Delete rules from filtered stream.
Parameters
----------
ids : int | str | list[int | str | StreamRule] | StreamRule
Array of rule IDs, each one representing a rule already active in
your stream. IDs must be submitted as strings.
dry_run : bool
Set to true to test the syntax of your rule without submitting it.
This is useful if you want to check the syntax of a rule before
removing one or more of your existing rules.
Returns
-------
dict | requests.Response | Response
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules
"""
json = {"delete": {"ids": []}}
if isinstance(ids, (int, str, StreamRule)):
ids = (ids,)
for id in ids:
if isinstance(id, StreamRule):
json["delete"]["ids"].append(str(id.id))
else:
json["delete"]["ids"].append(str(id))
return self._make_request(
"POST", f"/2/tweets/search/stream/rules", params=params,
endpoint_parameters=("dry_run",), json=json, data_type=StreamRule
)
def filter(self, *, threaded=False, **params):
"""filter( \
*, backfill_minutes=None, expansions=None, media_fields=None, \
place_fields=None, poll_fields=None, tweet_fields=None, \
user_fields=None, threaded=False \
)
Streams Tweets in real-time based on a specific set of filter rules.
If you are using the academic research product track, you can connect
up to two `redundant connections <filter redundant connections_>`_ to
maximize your streaming up-time.
The Tweets returned by this endpoint count towards the Project-level
`Tweet cap`_.
Parameters
----------
backfill_minutes : int | None
By passing this parameter, you can request up to five (5) minutes
worth of streaming data that you might have missed during a
disconnection to be delivered to you upon reconnection. The
backfilled Tweets will automatically flow through the reconnected
stream, with older Tweets generally being delivered before any
newly matching Tweets. You must include a whole number between 1
and 5 as the value to this parameter.
This feature will deliver duplicate Tweets, meaning that if you
were disconnected for 90 seconds, and you requested two minutes of
backfill, you will receive 30 seconds worth of duplicate Tweets.
Due to this, you should make sure your system is tolerant of
duplicate data.
This feature is currently only available to the Academic Research
product track.
expansions : list[str] | str
:ref:`expansions_parameter`
media_fields : list[str] | str
:ref:`media_fields_parameter`
place_fields : list[str] | str
:ref:`place_fields_parameter`
poll_fields : list[str] | str
:ref:`poll_fields_parameter`
tweet_fields : list[str] | str
:ref:`tweet_fields_parameter`
user_fields : list[str] | str
:ref:`user_fields_parameter`
threaded : bool
Whether or not to use a thread to run the stream
Raises
------
TweepyException
When the stream is already connected
Returns
-------
threading.Thread | None
The thread if ``threaded`` is set to ``True``, else ``None``
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/get-tweets-search-stream
.. _filter redundant connections: https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/integrate/recovery-and-redundancy-features
.. _Tweet cap: https://developer.twitter.com/en/docs/twitter-api/tweet-caps
"""
if self.running:
raise TweepyException("Stream is already connected")
method = "GET"
endpoint = "search"
params = self._process_params(
params, endpoint_parameters=(
"backfill_minutes", "expansions", "media.fields",
"place.fields", "poll.fields", "tweet.fields", "user.fields"
)
)
if threaded:
return self._threaded_connect(method, endpoint, params=params)
else:
self._connect(method, endpoint, params=params)
def get_rules(self, **params):
"""get_rules(*, ids)
Return a list of rules currently active on the streaming endpoint,
either as a list or individually.
Parameters
----------
ids : list[str] | str
Comma-separated list of rule IDs. If omitted, all rules are
returned.
Returns
-------
dict | requests.Response | Response
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/get-tweets-search-stream-rules
"""
return self._make_request(
"GET", f"/2/tweets/search/stream/rules", params=params,
endpoint_parameters=("ids",), data_type=StreamRule
)
def sample(self, *, threaded=False, **params):
"""sample( \
*, backfill_minutes=None, expansions=None, media_fields=None, \
place_fields=None, poll_fields=None, tweet_fields=None, \
user_fields=None, threaded=False \
)
Streams about 1% of all Tweets in real-time.
If you are using the academic research product track, you can connect
up to two `redundant connections <sample redundant connections_>`_ to
maximize your streaming up-time.
Parameters
----------
backfill_minutes : int | None
By passing this parameter, you can request up to five (5) minutes
worth of streaming data that you might have missed during a
disconnection to be delivered to you upon reconnection. The
backfilled Tweets will automatically flow through the reconnected
stream, with older Tweets generally being delivered before any
newly matching Tweets. You must include a whole number between 1
and 5 as the value to this parameter.
This feature will deliver duplicate Tweets, meaning that if you
were disconnected for 90 seconds, and you requested two minutes of
backfill, you will receive 30 seconds worth of duplicate Tweets.
Due to this, you should make sure your system is tolerant of
duplicate data.
This feature is currently only available to the Academic Research
product track.
expansions : list[str] | str
:ref:`expansions_parameter`
media_fields : list[str] | str
:ref:`media_fields_parameter`
place_fields : list[str] | str
:ref:`place_fields_parameter`
poll_fields : list[str] | str
:ref:`poll_fields_parameter`
tweet_fields : list[str] | str
:ref:`tweet_fields_parameter`
user_fields : list[str] | str
:ref:`user_fields_parameter`
threaded : bool
Whether or not to use a thread to run the stream
Raises
------
TweepyException
When the stream is already connected
Returns
-------
threading.Thread | None
The thread if ``threaded`` is set to ``True``, else ``None``
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/volume-streams/api-reference/get-tweets-sample-stream
.. _sample redundant connections: https://developer.twitter.com/en/docs/twitter-api/tweets/volume-streams/integrate/recovery-and-redundancy-features
"""
if self.running:
raise TweepyException("Stream is already connected")
method = "GET"
endpoint = "sample"
params = self._process_params(
params, endpoint_parameters=(
"backfill_minutes", "expansions", "media.fields",
"place.fields", "poll.fields", "tweet.fields", "user.fields"
)
)
if threaded:
return self._threaded_connect(method, endpoint, params=params)
else:
self._connect(method, endpoint, params=params)
def on_data(self, raw_data):
"""This is called when raw data is received from the stream.
This method handles sending the data to other methods.
Parameters
----------
raw_data : JSON
The raw data from the stream
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/integrate/consuming-streaming-data
"""
data = json.loads(raw_data)
tweet = None
includes = {}
errors = []
matching_rules = []
if "data" in data:
tweet = Tweet(data["data"])
self.on_tweet(tweet)
if "includes" in data:
includes = self._process_includes(data["includes"])
self.on_includes(includes)
if "errors" in data:
errors = data["errors"]
self.on_errors(errors)
if "matching_rules" in data:
matching_rules = [
StreamRule(id=rule["id"], tag=rule["tag"])
for rule in data["matching_rules"]
]
self.on_matching_rules(matching_rules)
self.on_response(
StreamResponse(tweet, includes, errors, matching_rules)
)
def on_tweet(self, tweet):
"""This is called when a Tweet is received.
Parameters
----------
tweet : Tweet
The Tweet received
"""
pass
def on_includes(self, includes):
"""This is called when includes are received.
Parameters
----------
includes : dict
The includes received
"""
pass
def on_errors(self, errors):
"""This is called when errors are received.
Parameters
----------
errors : dict
The errors received
"""
log.error("Received errors: %s", errors)
def on_matching_rules(self, matching_rules):
"""This is called when matching rules are received.
Parameters
----------
matching_rules : list[StreamRule]
The matching rules received
"""
pass
def on_response(self, response):
"""This is called when a response is received.
Parameters
----------
response : StreamResponse
The response received
"""
log.debug("Received response: %s", response)
class StreamRule(NamedTuple):
    """A single rule for the filtered stream.

    .. versionadded:: 4.6

    Parameters
    ----------
    value : str | None
        The rule text. A `Standard Project`_ at the Basic `access level`_
        may use the basic set of `operators`_, up to 25 concurrent rules,
        each up to 512 characters long. An `Academic Research Project`_ at
        the Basic access level may use all available operators, up to 1,000
        concurrent rules, each up to 1,024 characters long.
    tag : str | None
        A free-form label used to identify which rules matched a Tweet in
        the streaming response. Tags can be the same across rules.
    id : str | None
        Unique identifier of this rule, returned as a string.

    .. _Standard Project: https://developer.twitter.com/en/docs/projects
    .. _access level: https://developer.twitter.com/en/products/twitter-api/early-access/guide#na_1
    .. _operators: https://developer.twitter.com/en/docs/twitter-api/tweets/search/integrate/build-a-query
    .. _Academic Research Project: https://developer.twitter.com/en/docs/projects
    """

    value: str = None
    tag: str = None
    id: str = None
| mit | 70683ca59c2ef1b205606f27d2f9ef35 | 36.624155 | 224 | 0.576513 | 4.607312 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/examples/imagenet_featadvs/model.py | 1 | 1273 | # pylint: disable=missing-docstring
import functools
import tensorflow as tf
from cleverhans.initializers import HeReLuNormalInitializer
from cleverhans.model import Model
class ModelImageNetCNN(Model):
    """AlexNet-style convolutional network exposed via the cleverhans Model API."""

    def __init__(self, scope, nb_classes=1000, **kwargs):
        del kwargs
        Model.__init__(self, scope, nb_classes, locals())

    def fprop(self, x, **kwargs):
        """Forward pass; returns the fc7 features, logits, and softmax probs."""
        del kwargs
        # All conv layers share the same kernel/stride/activation settings.
        conv = functools.partial(
            tf.layers.conv2d,
            kernel_size=3,
            strides=2,
            padding="valid",
            activation=tf.nn.relu,
            kernel_initializer=HeReLuNormalInitializer,
        )
        dense = functools.partial(
            tf.layers.dense, kernel_initializer=HeReLuNormalInitializer
        )
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            hidden = x
            for depth in (96, 256, 384, 384, 256):
                hidden = conv(hidden, depth)
            flat = tf.layers.flatten(hidden)
            fc6 = dense(flat, 4096, tf.nn.relu)
            fc7 = dense(fc6, 4096, tf.nn.relu)
            logits = dense(fc7, 1000)
            return {
                "fc7": fc7,
                self.O_LOGITS: logits,
                self.O_PROBS: tf.nn.softmax(logits=logits),
            }
def make_imagenet_cnn(input_shape=(None, 224, 224, 3)):
    # Factory for the ImageNet CNN model.
    # NOTE(review): ``input_shape`` is currently ignored -- ModelImageNetCNN
    # takes no shape argument, so this parameter only documents the expected
    # NHWC input layout. Confirm before relying on non-default shapes.
    return ModelImageNetCNN("imagenet")
| mit | 0362c955f9e4b15e70fb0f062a2636b0 | 31.641026 | 88 | 0.595444 | 3.478142 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/cleverhans/attacks/sparse_l1_descent.py | 1 | 14083 | """
The SparseL1Descent attack.
"""
import warnings
from distutils.version import LooseVersion
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans import utils_tf
from cleverhans.utils_tf import clip_eta, random_lp_vector
from cleverhans.compat import reduce_max, reduce_sum, softmax_cross_entropy_with_logits
class SparseL1Descent(Attack):
    """
    This class implements a variant of Projected Gradient Descent for the l1-norm
    (Tramer and Boneh 2019). The l1-norm case is more tricky than the l-inf and l2
    cases covered by the ProjectedGradientDescent class, because the steepest
    descent direction for the l1-norm is too sparse (it updates a single
    coordinate in the adversarial perturbation in each step). This attack has an
    additional parameter that controls the sparsity of the update step. For
    moderately sparse update steps, the attack vastly outperforms Projected
    Steepest Descent and is competitive with other attacks targeted at the l1-norm
    such as the ElasticNetMethod attack (which is much more computationally
    expensive).
    Paper link (Tramer and Boneh 2019): https://arxiv.org/pdf/1904.13000.pdf
    :param model: cleverhans.model.Model
    :param sess: optional tf.Session
    :param dtypestr: dtype of the data
    :param kwargs: passed through to super constructor
    """
    def __init__(self, model, sess=None, dtypestr="float32", **kwargs):
        """
        Create a SparseL1Descent instance.
        Note: the model parameter should be an instance of the
        cleverhans.model.Model abstraction provided by CleverHans.
        """
        super(SparseL1Descent, self).__init__(
            model, sess=sess, dtypestr=dtypestr, **kwargs
        )
        # Parameters that can be fed as tensors/placeholders at run time.
        self.feedable_kwargs = (
            "eps",
            "eps_iter",
            "y",
            "y_target",
            "clip_min",
            "clip_max",
            "grad_sparsity",
        )
        # Parameters that change the structure of the computation graph.
        self.structural_kwargs = ["nb_iter", "rand_init", "clip_grad", "sanity_checks"]
    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.
        :param x: The model's symbolic inputs.
        :param kwargs: See `parse_params`
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)
        asserts = []
        # If a data range was specified, check that the input was in that range
        if self.clip_min is not None:
            asserts.append(
                utils_tf.assert_greater_equal(x, tf.cast(self.clip_min, x.dtype))
            )
        if self.clip_max is not None:
            asserts.append(
                utils_tf.assert_less_equal(x, tf.cast(self.clip_max, x.dtype))
            )
        # Initialize loop variables
        # Either start from a random point sampled in the l1-ball of radius
        # eps around x, or start from x itself (eta == 0).
        if self.rand_init:
            eta = random_lp_vector(
                tf.shape(x), ord=1, eps=tf.cast(self.eps, x.dtype), dtype=x.dtype
            )
        else:
            eta = tf.zeros(tf.shape(x))
        # Clip eta
        eta = clip_eta(eta, ord=1, eps=self.eps)
        adv_x = x + eta
        if self.clip_min is not None or self.clip_max is not None:
            adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
        if self.y_target is not None:
            y = self.y_target
            targeted = True
        elif self.y is not None:
            y = self.y
            targeted = False
        else:
            # No labels were supplied: use the model's own predictions as
            # labels (one-hot of the argmax) and stop gradients through them.
            model_preds = self.model.get_probs(x)
            preds_max = tf.reduce_max(model_preds, 1, keepdims=True)
            y = tf.to_float(tf.equal(model_preds, preds_max))
            y = tf.stop_gradient(y)
            targeted = False
            del model_preds
        y_kwarg = "y_target" if targeted else "y"
        def cond(i, _):
            """Iterate until requested number of iterations is completed"""
            return tf.less(i, self.nb_iter)
        def body(i, adv_x):
            """Do a projected gradient step"""
            labels, _ = self.get_or_guess_labels(adv_x, {y_kwarg: y})
            logits = self.model.get_logits(adv_x)
            adv_x = sparse_l1_descent(
                adv_x,
                logits,
                y=labels,
                eps=self.eps_iter,
                q=self.grad_sparsity,
                clip_min=self.clip_min,
                clip_max=self.clip_max,
                clip_grad=self.clip_grad,
                targeted=(self.y_target is not None),
                sanity_checks=self.sanity_checks,
            )
            # Clipping perturbation eta to the l1-ball
            eta = adv_x - x
            eta = clip_eta(eta, ord=1, eps=self.eps)
            adv_x = x + eta
            # Redo the clipping.
            # Subtracting and re-adding eta can add some small numerical error.
            if self.clip_min is not None or self.clip_max is not None:
                adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
            return i + 1, adv_x
        # Run nb_iter projected-descent steps inside the TF graph.
        _, adv_x = tf.while_loop(
            cond,
            body,
            (tf.zeros([]), adv_x),
            back_prop=True,
            maximum_iterations=self.nb_iter,
        )
        # Asserts run only on CPU.
        # When multi-GPU eval code tries to force all PGD ops onto GPU, this
        # can cause an error.
        common_dtype = tf.float32
        asserts.append(
            utils_tf.assert_less_equal(
                tf.cast(self.eps_iter, dtype=common_dtype),
                tf.cast(self.eps, dtype=common_dtype),
            )
        )
        if self.sanity_checks:
            with tf.control_dependencies(asserts):
                adv_x = tf.identity(adv_x)
        return adv_x
    def parse_params(
        self,
        eps=10.0,
        eps_iter=1.0,
        nb_iter=20,
        y=None,
        clip_min=None,
        clip_max=None,
        y_target=None,
        rand_init=False,
        clip_grad=False,
        grad_sparsity=99,
        sanity_checks=True,
        **kwargs
    ):
        """
        Take in a dictionary of parameters and applies attack-specific checks
        before saving them as attributes.
        Attack-specific parameters:
        :param eps: (optional float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (optional float) step size for each attack iteration
        :param nb_iter: (optional int) Number of attack iterations.
        :param y: (optional) A tensor with the true labels.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        :param clip_grad: (optional bool) Ignore gradient components
                          at positions where the input is already at the boundary
                          of the domain, and the update step will get clipped out.
        :param grad_sparsity (optional) Relative sparsity of the gradient update
                             step, in percent. Only gradient values larger
                             than this percentile are retained. This parameter can
                             be a scalar, or a vector of the same length as the
                             input batch dimension.
        :param sanity_checks: bool Insert tf asserts checking values
            (Some tests need to run with no sanity checks because the
             tests intentionally configure the attack strangely)
        """
        # Save attack-specific parameters
        self.eps = eps
        self.rand_init = rand_init
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.y = y
        self.y_target = y_target
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.clip_grad = clip_grad
        self.grad_sparsity = grad_sparsity
        if isinstance(eps, float) and isinstance(eps_iter, float):
            # If these are both known at compile time, we can check before anything
            # is run. If they are tf, we can't check them yet.
            assert eps_iter <= eps, (eps_iter, eps)
        if self.y is not None and self.y_target is not None:
            raise ValueError("Must not set both y and y_target")
        if self.clip_grad and (self.clip_min is None or self.clip_max is None):
            raise ValueError("Must set clip_min and clip_max if clip_grad is set")
        # The grad_sparsity argument governs the sparsity of the gradient
        # update. It indicates the percentile value above which gradient entries
        # are retained. It can be specified as a scalar or as a 1-dimensional
        # vector of the same size as the input's batch dimension.
        if isinstance(self.grad_sparsity, int) or isinstance(self.grad_sparsity, float):
            if not 0 < self.grad_sparsity < 100:
                raise ValueError("grad_sparsity should be in (0, 100)")
        else:
            self.grad_sparsity = tf.convert_to_tensor(self.grad_sparsity)
            if len(self.grad_sparsity.shape) > 1:
                raise ValueError("grad_sparsity should either be a scalar or a vector")
        self.sanity_checks = sanity_checks
        if len(kwargs.keys()) > 0:
            warnings.warn(
                "kwargs is unused and will be removed on or after " "2019-04-26."
            )
        return True
def sparse_l1_descent(
    x,
    logits,
    y=None,
    eps=1.0,
    q=99,
    clip_min=None,
    clip_max=None,
    clip_grad=False,
    targeted=False,
    sanity_checks=True,
):
    """
    TensorFlow implementation of the Dense L1 Descent Method.
    :param x: the input placeholder
    :param logits: output of model.get_logits
    :param y: (optional) A placeholder for the true labels. If targeted
              is true, then provide the target label. Otherwise, only provide
              this parameter if you'd like to use true labels when crafting
              adversarial samples. Otherwise, model predictions are used as
              labels to avoid the "label leaking" effect (explained in this
              paper: https://arxiv.org/abs/1611.01236). Default is None.
              Labels should be one-hot-encoded.
    :param eps: the epsilon (input variation parameter)
    :param q: the percentile above which gradient values are retained. Either a
              scalar or a vector of same length as the input batch dimension.
    :param clip_min: Minimum float value for adversarial example components
    :param clip_max: Maximum float value for adversarial example components
    :param clip_grad: (optional bool) Ignore gradient components
                      at positions where the input is already at the boundary
                      of the domain, and the update step will get clipped out.
    :param targeted: Is the attack targeted or untargeted? Untargeted, the
                     default, will try to make the label incorrect. Targeted
                     will instead try to move in the direction of being more
                     like y.
    :return: a tensor for the adversarial example
    """
    asserts = []
    # If a data range was specified, check that the input was in that range
    if clip_min is not None:
        asserts.append(utils_tf.assert_greater_equal(x, tf.cast(clip_min, x.dtype)))
    if clip_max is not None:
        asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
    # Make sure the caller has not passed probs by accident
    assert logits.op.type != "Softmax"
    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        preds_max = reduce_max(logits, 1, keepdims=True)
        y = tf.to_float(tf.equal(logits, preds_max))
        y = tf.stop_gradient(y)
    # Normalize the (possibly tied) one-hot labels to sum to 1 per example.
    y = y / reduce_sum(y, 1, keepdims=True)
    # Compute loss
    loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
    if targeted:
        loss = -loss
    # Define gradient of loss wrt input
    (grad,) = tf.gradients(loss, x)
    if clip_grad:
        grad = utils_tf.zero_out_clipped_grads(grad, x, clip_min, clip_max)
    red_ind = list(range(1, len(grad.get_shape())))
    # Flatten every example's gradient so percentiles can be taken per example.
    dim = tf.reduce_prod(tf.shape(x)[1:])
    abs_grad = tf.reshape(tf.abs(grad), (-1, dim))
    # if q is a scalar, broadcast it to a vector of same length as the batch dim
    q = tf.cast(tf.broadcast_to(q, tf.shape(x)[0:1]), tf.float32)
    k = tf.cast(tf.floor(q / 100 * tf.cast(dim, tf.float32)), tf.int32)
    # `tf.sort` is much faster than `tf.contrib.distributions.percentile`.
    # For TF <= 1.12, use `tf.nn.top_k` as `tf.sort` is not implemented.
    if LooseVersion(tf.__version__) <= LooseVersion("1.12.0"):
        # `tf.sort` is only available in TF 1.13 onwards
        sorted_grad = -tf.nn.top_k(-abs_grad, k=dim, sorted=True)[0]
    else:
        sorted_grad = tf.sort(abs_grad, axis=-1)
    idx = tf.stack((tf.range(tf.shape(abs_grad)[0]), k), -1)
    percentiles = tf.gather_nd(sorted_grad, idx)
    # Keep only the entries at or above the per-example q-th percentile; when
    # several entries tie for the threshold, the update is split evenly
    # among them so the step has unit l1-norm.
    tied_for_max = tf.greater_equal(abs_grad, tf.expand_dims(percentiles, -1))
    tied_for_max = tf.reshape(tf.cast(tied_for_max, x.dtype), tf.shape(grad))
    num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)
    optimal_perturbation = tf.sign(grad) * tied_for_max / num_ties
    # Add perturbation to original example to obtain adversarial example
    adv_x = x + utils_tf.mul(eps, optimal_perturbation)
    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) or (clip_max is not None):
        # We don't currently support one-sided clipping
        assert clip_min is not None and clip_max is not None
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
    if sanity_checks:
        with tf.control_dependencies(asserts):
            adv_x = tf.identity(adv_x)
    return adv_x
| mit | ccbdf5b48b1315b83230451ecc106ad3 | 37.583562 | 88 | 0.602215 | 3.840469 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans/utils.py | 2 | 12028 | """
Generic utility functions useful for writing Python code in general
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import warnings
import logging
import os
import re
import subprocess
import numpy as np
from six.moves import xrange
# Numeric scalar types accepted wherever "a number" is expected (used for
# isinstance checks): Python builtins plus fixed-width numpy scalar types.
# The original tuple listed np.int32 twice; the duplicate has been removed
# (harmless for isinstance, but redundant).
known_number_types = (
    int,
    float,
    np.float16,
    np.float32,
    np.float64,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
)
# Repository root: two directory levels above this module.
CLEVERHANS_ROOT = os.path.dirname(os.path.dirname(__file__))
class _ArgsWrapper(object):
    """
    Expose the entries of a dict (or argparse-style namespace) as attributes.
    """

    def __init__(self, args):
        # Non-dict objects (e.g. argparse.Namespace) are converted via vars().
        self.args = args if isinstance(args, dict) else vars(args)

    def __getattr__(self, name):
        # Missing keys resolve to None instead of raising AttributeError.
        return self.args.get(name)
class AccuracyReport(object):
    """
    An object summarizing the accuracy results for experiments involving
    training on clean examples or adversarial examples, then evaluating
    on clean or adversarial examples.

    All accuracy fields start at 0.0 and are filled in by the experiments.
    """

    def __init__(self):
        for field in (
            # Evaluation-set accuracies.
            "clean_train_clean_eval",
            "clean_train_adv_eval",
            "adv_train_clean_eval",
            "adv_train_adv_eval",
            # Training-set accuracies used by the tutorials.
            "train_clean_train_clean_eval",
            "train_clean_train_adv_eval",
            "train_adv_train_clean_eval",
            "train_adv_train_adv_eval",
        ):
            setattr(self, field, 0.0)
def batch_indices(batch_nb, data_length, batch_size):
    """
    Compute the (start, end) slice indices for one minibatch.
    :param batch_nb: the batch number
    :param data_length: the total length of the data being parsed by batches
    :param batch_size: the number of inputs in each batch
    :return: pair of (start, end) indices
    """
    start = int(batch_nb * batch_size)
    end = int((batch_nb + 1) * batch_size)
    # If the last batch would run past the data, slide the window back so the
    # batch stays full-sized (reusing some earlier inputs).
    if end > data_length:
        shift = end - data_length
        start, end = start - shift, end - shift
    return start, end
def other_classes(nb_classes, class_ind):
    """
    Return every class index except ``class_ind``.
    :param nb_classes: number of classes in the task
    :param class_ind: the class index to be omitted
    :return: list of class indices excluding the class indexed by class_ind
    """
    if not 0 <= class_ind < nb_classes:
        raise ValueError("class_ind must be within the range (0, nb_classes - 1)")
    return [index for index in range(nb_classes) if index != class_ind]
def to_categorical(y, nb_classes=None, num_classes=None):
    """
    Converts a class vector (integers) to binary class matrix.
    This is adapted from the Keras function with the same name.
    :param y: class vector to be converted into a matrix
              (integers from 0 to nb_classes).
    :param nb_classes: nb_classes: total number of classes.
    :param num_classes: deprecated alias for nb_classes.
    :return: A binary matrix representation of the input.
    """
    # Bug fix: nb_classes used to be a required positional argument, which
    # made calling with only the deprecated ``num_classes`` keyword
    # impossible. Giving it a default of None keeps the old call style
    # working while making the documented deprecation path usable.
    if num_classes is not None:
        if nb_classes is not None:
            raise ValueError(
                "Should not specify both nb_classes and its deprecated "
                "alias, num_classes"
            )
        warnings.warn(
            "`num_classes` is deprecated. Switch to `nb_classes`."
            " `num_classes` may be removed on or after 2019-04-23."
        )
        nb_classes = num_classes
        del num_classes
    if nb_classes is None:
        raise ValueError("nb_classes must be specified")
    y = np.array(y, dtype="int").ravel()
    n = y.shape[0]
    categorical = np.zeros((n, nb_classes))
    # One-hot encode: set a single 1 per row at the label's column.
    categorical[np.arange(n), y] = 1
    return categorical
def random_targets(gt, nb_classes):
    """
    Take in an array of correct labels and randomly select a different label
    for each label in the array. This is typically used to randomly select a
    target class in targeted adversarial examples attacks (i.e., when the
    search algorithm takes in both a source class and target class to compute
    the adversarial example).
    :param gt: the ground truth (correct) labels. They can be provided as a
               1D vector or 2D array of one-hot encoded labels.
    :param nb_classes: The number of classes for this task. The random class
                       will be chosen between 0 and nb_classes such that it
                       is different from the correct class.
    :return: A numpy array holding the randomly-selected target classes
             encoded as one-hot labels.
    """
    # Accept one-hot labels by reducing them to integer class indices.
    if len(gt.shape) == 2:
        gt = np.argmax(gt, axis=1)
    targets = np.zeros(gt.shape, dtype=np.int32)
    for class_ind in range(nb_classes):
        # All examples whose true class is class_ind.
        mask = gt == class_ind
        count = np.sum(mask)
        # Draw (with replacement) among every class except the true one.
        candidates = other_classes(nb_classes, class_ind)
        targets[mask] = np.random.choice(candidates, size=count)
    # Encode the vector of random labels as one-hot labels.
    return to_categorical(targets, nb_classes).astype(np.int32)
def pair_visual(*args, **kwargs):
    """Deprecated alias; forwards to ``cleverhans.plot.pyplot_image.pair_visual``."""
    warnings.warn(
        "`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
        "cleverhans.utils.pair_visual may be removed on or after "
        "2019-04-24."
    )
    from cleverhans.plot import pyplot_image
    return pyplot_image.pair_visual(*args, **kwargs)
def grid_visual(*args, **kwargs):
    """Deprecated alias; forwards to ``cleverhans.plot.pyplot_image.grid_visual``."""
    warnings.warn(
        "`grid_visual` has moved to `cleverhans.plot.pyplot_image`. "
        "cleverhans.utils.grid_visual may be removed on or after "
        "2019-04-24."
    )
    from cleverhans.plot import pyplot_image
    return pyplot_image.grid_visual(*args, **kwargs)
def get_logits_over_interval(*args, **kwargs):
    """Deprecated alias; forwards to ``cleverhans.plot.pyplot_image.get_logits_over_interval``."""
    warnings.warn(
        "`get_logits_over_interval` has moved to "
        "`cleverhans.plot.pyplot_image`. "
        "cleverhans.utils.get_logits_over_interval may be removed on "
        "or after 2019-04-24."
    )
    from cleverhans.plot import pyplot_image
    return pyplot_image.get_logits_over_interval(*args, **kwargs)
def linear_extrapolation_plot(*args, **kwargs):
    """Deprecated alias; forwards to ``cleverhans.plot.pyplot_image.linear_extrapolation_plot``."""
    warnings.warn(
        "`linear_extrapolation_plot` has moved to "
        "`cleverhans.plot.pyplot_image`. "
        "cleverhans.utils.linear_extrapolation_plot may be removed on "
        "or after 2019-04-24."
    )
    from cleverhans.plot import pyplot_image
    return pyplot_image.linear_extrapolation_plot(*args, **kwargs)
def set_log_level(level, name="cleverhans"):
    """
    Set the threshold of the named logger.
    :param level: the logger threshold. You can find values here:
                  https://docs.python.org/2/library/logging.html#levels
    :param name: the name used for the cleverhans logger
    """
    logging.getLogger(name).setLevel(level)
def get_log_level(name="cleverhans"):
    """
    Return the current effective threshold of the named logger.
    :param name: the name used for the cleverhans logger
    """
    return logging.getLogger(name).getEffectiveLevel()
class TemporaryLogLevel(object):
    """
    A ContextManager that changes a log level temporarily.

    Note that the log level will be set back to its original value when
    the context manager exits, even if the log level has been changed
    again in the meantime.
    """

    def __init__(self, level, name):
        self.name = name
        self.level = level

    def __enter__(self):
        self.old_level = get_log_level(self.name)
        set_log_level(self.level, self.name)

    def __exit__(self, type, value, traceback):
        set_log_level(self.old_level, self.name)
        # Bug fix: the old code returned True here, and a truthy return
        # from __exit__ tells Python to SUPPRESS any exception raised
        # inside the ``with`` block. Return False so exceptions propagate.
        return False
def create_logger(name):
    """
    Return the shared "cleverhans" logger, attaching the stream handler
    and formatter the first time it is requested.

    Note: ``name`` is accepted for API compatibility, but the shared
    "cleverhans" logger is returned regardless of its value.
    """
    base = logging.getLogger("cleverhans")
    # Attach a handler only once so repeated calls do not duplicate output.
    if not base.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("[%(levelname)s %(asctime)s %(name)s] %(message)s")
        )
        base.addHandler(handler)
    return base
def deterministic_dict(normal_dict):
    """
    Returns a version of `normal_dict` whose iteration order is always the same
    (keys are visited in sorted order).
    """
    return OrderedDict((key, normal_dict[key]) for key in sorted(normal_dict.keys()))
def ordered_union(l1, l2):
    """
    Return the union of l1 and l2, with a deterministic ordering.
    (Union of python sets does not necessarily have a consistent iteration
    order)
    :param l1: list of items
    :param l2: list of items
    :returns: list containing one copy of each item that is in l1 or in l2
    """
    # List membership (not a set) is used on purpose so that unhashable
    # elements remain supported.
    merged = []
    for element in l1 + l2:
        if element not in merged:
            merged.append(element)
    return merged
def safe_zip(*args):
    """like zip but with these properties:
    - returns a list, rather than an iterator. This is the old Python2 zip behavior.
    - a guarantee that all arguments are the same length.
    (normal zip silently drops entries to make them the same length)
    """
    # Bug fix: with zero arguments the old code crashed with IndexError on
    # args[0]; mirror built-in zip() and return an empty list instead.
    if not args:
        return []
    length = len(args[0])
    if not all(len(arg) == length for arg in args):
        raise ValueError(
            "Lengths of arguments do not match: " + str([len(arg) for arg in args])
        )
    return list(zip(*args))
def shell_call(command, **kwargs):
    """Calls shell command with argument substitution.

    Each token of ``command`` that looks exactly like ``${NAME}`` is replaced
    with ``kwargs["NAME"]`` when such a key exists; all other tokens are
    passed through unchanged.  Examples::

        shell_call(['cp', 'a', 'b'])             # runs "cp a b"
        shell_call(['cp', '${a}', 'b'], a='asd') # runs "cp asd b"

    :param command: command represented as a list of tokens
    :param kwargs: dictionary with argument substitutions
    :return: output of the command
    :raises subprocess.CalledProcessError: if command return value is not zero
    """
    # A token must be exactly '${NAME}' (nothing around it) to be substituted.
    placeholder = re.compile("^\\$\\{(\\w+)\\}$")
    substituted = []
    for token in command:
        match = placeholder.match(token)
        if match and match.group(1) in kwargs:
            token = kwargs[match.group(1)]
        substituted.append(token)
    logging.debug("Executing shell command: %s" % " ".join(substituted))
    return subprocess.check_output(substituted)
def deep_copy(numpy_dict):
    """
    Returns a copy of a dictionary whose values are numpy arrays.
    Copies their values rather than copying references to them.
    """
    # .copy() duplicates each array's data, so mutating the result does not
    # touch the arrays in the input dictionary.
    return {key: value.copy() for key, value in numpy_dict.items()}
| mit | e547c828dfd6051cdcd3048d74f6f654 | 30.241558 | 84 | 0.643748 | 3.824483 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/cleverhans/attacks_tf.py | 1 | 8442 | # pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import warnings
import numpy as np
from cleverhans.attacks.carlini_wagner_l2 import (
CWL2 as CarliniWagnerL2,
) # pylint: disable=unused-import
from cleverhans.attacks.deep_fool import (
deepfool_batch,
deepfool_attack,
) # pylint: disable=unused-import
from cleverhans.attacks.elastic_net_method import (
EAD as ElasticNetMethod,
) # pylint: disable=unused-import
from cleverhans.attacks.lbfgs import (
LBFGS_impl as LBFGS_attack,
) # pylint: disable=unused-import
from cleverhans.attacks.saliency_map_method import (
jsma_symbolic,
) # pylint: disable=unused-import
from cleverhans.attacks.spsa import (
TensorOptimizer,
TensorGradientDescent,
TensorAdam,
) # pylint: disable=unused-import
from cleverhans.attacks.spsa import (
SPSAAdam,
margin_logit_loss,
_apply_black_border,
) # pylint: disable=unused-import
from cleverhans.attacks.spsa import (
_apply_transformation,
spm,
parallel_apply_transformations,
) # pylint: disable=unused-import
from cleverhans.attacks.virtual_adversarial_method import (
vatm,
) # pylint: disable=unused-import
from cleverhans.utils_tf import (
jacobian_graph,
jacobian_augmentation,
) # pylint: disable=unused-import
from cleverhans import utils
# All numpy buffers created in this module use single-precision floats.
np_dtype = np.dtype("float32")

# Module-level logger shared by the attack implementations below.
_logger = utils.create_logger("cleverhans.attacks.tf")

# Emitted once at import time: this whole module is a deprecated shim that
# only re-exports functionality from its new locations.
warnings.warn(
    "attacks_tf is deprecated and will be removed on 2019-07-18"
    " or after. Code should import functions from their new locations directly."
)
def fgsm(x, predictions, eps=0.3, clip_min=None, clip_max=None):
    """Deprecated alias for the Fast Gradient (Sign) Method.

    Forwards to :func:`fgm` with an infinity-norm constraint.

    :param x: input placeholder
    :param predictions: model output (softmax probabilities)
    :param eps: perturbation size
    :param clip_min: optional lower bound on adversarial example values
    :param clip_max: optional upper bound on adversarial example values
    """
    warnings.warn(
        "This function is deprecated and will be removed on or after "
        "2019-04-09. Switch to cleverhans.attacks.FastGradientMethod."
    )
    fgm_kwargs = dict(y=None, eps=eps, ord=np.inf, clip_min=clip_min, clip_max=clip_max)
    return fgm(x, predictions, **fgm_kwargs)
def fgm(x, preds, *args, **kwargs):
    """Deprecated wrapper that recovers logits from a softmax output tensor
    and forwards to cleverhans.attacks.fgm.

    :param x: input placeholder
    :param preds: output of a Softmax op; its single input is taken as logits
    :raises TypeError: if `preds` was not produced by a Softmax op
    """
    if preds.op.type != "Softmax":
        raise TypeError("Unclear how to get logits")
    # The Softmax op has exactly one input: the logits tensor.
    (logits,) = preds.op.inputs
    warnings.warn(
        "This function is deprecated. Switch to passing *logits* to"
        " cleverhans.attacks.fgm"
    )
    from cleverhans.attacks import fgm as logits_fgm

    return logits_fgm(x, logits, *args, **kwargs)
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """
    Apply a saliency-map perturbation to two selected input features,
    mutating `X` in place.

    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :param theta: delta for each feature adjustment
    :param clip_min: mininum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    :return: the (mutated) input feature matrix
    """
    warnings.warn(
        "This function is dead code and will be removed on or after 2019-07-18"
    )
    # Move each chosen feature by theta, clamped to the valid range.
    for idx in (i, j):
        if increase:
            X[0, idx] = np.minimum(clip_max, X[0, idx] + theta)
        else:
            X[0, idx] = np.maximum(clip_min, X[0, idx] - theta)
    return X
def saliency_map(grads_target, grads_other, search_domain, increase):
    """
    Select the pair of input features with the best saliency-map score.

    :param grads_target: a matrix containing forward derivatives for the
                         target class (mutated in place for excluded indices)
    :param grads_other: a matrix where every element is the sum of forward
                        derivatives over all non-target classes at that index
                        (mutated in place for excluded indices)
    :param search_domain: the set of input indices that we are considering
                          (mutated: the selected indices are removed)
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :return: (i, j, search_domain) the two input indices selected and the
             updated search domain
    """
    warnings.warn(
        "This function is dead code and will be removed on or after 2019-07-18"
    )
    nf = len(grads_target)
    sign = 2 * int(increase) - 1
    # Features already used (outside the search domain) get the worst possible
    # gradient values so the scoring below can never pick them.
    excluded = list(set(range(nf)) - search_domain)
    grads_target[excluded] = -sign * np.max(np.abs(grads_target))
    grads_other[excluded] = sign * np.max(np.abs(grads_other))
    # Pairwise sums: entry (a, b) holds grad[a] + grad[b].
    target_sum = grads_target[None, :] + grads_target[:, None]
    other_sum = grads_other[None, :] + grads_other[:, None]
    # A pair is admissible when it pushes the target class the right way while
    # pushing all other classes the opposite way.
    if increase:
        admissible = (target_sum > 0) & (other_sum < 0)
    else:
        admissible = (target_sum < 0) & (other_sum > 0)
    scores = admissible * (-target_sum * other_sum)
    # A pixel can only be selected (and changed) once.
    np.fill_diagonal(scores, 0)
    # Decode the flat argmax into the two feature indices.
    best = np.argmax(scores)
    p1, p2 = best % nf, best // nf
    search_domain.discard(p1)
    search_domain.discard(p2)
    return p1, p2, search_domain
def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None):
    """
    TensorFlow implementation of the forward derivative / Jacobian.

    :param sess: TF session in which to evaluate the gradients
    :param x: the input placeholder
    :param grads: the list of TF gradients returned by jacobian_graph()
    :param target: the target misclassification class
    :param X: numpy array with sample input
    :param nb_features: the number of features in the input
    :param nb_classes: the number of model output classes
    :param feed: optional extra feed-dict entries (may override x)
    :return: (target row, sum of all other rows) of the flattened Jacobian
    """
    warnings.warn(
        "This function is dead code and will be removed on or after 2019-07-18"
    )
    # One feed dictionary is shared by every gradient evaluation.
    feed_dict = {x: X}
    if feed is not None:
        feed_dict.update(feed)
    # Evaluate d(output_c)/d(x) for every class c, flattened into a row.
    jac = np.zeros((nb_classes, nb_features), dtype=np_dtype)
    for class_ind, grad in enumerate(grads):
        jac[class_ind] = np.reshape(sess.run(grad, feed_dict), (1, nb_features))
    # Sum the rows of every non-target class to prepare for the
    # saliency-map computation in the next step of the attack.
    rest = utils.other_classes(nb_classes, target)
    grad_others = jac[rest, :].sum(axis=0)
    return jac[target], grad_others
class UnrolledOptimizer(TensorOptimizer):
    """Deprecated alias kept for backward compatibility; use TensorOptimizer."""

    def __init__(self, *args, **kwargs):
        message = (
            "UnrolledOptimizer has been renamed to TensorOptimizer."
            " The old name may be removed on or after 2019-04-25."
        )
        warnings.warn(message)
        super(UnrolledOptimizer, self).__init__(*args, **kwargs)
class UnrolledGradientDescent(TensorGradientDescent):
    """Deprecated alias kept for backward compatibility; use TensorGradientDescent."""

    def __init__(self, *args, **kwargs):
        message = (
            "UnrolledGradientDescent has been renamed to "
            "TensorGradientDescent."
            " The old name may be removed on or after 2019-04-25."
        )
        warnings.warn(message)
        super(UnrolledGradientDescent, self).__init__(*args, **kwargs)
class UnrolledAdam(TensorAdam):
    """Deprecated alias kept for backward compatibility; use TensorAdam."""

    def __init__(self, *args, **kwargs):
        message = (
            "UnrolledAdam has been renamed to TensorAdam."
            " The old name may be removed on or after 2019-04-25."
        )
        warnings.warn(message)
        super(UnrolledAdam, self).__init__(*args, **kwargs)
def pgd_attack(*args, **kwargs):
    """Deprecated alias for cleverhans.attacks.projected_optimization.

    All positional and keyword arguments are forwarded unchanged.
    """
    warnings.warn(
        "cleverhans.attacks_tf.pgd_attack has been renamed to "
        "cleverhans.attacks.projected_optimization. "
        "Please switch to the new name. The current name will "
        "become unsupport on or after 2019-04-24."
    )
    # Import lazily, matching the original shim, to avoid a cycle at load time.
    from cleverhans.attacks import projected_optimization

    return projected_optimization(*args, **kwargs)
| mit | 0163e047683a373755dde21b596cfc78 | 33.740741 | 80 | 0.673656 | 3.643505 | false | false | false | false |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/examples/multigpu_advtrain/make_model.py | 1 | 1889 | # pylint: disable=missing-docstring
from model import Conv2D, ReLU, Flatten, Linear, Softmax, MLP
from model import MLPnGPU
from model import Conv2DnGPU
from model import LinearnGPU
from model import MaxPool
from resnet_tf import ResNetTF
def make_basic_cnn(nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1)):
    """Build the basic three-conv-layer CNN used in the tutorials.

    :param nb_filters: number of filters in the first conv layer (the next
        two use twice as many)
    :param nb_classes: number of output classes
    :param input_shape: NHWC input shape with a None batch dimension
    :return: an MLP model wrapping the layer stack
    """
    layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME"), ReLU()]
    layers += [Conv2D(nb_filters * 2, (6, 6), (2, 2), "VALID"), ReLU()]
    layers += [Conv2D(nb_filters * 2, (5, 5), (1, 1), "VALID"), ReLU()]
    layers += [Flatten(), Linear(nb_classes), Softmax()]
    return MLP(nb_classes, layers, input_shape)
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):
    """
    Create a multi-GPU model similar to the basic cnn in the tutorials.

    :param nb_classes: number of output classes
    :param input_shape: NHWC input shape with a None batch dimension
    :param kwargs: ignored; accepted for interface compatibility with the
        other model factories
    :return: an MLPnGPU model
    """
    # Bug fix: make_basic_cnn() was previously called with its defaults, so a
    # non-default nb_classes/input_shape only reached the MLPnGPU wrapper
    # while the inner Linear layer still had 10 outputs. Forward both.
    model = make_basic_cnn(nb_classes=nb_classes, input_shape=input_shape)
    layers = model.layers
    model = MLPnGPU(nb_classes, layers, input_shape)
    return model
def make_madry_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):
    """
    Create a multi-GPU model similar to Madry et al. (arXiv:1706.06083).

    :param nb_classes: number of output classes
    :param input_shape: NHWC input shape with a None batch dimension
    :param kwargs: ignored; accepted for interface compatibility with the
        other model factories
    :return: an MLPnGPU model
    """

    def conv_block(width):
        # conv (5x5, stride 1) -> ReLU -> 2x2 max-pool, all SAME padding
        return [
            Conv2DnGPU(width, (5, 5), (1, 1), "SAME"),
            ReLU(),
            MaxPool((2, 2), (2, 2), "SAME"),
        ]

    layers = (
        conv_block(32)
        + conv_block(64)
        + [
            Flatten(),
            LinearnGPU(1024),
            ReLU(),
            LinearnGPU(nb_classes),
            Softmax(),
        ]
    )
    return MLPnGPU(nb_classes, layers, input_shape)
def make_model(model_type="madry", **kwargs):
    """Factory dispatching on `model_type` ("basic", "madry", or "resnet_tf").

    :param model_type: which architecture to build
    :param kwargs: forwarded to the selected model constructor
    :raises Exception: if `model_type` is not one of the known types
    """
    if model_type == "basic":
        return make_basic_ngpu(**kwargs)
    if model_type == "madry":
        return make_madry_ngpu(**kwargs)
    if model_type == "resnet_tf":
        return ResNetTF(**kwargs)
    raise Exception("model type not defined.")
| mit | d4cf8a10af5ce678e9e5589dda23e793 | 26.376812 | 80 | 0.574907 | 3.081566 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.