from __future__ import print_function, division
from collections import defaultdict
from .basic import Basic
from .compatibility import cmp_to_key, reduce, is_sequence, range
from .logic import _fuzzy_group, fuzzy_or, fuzzy_not, fuzzy_and
from .singleton import S
from .operations import AssocOp
from .cache import cacheit
from .numbers import ilcm, igcd
from .expr import Expr
# Key for sorting commutative args in canonical order
_args_sortkey = cmp_to_key(Basic.compare)
def _addsort(args):
# in-place sorting of args
args.sort(key=_args_sortkey)
def _unevaluated_Add(*args):
"""Return a well-formed unevaluated Add: Numbers are collected and
put in slot 0 and args are sorted. Use this when args have changed
but you still want to return an unevaluated Add.
Examples
========
>>> from sympy.core.add import _unevaluated_Add as uAdd
>>> from sympy import S, Add
>>> from sympy.abc import x, y
>>> a = uAdd(*[S(1.0), x, S(2)])
>>> a.args[0]
3.00000000000000
>>> a.args[1]
x
Beyond the Number being in slot 0, there is no other assurance of
order for the arguments since they are hash sorted. So, for testing
purposes, output produced by this in some other function can only
be tested against the output of this function or as one of several
options:
    >>> opts = (Add(x, y, evaluate=False), Add(y, x, evaluate=False))
>>> a = uAdd(x, y)
>>> assert a in opts and a == uAdd(x, y)
"""
args = list(args)
newargs = []
co = S.Zero
while args:
a = args.pop()
if a.is_Add:
# this will keep nesting from building up
# so that x + (x + 1) -> x + x + 1 (3 args)
args.extend(a.args)
elif a.is_Number:
co += a
else:
newargs.append(a)
_addsort(newargs)
if co:
newargs.insert(0, co)
return Add._from_args(newargs)
class Add(Expr, AssocOp):
__slots__ = []
is_Add = True
@classmethod
def flatten(cls, seq):
"""
        Takes the sequence "seq" of nested Adds and returns a flattened list.
Returns: (commutative_part, noncommutative_part, order_symbols)
Applies associativity, all terms are commutable with respect to
addition.
NB: the removal of 0 is already handled by AssocOp.__new__
See also
========
sympy.core.mul.Mul.flatten
"""
from sympy.calculus.util import AccumBounds
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
if a.is_Rational:
if b.is_Mul:
rv = [a, b], [], None
if rv:
if all(s.is_commutative for s in rv[0]):
return rv
return [], rv[0], None
terms = {} # term -> coeff
# e.g. x**2 -> 5 for ... + 5*x**2 + ...
coeff = S.Zero # coefficient (Number or zoo) to always be in slot 0
# e.g. 3 + ...
order_factors = []
for o in seq:
# O(x)
if o.is_Order:
for o1 in order_factors:
if o1.contains(o):
o = None
break
if o is None:
continue
order_factors = [o] + [
o1 for o1 in order_factors if not o.contains(o1)]
continue
# 3 or NaN
elif o.is_Number:
if (o is S.NaN or coeff is S.ComplexInfinity and
o.is_finite is False):
# we know for sure the result will be nan
return [S.NaN], [], None
if coeff.is_Number:
coeff += o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif isinstance(o, AccumBounds):
coeff = o.__add__(coeff)
continue
elif o is S.ComplexInfinity:
if coeff.is_finite is False:
# we know for sure the result will be nan
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
# Add([...])
elif o.is_Add:
# NB: here we assume Add is always commutative
seq.extend(o.args) # TODO zerocopy?
continue
# Mul([...])
elif o.is_Mul:
c, s = o.as_coeff_Mul()
# check for unevaluated Pow, e.g. 2**3 or 2**(-1/2)
elif o.is_Pow:
b, e = o.as_base_exp()
if b.is_Number and (e.is_Integer or
(e.is_Rational and e.is_negative)):
seq.append(b**e)
continue
c, s = S.One, o
else:
# everything else
c = S.One
s = o
# now we have:
# o = c*s, where
#
# c is a Number
# s is an expression with number factor extracted
# let's collect terms with the same s, so e.g.
# 2*x**2 + 3*x**2 -> 5*x**2
if s in terms:
terms[s] += c
if terms[s] is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
else:
terms[s] = c
# now let's construct new args:
# [2*x**2, x**3, 7*x**4, pi, ...]
newseq = []
noncommutative = False
for s, c in terms.items():
# 0*s
if c is S.Zero:
continue
# 1*s
elif c is S.One:
newseq.append(s)
# c*s
else:
if s.is_Mul:
                    # Mul already keeps its arguments in perfect order,
                    # so we can simply put c in slot 0 and take the fast path.
cs = s._new_rawargs(*((c,) + s.args))
newseq.append(cs)
elif s.is_Add:
# we just re-create the unevaluated Mul
newseq.append(Mul(c, s, evaluate=False))
else:
# alternatively we have to call all Mul's machinery (slow)
newseq.append(Mul(c, s))
noncommutative = noncommutative or not s.is_commutative
# oo, -oo
if coeff is S.Infinity:
newseq = [f for f in newseq if not
(f.is_nonnegative or f.is_real and f.is_finite)]
elif coeff is S.NegativeInfinity:
newseq = [f for f in newseq if not
(f.is_nonpositive or f.is_real and f.is_finite)]
if coeff is S.ComplexInfinity:
# zoo might be
# infinite_real + finite_im
# finite_real + infinite_im
# infinite_real + infinite_im
# addition of a finite real or imaginary number won't be able to
            # change the zoo nature; adding an infinite quantity would result
# in a NaN condition if it had sign opposite of the infinite
# portion of zoo, e.g., infinite_real - infinite_real.
newseq = [c for c in newseq if not (c.is_finite and
c.is_real is not None)]
# process O(x)
if order_factors:
newseq2 = []
for t in newseq:
for o in order_factors:
# x + O(x) -> O(x)
if o.contains(t):
t = None
break
# x + O(x**2) -> x + O(x**2)
if t is not None:
newseq2.append(t)
newseq = newseq2 + order_factors
# 1 + O(1) -> O(1)
for o in order_factors:
if o.contains(coeff):
coeff = S.Zero
break
# order args canonically
_addsort(newseq)
# current code expects coeff to be first
if coeff is not S.Zero:
newseq.insert(0, coeff)
# we are done
if noncommutative:
return [], newseq, None
else:
return newseq, [], None
@classmethod
def class_key(cls):
"""Nice order of classes"""
return 3, 1, cls.__name__
def as_coefficients_dict(a):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
d = defaultdict(list)
for ai in a.args:
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
else:
d[k] = Add(*v)
di = defaultdict(int)
di.update(d)
return di
@cacheit
def as_coeff_add(self, *deps):
"""
Returns a tuple (coeff, args) where self is treated as an Add and coeff
is the Number term and args is a tuple of all other terms.
Examples
========
>>> from sympy.abc import x
>>> (7 + 3*x).as_coeff_add()
(7, (3*x,))
>>> (7*x).as_coeff_add()
(0, (7*x,))
"""
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
coeff, notrat = self.args[0].as_coeff_add()
if coeff is not S.Zero:
return coeff, notrat + self.args[1:]
return S.Zero, self.args
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number:
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
else:
return S.Zero, self
# Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we
# let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See
# issue 5524.
@cacheit
def _eval_derivative(self, s):
return self.func(*[a.diff(s) for a in self.args])
def _eval_nseries(self, x, n, logx):
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
return self.func(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w+3).matches('x+5') -> {w: x+2}
coeff, terms = self.as_coeff_add()
if len(terms) == 1:
return terms[0].matches(expr - coeff, repl_dict)
return
def matches(self, expr, repl_dict={}, old=False):
return AssocOp._matches_commutative(self, expr, repl_dict, old)
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs - rhs, but treats arguments like symbols, so things like
oo - oo return 0, instead of a nan.
"""
from sympy import oo, I, expand_mul
if lhs == oo and rhs == oo or lhs == oo*I and rhs == oo*I:
return S.Zero
return expand_mul(lhs - rhs)
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
          self.as_coeff_add() which gives the head and a tuple containing
the arguments of the tail when treated as an Add.
- if you want the coefficient when self is treated as a Mul
then use self.as_coeff_mul()[0]
>>> from sympy.abc import x, y
        >>> (3 + x + y).as_two_terms()
        (3, x + y)
"""
if len(self.args) == 1:
return S.Zero, self
return self.args[0], self._new_rawargs(*self.args[1:])
def as_numer_denom(self):
# clear rational denominator
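        # For illustration: (x/2 + y/3).as_numer_denom() should give
        # (3*x + 2*y, 6).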
content, expr = self.primitive()
ncon, dcon = content.as_numer_denom()
# collect numerators and denominators of the terms
nd = defaultdict(list)
for f in expr.args:
ni, di = f.as_numer_denom()
nd[di].append(ni)
# put infinity in the numerator
if S.Zero in nd:
n = nd.pop(S.Zero)
assert len(n) == 1
n = n[0]
nd[S.One].append(n/S.Zero)
# check for quick exit
if len(nd) == 1:
d, n = nd.popitem()
return self.func(
*[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d)
# sum up the terms having a common denominator
for d, n in nd.items():
if len(n) == 1:
nd[d] = n[0]
else:
nd[d] = self.func(*n)
# assemble single numerator and denominator
denoms, numers = [list(i) for i in zip(*iter(nd.items()))]
n, d = self.func(*[Mul(*(denoms[:i] + [numers[i]] + denoms[i + 1:]))
for i in range(len(numers))]), Mul(*denoms)
return _keep_coeff(ncon, n), _keep_coeff(dcon, d)
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
# assumption methods
_eval_is_real = lambda self: _fuzzy_group(
(a.is_real for a in self.args), quick_exit=True)
_eval_is_complex = lambda self: _fuzzy_group(
(a.is_complex for a in self.args), quick_exit=True)
_eval_is_antihermitian = lambda self: _fuzzy_group(
(a.is_antihermitian for a in self.args), quick_exit=True)
_eval_is_finite = lambda self: _fuzzy_group(
(a.is_finite for a in self.args), quick_exit=True)
_eval_is_hermitian = lambda self: _fuzzy_group(
(a.is_hermitian for a in self.args), quick_exit=True)
_eval_is_integer = lambda self: _fuzzy_group(
(a.is_integer for a in self.args), quick_exit=True)
_eval_is_rational = lambda self: _fuzzy_group(
(a.is_rational for a in self.args), quick_exit=True)
_eval_is_algebraic = lambda self: _fuzzy_group(
(a.is_algebraic for a in self.args), quick_exit=True)
_eval_is_commutative = lambda self: _fuzzy_group(
a.is_commutative for a in self.args)
def _eval_is_imaginary(self):
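        # An Add can only be imaginary if its real terms cancel to zero
        # while the imaginary terms, rotated onto the real axis by
        # multiplying with I, sum to something nonzero.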
nz = []
im_I = []
for a in self.args:
if a.is_real:
if a.is_zero:
pass
elif a.is_zero is False:
nz.append(a)
else:
return
elif a.is_imaginary:
im_I.append(a*S.ImaginaryUnit)
elif (S.ImaginaryUnit*a).is_real:
im_I.append(a*S.ImaginaryUnit)
else:
return
if self.func(*nz).is_zero:
return fuzzy_not(self.func(*im_I).is_zero)
elif self.func(*nz).is_zero is False:
return False
def _eval_is_zero(self):
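        # Roughly: an Add is zero when its real terms sum to zero and no
        # imaginary (or possibly imaginary) terms remain that could cancel
        # against them.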
nz = []
z = 0
im_or_z = False
im = False
for a in self.args:
if a.is_real:
if a.is_zero:
z += 1
elif a.is_zero is False:
nz.append(a)
else:
return
elif a.is_imaginary:
im = True
elif (S.ImaginaryUnit*a).is_real:
im_or_z = True
else:
return
if z == len(self.args):
return True
if self.func(*nz).is_zero:
if not im_or_z and not im:
return True
if im and not im_or_z:
return False
if self.func(*nz).is_zero is False:
return False
    def _eval_is_odd(self):
        # discard terms known to be even; they cannot change the parity
        l = [f for f in self.args if not (f.is_even is True)]
        if not l:
            # a sum of even terms is even
            return False
        if l[0].is_odd:
            # odd + rest is odd exactly when rest is even
            return self._new_rawargs(*l[1:]).is_even
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _eval_is_positive(self):
from sympy.core.exprtools import _monotonic_sign
if self.is_number:
return super(Add, self)._eval_is_positive()
c, a = self.as_coeff_Add()
if not c.is_zero:
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s.is_positive and a.is_nonnegative:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v.is_positive:
return True
pos = nonneg = nonpos = unknown_sign = False
saw_INF = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
ispos = a.is_positive
infinite = a.is_infinite
if infinite:
saw_INF.add(fuzzy_or((ispos, a.is_nonnegative)))
if True in saw_INF and False in saw_INF:
return
if ispos:
pos = True
continue
elif a.is_nonnegative:
nonneg = True
continue
elif a.is_nonpositive:
nonpos = True
continue
if infinite is None:
return
unknown_sign = True
if saw_INF:
if len(saw_INF) > 1:
return
return saw_INF.pop()
elif unknown_sign:
return
elif not nonpos and not nonneg and pos:
return True
elif not nonpos and pos:
return True
elif not pos and not nonneg:
return False
def _eval_is_nonnegative(self):
from sympy.core.exprtools import _monotonic_sign
if not self.is_number:
c, a = self.as_coeff_Add()
if not c.is_zero and a.is_nonnegative:
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s.is_nonnegative:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v.is_nonnegative:
return True
def _eval_is_nonpositive(self):
from sympy.core.exprtools import _monotonic_sign
if not self.is_number:
c, a = self.as_coeff_Add()
if not c.is_zero and a.is_nonpositive:
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s.is_nonpositive:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v.is_nonpositive:
return True
def _eval_is_negative(self):
from sympy.core.exprtools import _monotonic_sign
if self.is_number:
return super(Add, self)._eval_is_negative()
c, a = self.as_coeff_Add()
if not c.is_zero:
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s.is_negative and a.is_nonpositive:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v.is_negative:
return True
neg = nonpos = nonneg = unknown_sign = False
saw_INF = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
isneg = a.is_negative
infinite = a.is_infinite
if infinite:
saw_INF.add(fuzzy_or((isneg, a.is_nonpositive)))
if True in saw_INF and False in saw_INF:
return
if isneg:
neg = True
continue
elif a.is_nonpositive:
nonpos = True
continue
elif a.is_nonnegative:
nonneg = True
continue
if infinite is None:
return
unknown_sign = True
if saw_INF:
if len(saw_INF) > 1:
return
return saw_INF.pop()
elif unknown_sign:
return
elif not nonneg and not nonpos and neg:
return True
elif not nonneg and neg:
return True
elif not neg and not nonpos:
return False
def _eval_subs(self, old, new):
if not old.is_Add:
return None
coeff_self, terms_self = self.as_coeff_Add()
coeff_old, terms_old = old.as_coeff_Add()
if coeff_self.is_Rational and coeff_old.is_Rational:
if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y
return self.func(new, coeff_self, -coeff_old)
if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y
return self.func(-new, coeff_self, coeff_old)
if coeff_self.is_Rational and coeff_old.is_Rational \
or coeff_self == coeff_old:
args_old, args_self = self.func.make_args(
terms_old), self.func.make_args(terms_self)
if len(args_old) < len(args_self): # (a+b+c).subs(b+c,x) -> a+x
self_set = set(args_self)
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(new, coeff_self, -coeff_old,
*[s._subs(old, new) for s in ret_set])
args_old = self.func.make_args(
-terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(-new, coeff_self, coeff_old,
*[s._subs(old, new) for s in ret_set])
def removeO(self):
args = [a for a in self.args if not a.is_Order]
return self._new_rawargs(*args)
def getO(self):
args = [a for a in self.args if a.is_Order]
if args:
return self._new_rawargs(*args)
@cacheit
def extract_leading_order(self, symbols, point=None):
"""
Returns the leading term and its order.
Examples
========
>>> from sympy.abc import x
>>> (x + 1 + 1/x**5).extract_leading_order(x)
((x**(-5), O(x**(-5))),)
>>> (1 + x).extract_leading_order(x)
((1, O(1)),)
>>> (x + x**2).extract_leading_order(x)
((x, O(x)),)
"""
from sympy import Order
lst = []
symbols = list(symbols if is_sequence(symbols) else [symbols])
if not point:
point = [0]*len(symbols)
seq = [(f, Order(f, *zip(symbols, point))) for f in self.args]
for ef, of in seq:
for e, o in lst:
if o.contains(of) and o != of:
of = None
break
if of is None:
continue
new_lst = [(ef, of)]
for e, o in lst:
if of.contains(o) and o != of:
continue
new_lst.append((e, o))
lst = new_lst
return tuple(lst)
def as_real_imag(self, deep=True, **hints):
"""
returns a tuple representing a complex number
Examples
========
>>> from sympy import I
>>> (7 + 9*I).as_real_imag()
(7, 9)
>>> ((1 + I)/(1 - I)).as_real_imag()
(0, 1)
>>> ((1 + 2*I)*(1 + 3*I)).as_real_imag()
(-5, 5)
"""
sargs, terms = self.args, []
re_part, im_part = [], []
for term in sargs:
re, im = term.as_real_imag(deep=deep)
re_part.append(re)
im_part.append(im)
return (self.func(*re_part), self.func(*im_part))
def _eval_as_leading_term(self, x):
from sympy import expand_mul, factor_terms
old = self
expr = expand_mul(self)
if not expr.is_Add:
return expr.as_leading_term(x)
infinite = [t for t in expr.args if t.is_infinite]
expr = expr.func(*[t.as_leading_term(x) for t in expr.args]).removeO()
if not expr:
# simple leading term analysis gave us 0 but we have to send
# back a term, so compute the leading term (via series)
return old.compute_leading_term(x)
elif expr is S.NaN:
return old.func._from_args(infinite)
elif not expr.is_Add:
return expr
else:
plain = expr.func(*[s for s, _ in expr.extract_leading_order(x)])
rv = factor_terms(plain, fraction=False)
rv_simplify = rv.simplify()
# if it simplifies to an x-free expression, return that;
# tests don't fail if we don't but it seems nicer to do this
if x not in rv_simplify.free_symbols:
if rv_simplify.is_zero and plain.is_zero is not True:
return (expr - plain)._eval_as_leading_term(x)
return rv_simplify
return rv
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args])
def __neg__(self):
return self.func(*[-t for t in self.args])
def _sage_(self):
s = 0
for x in self.args:
s += x._sage_()
return s
def primitive(self):
"""
        Return ``(R, self/R)`` where ``R`` is the Rational GCD of ``self``.
``R`` is collected only from the leading coefficient of each term.
Examples
========
>>> from sympy.abc import x, y
>>> (2*x + 4*y).primitive()
(2, x + 2*y)
>>> (2*x/3 + 4*y/9).primitive()
(2/9, 3*x + 2*y)
>>> (2*x/3 + 4.2*y).primitive()
(1/3, 2*x + 12.6*y)
No subprocessing of term factors is performed:
>>> ((2 + 2*x)*x + 2).primitive()
(1, x*(2*x + 2) + 2)
Recursive subprocessing can be done with the as_content_primitive()
method:
>>> ((2 + 2*x)*x + 2).as_content_primitive()
(2, x*(x + 1) + 1)
See also: primitive() function in polytools.py
"""
terms = []
inf = False
for a in self.args:
c, m = a.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = a
inf = inf or m is S.ComplexInfinity
terms.append((c.p, c.q, m))
if not inf:
ngcd = reduce(igcd, [t[0] for t in terms], 0)
dlcm = reduce(ilcm, [t[1] for t in terms], 1)
else:
ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0)
dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1)
if ngcd == dlcm == 1:
return S.One, self
if not inf:
for i, (p, q, term) in enumerate(terms):
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
for i, (p, q, term) in enumerate(terms):
if q:
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
terms[i] = _keep_coeff(Rational(p, q), term)
# we don't need a complete re-flattening since no new terms will join
# so we just use the same sort as is used in Add.flatten. When the
# coefficient changes, the ordering of terms may change, e.g.
# (3*x, 6*y) -> (2*y, x)
#
# We do need to make sure that term[0] stays in position 0, however.
#
if terms[0].is_Number or terms[0] is S.ComplexInfinity:
c = terms.pop(0)
else:
c = None
_addsort(terms)
if c:
terms.insert(0, c)
return Rational(ngcd, dlcm), self._new_rawargs(*terms)
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self. If radical is True (default is False) then
common radicals will be removed and included as a factor of the
primitive expression.
Examples
========
>>> from sympy import sqrt
>>> (3 + 3*sqrt(2)).as_content_primitive()
(3, 1 + sqrt(2))
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
See docstring of Expr.as_content_primitive for more examples.
"""
con, prim = self.func(*[_keep_coeff(*a.as_content_primitive(
radical=radical)) for a in self.args]).primitive()
if radical and prim.is_Add:
# look for common radicals that can be removed
args = prim.args
rads = []
common_q = None
for m in args:
term_rads = defaultdict(list)
for ai in Mul.make_args(m):
if ai.is_Pow:
b, e = ai.as_base_exp()
if e.is_Rational and b.is_Integer:
term_rads[e.q].append(abs(int(b))**e.p)
if not term_rads:
break
if common_q is None:
common_q = set(term_rads.keys())
else:
common_q = common_q & set(term_rads.keys())
if not common_q:
break
rads.append(term_rads)
else:
# process rads
# keep only those in common_q
for r in rads:
for q in list(r.keys()):
if q not in common_q:
r.pop(q)
for q in r:
r[q] = prod(r[q])
# find the gcd of bases for each q
G = []
for q in common_q:
g = reduce(igcd, [r[q] for r in rads], 0)
if g != 1:
G.append(g**Rational(1, q))
if G:
G = Mul(*G)
args = [ai/G for ai in args]
prim = G*prim.func(*args)
return con, prim
@property
def _sorted_args(self):
from sympy.core.compatibility import default_sort_key
return tuple(sorted(self.args, key=lambda w: default_sort_key(w)))
def _eval_difference_delta(self, n, step):
from sympy.series.limitseq import difference_delta as dd
return self.func(*[dd(a, n, step) for a in self.args])
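# These imports sit at the bottom of the module deliberately: mul (and the
# numbers it pulls in) imports from this module, so importing them at the
# top would create a circular import.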
from .mul import Mul, _keep_coeff, prod
from sympy.core.numbers import Rational
|
{
"content_hash": "bc350d292127e607bcb78588da5ef47a",
"timestamp": "",
"source": "github",
"line_count": 987,
"max_line_length": 79,
"avg_line_length": 33.13373860182371,
"alnum_prop": 0.4821270219857505,
"repo_name": "Arafatk/sympy",
"id": "a1fe26f05d444239f8c491eaedc3d26e1d239941",
"size": "32703",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sympy/core/add.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14201485"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "5199"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals
from time import time
from raspiutil.sensors import LPS331
NAME = "pressure.lps331"
if __name__ == '__main__':
lps = LPS331(1, 0x5d)
try:
lps.open()
# Print current pressure
print("{0}\t{1:.2f}\t{2:.0f}".format(NAME, lps.pressure, time()))
finally:
lps.close()
|
{
"content_hash": "d70a71b79f324e4a8e7aec4bdf972278",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 22.8125,
"alnum_prop": 0.6027397260273972,
"repo_name": "ymyzk/mackerel-plugins",
"id": "948d499188ed156d1ab43d2d5a5780453b03f02c",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lps331/lps331_p.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2170"
},
{
"name": "Shell",
"bytes": "2553"
}
],
"symlink_target": ""
}
|
""" Package to be compiled. """
def runtest():
import nose
nose.run("package", "package")
|
{
"content_hash": "207382dcdc5b3d7963ff9750683a0397",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 19.8,
"alnum_prop": 0.6060606060606061,
"repo_name": "kayhayen/Nuitka",
"id": "a7a379c3c279ea28374b1adf9113750f01ddaaa3",
"size": "994",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test-runners/subject/package/sub_package1/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1868"
},
{
"name": "C",
"bytes": "617681"
},
{
"name": "C++",
"bytes": "149777"
},
{
"name": "Python",
"bytes": "6603718"
},
{
"name": "Shell",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
from itertools import groupby
from datetime import datetime, timedelta
import re
from dexter.app import app
from flask import request, make_response, jsonify, session
from flask.ext.mako import render_template
from flask.ext.security import roles_accepted, current_user, login_required
from sqlalchemy.sql import func, distinct, or_, desc
from sqlalchemy.orm import joinedload, lazyload
from sqlalchemy_fulltext import FullTextSearch
import sqlalchemy_fulltext.modes as FullTextMode
from dexter.models import * # noqa
from dexter.models.document import DocumentAnalysisProblem, DocumentTag
from dexter.models.user import default_country_id
from wtforms import validators, HiddenField, TextField, SelectMultipleField, BooleanField
from .forms import Form, SelectField, MultiCheckboxField, RadioField
from .analysis import SourceAnalyser, TopicAnalyser, XLSXExportBuilder, ChildrenRatingExport, MediaDiversityRatingExport
from utils import paginate
@app.route('/dashboard')
@login_required
@roles_accepted('monitor')
def dashboard():
latest_docs = [x.id for x in Document.query.order_by(Document.created_at.desc()).limit(30)]
latest_docs = Document.query\
.options(
joinedload('created_by'),
joinedload('sources'),
joinedload('topic'),
joinedload('medium'),
)\
.filter(Document.id.in_(latest_docs))\
.order_by(Document.created_at.desc())
doc_groups = []
for date, group in groupby(latest_docs, lambda d: d.created_at.date()):
doc_groups.append([date, list(group)])
return render_template('dashboard/dashboard.haml',
doc_groups=doc_groups)
@app.route('/monitor-dashboard')
@login_required
@roles_accepted('monitor')
def monitor_dashboard():
docs = [x.id for x in Document.query
.filter(or_(
Document.created_by_user_id == current_user.id,
Document.checked_by_user_id == current_user.id
))
.order_by(Document.created_at.desc()).limit(30)]
docs = Document.query\
.options(
joinedload('created_by'),
joinedload('sources'),
joinedload('topic'),
joinedload('medium'),
)\
.filter(Document.id.in_(docs))\
.order_by(Document.created_at.desc())
doc_groups = []
for date, group in groupby(docs, lambda d: d.created_at.date()):
doc_groups.append([date, list(group)])
return render_template('dashboard/monitor.haml',
doc_groups=doc_groups)
@app.route('/activity')
@login_required
@roles_accepted('monitor')
def activity():
per_page = 100
form = ActivityForm(request.args)
try:
page = int(request.args.get('page', 1))
except ValueError:
page = 1
if form.format.data == 'chart-json':
# chart data in json format
return jsonify(ActivityChartHelper(form).chart_data())
elif form.format.data == 'places-json':
# places in json format
query = Document.query.options(joinedload('places').joinedload('place'))
query = form.filter_query(query)
return jsonify(DocumentPlace.summary_for_docs(query.all()))
elif form.format.data == 'xlsx' and current_user.admin:
# excel spreadsheet
excel = XLSXExportBuilder(form).build()
response = make_response(excel)
response.headers["Content-Disposition"] = "attachment; filename=%s" % form.filename()
response.headers["Content-Type"] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
return response
elif form.format.data == 'children-ratings.xlsx' and current_user.admin:
# excel spreadsheet
excel = ChildrenRatingExport(form.document_ids()).build()
response = make_response(excel)
response.headers["Content-Disposition"] = "attachment; filename=%s" % form.filename()
response.headers["Content-Type"] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
return response
elif form.format.data == 'media-diversity-ratings.xlsx' and current_user.admin:
# excel spreadsheet
excel = MediaDiversityRatingExport(form.document_ids()).build()
response = make_response(excel)
response.headers["Content-Disposition"] = "attachment; filename=%s" % form.filename()
response.headers["Content-Type"] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
return response
# setup pagination for doc ids
query = db.session.query(Document.id).order_by(Document.created_at.desc())
query = form.filter_query(query)
all_doc_ids = [str(d[0]) for d in query]
pagination = paginate(query, page, per_page)
doc_ids = [t.id for t in pagination.items]
# get documents
docs = Document.query\
.options(joinedload(Document.created_by),
joinedload(Document.medium),
joinedload(Document.sources).lazyload('*'),
lazyload(Document.raw_tags),
)\
.filter(Document.id.in_(doc_ids))\
.order_by(Document.created_at.desc())\
.all()
# group by date added
doc_groups = []
for date, group in groupby(docs, lambda d: d.created_at.date()):
doc_groups.append([date, list(group)])
# tags
tag_summary = db.session\
.query(DocumentTag.tag, func.count(1).label('count'))\
.filter(DocumentTag.doc_id.in_(doc_ids))\
.group_by(DocumentTag.tag)\
.order_by(desc('count'), DocumentTag.tag)\
.all()
try:
session[str(current_user.id)]['search'] = request.url
    except KeyError:
session[str(current_user.id)] = {'search': []}
session[str(current_user.id)]['search'] = request.url
return render_template('dashboard/activity.haml',
form=form,
pagination=pagination,
doc_groups=doc_groups,
tag_summary=tag_summary,
all_doc_ids=all_doc_ids)
@app.route('/activity/map')
@login_required
@roles_accepted('monitor')
def activity_map():
form = ActivityForm(request.args)
if form.format.data == 'places-json':
# places in json format
query = Document.query.options(joinedload('places').joinedload('place'))
query = form.filter_query(query)
return jsonify(DocumentPlace.summary_for_docs(query.all()))
return render_template('dashboard/map.haml',
form=form)
@app.route('/activity/sources')
@login_required
@roles_accepted('monitor')
def activity_sources():
form = ActivityForm(request.args)
sa = SourceAnalyser(doc_ids=form.document_ids())
sa.analyse()
sa.load_utterances()
# problem sources
problem_people = sa.find_problem_people()
problem_people.sort(key=lambda p: -sa.analysed_people[p.id].source_counts_total)
return render_template('dashboard/sources.haml',
form=form,
problem_people=problem_people,
source_analyser=sa)
@app.route('/activity/mentions')
@login_required
@roles_accepted('monitor')
def activity_mentions():
form = ActivityForm(request.args)
ta = TopicAnalyser(doc_ids=form.document_ids())
ta.find_top_people()
return render_template('dashboard/mentions.haml',
form=form,
topic_analyser=ta)
@app.route('/activity/topics')
@login_required
@roles_accepted('monitor')
def activity_topics():
# topics take a while to build, so this
# just returns a shell view which calls back for
# the actual HTML via ajax, served by activity_topics_detail
form = ActivityForm(request.args)
return render_template('dashboard/topics.haml',
form=form)
@app.route('/activity/topics/detail')
@login_required
@roles_accepted('monitor')
def activity_topics_detail():
form = ActivityForm(request.args)
ta = TopicAnalyser(doc_ids=form.document_ids())
ta.find_topics()
ta.save()
db.session.commit()
return render_template('dashboard/topics_detail.haml',
topic_analyser=ta)
@app.route('/activity/taxonomies')
@login_required
@roles_accepted('monitor')
def activity_taxonomies():
form = ActivityForm(request.args)
taxonomies = DocumentTaxonomy.summary_for_docs(form.document_ids())
return render_template('dashboard/taxonomies.haml',
taxonomies=taxonomies,
form=form)
class ActivityForm(Form):
cluster_id = HiddenField('Cluster')
analysis_nature_id = SelectField('Analysis', default=AnalysisNature.ANCHOR_ID)
user_id = SelectField('User', [validators.Optional()], default='')
medium_id = SelectMultipleField('Medium', [validators.Optional()], default='')
country_id = SelectMultipleField('Country', [validators.Optional()], default=default_country_id)
created_at = TextField('Added', [validators.Optional()])
published_at = TextField('Published', [validators.Optional()])
problems = MultiCheckboxField('Article problems', [validators.Optional()], choices=DocumentAnalysisProblem.for_select())
flagged = BooleanField('flagged')
has_url = RadioField('hasurl', [validators.Optional()], choices=[('1', 'with URL'), ('0', 'without URL')])
source_person_id = TextField('With source', [validators.Optional()])
format = HiddenField('format', default='html')
# free text search
q = TextField('Keyword search', [validators.Optional()])
tags = TextField('Tags', [validators.Optional()])
def __init__(self, *args, **kwargs):
super(ActivityForm, self).__init__(*args, **kwargs)
from .models.document import DocumentTag
self.user_id.choices = [['', '(any)'], ['-', '(none)']] + [
[str(u.id), u.short_name()] for u in sorted(User.query.all(), key=lambda u: u.short_name())]
self.medium_id.choices = [(str(m.id), m.name) for m in Medium.query.order_by(Medium.name).all()]
self.analysis_nature_id.choices = [[str(n.id), n.name] for n in AnalysisNature.all()]
self.natures = AnalysisNature.all()
self.tags.choices = [t[0] for t in db.session.query(DocumentTag.tag.distinct()).order_by(DocumentTag.tag)]
# only admins can see all countries
if current_user.admin:
countries = Country.all()
else:
countries = [current_user.country]
self.country_id.choices = [[str(c.id), c.name] for c in countries]
# override the analysis nature id if we have a cluster
if self.cluster_id.data:
self.analysis_nature_id.data = str(self.cluster().members[0].document.analysis_nature_id)
# at least one of these must be set
oneof = [self.created_at, self.published_at, self.user_id, self.medium_id, self.cluster_id]
if not any(x.data for x in oneof):
self.published_at.data = ' - '.join(d.strftime("%Y/%m/%d") for d in [datetime.utcnow() - timedelta(days=14), datetime.utcnow()])
def user(self):
if self.user_id.data and self.user_id.data != '-':
return User.query.get(self.user_id.data)
return None
def media(self):
if self.medium_id.data:
return Medium.query.filter(Medium.id.in_(self.medium_id.data))
else:
return None
def countries(self):
if self.country_id.data:
return Country.query.filter(Country.id.in_(self.country_id.data))
return None
def analysis_nature(self):
if self.analysis_nature_id.data:
return AnalysisNature.query.get(self.analysis_nature_id.data)
return None
def cluster(self):
if self.cluster_id.data:
return Cluster.query.get(self.cluster_id.data)
return None
def source_person(self):
if self.source_person_id.data:
return Person.query.get(self.source_person_id.data)
return None
def get_problems(self):
return [DocumentAnalysisProblem.lookup(code) for code in self.problems.data]
@property
def created_from(self):
if self.created_at.data:
return self.created_at.data.split(' - ')[0].strip()
else:
return None
@property
def created_to(self):
if self.created_at.data and ' - ' in self.created_at.data:
return self.created_at.data.split(' - ')[1].strip() + ' 23:59:59'
else:
return self.created_from
@property
def published_from(self):
if self.published_at.data:
return self.published_at.data.split(' - ')[0].strip()
else:
return None
@property
def published_to(self):
if self.published_at.data and ' - ' in self.published_at.data:
return self.published_at.data.split(' - ')[1].strip() + ' 23:59:59'
else:
return self.published_from
def document_ids(self):
return [d[0] for d in self.filter_query(db.session.query(Document.id)).all()]
def filter_query(self, query):
query = query.filter(Document.analysis_nature_id == self.analysis_nature_id.data)
if self.cluster_id.data:
query = query.join(ClusteredDocument)\
.filter(ClusteredDocument.cluster_id == self.cluster_id.data)
if self.medium_id.data:
query = query.filter(Document.medium_id.in_(self.medium_id.data))
if self.user_id.data:
if self.user_id.data == '-':
query = query.filter(or_(
Document.created_by_user_id == None, # noqa
Document.checked_by_user_id == None))
else:
query = query.filter(or_(
Document.created_by_user_id == self.user_id.data,
Document.checked_by_user_id == self.user_id.data))
if self.country_id.data:
query = query.filter(Document.country_id.in_(self.country_id.data))
if self.created_from:
query = query.filter(Document.created_at >= self.created_from)
if self.created_to:
query = query.filter(Document.created_at <= self.created_to)
if self.published_from:
query = query.filter(Document.published_at >= self.published_from)
if self.published_to:
query = query.filter(Document.published_at <= self.published_to)
if self.source_person_id.data:
query = query\
.join(DocumentSource)\
.filter(DocumentSource.person_id == self.source_person_id.data)
if self.problems.data:
for code in self.problems.data:
query = DocumentAnalysisProblem.lookup(code).filter_query(query)
if self.flagged.data:
query = query.filter(Document.flagged == True) # noqa
if self.has_url.data == '1':
query = query.filter(Document.url != None, Document.url != '') # noqa
elif self.has_url.data == '0':
query = query.filter(or_(Document.url == None, Document.url == '')) # noqa
if self.q.data:
# full text search
query = query.filter(FullTextSearch(self.q.data, Document, FullTextMode.NATURAL))
if self.tags.data:
            tags = set(f for f in re.split(r'\s*,\s*', self.tags.data) if f)
for tag in tags:
query = query.filter(Document.tags.contains(tag))
return query
def filename(self):
filename = ['documents']
if self.created_at.data:
filename.append('added')
filename.append(self.created_at.data.replace(' ', ''))
if self.published_at.data:
filename.append('published')
filename.append(self.published_at.data.replace(' ', ''))
if self.format.data == 'children-ratings.xlsx':
filename.insert(0, 'children-ratings')
ext = 'xlsx'
elif self.format.data == 'media-diversity-ratings.xlsx':
filename.insert(0, 'media-diversity-ratings')
ext = 'xlsx'
else:
ext = self.format.data
return "%s.%s" % ('-'.join(filename), ext)
class ActivityChartHelper:
def __init__(self, form):
self.form = form
# we use these to filter our queries, rather than trying to pull
# complex filter logic into our view queries
self.doc_ids = [d[0] for d in form.filter_query(db.session.query(Document.id)).all()]
def chart_data(self):
return {
'charts': {
'created': self.created_chart(),
'published': self.published_chart(),
'users': self.users_chart(),
'countries': self.countries_chart(),
'media': self.media_chart(),
'problems': self.problems_chart(),
'fairness': self.fairness_chart(),
'markers': self.markers_chart(),
},
'summary': {
'documents': len(self.doc_ids)
}
}
def created_chart(self):
query = db.session.query(
func.date_format(Document.created_at, '%Y/%m/%d').label('t'),
func.count(Document.id),
).group_by('t')
return {
'values': dict(self.filter(query).all())
}
def published_chart(self):
query = db.session.query(
func.date_format(Document.published_at, '%Y/%m/%d').label('t'),
func.count(Document.id),
).group_by('t')
return {
'values': dict(self.filter(query).all())
}
def users_chart(self):
query = db.session.query(
func.ifnull(Document.checked_by_user_id, Document.created_by_user_id),
func.count(Document.id),
).group_by(Document.created_by_user_id)
rows = self.filter(query).all()
users = dict((u.id, u.short_name()) for u in User.query.filter(User.id.in_(r[0] for r in rows)))
return {
'values': dict((users.get(r[0], 'None'), r[1]) for r in rows)
}
def countries_chart(self):
query = db.session.query(
Document.country_id,
func.count(Document.id),
).group_by(Document.country_id)
rows = self.filter(query).all()
countries = dict((c.id, c.name) for c in Country.query.filter(Country.id.in_(r[0] for r in rows)))
return {
'values': dict((countries.get(r[0], 'None'), r[1]) for r in rows)
}
def fairness_chart(self):
query = db.session.query(
Fairness.name.label('t'),
func.count(distinct(DocumentFairness.doc_id))
)\
.join(DocumentFairness)\
.join(Document, DocumentFairness.doc_id == Document.id)\
.group_by('t')
rows = self.filter(query).all()
counts = dict(rows)
counts.setdefault('Fair', 0)
# missing documents are considered fair
counts['Fair'] += len(self.doc_ids) - sum(counts.itervalues())
return {
'values': counts
}
def media_chart(self):
query = db.session.query(Medium.name, func.count(Document.id))\
.join(Document)\
.group_by(Medium.name)
rows = self.filter(query).all()
return {
'values': dict(rows),
'types': dict([m.name, m.medium_type] for m in Medium.query.all())
}
def problems_chart(self):
counts = {}
for p in DocumentAnalysisProblem.all():
query = db.session.query(func.count(distinct(Document.id)))
query = self.filter(p.filter_query(query))
counts[p.short_desc] = query.scalar()
return {
'values': counts
}
def markers_chart(self):
counts = {}
# flagged
query = self.filter(
db.session.query(func.count(Document.id))
.filter(Document.flagged == True)) # noqa
counts['flagged'] = query.scalar()
# with URL
query = self.filter(
db.session.query(func.count(Document.id))
.filter(Document.url != None, Document.url != '')) # noqa
counts['with-url'] = query.scalar()
# without URL
query = self.filter(
db.session.query(func.count(Document.id))
.filter(or_(Document.url == None, Document.url == ''))) # noqa
counts['without-url'] = query.scalar()
# average people sources per document
subq = self.filter(
db.session
.query(func.count(DocumentSource.doc_id).label('count'))
.join(Document, DocumentSource.doc_id == Document.id)
.filter(DocumentSource.quoted == 1)
.group_by(DocumentSource.doc_id))\
.subquery('cnt')
n = float(db.session
.query(func.avg(subq.c.count))
.select_from(subq)
.scalar() or 0)
counts['average-sources-per-document'] = round(n, 2)
return {
'values': counts
}
def filter(self, query):
return query.filter(Document.id.in_(self.doc_ids))
|
{
"content_hash": "4fe7e1324508656ac138349575968f76",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 140,
"avg_line_length": 34.90422077922078,
"alnum_prop": 0.5949025626715037,
"repo_name": "Code4SA/mma-dexter",
"id": "494d9e043574d6b3eb7de522630e52cd5285dfec",
"size": "21501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dexter/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "74553"
},
{
"name": "HTML",
"bytes": "4956"
},
{
"name": "Haml",
"bytes": "109002"
},
{
"name": "JavaScript",
"bytes": "134851"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "192"
},
{
"name": "Python",
"bytes": "1949697"
},
{
"name": "SCSS",
"bytes": "12471"
}
],
"symlink_target": ""
}
|
import datetime
from sheets import Registrant, Registrations
DATE_FORMAT = "%m/%d/%Y"
def test_rows_returns_registrants(registrations: Registrations, test_registrants):
"""rows() should return a list of Registrant objects"""
all_registrations = registrations.rows()
assert isinstance(all_registrations, list)
assert len(all_registrations) == len(test_registrants)
registrant = all_registrations[0]
expected = test_registrants[0]
assert registrant == expected
def test_is_registered_returns_true_for_existing_registrants(
registrations: Registrations, test_registrants
):
"""is_registered(registrant) should return True for already registered users"""
registrant = test_registrants[0]
existing_registrant = Registrant(
user_email=registrant.user_email,
hackathon_name=registrant.hackathon_name,
date_registered=registrant.date_registered,
attended=bool(registrant.attended),
)
assert registrations.is_registered(existing_registrant)
def test_is_registered_returns_false_for_new_registrants(
registrations: Registrations, test_registrants
):
"""is_registered(registrant) should return False for already registered users"""
new_registrant = Registrant(
user_email="newregistrant@newompany.com",
hackathon_name="brand_new_hackathon",
date_registered=datetime.date.today(),
attended=None,
)
assert not registrations.is_registered(new_registrant)
def test_register(registrations: Registrations):
"""register() should append new registrants to registrations sheets"""
new_registrant = Registrant(
user_email="newregistrant@newompany.com",
hackathon_name="brand_new_hackathon",
date_registered=datetime.datetime.now(),
attended=None,
)
assert not registrations.is_registered(new_registrant)
registrations.register(new_registrant)
assert registrations.is_registered(new_registrant)
|
{
"content_hash": "0a3bf52d997506da6d2faf4bd1c5a0ac",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 84,
"avg_line_length": 35.285714285714285,
"alnum_prop": 0.7272267206477733,
"repo_name": "looker/sdk-examples",
"id": "9b3ffa750c6f8b6d83e7ad37b32364b3ab39dcde",
"size": "1976",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/hackathon_app/tests/integration/test_registrations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "672"
},
{
"name": "Dockerfile",
"bytes": "290"
},
{
"name": "HTML",
"bytes": "1843"
},
{
"name": "Python",
"bytes": "65243"
},
{
"name": "Ruby",
"bytes": "22550"
},
{
"name": "Shell",
"bytes": "1118"
},
{
"name": "Swift",
"bytes": "538353"
},
{
"name": "TypeScript",
"bytes": "25093"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
from stve.log import LOG as L
from stve.script import StveTestCase
class TestCase(StveTestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
L.info("*** Start TestCase : %s *** " % __file__)
def test(self):
self.assertTrue("stve.picture" in self.service.keys())
@classmethod
def tearDownClass(cls):
L.info("*** End TestCase : %s *** " % __file__)
|
{
"content_hash": "f68436ad15549cc9810b055593b1f109",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 23.772727272727273,
"alnum_prop": 0.5984703632887189,
"repo_name": "TE-ToshiakiTanaka/stve",
"id": "b21f63395d1f1396273e7d1899896c9118e17b14",
"size": "523",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/data/testcase/picture/picture_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "307"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "167886"
},
{
"name": "Shell",
"bytes": "2225"
}
],
"symlink_target": ""
}
|
from pychron.hardware.core.communicators.communicator import Communicator
from pychron.hardware.core.communicators.visa import resource_manager
class GpibCommunicator(Communicator):
"""
    Uses PyVISA as the main interface to GPIB. Currently (8/27/14) a 32-bit
    Python build is required: the NI-488.2 framework does not work with a
    64-bit distribution.
"""
primary_address = 0
secondary_address = 0
def open(self, *args, **kw):
        self.debug('opening gpib communicator')
self.handle = resource_manager.get_instrument('GPIB{}::{}::INSTR'.format(self.primary_address,
self.secondary_address))
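        # with the default addresses above this yields the VISA resource
        # string 'GPIB0::0::INSTR'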
if self.handle is not None:
self.simulation = False
return True
def load(self, config, path, **kw):
self.set_attribute(config, 'primary_address', 'Communications', 'primary_address')
self.set_attribute(config, 'secondary_address', 'Communications', 'secondary_address', optional=False)
return True
def trigger(self):
self.handle.trigger()
def ask(self, cmd):
return self.handle.ask(cmd)
def tell(self, cmd):
self.handle.write(cmd)
# address = 16
#
# def load(self, config, path):
# return True
#
# def open(self, *args, **kw):
# try:
# self.handle = cdll.LoadLibrary(NI_PATH)
# except:
# return False
#
# self.dev_handle = self.handle.ibdev(0, self.address, 0, 4, 1, 0)
# return True
# # print self.dev_handle
# # if self.dev_handle < 0:
# # self.simulation = True
# # else:
# # self.simulation = False
# #
# #
# # print self.simulation, 'fff'
# # return not self.simulation
#
# def ask(self, cmd, verbose=True, *args, **kw):
# # self.handle.ibask(self.dev_handle)
# if self.handle is None:
# if verbose:
# self.info('no handle {}'.format(cmd.strip()))
# return
#
# self._lock.acquire()
# r = ''
# retries = 5
# i = 0
# while len(r) == 0 and i < retries:
# self._write(cmd)
# time.sleep(0.05)
# r = self._read()
#
# i += 1
#
# if verbose:
# self.log_response(cmd, r)
#
# self.handle.ibclr(self.dev_handle)
# self._lock.release()
#
# return r
#
# def tell(self, *args, **kw):
# self.write(*args, **kw)
#
# def write(self, cmd, verbose=True, *args, **kw):
#
# self._write(cmd, *args, **kw)
# if verbose:
# self.info(cmd)
#
# def _write(self, cmd, *args, **kw):
# if self.simulation:
# pass
# else:
# cmd += self._terminator
# self.handle.ibwrt(self.dev_handle, cmd, len(cmd))
#
# def _read(self):
# if self.simulation:
# pass
# else:
# b = create_string_buffer('\0' * 4096)
# retries = 10
# i = 0
# while len(b.value) == 0 and i <= retries:
# self.handle.ibrd(self.dev_handle, b, 4096)
# i += 1
# return b.value.strip()
#
# if __name__ == '__main__':
# g = GPIBCommunicator()
# g.open()
#
# print g.tell('1HX')
# # print g.ask('2TP?')
# ============= EOF ====================================
|
{
"content_hash": "94a8decbf7f747709705d6b6cc96e049",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 110,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.5054755043227666,
"repo_name": "UManPychron/pychron",
"id": "c5235dc0c1704a9c832a794f731a5b072cb5401d",
"size": "4543",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/hardware/core/communicators/gpib_communicator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
}
|
import hmac
import json
import time
from typing import TYPE_CHECKING
import thor
from thor.http import get_header
from redbot.formatter import slack
from redbot.resource import HttpResource
from redbot.resource.fetch import RedHttpClient
from redbot.webui.ratelimit import ratelimiter
from redbot.webui.saved_tests import init_save_file, save_test
if TYPE_CHECKING:
from redbot.webui import RedWebUi # pylint: disable=cyclic-import,unused-import
def slack_run(webui: "RedWebUi") -> None:
"""Handle a slack request."""
webui.test_uri = webui.body_args.get("text", [""])[0].strip()
webui.test_id = init_save_file(webui)
slack_response_uri = webui.body_args.get("response_url", [""])[0].strip()
formatter = slack.SlackFormatter(
webui.config,
None,
webui.output,
slack_uri=slack_response_uri,
test_id=webui.test_id,
)
webui.exchange.response_start(
b"200",
b"OK",
[
(b"Content-Type", formatter.content_type()),
(b"Cache-Control", b"max-age=300"),
],
)
# enforce rate limits
try:
ratelimiter.process_slack(webui)
except ValueError as msg:
webui.output(
json.dumps(
{
"response_type": "ephemeral",
"text": str(msg),
}
)
)
webui.exchange.response_done([])
return # over limit, don't continue.
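    # Acknowledge right away -- Slack expects slash commands to respond
    # within a few seconds -- and let the formatter deliver the full report
    # to response_url once the check completes.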
webui.output(
json.dumps(
{
"response_type": "ephemeral",
"text": f"_Checking_ {webui.test_uri} _..._",
}
)
)
webui.exchange.response_done([])
top_resource = HttpResource(webui.config)
top_resource.set_request(webui.test_uri, req_hdrs=webui.req_hdrs)
formatter.bind_resource(top_resource)
if not verify_slack_secret(webui):
webui.error_response(
formatter,
b"403",
b"Forbidden",
"Incorrect Slack Authentication.",
"Bad slack token.",
)
return
webui.timeout = thor.schedule(int(webui.config["max_runtime"]), formatter.timeout)
@thor.events.on(formatter)
def formatter_done() -> None:
if webui.timeout:
webui.timeout.delete()
webui.timeout = None
save_test(webui, top_resource)
top_resource.check()
def verify_slack_secret(webui: "RedWebUi") -> bool:
"""Verify the slack secret."""
slack_signing_secret = webui.config.get("slack_signing_secret", fallback="").encode(
"utf-8"
)
timestamps = get_header(webui.req_headers, b"x-slack-request-timestamp")
if not timestamps or not timestamps[0].isdigit():
return False
timestamp = timestamps[0]
if abs(time.time() - int(timestamp)) > 60 * 5:
return False
sig_basestring = b"v0:" + timestamp + b":" + webui.req_body
signature = (
f"v0={hmac.new(slack_signing_secret, sig_basestring, 'sha256').hexdigest()}"
)
presented_signature = get_header(webui.req_headers, b"x-slack-signature")
if not presented_signature:
return False
presented_sig = presented_signature[0].decode("utf-8")
return hmac.compare_digest(signature, presented_sig)
def slack_auth(webui: "RedWebUi") -> None:
webui.error_log("Slack Auth Redirect received.")
args = [
("code", webui.query_string.get("code", [""])[0]),
("client_id", webui.config.get("slack_client_id", fallback="")),
("client_secret", webui.config.get("slack_client_secret", fallback="")),
]
payload = "&".join([f"{arg[0]}={arg[1]}" for arg in args])
client = RedHttpClient().exchange()
client.request_start(
b"POST",
b"https://slack.com/api/oauth.v2.access",
[(b"content-type", b"application/x-www-form-urlencoded")],
)
client.request_body(payload.encode("utf-8"))
client.request_done([])
webui.exchange.response_start(b"200", b"OK", [])
webui.output("Response sent to Slack; your app should be installed.")
webui.exchange.response_done([])
|
{
"content_hash": "47a4e2e4501079085fb77f5a895858ff",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 88,
"avg_line_length": 31.776923076923076,
"alnum_prop": 0.6025175502299686,
"repo_name": "mnot/redbot",
"id": "70bf9f1bcd0a294327d026716ffd281a9b0d802c",
"size": "4131",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "redbot/webui/slack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10019"
},
{
"name": "Dockerfile",
"bytes": "314"
},
{
"name": "HTML",
"bytes": "12590"
},
{
"name": "JavaScript",
"bytes": "13611"
},
{
"name": "Makefile",
"bytes": "4539"
},
{
"name": "Python",
"bytes": "379846"
},
{
"name": "SCSS",
"bytes": "10095"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/weapon/shared_quick_shot_upgrade_mk3.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","quick_shot_upgrade_mk3")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "9a1daf45b3fe829dd48b76d4ce919002",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.7109144542772862,
"repo_name": "anhstudios/swganh",
"id": "d8a3b636b338589907e9eaa11a2a555f7f930249",
"size": "484",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/crafted/weapon/shared_quick_shot_upgrade_mk3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import pytest
from . import helpers
from . import run
from . import refactor
def pytest_addoption(parser):
parser.addoption(
"--integration-case-dir",
default=os.path.join(helpers.test_dir, 'completion'),
help="Directory in which integration test case files locate.")
parser.addoption(
"--refactor-case-dir",
default=os.path.join(helpers.test_dir, 'refactor'),
help="Directory in which refactoring test case files locate.")
parser.addoption(
"--test-files", "-T", default=[], action='append',
help=(
"Specify test files using FILE_NAME[:LINE[,LINE[,...]]]. "
"For example: -T generators.py:10,13,19. "
"Note that you can use -m to specify the test case by id."))
parser.addoption(
"--thirdparty", action='store_true',
help="Include integration tests that requires third party modules.")
def parse_test_files_option(opt):
"""
Parse option passed to --test-files into a key-value pair.
>>> parse_test_files_option('generators.py:10,13,19')
('generators.py', [10, 13, 19])
"""
opt = str(opt)
if ':' in opt:
(f_name, rest) = opt.split(':', 1)
return (f_name, list(map(int, rest.split(','))))
else:
return (opt, [])
def pytest_generate_tests(metafunc):
"""
:type metafunc: _pytest.python.Metafunc
"""
test_files = dict(map(parse_test_files_option,
metafunc.config.option.test_files))
if 'case' in metafunc.fixturenames:
base_dir = metafunc.config.option.integration_case_dir
thirdparty = metafunc.config.option.thirdparty
cases = list(run.collect_dir_tests(base_dir, test_files))
if thirdparty:
cases.extend(run.collect_dir_tests(
os.path.join(base_dir, 'thirdparty'), test_files, True))
metafunc.parametrize('case', cases)
if 'refactor_case' in metafunc.fixturenames:
base_dir = metafunc.config.option.refactor_case_dir
metafunc.parametrize(
'refactor_case',
refactor.collect_dir_tests(base_dir, test_files))
@pytest.fixture()
def isolated_jedi_cache(monkeypatch, tmpdir):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
    Same as `clean_jedi_cache`, but creates the temporary directory for
    each test case (scope='function').
"""
from jedi import settings
monkeypatch.setattr(settings, 'cache_directory', str(tmpdir))
@pytest.fixture(scope='session')
def clean_jedi_cache(request):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
    Note that you can't use the built-in `tmpdir` and `monkeypatch`
    fixtures here because their scope is 'function', which cannot be
    used from a 'session' scope fixture.
This fixture is activated in ../pytest.ini.
"""
from jedi import settings
old = settings.cache_directory
tmp = tempfile.mkdtemp(prefix='jedi-test-')
settings.cache_directory = tmp
@request.addfinalizer
def restore():
settings.cache_directory = old
shutil.rmtree(tmp)
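# Hedged usage sketch (test name hypothetical, not part of this conftest):
# a test opts into the isolated cache simply by requesting the fixture.
# def test_caching(isolated_jedi_cache):
#     from jedi import settings
#     assert settings.cache_directory  # points at the per-test tmpdir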
|
{
"content_hash": "407bd03557cc8b10a101bbf59dda1fce",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 32.35353535353536,
"alnum_prop": 0.6397127692788012,
"repo_name": "stevenbaker/dotfiles",
"id": "df0ecd1d217e4948a619a8e1a16332cb9a2cecd3",
"size": "3203",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": ".vim/bundle/jedi-vim/jedi/test/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "976"
},
{
"name": "Python",
"bytes": "513983"
},
{
"name": "Shell",
"bytes": "1755"
},
{
"name": "VimL",
"bytes": "220619"
}
],
"symlink_target": ""
}
|
"""Support for tracking Tesla cars."""
import logging
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.util import slugify
from . import DOMAIN as TESLA_DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up the Tesla tracker."""
tracker = TeslaDeviceTracker(
hass, config, async_see, hass.data[TESLA_DOMAIN]["devices"]["devices_tracker"]
)
await tracker.update_info()
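    # Hedged note: second=range(0, 60, 30) asks Home Assistant to invoke
    # update_info at :00 and :30 of every minute, i.e. a 30-second poll.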
async_track_utc_time_change(hass, tracker.update_info, second=range(0, 60, 30))
return True
class TeslaDeviceTracker:
"""A class representing a Tesla device."""
def __init__(self, hass, config, see, tesla_devices):
"""Initialize the Tesla device scanner."""
self.hass = hass
self.see = see
self.devices = tesla_devices
async def update_info(self, now=None):
"""Update the device info."""
for device in self.devices:
await device.async_update()
name = device.name
_LOGGER.debug("Updating device position: %s", name)
dev_id = slugify(device.uniq_name)
location = device.get_location()
if location:
lat = location["latitude"]
lon = location["longitude"]
attrs = {"trackr_id": dev_id, "id": dev_id, "name": name}
await self.see(
dev_id=dev_id, host_name=name, gps=(lat, lon), attributes=attrs
)
|
{
"content_hash": "c95012dcb6a4de16fdeae55d2c0f9be8",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 86,
"avg_line_length": 34.4,
"alnum_prop": 0.6085271317829457,
"repo_name": "leppa/home-assistant",
"id": "c205cc587eba4e4b453249665926821fa7a68b7c",
"size": "1548",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tesla/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# URL Config
ROOT_URLCONF = 'urls'
# Database connections
# For MySQL
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'curo',
# 'USER': 'username',
# 'PASSWORD': 'password',
# 'HOST': 'hostname',
# 'PORT': 'port',
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'curo.db',
}
}
# Installed applications
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'tastypie',
'curo-api',
'curo-client'
)
FIXTURE_DIRS = (
'fixtures',
)
# Load local settings from local_settings.py
try:
from local_settings import *
except ImportError:
pass
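# Hedged example of a local_settings.py override (hypothetical contents,
# not part of this repo): any name defined there shadows the defaults above.
# DEBUG = False
# DATABASES['default']['NAME'] = '/var/lib/curo/curo.db'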
|
{
"content_hash": "d0a29add8c2b01804382632ea10f0801",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 47,
"avg_line_length": 18.520833333333332,
"alnum_prop": 0.5849268841394826,
"repo_name": "reggna/Curo",
"id": "001e4d47d352e3eaab0e87627bde90e498be26f2",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "958525"
},
{
"name": "Python",
"bytes": "6814"
}
],
"symlink_target": ""
}
|
"""Test different accessory types: Switches."""
from datetime import timedelta
import pytest
from homeassistant.components.homekit.const import (
ATTR_VALUE,
TYPE_FAUCET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_VALVE,
)
from homeassistant.components.homekit.type_switches import Outlet, Switch, Valve
from homeassistant.components.script import ATTR_CAN_CANCEL
from homeassistant.const import ATTR_ENTITY_ID, CONF_TYPE, STATE_OFF, STATE_ON
from homeassistant.core import split_entity_id
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service
async def test_outlet_set_state(hass, hk_driver, events):
"""Test if Outlet accessory and HA are updated accordingly."""
entity_id = "switch.outlet_test"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = Outlet(hass, hk_driver, "Outlet", entity_id, 2, None)
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 7 # Outlet
assert acc.char_on.value is False
assert acc.char_outlet_in_use.value is True
hass.states.async_set(entity_id, STATE_ON)
await hass.async_block_till_done()
assert acc.char_on.value is True
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
assert acc.char_on.value is False
# Set from HomeKit
call_turn_on = async_mock_service(hass, "switch", "turn_on")
call_turn_off = async_mock_service(hass, "switch", "turn_off")
await hass.async_add_job(acc.char_on.client_update_value, True)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_job(acc.char_on.client_update_value, False)
await hass.async_block_till_done()
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] is None
@pytest.mark.parametrize(
"entity_id, attrs",
[
("automation.test", {}),
("input_boolean.test", {}),
("remote.test", {}),
("script.test", {ATTR_CAN_CANCEL: True}),
("switch.test", {}),
],
)
async def test_switch_set_state(hass, hk_driver, entity_id, attrs, events):
"""Test if accessory and HA are updated accordingly."""
domain = split_entity_id(entity_id)[0]
hass.states.async_set(entity_id, None, attrs)
await hass.async_block_till_done()
acc = Switch(hass, hk_driver, "Switch", entity_id, 2, None)
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 8 # Switch
assert acc.activate_only is False
assert acc.char_on.value is False
hass.states.async_set(entity_id, STATE_ON, attrs)
await hass.async_block_till_done()
assert acc.char_on.value is True
hass.states.async_set(entity_id, STATE_OFF, attrs)
await hass.async_block_till_done()
assert acc.char_on.value is False
# Set from HomeKit
call_turn_on = async_mock_service(hass, domain, "turn_on")
call_turn_off = async_mock_service(hass, domain, "turn_off")
await hass.async_add_job(acc.char_on.client_update_value, True)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_job(acc.char_on.client_update_value, False)
await hass.async_block_till_done()
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] is None
async def test_valve_set_state(hass, hk_driver, events):
"""Test if Valve accessory and HA are updated accordingly."""
entity_id = "switch.valve_test"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = Valve(hass, hk_driver, "Valve", entity_id, 2, {CONF_TYPE: TYPE_FAUCET})
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.category == 29 # Faucet
assert acc.char_valve_type.value == 3 # Water faucet
acc = Valve(hass, hk_driver, "Valve", entity_id, 2, {CONF_TYPE: TYPE_SHOWER})
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.category == 30 # Shower
assert acc.char_valve_type.value == 2 # Shower head
acc = Valve(hass, hk_driver, "Valve", entity_id, 2, {CONF_TYPE: TYPE_SPRINKLER})
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.category == 28 # Sprinkler
assert acc.char_valve_type.value == 1 # Irrigation
acc = Valve(hass, hk_driver, "Valve", entity_id, 2, {CONF_TYPE: TYPE_VALVE})
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 29 # Faucet
assert acc.char_active.value is False
assert acc.char_in_use.value is False
assert acc.char_valve_type.value == 0 # Generic Valve
hass.states.async_set(entity_id, STATE_ON)
await hass.async_block_till_done()
assert acc.char_active.value is True
assert acc.char_in_use.value is True
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
assert acc.char_active.value is False
assert acc.char_in_use.value is False
# Set from HomeKit
call_turn_on = async_mock_service(hass, "switch", "turn_on")
call_turn_off = async_mock_service(hass, "switch", "turn_off")
await hass.async_add_job(acc.char_active.client_update_value, True)
await hass.async_block_till_done()
assert acc.char_in_use.value is True
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_job(acc.char_active.client_update_value, False)
await hass.async_block_till_done()
assert acc.char_in_use.value is False
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] is None
@pytest.mark.parametrize(
"entity_id, attrs",
[
("scene.test", {}),
("script.test", {}),
("script.test", {ATTR_CAN_CANCEL: False}),
],
)
async def test_reset_switch(hass, hk_driver, entity_id, attrs, events):
"""Test if switch accessory is reset correctly."""
domain = split_entity_id(entity_id)[0]
hass.states.async_set(entity_id, None, attrs)
await hass.async_block_till_done()
acc = Switch(hass, hk_driver, "Switch", entity_id, 2, None)
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.activate_only is True
assert acc.char_on.value is False
call_turn_on = async_mock_service(hass, domain, "turn_on")
call_turn_off = async_mock_service(hass, domain, "turn_off")
await hass.async_add_job(acc.char_on.client_update_value, True)
await hass.async_block_till_done()
assert acc.char_on.value is True
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert acc.char_on.value is False
assert len(events) == 1
assert not call_turn_off
await hass.async_add_job(acc.char_on.client_update_value, False)
await hass.async_block_till_done()
assert acc.char_on.value is False
assert len(events) == 1
async def test_reset_switch_reload(hass, hk_driver, events):
"""Test reset switch after script reload."""
entity_id = "script.test"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = Switch(hass, hk_driver, "Switch", entity_id, 2, None)
await hass.async_add_job(acc.run)
await hass.async_block_till_done()
assert acc.activate_only is True
hass.states.async_set(entity_id, None, {ATTR_CAN_CANCEL: True})
await hass.async_block_till_done()
assert acc.activate_only is False
hass.states.async_set(entity_id, None, {ATTR_CAN_CANCEL: False})
await hass.async_block_till_done()
assert acc.activate_only is True
|
{
"content_hash": "bc07603f02fda5ce638a03061300e01b",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 84,
"avg_line_length": 34.475806451612904,
"alnum_prop": 0.675906432748538,
"repo_name": "qedi-r/home-assistant",
"id": "aee61ccf2e783ec99ea8820f33fcd2faa5d1353b",
"size": "8550",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "tests/components/homekit/test_type_switches.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from qstrader.signals.signal import Signal
class VolatilitySignal(Signal):
"""
Indicator class to calculate lookback-period daily
volatility of returns, which is then annualised.
    If the number of available returns is less than the
    lookback parameter, the volatility is calculated on
    this subset.
Parameters
----------
start_dt : `pd.Timestamp`
The starting datetime (UTC) of the signal.
universe : `Universe`
The universe of assets to calculate the signals for.
lookbacks : `list[int]`
The number of lookback periods to store prices for.
"""
def __init__(self, start_dt, universe, lookbacks):
bumped_lookbacks = [lookback + 1 for lookback in lookbacks]
super().__init__(start_dt, universe, bumped_lookbacks)
@staticmethod
def _asset_lookback_key(asset, lookback):
"""
Create the buffer dictionary lookup key based
on asset name and lookback period.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`str`
The lookup key.
"""
return '%s_%s' % (asset, lookback + 1)
def _annualised_vol(self, asset, lookback):
"""
Calculate the annualised volatility for the provided
lookback period based on the price buffers for a
particular asset.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`float`
The annualised volatility of returns.
"""
series = pd.Series(
self.buffers.prices[
VolatilitySignal._asset_lookback_key(
asset, lookback
)
]
)
returns = series.pct_change().dropna().to_numpy()
if len(returns) < 1:
return 0.0
else:
return np.std(returns) * np.sqrt(252)
def __call__(self, asset, lookback):
"""
Calculate the annualised volatility of
returns for the asset.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`float`
The annualised volatility of returns.
"""
return self._annualised_vol(asset, lookback)
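# Hedged usage sketch (symbol and buffer wiring are assumed to be handled by
# the surrounding backtest): with ~1 year of daily prices stored, __call__
# returns std(daily returns) * sqrt(252), so 1% daily moves come out near
# 16% annualised.
# vol = VolatilitySignal(start_dt, universe, lookbacks=[252])
# sigma = vol('EQ:SPY', 252)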
|
{
"content_hash": "81e6a788e9230daae9e2aef431664f62",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 67,
"avg_line_length": 26.2020202020202,
"alnum_prop": 0.5462606013878181,
"repo_name": "mhallsmoore/qstrader",
"id": "b47a8613abb26fa5c9117d4a235f7b59898431b9",
"size": "2594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qstrader/signals/vol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1614"
},
{
"name": "Python",
"bytes": "310218"
}
],
"symlink_target": ""
}
|
"""
Fast Lomb-Scargle Algorithm, following Press & Rybicki 1989
"""
from __future__ import print_function, division
__all__ = ['LombScargleFast']
import warnings
import numpy as np
from .lomb_scargle import LombScargle
# Precomputed factorials
FACTORIALS = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
def factorial(N):
"""Compute the factorial of N.
If N <= 10, use a fast lookup table; otherwise use scipy.special.factorial
"""
if N < len(FACTORIALS):
return FACTORIALS[N]
else:
from scipy import special
return int(special.factorial(N))
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
# Note: for Python 2.7 and 3.x, this is faster:
# return 1 << int(N - 1).bit_length()
N = int(N) - 1
for i in [1, 2, 4, 8, 16, 32]:
N |= N >> i
return N + 1
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
    using Lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
    # Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums:
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
    Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float (optional, default=0)
The low frequency to use
freq_factor : float (optional, default=1)
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
        oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the tradeoff between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
        summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
"""
df *= freq_factor
f0 *= freq_factor
assert df > 0
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
assert(Mfft > 0)
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)
if t0 != 0:
f = f0 + df * np.arange(Nfft)
fftgrid *= np.exp(2j * np.pi * t0 * f)
fftgrid = fftgrid[:N]
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
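# Hedged sanity check (synthetic data): the FFT path is an approximation and
# should agree closely, but not exactly, with the brute-force path.
# rng = np.random.RandomState(0)
# t = np.sort(100 * rng.rand(50))
# S_fast, C_fast = trig_sum(t, np.sin(t), df=0.01, N=100, use_fft=True)
# S_slow, C_slow = trig_sum(t, np.sin(t), df=0.01, N=100, use_fft=False)
# assert np.allclose(S_fast, S_slow, atol=1e-2)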
def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None,
center_data=True, fit_offset=True,
use_fft=True, freq_oversampling=5, nyquist_factor=2,
trig_sum_kwds=None):
"""Compute a lomb-scargle periodogram for the given data
This implements both an O[N^2] method if use_fft==False, or an
O[NlogN] method if use_fft==True.
Parameters
----------
t, y, dy : array_like
times, values, and errors of the data points. These should be
broadcastable to the same shape. If dy is not specified, a
constant error will be used.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
Defaults, with T = t.max() - t.min():
- f0 = 0
- df is set such that there are ``freq_oversampling`` points per
peak width. ``freq_oversampling`` defaults to 5.
- Nf is set such that the highest frequency is ``nyquist_factor``
times the so-called "average Nyquist frequency".
``nyquist_factor`` defaults to 2.
Note that for unevenly-spaced data, the periodogram can be sensitive
to frequencies far higher than the average Nyquist frequency.
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_offset : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
Other Parameters
----------------
freq_oversampling : float (default=5)
Oversampling factor for the frequency bins. Only referenced if
``df`` is not specified
nyquist_factor : float (default=2)
Parameter controlling the highest probed frequency. Only referenced
if ``Nf`` is not specified.
trig_sum_kwds : dict or None (optional)
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
    .. [1] Press W.H. and Rybicki, G.B., "Fast algorithm for spectral analysis
        of unevenly sampled data". ApJ 338, p. 277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [3] W. Press et al, Numerical Recipes in C (2002)
"""
# Validate and setup input data
t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy))
w = 1. / (dy ** 2)
w /= w.sum()
# Validate and setup frequency grid
if df is None:
peak_width = 1. / (t.max() - t.min())
df = peak_width / freq_oversampling
if Nf is None:
avg_Nyquist = 0.5 * len(t) / (t.max() - t.min())
Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df)
Nf = int(Nf)
assert(df > 0)
assert(Nf > 0)
freq = f0 + df * np.arange(Nf)
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_offset:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
#----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_offset:
S, C = trig_sum(t, w, **kwargs)
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if f0 == 0:
warnings.simplefilter("ignore")
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
# fix NaN at zero frequency
if np.isnan(tan_2omega_tau[0]):
tan_2omega_tau[0] = 0
else:
tan_2omega_tau = S2 / C2
# slower/less stable way: we'll use trig identities instead
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
#----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_offset:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if fit_offset and f0 == 0:
warnings.simplefilter("ignore")
power = (YC * YC / CC + YS * YS / SS) / YY
# fix NaN and INF at zero frequency
if np.isnan(power[0]) or np.isinf(power[0]):
power[0] = 0
return freq, power
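# Hedged usage sketch: recover a 0.1-cycle/unit sinusoid from noisy,
# unevenly sampled synthetic data.
# rng = np.random.RandomState(42)
# t = 100 * rng.rand(200)
# y = np.sin(2 * np.pi * 0.1 * t) + 0.1 * rng.randn(200)
# freq, power = lomb_scargle_fast(t, y, dy=0.1)
# freq[np.argmax(power)]  # expected to land near 0.1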
class LombScargleFast(LombScargle):
"""Fast FFT-based Lomb-Scargle Periodogram Implementation
    This implements the O[N log N] Lomb-Scargle periodogram, described in
Press & Rybicki (1989) [1].
To compute the periodogram via the fast algorithm, use the
``score_frequency_grid()`` method. The ``score()`` method and
``periodogram()`` method will default to the slower algorithm.
See Notes below for more information about the algorithm.
Parameters
----------
optimizer : PeriodicOptimizer instance
Optimizer to use to find the best period. If not specified, the
LinearScanOptimizer will be used.
center_data : boolean (default = True)
If True, then compute the weighted mean of the input data and subtract
before fitting the model.
fit_offset : boolean (default = True)
If True, then fit a floating-mean sinusoid model.
use_fft : boolean (default = True)
Specify whether to use the Press & Rybicki FFT algorithm to compute
the result
ls_kwds : dict
Dictionary of keywords to pass to the ``lomb_scargle_fast`` routine.
fit_period : bool (optional)
If True, then fit for the best period when fit() method is called.
optimizer_kwds : dict (optional)
Dictionary of keyword arguments for constructing the optimizer. For
example, silence optimizer output with `optimizer_kwds={"quiet": True}`.
silence_warnings : bool (default=False)
If False, then warn the user when doing silly things, like calling
``score()`` rather than ``score_frequency_grid()`` or fitting this to
small datasets (fewer than 50 points).
Examples
--------
>>> rng = np.random.RandomState(0)
>>> t = 100 * rng.rand(100)
>>> dy = 0.1
>>> omega = 10
>>> y = np.sin(omega * t) + dy * rng.randn(100)
>>> ls = LombScargleFast().fit(t, y, dy)
>>> ls.optimizer.period_range = (0.2, 1.2)
>>> ls.best_period
Finding optimal frequency:
- Estimated peak width = 0.0639
- Using 5 steps per peak; omega_step = 0.0128
- User-specified period range: 0.2 to 1.2
- Computing periods at 2051 steps
Zooming-in on 5 candidate peaks:
- Computing periods at 1000 steps
0.62826265739259146
>>> ls.predict([0, 0.5])
array([-0.02019474, -0.92910567])
Notes
-----
Currently, a NotImplementedError will be raised if both center_data
and fit_offset are False.
Note also that the fast algorithm is only an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may produce incorrect results
for some datasets.
See Also
--------
LombScargle
LombScargleAstroML
References
----------
    .. [1] Press W.H. and Rybicki, G.B., "Fast algorithm for spectral analysis
        of unevenly sampled data". ApJ 338, p. 277, 1989
"""
def __init__(self, optimizer=None, center_data=True, fit_offset=True,
use_fft=True, ls_kwds=None, Nterms=1,
fit_period=False, optimizer_kwds=None,
silence_warnings=False):
self.use_fft = use_fft
self.ls_kwds = ls_kwds
self.silence_warnings = silence_warnings
if Nterms != 1:
raise ValueError("LombScargleFast supports only Nterms = 1")
LombScargle.__init__(self, optimizer=optimizer,
center_data=center_data, fit_offset=fit_offset,
Nterms=1, regularization=None,
fit_period=fit_period,
optimizer_kwds=optimizer_kwds)
def _score_frequency_grid(self, f0, df, N):
if not self.silence_warnings and self.t.size < 50:
warnings.warn("For smaller datasets, the approximation used by "
"LombScargleFast may not be suitable.\n"
"It is recommended to use LombScargle instead.\n"
"To silence this warning, set "
"``silence_warnings=True``")
freq, P = lomb_scargle_fast(self.t, self.y, self.dy,
f0=f0, df=df, Nf=N,
center_data=self.center_data,
fit_offset=self.fit_offset,
use_fft=self.use_fft,
**(self.ls_kwds or {}))
return P
def _score(self, periods):
if not self.silence_warnings:
warnings.warn("The score() method defaults to a slower O[N^2] "
"algorithm.\nUse the score_frequency_grid() method "
"to access the fast FFT-based algorithm.\n"
"To silence this warning, set "
"``silence_warnings=True``")
return LombScargle._score(self, periods)
|
{
"content_hash": "4332cf2c3067ed11d063bf926f623079",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 80,
"avg_line_length": 36.486666666666665,
"alnum_prop": 0.580059686948048,
"repo_name": "astroML/gatspy",
"id": "5ae1175d47bbe58760a9fed7bda246c5cd75c883",
"size": "16419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gatspy/periodic/lomb_scargle_fast.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "125619"
}
],
"symlink_target": ""
}
|
"""
Manage loggers for the api.
"""
import logging, logging.handlers, time
import api
from flask import request, has_request_context
from flask import logging as flask_logging
from datetime import datetime
critical_error_timeout = 600
log = logging.getLogger(__name__)
class StatsHandler(logging.StreamHandler):
"""
Logs statistical information into the mongodb.
"""
time_format = "%H:%M:%S %Y-%m-%d"
action_parsers = {
"api.user.create_user_request":
lambda params, result=None: {
"username": params["username"],
"new_team": params["create-new-team"]
},
"api.autogen.grade_problem_instance":
lambda pid, tid, key, result=None: {
"pid": pid,
"key": key,
"correct": result["correct"]
},
"api.group.create_group":
lambda uid, group_name, result=None: {
"name": group_name,
"owner": uid
},
"api.group.join_group":
lambda tid, gid, result=None: {
"gid": gid
},
"api.group.leave_group":
lambda tid, gid, result=None: {
"gid": gid
},
"api.group.delete_group":
lambda gid, result=None: {
"gid": gid
},
"api.problem.submit_key":
lambda tid, pid, key, uid=None, ip=None, result=None: {
"pid": pid,
"key": key,
"success": result["correct"]
},
"api.user.update_password_request":
lambda params, uid=None, check_current=False, result=None: {},
"api.utilities.request_password_reset":
lambda username, result=None: {},
"api.team.create_team":
lambda params, result=None: params,
"api.team.assign_shell_account":
lambda tid, result=None: {},
"api.app.hint":
lambda pid, source, result=None: {"pid": pid, "source": source}
}
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
"""
Store record into the db.
"""
information = get_request_information()
result = record.msg
        if isinstance(result, dict):
information.update({
"event": result["name"],
"time": datetime.now()
})
information["pass"] = True
information["action"] = {}
if "exception" in result:
information["action"]["exception"] = result["exception"]
information["pass"] = False
elif result["name"] in self.action_parsers:
action_parser = self.action_parsers[result["name"]]
result["kwargs"]["result"] = result["result"]
action_result = action_parser(*result["args"], **result["kwargs"])
information["action"].update(action_result)
api.common.get_conn().statistics.insert(information)
class ExceptionHandler(logging.StreamHandler):
"""
Logs exceptions into mongodb.
"""
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
"""
Store record into the db.
"""
information = get_request_information()
information.update({
"event": "exception",
"time": datetime.now(),
"trace": record.msg
})
api.common.get_conn().exceptions.insert(information)
class SevereHandler(logging.handlers.SMTPHandler):
messages = {}
def __init__(self):
logging.handlers.SMTPHandler.__init__(
self,
mailhost=api.utilities.smtp_url,
fromaddr=api.utilities.from_addr,
            toaddrs=api.utilities.admin_emails,  # assumed: the admin list lives beside the other api.utilities mail settings
subject="Critical Error in {}".format(api.config.competition_name),
credentials=(api.utilities.email_username, api.utilities.email_password),
secure=()
)
def emit(self, record):
"""
Don't excessively emit the same message.
"""
last_time = self.messages.get(record.msg, None)
if last_time is None or time.time() - last_time > critical_error_timeout:
super(SevereHandler, self).emit(record)
self.messages[record.msg] = time.time()
def set_level(name, level):
"""
Get and set log level of a given logger.
Args:
name: name of logger
level: level to set
"""
logger = use(name)
if logger:
logger.setLevel(level)
def use(name):
"""
Alias for logging.getLogger(name)
Args:
name: The name of the logger.
Returns:
The logging object.
"""
return logging.getLogger(name)
def get_request_information():
"""
Returns a dictionary of contextual information about the user at the time of logging.
Returns:
The dictionary.
"""
information = {}
if has_request_context():
information["request"] = {
"api_endpoint_method": request.method,
"api_endpoint": request.path,
"ip": request.remote_addr,
"platform": request.user_agent.platform,
"browser": request.user_agent.browser,
"browser_version": request.user_agent.version,
"user_agent":request.user_agent.string
}
if api.auth.is_logged_in():
user = api.user.get_user()
team = api.user.get_team()
groups = api.team.get_groups()
information["user"] = {
"username": user["username"],
"email": user["email"],
"team_name": team["team_name"],
"school": team["school"],
"groups": [group["name"] for group in groups]
}
return information
def setup_logs(args):
"""
Initialize the api loggers.
Args:
args: dict containing the configuration options.
"""
flask_logging.create_logger = lambda app: use(app.logger_name)
if not args.get("debug", True):
set_level("werkzeug", logging.ERROR)
level = [logging.WARNING, logging.INFO, logging.DEBUG][
min(args.get("verbose", 1), 2)]
internal_error_log = ExceptionHandler()
internal_error_log.setLevel(logging.ERROR)
log.root.setLevel(level)
log.root.addHandler(internal_error_log)
if api.utilities.enable_email:
severe_error_log = SevereHandler()
severe_error_log.setLevel(logging.CRITICAL)
log.root.addHandler(severe_error_log)
stats_log = StatsHandler()
stats_log.setLevel(logging.INFO)
log.root.addHandler(stats_log)
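# Hedged bootstrap sketch (option names taken from the parser usage above):
# setup_logs({"debug": False, "verbose": 2})
# log.info({"name": "api.app.hint", "args": [pid, source],
#           "kwargs": {}, "result": None})  # routed into StatsHandler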
|
{
"content_hash": "e869ff779f33cfbf8d826449a56fbcc6",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 89,
"avg_line_length": 27.182539682539684,
"alnum_prop": 0.5497810218978102,
"repo_name": "stuyCTF/stuyCTF-Platform",
"id": "985113bc147b5956ca228f5ac70f3f7e41af1a19",
"size": "6850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/api/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7294"
},
{
"name": "CoffeeScript",
"bytes": "51286"
},
{
"name": "HTML",
"bytes": "57602"
},
{
"name": "Python",
"bytes": "184241"
},
{
"name": "Shell",
"bytes": "4218"
}
],
"symlink_target": ""
}
|
import datetime
from nose.tools import assert_almost_equal, assert_equal
from .tidal_model import calculate_amplitude, to_hours, Constituent
def test_calculate_amplitude():
test_data = [
(-0.0873440613, 0, 187, 45, 0.088),
(0.0235169771, 2.5, 187, 45, 0.088),
]
for expected, time, phase, speed, amplitude in test_data:
yield (_test_calculate_amplitude,
expected,
time,
Constituent(
'', '',
phase=phase,
speed=speed,
amplitude=amplitude))
def _test_calculate_amplitude(expected_output, time, constituent):
assert_almost_equal(
expected_output,
calculate_amplitude(time, constituent))
def test_to_hours():
timedelta = datetime.timedelta(hours=3, minutes=45)
assert_equal(3.75, to_hours(timedelta))
|
{
"content_hash": "541cbb5d53dbbec7e319399ee85bed4c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 27.8125,
"alnum_prop": 0.5932584269662922,
"repo_name": "sealevelresearch/tide-predictor",
"id": "3c972942d7ed4caad6f51ec4df05765915ec35b2",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tide_predictor/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6959"
}
],
"symlink_target": ""
}
|
'''
Created on Oct 15, 2015
@author: wirkert
'''
import numpy as np
from pandas import DataFrame
import pandas as pd
class AbstractBatch(object):
"""summarizes a batch of simulated mc spectra"""
def __init__(self):
self._nr_layers = 0 # internally keeps track of number of layers
my_index = pd.MultiIndex(levels=[[], []],
labels=[[], []])
self.df = DataFrame(columns=my_index)
def create_parameters(self, nr_samples):
"""create the parameters for the batch, the simulation has
to create the resulting reflectances"""
pass
def nr_elements(self):
return self.df.shape[0]
class GenericBatch(AbstractBatch):
"""generic n-layer batch with each layer having the same oxygenation """
def __init__(self):
super(GenericBatch, self).__init__()
def append_one_layer(self, saO2, nr_samples):
"""helper function to create parameters for one layer"""
# scales data to lie between maxi and mini instead of 0 and 1
scale = lambda x, mini, maxi: x * (maxi - mini) + mini
# shortcut to random generator
gen = np.random.random_sample
gen_n = np.random.normal
# create layer elements
self.df["layer" + str(self._nr_layers), "vhb"] = \
scale(gen(nr_samples), 0, 1.)
self.df["layer" + str(self._nr_layers), "sao2"] = \
saO2
self.df["layer" + str(self._nr_layers), "a_mie"] = \
np.clip(gen_n(loc=18.9, scale=10.2, size=nr_samples),
0.1, np.inf) * 100 # to 1/m
self.df["layer" + str(self._nr_layers), "b_mie"] = \
np.clip(gen_n(loc=1.286, scale=0.521, size=nr_samples), 0, np.inf)
self.df["layer" + str(self._nr_layers), "d"] = \
scale(gen(nr_samples), 0, 1.)
self.df["layer" + str(self._nr_layers), "n"] = \
scale(gen(nr_samples), 1.33, 1.54)
self.df["layer" + str(self._nr_layers), "g"] = \
scale(gen(nr_samples), 0.8, 0.95)
self._nr_layers += 1
def create_parameters(self, nr_samples):
"""Create generic three layer batch with a total diameter of 2mm.
saO2 is the same in all layers, but all other parameters vary randomly
within each layer"""
saO2 = np.random.random_sample(size=nr_samples)
# create three layers with random samples
self.append_one_layer(saO2, nr_samples)
self.append_one_layer(saO2, nr_samples)
self.append_one_layer(saO2, nr_samples)
# "normalize" d to 2mm
# first extract all layers from df
layers = [l for l in self.df.columns.levels[0] if "layer" in l]
# summarize all ds
sum_d = 0
for l in layers:
sum_d += self.df[l, "d"]
for l in layers:
self.df[l, "d"] = self.df[l, "d"] / sum_d * 2000. * 10 ** -6
self.df[l, "d"] = np.clip(self.df[l, "d"], 25 * 10 ** -6, np.inf)
return self.df
# NB: base class assumed to be GenericBatch; the super().append_one_layer
# call below is only defined there, not on AbstractBatch.
class LessGenericBatch(GenericBatch):
"""less generic three layer batch. This only varies blood volume fraction
w.r.t. the ColonMuscleBatch. Let's see if DA works in this case."""
def __init__(self):
super(LessGenericBatch, self).__init__()
def append_one_layer(self, saO2, n, d_ranges, nr_samples):
"""helper function to create parameters for one layer"""
# scales data to lie between maxi and mini instead of 0 and 1
scale = lambda x, mini, maxi: x * (maxi - mini) + mini
# shortcut to random generator
gen = np.random.random_sample
# create as generic batch
super(LessGenericBatch, self).append_one_layer(saO2, nr_samples)
self._nr_layers -= 1 # we're not finished
# but some changes in specific layer elements
# more specific layer thicknesses
self.df["layer" + str(self._nr_layers), "d"] = \
scale(gen(nr_samples), d_ranges[0], d_ranges[1])
# more specific n
self.df["layer" + str(self._nr_layers), "n"] = \
n
self._nr_layers += 1
def create_parameters(self, nr_samples):
"""Create generic three layer batch with a total diameter of 2mm.
saO2 is the same in all layers, but all other parameters vary randomly
within each layer"""
saO2 = np.random.random_sample(size=nr_samples)
n = np.ones_like(saO2)
# create three layers with random samples
# muscle
self.append_one_layer(saO2, n * 1.36, (600.*10 ** -6, 1010.*10 ** -6),
nr_samples)
# submucosa
self.append_one_layer(saO2, n * 1.36, (415.*10 ** -6, 847.*10 ** -6),
nr_samples)
# mucosa
self.append_one_layer(saO2, n * 1.38, (395.*10 ** -6, 603.*10 ** -6),
nr_samples)
return self.df
class ColonMuscleBatch(GenericBatch):
"""three layer batch simulating colonic tissue"""
def __init__(self):
super(ColonMuscleBatch, self).__init__()
def append_one_layer(self, saO2, n, d_ranges, nr_samples):
"""helper function to create parameters for one layer"""
# scales data to lie between maxi and mini instead of 0 and 1
scale = lambda x, mini, maxi: x * (maxi - mini) + mini
# shortcut to random generator
gen = np.random.random_sample
# create as generic batch
super(ColonMuscleBatch, self).append_one_layer(saO2, nr_samples)
self._nr_layers -= 1 # we're not finished
# but some changes in specific layer elements
# less blood
self.df["layer" + str(self._nr_layers), "vhb"] = \
scale(gen(nr_samples), 0, 0.1)
# more specific layer thicknesses
self.df["layer" + str(self._nr_layers), "d"] = \
scale(gen(nr_samples), d_ranges[0], d_ranges[1])
# more specific n
self.df["layer" + str(self._nr_layers), "n"] = \
n
self._nr_layers += 1
def create_parameters(self, nr_samples):
"""Create generic three layer batch with a total diameter of 2mm.
saO2 is the same in all layers, but all other parameters vary randomly
within each layer"""
saO2 = np.random.random_sample(size=nr_samples)
n = np.ones_like(saO2)
# create three layers with random samples
# muscle
self.append_one_layer(saO2, n * 1.36, (600.*10 ** -6, 1010.*10 ** -6),
nr_samples)
# submucosa
self.append_one_layer(saO2, n * 1.36, (415.*10 ** -6, 847.*10 ** -6),
nr_samples)
# mucosa
self.append_one_layer(saO2, n * 1.38, (395.*10 ** -6, 603.*10 ** -6),
nr_samples)
return self.df
class GenericMeanScatteringBatch(GenericBatch):
"""three layer batch simulating colonic tissue"""
def __init__(self):
super(GenericMeanScatteringBatch, self).__init__()
def append_one_layer(self, saO2, nr_samples):
"""helper function to create parameters for one layer"""
# create as generic batch
super(GenericMeanScatteringBatch, self).append_one_layer(saO2,
nr_samples)
self._nr_layers -= 1 # we're not finished
# restrict exponential scattering to mean value for soft tissue.
self.df["layer" + str(self._nr_layers), "b_mie"] = 1.286
self._nr_layers += 1
class ColonMuscleMeanScatteringBatch(ColonMuscleBatch):
"""three layer batch simulating colonic tissue"""
def __init__(self):
super(ColonMuscleMeanScatteringBatch, self).__init__()
def append_one_layer(self, saO2, n, d_ranges, nr_samples):
"""helper function to create parameters for one layer"""
# create as generic batch
super(ColonMuscleMeanScatteringBatch, self).append_one_layer(saO2,
n,
d_ranges,
nr_samples)
self._nr_layers -= 1 # we're not finished
# restrict exponential scattering to mean value for soft tissue.
self.df["layer" + str(self._nr_layers), "b_mie"] = 1.286
self._nr_layers += 1
class VisualizationBatch(AbstractBatch):
"""batch used for visualization of different spectra. Feel free to adapt
for your visualization purposes."""
def __init__(self):
super(VisualizationBatch, self).__init__()
def append_one_layer(self, vhb, sao2, a_mie, b_mie, d, n, g, nr_samples):
"""helper function to create parameters for one layer"""
# create layer elements
self.df["layer" + str(self._nr_layers), "vhb"] = vhb
self.df["layer" + str(self._nr_layers), "sao2"] = sao2
self.df["layer" + str(self._nr_layers), "a_mie"] = a_mie
self.df["layer" + str(self._nr_layers), "b_mie"] = b_mie
self.df["layer" + str(self._nr_layers), "d"] = d
self.df["layer" + str(self._nr_layers), "n"] = n
self.df["layer" + str(self._nr_layers), "g"] = g
self._nr_layers += 1
def create_parameters(self, nr_samples):
# bvf = np.linspace(0.0, .1, nr_samples)
# saO2 = np.linspace(0., 1., nr_samples)
# d = np.linspace(175, 735, nr_samples) * 10 ** -6
# a_mie = np.linspace(5., 30., nr_samples) * 100
# a_ray = np.linspace(0., 60., nr_samples) * 100
# n = np.linspace(1.33, 1.54, nr_samples)
# g = np.linspace(0, 0.95, nr_samples)
# create three layers with random samples
self.append_one_layer([0.1, 0.02], [0.7, 0.1], 18.9*100., 1.286,
500 * 10 ** -6, 1.38, 0.9,
nr_samples)
self.append_one_layer(0.04, 0.7, 18.9*100., 1.286, 500 * 10 ** -6,
1.36, 0.9,
nr_samples)
self.append_one_layer(0.04, 0.7, 18.9*100., 1.286, 500 * 10 ** -6,
1.36, 0.9,
nr_samples)
return self.df
class IntralipidPhantomBatch(AbstractBatch):
"""batch used for visualization of different spectra. Feel free to adapt
for your visualization purposes."""
def __init__(self):
super(IntralipidPhantomBatch, self).__init__()
def append_one_layer(self, nr_samples):
"""helper function to create parameters for one layer"""
# scales data to lie between maxi and mini instead of 0 and 1
scale = lambda x, mini, maxi: x * (maxi - mini) + mini
# shortcut to random generator
gen = np.random.random_sample
# create layer elements
self.df["layer" + str(self._nr_layers), "vhb"] = \
scale(gen(nr_samples), 0.001, 0.1)
self.df["layer" + str(self._nr_layers), "sao2"] = \
scale(gen(nr_samples), 0., 1.)
self.df["layer" + str(self._nr_layers), "a_mie"] = \
scale(gen(nr_samples), 5., 40.) * 100 # to 1/m
self.df["layer" + str(self._nr_layers), "b_mie"] = \
scale(gen(nr_samples), 2.3, 2.4)
self.df["layer" + str(self._nr_layers), "d"] = \
2000.*10**-6
self.df["layer" + str(self._nr_layers), "n"] = \
scale(gen(nr_samples), 1.33, 1.54)
self.df["layer" + str(self._nr_layers), "g"] = \
scale(gen(nr_samples), 0.8, 0.95)
self._nr_layers += 1
def create_parameters(self, nr_samples):
"""Create intralipid batch with a total diameter of 2mm.
        All other parameters vary randomly
        within the layer to simulate the intralipid scattering/absorption
        properties."""
# create three layers with random samples
self.append_one_layer(nr_samples)
return self.df
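if __name__ == '__main__':
    # Hedged usage sketch (requires a pandas version that still accepts
    # MultiIndex `labels=`, as assumed by AbstractBatch above): draw 1000
    # random tissue parameterisations and inspect the per-layer columns
    # of the multi-indexed DataFrame.
    batch = ColonMuscleBatch()
    df = batch.create_parameters(1000)
    print(df["layer0"][["vhb", "sao2", "d"]].describe())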
|
{
"content_hash": "e087c5c86af4679b1d015f21efe221f8",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 80,
"avg_line_length": 38.394666666666666,
"alnum_prop": 0.5533407417696903,
"repo_name": "iwegner/MITK",
"id": "2706262dc6d9eebd8a457bf6bf01df75474ea13a",
"size": "14398",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Modules/Biophotonics/python/iMC/mc/batches.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3340295"
},
{
"name": "C++",
"bytes": "31705331"
},
{
"name": "CMake",
"bytes": "985642"
},
{
"name": "CSS",
"bytes": "118558"
},
{
"name": "HTML",
"bytes": "102168"
},
{
"name": "JavaScript",
"bytes": "162600"
},
{
"name": "Jupyter Notebook",
"bytes": "228462"
},
{
"name": "Makefile",
"bytes": "25077"
},
{
"name": "Objective-C",
"bytes": "26578"
},
{
"name": "Python",
"bytes": "275885"
},
{
"name": "QML",
"bytes": "28009"
},
{
"name": "QMake",
"bytes": "5583"
},
{
"name": "Shell",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
sys.path.append(os.getcwd())
import argparse
def main():
# Parse arguments to identify the path to the accuracy logs from
# the accuracy and performance runs
parser = argparse.ArgumentParser()
parser.add_argument(
"--unique_sample", "-u",
help="Specifies the path to the summary log for TEST04-A.",
default=""
)
parser.add_argument(
"--same_sample", "-s",
help="Specifies the path to the summary log for TEST04-B.",
default=""
)
args = parser.parse_args()
print("Verifying performance.")
ref_file = open(args.unique_sample, "r")
test_file = open(args.same_sample, "r")
ref_score = 0
test_score = 0
ref_mode = ''
test_mode = ''
    performance_issue_unique = ''
performance_issue_same = ''
for line in ref_file:
if re.match("Scenario", line):
ref_mode = line.split(": ",1)[1].strip()
continue
if ref_mode == "Single Stream":
if re.match("90th percentile latency", line):
ref_score = line.split(": ",1)[1].strip()
continue
if ref_mode == "Multi Stream":
if re.match("Samples per query", line):
ref_score = line.split(": ",1)[1].strip()
continue
if ref_mode == "Server":
if re.match("Scheduled samples per second", line):
ref_score = line.split(": ",1)[1].strip()
continue
if ref_mode == "Offline":
if re.match("Samples per second", line):
ref_score = line.split(": ",1)[1].strip()
continue
if re.match("\d+ ERROR", line):
error = line.split(" ",1)[0].strip()
print("WARNING: " + error + " ERROR reported in TEST04-A results")
if re.match("performance_issue_unique", line):
performance_issue_unique = line.split(": ",1)[1].strip()
if performance_issue_unique == 'false':
sys.exit("TEST FAIL: Invalid test settings in TEST04-A summary.")
break
for line in test_file:
if re.match("Scenario", line):
test_mode = line.split(": ",1)[1].strip()
continue
if test_mode == "Single Stream":
if re.match("90th percentile latency", line):
test_score = line.split(": ",1)[1].strip()
continue
if test_mode == "Multi Stream":
if re.match("Samples per query", line):
test_score = line.split(": ",1)[1].strip()
continue
if test_mode == "Server":
if re.match("Scheduled samples per second", line):
test_score = line.split(": ",1)[1].strip()
continue
if test_mode == "Offline":
if re.match("Samples per second", line):
test_score = line.split(": ",1)[1].strip()
continue
if re.match("\d+ ERROR", line):
error = line.split(" ",1)[0].strip()
print("WARNING: " + error + " ERROR reported in TEST04-B results")
if re.match("performance_issue_same", line):
performance_issue_same = line.split(": ",1)[1].strip()
if performance_issue_same == 'false':
sys.exit("TEST FAIL: Invalid test settings in TEST04-B summary.")
break
if test_mode != ref_mode:
sys.exit("Test and reference scenarios do not match!")
print("TEST04-A score = {}".format(ref_score))
print("TEST04-B score = {}".format(test_score))
threshold = 0.10
# In single stream mode, latencies can be very short for high performance systems
# and run-to-run variation due to external disturbances (OS) can be significant.
# In this case we relax pass threshold to 20%
if ref_mode == "Single Stream" and float(ref_score) <= 200000:
threshold = 0.20
if float(test_score) < float(ref_score) * (1 + threshold) and float(test_score) > float(ref_score) * (1 - threshold):
print("TEST PASS")
elif (float(test_score) > float(ref_score) and test_mode == "Single Stream"):
print("TEST PASS")
print("Note: TEST04-B is significantly slower than TEST04-A")
elif (float(test_score) < float(ref_score) and test_mode != "Single Stream"):
print("TEST PASS")
print("Note: TEST04-B is significantly slower than TEST04-A")
else:
print("TEST FAIL: Test score invalid")
if __name__ == '__main__':
main()
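# Hedged invocation sketch (log paths hypothetical):
# python verify_test4_performance.py \
#     -u TEST04-A/mlperf_log_summary.txt \
#     -s TEST04-B/mlperf_log_summary.txt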
|
{
"content_hash": "d795f50f963f32d0c1bdea1adece48c5",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 121,
"avg_line_length": 34.1865671641791,
"alnum_prop": 0.5518445754202139,
"repo_name": "mlperf/inference_results_v0.5",
"id": "8a505f7e21b99a23de08c2cf7d6f9956f776a657",
"size": "4605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "closed/Intel/audit/nnpi-1000_onnx/resnet/Offline/TEST04-A/verify_test4_performance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3233"
},
{
"name": "C",
"bytes": "3952061"
},
{
"name": "C++",
"bytes": "4248758"
},
{
"name": "CMake",
"bytes": "74513"
},
{
"name": "CSS",
"bytes": "28485"
},
{
"name": "Cuda",
"bytes": "234319"
},
{
"name": "Dockerfile",
"bytes": "18506"
},
{
"name": "HTML",
"bytes": "2890"
},
{
"name": "Makefile",
"bytes": "76919"
},
{
"name": "Python",
"bytes": "1573121"
},
{
"name": "Shell",
"bytes": "151430"
}
],
"symlink_target": ""
}
|
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
class SimplePdf(object):
"""
SimplePdf helps to put your content into a PDF document without hassle.
No styles, no fonts, just standard black text, black borders and scaled images.
"""
_default_page_size = letter
_default_spacer_height = 10
_default_spacer_width = 1
_default_margin = 72
_default_font_size = 10
_default_header_font_size = 20
_default_leading_ratio = 1.5
def __init__(self, filename=None, page_size=_default_page_size):
"""
Constructs document with given filename and page size.
Default page size is US Letter.
File won't be saved on disk until you explicitly call save().
:param filename: The file path where PDF will be saved.
:param page_size: Page size as tuple(width, height)
"""
self.filename = filename
self.parts = []
self._page_size = page_size
self.spacer_height = SimplePdf._default_spacer_height
self.spacer_width = SimplePdf._default_spacer_width
self.margin = SimplePdf._default_margin
self.font_size = SimplePdf._default_font_size
self.header_font_size = SimplePdf._default_header_font_size
self.leading_ratio = SimplePdf._default_leading_ratio
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name='SimpleStyle', fontSize=self.font_size,
leading=self.font_size * self.leading_ratio))
self.styles.add(ParagraphStyle(name='SimpleHeaderStyle', fontSize=self.header_font_size,
leading=self.header_font_size * self.leading_ratio))
def add_text(self, text):
"""
Adds text to the document.
Text is automatically wrapped.
:param text: Any text as string
"""
content = SimplePdf._prepare_text_for_reportlab(text)
self.parts.append(Paragraph(content, self.styles["SimpleStyle"]))
self._add_some_space()
def add_header(self, header):
"""
Adds header to the document.
Text is automatically wrapped.
:param header: Any text as string
"""
content = SimplePdf._prepare_text_for_reportlab(header)
self._add_some_space()
self.parts.append(Paragraph(content, self.styles["SimpleHeaderStyle"]))
self._add_some_space()
def add_image(self, filename):
"""
Adds image to the document.
Images are scaled automatically to fit within margins.
:param filename: File path of an image
"""
original_image = Image(filename)
aspect_ratio = original_image.imageHeight / float(original_image.imageWidth)
max_width = self._page_size[0] - (2 * self.margin)
width = min(original_image.imageWidth, max_width)
image = Image(filename, width=width, height=(width * aspect_ratio))
self.parts.append(image)
self._add_some_space()
def add_table(self, data):
"""
Adds table to the document.
:param data: List of lists to put in the table
"""
matrix = [[self._wrap_text(cell) for cell in row] for row in data]
table = Table(matrix)
# set all borders black
table.setStyle(TableStyle([('INNERGRID', (0, 0), (-1, -1), 1, colors.black),
('BOX', (0, 0), (-1, -1), 1, colors.black)]))
self.parts.append(table)
self._add_some_space()
def save(self):
"""
Saves PDF file on disk.
NOTE: Please make sure filename is set before saving.
"""
if self.filename is None:
raise TypeError("filename not set")
if not self.parts:
# avoid saving documents with no parts
# PDF viewers can't open such documents
self._add_some_space()
doc = SimpleDocTemplate(self.filename, pagesize=self._page_size,
rightMargin=self.margin, leftMargin=self.margin,
topMargin=self.margin, bottomMargin=self.margin)
doc.build(self.parts)
def _add_some_space(self):
"""
Adds some blank space after last part.
"""
self.parts.append(Spacer(self.spacer_width, self.spacer_height))
def _wrap_text(self, text):
"""
Wraps the text by converting it to Paragraph.
Text in Paragraphs is wrapped automatically.
:param text: string to wrap
:return: wrapped text as Paragraph
"""
paragraph = Paragraph(text, self.styles["SimpleStyle"])
return paragraph
@staticmethod
def _prepare_text_for_reportlab(text):
"""
Transforms text for processing by reportlab.
End of lines are replaced by HTML tags to preserve end of line.
:param text: string to transform
:return: transformed string
"""
content = str(text).replace('\n', '<br />\n')
return content
# TODO: add support for converting PDF to text
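# Minimal usage sketch (illustrative only; 'report.pdf' and 'logo.png' are
# hypothetical file names):
#
#     doc = SimplePdf("report.pdf")
#     doc.add_header("Monthly Report")
#     doc.add_text("First line.\nLine breaks are preserved.")
#     doc.add_image("logo.png")
#     doc.add_table([["metric", "value"], ["uptime", "99.9%"]])
#     doc.save()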
|
{
"content_hash": "f13eccf70391dfaf88a49de4ae1faac9",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 96,
"avg_line_length": 33.639240506329116,
"alnum_prop": 0.6101599247412982,
"repo_name": "roksela/super-simple-pdf",
"id": "fe51671dcca8c5a4c5f73a7b1ed0c210b84d8be1",
"size": "5315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "super_simple_pdf/simple_pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16558"
}
],
"symlink_target": ""
}
|
import sys
import string
import os
import math
# This function prints usage information
def printHelp ():
print "Usage: " + sys.argv[0] + " [-html] rankcurve1 [rankcurve2 [... rankcurveN]]"
print
print " rankcurve1 ... rankcurveN - name of the rank curve file(s) from csuRankCurve"
print
print "By default, this program will create a new directory for the plots"
print "and create a postscript plot for each of the distance measures"
print "recorded in the rank curve file(s)."
print
print "If the -html option is used, the program will create PNG images"
print "and write out an HTML page that shows all the plots on one page."
print
sys.exit(1)
# Uses "gnuplot" to plot a set of (x,y) pairs, based on Kai She's
# original perl script. The x and y vectors must be of the
# same length.
def ploteps (data, options, filename):
xLabel = "Rank"
yLabel = "Rate"
format = "postscript eps color"
outputFile = filename[:(string.rfind (filename, '.')+1)] + "eps"
p = os.popen ("gnuplot",'w')
p.write ("set xlabel '" + xLabel + "'\n")
p.write ("set ylabel '" + yLabel + "'\n")
p.write ("set xtics 5\n")
p.write ("set key bottom\n")
p.write ("set term " + format + "\n")
p.write ("set output '" + outputFile + "'\n")
p.write ("set size 0.7,0.7\n")
first = 0;
for measure in data.keys ():
if measure != "Rank":
if first == 0:
p.write ("plot [0:50] '-' title \'" + measure + "\' with points ")
first = 1
else:
p.write (", '-' title \'" + measure + "\' with points ")
p.write("\n");
for measure in data.keys ():
if measure != "Rank":
for n in range (len(data["Rank"])):
p.write( `data["Rank"][n]` + " " + `data[measure][n]` + "\n")
p.write ("e\n")
p.write ("quit\n")
p.close ()
def plotpng (data, options, filename):
xLabel = "Rank"
yLabel = "Rate"
format = "png color"
outputFile = filename[:(string.rfind (filename, '.')+1)] + "png"
p = os.popen ("gnuplot",'w')
p.write ("set xlabel '" + xLabel + "'\n")
p.write ("set ylabel '" + yLabel + "'\n")
p.write ("set xtics 5\n")
p.write ("set key bottom\n")
p.write ("set term " + format + "\n")
p.write ("set output '" + outputFile + "'\n")
# p.write ("set size 0.7,0.7\n")
first = 0;
for measure in data.keys ():
if measure != "Rank":
if first == 0:
p.write ("plot [0:50] '-' title \'" + measure + "\' with points ")
first = 1
else:
p.write (", '-' title \'" + measure + "\' with points ")
p.write("\n");
for measure in data.keys ():
if measure != "Rank":
for n in range (len(data["Rank"])):
p.write( `data["Rank"][n]` + " " + `data[measure][n]` + "\n")
p.write ("e\n")
p.write ("quit\n")
p.close ()
# Helper function for generating HTML tags
def writePageHead (fh, pageTitle):
fh.write ("<HTML>\n")
fh.write ("<HEAD>\n")
fh.write ("<TITLE>")
fh.write (pageTitle)
fh.write ("</TITLE>\n")
fh.write ("</HEAD>\n")
fh.write ("<BODY>\n")
fh.write ("<H1>")
fh.write (pageTitle)
fh.write ("</H1>\n")
def writePageTail (fh):
fh.write ("</BODY>\n")
fh.write ("</HTML>\n")
def writeParagraph (fh):
fh.write ("<P>\n")
def writeSectionHeading (fh,anchor,text):
fh.write ("<A NAME=")
fh.write (anchor)
fh.write (">\n")
fh.write ("<H2>")
fh.write (text)
fh.write ("</H2>\n")
fh.write ("</A>")
def writeImgTag (fh,imageName):
fh.write ("<IMG SRC=\"")
fh.write (imageName)
fh.write ("\">\n")
def writeLinkList (fh,links):
fh.write ("<UL>\n")
for l in links:
fh.write ("<LI><A HREF=#")
fh.write (l)
fh.write (">" + l + "</A></LI>\n")
fh.write ("</UL>\n")
# Writes out an HTML page
def writeHtmlPage (options):
pageName = options["outputDir"] + "/index.html"
experimentName = options["outputDir"]
#for filename in options["fileName"]:
measures = options["fileName"]
print "Saving HTML document to " + pageName
fh = open (pageName,'w')
writePageHead (fh, "csuRankCurve output for " + experimentName)
writeLinkList (fh,measures)
for m in measures:
writeSectionHeading (fh,m,"Rank curve for " + m + " measure")
writeImgTag (fh,m + ".png")
writeParagraph (fh)
writePageTail (fh)
fh.close ()
# This function takes a parsed rank curve (as returned by readRankCurve)
# and generates graphs for each of the measurement metrics in it.
# NOTE: not invoked by the main program below, which calls ploteps/plotpng
# directly; kept as a convenience wrapper.
def plotRankCurvesForResult (options, data):
    measures = []
    for column in data.keys ():
        if column != "Rank":
            measure = column[(string.rfind (column, '/')+1):]
            measures.append (measure)
            fName = options["outputDir"] + "/" + measure + options["graphExtension"]
            ploteps (data, options, fName)
            plotpng (data, options, fName)
            print "Saving graph to " + fName
    if options.has_key("htmlPage"):
        writeHtmlPage (options)
# Process the command line arguments
def processCommandLineArguments (args,options):
# Print help if the number of arguments is not what we expect
if len (sys.argv) < 2:
printHelp()
idx = 1;
# Parse additional command line arguments
if args[idx] == "-html":
options["htmlPage"] = 1
options["graphFormat"] = "png"
options["graphExtension"] = ".png"
idx = idx + 1
options["outputDir"] = sys.argv[idx]
tmp = len(options["outputDir"])
os.system("mkdir -p " + options["outputDir"])
idx = idx + 1
options["fileName"] = []
while idx < len (sys.argv):
options["fileName"].append( sys.argv[idx] )
idx = idx + 1
def readRankCurve( filename ):
fileHandle = open (filename)
    # The first line of the file is a header that tells which tests were run.
    # The first word of that line ought to be "Rank".
headers = string.split (fileHandle.readline ())
if headers[0] != "Rank":
print "This does not appear to be a valid results file!"
sys.exit (1)
    # The remaining header fields name the distance measures.
# y is a map from column names to lists.
y = {}
for column in headers:
tmp = column
if column[-1] == '/':
tmp = column[:-1]
measure = tmp[(string.rfind (tmp, '/')+1):]
y[measure] = []
print "Using measure" + measure
# Read in the data file. As we read through the
# fields, we append values to the appropriate list
while 1:
line = fileHandle.readline();
if not line: break
fields = string.split (line);
# We only look at every other column, because the odd number
# columns only contain values we don't need.
for fieldNo in range (0, len (fields), 2):
column = headers[fieldNo]
tmp = column
if column[-1] == '/':
tmp = column[:-1]
measure = tmp[(string.rfind (tmp, '/')+1):]
#measure = column[(string.rfind (column, '/')+1):]
#measure = column
value = float (fields[fieldNo])
# print measure
y[measure].append (value)
fileHandle.close ()
return y
# MAIN PROGRAM
if __name__=="__main__":
options = {
"graphFormat" : "postscript",
"graphExtension" : ".ps"
}
processCommandLineArguments (sys.argv, options)
# Open the file and do it
try:
for filename in options["fileName"]:
print "Filename: <" + filename + ">"
data = readRankCurve (filename)
ploteps(data, options, filename)
plotpng(data, options, filename)
except IOError:
sys.stderr.write ("File not found: %s\n" + options["fileName"])
|
{
"content_hash": "75c819ab41d2f09fa9b95794ce62938d",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 92,
"avg_line_length": 28.494880546075084,
"alnum_prop": 0.5508444125044916,
"repo_name": "phillipstanleymarbell/sunflower-simulator",
"id": "642feef620a500977c0eb97dee99cfb7bd87969f",
"size": "8523",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "benchmarks/source/ALPBench/Face_Rec/extras/compareRankCurves.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "260463"
},
{
"name": "Awk",
"bytes": "3281"
},
{
"name": "Batchfile",
"bytes": "4514"
},
{
"name": "C",
"bytes": "32057376"
},
{
"name": "C++",
"bytes": "1307243"
},
{
"name": "CSS",
"bytes": "1623"
},
{
"name": "Clarion",
"bytes": "1242"
},
{
"name": "Coq",
"bytes": "61440"
},
{
"name": "DIGITAL Command Language",
"bytes": "34575"
},
{
"name": "Emacs Lisp",
"bytes": "93455"
},
{
"name": "Fortran",
"bytes": "5364"
},
{
"name": "HTML",
"bytes": "1584976"
},
{
"name": "JavaScript",
"bytes": "13582"
},
{
"name": "Logos",
"bytes": "51926"
},
{
"name": "M",
"bytes": "102942"
},
{
"name": "M4",
"bytes": "79874"
},
{
"name": "Makefile",
"bytes": "3074403"
},
{
"name": "Mercury",
"bytes": "702"
},
{
"name": "Module Management System",
"bytes": "28478"
},
{
"name": "OCaml",
"bytes": "253115"
},
{
"name": "Objective-C",
"bytes": "178460"
},
{
"name": "Papyrus",
"bytes": "1649"
},
{
"name": "Perl",
"bytes": "56537"
},
{
"name": "Perl 6",
"bytes": "3850"
},
{
"name": "PostScript",
"bytes": "1720060"
},
{
"name": "Python",
"bytes": "40729"
},
{
"name": "Redcode",
"bytes": "1140"
},
{
"name": "Roff",
"bytes": "1992535"
},
{
"name": "SAS",
"bytes": "28385"
},
{
"name": "SRecode Template",
"bytes": "540157"
},
{
"name": "Shell",
"bytes": "958479"
},
{
"name": "Smalltalk",
"bytes": "2616"
},
{
"name": "Standard ML",
"bytes": "1212"
},
{
"name": "TeX",
"bytes": "332260"
},
{
"name": "WebAssembly",
"bytes": "26452"
},
{
"name": "Yacc",
"bytes": "562515"
},
{
"name": "sed",
"bytes": "206609"
}
],
"symlink_target": ""
}
|
"""
This file provides functionality to test that notebooks run without
warnings or exceptions.
"""
import io
import logging
import os
import shutil
import time
from nbconvert.preprocessors import ExecutePreprocessor
import nbformat
IPYTHON_VERSION = 4 # Pin to ipython version 4.
TIME_OUT = 15*60  # Maximum 15 mins/test. Reaching timeout causes test failure.
ATTEMPTS = 8
KERNEL_ERROR_MSG = 'Kernel died before replying to kernel_info'
def run_notebook(notebook, notebook_dir, kernel=None, no_cache=False, temp_dir='tmp_notebook'):
"""Run tutorial Jupyter notebook to catch any execution error.
Parameters
----------
notebook : string
the name of the notebook to be tested
notebook_dir : string
the directory of the notebook to be tested
kernel : string, None
controls which kernel to use when running the notebook. e.g: python2
no_cache : '1' or False
controls whether to clean the temporary directory in which the
notebook was run and re-download any resource file. The default
behavior is to not clean the directory. Set to '1' to force clean the
directory.
NB: in the real CI, the tests will re-download everything since they
start from a clean workspace.
temp_dir: string
        The temporary sub-directory in which to run the notebook.
Returns
-------
Returns true if the workbook runs with no warning or exception.
"""
logging.info("Running notebook '{}'".format(notebook))
notebook_path = os.path.join(*([notebook_dir] + notebook.split('/')))
working_dir = os.path.join(*([temp_dir] + notebook.split('/')))
if no_cache == '1':
logging.info("Cleaning and setting up temp directory '{}'".format(working_dir))
shutil.rmtree(temp_dir, ignore_errors=True)
errors = []
    # Reuse the name 'notebook' for the parsed notebook node from here on.
    notebook = None
if not os.path.isdir(working_dir):
os.makedirs(working_dir)
try:
notebook = nbformat.read(notebook_path + '.ipynb', as_version=IPYTHON_VERSION)
if kernel is not None:
eprocessor = ExecutePreprocessor(timeout=TIME_OUT, kernel_name=kernel)
else:
eprocessor = ExecutePreprocessor(timeout=TIME_OUT)
success = False
        # There is a low (< 1%) chance that starting a notebook executor will fail due to the kernel
        # taking too long to start, or a port collision, etc.
for i in range(ATTEMPTS):
try:
nb, _ = eprocessor.preprocess(notebook, {'metadata': {'path': working_dir}})
success = True
except RuntimeError as rte:
# We check if the exception has to do with the Jupyter kernel failing to start. If
# not, we rethrow to prevent the notebook from erring ATTEMPTS times. It is not
# ideal to inspect the exception message, but necessary for retry logic, as Jupyter
# client throws the generic RuntimeError that can be confused with other Runtime
# errors.
if str(rte) != KERNEL_ERROR_MSG:
raise rte
logging.info("Error starting preprocessor: {}. Attempt {}/{}".format(str(rte), i+1, ATTEMPTS))
time.sleep(1)
continue
break
if not success:
errors.append("Error: Notebook failed to run after {} attempts.".format(ATTEMPTS))
except Exception as err:
err_msg = str(err)
errors.append(err_msg)
finally:
if notebook is not None:
output_file = os.path.join(working_dir, "output.txt")
nbformat.write(notebook, output_file)
            with io.open(output_file, mode='r', encoding='utf-8') as output_nb:
                for line in output_nb:
                    if "Warning:" in line and "numpy operator signatures" not in line:
                        errors.append("Warning:\n" + line)
if len(errors) > 0:
logging.error('\n'.join(errors))
return False
return True
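# Minimal usage sketch (notebook and directory names are hypothetical):
#
#     assert run_notebook('tutorials/basic', notebook_dir='docs',
#                         kernel='python3', no_cache='1')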
|
{
"content_hash": "10a63c093219d246c3d3c593e0b88666",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 110,
"avg_line_length": 40.16831683168317,
"alnum_prop": 0.6255854079368992,
"repo_name": "eric-haibin-lin/mxnet",
"id": "9ee11db428b57713dcdebb8671a750b90faef63b",
"size": "5011",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tests/utils/notebook_test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "227904"
},
{
"name": "C++",
"bytes": "9488576"
},
{
"name": "CMake",
"bytes": "157668"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1290499"
},
{
"name": "Dockerfile",
"bytes": "100732"
},
{
"name": "Groovy",
"bytes": "165546"
},
{
"name": "HTML",
"bytes": "40277"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "445413"
},
{
"name": "Jupyter Notebook",
"bytes": "3660357"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "148945"
},
{
"name": "Perl",
"bytes": "1558292"
},
{
"name": "PowerShell",
"bytes": "9244"
},
{
"name": "Python",
"bytes": "9642639"
},
{
"name": "R",
"bytes": "357994"
},
{
"name": "Raku",
"bytes": "9012"
},
{
"name": "SWIG",
"bytes": "161870"
},
{
"name": "Scala",
"bytes": "1304647"
},
{
"name": "Shell",
"bytes": "460509"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import voeventparse as vp
import datetime
from copy import copy
ivorn_base = 'voevent.4pisky.org'
test_trigger_substream = 'TEST-TRIGGER'
test_response_substream = 'TEST-RESPONSE'
alarrm_request_substream = 'ALARRM-REQUEST'
datetime_format_short = '%y%m%d-%H%M.%S'
def create_skeleton_4pisky_voevent(substream, stream_id,
role=vp.definitions.roles.test,
date=None):
author_ivorn = ivorn_base+'/robots'
if date is None:
date = datetime.datetime.utcnow()
v = vp.Voevent(stream=ivorn_base+ '/' + substream,
stream_id=stream_id, role=role)
vp.set_who(v, date=date,
author_ivorn=author_ivorn)
vp.set_author(v,
shortName="4PiSkyBot",
contactName="Tim Staley",
contactEmail="tim.staley@physics.ox.ac.uk"
)
return v
def create_4pisky_test_trigger_voevent():
now = datetime.datetime.utcnow()
test_packet = create_skeleton_4pisky_voevent(
substream=test_trigger_substream,
stream_id=now.strftime(datetime_format_short),
role=vp.definitions.roles.test,
date=now,
)
return test_packet
def create_4pisky_test_response_voevent(stream_id, date):
response = create_skeleton_4pisky_voevent(
substream=test_response_substream,
stream_id=stream_id,
role=vp.definitions.roles.test,
date=date
)
return response
def create_ami_followup_notification(alert, stream_id,
request_status,
superseded_ivorns=None):
orig_pkt = alert.voevent
voevent = create_skeleton_4pisky_voevent(
substream=alarrm_request_substream,
stream_id=stream_id,
role=vp.definitions.roles.utility)
vp.add_how(voevent, descriptions="AMI Large Array, Cambridge",
references=vp.Reference(
"http://www.mrao.cam.ac.uk/facilities/ami/ami-technical-information/"),
)
voevent.Why = copy(orig_pkt.Why)
vp.add_citations(voevent, citations=vp.Citation(ivorn=orig_pkt.attrib['ivorn'],
cite_type=vp.definitions.cite_types.followup))
voevent.What.Description = "A request for AMI-LA follow-up has been made."
request_params = [vp.Param(key, val)
for key, val in request_status.iteritems()]
g = vp.Group(request_params, name='request_status')
voevent.What.append(g)
# Also copy target location into WhereWhen
voevent.WhereWhen = copy(orig_pkt.WhereWhen)
# But the time marker should refer to the AMI observation:
# (We are already citing the original Swift alert)
ac = voevent.WhereWhen.ObsDataLocation.ObservationLocation.AstroCoords
del ac.Time
voevent.WhereWhen.Description = "Target co-ords from original Swift BAT alert"
return voevent
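# Minimal usage sketch: build a test trigger packet and serialize it
# (the output file name is hypothetical; vp.dumps serializes the packet
# to an XML string):
#
#     packet = create_4pisky_test_trigger_voevent()
#     with open('test_trigger.xml', 'w') as f:
#         f.write(vp.dumps(packet))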
|
{
"content_hash": "caf641439b5cb7af87381a1d3f196227",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 90,
"avg_line_length": 34.28735632183908,
"alnum_prop": 0.631914180355347,
"repo_name": "timstaley/pysovo",
"id": "b3d73179a1a372bba5ea89021ac156cd60ddd5ba",
"size": "2983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysovo/voevent.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9821"
},
{
"name": "Python",
"bytes": "122859"
}
],
"symlink_target": ""
}
|
from social_flask.strategy import FlaskTemplateStrategy, FlaskStrategy
|
{
"content_hash": "00a213dd3bab18cec0098e2228219532",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 70,
"avg_line_length": 71,
"alnum_prop": 0.8873239436619719,
"repo_name": "rsalmaso/python-social-auth",
"id": "cc9d2e6ed5e56b4e249603be58ae64e97f300795",
"size": "71",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "social/strategies/flask_strategy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "275325"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
}
|
from subdaap.provider import Provider
from subdaap.database import Database
from subdaap.connection import Connection
from subdaap.state import State
from subdaap import cache, config, webserver
from daapserver import DaapServer
from apscheduler.schedulers.gevent import GeventScheduler
import resource
import logging
import random
import errno
import os
# Logger instance
logger = logging.getLogger(__name__)
class Application(object):
def __init__(self, config_file, data_dir, verbose=0):
"""
Construct a new application instance.
"""
self.config_file = config_file
self.data_dir = data_dir
self.verbose = verbose
self.server = None
self.provider = None
self.connections = {}
# Setup all parts of the application
self.setup_config()
self.setup_open_files()
self.setup_database()
self.setup_state()
self.setup_connections()
self.setup_cache()
self.setup_provider()
self.setup_server()
self.setup_tasks()
def setup_config(self):
"""
Load the application config from file.
"""
logger.debug("Loading config from %s", self.config_file)
self.config = config.get_config(self.config_file)
def setup_open_files(self):
"""
Get and set open files limit.
"""
open_files_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
new_open_files_limit = self.config["Advanced"]["open files limit"]
logger.info(
"System reports open files limit is %d.", open_files_limit)
if new_open_files_limit != -1:
logger.info(
"Changing open files limit to %d.", new_open_files_limit)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (
new_open_files_limit, resource.RLIM_INFINITY))
except resource.error as e:
logger.warning(
"Failed to increase the number of open files: %s", e)
def setup_database(self):
"""
Initialize database.
"""
self.db = Database(self.config["Provider"]["database"])
self.db.create_database(drop_all=False)
def setup_state(self):
"""
Setup state.
"""
self.state = State(os.path.join(
self.get_cache_dir(), "provider.state"))
def setup_cache(self):
"""
Setup the caches for items and artwork.
"""
# Initialize caches for items and artwork.
item_cache = cache.ItemCache(
path=self.get_cache_dir(
self.config["Provider"]["item cache dir"]),
max_size=self.config["Provider"]["item cache size"],
prune_threshold=self.config[
"Provider"]["item cache prune threshold"])
artwork_cache = cache.ArtworkCache(
path=self.get_cache_dir(self.config[
"Provider"]["artwork cache dir"]),
max_size=self.config["Provider"]["artwork cache size"],
prune_threshold=self.config[
"Provider"]["artwork cache prune threshold"])
# Create a cache manager
self.cache_manager = cache.CacheManager(
db=self.db,
item_cache=item_cache,
artwork_cache=artwork_cache,
connections=self.connections)
def setup_connections(self):
"""
Initialize the connections.
"""
for name, section in self.config["Connections"].iteritems():
index = len(self.connections) + 1
self.connections[index] = Connection(
db=self.db,
state=self.state,
index=index,
name=name,
url=section["url"],
username=section["username"],
password=section["password"],
synchronization=section["synchronization"],
synchronization_interval=section["synchronization interval"],
transcode=section["transcode"],
transcode_unsupported=section["transcode unsupported"])
def setup_provider(self):
"""
Setup the provider.
"""
# Create provider.
logger.debug(
"Setting up provider for %d connection(s).", len(self.connections))
self.provider = Provider(
server_name=self.config["Provider"]["name"],
db=self.db,
state=self.state,
connections=self.connections,
cache_manager=self.cache_manager)
# Do an initial synchronization if required.
for connection in self.connections.itervalues():
connection.synchronizer.provider = self.provider
connection.synchronizer.synchronize(initial=True)
def setup_server(self):
"""
Create the DAAP server.
"""
logger.debug(
"Setting up DAAP server at %s:%d",
self.config["Daap"]["interface"], self.config["Daap"]["port"])
self.server = DaapServer(
provider=self.provider,
password=self.config["Daap"]["password"],
ip=self.config["Daap"]["interface"],
port=self.config["Daap"]["port"],
cache=self.config["Daap"]["cache"],
cache_timeout=self.config["Daap"]["cache timeout"] * 60,
bonjour=self.config["Daap"]["zeroconf"],
debug=self.verbose > 1)
# Extend server with a web interface
if self.config["Daap"]["web interface"]:
webserver.extend_server_app(self, self.server.app)
def setup_tasks(self):
"""
Setup all tasks that run periodically.
"""
self.scheduler = GeventScheduler()
        # Add an initial one-shot job (it removes itself on first run).
def _job():
job.remove()
self.synchronize(synchronization="startup")
job = self.scheduler.add_job(
_job, max_instances=1, trigger="interval", seconds=1)
# Scheduler task to clean and expire the cache.
self.scheduler.add_job(
self.cache_manager.expire,
max_instances=1, trigger="interval", minutes=5)
self.scheduler.add_job(
self.cache_manager.clean,
max_instances=1, trigger="interval", minutes=30)
# Schedule tasks to synchronize each connection.
for connection in self.connections.itervalues():
            # Note: the 'connections' parameter expects a list, hence [connection].
            self.scheduler.add_job(
                self.synchronize, args=([connection], "interval"),
                max_instances=1, trigger="interval",
                minutes=connection.synchronization_interval)
def synchronize(self, connections=None, synchronization="manual"):
"""
Synchronize selected connections (or all) given a synchronization
event.
"""
count = 0
connections = connections or self.connections.values()
logger.debug("Synchronization triggered via '%s'.", synchronization)
for connection in connections:
if synchronization == "interval":
if connection.synchronization == "interval":
connection.synchronizer.synchronize()
count += 1
elif synchronization == "startup":
if connection.synchronization == "startup":
if not connection.is_initial_synced:
connection.synchronizer.synchronize()
count += 1
elif synchronization == "manual":
connection.synchronizer.synchronize()
count += 1
logger.debug("Synchronized %d connections.", count)
# Update the cache.
self.cache_manager.cache()
def start(self):
"""
Start the server.
"""
logger.debug("Starting task scheduler.")
self.scheduler.start()
logger.debug("Starting DAAP server.")
self.server.serve_forever()
def stop(self):
"""
Stop the server.
"""
logger.debug("Stopping DAAP server.")
self.server.stop()
logger.debug("Stopping task scheduler.")
self.scheduler.shutdown()
def get_cache_dir(self, *path):
"""
        Resolve the path to a cache directory. The path is relative to the data
        directory. The directory will be created if it does not exist, and
        will be tested for writing.
"""
full_path = os.path.abspath(os.path.normpath(
os.path.join(self.data_dir, *path)))
logger.debug("Resolved %s to %s", path, full_path)
# Create path if required.
try:
os.makedirs(full_path, 0755)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(full_path):
pass
else:
raise Exception("Could not create folder: %s" % full_path)
# Test for writing.
ok = True
test_file = os.path.join(full_path, ".write-test")
while os.path.exists(test_file):
test_file = test_file + str(random.randint(0, 9))
try:
with open(test_file, "w") as fp:
fp.write("test")
except IOError:
ok = False
finally:
try:
os.remove(test_file)
except OSError:
ok = False
if not ok:
raise Exception("Could not write to cache folder: %s" % full_path)
# Cache directory created and tested for writing.
return full_path
|
{
"content_hash": "2f792d2b90b8d351da1844bccdd11227",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 31.41368078175896,
"alnum_prop": 0.5648071339693074,
"repo_name": "ties/SubDaap",
"id": "ac249661b4e36d5f3fe7a6818f145a0e19ab512e",
"size": "9644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subdaap/application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9188"
},
{
"name": "Python",
"bytes": "119924"
}
],
"symlink_target": ""
}
|
"""Pretty-print tabular data."""
from collections import namedtuple
from platform import python_version_tuple
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = str
_binary_type = str
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = str
_binary_type = bytes
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.2"
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\hline"
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=DataRow("", "&", "\\\\"),
datarow=DataRow("", "&", "\\\\"),
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m")  # ANSI color codes
_invisible_codes_bytes = re.compile(br"\x1b\[\d*m")  # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is int or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len(_strip_invisible(s))
else:
return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
maxwidth = max(max(list(map(width_fn, strings))), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
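# For example: _more_generic(int, float) is float, and
# _more_generic(float, _text_type) is _text_type (text absorbs everything).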
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
return _text_type(val, "ascii")
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = list(tabular_data.keys())
            rows = list(izip_longest(*list(tabular_data.values())))  # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data.keys())
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else:  # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")): # namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, list(range(len(rows[0])))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, a two-dimensional NumPy array,
NumPy record array, or a Pandas' dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
and 'latex'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"""
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
|
{
"content_hash": "05d523bb7681787f0a76e9a82fb227a4",
"timestamp": "",
"source": "github",
"line_count": 847,
"max_line_length": 197,
"avg_line_length": 34.17355371900826,
"alnum_prop": 0.5345655553636207,
"repo_name": "vitchyr/rlkit",
"id": "33ecf0426b96a2796578f40a225ebc54f4db5a09",
"size": "28995",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rlkit/core/tabulate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3338"
},
{
"name": "Python",
"bytes": "355210"
}
],
"symlink_target": ""
}
|
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.grating_coupler_elliptical import grating_coupler_elliptical
from gdsfactory.types import ComponentSpec
@gf.cell
def grating_coupler_array(
grating_coupler: ComponentSpec = grating_coupler_elliptical,
pitch: float = 127.0,
n: int = 6,
port_name: str = "o1",
rotation: int = 0,
) -> Component:
"""Array of rectangular pads.
Args:
grating_coupler: ComponentSpec.
pitch: x spacing.
n: number of pads.
port_name: port name.
rotation: rotation angle for each reference.
"""
c = Component()
grating_coupler = gf.get_component(grating_coupler)
for i in range(n):
gc = c << grating_coupler
gc.rotate(rotation)
gc.x = i * pitch
port_name_new = f"o{i}"
c.add_port(port=gc.ports[port_name], name=port_name_new)
return c
if __name__ == "__main__":
c = grating_coupler_array()
c.show(show_ports=True)
|
{
"content_hash": "7bd9e68119d36bd30a4f273e6d5bcc63",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 87,
"avg_line_length": 26.28205128205128,
"alnum_prop": 0.6390243902439025,
"repo_name": "gdsfactory/gdsfactory",
"id": "a1ac39f7ab6124fe9815983806857b4d4b42fc66",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdsfactory/components/grating_coupler_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "605"
},
{
"name": "Dockerfile",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "2471982"
},
{
"name": "Shell",
"bytes": "671"
},
{
"name": "XS",
"bytes": "10045"
}
],
"symlink_target": ""
}
|
""" Myproxy client. Demo on how to use myproxylib.py """
import os
import sys
from myproxylib import MyProxy, UserCredential
__version__ = '0.1'
class MyProxyClient(object):
def __init__(self, host, port=7512, certfile=None, keyfile=None, debug=False):
"""
host -- hostname of MyProxy server
port -- port on which sever listens
certfile - file (path) wherefrom to read (user) certificate.
If not set, it will be defaulted to $HOME/.globus/usercert.pem
keyfile - file (path) wherefrom to read (user) key. If not set
it will be defaulted to $HOME/.globus/userkey.pem
Notice, certfile and keyfile must be PEM encoded
"""
self.debug = debug
self._usercert = None
self._userkey = None
self._keyfile = keyfile # used by SSL Context
self._certfile = certfile
self._my_proxy = MyProxy(host, port)
def set_certfile(self, certfile):
""" setting certfile for communication with MyProxy server"""
self._certfile = certfile
def set_keyfile(self, keyfile):
""" setting keyfile for communication with MyProxy server"""
self._keyfile = keyfile
def set_key_size(self, bits):
""" Sets key size of generated proxy """
self._my_proxy.set_key_size(bits)
print "key size", self._my_proxy.get_key_size()
def set_proxy_type(self, px_type, px_policy):
""" Sest type of proxy to upload """
self._my_proxy.set_proxy_type(px_type)
self._my_proxy.set_proxy_policy(px_policy)
def myproxy_logon(self, username, passphrase, outfile):
"""
Function to retrieve a proxy credential from a MyProxy server
Exceptions: MyProxyError, MyProxyInputError, MyProxySSLError
"""
self._my_proxy.init_context(self._certfile, self._keyfile)
proxy_credential = self._my_proxy.get(username, passphrase)
if self.debug:
print 'Storing proxy in:', outfile
proxy_credential.store_proxy(outfile)
def myproxy_init(self, username, myproxy_passphrase,
keyfile, certfile, lifetime = None):
""" downsized myproxy_init
username -- name used on MyProxy server for storing credential
myproxy_passphrase -- passphrase for credentials on MyProxy server
keyfile -- user local key file
certfile -- user local certificate file
lifetime -- sets max lifetime allowed for retrieved proxy credentials (in secs).
If lifetime not set, we'll set it to a libraries default value
"""
key_passphrase = getpass.getpass(prompt = "Grid passphrase:")
user_credential = UserCredential(keyfile, certfile, key_passphrase)
self._my_proxy.init_context(self._certfile, self._keyfile, key_passphrase)
proxy_credential = self._my_proxy.put(user_credential, username, myproxy_passphrase, lifetime)
if __name__ == '__main__':
    import optparse
MIN_PASS_PHRASE = 7 # minimal length of myproxy passphrase
if os.environ.has_key('MYPROXY_SERVER'):
MYPROXY_SERVER = os.environ['MYPROXY_SERVER']
else:
MYPROXY_SERVER = 'apollo.switch.ch'
usage = "usage: %prog [options] get|put \n\nDo %prog -h for more help."
parser = optparse.OptionParser(usage = usage, version = "%prog " + __version__)
parser.add_option("-l", "--username", dest = "username", default = os.environ['USER'],
help="The username with which the credential is stored on the MyProxy server")
parser.add_option("-d", "--debug", action = 'store_true', default = False,
help = "Enhance verbosity for debugging purposes")
parser.add_option("", "--limited", action = 'store_true',
default = False,
help = "Creates a limited globus proxy (policy). (default=%default).")
parser.add_option("", "--old", action = 'store_const', const = 'old', dest = 'px_type',
help = "Creates a legacy globus proxy.")
parser.add_option("", "--rfc", action = 'store_const', const = 'rfc', dest = 'px_type',
default = 'rfc',
help = "Creates a RFC3820 compliant proxy (default)." )
parser.add_option("", "--bits", dest = "bits", default = 1024, type ='int',
help="Number of bits in key (512, 1024, 2048, default=%default) " + \
"of signing proxy. All other key sizes defined by myproxy server. ")
parser.add_option("", "--cert", dest = "certfile",
default = '%s/.globus/usercert.pem' % (os.environ['HOME']),
help = "Location of user certificate(default = %default).")
parser.add_option("", "--key", dest = "keyfile",
default = '%s/.globus/userkey.pem' % (os.environ['HOME']),
help = "Location of user certificate(default = %default).")
parser.add_option("", "--out", dest = "outfile",
default = '/tmp/x509up_u%s' % (os.getuid()),
help = "Filenname under which user proxy certificate gets stored (default = %default).")
(options, args) = parser.parse_args()
if not args:
parser.error("incorrect number of arguments")
if args[0] not in ['put', 'get']:
parser.error("wrong argument")
username = options.username
if options.debug:
print 'Invoked with following parameters:'
print 'options:', options
print 'arguments:', args
try:
mp = MyProxyClient(host = MYPROXY_SERVER, debug = options.debug)
if args[0] == 'get':
passphrase = getpass.getpass(prompt="MyProxy passphrase:")
mp.myproxy_logon(username, passphrase, options.outfile)
if options.outfile:
print "A proxy has been received for user %s in %s." % (username, options.outfile)
else:
print "A proxy has been received for user %s" % (username)
elif args[0] == 'put':
passphrase = getpass.getpass(prompt="MyProxy passphrase:")
if len(passphrase) < MIN_PASS_PHRASE:
                print 'Error: Passphrase must contain at least %d characters' % MIN_PASS_PHRASE
sys.exit(-1)
mp.set_certfile(options.certfile)
mp.set_keyfile(options.keyfile)
mp.set_key_size(options.bits)
policy = 'normal'
if options.limited:
policy = 'limited'
mp.set_proxy_type(options.px_type, policy)
mp.myproxy_init(username, passphrase, options.keyfile, options.certfile)
            print 'Credential for delegation was successfully uploaded'
except Exception, e:
print "Error:", e
|
{
"content_hash": "cce1ea7dfee3781daa378b5ba98873d3",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 108,
"avg_line_length": 41.64670658682635,
"alnum_prop": 0.5897915168943206,
"repo_name": "placiflury/gridmonitor-sft",
"id": "85b56b95d1012ecaf278eddb179c635e9b7b0063",
"size": "6979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sft/utils/myproxyclient.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "160811"
}
],
"symlink_target": ""
}
|
from __future__ import division, with_statement # confidence high
import glob
import os
import sys
import numpy as np
from ..extern.six import BytesIO
import pyfits as fits
from ..verify import VerifyError
from . import PyfitsTestCase
from .util import ignore_warnings
from nose.tools import assert_raises
from warnings import catch_warnings
class TestHDUListFunctions(PyfitsTestCase):
@ignore_warnings
def test_update_name(self):
hdul = fits.open(self.data('o4sp040b0_raw.fits'))
hdul[4].update_ext_name('Jim', "added by Jim")
hdul[4].update_ext_version(9, "added by Jim")
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
hdul = fits.open(self.data('checksum.fits'))
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
def test_hdulist_file_info(self):
hdul = fits.open(self.data('checksum.fits'))
res = hdul.fileinfo(0)
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
assert_raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
hdul1 = fits.open(self.data('tb.fits'))
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 'PrimaryHDU', 4, (), '', ''),
(1, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
hdul = fits.open(self.data('arange.fits'))
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 'PrimaryHDU', 11, (), '', ''),
(1, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
assert_raises(ValueError, hdul.append, hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
hdul1 = fits.open(self.data('tb.fits'))
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 'PrimaryHDU', 4, (), '', ''),
(1, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
hdul = fits.open(self.data('arange.fits'))
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 'PrimaryHDU', 11, (), '', ''),
(1, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
assert_raises(ValueError, hdul.insert, 1, hdu)
info = [(0, 'PRIMARY', 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
        Tests inserting a Simple GroupsHDU to the beginning of an HDUList
        that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
assert_raises(ValueError, hdul.insert, 0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
hdul = fits.open(self.data('tb.fits'))
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 'PrimaryHDU', 4, (), '', ''),
(1, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 'ImageHDU', 12, (), '', ''),
(3, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
hdul = fits.open(self.data('tb.fits'))
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 'ImageHDU', 12, (), '', ''),
(2, '', 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
hdul = fits.open(self.data('tb.fits'))
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
        Tests the use of a file-like object with no tell or seek methods
        in HDUList.writeto(), HDUList.flush() or pyfits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
f = fits.open(self.data('test0.fits'))
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdul.append(fits.ImageHDU(header=f[1].header))
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
hdul = fits.open(self.data('test0.fits'))
hdul[0].header['FOO'] = 'BAR'
with catch_warnings(record=True) as w:
hdul.flush()
assert len(w) == 1
assert 'mode is not supported' in str(w[0].message)
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] == True
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] == True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
hdul = fits.open(self.data('test0.fits'))
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
hdul = fits.open(self.temp('temp.fits'), memmap=True)
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
hdul = fits.open(self.data('test0.fits'),
do_not_scale_image_data=True)
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write('\0'.encode('latin1') * 2880)
with ignore_warnings():
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape((10, 10))
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write('\0'.encode('ascii') * padding_len)
with catch_warnings(record=True) as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert ('contains null bytes instead of spaces' in
str(w[0].message))
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
assert (hdul[0].data == a).all()
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header['TEST%d' % idx] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
def test_update_resized_header(self):
"""
        Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger
        than before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header['TEST%d' % idx] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST%d' % idx] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST%d' % idx] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of PyFITS' built in test files
def test_fromstring(filename):
with fits.open(self.data(filename)) as hdul:
orig_info = hdul.info(output=False)
with open(self.data(filename), 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
with ignore_warnings():
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and
# Numpy on Windows result in MemoryErrors when
# trying to work on memmap arrays with more than
# one dimension but some dimensions of size zero,
# so include a special case for that
return hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in glob.glob(os.path.join(self.data_dir, '*.fits')):
            if (sys.platform == 'win32' and
                    os.path.basename(filename) == 'zerowidth.fits'):
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See PyFITS ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
test_fromstring(os.path.join(self.data_dir, filename))
# Test that creating an HDUList from something silly raises a TypeError
assert_raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
def test_save_backup(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file('scale.fits')
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header['TEST'] = 'TEST'
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# One more time to see if multiple backups are made
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), clobber=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), clobber=True)
hdul_a = fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a)
hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b)
hdul_a[0].data = hdul_b[0].data
hdul_a.close()
hdul_b.close()
hdul_a = fits.open(self.temp('test_a.fits'))
assert np.all(hdul_a[0].data == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), clobber=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), clobber=True)
hdul_a = fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a)
hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b)
hdul_a[1].data = hdul_b[1].data
hdul_a.close()
hdul_b.close()
hdul_a = fits.open(self.temp('test_a.fits'))
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
hdulist = fits.HDUList()
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
|
{
"content_hash": "cdc00e8b19cb9fd7c4bb505eff955def",
"timestamp": "",
"source": "github",
"line_count": 786,
"max_line_length": 80,
"avg_line_length": 36.48346055979644,
"alnum_prop": 0.5555865532152322,
"repo_name": "spacetelescope/PyFITS",
"id": "a4de1361788ec0119e04ac81e89268ec11d0a879",
"size": "28676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfits/tests/test_hdulist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "40680"
},
{
"name": "Python",
"bytes": "1234849"
}
],
"symlink_target": ""
}
|
from atpbar import atpbar
##__________________________________________________________________||
class ReaderComposite:
"""A composite of event readers"
This class is a composite in the composite pattern.
Examples of event readers are instances of `Summarizer` and this
class.
When `event()` is called, it calls `event()` of each reader in the
order in which the readers are added. If a reader returns `False`,
it won't call the remaining readers.
"""
def __init__(self, readers=None):
if readers is None:
readers = [ ]
self.readers = list(readers)
def __repr__(self):
return '{}({!r})'.format(
self.__class__.__name__,
self.readers
)
def add(self, reader):
self.readers.append(reader)
def begin(self, event):
for reader in self.readers:
if not hasattr(reader, 'begin'):
continue
reader.begin(event)
def event(self, event):
for reader in self.readers:
if reader.event(event) is False:
break
def end(self):
for reader in self.readers:
if not hasattr(reader, 'end'):
continue
reader.end()
def merge(self, other):
for r, o in zip(self.readers, other.readers):
if not hasattr(r, 'merge'):
continue
r.merge(o)
def collect(self):
ret = [ ]
for reader in atpbar(self.readers, name='collecting results'):
if not hasattr(reader, 'collect'):
ret.append(None)
continue
ret.append(reader.collect())
return ret
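# A minimal usage sketch (illustrative only; ``EchoReader`` is a
# hypothetical reader, not part of alphatwirl). It shows that readers run
# in the order added and that a reader returning False from ``event()``
# stops the remaining readers for that event.
if __name__ == '__main__':
    class EchoReader:
        def __init__(self, name, veto=False):
            self.name = name
            self.veto = veto
        def event(self, event):
            print('{} saw {}'.format(self.name, event))
            return False if self.veto else None
    composite = ReaderComposite()
    composite.add(EchoReader('first', veto=True))
    composite.add(EchoReader('second'))
    composite.event('event-1')  # prints only "first saw event-1"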
##__________________________________________________________________||
|
{
"content_hash": "3dce2ef0dac7c5a3e30421232493cc18",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 70,
"avg_line_length": 27.446153846153845,
"alnum_prop": 0.5022421524663677,
"repo_name": "alphatwirl/alphatwirl",
"id": "9727d1d97213b9157dc27d98822d2cb5c1d224f6",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alphatwirl/loop/ReaderComposite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3603"
},
{
"name": "Python",
"bytes": "775977"
},
{
"name": "R",
"bytes": "1222"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
}
|
"""Generate the quadrature abscissas and weights in Fejer quadrature."""
import numpy
import chaospy
from .hypercube import hypercube_quadrature
from .clenshaw_curtis import clenshaw_curtis_simple
def fejer_2(order, domain=(0, 1), growth=False, segments=1):
"""
Generate the quadrature abscissas and weights in Fejér type II quadrature.
Fejér proposed two quadrature rules very similar to
:func:`chaospy.quadrature.clenshaw_curtis`. The only difference is that the
endpoints are removed. That is, Fejér only used the interior extrema of the
Chebyshev polynomials, i.e. the true stationary points. This makes this a
better method for performing quadrature on infinite intervals, as the
evaluation does not contain endpoint values.
Args:
order (int, numpy.ndarray):
Quadrature order.
domain (chaospy.Distribution, numpy.ndarray):
Either distribution or bounding of interval to integrate over.
        growth (bool):
            If True, sets the growth rule for the quadrature rule to only
            include orders that enhance nested samples.
        segments (int):
            Split intervals into N subintervals and create a patched
            quadrature based on the segmented quadrature. Can not be lower than
            `order`. If 0 is provided, defaults to the square root of `order`.
            Nested samples only exist when the number of segments is fixed.
Returns:
abscissas (numpy.ndarray):
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights (numpy.ndarray):
The quadrature weights with ``weights.shape == (N,)``.
Notes:
Implemented as proposed by Waldvogel :cite:`waldvogel_fast_2006`.
Example:
>>> abscissas, weights = chaospy.quadrature.fejer_2(3, (0, 1))
>>> abscissas.round(4)
array([[0.0955, 0.3455, 0.6545, 0.9045]])
>>> weights.round(4)
array([0.1804, 0.2996, 0.2996, 0.1804])
See also:
:func:`chaospy.quadrature.gaussian`
:func:`chaospy.quadrature.clenshaw_curtis`
:func:`chaospy.quadrature.fejer_1`
"""
order = numpy.asarray(order)
order = numpy.where(growth, numpy.where(order > 0, 2 ** (order + 1) - 2, 0), order)
return hypercube_quadrature(
quad_func=fejer_2_simple,
order=order,
domain=domain,
segments=segments,
)
def fejer_2_simple(order):
"""
Backend for Fejer type II quadrature.
Same as Clenshaw-Curtis, but with the end nodes removed.
"""
abscissas, weights = clenshaw_curtis_simple(order + 2)
return abscissas[1:-1], weights[1:-1]
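# Illustrative sanity check (a sketch under the assumption that
# ``chaospy.quadrature.clenshaw_curtis`` shares this signature; it is not
# part of the library's test suite): the Fejer type II nodes equal the
# Clenshaw-Curtis nodes two orders higher with the endpoints dropped.
#
#     x_f2, _ = chaospy.quadrature.fejer_2(3, (0, 1))
#     x_cc, _ = chaospy.quadrature.clenshaw_curtis(5, (0, 1))
#     assert numpy.allclose(x_f2, x_cc[:, 1:-1])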
|
{
"content_hash": "411770ecd6703aac5ac6ec6869c7fcb0",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 87,
"avg_line_length": 37.12,
"alnum_prop": 0.6537356321839081,
"repo_name": "jonathf/chaospy",
"id": "2be56d22965ac99e2d86185b65e006f30d6a334e",
"size": "2811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chaospy/quadrature/fejer_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "959784"
}
],
"symlink_target": ""
}
|
matrix_input = input("Matice (čísla odděluj mezerou, řádky čárkou):").split(",")
permutation = list( map( int, input("Permutace (čísla odděluj mezerou):").split(" ") ) )
# Pouze zpracuje zadanou matici tak, aby s ní mohl python pracovat
matrix = []
for m in matrix_input:
matrix.append( list(map(int, m.split(" ") ) ) )
def swap( perm, matrix ):
"""
Prohodí řádky matice podle zadané permutace.
Parametry:
----------
perm: list
Pole délky N obsahující čísla od 0 do N. Určuje permutaci,
se kterou se prohodí řádky matice.
matrix: list
Pole, ve kterém se prohodí řádky.
"""
if len(perm) != len(matrix):
return None
# Vytvoří pole stejné velikosti jako předaná matice
result = [0] * len(matrix)
# Projde matici
for i in range( len( matrix ) ):
# Pokud v poli permutací není aktuální index,
# nevíme kam řádek umístit
if not i in perm:
return None
# Prohodí řádek na správné místo
result[ perm.index(i) ] = matrix[i]
return result
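# Worked example (illustrative): with perm = [2, 0, 1] and rows [A, B, C],
# row i moves to the index where perm holds i: row 0 -> index 1,
# row 1 -> index 2, row 2 -> index 0, giving [C, A, B].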
print( swap( permutation, matrix ) )
|
{
"content_hash": "10bc6e122c39326ddad37484fa550b21",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 88,
"avg_line_length": 24.78723404255319,
"alnum_prop": 0.5896995708154507,
"repo_name": "malja/cvut-python",
"id": "dbec6bd5a4f5406d8b4c803512b79e13e23acdeb",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cviceni05/05_prohozeni_radku_matice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138285"
}
],
"symlink_target": ""
}
|
import time
import unittest
from datetime import datetime as dt
from unittest import mock
from unittest.mock import ANY, call
from watchtower import CloudWatchLogHandler
from airflow.models import DAG, DagRun, TaskInstance
from airflow.operators.empty import EmptyOperator
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.providers.amazon.aws.log.cloudwatch_task_handler import CloudwatchTaskHandler
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
try:
import boto3
import moto
from moto import mock_logs
except ImportError:
mock_logs = None
def get_time_str(time_in_milliseconds):
dt_time = dt.utcfromtimestamp(time_in_milliseconds / 1000.0)
return dt_time.strftime("%Y-%m-%d %H:%M:%S,000")
@unittest.skipIf(mock_logs is None, "Skipping test because moto.mock_logs is not available")
@mock_logs
class TestCloudwatchTaskHandler(unittest.TestCase):
@conf_vars({('logging', 'remote_log_conn_id'): 'aws_default'})
def setUp(self):
self.remote_log_group = 'log_group_name'
self.region_name = 'us-west-2'
self.local_log_location = 'local/log/location'
self.filename_template = '{dag_id}/{task_id}/{execution_date}/{try_number}.log'
self.cloudwatch_task_handler = CloudwatchTaskHandler(
self.local_log_location,
f"arn:aws:logs:{self.region_name}:11111111:log-group:{self.remote_log_group}",
self.filename_template,
)
        self.cloudwatch_task_handler.hook  # eagerly initialize the hook
date = datetime(2020, 1, 1)
dag_id = 'dag_for_testing_cloudwatch_task_handler'
task_id = 'task_for_testing_cloudwatch_log_handler'
self.dag = DAG(dag_id=dag_id, start_date=date)
task = EmptyOperator(task_id=task_id, dag=self.dag)
dag_run = DagRun(dag_id=self.dag.dag_id, execution_date=date, run_id="test")
self.ti = TaskInstance(task=task)
self.ti.dag_run = dag_run
self.ti.try_number = 1
self.ti.state = State.RUNNING
self.remote_log_stream = f'{dag_id}/{task_id}/{date.isoformat()}/{self.ti.try_number}.log'.replace(
':', '_'
)
moto.moto_api._internal.models.moto_api_backend.reset()
self.conn = boto3.client('logs', region_name=self.region_name)
def tearDown(self):
self.cloudwatch_task_handler.handler = None
def test_hook(self):
assert isinstance(self.cloudwatch_task_handler.hook, AwsLogsHook)
@conf_vars({('logging', 'remote_log_conn_id'): 'aws_default'})
def test_hook_raises(self):
handler = CloudwatchTaskHandler(
self.local_log_location,
f"arn:aws:logs:{self.region_name}:11111111:log-group:{self.remote_log_group}",
self.filename_template,
)
with mock.patch.object(handler.log, 'error') as mock_error:
with mock.patch("airflow.providers.amazon.aws.hooks.logs.AwsLogsHook") as mock_hook:
mock_hook.side_effect = Exception('Failed to connect')
# Initialize the hook
handler.hook
mock_error.assert_called_once_with(
'Could not create an AwsLogsHook with connection id "%s". Please make '
'sure that apache-airflow[aws] is installed and the Cloudwatch '
'logs connection exists. Exception: "%s"',
'aws_default',
ANY,
)
def test_handler(self):
self.cloudwatch_task_handler.set_context(self.ti)
assert isinstance(self.cloudwatch_task_handler.handler, CloudWatchLogHandler)
def test_write(self):
handler = self.cloudwatch_task_handler
handler.set_context(self.ti)
messages = [str(i) for i in range(10)]
with mock.patch("watchtower.CloudWatchLogHandler.emit") as mock_emit:
for message in messages:
handler.handle(message)
mock_emit.assert_has_calls([call(message) for message in messages])
def test_event_to_str(self):
handler = self.cloudwatch_task_handler
current_time = int(time.time()) * 1000
events = [
{'timestamp': current_time - 2000, 'message': 'First'},
{'timestamp': current_time - 1000, 'message': 'Second'},
{'timestamp': current_time, 'message': 'Third'},
]
assert [handler._event_to_str(event) for event in events] == (
[
f'[{get_time_str(current_time-2000)}] First',
f'[{get_time_str(current_time-1000)}] Second',
f'[{get_time_str(current_time)}] Third',
]
)
def test_read(self):
# Confirmed via AWS Support call:
# CloudWatch events must be ordered chronologically otherwise
# boto3 put_log_event API throws InvalidParameterException
# (moto does not throw this exception)
current_time = int(time.time()) * 1000
generate_log_events(
self.conn,
self.remote_log_group,
self.remote_log_stream,
[
{'timestamp': current_time - 2000, 'message': 'First'},
{'timestamp': current_time - 1000, 'message': 'Second'},
{'timestamp': current_time, 'message': 'Third'},
],
)
msg_template = '*** Reading remote log from Cloudwatch log_group: {} log_stream: {}.\n{}\n'
events = '\n'.join(
[
f'[{get_time_str(current_time-2000)}] First',
f'[{get_time_str(current_time-1000)}] Second',
f'[{get_time_str(current_time)}] Third',
]
)
assert self.cloudwatch_task_handler.read(self.ti) == (
[[('', msg_template.format(self.remote_log_group, self.remote_log_stream, events))]],
[{'end_of_log': True}],
)
def test_read_wrong_log_stream(self):
generate_log_events(
self.conn,
self.remote_log_group,
'alternate_log_stream',
[
{'timestamp': 10000, 'message': 'First'},
{'timestamp': 20000, 'message': 'Second'},
{'timestamp': 30000, 'message': 'Third'},
],
)
msg_template = '*** Reading remote log from Cloudwatch log_group: {} log_stream: {}.\n{}\n'
error_msg = (
'Could not read remote logs from log_group: '
f'{self.remote_log_group} log_stream: {self.remote_log_stream}.'
)
assert self.cloudwatch_task_handler.read(self.ti) == (
[[('', msg_template.format(self.remote_log_group, self.remote_log_stream, error_msg))]],
[{'end_of_log': True}],
)
def test_read_wrong_log_group(self):
generate_log_events(
self.conn,
'alternate_log_group',
self.remote_log_stream,
[
{'timestamp': 10000, 'message': 'First'},
{'timestamp': 20000, 'message': 'Second'},
{'timestamp': 30000, 'message': 'Third'},
],
)
msg_template = '*** Reading remote log from Cloudwatch log_group: {} log_stream: {}.\n{}\n'
error_msg = (
f'Could not read remote logs from log_group: '
f'{self.remote_log_group} log_stream: {self.remote_log_stream}.'
)
assert self.cloudwatch_task_handler.read(self.ti) == (
[[('', msg_template.format(self.remote_log_group, self.remote_log_stream, error_msg))]],
[{'end_of_log': True}],
)
def test_close_prevents_duplicate_calls(self):
with mock.patch("watchtower.CloudWatchLogHandler.close") as mock_log_handler_close:
with mock.patch("airflow.utils.log.file_task_handler.FileTaskHandler.set_context"):
self.cloudwatch_task_handler.set_context(self.ti)
for _ in range(5):
self.cloudwatch_task_handler.close()
mock_log_handler_close.assert_called_once()
def generate_log_events(conn, log_group_name, log_stream_name, log_events):
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
conn.put_log_events(logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=log_events)
|
{
"content_hash": "6b7415b3fbb2b93786ce9c92774d204e",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 107,
"avg_line_length": 40.38755980861244,
"alnum_prop": 0.5969671839829405,
"repo_name": "lyft/incubator-airflow",
"id": "dbd2ae28d5ad77000b276dfa6ca475372ebd161e",
"size": "9228",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/amazon/aws/log/test_cloudwatch_task_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
}
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"#63pmtn&bd$ly7bz4x93jn@q^2a7cxze7n#rq7uf2fooqzq_yt"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'pbl',
    'fluent_comments',
    'threadedcomments',
    'crispy_forms',
    'django.contrib.comments',
    'ckeditor',
)
FLUENT_COMMENTS_EXCLUDE_FIELDS = ('title', 'email', 'url')
COMMENTS_APP = 'fluent_comments'
CKEDITOR_UPLOAD_PATH = "uploads/"
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## SOUTH CONFIGURATION
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
# Don't need to use South when setting up a test database.
SOUTH_TESTS_MIGRATE = False
########## END SOUTH CONFIGURATION
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
{
"content_hash": "eff18729b9cab61b1d55378019aad6eb",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 100,
"avg_line_length": 29.548042704626333,
"alnum_prop": 0.6864988558352403,
"repo_name": "hamole/pbl8",
"id": "9bb159752a58226ab0484dd4628fe335b6e5da04",
"size": "8303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pbl8_project/pbl8_project/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "32871"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from mock import patch
from pip.vcs.git import Git
from tests.test_pip import (reset_env, run_pip,
_create_test_package)
def test_get_tag_revs_should_return_tag_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
env.run('git', 'tag', '0.2', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_tag_revs(version_pkg_path)
assert result == {'0.1': commit, '0.2': commit}, result
def test_get_branch_revs_should_return_branch_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
def test_get_branch_revs_should_ignore_no_branch():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
# current branch here is "* (nobranch)"
env.run('git', 'checkout', commit,
cwd=version_pkg_path, expect_stderr=True)
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_branch_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('master', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_tag_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_ambiguous_commit(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456'], result
|
{
"content_hash": "ee516e63e6384e15cb1dbfd1b24e565f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 38.38961038961039,
"alnum_prop": 0.5937077131258457,
"repo_name": "jbenet/heroku-buildpack-python",
"id": "0b3abab2e1331de3de7e2d51ade45042e2b6d794",
"size": "2956",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "vendor/pip-1.2.1/tests/test_vcs_git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "1393"
},
{
"name": "Python",
"bytes": "20214"
},
{
"name": "Ruby",
"bytes": "0"
},
{
"name": "Shell",
"bytes": "8700"
}
],
"symlink_target": ""
}
|
"""
Find the kth smallest element in linear time using divide and conquer.
Recall we can do this trivially in O(nlogn) time. Sort the list and
access kth element in constant time.
This is a divide and conquer algorithm that can find a solution in O(n) time.
For more information of this algorithm:
https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf
"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
"""
Choose a random pivot for the list.
We can use a more sophisticated algorithm here, such as the median-of-medians
algorithm.
"""
return choice(lst)
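# A minimal sketch of the median-of-medians pivot mentioned above
# (illustrative; ``median_of_medians_pivot`` is not part of this module):
# split the list into groups of at most five, take each group's median,
# and recurse on those medians to obtain a provably good pivot.
def median_of_medians_pivot(lst):
    if len(lst) <= 5:
        return sorted(lst)[len(lst) // 2]
    groups = [lst[i:i + 5] for i in range(0, len(lst), 5)]
    medians = [sorted(group)[len(group) // 2] for group in groups]
    return median_of_medians_pivot(medians)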
def kth_number(lst: list[int], k: int) -> int:
"""
Return the kth smallest number in lst.
>>> kth_number([2, 1, 3, 4, 5], 3)
3
>>> kth_number([2, 1, 3, 4, 5], 1)
1
>>> kth_number([2, 1, 3, 4, 5], 5)
5
>>> kth_number([3, 2, 5, 6, 7, 8], 2)
3
>>> kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4)
43
"""
# pick a pivot and separate into list based on pivot.
pivot = random_pivot(lst)
    # partition based on pivot in linear time
    # note: elements equal to the pivot (other than the pivot itself) are
    # dropped, so the input is assumed to contain distinct values
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # the kth smallest lies among the elements bigger than the pivot
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # the kth smallest lies among the elements smaller than the pivot
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{
"content_hash": "d4d0419a22d7112dd1f5d2d38ae1d506",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 82,
"avg_line_length": 27,
"alnum_prop": 0.6142450142450142,
"repo_name": "TheAlgorithms/Python",
"id": "666ad1a39b8a1290529987fe7555ff437743c7ba",
"size": "1755",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "divide_and_conquer/kth_order_statistic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2601694"
}
],
"symlink_target": ""
}
|
import sys
import traceback
from optparse import make_option
from django_extensions.compat import importlib, list_apps
from django_extensions.management.email_notifications import \
EmailNotificationCommand
from django_extensions.management.utils import signalcommand
def vararg_callback(option, opt_str, opt_value, parser):
parser.rargs.insert(0, opt_value)
value = []
for arg in parser.rargs:
# stop on --foo like options
if arg[:2] == "--" and len(arg) > 2:
break
# stop on -a like options
if arg[:1] == "-":
break
value.append(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
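# For example, ``runscript demo --script-args foo bar --silent`` yields
# options.script_args == ['foo', 'bar']: collection stops at the next
# "-"/"--" style option, so --silent is still parsed normally.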
class Command(EmailNotificationCommand):
option_list = EmailNotificationCommand.option_list + (
make_option('--fixtures', action='store_true', dest='infixtures', default=False,
help='Only look in app.fixtures subdir'),
        make_option('--noscripts', action='store_true', dest='noscripts', default=False,
                    help='Do not look in app.scripts subdir'),
make_option('-s', '--silent', action='store_true', dest='silent', default=False,
help='Run silently, do not show errors and tracebacks'),
make_option('--no-traceback', action='store_true', dest='no_traceback', default=False,
help='Do not show tracebacks'),
make_option('--script-args', action='callback', callback=vararg_callback, type='string',
help='Space-separated argument list to be passed to the scripts. Note that the '
'same arguments will be passed to all named scripts.'),
)
help = 'Runs a script in django context.'
args = "script [script ...]"
@signalcommand
def handle(self, *scripts, **options):
NOTICE = self.style.SQL_TABLE
NOTICE2 = self.style.SQL_FIELD
ERROR = self.style.ERROR
ERROR2 = self.style.NOTICE
subdirs = []
if not options.get('noscripts'):
subdirs.append('scripts')
if options.get('infixtures'):
subdirs.append('fixtures')
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', True)
if show_traceback is None:
# XXX: traceback is set to None from Django ?
show_traceback = True
no_traceback = options.get('no_traceback', False)
if no_traceback:
show_traceback = False
silent = options.get('silent', False)
if silent:
verbosity = 0
email_notifications = options.get('email_notifications', False)
        if len(subdirs) < 1:
            print(NOTICE("No subdirs left to run."))
return
if len(scripts) < 1:
print(ERROR("Script name required."))
return
def run_script(mod, *script_args):
try:
mod.run(*script_args)
if email_notifications:
self.send_email_notification(notification_id=mod.__name__)
except Exception:
if silent:
return
if verbosity > 0:
print(ERROR("Exception while running run() in '%s'" % mod.__name__))
if email_notifications:
self.send_email_notification(
notification_id=mod.__name__, include_traceback=True)
if show_traceback:
raise
def my_import(mod):
if verbosity > 1:
print(NOTICE("Check for %s" % mod))
# check if module exists before importing
try:
importlib.import_module(mod)
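                # the __import__ call with a non-empty fromlist returns the
                # submodule itself rather than its top-level package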
t = __import__(mod, [], [], [" "])
except (ImportError, AttributeError) as e:
if str(e).startswith('No module named'):
try:
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
if exc_traceback.tb_next.tb_next is None:
return False
except AttributeError:
pass
finally:
exc_traceback = None
if verbosity > 1:
if verbosity > 2:
traceback.print_exc()
print(ERROR("Cannot import module '%s': %s." % (mod, e)))
return False
if hasattr(t, "run"):
if verbosity > 1:
print(NOTICE2("Found script '%s' ..." % mod))
return t
else:
if verbosity > 1:
print(ERROR2("Find script '%s' but no run() function found." % mod))
def find_modules_for_script(script):
""" find script module which contains 'run' attribute """
modules = []
# first look in apps
for app in list_apps():
for subdir in subdirs:
mod = my_import("%s.%s.%s" % (app, subdir, script))
if mod:
modules.append(mod)
# try app.DIR.script import
sa = script.split(".")
for subdir in subdirs:
nn = ".".join(sa[:-1] + [subdir, sa[-1]])
mod = my_import(nn)
if mod:
modules.append(mod)
# try direct import
if script.find(".") != -1:
mod = my_import(script)
if mod:
modules.append(mod)
return modules
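        # Illustrative lookup order (app name hypothetical): for
        # "runscript cleanup" with an app "blog" and subdirs ['scripts'],
        # my_import is tried on "blog.scripts.cleanup" and "scripts.cleanup";
        # a dotted name passed on the command line is also imported directly.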
if options.get('script_args'):
script_args = options['script_args']
else:
script_args = []
for script in scripts:
modules = find_modules_for_script(script)
if not modules:
if verbosity > 0 and not silent:
print(ERROR("No (valid) module for script '%s' found" % script))
if verbosity < 2:
print(ERROR("Try running with a higher verbosity level like: -v2 or -v3"))
for mod in modules:
if verbosity > 1:
print(NOTICE2("Running script '%s' ..." % mod.__name__))
run_script(mod, *script_args)
|
{
"content_hash": "a9dffeef849a23a17b20d3594528939b",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 100,
"avg_line_length": 38.232142857142854,
"alnum_prop": 0.5084851315584618,
"repo_name": "jpadilla/django-extensions",
"id": "b094504fefb6d82051c7aec08a6cd9798667bdef",
"size": "6438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_extensions/management/commands/runscript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "741"
},
{
"name": "HTML",
"bytes": "2168"
},
{
"name": "JavaScript",
"bytes": "39590"
},
{
"name": "Python",
"bytes": "468181"
}
],
"symlink_target": ""
}
|
from superset.db_engine_specs.firebolt import FireboltEngineSpec
from tests.integration_tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestFireboltDbEngineSpec(TestDbEngineSpec):
def test_convert_dttm(self):
dttm = self.get_dttm()
test_cases = {
"DATE": "CAST('2019-01-02' AS DATE)",
"DATETIME": "CAST('2019-01-02T03:04:05' AS DATETIME)",
"TIMESTAMP": "CAST('2019-01-02T03:04:05' AS TIMESTAMP)",
"UNKNOWNTYPE": None,
}
for target_type, expected in test_cases.items():
actual = FireboltEngineSpec.convert_dttm(target_type, dttm)
self.assertEqual(actual, expected)
def test_epoch_to_dttm(self):
assert (
FireboltEngineSpec.epoch_to_dttm().format(col="timestamp_column")
== "from_unixtime(timestamp_column)"
)
|
{
"content_hash": "559cf7e38c92c84a8e19f174de596c26",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 38.21739130434783,
"alnum_prop": 0.6291240045506257,
"repo_name": "zhouyao1994/incubator-superset",
"id": "793b32970bddb85db0426d1f816774c2e75bb02f",
"size": "1664",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/integration_tests/db_engine_specs/firebolt_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4776"
},
{
"name": "Dockerfile",
"bytes": "6940"
},
{
"name": "HTML",
"bytes": "1243911"
},
{
"name": "JavaScript",
"bytes": "2445349"
},
{
"name": "Jinja",
"bytes": "5542"
},
{
"name": "Jupyter Notebook",
"bytes": "1925627"
},
{
"name": "Less",
"bytes": "106438"
},
{
"name": "Makefile",
"bytes": "3946"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Pug",
"bytes": "2969"
},
{
"name": "Python",
"bytes": "6296253"
},
{
"name": "Shell",
"bytes": "56211"
},
{
"name": "Smarty",
"bytes": "4298"
},
{
"name": "TypeScript",
"bytes": "6909337"
}
],
"symlink_target": ""
}
|
from artefact.utils.consume_parser import ConsumeParser
import re
import json
class Line(ConsumeParser):
# This char is §, the 'Section' character
split_char = '§'
rules = [
(r'^(?P<cookie_id>-?\d*)' + split_char, False),
(r'^(?P<site_id>\d+)' + split_char, False),
(r'^(?P<day>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})' + split_char, False),
(r'^(?P<event_time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})' + split_char, False),
(r'^(?P<campagne_id>\d+)' + split_char, False),
(r'^(?P<insertion_id>\d+)' + split_char, False),
(r'^(?P<image_id>\d+)' + split_char, False),
(r'^(?P<insertion_type>\d+){}+;?'.format(split_char), False),
(r'^(?P<targeting>(.*,?)){}'.format(split_char), False),
(r'^(?P<page_id>\d+)?', False)
]
schema = ["cookie_id", "site_id", "day", "event_time", "campagne_id", "insertion_id", "image_id", "insertion_type", "targeting", "page_id"]
@classmethod
def preprocess(cls, line):
# Lines have different versions
# Old version doesn't have pageid or targeting columns
# In that specific case we count the number of split_char
# If the number is lower than the number of rules
# We add the missing split_chars at the end of the string before parsing it
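        # e.g. (illustrative) a legacy line whose split yields 8 parts gets
        # len(cls.rules) - 8 = 2 trailing split_chars appended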
return line+''.join([cls.split_char] * (len(cls.rules) - len(line.split(cls.split_char))))
@classmethod
    def postprocess(cls, payload):
        # Transforming "targeting" from a raw string to a JSON object string
        # Splitting targeting from
        # payload["targeting"] = "$x=y,$z=a,$b=c,,,,,"
        # to
        # payload["targeting"] = {"x":"y", "z":"a", "b":"c"}
        if payload['targeting'] != '':
            # deleting all empty fields "xyz,,,,,"
            payload['targeting'] = filter(None, payload["targeting"].split(','))[0]
            # splitting by "=" to get key : value
            payload['targeting'] = {a[0]: a[1] for a in map(cls.split_targeting, payload['targeting'].split(';'))}
        else:
            payload['targeting'] = {}
        payload['targeting'] = json.dumps(payload['targeting'])
        return [payload]
return [payload]
@staticmethod
def split_targeting(string):
# delete $ from key name and return (key, value) duet
split_data = string.replace('$', '').split("=")
if len(split_data) <= 1:
split_data.append("")
return split_data
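        # Illustrative results (a sketch): split_targeting("$x=y") -> ["x", "y"];
        # split_targeting("$flag") -> ["flag", ""] (the empty value is padded in)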
|
{
"content_hash": "02d1c2ec72f36b8a2d4ad2622789f5e6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 143,
"avg_line_length": 41.50819672131148,
"alnum_prop": 0.5434439178515008,
"repo_name": "artefactory/artefact.connectors",
"id": "eac5dc480e96156a1d61819af48b783daf3fd2e5",
"size": "2556",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "artefact/connectors/smart/impressions/line.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "174010"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
}
|
from django.core import management
from django.core.management.validation import (
ModelErrorCollection, validate_model_signals
)
from django.db.models.signals import post_init
from django.test import TestCase
from django.utils import six
class OnPostInit(object):
def __call__(self, **kwargs):
pass
def on_post_init(**kwargs):
pass
class ModelValidationTest(TestCase):
def test_models_validate(self):
# All our models should validate properly
# Validation Tests:
# * choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
# * related_name='+' doesn't clash with another '+'
# See: https://code.djangoproject.com/ticket/21375
management.call_command("validate", stdout=six.StringIO())
def test_model_signal(self):
unresolved_references = post_init.unresolved_references.copy()
post_init.connect(on_post_init, sender='missing-app.Model')
post_init.connect(OnPostInit(), sender='missing-app.Model')
e = ModelErrorCollection(six.StringIO())
validate_model_signals(e)
self.assertSetEqual(
set(e.errors),
{(
'model_validation.tests',
"The `on_post_init` function was connected to the `post_init` "
"signal with a lazy reference to the 'missing-app.Model' "
"sender, which has not been installed."
), (
'model_validation.tests',
"An instance of the `OnPostInit` class was connected to "
"the `post_init` signal with a lazy reference to the "
"'missing-app.Model' sender, which has not been installed."
)}
)
post_init.unresolved_references = unresolved_references
|
{
"content_hash": "cb50f8501e385be290f3814763d3a7a5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 37.36734693877551,
"alnum_prop": 0.6220644456581104,
"repo_name": "Beeblio/django",
"id": "225656e4cbb55a04c999ab2d4d9a24b7f1e196e4",
"size": "1831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/model_validation/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42830"
},
{
"name": "HTML",
"bytes": "173915"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "9172420"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
flags = tf.flags
FLAGS = tf.app.flags.FLAGS
LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h'])
def cell_depth(num):
num /= 2
val = np.log2(1 + num)
assert abs(val - int(val)) == 0
return int(val)
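# Worked example: the base-8 Alien cell below passes 30 structural params
# (32 minus the two cell-injection params), so cell_depth(30) computes
# 30 / 2 = 15 and log2(1 + 15) = 4: four layers of [8, 4, 2, 1] units.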
class GenericMultiRNNCell(tf.contrib.rnn.RNNCell):
"""More generic version of MultiRNNCell that allows you to pass in a dropout mask"""
def __init__(self, cells):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
self._cells = cells
@property
def state_size(self):
return tuple(cell.state_size for cell in self._cells)
@property
def output_size(self):
return self._cells[-1].output_size
def __call__(self, inputs, state, input_masks=None, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with tf.variable_scope(scope or type(self).__name__):
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with tf.variable_scope('Cell%d' % i):
cur_state = state[i]
if input_masks is not None:
cur_inp *= input_masks[i]
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = tuple(new_states)
return cur_inp, new_states
class AlienRNNBuilder(tf.contrib.rnn.RNNCell):
def __init__(self, num_units, params, additional_params, base_size):
self.num_units = num_units
self.cell_create_index = additional_params[0]
self.cell_inject_index = additional_params[1]
self.base_size = base_size
self.cell_params = params[
-2:] # Cell injection parameters are always the last two
params = params[:-2]
self.depth = cell_depth(len(params))
self.params = params
self.units_per_layer = [2**i for i in range(self.depth)
][::-1] # start with the biggest layer
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or type(self).__name__):
definition1 = ['add', 'elem_mult', 'max']
definition2 = [tf.identity, tf.tanh, tf.sigmoid, tf.nn.relu, tf.sin]
layer_outputs = [[] for _ in range(self.depth)]
with tf.variable_scope('rnn_builder'):
curr_index = 0
c, h = state
# Run all dense matrix multiplications at once
big_h_mat = tf.get_variable(
'big_h_mat', [self.num_units,
self.base_size * self.num_units], tf.float32)
big_inputs_mat = tf.get_variable(
'big_inputs_mat', [self.num_units,
self.base_size * self.num_units], tf.float32)
big_h_output = tf.matmul(h, big_h_mat)
big_inputs_output = tf.matmul(inputs, big_inputs_mat)
h_splits = tf.split(big_h_output, self.base_size, axis=1)
inputs_splits = tf.split(big_inputs_output, self.base_size, axis=1)
for layer_num, units in enumerate(self.units_per_layer):
for unit_num in range(units):
with tf.variable_scope(
'layer_{}_unit_{}'.format(layer_num, unit_num)):
if layer_num == 0:
prev1_mat = h_splits[unit_num]
prev2_mat = inputs_splits[unit_num]
else:
prev1_mat = layer_outputs[layer_num - 1][2 * unit_num]
prev2_mat = layer_outputs[layer_num - 1][2 * unit_num + 1]
if definition1[self.params[curr_index]] == 'add':
output = prev1_mat + prev2_mat
elif definition1[self.params[curr_index]] == 'elem_mult':
output = prev1_mat * prev2_mat
elif definition1[self.params[curr_index]] == 'max':
output = tf.maximum(prev1_mat, prev2_mat)
if curr_index / 2 == self.cell_create_index: # Take the new cell before the activation
new_c = tf.identity(output)
output = definition2[self.params[curr_index + 1]](output)
if curr_index / 2 == self.cell_inject_index:
if definition1[self.cell_params[0]] == 'add':
output += c
elif definition1[self.cell_params[0]] == 'elem_mult':
output *= c
elif definition1[self.cell_params[0]] == 'max':
output = tf.maximum(output, c)
output = definition2[self.cell_params[1]](output)
layer_outputs[layer_num].append(output)
curr_index += 2
new_h = layer_outputs[-1][-1]
return new_h, LSTMTuple(new_c, new_h)
@property
def state_size(self):
return LSTMTuple(self.num_units, self.num_units)
@property
def output_size(self):
return self.num_units
class Alien(AlienRNNBuilder):
"""Base 8 Cell."""
def __init__(self, num_units):
params = [
0, 2, 0, 3, 0, 2, 1, 3, 0, 1, 0, 2, 0, 1, 0, 2, 1, 1, 0, 1, 1, 1, 0, 2,
1, 0, 0, 1, 1, 1, 0, 1
]
additional_params = [12, 8]
base_size = 8
super(Alien, self).__init__(num_units, params, additional_params, base_size)
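# Minimal usage sketch (hedged; the surrounding graph setup is assumed):
#   cell = Alien(num_units=128)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# where `inputs` is a [batch, time, 128] float tensor; the fixed matmul shapes
# above require the input feature size to equal num_units.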
|
{
"content_hash": "d2ebd38944ab10f33354cd3985a55377",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 101,
"avg_line_length": 37.64900662251656,
"alnum_prop": 0.5892700087950747,
"repo_name": "jiaphuan/models",
"id": "6add7ffa4e0d69da56d2bba7d9da3875b5c4dd3b",
"size": "6374",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "research/maskgan/nas_utils/custom_cell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
}
|
from twisted.application import service
from DeviceService import DeviceService
from ControlService import ControlService
from Config import Config
from DB import DB
class MainService(service.MultiService):
def __init__(self):
service.MultiService.__init__(self)
self.config = Config.getconf()
self.controlService = ControlService(self.config)
self.deviceService = DeviceService(self.config)
self.scripts = {}
def field_updated(self, devid, field):
field["DevId"] = devid
for script in self.scripts.values():
script.doif(field)
def script_load(self, res):
self.scripts = res
for script in self.scripts.values():
script.action.set_serv(self)
self.controlService.setServiceParent(self)
self.deviceService.setServiceParent(self)
def startService(self):
from snp import create_self_signed_cert
create_self_signed_cert("keys", self.config.name)
service.MultiService.startService(self)
d = DB.get_scripts()
d.addCallback(self.script_load)
def script_changed(self, script):
script.action.set_serv(self)
self.scripts[script.id] = script
return script
def script_deleted(self, sid):
self.scripts.pop(sid)
return sid
def create_script(self, script):
d = DB.add_script(script)
d.addCallback(self.script_changed)
return d
def edit_script(self, script):
d = DB.edit_script(script)
d.addCallback(self.script_changed)
return d
def delete_script(self, sid):
d = DB.delete_script(sid)
d.addCallback(self.script_deleted)
return d
def get_scripts(self):
d = DB.get_scripts()
d.addCallback(lambda res: res.values())
return d
def get_methods(self):
d = DB.get_methods()
return d
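    # The two methods below first resolve a local device id to a
    # (remote_devid, ip) pair via the DB, then chain a second Deferred that
    # queries the device service; the nested inner callback re-attaches the
    # local DevId so callers see a consistent payload.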
def get_device_data(self, devid):
def callb(res):
def callb(res):
res["Device"]["DevId"] = devid
return res["Device"]
rdevid, ip = res
return self.deviceService.get_device_fields(ip, rdevid).addCallback(callb)
d = DB.get_remote_device_from_local(devid)
d.addCallback(callb)
return d
def update_device_field(self, devid, field, value):
def callb(res):
def callb(res):
res["DevId"] = devid
return res
rdevid, ip = res
return self.deviceService.update_device_field(ip, rdevid, field, value).addCallback(callb)
d = DB.get_remote_device_from_local(devid)
d.addCallback(callb)
return d
def get_servers(self):
def callb(res):
servers = [{"Id": row["id"], "Name": row["name"], "IP": row["ip"], "Port": row["port"]} for row in res]
return servers
d = DB.get_device_servers()
d.addCallback(callb)
return d
def add_server(self, ip, port, pin):
def callb(res):
server = {"Id": res["id"], "Name": res["name"], "IP": res["ip"], "Port": res["port"]}
return server
d = self.deviceService.add_server(ip, port, pin)
d.addCallback(callb)
return d
|
{
"content_hash": "de6102ba5c89834d8c5f1c85f33861cd",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 115,
"avg_line_length": 31.228571428571428,
"alnum_prop": 0.5913388228118329,
"repo_name": "tsnik/SkyNet",
"id": "6e647d625131fa7bacebbff05e8c33bdb8299185",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SkyNetServer/MainService.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "81163"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
_version = "0.3.2"
_usage="""
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage="This is ShowFeats version %s"%(_version)
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor':(0,1,1),
'Acceptor':(1,0,1),
'NegIonizable':(1,0,0),
'PosIonizable':(0,0,1),
'ZnBinder':(1,.5,.5),
'Aromatic':(1,.8,.2),
'LumpedHydrophobe':(.5,.25,0),
'Hydrophobe':(.5,.25,0),
}
def _getVectNormal(v,tol=1e-4):
if math.fabs(v.x)>tol:
res = Geometry.Point3D(v.y,-v.x,0)
elif math.fabs(v.y)>tol:
res = Geometry.Point3D(-v.y,v.x,0)
elif math.fabs(v.z)>tol:
res = Geometry.Point3D(1,0,0)
else:
raise ValueError('cannot find normal to the null vector')
res.Normalize()
return res
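# Illustrative: _getVectNormal(Geometry.Point3D(0, 0, 1)) yields (1, 0, 0),
# while (1, 0, 0) maps to (0, -1, 0); each result is a unit vector orthogonal
# to its input.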
_canonArrowhead=None
def _buildCanonArrowhead(headFrac,nSteps,aspect):
  # NB: this helper referenced an undefined RDGeometry name; it uses the
  # Geometry module imported above
  global _canonArrowhead
  startP = Geometry.Point3D(0,0,headFrac)
  _canonArrowhead=[startP]
  scale = headFrac*aspect
  baseV = Geometry.Point3D(scale,0,0)
  _canonArrowhead.append(baseV)
  twopi = 2*math.pi
  for i in range(1,nSteps):
    # step the angle by twopi/nSteps; the original multiplied by the full
    # 2*pi each iteration, which collapses every point onto (scale, 0, 0)
    v = Geometry.Point3D(scale*math.cos(i*twopi/nSteps),scale*math.sin(i*twopi/nSteps),0)
    _canonArrowhead.append(v)
_globalArrowCGO=[]
_globalSphereCGO=[]
# taken from pymol's cgo.py
BEGIN=2
END=3
TRIANGLE_FAN=6
COLOR=6
VERTEX=4
NORMAL=5
SPHERE=7
CYLINDER=9
ALPHA=25
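# NB: TRIANGLE_FAN (a begin-mode argument) and COLOR (an op code) legitimately
# share the value 6; pymol's CGO stream interprets them in different contexts.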
def _cgoArrowhead(viewer,tail,head,radius,color,label,headFrac=0.3,nSteps=10,aspect=.5):
global _globalArrowCGO
delta = head-tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head-tail
dv.Normalize()
dv *= headFrac
startP = head
normal*=headFrac*aspect
cgo = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,dv.x,dv.y,dv.z,
VERTEX,head.x+dv.x,head.y+dv.y,head.z+dv.z]
base = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,-dv.x,-dv.y,-dv.z,
VERTEX,head.x,head.y,head.z]
v = startP+normal
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
for i in range(1,nSteps):
v = FeatDirUtils.ArbAxisRotation(360./nSteps*i,delta,normal)
cgo.extend([NORMAL,v.x,v.y,v.z])
v += startP
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
base.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer,tail,head,radius,color,label,transparency=0,includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA,1-transparency])
else:
_globalArrowCGO.extend([ALPHA,1])
_globalArrowCGO.extend([CYLINDER,tail.x,tail.y,tail.z,
head.x,head.y,head.z,
radius*.10,
color[0],color[1],color[2],
color[0],color[1],color[2],
])
if includeArrowhead:
_cgoArrowhead(viewer,tail,head,radius,color,label)
def ShowMolFeats(mol,factory,viewer,radius=0.5,confId=-1,showOnly=True,
name='',transparency=0.0,colors=None,excludeTypes=[],
useFeatDirs=True,featLabel=None,dirLabel=None,includeArrowheads=True,
writeFeats=False,showMol=True,featMapFile=False):
global _globalSphereCGO
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
if showMol:
viewer.ShowMol(mol,name=name,showOnly=showOnly,confId=confId)
molFeats=factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel='%s-feats'%name
viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel=featLabel+"-dirs"
viewer.server.resetCGO(dirLabel)
for i,feat in enumerate(molFeats):
family=feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family,(.5,.5,.5))
nm = '%s(%d)'%(family,i+1)
if transparency:
_globalSphereCGO.extend([ALPHA,1-transparency])
else:
_globalSphereCGO.extend([ALPHA,1])
_globalSphereCGO.extend([COLOR,color[0],color[1],color[2],
SPHERE,pos.x,pos.y,pos.z,
radius])
if writeFeats:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print('%s\t%.3f\t%.3f\t%.3f\t1.0\t# %s'%(family,pos.x,pos.y,pos.z,aidText))
if featMapFile:
print(" family=%s pos=(%.3f,%.3f,%.3f) weight=1.0"%(family,pos.x,pos.y,pos.z),end='',file=featMapFile)
if useFeatDirs:
ps = []
if family=='Aromatic':
ps,fType = FeatDirUtils.GetAromaticFeatVects(mol.GetConformer(confId),
feat.GetAtomIds(),pos,
scale=1.0)
elif family=='Donor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetDonor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetDonor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetDonor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif family=='Acceptor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetAcceptor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetAcceptor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetAcceptor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
for tail,head in ps:
ShowArrow(viewer,tail,head,radius,color,dirLabel,
transparency=transparency,includeArrowhead=includeArrowheads)
if featMapFile:
vect = head-tail
print('dir=(%.3f,%.3f,%.3f)'%(vect.x,vect.y,vect.z),end='',file=featMapFile)
if featMapFile:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print('# %s'%(aidText),file=featMapFile)
# --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys,os,getopt
from rdkit import RDConfig
from optparse import OptionParser
parser=OptionParser(_usage,version='%prog '+_version)
parser.add_option('-x','--exclude',default='',
help='provide a list of feature names that should be excluded')
parser.add_option('-f','--fdef',default=os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef'),
help='provide the name of the feature definition (fdef) file.')
parser.add_option('--noDirs','--nodirs',dest='useDirs',default=True,action='store_false',
help='do not draw feature direction indicators')
parser.add_option('--noHeads',dest='includeArrowheads',default=True,action='store_false',
help='do not draw arrowheads on the feature direction indicators')
parser.add_option('--noClear','--noclear',dest='clearAll',default=False,action='store_true',
help='do not clear PyMol on startup')
parser.add_option('--noMols','--nomols',default=False,action='store_true',
help='do not draw the molecules')
parser.add_option('--writeFeats','--write',default=False,action='store_true',
help='print the feature information to the console')
parser.add_option('--featMapFile','--mapFile',default='',
help='save a feature map definition to the specified file')
parser.add_option('--verbose',default=False,action='store_true',
help='be verbose')
if __name__=='__main__':
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
options,args = parser.parse_args()
if len(args)<1:
    parser.error('please provide at least one sd or mol file')
try:
v = MolViewer()
  except Exception:
logger.error('Unable to connect to PyMol server.\nPlease run ~landrgr1/extern/PyMol/launch.sh to start it.')
sys.exit(1)
if options.clearAll:
v.DeleteAll()
try:
fdef = open(options.fdef,'r').read()
except IOError:
logger.error('ERROR: Could not open fdef file %s'%options.fdef)
sys.exit(1)
factory = AllChem.BuildFeatureFactoryFromString(fdef)
if options.writeFeats:
print('# Family \tX \tY \tZ \tRadius\t # Atom_ids')
if options.featMapFile:
if options.featMapFile=='-':
options.featMapFile=sys.stdout
else:
      options.featMapFile=open(options.featMapFile,'w+')
print('# Feature map generated by ShowFeats v%s'%_version, file=options.featMapFile)
print("ScoreMode=All", file=options.featMapFile)
print("DirScoreMode=Ignore", file=options.featMapFile)
print("BeginParams", file=options.featMapFile)
for family in factory.GetFeatureFamilies():
print(" family=%s width=1.0 radius=3.0"%family, file=options.featMapFile)
print("EndParams", file=options.featMapFile)
print("BeginPoints", file=options.featMapFile)
i = 1
for midx,molN in enumerate(args):
if molN!='-':
featLabel='%s_Feats'%molN
else:
featLabel='Mol%d_Feats'%(midx+1)
v.server.resetCGO(featLabel)
    # this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.sphere((0,0,0),.01,(1,0,1),featLabel)
dirLabel=featLabel+"-dirs"
v.server.resetCGO(dirLabel)
    # this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.cylinder((0,0,0),(.01,.01,.01),.01,(1,0,1),dirLabel)
if molN != '-':
try:
ms = Chem.SDMolSupplier(molN)
      except Exception:
logger.error('Problems reading input file: %s'%molN)
ms = []
else:
ms = Chem.SDMolSupplier()
ms.SetData(sys.stdin.read())
for m in ms:
nm = 'Mol_%d'%(i)
if m.HasProp('_Name'):
nm += '_'+m.GetProp('_Name')
if options.verbose:
if m.HasProp('_Name'):
print("#Molecule: %s"%m.GetProp('_Name'))
else:
print("#Molecule: %s"%nm)
ShowMolFeats(m,factory,v,transparency=0.25,excludeTypes=options.exclude,name=nm,
showOnly=False,
useFeatDirs=options.useDirs,
featLabel=featLabel,dirLabel=dirLabel,
includeArrowheads=options.includeArrowheads,
writeFeats=options.writeFeats,showMol=not options.noMols,
featMapFile=options.featMapFile)
i += 1
if not i%100:
logger.info("Done %d poses"%i)
if ms:
v.server.renderCGO(_globalSphereCGO,featLabel,1)
if options.useDirs:
v.server.renderCGO(_globalArrowCGO,dirLabel,1)
if options.featMapFile:
print("EndPoints",file=options.featMapFile)
sys.exit(0)
|
{
"content_hash": "b8a74d37101ed4de13c8700300977039",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 112,
"avg_line_length": 34.17329545454545,
"alnum_prop": 0.6117715520824674,
"repo_name": "AlexanderSavelyev/rdkit",
"id": "6cc6a2b029a52916fb72a72c77ecf8239f331597",
"size": "12126",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rdkit/Chem/Features/ShowFeats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "319851"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "6485466"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "Java",
"bytes": "244997"
},
{
"name": "JavaScript",
"bytes": "12260"
},
{
"name": "Makefile",
"bytes": "3243"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Perl",
"bytes": "2032"
},
{
"name": "Python",
"bytes": "2941698"
},
{
"name": "R",
"bytes": "474"
},
{
"name": "Shell",
"bytes": "8899"
}
],
"symlink_target": ""
}
|
"""
User Account templatetags
"""
from coffin import template
from django.template import RequestContext
from jinja2 import contextfunction, Markup
from treeio.core.rendering import render_to_string
register = template.Library()
@contextfunction
def account_notification_count(context):
"Account notification count"
request = context['request']
user = None
if request.user.username:
try:
user = request.user.get_profile()
except Exception:
pass
notifications = 0
account = None
if user:
modules = user.get_perspective().get_modules()
account = modules.filter(name='treeio.account')
if account:
notifications = user.notification_set.count()
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('account/tags/notification_count',
{'account': account, 'notifications': notifications},
response_format=response_format))
register.object(account_notification_count)
@contextfunction
def notification_setting_list(context, notification_settings, skip_group=False):
"Print a list of settings"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('account/tags/notification_setting_list',
{'settings': notification_settings, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(notification_setting_list)
|
{
"content_hash": "ee9a6202c64e270b91751e11d98b9f73",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 93,
"avg_line_length": 32.611111111111114,
"alnum_prop": 0.6547416240772288,
"repo_name": "rogeriofalcone/treeio",
"id": "f4a96a5d8928c0119fd979edc285d1ac2e31efda",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "account/templatetags/account.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
class CommandError(Exception):
def __init__(self, keyword):
self.value = "Command '%s' Not Found" % keyword
def __str__(self):
return repr(self.value)
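# Illustrative: str(CommandError('sync')) returns the repr of the message,
# i.e. "Command 'sync' Not Found" wrapped in quotes.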
|
{
"content_hash": "94fe6a0a7ea2900b10c7ab19e05bc86e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 32.4,
"alnum_prop": 0.6481481481481481,
"repo_name": "freefood89/untitled",
"id": "dce69c450e716b948558f2a0c499870d92be5ab6",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "untitled/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2269"
}
],
"symlink_target": ""
}
|
"""Checks WebKit style for JSON files."""
import json
import re
class JSONChecker(object):
"""Processes JSON lines for checking style."""
categories = set(('json/syntax', ))
def __init__(self, _, handle_style_error):
self._handle_style_error = handle_style_error
self._handle_style_error.turn_off_line_filtering()
def check(self, lines):
        try:
            json.loads('\n'.join(lines) + '\n')
        # json.JSONDecodeError subclasses ValueError, so catch it first;
        # otherwise this more specific handler would be unreachable
        except json.JSONDecodeError as error:
            self._handle_style_error(error.lineno, 'json/syntax', 5, error.msg)
        except ValueError as error:
            self._handle_style_error(
                self.line_number_from_json_exception(error), 'json/syntax', 5,
                str(error))
@staticmethod
def line_number_from_json_exception(error):
match = re.search(r': line (?P<line>\d+) column \d+', str(error))
if not match:
return 0
return int(match.group('line'))
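        # Illustrative: an error string such as
        # "Expecting ',' delimiter: line 3 column 5 (char 10)" yields 3;
        # messages without a line/column suffix fall back to 0.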
|
{
"content_hash": "cd7f9743238e6f6a7ac02ad612dfad41",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 31.225806451612904,
"alnum_prop": 0.59400826446281,
"repo_name": "ric2b/Vivaldi-browser",
"id": "41dccabb907381d175fd3483a6eaf5ee73395fa5",
"size": "2297",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chromium/third_party/blink/tools/blinkpy/style/checkers/jsonchecker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import mock
from oslo.serialization import jsonutils
from nova import exception
from nova import objects
from nova.pci import pci_stats as pci
from nova.pci import pci_whitelist
from nova import test
from nova.tests.pci import pci_fakes
fake_pci_1 = {
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1',
'request_id': None,
}
fake_pci_2 = dict(fake_pci_1, vendor_id='v2',
product_id='p2',
address='0000:00:00.2')
fake_pci_3 = dict(fake_pci_1, address='0000:00:00.3')
pci_requests = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v1'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v2'}])]
pci_requests_multiple = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v1'}]),
objects.InstancePCIRequest(count=3,
spec=[{'vendor_id': 'v2'}])]
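# With the three fake devices above, the stats collapse into two pools:
# vendor v1 with count 2 (addresses .1 and .3) and vendor v2 with count 1,
# which is what test_add_device asserts below.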
class PciDeviceStatsTestCase(test.NoDBTestCase):
def _create_fake_devs(self):
self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
map(self.pci_stats.add_device,
[self.fake_dev_1, self.fake_dev_2, self.fake_dev_3])
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
self.pci_stats = pci.PciDeviceStats()
# The following two calls need to be made before adding the devices.
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_devs()
def test_add_device(self):
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2']))
self.assertEqual(set([d['count'] for d in self.pci_stats]),
set([1, 2]))
def test_remove_device(self):
self.pci_stats.remove_device(self.fake_dev_2)
self.assertEqual(len(self.pci_stats.pools), 1)
self.assertEqual(self.pci_stats.pools[0]['count'], 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
self.assertRaises(exception.PciDevicePoolEmpty,
self.pci_stats.remove_device,
self.fake_dev_2)
    def test_json_create(self):
m = jsonutils.dumps(self.pci_stats)
new_stats = pci.PciDeviceStats(m)
self.assertEqual(len(new_stats.pools), 2)
self.assertEqual(set([d['count'] for d in new_stats]),
set([1, 2]))
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2']))
def test_support_requests(self):
self.assertEqual(self.pci_stats.support_requests(pci_requests),
True)
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(set([d['count'] for d in self.pci_stats]),
set((1, 2)))
def test_support_requests_failed(self):
self.assertEqual(
self.pci_stats.support_requests(pci_requests_multiple), False)
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(set([d['count'] for d in self.pci_stats]),
set([1, 2]))
def test_apply_requests(self):
self.pci_stats.apply_requests(pci_requests)
self.assertEqual(len(self.pci_stats.pools), 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
self.assertRaises(exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
pci_requests_multiple)
def test_consume_requests(self):
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(2, len(devs))
self.assertEqual(set(['v1', 'v2']),
set([dev['vendor_id'] for dev in devs]))
def test_consume_requests_empty(self):
devs = self.pci_stats.consume_requests([])
self.assertEqual(0, len(devs))
def test_consume_requests_failed(self):
self.assertRaises(exception.PciDeviceRequestFailed,
self.pci_stats.consume_requests,
pci_requests_multiple)
@mock.patch.object(pci_whitelist, 'get_pci_devices_filter')
class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
self.pci_stats = pci.PciDeviceStats()
self._create_whitelist()
def _create_whitelist(self):
white_list = ['{"vendor_id":"1137","product_id":"0071",'
'"address":"*:0a:00.*","physical_network":"physnet1"}',
'{"vendor_id":"1137","product_id":"0072"}']
self.pci_wlist = pci_whitelist.PciHostDevicesWhiteList(white_list)
def _create_pci_devices(self):
self.pci_tagged_devices = []
for dev in range(4):
pci_dev = {'compute_node_id': 1,
'address': '0000:0a:00.%d' % dev,
'vendor_id': '1137',
'product_id': '0071',
'status': 'available',
'request_id': None}
self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev))
self.pci_untagged_devices = []
for dev in range(3):
pci_dev = {'compute_node_id': 1,
'address': '0000:0b:00.%d' % dev,
'vendor_id': '1137',
'product_id': '0072',
'status': 'available',
'request_id': None}
self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev))
map(self.pci_stats.add_device, self.pci_tagged_devices)
map(self.pci_stats.add_device, self.pci_untagged_devices)
def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
self.assertEqual(vendor_id, pool['vendor_id'])
self.assertEqual(product_id, pool['product_id'])
self.assertEqual(count, pool['count'])
if tags:
for k, v in tags.iteritems():
self.assertEqual(v, pool[k])
def _assertPools(self):
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
# they are also part of the keys. In this test class, we have
# two pools with the second one having the tag 'physical_network'
# and the value 'physnet1'
self.assertEqual(2, len(self.pci_stats.pools))
self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
len(self.pci_untagged_devices))
self.assertEqual(self.pci_untagged_devices,
self.pci_stats.pools[0]['devices'])
self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
len(self.pci_tagged_devices),
physical_network='physnet1')
self.assertEqual(self.pci_tagged_devices,
self.pci_stats.pools[1]['devices'])
def test_add_devices(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
self._assertPools()
    def test_consume_requests(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
pci_requests = [objects.InstancePCIRequest(count=1,
spec=[{'physical_network': 'physnet1'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '1137',
'product_id': '0072'}])]
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(2, len(devs))
self.assertEqual(set(['0071', '0072']),
set([dev['product_id'] for dev in devs]))
self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
physical_network='physnet1')
def test_add_device_no_devspec(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
pci_dev = {'compute_node_id': 1,
'address': '0000:0c:00.1',
'vendor_id': '2345',
'product_id': '0172',
'status': 'available',
'request_id': None}
pci_dev_obj = objects.PciDevice.create(pci_dev)
self.pci_stats.add_device(pci_dev_obj)
# There should be no change
self.assertIsNone(
self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
self._assertPools()
def test_remove_device_no_devspec(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
pci_dev = {'compute_node_id': 1,
'address': '0000:0c:00.1',
'vendor_id': '2345',
'product_id': '0172',
'status': 'available',
'request_id': None}
pci_dev_obj = objects.PciDevice.create(pci_dev)
self.pci_stats.remove_device(pci_dev_obj)
# There should be no change
self.assertIsNone(
self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
self._assertPools()
def test_remove_device(self, mock_get_dev_filter):
mock_get_dev_filter.return_value = self.pci_wlist
self._create_pci_devices()
dev1 = self.pci_untagged_devices.pop()
self.pci_stats.remove_device(dev1)
dev2 = self.pci_tagged_devices.pop()
self.pci_stats.remove_device(dev2)
self._assertPools()
|
{
"content_hash": "6d7e2ef21a843f8aa5b9c85869a2f6c3",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 40.833333333333336,
"alnum_prop": 0.567444120505345,
"repo_name": "angdraug/nova",
"id": "2f830c6cfa18e26e357a9fb3fc4543711e95ff31",
"size": "10930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/pci/test_pci_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14991706"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
}
|
from django.test import TestCase, LiveServerTestCase
from django.test import Client
import datetime
import time
from django.contrib.auth.models import User
from registration.models import RegistrationProfile
from django.core.urlresolvers import reverse
from selenium import webdriver
import factory
import factory.django
from installer_config.models import EnvironmentProfile
import os
TEST_DOMAIN_NAME = "http://127.0.0.1:8081"
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
username = factory.Sequence(lambda n: u'username%d' % n)
class EnvironmentProfileFactory(factory.django.DjangoModelFactory):
class Meta:
model = EnvironmentProfile
title = factory.Sequence(lambda n: u'EnvironmentalProfile%d' % n)
user = factory.SubFactory(UserFactory)
class CreateUserTestCase(TestCase):
def setUp(self):
self.user1 = UserFactory()
self.user1.save()
def test_user(self):
"""Test to see if user is being created."""
self.user2 = UserFactory()
self.user2.save()
self.assertEqual(User.objects.count(), 2)
self.assertEqual(User.objects.get(username=self.user1.username), self.user1)
self.assertEqual(User.objects.get(username=self.user2.username), self.user2)
class RegistrationTestCase(TestCase):
def setUp(self):
self.user = {}
self.user['user1'] = User.objects.create_user(username='username1',
password='secret')
self.client1 = Client()
def test_login_unauthorized(self):
"""Test that an unauthorized user cannot get in."""
response = self.client1.post('/accounts/login/',
{'username': 'hacker', 'password': 'badpass'})
self.assertEqual(response.status_code, 200)
self.assertIn('Please enter a correct username and password.', response.content)
is_logged_in = self.client1.login(username='hacker', password='badpass')
self.assertFalse(is_logged_in)
def test_login_authorized(self):
"""Test that an authorized user can get in."""
response = self.client1.post('/accounts/login/',
{'username': self.user['user1'].username,
'password': 'secret'})
self.assertEqual(response.status_code, 302)
is_logged_in = self.client1.login(username=self.user['user1'].username,
password='secret')
self.assertTrue(is_logged_in)
def test_logout(self):
"""Test that an authorized user can log out."""
is_logged_in = self.client1.login(username=self.user['user1'].username,
password='secret')
self.assertTrue(is_logged_in)
response = self.client1.post('/accounts/logout/')
# Goes to an intermediate page that the user never sees before
# going back to the home page
self.assertIn('You are now logged out.', response.content)
class UserProfileDetailTestCase(LiveServerTestCase):
"""This class is for testing user login form"""
def setUp(self):
self.driver = webdriver.Firefox()
        super(UserProfileDetailTestCase, self).setUp()
self.user = User(username='user1')
self.user.set_password('pass')
self.user.is_active = True
def tearDown(self):
self.driver.refresh()
self.driver.quit()
super(UserProfileDetailTestCase, self).tearDown()
def test_goto_homepage(self):
self.driver.get(self.live_server_url)
self.assertIn("ezPy", self.driver.title)
def login_user(self):
"""login user"""
self.driver.get(TEST_DOMAIN_NAME + reverse('auth_login'))
username_field = self.driver.find_element_by_id('id_username')
username_field.send_keys('user1')
password_field = self.driver.find_element_by_id('id_password')
password_field.send_keys('pass')
form = self.driver.find_element_by_tag_name('form')
form.submit()
def test_login_authorized(self):
"""Test that a registered user can get in."""
self.user.save()
self.login_user()
self.assertIn(self.user.username, self.driver.page_source)
    def test_login_unregistered(self):
        """Test that an unregistered user cannot get in."""
        self.unregistered_user = User(username='unregistered')
        self.unregistered_user.set_password('pass')
        self.unregistered_user.is_active = False
        self.assertNotIn(self.unregistered_user.username, self.driver.page_source)
|
{
"content_hash": "cf504d8535f18e66bfa22db1c3096eb7",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 88,
"avg_line_length": 36.1,
"alnum_prop": 0.6394630300447475,
"repo_name": "alibulota/Package_Installer",
"id": "22b5e0e20d856f4b2649fd6619d4312b7b121683",
"size": "4693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "installer/installer/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7514"
},
{
"name": "HTML",
"bytes": "15079"
},
{
"name": "Python",
"bytes": "46760"
}
],
"symlink_target": ""
}
|
# [START pubsub_v1_generated_Publisher_ListTopicSnapshots_async]
from google import pubsub_v1
async def sample_list_topic_snapshots():
# Create a client
client = pubsub_v1.PublisherAsyncClient()
# Initialize request argument(s)
request = pubsub_v1.ListTopicSnapshotsRequest(
topic="topic_value",
)
# Make the request
    page_result = await client.list_topic_snapshots(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END pubsub_v1_generated_Publisher_ListTopicSnapshots_async]
|
{
"content_hash": "62d274ba125ded993836afc1b739cbba",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 25.25,
"alnum_prop": 0.7049504950495049,
"repo_name": "googleapis/python-pubsub",
"id": "85b983f5d0e85c3772b293b490c179a839b14e3d",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/pubsub_v1_generated_publisher_list_topic_snapshots_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1857078"
},
{
"name": "Shell",
"bytes": "34068"
}
],
"symlink_target": ""
}
|
"""
A listening logging server for MiStaMover. This logs to a file (only).
The main object instantiated in the calling code is LoggerServer,
which listens for messages on a port and logs them. The classes
SingleFileLogger and MultiFileLogger are helpers that actually
log these to the relevant files.
"""
import os
import logging
import logging.handlers  # for DEFAULT_TCP_LOGGING_PORT used below
import LogReceiver
class SingleFileLogger(object):
"""
A logger class which logs all messages it receives to one file
"""
def __init__(self, config, tag, formatter = None):
"""
Inputs: config should be a GlobalConfig / DatasetConfig object
tag is a short tag name which will be used in the log filename
"""
self.base_dir = config["logging"]["base_log_dir"]
        if formatter is None:
formatter = logging.Formatter(fmt="[%(asctime)s] %(name)s(%(levelname)s) %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
self.formatter = formatter
self.logger = self.makeLogger(tag)
def makeLogger(self, tag):
"""
Takes a tag (part of filename), and returns a logger object
from the logging module
"""
pathname = self.getPath(tag)
fh = logging.FileHandler(pathname)
fh.setFormatter(self.formatter)
#
# If MiStaMover has already initialised the client before forking the
# daemon, it will have done a getLogger() call already and added
# the SocketHandler (client code) as a handler. getLogger() will
# return a cached copy if called again with the same logger name, and
# we do NOT want to reuse this logger as the server logger, as it will
# end up with a spurious additional client handler as well as the
# server handler that we actually want, causing nasty infinite loops.
# So force a different name by prepending "__SERVER__".
#
logger = logging.getLogger("__SERVER__" + tag)
logger.addHandler(fh)
return logger
def getPath(self, tag):
"""
Get full path for log file with given tag
"""
filename = tag + ".log"
return os.path.join(self.base_dir, filename)
def handle(self, record):
"""
Handle a record (delegates it to the underlying logger object)
"""
return self.logger.handle(record)
class MultiFileLogger(object):
"""
A class whose 'handle' method will return the handle method of
a different file logger class for each tag, so that different
tags get logged to different files.
The SingleFileLogger objects are created on the fly in handle()
when new tags appear that are have not been seen yet, so there is
no need to specify the set of possible files on instantiation
e.g. if a message appears with tag "foo" it will start logging to
foo.log
"""
def __init__(self, config):
"""
Arg is GlobalConfig / DatasetConfig object
"""
self.config = config
# a common formatter for all files - no need for the name in this case
self.formatter = logging.Formatter(fmt="[%(asctime)s] %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
self.loggers = {}
def handle(self, record):
"""
Handles a record by calling the necessary SingleFileLogger,
creating it if not already cached.
"""
tag = record.name
if tag not in self.loggers:
self.loggers[tag] = SingleFileLogger(self.config, tag,
formatter = self.formatter)
return self.loggers[tag].handle(record)
class LoggerServer(object):
"""
A top level logger server, which will receive messages on a port
and pass them to either SingleFileLogger or MultiFileLogger
"""
def __init__(self, config, multi=True):
"""
instantiate with GlobalConfig / DatasetConfig object
set "multi" to True/False depending whether single file or
multi file logging is wanted
"""
self.port = config['logging']['port'] \
or logging.handlers.DEFAULT_TCP_LOGGING_PORT
self.host = 'localhost'
self.config = config
self.multi = multi
def serve(self):
"""
Main loop.
"""
if self.multi:
logger = MultiFileLogger(self.config)
else:
logger = SingleFileLogger(self.config, "mistamover")
tcpserver = LogReceiver.LogRecordSocketReceiver(
logger, self.host, self.port)
tcpserver.serve_until_stopped()
if __name__ == '__main__':
import sys
from TestConfig import gc
server = LoggerServer(gc, multi=False)
os.system("touch ../log/mistamover.log")
pid = os.fork()
if pid == 0:
os.execvp("tail", ["tail", "-f", "../log/mistamover.log"])
sys.exit()
try:
server.serve()
except KeyboardInterrupt:
os.kill(pid, 9)
|
{
"content_hash": "37557e4955a64dbde4b3fe7da03adf4b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 98,
"avg_line_length": 31.8944099378882,
"alnum_prop": 0.6001947419668938,
"repo_name": "cedadev/mistamover",
"id": "fb7d5951b74b58fc40239ead744391d02b8e6efe",
"size": "5340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/LoggerServer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "268174"
},
{
"name": "Shell",
"bytes": "424"
}
],
"symlink_target": ""
}
|
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import sys
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import auto_control_deps_utils as acd_utils
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency (roughly
# tf.function->wrap_function->dataset->autograph->tf.function).
# TODO(b/133251390): Use a regular import.
wrap_function = lazy_loader.LazyLoader(
"wrap_function", globals(),
"tensorflow.python.eager.wrap_function")
# TODO(mdan): Create a public API for this.
autograph_ctx = lazy_loader.LazyLoader(
"autograph_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
"autograph", globals(),
"tensorflow.python.autograph.impl.api")
ops.NotDifferentiable("ReduceDataset")
# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
"""Represents a potentially large set of elements.
The `tf.data.Dataset` API supports writing descriptive and efficient input
pipelines. `Dataset` usage follows a common pattern:
1. Create a source dataset from your input data.
2. Apply dataset transformations to preprocess the data.
3. Iterate over the dataset and process the elements.
Iteration happens in a streaming fashion, so the full dataset does not need to
fit into memory.
Source Datasets:
The simplest way to create a dataset is to create it from a python `list`:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset:
... print(element)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)
tf.Tensor(3, shape=(), dtype=int32)
To process lines from files, use `tf.data.TextLineDataset`:
>>> dataset = tf.data.TextLineDataset(["file1.txt", "file2.txt"])
To process records written in the `TFRecord` format, use `TFRecordDataset`:
>>> dataset = tf.data.TFRecordDataset(["file1.tfrecords", "file2.tfrecords"])
To create a dataset of all files matching a pattern, use
`tf.data.Dataset.list_files`:
>>> dataset = tf.data.Dataset.list_files("/path/*.txt") # doctest: +SKIP
See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator`
for more ways to create datasets.
Transformations:
Once you have a dataset, you can apply transformations to prepare the data for
your model:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.map(lambda x: x*2)
>>> list(dataset.as_numpy_iterator())
[2, 4, 6]
Common Terms:
**Element**: A single output from calling `next()` on a dataset iterator.
Elements may be nested structures containing multiple components. For
example, the element `(1, (3, "apple"))` has one tuple nested in another
tuple. The components are `1`, `3`, and `"apple"`.
**Component**: The leaf in the nested structure of an element.
Supported types:
Elements can be nested structures of tuples, named tuples, and dictionaries.
Element components can be of any type representable by `tf.TypeSpec`,
including `tf.Tensor`, `tf.data.Dataset`, `tf.SparseTensor`,
`tf.RaggedTensor`, and `tf.TensorArray`.
>>> a = 1 # Integer element
>>> b = 2.0 # Float element
>>> c = (1, 2) # Tuple element with 2 components
>>> d = {"a": (2, 2), "b": 3} # Dict element with 3 components
>>> Point = collections.namedtuple("Point", ["x", "y"]) # doctest: +SKIP
>>> e = Point(1, 2) # Named tuple # doctest: +SKIP
>>> f = tf.data.Dataset.range(10) # Dataset element
"""
def __init__(self, variant_tensor):
"""Creates a DatasetV2 object.
This constructor is one difference between DatasetV1 and DatasetV2:
DatasetV1 takes no arguments in its constructor, whereas DatasetV2 expects
subclasses to create a variant_tensor and pass it to the super() call.
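A sketch of the expected subclass pattern (`_make_variant` is a
hypothetical helper standing in for whatever op builds the dataset's
DT_VARIANT tensor):
```python
class MyDataset(DatasetV2):
  def __init__(self):
    variant_tensor = self._make_variant()
    super(MyDataset, self).__init__(variant_tensor)
```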
Args:
variant_tensor: A DT_VARIANT tensor that represents the dataset.
"""
self._variant_tensor_attr = variant_tensor
weak_self = weakref.proxy(self)
self._variant_tracker = self._track_trackable(
_VariantTracker(
self._variant_tensor,
# _trace_variant_creation only works when executing eagerly, so we
# don't want to run it immediately. We also want the _VariantTracker
# to have a weak reference to the Dataset to avoid creating
# reference cycles and making work for the garbage collector.
lambda: weak_self._trace_variant_creation()()), # pylint: disable=unnecessary-lambda,protected-access
name="_variant_tracker")
self._graph_attr = ops.get_default_graph()
@property
def _variant_tensor(self):
return self._variant_tensor_attr
@_variant_tensor.setter
def _variant_tensor(self, _):
raise ValueError("The _variant_tensor property is read-only")
@deprecation.deprecated_args(None, "Use external_state_policy instead",
"allow_stateful")
def _as_serialized_graph(
self,
allow_stateful=None,
strip_device_assignment=None,
external_state_policy=distribute_options.ExternalStatePolicy.WARN):
"""Produces serialized graph representation of the dataset.
Args:
allow_stateful: If true, we allow stateful ops to be present in the graph
def. In that case, the state in these ops would be thrown away.
strip_device_assignment: If true, non-local (i.e. job and task) device
assignment is stripped from ops in the serialized graph.
external_state_policy: The ExternalStatePolicy enum that determines how we
handle input pipelines that depend on external state. By default, it is
set to WARN.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph.
"""
if external_state_policy:
  policy = external_state_policy.value
return gen_dataset_ops.dataset_to_graph_v2(
self._variant_tensor,
external_state_policy=policy,
strip_device_assignment=strip_device_assignment)
if strip_device_assignment:
return gen_dataset_ops.dataset_to_graph(
self._variant_tensor,
allow_stateful=allow_stateful,
strip_device_assignment=strip_device_assignment)
return gen_dataset_ops.dataset_to_graph(
self._variant_tensor, allow_stateful=allow_stateful)
def _trace_variant_creation(self):
"""Traces a function which outputs a variant `tf.Tensor` for this dataset.
Note that creating this function involves evaluating an op, and is currently
only supported when executing eagerly.
Returns:
A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
"""
variant = self._variant_tensor
if not isinstance(variant, ops.EagerTensor):
raise NotImplementedError(
"Can only export Datasets which were created executing eagerly. "
"Please file a feature request if this is important to you.")
with context.eager_mode(), ops.device("CPU"):
# pylint: disable=protected-access
graph_def = graph_pb2.GraphDef().FromString(
self._as_serialized_graph(external_state_policy=distribute_options
.ExternalStatePolicy.FAIL).numpy())
output_node_name = None
for node in graph_def.node:
if node.op == "_Retval":
if output_node_name is not None:
raise AssertionError(
"Found multiple return values from the dataset's graph, expected "
"only one.")
output_node_name, = node.input
if output_node_name is None:
raise AssertionError("Could not find the dataset's output node.")
# Add functions used in this Dataset to the function's graph, since they
# need to follow it around (and for example be added to a SavedModel which
# references the dataset).
variant_function = wrap_function.function_from_graph_def(
graph_def, inputs=[], outputs=output_node_name + ":0")
for used_function in self._functions():
used_function.function.add_to_graph(variant_function.graph)
return variant_function
@abc.abstractmethod
def _inputs(self):
"""Returns a list of the input datasets of the dataset."""
raise NotImplementedError("Dataset._inputs")
@property
def _graph(self):
return self._graph_attr
@_graph.setter
def _graph(self, _):
raise ValueError("The _graph property is read-only")
def _has_captured_ref(self):
"""Whether this dataset uses a function that captures ref variables.
Returns:
A boolean, which if true indicates that the dataset or one of its inputs
uses a function that captures ref variables.
"""
if context.executing_eagerly():
# RefVariables are not supported in eager mode
return False
def is_tensor_or_parent_ref(tensor):
if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access
return True
# If the captured tensor is an eager tensor, we cannot trace its inputs.
if isinstance(tensor, ops._EagerTensorBase): # pylint: disable=protected-access
return False
return any(is_tensor_or_parent_ref(x) for x in tensor.op.inputs)
for fn in self._functions():
if any(is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs):
return True
return any(
[input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access
# TODO(jsimsa): Change this to be the transitive closure of functions used
# by this dataset and its inputs.
def _functions(self):
"""Returns a list of functions associated with this dataset.
Returns:
A list of `StructuredFunctionWrapper` objects.
"""
return []
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
def _apply_options(self):
"""Apply options, such as optimization configuration, to the dataset."""
dataset = self
options = self.options()
# (1) Apply threading options
if options.experimental_threading is not None:
t_options = options.experimental_threading
if t_options.max_intra_op_parallelism is not None:
dataset = _MaxIntraOpParallelismDataset(
dataset, t_options.max_intra_op_parallelism)
if t_options.private_threadpool_size is not None:
dataset = _PrivateThreadPoolDataset(dataset,
t_options.private_threadpool_size)
# (2) Apply graph rewrite options
# pylint: disable=protected-access
graph_rewrites = options._graph_rewrites()
graph_rewrite_configs = options._graph_rewrite_configs()
# pylint: enable=protected-access
if graph_rewrites:
if self._has_captured_ref():
warnings.warn(
"tf.data graph rewrites are not compatible with tf.Variable. "
"The following rewrites will be disabled: %s. To enable "
"rewrites, use resource variables instead by calling "
"`tf.enable_resource_variables()` at the start of the program." %
", ".join(graph_rewrites))
else:
dataset = _OptimizeDataset(dataset, graph_rewrites,
graph_rewrite_configs)
# (3) Apply autotune options
autotune, algorithm, cpu_budget = options._autotune_settings() # pylint: disable=protected-access
if autotune:
dataset = _ModelDataset(dataset, algorithm, cpu_budget)
# (4) Apply stats aggregator options
if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long
dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access
dataset, options.experimental_stats.aggregator,
options.experimental_stats.prefix,
options.experimental_stats.counter_prefix)
return dataset
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and can only
be used in eager mode or inside of `tf.function`.
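For example, when executing eagerly:
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> iterator = iter(dataset)
>>> print(next(iterator))
tf.Tensor(1, shape=(), dtype=int32)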
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If not inside of tf.function and not executing eagerly.
"""
if context.executing_eagerly() or ops.inside_function():
return iterator_ops.OwnedIterator(self)
else:
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
@abc.abstractproperty
def element_spec(self):
"""The type specification of an element of this dataset.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset.element_spec
TensorSpec(shape=(), dtype=tf.int32, name=None)
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this dataset and specifying the type of individual components.
"""
raise NotImplementedError("Dataset.element_spec")
def __repr__(self):
output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, get_legacy_output_types(self))
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
def as_numpy_iterator(self):
"""Returns an iterator which converts all elements of the dataset to numpy.
Use `as_numpy_iterator` to inspect the content of your dataset. To see
element shapes and types, print dataset elements directly instead of using
`as_numpy_iterator`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset:
... print(element)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)
tf.Tensor(3, shape=(), dtype=int32)
This method requires that you are running in eager mode and the dataset's
element_spec contains only `TensorSpec` components.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> for element in dataset.as_numpy_iterator():
... print(element)
1
2
3
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> print(list(dataset.as_numpy_iterator()))
[1, 2, 3]
`as_numpy_iterator()` will preserve the nested structure of dataset
elements.
>>> dataset = tf.data.Dataset.from_tensor_slices({'a': ([1, 2], [3, 4]),
... 'b': [5, 6]})
>>> list(dataset.as_numpy_iterator()) == [{'a': (1, 3), 'b': 5},
... {'a': (2, 4), 'b': 6}]
True
Returns:
An iterable over the elements of the dataset, with their tensors converted
to numpy arrays.
Raises:
TypeError: if an element contains a non-`Tensor` value.
RuntimeError: if eager execution is not enabled.
"""
if not context.executing_eagerly():
raise RuntimeError("as_numpy_iterator() is not supported while tracing "
"functions")
for component_spec in nest.flatten(self.element_spec):
if not isinstance(component_spec, tensor_spec.TensorSpec):
raise TypeError(
"Dataset.as_numpy_iterator() does not support datasets containing "
+ str(component_spec.value_type))
return _NumpyIterator(self)
@property
def _flat_shapes(self):
"""Returns a list `tf.TensorShapes`s for the element tensor representation.
Returns:
A list `tf.TensorShapes`s for the element tensor representation.
"""
return structure.get_flat_tensor_shapes(self.element_spec)
@property
def _flat_types(self):
"""Returns a list `tf.DType`s for the element tensor representation.
Returns:
A list `tf.DType`s for the element tensor representation.
"""
return structure.get_flat_tensor_types(self.element_spec)
@property
def _flat_structure(self):
"""Helper for setting `output_shapes` and `output_types` attrs of an op.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element. This helper
function generates these attrs as a keyword argument dictionary, allowing
`Dataset._variant_tensor` implementations to pass `**self._flat_structure`
to the op constructor.
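For example, a call might look like (a sketch; `some_dataset_op` is a
hypothetical generated op wrapper):
`gen_dataset_ops.some_dataset_op(input_variant, **self._flat_structure)`.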
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
"""
return {
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
@property
def _type_spec(self):
return DatasetSpec(self.element_spec)
@staticmethod
def from_tensors(tensors):
"""Creates a `Dataset` with a single element, comprising the given tensors.
`from_tensors` produces a dataset containing only a single element. To slice
the input tensor into multiple elements, use `from_tensor_slices` instead.
>>> dataset = tf.data.Dataset.from_tensors([1, 2, 3])
>>> list(dataset.as_numpy_iterator())
[array([1, 2, 3], dtype=int32)]
>>> dataset = tf.data.Dataset.from_tensors(([1, 2, 3], 'A'))
>>> list(dataset.as_numpy_iterator())
[(array([1, 2, 3], dtype=int32), b'A')]
>>> # You can use `from_tensors` to produce a dataset which repeats
>>> # the same example many times.
>>> example = tf.constant([1,2,3])
>>> dataset = tf.data.Dataset.from_tensors(example).repeat(2)
>>> list(dataset.as_numpy_iterator())
[array([1, 2, 3], dtype=int32), array([1, 2, 3], dtype=int32)]
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this
guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).
Args:
tensors: A dataset element.
Returns:
Dataset: A `Dataset`.
"""
return TensorDataset(tensors)
@staticmethod
def from_tensor_slices(tensors):
"""Creates a `Dataset` whose elements are slices of the given tensors.
The given tensors are sliced along their first dimension. This operation
preserves the structure of the input tensors, removing the first dimension
of each tensor and using it as the dataset dimension. All input tensors
must have the same size in their first dimensions.
>>> # Slicing a 1D tensor produces scalar tensor elements.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> list(dataset.as_numpy_iterator())
[1, 2, 3]
>>> # Slicing a 2D tensor produces 1D tensor elements.
>>> dataset = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])
>>> list(dataset.as_numpy_iterator())
[array([1, 2], dtype=int32), array([3, 4], dtype=int32)]
>>> # Slicing a tuple of 1D tensors produces tuple elements containing
>>> # scalar tensors.
>>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))
>>> list(dataset.as_numpy_iterator())
[(1, 3, 5), (2, 4, 6)]
>>> # Dictionary structure is also preserved.
>>> dataset = tf.data.Dataset.from_tensor_slices({"a": [1, 2], "b": [3, 4]})
>>> list(dataset.as_numpy_iterator()) == [{'a': 1, 'b': 3},
... {'a': 2, 'b': 4}]
True
>>> # Two tensors can be combined into one Dataset object.
>>> features = tf.constant([[1, 3], [2, 1], [3, 3]]) # ==> 3x2 tensor
>>> labels = tf.constant(['A', 'B', 'A']) # ==> 3-element vector
>>> dataset = Dataset.from_tensor_slices((features, labels))
>>> # Both the features and the labels tensors can be converted
>>> # to a Dataset object separately and combined after.
>>> features_dataset = Dataset.from_tensor_slices(features)
>>> labels_dataset = Dataset.from_tensor_slices(labels)
>>> dataset = Dataset.zip((features_dataset, labels_dataset))
>>> # A batched feature and label set can be converted to a Dataset
>>> # in similar fashion.
>>> batched_features = tf.constant([[[1, 3], [2, 3]],
... [[2, 1], [1, 2]],
... [[3, 3], [3, 2]]], shape=(3, 2, 2))
>>> batched_labels = tf.constant([['A', 'A'],
... ['B', 'B'],
... ['A', 'B']], shape=(3, 2, 1))
>>> dataset = Dataset.from_tensor_slices((batched_features, batched_labels))
>>> for element in dataset.as_numpy_iterator():
... print(element)
(array([[1, 3],
[2, 3]], dtype=int32), array([[b'A'],
[b'A']], dtype=object))
(array([[2, 1],
[1, 2]], dtype=int32), array([[b'B'],
[b'B']], dtype=object))
(array([[3, 3],
[3, 2]], dtype=int32), array([[b'A'],
[b'B']], dtype=object))
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this guide](
https://tensorflow.org/guide/data#consuming_numpy_arrays).
Args:
tensors: A dataset element, with each component having the same size in
the first dimension.
Returns:
Dataset: A `Dataset`.
"""
return TensorSliceDataset(tensors)
class _GeneratorState(object):
"""Stores outstanding iterators created from a Python generator.
This class keeps track of potentially multiple iterators that may have
been created from a generator, e.g. in the case that the dataset is
repeated, or nested within a parallel computation.
"""
def __init__(self, generator):
self._generator = generator
self._lock = threading.Lock()
self._next_id = 0 # GUARDED_BY(self._lock)
self._args = {}
self._iterators = {}
def get_next_id(self, *args):
with self._lock:
ret = self._next_id
self._next_id += 1
self._args[ret] = args
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(ret, dtype=np.int64)
def get_iterator(self, iterator_id):
try:
return self._iterators[iterator_id]
except KeyError:
iterator = iter(self._generator(*self._args.pop(iterator_id)))
self._iterators[iterator_id] = iterator
return iterator
def iterator_completed(self, iterator_id):
del self._iterators[iterator_id]
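# Lifecycle sketch (hypothetical ID values): `get_next_id()` hands out
# 0, 1, ...; `get_iterator(0)` lazily creates `iter(generator(*args))` for
# ID 0 on first use; `iterator_completed(0)` then drops the host-side state
# once that iterator is exhausted.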
@staticmethod
def from_generator(generator, output_types, output_shapes=None, args=None):
"""Creates a `Dataset` whose elements are generated by `generator`.
The `generator` argument must be a callable object that returns
an object that supports the `iter()` protocol (e.g. a generator function).
The elements generated by `generator` must be compatible with the given
`output_types` and (optional) `output_shapes` arguments.
>>> import itertools
>>>
>>> def gen():
... for i in itertools.count(1):
... yield (i, [1] * i)
>>>
>>> dataset = tf.data.Dataset.from_generator(
... gen,
... (tf.int64, tf.int64),
... (tf.TensorShape([]), tf.TensorShape([None])))
>>>
>>> list(dataset.take(3).as_numpy_iterator())
[(1, array([1])), (2, array([1, 1])), (3, array([1, 1, 1]))]
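The optional `args` argument parameterizes the generator; the values in
`args` are evaluated and passed to `generator` as NumPy arrays. A sketch:
>>> def gen(limit):
... for i in range(limit):
... yield i
>>>
>>> dataset = tf.data.Dataset.from_generator(
... gen, tf.int64, tf.TensorShape([]), args=(3,))
>>> list(dataset.as_numpy_iterator())
[0, 1, 2]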
Note: The current implementation of `Dataset.from_generator()` uses
`tf.numpy_function` and inherits the same constraints. In particular, it
requires the `Dataset`- and `Iterator`-related operations to be placed
on a device in the same process as the Python program that called
`Dataset.from_generator()`. The body of `generator` will not be
serialized in a `GraphDef`, and you should not use this method if you
need to serialize your model and restore it in a different environment.
Note: If `generator` depends on mutable global variables or other external
state, be aware that the runtime may invoke `generator` multiple times
(in order to support repeating the `Dataset`) and at any time
between the call to `Dataset.from_generator()` and the production of the
first element from the generator. Mutating global variables or external
state can cause undefined behavior, and we recommend that you explicitly
cache any external state in `generator` before calling
`Dataset.from_generator()`.
Args:
generator: A callable object that returns an object that supports the
`iter()` protocol. If `args` is not specified, `generator` must take no
arguments; otherwise it must take as many arguments as there are values
in `args`.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element yielded by `generator`.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element yielded by `generator`.
args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
and passed to `generator` as NumPy-array arguments.
Returns:
Dataset: A `Dataset`.
"""
if not callable(generator):
raise TypeError("`generator` must be callable.")
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if args is None:
args = ()
else:
args = tuple(ops.convert_n_to_tensor(args, name="args"))
flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
flattened_shapes = nest.flatten(output_shapes)
generator_state = DatasetV2._GeneratorState(generator)
def get_iterator_id_fn(unused_dummy):
"""Creates a unique `iterator_id` for each pass over the dataset.
The returned `iterator_id` disambiguates between multiple concurrently
existing iterators.
Args:
unused_dummy: Ignored value.
Returns:
A `tf.int64` tensor whose value uniquely identifies an iterator in
`generator_state`.
"""
return script_ops.numpy_function(generator_state.get_next_id, args,
dtypes.int64)
def generator_next_fn(iterator_id_t):
"""Generates the next element from iterator with ID `iterator_id_t`.
We map this function across an infinite repetition of the
`iterator_id_t`, and raise `StopIteration` to terminate the iteration.
Args:
iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
iterator in `generator_state` from which to generate an element.
Returns:
The next element to generate from the iterator.
"""
def generator_py_func(iterator_id):
"""A `py_func` that will be called to invoke the iterator."""
# `next()` raises `StopIteration` when there are no more
# elements remaining to be generated.
values = next(generator_state.get_iterator(iterator_id))
# Use the same _convert function from the py_func() implementation to
# convert the returned values to arrays early, so that we can inspect
# their values.
try:
flattened_values = nest.flatten_up_to(output_types, values)
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that did not match the expected "
"structure. The expected structure was %s, but the yielded "
"element was %s." % (output_types, values)), sys.exc_info()[2])
ret_arrays = []
for ret, dtype in zip(flattened_values, flattened_types):
try:
ret_arrays.append(script_ops.FuncRegistry._convert( # pylint: disable=protected-access
ret, dtype=dtype.as_numpy_dtype))
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that could not be converted to "
"the expected type. The expected type was %s, but the yielded "
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
# Additional type and shape checking to ensure that the components
# of the generated element match the `output_types` and `output_shapes`
# arguments.
for (ret_array, expected_dtype, expected_shape) in zip(
ret_arrays, flattened_types, flattened_shapes):
if ret_array.dtype != expected_dtype.as_numpy_dtype:
raise TypeError(
"`generator` yielded an element of type %s where an element "
"of type %s was expected." % (ret_array.dtype,
expected_dtype.as_numpy_dtype))
if not expected_shape.is_compatible_with(ret_array.shape):
raise ValueError(
"`generator` yielded an element of shape %s where an element "
"of shape %s was expected." % (ret_array.shape, expected_shape))
return ret_arrays
flat_values = script_ops.numpy_function(generator_py_func,
[iterator_id_t], flattened_types)
# The `py_func()` op drops the inferred shapes, so we add them back in
# here.
if output_shapes is not None:
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
def finalize_fn(iterator_id_t):
"""Releases host-side state for the iterator with ID `iterator_id_t`."""
def finalize_py_func(iterator_id):
generator_state.iterator_completed(iterator_id)
# We return a dummy value so that the `finalize_fn` has a valid
# signature.
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(0, dtype=np.int64)
return script_ops.numpy_function(finalize_py_func, [iterator_id_t],
dtypes.int64)
# This function associates each traversal of `generator` with a unique
# iterator ID.
def flat_map_fn(dummy_arg):
# The `get_iterator_id_fn` gets a unique ID for the current instance of
# the generator.
# The `generator_next_fn` gets the next element from the iterator with the
# given ID, and raises StopIteration when that iterator contains no
# more elements.
return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
finalize_fn)
# A single-element dataset containing a dummy value. Each time it is
# evaluated, `get_iterator_id_fn` (applied via `flat_map_fn` below)
# generates a fresh int64 ID, unique for the returned dataset, that
# identifies the appropriate Python state, which is encapsulated in
# `generator_state`.
dummy = 0
id_dataset = Dataset.from_tensors(dummy)
# A dataset that contains all of the elements generated by a
# single iterator created from `generator`, identified by the
# iterator ID contained in `id_dataset`. Lifting the iteration
# into a flat_map here enables multiple repetitions and/or nested
# versions of the returned dataset to be created, because it forces
# the generation of a new ID for each version.
return id_dataset.flat_map(flat_map_fn)
@staticmethod
def range(*args, **kwargs):
"""Creates a `Dataset` of a step-separated range of values.
>>> list(Dataset.range(5).as_numpy_iterator())
[0, 1, 2, 3, 4]
>>> list(Dataset.range(2, 5).as_numpy_iterator())
[2, 3, 4]
>>> list(Dataset.range(1, 5, 2).as_numpy_iterator())
[1, 3]
>>> list(Dataset.range(1, 5, -2).as_numpy_iterator())
[]
>>> list(Dataset.range(5, 1).as_numpy_iterator())
[]
>>> list(Dataset.range(5, 1, -2).as_numpy_iterator())
[5, 3]
>>> list(Dataset.range(2, 5, output_type=tf.int32).as_numpy_iterator())
[2, 3, 4]
>>> list(Dataset.range(1, 5, 2, output_type=tf.float32).as_numpy_iterator())
[1.0, 3.0]
Args:
*args: follows the same semantics as Python's `range`.
len(args) == 1 -> start = 0, stop = args[0], step = 1.
len(args) == 2 -> start = args[0], stop = args[1], step = 1.
len(args) == 3 -> start = args[0], stop = args[1], step = args[2].
**kwargs:
- output_type: Its expected dtype. (Optional, default: `tf.int64`).
Returns:
Dataset: A `RangeDataset`.
Raises:
ValueError: if len(args) == 0.
"""
return RangeDataset(*args, **kwargs)
@staticmethod
def zip(datasets):
"""Creates a `Dataset` by zipping together the given datasets.
This method has similar semantics to the built-in `zip()` function
in Python, with the main difference being that the `datasets`
argument can be an arbitrary nested structure of `Dataset` objects.
>>> # The nested structure of the `datasets` argument determines the
>>> # structure of elements in the resulting dataset.
>>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
>>> b = tf.data.Dataset.range(4, 7) # ==> [ 4, 5, 6 ]
>>> ds = tf.data.Dataset.zip((a, b))
>>> list(ds.as_numpy_iterator())
[(1, 4), (2, 5), (3, 6)]
>>> ds = tf.data.Dataset.zip((b, a))
>>> list(ds.as_numpy_iterator())
[(4, 1), (5, 2), (6, 3)]
>>>
>>> # The `datasets` argument may contain an arbitrary number of datasets.
>>> c = tf.data.Dataset.range(7, 13).batch(2) # ==> [ [7, 8],
... # [9, 10],
... # [11, 12] ]
>>> ds = tf.data.Dataset.zip((a, b, c))
>>> for element in ds.as_numpy_iterator():
... print(element)
(1, 4, array([7, 8]))
(2, 5, array([ 9, 10]))
(3, 6, array([11, 12]))
>>>
>>> # The number of elements in the resulting dataset is the same as
>>> # the size of the smallest dataset in `datasets`.
>>> d = tf.data.Dataset.range(13, 15) # ==> [ 13, 14 ]
>>> ds = tf.data.Dataset.zip((a, d))
>>> list(ds.as_numpy_iterator())
[(1, 13), (2, 14)]
Args:
datasets: A nested structure of datasets.
Returns:
Dataset: A `Dataset`.
"""
return ZipDataset(datasets)
def concatenate(self, dataset):
"""Creates a `Dataset` by concatenating the given dataset with this dataset.
>>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
>>> b = tf.data.Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]
>>> ds = a.concatenate(b)
>>> list(ds.as_numpy_iterator())
[1, 2, 3, 4, 5, 6, 7]
>>> # The input dataset and dataset to be concatenated should have the same
>>> # nested structures and output types.
>>> c = tf.data.Dataset.zip((a, b))
>>> a.concatenate(c)
Traceback (most recent call last):
TypeError: Two datasets to concatenate have different types
<dtype: 'int64'> and (tf.int64, tf.int64)
>>> d = tf.data.Dataset.from_tensor_slices(["a", "b", "c"])
>>> a.concatenate(d)
Traceback (most recent call last):
TypeError: Two datasets to concatenate have different types
<dtype: 'int64'> and <dtype: 'string'>
Args:
dataset: `Dataset` to be concatenated.
Returns:
Dataset: A `Dataset`.
"""
return ConcatenateDataset(self, dataset)
def prefetch(self, buffer_size):
"""Creates a `Dataset` that prefetches elements from this dataset.
Most dataset input pipelines should end with a call to `prefetch`. This
allows later elements to be prepared while the current element is being
processed. This often improves latency and throughput, at the cost of
using additional memory to store prefetched elements.
Note: Like other `Dataset` methods, prefetch operates on the
elements of the input dataset. It has no concept of examples vs. batches.
`examples.prefetch(2)` will prefetch two elements (2 examples),
while `examples.batch(20).prefetch(2)` will prefetch 2 elements
(2 batches, of 20 examples each).
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.prefetch(2)
>>> list(dataset.as_numpy_iterator())
[0, 1, 2]
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
number of elements that will be buffered when prefetching.
Returns:
Dataset: A `Dataset`.
"""
return PrefetchDataset(self, buffer_size)
@staticmethod
def list_files(file_pattern, shuffle=None, seed=None):
"""A dataset of all files matching one or more glob patterns.
The `file_pattern` argument should be a small number of glob patterns.
If your filenames have already been globbed, use
`Dataset.from_tensor_slices(filenames)` instead, as re-globbing every
filename with `list_files` may result in poor performance with remote
storage systems.
Note: The default behavior of this method is to return filenames in
a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
to get results in a deterministic order.
Example:
If we had the following files on our filesystem:
- /path/to/dir/a.txt
- /path/to/dir/b.py
- /path/to/dir/c.py
If we pass "/path/to/dir/*.py" as the `file_pattern`, the dataset
would produce:
- /path/to/dir/b.py
- /path/to/dir/c.py
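Expressed as a doctest (skipped, since the files above are hypothetical):
>>> dataset = tf.data.Dataset.list_files(
... "/path/to/dir/*.py", shuffle=False) # doctest: +SKIP
>>> for f in dataset.as_numpy_iterator(): # doctest: +SKIP
... print(f)
b'/path/to/dir/b.py'
b'/path/to/dir/c.py'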
Args:
file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
(scalar or vector), representing the filename glob (i.e. shell wildcard)
pattern(s) that will be matched.
shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.random.set_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
"""
with ops.name_scope("list_files"):
if shuffle is None:
shuffle = True
file_pattern = ops.convert_to_tensor(
file_pattern, dtype=dtypes.string, name="file_pattern")
matching_files = gen_io_ops.matching_files(file_pattern)
# Raise an exception if `file_pattern` does not match any files.
condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
name="match_not_empty")
message = math_ops.add(
"No files matched pattern: ",
string_ops.reduce_join(file_pattern, separator=", "), name="message")
assert_not_empty = control_flow_ops.Assert(
condition, [message], summarize=1, name="assert_not_empty")
with ops.control_dependencies([assert_not_empty]):
matching_files = array_ops.identity(matching_files)
dataset = Dataset.from_tensor_slices(matching_files)
if shuffle:
# NOTE(mrry): The shuffle buffer size must be greater than zero, but the
# list of files might be empty.
buffer_size = math_ops.maximum(
array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
dataset = dataset.shuffle(buffer_size, seed=seed)
return dataset
def repeat(self, count=None):
"""Repeats this dataset so each original value is seen `count` times.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.repeat(3)
>>> list(dataset.as_numpy_iterator())
[1, 2, 3, 1, 2, 3, 1, 2, 3]
Note: If this dataset is a function of global state (e.g. a random number
generator), then different repetitions may produce different elements.
Args:
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior (if
`count` is `None` or `-1`) is for the dataset to be repeated indefinitely.
Returns:
Dataset: A `Dataset`.
"""
return RepeatDataset(self, count)
def enumerate(self, start=0):
"""Enumerates the elements of this dataset.
It is similar to python's `enumerate`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.enumerate(start=5)
>>> for element in dataset.as_numpy_iterator():
... print(element)
(5, 1)
(6, 2)
(7, 3)
>>> # The nested structure of the input dataset determines the structure of
>>> # elements in the resulting dataset.
>>> dataset = tf.data.Dataset.from_tensor_slices([(7, 8), (9, 10)])
>>> dataset = dataset.enumerate()
>>> for element in dataset.as_numpy_iterator():
... print(element)
(0, array([7, 8], dtype=int32))
(1, array([ 9, 10], dtype=int32))
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
Dataset: A `Dataset`.
"""
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly
samples elements from this buffer, replacing the selected elements with new
elements. For perfect shuffling, a buffer size greater than or equal to the
full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is
set to 1,000, then `shuffle` will initially select a random element from
only the first 1,000 elements in the buffer. Once an element is selected,
its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1,000 element buffer.
`reshuffle_each_iteration` controls whether the shuffle order should be
different for each epoch. In TF 1.X, the idiomatic way to create epochs
was through the `repeat` transformation:
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
>>> dataset = dataset.repeat(2)
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2, 1, 2, 0]
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=False)
>>> dataset = dataset.repeat(2)
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2, 1, 0, 2]
In TF 2.0, `tf.data.Dataset` objects are Python iterables which makes it
possible to also create epochs through Python iteration:
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2]
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 2, 0]
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.shuffle(3, reshuffle_each_iteration=False)
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2]
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[1, 0, 2]
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.random.set_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
def cache(self, filename=""):
"""Caches the elements in this dataset.
The first time the dataset is iterated over, its elements will be cached
either in the specified file or in memory. Subsequent iterations will
use the cached data.
Note: For the cache to be finalized, the input dataset must be iterated
through in its entirety. Otherwise, subsequent iterations will not use
cached data.
>>> dataset = tf.data.Dataset.range(5)
>>> dataset = dataset.map(lambda x: x**2)
>>> dataset = dataset.cache()
>>> # The first time reading through the data will generate the data using
>>> # `range` and `map`.
>>> list(dataset.as_numpy_iterator())
[0, 1, 4, 9, 16]
>>> # Subsequent iterations read from the cache.
>>> list(dataset.as_numpy_iterator())
[0, 1, 4, 9, 16]
When caching to a file, the cached data will persist across runs. Even the
first iteration through the data will read from the cache file. Changing
the input pipeline before the call to `.cache()` will have no effect until
the cache file is removed or the filename is changed.
>>> dataset = tf.data.Dataset.range(5)
>>> dataset = dataset.cache("/path/to/file") # doctest: +SKIP
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[0, 1, 2, 3, 4]
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.cache("/path/to/file") # Same file! # doctest: +SKIP
>>> list(dataset.as_numpy_iterator()) # doctest: +SKIP
[0, 1, 2, 3, 4]
Note: `cache` will produce exactly the same elements during each iteration
through the dataset. If you wish to randomize the iteration order, make sure
to call `shuffle` *after* calling `cache`.
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
file on the filesystem to use for caching elements in this Dataset. If a
filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
return CacheDataset(self, filename)
def take(self, count):
"""Creates a `Dataset` with at most `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.take(3)
>>> list(dataset.as_numpy_iterator())
[0, 1, 2]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
Returns:
Dataset: A `Dataset`.
"""
return TakeDataset(self, count)
def skip(self, count):
"""Creates a `Dataset` that skips `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.skip(7)
>>> list(dataset.as_numpy_iterator())
[7, 8, 9]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
Returns:
Dataset: A `Dataset`.
"""
return SkipDataset(self, count)
def shard(self, num_shards, index):
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
`shard` is deterministic. The Dataset produced by `A.shard(n, i)` will
contain all elements of A whose index mod n = i.
>>> A = tf.data.Dataset.range(10)
>>> B = A.shard(num_shards=3, index=0)
>>> list(B.as_numpy_iterator())
[0, 3, 6, 9]
>>> C = A.shard(num_shards=3, index=1)
>>> list(C.as_numpy_iterator())
[1, 4, 7]
>>> D = A.shard(num_shards=3, index=2)
>>> list(D.as_numpy_iterator())
[2, 5, 8]
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can shard elements as follows:
```python
d = tf.data.TFRecordDataset(input_file)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(pattern)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Returns:
Dataset: A `Dataset`.
Raises:
InvalidArgumentError: if `num_shards` or `index` are illegal values.
Note: error checking is done on a best-effort basis, and errors aren't
guaranteed to be caught upon dataset creation. (e.g. passing a
placeholder tensor bypasses the early checking, and will instead result
in an error during a session.run call.)
"""
return ShardDataset(self, num_shards, index)
def batch(self, batch_size, drop_remainder=False):
"""Combines consecutive elements of this dataset into batches.
>>> dataset = tf.data.Dataset.range(8)
>>> dataset = dataset.batch(3)
>>> list(dataset.as_numpy_iterator())
[array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]
>>> dataset = tf.data.Dataset.range(8)
>>> dataset = dataset.batch(3, drop_remainder=True)
>>> list(dataset.as_numpy_iterator())
[array([0, 1, 2]), array([3, 4, 5])]
The components of the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return BatchDataset(self, batch_size, drop_remainder)
def padded_batch(self,
batch_size,
padded_shapes=None,
padding_values=None,
drop_remainder=False):
"""Combines consecutive elements of this dataset into padded batches.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the components of the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padded_shapes`. The `padded_shapes` argument
determines the resulting shape for each dimension of each component in an
output element:
* If the dimension is a constant, the component will be padded out to that
length in that dimension.
* If the dimension is unknown, the component will be padded out to the
maximum length of all elements in that dimension.
>>> A = (tf.data.Dataset
... .range(1, 5, output_type=tf.int32)
... .map(lambda x: tf.fill([x], x)))
>>> # Pad to the smallest per-batch size that fits all elements.
>>> B = A.padded_batch(2)
>>> for element in B.as_numpy_iterator():
... print(element)
[[1 0]
[2 2]]
[[3 3 3 0]
[4 4 4 4]]
>>> # Pad to a fixed size.
>>> C = A.padded_batch(2, padded_shapes=5)
>>> for element in C.as_numpy_iterator():
... print(element)
[[1 0 0 0 0]
[2 2 0 0 0]]
[[3 3 3 0 0]
[4 4 4 4 0]]
>>> # Pad with a custom value.
>>> D = A.padded_batch(2, padded_shapes=5, padding_values=-1)
>>> for element in D.as_numpy_iterator():
... print(element)
[[ 1 -1 -1 -1 -1]
[ 2 2 -1 -1 -1]]
[[ 3 3 3 -1 -1]
[ 4 4 4 4 -1]]
>>> # Components of nested elements can be padded independently.
>>> elements = [([1, 2, 3], [10]),
... ([4, 5], [11, 12])]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: iter(elements), (tf.int32, tf.int32))
>>> # Pad the first component of the tuple to length 4, and the second
>>> # component to the smallest size that fits.
>>> dataset = dataset.padded_batch(2,
... padded_shapes=([4], [None]),
... padding_values=(-1, 100))
>>> list(dataset.as_numpy_iterator())
[(array([[ 1, 2, 3, -1], [ 4, 5, -1, -1]], dtype=int32),
array([[ 10, 100], [ 11, 12]], dtype=int32))]
See also `tf.data.experimental.dense_to_sparse_batch`, which combines
elements that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: (Optional.) A nested structure of `tf.TensorShape` or
`tf.int64` vector tensor-like objects representing the shape to which
the respective component of each input element should be padded prior
to batching. Any unknown dimensions will be padded to the maximum size
of that dimension in each batch. If unset, all dimensions of all
components are padded to the maximum size in the batch. `padded_shapes`
must be set if any component has an unknown rank.
padding_values: (Optional.) A nested structure of scalar-shaped
`tf.Tensor`, representing the padding values to use for the respective
components. None represents that the nested structure should be padded
with default values. Defaults are `0` for numeric types and the empty
string for string types.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
Raises:
ValueError: If a component has an unknown rank, and the `padded_shapes`
argument is not set.
"""
if padded_shapes is None:
padded_shapes = get_legacy_output_shapes(self)
# A `tf.TensorShape` is only falsey if its *rank* is unknown:
# bool(tf.TensorShape(None)) is False
if not all(nest.flatten(padded_shapes)):
raise ValueError("You must set the `padded_shapes` argument to "
"`Dataset.padded_batch` if any component of its "
"input has an unknown rank")
return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
drop_remainder)
def map(self, map_func, num_parallel_calls=None, deterministic=None):
"""Maps `map_func` across the elements of this dataset.
This transformation applies `map_func` to each element of this dataset, and
returns a new dataset containing the transformed elements, in the same
order as they appeared in the input. `map_func` can be used to change both
the values and the structure of a dataset's elements. For example, adding 1
to each element, or projecting a subset of element components.
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> dataset = dataset.map(lambda x: x + 1)
>>> list(dataset.as_numpy_iterator())
[2, 3, 4, 5, 6]
The input signature of `map_func` is determined by the structure of each
element in this dataset.
>>> dataset = Dataset.range(5)
>>> # `map_func` takes a single argument of type `tf.Tensor` with the same
>>> # shape and dtype.
>>> result = dataset.map(lambda x: x + 1)
>>> # Each element is a tuple containing two `tf.Tensor` objects.
>>> elements = [(1, "foo"), (2, "bar"), (3, "baz")]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, (tf.int32, tf.string))
>>> # `map_func` takes two arguments of type `tf.Tensor`. This function
>>> # projects out just the first component.
>>> result = dataset.map(lambda x_int, y_str: x_int)
>>> list(result.as_numpy_iterator())
[1, 2, 3]
>>> # Each element is a dictionary mapping strings to `tf.Tensor` objects.
>>> elements = ([{"a": 1, "b": "foo"},
... {"a": 2, "b": "bar"},
... {"a": 3, "b": "baz"}])
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, {"a": tf.int32, "b": tf.string})
>>> # `map_func` takes a single argument of type `dict` with the same keys
>>> # as the elements.
>>> result = dataset.map(lambda d: str(d["a"]) + d["b"])
The value or values returned by `map_func` determine the structure of each
element in the returned dataset.
>>> dataset = tf.data.Dataset.range(3)
>>> # `map_func` returns two `tf.Tensor` objects.
>>> def g(x):
... return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
>>> result = dataset.map(g)
>>> result.element_spec
(TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(3,), \
dtype=tf.string, name=None))
>>> # Python primitives, lists, and NumPy arrays are implicitly converted to
>>> # `tf.Tensor`.
>>> def h(x):
... return 37.0, ["Foo", "Bar"], np.array([1.0, 2.0], dtype=np.float64)
>>> result = dataset.map(h)
>>> result.element_spec
(TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(2,), \
dtype=tf.string, name=None), TensorSpec(shape=(2,), dtype=tf.float64, \
name=None))
>>> # `map_func` can return nested structures.
>>> def i(x):
... return (37.0, [42, 16]), "foo"
>>> result = dataset.map(i)
>>> result.element_spec
((TensorSpec(shape=(), dtype=tf.float32, name=None),
TensorSpec(shape=(2,), dtype=tf.int32, name=None)),
TensorSpec(shape=(), dtype=tf.string, name=None))
`map_func` can accept as arguments and return any type of dataset element.
Note that irrespective of the context in which `map_func` is defined (eager
vs. graph), tf.data traces the function and executes it as a graph. To use
Python code inside of the function you have two options:
1) Rely on AutoGraph to convert Python code into an equivalent graph
computation. The downside of this approach is that AutoGraph can convert
some but not all Python code.
2) Use `tf.py_function`, which allows you to write arbitrary Python code but
will generally result in worse performance than 1). For example:
>>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
>>> # transform a string tensor to upper case string using a Python function
>>> def upper_case_fn(t: tf.Tensor):
... return t.numpy().decode('utf-8').upper()
>>> d = d.map(lambda x: tf.py_function(func=upper_case_fn,
... inp=[x], Tout=tf.string))
>>> list(d.as_numpy_iterator())
[b'HELLO', b'WORLD']
Performance can often be improved by setting `num_parallel_calls` so that
`map` will use multiple threads to process elements. If deterministic order
isn't required, it can also improve performance to set
`deterministic=False`.
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> dataset = dataset.map(lambda x: x + 1,
... num_parallel_calls=tf.data.experimental.AUTOTUNE,
... deterministic=False)
Args:
map_func: A function mapping a dataset element to another dataset element.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out
of order. If `deterministic` is `None`, the
`tf.data.Options.experimental_deterministic` dataset option (`True` by
default) is used to decide whether to produce elements
deterministically.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return MapDataset(self, map_func, preserve_cardinality=True)
else:
return ParallelMapDataset(
self,
map_func,
num_parallel_calls,
deterministic,
preserve_cardinality=True)
def flat_map(self, map_func):
"""Maps `map_func` across this dataset and flattens the result.
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
>>> dataset = Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> dataset = dataset.flat_map(lambda x: Dataset.from_tensor_slices(x))
>>> list(dataset.as_numpy_iterator())
[1, 2, 3, 4, 5, 6, 7, 8, 9]
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`.
Args:
map_func: A function mapping a dataset element to a dataset.
Returns:
Dataset: A `Dataset`.
"""
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None,
deterministic=None):
"""Maps `map_func` across this dataset, and interleaves the results.
For example, you can use `Dataset.interleave()` to process many input files
concurrently:
>>> # Preprocess 4 files concurrently, and interleave blocks of 16 records
>>> # from each file.
>>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
... "/var/data/file3.txt", "/var/data/file4.txt"]
>>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
>>> def parse_fn(filename):
... return tf.data.Dataset.range(10)
>>> dataset = dataset.interleave(lambda x:
... tf.data.TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
... cycle_length=4, block_length=16)
The `cycle_length` and `block_length` arguments control the order in which
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them,
producing `block_length` consecutive elements from each iterator and
consuming the next input element each time it reaches the end of an
iterator.
For example:
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> # NOTE: New lines indicate "block" boundaries.
>>> dataset = dataset.interleave(
... lambda x: Dataset.from_tensors(x).repeat(6),
... cycle_length=2, block_length=4)
>>> list(dataset.as_numpy_iterator())
[1, 1, 1, 1,
2, 2, 2, 2,
1, 1,
2, 2,
3, 3, 3, 3,
4, 4, 4, 4,
3, 3,
4, 4,
5, 5, 5, 5,
5, 5]
Note: The order of elements yielded by this transformation is
deterministic, as long as `map_func` is a pure function and
`deterministic=True`. If `map_func` contains any stateful operations, the
order in which that state is accessed is undefined.
Performance can often be improved by setting `num_parallel_calls` so that
`interleave` will use multiple threads to fetch elements. If determinism
isn't required, it can also improve performance to set
`deterministic=False`.
>>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
... "/var/data/file3.txt", "/var/data/file4.txt"]
>>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
>>> dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x),
... cycle_length=4, num_parallel_calls=tf.data.experimental.AUTOTUNE,
... deterministic=False)
Args:
map_func: A function mapping a dataset element to a dataset.
cycle_length: (Optional.) The number of input elements that will be
processed concurrently. If not specified, the value will be derived from
the number of available CPU cores. If the `num_parallel_calls` argument
is set to `tf.data.experimental.AUTOTUNE`, the `cycle_length` argument
also identifies the maximum degree of parallelism.
block_length: (Optional.) The number of consecutive elements to produce
from each input element before cycling to another input element.
num_parallel_calls: (Optional.) If specified, the implementation creates a
threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out
of order. If `deterministic` is `None`, the
`tf.data.Options.experimental_deterministic` dataset option (`True` by
default) is used to decide whether to produce elements
deterministically.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(
self,
map_func,
cycle_length,
block_length,
num_parallel_calls,
deterministic=deterministic)
def filter(self, predicate):
"""Filters this dataset according to `predicate`.
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> dataset = dataset.filter(lambda x: x < 3)
>>> list(dataset.as_numpy_iterator())
[1, 2]
>>> # `tf.math.equal(x, y)` is required for equality comparison
>>> def filter_fn(x):
... return tf.math.equal(x, 1)
>>> dataset = dataset.filter(filter_fn)
>>> list(dataset.as_numpy_iterator())
[1]
Args:
predicate: A function mapping a dataset element to a boolean.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate)
def apply(self, transformation_func):
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
>>> dataset = tf.data.Dataset.range(100)
>>> def dataset_fn(ds):
... return ds.filter(lambda x: x < 5)
>>> dataset = dataset.apply(dataset_fn)
>>> list(dataset.as_numpy_iterator())
[0, 1, 2, 3, 4]
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
Dataset: The `Dataset` returned by applying `transformation_func` to this
dataset.
"""
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError(
"`transformation_func` must return a Dataset. Got {}.".format(
dataset))
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
def window(self, size, shift=None, stride=1, drop_remainder=False):
"""Combines (nests of) input elements into a dataset of (nests of) windows.
A "window" is a finite dataset of flat elements of size `size` (or possibly
fewer if there are not enough input elements to fill the window and
`drop_remainder` evaluates to `False`).
The `shift` argument determines the number of input elements by which the
window moves on each iteration. If windows and elements are both numbered
starting at 0, the first element in window `k` will be element `k * shift`
of the input dataset. In particular, the first element of the first window
will always be the first element of the input dataset.
The `stride` argument determines the stride of the input elements, and the
`shift` argument determines the shift of the window.
For example:
>>> dataset = tf.data.Dataset.range(7).window(2)
>>> for window in dataset:
... print(list(window.as_numpy_iterator()))
[0, 1]
[2, 3]
[4, 5]
[6]
>>> dataset = tf.data.Dataset.range(7).window(3, 2, 1, True)
>>> for window in dataset:
... print(list(window.as_numpy_iterator()))
[0, 1, 2]
[2, 3, 4]
[4, 5, 6]
>>> dataset = tf.data.Dataset.range(7).window(3, 1, 2, True)
>>> for window in dataset:
... print(list(window.as_numpy_iterator()))
[0, 2, 4]
[1, 3, 5]
[2, 4, 6]
Note that when the `window` transformation is applied to a dataset of
nested elements, it produces a dataset of nested windows.
>>> nested = ([1, 2, 3, 4], [5, 6, 7, 8])
>>> dataset = tf.data.Dataset.from_tensor_slices(nested).window(2)
>>> for window in dataset:
... def to_numpy(ds):
... return list(ds.as_numpy_iterator())
... print(tuple(to_numpy(component) for component in window))
([1, 2], [5, 6])
([3, 4], [7, 8])
>>> dataset = tf.data.Dataset.from_tensor_slices({'a': [1, 2, 3, 4]})
>>> dataset = dataset.window(2)
>>> for window in dataset:
... def to_numpy(ds):
... return list(ds.as_numpy_iterator())
... print({'a': to_numpy(window['a'])})
{'a': [1, 2]}
{'a': [3, 4]}
Args:
size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
of the input dataset to combine into a window. Must be positive.
shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of input elements by which the window moves in each iteration.
Defaults to `size`. Must be positive.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window. Must be positive.
The default value of 1 means "retain every input element".
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last window should be dropped if its size is smaller than
`size`.
Returns:
Dataset: A `Dataset` of (nests of) windows -- finite datasets of flat
elements created from the (nests of) input elements.
"""
if shift is None:
shift = size
return WindowDataset(self, size, shift, stride, drop_remainder)
def reduce(self, initial_state, reduce_func):
"""Reduces the input dataset to a single element.
The transformation calls `reduce_func` successively on every element of
the input dataset until the dataset is exhausted, aggregating information in
its internal state. The `initial_state` argument is used for the initial
state and the final state is returned as the result.
>>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1).numpy()
5
>>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y).numpy()
10
Args:
initial_state: An element representing the initial state of the
transformation.
reduce_func: A function that maps `(old_state, input_element)` to
`new_state`. It must take two arguments and return a new element.
The structure of `new_state` must match the structure of
`initial_state`.
Returns:
A dataset element corresponding to the final state of the transformation.
"""
with ops.name_scope("initial_state"):
initial_state = structure.normalize_element(initial_state)
state_structure = structure.type_spec_from_value(initial_state)
# Iteratively rerun the reduce function until reaching a fixed point on
# `state_structure`.
need_to_rerun = True
while need_to_rerun:
wrapped_func = StructuredFunctionWrapper(
reduce_func,
"reduce()",
input_structure=(state_structure, self.element_spec),
add_to_graph=False)
# Extract and validate class information from the returned values.
output_classes = wrapped_func.output_classes
state_classes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
state_structure)
for new_state_class, state_class in zip(
nest.flatten(output_classes), nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(state_classes, wrapped_func.output_classes))
# Extract and validate type information from the returned values.
output_types = wrapped_func.output_types
state_types = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
state_structure)
for new_state_type, state_type in zip(
nest.flatten(output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(state_types, wrapped_func.output_types))
# Extract shape information from the returned values.
output_shapes = wrapped_func.output_shapes
state_shapes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
state_structure)
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
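# If any weakened shape is less specific than the corresponding original
# state shape, the state structure changed between traces, so the reduce
# function is retraced below against the weakened structure.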
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
# TODO(b/110122868): Support a "most specific compatible structure"
# method for combining structures, to avoid using legacy structures
# here.
state_structure = structure.convert_legacy_structure(
state_types,
nest.pack_sequence_as(state_shapes, weakened_state_shapes),
state_classes)
reduce_func = wrapped_func.function
reduce_func.add_to_graph(ops.get_default_graph())
dataset = self._apply_options()
# pylint: disable=protected-access
return structure.from_compatible_tensor_list(
state_structure,
gen_dataset_ops.reduce_dataset(
dataset._variant_tensor,
structure.to_tensor_list(state_structure, initial_state),
reduce_func.captured_inputs,
f=reduce_func,
output_shapes=structure.get_flat_tensor_shapes(state_structure),
output_types=structure.get_flat_tensor_types(state_structure)))
def unbatch(self):
"""Splits elements of a dataset into multiple elements.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
>>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]
>>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)
>>> dataset = dataset.unbatch()
>>> list(dataset.as_numpy_iterator())
[1, 2, 3, 1, 2, 1, 2, 3, 4]
Returns:
A `Dataset`.
"""
normalized_dataset = normalize_to_dense(self)
return _UnbatchDataset(normalized_dataset)
def with_options(self, options):
"""Returns a new `tf.data.Dataset` with the given options set.
The options are "global" in the sense that they apply to the entire dataset.
If options are set multiple times, they are merged as long as they do not
set different non-default values for the same option.
>>> ds = tf.data.Dataset.range(5)
>>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),
... cycle_length=3,
... num_parallel_calls=3)
>>> options = tf.data.Options()
>>> # This will make the interleave order non-deterministic.
>>> options.experimental_deterministic = False
>>> ds = ds.with_options(options)
Args:
options: A `tf.data.Options` that identifies the options to use.
Returns:
Dataset: A `Dataset` with the given options.
Raises:
ValueError: when an option is set more than once to a non-default value
"""
return _OptionsDataset(self, options)
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements and a "logical plan" of transformations that act on
those elements.
"""
def __init__(self):
try:
variant_tensor = self._as_variant_tensor()
except AttributeError as e:
if "_as_variant_tensor" in str(e):
raise AttributeError("Please use _variant_tensor instead of "
"_as_variant_tensor() to obtain the variant "
"associated with a dataset")
raise AttributeError("{}: A likely cause of this error is that the super "
"call for this dataset is not the last line of the "
"__init__ method. The base class causes the "
"_as_variant_tensor call in its constructor and "
"if that uses attributes defined in the __init__ "
"method, those attrs need to be defined before the "
"super call.".format(e))
super(DatasetV1, self).__init__(variant_tensor)
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
"""
raise NotImplementedError("Dataset._as_variant_tensor")
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
def make_one_shot_iterator(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not currently support re-initialization.
Returns:
An `Iterator` over the elements of this dataset.
"""
return self._make_one_shot_iterator()
def _make_one_shot_iterator(self): # pylint: disable=missing-docstring
if context.executing_eagerly():
return iterator_ops.OwnedIterator(self)
_ensure_same_dataset_graph(self)
# Now that we create datasets at python object creation time, the capture
# by value _make_dataset() function would try to capture these variant
# tensor dataset inputs, which are marked as stateful ops and would throw
# an error if we try and capture them. We therefore traverse the graph
# to find all these ops and whitelist them so that, instead of raising an
# error, the capturing logic recreates these ops, which is what happened
# before.
all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)
graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
# NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
# a 0-argument function.
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops)
def _make_dataset():
"""Factory function for a dataset."""
# NOTE(mrry): `Defun` does not capture the graph-level seed from the
# enclosing graph, so if a graph-level seed is present we set the local
# graph seed based on a combination of the graph- and op-level seeds.
if graph_level_seed is not None:
assert op_level_seed is not None
core_random_seed.set_random_seed(
(graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
dataset = self._apply_options()
return dataset._variant_tensor # pylint: disable=protected-access
try:
_make_dataset.add_to_graph(ops.get_default_graph())
except ValueError as err:
if "Cannot capture a stateful node" in str(err):
raise ValueError(
"Failed to create a one-shot iterator for a dataset. "
"`Dataset.make_one_shot_iterator()` does not support datasets that "
"capture stateful objects, such as a `Variable` or `LookupTable`. "
"In these cases, use `Dataset.make_initializable_iterator()`. "
"(Original error: %s)" % err)
else:
six.reraise(ValueError, err)
# pylint: disable=protected-access
return iterator_ops.Iterator(
gen_dataset_ops.one_shot_iterator(
dataset_factory=_make_dataset, **self._flat_structure), None,
get_legacy_output_types(self), get_legacy_output_shapes(self),
get_legacy_output_classes(self))
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
def make_initializable_iterator(self, shared_name=None):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be in an uninitialized state,
and you must run the `iterator.initializer` operation before using it:
```python
dataset = ...
iterator = dataset.make_initializable_iterator()
# ...
sess.run(iterator.initializer)
```
Args:
shared_name: (Optional.) If non-empty, the returned iterator will be
shared under the given name across multiple sessions that share the same
devices (e.g. when using a remote server).
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If eager execution is enabled.
"""
return self._make_initializable_iterator(shared_name)
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=missing-docstring
if context.executing_eagerly():
raise RuntimeError(
"dataset.make_initializable_iterator is not supported when eager "
"execution is enabled. Use `for element in dataset` instead.")
_ensure_same_dataset_graph(self)
dataset = self._apply_options()
if shared_name is None:
shared_name = ""
iterator_resource = gen_dataset_ops.iterator_v2(
container="", shared_name=shared_name, **self._flat_structure)
with ops.colocate_with(iterator_resource):
initializer = gen_dataset_ops.make_iterator(
dataset._variant_tensor, # pylint: disable=protected-access
iterator_resource)
# pylint: disable=protected-access
return iterator_ops.Iterator(
iterator_resource, initializer, get_legacy_output_types(dataset),
get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset))
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.")
def output_classes(self):
"""Returns the class of each component of an element of this dataset.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this dataset.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(dataset)`.")
def output_types(self):
"""Returns the type of each component of an element of this dataset.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self.element_spec)
@property
def element_spec(self):
# TODO(b/110122868): Remove this override once all `Dataset` instances
# implement `element_structure`.
return structure.convert_legacy_structure(
self.output_types, self.output_shapes, self.output_classes)
@staticmethod
@functools.wraps(DatasetV2.from_tensors)
def from_tensors(tensors):
return DatasetV1Adapter(DatasetV2.from_tensors(tensors))
@staticmethod
@functools.wraps(DatasetV2.from_tensor_slices)
def from_tensor_slices(tensors):
return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))
@staticmethod
@deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
def from_sparse_tensor_slices(sparse_tensor):
"""Splits each rank-N `tf.SparseTensor` in this dataset row-wise.
Args:
sparse_tensor: A `tf.SparseTensor`.
Returns:
Dataset: A `Dataset` of rank-(N-1) sparse tensors.
"""
return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))
@staticmethod
@functools.wraps(DatasetV2.from_generator)
def from_generator(generator, output_types, output_shapes=None, args=None):
return DatasetV1Adapter(DatasetV2.from_generator(
generator, output_types, output_shapes, args))
@staticmethod
@functools.wraps(DatasetV2.range)
def range(*args, **kwargs):
return DatasetV1Adapter(DatasetV2.range(*args, **kwargs))
@staticmethod
@functools.wraps(DatasetV2.zip)
def zip(datasets):
return DatasetV1Adapter(DatasetV2.zip(datasets))
@functools.wraps(DatasetV2.concatenate)
def concatenate(self, dataset):
return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))
@functools.wraps(DatasetV2.prefetch)
def prefetch(self, buffer_size):
return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))
@staticmethod
@functools.wraps(DatasetV2.list_files)
def list_files(file_pattern, shuffle=None, seed=None):
return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))
@functools.wraps(DatasetV2.repeat)
def repeat(self, count=None):
return DatasetV1Adapter(super(DatasetV1, self).repeat(count))
@functools.wraps(DatasetV2.shuffle)
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
return DatasetV1Adapter(super(DatasetV1, self).shuffle(
buffer_size, seed, reshuffle_each_iteration))
@functools.wraps(DatasetV2.cache)
def cache(self, filename=""):
return DatasetV1Adapter(super(DatasetV1, self).cache(filename))
@functools.wraps(DatasetV2.take)
def take(self, count):
return DatasetV1Adapter(super(DatasetV1, self).take(count))
@functools.wraps(DatasetV2.skip)
def skip(self, count):
return DatasetV1Adapter(super(DatasetV1, self).skip(count))
@functools.wraps(DatasetV2.shard)
def shard(self, num_shards, index):
return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))
@functools.wraps(DatasetV2.batch)
def batch(self, batch_size, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).batch(
batch_size, drop_remainder))
@functools.wraps(DatasetV2.padded_batch)
def padded_batch(self,
batch_size,
padded_shapes=None,
padding_values=None,
drop_remainder=False):
return DatasetV1Adapter(
super(DatasetV1, self).padded_batch(batch_size, padded_shapes,
padding_values, drop_remainder))
@functools.wraps(DatasetV2.map)
def map(self, map_func, num_parallel_calls=None, deterministic=None):
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(self, map_func, preserve_cardinality=False))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self,
map_func,
num_parallel_calls,
deterministic,
preserve_cardinality=False))
@deprecation.deprecated(None, "Use `tf.data.Dataset.map()")
def map_with_legacy_function(self,
map_func,
num_parallel_calls=None,
deterministic=None):
"""Maps `map_func` across the elements of this dataset.
Note: This is an escape hatch for existing uses of `map` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `map` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out
of order. If `deterministic` is `None`, the
`tf.data.Options.experimental_deterministic` dataset option (`True` by
default) is used to decide whether to produce elements
deterministically.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(
self,
map_func,
preserve_cardinality=False,
use_legacy_function=True))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self,
map_func,
num_parallel_calls,
deterministic,
preserve_cardinality=False,
use_legacy_function=True))
@functools.wraps(DatasetV2.flat_map)
def flat_map(self, map_func):
return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))
@functools.wraps(DatasetV2.interleave)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None,
deterministic=None):
return DatasetV1Adapter(
super(DatasetV1, self).interleave(map_func, cycle_length, block_length,
num_parallel_calls, deterministic))
@functools.wraps(DatasetV2.filter)
def filter(self, predicate):
return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))
@deprecation.deprecated(None, "Use `tf.data.Dataset.filter()")
def filter_with_legacy_function(self, predicate):
"""Filters this dataset according to `predicate`.
Note: This is an escape hatch for existing uses of `filter` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `filter` as this method will be removed in V2.
Args:
predicate: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate, use_legacy_function=True)
@functools.wraps(DatasetV2.apply)
def apply(self, transformation_func):
return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))
@functools.wraps(DatasetV2.window)
def window(self, size, shift=None, stride=1, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).window(
size, shift, stride, drop_remainder))
@functools.wraps(DatasetV2.unbatch)
def unbatch(self):
return DatasetV1Adapter(super(DatasetV1, self).unbatch())
@functools.wraps(DatasetV2.with_options)
def with_options(self, options):
return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
if tf2.enabled():
Dataset = DatasetV2
else:
Dataset = DatasetV1
class DatasetV1Adapter(DatasetV1):
"""Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""
def __init__(self, dataset):
self._dataset = dataset
super(DatasetV1Adapter, self).__init__()
def _as_variant_tensor(self):
return self._dataset._variant_tensor # pylint: disable=protected-access
def _has_captured_ref(self):
return self._dataset._has_captured_ref() # pylint: disable=protected-access
def _inputs(self):
return self._dataset._inputs() # pylint: disable=protected-access
def _functions(self):
return self._dataset._functions() # pylint: disable=protected-access
def options(self):
return self._dataset.options()
@property
def element_spec(self):
return self._dataset.element_spec # pylint: disable=protected-access
def __iter__(self):
return iter(self._dataset)
def _ensure_same_dataset_graph(dataset):
"""Walks the dataset graph to ensure all datasets come from the same graph."""
# pylint: disable=protected-access
current_graph = ops.get_default_graph()
bfs_q = Queue.Queue()
bfs_q.put(dataset)
visited = []
while not bfs_q.empty():
ds = bfs_q.get()
visited.append(ds)
ds_graph = ds._graph
if current_graph != ds_graph:
raise ValueError(
"The graph (" + str(current_graph) + ") of the iterator is different "
"from the graph (" + str(ds_graph) + ") the dataset: " +
str(ds._variant_tensor) + " was created in. If you are using the "
"Estimator API, make sure that no part of the dataset returned by "
"the `input_fn` function is defined outside the `input_fn` function. "
"Please ensure that all datasets in the pipeline are created in the "
"same graph as the iterator.")
for input_ds in ds._inputs():
if input_ds not in visited:
bfs_q.put(input_ds)
@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
"""Creates a `tf.compat.v1.data.Iterator` for enumerating dataset elements.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not support re-initialization.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A `tf.compat.v1.data.Iterator` over the elements of this dataset.
"""
try:
# Call the defined `_make_one_shot_iterator()` if there is one, because some
# datasets (e.g. for prefetching) override its behavior.
return dataset._make_one_shot_iterator() # pylint: disable=protected-access
except AttributeError:
return DatasetV1Adapter(dataset)._make_one_shot_iterator() # pylint: disable=protected-access
@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None):
"""Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
Note: The returned iterator will be in an uninitialized state,
and you must run the `iterator.initializer` operation before using it:
```python
dataset = ...
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
# ...
sess.run(iterator.initializer)
```
Args:
dataset: A `tf.data.Dataset`.
shared_name: (Optional.) If non-empty, the returned iterator will be shared
under the given name across multiple sessions that share the same devices
(e.g. when using a remote server).
Returns:
A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
Raises:
RuntimeError: If eager execution is enabled.
"""
try:
# Call the defined `_make_initializable_iterator()` if there is one, because
# some datasets (e.g. for prefetching) override its behavior.
return dataset._make_initializable_iterator(shared_name) # pylint: disable=protected-access
except AttributeError:
return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name) # pylint: disable=protected-access
@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
"""Returns the type specification of an element of a `Dataset` or `Iterator`.
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of `dataset_or_iterator` and specifying the type of individual
components.
Raises:
TypeError: If `dataset_or_iterator` is not a `Dataset` or `Iterator` object.
"""
try:
return dataset_or_iterator.element_spec # pylint: disable=protected-access
except AttributeError:
raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
"object, but got %s." % type(dataset_or_iterator))
@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
"""Returns the output classes of a `Dataset` or `Iterator` elements.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_classes` property.
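For example (an illustrative sketch; a dataset of plain dense tensors has
class `tf.Tensor` for each component):
>>> tf.compat.v1.data.get_output_classes(tf.data.Dataset.range(3)).__name__
'Tensor'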
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of Python `type` objects matching the structure of the
dataset / iterator elements and specifying the class of the individual
components.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
get_structure(dataset_or_iterator))
@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator` elements.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_shapes` property.
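For example (illustrative; a scalar range dataset has an empty shape):
>>> print(tf.compat.v1.data.get_output_shapes(tf.data.Dataset.range(3)))
()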
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of `tf.TensorShape` objects matching the structure of
the dataset / iterator elements and specifying the shape of the individual
components.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
get_structure(dataset_or_iterator))
@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator` elements.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_types` property.
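For example (illustrative):
>>> print(tf.compat.v1.data.get_output_types(tf.data.Dataset.range(3)))
<dtype: 'int64'>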
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of `tf.DType` objects matching the structure of the
dataset / iterator elements and specifying the type of the individual
components.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
get_structure(dataset_or_iterator))
@tf_export("data.Options")
class Options(options_lib.OptionsBase):
"""Represents options for tf.data.Dataset.
An `Options` object can be used, for instance, to control which graph
optimizations to apply or whether to use performance modeling to dynamically
tune the parallelism of operations such as `tf.data.Dataset.map` or
`tf.data.Dataset.interleave`.
After constructing an `Options` object, use `dataset.with_options(options)` to
apply the options to a dataset.
>>> dataset = tf.data.Dataset.range(3)
>>> options = tf.data.Options()
>>> # Set options here.
>>> dataset = dataset.with_options(options)
"""
experimental_deterministic = options_lib.create_option(
name="experimental_deterministic",
ty=bool,
docstring=
"Whether the outputs need to be produced in deterministic order. If None,"
" defaults to True.")
experimental_distribute = options_lib.create_option(
name="experimental_distribute",
ty=distribute_options.DistributeOptions,
docstring=
"The distribution strategy options associated with the dataset. See "
"`tf.data.experimental.DistributeOptions` for more details.",
default_factory=distribute_options.DistributeOptions)
experimental_optimization = options_lib.create_option(
name="experimental_optimization",
ty=optimization_options.OptimizationOptions,
docstring=
"The optimization options associated with the dataset. See "
"`tf.data.experimental.OptimizationOptions` for more details.",
default_factory=optimization_options.OptimizationOptions)
experimental_slack = options_lib.create_option(
name="experimental_slack",
ty=bool,
docstring="Whether to introduce 'slack' in the last `prefetch` of the "
"input pipeline, if it exists. This may reduce CPU contention with "
"accelerator host-side activity at the start of a step. The slack "
"frequency is determined by the number of devices attached to this "
"input pipeline. If None, defaults to False.")
experimental_stats = options_lib.create_option(
name="experimental_stats",
ty=stats_options.StatsOptions,
docstring=
"The statistics options associated with the dataset. See "
"`tf.data.experimental.StatsOptions` for more details.",
default_factory=stats_options.StatsOptions)
experimental_threading = options_lib.create_option(
name="experimental_threading",
ty=threading_options.ThreadingOptions,
docstring=
"The threading options associated with the dataset. See "
"`tf.data.experimental.ThreadingOptions` for more details.",
default_factory=threading_options.ThreadingOptions)
experimental_external_state_policy = options_lib.create_option(
name="experimental_external_state_policy",
ty=distribute_options.ExternalStatePolicy,
docstring="By default, tf.data will refuse to serialize a dataset or "
"checkpoint its iterator if the dataset contains a stateful op as the "
"serialization / checkpointing won't be able to capture its state. "
"Users can -- at their own risk -- override this restriction by "
"explicitly specifying that they are fine throwing away the state "
"in these ops. There are three settings available - IGNORE: in which we"
"completely ignore any state; WARN: We warn the user that some state "
"might be thrown away; FAIL: We fail if any state is being captured.",
default_factory=lambda: distribute_options.ExternalStatePolicy.WARN)
def _graph_rewrites(self):
"""Produces the list of enabled static graph rewrites."""
result = []
if self.experimental_optimization is not None:
result.extend(self.experimental_optimization._graph_rewrites()) # pylint: disable=protected-access
else:
# Apply default options
result.extend(
optimization_options.OptimizationOptions()._graph_rewrites()) # pylint: disable=protected-access
if self.experimental_deterministic is False:
result.append("make_sloppy")
if self.experimental_stats and self.experimental_stats.latency_all_edges:
result.append("latency_all_edges")
if self.experimental_slack:
result.append("slack")
if (self.experimental_distribute and
self.experimental_distribute._make_stateless): # pylint: disable=protected-access
result.append("make_stateless")
return result
def _graph_rewrite_configs(self):
"""Produces the list of configurations for enabled graph optimizations."""
result = []
if self.experimental_optimization:
result.extend(self.experimental_optimization._graph_rewrite_configs()) # pylint: disable=protected-access
if self.experimental_slack:
num_devices = self.experimental_distribute.num_devices
if num_devices is None:
num_devices = 1
result.append("slack:slack_period:%d" % num_devices)
return result
def _autotune_settings(self):
if self.experimental_optimization is not None:
return self.experimental_optimization._autotune_settings() # pylint: disable=protected-access
# Return default autotune options
return optimization_options.OptimizationOptions()._autotune_settings() # pylint: disable=protected-access
def merge(self, options):
"""Merges itself with the given `tf.data.Options`.
The given `tf.data.Options` can be merged as long as there does not exist an
attribute that is set to different values in `self` and `options`.
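For example, a minimal sketch in which the two options set disjoint,
non-conflicting fields:
>>> options1 = tf.data.Options()
>>> options1.experimental_deterministic = False
>>> options2 = tf.data.Options()
>>> options2.experimental_slack = True
>>> merged = options1.merge(options2)
>>> merged.experimental_deterministic, merged.experimental_slack
(False, True)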
Args:
options: a `tf.data.Options` to merge with
Raises:
ValueError: if the given `tf.data.Options` cannot be merged
Returns:
New `tf.data.Options()` object which is the result of merging self with
the input `tf.data.Options`.
"""
return options_lib.merge_options(self, options)
class DatasetSource(DatasetV2):
"""Abstract class representing a dataset with no inputs."""
def _inputs(self):
return []
class UnaryDataset(DatasetV2):
"""Abstract class representing a dataset with one input."""
def __init__(self, input_dataset, variant_tensor):
self._input_dataset = input_dataset
super(UnaryDataset, self).__init__(variant_tensor)
def _inputs(self):
return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
"""Represents a unary dataset with the same input and output structure."""
def __init__(self, input_dataset, variant_tensor):
self._input_dataset = input_dataset
super(UnaryUnchangedStructureDataset, self).__init__(
input_dataset, variant_tensor)
@property
def element_spec(self):
return self._input_dataset.element_spec
class TensorDataset(DatasetSource):
"""A `Dataset` with a single element."""
def __init__(self, element):
"""See `Dataset.from_tensors()` for details."""
element = structure.normalize_element(element)
self._structure = structure.type_spec_from_value(element)
self._tensors = structure.to_tensor_list(self._structure, element)
variant_tensor = gen_dataset_ops.tensor_dataset(
self._tensors,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(TensorDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
class TensorSliceDataset(DatasetSource):
"""A `Dataset` of slices from a dataset element."""
def __init__(self, element):
"""See `Dataset.from_tensor_slices()` for details."""
element = structure.normalize_element(element)
batched_spec = structure.type_spec_from_value(element)
self._tensors = structure.to_batched_tensor_list(batched_spec, element)
self._structure = nest.map_structure(
lambda component_spec: component_spec._unbatch(), batched_spec) # pylint: disable=protected-access
batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(
self._tensors[0].get_shape()[0]))
for t in self._tensors[1:]:
batch_dim.assert_is_compatible_with(tensor_shape.Dimension(
tensor_shape.dimension_value(t.get_shape()[0])))
variant_tensor = gen_dataset_ops.tensor_slice_dataset(
self._tensors,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(TensorSliceDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
class SparseTensorSliceDataset(DatasetSource):
"""A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""
def __init__(self, sparse_tensor):
"""See `Dataset.from_sparse_tensor_slices()` for details."""
if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
raise TypeError(
"`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format(
sparse_tensor))
self._sparse_tensor = sparse_tensor
indices_shape = self._sparse_tensor.indices.get_shape()
shape_shape = self._sparse_tensor.dense_shape.get_shape()
rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),
tensor_spec.TensorSpec([None],
self._sparse_tensor.dtype),
tensor_spec.TensorSpec([rank], dtypes.int64))
variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
self._sparse_tensor.indices, self._sparse_tensor.values,
self._sparse_tensor.dense_shape)
super(SparseTensorSliceDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
class _VariantDataset(DatasetV2):
"""A Dataset wrapper around a `tf.variant`-typed function argument."""
def __init__(self, dataset_variant, structure):
self._structure = structure
super(_VariantDataset, self).__init__(dataset_variant)
def _inputs(self):
return []
@property
def element_spec(self):
return self._structure
class _NestedVariant(composite_tensor.CompositeTensor):
def __init__(self, variant_tensor, element_spec, dataset_shape):
self._variant_tensor = variant_tensor
self._element_spec = element_spec
self._dataset_shape = dataset_shape
@property
def _type_spec(self):
return DatasetSpec(self._element_spec, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
"""Constructs a dataset from the given variant and structure.
Args:
variant: A scalar `tf.variant` tensor representing a dataset.
structure: A `tf.data.experimental.Structure` object representing the
structure of each element in the dataset.
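For example, a round-trip sketch through `to_variant` (illustrative):
>>> dataset = tf.data.Dataset.range(3)
>>> variant = tf.data.experimental.to_variant(dataset)
>>> restored = tf.data.experimental.from_variant(
...     variant, tf.data.experimental.get_structure(dataset))
>>> list(restored.as_numpy_iterator())
[0, 1, 2]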
Returns:
A `tf.data.Dataset` instance.
"""
return _VariantDataset(variant, structure) # pylint: disable=protected-access
@tf_export("data.experimental.to_variant")
def to_variant(dataset):
"""Returns a variant representing the given dataset.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A scalar `tf.variant` tensor representing the given dataset.
"""
return dataset._variant_tensor # pylint: disable=protected-access
@tf_export(
"data.DatasetSpec",
v1=["data.DatasetSpec", "data.experimental.DatasetStructure"])
class DatasetSpec(type_spec.BatchableTypeSpec):
"""Type specification for `tf.data.Dataset`.
See `tf.TypeSpec` for more information about TensorFlow type specifications.
>>> dataset = tf.data.Dataset.range(3)
>>> tf.data.DatasetSpec.from_value(dataset)
DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))
"""
__slots__ = ["_element_spec", "_dataset_shape"]
def __init__(self, element_spec, dataset_shape=()):
self._element_spec = element_spec
self._dataset_shape = tensor_shape.as_shape(dataset_shape)
@property
def value_type(self):
return _VariantDataset
def _serialize(self):
return (self._element_spec, self._dataset_shape)
@property
def _component_specs(self):
return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)
def _to_components(self, value):
return value._variant_tensor # pylint: disable=protected-access
def _from_components(self, components):
# pylint: disable=protected-access
if self._dataset_shape.ndims == 0:
return _VariantDataset(components, self._element_spec)
else:
return _NestedVariant(components, self._element_spec, self._dataset_shape)
def _to_tensor_list(self, value):
return [
ops.convert_to_tensor(
tf_nest.map_structure(lambda x: x._variant_tensor, value)) # pylint: disable=protected-access
]
@staticmethod
def from_value(value):
"""Creates a `DatasetSpec` for the given `tf.data.Dataset` value."""
return DatasetSpec(value.element_spec) # pylint: disable=protected-access
def _batch(self, batch_size):
return DatasetSpec(
self._element_spec,
tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))
def _unbatch(self):
if self._dataset_shape.ndims == 0:
raise ValueError("Unbatching a dataset is only supported for rank >= 1")
return DatasetSpec(self._element_spec, self._dataset_shape[1:])
def _to_batched_tensor_list(self, value):
if self._dataset_shape.ndims == 0:
raise ValueError("Unbatching a dataset is only supported for rank >= 1")
return self._to_tensor_list(value)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self
class StructuredFunctionWrapper(object):
"""A function wrapper that supports structured arguments and return values."""
# pylint: disable=protected-access
def __init__(self,
func,
transformation_name,
dataset=None,
input_classes=None,
input_shapes=None,
input_types=None,
input_structure=None,
add_to_graph=True,
use_legacy_function=False,
defun_kwargs=None):
"""Creates a new `StructuredFunctionWrapper` for the given function.
Args:
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
input_structure: (Optional.) A `Structure` object. If given, this argument
defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph, if it exists.
use_legacy_function: (Optional.) A boolean that determines whether the
function should be created using `tensorflow.python.eager.function.defun`
(default behavior) or `tensorflow.python.framework.function.Defun`
(legacy behavior).
defun_kwargs: (Optional.) A dictionary mapping string argument names to
values. If supplied, will be passed to `function` as keyword arguments.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
`input_shapes`, and `input_types` is passed.
"""
if input_structure is None:
if dataset is None:
if input_classes is None or input_shapes is None or input_types is None:
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = structure.convert_legacy_structure(
input_types, input_shapes, input_classes)
else:
if not (input_classes is None and input_shapes is None and
input_types is None):
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = dataset.element_spec
else:
if not (dataset is None and input_classes is None and input_shapes is None
and input_types is None):
raise ValueError("Either `dataset`, `input_structure`, or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = input_structure
self._func = func
# There is no graph to add in eager mode.
add_to_graph &= not context.executing_eagerly()
# There are some lifetime issues when a legacy function is not added to an
# out-living graph. It's already deprecated, so we are de-prioritizing the fix.
add_to_graph |= use_legacy_function
if defun_kwargs is None:
defun_kwargs = {}
readable_transformation_name = transformation_name.replace(
".", "_")[:-2] if len(transformation_name) > 2 else ""
func_name = "_".join(
[readable_transformation_name,
function_utils.get_func_name(func)])
# Sanitize function name to remove symbols that interfere with graph
# construction.
for symbol in ["<", ">", "\\", "'", " "]:
func_name = func_name.replace(symbol, "")
ag_ctx = autograph_ctx.control_status_ctx()
def _warn_if_collections(transformation_name):
"""Prints a warning if the given graph uses common graph collections.
NOTE(mrry): Currently a warning is only generated for resources. Any
variables created will be automatically hoisted out to the outermost scope
using `init_scope()`. Some collections (such as for control-flow contexts)
are benign and should not generate a warning.
Args:
transformation_name: A human-readable name for the transformation.
"""
warnings.warn("Creating resources inside a function passed to %s "
"is not supported. Create each resource outside the "
"function, and capture it inside the function to use it." %
transformation_name, stacklevel=5)
def _wrapper_helper(*args):
"""Wrapper for passing nested structures to and from tf.data functions."""
nested_args = structure.from_compatible_tensor_list(
self._input_structure, args)
if not _should_unpack_args(nested_args):
nested_args = (nested_args,)
ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
# If `func` returns a list of tensors, `nest.flatten()` and
# `ops.convert_to_tensor()` would conspire to attempt to stack
# those tensors into a single tensor, because the customized
# version of `nest.flatten()` does not recurse into lists. Since
# it is more likely that the list arose from returning the
# result of an operation (such as `tf.numpy_function()`) that returns a
# list of not-necessarily-stackable tensors, we treat the
# returned value as a `tuple` instead. A user wishing to pack
# the return value into a single tensor can use an explicit
# `tf.stack()` before returning.
if isinstance(ret, list):
ret = tuple(ret)
try:
self._output_structure = structure.type_spec_from_value(ret)
except (ValueError, TypeError):
six.reraise(
TypeError,
TypeError("Unsupported return value from function passed to "
"%s: %s." % (transformation_name, ret)),
sys.exc_info()[2])
return ret
if use_legacy_function:
func_name = func_name + "_" + str(ops.uid())
@function.Defun(
*structure.get_flat_tensor_types(self._input_structure),
func_name=func_name,
**defun_kwargs)
def wrapper_fn(*args):
ret = _wrapper_helper(*args)
# _warn_if_collections(transformation_name, ops.get_default_graph(), 0)
return structure.to_tensor_list(self._output_structure, ret)
self._function = wrapper_fn
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
else:
# Use the private method that will execute `wrapper_fn` but delay
# adding it to the graph in case (e.g.) we need to rerun the function.
self._function._create_definition_if_needed()
if resource_tracker.resources:
_warn_if_collections(transformation_name)
else:
defun_kwargs.update({"func_name": func_name})
# Note: _wrapper_helper will apply autograph based on context.
@eager_function.defun_with_attributes(
input_signature=structure.get_flat_tensor_specs(
self._input_structure),
autograph=False,
attributes=defun_kwargs)
def wrapper_fn(*args): # pylint: disable=missing-docstring
ret = _wrapper_helper(*args)
ret = structure.to_tensor_list(self._output_structure, ret)
return [ops.convert_to_tensor(t) for t in ret]
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
# TODO(b/141462134): Switch to using garbage collection.
self._function = wrapper_fn.get_concrete_function()
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
if resource_tracker.resources:
_warn_if_collections(transformation_name)
outer_graph_seed = ops.get_default_graph().seed
if outer_graph_seed and self._function.graph.seed == outer_graph_seed:
if self._function.graph._seed_used:
warnings.warn(
"Seed %s from outer graph might be getting used by function %s, "
"if the random op has not been provided any seed. Explicitly set "
"the seed in the function if this is not the intended behavior."
%(outer_graph_seed, func_name), stacklevel=4)
# pylint: enable=protected-access
@property
def output_structure(self):
return self._output_structure
@property
def output_classes(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._output_structure)
@property
def output_shapes(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._output_structure)
@property
def output_types(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._output_structure)
@property
def function(self):
return self._function
class _GeneratorDataset(DatasetSource):
"""A `Dataset` that generates elements by invoking a function."""
def __init__(self, init_args, init_func, next_func, finalize_func):
"""Constructs a `_GeneratorDataset`.
Args:
init_args: A nested structure representing the arguments to `init_func`.
init_func: A TensorFlow function that will be called on `init_args` each
time a C++ iterator over this dataset is constructed. Returns a nested
structure representing the "state" of the dataset.
next_func: A TensorFlow function that will be called on the result of
`init_func` to produce each element, and that raises `OutOfRangeError`
to terminate iteration.
finalize_func: A TensorFlow function that will be called on the result of
`init_func` immediately before a C++ iterator over this dataset is
destroyed. The return value is ignored.
"""
self._init_args = init_args
self._init_structure = structure.type_spec_from_value(init_args)
self._init_func = StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=self._init_structure)
self._next_func = StructuredFunctionWrapper(
next_func,
self._transformation_name(),
input_structure=self._init_func.output_structure)
self._finalize_func = StructuredFunctionWrapper(
finalize_func,
self._transformation_name(),
input_structure=self._init_func.output_structure)
variant_tensor = gen_dataset_ops.generator_dataset(
structure.to_tensor_list(self._init_structure, self._init_args) +
self._init_func.function.captured_inputs,
self._next_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
init_func=self._init_func.function,
next_func=self._next_func.function,
finalize_func=self._finalize_func.function,
**self._flat_structure)
super(_GeneratorDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._next_func.output_structure
def _transformation_name(self):
return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
"""A `Dataset` that zips its inputs together."""
def __init__(self, datasets):
"""See `Dataset.zip()` for details."""
for ds in nest.flatten(datasets):
if not isinstance(ds, DatasetV2):
if isinstance(ds, list):
message = ("The argument to `Dataset.zip()` must be a nested "
"structure of `Dataset` objects. Nested structures do not "
"support Python lists; please use a tuple instead.")
else:
message = ("The argument to `Dataset.zip()` must be a nested "
"structure of `Dataset` objects.")
raise TypeError(message)
self._datasets = datasets
self._structure = nest.pack_sequence_as(
self._datasets,
[ds.element_spec for ds in nest.flatten(self._datasets)])
variant_tensor = gen_dataset_ops.zip_dataset(
[ds._variant_tensor for ds in nest.flatten(self._datasets)],
**self._flat_structure)
super(ZipDataset, self).__init__(variant_tensor)
def _inputs(self):
return nest.flatten(self._datasets)
@property
def element_spec(self):
return self._structure
class ConcatenateDataset(DatasetV2):
"""A `Dataset` that concatenates its input with given dataset."""
def __init__(self, input_dataset, dataset_to_concatenate):
"""See `Dataset.concatenate()` for details."""
self._input_dataset = input_dataset
self._dataset_to_concatenate = dataset_to_concatenate
output_types = get_legacy_output_types(input_dataset)
if output_types != get_legacy_output_types(dataset_to_concatenate):
raise TypeError(
"Two datasets to concatenate have different types %s and %s" %
(output_types, get_legacy_output_types(dataset_to_concatenate)))
output_classes = get_legacy_output_classes(input_dataset)
if output_classes != get_legacy_output_classes(dataset_to_concatenate):
raise TypeError(
"Two datasets to concatenate have different classes %s and %s" %
(output_classes, get_legacy_output_classes(dataset_to_concatenate)))
input_shapes = get_legacy_output_shapes(self._input_dataset)
output_shapes = nest.pack_sequence_as(input_shapes, [
ts1.most_specific_compatible_shape(ts2)
for (ts1, ts2) in zip(
nest.flatten(input_shapes),
nest.flatten(get_legacy_output_shapes(
self._dataset_to_concatenate)))
])
self._structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
self._input_datasets = [input_dataset, dataset_to_concatenate]
# pylint: disable=protected-access
variant_tensor = gen_dataset_ops.concatenate_dataset(
input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,
**self._flat_structure)
# pylint: enable=protected-access
super(ConcatenateDataset, self).__init__(variant_tensor)
def _inputs(self):
return self._input_datasets
@property
def element_spec(self):
return self._structure
class RepeatDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that repeats its input several times."""
def __init__(self, input_dataset, count):
"""See `Dataset.repeat()` for details."""
self._input_dataset = input_dataset
if count is None:
self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
else:
self._count = ops.convert_to_tensor(
count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.repeat_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._flat_structure)
super(RepeatDataset, self).__init__(input_dataset, variant_tensor)
class RangeDataset(DatasetSource):
"""A `Dataset` of a step separated range of values."""
def __init__(self, *args, **kwargs):
"""See `Dataset.range()` for details."""
self._parse_args(*args, **kwargs)
self._structure = tensor_spec.TensorSpec([], self._output_type)
variant_tensor = gen_dataset_ops.range_dataset(
start=self._start,
stop=self._stop,
step=self._step,
**self._flat_structure)
super(RangeDataset, self).__init__(variant_tensor)
def _parse_args(self, *args, **kwargs):
"""Parse arguments according to the same rules as the `range()` builtin."""
if len(args) == 1:
self._start = self._build_tensor(0, "start")
self._stop = self._build_tensor(args[0], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 2:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 3:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(args[2], "step")
else:
raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
if "output_type" in kwargs:
self._output_type = kwargs["output_type"]
else:
self._output_type = dtypes.int64
def _build_tensor(self, int64_value, name):
return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)
@property
def element_spec(self):
return self._structure
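# Editor's sketch (not part of the original source): `_parse_args` mirrors the
# `range()` builtin, so all three call forms behave as shown below (assuming
# TF 2.x eager execution).
def _example_range():
  import tensorflow as tf
  stop_only = [int(x) for x in tf.data.Dataset.range(3)]      # [0, 1, 2]
  start_stop = [int(x) for x in tf.data.Dataset.range(2, 5)]  # [2, 3, 4]
  stepped = [int(x) for x in tf.data.Dataset.range(0, 6, 2)]  # [0, 2, 4]
  return stop_only, start_stop, stepped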
class _MemoryCacheDeleter(object):
"""An object which cleans up an anonymous memory cache resource.
An alternative to defining a __del__ method on an object. Even if the parent
object is part of a reference cycle, the cycle will be collectable.
"""
def __init__(self, handle, device, deleter):
self._deleter = deleter
self._handle = handle
self._device = device
self._eager_mode = context.executing_eagerly()
def __del__(self):
with ops.device(self._device):
# Make sure the resource is deleted in the same mode as it was created in.
if self._eager_mode:
with context.eager_mode():
gen_dataset_ops.delete_memory_cache(
handle=self._handle, deleter=self._deleter)
else:
with context.graph_mode():
gen_dataset_ops.delete_memory_cache(
handle=self._handle, deleter=self._deleter)
class _MemoryCache(object):
"""Represents a memory cache resource."""
def __init__(self):
super(_MemoryCache, self).__init__()
self._device = context.context().device_name
self._handle, self._deleter = (gen_dataset_ops.anonymous_memory_cache())
self._resource_deleter = _MemoryCacheDeleter(
handle=self._handle, device=self._device, deleter=self._deleter)
@property
def handle(self):
return self._handle
class CacheDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that caches elements of its input."""
def __init__(self, input_dataset, filename):
"""See `Dataset.cache()` for details."""
self._input_dataset = input_dataset
self._filename = ops.convert_to_tensor(
filename, dtype=dtypes.string, name="filename")
if tf2.enabled() and (context.executing_eagerly() or ops.inside_function()):
self._cache = _MemoryCache()
variant_tensor = gen_dataset_ops.cache_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
filename=self._filename,
cache=self._cache.handle,
**self._flat_structure)
else:
variant_tensor = gen_dataset_ops.cache_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
filename=self._filename,
**self._flat_structure)
super(CacheDataset, self).__init__(input_dataset, variant_tensor)
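# Editor's sketch (not part of the original source): with an empty filename,
# `cache()` takes the in-memory path above (in TF 2.x eager mode, the
# anonymous `_MemoryCache` resource); with a filename it caches to disk.
def _example_cache():
  import tensorflow as tf
  ds = tf.data.Dataset.range(3).cache()  # empty filename => in-memory cache
  first_pass = [int(x) for x in ds]      # fills the cache
  second_pass = [int(x) for x in ds]     # served from the cache
  return first_pass, second_pass         # ([0, 1, 2], [0, 1, 2])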
class _RandomSeedGeneratorDeleter(object):
"""An object which cleans up an anonymous random seed generator resource.
An alternative to defining a __del__ method on an object. Even if the parent
object is part of a reference cycle, the cycle will be collectable.
"""
def __init__(self, handle, device, deleter):
self._deleter = deleter
self._handle = handle
self._device = device
self._eager_mode = context.executing_eagerly()
def __del__(self):
with ops.device(self._device):
# Make sure the resource is deleted in the same mode as it was created in.
if self._eager_mode:
with context.eager_mode():
gen_dataset_ops.delete_random_seed_generator(
handle=self._handle, deleter=self._deleter)
else:
with context.graph_mode():
gen_dataset_ops.delete_random_seed_generator(
handle=self._handle, deleter=self._deleter)
class _RandomSeedGenerator(object):
"""Represents a random seed generator resource."""
def __init__(self, seed, seed2):
super(_RandomSeedGenerator, self).__init__()
self._device = context.context().device_name
self._handle, self._deleter = (
gen_dataset_ops.anonymous_random_seed_generator(seed=seed, seed2=seed2))
self._resource_deleter = _RandomSeedGeneratorDeleter(
handle=self._handle, device=self._device, deleter=self._deleter)
@property
def handle(self):
return self._handle
class ShuffleDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that randomly shuffles the elements of its input."""
def __init__(self,
input_dataset,
buffer_size,
seed=None,
reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
Args:
input_dataset: The input dataset.
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.random.set_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
    Raises:
      ValueError: if invalid arguments are provided.
    """
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
self._seed, self._seed2 = random_seed.get_seed(seed)
if reshuffle_each_iteration is None:
self._reshuffle_each_iteration = True
else:
self._reshuffle_each_iteration = reshuffle_each_iteration
if tf2.enabled() and self._reshuffle_each_iteration and (
context.executing_eagerly() or ops.inside_function()):
self._seed_generator = _RandomSeedGenerator(self._seed, self._seed2)
variant_tensor = gen_dataset_ops.shuffle_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed_generator=self._seed_generator.handle,
**self._flat_structure)
else:
variant_tensor = gen_dataset_ops.shuffle_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed=self._seed,
seed2=self._seed2,
reshuffle_each_iteration=self._reshuffle_each_iteration,
**self._flat_structure)
super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)
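# Editor's sketch (not part of the original source): with an explicit seed and
# `reshuffle_each_iteration=False`, every iteration replays the same
# permutation; with the default (`True`), each pass reshuffles. Assumes TF 2.x
# eager execution.
def _example_shuffle():
  import tensorflow as tf
  ds = tf.data.Dataset.range(5).shuffle(
      buffer_size=5, seed=42, reshuffle_each_iteration=False)
  return [int(x) for x in ds] == [int(x) for x in ds]  # True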
class TakeDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` containing the first `count` elements from its input."""
def __init__(self, input_dataset, count):
"""See `Dataset.take()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.take_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._flat_structure)
super(TakeDataset, self).__init__(input_dataset, variant_tensor)
class SkipDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` skipping the first `count` elements from its input."""
def __init__(self, input_dataset, count):
"""See `Dataset.skip()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.skip_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._flat_structure)
super(SkipDataset, self).__init__(input_dataset, variant_tensor)
class ShardDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` for sharding its input."""
def __init__(self, input_dataset, num_shards, index):
"""See `Dataset.shard()` for details."""
self._input_dataset = input_dataset
self._num_shards = ops.convert_to_tensor(
num_shards, dtype=dtypes.int64, name="num_shards")
self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index")
variant_tensor = gen_dataset_ops.shard_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
num_shards=self._num_shards,
index=self._index,
**self._flat_structure)
super(ShardDataset, self).__init__(input_dataset, variant_tensor)
class BatchDataset(UnaryDataset):
"""A `Dataset` that batches contiguous elements from its input."""
def __init__(self, input_dataset, batch_size, drop_remainder):
"""See `Dataset.batch()` for details."""
self._input_dataset = input_dataset
self._batch_size = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)
    # pylint: disable=protected-access
    # NOTE(mrry): `constant_drop_remainder` may also be `None` (unknown
    # statically) or `False` (explicitly retaining the remainder), in which
    # case the batch dimension below is left unknown.
    if constant_drop_remainder:
      # pylint: disable=g-long-lambda
constant_batch_size = tensor_util.constant_value(self._batch_size)
self._structure = nest.map_structure(
lambda component_spec: component_spec._batch(constant_batch_size),
input_dataset.element_spec)
else:
self._structure = nest.map_structure(
lambda component_spec: component_spec._batch(None),
input_dataset.element_spec)
variant_tensor = gen_dataset_ops.batch_dataset_v2(
input_dataset._variant_tensor,
batch_size=self._batch_size,
drop_remainder=self._drop_remainder,
**self._flat_structure)
super(BatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
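# Editor's sketch (not part of the original source): the `_batch` call above
# is what makes `drop_remainder` affect the static shape: a statically-True
# value yields a known leading dimension, otherwise it stays unknown. Assumes
# TF 2.x.
def _example_batch_shape():
  import tensorflow as tf
  kept = tf.data.Dataset.range(10).batch(3)
  dropped = tf.data.Dataset.range(10).batch(3, drop_remainder=True)
  return (kept.element_spec.shape.as_list(),     # [None]
          dropped.element_spec.shape.as_list())  # [3]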
class _NumpyIterator(object):
"""Iterator over a dataset with elements converted to numpy."""
def __init__(self, dataset):
self._iterator = iter(dataset)
def __iter__(self):
return self
def next(self):
return nest.map_structure(lambda x: x.numpy(), next(self._iterator))
def __next__(self):
return self.next()
class _VariantTracker(tracking.CapturableResource):
"""Allows export of functions capturing a Dataset in SavedModels.
When saving a SavedModel, `tf.saved_model.save` traverses the object
graph. Since Datasets reference _VariantTracker objects, that traversal will
find a _VariantTracker for each Dataset and so know how to save and restore
functions which reference the Dataset's variant Tensor.
"""
def __init__(self, variant_tensor, resource_creator):
"""Record that `variant_tensor` is associated with `resource_creator`.
Args:
variant_tensor: The variant-dtype Tensor associated with the Dataset. This
Tensor will be a captured input to functions which use the Dataset, and
is used by saving code to identify the corresponding _VariantTracker.
resource_creator: A zero-argument function which creates a new
variant-dtype Tensor. This function will be included in SavedModels and
run to re-create the Dataset's variant Tensor on restore.
"""
super(_VariantTracker, self).__init__(device="CPU")
self._resource_handle = variant_tensor
self._create_resource = resource_creator
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
"""Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
"""
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
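# Editor's sketch (not part of the original source): exercising the helper
# above. A dimension may be padded up (or left unknown) but never shrunk, and
# ranks must match when both are known.
def _example_padded_shape_compat():
  import tensorflow as tf
  assert _is_padded_shape_compatible_with(
      tf.TensorShape([None, 10]), tf.TensorShape([3, 5]))  # pad up: OK
  assert not _is_padded_shape_compatible_with(
      tf.TensorShape([3, 4]), tf.TensorShape([3, 5]))      # cannot shrink
  assert not _is_padded_shape_compatible_with(
      tf.TensorShape([3]), tf.TensorShape([3, 5]))         # rank mismatch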
def _padded_shape_to_tensor(padded_shape, input_component_shape):
"""Converts `padded_shape` to a `tf.Tensor` representing that shape.
Args:
padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
be compatible.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.
Raises:
ValueError: If `padded_shape` is not a shape or not compatible with
`input_component_shape`.
TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
"""
try:
# Try to convert the `padded_shape` to a `tf.TensorShape`
padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
# We will return the "canonical" tensor representation, which uses
# `-1` in place of `None`.
ret = ops.convert_to_tensor(
[dim if dim is not None else -1
for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
except (TypeError, ValueError):
# The argument was not trivially convertible to a
# `tf.TensorShape`, so fall back on the conversion to tensor
# machinery.
ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
if ret.shape.dims is not None and len(ret.shape.dims) != 1:
six.reraise(ValueError, ValueError(
"Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
"shape was %s." % (padded_shape, ret.shape)), sys.exc_info()[2])
if ret.dtype != dtypes.int64:
six.reraise(
TypeError,
TypeError(
"Padded shape %s must be a 1-D tensor of tf.int64 values, but "
"its element type was %s." % (padded_shape, ret.dtype.name)),
sys.exc_info()[2])
padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)
if not _is_padded_shape_compatible_with(padded_shape_as_shape,
input_component_shape):
raise ValueError("The padded shape %s is not compatible with the "
"corresponding input component shape %s."
% (padded_shape_as_shape, input_component_shape))
return ret
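# Editor's sketch (not part of the original source): unknown dimensions become
# `-1` in the canonical tensor form, as described above. Assumes TF 2.x eager
# execution so `.numpy()` is available.
def _example_padded_shape_to_tensor():
  import tensorflow as tf
  t = _padded_shape_to_tensor(
      tf.TensorShape([None, 4]), tf.TensorShape([2, 3]))
  return t.numpy().tolist()  # [-1, 4]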
def _padding_value_to_tensor(value, output_type):
"""Converts the padding value to a tensor.
Args:
value: The padding value.
output_type: Its expected dtype.
Returns:
A scalar `Tensor`.
Raises:
ValueError: if the padding value is not a scalar.
TypeError: if the padding value's type does not match `output_type`.
"""
value = ops.convert_to_tensor(value, name="padding_value")
if not value.shape.is_compatible_with(tensor_shape.TensorShape([])):
raise ValueError("Padding value should be a scalar, but is not: %s" % value)
if value.dtype != output_type:
raise TypeError("Padding value tensor (%s) does not match output type: %s" %
(value, output_type))
return value
def _padding_values_or_default(padding_values, input_dataset):
"""Returns padding values with None elements replaced with default values."""
def make_zero(t):
if t.base_dtype == dtypes.string:
return ""
elif t.base_dtype == dtypes.variant:
error_msg = ("Unable to create padding for field of type 'variant' "
"because t.base_type == dtypes.variant == "
"{}.".format(t.base_dtype))
raise TypeError(error_msg)
elif t.base_dtype == dtypes.bfloat16:
# Special case `bfloat16` because it is not supported by NumPy.
return constant_op.constant(0, dtype=dtypes.bfloat16)
else:
return np.zeros_like(t.as_numpy_dtype())
def value_or_default(value, default):
return default if value is None else value
default_padding = nest.map_structure(
make_zero,
get_legacy_output_types(input_dataset))
return nest.map_structure_up_to(padding_values, value_or_default,
padding_values, default_padding)
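# Editor's sketch (not part of the original source): a `None` padding value is
# replaced by a zero of the component's dtype (empty string for `tf.string`).
# Assumes TF 2.x and that `nest.map_structure_up_to` treats a bare `None`
# padding structure as a single atom.
def _example_default_padding():
  import tensorflow as tf
  ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  return _padding_values_or_default(None, ds)  # numpy zero of the int dtype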
class PaddedBatchDataset(UnaryDataset):
"""A `Dataset` that batches and pads contiguous elements from its input."""
def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
drop_remainder):
"""See `Dataset.batch()` for details."""
self._input_dataset = input_dataset
def check_types(component_spec):
if not isinstance(component_spec, tensor_spec.TensorSpec):
raise TypeError("Padded batching of components of type ",
type(component_spec), " is not supported.")
nest.map_structure(check_types, input_dataset.element_spec)
self._batch_size = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
padding_values = _padding_values_or_default(padding_values, input_dataset)
input_shapes = get_legacy_output_shapes(input_dataset)
flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)
flat_padded_shapes_as_tensors = []
for input_component_shape, padded_shape in zip(
nest.flatten(input_shapes), flat_padded_shapes):
flat_padded_shapes_as_tensors.append(
_padded_shape_to_tensor(padded_shape, input_component_shape))
self._padded_shapes = nest.pack_sequence_as(input_shapes,
flat_padded_shapes_as_tensors)
self._padding_values = nest.map_structure_up_to(
input_shapes, _padding_value_to_tensor, padding_values,
get_legacy_output_types(input_dataset))
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
def _padded_shape_to_batch_shape(s):
return tensor_shape.TensorShape([
tensor_util.constant_value(self._batch_size)
if smart_cond.smart_constant_value(self._drop_remainder) else None
]).concatenate(tensor_util.constant_value_as_shape(s))
output_shapes = nest.map_structure(
_padded_shape_to_batch_shape, self._padded_shapes)
self._structure = structure.convert_legacy_structure(
get_legacy_output_types(self._input_dataset), output_shapes,
get_legacy_output_classes(self._input_dataset))
# pylint: disable=protected-access
# TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
if smart_cond.smart_constant_value(self._drop_remainder) is False:
variant_tensor = gen_dataset_ops.padded_batch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
padded_shapes=[
ops.convert_to_tensor(s, dtype=dtypes.int64)
for s in nest.flatten(self._padded_shapes)
],
padding_values=nest.flatten(self._padding_values),
output_shapes=structure.get_flat_tensor_shapes(self._structure))
else:
variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
padded_shapes=[
ops.convert_to_tensor(s, dtype=dtypes.int64)
for s in nest.flatten(self._padded_shapes)
],
padding_values=nest.flatten(self._padding_values),
drop_remainder=self._drop_remainder,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
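# Editor's sketch (not part of the original source): the classic padded-batch
# example, assuming TF 2.x eager execution. Each component is padded to the
# given `padded_shapes` (here, up to the longest element in the batch).
def _example_padded_batch():
  import tensorflow as tf
  ds = tf.data.Dataset.range(1, 4).map(lambda x: tf.fill([x], x))
  batch = next(iter(ds.padded_batch(3, padded_shapes=[None])))
  return batch.numpy().tolist()  # [[1, 0, 0], [2, 2, 0], [3, 3, 3]]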
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
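# Editor's sketch (not part of the original source): only genuine tuples are
# unpacked into positional arguments; lists and dicts are passed through as a
# single argument.
def _example_should_unpack_args():
  assert _should_unpack_args((1, 2))
  assert not _should_unpack_args([1, 2])
  assert not _should_unpack_args({"a": 1})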
class MapDataset(UnaryDataset):
"""A `Dataset` that maps a function over elements in its input."""
def __init__(self,
input_dataset,
map_func,
use_inter_op_parallelism=True,
preserve_cardinality=False,
use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._preserve_cardinality = preserve_cardinality
self._map_func = StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
variant_tensor = gen_dataset_ops.map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**self._flat_structure)
super(MapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._map_func.output_structure
def _transformation_name(self):
return "Dataset.map()"
class ParallelMapDataset(UnaryDataset):
"""A `Dataset` that maps a function over elements in its input in parallel."""
def __init__(self,
input_dataset,
map_func,
num_parallel_calls,
deterministic,
use_inter_op_parallelism=True,
preserve_cardinality=False,
use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._map_func = StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
if deterministic is None:
self._deterministic = "default"
elif deterministic:
self._deterministic = "true"
else:
self._deterministic = "false"
self._preserve_cardinality = preserve_cardinality
if deterministic is not None or compat.forward_compatible(2020, 3, 6):
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
variant_tensor = gen_dataset_ops.parallel_map_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
num_parallel_calls=self._num_parallel_calls,
deterministic=self._deterministic,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**self._flat_structure)
else:
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")
variant_tensor = gen_dataset_ops.parallel_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
num_parallel_calls=self._num_parallel_calls,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**self._flat_structure)
super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._map_func.output_structure
def _transformation_name(self):
return "Dataset.map()"
class FlatMapDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and flattens the result."""
def __init__(self, input_dataset, map_func):
"""See `Dataset.flat_map()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetSpec):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
variant_tensor = gen_dataset_ops.flat_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
**self._flat_structure)
super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "Dataset.flat_map()"
class InterleaveDataset(UnaryDataset):
"""A `Dataset` that interleaves the result of transformed inputs."""
def __init__(self, input_dataset, map_func, cycle_length, block_length):
"""See `Dataset.interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetSpec):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
variant_tensor = gen_dataset_ops.interleave_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
f=self._map_func.function,
**self._flat_structure)
super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "Dataset.interleave()"
class ParallelInterleaveDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and interleaves the result."""
def __init__(self,
input_dataset,
map_func,
cycle_length,
block_length,
num_parallel_calls,
buffer_output_elements=AUTOTUNE,
prefetch_input_elements=AUTOTUNE,
deterministic=None):
"""See `Dataset.interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetSpec):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
self._buffer_output_elements = ops.convert_to_tensor(
buffer_output_elements,
dtype=dtypes.int64,
name="buffer_output_elements")
self._prefetch_input_elements = ops.convert_to_tensor(
prefetch_input_elements,
dtype=dtypes.int64,
name="prefetch_input_elements")
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
if deterministic is None:
deterministic_string = "default"
elif deterministic:
deterministic_string = "true"
else:
deterministic_string = "false"
if (buffer_output_elements != AUTOTUNE or
prefetch_input_elements != AUTOTUNE or
compat.forward_compatible(2020, 3, 6)):
variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v4(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
self._buffer_output_elements,
self._prefetch_input_elements,
self._num_parallel_calls,
f=self._map_func.function,
deterministic=deterministic_string,
**self._flat_structure)
elif deterministic is not None or compat.forward_compatible(2020, 2, 20):
variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v3(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
self._num_parallel_calls,
f=self._map_func.function,
deterministic=deterministic_string,
**self._flat_structure)
else:
variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
self._num_parallel_calls,
f=self._map_func.function,
**self._flat_structure)
super(ParallelInterleaveDataset, self).__init__(input_dataset,
variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that filters its input according to a predicate function."""
def __init__(self, input_dataset, predicate, use_legacy_function=False):
"""See `Dataset.filter()` for details."""
self._input_dataset = input_dataset
wrapped_func = StructuredFunctionWrapper(
predicate,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
if not wrapped_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.bool)):
error_msg = ("`predicate` return type must be convertible to a scalar "
"boolean tensor. Was {}.").format(
wrapped_func.output_structure)
raise ValueError(error_msg)
self._predicate = wrapped_func
variant_tensor = gen_dataset_ops.filter_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
other_arguments=self._predicate.function.captured_inputs,
predicate=self._predicate.function,
**self._flat_structure)
super(FilterDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._predicate]
def _transformation_name(self):
return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that asynchronously prefetches its input."""
def __init__(self, input_dataset, buffer_size, slack_period=None):
"""See `Dataset.prefetch()` for details.
Args:
input_dataset: The input dataset.
buffer_size: See `Dataset.prefetch()` for details.
slack_period: (Optional.) An integer. If non-zero, determines the number
of GetNext calls before injecting slack into the execution. This may
        reduce CPU contention at the start of a step. Note that a TensorFlow
        user should not have to set this manually; instead, this behavior is
        enabled automatically via `tf.data.Options.experimental_slack`.
        Defaults to None.
"""
self._input_dataset = input_dataset
if buffer_size is None:
buffer_size = -1 # This is the sentinel for auto-tuning.
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
variant_tensor = gen_dataset_ops.prefetch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
slack_period=slack_period,
**self._flat_structure)
super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)
class WindowDataset(UnaryDataset):
"""A dataset that creates window datasets from the input elements."""
def __init__(self, input_dataset, size, shift, stride, drop_remainder):
"""See `window_dataset()` for more details."""
self._input_dataset = input_dataset
self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
self._stride = ops.convert_to_tensor(
stride, dtype=dtypes.int64, name="stride")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
self._structure = nest.pack_sequence_as(
get_legacy_output_classes(input_dataset), [
DatasetSpec( # pylint: disable=g-complex-comprehension
structure.convert_legacy_structure(
output_type, output_shape, output_class))
for output_class, output_shape, output_type in zip(
nest.flatten(get_legacy_output_classes(input_dataset)),
nest.flatten(get_legacy_output_shapes(input_dataset)),
nest.flatten(get_legacy_output_types(input_dataset)))
])
variant_tensor = gen_dataset_ops.window_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._size,
self._shift,
self._stride,
self._drop_remainder,
**self._flat_structure)
super(WindowDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
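# Editor's sketch (not part of the original source): each element of a
# windowed dataset is itself a dataset, matching the `DatasetSpec` structure
# built above. Assumes TF 2.x eager execution.
def _example_window():
  import tensorflow as tf
  ds = tf.data.Dataset.range(5).window(size=3, shift=2, drop_remainder=True)
  return [[int(v) for v in w] for w in ds]  # [[0, 1, 2], [2, 3, 4]]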
class _OptionsDataset(UnaryUnchangedStructureDataset):
"""An identity `Dataset` that stores options."""
def __init__(self, input_dataset, options):
self._input_dataset = input_dataset
self._options = input_dataset.options()
if self._options:
self._options = self._options.merge(options)
else:
self._options = options
variant_tensor = input_dataset._variant_tensor # pylint: disable=protected-access
super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)
def options(self):
return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and models performance."""
def __init__(self, input_dataset, algorithm, cpu_budget):
self._input_dataset = input_dataset
variant_tensor = gen_dataset_ops.model_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
algorithm=algorithm.value,
cpu_budget=cpu_budget,
**self._flat_structure)
super(_ModelDataset, self).__init__(input_dataset, variant_tensor)
class _OptimizeDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and applies optimizations."""
def __init__(self, input_dataset, optimizations, optimization_configs=None):
self._input_dataset = input_dataset
if optimizations is None:
optimizations = []
if optimization_configs is None:
optimization_configs = []
self._optimizations = ops.convert_to_tensor(
optimizations, dtype=dtypes.string, name="optimizations")
variant_tensor = gen_dataset_ops.optimize_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._optimizations,
optimization_configs=optimization_configs,
**self._flat_structure)
super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and sets a stats aggregator."""
def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
self._input_dataset = input_dataset
self._stats_aggregator = aggregator
self._prefix = prefix
self._counter_prefix = counter_prefix
variant_tensor = ged_ops.set_stats_aggregator_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._stats_aggregator._resource, # pylint: disable=protected-access
self._prefix,
self._counter_prefix,
**self._flat_structure)
super(_SetStatsAggregatorDataset, self).__init__(input_dataset,
variant_tensor)
class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, overriding intra-op parallelism."""
def __init__(self, input_dataset, max_intra_op_parallelism):
self._input_dataset = input_dataset
self._max_intra_op_parallelism = ops.convert_to_tensor(
max_intra_op_parallelism,
dtype=dtypes.int64,
name="max_intra_op_parallelism")
variant_tensor = ged_ops.max_intra_op_parallelism_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._max_intra_op_parallelism,
**self._flat_structure)
super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset,
variant_tensor)
class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, setting a private threadpool."""
def __init__(self, input_dataset, num_threads):
self._input_dataset = input_dataset
self._num_threads = ops.convert_to_tensor(
num_threads, dtype=dtypes.int64, name="num_threads")
variant_tensor = ged_ops.private_thread_pool_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_threads,
**self._flat_structure)
super(_PrivateThreadPoolDataset, self).__init__(input_dataset,
variant_tensor)
def normalize_to_dense(dataset):
"""Normalizes non-tensor components in a dataset to dense representations.
This is necessary for dataset transformations that slice along the batch
dimension and are oblivious to non-tensors, e.g. `unbatch`, `rebatch`.
Args:
dataset: Dataset to normalize.
Returns:
A dataset whose sparse and ragged tensors have been normalized to their
dense representations.
"""
# NOTE(mrry): This leads to a somewhat inefficient re-encoding step for all
# non-tensor components.
#
# TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.
if _should_unpack_args(dataset.element_spec):
def normalize(*args):
return structure.to_batched_tensor_list(dataset.element_spec, tuple(args))
else:
def normalize(arg):
return structure.to_batched_tensor_list(dataset.element_spec, arg)
normalized_dataset = dataset.map(normalize)
# NOTE(mrry): Our `map()` has lost information about the structure of
# non-tensor components, so re-apply the structure of the original dataset.
return _RestructuredDataset(normalized_dataset, dataset.element_spec)
class _RestructuredDataset(UnaryDataset):
"""An internal helper for changing the structure and shape of a dataset."""
def __init__(self, dataset, structure):
self._input_dataset = dataset
self._structure = structure
variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access
super(_RestructuredDataset, self).__init__(dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
class _UnbatchDataset(UnaryDataset):
"""A dataset that splits the elements of its input into multiple elements."""
def __init__(self, input_dataset):
"""See `unbatch()` for more details."""
flat_shapes = input_dataset._flat_shapes # pylint: disable=protected-access
if any(s.ndims == 0 for s in flat_shapes):
raise ValueError("Cannot unbatch an input with scalar components.")
known_batch_dim = tensor_shape.Dimension(None)
for s in flat_shapes:
try:
known_batch_dim = known_batch_dim.merge_with(s[0])
except ValueError:
raise ValueError("Cannot unbatch an input whose components have "
"different batch sizes.")
self._input_dataset = input_dataset
self._structure = nest.map_structure(
lambda component_spec: component_spec._unbatch(), # pylint: disable=protected-access
get_structure(input_dataset))
variant_tensor = ged_ops.unbatch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._flat_structure)
super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
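# Editor's sketch (not part of the original source): `unbatch()` splits each
# element along its leading (batch) dimension, which is why scalar components
# are rejected above. Assumes TF 2.x eager execution.
def _example_unbatch():
  import tensorflow as tf
  ds = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]]).unbatch()
  return [int(v) for v in ds]  # [1, 2, 3, 4]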
def _collect_resource_inputs(op):
"""Collects resource inputs for the given ops (and its variant inputs)."""
def _process(op_queue, seen_ops):
"""Processes the next element of the op queue.
Args:
op_queue: Queue of Dataset operations to process.
seen_ops: Already processed set of Operations.
Returns:
A 2-tuple containing sets of resource handles. The first tuple entry
contains read-only handles and the second entry contains read-write
handles.
"""
reads = []
writes = []
op = op_queue.pop()
if op in seen_ops:
return reads, writes
seen_ops.add(op)
# TODO(b/150139257): All resource inputs are in writes right now since we
# have not updated the functional ops to set the special attribute that ACD
# uses to figure out which of the op's inputs are read-only.
reads, writes = acd_utils.get_read_write_resource_inputs(op)
# Conservatively assume that any variant inputs are datasets.
op_queue.extend(t.op for t in op.inputs if t.dtype == dtypes.variant)
return reads, writes
op_queue = [op]
seen_ops = set()
all_reads = []
all_writes = []
while op_queue:
reads, writes = _process(op_queue, seen_ops)
all_reads.extend(reads)
all_writes.extend(writes)
return all_reads, all_writes
@auto_control_deps.register_acd_resource_resolver
def _resource_resolver(op, resource_reads, resource_writes):
"""Updates resource inputs for tf.data ops with indirect dependencies."""
updated = False
if op.type in [
"DatasetToSingleElement", "DatasetToTFRecord", "ReduceDataset"
]:
reads, writes = _collect_resource_inputs(op)
for inp in reads:
if inp not in resource_reads:
updated = True
resource_reads.add(inp)
for inp in writes:
if inp not in resource_writes:
updated = True
resource_writes.add(inp)
if op.type in [
"IteratorGetNext", "IteratorGetNextSync", "IteratorGetNextAsOptional"
]:
iterator_resource = op.inputs[0]
make_iterator_ops = [
op for op in iterator_resource.consumers() if op.type == "MakeIterator"
]
if len(make_iterator_ops) == 1:
reads, writes = _collect_resource_inputs(make_iterator_ops[0])
for inp in reads:
if inp not in resource_reads:
updated = True
resource_reads.add(inp)
for inp in writes:
if inp not in resource_writes:
updated = True
resource_writes.add(inp)
return updated
# ===== End of file: tensorflow/python/data/ops/dataset_ops.py (repo: renyi533/tensorflow, license: apache-2.0) =====
from collections import OrderedDict
import pytest
from pandas.util._validators import validate_bool_kwarg, validate_kwargs
_fname = "func"
def test_bad_kwarg():
good_arg = "f"
bad_arg = good_arg + "o"
compat_args = OrderedDict()
compat_args[good_arg] = "foo"
compat_args[bad_arg + "o"] = "bar"
kwargs = {good_arg: "foo", bad_arg: "bar"}
msg = (r"{fname}\(\) got an unexpected "
r"keyword argument '{arg}'".format(fname=_fname, arg=bad_arg))
with pytest.raises(TypeError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
bad_arg = "foo"
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=_fname))
compat_args = OrderedDict()
compat_args["foo"] = 1
compat_args["bar"] = "s"
compat_args["baz"] = None
kwarg_keys = ("foo", "bar", "baz")
kwarg_vals = (2, "s", None)
kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))
with pytest.raises(ValueError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = OrderedDict()
compat_args["f"] = None
compat_args["b"] = 1
compat_args["ba"] = "s"
kwargs = dict(f=None, b=1)
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = ("For argument \"%s\" expected type bool, received type %s" %
(name, type(value).__name__))
with pytest.raises(ValueError, match=msg):
validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
assert validate_bool_kwarg(value, name) == value
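# Editor's sketch (not part of the original tests): `validate_kwargs` accepts
# only keyword names present in `compat_args`, and each supplied value must
# match the default recorded there, as the tests above exercise.
def example_validate_kwargs():
    compat_args = OrderedDict()
    compat_args["sort"] = None
    validate_kwargs("concat", {"sort": None}, compat_args)  # passes silently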
# ===== End of file: pandas/tests/util/test_validate_kwargs.py (repo: GuessWhoSamFoo/pandas, license: bsd-3-clause) =====
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/armor/kashyyykian_ceremonial/shared_armor_kashyyykian_ceremonial_bracer_r.iff"
result.attribute_template_id = 0
result.stfName("wearables_name","armor_kashyyykian_ceremonial_bracer_r")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
# ===== End of file: data/scripts/templates/object/tangible/wearables/armor/kashyyykian_ceremonial/shared_armor_kashyyykian_ceremonial_bracer_r.py (repo: obi-two/Rebelion, license: mit) =====
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from future.utils import iteritems
import base64
import json
import logging
import os
import signal
import vim
from subprocess import PIPE
from tempfile import NamedTemporaryFile
from ycm import base, paths, vimsupport
from ycmd import utils
from ycmd import server_utils
from ycmd.request_wrap import RequestWrap
from ycm.diagnostic_interface import DiagnosticInterface
from ycm.omni_completer import OmniCompleter
from ycm import syntax_parse
from ycm.client.ycmd_keepalive import YcmdKeepalive
from ycm.client.base_request import ( BaseRequest, BuildRequestData,
HandleServerException )
from ycm.client.completer_available_request import SendCompleterAvailableRequest
from ycm.client.command_request import SendCommandRequest
from ycm.client.completion_request import ( CompletionRequest,
ConvertCompletionDataToVimData )
from ycm.client.debug_info_request import ( SendDebugInfoRequest,
FormatDebugInfoResponse )
from ycm.client.omni_completion_request import OmniCompletionRequest
from ycm.client.event_notification import ( SendEventNotificationAsync,
EventNotification )
from ycm.client.shutdown_request import SendShutdownRequest
def PatchNoProxy():
current_value = os.environ.get('no_proxy', '')
additions = '127.0.0.1,localhost'
os.environ['no_proxy'] = ( additions if not current_value
else current_value + ',' + additions )
# We need this so that Requests doesn't end up using the local HTTP proxy when
# talking to ycmd. Users should actually be setting this themselves when
# configuring a proxy server on their machine, but most don't know they need to
# or how to do it, so we do it for them.
# Relevant issues:
# https://github.com/Valloric/YouCompleteMe/issues/641
# https://github.com/kennethreitz/requests/issues/879
PatchNoProxy()
# Force the Python interpreter embedded in Vim (in which we are running) to
# ignore the SIGINT signal. This helps reduce the fallout of a user pressing
# Ctrl-C in Vim.
signal.signal( signal.SIGINT, signal.SIG_IGN )
HMAC_SECRET_LENGTH = 16
SERVER_SHUTDOWN_MESSAGE = (
"The ycmd server SHUT DOWN (restart with ':YcmRestartServer')." )
EXIT_CODE_UNEXPECTED_MESSAGE = (
"Unexpected exit code {code}. "
"Use the ':YcmToggleLogs' command to check the logs." )
CORE_UNEXPECTED_MESSAGE = (
"Unexpected error while loading the YCM core library. "
"Use the ':YcmToggleLogs' command to check the logs." )
CORE_MISSING_MESSAGE = (
'YCM core library not detected; you need to compile YCM before using it. '
'Follow the instructions in the documentation.' )
CORE_PYTHON2_MESSAGE = (
"YCM core library compiled for Python 2 but loaded in Python 3. "
"Set the 'g:ycm_server_python_interpreter' option to a Python 2 "
"interpreter path." )
CORE_PYTHON3_MESSAGE = (
"YCM core library compiled for Python 3 but loaded in Python 2. "
"Set the 'g:ycm_server_python_interpreter' option to a Python 3 "
"interpreter path." )
CORE_OUTDATED_MESSAGE = (
'YCM core library too old; PLEASE RECOMPILE by running the install.py '
'script. See the documentation for more details.' )
SERVER_IDLE_SUICIDE_SECONDS = 1800 # 30 minutes
DIAGNOSTIC_UI_FILETYPES = set( [ 'cpp', 'cs', 'c', 'objc', 'objcpp',
'typescript' ] )
CLIENT_LOGFILE_FORMAT = 'ycm_'
SERVER_LOGFILE_FORMAT = 'ycmd_{port}_{std}_'
# Flag to set a file handle inheritable by child processes on Windows. See
# https://msdn.microsoft.com/en-us/library/ms724935.aspx
HANDLE_FLAG_INHERIT = 0x00000001
class YouCompleteMe( object ):
def __init__( self, user_options ):
self._available_completers = {}
self._user_options = user_options
self._user_notified_about_crash = False
self._diag_interface = DiagnosticInterface( user_options )
self._omnicomp = OmniCompleter( user_options )
self._latest_file_parse_request = None
self._latest_completion_request = None
self._latest_diagnostics = []
self._logger = logging.getLogger( 'ycm' )
self._client_logfile = None
self._server_stdout = None
self._server_stderr = None
self._server_popen = None
self._filetypes_with_keywords_loaded = set()
self._ycmd_keepalive = YcmdKeepalive()
self._server_is_ready_with_cache = False
self._SetupLogging()
self._SetupServer()
self._ycmd_keepalive.Start()
self._complete_done_hooks = {
'cs': lambda self: self._OnCompleteDone_Csharp()
}
def _SetupServer( self ):
self._available_completers = {}
self._user_notified_about_crash = False
self._filetypes_with_keywords_loaded = set()
self._server_is_ready_with_cache = False
server_port = utils.GetUnusedLocalhostPort()
# The temp options file is deleted by ycmd during startup
with NamedTemporaryFile( delete = False, mode = 'w+' ) as options_file:
hmac_secret = os.urandom( HMAC_SECRET_LENGTH )
options_dict = dict( self._user_options )
options_dict[ 'hmac_secret' ] = utils.ToUnicode(
base64.b64encode( hmac_secret ) )
options_dict[ 'server_keep_logfiles' ] = self._user_options[
'keep_logfiles' ]
json.dump( options_dict, options_file )
options_file.flush()
args = [ paths.PathToPythonInterpreter(),
paths.PathToServerScript(),
'--port={0}'.format( server_port ),
'--options_file={0}'.format( options_file.name ),
'--log={0}'.format( self._user_options[ 'log_level' ] ),
'--idle_suicide_seconds={0}'.format(
SERVER_IDLE_SUICIDE_SECONDS ) ]
self._server_stdout = utils.CreateLogfile(
SERVER_LOGFILE_FORMAT.format( port = server_port, std = 'stdout' ) )
self._server_stderr = utils.CreateLogfile(
SERVER_LOGFILE_FORMAT.format( port = server_port, std = 'stderr' ) )
args.append( '--stdout={0}'.format( self._server_stdout ) )
args.append( '--stderr={0}'.format( self._server_stderr ) )
if self._user_options[ 'keep_logfiles' ]:
args.append( '--keep_logfiles' )
self._server_popen = utils.SafePopen( args, stdin_windows = PIPE,
stdout = PIPE, stderr = PIPE )
BaseRequest.server_location = 'http://127.0.0.1:' + str( server_port )
BaseRequest.hmac_secret = hmac_secret
self._NotifyUserIfServerCrashed()
def _SetupLogging( self ):
def FreeFileFromOtherProcesses( file_object ):
if utils.OnWindows():
from ctypes import windll
import msvcrt
file_handle = msvcrt.get_osfhandle( file_object.fileno() )
windll.kernel32.SetHandleInformation( file_handle,
HANDLE_FLAG_INHERIT,
0 )
self._client_logfile = utils.CreateLogfile( CLIENT_LOGFILE_FORMAT )
log_level = self._user_options[ 'log_level' ]
numeric_level = getattr( logging, log_level.upper(), None )
if not isinstance( numeric_level, int ):
raise ValueError( 'Invalid log level: {0}'.format( log_level ) )
self._logger.setLevel( numeric_level )
handler = logging.FileHandler( self._client_logfile )
# On Windows and Python prior to 3.4, file handles are inherited by child
# processes started with at least one replaced standard stream, which is the
# case when we start the ycmd server (we are redirecting all standard
# outputs into a pipe). These files cannot be removed while the child
# processes are still up. This is not desirable for a logfile because we
# want to remove it at Vim exit without having to wait for the ycmd server
# to be completely shut down. We need to make the logfile handle
# non-inheritable. See https://www.python.org/dev/peps/pep-0446 for more
# details.
FreeFileFromOtherProcesses( handler.stream )
formatter = logging.Formatter( '%(asctime)s - %(levelname)s - %(message)s' )
handler.setFormatter( formatter )
self._logger.addHandler( handler )
def IsServerAlive( self ):
return_code = self._server_popen.poll()
# When the process hasn't finished yet, poll() returns None.
return return_code is None
def IsServerReady( self ):
if not self._server_is_ready_with_cache and self.IsServerAlive():
with HandleServerException( display = False ):
self._server_is_ready_with_cache = BaseRequest.GetDataFromHandler(
'ready' )
return self._server_is_ready_with_cache
def _NotifyUserIfServerCrashed( self ):
if self._user_notified_about_crash or self.IsServerAlive():
return
self._user_notified_about_crash = True
return_code = self._server_popen.poll()
if return_code == server_utils.CORE_UNEXPECTED_STATUS:
error_message = CORE_UNEXPECTED_MESSAGE
elif return_code == server_utils.CORE_MISSING_STATUS:
error_message = CORE_MISSING_MESSAGE
elif return_code == server_utils.CORE_PYTHON2_STATUS:
error_message = CORE_PYTHON2_MESSAGE
elif return_code == server_utils.CORE_PYTHON3_STATUS:
error_message = CORE_PYTHON3_MESSAGE
elif return_code == server_utils.CORE_OUTDATED_STATUS:
error_message = CORE_OUTDATED_MESSAGE
else:
error_message = EXIT_CODE_UNEXPECTED_MESSAGE.format( code = return_code )
server_stderr = '\n'.join(
utils.ToUnicode( self._server_popen.stderr.read() ).splitlines() )
if server_stderr:
self._logger.error( server_stderr )
error_message = SERVER_SHUTDOWN_MESSAGE + ' ' + error_message
self._logger.error( error_message )
vimsupport.PostVimMessage( error_message )
def ServerPid( self ):
if not self._server_popen:
return -1
return self._server_popen.pid
def _ShutdownServer( self ):
SendShutdownRequest()
def RestartServer( self ):
vimsupport.PostVimMessage( 'Restarting ycmd server...' )
self._ShutdownServer()
self._SetupServer()
def CreateCompletionRequest( self, force_semantic = False ):
request_data = BuildRequestData()
if ( not self.NativeFiletypeCompletionAvailable() and
self.CurrentFiletypeCompletionEnabled() ):
wrapped_request_data = RequestWrap( request_data )
if self._omnicomp.ShouldUseNow( wrapped_request_data ):
self._latest_completion_request = OmniCompletionRequest(
self._omnicomp, wrapped_request_data )
return self._latest_completion_request
request_data[ 'working_dir' ] = utils.GetCurrentDirectory()
self._AddExtraConfDataIfNeeded( request_data )
if force_semantic:
request_data[ 'force_semantic' ] = True
self._latest_completion_request = CompletionRequest( request_data )
return self._latest_completion_request
def GetCompletions( self ):
request = self.GetCurrentCompletionRequest()
request.Start()
while not request.Done():
try:
if vimsupport.GetBoolValue( 'complete_check()' ):
return { 'words' : [], 'refresh' : 'always' }
except KeyboardInterrupt:
return { 'words' : [], 'refresh' : 'always' }
results = base.AdjustCandidateInsertionText( request.Response() )
return { 'words' : results, 'refresh' : 'always' }
def SendCommandRequest( self, arguments, completer ):
extra_data = {}
self._AddExtraConfDataIfNeeded( extra_data )
return SendCommandRequest( arguments, completer, extra_data )
def GetDefinedSubcommands( self ):
with HandleServerException():
return BaseRequest.PostDataToHandler( BuildRequestData(),
'defined_subcommands' )
return []
def GetCurrentCompletionRequest( self ):
return self._latest_completion_request
def GetOmniCompleter( self ):
return self._omnicomp
def FiletypeCompleterExistsForFiletype( self, filetype ):
try:
return self._available_completers[ filetype ]
except KeyError:
pass
exists_completer = SendCompleterAvailableRequest( filetype )
if exists_completer is None:
return False
self._available_completers[ filetype ] = exists_completer
return exists_completer
def NativeFiletypeCompletionAvailable( self ):
return any( [ self.FiletypeCompleterExistsForFiletype( x ) for x in
vimsupport.CurrentFiletypes() ] )
def NativeFiletypeCompletionUsable( self ):
return ( self.CurrentFiletypeCompletionEnabled() and
self.NativeFiletypeCompletionAvailable() )
def OnFileReadyToParse( self ):
if not self.IsServerAlive():
self._NotifyUserIfServerCrashed()
return
self._omnicomp.OnFileReadyToParse( None )
extra_data = {}
self._AddTagsFilesIfNeeded( extra_data )
self._AddSyntaxDataIfNeeded( extra_data )
self._AddExtraConfDataIfNeeded( extra_data )
self._latest_file_parse_request = EventNotification(
'FileReadyToParse', extra_data = extra_data )
self._latest_file_parse_request.Start()
def OnBufferUnload( self, deleted_buffer_file ):
SendEventNotificationAsync( 'BufferUnload', filepath = deleted_buffer_file )
def OnBufferVisit( self ):
extra_data = {}
self._AddUltiSnipsDataIfNeeded( extra_data )
SendEventNotificationAsync( 'BufferVisit', extra_data = extra_data )
def OnInsertLeave( self ):
SendEventNotificationAsync( 'InsertLeave' )
def OnCursorMoved( self ):
self._diag_interface.OnCursorMoved()
def _CleanLogfile( self ):
logging.shutdown()
if not self._user_options[ 'keep_logfiles' ]:
if self._client_logfile:
utils.RemoveIfExists( self._client_logfile )
def OnVimLeave( self ):
self._ShutdownServer()
self._CleanLogfile()
def OnCurrentIdentifierFinished( self ):
SendEventNotificationAsync( 'CurrentIdentifierFinished' )
def OnCompleteDone( self ):
complete_done_actions = self.GetCompleteDoneHooks()
for action in complete_done_actions:
action( self )
def GetCompleteDoneHooks( self ):
filetypes = vimsupport.CurrentFiletypes()
for key, value in iteritems( self._complete_done_hooks ):
if key in filetypes:
yield value
def GetCompletionsUserMayHaveCompleted( self ):
latest_completion_request = self.GetCurrentCompletionRequest()
if not latest_completion_request or not latest_completion_request.Done():
return []
completions = latest_completion_request.RawResponse()
result = self._FilterToMatchingCompletions( completions, True )
result = list( result )
if result:
return result
if self._HasCompletionsThatCouldBeCompletedWithMoreText( completions ):
# Since the way YCM works leads to CompleteDone being called on every
# character, return an empty list if the completion might not be finished.
# This won't match if the completion is ended by typing a non-keyword
# character.
return []
result = self._FilterToMatchingCompletions( completions, False )
return list( result )
def _FilterToMatchingCompletions( self, completions, full_match_only ):
"""Filter to completions matching the item Vim said was completed"""
completed = vimsupport.GetVariableValue( 'v:completed_item' )
for completion in completions:
item = ConvertCompletionDataToVimData( completion )
match_keys = ( [ "word", "abbr", "menu", "info" ] if full_match_only
else [ 'word' ] )
def matcher( key ):
return ( utils.ToUnicode( completed.get( key, "" ) ) ==
utils.ToUnicode( item.get( key, "" ) ) )
if all( [ matcher( i ) for i in match_keys ] ):
yield completion
def _HasCompletionsThatCouldBeCompletedWithMoreText( self, completions ):
completed_item = vimsupport.GetVariableValue( 'v:completed_item' )
if not completed_item:
return False
completed_word = utils.ToUnicode( completed_item[ 'word' ] )
if not completed_word:
return False
# Sometimes CompleteDone is called after the next character is inserted.
# If so, use the inserted character to filter the possible completions further.
text = vimsupport.TextBeforeCursor()
reject_exact_match = True
if text and text[ -1 ] != completed_word[ -1 ]:
reject_exact_match = False
completed_word += text[ -1 ]
for completion in completions:
word = utils.ToUnicode(
ConvertCompletionDataToVimData( completion )[ 'word' ] )
if reject_exact_match and word == completed_word:
continue
if word.startswith( completed_word ):
return True
return False
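  # Illustrative sketch (hypothetical data, not part of the plugin): if
  # v:completed_item[ 'word' ] is 'GetComple' and a remaining candidate word is
  # 'GetCompletions', then word.startswith( completed_word ) holds above, so
  # this method returns True and CompleteDone handling is deferred until no
  # candidate could be extended by further typing.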
def _OnCompleteDone_Csharp( self ):
completions = self.GetCompletionsUserMayHaveCompleted()
namespaces = [ self._GetRequiredNamespaceImport( c )
for c in completions ]
namespaces = [ n for n in namespaces if n ]
if not namespaces:
return
if len( namespaces ) > 1:
choices = [ "{0} {1}".format( i + 1, n )
for i, n in enumerate( namespaces ) ]
choice = vimsupport.PresentDialog( "Insert which namespace:", choices )
if choice < 0:
return
namespace = namespaces[ choice ]
else:
namespace = namespaces[ 0 ]
vimsupport.InsertNamespace( namespace )
def _GetRequiredNamespaceImport( self, completion ):
if ( "extra_data" not in completion
or "required_namespace_import" not in completion[ "extra_data" ] ):
return None
return completion[ "extra_data" ][ "required_namespace_import" ]
def GetErrorCount( self ):
return self._diag_interface.GetErrorCount()
def GetWarningCount( self ):
return self._diag_interface.GetWarningCount()
def DiagnosticUiSupportedForCurrentFiletype( self ):
return any( [ x in DIAGNOSTIC_UI_FILETYPES
for x in vimsupport.CurrentFiletypes() ] )
def ShouldDisplayDiagnostics( self ):
return bool( self._user_options[ 'show_diagnostics_ui' ] and
self.DiagnosticUiSupportedForCurrentFiletype() )
def _PopulateLocationListWithLatestDiagnostics( self ):
# The diag_interface already populates the location list when the
# 'always_populate_location_list' option is set, so only populate it here
# when that option is off.
if not self._user_options[ 'always_populate_location_list' ]:
self._diag_interface.PopulateLocationList( self._latest_diagnostics )
return bool( self._latest_diagnostics )
def UpdateDiagnosticInterface( self ):
self._diag_interface.UpdateWithNewDiagnostics( self._latest_diagnostics )
def FileParseRequestReady( self, block = False ):
return bool( self._latest_file_parse_request and
( block or self._latest_file_parse_request.Done() ) )
def HandleFileParseRequest( self, block = False ):
# Order is important here:
# FileParseRequestReady has a low cost, while
# NativeFiletypeCompletionUsable is a blocking server request
if ( self.FileParseRequestReady( block ) and
self.NativeFiletypeCompletionUsable() ):
if self.ShouldDisplayDiagnostics():
self._latest_diagnostics = self._latest_file_parse_request.Response()
self.UpdateDiagnosticInterface()
else:
# The YCM client has a hard-coded list of filetypes known to support
# diagnostics; see self.DiagnosticUiSupportedForCurrentFiletype().
#
# For filetypes which don't support diagnostics, we just want to check
# the _latest_file_parse_request for any exception or UnknownExtraConf
# response, to allow the server to raise configuration warnings, etc.
# to the user. We ignore any other supplied data.
self._latest_file_parse_request.Response()
# We set the file parse request to None because we want to prevent
# repeated issuing of the same warnings/errors/prompts. Setting this to
# None makes FileParseRequestReady return False until the next
# request is created.
#
# Note: it is the server's responsibility to determine the frequency of
# error/warning/prompts when receiving a FileReadyToParse event, but
# it is our responsibility to ensure that we only apply the
# warning/error/prompt received once (for each event).
self._latest_file_parse_request = None
def DebugInfo( self ):
debug_info = ''
if self._client_logfile:
debug_info += 'Client logfile: {0}\n'.format( self._client_logfile )
extra_data = {}
self._AddExtraConfDataIfNeeded( extra_data )
debug_info += FormatDebugInfoResponse( SendDebugInfoRequest( extra_data ) )
debug_info += (
'Server running at: {0}\n'
'Server process ID: {1}\n'.format( BaseRequest.server_location,
self._server_popen.pid ) )
if self._server_stdout and self._server_stderr:
debug_info += ( 'Server logfiles:\n'
' {0}\n'
' {1}'.format( self._server_stdout,
self._server_stderr ) )
return debug_info
def GetLogfiles( self ):
logfiles_list = [ self._client_logfile,
self._server_stdout,
self._server_stderr ]
debug_info = SendDebugInfoRequest()
if debug_info:
completer = debug_info[ 'completer' ]
if completer:
for server in completer[ 'servers' ]:
logfiles_list.extend( server[ 'logfiles' ] )
logfiles = {}
for logfile in logfiles_list:
  # The client logfile and server log paths may be unset; skip falsy entries.
  if logfile:
    logfiles[ os.path.basename( logfile ) ] = logfile
return logfiles
def _OpenLogfile( self, logfile ):
# Open log files in a horizontal window with the same behavior as the
# preview window (same height and winfixheight enabled). Automatically
# watch for changes. Set the cursor position at the end of the file.
options = {
'size': vimsupport.GetIntValue( '&previewheight' ),
'fix': True,
'focus': False,
'watch': True,
'position': 'end'
}
vimsupport.OpenFilename( logfile, options )
def _CloseLogfile( self, logfile ):
vimsupport.CloseBuffersForFilename( logfile )
def ToggleLogs( self, *filenames ):
logfiles = self.GetLogfiles()
if not filenames:
vimsupport.PostVimMessage(
'Available logfiles are:\n'
'{0}'.format( '\n'.join( sorted( list( logfiles ) ) ) ) )
return
for filename in set( filenames ):
if filename not in logfiles:
continue
logfile = logfiles[ filename ]
if not vimsupport.BufferIsVisibleForFilename( logfile ):
self._OpenLogfile( logfile )
continue
self._CloseLogfile( logfile )
def CurrentFiletypeCompletionEnabled( self ):
filetypes = vimsupport.CurrentFiletypes()
filetype_to_disable = self._user_options[
'filetype_specific_completion_to_disable' ]
if '*' in filetype_to_disable:
return False
else:
return not any( [ x in filetype_to_disable for x in filetypes ] )
def ShowDetailedDiagnostic( self ):
with HandleServerException():
detailed_diagnostic = BaseRequest.PostDataToHandler(
BuildRequestData(), 'detailed_diagnostic' )
if 'message' in detailed_diagnostic:
vimsupport.PostVimMessage( detailed_diagnostic[ 'message' ],
warning = False )
def ForceCompileAndDiagnostics( self ):
if not self.NativeFiletypeCompletionUsable():
vimsupport.PostVimMessage(
'Native filetype completion not supported for current file, '
'cannot force recompilation.', warning = False )
return False
vimsupport.PostVimMessage(
'Forcing compilation, this will block Vim until done.',
warning = False )
self.OnFileReadyToParse()
self.HandleFileParseRequest( block = True )
vimsupport.PostVimMessage( 'Diagnostics refreshed', warning = False )
return True
def ShowDiagnostics( self ):
if not self.ForceCompileAndDiagnostics():
return
if not self._PopulateLocationListWithLatestDiagnostics():
vimsupport.PostVimMessage( 'No warnings or errors detected.',
warning = False )
return
if self._user_options[ 'open_loclist_on_ycm_diags' ]:
vimsupport.OpenLocationList( focus = True )
def _AddSyntaxDataIfNeeded( self, extra_data ):
if not self._user_options[ 'seed_identifiers_with_syntax' ]:
return
filetype = vimsupport.CurrentFiletypes()[ 0 ]
if filetype in self._filetypes_with_keywords_loaded:
return
if self.IsServerReady():
self._filetypes_with_keywords_loaded.add( filetype )
extra_data[ 'syntax_keywords' ] = list(
syntax_parse.SyntaxKeywordsForCurrentBuffer() )
def _AddTagsFilesIfNeeded( self, extra_data ):
def GetTagFiles():
tag_files = vim.eval( 'tagfiles()' )
return [ os.path.join( utils.GetCurrentDirectory(), tag_file )
for tag_file in tag_files ]
if not self._user_options[ 'collect_identifiers_from_tags_files' ]:
return
extra_data[ 'tag_files' ] = GetTagFiles()
def _AddExtraConfDataIfNeeded( self, extra_data ):
def BuildExtraConfData( extra_conf_vim_data ):
return dict( ( expr, vimsupport.VimExpressionToPythonType( expr ) )
for expr in extra_conf_vim_data )
extra_conf_vim_data = self._user_options[ 'extra_conf_vim_data' ]
if extra_conf_vim_data:
extra_data[ 'extra_conf_data' ] = BuildExtraConfData(
extra_conf_vim_data )
def _AddUltiSnipsDataIfNeeded( self, extra_data ):
# See :h UltiSnips#SnippetsInCurrentScope.
try:
vim.eval( 'UltiSnips#SnippetsInCurrentScope( 1 )' )
except vim.error:
return
snippets = vimsupport.GetVariableValue( 'g:current_ulti_dict_info' )
extra_data[ 'ultisnips_snippets' ] = [
{ 'trigger': trigger,
'description': snippet[ 'description' ] }
for trigger, snippet in iteritems( snippets )
]
__version__ = (0, 9, 3)
import sys, os
import re
import types
import time
import urllib
import json
import string
import base64
import requests
# intra-package imports
from .config import PROTOCOL, SERVER, WS_API_VERSION, WEB_SERVICE, RALLY_REST_HEADERS
from .config import USER_NAME, PASSWORD
from .config import JSON_FORMAT, PAGESIZE, START_INDEX, MAX_ITEMS
from .config import timestamp
###################################################################################################
CONJUNCTION_PATT = re.compile(' (AND|OR) ', re.I) # to allow 'and', 'or', 'AND', 'OR'
#
# define a module global that should be set up/known before a few more module imports
#
_rallyCache = {} # keyed by a context tuple (server, user, password, workspace, project)
# value is a dict with at least:
# a key of 'rally' whose value there is a Rally instance and
# a key of 'hydrator' whose value there is an EntityHydrator instance
#
# another module global for TypeDefinition info
# it's intended to be able to access a TypeDefinition.ref , .ElementName, and .Name attrs only
#
_type_definition_cache = {} # keyed by a type of context info and type definition ElementName
# value is a minimally hydrated TypeDefinition instance
#
# Yo! another module global here...
#
_allowedValueAlias = {} # a dict keyed by entity name
# Project|Release|Iteration : { OID: Name value, ... }
# User & Owner are conspicuously not covered...
warning = sys.stderr.write
###################################################################################################
class RallyRESTAPIError(Exception): pass
#
# define a couple of entry point functions for use by other pkg modules and import the modules
#
def hydrateAnInstance(context, item, existingInstance=None):
global _rallyCache
rallyContext = _rallyCache.get(context, None)
if not rallyContext:
# raising an Exception is probably the correct thing to do
return None
hydrator = rallyContext.get('hydrator', None)
if not hydrator:
hydrator = EntityHydrator(context, hydration="full")
rallyContext['hydrator'] = hydrator
return hydrator.hydrateInstance(item, existingInstance=existingInstance)
def getResourceByOID(context, entity, oid, **kwargs):
"""
Retrieves a reference in _rallyCache to a Rally instance and uses that to
call its internal _getResourceByOID method
Returns a RallyRESTResponse instance
that has status_code, headers and content attributes.
"""
##
## print "getResourceByOID called:"
## print " context: %s" % context
## print " entity: %s" % entity
## print " oid: %s" % oid
## print " kwargs: %s" % kwargs
## sys.stdout.flush()
##
## if entity == 'context':
## raise Exception("getResourceByOID called to get resource for entity of 'context'")
##
global _rallyCache
rallyContext = _rallyCache.get(context, None)
if not rallyContext:
# raising an Exception is the only thing we can do, don't see any prospect of recovery...
raise RallyRESTAPIError('Unable to find Rally instance for context: %s' % context)
##
## print "_rallyCache.keys:"
## for key in _rallyCache.keys():
## print " -->%s<--" % key
## print ""
## print " apparently no key to match: -->%s<--" % context
## print " context is a %s" % type(context)
##
rally = rallyContext.get('rally')
resp = rally._getResourceByOID(context, entity, oid, **kwargs)
if 'unwrap' not in kwargs or not kwargs.get('unwrap', False):
return resp
response = RallyRESTResponse(rally.session, context, "%s.x" % entity, resp, "full", 1)
return response
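#
# Usage sketch (placeholder OID/entity values; assumes an initialized Rally
# instance, defined later in this module, whose context is in _rallyCache):
## ctx = rally.contextHelper.currentContext()
## response = getResourceByOID(ctx, 'defect', 12345, unwrap=True)
## print response.status_code
#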
# these imports have to take place after the prior class and function defs
from .rallyresp import RallyRESTResponse, ErrorResponse
from .hydrate import EntityHydrator
from .context import RallyContext, RallyContextHelper
from .entity import validRallyType
__all__ = ["Rally", "getResourceByOID", "hydrateAnInstance", "RallyUrlBuilder"]
def _createShellInstance(context, entity_name, item_name, item_ref):
if item_ref.endswith('.js'):
oid = item_ref[:-3].split('/').pop()
else:
oid = item_ref.split('/').pop()
item = {
'ObjectID' : oid,
'Name' : item_name,
'_type' : entity_name,
'_ref' : item_ref,
'ref' : '%s/%s' % (entity_name.lower(), oid)
}
hydrator = EntityHydrator(context, hydration="shell")
return hydrator.hydrateInstance(item)
##################################################################################################
class Rally(object):
"""
An instance of this class provides the instance holder the ability to
interact with Rally via the Rally REST WSAPI.
The holder can create, query (read), update and delete Rally entities
(but only selected appropriate entities).
In addition, there are several convenience methods (for users, workspaces and projects)
that allow the holder to quickly get a picture of their Rally operating environment.
"""
ARTIFACT_TYPE = { 'S' : 'Story',
'US' : 'Story',
'DE' : 'Defect',
'DS' : 'DefectSuite',
'TA' : 'Task',
'TC' : 'TestCase',
'TS' : 'TestSet',
'PI' : 'PortfolioItem'
}
FORMATTED_ID_PATTERN = re.compile(r'^[A-Z]{1,2}\d+$')
#S|US|DE|DS|TA|TC|TS|PI
MAX_ATTACHMENT_SIZE = 5000000 # approx 5MB
def __init__(self, server=SERVER, user=USER_NAME, password=PASSWORD,
version=WS_API_VERSION, warn=True, **kwargs):
self.server = server
self.user = user
self.password = password
self.version = version
self._inflated = False
self.service_url = "%s://%s/%s" % (PROTOCOL, self.server, WEB_SERVICE % self.version)
self.hydration = "full"
self._log = False
self._logDest = None
self._logAttrGet = False
self._warn = warn
config = {}
if kwargs and 'debug' in kwargs and kwargs.get('debug', False):
config['verbose'] = sys.stdout
credentials = requests.auth.HTTPBasicAuth(self.user, self.password)
proxy_dict = {}
https_proxy = os.environ.get('HTTPS_PROXY', None) or os.environ.get('https_proxy', None)
if https_proxy and https_proxy not in ["", None]:
proxy_dict['https'] = https_proxy
verify_ssl_cert = True
if kwargs and 'verify_ssl_cert' in kwargs:
vsc = kwargs.get('verify_ssl_cert')
if vsc in [False, True]:
verify_ssl_cert = vsc
self.session = requests.session(headers=RALLY_REST_HEADERS, auth=credentials,
timeout=10.0, proxies=proxy_dict,
verify=verify_ssl_cert, config=config)
self.contextHelper = RallyContextHelper(self, server, user, password)
self.contextHelper.check(self.server)
global _rallyCache
if self.contextHelper.currentContext() not in _rallyCache:
_rallyCache[self.contextHelper.currentContext()] = {'rally' : self}
if self.contextHelper.defaultContext not in _rallyCache:
_rallyCache[self.contextHelper.defaultContext] = {'rally' : self}
__adjust_cache = False
if 'workspace' in kwargs and kwargs['workspace'] != self.contextHelper.currentContext().workspace \
and kwargs['workspace'] != 'default':
if self.contextHelper.isAccessibleWorkspaceName(kwargs['workspace']):
self.contextHelper.setWorkspace(kwargs['workspace'])
__adjust_cache = True
else:
warning("WARNING: Unable to use your workspace specification, that value is not listed in your subscription\n")
if 'project' in kwargs and kwargs['project'] != self.contextHelper.currentContext().project \
and kwargs['project'] != 'default':
accessibleProjects = [name for name, ref in self.contextHelper.getAccessibleProjects(workspace='current')]
if kwargs['project'] in accessibleProjects:
self.contextHelper.setProject(kwargs['project'])
__adjust_cache = True
else:
issue = ("Unable to use your project specification of '%s', "
"that value is not associated with current workspace setting of: '%s'" )
raise Exception(issue % (kwargs['project'], self.contextHelper.currentContext().workspace))
if 'project' not in kwargs:
#
# It's possible that the invoker has specified a workspace but no project
# and the default project isn't in the workspace that was specified.
# In that case reset the current and default project to first project in
# the list of projects for the current workspace.
#
cdp = self.contextHelper.getProject() # cdp alias for current_default_project
ndp = self.contextHelper.resetDefaultProject() # ndp alias for new_default_project
if ndp != cdp: # have to test both the name and ref values!!
__adjust_cache = True
cdp_name, cdp_ref = cdp
ndp_name, ndp_ref = ndp
# but we'll only issue a warning if the project names are different
if self.warningsEnabled() and ndp_name != cdp_name:
prob = "WARNING: Default project changed to '%s' (%s).\n" + \
" Your normal default project: '%s' is not valid for\n" +\
" the current workspace setting of: '%s'\n"
short_proj_ref = "/".join(ndp_ref.split('/')[-2:])[:-3]
wksp_name, wksp_ref = self.contextHelper.getWorkspace()
warning(prob % (ndp_name, short_proj_ref, cdp_name, wksp_name))
if __adjust_cache:
_rallyCache[self.contextHelper.currentContext()] = {'rally' : self}
def _wpCacheStatus(self):
"""
intended to be only for unit testing...
values could be None, True, False, 'minimal', 'narrow', 'wide'
"""
return self.contextHelper._inflated
def serviceURL(self):
"""
Crutch to allow the RallyContextHelper to pass this along in the initialization
of a RallyContext instance.
"""
return self.service_url
def enableLogging(self, dest=sys.stdout, attrget=False, append=False):
"""
Use this to enable logging. dest can be set to the name of a file or an open file/stream (writable).
If attrget is set to True, all Rally REST requests that are executed to obtain attribute information
will also be logged. Be careful with that, as the volume can get quite large.
The append parameter controls whether any existing file will be appended to or overwritten.
"""
self._log = True
if hasattr(dest, 'write'):
self._logDest = dest
elif type(dest) == types.StringType:
try:
mode = 'w'
if append:
mode = 'a'
self._logDest = open(dest, mode)
except IOError as ex:
self._log = False
self._logDest = None
else:
self._log = False
# emit a warning that logging is disabled due to a faulty dest arg
warning('WARNING: Logging dest arg cannot be written to, proceeding with logging disabled.\n')
if self._log:
scopeNote = '%s Following entries record Rally REST API interaction via %s for user: %s' % \
(timestamp(), self.service_url, self.user)
self._logDest.write('%s\n' % scopeNote)
self._logDest.flush()
if attrget:
self._logAttrGet = True
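#
# Usage sketch (placeholder server/credentials, not a live subscription):
## rally = Rally('rally1.rallydev.com', 'sam@fence.com', 'topsecret')
## rally.enableLogging('rally_interaction.log', attrget=False, append=True)
##    # ... issue some requests here ...
## rally.disableLogging()
#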
def disableLogging(self):
"""
Disable logging.
"""
if self._log:
self._log = False
self._logAttrGet = False
if self._logDest and self._logDest not in (sys.stdout, sys.stderr):
try:
self._logDest.flush()
self._logDest.close()
except IOError as ex:
# emit a warning that the logging destination was unable to be closed
warning('WARNING: Logging destination could not be flushed/closed cleanly.\n')
self._logDest = None
def enableWarnings(self):
self._warn = True
def disableWarnings(self):
self._warn = False
def warningsEnabled(self):
return self._warn
def subscriptionName(self):
"""
Returns the name of the subscription in the currently active context.
"""
return self.contextHelper.currentContext().subscription()
def setWorkspace(self, workspaceName):
"""
Given a workspaceName, set that as the currentWorkspace and use the ref for
that workspace in subsequent interactions with Rally.
"""
if not self.contextHelper.isAccessibleWorkspaceName(workspaceName):
raise Exception('Specified workspace not valid for your credentials')
self.contextHelper.setWorkspace(workspaceName)
##
## print self.contextHelper.currentContext()
##
def getWorkspace(self):
"""
Returns a minimally hydrated Workspace instance with the Name and ref
of the workspace in the currently active context.
"""
context = self.contextHelper.currentContext()
wksp_name, wksp_ref = self.contextHelper.getWorkspace()
return _createShellInstance(context, 'Workspace', wksp_name, wksp_ref)
def getWorkspaces(self):
"""
Return a list of minimally hydrated Workspace instances
that are available to the registered user in the currently active context.
"""
context = self.contextHelper.currentContext()
wkspcs = self.contextHelper.getAccessibleWorkspaces()
workspaces = [_createShellInstance(context, 'Workspace', wksp_name, wksp_ref)
for wksp_name, wksp_ref in sorted(wkspcs)
]
return workspaces
def setProject(self, projectName):
"""
Given a projectName, set that as the current project and use the ref for
that project in subsequent interactions with Rally.
"""
eligible_projects = [proj for proj,ref in self.contextHelper.getAccessibleProjects(workspace='current')]
if projectName not in eligible_projects:
raise Exception('Specified project not valid for your current workspace or credentials')
self.contextHelper.setProject(projectName)
def getProject(self, name=None):
"""
Returns a minimally hydrated Project instance with the Name and ref
of the project in the currently active context if the name keyword arg
is not supplied or the Name and ref of the project identified by the name
as long as the name identifies a valid project in the currently selected workspace.
Returns None if a name parameter is supplied that does not identify a valid project
in the currently selected workspace.
"""
context = self.contextHelper.currentContext()
if not name:
proj_name, proj_ref = self.contextHelper.getProject()
return _createShellInstance(context, 'Project', proj_name, proj_ref)
projs = self.contextHelper.getAccessibleProjects(workspace='current')
hits = [(proj,ref) for proj,ref in projs if str(proj) == str(name)]
if not hits:
return None
tp = hits[0]
tp_ref = tp[1]
return _createShellInstance(context, 'Project', name, tp_ref)
def getProjects(self, workspace=None):
"""
Return a list of minimally hydrated Project instances
that are available to the registered user in the currently active context.
"""
wksp_target = workspace or 'current'
projs = self.contextHelper.getAccessibleProjects(workspace=wksp_target)
context = self.contextHelper.currentContext()
projects = [_createShellInstance(context, 'Project', proj_name, proj_ref)
for proj_name, proj_ref in sorted(projs)
]
return projects
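#
# Usage sketch for workspace/project navigation (the workspace name is a
# placeholder; assumes an initialized Rally instance):
## for wksp in rally.getWorkspaces():
##     print wksp.Name
## rally.setWorkspace('Engineering Workspace')
## for proj in rally.getProjects():
##     print proj.Name
#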
def getUserInfo(self, oid=None, username=None, name=None):
"""
A convenience method to collect specific user related information.
Caller must provide at least one keyword arg with a non-None / non-empty value
to identify the user target on which to obtain information.
The name keyword arg is associated with the User.DisplayName attribute
The username keyword arg is associated with the User.UserName attribute
If provided, the oid keyword argument is used, even if other keyword args are
provided. Similarly, if the username keyword arg is provided it is used
even if the name keyword argument is provided.
User
DisplayName
UserName
Disabled
EmailAddress
FirstName
MiddleName
LastName
OnpremLdapUsername
ShortDisplayName
Role
TeamMemberships (from projects)
LastPasswordUpdateDate
UserPermissions - from UserPermission items associated with this User
UserProfile
DefaultWorkspace
DefaultProject
TimeZone
DateFormat
DateTimeFormat
EmailNotificationEnabled
SessionTimeoutSeconds
SessionTimeoutWarning
UserPermission
Name - name of the User Permission
Role
Returns either a single User instance or a list of User instances
"""
context = self.contextHelper.currentContext()
item, response = None, None
if oid:
item = self._itemQuery('User', oid)
elif username:
response = self.get('User', fetch=True, query='UserName = "%s"' % username)
elif name:
response = self.get('User', fetch=True, query='DisplayName = "%s"' % name)
else:
raise RallyRESTAPIError("No specification provided to obtain User information")
if item:
return item
if response:
return [user for user in response]
return None
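#
# Usage sketch (placeholder identifiers): oid returns a single User instance,
# while username/name return a list of matching User instances.
## user    = rally.getUserInfo(oid=1234567890)
## matches = rally.getUserInfo(username='sam@fence.com')
## matches = rally.getUserInfo(name='Sam Fencepost')
#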
def getAllUsers(self, workspace=None):
"""
Given that actually getting full information about all users in the workspace
via the Rally WSAPI is somewhat opaque, this method offers a one-stop convenient
means of obtaining usable information about users in the named workspace.
If no workspace is specified, then the current context's workspace is used.
Return a list of User instances (fully hydrated for scalar attributes)
whose ref and collection attributes will be lazy eval'ed upon access.
"""
saved_workspace_name, saved_workspace_ref = self.contextHelper.getWorkspace()
if not workspace:
workspace = saved_workspace_name
self.setWorkspace(workspace)
context, augments = self.contextHelper.identifyContext(workspace=workspace)
workspace_ref = self.contextHelper.currentWorkspaceRef()
# the only reason we are specifically listing the User attributes is to also sneak in
# the TimeZone attribute which is actually an attribute on UserProfile. If we simply
# had the fetch clause of "fetch=true", each access of TimeZone would be a lazy-eval
# (a new request to Rally). So, we do the explicit listing of attributes for better performance.
user_attrs = ["Name", "UserName", "DisplayName", "FirstName", "LastName", "MiddleName",
"CreationDate", "EmailAddress",
"ShortDisplayName", "OnpremLdapUsername",
"LastPasswordUpdateDate", "Disabled",
"Subscription", "SubscriptionAdmin",
"Role", "UserPermissions", "TeamMemberships",
"UserProfile",
"TimeZone" # a UserProfile attribute
]
fields = ",".join(user_attrs)
resource = 'users.js?fetch="%s"&query=&pagesize=200&start=1&workspace=%s' % (fields, workspace_ref)
full_resource_url = '%s/%s' % (self.service_url, resource)
response = self.session.get(full_resource_url)
if response.status_code != 200:
return []
response = RallyRESTResponse(self.session, context, resource, response, "full", 0)
self.setWorkspace(saved_workspace_name)
return [user_rec for user_rec in response]
def _officialRallyEntityName(self, supplied_name):
if supplied_name in ['Story', 'UserStory', 'User Story']:
supplied_name = 'HierarchicalRequirement'
# here's where we'd make an inquiry into entity to see if the supplied_name
# is a Rally entity on which CRUD ops are permissible.
# An Exception is raised if not.
# If supplied_name resolves in some way to a valid Rally entity,
# this returns either a simple name or a TypePath string (like PortfolioItem/Feature)
official_name = validRallyType(self, supplied_name)
return official_name
def _getResourceByOID(self, context, entity, oid, **kwargs):
"""
The _ref URL (containing the entity name and OID) can be used unchanged but the format
of the response is slightly different from that returned when the resource URL is
constructed per spec (by this class's get method). The _ref URL response can be
termed "un-wrapped" in that there's no boilerplate involved, just the JSON object.
Returns a raw response instance (with status_code, headers and content attributes).
"""
##
## print "in _getResourceByOID, OID specific resource ...", entity, oid
## sys.stdout.flush()
##
resource = '%s/%s' % (entity, oid)
if not resource.endswith('.js'):
resource += '.js'
if '_disableAugments' not in kwargs:
contextDict = context.asDict()
##
## print "_getResourceByOID, current contextDict: %s" % repr(contextDict)
## sys.stdout.flush()
##
context, augments = self.contextHelper.identifyContext(**contextDict)
if augments:
resource += ("?" + "&".join(augments))
##
## print "_getResourceByOID, modified contextDict: %s" % repr(context.asDict())
## sys.stdout.flush()
##
full_resource_url = "%s/%s" % (self.service_url, resource)
if self._logAttrGet:
self._logDest.write('%s GET %s\n' % (timestamp(), resource))
self._logDest.flush()
##
## print "issuing GET for resource: %s" % full_resource_url
## sys.stdout.flush()
##
try:
raw_response = self.session.get(full_resource_url)
except Exception as ex:
exctype, value, tb = sys.exc_info()
warning('%s: %s\n' % (exctype, value))
return None
##
## print raw_response: %s" % raw_response
## sys.stdout.flush()
##
return raw_response
def _itemQuery(self, entityName, oid, workspace=None, project=None):
"""
Internal method to retrieve a specific instance of an entity identified by the OID.
"""
##
## print "Rally._itemQuery('%s', %s, workspace=%s, project=%s)" % (entityName, oid, workspace, project)
##
resource = '%s/%s' % (entityName, oid)
context, augments = self.contextHelper.identifyContext(workspace=workspace, project=project)
if augments:
resource += ("?" + "&".join(augments))
if self._log:
self._logDest.write('%s GET %s\n' % (timestamp(), resource))
self._logDest.flush()
response = self._getResourceByOID(context, entityName, oid)
if self._log:
self._logDest.write('%s %s %s\n' % (timestamp(), response.status_code, resource))
self._logDest.flush()
if not response or response.status_code != 200:
    problem = "Unreferenceable %s OID: %s" % (entityName, oid)
    # response may be None here, so don't dereference status_code blindly
    status = response.status_code if response else "no response"
    raise RallyRESTAPIError('%s %s' % (status, problem))
response = RallyRESTResponse(self.session, context, '%s.x' % entityName, response, "full", 1)
item = response.next()
return item # return back an instance representing the item
def get(self, entity, fetch=False, query=None, order=None, **kwargs):
"""
A REST approach has the world seen as resources with the big 4 ops available on them
(GET, PUT, POST, DELETE). There are other ops but we don't care about them here.
Each resource _should_ have a unique URI that is used to identify the resource.
The GET operation is used to specify that we want a representation of that resource.
For Rally, in order to construct a URI, we need the name of the entity, the attributes
of the entity (and attributes on any child/parent entity related to the named entity),
the query (selection criteria) and the order in which the results should be returned.
The fetch argument (boolean or a comma separated list of attributes) indicates whether
we get complete representations of objects back or whether we get "shell" objects with
refs to be able to retrieve the full info at some later time.
An optional instance=True keyword argument will result in returning an instantiated
Rally Entity if and only if the resultCount of the get is exactly equal to 1.
Otherwise, a RallyRESTResponse instance is returned.
All optional keyword args:
fetch=True/False or "List,Of,Attributes,We,Are,Interested,In"
query='FieldName = "some value"' or ['fld1 = 19', 'fld27 != "Shamu"', etc.]
instance=False/True
pagesize=n
start=n
limit=n
projectScopeUp=True/False
projectScopeDown=True/False
"""
# TODO: this method too long, break into small setup, 2 or 3 subcalls and some wrapup
# set some useful defaults...
pagesize = PAGESIZE
startIndex = START_INDEX
limit = MAX_ITEMS
if kwargs and 'pagesize' in kwargs:
pagesize = kwargs['pagesize']
if kwargs and 'start' in kwargs:
try:
usi = int(kwargs['start']) # usi - user supplied start index
if 0 < usi < MAX_ITEMS: # start index must be greater than 0 and less than max
startIndex = usi
except ValueError as ex:
pass
if kwargs and 'limit' in kwargs:
try:
ulimit = int(kwargs['limit']) # in case someone specifies something like limit=gazillionish
limit = min(ulimit, MAX_ITEMS)
except:
pass
if fetch == True:
fetch = 'true'
self.hydration = "full"
elif fetch == False:
fetch = 'false'
self.hydration = "shell"
elif type(fetch) == types.StringType and fetch.lower() != 'false':
self.hydration = "full"
elif type(fetch) in [types.ListType, types.TupleType]:
fetch = ",".join(fetch)
self.hydration = "full"
entity = self._officialRallyEntityName(entity)
resource = RallyUrlBuilder(entity)
resource.qualify(fetch, query, order, pagesize, startIndex)
if '_disableAugments' in kwargs:
context = RallyContext(self.server, self.user, self.password, self.service_url)
else:
context, augments = self.contextHelper.identifyContext(**kwargs)
workspace_ref = self.contextHelper.currentWorkspaceRef()
project_ref = self.contextHelper.currentProjectRef()
##
## print " workspace_ref: %s" % workspace_ref
## print " project_ref: %s" % project_ref
##
if workspace_ref: # TODO: would we ever _not_ have a workspace_ref?
if 'workspace' not in kwargs or ('workspace' in kwargs and kwargs['workspace'] is not None):
resource.augmentWorkspace(augments, workspace_ref)
if project_ref:
if 'project' not in kwargs or ('project' in kwargs and kwargs['project'] is not None):
resource.augmentProject(augments, project_ref)
resource.augmentScoping(augments)
resource = resource.build() # can also use resource = resource.build(pretty=True)
full_resource_url = "%s/%s" % (self.service_url, resource)
# TODO: see if much of above can be pushed into another method
if self._log:
self._logDest.write('%s GET %s\n' % (timestamp(), resource))
self._logDest.flush()
response = None # in case an exception gets raised in the session.get call ...
try:
# a response has status_code, content and data attributes
# the data attribute is a dict that has a single entry for the key 'QueryResult'
# or 'OperationResult' whose value is in turn a dict with values of
# 'Errors', 'Warnings', 'Results'
response = self.session.get(full_resource_url)
except Exception as ex:
if response:
##
## print response.status_code
##
ret_code, content = response.status_code, response.content
else:
ret_code, content = 404, str(ex.args[0])
if self._log:
self._logDest.write('%s %s\n' % (timestamp(), ret_code))
self._logDest.flush()
errorResponse = ErrorResponse(ret_code, content)
response = RallyRESTResponse(self.session, context, resource, errorResponse, self.hydration, 0)
return response
##
## print "response.status_code is %s" % response.status_code
##
if response.status_code != 200:
if self._log:
code, verbiage = response.status_code, response.content[:56]
self._logDest.write('%s %s %s ...\n' % (timestamp(), code, verbiage))
self._logDest.flush()
##
## print response
##
#if response.status_code == 404:
# problem = "%s Service unavailable from %s, check for proper hostname" % (response.status_code, self.service_url)
# raise Exception(problem)
errorResponse = ErrorResponse(response.status_code, response.content)
response = RallyRESTResponse(self.session, context, resource, errorResponse, self.hydration, 0)
return response
response = RallyRESTResponse(self.session, context, resource, response, self.hydration, limit)
if self._log:
if response.status_code == 200:
desc = '%s TotalResultCount %s' % (entity, response.resultCount)
else:
desc = response.errors[0]
self._logDest.write('%s %s %s\n' % (timestamp(), response.status_code, desc))
self._logDest.flush()
if kwargs and 'instance' in kwargs and kwargs['instance'] == True and response.resultCount == 1:
return response.next()
return response
find = get # offer interface approximately matching Ruby Rally REST API, App SDK Javascript RallyDataSource
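#
# Usage sketch for get/find (query syntax per the docstring above; entity,
# field and limit values are placeholders):
## response = rally.get('Defect', fetch=True, query='State = "Submitted"',
##                      order='Severity', pagesize=100, limit=500)
## for defect in response:
##     print defect.FormattedID, defect.Name
## release = rally.get('Release', query='Name = "2012 Q4"', instance=True)
#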
def put(self, entityName, itemData, workspace='current', project='current', **kwargs):
"""
Given a Rally entityName, a dict with data that the newly created entity should contain,
issue the REST call and return the newly created target entity item.
"""
# see if we need to transform workspace / project values of 'current' to actual
if workspace == 'current':
workspace = self.getWorkspace().Name # just need the Name here
if project == 'current':
project = self.getProject().Name # just need the Name here
entityName = self._officialRallyEntityName(entityName)
resource = "%s/create.js" % entityName.lower()
context, augments = self.contextHelper.identifyContext(workspace=workspace, project=project)
if augments:
resource += ("?" + "&".join(augments))
full_resource_url = "%s/%s" % (self.service_url, resource)
item = {entityName: itemData}
payload = json.dumps(item)
if self._log:
self._logDest.write('%s PUT %s\n%27.27s %s\n' % (timestamp(), resource, " ", payload))
self._logDest.flush()
response = self.session.put(full_resource_url, data=payload, headers=RALLY_REST_HEADERS)
response = RallyRESTResponse(self.session, context, resource, response, "shell", 0)
if response.status_code != 200:
desc = response.errors[0]
if self._log:
self._logDest.write('%s %s %s\n' % (timestamp(), response.status_code, desc))
self._logDest.flush()
raise RallyRESTAPIError('%s %s' % (response.status_code, desc))
result = response.content
item = result[u'CreateResult'][u'Object']
ref = str(item[u'_ref'])
item_oid = int(ref.split('/')[-1][:-3])
desc = "created %s OID: %s" % (entityName, item_oid)
if self._log:
self._logDest.write('%s %s %s\n' % (timestamp(), response.status_code, desc))
self._logDest.flush()
# now issue a request to get the entity item (mostly so we can get the FormattedID)
# and return it
item = self._itemQuery(entityName, item_oid, workspace=workspace, project=project)
return item
create = put # a more intuitive alias for the operation
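#
# Usage sketch for put/create (field names/values are placeholders):
## info = { 'Name'        : 'Crash when saving',
##          'Severity'    : 'Major Problem',
##          'Description' : 'Saving a file whose name contains % aborts the app' }
## defect = rally.create('Defect', info)
## print defect.FormattedID
#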
def post(self, entityName, itemData, workspace='current', project='current', **kwargs):
"""
Given a Rally entityName, a dict with data that the entity should be updated with,
issue the REST call and return a representation of updated target entity item.
"""
# see if we need to transform workspace / project values of 'current' to actual
if workspace == 'current':
workspace = self.getWorkspace().Name # just need the Name here
if project == 'current':
project = self.getProject().Name # just need the Name here
entityName = self._officialRallyEntityName(entityName)
oid = itemData.get('ObjectID', None)
if not oid:
formattedID = itemData.get('FormattedID', None)
if not formattedID:
raise RallyRESTAPIError('An identifying field (ObjectID or FormattedID) must be specified')
fmtIdQuery = 'FormattedID = "%s"' % formattedID
response = self.get(entityName, fetch="ObjectID", query=fmtIdQuery,
workspace=workspace, project=project)
if response.status_code != 200 or response.resultCount == 0:
raise RallyRESTAPIError('Target %s %s could not be located' % (entityName, formattedID))
target = response.next()
oid = target.ObjectID
##
## print "target OID: %s" % oid
##
itemData['ObjectID'] = oid
resource = '%s/%s.js' % (entityName.lower(), oid)
context, augments = self.contextHelper.identifyContext(workspace=workspace, project=project)
if augments:
resource += ("?" + "&".join(augments))
full_resource_url = "%s/%s" % (self.service_url, resource)
##
## print "resource: %s" % resource
##
item = {entityName: itemData}
payload = json.dumps(item)
if self._log:
self._logDest.write('%s POST %s\n%27.27s %s\n' % (timestamp(), resource, " ", item))
self._logDest.flush()
response = self.session.post(full_resource_url, data=payload, headers=RALLY_REST_HEADERS)
response = RallyRESTResponse(self.session, context, resource, response, "shell", 0)
if response.status_code != 200:
raise RallyRESTAPIError('Unable to update the %s' % entityName)
# now issue a request to get the entity item (mostly so we can get the FormattedID)
# and return it
item = self._itemQuery(entityName, oid, workspace=workspace, project=project)
return item
update = post # a more intuitive alias for the operation
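#
# Usage sketch for post/update (identify the target by ObjectID or
# FormattedID; 'DE1234' and the field values are placeholders):
## upd = { 'FormattedID' : 'DE1234', 'State' : 'Fixed' }
## defect = rally.update('Defect', upd)
#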
def delete(self, entityName, itemIdent, workspace='current', project='current', **kwargs):
"""
Given a Rally entityName, an identification of a specific Rally instance of that
entity (in either OID or FormattedID format), issue the REST DELETE call and
return an indication of whether the delete operation was successful.
"""
# see if we need to transform workspace / project values of 'current' to actual
if workspace == 'current':
workspace = self.getWorkspace().Name # just need the Name here
if project == 'current':
project = self.getProject().Name # just need the Name here
entityName = self._officialRallyEntityName(entityName)
# guess at whether itemIdent is an ObjectID or FormattedID via
# regex matching (all digits or 1-2 upcase chars + digits)
objectID = itemIdent # at first assume itemIdent is the ObjectID
if self.FORMATTED_ID_PATTERN.match(itemIdent):
fmtIdQuery = 'FormattedID = "%s"' % itemIdent
response = self.get(entityName, fetch="ObjectID", query=fmtIdQuery,
workspace=workspace, project=project)
if response.status_code != 200:
raise RallyRESTAPIError('Target %s %s could not be located' % (entityName, itemIdent))
target = response.next()
objectID = target.ObjectID
##
## if kwargs.get('debug', False):
## print "DEBUG: target OID -> %s" % objectID
##
resource = "%s/%s.js" % (entityName.lower(), objectID)
context, augments = self.contextHelper.identifyContext(workspace=workspace, project=project)
if augments:
resource += ("?" + "&".join(augments))
full_resource_url = "%s/%s" % (self.service_url, resource)
if self._log:
self._logDest.write('%s DELETE %s\n' % (timestamp(), resource))
response = self.session.delete(full_resource_url, headers=RALLY_REST_HEADERS)
if response and response.status_code != 200:
if self._log:
self._logDest.write('%s %s %s ...\n' % \
(timestamp(), response.status_code, response.content[:56]))
self._logDest.flush()
##
## if kwargs.get('debug', False):
## print response.status_code, response.headers, response.content
##
errorResponse = ErrorResponse(response.status_code, response.content)
response = RallyRESTResponse(self.session, context, resource, errorResponse, self.hydration, 0)
problem = "ERRORS: %s\nWARNINGS: %s\n" % ("\n".join(response.errors),
"\n".join(response.warnings))
raise RallyRESTAPIError(problem)
##
## print response.content
##
response = RallyRESTResponse(self.session, context, resource, response, "shell", 0)
if response.errors:
status = False
desc = response.errors[0]
else:
status = True
desc = '%s deleted' % entityName
if self._log:
self._logDest.write('%s %s %s\n' % (timestamp(), response.status_code, desc))
self._logDest.flush()
return status
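#
# Usage sketch for delete (an ObjectID or FormattedID identifies the target;
# 'DE1234' is a placeholder):
## dropped = rally.delete('Defect', 'DE1234')
## if dropped:
##     print 'deleted'
#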
def typedef(self, target_type):
"""
Given the name of a target Rally type definition return an instance
of a TypeDefinition class matching the target. Cache the TypeDefinition
for the context so that repeated calls to the same target_type only result
in one call to Rally.
"""
ctx = self.contextHelper.currentContext()
td_key = (ctx.server, ctx.subs_name, ctx.workspace, ctx.project, target_type)
if td_key not in _type_definition_cache:
td = self.get('TypeDefinition', fetch='ElementName,Name,Parent,TypePath',
query='ElementName = "%s"' % target_type,
instance=True)
if not td:
raise Exception("Invalid Rally entity name: %s" % target_type)
_type_definition_cache[td_key] = td
return _type_definition_cache[td_key]
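#
# Usage sketch for typedef (repeated calls for the same target_type in the
# same context hit the module-level _type_definition_cache):
## td = rally.typedef('Defect')
## print td.ref, td.ElementName, td.Name
#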
def getState(self, entity, state_name):
"""
State is now (Sep 2012) a Rally type (aka entity) not a String.
In order to somewhat insulate pyral package users from the increased complexity
of that approach, this is a convenience method that given a target entity (like
Defect, PortfolioItem/<subtype>, etc.) and a state name, an inquiry to the Rally
system is executed and the matching entity is returned.
"""
criteria = [ 'TypeDef.Name = "%s"' % entity,
'Name = "%s"' % state_name
]
state = self.get('State', fetch=True, query=criteria, project=None, instance=True)
return state
def getStates(self, entity):
"""
"""
response = self.get('State', query='TypeDef.Name = "%s"' % entity, project=None)
return [item for item in response]
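#
# Usage sketch for getState/getStates ('Defect' and 'Fixed' are placeholders):
## fixed = rally.getState('Defect', 'Fixed')
## for state in rally.getStates('Defect'):
##     print state.Name
#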
def allowedValueAlias(self, entity, refUrl):
"""
Use the _allowedValueAlias as a cache. A cache hit results from
having an entity key in _allowedValueAlias AND an entry for the OID
contained in the refUrl; in that case the return is the OID and the alias value.
If there is no cache hit for the entity, issue a GET against
self.service_url/<entity>.js?fetch=ObjectID,Name (or UserName,DisplayName)
"""
urlFields = refUrl.split('/')
oid = urlFields.pop()
result = (oid, "--UNKNOWN OID--")
actualEntity = urlFields.pop().lower() # this may be different from entity, eg.
# entity=SubmittedBy, actualEntity=User
if actualEntity == 'user': # special case processing for user follows...
if actualEntity not in _allowedValueAlias:
_allowedValueAlias[actualEntity] = {}
cache = _allowedValueAlias[actualEntity]
if oid in cache:
result = (oid, cache[oid])
else:
item = self._itemQuery(actualEntity, oid) # query against 'User' for item
cache[oid] = (item.DisplayName, item.UserName)
result = (oid, (item.DisplayName, item.UserName))
else: # normal case, entity == actualEntity (or at least no functional difference)
hydrationSetting = self.hydration
self.hydrate = "shell"
if entity not in _allowedValueAlias:
_allowedValueAlias[entity] = {}
cache = _allowedValueAlias[entity]
if oid in cache:
result = (oid, cache[oid])
else:
fields = "ObjectID,Name"
response = self.get(entity, fetch=fields, pagesize=200) # query for all items in entity
for item in response:
cache[item.oid] = item.Name
if oid in cache:
result = (oid, cache[oid])
self.hydration = hydrationSetting
return result
def getAllowedValues(self, entityName, attributeName, **kwargs):
"""
Given an entityName and an attributeName (assumed to be valid for the entityName),
issue a request to obtain a list of allowed values for the attribute.
"""
# get rid of any pesky spaces in the attributeName
attrName = attributeName.replace(' ', '')
resource = '%s/%s/allowedValues.js' % (entityName, attrName)
context, augments = self.contextHelper.identifyContext(**kwargs)
if augments:
resource += ("?" + "&".join(augments))
full_resource_url = "%s/%s" % (self.service_url, resource)
if self._log:
self._logDest.write('%s GET %s\n' % (timestamp(), resource))
self._logDest.flush()
try:
response = self.session.get(full_resource_url, headers=RALLY_REST_HEADERS)
except Exception as ex:
exception_type, value, traceback = sys.exc_info()
warning('%s: %s\n' % (exception_type, value))
sys.exit(9)
if self._log:
self._logDest.write('%s %s %s\n' % (timestamp(), response.status_code, resource))
self._logDest.flush()
if not response or response.status_code != 200:
problem = "AllowedValues unobtainable for %s.%s" % (entityName, attrName)
raise RallyRESTAPIError('%s %s' % (response.status_code, problem))
try:
allowed_values_dict = json.loads(response.content)
return allowed_values_dict
except Exception as ex:
print "Unable to decode the json.loads target"
print ex.args[0]
return None
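#
# Usage sketch for getAllowedValues (returns the decoded JSON dict; entity and
# attribute names are placeholders):
## avs = rally.getAllowedValues('Defect', 'Severity')
## print avs
#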
def addAttachment(self, artifact, filename, mime_type='text/plain'):
"""
Given an artifact (actual or FormattedID for an artifact), validate
that it exists and then attempt to add an Attachment with the name and
contents of filename into Rally and associate that Attachment
with the Artifact.
Upon the successful creation of the Attachment and linkage to the artifact,
return an instance of the successfully added Attachment.
Exceptions are raised for other error conditions, such as the file
identified by the filename parameter not existing, or not being a regular file,
or the attachment file exceeding the maximum allowed size, or failure
to create the AttachmentContent or Attachment.
"""
# determine if artifact exists, if not short-circuit False
# determine if attachment already exists for filename (with same size and content)
# if so, and already attached to artifact, short-circuit True
# if so, but not attached to artifact, save attachment
# if not, create the AttachmentContent with filename content,
# create the Attachment with basename for filename and ref the AttachmentContent
# and supply the ref for the artifact in the Artifact field for Attachment
#
if not os.path.exists(filename):
raise Exception('Named attachment filename: %s not found' % filename)
if not os.path.isfile(filename):
raise Exception('Named attachment filename: %s is not a regular file' % filename)
attachment_file_name = os.path.basename(filename)
attachment_file_size = os.path.getsize(filename)
if attachment_file_size > self.MAX_ATTACHMENT_SIZE:
raise Exception('Attachment file size too large, unable to attach to Rally Artifact')
art_type, artifact = self._realizeArtifact(artifact)
if not art_type:
return False
current_attachments = [att for att in artifact.Attachments]
response = self.get('Attachment', fetch=True, query='Name = "%s"' % attachment_file_name)
if response.resultCount:
attachment = response.next()
already_attached = [att for att in current_attachments if att.oid == attachment.oid]
if already_attached:
return already_attached[0]
contents = ''
with open(filename, 'rb') as af:  # binary mode so non-text attachments survive encoding
contents = base64.encodestring(af.read())
# create an AttachmentContent item
ac = self.create('AttachmentContent', {"Content" : contents}, project=None)
if not ac:
raise RallyRESTAPIError('Unable to create AttachmentContent for %s' % attachment_file_name)
attachment_info = { "Name" : attachment_file_name,
"Content" : ac.ref, # ref to AttachmentContent
"ContentType" : mime_type,
"Size" : attachment_file_size, # must be size before encoding!!
"User" : 'user/%s' % self.contextHelper.user_oid,
#"Artifact" : artifact.ref # (Artifact is an 'optional' field)
}
# While it's actually possible to have an Attachment not linked to an Artifact,
# in most cases, it'll be far more useful to have the linkage to an Artifact than not.
if artifact:
attachment_info["Artifact"] = artifact.ref
# and finally, create the Attachment
attachment = self.create('Attachment', attachment_info, project=None)
if not attachment:
raise RallyRESTAPIError('Unable to create Attachment for %s' % attachment_file_name)
return attachment
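#
# Usage sketch for addAttachment (artifact may be an entity instance or a
# FormattedID; 'DE1234', the filename and mime_type are placeholders):
## att = rally.addAttachment('DE1234', 'screenshot.png', mime_type='image/png')
## print att.Name
#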
def addAttachments(self, artifact, attachments):
"""
Attachments must be a list of dicts, with each dict having key-value
pairs for Name, MimeType (or mime_type or content_type or ContentType), Content
"""
candidates = []
attached = []
for attachment in attachments:
att_name = attachment.get('Name', None)
if not att_name:
continue
ct_item = attachment.get('mime_type', None) or attachment.get('MimeType', None) \
or attachment.get('content_type', None) or attachment.get('ContentType', None)
if not ct_item:
print "Bypassing attachment for %s, no mime_type/ContentType setting..." % att_name
continue
candidates.append(att_name)
upd_artifact = self.addAttachment(artifact, att_name, mime_type=ct_item)
if upd_artifact:
attached.append(att_name)
return len(attached) == len(candidates)
def getAttachment(self, artifact, filename):
"""
Given a real artifact instance or the FormattedID of an existing artifact,
obtain the attachment named by filename. If there is such an attachment,
return an Attachment instance with hydration for Name, Size, ContentType, Content,
CreationDate and the User that supplied the attachment.
If no such attachment is present, return None
"""
art_type, artifact = self._realizeArtifact(artifact)
if not art_type:
return False
current_attachments = [att for att in artifact.Attachments]
hits = [att for att in current_attachments if att.Name == filename]
if not hits:
return None
att = hits.pop(0)
if not att._hydrated:
getattr(att, 'Description') # forces the hydration to occur
# For reasons that are unclear, a "normal" pyral GET on 'AttachmentContent' comes
# back as empty even if the specific OID for an AttachmentContent item exists.
# The target URL of the GET has to be constructed in a particular manner.
# Fortunately, our _getResourceByOID method fills this need.
# But, we have to turn the raw response into a RallyRESTResponse ourselves here.
context, augments = self.contextHelper.identifyContext()
resp = self._getResourceByOID(context, 'AttachmentContent', att.Content.oid, project=None)
if resp.status_code not in [200, 201, 202]:
return None
response = RallyRESTResponse(self.session, context, "AttachmentContent.x", resp, "full", 1)
if response.errors or response.resultCount != 1:
return None
att_content = response.next()
att.Content = base64.decodestring(att_content.Content) # maybe further transform to Unicode?
return att
def getAttachmentNames(self, artifact):
"""
For the given Artifact, return the names (filenames) of the Attachments
"""
names = []
if artifact.Attachments:
names = [att.Name for att in artifact.Attachments]
return names
def getAttachments(self, artifact):
"""
For the given Artifact, return a list of Attachment records.
Each Attachment record will look like a Rally WSAPI Attachment with
the additional Content attribute that will contain the decoded AttachmentContent.
"""
attachment_names = self.getAttachmentNames(artifact)
attachments = [self.getAttachment(artifact, attachment_name) for attachment_name in attachment_names]
attachments = [att for att in attachments if att is not None]
return attachments
def __disabled__deleteAttachment(self, artifact, filename):
"""
Unfortunately, at this time (WSAPI 1.34+) while AttachmentContent items can be deleted,
Attachment items cannot. So, exposing this method would offer very limited utility.
"""
return False
art_type, artifact = self._realizeArtifact(artifact)
if not art_type:
return False
current_attachments = [att for att in artifact.Attachments]
hits = [att for att in current_attachments if att.Name == filename]
if not hits:
return False
# get the target Attachment and the associated AttachmentContent item
attachment = hits.pop(0)
print attachment.details()
if attachment.Content and attachment.Content.oid:
success = self.delete('AttachmentContent', attachment.Content.oid, project=None)
##
## print "deletion attempt on AttachmentContent %s succeeded? %s" % (attachment.Content.oid, success)
##
if not success:
print "Panic! unable to delete AttachmentContent item for %s" % attachment.Name
return False
# # Squeamishness about the drawbacks of deleting certain entities in Rally has
# # sloshed into the Attachment realm, so can't actually do a delete of an Attachment.
#### 2012-09-24 re-attempt to delete an Attachment with Rally WSAPI 1.37
#### attempt failed, no Exception raised, but Attachment not deleted...
####
# #deleted = self.delete('Attachment', attachment.oid, project=None)
#
# # But, we can still just not include the targeted Attachment here from
#### 2012-09-20 in fact, this is now dysfunctional also as of WSAPI 1.37 backward incompatible changes
# # being included in the list of Attachments for our target artifact
# remaining_attachments = [att for att in current_attachments if att.ref != attachment.ref]
# att_refs = [dict(_ref=str(att.ref)) for att in remaining_attachments]
# artifact_info = { 'ObjectID' : artifact.ObjectID,
# 'Attachments' : att_refs,
# }
# updated = self.update(art_type, artifact_info, project=None)
# if updated:
# return updated
# else:
# return False
def _realizeArtifact(self, artifact):
"""
Helper method to identify the artifact type and to retrieve it if the
artifact value is a FormattedID. If the artifact is already an instance
of a Rally entity, then all that needs to be done is deduce the art_type
from the class name. If the artifact argument given is neither of those
two conditions, return back a 2 tuple of (False, None).
Once you have an Rally instance of the artifact, return back a
2 tuple of (art_type, artifact)
"""
art_type = False
if 'pyral.entity.' in str(type(artifact)):
# we've got the artifact already...
art_type = artifact.__class__.__name__
elif self.FORMATTED_ID_PATTERN.match(artifact):
# artifact is a potential FormattedID value
prefix = artifact[:2]
if prefix[1] in string.digits:
prefix = prefix[0]
art_type = self.ARTIFACT_TYPE[prefix]
response = self.get(art_type, fetch=True, query='FormattedID = %s' % artifact)
if response.resultCount == 1:
artifact = response.next()
else:
art_type = False
else: # artifact isn't anything we can deal with here...
pass
return art_type, artifact
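# Illustrative prefix deduction (a sketch; the actual ARTIFACT_TYPE mapping is
# defined elsewhere in this module):
#   'DE7' -> prefix 'DE' -> e.g. 'Defect'
#   'S42' -> prefix 'S4' has a digit in slot 2, so the prefix collapses to 'S'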
##################################################################################################
class RallyUrlBuilder(object):
"""
An instance of this class is used to collect information needed to construct a
valid URL that can be issued in a REST Request to Rally.
The sequence of use is to obtain a RallyUrlBuilder for a named entity,
provide qualifying criteria, augments, scoping criteria and any provision
for a pretty response, and then call build to return the resulting resource URL.
An instance can be re-used (for the same entity) by simply re-calling the
specification methods with differing values and then re-calling the build method.
"""
parts = ['fetch', 'query', 'order',
'workspace', 'project', 'projectScopeUp', 'projectScopeDown',
'pagesize', 'start', 'pretty'
]
def __init__(self, entity):
self.entity = entity
def qualify(self, fetch, query, order, pagesize, startIndex):
self.fetch = fetch
self.query = query
self.order = order
self.pagesize = pagesize
self.startIndex = startIndex
self.workspace = None
self.project = None
self.scopeUp = None
self.scopeDown = None
self.pretty = False
def build(self, pretty=None):
if pretty:
self.pretty = True
resource = "%s%s?" % (self.entity, JSON_FORMAT)
qualifiers = ['fetch=%s' % self.fetch]
if self.query:
encodedQuery = self._prepQuery(self.query)
qualifiers.append('%s=%s' % ('query', encodedQuery if encodedQuery else ""))
if self.order:
qualifiers.append("order=%s" % urllib.quote(self.order))
if self.workspace:
qualifiers.append(self.workspace)
if self.project:
qualifiers.append(self.project)
if self.scopeUp:
qualifiers.append(self.scopeUp)
if self.scopeDown:
qualifiers.append(self.scopeDown)
qualifiers.append('pagesize=%s' % self.pagesize)
qualifiers.append('start=%s' % self.startIndex)
if self.pretty:
qualifiers.append('pretty=true')
resource += "&".join(qualifiers)
return resource
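# Illustrative build (a sketch; JSON_FORMAT and the query encoding are defined
# elsewhere in this module):
#   builder = RallyUrlBuilder('Defect')
#   builder.qualify(fetch=True, query='State = Open', order=None, pagesize=200, startIndex=1)
#   builder.build()
#   -> 'Defect<JSON_FORMAT>?fetch=True&query=(State%20%3D%20Open)&pagesize=200&start=1'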
def _prepQuery(self, query):
if not query:
return None
def _encode(condition):
"""
if cond has pattern of 'thing relation value', then urllib.quote it and return it
if cond has pattern of '(thing relation value)', then urllib.quote content inside parens
then pass that result enclosed in parens back to the caller
"""
if condition[0] != '(' and condition[-1] != ')':
return '(%s)' % urllib.quote(condition)
else:
return urllib.quote(condition)
if type(query) in [types.StringType, types.UnicodeType]:
# if the query as provided is already surrounded by paren chars, return it
# with the guts urllib.quote'ed
if query[0] == "(" and query[-1] == ")":
# restore any interior parens from the %28 / %29 encodings
return "(%s)" % urllib.quote(query[1:-1]).replace('%28', '(').replace('%29', ')')
if ' AND ' not in query and ' OR ' not in query and ' and ' not in query and ' or ' not in query:
return "(%s)" % urllib.quote(query)
else: # do a regex split using ' AND|OR ' then urllib.quote the individual conditions
CONJUNCTIONS = ['and', 'or', 'AND', 'OR']
parts = RallyQueryFormatter.CONJUNCTION_PATT.split(query)
parts = [p if p in CONJUNCTIONS else _encode(p) for p in parts]
return "(%s)" % "%20".join(parts)
elif type(query) in [types.ListType, types.TupleType]:
# by fiat (and until requested by a paying customer), we assume the conditions are AND'ed
parts = [_encode(condition) for condition in query]
return "(%s)" % "%20AND%20".join(parts)
elif type(query) == types.DictType: # wow! look at this wildly unfounded assumption about what to do!
parts = []
for field, value in query.items():
# have to enclose string value in double quotes, otherwise turn whatever the value is into a string
tval = '"%s"' % value if type(value) == types.StringType else '%s' % value
parts.append('(%s)' % urllib.quote('%s = %s' % (field, tval)))
anded = "%20AND%20".join(parts)
if len(parts) > 1:
return "(%s)" % anded
else:
return anded
return None
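# Illustrative encodings (a sketch of the three accepted query shapes):
#   'State = Open'                     -> '(State%20%3D%20Open)'
#   ['State = Open', 'Priority = Low'] -> '((State%20%3D%20Open)%20AND%20(Priority%20%3D%20Low))'
#   {'State': 'Open'}                  -> '(State%20%3D%20%22Open%22)'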
def augmentWorkspace(self, augments, workspace_ref):
wksp_augment = [aug for aug in augments if aug.startswith('workspace=')]
self.workspace = "workspace=%s" % workspace_ref
if wksp_augment:
self.workspace = wksp_augment[0]
def augmentProject(self, augments, project_ref):
proj_augment = [aug for aug in augments if aug.startswith('project=')]
self.project = "project=%s" % project_ref
if proj_augment:
self.project = proj_augment[0]
def augmentScoping(self, augments):
scopeUp = [aug for aug in augments if aug.startswith('projectScopeUp=')]
if scopeUp:
self.scopeUp = scopeUp[0]
scopeDown = [aug for aug in augments if aug.startswith('projectScopeDown=')]
if scopeDown:
self.scopeDown = scopeDown[0]
def beautifyResponse(self):
self.pretty = True
##################################################################################################
class RallyQueryFormatter(object):
CONJUNCTIONS = ['and', 'AND', 'or', 'OR']
CONJUNCTION_PATT = re.compile(r'\s+(AND|OR)\s+', re.I | re.M)
@staticmethod
def parenGroups(condition):
"""
Keep in mind that Rally WSAPI only supports a binary condition of (x) op (y)
as in "(foo) and (bar)"
or (foo) and ((bar) and (egg))
Note that Rally doesn't handle (x and y and z) directly.
Look at the condition to see if there are any parens other than begin and end
if the only parens are at begin and end, strip them and subject the condition to our
clause grouper and binary condition confabulator.
Otherwise, we'll naively assume the caller knows what they are doing, i.e., they are
aware of the binary condition requirement.
"""
# if the caller has a simple query in the form "(something = a_value)"
# then return the query as is (after stripping off the surrounding parens)
if condition.count('(') == 1 \
and condition.count(')') == 1 \
and condition.strip()[0] == '(' \
and condition.strip()[-1] == ')':
return condition.strip()[1:-1]
# if caller has more than one opening paren, summarily return the query
# essentially untouched. The assumption is that the caller has correctly
# done the parenthesized grouping to end up in a binary form
if condition.count('(') > 1:
return condition.strip()
parts = RallyQueryFormatter.CONJUNCTION_PATT.split(condition.strip())
# if no CONJUNCTION is in parts, use the condition as is (simple case)
conjunctions = [p for p in parts if p in RallyQueryFormatter.CONJUNCTIONS]
if not conjunctions:
return condition.strip()
binary_condition = parts.pop()
while parts:
item = parts.pop()
if item in RallyQueryFormatter.CONJUNCTIONS:
conj = item
binary_condition = "%s (%s)" % (conj, binary_condition)
else:
cond = item
binary_condition = "(%s) %s" % (cond, binary_condition)
return binary_condition
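# Illustrative grouping (a sketch): Rally only understands binary conditions, so
#   parenGroups('State = Open and Severity = High and Priority = Low')
# returns
#   '(State = Open) and ((Severity = High) and (Priority = Low))'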
##################################################################################################
|
{
"content_hash": "54583f42a17d851beb2fea4fa4474c96",
"timestamp": "",
"source": "github",
"line_count": 1475,
"max_line_length": 129,
"avg_line_length": 45.18779661016949,
"alnum_prop": 0.5897647482446138,
"repo_name": "ktan2020/legacy-automation",
"id": "ef2ccb5c45c37e99f87700db843d49e58f690ebd",
"size": "67204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/pyral/restapi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import open
from pants.util.contextutil import temporary_file
from pants_test.task_test_base import TaskTestBase
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.tasks.go_go import GoEnv, GoGo, GoInteropTask
class GoInteropTaskTest(TaskTestBase):
class FakeGoInteropTask(GoInteropTask):
def __init__(self, *args, **kwargs):
super(GoInteropTaskTest.FakeGoInteropTask, self).__init__(*args, **kwargs)
self._called_with = None
def execute_with_go_env(self, go_path, import_paths, args, **kwargs):
self._called_with = go_path, import_paths, args, kwargs
@property
def called_with(self):
return self._called_with
@classmethod
def task_type(cls):
return cls.FakeGoInteropTask
def test_no_targets(self):
task = self.create_task(self.context(passthru_args=['vim']))
with self.assertRaises(GoInteropTask.MissingArgsError):
task.execute()
def test_no_passthrough_args(self):
go_binary = self.make_target(spec='src/go:binary', target_type=GoBinary)
task = self.create_task(self.context(target_roots=[go_binary]))
with self.assertRaises(GoInteropTask.MissingArgsError):
task.execute()
def test_missing_both(self):
task = self.create_task(self.context())
with self.assertRaises(GoInteropTask.MissingArgsError):
task.execute()
def test_ok(self):
go_binary = self.make_target(spec='src/go/bob', target_type=GoBinary)
task = self.create_task(self.context(target_roots=[go_binary], passthru_args=['vim']))
task.execute()
self.assertEqual((task.get_gopath(go_binary), ['bob'], ['vim'], {}), task.called_with)
class GoEnvTest(TaskTestBase):
@classmethod
def task_type(cls):
return GoEnv
def test_execute(self):
bob_binary = self.make_target(spec='src/go/bob', target_type=GoBinary)
jane_binary = self.make_target(spec='src/go/jane', target_type=GoBinary)
task = self.create_task(self.context(target_roots=[bob_binary, jane_binary],
passthru_args=['echo', '$GOPATH']))
with temporary_file() as stdout:
task.execute(stdout=stdout)
stdout.close()
with open(stdout.name, 'r') as output:
self.assertEqual(output.read().strip(),
os.pathsep.join([task.get_gopath(bob_binary),
task.get_gopath(jane_binary)]))
class GoGoTest(TaskTestBase):
@classmethod
def task_type(cls):
return GoGo
def test_execute(self):
bob_binary = self.make_target(spec='src/go/bob', target_type=GoBinary)
jane_binary = self.make_target(spec='src/go/jane', target_type=GoBinary)
# A task to execute `go env GOPATH`.
task = self.create_task(self.context(target_roots=[bob_binary, jane_binary],
passthru_args=['env', 'GOPATH']))
with temporary_file() as stdout:
task.execute(stdout=stdout)
stdout.close()
with open(stdout.name, 'r') as output:
self.assertEqual(output.read().strip(),
os.pathsep.join([task.get_gopath(bob_binary),
task.get_gopath(jane_binary)]))
|
{
"content_hash": "bef6e10db52d271daa416f3462f28af1",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 90,
"avg_line_length": 36.72222222222222,
"alnum_prop": 0.6550680786686838,
"repo_name": "twitter/pants",
"id": "fb3df4b7d4f2e847ad7ac72921e94ed99cdc4b02",
"size": "3452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_go.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.periodic_resample.python.ops import gen_periodic_resample_op
from tensorflow.contrib.periodic_resample.python.ops.gen_periodic_resample_op import periodic_resample
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=unused-import
_periodic_resample_op = loader.load_op_library(
resource_loader.get_path_to_datafile('_periodic_resample_op.so'))
|
{
"content_hash": "82e5f2bb9ea2ea9b163fe79a4347da35",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 102,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.8051724137931034,
"repo_name": "ryfeus/lambda-packs",
"id": "348623d8f8d0c2ed60f559eca281343722038100",
"size": "1349",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/tensorflow/contrib/periodic_resample/python/ops/periodic_resample_op.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import sys
import socket
import string
import os
import traceback
try:
import irc.bot
except ImportError:
raise ImportError("Requires irclib; pip install irc")
from giotto.controllers import GiottoController
from giotto.exceptions import ProgramNotFound
from giotto.utils import parse_kwargs
irc_execution_snippet = """
parser = argparse.ArgumentParser(description='Giotto IRC Controller')
parser.add_argument('--model-mock', action='store_true', help='Mock out the model')
args = parser.parse_args()
config = {
'host': '',
'port': 6667,
'nick': '',
'ident': 'giotto',
'realname': 'Giotto IRC Bot',
'owner': '',
'channels': '', # comma separated
'magic_token': '!giotto ',
}
from giotto.controllers.irc_ import listen
listen(manifest, config, model_mock=args.model_mock)"""
class IRCController(GiottoController):
name = 'irc'
default_mimetype = 'text/x-irc'
def get_invocation(self):
return self.request.program
def get_controller_name(self):
return 'irc'
def get_raw_data(self):
kwargs = self.request.args
return parse_kwargs(kwargs)
def get_concrete_response(self):
try:
result = self.get_data_response()
except ProgramNotFound:
result = {'body': "Program not found"}
# convert to a format appropriate to the IRC Response api.
return dict(
response=result['body'],
say_to=self.request.sent_to,
)
def get_primitive(self, primitive):
if primitive == 'RAW_PAYLOAD':
return self.get_data()
class IRCRequest(object):
# the program name requested
program = ''
# the usr/channel the message was sent to
sent_to = ''
# PRIVMSG or whatever else...
msg_type = ''
# The message after the magic token has been removed
# eg: !giotto multiply --x=1 --y=2 --> multiply --x=1 --y=2
# note, invocations given through private message have no magic token
# so this value will be the same as `raw_message`
message_token_removed = ''
# the raw message with the magic token still attached
raw_message = ''
# boolean, was this request sent to a channel (True) or through private msg?
channel_msg = None
# opposite of `channel_msg`
private_msg = None
# the username of the person who made the request
username = ''
# the ident of the user who made the request
ident = ''
def __init__(self, event, magic_token, nick):
self.ident = event.source
self.username = self.ident.split("!")[0]
self.msg_type = event.type
self.sent_to = event.target
self.private_message = self.msg_type == "privmsg"
self.private_msg = self.private_message # keep the documented attributes populated
self.channel_msg = not self.private_message
self.raw_message = event.arguments[0]
self.program, self.args = self.get_program_and_args(self.raw_message,magic_token)
def get_program_and_args(self, message, magic_token):
if self.private_message:
program = message.split()[0]
args = message.split()[1:]
else:
# channel invocation
l = len(magic_token)
parsed_message = message[l:]
args = parsed_message.split()[1:]
program = parsed_message.split()[0]
return program, args
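# Illustrative parse (a sketch): with magic_token '!giotto ', the channel
# message '!giotto multiply --x=1 --y=2' yields
#   program == 'multiply', args == ['--x=1', '--y=2']
# A private message carries no magic token, so 'multiply --x=1 --y=2'
# parses the same way.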
def __repr__(self):
return "program: %s, args: %s" % (self.program, self.args)
class IrcBot(irc.bot.SingleServerIRCBot):
def __init__(self, config):
if not config['host']:
raise SystemExit('Error: IRC controller needs to be configured with a hostname')
if not config['nick']:
raise SystemExit('Error: IRC controller needs to be configured with a nick')
print("Connecting to %s:%s as %s" % (config['host'],config['port'], config['nick']))
irc.bot.SingleServerIRCBot.__init__(
self,
[(config['host'],config['port'])],
config['nick'],
config['realname']
)
channels = config['channels']
if channels:
self.channel = channels
print("Joining Channels: %s" % channels)
self.config = config
def on_nicknameinuse(self, connection, event):
connection.nick(connection.get_nickname() + "_")
def on_welcome(self, connection, event):
connection.join(self.channel)
def on_privmsg(self, connection, event):
self.process_message(connection, event)
def on_pubmsg(self, connection, event):
if not event.arguments[0].startswith(self.config['magic_token']):
return
self.process_message(connection, event)
def process_message(self, connection, event):
request = IRCRequest(
event,
self.config['magic_token'],
connection.get_nickname()
)
try:
controller = IRCController(request, self.config['manifest'], self.config['model_mock'])
result = controller.get_response()
except Exception as exc:
cls = exc.__class__.__name__
connection.privmsg(request.sent_to, "\x0304%s - %s: %s" % (request.program, cls, exc))
traceback.print_exc(file=sys.stdout)
else:
msg = "%s: %s" % (request.username, result['response'])
if request.private_message:
for m in msg.split('\n'):
connection.privmsg(request.username, m)
else:
for m in msg.split('\n'):
connection.privmsg(request.sent_to, m)
def listen(manifest, config, model_mock=False):
"""
IRC listening process.
"""
config['manifest'] = manifest
config['model_mock'] = model_mock
IRC = IrcBot(config)
try:
IRC.start()
except KeyboardInterrupt:
pass
|
{
"content_hash": "d423af164ecfefae4b97981dbc2b16db",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 99,
"avg_line_length": 30.322916666666668,
"alnum_prop": 0.5985915492957746,
"repo_name": "priestc/giotto",
"id": "829c1344fa433205f9637c9932658846ee3ab223",
"size": "5822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "giotto/controllers/irc_.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "123774"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
}
|
"""Consume and serialize all of the data from a running TensorBoard instance.
This program connects to a live TensorBoard backend at a given port, and saves
all of the data to local disk JSON in a predictable format.
This makes it easy to mock out the TensorBoard backend so that the frontend
may be tested in isolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import json
import os
import os.path
import shutil
import StringIO
import threading
import urllib
import six
from six.moves import http_client
import tensorflow as tf
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard
backend; data will be read from this logdir for serialization.""")
tf.flags.DEFINE_string('target', None, """The directory where serialized data
will be written""")
tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite
TARGET if it already exists.""")
tf.flags.DEFINE_boolean(
'purge_orphaned_data', True, 'Whether to purge data that '
'may have been orphaned due to TensorBoard restarts. '
'Disabling purge_orphaned_data can be used to debug data '
'disappearance.')
FLAGS = tf.flags.FLAGS
BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|="
DEFAULT_SUFFIX = '.json'
IMAGE_SUFFIX = '.png'
AUDIO_SUFFIX = '.wav'
GRAPH_SUFFIX = '.pbtxt'
def Url(route, params):
"""Takes route and query params, and produce encoded url for that asset."""
out = route
if params:
# sorting ensures a unique filename for each query
sorted_params = sorted(six.iteritems(params))
out += '?' + urllib.urlencode(sorted_params)
return out
def Clean(s):
"""Clean a string so it can be used as a filepath."""
for c in BAD_CHARACTERS:
s = s.replace(c, '_')
return s
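# Illustrative round trip (a sketch):
#   Url('scalars', {'run': 'train', 'tag': 'loss'}) -> 'scalars?run=train&tag=loss'
#   Clean('scalars?run=train&tag=loss') -> 'scalars_run_train_tag_loss'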
class TensorBoardStaticSerializer(object):
"""Serialize all the routes from a TensorBoard server to static json."""
def __init__(self, connection, target_path):
self.connection = connection
EnsureDirectoryExists(os.path.join(target_path, 'data'))
self.path = target_path
def GetAndSave(self, url, save_suffix, unzip=False):
"""GET the given url. Serialize the result at clean path version of url."""
self.connection.request('GET',
'/data/' + url,
headers={'content-type': 'text/plain'})
response = self.connection.getresponse()
destination = self.path + '/data/' + Clean(url) + save_suffix
if response.status != 200:
raise IOError(url)
if unzip:
s = StringIO.StringIO(response.read())
content = gzip.GzipFile(fileobj=s).read()
else:
content = response.read()
with open(destination, 'w') as f:
f.write(content)
return content
def GetRouteAndSave(self, route, params=None):
"""GET given route and params. Serialize the result. Return as JSON."""
url = Url(route, params)
return json.loads(self.GetAndSave(url, DEFAULT_SUFFIX))
def Run(self):
"""Serialize everything from a TensorBoard backend."""
# get the runs object, which is an index for every tag.
runs = self.GetRouteAndSave('runs')
# collect sampled data.
self.GetRouteAndSave('scalars')
# now let's just download everything!
for run, tag_type_to_tags in six.iteritems(runs):
for tag_type, tags in six.iteritems(tag_type_to_tags):
try:
if tag_type == 'graph':
# in this case, tags is a bool which specifies if graph is present.
if tags:
url = Url('graph', {'run': run})
self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)
elif tag_type == 'images':
for t in tags:
images = self.GetRouteAndSave('images', {'run': run, 'tag': t})
for im in images:
url = 'individualImage?' + im['query']
# pull down the images themselves.
self.GetAndSave(url, IMAGE_SUFFIX)
elif tag_type == 'audio':
for t in tags:
audio = self.GetRouteAndSave('audio', {'run': run, 'tag': t})
for snd in audio:
url = 'individualAudio?' + snd['query']
# pull down the audio clips themselves
self.GetAndSave(url, AUDIO_SUFFIX)
else:
for t in tags:
# Save this, whatever it is :)
self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})
except Exception as e: # typically IOError or TypeError
PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),
tf.logging.WARN)
# PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)
PrintAndLog('continuing...', tf.logging.WARN)
continue
def EnsureDirectoryExists(path):
if not os.path.exists(path):
os.makedirs(path)
def PrintAndLog(msg, lvl=tf.logging.INFO):
tf.logging.log(lvl, msg)
print(msg)
def main(unused_argv=None):
target = FLAGS.target
logdir = FLAGS.logdir
if not target or not logdir:
PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR)
return -1
if os.path.exists(target):
if FLAGS.overwrite:
if os.path.isdir(target):
shutil.rmtree(target)
else:
os.remove(target)
else:
PrintAndLog('Refusing to overwrite target %s without --overwrite' %
target, tf.logging.ERROR)
return -2
path_to_run = server.ParseEventFilesSpec(FLAGS.logdir)
PrintAndLog('About to load Multiplexer. This may take some time.')
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE,
purge_orphaned_data=FLAGS.purge_orphaned_data)
server.ReloadMultiplexer(multiplexer, path_to_run)
PrintAndLog('Multiplexer load finished. Starting TensorBoard server.')
s = server.BuildServer(multiplexer, 'localhost', 0, logdir)
server_thread = threading.Thread(target=s.serve_forever)
server_thread.daemon = True
server_thread.start()
connection = http_client.HTTPConnection('localhost', s.server_address[1])
PrintAndLog('Server setup! Downloading data from the server.')
x = TensorBoardStaticSerializer(connection, target)
x.Run()
PrintAndLog('Done downloading data.')
connection.close()
s.shutdown()
s.server_close()
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "909cae0d72029ef3ce042ada83d3027b",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 32.57286432160804,
"alnum_prop": 0.6541190990435051,
"repo_name": "wenwei202/terngrad",
"id": "fd9fd39486c07097ccb88408f78d163ee471d0fe",
"size": "7159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terngrad/serialize_tensorboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "45645"
},
{
"name": "Python",
"bytes": "862466"
},
{
"name": "Shell",
"bytes": "67404"
}
],
"symlink_target": ""
}
|
from importlib.metadata import version
from packaging.version import parse
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.extlinks",
"sphinx_autodoc_typehints",
"sphinx_tabs.tabs",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "asphalt-web"
author = "Alex Grönholm"
copyright = "2022, " + author
v = parse(version(project))
version = v.base_version
release = v.public
language = None
exclude_patterns = ["_build"]
pygments_style = "sphinx"
highlight_language = "python3"
todo_include_todos = False
autodoc_inherit_docstrings = False
autodoc_default_options = {"show-inheritance": True}
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
htmlhelp_basename = project.replace("-", "") + "doc"
extlinks = {
"github": (
f"https://github.com/asphalt-framework/{project}/tree/{release}/%s",
None,
)
}
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"asphalt": ("https://asphalt.readthedocs.io/en/latest/", None),
}
|
{
"content_hash": "02868ba232dca51dd2f9e4fb50e5cae9",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 22.872340425531913,
"alnum_prop": 0.6716279069767442,
"repo_name": "asphalt-framework/asphalt-web",
"id": "89d3c242984701013458bc74e83e2ef4d86c570e",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50469"
}
],
"symlink_target": ""
}
|
'''
Copyright 2009, The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# script to highlight adb logcat output for console
# written by jeff sharkey, http://jsharkey.org/
# piping detection and popen() added by other android team members
# instructions
# just run `python coloredlogcat.py` instead of `adb logcat`
# screenshot
# developer.sinnerschrader-mobile.com/colored-logcat-reloaded/507/
# alternative here: https://github.com/JakeWharton/pidcat
import os, sys, re, StringIO
import fcntl, termios, struct
import time
from datetime import datetime
# unpack the current terminal width/height
data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
HEIGHT, WIDTH = struct.unpack('hh',data)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def timestamp():
now = time.time()
milliseconds = '%03d' % int((now - int(now)) * 1000)
return datetime.fromtimestamp(now).strftime('%H:%M:%S') + "." + milliseconds
def format(fg=None, bg=None, bright=False, bold=False, dim=False, reset=False):
# manually derived from http://en.wikipedia.org/wiki/ANSI_escape_code#Codes
codes = []
if reset: codes.append("0")
else:
if not fg is None: codes.append("3%d" % (fg))
if not bg is None:
if not bright: codes.append("4%d" % (bg))
else: codes.append("10%d" % (bg))
if bold: codes.append("1")
elif dim: codes.append("2")
else: codes.append("22")
return "\033[%sm" % (";".join(codes))
def indent_wrap(message, indent=0, width=80):
wrap_area = width - indent
messagebuf = StringIO.StringIO()
current = 0
while current < len(message):
next = min(current + wrap_area, len(message))
messagebuf.write(message[current:next])
if next < len(message):
messagebuf.write("\n%s" % (" " * indent))
current = next
return messagebuf.getvalue()
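# Illustrative wrap (a sketch): indent_wrap('abcdefgh', indent=2, width=6)
# fills 4 columns per line and indents each continuation:
#   'abcd\n  efgh'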
LAST_USED = [RED,GREEN,YELLOW,BLUE,MAGENTA,CYAN,WHITE]
KNOWN_TAGS = {
"dalvikvm": BLUE,
"Process": BLUE,
"ActivityManager": CYAN,
"ActivityThread": CYAN,
}
def allocate_color(tag):
# this will allocate a unique format for the given tag
# since we dont have very many colors, we always keep track of the LRU
if not tag in KNOWN_TAGS:
KNOWN_TAGS[tag] = LAST_USED[0]
color = KNOWN_TAGS[tag]
LAST_USED.remove(color)
LAST_USED.append(color)
return color
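# Illustrative LRU behavior (a sketch): an unseen tag takes the least-recently-used
# color, which then moves to the back of LAST_USED:
#   allocate_color('MyApp') -> RED (first unseen tag, with the defaults above)
#   allocate_color('MyApp') -> RED (remembered in KNOWN_TAGS)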
RULES = {
#re.compile(r"([\w\.@]+)=([\w\.@]+)"): r"%s\1%s=%s\2%s" % (format(fg=BLUE), format(fg=GREEN), format(fg=BLUE), format(reset=True)),
}
TAGTYPE_WIDTH = 3
TAG_WIDTH = 20
TIME_WIDTH = 11
PROCESS_WIDTH = 8 # 8 or -1
HEADER_SIZE = TAGTYPE_WIDTH + 1 + TIME_WIDTH + 1 + TAG_WIDTH + 1 + PROCESS_WIDTH + 1
TAGTYPES = {
"V": "%s%s%s%s " % (format(fg=WHITE, bg=BLACK), "V".center(TAGTYPE_WIDTH), format(reset=True), format(fg=WHITE)),
"D": "%s%s%s%s " % (format(fg=BLACK, bg=BLUE), "D".center(TAGTYPE_WIDTH), format(reset=True), format(fg=WHITE)),
"I": "%s%s%s%s " % (format(fg=BLACK, bg=GREEN), "I".center(TAGTYPE_WIDTH), format(reset=True), format(fg=WHITE)),
"W": "%s%s%s%s " % (format(fg=BLACK, bg=YELLOW), "W".center(TAGTYPE_WIDTH), format(reset=True), format(fg=YELLOW)),
"E": "%s%s%s%s " % (format(fg=BLACK, bg=RED), "E".center(TAGTYPE_WIDTH), format(reset=True), format(fg=RED)),
}
retag = re.compile("^([A-Z])/([^\(]+)\(([^\)]+)\): (.*)$")
# to pick up -d or -e
adb_args = ' '.join(sys.argv[1:])
# if someone is piping in to us, use stdin as input. if not, invoke adb logcat
if os.isatty(sys.stdin.fileno()):
input = os.popen("adb %s logcat" % adb_args)
else:
input = sys.stdin
while True:
try:
line = input.readline()
except KeyboardInterrupt:
break
match = retag.match(line)
if not match is None:
tagtype, tag, owner, message = match.groups()
linebuf = StringIO.StringIO()
# center process info
if PROCESS_WIDTH > 0:
owner = owner.strip().center(PROCESS_WIDTH)
linebuf.write("%s%s%s " % (format(fg=BLACK, bg=BLACK, bright=True), owner, format(reset=True)))
linebuf.write("%s%s%s" % (format(fg=WHITE), timestamp(), format(reset=True)))
# right-align tag title and allocate color if needed
tag = tag.strip()
color = allocate_color(tag)
tag = tag[-TAG_WIDTH:].rjust(TAG_WIDTH)
linebuf.write("%s%s %s" % (format(fg=color, dim=False), tag, format(reset=True)))
# write out tagtype colored edge
if not tagtype in TAGTYPES: break
linebuf.write(TAGTYPES[tagtype])
# insert line wrapping as needed
message = indent_wrap(message, HEADER_SIZE, WIDTH)
# format tag message using rules
for matcher in RULES:
replace = RULES[matcher]
message = matcher.sub(replace, message)
linebuf.write(message)
linebuf.write(format(reset=True))
line = linebuf.getvalue()
print line
if len(line) == 0: break
|
{
"content_hash": "7a1adbef146d383861e173dbf8d8a4d2",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 135,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6339446242358864,
"repo_name": "markmckenna/dotfiles",
"id": "e435e5de32fce7f4b55f0a24b60d65df4c5975af",
"size": "5581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/coloredlogcat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4590"
},
{
"name": "GDB",
"bytes": "29"
},
{
"name": "Makefile",
"bytes": "1450"
},
{
"name": "Objective-C",
"bytes": "1281"
},
{
"name": "Python",
"bytes": "18098"
},
{
"name": "Shell",
"bytes": "23950"
},
{
"name": "Vim script",
"bytes": "304123"
},
{
"name": "Visual Basic",
"bytes": "29004"
}
],
"symlink_target": ""
}
|
from .device import Device
from .pathpoints import BasePathpoint, pathpoint_from_functions
|
{
"content_hash": "927befbea6ea37e6901bb23e01d3153a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 63,
"avg_line_length": 45,
"alnum_prop": 0.8555555555555555,
"repo_name": "smok-serwis/longshot-python",
"id": "a17af38258e9bb8fb4c1624c0ff2f07e1536bccd",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "longshot/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15652"
}
],
"symlink_target": ""
}
|
"""
Cement core argument module.
"""
from ..core import backend, exc, interface, handler
Log = backend.minimal_logger(__name__)
def argument_validator(klass, obj):
"""Validates a handler implementation against the IArgument interface."""
members = [
'_setup',
'parse',
'parsed_args',
'add_argument',
]
interface.validate(IArgument, obj, members)
class IArgument(interface.Interface):
"""
This class defines the Argument Handler Interface. Classes that
implement this handler must provide the methods and attributes defined
below. Implementations do *not* subclass from interfaces.
Example:
.. code-block:: python
from cement.core import interface, arg
class MyArgumentHandler(arg.CementArgumentHandler):
class Meta:
interface = arg.IArgument
label = 'my_argument_handler'
"""
class IMeta:
label = 'argument'
validator = argument_validator
# Must be provided by the implementation
Meta = interface.Attribute('Handler Meta-data')
parsed_args = interface.Attribute('Parsed args object')
def _setup(app_obj):
"""
The _setup function is called during application initialization and
must 'setup' the handler object making it ready for the framework
or the application to make further calls to it.
Required Arguments:
app_obj
The application object.
Return: None
"""
def add_argument(self, *args, **kw):
"""
Add arguments for parsing. This should be -o/--option or positional.
Positional Arguments:
args
List of option arguments. Generally something like
['-h', '--help'].
Optional Arguments
dest
The destination name (var). Default: arg[0]'s string.
help
The help text for --help output (for that argument).
action
Must support: ['store', 'store_true', 'store_false',
'store_const']
const
The value stored if action == 'store_const'.
default
The default value.
Return: None
"""
def parse(self, arg_list):
"""
Parse the argument list (i.e. sys.argv). Can return any object as
long as its members contain those of the added arguments. For
example, if adding a '-v/--version' option that stores to the dest of
'version', then the member must be callable as 'Object().version'.
Must also set self.parsed_args to what is being returned.
Required Arguments:
arg_list
A list of command line arguments.
Return: Callable
"""
class CementArgumentHandler(handler.CementBaseHandler):
"""
Base class that all Argument Handlers should sub-class from.
"""
class Meta:
label = None
interface = IArgument
def __init__(self, *args, **kw):
super(CementArgumentHandler, self).__init__(*args, **kw)
|
{
"content_hash": "4fefb6213c98764b042563e360fa5152",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 79,
"avg_line_length": 28.13821138211382,
"alnum_prop": 0.540306269864201,
"repo_name": "derks/cement",
"id": "87faa000d164f7112fe4caee5a8df5e014f16ae5",
"size": "3461",
"binary": false,
"copies": "1",
"ref": "refs/heads/portland",
"path": "cement/core/arg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""$_memeq, $_strlen, $_streq, $_regex"""
import gdb
import re
class _MemEq(gdb.Function):
"""$_memeq - compare bytes of memory
Usage:
$_memeq(a, b, len)
Returns:
True if len bytes at a and b compare equally.
"""
def __init__(self):
super(_MemEq, self).__init__("_memeq")
def invoke(self, a, b, length):
if length < 0:
raise ValueError("length must be non-negative")
if length == 0:
return True
# The argument(s) to vector are [low_bound,]high_bound.
byte_vector = gdb.lookup_type("char").vector(length - 1)
ptr_byte_vector = byte_vector.pointer()
a_ptr = a.reinterpret_cast(ptr_byte_vector)
b_ptr = b.reinterpret_cast(ptr_byte_vector)
return a_ptr.dereference() == b_ptr.dereference()
class _StrLen(gdb.Function):
"""$_strlen - compute string length
Usage:
$_strlen(a)
Returns:
Length of string a, assumed to be a string in the current language.
"""
def __init__(self):
super(_StrLen, self).__init__("_strlen")
def invoke(self, a):
s = a.string()
return len(s)
class _StrEq(gdb.Function):
"""$_streq - check string equality
Usage:
$_streq(a, b)
Returns:
True if a and b are identical strings in the current language.
Example (amd64-linux):
catch syscall open
cond $bpnum $_streq((char*) $rdi, "foo")
"""
def __init__(self):
super(_StrEq, self).__init__("_streq")
def invoke(self, a, b):
return a.string() == b.string()
class _RegEx(gdb.Function):
"""$_regex - check if a string matches a regular expression
Usage:
$_regex(string, regex)
Returns:
True if string str (in the current language) matches the
regular expression regex.
"""
def __init__(self):
super(_RegEx, self).__init__("_regex")
def invoke(self, string, regex):
s = string.string()
r = re.compile(regex.string())
return bool(r.match(s))
# GDB will import us automagically via gdb/__init__.py.
_MemEq()
_StrLen()
_StrEq()
_RegEx()
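# Illustrative use from the gdb prompt (a sketch; `name` is a hypothetical
# char* variable in the debuggee):
#   (gdb) print $_strlen("hello")
#   (gdb) cond 1 $_regex(name, "^prefix") # breakpoint 1 fires only on matching names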
|
{
"content_hash": "7dee5d7c7aa0e9b8fb6ba7fbc62ee3f0",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 69,
"avg_line_length": 21.17391304347826,
"alnum_prop": 0.6319301848049281,
"repo_name": "trfiladelfo/tdk",
"id": "9e2ed79a449ab8d61367cbab0c66738d876a1c19",
"size": "2684",
"binary": false,
"copies": "122",
"ref": "refs/heads/master",
"path": "gcc-arm-none-eabi/arm-none-eabi/share/gdb/python/gdb/function/strfns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "614531"
},
{
"name": "Batchfile",
"bytes": "101839"
},
{
"name": "C",
"bytes": "12540389"
},
{
"name": "C++",
"bytes": "13332391"
},
{
"name": "CSS",
"bytes": "140569"
},
{
"name": "HTML",
"bytes": "23954553"
},
{
"name": "Logos",
"bytes": "8877"
},
{
"name": "Makefile",
"bytes": "129672"
},
{
"name": "Perl",
"bytes": "9844"
},
{
"name": "Python",
"bytes": "180880"
},
{
"name": "Scheme",
"bytes": "3970"
},
{
"name": "Shell",
"bytes": "10777"
},
{
"name": "Tcl",
"bytes": "128365"
},
{
"name": "XC",
"bytes": "8384"
},
{
"name": "XS",
"bytes": "8334"
},
{
"name": "XSLT",
"bytes": "221100"
}
],
"symlink_target": ""
}
|
"""
Interface for setting up and creating a model in Tensorflow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tensorflow as tf
from tensorflow.python.framework import ops
# Local imports
import tf_data
import utils as digits
from utils import model_property
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
# Constants
SUMMARIZE_TOWER_STATS = False
# from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
with tf.name_scope('gradient_average'):
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grads_transformed = tf.concat(grads, 0)
grads_transformed = tf.reduce_mean(grads_transformed, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grads_transformed, v)
average_grads.append(grad_and_var)
return average_grads
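# Illustrative shape (a sketch): with two towers sharing one variable v,
#   tower_grads = [[(g0, v)], [(g1, v)]]
#   average_gradients(tower_grads) -> [((g0 + g1) / 2, v)]
# i.e. gradients are averaged elementwise across the tower dimension.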
class Model(object):
"""
Wrapper around the actual tensorflow workflow process.
This is structured in a way that the user should only care about
creating the model while using the DIGITS UI to select the
optimizer and other options.
This class is executed to start a tensorflow workflow.
"""
def __init__(self, stage, croplen, nclasses, optimization=None, momentum=None, reuse_variable=False):
self.stage = stage
self.croplen = croplen
self.nclasses = nclasses
self.dataloader = None
self.queue_coord = None
self.queue_threads = None
self._optimization = optimization
self._momentum = momentum
self.summaries = []
self.towers = []
self._train = None
self._reuse = reuse_variable
# Touch to initialize
# if optimization:
# self.learning_rate
# self.global_step
# self.optimizer
def create_dataloader(self, db_path):
self.dataloader = tf_data.LoaderFactory.set_source(db_path, is_inference=(self.stage == digits.STAGE_INF))
# @TODO(tzaman) communicate the dataloader summaries to our Model summary list
self.dataloader.stage = self.stage
self.dataloader.croplen = self.croplen
self.dataloader.nclasses = self.nclasses
def init_dataloader(self):
with tf.device('/cpu:0'):
with tf.name_scope(digits.GraphKeys.LOADER):
self.dataloader.create_input_pipeline()
def create_model(self, obj_UserModel, stage_scope, batch_x=None):
if batch_x is None:
self.init_dataloader()
batch_x = self.dataloader.batch_x
if self.stage != digits.STAGE_INF:
batch_y = self.dataloader.batch_y
else:
assert self.stage == digits.STAGE_INF # inference has no labels; batch_x is used as-is
available_devices = digits.get_available_gpus()
if not available_devices:
available_devices.append('/cpu:0')
# available_devices = ['/gpu:0', '/gpu:1'] # DEVELOPMENT : virtual multi-gpu
# Split the batch over the batch dimension over the number of available gpu's
if len(available_devices) == 1:
batch_x_split = [batch_x]
if self.stage != digits.STAGE_INF: # Has no labels
batch_y_split = [batch_y]
else:
with tf.name_scope('parallelize'):
# Split them up
batch_x_split = tf.split(batch_x, len(available_devices), 0, name='split_batch')
if self.stage != digits.STAGE_INF: # Has no labels
batch_y_split = tf.split(batch_y, len(available_devices), 0, name='split_batch')
# Run the user model through the build_model function that should be filled in
grad_towers = []
for dev_i, dev_name in enumerate(available_devices):
with tf.device(dev_name):
current_scope = stage_scope if len(available_devices) == 1 else ('tower_%d' % dev_i)
with tf.name_scope(current_scope) as scope_tower:
if self.stage != digits.STAGE_INF:
tower_model = self.add_tower(obj_tower=obj_UserModel,
x=batch_x_split[dev_i],
y=batch_y_split[dev_i])
else:
tower_model = self.add_tower(obj_tower=obj_UserModel,
x=batch_x_split[dev_i],
y=None)
with tf.variable_scope(digits.GraphKeys.MODEL, reuse=dev_i > 0 or self._reuse):
tower_model.inference # touch to initialize
# Reuse the variables in this scope for the next tower/device
tf.get_variable_scope().reuse_variables()
if self.stage == digits.STAGE_INF:
# For inferencing we will only use the inference part of the graph
continue
with tf.name_scope(digits.GraphKeys.LOSS):
for loss in self.get_tower_losses(tower_model):
tf.add_to_collection(digits.GraphKeys.LOSSES, loss['loss'])
# Assemble all made within this scope so far. The user can add custom
# losses to the digits.GraphKeys.LOSSES collection
losses = tf.get_collection(digits.GraphKeys.LOSSES, scope=scope_tower)
losses += ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope=None)
tower_loss = tf.add_n(losses, name='loss')
self.summaries.append(tf.summary.scalar(tower_loss.op.name, tower_loss))
if self.stage == digits.STAGE_TRAIN:
grad_tower_losses = []
for loss in self.get_tower_losses(tower_model):
grad_tower_loss = self.optimizer.compute_gradients(loss['loss'], loss['vars'])
grad_tower_loss = tower_model.gradientUpdate(grad_tower_loss)
grad_tower_losses.append(grad_tower_loss)
grad_towers.append(grad_tower_losses)
# Assemble and average the gradients from all towers
if self.stage == digits.STAGE_TRAIN:
n_gpus = len(available_devices)
if n_gpus == 1:
grad_averages = grad_towers[0]
else:
with tf.device(available_devices[0]):
n_losses = len(grad_towers[0])
grad_averages = []
for loss in xrange(n_losses):
grad_averages.append(average_gradients([grad_towers[gpu][loss] for gpu in xrange(n_gpus)]))
apply_gradient_ops = []
for grad_avg in grad_averages:
apply_gradient_ops.append(self.optimizer.apply_gradients(grad_avg, global_step=self.global_step))
self._train = apply_gradient_ops
def start_queue_runners(self, sess):
logging.info('Starting queue runners (%s)', self.stage)
# Distinguish the queue runner collection (for easily obtaining them by collection key)
queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS, scope=self.stage+'.*')
for qr in queue_runners:
if self.stage in qr.name:
tf.add_to_collection(digits.GraphKeys.QUEUE_RUNNERS, qr)
self.queue_coord = tf.train.Coordinator()
self.queue_threads = tf.train.start_queue_runners(sess=sess, coord=self.queue_coord,
collection=digits.GraphKeys.QUEUE_RUNNERS)
logging.info('Queue runners started (%s)', self.stage)
def __del__(self):
# Destructor
if self.queue_coord:
# Close and terminate the queues
self.queue_coord.request_stop()
self.queue_coord.join(self.queue_threads)
def add_tower(self, obj_tower, x, y):
is_training = self.stage == digits.STAGE_TRAIN
is_inference = self.stage == digits.STAGE_INF
input_shape = self.dataloader.get_shape()
tower = obj_tower(x, y, input_shape, self.nclasses, is_training, is_inference)
self.towers.append(tower)
return tower
@model_property
def train(self):
return self._train
@model_property
def summary(self):
"""
Merge train summaries
"""
for t in self.towers:
self.summaries += t.summaries
if not len(self.summaries):
logging.error("No summaries defined. Please define at least one summary.")
exit(-1)
return tf.summary.merge(self.summaries)
@model_property
def global_step(self):
# Force global_step onto the CPU, because otherwise the GPU's first step will end at 0 instead of 1.
with tf.device('/cpu:0'):
return tf.get_variable('global_step', [], initializer=tf.constant_initializer(0),
trainable=False)
@model_property
def learning_rate(self):
# @TODO(tzaman): the learning rate is a function of the global step, so we could
# define it entirely in tf ops, instead of a placeholder and feeding.
with tf.device('/cpu:0'):
lr = tf.placeholder(tf.float32, shape=[], name='learning_rate')
self.summaries.append(tf.summary.scalar('lr', lr))
return lr
@model_property
def optimizer(self):
logging.info("Optimizer:%s", self._optimization)
if self._optimization == 'sgd':
return tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
elif self._optimization == 'adadelta':
return tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate)
elif self._optimization == 'adagrad':
return tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
elif self._optimization == 'adagradda':
return tf.train.AdagradDAOptimizer(learning_rate=self.learning_rate,
global_step=self.global_step)
elif self._optimization == 'momentum':
return tf.train.MomentumOptimizer(learning_rate=self.learning_rate,
momentum=self._momentum)
elif self._optimization == 'adam':
return tf.train.AdamOptimizer(learning_rate=self.learning_rate)
elif self._optimization == 'ftrl':
return tf.train.FtrlOptimizer(learning_rate=self.learning_rate)
elif self._optimization == 'rmsprop':
return tf.train.RMSPropOptimizer(learning_rate=self.learning_rate,
momentum=self._momentum)
else:
logging.error("Invalid optimization flag %s", self._optimization)
exit(-1)
def get_tower_losses(self, tower):
"""
Return a list of losses.
If the user-defined model returns only one loss, it is encapsulated into
the expected list-of-dicts structure.
"""
if isinstance(tower.loss, list):
return tower.loss
else:
return [{'loss': tower.loss, 'vars': tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)}]
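# Illustrative normalization (a sketch): a user model that returns a single
# loss tensor L comes back from get_tower_losses as
#   [{'loss': L, 'vars': <all trainable variables>}]
# matching the list-of-dicts shape that multi-loss models return directly.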
class Tower(object):
def __init__(self, x, y, input_shape, nclasses, is_training, is_inference):
self.input_shape = input_shape
self.nclasses = nclasses
self.is_training = is_training
self.is_inference = is_inference
self.summaries = []
self.x = x
self.y = y
self.train = None
def gradientUpdate(self, grad):
return grad
|
{
"content_hash": "66e3770f6a1cceff89fea38305120d33",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 115,
"avg_line_length": 42.848874598070736,
"alnum_prop": 0.5800690379708839,
"repo_name": "ethantang95/DIGITS",
"id": "a19df6309fa574f93139cb02c8c010f96e2d54ea",
"size": "13463",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "digits/tools/tensorflow/model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4386"
},
{
"name": "HTML",
"bytes": "2638345"
},
{
"name": "JavaScript",
"bytes": "53917"
},
{
"name": "Lua",
"bytes": "110602"
},
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Protocol Buffer",
"bytes": "1750"
},
{
"name": "Python",
"bytes": "1230584"
},
{
"name": "Shell",
"bytes": "13547"
}
],
"symlink_target": ""
}
|
import collections
import functools
import logging
import os
import re
import cloudstorage as gcs
import jinja2
import webapp2
import yaml
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from webapp2_extras import sessions
import filters as jinja_filters
PROW_JOBS = yaml.safe_load(open('prow_jobs.yaml'))
DEFAULT_JOBS = {
'kubernetes-jenkins/logs/': {
'ci-kubernetes-e2e-gce-etcd3',
'ci-kubernetes-e2e-gci-gce',
'ci-kubernetes-e2e-gci-gce-slow',
'ci-kubernetes-e2e-gci-gke',
'ci-kubernetes-e2e-gci-gke-slow',
'ci-kubernetes-kubemark-500-gce',
'ci-kubernetes-node-kubelet',
'ci-kubernetes-test-go',
'ci-kubernetes-verify-master',
'kubernetes-build',
'kubernetes-e2e-kops-aws',
},
'kubernetes-jenkins/pr-logs/directory/': {
j['name'] for j in PROW_JOBS['presubmits']['kubernetes/kubernetes'] if j.get('always_run')
},
}
# Maps github organizations/owners to GCS paths for presubmit results
PR_PREFIX = collections.OrderedDict([
('kubernetes', 'kubernetes-jenkins/pr-logs/pull'),
('google', 'kubernetes-jenkins/pr-logs/pull'), # for cadvisor
('istio', 'istio-prow/pull'),
])
PROW_INSTANCES = {
'istio': 'prow.istio.io',
'DEFAULT': 'prow.k8s.io',
}
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__) + '/templates'),
extensions=['jinja2.ext.autoescape', 'jinja2.ext.loopcontrols'],
trim_blocks=True,
autoescape=True)
JINJA_ENVIRONMENT.line_statement_prefix = '%'
jinja_filters.register(JINJA_ENVIRONMENT.filters)
class BaseHandler(webapp2.RequestHandler):
"""Base class for Handlers that render Jinja templates."""
def __init__(self, *args, **kwargs):
super(BaseHandler, self).__init__(*args, **kwargs)
# The default deadline of 5 seconds is too aggressive a target for GCS
# directory listing operations.
urlfetch.set_default_fetch_deadline(60)
# This example code is from:
# http://webapp2.readthedocs.io/en/latest/api/webapp2_extras/sessions.html
def dispatch(self):
# pylint: disable=attribute-defined-outside-init
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
# Returns a session using the default cookie key.
return self.session_store.get_session()
def render(self, template, context):
"""Render a context dictionary using a given template."""
template = JINJA_ENVIRONMENT.get_template(template)
self.response.write(template.render(context))
class IndexHandler(BaseHandler):
"""Render the index."""
def get(self):
self.render("index.html", {'jobs': DEFAULT_JOBS})
def memcache_memoize(prefix, expires=60 * 60, neg_expires=60):
"""Decorate a function to memoize its results using memcache.
The function must take a single string as input, and return a pickleable
type.
Args:
prefix: A prefix for memcache keys to use for memoization.
expires: How long to memoize values, in seconds.
neg_expires: How long to memoize falsey values, in seconds.
Returns:
A decorator closure to wrap the function.
"""
# setting the namespace based on the current version prevents different
# versions from sharing cache values -- meaning there's no need to worry
# about incompatible old key/value pairs
namespace = os.environ['CURRENT_VERSION_ID']
def wrapper(func):
@functools.wraps(func)
def wrapped(*args):
key = '%s%s' % (prefix, args)
data = memcache.get(key, namespace=namespace)
if data is not None:
return data
else:
data = func(*args)
try:
if data:
memcache.add(key, data, expires, namespace=namespace)
else:
memcache.add(key, data, neg_expires, namespace=namespace)
except ValueError:
logging.exception('unable to write to memcache')
return data
return wrapped
return wrapper
@memcache_memoize('gs-ls://', expires=60)
def gcs_ls(path):
"""Enumerate files in a GCS directory. Returns a list of FileStats."""
if path[-1] != '/':
path += '/'
return list(gcs.listbucket(path, delimiter='/'))
def pad_numbers(s):
"""Modify a string to make its numbers suitable for natural sorting."""
return re.sub(r'\d+', lambda m: m.group(0).rjust(16, '0'), s)
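# Illustrative padding (a sketch): pad_numbers('build-9') -> 'build-0000000000000009',
# so lexicographic order of padded keys matches natural numeric order
# ('build-9' now sorts before 'build-10').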
|
{
"content_hash": "8ea9c8181ae7f8b08156b96289de42ad",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 98,
"avg_line_length": 33.17567567567568,
"alnum_prop": 0.6411405295315682,
"repo_name": "piosz/test-infra",
"id": "3b5d27fbaba7dfce23d8170e4899f789ca8e21d5",
"size": "5498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gubernator/view_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9951"
},
{
"name": "Go",
"bytes": "1878996"
},
{
"name": "HTML",
"bytes": "54166"
},
{
"name": "JavaScript",
"bytes": "89991"
},
{
"name": "Makefile",
"bytes": "40229"
},
{
"name": "Nginx",
"bytes": "1751"
},
{
"name": "Protocol Buffer",
"bytes": "6319"
},
{
"name": "Python",
"bytes": "796416"
},
{
"name": "Roff",
"bytes": "13936"
},
{
"name": "Shell",
"bytes": "100564"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import hashlib
import os
import shutil
import textwrap
import threading
from collections import defaultdict
from textwrap import dedent
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.ivy_task_mixin import IvyResolveFingerprintStrategy, IvyTaskMixin
from pants.backend.jvm.tasks.jar_task import JarTask
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.target import Target
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.java import util
from pants.java.executor import Executor
from pants.util.dirutil import safe_mkdir_for
from pants.util.memo import memoized_property
class ShadedToolFingerprintStrategy(IvyResolveFingerprintStrategy):
def __init__(self, main, custom_rules=None):
# The bootstrapper uses no custom confs in its resolves.
super(ShadedToolFingerprintStrategy, self).__init__(confs=None)
self._main = main
self._custom_rules = custom_rules
def compute_fingerprint(self, target):
hasher = hashlib.sha1()
base_fingerprint = super(ShadedToolFingerprintStrategy, self).compute_fingerprint(target)
if base_fingerprint is None:
return None
hasher.update('version=2')
hasher.update(base_fingerprint)
# NB: this series of updates must always cover the same fields that populate `_tuple`'s slots
# to ensure proper invalidation.
hasher.update(self._main)
if self._custom_rules:
for rule in self._custom_rules:
hasher.update(rule.render())
return hasher.hexdigest()
def _tuple(self):
# NB: this tuple's slots - used for `==/hash()` - must be kept in agreement with the hashed
# fields in `compute_fingerprint` to ensure proper invalidation.
return self._main, tuple(self._custom_rules or ())
def __hash__(self):
return hash((type(self),) + self._tuple())
def __eq__(self, other):
return type(self) == type(other) and self._tuple() == other._tuple()
class BootstrapJvmTools(IvyTaskMixin, JarTask):
@classmethod
def product_types(cls):
return ['jvm_build_tools_classpath_callbacks']
@classmethod
def register_options(cls, register):
super(BootstrapJvmTools, cls).register_options(register)
# Must be registered with the shader- prefix, as JarTask already registers --jvm-options
# (indirectly, via NailgunTask).
register('--shader-jvm-options', type=list, metavar='<option>...',
help='Run the tool shader with these extra jvm options.')
@classmethod
def subsystem_dependencies(cls):
return super(BootstrapJvmTools, cls).subsystem_dependencies() + (IvySubsystem, Shader.Factory)
@classmethod
def prepare(cls, options, round_manager):
super(BootstrapJvmTools, cls).prepare(options, round_manager)
Shader.Factory.prepare_tools(round_manager)
class ToolResolveError(TaskError):
"""Indicates an error resolving a required JVM tool classpath."""
@classmethod
def _tool_resolve_error(cls, error, dep_spec, jvm_tool):
msg = dedent("""
Failed to resolve target for tool: {tool}. This target was obtained from
option {option} in scope {scope}. You probably need to add this target to your tools
BUILD file(s), usually located in BUILD.tools in the workspace root.
Exception {etype}: {error}
""".format(tool=dep_spec,
etype=type(error).__name__,
error=error,
scope=jvm_tool.scope,
option=jvm_tool.key))
return cls.ToolResolveError(msg)
@classmethod
def _alternate_target_roots(cls, options, address_mapper, build_graph):
processed = set()
for jvm_tool in JvmToolMixin.get_registered_tools():
dep_spec = jvm_tool.dep_spec(options)
dep_address = Address.parse(dep_spec)
      # Some JVM tools are requested multiple times; we only need to handle each once.
if dep_address not in processed:
processed.add(dep_address)
try:
if build_graph.resolve_address(dep_address):
# The user has defined a tool classpath override - we let that stand.
continue
except AddressLookupError as e:
if jvm_tool.classpath is None:
raise cls._tool_resolve_error(e, dep_spec, jvm_tool)
else:
if not jvm_tool.is_default(options):
# The user specified a target spec for this jvm tool that doesn't actually exist.
# We want to error out here instead of just silently using the default option while
# appearing to respect their config.
raise cls.ToolResolveError(dedent("""
Failed to resolve target for tool: {tool}. This target was obtained from
option {option} in scope {scope}.
Make sure you didn't make a typo in the tool's address. You specified that the
tool should use the target found at "{tool}".
This target has a default classpath configured, so you can simply remove:
[{scope}]
{option}: {tool}
from pants.ini (or any other config file) to use the default tool.
The default classpath is: {default_classpath}
Note that tool target addresses in pants.ini should be specified *without* quotes.
""").strip().format(tool=dep_spec,
option=jvm_tool.key,
scope=jvm_tool.scope,
default_classpath=':'.join(map(str, jvm_tool.classpath or ()))))
if jvm_tool.classpath:
tool_classpath_target = JarLibrary(name=dep_address.target_name,
address=dep_address,
build_graph=build_graph,
jars=jvm_tool.classpath)
else:
# The tool classpath is empty by default, so we just inject a dummy target that
# ivy resolves as the empty list classpath. JarLibrary won't do since it requires
# one or more jars, so we just pick a target type ivy has no resolve work to do for.
tool_classpath_target = Target(name=dep_address.target_name,
address=dep_address,
build_graph=build_graph)
build_graph.inject_target(tool_classpath_target, synthetic=True)
    # We use the trick of not returning alternate roots, but instead just filling the dep_spec
    # holes with a JarLibrary built from a tool's default classpath JarDependency list if there
    # are no overriding targets present. This means we do modify the build_graph, but we at
    # least do it at a time in the engine lifecycle cut out for handling that.
return None
def __init__(self, *args, **kwargs):
super(BootstrapJvmTools, self).__init__(*args, **kwargs)
self._tool_cache_path = os.path.join(self.workdir, 'tool_cache')
def execute(self):
registered_tools = JvmToolMixin.get_registered_tools()
if registered_tools:
# Map of scope -> (map of key -> callback).
callback_product_map = self.context.products.get_data('jvm_build_tools_classpath_callbacks',
init_func=lambda: defaultdict(dict))
# We leave a callback in the products map because we want these Ivy calls
# to be done lazily (they might never actually get executed) and we want
# to hit Task.invalidated (called in Task._ivy_resolve) on the instance of
# BootstrapJvmTools rather than the instance of whatever class requires
# the bootstrap tools. It would be awkward and possibly incorrect to call
# self.invalidated twice on a Task that does meaningful invalidation on its
# targets. -pl
for jvm_tool in registered_tools:
dep_spec = jvm_tool.dep_spec(self.context.options)
callback = self.cached_bootstrap_classpath_callback(dep_spec, jvm_tool)
callback_product_map[jvm_tool.scope][jvm_tool.key] = callback
def _resolve_tool_targets(self, dep_spec, jvm_tool):
try:
targets = list(self.context.resolve(dep_spec))
if not targets:
raise KeyError
return targets
except (KeyError, AddressLookupError) as e:
raise self._tool_resolve_error(e, dep_spec, jvm_tool)
def _check_underspecified_tools(self, jvm_tool, targets):
# NOTE: ScalaPlatform allows a user to specify a custom configuration. When this is
# done all of the targets must be defined by the user and defaults are set as None.
# If we catch a case of a scala-platform tool being bootstrapped and we have no user
# specified target we need to throw an exception for the user.
# It is possible for tests to insert synthetic tool targets which we honor here.
class ToolUnderspecified(Exception):
pass
# Bootstrapped tools are inserted as synthetic. If they exist on disk they are later
# updated as non synthetic targets. If it's a synthetic target make sure it has a rev.
synthetic_targets = [t.is_synthetic for t in targets]
empty_revs = [cp.rev is None for cp in jvm_tool.classpath or []]
if any(empty_revs) and any(synthetic_targets):
raise ToolUnderspecified(textwrap.dedent("""
Unable to bootstrap tool: '{}' because no rev was specified. This usually
means that the tool was not defined properly in your build files and no
default option was provided to use for bootstrap.
""".format(jvm_tool.key)))
def _bootstrap_classpath(self, jvm_tool, targets):
self._check_underspecified_tools(jvm_tool, targets)
workunit_name = 'bootstrap-{}'.format(jvm_tool.key)
return self.ivy_classpath(targets, silent=True, workunit_name=workunit_name)
@memoized_property
def shader(self):
return Shader.Factory.create(self.context)
def _bootstrap_shaded_jvm_tool(self, jvm_tool, targets):
fingerprint_strategy = ShadedToolFingerprintStrategy(jvm_tool.main,
custom_rules=jvm_tool.custom_rules)
with self.invalidated(targets,
# We're the only dependent in reality since we shade.
invalidate_dependents=False,
fingerprint_strategy=fingerprint_strategy) as invalidation_check:
# If there are no vts, then there are no resolvable targets, so we exit early with an empty
# classpath. This supports the optional tool classpath case.
if not invalidation_check.all_vts:
return []
tool_vts = self.tool_vts(invalidation_check)
jar_name = '{main}-{hash}.jar'.format(main=jvm_tool.main, hash=tool_vts.cache_key.hash)
shaded_jar = os.path.join(self._tool_cache_path, 'shaded_jars', jar_name)
if not invalidation_check.invalid_vts and os.path.exists(shaded_jar):
return [shaded_jar]
# Ensure we have a single binary jar we can shade.
binary_jar = os.path.join(self._tool_cache_path, 'binary_jars', jar_name)
safe_mkdir_for(binary_jar)
classpath = self._bootstrap_classpath(jvm_tool, targets)
if len(classpath) == 1:
shutil.copy(classpath[0], binary_jar)
else:
with self.open_jar(binary_jar) as jar:
for classpath_jar in classpath:
jar.writejar(classpath_jar)
jar.main(jvm_tool.main)
# Now shade the binary jar and return that single jar as the safe tool classpath.
safe_mkdir_for(shaded_jar)
with self.shader.binary_shader(shaded_jar,
jvm_tool.main,
binary_jar,
custom_rules=jvm_tool.custom_rules,
jvm_options=self.get_options().jvm_options) as shader:
try:
result = util.execute_runner(shader,
workunit_factory=self.context.new_workunit,
workunit_name='shade-{}'.format(jvm_tool.key))
if result != 0:
raise TaskError("Shading of tool '{key}' with main class {main} for {scope} failed "
"with exit code {result}, command run was:\n\t{cmd}"
.format(key=jvm_tool.key,
main=jvm_tool.main,
scope=jvm_tool.scope,
result=result,
cmd=shader.cmd))
except Executor.Error as e:
raise TaskError("Shading of tool '{key}' with main class {main} for {scope} failed "
"with: {exception}".format(key=jvm_tool.key,
main=jvm_tool.main,
scope=jvm_tool.scope,
exception=e))
if self.artifact_cache_writes_enabled():
self.update_artifact_cache([(tool_vts, [shaded_jar])])
return [shaded_jar]
def check_artifact_cache_for(self, invalidation_check):
tool_vts = self.tool_vts(invalidation_check)
return [tool_vts]
def tool_vts(self, invalidation_check):
# The monolithic shaded tool jar is a single output dependent on the entire target set, and is
# not divisible by target. So we can only cache it keyed by the entire target set.
return VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
def _bootstrap_jvm_tool(self, dep_spec, jvm_tool):
targets = self._resolve_tool_targets(dep_spec, jvm_tool)
if jvm_tool.main is None:
return self._bootstrap_classpath(jvm_tool, targets)
else:
return self._bootstrap_shaded_jvm_tool(jvm_tool, targets)
def cached_bootstrap_classpath_callback(self, dep_spec, jvm_tool):
cache = {}
cache_lock = threading.Lock()
def bootstrap_classpath():
with cache_lock:
if 'classpath' not in cache:
cache['classpath'] = self._bootstrap_jvm_tool(dep_spec, jvm_tool)
return cache['classpath']
return bootstrap_classpath
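  # Hedged usage sketch (the lookup shown is illustrative): a consuming task
  # would fetch the callback registered by execute() above and invoke it only
  # when the tool is actually needed, e.g.
  #
  #   callbacks = context.products.get_data('jvm_build_tools_classpath_callbacks')
  #   classpath = callbacks[jvm_tool.scope][jvm_tool.key]()  # lazy Ivy resolve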
|
{
"content_hash": "73a9ad04fdbace38a2aa2542ee95ea6a",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 100,
"avg_line_length": 46.25705329153605,
"alnum_prop": 0.6368934670642451,
"repo_name": "peiyuwang/pants",
"id": "f9804b4c65a7224d2a546dbdcb8bac1a466ffa64",
"size": "14903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/tasks/bootstrap_jvm_tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "78744"
},
{
"name": "Java",
"bytes": "463179"
},
{
"name": "JavaScript",
"bytes": "30784"
},
{
"name": "Protocol Buffer",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "5586816"
},
{
"name": "Rust",
"bytes": "168825"
},
{
"name": "Scala",
"bytes": "79707"
},
{
"name": "Shell",
"bytes": "64292"
},
{
"name": "Thrift",
"bytes": "2183"
}
],
"symlink_target": ""
}
|
import constants.create_data_set_constants as const
import tensorflow as tf
class Cifar10Reader:
@staticmethod
def _convert_from_binary(
example_bytes):
"""
- Casts the label to tf.int32.
- Reshapes the image.
:param example_bytes: Example in bytes from the Cifar10 data set file.
:return: - Label : tf.int32;
- Image : tf.uint8 [height, width, depth].
"""
int32_label = tf.cast(
x=tf.strided_slice(
input_=example_bytes,
begin=[0],
end=[const.CIFAR10_LABEL_BYTES],
strides=[1]
),
dtype=tf.int32
)
int32_label.set_shape([1])
binary_image = tf.reshape(
tensor=tf.strided_slice(
input_=example_bytes,
begin=[const.CIFAR10_LABEL_BYTES],
end=[const.CIFAR10_EXAMPLE_BYTES],
strides=[1]
),
shape=[
const.CIFAR10_IMAGE_DEPTH,
const.CIFAR10_IMAGE_HEIGHT,
const.CIFAR10_IMAGE_WIDTH
]
)
uint8_image = tf.transpose(
a=binary_image,
perm=[
1, 2, 0
]
)
return int32_label, uint8_image
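    # Hedged layout note (inferred from the constants and ops above, assuming
    # the standard CIFAR-10 binary format): each record is CIFAR10_LABEL_BYTES
    # of label followed by a depth-major image (all R bytes, then G, then B),
    # which is why the image bytes are first reshaped to
    # [depth, height, width] and then transposed to [height, width, depth].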
@staticmethod
def _read_example_from_binary_file(
filename_queue):
"""
- Reads an example from filename_queue.
:param filename_queue: QueueRunner
:return: - Label : tf.int32;
- Image : tf.uint8 [height, width, depth].
"""
reader = tf.FixedLengthRecordReader(
record_bytes=const.CIFAR10_EXAMPLE_BYTES,
header_bytes=0,
footer_bytes=0
)
_, value = reader.read(
queue=filename_queue
)
example_bytes = tf.decode_raw(
bytes=value,
out_type=tf.uint8
)
label, uint8_image = Cifar10Reader._convert_from_binary(
example_bytes=example_bytes
)
return label, uint8_image
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Batch methods
@staticmethod
def _get_example(
            file_names: list):
"""
- Reads examples from the file_names files.
:param file_names: List with the path of the Cifar10 files.
:return: - Label : tf.int32;
- Image : tf.uint8 [height, width, depth].
"""
filename_queue = tf.train.string_input_producer(
string_tensor=file_names
)
label, image = Cifar10Reader._read_example_from_binary_file(
filename_queue=filename_queue
)
# TODO Remove start
reshaped_image = tf.cast(image, tf.float32)
height = 24
width = 24
resized_image = tf.image.resize_image_with_crop_or_pad(
reshaped_image,
width,
height
)
float_image = tf.image.per_image_standardization(resized_image)
float_image.set_shape([height, width, 3])
# return label, float_image
# TODO Remove end
return label, image
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Public methods
@staticmethod
def get_train_data_set_examples():
"""
:return: - Label : tf.int32;
- Image : tf.uint8 [height, width, depth].
"""
labels, images = Cifar10Reader._get_example(
file_names=const.CIFAR10_TRAIN_FILES_PATH
)
return labels, images
@staticmethod
def get_test_data_set_examples():
"""
:return: - Label : tf.int32;
- Image : tf.uint8 [height, width, depth].
"""
labels, images = Cifar10Reader._get_example(
file_names=[const.CIFAR10_TEST_FILE_PATH]
)
return labels, images
#########################################################################
|
{
"content_hash": "1ba5d3e3308a608a01fe9c1af56cddf9",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 78,
"avg_line_length": 25.76875,
"alnum_prop": 0.48071792384186274,
"repo_name": "dani-i/bachelor-project",
"id": "cd0d2e6555a96cf21f7f59252a8194f7a81d63e8",
"size": "4123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "file_experts/data_set/cifar10_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "566079"
}
],
"symlink_target": ""
}
|
"""
Volume driver for IBM FlashSystem storage systems with iSCSI protocol.
Limitations:
1. Cinder driver only works when open_access_enabled=off.
"""
import random
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import flashsystem_common as fscommon
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
flashsystem_iscsi_opts = [
cfg.IntOpt('flashsystem_iscsi_portid',
default=0,
help='Default iSCSI Port ID of FlashSystem. '
'(Default port is 0.)')
]
CONF = cfg.CONF
CONF.register_opts(flashsystem_iscsi_opts, group=conf.SHARED_CONF_GROUP)
@interface.volumedriver
class FlashSystemISCSIDriver(fscommon.FlashSystemDriver):
"""IBM FlashSystem iSCSI volume driver.
Version history:
.. code-block:: none
1.0.0 - Initial driver
1.0.1 - Code clean up
1.0.2 - Add lock into vdisk map/unmap, connection
initialize/terminate
1.0.3 - Initial driver for iSCSI
1.0.4 - Split Flashsystem driver into common and FC
1.0.5 - Report capability of volume multiattach
1.0.6 - Fix bug #1469581, add I/T mapping check in
terminate_connection
1.0.7 - Fix bug #1505477, add host name check in
_find_host_exhaustive for FC
1.0.8 - Fix bug #1572743, multi-attach attribute
should not be hardcoded, only in iSCSI
1.0.9 - Fix bug #1570574, Cleanup host resource
leaking, changes only in iSCSI
1.0.10 - Fix bug #1585085, add host name check in
_find_host_exhaustive for iSCSI
1.0.11 - Update driver to use ABC metaclasses
1.0.12 - Update driver to support Manage/Unmanage
existing volume
"""
VERSION = "1.0.12"
# ThirdPartySystems wiki page
CI_WIKI_NAME = "IBM_STORAGE_CI"
def __init__(self, *args, **kwargs):
super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(fscommon.flashsystem_opts)
self.configuration.append_config_values(flashsystem_iscsi_opts)
self.configuration.append_config_values(san.san_opts)
def _check_vdisk_params(self, params):
# Check that the requested protocol is enabled
if not params['protocol'] in self._protocol:
msg = (_("'%(prot)s' is invalid for "
"flashsystem_connection_protocol "
"in config file. valid value(s) are "
"%(enabled)s.")
% {'prot': params['protocol'],
'enabled': self._protocol})
raise exception.InvalidInput(reason=msg)
# Check if iscsi_ip is set when protocol is iSCSI
if params['protocol'] == 'iSCSI' and params['iscsi_ip'] == 'None':
msg = _("target_ip_address must be set in config file when "
"using protocol 'iSCSI'.")
raise exception.InvalidInput(reason=msg)
def _create_host(self, connector):
"""Create a new host on the storage system.
We create a host and associate it with the given connection
information.
"""
LOG.debug('enter: _create_host: host %s.', connector['host'])
rand_id = six.text_type(random.randint(0, 99999999)).zfill(8)
host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector),
rand_id)
ports = []
if 'iSCSI' == self._protocol and 'initiator' in connector:
ports.append('-iscsiname %s' % connector['initiator'])
self._driver_assert(ports,
(_('_create_host: No connector ports.')))
port1 = ports.pop(0)
arg_name, arg_val = port1.split()
ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name',
'"%s"' % host_name]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return('successfully created' in out,
'_create_host', ssh_cmd, out, err)
for port in ports:
arg_name, arg_val = port.split()
ssh_cmd = ['svctask', 'addhostport', '-force',
arg_name, arg_val, host_name]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(
(not out.strip()),
'_create_host', ssh_cmd, out, err)
LOG.debug(
'leave: _create_host: host %(host)s - %(host_name)s.',
{'host': connector['host'], 'host_name': host_name})
return host_name
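    # Hedged example (the name shown is illustrative): for a connector whose
    # hostname prefix resolves to 'node1', this returns something like
    # 'node1-00123456' -- the prefix plus an 8-digit zero-padded random
    # suffix -- which _find_host_exhaustive below strips again via rfind('-').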
def _find_host_exhaustive(self, connector, hosts):
LOG.debug('enter: _find_host_exhaustive hosts: %s.', hosts)
hname = connector['host']
hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts]
if hname in hnames:
host = hosts[hnames.index(hname)]
ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(
out.strip(),
'_find_host_exhaustive', ssh_cmd, out, err)
for attr_line in out.split('\n'):
attr_name, foo, attr_val = attr_line.partition('!')
if (attr_name == 'iscsi_name' and
'initiator' in connector and
attr_val == connector['initiator']):
LOG.debug(
'leave: _find_host_exhaustive connector: %s.',
connector)
return host
else:
LOG.warning('Host %(host)s was not found on backend storage.',
{'host': hname})
return None
def _get_vdisk_map_properties(
self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params):
"""Get the map properties of vdisk."""
LOG.debug(
'enter: _get_vdisk_map_properties: vdisk '
'%(vdisk_name)s.', {'vdisk_name': vdisk_name})
preferred_node = '0'
IO_group = '0'
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for k, node in self._storage_nodes.items():
if vdisk_params['protocol'] != node['protocol']:
continue
if node['id'] == preferred_node:
preferred_node_entry = node
if node['IO_group'] == IO_group:
io_group_nodes.append(node)
if not io_group_nodes:
msg = (_('No node found in I/O group %(gid)s for volume %(vol)s.')
% {'gid': IO_group, 'vol': vdisk_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not preferred_node_entry:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warning('_get_vdisk_map_properties: Did not find a '
'preferred node for vdisk %s.', vdisk_name)
properties = {
'target_discovered': False,
'target_lun': lun_id,
'volume_id': vdisk_id,
}
type_str = 'iscsi'
if preferred_node_entry['ipv4']:
ipaddr = preferred_node_entry['ipv4'][0]
else:
ipaddr = preferred_node_entry['ipv6'][0]
iscsi_port = self.configuration.target_port
properties['target_portal'] = '%s:%s' % (ipaddr, iscsi_port)
properties['target_iqn'] = preferred_node_entry['iscsi_name']
LOG.debug(
'leave: _get_vdisk_map_properties: vdisk '
'%(vdisk_name)s.', {'vdisk_name': vdisk_name})
return {'driver_volume_type': type_str, 'data': properties}
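    # Hedged example of the structure returned above (all values illustrative):
    #
    #   {'driver_volume_type': 'iscsi',
    #    'data': {'target_discovered': False,
    #             'target_lun': 0,
    #             'volume_id': '<vdisk id>',
    #             'target_portal': '192.168.1.10:3260',
    #             'target_iqn': 'iqn.1986-03.com.ibm:2145.example.node1'}}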
@utils.synchronized('flashsystem-init-conn', external=True)
def initialize_connection(self, volume, connector):
"""Perform work so that an iSCSI connection can be made.
To be able to create an iSCSI connection from a given host to a
volume, we must:
1. Translate the given iSCSI name to a host name
2. Create new host on the storage system if it does not yet exist
3. Map the volume to the host if it is not already done
4. Return the connection information for relevant nodes (in the
proper I/O group)
"""
LOG.debug(
'enter: initialize_connection: volume %(vol)s with '
'connector %(conn)s.', {'vol': volume, 'conn': connector})
vdisk_name = volume['name']
vdisk_id = volume['id']
vdisk_params = self._get_vdisk_params(volume['volume_type_id'])
self._wait_vdisk_copy_completed(vdisk_name)
self._driver_assert(
self._is_vdisk_defined(vdisk_name),
(_('vdisk %s is not defined.')
% vdisk_name))
lun_id = self._map_vdisk_to_host(vdisk_name, connector)
properties = {}
try:
properties = self._get_vdisk_map_properties(
connector, lun_id, vdisk_name, vdisk_id, vdisk_params)
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
self.terminate_connection(volume, connector)
LOG.error('Failed to collect return properties for '
'volume %(vol)s and connector %(conn)s.',
{'vol': volume, 'conn': connector})
LOG.debug(
'leave: initialize_connection:\n volume: %(vol)s\n connector '
'%(conn)s\n properties: %(prop)s.',
{'vol': volume,
'conn': connector,
'prop': properties})
return properties
@utils.synchronized('flashsystem-term-conn', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after connection has been terminated.
When we clean up a terminated connection between a given connector
and volume, we:
1. Translate the given connector to a host name
2. Remove the volume-to-host mapping if it exists
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug(
'enter: terminate_connection: volume %(vol)s with '
'connector %(conn)s.',
{'vol': volume, 'conn': connector})
vdisk_name = volume['name']
self._wait_vdisk_copy_completed(vdisk_name)
host_name = self._unmap_vdisk_from_host(vdisk_name, connector)
        # If host_name is not None, check whether the host has any remaining
        # mappings; if it has none, the host gets deleted.
if host_name:
if not self._get_hostvdisk_mappings(host_name):
self._delete_host(host_name)
LOG.debug(
'leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s.', {'vol': volume, 'conn': connector})
return {'driver_volume_type': 'iscsi'}
def _get_iscsi_ip_addrs(self):
"""get ip address of iSCSI interface."""
LOG.debug('enter: _get_iscsi_ip_addrs')
cmd = ['svcinfo', 'lsportip']
generator = self._port_conf_generator(cmd)
header = next(generator, None)
if not header:
return
        node = None
        for key in self._storage_nodes:
            if self._storage_nodes[key]['config_node'] == 'yes':
                node = self._storage_nodes[key]
                break
if node is None:
msg = _('No config node found.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for port_data in generator:
try:
port_ipv4 = port_data['IP_address']
port_ipv6 = port_data['IP_address_6']
state = port_data['state']
speed = port_data['speed']
except KeyError:
self._handle_keyerror('lsportip', header)
if port_ipv4 == self.configuration.target_ip_address and (
port_data['id'] == (
six.text_type(
self.configuration.flashsystem_iscsi_portid))):
if state not in ('configured', 'online'):
msg = (_('State of node is wrong. Current state is %s.')
% state)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if state in ('configured', 'online') and speed != 'NONE':
if port_ipv4:
node['ipv4'].append(port_ipv4)
if port_ipv6:
node['ipv6'].append(port_ipv6)
break
if not (len(node['ipv4']) or len(node['ipv6'])):
msg = _('No ip address found.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('leave: _get_iscsi_ip_addrs')
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug('enter: do_setup')
self._context = ctxt
# Get data of configured node
self._get_node_data()
# Get the iSCSI IP addresses of the FlashSystem nodes
self._get_iscsi_ip_addrs()
for k, node in self._storage_nodes.items():
if self.configuration.flashsystem_connection_protocol == 'iSCSI':
                if ((len(node['ipv4']) or len(node['ipv6'])) and
                        len(node['iscsi_name'])):
node['protocol'] = 'iSCSI'
self._protocol = 'iSCSI'
# Set for vdisk synchronization
self._vdisk_copy_in_progress = set()
self._vdisk_copy_lock = threading.Lock()
self._check_lock_interval = 5
LOG.debug('leave: do_setup')
def _build_default_params(self):
protocol = self.configuration.flashsystem_connection_protocol
if protocol.lower() == 'iscsi':
protocol = 'iSCSI'
return {
'protocol': protocol,
'iscsi_ip': self.configuration.target_ip_address,
'iscsi_port': self.configuration.target_port,
'iscsi_ported': self.configuration.flashsystem_iscsi_portid,
}
def validate_connector(self, connector):
"""Check connector for enabled protocol."""
valid = False
if 'iSCSI' == self._protocol and 'initiator' in connector:
valid = True
if not valid:
LOG.error('The connector does not contain the '
'required information: initiator is missing')
raise exception.InvalidConnectorException(missing=(
'initiator'))
|
{
"content_hash": "ba68b35ab5580142f86b49955e0a2976",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 78,
"avg_line_length": 38.027638190954775,
"alnum_prop": 0.5571853320118929,
"repo_name": "Datera/cinder",
"id": "d11837adf66a3fcd58c13b66845e414a533021d9",
"size": "15763",
"binary": false,
"copies": "5",
"ref": "refs/heads/datera_queens_backport",
"path": "cinder/volume/drivers/ibm/flashsystem_iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15242306"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
"""Kraken - objects.Constraints.ScaleConstraint module.
Classes:
ScaleConstraint - Scale Constraint.
"""
from constraint import Constraint
class ScaleConstraint(Constraint):
"""Scale Constraint."""
def __init__(self, name, metaData=None):
super(ScaleConstraint, self).__init__(name, metaData=metaData)
|
{
"content_hash": "2fec9c186fdc04ab861545ae6c80c3f7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 21.6,
"alnum_prop": 0.7160493827160493,
"repo_name": "oculusstorystudio/kraken",
"id": "e5db7cb92ef27a18a843e2e93cd565de37f419eb",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop_OSS",
"path": "Python/kraken/core/objects/constraints/scale_constraint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AMPL",
"bytes": "136"
},
{
"name": "Batchfile",
"bytes": "2584"
},
{
"name": "CSS",
"bytes": "21033"
},
{
"name": "MAXScript",
"bytes": "521"
},
{
"name": "Mathematica",
"bytes": "4442959"
},
{
"name": "Python",
"bytes": "2841362"
},
{
"name": "Shell",
"bytes": "2689"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.nddata import NDData
def test_basic_nddata():
arr = np.zeros((11, 11))
arr[5, 5] = 1
ndd = NDData(arr)
test_kernel = Gaussian2DKernel(1)
result = convolve(ndd, test_kernel)
x, y = np.mgrid[:11, :11]
expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2))
np.testing.assert_allclose(result, expected, atol=1e-6)
resultf = convolve_fft(ndd, test_kernel)
np.testing.assert_allclose(resultf, expected, atol=1e-6)
@pytest.mark.parametrize('convfunc',
[lambda *args: convolve(*args, nan_treatment='interpolate',
normalize_kernel=True),
lambda *args: convolve_fft(*args, nan_treatment='interpolate',
normalize_kernel=True)])
def test_masked_nddata(convfunc):
arr = np.zeros((11, 11))
arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2
arr[5, 5] = 1.5
ndd_base = NDData(arr)
mask = arr < 0 # this is all False
mask[5, 5] = True
ndd_mask = NDData(arr, mask=mask)
arrnan = arr.copy()
arrnan[5, 5] = np.nan
ndd_nan = NDData(arrnan)
test_kernel = Gaussian2DKernel(1)
result_base = convfunc(ndd_base, test_kernel)
result_nan = convfunc(ndd_nan, test_kernel)
result_mask = convfunc(ndd_mask, test_kernel)
assert np.allclose(result_nan, result_mask)
assert not np.allclose(result_base, result_mask)
assert not np.allclose(result_base, result_nan)
# check to make sure the mask run doesn't talk back to the initial array
assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
|
{
"content_hash": "450a956f9d1fd9b7726b05802cdfe1d4",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 88,
"avg_line_length": 32.892857142857146,
"alnum_prop": 0.6096634093376765,
"repo_name": "lpsinger/astropy",
"id": "6e2585ab26d6eb79d15dd1ebfda400f9c8222445",
"size": "1907",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/convolution/tests/test_convolve_nddata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import os
import sys, getopt
from pwd import getpwuid
import logging
class terminalColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def find_owner(filename):
    # Return the user name of the owner of the given file
try:
return getpwuid(os.stat(filename).st_uid).pw_name
except:
        #print 'ERROR {:>40}'.format(terminalColors.FAIL + 'Failed to get the owner of ' + filename + terminalColors.ENDC)
        logging.debug('{:>40}'.format(terminalColors.FAIL + 'Failed to get the owner of ' + filename + terminalColors.ENDC))
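# Hedged example (path and owner are illustrative):
#   find_owner('/etc/passwd')  # -> 'root' on a typical Linux system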
def list_dir(path, user, ident):
    # For a given path, analyze every element it contains
for member in os.listdir(path):
try:
owner = find_owner(path + '/' + member)
            # If it is a directory, we will have to recurse into it...
if os.path.isdir(path + '/' + member):
print (' ' * ident) + '{:<49} {:>40}'.format((terminalColors.OKBLUE + member + terminalColors.ENDC), '')
if ( (path + member) not in ['/proc', '/sys', '/usr']):
if path != '/':
list_dir(path + '/' + member, user, ident+2);
else:
list_dir(path + member, user, ident+2);
else:
                    logging.info(terminalColors.OKBLUE + 'Not analyzing ' + path + member + terminalColors.ENDC)
            # If it is not a directory, it is a file and we will record it
else:
print (' ' * ident) + '{:<40} {:>40}'.format(member, owner)
                '''
                There are two options for dumping it to the file:
                1- No user parameter was given (we do not want the files of one specific user, we want everyone's)
                or
                2- A given user was passed. In that case, we check that the owner of the file being analyzed
                matches the user we want.
                Obviously, if we are looking for one specific user's files and that user is not the owner of the file being analyzed, we do not dump it.
                '''
if user == '' or (user != '' and owner == user):
fo = open('files_' + owner + '.log', "a")
fo.write(path + '/' + member + '\n')
fo.close()
except (KeyboardInterrupt, SystemExit):
raise
except:
            logging.debug('{:>40}'.format(terminalColors.FAIL + 'Failed analyzing ' + path + '/' + member + terminalColors.ENDC))
def main(argv):
path = '.'
user = ''
try:
opts, args = getopt.getopt(argv,"hp:u:",["path=","user="])
except getopt.GetoptError:
print 'list_files.py -p <path> -u <user>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'list_files.py -p <path> -u <user>'
sys.exit()
elif opt in ("-p", "--path"):
path = arg
elif opt in ("-u", "--user"):
user = arg
logging.basicConfig(filename='list_files.log', filemode='w', format='%(asctime)s - %(levelname)s >> %(message)s', level=logging.INFO)
    logging.info(terminalColors.OKGREEN + 'Starting execution' + terminalColors.ENDC)
try:
list_dir(path,user, 0);
except (KeyboardInterrupt, SystemExit):
        logging.warning(terminalColors.WARNING + 'Execution interrupted' + terminalColors.ENDC)
sys.exit(2)
    logging.info(terminalColors.OKGREEN + 'End of execution' + terminalColors.ENDC)
sys.exit(0)
if __name__=='__main__':
os.system("clear");
os.system("rm files_*.log");
main(sys.argv[1:])
print '\n' * 5;
|
{
"content_hash": "abf5f9265e6e447b2344f8ab6f0066a3",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 134,
"avg_line_length": 33.37623762376238,
"alnum_prop": 0.6449124888757045,
"repo_name": "miguelfito/mikeTools",
"id": "6ae1a0d50bdf2c5fb70881fa48f4a49ac4c915c8",
"size": "3411",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "system/list_files.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10547"
}
],
"symlink_target": ""
}
|
'''Manager for predicates.
Created on Jun 15, 2013
@author: Cam Moore
'''
import sys
import inspect
from apps.managers.challenge_mgr.models import RoundSetting, GameInfo
from apps.widgets.smartgrid_library.models import LibraryEvent, LibraryAction
from apps.widgets.smartgrid_design.models import DesignerLevel, DesignerEvent, DesignerAction
# Used to build the unlock_conditions
(_AND, _OR, _NOT, _TRUE, _FALSE) = ('and', 'or', 'not', 'True', 'False')
def eval_predicates(predicates, user):
"""Returns the boolean evaluation result of the predicates against the user."""
ALLOW_DICT = {"True": True, "False": False, "user": user}
ALLOW_DICT.update(get_player_predicates())
ALLOW_DICT.update(get_challenge_predicates())
ALLOW_DICT.update(get_smartgrid_predicates())
for key in ALLOW_DICT:
if "%s(" % key in predicates:
predicates = predicates.replace("%s(" % key, "%s(user," % key)
return eval(predicates, {"__builtins__": None}, ALLOW_DICT)
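# Hedged illustration of the rewriting performed above (the predicate string
# is an example): an unlock condition such as
#   "has_points(50) and submitted_action('recycle')"
# becomes
#   "has_points(user,50) and submitted_action(user,'recycle')"
# before being eval'd against the restricted ALLOW_DICT.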
def eval_play_tester_predicates(predicates, user, draft_slug):
"""Returns the boolean evaluation results of the tester predicates against the user."""
ALLOW_DICT = {"True": True, "False": False, "user": user}
ALLOW_DICT.update(get_smartgrid_tester_predicates())
for key in ALLOW_DICT:
if "%s(" % key in predicates:
predicates = predicates.replace("%s(" % key, "%s('%s', " % (key, draft_slug))
ALLOW_DICT.update(get_player_tester_predicates())
ALLOW_DICT.update(get_challenge_tester_predicates())
for key in ALLOW_DICT:
if "%s(" % key in predicates:
predicates = predicates.replace("%s(" % key, "%s(user," % key)
return eval(predicates, {"__builtins__": None}, ALLOW_DICT)
def get_action_slugs(draft):
"""Returns a list of all the slugs available in the given draft. This includes all the
LibraryAction slugs and any new action slugs in the draft."""
ret = get_library_action_slugs()
for action in DesignerAction.objects.filter(draft=draft):
if action.slug not in ret:
ret.append(action.slug)
return sorted(ret)
def get_action_types():
"""Returns a list of the possible action types."""
return ('activity', 'commitment', 'event')
def get_challenge_predicates():
"""Returns the challenge predicates as a dictionary whose keys are the names of the predicates
and the values are the predicate functions."""
from apps.managers.predicate_mgr.challenge_predicates import game_enabled, reached_round
return {
"game_enabled": game_enabled,
"reached_round": reached_round,
}
def reached_round_tester():
"""Tester predicate replacement for challenge_mgr.predicates.reached_round."""
return True
def get_challenge_tester_predicates():
"""Returns the tester challenge predicates."""
from apps.managers.predicate_mgr.challenge_tester_predicates import game_enabled, reached_round
return {
"game_enabled": game_enabled,
"reached_round": reached_round,
}
def get_defined_predicates():
"""Returns the predicates defined in Makahiki as a dictionary."""
ret = {}
ret.update(get_player_predicates())
ret.update(get_challenge_predicates())
ret.update(get_smartgrid_predicates())
return ret
def get_event_slugs(draft):
"""Returns a list of all the Event slugs available in the given draft."""
ret = get_library_event_slugs()
for event in DesignerEvent.objects.filter(draft=draft):
if event.slug not in ret:
ret.append(event.slug)
return ret
def get_game_names():
"""Returns a list of all the game names."""
ret = []
for info in GameInfo.objects.all():
if info.name not in ret:
ret.append(info.name)
return ret
def get_level_names(draft):
"""Returns a list of all the level names defined in the given draft."""
ret = []
for level in DesignerLevel.objects.filter(draft=draft):
if level.name not in ret:
ret.append(level.name)
return ret
def get_library_action_slugs():
"""Returns a list of the LibraryAction slugs."""
ret = []
for action in LibraryAction.objects.all():
if action.slug not in ret:
ret.append(action.slug)
return ret
def get_library_event_slugs():
"""Returns a list of all the LibraryEvent slugs."""
ret = []
for event in LibraryEvent.objects.all():
if event.slug not in ret:
ret.append(event.slug)
return ret
def get_player_predicates():
"""Returns the predicates associated with players as a dictionary whose keys are the names
of the predicates and values are the predicate functions."""
from apps.managers.predicate_mgr.player_predicates import has_points, is_admin, \
allocated_raffle_ticket, badge_awarded, posted_to_wall, set_profile_pic, daily_visit_count, \
changed_theme, daily_energy_goal_count, referring_count, team_member_point_percent
return {
"is_admin": is_admin,
"has_points": has_points,
"allocated_raffle_ticket": allocated_raffle_ticket,
"badge_awarded": badge_awarded,
"posted_to_wall": posted_to_wall,
"set_profile_pic": set_profile_pic,
"daily_visit_count": daily_visit_count,
"change_theme": changed_theme,
"changed_theme": changed_theme,
"daily_energy_goal_count": daily_energy_goal_count,
"referring_count": referring_count,
"team_member_point_percent": team_member_point_percent,
}
def get_player_tester_predicates():
"""Returns the tester predicates associated with players. This is the same
get_player_predicates()."""
return get_player_predicates()
def get_predicate_parameter_types(predicate_str):
"""Returns a list of the parameter types for the given predicate_str."""
preds = get_defined_predicates()
try:
return inspect.getargspec(preds[predicate_str]).args
except KeyError:
return []
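# Hedged example (the exact argument names come from the predicate function
# signatures and may differ):
#   get_predicate_parameter_types('has_points')  # -> e.g. ['user', 'points']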
def get_resources():
"""Returns a list of the possible resource choices."""
return ('energy', 'water', 'waste')
def get_round_names():
"""Returns a list of the defined round names."""
ret = []
for r in RoundSetting.objects.all():
if r.name not in ret:
ret.append(r.name)
return ret
def get_smartgrid_predicates(): # pylint: disable=R0914
"""Returns the SmartGrid predicates as a dictionary whose keys are the names of the predicates
and the values are the predicate functions."""
from apps.managers.predicate_mgr.smartgrid_predicates import approved_action, \
approved_all_of_level, approved_all_of_resource, approved_all_of_type, approved_some, \
approved_some_full_spectrum, approved_some_of_level, approved_some_of_resource, \
approved_some_of_type, completed_level, social_bonus_count, submitted_action, \
submitted_all_of_level, submitted_all_of_resource, submitted_all_of_type, submitted_level, \
submitted_some, submitted_some_full_spectrum, submitted_some_of_level, \
submitted_some_of_resource, submitted_some_of_type, unlock_on_date, unlock_on_event
return {
"approved_action": approved_action,
"approved_all_of_level": approved_all_of_level,
"approved_all_of_resource": approved_all_of_resource,
"approved_all_of_type": approved_all_of_type,
"approved_some": approved_some,
"approved_some_full_spectrum": approved_some_full_spectrum,
"approved_some_of_level": approved_some_of_level,
"approved_some_of_resource": approved_some_of_resource,
"approved_some_of_type": approved_some_of_type,
"completed_action": submitted_action,
"completed_level": completed_level,
"completed_some_of": submitted_some_of_type,
"completed_some_of_level": submitted_some_of_level,
"social_bonus_count": social_bonus_count,
"submitted_action": submitted_action,
"submitted_all_of_level": submitted_all_of_level,
"submitted_all_of_resource": submitted_all_of_resource,
"submitted_all_of_type": submitted_all_of_type,
"submitted_level": submitted_level,
"submitted_some": submitted_some,
"submitted_some_full_spectrum": submitted_some_full_spectrum,
"submitted_some_of_level": submitted_some_of_level,
"submitted_some_of_resource": submitted_some_of_resource,
"submitted_some_of_type": submitted_some_of_type,
"unlock_on_date": unlock_on_date,
"unlock_on_event": unlock_on_event,
} # pylint: enable=R0914
def get_smartgrid_tester_predicates(): # pylint: disable=R0914
"""Returns the tester smartgrid predicates."""
from apps.managers.predicate_mgr.smartgrid_tester_predicates import approved_action, \
approved_all_of_level, approved_all_of_resource, approved_all_of_type, approved_some, \
approved_some_full_spectrum, approved_some_of_level, approved_some_of_resource, \
approved_some_of_type, completed_level, social_bonus_count, submitted_action, \
submitted_all_of_level, submitted_all_of_resource, submitted_all_of_type, submitted_level, \
submitted_some, submitted_some_full_spectrum, submitted_some_of_level, \
submitted_some_of_resource, submitted_some_of_type, unlock_on_date, unlock_on_event
return {
"approved_action": approved_action,
"approved_all_of_level": approved_all_of_level,
"approved_all_of_resource": approved_all_of_resource,
"approved_all_of_type": approved_all_of_type,
"approved_some": approved_some,
"approved_some_full_spectrum": approved_some_full_spectrum,
"approved_some_of_level": approved_some_of_level,
"approved_some_of_resource": approved_some_of_resource,
"approved_some_of_type": approved_some_of_type,
"completed_action": submitted_action,
"completed_level": completed_level,
"completed_some_of": submitted_some_of_type,
"completed_some_of_level": submitted_some_of_level,
"social_bonus_count": social_bonus_count,
"submitted_action": submitted_action,
"submitted_all_of_level": submitted_all_of_level,
"submitted_all_of_resource": submitted_all_of_resource,
"submitted_all_of_type": submitted_all_of_type,
"submitted_level": submitted_level,
"submitted_some": submitted_some,
"submitted_some_full_spectrum": submitted_some_full_spectrum,
"submitted_some_of_level": submitted_some_of_level,
"submitted_some_of_resource": submitted_some_of_resource,
"submitted_some_of_type": submitted_some_of_type,
"unlock_on_date": unlock_on_date,
"unlock_on_event": unlock_on_event,
} # pylint: enable=R0914
def get_smartgrid_unlock_predicates():
"""Returns the suggested predicates for Smartgrid Action unlock conditions."""
from apps.managers.predicate_mgr.smartgrid_predicates import approved_action, \
submitted_action, unlock_on_date, unlock_on_event
from apps.managers.predicate_mgr.player_predicates import has_points
return {
"submitted_action": submitted_action,
"approved_action": approved_action,
"has_points": has_points,
"unlock_on_date": unlock_on_date,
"unlock_on_event": unlock_on_event,
}
def get_smartgrid_unlock_predicate_list():
    """Returns the suggested Smartgrid unlock condition predicate list."""
    return ['submitted_action', 'approved_action', 'has_points',
            'unlock_on_date', 'unlock_on_event']
def is_action_slug_predicate(predicate_fn):
"""Returns true if the predicate_fn takes parameter that is an Action slug."""
return 'action_slug' in inspect.getargspec(predicate_fn).args
def is_action_type_predicate(predicate_fn):
"""Returns True if the predicate_fn takes an action_type parameter."""
return 'action_type' in inspect.getargspec(predicate_fn).args
def is_event_slug_predicate(predicate_fn):
"""Returns True if the predicated_fn takes a parameter that is an event_slug."""
return 'event_slug' in inspect.getargspec(predicate_fn).args
def is_game_name_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a game_name parameter."""
return 'game_name' in inspect.getargspec(predicate_fn).args
def is_level_name_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a level_name parameter."""
return 'level_name' in inspect.getargspec(predicate_fn).args
def is_predicate_name(name):
"""Returns True if the given name is a valid predicate function name."""
predicates = get_defined_predicates()
    return name in predicates
def is_resource_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a resource parameter."""
return 'resource' in inspect.getargspec(predicate_fn).args
def is_round_name_predicate(predicate_fn):
"""Returns True if the predicate_fn takes a round_name parameter."""
return 'round_name' in inspect.getargspec(predicate_fn).args
def validate_form_predicates(predicates):
"""validate the predicates in a form. if error, raise the form validation error."""
from django import forms
from django.contrib.auth.models import User
# Pick a user and see if the conditions result is true or false.
user = User.objects.all()[0]
try:
result = eval_predicates(predicates, user)
# Check if the result type is a boolean
if type(result) != type(True):
raise forms.ValidationError("Expected boolean value but got %s" % type(result))
except Exception:
info = sys.exc_info()
if len(info) > 1:
raise forms.ValidationError("Received exception: %s:%s" % (sys.exc_info()[0],
sys.exc_info()[1]))
else:
raise forms.ValidationError("Received exception: %s" % sys.exc_info()[0])
def validate_predicates(predicates):
"""Validate the predicates string."""
from django.contrib.auth.models import User
error_msg = None
# Pick a user and see if the conditions result is true or false.
user = User.objects.all()[0]
try:
result = eval_predicates(predicates, user)
# Check if the result type is a boolean
if type(result) != type(True):
error_msg = "Expected boolean value but got %s" % type(result)
except Exception:
error_msg = "Received exception: %s" % sys.exc_info()[0]
return error_msg
|
{
"content_hash": "a521c58994c507fb2a240dd8b4e742a0",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 99,
"avg_line_length": 39.58575197889182,
"alnum_prop": 0.6596014130507232,
"repo_name": "yongwen/makahiki",
"id": "b7244c6d3990c0d19e88ef158816b0e536c1f9bc",
"size": "15003",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "makahiki/apps/managers/predicate_mgr/predicate_mgr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107603"
},
{
"name": "HTML",
"bytes": "568630"
},
{
"name": "JavaScript",
"bytes": "244377"
},
{
"name": "Python",
"bytes": "1489909"
},
{
"name": "Shell",
"bytes": "20118"
}
],
"symlink_target": ""
}
|
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
# No 'CASCADE' which works as a no-op in MySQL but is undocumented
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_column_inline_fk = (
', ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) '
'REFERENCES %(to_table)s(%(to_column)s)'
)
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
sql_create_index = 'CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s'
@property
def sql_delete_check(self):
if self.connection.mysql_is_mariadb:
# The name of the column check constraint is the same as the field
# name on MariaDB. Adding IF EXISTS clause prevents migrations
# crash. Constraint is removed during a "MODIFY" column statement.
return 'ALTER TABLE %(table)s DROP CONSTRAINT IF EXISTS %(name)s'
return 'ALTER TABLE %(table)s DROP CHECK %(name)s'
@property
def sql_rename_column(self):
# MariaDB >= 10.5.2 and MySQL >= 8.0.4 support an
# "ALTER TABLE ... RENAME COLUMN" statement.
if self.connection.mysql_is_mariadb:
if self.connection.mysql_version >= (10, 5, 2):
return super().sql_rename_column
elif self.connection.mysql_version >= (8, 0, 4):
return super().sql_rename_column
return 'ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s'
def quote_value(self, value):
self.connection.ensure_connection()
if isinstance(value, str):
value = value.replace('%', '%%')
# MySQLdb escapes to string, PyMySQL to bytes.
quoted = self.connection.connection.escape(value, self.connection.connection.encoders)
if isinstance(value, str) and isinstance(quoted, bytes):
quoted = quoted.decode()
return quoted
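    # Hedged illustration (the value is an example): quote_value('100%') yields
    # a quoted SQL literal with the percent sign doubled (e.g. "'100%%'"), so
    # it survives later %-interpolation in the schema editor's SQL templates.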
def _is_limited_data_type(self, field):
db_type = field.db_type(self.connection)
return db_type is not None and db_type.lower() in self.connection._limited_data_types
def skip_default(self, field):
if not self._supports_limited_data_type_defaults:
return self._is_limited_data_type(field)
return False
@property
def _supports_limited_data_type_defaults(self):
# MariaDB >= 10.2.1 and MySQL >= 8.0.13 supports defaults for BLOB
# and TEXT.
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 2, 1)
return self.connection.mysql_version >= (8, 0, 13)
def _column_default_sql(self, field):
if (
not self.connection.mysql_is_mariadb and
self._supports_limited_data_type_defaults and
self._is_limited_data_type(field)
):
# MySQL supports defaults for BLOB and TEXT columns only if the
# default value is written as an expression i.e. in parentheses.
return '(%s)'
return super()._column_default_sql(field)
def add_field(self, model, field):
super().add_field(model, field)
# Simulate the effect of a one-off default.
# field.default may be unhashable, so a set isn't used for "in" check.
if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _field_should_be_indexed(self, model, field):
create_index = super()._field_should_be_indexed(model, field)
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
# No need to create an index for ForeignKey fields except if
# db_constraint=False because the index from that constraint won't be
# created.
if (storage == "InnoDB" and
create_index and
field.get_internal_type() == 'ForeignKey' and
field.db_constraint):
return False
return not self._is_limited_data_type(field) and create_index
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super()._delete_composed_index(model, fields, *args)
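    # Hedged scenario (model and field names are illustrative): dropping
    # unique_together = ('author', 'title') where 'author' is a ForeignKey may
    # also drop author's implicit FK index if the composite index "covered" it;
    # the check above recreates a single-column index on 'author' first.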
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super()._rename_field_sql(table, old_field, new_field, new_type)
|
{
"content_hash": "bd1c624e7ec2cfc9649c2b8298777c80",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 94,
"avg_line_length": 44.91216216216216,
"alnum_prop": 0.6252444711900105,
"repo_name": "theo-l/django",
"id": "71b021f7c5140bd11276a06ccf8531ac05f9691d",
"size": "6647",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "django/db/backends/mysql/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54515"
},
{
"name": "HTML",
"bytes": "172728"
},
{
"name": "JavaScript",
"bytes": "247742"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11279991"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
DESCRIPTION = """
Runs all the examples for testing purposes and reports successes and failures
to stderr. An example is marked successful if the running thread does not
throw an exception; for threaded examples, such as plotting, one needs to
check the stderr messages as well.
"""
EPILOG = """
Example Usage:
When no examples fail:
$ ./all.py > out
SUCCESSFUL:
- beginner.basic
[...]
NO FAILED EXAMPLES
$
When examples fail:
$ ./all.py -w > out
Traceback (most recent call last):
File "./all.py", line 111, in run_examples
[...]
SUCCESSFUL:
- beginner.basic
[...]
FAILED:
- intermediate.mplot2D
[...]
$
Obviously, we want to achieve the first result.
"""
import imp
import optparse
import os
import sys
import traceback
# add local sympy to the module path
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..")
sympy_dir = os.path.normpath(sympy_dir)
sys.path.insert(0, sympy_dir)
import sympy
TERMINAL_EXAMPLES = [
"beginner.basic",
"beginner.differentiation",
"beginner.expansion",
"beginner.functions",
"beginner.limits_examples",
"beginner.precision",
"beginner.print_pretty",
"beginner.series",
"beginner.substitution",
"intermediate.coupled_cluster",
"intermediate.differential_equations",
"intermediate.infinite_1d_box",
"intermediate.partial_differential_eqs",
"intermediate.trees",
"intermediate.vandermonde",
"advanced.curvilinear_coordinates",
"advanced.dense_coding_example",
"advanced.fem",
"advanced.gibbs_phenomenon",
"advanced.grover_example",
"advanced.hydrogen",
"advanced.pidigits",
"advanced.qft",
"advanced.relativity",
]
WINDOWED_EXAMPLES = [
"beginner.plotting_nice_plot",
"intermediate.mplot2d",
"intermediate.mplot3d",
"intermediate.print_gtk",
"advanced.autowrap_integrators",
"advanced.autowrap_ufuncify",
"advanced.pyglet_plotting",
]
EXAMPLE_DIR = os.path.dirname(__file__)
def __import__(name, globals=None, locals=None, fromlist=None):
"""An alternative to the import function so that we can import
modules defined as strings.
This code was taken from: http://docs.python.org/lib/examples-imp.html
"""
# Fast path: see if the module has already been imported.
try:
return sys.modules[name]
except KeyError:
pass
# If any of the following calls raises an exception,
# there's a problem we can't handle -- let the caller handle it.
module_name = name.split('.')[-1]
module_path = os.path.join(EXAMPLE_DIR, *name.split('.')[:-1])
fp, pathname, description = imp.find_module(module_name, [module_path])
try:
return imp.load_module(module_name, fp, pathname, description)
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
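# Usage sketch (assumed layout): __import__("beginner.basic") resolves to
# examples/beginner/basic.py relative to EXAMPLE_DIR and returns the loaded
# module, whose main() is then invoked by run_example().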
def load_example_module(example):
"""Loads modules based upon the given package name"""
mod = __import__(example)
return mod
def run_examples(windowed=False, quiet=False, summary=True):
"""Run all examples in the list of modules.
Returns a boolean value indicating whether all the examples were
successful.
"""
successes = []
failures = []
examples = TERMINAL_EXAMPLES
if windowed:
examples += WINDOWED_EXAMPLES
if quiet:
from sympy.utilities.runtests import PyTestReporter
reporter = PyTestReporter()
reporter.write("Testing Examples\n")
reporter.write("-" * reporter.terminal_width)
else:
reporter = None
for example in examples:
if run_example(example, reporter=reporter):
successes.append(example)
else:
failures.append(example)
if summary:
show_summary(successes, failures, reporter=reporter)
return len(failures) == 0
def run_example(example, reporter=None):
"""Run a specific example.
Returns a boolean value indicating whether the example was successful.
"""
if reporter:
reporter.write(example)
else:
print("=" * 79)
print("Running: ", example)
try:
mod = load_example_module(example)
if reporter:
suppress_output(mod.main)
reporter.write("[PASS]", "Green", align="right")
else:
mod.main()
return True
except KeyboardInterrupt as e:
raise e
except:
if reporter:
reporter.write("[FAIL]", "Red", align="right")
traceback.print_exc()
return False
class DummyFile(object):
def write(self, x):
pass
def suppress_output(fn):
"""Suppresses the output of fn on sys.stdout."""
save_stdout = sys.stdout
try:
sys.stdout = DummyFile()
fn()
finally:
sys.stdout = save_stdout
def show_summary(successes, failures, reporter=None):
"""Shows a summary detailing which examples were successful and which failed."""
if reporter:
reporter.write("-" * reporter.terminal_width)
if failures:
reporter.write("FAILED:\n", "Red")
for example in failures:
reporter.write(" %s\n" % example)
else:
reporter.write("ALL EXAMPLES PASSED\n", "Green")
else:
if successes:
print("SUCCESSFUL: ", file=sys.stderr)
for example in successes:
print(" -", example, file=sys.stderr)
else:
print("NO SUCCESSFUL EXAMPLES", file=sys.stderr)
if failures:
print("FAILED: ", file=sys.stderr)
for example in failures:
print(" -", example, file=sys.stderr)
else:
print("NO FAILED EXAMPLES", file=sys.stderr)
def main(*args, **kws):
"""Main script runner"""
parser = optparse.OptionParser()
parser.add_option('-w', '--windowed', action="store_true", dest="windowed",
help="also run examples requiring windowed environment")
parser.add_option('-q', '--quiet', action="store_true", dest="quiet",
help="runs examples in 'quiet mode' suppressing example output and \
showing simple status messages.")
parser.add_option('--no-summary', action="store_true", dest="no_summary",
help="hides the summary at the end of testing the examples")
(options, _) = parser.parse_args()
return 0 if run_examples(windowed=options.windowed, quiet=options.quiet,
summary=not options.no_summary) else 1
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
{
"content_hash": "bf0282a4e240494628e57fb3ed411efa",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 84,
"avg_line_length": 28.00414937759336,
"alnum_prop": 0.6246851385390428,
"repo_name": "chaffra/sympy",
"id": "a228a3503ab1cf6a1f0258246035d42791875c42",
"size": "6771",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "examples/all.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "GCC Machine Description",
"bytes": "101"
},
{
"name": "Python",
"bytes": "15170894"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3087"
},
{
"name": "XSLT",
"bytes": "366200"
}
],
"symlink_target": ""
}
|
import codecs
import os
import re
import sys
from setuptools import setup, find_packages, Extension
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
from distutils.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
ext = '.pyx' if USE_CYTHON else '.c'
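# With Cython available, the .pyx sources below are (re)compiled; otherwise
# the pre-generated .c files are built directly.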
extensions = [Extension('aiohttp._multidict', ['aiohttp/_multidict' + ext]),
Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext])]
if USE_CYTHON:
extensions = cythonize(extensions)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except (DistutilsPlatformError, FileNotFoundError):
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, ValueError):
raise BuildFailed()
with codecs.open(os.path.join(os.path.abspath(os.path.dirname(
__file__)), 'aiohttp', '__init__.py'), 'r', 'latin1') as fp:
try:
version = re.findall(r"^__version__ = '([^']+)'\r?$",
fp.read(), re.M)[0]
except IndexError:
raise RuntimeError('Unable to determine version.')
install_requires = ['chardet']
if sys.version_info < (3, 4):
install_requires += ['asyncio', 'enum34']
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
tests_require = install_requires + ['pytest', 'gunicorn']
args = dict(
name='aiohttp',
version=version,
description=('http client/server for asyncio'),
long_description='\n\n'.join((read('README.rst'), read('CHANGES.txt'))),
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP'],
author='Nikolay Kim',
author_email='fafhrd91@gmail.com',
url='https://github.com/KeepSafe/aiohttp/',
license='Apache 2',
packages=find_packages(),
install_requires=install_requires,
tests_require=tests_require,
include_package_data=True,
ext_modules=extensions,
cmdclass=dict(build_ext=ve_build_ext,
test=PyTest))
try:
setup(**args)
except BuildFailed:
print("************************************************************")
print("Cannot compile C accelerator module, use pure python version")
print("************************************************************")
del args['ext_modules']
del args['cmdclass']
setup(**args)
|
{
"content_hash": "d6305f563e78ddc138781e4be39eea65",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 88,
"avg_line_length": 29.341463414634145,
"alnum_prop": 0.6098642283180936,
"repo_name": "danielnelson/aiohttp",
"id": "924aeb3e3271aea0cb773459f4485512f95d5a11",
"size": "3609",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1359"
},
{
"name": "PowerShell",
"bytes": "3361"
},
{
"name": "Python",
"bytes": "911824"
}
],
"symlink_target": ""
}
|
""" Config for the overall application """
from django.apps.config import AppConfig
class DjCoreConfig(AppConfig):
name = label = 'dj_core'
|
{
"content_hash": "2ee6bb52364e0ebdfec532d3cee4e84b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.726027397260274,
"repo_name": "ionata/dj-core",
"id": "217ffa0339c5c15f64fbdaa7bad131f0ed00100e",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj_core/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19038"
}
],
"symlink_target": ""
}
|
import os
import sys
if sys.platform == 'win32':
    pybabel = 'flask\\Scripts\\pybabel'
else:
    pybabel = 'flask/bin/pybabel'
if len(sys.argv) != 2:
    print("usage: tr_init <language-code>")
sys.exit(1)
os.system(pybabel + ' extract -F babel.cfg -k lazy_gettext -o messages.pot app')
os.system(pybabel + ' init -i messages.pot -d app/translations -l ' + sys.argv[1])
os.unlink('messages.pot')
|
{
"content_hash": "65ffa73e82a9fb4a816a669d894172c1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 82,
"avg_line_length": 33.25,
"alnum_prop": 0.6716791979949874,
"repo_name": "VagrantApe/flaskMicroblog",
"id": "9833d65ec0945cce0ae5bc7645b3d6f70645fa8e",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tr_init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "11331"
},
{
"name": "JavaScript",
"bytes": "22141"
},
{
"name": "Python",
"bytes": "9308893"
},
{
"name": "Shell",
"bytes": "3927"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0006_auto_20160522_0727'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='tag_id',
new_name='tag',
),
]
|
{
"content_hash": "f15122e25f29a8162f056abdcfaeffea",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 43,
"avg_line_length": 19.22222222222222,
"alnum_prop": 0.5578034682080925,
"repo_name": "porimol/django-blog",
"id": "9956d1d45638e049243781d79b80ef62a7c941ee",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/migrations/0007_auto_20160522_0728.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10806"
},
{
"name": "Python",
"bytes": "20019"
}
],
"symlink_target": ""
}
|
__author__="Jaimiey Sears, updated by Alex Schendel and Alex Reinemann, 2018"
__copyright__="October 26, 2015"
__version__= 0.50
import queue
import threading
import socket
#from mpl_toolkits.mplot3d import Axes3D
#from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
from LidarCommands.utility import *
from LidarCommands.constants import *
import pickle
import time
#from time import sleep
##############################
# PROGRAM MAIN ENTRY POINT #
##############################
def scan(pub, scanDir, scanID):
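    # Publishes a scan command, then runs producer/consumer threads to read
    # one LIDAR sweep and convert it; returns (scanID, z, distance) on success.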
pub.publish(scan=scanDir, serialID=scanID)
print("Published command to scan forward")
lt = LidarThreads(debug=False)
    if lt.socket is None:
        # connection failed in __init__ (an instance is still returned)
        return (None, None, None)
# make the first thread for reading LIDAR data
debugPrint("Starting", ROSTA)
th1_stop = threading.Event()
th1 = threading.Thread(target=lt.produce, args=(lt.dataQueue, th1_stop, pub, scanDir, scanID,), name="data_reader")
debugPrint("Done making thread 1", ROSTA)
# make the second thread to process the LIDAR data
th2_stop = threading.Event()
th2 = threading.Thread(target=lt.consume, args=(lt.dataQueue, th2_stop,), name="cartesian_converter")
debugPrint("done making thread 2", ROSTA)
# start both threads
th1.start()
th2.start()
# close the threads down
    while th1.is_alive():
        # th1_stop.set()
        th1.join(1.0)
    debugPrint("producer stopped", ROSTA)
    while th2.is_alive():
        th2_stop.set()
        th2.join(1.0)
debugPrint("consumer stopped", ROSTA)
th1_stop.set()
th2_stop.set()
x = np.asarray(lt.processedDataArrays[0])
y = np.asarray(lt.processedDataArrays[1])
z = np.asarray(lt.processedDataArrays[2])
distance = np.asarray(lt.processedDataArrays[5])
#plt.pcolormesh([z, lt.processedDataArrays[5]]) # Figure out how this works! Also, why z and dist
#plt.colorbar() # need a colorbar to show the intensity scale
#plt.show()
    debugPrint("Done running threads", ROSTA)
    debugPrint("exiting with code {}".format(lt.exit()), ROSTA)
    debugPrint("queue size at exit: {}".format(lt.dataQueue.qsize()), ROSTA)
    return lt.scanID, z, distance
#####################
## UNIT TEST 1 END ##
#####################
##
# LidarThreads
# class controls threads for gathering LIDAR data
# **In version 0.10 the actual functions were simulated with time.sleep statements**
##
class LidarThreads():
def __init__(self, debug=False):
# don't forget: netsh interface ip set address "Local Area Connection" static 192.168.0.100
global nhokreadings
self.scanID = 0
# controls a number of debug statements which should only print sometimes
self.debug = debug
self.commandOutput = ""
self.dataOutput = ""
self.slitAngle = START_ANGLE
        #command to get data from the lidar.
        #MD = Distance measurement with continuous scanning.
        #Parameters:
        #  Position at the starting step, length 4, name: Start.
        #  Position at the ending step, length 4, name: End. Units unknown.
        #  Number of group steps, length 2, name: Grouping. Units unknown.
        #  Number of scans to skip, length 1, name: Skips.
        #  Number of measurement scans, length 2, name: Scans.
        #Documentation: https://en.manu-systems.com/HOK-UTM-30LX-EW_communication_protocol.pdf
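        # Decomposition of the command built below, per the fields documented
        # above: 'MD' + '0300' (Start) + '0700' (End) + '00' (Grouping)
        # + '0' (Skips) + '00' (Scans) + '\n'.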
strStartCommand = 'MD'+'0300'+'0700'+'00'+'0'+'00'+'\n'
strEndCommand = 'QT'+'\n'
self.StartCommand=bytes(strStartCommand, 'ascii')#convert to ascii encoded binary
self.EndCommand=bytes(strEndCommand, 'ascii')
# establish communication with the sensor.
# NOTE, special network settings are required to connect:
# IP: 192.168.1.11, Subnet Mask: 255.255.255.0 (default) Default Gateway: 192.168.0.1
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.settimeout(.1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.connect(("192.168.0.10", 10940))
        except socket.timeout:
            debugPrint("I can't connect. Exiting.", SOCKET_MSG)
            # __init__ cannot make the instance None by returning None;
            # flag the failure on the instance instead (checked in scan()).
            self.socket = None
            return
# dataQueue is a Queue of strings
# each string representing a slice (scan)
self.dataQueue = queue.Queue()
self.processedDataArrays = []
##
# produce
#
# Description: gets data from the LIDAR unit, puts it into the queue
#
# Parameters:
# dataQueue - queue to submit data to
# stop_event - event to listen to for exit
##
def produce(self, dataQueue, stop_event, pub, scanDir, scanID):
counter = 0
angle = -1
start = time.time()
sID=scanID
        for i in range(0, 15):  # number of slices to scan along y-axis (moving servo motor)
# wait for the Queue to empty
while dataQueue.qsize() > 0:
pass
angle = angle+1
# get the starting theta angle
self.slitAngle = START_ANGLE
# get data from the user
# print "\n>>> Rotate LiDAR to {} degrees".format(ang)
# inp = raw_input(">>> Press enter when ready to make a scan\n")
# if inp == "":
# send scan request to the LIDAR
self.socket.sendall(self.StartCommand)
sID=sID+1
#astr ='MD'+'0180'+'0900'+'00'+'0'+'01'+'\n'
#self.socket.sendall(astr.encode())
#sleep(0.1)
debugPrint("Scanning angle...\n", SOCKET_DATA)
# receive data from the LIDAR
for j in range(0, 100):#number of slices to scan along x-axis (resolution)?
try:
temp = self.socket.recv(3)#receive up to 24 bits of data
#debugPrint("Recv:\n" + temp.decode()[:8], SOCKET_DATA)
data = temp.decode().split("\n")#decode the data and split it by new line
data.reverse()
except socket.timeout as e:
debugPrint("waiting for data", SOCKET_MSG)
break
                while data:
                    try:
                        line = data.pop()  # renamed: 'str' shadowed the built-in
                        # put data into our queue for the consumer to use
                        dataQueue.put((line, angle))
                    except queue.Full as e:
                        debugPrint("Data Queue is full.", SOCKET_MSG)
                        continue
counter += 1.0
end = time.time()
#dataQueue.put('end', angle)
debugPrint("Time difference: {}".format(end-start), ROSTA)
self.socket.sendall(self.EndCommand)
self.scanID = sID
##
# consume
#
# Description: consumes data from the queue
#
# Parameters:
# dataQueue - queue to consume from
# stop_event - the event to watch for quitting.
##
def consume(self, dataQueue, stop_event):
counter = 0
xLines = []
yLines = []
zLines = []
phiLines = []
thetaLines = []
distLines = []
timeLines = []
xLines.append([])
yLines.append([])
zLines.append([])
phiLines.append([])
thetaLines.append([])
distLines.append([])
dataSet = ""
currTime = None
emptied = False
i = 0
index = 0
start = time.time()
while not stop_event.is_set():
try:
# get some data from the queue, process it to cartesian
dataline, anglePhi = dataQueue.get(timeout=0.25)
emptied = False
if dataline == 'end':
xLines.append([])
yLines.append([])
zLines.append([])
phiLines.append([])
thetaLines.append([])
distLines.append([])
i += 1
continue
elif dataline == "":
if not dataSet == "":
for string in splitNparts(dataSet,64):
X, Y, Z, dist, phi, th = decode_new(string, anglePhi)
#self.slitAngle = lastAngle
xLines[i].append(X)
yLines[i].append(Y)
zLines[i].append(Z)
phiLines[i].append(phi)
                            thetaLines[i].append(th)  # append per-slice, matching x/y/z
                            distLines[i].append(dist)
# timeLines = timeLines + currTime
#debugPrint(str(distLines), SOCKET_DATA)
dataSet = ""
continue
elif dataline == self.StartCommand:
counter = 0
else:
counter += 1
#debugPrint("Consumer: data= {}".format(dataline), SOCKET_DATA)
self.commandOutput += dataline + '\n'
# if counter == 4:
# currTime = [decodeShort(dataline[:-1])]
if counter >= 5:
dataSet = dataSet + dataline
except queue.Empty as e:
if not emptied:
debugPrint( "Data Queue is empty", SOCKET_MSG)
emptied = True
continue
self.processedDataArrays = (xLines, yLines, zLines, phiLines, thetaLines, distLines)
end = time.time()
debugPrint("Time difference: {}".format(end-start), ROSTA)
##
# exit
#
# Description: closes out the socket
# returns: 0 on success, -1 on failure
##
    def exit(self):
        if self.socket is not None:
            self.socket.close()
            return 0
        else:
            return -1
|
{
"content_hash": "3bde787993c8d64ad54a606532f4fe86",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 119,
"avg_line_length": 35.44210526315789,
"alnum_prop": 0.5358875358875359,
"repo_name": "MarsRobotics/Experiments",
"id": "b86e05e1a6c30d650963247eabf5ed8ca8d8a11e",
"size": "10102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LIDAR Data Extraction/LidarCommands/Currently Used/rpi/raspi_threads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "38932"
},
{
"name": "MATLAB",
"bytes": "24773"
},
{
"name": "Python",
"bytes": "129832"
},
{
"name": "Shell",
"bytes": "601"
}
],
"symlink_target": ""
}
|
from netforce.model import Model, fields
class ContactCateg(Model):
_name = "contact.categ"
_string = "Contact Category"
_key = ["code"]
_name_field = "name"
_fields = {
"name": fields.Char("Category Name", required=True, search=True),
"code": fields.Char("Category Code", search=True),
"parent_id": fields.Many2One("contact.categ", "Parent", search=True),
"description": fields.Text("Description"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"full_name": fields.Char("Full Name", function="get_full_name"),
}
_order = "code"
def get_full_name(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
n = obj.name
p = obj.parent_id
while p:
n = p.name + " / " + n
p = p.parent_id
vals[obj.id] = n
return vals
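    # e.g. with a category "Retail" whose parent is "Customers" (hypothetical
    # names), get_full_name() yields {obj.id: "Customers / Retail"}.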
ContactCateg.register()
|
{
"content_hash": "ee5f3ae18f7f627fdc5048997e08e053",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.5484210526315789,
"repo_name": "anastue/netforce",
"id": "502774508e58cc486d58d493cf71f2d24ba21a14",
"size": "2055",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable-3.1",
"path": "netforce_contact/netforce_contact/models/contact_categ.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "407336"
},
{
"name": "Groff",
"bytes": "15858"
},
{
"name": "HTML",
"bytes": "477928"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3711952"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3455528"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
}
|
"""Test config flow."""
from unittest.mock import Mock, patch
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.setup import async_setup_component
async def test_hassio_discovery_startup(hass, aioclient_mock, hassio_client):
"""Test startup and discovery after event."""
aioclient_mock.get(
"http://127.0.0.1/discovery",
json={
"result": "ok",
"data": {
"discovery": [
{
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
}
]
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
assert aioclient_mock.call_count == 0
with patch(
"homeassistant.components.mqtt.config_flow.FlowHandler.async_step_hassio",
return_value={"type": "abort"},
) as mock_mqtt:
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
async def test_hassio_discovery_startup_done(hass, aioclient_mock, hassio_client):
"""Test startup and discovery with hass discovery."""
aioclient_mock.post(
"http://127.0.0.1/supervisor/options",
json={"result": "ok", "data": {}},
)
aioclient_mock.get(
"http://127.0.0.1/discovery",
json={
"result": "ok",
"data": {
"discovery": [
{
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
}
]
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
with patch(
"homeassistant.components.hassio.HassIO.update_hass_api",
return_value={"result": "ok"},
), patch(
"homeassistant.components.hassio.HassIO.get_info",
Mock(side_effect=HassioAPIError()),
), patch(
"homeassistant.components.mqtt.config_flow.FlowHandler.async_step_hassio",
return_value={"type": "abort"},
) as mock_mqtt:
await hass.async_start()
await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
async def test_hassio_discovery_webhook(hass, aioclient_mock, hassio_client):
"""Test discovery webhook."""
aioclient_mock.get(
"http://127.0.0.1/discovery/testuuid",
json={
"result": "ok",
"data": {
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
with patch(
"homeassistant.components.mqtt.config_flow.FlowHandler.async_step_hassio",
return_value={"type": "abort"},
) as mock_mqtt:
resp = await hassio_client.post(
"/api/hassio_push/discovery/testuuid",
json={"addon": "mosquitto", "service": "mqtt", "uuid": "testuuid"},
)
await hass.async_block_till_done()
assert resp.status == 200
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
|
{
"content_hash": "2e0bbc6ba96cc40f7f1bcb0d0c8b5950",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 82,
"avg_line_length": 32.66863905325444,
"alnum_prop": 0.45716355732657127,
"repo_name": "partofthething/home-assistant",
"id": "c23ee40de6ef003d64483da9b5ad361c9d6b670e",
"size": "5521",
"binary": false,
"copies": "9",
"ref": "refs/heads/dev",
"path": "tests/components/hassio/test_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
from django.db.models.base import ModelBase
class CheckConstraintMetaClass(ModelBase):
def __new__(cls, name, bases, attrs):
model = super(CheckConstraintMetaClass, cls).__new__(cls, name, bases, attrs)
for (constraint_name, constraint_obj) in model._meta.constraints:
constraint_obj.validate(model._meta)
return model
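# Usage sketch (hypothetical model): declaring this metaclass on a model, e.g.
#     class Invoice(models.Model, metaclass=CheckConstraintMetaClass): ...
# validates every entry in Meta.constraints as soon as the class is created.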
|
{
"content_hash": "9824d8909762109733e1eca2c8ee784f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 85,
"avg_line_length": 40.111111111111114,
"alnum_prop": 0.6869806094182825,
"repo_name": "theju/django-check-constraints",
"id": "5a35bc2e7f0133c5aee855f69dfb8274f176b1d2",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_constraints/metaclass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18026"
}
],
"symlink_target": ""
}
|
from nose.tools import * # flake8: noqa
from datetime import datetime
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from tests.base import ApiTestCase
from tests.factories import ProjectFactory, AuthUserFactory, CommentFactory
class TestCommentReportsView(ApiTestCase):
def setUp(self):
super(TestCommentReportsView, self).setUp()
self.user = AuthUserFactory()
self.contributor = AuthUserFactory()
self.non_contributor = AuthUserFactory()
self.payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
def _set_up_private_project_comment_reports(self):
self.private_project = ProjectFactory.build(is_public=False, creator=self.user)
self.private_project.add_contributor(contributor=self.contributor, save=True)
self.comment = CommentFactory.build(node=self.private_project, user=self.contributor)
self.comment.reports = self.comment.reports or {}
self.comment.reports[self.user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
self.comment.save()
self.private_url = '/{}comments/{}/reports/'.format(API_BASE, self.comment._id)
def _set_up_public_project_comment_reports(self, comment_level='public'):
self.public_project = ProjectFactory.build(is_public=True, creator=self.user, comment_level=comment_level)
self.public_project.add_contributor(contributor=self.contributor, save=True)
self.public_comment = CommentFactory.build(node=self.public_project, user=self.contributor)
self.public_comment.reports = self.public_comment.reports or {}
self.public_comment.reports[self.user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
self.public_comment.save()
self.public_url = '/{}comments/{}/reports/'.format(API_BASE, self.public_comment._id)
def test_private_node_logged_out_user_cannot_view_reports(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_private_node_logged_in_non_contributor_cannot_view_reports(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_only_reporting_user_can_view_reports(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 1)
assert_in(self.user._id, report_ids)
def test_private_node_reported_user_does_not_see_report(self):
self._set_up_private_project_comment_reports()
res = self.app.get(self.private_url, auth=self.contributor.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 0)
assert_not_in(self.contributor._id, report_ids)
def test_public_node_only_reporting_contributor_can_view_report(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 1)
assert_in(self.user._id, report_ids)
def test_public_node_reported_user_does_not_see_report(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, auth=self.contributor.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 0)
assert_not_in(self.contributor._id, report_ids)
def test_public_node_non_contributor_does_not_see_other_user_reports(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 0)
assert_not_in(self.non_contributor._id, report_ids)
def test_public_node_non_contributor_reporter_can_view_own_report(self):
self._set_up_public_project_comment_reports()
self.public_comment.reports[self.non_contributor._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
self.public_comment.save()
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 1)
assert_in(self.non_contributor._id, report_ids)
def test_public_node_logged_out_user_cannot_view_reports(self):
self._set_up_public_project_comment_reports()
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_private_comment_level_non_contributor_cannot_see_reports(self):
project = ProjectFactory(is_public=True, creator=self.user, comment_level='private')
comment = CommentFactory(node=project, user=self.user)
comment.reports = dict()
comment.reports[self.user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
comment.save()
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.get(url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_report_comment_invalid_type(self):
self._set_up_private_project_comment_reports()
payload = {
'data': {
'type': 'Not a valid type.',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_report_comment_no_type(self):
self._set_up_private_project_comment_reports()
payload = {
'data': {
'type': '',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be blank.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
def test_report_comment_invalid_spam_category(self):
self._set_up_private_project_comment_reports()
category = 'Not a valid category'
payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': category,
'message': 'delicious spam'
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '\"' + category + '\"' + ' is not a valid choice.')
def test_report_comment_allow_blank_message(self):
self._set_up_private_project_comment_reports()
comment = CommentFactory(node=self.private_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': ''
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.user._id)
assert_equal(res.json['data']['attributes']['message'], payload['data']['attributes']['message'])
def test_private_node_logged_out_user_cannot_report_comment(self):
self._set_up_private_project_comment_reports()
res = self.app.post_json_api(self.private_url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_private_node_logged_in_non_contributor_cannot_report_comment(self):
self._set_up_private_project_comment_reports()
res = self.app.post_json_api(self.private_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_in_contributor_can_report_comment(self):
self._set_up_private_project_comment_reports()
comment = CommentFactory(node=self.private_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.post_json_api(url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.user._id)
def test_user_cannot_report_own_comment(self):
self._set_up_private_project_comment_reports()
res = self.app.post_json_api(self.private_url, self.payload, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'You cannot report your own comment.')
def test_user_cannot_report_comment_twice(self):
self._set_up_private_project_comment_reports()
# User reports a comment
comment = CommentFactory(node=self.private_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.post_json_api(url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
# User cannot report the comment again
res = self.app.post_json_api(url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Comment already reported.')
def test_public_node_logged_out_user_cannot_report_comment(self):
self._set_up_public_project_comment_reports()
res = self.app.post_json_api(self.public_url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_contributor_can_report_comment(self):
self._set_up_public_project_comment_reports()
comment = CommentFactory(node=self.public_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.post_json_api(url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.user._id)
def test_public_node_non_contributor_can_report_comment(self):
""" Test that when a public project allows any osf user to
comment (comment_level == 'public), non-contributors
can also report comments.
"""
self._set_up_public_project_comment_reports()
res = self.app.post_json_api(self.public_url, self.payload, auth=self.non_contributor.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.non_contributor._id)
def test_public_node_private_comment_level_non_contributor_cannot_report_comment(self):
self._set_up_public_project_comment_reports(comment_level='private')
res = self.app.get(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
class TestFileCommentReportsView(ApiTestCase):
def setUp(self):
super(TestFileCommentReportsView, self).setUp()
self.user = AuthUserFactory()
self.contributor = AuthUserFactory()
self.non_contributor = AuthUserFactory()
self.payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
def _set_up_private_project_file_comment_reports(self):
self.private_project = ProjectFactory.build(is_public=False, creator=self.user)
self.private_project.add_contributor(contributor=self.contributor, save=True)
self.file = test_utils.create_test_file(self.private_project, self.user)
self.comment = CommentFactory.build(node=self.private_project, target=self.file.get_guid(), user=self.contributor)
self.comment.reports = self.comment.reports or {}
self.comment.reports[self.user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
self.comment.save()
self.private_url = '/{}comments/{}/reports/'.format(API_BASE, self.comment._id)
def _set_up_public_project_file_comment_reports(self, comment_level='public'):
self.public_project = ProjectFactory.build(is_public=True, creator=self.user, comment_level=comment_level)
self.public_project.add_contributor(contributor=self.contributor, save=True)
self.public_file = test_utils.create_test_file(self.public_project, self.user)
self.public_comment = CommentFactory.build(node=self.public_project, target=self.public_file.get_guid(), user=self.contributor)
self.public_comment.reports = self.public_comment.reports or {}
self.public_comment.reports[self.user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
self.public_comment.save()
self.public_url = '/{}comments/{}/reports/'.format(API_BASE, self.public_comment._id)
def test_private_node_logged_out_user_cannot_view_file_comment_reports(self):
self._set_up_private_project_file_comment_reports()
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_private_node_logged_in_non_contributor_cannot_view_file_comment_reports(self):
self._set_up_private_project_file_comment_reports()
res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_only_reporting_user_can_view_file_comment_reports(self):
self._set_up_private_project_file_comment_reports()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 1)
assert_in(self.user._id, report_ids)
def test_private_node_reported_user_does_not_see_file_comment_report(self):
self._set_up_private_project_file_comment_reports()
res = self.app.get(self.private_url, auth=self.contributor.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 0)
assert_not_in(self.contributor._id, report_ids)
def test_public_node_only_reporting_contributor_can_view_file_comment_report(self):
self._set_up_public_project_file_comment_reports()
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 1)
assert_in(self.user._id, report_ids)
def test_public_node_reported_user_does_not_see_file_comment_report(self):
self._set_up_public_project_file_comment_reports()
res = self.app.get(self.public_url, auth=self.contributor.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 0)
assert_not_in(self.contributor._id, report_ids)
def test_public_node_non_contributor_does_not_see_other_user_reports(self):
self._set_up_public_project_file_comment_reports()
res = self.app.get(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 0)
assert_not_in(self.non_contributor._id, report_ids)
def test_public_node_non_contributor_reporter_can_view_own_file_comment_report(self):
self._set_up_public_project_file_comment_reports()
self.public_comment.reports[self.non_contributor._id] = {
'category': 'spam',
'text': 'This is spam',
'date': datetime.utcnow(),
'retracted': False,
}
self.public_comment.save()
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert_equal(len(report_json), 1)
assert_in(self.non_contributor._id, report_ids)
def test_public_node_logged_out_user_cannot_view_file_comment_reports(self):
self._set_up_public_project_file_comment_reports()
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_private_comment_level_non_contributor_does_not_see_report(self):
self._set_up_public_project_file_comment_reports(comment_level='private')
res = self.app.get(self.public_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_report_file_comment_invalid_type(self):
self._set_up_private_project_file_comment_reports()
payload = {
'data': {
'type': 'Not a valid type.',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_report_file_comment_no_type(self):
self._set_up_private_project_file_comment_reports()
payload = {
'data': {
'type': '',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be blank.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
def test_report_file_comment_invalid_spam_category(self):
self._set_up_private_project_file_comment_reports()
category = 'Not a valid category'
payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': category,
'message': 'delicious spam'
}
}
}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '\"' + category + '\"' + ' is not a valid choice.')
def test_report_file_comment_allow_blank_message(self):
self._set_up_private_project_file_comment_reports()
comment = CommentFactory(node=self.private_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': ''
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.user._id)
assert_equal(res.json['data']['attributes']['message'], payload['data']['attributes']['message'])
def test_private_node_logged_out_user_cannot_report_file_comment(self):
self._set_up_private_project_file_comment_reports()
res = self.app.post_json_api(self.private_url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_private_node_logged_in_non_contributor_cannot_report_file_comment(self):
self._set_up_private_project_file_comment_reports()
res = self.app.post_json_api(self.private_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_private_node_logged_in_contributor_can_report_file_comment(self):
self._set_up_private_project_file_comment_reports()
comment = CommentFactory(node=self.private_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.post_json_api(url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.user._id)
def test_user_cannot_report_own_file_comment(self):
self._set_up_private_project_file_comment_reports()
res = self.app.post_json_api(self.private_url, self.payload, auth=self.contributor.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'You cannot report your own comment.')
def test_user_cannot_report_file_comment_twice(self):
self._set_up_private_project_file_comment_reports()
# User reports a comment
comment = CommentFactory(node=self.private_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.post_json_api(url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
# User cannot report the comment again
res = self.app.post_json_api(url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Comment already reported.')
def test_public_node_logged_out_user_cannot_report_file_comment(self):
self._set_up_public_project_file_comment_reports()
res = self.app.post_json_api(self.public_url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_public_node_non_contributor_can_report_file_comment(self):
self._set_up_public_project_file_comment_reports()
res = self.app.post_json_api(self.public_url, self.payload, auth=self.non_contributor.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.non_contributor._id)
def test_public_node_contributor_can_report_file_comment(self):
self._set_up_public_project_file_comment_reports()
comment = CommentFactory(node=self.public_project, user=self.contributor)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = self.app.post_json_api(url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.user._id)
def test_public_node_private_comment_level_non_contributor_cannot_report_file_comment(self):
self._set_up_public_project_file_comment_reports(comment_level='private')
res = self.app.post_json_api(self.public_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
|
{
"content_hash": "a29a6a7b155b0067998028f1bb1976d5",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 135,
"avg_line_length": 47.9906191369606,
"alnum_prop": 0.6255131162281559,
"repo_name": "brandonPurvis/osf.io",
"id": "de0e895588ae78237cbb403175740a0d47ed1f8c",
"size": "25579",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "api_tests/comments/views/test_comment_report_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "133911"
},
{
"name": "HTML",
"bytes": "68108"
},
{
"name": "JavaScript",
"bytes": "1394041"
},
{
"name": "Mako",
"bytes": "639052"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "4906600"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
}
|
import re
import tempfile
import codecs
import os
from warnings import warn
# Standard packages in Python 2/3 compatibility mode.
from .compatibility import StringIO
from .compatibility import defaultdict
from .compatibility import namedtuple
from .compatibility import force_unicode
from .compatibility import num_types, str_types
# Package imports.
from . import xmlwriter
from .format import Format
from .drawing import Drawing
from .shape import Shape
from .xmlwriter import XMLwriter
from .utility import xl_rowcol_to_cell
from .utility import xl_rowcol_to_cell_fast
from .utility import xl_cell_to_rowcol
from .utility import xl_col_to_name
from .utility import xl_range
from .utility import xl_color
from .utility import get_sparkline_style
from .utility import supported_datetime
from .utility import datetime_to_excel_datetime
from .utility import quote_sheetname
###############################################################################
#
# Decorator functions.
#
###############################################################################
def convert_cell_args(method):
"""
Decorator function to convert A1 notation in cell method calls
to the default row/col notation.
"""
def cell_wrapper(self, *args, **kwargs):
try:
# First arg is an int, default to row/col notation.
if len(args):
int(args[0])
except ValueError:
# First arg isn't an int, convert to A1 notation.
new_args = list(xl_cell_to_rowcol(args[0]))
new_args.extend(args[1:])
args = new_args
return method(self, *args, **kwargs)
return cell_wrapper
def convert_range_args(method):
"""
Decorator function to convert A1 notation in range method calls
to the default row/col notation.
"""
def cell_wrapper(self, *args, **kwargs):
try:
# First arg is an int, default to row/col notation.
if len(args):
int(args[0])
except ValueError:
# First arg isn't an int, convert to A1 notation.
if ':' in args[0]:
cell_1, cell_2 = args[0].split(':')
row_1, col_1 = xl_cell_to_rowcol(cell_1)
row_2, col_2 = xl_cell_to_rowcol(cell_2)
else:
row_1, col_1 = xl_cell_to_rowcol(args[0])
row_2, col_2 = row_1, col_1
new_args = [row_1, col_1, row_2, col_2]
new_args.extend(args[1:])
args = new_args
return method(self, *args, **kwargs)
return cell_wrapper
def convert_column_args(method):
"""
Decorator function to convert A1 notation in columns method calls
to the default row/col notation.
"""
def column_wrapper(self, *args, **kwargs):
try:
# First arg is an int, default to row/col notation.
if len(args):
int(args[0])
except ValueError:
# First arg isn't an int, convert to A1 notation.
cell_1, cell_2 = [col + '1' for col in args[0].split(':')]
_, col_1 = xl_cell_to_rowcol(cell_1)
_, col_2 = xl_cell_to_rowcol(cell_2)
new_args = [col_1, col_2]
new_args.extend(args[1:])
args = new_args
return method(self, *args, **kwargs)
return column_wrapper
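# Usage sketch for the decorators above (hypothetical calls; any method
# decorated with them accepts either notation):
#     ws.write("A1", "hi")      # convert_cell_args   -> (0, 0, "hi")
#     some_range("A1:B2", x)    # convert_range_args  -> (0, 0, 1, 1, x)
#     ws.set_column("B:D", 20)  # convert_column_args -> (1, 3, 20)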
###############################################################################
#
# Named tuples used for cell types.
#
###############################################################################
cell_string_tuple = namedtuple('String', 'string, format')
cell_number_tuple = namedtuple('Number', 'number, format')
cell_blank_tuple = namedtuple('Blank', 'format')
cell_boolean_tuple = namedtuple('Boolean', 'boolean, format')
cell_formula_tuple = namedtuple('Formula', 'formula, format, value')
cell_arformula_tuple = namedtuple('ArrayFormula',
'formula, format, value, range')
###############################################################################
#
# Worksheet Class definition.
#
###############################################################################
class Worksheet(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Worksheet file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(Worksheet, self).__init__()
self.name = None
self.index = None
self.str_table = None
self.palette = None
self.optimization = 0
self.tmpdir = None
self.is_chartsheet = False
self.ext_sheets = []
self.fileclosed = 0
self.excel_version = 2007
self.excel2003_style = False
self.xls_rowmax = 1048576
self.xls_colmax = 16384
self.xls_strmax = 32767
self.dim_rowmin = None
self.dim_rowmax = None
self.dim_colmin = None
self.dim_colmax = None
self.colinfo = {}
self.selections = []
self.hidden = 0
self.active = 0
self.tab_color = 0
self.panes = []
self.active_pane = 3
self.selected = 0
self.page_setup_changed = False
self.paper_size = 0
self.orientation = 1
self.print_options_changed = False
self.hcenter = 0
self.vcenter = 0
self.print_gridlines = 0
self.screen_gridlines = 1
self.print_headers = 0
self.header_footer_changed = False
self.header = ''
self.footer = ''
self.header_footer_aligns = True
self.header_footer_scales = True
self.header_images = []
self.footer_images = []
self.header_images_list = []
self.margin_left = 0.7
self.margin_right = 0.7
self.margin_top = 0.75
self.margin_bottom = 0.75
self.margin_header = 0.3
self.margin_footer = 0.3
self.repeat_row_range = ''
self.repeat_col_range = ''
self.print_area_range = ''
self.page_order = 0
self.black_white = 0
self.draft_quality = 0
self.print_comments = 0
self.page_start = 0
self.fit_page = 0
self.fit_width = 0
self.fit_height = 0
self.hbreaks = []
self.vbreaks = []
self.protect_options = {}
self.set_cols = {}
self.set_rows = defaultdict(dict)
self.zoom = 100
self.zoom_scale_normal = 1
self.print_scale = 100
self.is_right_to_left = 0
self.show_zeros = 1
self.leading_zeros = 0
self.outline_row_level = 0
self.outline_col_level = 0
self.outline_style = 0
self.outline_below = 1
self.outline_right = 1
self.outline_on = 1
self.outline_changed = False
self.original_row_height = 15
self.default_row_height = 15
self.default_row_pixels = 20
self.default_col_pixels = 64
self.default_row_zeroed = 0
self.names = {}
self.write_match = []
self.table = defaultdict(dict)
self.merge = []
self.row_spans = {}
self.has_vml = False
self.has_header_vml = False
self.has_comments = False
self.comments = defaultdict(dict)
self.comments_list = []
self.comments_author = ''
self.comments_visible = 0
self.vml_shape_id = 1024
self.buttons_list = []
self.vml_header_id = 0
self.autofilter_area = ''
self.autofilter_ref = None
self.filter_range = []
self.filter_on = 0
self.filter_cols = {}
self.filter_type = {}
self.col_sizes = {}
self.row_sizes = {}
self.col_formats = {}
self.col_size_changed = False
self.row_size_changed = False
self.last_shape_id = 1
self.rel_count = 0
self.hlink_count = 0
self.hlink_refs = []
self.external_hyper_links = []
self.external_drawing_links = []
self.external_comment_links = []
self.external_vml_links = []
self.external_table_links = []
self.drawing_links = []
self.vml_drawing_links = []
self.charts = []
self.images = []
self.tables = []
self.sparklines = []
self.shapes = []
self.shape_hash = {}
self.drawing = 0
self.rstring = ''
self.previous_row = 0
self.validations = []
self.cond_formats = {}
self.dxf_priority = 1
self.page_view = 0
self.vba_codename = None
self.date_1904 = False
self.hyperlinks = defaultdict(dict)
self.strings_to_numbers = False
self.strings_to_urls = True
self.nan_inf_to_errors = False
self.strings_to_formulas = True
self.default_date_format = None
self.default_url_format = None
self.row_data_filename = None
self.row_data_fh = None
self.worksheet_meta = None
self.vml_data_id = None
self.vml_shape_id = None
self.row_data_fh_closed = False
self.vertical_dpi = 0
self.horizontal_dpi = 0
@convert_cell_args
def write(self, row, col, *args):
"""
Write data to a worksheet cell by calling the appropriate write_*()
method based on the type of data being passed.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
*args: Args to pass to sub functions.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
other: Return value of called method.
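        Example (an illustrative sketch, assuming ``worksheet`` was
        created with workbook.add_worksheet()):
            worksheet.write(0, 0, 'Hello')          # write_string()
            worksheet.write(1, 0, 123.456)          # write_number()
            worksheet.write(2, 0, '=SIN(PI()/4)')   # write_formula()
            worksheet.write('A4', None)             # write_blank()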
"""
# Check the number of args passed.
if not len(args):
raise TypeError("write() takes at least 4 arguments (3 given)")
# The first arg should be the token for all write calls.
token = args[0]
# Write None as a blank cell.
if token is None:
return self.write_blank(row, col, *args)
# Write boolean types.
if isinstance(token, bool):
return self.write_boolean(row, col, *args)
# Write datetime objects.
if supported_datetime(token):
return self.write_datetime(row, col, *args)
# Write number types.
if isinstance(token, num_types):
return self.write_number(row, col, *args)
# Write string types.
if isinstance(token, str_types):
# Map the data to the appropriate write_*() method.
if token == '':
return self.write_blank(row, col, *args)
elif self.strings_to_formulas and token.startswith('='):
return self.write_formula(row, col, *args)
elif self.strings_to_urls and re.match('(ftp|http)s?://', token):
return self.write_url(row, col, *args)
elif self.strings_to_urls and re.match('mailto:', token):
return self.write_url(row, col, *args)
elif self.strings_to_urls and re.match('(in|ex)ternal:', token):
return self.write_url(row, col, *args)
elif self.strings_to_numbers:
try:
f = float(token)
if (self.nan_inf_to_errors or
(not self._isnan(f) and not self._isinf(f))):
return self.write_number(row, col, f, *args[1:])
except ValueError:
# Not a number, write as a string.
pass
return self.write_string(row, col, *args)
else:
# We have a plain string.
return self.write_string(row, col, *args)
# We haven't matched a supported type. Try float.
try:
f = float(token)
return self.write_number(row, col, f, *args[1:])
except ValueError:
pass
except TypeError:
raise TypeError("Unsupported type %s in write()" % type(token))
# Finally try string.
try:
str(token)
return self.write_string(row, col, *args)
except ValueError:
raise TypeError("Unsupported type %s in write()" % type(token))
@convert_cell_args
def write_string(self, row, col, string, cell_format=None):
"""
Write a string to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string: Cell data. Str.
            cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
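        Example (illustrative, assuming ``worksheet`` from an open workbook
        and ``bold`` created with workbook.add_format({'bold': True})):
            worksheet.write_string(0, 0, 'Plain text')
            worksheet.write_string('A2', 'Bold text', bold)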
"""
str_error = 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Check that the string is < 32767 chars.
if len(string) > self.xls_strmax:
string = string[:self.xls_strmax]
str_error = -2
# Write a shared string or an in-line string in optimization mode.
if self.optimization == 0:
string_index = self.str_table._get_shared_string_index(string)
else:
string_index = string
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_string_tuple(string_index, cell_format)
return str_error
@convert_cell_args
def write_number(self, row, col, number, cell_format=None):
"""
Write a number to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
number: Cell data. Int or float.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
if self._isnan(number) or self._isinf(number):
if self.nan_inf_to_errors:
if self._isnan(number):
return self.write_formula(row, col, '#NUM!', cell_format,
'#NUM!')
elif self._isinf(number):
return self.write_formula(row, col, '1/0', cell_format,
'#DIV/0!')
else:
raise TypeError(
"NAN/INF not supported in write_number() "
"without 'nan_inf_to_errors' Workbook() option")
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_number_tuple(number, cell_format)
return 0
@convert_cell_args
def write_blank(self, row, col, blank, cell_format=None):
"""
Write a blank cell with formatting to a worksheet cell. The blank
token is ignored and the format only is written to the cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
blank: Any value. It is ignored.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Don't write a blank cell unless it has a format.
if cell_format is None:
return 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_blank_tuple(cell_format)
return 0
@convert_cell_args
def write_formula(self, row, col, formula, cell_format=None, value=0):
"""
Write a formula to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
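        Example (illustrative, assuming ``worksheet`` from an open
        workbook; the leading '=' is optional):
            worksheet.write_formula('A1', '=B1 + C1')
            worksheet.write_formula('A2', '=SUM(B2:C2)')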
"""
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Hand off array formulas.
if formula.startswith('{') and formula.endswith('}'):
return self.write_array_formula(row, col, row, col, formula,
cell_format, value)
# Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_formula_tuple(formula, cell_format, value)
return 0
@convert_range_args
def write_array_formula(self, first_row, first_col, last_row, last_col,
formula, cell_format=None, value=0):
"""
        Write an array formula to a worksheet cell range.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
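        Example (illustrative, assuming ``worksheet`` has data in B1:C3;
        the braces and leading '=' are stripped before storage):
            worksheet.write_array_formula('A1:A3', '{=TREND(C1:C3,B1:B3)}')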
"""
# Swap last row/col with first row/col as necessary.
if first_row > last_row:
first_row, last_row = last_row, first_row
if first_col > last_col:
first_col, last_col = last_col, first_col
# Check that row and col are valid and store max and min values
if self._check_dimensions(last_row, last_col):
return -1
# Define array range
if first_row == last_row and first_col == last_col:
cell_range = xl_rowcol_to_cell(first_row, first_col)
else:
cell_range = (xl_rowcol_to_cell(first_row, first_col) + ':'
+ xl_rowcol_to_cell(last_row, last_col))
# Remove array formula braces and the leading =.
if formula[0] == '{':
formula = formula[1:]
if formula[0] == '=':
formula = formula[1:]
if formula[-1] == '}':
formula = formula[:-1]
# Write previous row if in in-line string optimization mode.
if self.optimization and first_row > self.previous_row:
self._write_single_row(first_row)
# Store the cell data in the worksheet data table.
self.table[first_row][first_col] = cell_arformula_tuple(formula,
cell_format,
value,
cell_range)
# Pad out the rest of the area with formatted zeroes.
if not self.optimization:
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row != first_row or col != first_col:
self.write_number(row, col, 0, cell_format)
return 0
@convert_cell_args
def write_datetime(self, row, col, date, cell_format=None):
"""
Write a date or time to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
date: Date and/or time as a datetime object.
cell_format: A cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
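        Example (illustrative, assuming ``date_format`` was created with
        workbook.add_format({'num_format': 'yyyy-mm-dd'})):
            import datetime
            date = datetime.datetime(2016, 4, 25)
            worksheet.write_datetime('A1', date, date_format)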
"""
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
# Convert datetime to an Excel date.
number = self._convert_date_time(date)
# Add the default date format.
if cell_format is None:
cell_format = self.default_date_format
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_number_tuple(number, cell_format)
return 0
@convert_cell_args
def write_boolean(self, row, col, boolean, cell_format=None):
"""
Write a boolean value to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
boolean: Cell data. bool type.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
if boolean:
value = 1
else:
value = 0
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_boolean_tuple(value, cell_format)
return 0
    # Write a hyperlink. This comprises two elements: the displayed string
    # and the non-displayed link. The displayed string is the same as the
    # link unless an alternative string is specified. The display string
    # is written using the write_string() method, so the 32767 character
    # string limit applies.
    #
    # The hyperlink can be a http, ftp, mailto, internal sheet or external
    # directory url.
@convert_cell_args
def write_url(self, row, col, url, cell_format=None,
string=None, tip=None):
"""
Write a hyperlink to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
url: Hyperlink url.
            cell_format: An optional cell Format object.
string: An optional display string for the hyperlink.
tip: An optional tooltip.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32767 characters.
-3: URL longer than Excel limit of 255 characters
-4: Exceeds Excel limit of 65,530 urls per worksheet
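        Example (illustrative urls, assuming ``worksheet`` from an open
        workbook):
            worksheet.write_url('A1', 'https://www.python.org/')
            worksheet.write_url('A2', 'internal:Sheet2!A1', string='Jump')
            worksheet.write_url('A3', 'mailto:user@example.com')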
"""
# Set the displayed string to the URL unless defined by the user.
if string is None:
string = url
# Default to external link type such as 'http://' or 'external:'.
link_type = 1
# Remove the URI scheme from internal links.
if re.match("internal:", url):
url = url.replace('internal:', '')
string = string.replace('internal:', '')
link_type = 2
# Remove the URI scheme from external links and change the directory
# separator from Unix to Dos.
external = False
if re.match("external:", url):
url = url.replace('external:', '')
url = url.replace('/', '\\')
string = string.replace('external:', '')
string = string.replace('/', '\\')
external = True
# Strip the mailto header.
string = string.replace('mailto:', '')
# Check that row and col are valid and store max and min values
if self._check_dimensions(row, col):
return -1
# Check that the string is < 32767 chars
str_error = 0
if len(string) > self.xls_strmax:
warn("Ignoring URL since it exceeds Excel's string limit of "
"32767 characters")
return -2
# Copy string for use in hyperlink elements.
url_str = string
# External links to URLs and to other Excel workbooks have slightly
# different characteristics that we have to account for.
if link_type == 1:
# Split url into the link and optional anchor/location.
if '#' in url:
url, url_str = url.split('#', 1)
else:
url_str = None
url = self._escape_url(url)
if url_str is not None and not external:
url_str = self._escape_url(url_str)
# Add the file:/// URI to the url for Windows style "C:/" link and
# Network shares.
            if re.match(r'\w:', url) or re.match(r'\\', url):
url = 'file:///' + url
# Convert a .\dir\file.xlsx link to dir\file.xlsx.
url = re.sub(r'^\.\\', '', url)
# Excel limits the escaped URL and location/anchor to 255 characters.
tmp_url_str = url_str or ''
if len(url) > 255 or len(tmp_url_str) > 255:
warn("Ignoring URL '%s' with link or location/anchor > 255 "
"characters since it exceeds Excel's limit for URLS" %
force_unicode(url))
return -3
# Check the limit of URLS per worksheet.
self.hlink_count += 1
if self.hlink_count > 65530:
warn("Ignoring URL '%s' since it exceeds Excel's limit of "
"65,530 URLS per worksheet." % force_unicode(url))
            return -4
# Write previous row if in in-line string optimization mode.
if self.optimization == 1 and row > self.previous_row:
self._write_single_row(row)
# Add the default URL format.
if cell_format is None:
cell_format = self.default_url_format
# Write the hyperlink string.
self.write_string(row, col, string, cell_format)
# Store the hyperlink data in a separate structure.
self.hyperlinks[row][col] = {
'link_type': link_type,
'url': url,
'str': url_str,
'tip': tip}
return str_error
@convert_cell_args
def write_rich_string(self, row, col, *args):
"""
Write a "rich" string with multiple formats to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string_parts: String and format pairs.
cell_format: Optional Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
-3: 2 consecutive formats used.
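        Example (illustrative, assuming ``bold`` and ``italic`` were
        created with workbook.add_format()):
            worksheet.write_rich_string('A1',
                                        'This is ', bold, 'bold',
                                        ' and this is ', italic, 'italic')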
"""
tokens = list(args)
cell_format = None
str_length = 0
string_index = 0
# Check that row and col are valid and store max and min values
if self._check_dimensions(row, col):
return -1
# If the last arg is a format we use it as the cell format.
if isinstance(tokens[-1], Format):
cell_format = tokens.pop()
# Create a temp XMLWriter object and use it to write the rich string
# XML to a string.
fh = StringIO()
self.rstring = XMLwriter()
self.rstring._set_filehandle(fh)
# Create a temp format with the default font for unformatted fragments.
default = Format()
# Convert list of format, string tokens to pairs of (format, string)
# except for the first string fragment which doesn't require a default
# formatting run. Use the default for strings without a leading format.
fragments = []
previous = 'format'
pos = 0
for token in tokens:
if not isinstance(token, Format):
# Token is a string.
if previous != 'format':
# If previous token wasn't a format add one before string.
fragments.append(default)
fragments.append(token)
else:
# If previous token was a format just add the string.
fragments.append(token)
# Keep track of actual string str_length.
str_length += len(token)
previous = 'string'
else:
# Can't allow 2 formats in a row.
if previous == 'format' and pos > 0:
return -3
# Token is a format object. Add it to the fragment list.
fragments.append(token)
previous = 'format'
pos += 1
# If the first token is a string start the <r> element.
if not isinstance(fragments[0], Format):
self.rstring._xml_start_tag('r')
# Write the XML elements for the $format $string fragments.
for token in fragments:
if isinstance(token, Format):
# Write the font run.
self.rstring._xml_start_tag('r')
self._write_font(token)
else:
# Write the string fragment part, with whitespace handling.
attributes = []
                if re.search(r'^\s', token) or re.search(r'\s$', token):
attributes.append(('xml:space', 'preserve'))
self.rstring._xml_data_element('t', token, attributes)
self.rstring._xml_end_tag('r')
# Read the in-memory string.
string = self.rstring.fh.getvalue()
# Check that the string is < 32767 chars.
if str_length > self.xls_strmax:
return -2
# Write a shared string or an in-line string in optimization mode.
if self.optimization == 0:
string_index = self.str_table._get_shared_string_index(string)
else:
string_index = string
# Write previous row if in in-line string optimization mode.
if self.optimization and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_string_tuple(string_index, cell_format)
return 0
@convert_cell_args
def write_row(self, row, col, data, cell_format=None):
"""
Write a row of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
            cell_format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
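        Example (illustrative; write_column() is the column-wise
        equivalent):
            worksheet.write_row('A1', [1, 2, 3])    # Writes A1, B1, C1.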
"""
for token in data:
error = self.write(row, col, token, cell_format)
if error:
return error
col += 1
return 0
@convert_cell_args
def write_column(self, row, col, data, cell_format=None):
"""
Write a column of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
            cell_format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self.write(row, col, token, cell_format)
if error:
return error
row += 1
return 0
@convert_cell_args
    def insert_image(self, row, col, filename, options=None):
"""
Insert an image with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
filename: Path and filename for image in PNG, JPG or BMP format.
options: Position, scale, url and data stream of the image.
Returns:
            0: Success.
            -1: Image file not found.
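        Example (illustrative, assuming 'logo.png' exists in the current
        directory):
            worksheet.insert_image('B2', 'logo.png')
            worksheet.insert_image('B8', 'logo.png',
                                   {'x_scale': 0.5, 'y_scale': 0.5})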
"""
        if options is None:
            options = {}
        x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
url = options.get('url', None)
tip = options.get('tip', None)
anchor = options.get('positioning', None)
image_data = options.get('image_data', None)
if not image_data and not os.path.exists(filename):
warn("Image file '%s' not found." % force_unicode(filename))
return -1
self.images.append([row, col, filename, x_offset, y_offset,
x_scale, y_scale, url, tip, anchor, image_data])
@convert_cell_args
def insert_textbox(self, row, col, text, options=None):
"""
        Insert a textbox with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
text: The text for the textbox.
options: Textbox options.
Returns:
0: Success.
"""
if options is None:
options = {}
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
self.shapes.append([row, col, x_offset, y_offset,
x_scale, y_scale, text, options])
@convert_cell_args
    def insert_chart(self, row, col, chart, options=None):
"""
        Insert a chart with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
chart: Chart object.
options: Position and scale of the chart.
Returns:
0: Success.
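        Example (an illustrative sketch, assuming ``workbook`` is an open
        Workbook with data in Sheet1!A1:A5):
            chart = workbook.add_chart({'type': 'line'})
            chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
            worksheet.insert_chart('D2', chart)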
"""
        if options is None:
            options = {}
        # Ensure a chart isn't inserted more than once.
if (chart.already_inserted or chart.combined
and chart.combined.already_inserted):
warn('Chart cannot be inserted in a worksheet more than once.')
return
else:
chart.already_inserted = True
if chart.combined:
chart.combined.already_inserted = True
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
# Allow Chart to override the scale and offset.
if chart.x_scale != 1:
x_scale = chart.x_scale
if chart.y_scale != 1:
y_scale = chart.y_scale
if chart.x_offset:
x_offset = chart.x_offset
if chart.y_offset:
y_offset = chart.y_offset
self.charts.append([row, col, chart,
x_offset, y_offset,
x_scale, y_scale])
@convert_cell_args
    def write_comment(self, row, col, comment, options=None):
"""
Write a comment to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
comment: Cell comment. Str.
options: Comment formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32k characters.
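        Example (illustrative, assuming ``worksheet`` from an open
        workbook):
            worksheet.write_comment('C3', 'Hello.')
            worksheet.write_comment('C6', 'Hi.', {'x_scale': 1.2})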
"""
        if options is None:
            options = {}
        # Check that row and col are valid and store max and min values
if self._check_dimensions(row, col):
return -1
# Check that the comment string is < 32767 chars.
if len(comment) > self.xls_strmax:
return -2
        self.has_vml = True
        self.has_comments = True
# Process the properties of the cell comment.
self.comments[row][col] = \
self._comment_params(row, col, comment, options)
def show_comments(self):
"""
Make any comments in the worksheet visible.
Args:
None.
Returns:
Nothing.
"""
self.comments_visible = 1
def set_comments_author(self, author):
"""
Set the default author of the cell comments.
Args:
author: Comment author name. String.
Returns:
Nothing.
"""
self.comments_author = author
def get_name(self):
"""
Retrieve the worksheet name.
Args:
None.
Returns:
            The worksheet name.
"""
# There is no set_name() method. Name must be set in add_worksheet().
return self.name
def activate(self):
"""
Set this worksheet as the active worksheet, i.e. the worksheet that is
displayed when the workbook is opened. Also set it as selected.
Note: An active worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0
self.selected = 1
self.worksheet_meta.activesheet = self.index
def select(self):
"""
Set current worksheet as a selected worksheet, i.e. the worksheet
has its tab highlighted.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.selected = 1
self.hidden = 0
def hide(self):
"""
Hide the current worksheet.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 1
# A hidden worksheet shouldn't be active or selected.
self.selected = 0
# TODO. Should add a check to see if the sheet is the global
# activesheet or firstsheet and reset them.
def set_first_sheet(self):
"""
Set current worksheet as the first visible sheet. This is necessary
when there are a large number of worksheets and the activated
worksheet is not visible on the screen.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0 # Active worksheet can't be hidden.
self.worksheet_meta.firstsheet = self.index
@convert_column_args
def set_column(self, firstcol, lastcol, width=None, cell_format=None,
                   options=None):
"""
Set the width, and other properties of a single column or a
range of columns.
Args:
firstcol: First column (zero-indexed).
lastcol: Last column (zero-indexed). Can be same as firstcol.
width: Column width. (optional).
cell_format: Column cell_format. (optional).
options: Dict of options such as hidden and level.
Returns:
0: Success.
-1: Column number is out of worksheet bounds.
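        Example (illustrative; 'A:B' style ranges are also accepted via
        the @convert_column_args decorator):
            worksheet.set_column(0, 1, 20)    # Columns A-B, width 20.
            worksheet.set_column('C:C', None, None, {'hidden': True})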
"""
        if options is None:
            options = {}
        # Ensure 2nd col is larger than first.
if firstcol > lastcol:
(firstcol, lastcol) = (lastcol, firstcol)
# Don't modify the row dimensions when checking the columns.
ignore_row = True
# Set optional column values.
hidden = options.get('hidden', False)
collapsed = options.get('collapsed', False)
level = options.get('level', 0)
# Store the column dimension only in some conditions.
if cell_format or (width and hidden):
ignore_col = False
else:
ignore_col = True
# Check that each column is valid and store the max and min values.
if self._check_dimensions(0, lastcol, ignore_row, ignore_col):
return -1
if self._check_dimensions(0, firstcol, ignore_row, ignore_col):
return -1
# Set the limits for the outline levels (0 <= x <= 7).
if level < 0:
level = 0
if level > 7:
level = 7
if level > self.outline_col_level:
self.outline_col_level = level
# Store the column data. Padded for sorting.
self.colinfo["%05d" % firstcol] = [firstcol, lastcol, width,
cell_format, hidden, level,
collapsed]
# Store the column change to allow optimizations.
self.col_size_changed = True
# Store the col sizes for use when calculating image vertices taking
# hidden columns into account. Also store the column formats.
# Set width to zero if col is hidden
if hidden:
width = 0
for col in range(firstcol, lastcol + 1):
self.col_sizes[col] = width
if cell_format:
self.col_formats[col] = cell_format
return 0
    def set_row(self, row, height=None, cell_format=None, options=None):
"""
        Set the height, and other properties of a row.
Args:
row: Row number (zero-indexed).
            height: Row height. (optional).
cell_format: Row cell_format. (optional).
options: Dict of options such as hidden, level and collapsed.
Returns:
0: Success.
-1: Row number is out of worksheet bounds.
"""
        if options is None:
            options = {}
        # Use minimum col in _check_dimensions().
if self.dim_colmin is not None:
min_col = self.dim_colmin
else:
min_col = 0
# Check that row is valid.
if self._check_dimensions(row, min_col):
return -1
if height is None:
height = self.default_row_height
# Set optional row values.
hidden = options.get('hidden', False)
collapsed = options.get('collapsed', False)
level = options.get('level', 0)
# If the height is 0 the row is hidden and the height is the default.
if height == 0:
hidden = 1
height = self.default_row_height
# Set the limits for the outline levels (0 <= x <= 7).
if level < 0:
level = 0
if level > 7:
level = 7
if level > self.outline_row_level:
self.outline_row_level = level
# Store the row properties.
self.set_rows[row] = [height, cell_format, hidden, level, collapsed]
# Store the row change to allow optimizations.
self.row_size_changed = True
if hidden:
height = 0
# Store the row sizes for use when calculating image vertices.
self.row_sizes[row] = height
def set_default_row(self, height=None, hide_unused_rows=False):
"""
Set the default row properties.
Args:
height: Default height. Optional, defaults to 15.
hide_unused_rows: Hide unused rows. Optional, defaults to False.
Returns:
Nothing.
"""
if height is None:
height = self.default_row_height
if height != self.original_row_height:
# Store the row change to allow optimizations.
self.row_size_changed = True
self.default_row_height = height
if hide_unused_rows:
self.default_row_zeroed = 1
@convert_range_args
def merge_range(self, first_row, first_col, last_row, last_col,
data, cell_format=None):
"""
Merge a range of cells.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
data: Cell data.
cell_format: Cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
other: Return value of write().
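        Example (illustrative, assuming ``merge_format`` was created with
        workbook.add_format({'align': 'center'})):
            worksheet.merge_range('B3:D4', 'Merged cells', merge_format)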
"""
# Merge a range of cells. The first cell should contain the data and
# the others should be blank. All cells should have the same format.
# Excel doesn't allow a single cell to be merged
if first_row == last_row and first_col == last_col:
warn("Can't merge single cell")
return
# Swap last row/col with first row/col as necessary
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# Check that column number is valid and store the max value
if self._check_dimensions(last_row, last_col) == -1:
return
# Store the merge range.
self.merge.append([first_row, first_col, last_row, last_col])
# Write the first cell
self.write(first_row, first_col, data, cell_format)
# Pad out the rest of the area with formatted blank cells.
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
continue
self.write_blank(row, col, '', cell_format)
@convert_range_args
def autofilter(self, first_row, first_col, last_row, last_col):
"""
Set the autofilter area in the worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
Nothing.
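        Example (illustrative, assuming data in A1:D51; rows that fail a
        subsequent filter_column() criteria must still be hidden manually
        with set_row()):
            worksheet.autofilter('A1:D51')
            worksheet.filter_column(0, 'x > 100 and x < 200')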
"""
# Reverse max and min values if necessary.
if last_row < first_row:
(first_row, last_row) = (last_row, first_row)
if last_col < first_col:
(first_col, last_col) = (last_col, first_col)
# Build up the print area range "Sheet1!$A$1:$C$13".
area = self._convert_name_area(first_row, first_col,
last_row, last_col)
ref = xl_range(first_row, first_col, last_row, last_col)
self.autofilter_area = area
self.autofilter_ref = ref
self.filter_range = [first_col, last_col]
def filter_column(self, col, criteria):
"""
Set the column filter criteria.
Args:
col: Filter column (zero-indexed).
criteria: Filter criteria.
Returns:
Nothing.
"""
if not self.autofilter_area:
warn("Must call autofilter() before filter_column()")
return
# Check for a column reference in A1 notation and substitute.
try:
int(col)
except ValueError:
# Convert col ref to a cell ref and then to a col number.
col_letter = col
(_, col) = xl_cell_to_rowcol(col + '1')
if col >= self.xls_colmax:
warn("Invalid column '%s'" % col_letter)
return
(col_first, col_last) = self.filter_range
# Reject column if it is outside filter range.
if col < col_first or col > col_last:
warn("Column '%d' outside autofilter() column range (%d, %d)"
% (col, col_first, col_last))
return
tokens = self._extract_filter_tokens(criteria)
if not (len(tokens) == 3 or len(tokens) == 7):
warn("Incorrect number of tokens in criteria '%s'" % criteria)
tokens = self._parse_filter_expression(criteria, tokens)
# Excel handles single or double custom filters as default filters.
# We need to check for them and handle them accordingly.
if len(tokens) == 2 and tokens[0] == 2:
# Single equality.
self.filter_column_list(col, [tokens[1]])
elif (len(tokens) == 5 and tokens[0] == 2 and tokens[2] == 1
and tokens[3] == 2):
# Double equality with "or" operator.
self.filter_column_list(col, [tokens[1], tokens[4]])
else:
# Non default custom filter.
self.filter_cols[col] = tokens
self.filter_type[col] = 0
self.filter_on = 1
def filter_column_list(self, col, filters):
"""
Set the column filter criteria in Excel 2007 list style.
Args:
col: Filter column (zero-indexed).
filters: List of filter criteria to match.
Returns:
Nothing.
"""
if not self.autofilter_area:
warn("Must call autofilter() before filter_column()")
return
# Check for a column reference in A1 notation and substitute.
try:
int(col)
except ValueError:
# Convert col ref to a cell ref and then to a col number.
col_letter = col
(_, col) = xl_cell_to_rowcol(col + '1')
if col >= self.xls_colmax:
warn("Invalid column '%s'" % col_letter)
return
(col_first, col_last) = self.filter_range
# Reject column if it is outside filter range.
if col < col_first or col > col_last:
warn("Column '%d' outside autofilter() column range "
"(%d,%d)" % (col, col_first, col_last))
return
self.filter_cols[col] = filters
self.filter_type[col] = 1
self.filter_on = 1
@convert_range_args
def data_validation(self, first_row, first_col, last_row, last_col,
options):
"""
Add a data validation to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Data validation options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Incorrect parameter or option.
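        Example (illustrative, assuming ``worksheet`` from an open
        workbook):
            worksheet.data_validation('B3', {'validate': 'integer',
                                             'criteria': 'between',
                                             'minimum': 1,
                                             'maximum': 10})
            worksheet.data_validation('C3', {'validate': 'list',
                                             'source': ['open', 'closed']})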
"""
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -1
if self._check_dimensions(last_row, last_col, True, True):
return -1
# List of valid input parameters.
valid_parameters = {
'validate': True,
'criteria': True,
'value': True,
'source': True,
'minimum': True,
'maximum': True,
'ignore_blank': True,
'dropdown': True,
'show_input': True,
'input_title': True,
'input_message': True,
'show_error': True,
'error_title': True,
'error_message': True,
'error_type': True,
'other_cells': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameters:
warn("Unknown parameter '%s' in data_validation()" % param_key)
return -2
# Map alternative parameter names 'source' or 'minimum' to 'value'.
if 'source' in options:
options['value'] = options['source']
if 'minimum' in options:
options['value'] = options['minimum']
# 'validate' is a required parameter.
if 'validate' not in options:
warn("Parameter 'validate' is required in data_validation()")
return -2
# List of valid validation types.
valid_types = {
'any': 'none',
'any value': 'none',
'whole number': 'whole',
'whole': 'whole',
'integer': 'whole',
'decimal': 'decimal',
'list': 'list',
'date': 'date',
'time': 'time',
'text length': 'textLength',
'length': 'textLength',
'custom': 'custom',
}
# Check for valid validation types.
        if options['validate'] not in valid_types:
warn("Unknown validation type '%s' for parameter "
"'validate' in data_validation()" % options['validate'])
return -2
else:
options['validate'] = valid_types[options['validate']]
# No action is required for validation type 'any' if there are no
# input messages to display.
if (options['validate'] == 'none'
and options.get('input_title') is None
and options.get('input_message') is None):
return -2
# The any, list and custom validations don't have a criteria so we use
# a default of 'between'.
if (options['validate'] == 'none'
or options['validate'] == 'list'
or options['validate'] == 'custom'):
options['criteria'] = 'between'
options['maximum'] = None
# 'criteria' is a required parameter.
if 'criteria' not in options:
warn("Parameter 'criteria' is required in data_validation()")
return -2
# List of valid criteria types.
criteria_types = {
'between': 'between',
'not between': 'notBetween',
'equal to': 'equal',
'=': 'equal',
'==': 'equal',
'not equal to': 'notEqual',
'!=': 'notEqual',
'<>': 'notEqual',
'greater than': 'greaterThan',
'>': 'greaterThan',
'less than': 'lessThan',
'<': 'lessThan',
'greater than or equal to': 'greaterThanOrEqual',
'>=': 'greaterThanOrEqual',
'less than or equal to': 'lessThanOrEqual',
'<=': 'lessThanOrEqual',
}
# Check for valid criteria types.
        if options['criteria'] not in criteria_types:
warn("Unknown criteria type '%s' for parameter "
"'criteria' in data_validation()" % options['criteria'])
return -2
else:
options['criteria'] = criteria_types[options['criteria']]
# 'Between' and 'Not between' criteria require 2 values.
if (options['criteria'] == 'between' or
options['criteria'] == 'notBetween'):
if 'maximum' not in options:
warn("Parameter 'maximum' is required in data_validation() "
"when using 'between' or 'not between' criteria")
return -2
else:
options['maximum'] = None
# List of valid error dialog types.
error_types = {
'stop': 0,
'warning': 1,
'information': 2,
}
# Check for valid error dialog types.
if 'error_type' not in options:
options['error_type'] = 0
        elif options['error_type'] not in error_types:
            warn("Unknown error type '%s' for parameter 'error_type' "
                 "in data_validation()" % options['error_type'])
return -2
else:
options['error_type'] = error_types[options['error_type']]
# Convert date/times value if required.
if options['validate'] == 'date' or options['validate'] == 'time':
if options['value']:
if not supported_datetime(options['value']):
warn("Data validation 'value/minimum' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['value'])
# Format date number to the same precision as Excel.
options['value'] = "%.15g" % date_time
if options['maximum']:
if not supported_datetime(options['maximum']):
warn("Conditional format 'maximum' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['maximum'])
options['maximum'] = "%.15g" % date_time
# Check that the input title doesn't exceed the maximum length.
if options.get('input_title') and len(options['input_title']) > 32:
warn("Length of input title '%s' exceeds Excel's limit of 32"
% force_unicode(options['input_title']))
return -2
# Check that the error title doesn't exceed the maximum length.
if options.get('error_title') and len(options['error_title']) > 32:
warn("Length of error title '%s' exceeds Excel's limit of 32"
% force_unicode(options['error_title']))
return -2
# Check that the input message doesn't exceed the maximum length.
if (options.get('input_message')
and len(options['input_message']) > 255):
warn("Length of input message '%s' exceeds Excel's limit of 255"
% force_unicode(options['input_message']))
return -2
# Check that the error message doesn't exceed the maximum length.
if (options.get('error_message')
and len(options['error_message']) > 255):
warn("Length of error message '%s' exceeds Excel's limit of 255"
% force_unicode(options['error_message']))
return -2
# Check that the input list doesn't exceed the maximum length.
if options['validate'] == 'list' and type(options['value']) is list:
formula = self._csv_join(*options['value'])
if len(formula) > 255:
warn("Length of list items '%s' exceeds Excel's limit of "
"255, use a formula range instead"
% force_unicode(formula))
return -2
# Set some defaults if they haven't been defined by the user.
if 'ignore_blank' not in options:
options['ignore_blank'] = 1
if 'dropdown' not in options:
options['dropdown'] = 1
if 'show_input' not in options:
options['show_input'] = 1
if 'show_error' not in options:
options['show_error'] = 1
# These are the cells to which the validation is applied.
options['cells'] = [[first_row, first_col, last_row, last_col]]
# A (for now) undocumented parameter to pass additional cell ranges.
if 'other_cells' in options:
options['cells'].extend(options['other_cells'])
# Store the validation information until we close the worksheet.
self.validations.append(options)
@convert_range_args
def conditional_format(self, first_row, first_col, last_row, last_col,
options=None):
"""
Add a conditional format to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Conditional format options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Incorrect parameter or option.
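        Example (illustrative, assuming ``red_format`` was created with
        workbook.add_format()):
            worksheet.conditional_format('A1:A4', {'type': 'cell',
                                                   'criteria': '>=',
                                                   'value': 50,
                                                   'format': red_format})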
"""
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -1
if self._check_dimensions(last_row, last_col, True, True):
return -1
if options is None:
options = {}
# Copy the user defined options so they aren't modified.
options = options.copy()
# List of valid input parameters.
valid_parameter = {
'type': True,
'format': True,
'criteria': True,
'value': True,
'minimum': True,
'maximum': True,
'min_type': True,
'mid_type': True,
'max_type': True,
'min_value': True,
'mid_value': True,
'max_value': True,
'min_color': True,
'mid_color': True,
'max_color': True,
'multi_range': True,
            'bar_color': True}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameter:
warn("Unknown parameter '%s' in conditional_formatting()" %
param_key)
return -2
# 'type' is a required parameter.
if 'type' not in options:
warn("Parameter 'type' is required in conditional_formatting()")
return -2
# List of valid validation types.
valid_type = {
'cell': 'cellIs',
'date': 'date',
'time': 'time',
'average': 'aboveAverage',
'duplicate': 'duplicateValues',
'unique': 'uniqueValues',
'top': 'top10',
'bottom': 'top10',
'text': 'text',
'time_period': 'timePeriod',
'blanks': 'containsBlanks',
'no_blanks': 'notContainsBlanks',
'errors': 'containsErrors',
'no_errors': 'notContainsErrors',
'2_color_scale': '2_color_scale',
'3_color_scale': '3_color_scale',
'data_bar': 'dataBar',
'formula': 'expression'}
# Check for valid validation types.
if options['type'] not in valid_type:
warn("Unknown validation type '%s' for parameter 'type' "
"in conditional_formatting()" % options['type'])
return -2
else:
if options['type'] == 'bottom':
options['direction'] = 'bottom'
options['type'] = valid_type[options['type']]
# List of valid criteria types.
criteria_type = {
'between': 'between',
'not between': 'notBetween',
'equal to': 'equal',
'=': 'equal',
'==': 'equal',
'not equal to': 'notEqual',
'!=': 'notEqual',
'<>': 'notEqual',
'greater than': 'greaterThan',
'>': 'greaterThan',
'less than': 'lessThan',
'<': 'lessThan',
'greater than or equal to': 'greaterThanOrEqual',
'>=': 'greaterThanOrEqual',
'less than or equal to': 'lessThanOrEqual',
'<=': 'lessThanOrEqual',
'containing': 'containsText',
'not containing': 'notContains',
'begins with': 'beginsWith',
'ends with': 'endsWith',
'yesterday': 'yesterday',
'today': 'today',
'last 7 days': 'last7Days',
'last week': 'lastWeek',
'this week': 'thisWeek',
'continue week': 'continueWeek',
'last month': 'lastMonth',
'this month': 'thisMonth',
'continue month': 'continueMonth'}
# Check for valid criteria types.
if 'criteria' in options and options['criteria'] in criteria_type:
options['criteria'] = criteria_type[options['criteria']]
# Convert date/times value if required.
if options['type'] == 'date' or options['type'] == 'time':
options['type'] = 'cellIs'
if 'value' in options:
if not supported_datetime(options['value']):
warn("Conditional format 'value' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['value'])
# Format date number to the same precision as Excel.
options['value'] = "%.15g" % date_time
if 'minimum' in options:
if not supported_datetime(options['minimum']):
warn("Conditional format 'minimum' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['minimum'])
options['minimum'] = "%.15g" % date_time
if 'maximum' in options:
if not supported_datetime(options['maximum']):
warn("Conditional format 'maximum' must be a "
"datetime object.")
return -2
else:
date_time = self._convert_date_time(options['maximum'])
options['maximum'] = "%.15g" % date_time
# Swap last row/col for first row/col as necessary
if first_row > last_row:
first_row, last_row = last_row, first_row
if first_col > last_col:
first_col, last_col = last_col, first_col
# Set the formatting range.
# If the first and last cell are the same write a single cell.
if first_row == last_row and first_col == last_col:
cell_range = xl_rowcol_to_cell(first_row, first_col)
start_cell = cell_range
else:
cell_range = xl_range(first_row, first_col, last_row, last_col)
start_cell = xl_rowcol_to_cell(first_row, first_col)
# Override with user defined multiple range if provided.
if 'multi_range' in options:
cell_range = options['multi_range']
cell_range = cell_range.replace('$', '')
# Get the dxf format index.
if 'format' in options and options['format']:
options['format'] = options['format']._get_dxf_index()
# Set the priority based on the order of adding.
options['priority'] = self.dxf_priority
self.dxf_priority += 1
# Special handling of text criteria.
if options['type'] == 'text':
if options['criteria'] == 'containsText':
options['type'] = 'containsText'
options['formula'] = ('NOT(ISERROR(SEARCH("%s",%s)))'
% (options['value'], start_cell))
elif options['criteria'] == 'notContains':
options['type'] = 'notContainsText'
options['formula'] = ('ISERROR(SEARCH("%s",%s))'
% (options['value'], start_cell))
elif options['criteria'] == 'beginsWith':
options['type'] = 'beginsWith'
options['formula'] = ('LEFT(%s,%d)="%s"'
% (start_cell,
len(options['value']),
options['value']))
elif options['criteria'] == 'endsWith':
options['type'] = 'endsWith'
options['formula'] = ('RIGHT(%s,%d)="%s"'
% (start_cell,
len(options['value']),
options['value']))
else:
warn("Invalid text criteria 'options['criteria']' "
"in conditional_formatting()")
        # Special handling of time_period criteria.
if options['type'] == 'timePeriod':
if options['criteria'] == 'yesterday':
options['formula'] = 'FLOOR(%s,1)=TODAY()-1' % start_cell
elif options['criteria'] == 'today':
options['formula'] = 'FLOOR(%s,1)=TODAY()' % start_cell
elif options['criteria'] == 'tomorrow':
options['formula'] = 'FLOOR(%s,1)=TODAY()+1' % start_cell
elif options['criteria'] == 'last7Days':
options['formula'] = \
('AND(TODAY()-FLOOR(%s,1)<=6,FLOOR(%s,1)<=TODAY())' %
(start_cell, start_cell))
elif options['criteria'] == 'lastWeek':
options['formula'] = \
('AND(TODAY()-ROUNDDOWN(%s,0)>=(WEEKDAY(TODAY())),'
'TODAY()-ROUNDDOWN(%s,0)<(WEEKDAY(TODAY())+7))' %
(start_cell, start_cell))
elif options['criteria'] == 'thisWeek':
options['formula'] = \
('AND(TODAY()-ROUNDDOWN(%s,0)<=WEEKDAY(TODAY())-1,'
'ROUNDDOWN(%s,0)-TODAY()<=7-WEEKDAY(TODAY()))' %
(start_cell, start_cell))
elif options['criteria'] == 'continueWeek':
options['formula'] = \
('AND(ROUNDDOWN(%s,0)-TODAY()>(7-WEEKDAY(TODAY())),'
'ROUNDDOWN(%s,0)-TODAY()<(15-WEEKDAY(TODAY())))' %
(start_cell, start_cell))
elif options['criteria'] == 'lastMonth':
                options['formula'] = \
                    ('AND(MONTH(%s)=MONTH(TODAY())-1,OR(YEAR(%s)=YEAR('
                     'TODAY()),AND(MONTH(%s)=1,YEAR(%s)=YEAR(TODAY())-1)))' %
                     (start_cell, start_cell, start_cell, start_cell))
elif options['criteria'] == 'thisMonth':
options['formula'] = \
('AND(MONTH(%s)=MONTH(TODAY()),YEAR(%s)=YEAR(TODAY()))' %
(start_cell, start_cell))
elif options['criteria'] == 'continueMonth':
options['formula'] = \
('AND(MONTH(%s)=MONTH(TODAY())+1,OR(YEAR(%s)=YEAR('
'TODAY()),AND(MONTH(%s)=12,YEAR(%s)=YEAR(TODAY())+1)))' %
(start_cell, start_cell, start_cell, start_cell))
else:
warn("Invalid time_period criteria 'options['criteria']' "
"in conditional_formatting()")
# Special handling of blanks/error types.
if options['type'] == 'containsBlanks':
options['formula'] = 'LEN(TRIM(%s))=0' % start_cell
if options['type'] == 'notContainsBlanks':
options['formula'] = 'LEN(TRIM(%s))>0' % start_cell
if options['type'] == 'containsErrors':
options['formula'] = 'ISERROR(%s)' % start_cell
if options['type'] == 'notContainsErrors':
options['formula'] = 'NOT(ISERROR(%s))' % start_cell
# Special handling for 2 color scale.
if options['type'] == '2_color_scale':
options['type'] = 'colorScale'
# Color scales don't use any additional formatting.
options['format'] = None
# Turn off 3 color parameters.
options['mid_type'] = None
options['mid_color'] = None
options.setdefault('min_type', 'min')
options.setdefault('max_type', 'max')
options.setdefault('min_value', 0)
options.setdefault('max_value', 0)
options.setdefault('min_color', '#FF7128')
options.setdefault('max_color', '#FFEF9C')
options['min_color'] = xl_color(options['min_color'])
options['max_color'] = xl_color(options['max_color'])
# Special handling for 3 color scale.
if options['type'] == '3_color_scale':
options['type'] = 'colorScale'
# Color scales don't use any additional formatting.
options['format'] = None
options.setdefault('min_type', 'min')
options.setdefault('mid_type', 'percentile')
options.setdefault('max_type', 'max')
options.setdefault('min_value', 0)
options.setdefault('max_value', 0)
options.setdefault('min_color', '#F8696B')
options.setdefault('mid_color', '#FFEB84')
options.setdefault('max_color', '#63BE7B')
options['min_color'] = xl_color(options['min_color'])
options['mid_color'] = xl_color(options['mid_color'])
options['max_color'] = xl_color(options['max_color'])
# Set a default mid value.
if 'mid_value' not in options:
options['mid_value'] = 50
# Special handling for data bar.
if options['type'] == 'dataBar':
# Color scales don't use any additional formatting.
options['format'] = None
options.setdefault('min_type', 'min')
options.setdefault('max_type', 'max')
options.setdefault('min_value', 0)
options.setdefault('max_value', 0)
options.setdefault('bar_color', '#638EC6')
options['bar_color'] = xl_color(options['bar_color'])
# Store the validation information until we close the worksheet.
if cell_range in self.cond_formats:
self.cond_formats[cell_range].append(options)
else:
self.cond_formats[cell_range] = [options]
@convert_range_args
def add_table(self, first_row, first_col, last_row, last_col,
options=None):
"""
Add an Excel table to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Table format options. (Optional)
Returns:
0: Success.
-1: Not supported in optimization mode.
-2: Row or column is out of worksheet bounds.
-3: Incorrect parameter or option.
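        Example (an illustrative sketch; ``data`` and the headers are
        hypothetical):
            data = [['apples', 100], ['pears', 120]]
            worksheet.add_table('B2:C5', {'data': data,
                                          'columns': [{'header': 'Fruit'},
                                                      {'header': 'Qty'}]})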
"""
table = {}
col_formats = {}
if options is None:
options = {}
if self.optimization == 1:
warn("add_table() isn't supported when set_optimization() is on")
return -1
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -2
if self._check_dimensions(last_row, last_col, True, True):
return -2
# List of valid input parameters.
valid_parameter = {
'autofilter': True,
'banded_columns': True,
'banded_rows': True,
'columns': True,
'data': True,
'first_column': True,
'header_row': True,
'last_column': True,
'name': True,
'style': True,
'total_row': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameter:
warn("Unknown parameter '%s' in add_table()" % param_key)
return -3
# Turn on Excel's defaults.
options['banded_rows'] = options.get('banded_rows', True)
options['header_row'] = options.get('header_row', True)
options['autofilter'] = options.get('autofilter', True)
# Set the table options.
table['show_first_col'] = options.get('first_column', False)
table['show_last_col'] = options.get('last_column', False)
table['show_row_stripes'] = options.get('banded_rows', False)
table['show_col_stripes'] = options.get('banded_columns', False)
table['header_row_count'] = options.get('header_row', 0)
table['totals_row_shown'] = options.get('total_row', False)
# Set the table name.
if 'name' in options:
name = options['name']
table['name'] = name
if ' ' in name:
warn("Name '%s' in add_table() cannot contain spaces"
% force_unicode(name))
return -3
# Warn if the name contains invalid chars as defined by Excel.
if (not re.match(r'^[\w\\][\w\\.]*$', name, re.UNICODE)
or re.match(r'^\d', name)):
warn("Invalid Excel characters in add_table(): '%s'"
% force_unicode(name))
                return -3
# Warn if the name looks like a cell name.
if re.match(r'^[a-zA-Z][a-zA-Z]?[a-dA-D]?[0-9]+$', name):
warn("Name looks like a cell name in add_table(): '%s'"
% force_unicode(name))
                return -3
# Warn if the name looks like a R1C1 cell reference.
if (re.match(r'^[rcRC]$', name)
or re.match(r'^[rcRC]\d+[rcRC]\d+$', name)):
warn("Invalid name '%s' like a RC cell ref in add_table()"
% force_unicode(name))
                return -3
# Set the table style.
if 'style' in options:
table['style'] = options['style']
# Remove whitespace from style name.
table['style'] = table['style'].replace(' ', '')
else:
table['style'] = "TableStyleMedium9"
# Swap last row/col for first row/col as necessary.
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# Set the data range rows (without the header and footer).
first_data_row = first_row
last_data_row = last_row
        if options['header_row']:
            first_data_row += 1
        if options.get('total_row'):
            last_data_row -= 1
# Set the table and autofilter ranges.
table['range'] = xl_range(first_row, first_col,
last_row, last_col)
table['a_range'] = xl_range(first_row, first_col,
last_data_row, last_col)
        # If the header row is off, the default is to turn the autofilter off.
if not options['header_row']:
options['autofilter'] = 0
# Set the autofilter range.
if options['autofilter']:
table['autofilter'] = table['a_range']
# Add the table columns.
col_id = 1
table['columns'] = []
for col_num in range(first_col, last_col + 1):
# Set up the default column data.
col_data = {
'id': col_id,
'name': 'Column' + str(col_id),
'total_string': '',
'total_function': '',
'total_value': 0,
'formula': '',
'format': None,
'name_format': None,
}
            # Overwrite the defaults with any user defined values.
if 'columns' in options:
# Check if there are user defined values for this column.
user_data = options['columns'][col_id - 1]
if user_data:
# Get the column format.
xformat = user_data.get('format', None)
# Map user defined values to internal values.
if user_data.get('header'):
col_data['name'] = user_data['header']
col_data['name_format'] = user_data.get('header_format')
# Handle the column formula.
if 'formula' in user_data and user_data['formula']:
formula = user_data['formula']
# Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
# Covert Excel 2010 "@" ref to 2007 "#This Row".
formula = formula.replace('@', '[#This Row],')
col_data['formula'] = formula
for row in range(first_data_row, last_data_row + 1):
self.write_formula(row, col_num, formula, xformat)
# Handle the function for the total row.
if user_data.get('total_function'):
function = user_data['total_function']
# Massage the function name.
function = function.lower()
function = function.replace('_', '')
function = function.replace(' ', '')
if function == 'countnums':
function = 'countNums'
if function == 'stddev':
function = 'stdDev'
col_data['total_function'] = function
formula = \
self._table_function_to_formula(function,
col_data['name'])
value = user_data.get('total_value', 0)
self.write_formula(last_row, col_num, formula, xformat,
value)
elif user_data.get('total_string'):
# Total label only (not a function).
total_string = user_data['total_string']
col_data['total_string'] = total_string
self.write_string(last_row, col_num, total_string,
user_data.get('format'))
# Get the dxf format index.
if xformat is not None:
col_data['format'] = xformat._get_dxf_index()
# Store the column format for writing the cell data.
# It doesn't matter if it is undefined.
col_formats[col_id - 1] = xformat
# Store the column data.
table['columns'].append(col_data)
# Write the column headers to the worksheet.
if options['header_row']:
self.write_string(first_row, col_num, col_data['name'],
col_data['name_format'])
col_id += 1
# Write the cell data if supplied.
if 'data' in options:
data = options['data']
i = 0 # For indexing the row data.
for row in range(first_data_row, last_data_row + 1):
j = 0 # For indexing the col data.
for col in range(first_col, last_col + 1):
if i < len(data) and j < len(data[i]):
token = data[i][j]
if j in col_formats:
self.write(row, col, token, col_formats[j])
else:
self.write(row, col, token, None)
j += 1
i += 1
# Store the table data.
self.tables.append(table)
        return 0
@convert_cell_args
def add_sparkline(self, row, col, options):
"""
Add sparklines to the worksheet.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
options: Sparkline formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Incorrect parameter or option.
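        Example (illustrative, assuming data in A1:E1 of the same
        worksheet):
            worksheet.add_sparkline('F1', {'range': 'A1:E1',
                                           'type': 'column',
                                           'style': 12})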
"""
# Check that row and col are valid without storing the values.
if self._check_dimensions(row, col, True, True):
return -1
sparkline = {'locations': [xl_rowcol_to_cell(row, col)]}
# List of valid input parameters.
valid_parameters = {
'location': True,
'range': True,
'type': True,
'high_point': True,
'low_point': True,
'negative_points': True,
'first_point': True,
'last_point': True,
'markers': True,
'style': True,
'series_color': True,
'negative_color': True,
'markers_color': True,
'first_color': True,
'last_color': True,
'high_color': True,
'low_color': True,
'max': True,
'min': True,
'axis': True,
'reverse': True,
'empty_cells': True,
'show_hidden': True,
'plot_hidden': True,
'date_axis': True,
'weight': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameters:
warn("Unknown parameter '%s' in add_sparkline()" % param_key)
                return -2
# 'range' is a required parameter.
if 'range' not in options:
warn("Parameter 'range' is required in add_sparkline()")
return -2
# Handle the sparkline type.
spark_type = options.get('type', 'line')
if spark_type not in ('line', 'column', 'win_loss'):
warn("Parameter 'type' must be 'line', 'column' "
"or 'win_loss' in add_sparkline()")
return -2
if spark_type == 'win_loss':
spark_type = 'stacked'
sparkline['type'] = spark_type
# We handle single location/range values or list of values.
if 'location' in options:
if type(options['location']) is list:
sparkline['locations'] = options['location']
else:
sparkline['locations'] = [options['location']]
if type(options['range']) is list:
sparkline['ranges'] = options['range']
else:
sparkline['ranges'] = [options['range']]
range_count = len(sparkline['ranges'])
location_count = len(sparkline['locations'])
# The ranges and locations must match.
if range_count != location_count:
warn("Must have the same number of location and range "
"parameters in add_sparkline()")
return -2
# Store the count.
sparkline['count'] = len(sparkline['locations'])
# Get the worksheet name for the range conversion below.
sheetname = quote_sheetname(self.name)
# Cleanup the input ranges.
new_ranges = []
for spark_range in sparkline['ranges']:
# Remove the absolute reference $ symbols.
spark_range = spark_range.replace('$', '')
# Remove the = from formula.
spark_range = spark_range.lstrip('=')
# Convert a simple range into a full Sheet1!A1:D1 range.
if '!' not in spark_range:
spark_range = sheetname + "!" + spark_range
new_ranges.append(spark_range)
sparkline['ranges'] = new_ranges
# Cleanup the input locations.
new_locations = []
for location in sparkline['locations']:
location = location.replace('$', '')
new_locations.append(location)
sparkline['locations'] = new_locations
# Map options.
sparkline['high'] = options.get('high_point')
sparkline['low'] = options.get('low_point')
sparkline['negative'] = options.get('negative_points')
sparkline['first'] = options.get('first_point')
sparkline['last'] = options.get('last_point')
sparkline['markers'] = options.get('markers')
sparkline['min'] = options.get('min')
sparkline['max'] = options.get('max')
sparkline['axis'] = options.get('axis')
sparkline['reverse'] = options.get('reverse')
sparkline['hidden'] = options.get('show_hidden')
sparkline['weight'] = options.get('weight')
# Map empty cells options.
empty = options.get('empty_cells', '')
if empty == 'zero':
sparkline['empty'] = 0
elif empty == 'connect':
sparkline['empty'] = 'span'
else:
sparkline['empty'] = 'gap'
# Map the date axis range.
date_range = options.get('date_axis')
if date_range and '!' not in date_range:
date_range = sheetname + "!" + date_range
sparkline['date_axis'] = date_range
# Set the sparkline styles.
style_id = options.get('style', 0)
style = get_sparkline_style(style_id)
sparkline['series_color'] = style['series']
sparkline['negative_color'] = style['negative']
sparkline['markers_color'] = style['markers']
sparkline['first_color'] = style['first']
sparkline['last_color'] = style['last']
sparkline['high_color'] = style['high']
sparkline['low_color'] = style['low']
# Override the style colors with user defined colors.
self._set_spark_color(sparkline, options, 'series_color')
self._set_spark_color(sparkline, options, 'negative_color')
self._set_spark_color(sparkline, options, 'markers_color')
self._set_spark_color(sparkline, options, 'first_color')
self._set_spark_color(sparkline, options, 'last_color')
self._set_spark_color(sparkline, options, 'high_color')
self._set_spark_color(sparkline, options, 'low_color')
self.sparklines.append(sparkline)
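    # A usage sketch for add_sparkline() (hedged: the cell and the data
    # range are illustrative). All option keys shown appear in the
    # valid_parameters dict above:
    #
    #     worksheet.add_sparkline(0, 5, {'range': 'Sheet1!A1:E1',
    #                                    'type': 'column',
    #                                    'markers': True})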
@convert_range_args
def set_selection(self, first_row, first_col, last_row, last_col):
"""
        Set the selected cell or cells in a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
0: Nothing.
"""
pane = None
# Range selection. Do this before swapping max/min to allow the
# selection direction to be reversed.
active_cell = xl_rowcol_to_cell(first_row, first_col)
# Swap last row/col for first row/col if necessary
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# If the first and last cell are the same write a single cell.
if (first_row == last_row) and (first_col == last_col):
sqref = active_cell
else:
sqref = xl_range(first_row, first_col, last_row, last_col)
# Selection isn't set for cell A1.
if sqref == 'A1':
return
self.selections = [[pane, active_cell, sqref]]
def outline_settings(self, outline_on=1, outline_below=1, outline_right=1,
outline_style=0):
"""
Control outline settings.
Args:
outline_on: Outlines are visible. Optional, defaults to True.
outline_below: Show row outline symbols below the outline bar.
Optional, defaults to True.
outline_right: Show column outline symbols to the right of the
outline bar. Optional, defaults to True.
outline_style: Use Automatic style. Optional, defaults to False.
Returns:
0: Nothing.
"""
self.outline_on = outline_on
self.outline_below = outline_below
self.outline_right = outline_right
self.outline_style = outline_style
self.outline_changed = True
@convert_cell_args
def freeze_panes(self, row, col, top_row=None, left_col=None, pane_type=0):
"""
Create worksheet panes and mark them as frozen.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
top_row: Topmost visible row in scrolling region of pane.
            left_col: Leftmost visible column in scrolling region of pane.
Returns:
0: Nothing.
"""
if top_row is None:
top_row = row
if left_col is None:
left_col = col
self.panes = [row, col, top_row, left_col, pane_type]
@convert_cell_args
def split_panes(self, x, y, top_row=None, left_col=None):
"""
Create worksheet panes and mark them as split.
Args:
x: The position for the vertical split.
y: The position for the horizontal split.
top_row: Topmost visible row in scrolling region of pane.
            left_col: Leftmost visible column in scrolling region of pane.
Returns:
0: Nothing.
"""
# Same as freeze panes with a different pane type.
self.freeze_panes(x, y, top_row, left_col, 2)
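    # Usage sketches for the pane methods (illustrative positions):
    #
    #     worksheet.freeze_panes(1, 0)     # Freeze the first row.
    #     worksheet.freeze_panes(1, 2)     # Freeze row 1 and columns A:B.
    #     worksheet.split_panes(15, 8.43)  # Split at the default sizes.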
def set_zoom(self, zoom=100):
"""
Set the worksheet zoom factor.
Args:
zoom: Scale factor: 10 <= zoom <= 400.
Returns:
Nothing.
"""
# Ensure the zoom scale is in Excel's range.
if zoom < 10 or zoom > 400:
warn("Zoom factor %d outside range: 10 <= zoom <= 400" % zoom)
zoom = 100
self.zoom = int(zoom)
def right_to_left(self):
"""
Display the worksheet right to left for some versions of Excel.
Args:
None.
Returns:
Nothing.
"""
self.is_right_to_left = 1
def hide_zero(self):
"""
Hide zero values in worksheet cells.
Args:
None.
Returns:
Nothing.
"""
self.show_zeros = 0
def set_tab_color(self, color):
"""
Set the color of the worksheet tab.
Args:
            color: A #RGB color string or a named color.
Returns:
Nothing.
"""
self.tab_color = xl_color(color)
def protect(self, password='', options=None):
"""
Set the password and protection options of the worksheet.
Args:
password: An optional password string.
options: A dictionary of worksheet objects to protect.
Returns:
Nothing.
"""
if password != '':
password = self._encode_password(password)
if not options:
options = {}
# Default values for objects that can be protected.
defaults = {
'sheet': True,
'content': False,
'objects': False,
'scenarios': False,
'format_cells': False,
'format_columns': False,
'format_rows': False,
'insert_columns': False,
'insert_rows': False,
'insert_hyperlinks': False,
'delete_columns': False,
'delete_rows': False,
'select_locked_cells': True,
'sort': False,
'autofilter': False,
'pivot_tables': False,
'select_unlocked_cells': True}
# Overwrite the defaults with user specified values.
for key in (options.keys()):
if key in defaults:
defaults[key] = options[key]
else:
warn("Unknown protection object: '%s'" % key)
# Set the password after the user defined values.
defaults['password'] = password
self.protect_options = defaults
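    # A usage sketch for protect() (hedged: the password and options are
    # illustrative; keys not in the defaults above trigger a warning):
    #
    #     worksheet.protect('abc123', {'insert_rows': True,
    #                                  'format_cells': True})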
@convert_cell_args
    def insert_button(self, row, col, options=None):
"""
Insert a button form object into the worksheet.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
options: Button formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
        # Avoid a mutable default argument by creating the dict here.
        if options is None:
            options = {}
        button = self._button_params(row, col, options)
self.buttons_list.append(button)
self.has_vml = 1
###########################################################################
#
# Public API. Page Setup methods.
#
###########################################################################
def set_landscape(self):
"""
Set the page orientation as landscape.
Args:
None.
Returns:
Nothing.
"""
self.orientation = 0
self.page_setup_changed = True
def set_portrait(self):
"""
Set the page orientation as portrait.
Args:
None.
Returns:
Nothing.
"""
self.orientation = 1
self.page_setup_changed = True
def set_page_view(self):
"""
Set the page view mode.
Args:
None.
Returns:
Nothing.
"""
self.page_view = 1
def set_paper(self, paper_size):
"""
Set the paper type. US Letter = 1, A4 = 9.
Args:
paper_size: Paper index.
Returns:
Nothing.
"""
if paper_size:
self.paper_size = paper_size
self.page_setup_changed = True
def center_horizontally(self):
"""
Center the page horizontally.
Args:
None.
Returns:
Nothing.
"""
self.print_options_changed = True
self.hcenter = 1
def center_vertically(self):
"""
Center the page vertically.
Args:
None.
Returns:
Nothing.
"""
self.print_options_changed = True
self.vcenter = 1
def set_margins(self, left=0.7, right=0.7, top=0.75, bottom=0.75):
"""
Set all the page margins in inches.
Args:
left: Left margin.
right: Right margin.
top: Top margin.
bottom: Bottom margin.
Returns:
Nothing.
"""
self.margin_left = left
self.margin_right = right
self.margin_top = top
self.margin_bottom = bottom
def set_header(self, header='', options=None, margin=None):
"""
Set the page header caption and optional margin.
Args:
header: Header string.
margin: Header margin.
options: Header options, mainly for images.
Returns:
Nothing.
"""
header_orig = header
header = header.replace('&[Picture]', '&G')
if len(header) >= 255:
warn('Header string must be less than 255 characters')
return
if options is not None:
# For backward compatibility allow options to be the margin.
if not isinstance(options, dict):
options = {'margin': options}
else:
options = {}
# For backward compatibility.
if margin is not None:
options['margin'] = margin
# Reset the list in case the function is called more than once.
self.header_images = []
if options.get('image_left'):
self.header_images.append([options.get('image_left'),
options.get('image_data_left'),
'LH'])
if options.get('image_center'):
self.header_images.append([options.get('image_center'),
options.get('image_data_center'),
'CH'])
if options.get('image_right'):
self.header_images.append([options.get('image_right'),
options.get('image_data_right'),
'RH'])
placeholder_count = header.count('&G')
image_count = len(self.header_images)
if placeholder_count != image_count:
warn("Number of header images (%s) doesn't match placeholder "
"count (%s) in string: %s"
% (image_count, placeholder_count, header_orig))
self.header_images = []
return
if 'align_with_margins' in options:
self.header_footer_aligns = options['align_with_margins']
if 'scale_with_doc' in options:
self.header_footer_scales = options['scale_with_doc']
self.header = header
self.margin_header = options.get('margin', 0.3)
self.header_footer_changed = True
if image_count:
self.has_header_vml = True
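    # A usage sketch for set_header() (hedged example strings). Each '&G'
    # (or '&[Picture]') placeholder must be matched by one image option:
    #
    #     worksheet.set_header('&L&G &CPage &P of &N',
    #                          {'image_left': 'logo.png'})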
def set_footer(self, footer='', options=None, margin=None):
"""
Set the page footer caption and optional margin.
Args:
footer: Footer string.
margin: Footer margin.
options: Footer options, mainly for images.
Returns:
Nothing.
"""
footer_orig = footer
footer = footer.replace('&[Picture]', '&G')
if len(footer) >= 255:
warn('Footer string must be less than 255 characters')
return
if options is not None:
# For backward compatibility allow options to be the margin.
if not isinstance(options, dict):
options = {'margin': options}
else:
options = {}
# For backward compatibility.
if margin is not None:
options['margin'] = margin
# Reset the list in case the function is called more than once.
self.footer_images = []
if options.get('image_left'):
self.footer_images.append([options.get('image_left'),
options.get('image_data_left'),
'LF'])
if options.get('image_center'):
self.footer_images.append([options.get('image_center'),
options.get('image_data_center'),
'CF'])
if options.get('image_right'):
self.footer_images.append([options.get('image_right'),
options.get('image_data_right'),
'RF'])
placeholder_count = footer.count('&G')
image_count = len(self.footer_images)
if placeholder_count != image_count:
warn("Number of footer images (%s) doesn't match placeholder "
"count (%s) in string: %s"
% (image_count, placeholder_count, footer_orig))
self.footer_images = []
return
if 'align_with_margins' in options:
self.header_footer_aligns = options['align_with_margins']
if 'scale_with_doc' in options:
self.header_footer_scales = options['scale_with_doc']
self.footer = footer
self.margin_footer = options.get('margin', 0.3)
self.header_footer_changed = True
if image_count:
self.has_header_vml = True
def repeat_rows(self, first_row, last_row=None):
"""
Set the rows to repeat at the top of each printed page.
Args:
first_row: Start row for range.
last_row: End row for range.
Returns:
Nothing.
"""
if last_row is None:
last_row = first_row
# Convert rows to 1 based.
first_row += 1
last_row += 1
# Create the row range area like: $1:$2.
area = '$%d:$%d' % (first_row, last_row)
# Build up the print titles area "Sheet1!$1:$2"
sheetname = quote_sheetname(self.name)
self.repeat_row_range = sheetname + '!' + area
@convert_column_args
def repeat_columns(self, first_col, last_col=None):
"""
Set the columns to repeat at the left hand side of each printed page.
Args:
first_col: Start column for range.
last_col: End column for range.
Returns:
Nothing.
"""
if last_col is None:
last_col = first_col
# Convert to A notation.
first_col = xl_col_to_name(first_col, 1)
last_col = xl_col_to_name(last_col, 1)
# Create a column range like $C:$D.
area = first_col + ':' + last_col
# Build up the print area range "=Sheet2!$C:$D"
sheetname = quote_sheetname(self.name)
self.repeat_col_range = sheetname + "!" + area
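    # Usage sketches (zero-indexed args, converted internally to the
    # $1:$2 and $C:$D style ranges built above):
    #
    #     worksheet.repeat_rows(0)        # Repeat the first row.
    #     worksheet.repeat_rows(0, 1)     # Repeat the first two rows.
    #     worksheet.repeat_columns(2, 3)  # Repeat columns $C:$D.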
def hide_gridlines(self, option=1):
"""
Set the option to hide gridlines on the screen and the printed page.
Args:
option: 0 : Don't hide gridlines
1 : Hide printed gridlines only
2 : Hide screen and printed gridlines
Returns:
Nothing.
"""
if option == 0:
self.print_gridlines = 1
self.screen_gridlines = 1
self.print_options_changed = True
elif option == 1:
self.print_gridlines = 0
self.screen_gridlines = 1
else:
self.print_gridlines = 0
self.screen_gridlines = 0
def print_row_col_headers(self):
"""
Set the option to print the row and column headers on the printed page.
Args:
None.
Returns:
Nothing.
"""
self.print_headers = 1
self.print_options_changed = True
@convert_range_args
def print_area(self, first_row, first_col, last_row, last_col):
"""
Set the print area in the current worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Set the print area in the current worksheet.
# Ignore max print area since it is the same as no area for Excel.
if (first_row == 0 and first_col == 0
and last_row == self.xls_rowmax - 1
and last_col == self.xls_colmax - 1):
return
# Build up the print area range "Sheet1!$A$1:$C$13".
area = self._convert_name_area(first_row, first_col,
last_row, last_col)
self.print_area_range = area
def print_across(self):
"""
Set the order in which pages are printed.
Args:
None.
Returns:
Nothing.
"""
self.page_order = 1
self.page_setup_changed = True
def fit_to_pages(self, width, height):
"""
Fit the printed area to a specific number of pages both vertically and
horizontally.
Args:
width: Number of pages horizontally.
height: Number of pages vertically.
Returns:
Nothing.
"""
self.fit_page = 1
self.fit_width = width
self.fit_height = height
self.page_setup_changed = True
def set_start_page(self, start_page):
"""
Set the start page number when printing.
Args:
start_page: Start page number.
Returns:
Nothing.
"""
self.page_start = start_page
def set_print_scale(self, scale):
"""
Set the scale factor for the printed page.
Args:
scale: Print scale. 10 <= scale <= 400.
Returns:
Nothing.
"""
# Confine the scale to Excel's range.
if scale < 10 or scale > 400:
warn("Print scale '%d' outside range: 10 <= scale <= 400" % scale)
return
# Turn off "fit to page" option when print scale is on.
self.fit_page = 0
self.print_scale = int(scale)
self.page_setup_changed = True
def set_h_pagebreaks(self, breaks):
"""
Set the horizontal page breaks on a worksheet.
Args:
breaks: List of rows where the page breaks should be added.
Returns:
Nothing.
"""
self.hbreaks = breaks
def set_v_pagebreaks(self, breaks):
"""
        Set the vertical page breaks on a worksheet.
Args:
breaks: List of columns where the page breaks should be added.
Returns:
Nothing.
"""
self.vbreaks = breaks
def set_vba_name(self, name=None):
"""
Set the VBA name for the worksheet. By default this is the
same as the sheet name: i.e., Sheet1 etc.
Args:
name: The VBA name for the worksheet.
Returns:
Nothing.
"""
if name is not None:
self.vba_codename = name
else:
self.vba_codename = self.name
###########################################################################
#
# Private API.
#
###########################################################################
def _initialize(self, init_data):
self.name = init_data['name']
self.index = init_data['index']
self.str_table = init_data['str_table']
self.worksheet_meta = init_data['worksheet_meta']
self.optimization = init_data['optimization']
self.tmpdir = init_data['tmpdir']
self.date_1904 = init_data['date_1904']
self.strings_to_numbers = init_data['strings_to_numbers']
self.strings_to_formulas = init_data['strings_to_formulas']
self.strings_to_urls = init_data['strings_to_urls']
self.nan_inf_to_errors = init_data['nan_inf_to_errors']
self.default_date_format = init_data['default_date_format']
self.default_url_format = init_data['default_url_format']
self.excel2003_style = init_data['excel2003_style']
if self.excel2003_style:
self.original_row_height = 12.75
self.default_row_height = 12.75
self.default_row_pixels = 17
self.margin_left = 0.75
self.margin_right = 0.75
self.margin_top = 1
self.margin_bottom = 1
self.margin_header = 0.5
self.margin_footer = 0.5
self.header_footer_aligns = False
# Open a temp filehandle to store row data in optimization mode.
if self.optimization == 1:
# This is sub-optimal but we need to create a temp file
# with utf8 encoding in Python < 3.
(fd, filename) = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.row_data_filename = filename
self.row_data_fh = codecs.open(filename, 'w+', 'utf-8')
# Set as the worksheet filehandle until the file is assembled.
self.fh = self.row_data_fh
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the root worksheet element.
self._write_worksheet()
# Write the worksheet properties.
self._write_sheet_pr()
# Write the worksheet dimensions.
self._write_dimension()
# Write the sheet view properties.
self._write_sheet_views()
# Write the sheet format properties.
self._write_sheet_format_pr()
# Write the sheet column info.
self._write_cols()
# Write the worksheet data such as rows columns and cells.
if self.optimization == 0:
self._write_sheet_data()
else:
self._write_optimized_sheet_data()
# Write the sheetProtection element.
self._write_sheet_protection()
# Write the phoneticPr element.
if self.excel2003_style:
self._write_phonetic_pr()
# Write the autoFilter element.
self._write_auto_filter()
# Write the mergeCells element.
self._write_merge_cells()
# Write the conditional formats.
self._write_conditional_formats()
# Write the dataValidations element.
self._write_data_validations()
# Write the hyperlink element.
self._write_hyperlinks()
# Write the printOptions element.
self._write_print_options()
# Write the worksheet page_margins.
self._write_page_margins()
# Write the worksheet page setup.
self._write_page_setup()
# Write the headerFooter element.
self._write_header_footer()
# Write the rowBreaks element.
self._write_row_breaks()
# Write the colBreaks element.
self._write_col_breaks()
# Write the drawing element.
self._write_drawings()
# Write the legacyDrawing element.
self._write_legacy_drawing()
# Write the legacyDrawingHF element.
self._write_legacy_drawing_hf()
# Write the tableParts element.
self._write_table_parts()
# Write the extLst and sparklines.
self._write_ext_sparklines()
# Close the worksheet tag.
self._xml_end_tag('worksheet')
# Close the file.
self._xml_close()
def _check_dimensions(self, row, col, ignore_row=False, ignore_col=False):
# Check that row and col are valid and store the max and min
# values for use in other methods/elements. The ignore_row /
        # ignore_col flags are used to indicate that we wish to perform
        # the dimension check without storing the value. The ignore
        # flags are used by set_row() and data_validation().
# Check that the row/col are within the worksheet bounds.
if row < 0 or col < 0:
return -1
if row >= self.xls_rowmax or col >= self.xls_colmax:
return -1
# In optimization mode we don't change dimensions for rows
# that are already written.
if not ignore_row and not ignore_col and self.optimization == 1:
if row < self.previous_row:
return -2
if not ignore_row:
if self.dim_rowmin is None or row < self.dim_rowmin:
self.dim_rowmin = row
if self.dim_rowmax is None or row > self.dim_rowmax:
self.dim_rowmax = row
if not ignore_col:
if self.dim_colmin is None or col < self.dim_colmin:
self.dim_colmin = col
if self.dim_colmax is None or col > self.dim_colmax:
self.dim_colmax = col
return 0
def _convert_date_time(self, dt_obj):
# Convert a datetime object to an Excel serial date and time.
return datetime_to_excel_datetime(dt_obj, self.date_1904)
def _convert_name_area(self, row_num_1, col_num_1, row_num_2, col_num_2):
# Convert zero indexed rows and columns to the format required by
# worksheet named ranges, eg, "Sheet1!$A$1:$C$13".
range1 = ''
range2 = ''
area = ''
row_col_only = 0
# Convert to A1 notation.
col_char_1 = xl_col_to_name(col_num_1, 1)
col_char_2 = xl_col_to_name(col_num_2, 1)
row_char_1 = '$' + str(row_num_1 + 1)
row_char_2 = '$' + str(row_num_2 + 1)
# We need to handle special cases that refer to rows or columns only.
if row_num_1 == 0 and row_num_2 == self.xls_rowmax - 1:
range1 = col_char_1
range2 = col_char_2
row_col_only = 1
elif col_num_1 == 0 and col_num_2 == self.xls_colmax - 1:
range1 = row_char_1
range2 = row_char_2
row_col_only = 1
else:
range1 = col_char_1 + row_char_1
range2 = col_char_2 + row_char_2
# A repeated range is only written once (if it isn't a special case).
if range1 == range2 and not row_col_only:
area = range1
else:
area = range1 + ':' + range2
# Build up the print area range "Sheet1!$A$1:$C$13".
sheetname = quote_sheetname(self.name)
area = sheetname + "!" + area
return area
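    # Worked examples for _convert_name_area(), assuming the standard
    # 1048576 x 16384 worksheet limits:
    #
    #     (0, 0, 8, 2)       -> "Sheet1!$A$1:$C$9"
    #     (0, 0, 1048575, 0) -> "Sheet1!$A:$A"  (column-only special case)
    #     (4, 0, 4, 16383)   -> "Sheet1!$5:$5"  (row-only special case)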
def _sort_pagebreaks(self, breaks):
# This is an internal method used to filter elements of a list of
# pagebreaks used in the _store_hbreak() and _store_vbreak() methods.
# It:
# 1. Removes duplicate entries from the list.
# 2. Sorts the list.
# 3. Removes 0 from the list if present.
if not breaks:
return
breaks_set = set(breaks)
if 0 in breaks_set:
breaks_set.remove(0)
breaks_list = list(breaks_set)
breaks_list.sort()
# The Excel 2007 specification says that the maximum number of page
# breaks is 1026. However, in practice it is actually 1023.
max_num_breaks = 1023
if len(breaks_list) > max_num_breaks:
breaks_list = breaks_list[:max_num_breaks]
return breaks_list
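    # For example, an input of [8, 4, 4, 0] loses the duplicate and the
    # 0 entry and is returned as the sorted list [4, 8].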
def _extract_filter_tokens(self, expression):
# Extract the tokens from the filter expression. The tokens are mainly
# non-whitespace groups. The only tricky part is to extract string
# tokens that contain whitespace and/or quoted double quotes (Excel's
# escaped quotes).
#
# Examples: 'x < 2000'
# 'x > 2000 and x < 5000'
# 'x = "foo"'
# 'x = "foo bar"'
# 'x = "foo "" bar"'
#
if not expression:
return []
token_re = re.compile(r'"(?:[^"]|"")*"|\S+')
tokens = token_re.findall(expression)
new_tokens = []
# Remove single leading and trailing quotes and un-escape other quotes.
for token in tokens:
if token.startswith('"'):
token = token[1:]
if token.endswith('"'):
token = token[:-1]
token = token.replace('""', '"')
new_tokens.append(token)
return new_tokens
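    # For example, the expression 'x = "foo "" bar"' tokenizes to
    # ['x', '=', '"foo "" bar"'] and is cleaned up to
    # ['x', '=', 'foo " bar'].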
def _parse_filter_expression(self, expression, tokens):
# Converts the tokens of a possibly conditional expression into 1 or 2
# sub expressions for further parsing.
#
# Examples:
# ('x', '==', 2000) -> exp1
# ('x', '>', 2000, 'and', 'x', '<', 5000) -> exp1 and exp2
if len(tokens) == 7:
# The number of tokens will be either 3 (for 1 expression)
# or 7 (for 2 expressions).
conditional = tokens[3]
if re.match('(and|&&)', conditional):
conditional = 0
            elif re.match(r'(or|\|\|)', conditional):
conditional = 1
else:
warn("Token '%s' is not a valid conditional "
"in filter expression '%s'" % (conditional, expression))
expression_1 = self._parse_filter_tokens(expression, tokens[0:3])
expression_2 = self._parse_filter_tokens(expression, tokens[4:7])
return expression_1 + [conditional] + expression_2
else:
return self._parse_filter_tokens(expression, tokens)
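    # For example, 'x > 2000 and x < 5000' produces 7 tokens and parses
    # to [4, '2000', 0, 1, '5000']: two (operator, token) pairs joined
    # by the conditional 0 for 'and' (1 would mean 'or').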
def _parse_filter_tokens(self, expression, tokens):
# Parse the 3 tokens of a filter expression and return the operator
# and token. The use of numbers instead of operators is a legacy of
# Spreadsheet::WriteExcel.
operators = {
'==': 2,
'=': 2,
'=~': 2,
'eq': 2,
'!=': 5,
'!~': 5,
'ne': 5,
'<>': 5,
'<': 1,
'<=': 3,
'>': 4,
'>=': 6,
}
operator = operators.get(tokens[1], None)
token = tokens[2]
# Special handling of "Top" filter expressions.
if re.match('top|bottom', tokens[0].lower()):
value = int(tokens[1])
if value < 1 or value > 500:
warn("The value '%d' in expression '%s' "
"must be in the range 1 to 500" % (value, expression))
token = token.lower()
if token != 'items' and token != '%':
warn("The type '%s' in expression '%s' "
"must be either 'items' or '%'" % (token, expression))
if tokens[0].lower() == 'top':
operator = 30
else:
operator = 32
if tokens[2] == '%':
operator += 1
token = str(value)
if not operator and tokens[0]:
warn("Token '%s' is not a valid operator "
"in filter expression '%s'" % (token[0], expression))
# Special handling for Blanks/NonBlanks.
if re.match('blanks|nonblanks', token.lower()):
# Only allow Equals or NotEqual in this context.
if operator != 2 and operator != 5:
warn("The operator '%s' in expression '%s' "
"is not valid in relation to Blanks/NonBlanks'"
% (tokens[1], expression))
token = token.lower()
# The operator should always be 2 (=) to flag a "simple" equality
# in the binary record. Therefore we convert <> to =.
if token == 'blanks':
if operator == 5:
token = ' '
else:
if operator == 5:
operator = 2
token = 'blanks'
else:
operator = 5
token = ' '
# if the string token contains an Excel match character then change the
# operator type to indicate a non "simple" equality.
if operator == 2 and re.search('[*?]', token):
operator = 22
return [operator, token]
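    # Worked examples:
    #     ('x', '>', '2000')     -> [4, '2000']
    #     ('Top', '10', 'items') -> [30, '10']  (31 for a 'Top 10 %')
    #     ('x', '==', 'Blanks')  -> [2, 'blanks']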
def _encode_password(self, plaintext):
# Encode the worksheet protection "password" as a simple hash.
# Based on the algorithm by Daniel Rentz of OpenOffice.
i = 0
count = len(plaintext)
digits = []
for char in plaintext:
i += 1
char = ord(char) << i
low_15 = char & 0x7fff
high_15 = char & 0x7fff << 15
high_15 >>= 15
char = low_15 | high_15
digits.append(char)
password_hash = 0x0000
for digit in digits:
password_hash ^= digit
password_hash ^= count
password_hash ^= 0xCE4B
return "%X" % password_hash
def _prepare_image(self, index, image_id, drawing_id, width, height,
name, image_type, x_dpi, y_dpi):
# Set up images/drawings.
drawing_type = 2
(row, col, _, x_offset, y_offset,
x_scale, y_scale, url, tip, anchor, _) = self.images[index]
width *= x_scale
height *= y_scale
# Scale by non 96dpi resolutions.
width *= 96.0 / x_dpi
height *= 96.0 / y_dpi
dimensions = self._position_object_emus(col, row, x_offset, y_offset,
width, height)
# Convert from pixels to emus.
width = int(0.5 + (width * 9525))
height = int(0.5 + (height * 9525))
# Create a Drawing obj to use with worksheet unless one already exists.
if not self.drawing:
drawing = Drawing()
drawing.embedded = 1
self.drawing = drawing
self.external_drawing_links.append(['/drawing',
'../drawings/drawing'
+ str(drawing_id)
+ '.xml', None])
else:
drawing = self.drawing
drawing_object = [drawing_type]
drawing_object.extend(dimensions)
drawing_object.extend([width, height, name, None, url, tip, anchor])
drawing._add_drawing_object(drawing_object)
if url:
rel_type = "/hyperlink"
target_mode = "External"
if re.match('(ftp|http)s?://', url):
target = url
if re.match('external:', url):
target = url.replace('external:', '')
if re.match("internal:", url):
target = url.replace('internal:', '#')
target_mode = None
self.drawing_links.append([rel_type, target, target_mode])
self.drawing_links.append(['/image',
'../media/image'
+ str(image_id) + '.'
+ image_type])
def _prepare_shape(self, index, drawing_id):
# Set up shapes/drawings.
drawing_type = 3
(row, col, x_offset, y_offset,
x_scale, y_scale, text, options) = self.shapes[index]
width = options.get('width', self.default_col_pixels * 3)
height = options.get('height', self.default_row_pixels * 6)
width *= x_scale
height *= y_scale
dimensions = self._position_object_emus(col, row, x_offset, y_offset,
width, height)
# Convert from pixels to emus.
width = int(0.5 + (width * 9525))
height = int(0.5 + (height * 9525))
# Create a Drawing obj to use with worksheet unless one already exists.
if not self.drawing:
drawing = Drawing()
drawing.embedded = 1
self.drawing = drawing
self.external_drawing_links.append(['/drawing',
'../drawings/drawing'
+ str(drawing_id)
+ '.xml', None])
else:
drawing = self.drawing
shape = Shape('rect', 'TextBox', options)
shape.text = text
drawing_object = [drawing_type]
drawing_object.extend(dimensions)
drawing_object.extend([width, height, None, shape, None,
None, None])
drawing._add_drawing_object(drawing_object)
def _prepare_header_image(self, image_id, width, height, name, image_type,
position, x_dpi, y_dpi):
# Set up an image without a drawing object for header/footer images.
# Strip the extension from the filename.
        name = re.sub(r'\..*$', '', name)
self.header_images_list.append([width, height, name, position,
x_dpi, y_dpi])
self.vml_drawing_links.append(['/image',
'../media/image'
+ str(image_id) + '.'
+ image_type])
def _prepare_chart(self, index, chart_id, drawing_id):
# Set up chart/drawings.
drawing_type = 1
(row, col, chart, x_offset, y_offset, x_scale, y_scale) = \
self.charts[index]
chart.id = chart_id - 1
# Use user specified dimensions, if any.
width = int(0.5 + (chart.width * x_scale))
height = int(0.5 + (chart.height * y_scale))
dimensions = self._position_object_emus(col, row, x_offset, y_offset,
width, height)
# Set the chart name for the embedded object if it has been specified.
name = chart.chart_name
# Create a Drawing obj to use with worksheet unless one already exists.
if not self.drawing:
drawing = Drawing()
drawing.embedded = 1
self.drawing = drawing
self.external_drawing_links.append(['/drawing',
'../drawings/drawing'
+ str(drawing_id)
+ '.xml'])
else:
drawing = self.drawing
drawing_object = [drawing_type]
drawing_object.extend(dimensions)
drawing_object.extend([width, height, name, None])
drawing._add_drawing_object(drawing_object)
self.drawing_links.append(['/chart',
'../charts/chart'
+ str(chart_id)
+ '.xml'])
def _position_object_emus(self, col_start, row_start, x1, y1,
width, height):
# Calculate the vertices that define the position of a graphical
# object within the worksheet in EMUs.
#
# The vertices are expressed as English Metric Units (EMUs). There are
        # 12,700 EMUs per point. Therefore, 12,700 * 3/4 = 9,525 EMUs per
        # pixel.
(col_start, row_start, x1, y1,
col_end, row_end, x2, y2, x_abs, y_abs) = \
self._position_object_pixels(col_start, row_start, x1, y1,
width, height)
# Convert the pixel values to EMUs. See above.
x1 = int(0.5 + 9525 * x1)
y1 = int(0.5 + 9525 * y1)
x2 = int(0.5 + 9525 * x2)
y2 = int(0.5 + 9525 * y2)
x_abs = int(0.5 + 9525 * x_abs)
y_abs = int(0.5 + 9525 * y_abs)
return (col_start, row_start, x1, y1, col_end, row_end, x2, y2,
x_abs, y_abs)
# Calculate the vertices that define the position of a graphical object
# within the worksheet in pixels.
#
# +------------+------------+
# | A | B |
# +-----+------------+------------+
# | |(x1,y1) | |
# | 1 |(A1)._______|______ |
# | | | | |
# | | | | |
# +-----+----| OBJECT |-----+
# | | | | |
# | 2 | |______________. |
# | | | (B2)|
# | | | (x2,y2)|
# +---- +------------+------------+
#
# Example of an object that covers some of the area from cell A1 to B2.
#
# Based on the width and height of the object we need to calculate 8 vars:
#
# col_start, row_start, col_end, row_end, x1, y1, x2, y2.
#
# We also calculate the absolute x and y position of the top left vertex of
# the object. This is required for images.
#
# The width and height of the cells that the object occupies can be
# variable and have to be taken into account.
#
# The values of col_start and row_start are passed in from the calling
# function. The values of col_end and row_end are calculated by
# subtracting the width and height of the object from the width and
# height of the underlying cells.
#
def _position_object_pixels(self, col_start, row_start, x1, y1,
width, height):
# col_start # Col containing upper left corner of object.
# x1 # Distance to left side of object.
#
# row_start # Row containing top left corner of object.
# y1 # Distance to top of object.
#
# col_end # Col containing lower right corner of object.
# x2 # Distance to right side of object.
#
# row_end # Row containing bottom right corner of object.
# y2 # Distance to bottom of object.
#
# width # Width of object frame.
# height # Height of object frame.
#
# x_abs # Absolute distance to left side of object.
# y_abs # Absolute distance to top side of object.
x_abs = 0
y_abs = 0
# Adjust start column for negative offsets.
while x1 < 0 and col_start > 0:
x1 += self._size_col(col_start - 1)
col_start -= 1
# Adjust start row for negative offsets.
while y1 < 0 and row_start > 0:
y1 += self._size_row(row_start - 1)
row_start -= 1
# Ensure that the image isn't shifted off the page at top left.
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
# Calculate the absolute x offset of the top-left vertex.
if self.col_size_changed:
for col_id in range(col_start):
x_abs += self._size_col(col_id)
else:
# Optimization for when the column widths haven't changed.
x_abs += self.default_col_pixels * col_start
x_abs += x1
# Calculate the absolute y offset of the top-left vertex.
if self.row_size_changed:
for row_id in range(row_start):
y_abs += self._size_row(row_id)
else:
# Optimization for when the row heights haven't changed.
y_abs += self.default_row_pixels * row_start
y_abs += y1
# Adjust start column for offsets that are greater than the col width.
while x1 >= self._size_col(col_start):
x1 -= self._size_col(col_start)
col_start += 1
# Adjust start row for offsets that are greater than the row height.
while y1 >= self._size_row(row_start):
y1 -= self._size_row(row_start)
row_start += 1
# Initialize end cell to the same as the start cell.
col_end = col_start
row_end = row_start
width = width + x1
height = height + y1
# Subtract the underlying cell widths to find end cell of the object.
while width >= self._size_col(col_end):
width -= self._size_col(col_end)
col_end += 1
# Subtract the underlying cell heights to find end cell of the object.
while height >= self._size_row(row_end):
height -= self._size_row(row_end)
row_end += 1
# The end vertices are whatever is left from the width and height.
x2 = width
y2 = height
return ([col_start, row_start, x1, y1, col_end, row_end, x2, y2,
x_abs, y_abs])
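    # A worked example, assuming the default cell sizes of 64 pixels per
    # column and 20 pixels per row: a 128 x 40 pixel object anchored at
    # B2 (col_start=1, row_start=1) with zero offsets spans two columns
    # and two rows and returns [1, 1, 0, 0, 3, 3, 0, 0, 64, 20].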
def _size_col(self, col):
# Convert the width of a cell from user's units to pixels. Excel rounds
# the column width to the nearest pixel. If the width hasn't been set
# by the user we use the default value. If the column is hidden it
# has a value of zero.
        max_digit_width = 7  # For Calibri 11.
padding = 5
pixels = 0
# Look up the cell value to see if it has been changed.
if col in self.col_sizes and self.col_sizes[col] is not None:
width = self.col_sizes[col]
# Convert to pixels.
if width == 0:
pixels = 0
elif width < 1:
pixels = int(width * (max_digit_width + padding) + 0.5)
else:
pixels = int(width * max_digit_width + 0.5) + padding
else:
pixels = self.default_col_pixels
return pixels
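    # For example, a user column width of 10 converts to
    # int(10 * 7 + 0.5) + 5 = 75 pixels, while a sub-character width of
    # 0.5 converts to int(0.5 * 12 + 0.5) = 6 pixels.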
def _size_row(self, row):
# Convert the height of a cell from user's units to pixels. If the
# height hasn't been set by the user we use the default value. If
# the row is hidden it has a value of zero.
pixels = 0
# Look up the cell value to see if it has been changed
if row in self.row_sizes:
height = self.row_sizes[row]
if height == 0:
pixels = 0
else:
pixels = int(4.0 / 3.0 * height)
else:
pixels = int(4.0 / 3.0 * self.default_row_height)
return pixels
def _comment_params(self, row, col, string, options):
# This method handles the additional optional parameters to
# write_comment() as well as calculating the comment object
# position and vertices.
default_width = 128
default_height = 74
params = {
'author': None,
'color': '#ffffe1',
'start_cell': None,
'start_col': None,
'start_row': None,
'visible': None,
'width': default_width,
'height': default_height,
'x_offset': None,
'x_scale': 1,
'y_offset': None,
'y_scale': 1,
}
# Overwrite the defaults with any user supplied values. Incorrect or
# misspelled parameters are silently ignored.
for key in options.keys():
params[key] = options[key]
# Ensure that a width and height have been set.
if not params['width']:
params['width'] = default_width
if not params['height']:
params['height'] = default_height
# Set the comment background color.
params['color'] = xl_color(params['color']).lower()
# Convert from Excel XML style color to XML html style color.
params['color'] = params['color'].replace('ff', '#', 1)
# Convert a cell reference to a row and column.
if params['start_cell'] is not None:
(start_row, start_col) = xl_cell_to_rowcol(params['start_cell'])
params['start_row'] = start_row
params['start_col'] = start_col
# Set the default start cell and offsets for the comment. These are
# generally fixed in relation to the parent cell. However there are
# some edge cases for cells at the, er, edges.
row_max = self.xls_rowmax
col_max = self.xls_colmax
if params['start_row'] is None:
if row == 0:
params['start_row'] = 0
elif row == row_max - 3:
params['start_row'] = row_max - 7
elif row == row_max - 2:
params['start_row'] = row_max - 6
elif row == row_max - 1:
params['start_row'] = row_max - 5
else:
params['start_row'] = row - 1
if params['y_offset'] is None:
if row == 0:
params['y_offset'] = 2
elif row == row_max - 3:
params['y_offset'] = 16
elif row == row_max - 2:
params['y_offset'] = 16
elif row == row_max - 1:
params['y_offset'] = 14
else:
params['y_offset'] = 10
if params['start_col'] is None:
if col == col_max - 3:
params['start_col'] = col_max - 6
elif col == col_max - 2:
params['start_col'] = col_max - 5
elif col == col_max - 1:
params['start_col'] = col_max - 4
else:
params['start_col'] = col + 1
if params['x_offset'] is None:
if col == col_max - 3:
params['x_offset'] = 49
elif col == col_max - 2:
params['x_offset'] = 49
elif col == col_max - 1:
params['x_offset'] = 49
else:
params['x_offset'] = 15
# Scale the size of the comment box if required.
if params['x_scale']:
params['width'] = params['width'] * params['x_scale']
if params['y_scale']:
params['height'] = params['height'] * params['y_scale']
# Round the dimensions to the nearest pixel.
params['width'] = int(0.5 + params['width'])
params['height'] = int(0.5 + params['height'])
# Calculate the positions of the comment object.
vertices = self._position_object_pixels(
params['start_col'], params['start_row'], params['x_offset'],
params['y_offset'], params['width'], params['height'])
# Add the width and height for VML.
vertices.append(params['width'])
vertices.append(params['height'])
return ([row, col, string, params['author'],
params['visible'], params['color']] + [vertices])
def _button_params(self, row, col, options):
# This method handles the parameters passed to insert_button() as well
        # as calculating the button object position and vertices.
default_height = self.default_row_pixels
default_width = self.default_col_pixels
button_number = 1 + len(self.buttons_list)
button = {'row': row, 'col': col, 'font': {}}
params = {}
# Overwrite the defaults with any user supplied values. Incorrect or
# misspelled parameters are silently ignored.
for key in options.keys():
params[key] = options[key]
# Set the button caption.
caption = params.get('caption')
# Set a default caption if none was specified by user.
if caption is None:
caption = 'Button %d' % button_number
button['font']['caption'] = caption
# Set the macro name.
if params.get('macro'):
button['macro'] = '[0]!' + params['macro']
else:
button['macro'] = '[0]!Button%d_Click' % button_number
# Ensure that a width and height have been set.
params['width'] = params.get('width', default_width)
params['height'] = params.get('height', default_height)
# Set the x/y offsets.
params['x_offset'] = params.get('x_offset', 0)
params['y_offset'] = params.get('y_offset', 0)
# Scale the size of the button if required.
params['width'] = params['width'] * params.get('x_scale', 1)
params['height'] = params['height'] * params.get('y_scale', 1)
# Round the dimensions to the nearest pixel.
params['width'] = int(0.5 + params['width'])
params['height'] = int(0.5 + params['height'])
params['start_row'] = row
params['start_col'] = col
# Calculate the positions of the button object.
vertices = self._position_object_pixels(
params['start_col'], params['start_row'], params['x_offset'],
params['y_offset'], params['width'], params['height'])
# Add the width and height for VML.
vertices.append(params['width'])
vertices.append(params['height'])
button['vertices'] = vertices
return button
def _prepare_vml_objects(self, vml_data_id, vml_shape_id, vml_drawing_id,
comment_id):
comments = []
# Sort the comments into row/column order for easier comparison
# testing and set the external links for comments and buttons.
row_nums = sorted(self.comments.keys())
for row in row_nums:
col_nums = sorted(self.comments[row].keys())
for col in col_nums:
# Set comment visibility if required and not user defined.
if self.comments_visible:
if self.comments[row][col][4] is None:
self.comments[row][col][4] = 1
# Set comment author if not already user defined.
if self.comments[row][col][3] is None:
self.comments[row][col][3] = self.comments_author
comments.append(self.comments[row][col])
self.external_vml_links.append(['/vmlDrawing',
'../drawings/vmlDrawing'
+ str(vml_drawing_id)
+ '.vml'])
if self.has_comments:
self.comments_list = comments
self.external_comment_links.append(['/comments',
'../comments'
+ str(comment_id)
+ '.xml'])
count = len(comments)
start_data_id = vml_data_id
# The VML o:idmap data id contains a comma separated range when there
# is more than one 1024 block of comments, like this: data="1,2".
for i in range(int(count / 1024)):
vml_data_id = '%s,%d' % (vml_data_id, start_data_id + i + 1)
self.vml_data_id = vml_data_id
self.vml_shape_id = vml_shape_id
return count
def _prepare_header_vml_objects(self, vml_header_id, vml_drawing_id):
# Set up external linkage for VML header/footer images.
self.vml_header_id = vml_header_id
self.external_vml_links.append(['/vmlDrawing',
'../drawings/vmlDrawing'
+ str(vml_drawing_id) + '.vml'])
def _prepare_tables(self, table_id, seen):
# Set the table ids for the worksheet tables.
for table in self.tables:
table['id'] = table_id
if table.get('name') is None:
# Set a default name.
table['name'] = 'Table' + str(table_id)
# Check for duplicate table names.
name = table['name'].lower()
if name in seen:
raise Exception("invalid duplicate table name '%s' found." %
table['name'])
else:
seen[name] = True
# Store the link used for the rels file.
self.external_table_links.append(['/table',
'../tables/table'
+ str(table_id)
+ '.xml'])
table_id += 1
def _table_function_to_formula(self, function, col_name):
# Convert a table total function to a worksheet formula.
formula = ''
subtotals = {
'average': 101,
'countNums': 102,
'count': 103,
'max': 104,
'min': 105,
'stdDev': 107,
'sum': 109,
'var': 110,
}
if function in subtotals:
func_num = subtotals[function]
formula = "SUBTOTAL(%s,[%s])" % (func_num, col_name)
else:
warn("Unsupported function '%s' in add_table()" % function)
return formula
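    # For example, a 'sum' total on a column named 'Sales' produces the
    # formula "SUBTOTAL(109,[Sales])".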
def _set_spark_color(self, sparkline, options, user_color):
# Set the sparkline color.
if user_color not in options:
return
sparkline[user_color] = {'rgb': xl_color(options[user_color])}
def _get_range_data(self, row_start, col_start, row_end, col_end):
# Returns a range of data from the worksheet _table to be used in
# chart cached data. Strings are returned as SST ids and decoded
# in the workbook. Return None for data that doesn't exist since
# Excel can chart series with data missing.
if self.optimization:
return ()
data = []
# Iterate through the table data.
for row_num in range(row_start, row_end + 1):
# Store None if row doesn't exist.
if row_num not in self.table:
data.append(None)
continue
for col_num in range(col_start, col_end + 1):
if col_num in self.table[row_num]:
cell = self.table[row_num][col_num]
if type(cell).__name__ == 'Number':
# Return a number with Excel's precision.
data.append("%.15g" % cell.number)
elif type(cell).__name__ == 'String':
                        # Return a string from its shared string index.
index = cell.string
string = self.str_table._get_shared_string(index)
data.append(string)
elif (type(cell).__name__ == 'Formula'
or type(cell).__name__ == 'ArrayFormula'):
# Return the formula value.
value = cell.value
if value is None:
value = 0
data.append(value)
elif type(cell).__name__ == 'Blank':
                        # Return an empty cell.
data.append('')
else:
# Store None if column doesn't exist.
data.append(None)
return data
def _csv_join(self, *items):
# Create a csv string for use with data validation formulas and lists.
# Convert non string types to string.
items = [str(item) if not isinstance(item, str_types) else item
for item in items]
return ','.join(items)
def _escape_url(self, url):
# Don't escape URL if it looks already escaped.
if re.search('%[0-9a-fA-F]{2}', url):
return url
# Can't use url.quote() here because it doesn't match Excel.
url = url.replace('%', '%25')
url = url.replace('"', '%22')
url = url.replace(' ', '%20')
url = url.replace('<', '%3c')
url = url.replace('>', '%3e')
url = url.replace('[', '%5b')
url = url.replace(']', '%5d')
url = url.replace('^', '%5e')
url = url.replace('`', '%60')
url = url.replace('{', '%7b')
url = url.replace('}', '%7d')
return url
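    # For example, 'http://example.com/foo bar' becomes
    # 'http://example.com/foo%20bar', while a URL that already contains
    # a %xx escape sequence is returned unchanged.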
###########################################################################
#
# The following font methods are, more or less, duplicated from the
# Styles class. Not the cleanest version of reuse but works for now.
#
###########################################################################
def _write_font(self, xf_format):
# Write the <font> element.
xml_writer = self.rstring
xml_writer._xml_start_tag('rPr')
# Handle the main font properties.
if xf_format.bold:
xml_writer._xml_empty_tag('b')
if xf_format.italic:
xml_writer._xml_empty_tag('i')
if xf_format.font_strikeout:
xml_writer._xml_empty_tag('strike')
if xf_format.font_outline:
xml_writer._xml_empty_tag('outline')
if xf_format.font_shadow:
xml_writer._xml_empty_tag('shadow')
# Handle the underline variants.
if xf_format.underline:
self._write_underline(xf_format.underline)
# Handle super/subscript.
if xf_format.font_script == 1:
self._write_vert_align('superscript')
if xf_format.font_script == 2:
self._write_vert_align('subscript')
# Write the font size
xml_writer._xml_empty_tag('sz', [('val', xf_format.font_size)])
# Handle colors.
if xf_format.theme:
self._write_color('theme', xf_format.theme)
elif xf_format.color_indexed:
self._write_color('indexed', xf_format.color_indexed)
elif xf_format.font_color:
color = self._get_palette_color(xf_format.font_color)
self._write_rstring_color('rgb', color)
else:
self._write_rstring_color('theme', 1)
# Write some other font properties related to font families.
xml_writer._xml_empty_tag('rFont', [('val', xf_format.font_name)])
xml_writer._xml_empty_tag('family', [('val', xf_format.font_family)])
if xf_format.font_name == 'Calibri' and not xf_format.hyperlink:
xml_writer._xml_empty_tag('scheme',
[('val', xf_format.font_scheme)])
xml_writer._xml_end_tag('rPr')
def _write_underline(self, underline):
# Write the underline font element.
attributes = []
# Handle the underline variants.
if underline == 2:
attributes = [('val', 'double')]
elif underline == 33:
attributes = [('val', 'singleAccounting')]
elif underline == 34:
attributes = [('val', 'doubleAccounting')]
self.rstring._xml_empty_tag('u', attributes)
def _write_vert_align(self, val):
# Write the <vertAlign> font sub-element.
attributes = [('val', val)]
self.rstring._xml_empty_tag('vertAlign', attributes)
def _write_rstring_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self.rstring._xml_empty_tag('color', attributes)
def _get_palette_color(self, color):
# Convert the RGB color.
if color[0] == '#':
color = color[1:]
return "FF" + color.upper()
def _isnan(self, x):
# Workaround for lack of math.isnan in Python 2.5/Jython.
return x != x
def _isinf(self, x):
# Workaround for lack of math.isinf in Python 2.5/Jython.
return (x - x) != 0
def _opt_close(self):
# Close the row data filehandle in optimization mode.
if not self.row_data_fh_closed:
self.row_data_fh.close()
self.row_data_fh_closed = True
def _opt_reopen(self):
# Reopen the row data filehandle in optimization mode.
if self.row_data_fh_closed:
filename = self.row_data_filename
self.row_data_fh = codecs.open(filename, 'a+', 'utf-8')
self.row_data_fh_closed = False
self.fh = self.row_data_fh
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_worksheet(self):
# Write the <worksheet> element. This is the root element.
schema = 'http://schemas.openxmlformats.org/'
xmlns = schema + 'spreadsheetml/2006/main'
xmlns_r = schema + 'officeDocument/2006/relationships'
xmlns_mc = schema + 'markup-compatibility/2006'
ms_schema = 'http://schemas.microsoft.com/'
xmlns_x14ac = ms_schema + 'office/spreadsheetml/2009/9/ac'
attributes = [
('xmlns', xmlns),
('xmlns:r', xmlns_r)]
# Add some extra attributes for Excel 2010. Mainly for sparklines.
if self.excel_version == 2010:
attributes.append(('xmlns:mc', xmlns_mc))
attributes.append(('xmlns:x14ac', xmlns_x14ac))
attributes.append(('mc:Ignorable', 'x14ac'))
self._xml_start_tag('worksheet', attributes)
def _write_dimension(self):
# Write the <dimension> element. This specifies the range of
# cells in the worksheet. As a special case, empty
# spreadsheets use 'A1' as a range.
if self.dim_rowmin is None and self.dim_colmin is None:
# If the min dimensions are not defined then no dimensions
# have been set and we use the default 'A1'.
ref = 'A1'
elif self.dim_rowmin is None and self.dim_colmin is not None:
# If the row dimensions aren't set but the column
# dimensions are set then they have been changed via
# set_column().
if self.dim_colmin == self.dim_colmax:
# The dimensions are a single cell and not a range.
ref = xl_rowcol_to_cell(0, self.dim_colmin)
else:
# The dimensions are a cell range.
cell_1 = xl_rowcol_to_cell(0, self.dim_colmin)
cell_2 = xl_rowcol_to_cell(0, self.dim_colmax)
ref = cell_1 + ':' + cell_2
elif (self.dim_rowmin == self.dim_rowmax and
self.dim_colmin == self.dim_colmax):
# The dimensions are a single cell and not a range.
ref = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
else:
# The dimensions are a cell range.
cell_1 = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
cell_2 = xl_rowcol_to_cell(self.dim_rowmax, self.dim_colmax)
ref = cell_1 + ':' + cell_2
self._xml_empty_tag('dimension', [('ref', ref)])
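    # For example, data written to B2 and D4 gives a dimension ref of
    # "B2:D4"; set_column() calls on columns C:E with no cell data give
    # "C1:E1"; an empty sheet falls back to "A1".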
def _write_sheet_views(self):
# Write the <sheetViews> element.
self._xml_start_tag('sheetViews')
# Write the sheetView element.
self._write_sheet_view()
self._xml_end_tag('sheetViews')
def _write_sheet_view(self):
        # Write the <sheetView> element.
attributes = []
# Hide screen gridlines if required
if not self.screen_gridlines:
attributes.append(('showGridLines', 0))
# Hide zeroes in cells.
if not self.show_zeros:
attributes.append(('showZeros', 0))
# Display worksheet right to left for Hebrew, Arabic and others.
if self.is_right_to_left:
attributes.append(('rightToLeft', 1))
# Show that the sheet tab is selected.
if self.selected:
attributes.append(('tabSelected', 1))
# Turn outlines off. Also required in the outlinePr element.
if not self.outline_on:
attributes.append(("showOutlineSymbols", 0))
# Set the page view/layout mode if required.
if self.page_view:
attributes.append(('view', 'pageLayout'))
# Set the zoom level.
if self.zoom != 100:
if not self.page_view:
attributes.append(('zoomScale', self.zoom))
if self.zoom_scale_normal:
attributes.append(('zoomScaleNormal', self.zoom))
attributes.append(('workbookViewId', 0))
if self.panes or len(self.selections):
self._xml_start_tag('sheetView', attributes)
self._write_panes()
self._write_selections()
self._xml_end_tag('sheetView')
else:
self._xml_empty_tag('sheetView', attributes)
def _write_sheet_format_pr(self):
# Write the <sheetFormatPr> element.
default_row_height = self.default_row_height
row_level = self.outline_row_level
col_level = self.outline_col_level
attributes = [('defaultRowHeight', default_row_height)]
if self.default_row_height != self.original_row_height:
attributes.append(('customHeight', 1))
if self.default_row_zeroed:
attributes.append(('zeroHeight', 1))
if row_level:
attributes.append(('outlineLevelRow', row_level))
if col_level:
attributes.append(('outlineLevelCol', col_level))
if self.excel_version == 2010:
attributes.append(('x14ac:dyDescent', '0.25'))
self._xml_empty_tag('sheetFormatPr', attributes)
def _write_cols(self):
# Write the <cols> element and <col> sub elements.
        # Exit unless some columns have been formatted.
if not self.colinfo:
return
self._xml_start_tag('cols')
for col in sorted(self.colinfo.keys()):
self._write_col_info(self.colinfo[col])
self._xml_end_tag('cols')
def _write_col_info(self, col_info):
# Write the <col> element.
(col_min, col_max, width, cell_format,
hidden, level, collapsed) = col_info
custom_width = 1
xf_index = 0
# Get the cell_format index.
if cell_format:
xf_index = cell_format._get_xf_index()
# Set the Excel default column width.
if width is None:
if not hidden:
width = 8.43
custom_width = 0
else:
width = 0
elif width == 8.43:
# Width is defined but same as default.
custom_width = 0
# Convert column width from user units to character width.
if width > 0:
            # For Calibri 11.
max_digit_width = 7
padding = 5
if width < 1:
width = int((int(width * (max_digit_width + padding) + 0.5))
/ float(max_digit_width) * 256.0) / 256.0
else:
width = int((int(width * max_digit_width + 0.5) + padding)
/ float(max_digit_width) * 256.0) / 256.0
attributes = [
('min', col_min + 1),
('max', col_max + 1),
('width', "%.15g" % width)]
if xf_index:
attributes.append(('style', xf_index))
if hidden:
attributes.append(('hidden', '1'))
if custom_width:
attributes.append(('customWidth', '1'))
if level:
attributes.append(('outlineLevel', level))
if collapsed:
attributes.append(('collapsed', '1'))
self._xml_empty_tag('col', attributes)
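    # For example, a user width of 10 is written to the XML as
    # int((int(10 * 7 + 0.5) + 5) / 7.0 * 256.0) / 256.0 = 10.7109375
    # character units.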
def _write_sheet_data(self):
# Write the <sheetData> element.
if self.dim_rowmin is None:
# If the dimensions aren't defined there is no data to write.
self._xml_empty_tag('sheetData')
else:
self._xml_start_tag('sheetData')
self._write_rows()
self._xml_end_tag('sheetData')
def _write_optimized_sheet_data(self):
# Write the <sheetData> element when the memory optimization is on.
# In this case we read the data stored in the temp file and rewrite
# it to the XML sheet file.
if self.dim_rowmin is None:
# If the dimensions aren't defined then there is no data to write.
self._xml_empty_tag('sheetData')
else:
self._xml_start_tag('sheetData')
# Rewind the filehandle that was used for temp row data.
buff_size = 65536
self.row_data_fh.seek(0)
data = self.row_data_fh.read(buff_size)
while data:
self.fh.write(data)
data = self.row_data_fh.read(buff_size)
self.row_data_fh.close()
os.unlink(self.row_data_filename)
self._xml_end_tag('sheetData')
def _write_page_margins(self):
# Write the <pageMargins> element.
attributes = [
('left', self.margin_left),
('right', self.margin_right),
('top', self.margin_top),
('bottom', self.margin_bottom),
('header', self.margin_header),
('footer', self.margin_footer)]
self._xml_empty_tag('pageMargins', attributes)
def _write_page_setup(self):
# Write the <pageSetup> element.
#
# The following is an example taken from Excel.
#
# <pageSetup
# paperSize="9"
# scale="110"
# fitToWidth="2"
# fitToHeight="2"
# pageOrder="overThenDown"
# orientation="portrait"
# blackAndWhite="1"
# draft="1"
# horizontalDpi="200"
# verticalDpi="200"
# r:id="rId1"
# />
#
attributes = []
# Skip this element if no page setup has changed.
if not self.page_setup_changed:
return
# Set paper size.
if self.paper_size:
attributes.append(('paperSize', self.paper_size))
# Set the print_scale.
if self.print_scale != 100:
attributes.append(('scale', self.print_scale))
# Set the "Fit to page" properties.
if self.fit_page and self.fit_width != 1:
attributes.append(('fitToWidth', self.fit_width))
if self.fit_page and self.fit_height != 1:
attributes.append(('fitToHeight', self.fit_height))
# Set the page print direction.
if self.page_order:
attributes.append(('pageOrder', "overThenDown"))
# Set start page for printing.
if self.page_start > 1:
attributes.append(('firstPageNumber', self.page_start))
# Set page orientation.
if self.orientation:
attributes.append(('orientation', 'portrait'))
else:
attributes.append(('orientation', 'landscape'))
        # Set the flag indicating a user-defined start page is in use.
if self.page_start != 0:
attributes.append(('useFirstPageNumber', '1'))
# Set the DPI. Mainly only for testing.
if self.vertical_dpi:
attributes.append(('verticalDpi', self.vertical_dpi))
if self.horizontal_dpi:
attributes.append(('horizontalDpi', self.horizontal_dpi))
self._xml_empty_tag('pageSetup', attributes)
def _write_print_options(self):
# Write the <printOptions> element.
attributes = []
if not self.print_options_changed:
return
# Set horizontal centering.
if self.hcenter:
attributes.append(('horizontalCentered', 1))
# Set vertical centering.
if self.vcenter:
attributes.append(('verticalCentered', 1))
# Enable row and column headers.
if self.print_headers:
attributes.append(('headings', 1))
# Set printed gridlines.
if self.print_gridlines:
attributes.append(('gridLines', 1))
self._xml_empty_tag('printOptions', attributes)
def _write_header_footer(self):
# Write the <headerFooter> element.
attributes = []
if not self.header_footer_scales:
attributes.append(('scaleWithDoc', 0))
if not self.header_footer_aligns:
attributes.append(('alignWithMargins', 0))
if self.header_footer_changed:
self._xml_start_tag('headerFooter', attributes)
if self.header:
self._write_odd_header()
if self.footer:
self._write_odd_footer()
self._xml_end_tag('headerFooter')
elif self.excel2003_style:
self._xml_empty_tag('headerFooter', attributes)
def _write_odd_header(self):
        # Write the <oddHeader> element.
self._xml_data_element('oddHeader', self.header)
def _write_odd_footer(self):
        # Write the <oddFooter> element.
self._xml_data_element('oddFooter', self.footer)
def _write_rows(self):
# Write out the worksheet data as a series of rows and cells.
self._calculate_spans()
for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
if (row_num in self.set_rows or row_num in self.comments
or self.table[row_num]):
# Only process rows with formatting, cell data and/or comments.
                span_index = row_num // 16
if span_index in self.row_spans:
span = self.row_spans[span_index]
else:
span = None
if self.table[row_num]:
# Write the cells if the row contains data.
if row_num not in self.set_rows:
self._write_row(row_num, span)
else:
self._write_row(row_num, span, self.set_rows[row_num])
for col_num in range(self.dim_colmin, self.dim_colmax + 1):
if col_num in self.table[row_num]:
col_ref = self.table[row_num][col_num]
self._write_cell(row_num, col_num, col_ref)
self._xml_end_tag('row')
elif row_num in self.comments:
# Row with comments in cells.
                self._write_empty_row(row_num, span,
                                      self.set_rows.get(row_num))
else:
# Blank row with attributes only.
self._write_empty_row(row_num, span,
self.set_rows[row_num])
def _write_single_row(self, current_row_num=0):
# Write out the worksheet data as a single row with cells.
# This method is used when memory optimization is on. A single
# row is written and the data table is reset. That way only
# one row of data is kept in memory at any one time. We don't
# write span data in the optimized case since it is optional.
# Set the new previous row as the current row.
row_num = self.previous_row
self.previous_row = current_row_num
if (row_num in self.set_rows or row_num in self.comments
or self.table[row_num]):
# Only process rows with formatting, cell data and/or comments.
# No span data in optimized mode.
span = None
if self.table[row_num]:
# Write the cells if the row contains data.
if row_num not in self.set_rows:
self._write_row(row_num, span)
else:
self._write_row(row_num, span, self.set_rows[row_num])
for col_num in range(self.dim_colmin, self.dim_colmax + 1):
if col_num in self.table[row_num]:
col_ref = self.table[row_num][col_num]
self._write_cell(row_num, col_num, col_ref)
self._xml_end_tag('row')
else:
# Row attributes or comments only.
                self._write_empty_row(row_num, span,
                                      self.set_rows.get(row_num))
# Reset table.
self.table.clear()
def _calculate_spans(self):
# Calculate the "spans" attribute of the <row> tag. This is an
# XLSX optimization and isn't strictly required. However, it
# makes comparing files easier. The span is the same for each
# block of 16 rows.
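        # Worked example (illustrative only): if rows 0-15 only contain
        # data in columns 2 through 5, the first block is recorded as
        # spans[0] = "3:6", i.e. the 1-based first and last columns used
        # anywhere in that block of 16 rows.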
spans = {}
span_min = None
span_max = None
for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
if row_num in self.table:
# Calculate spans for cell data.
for col_num in range(self.dim_colmin, self.dim_colmax + 1):
if col_num in self.table[row_num]:
if span_min is None:
span_min = col_num
span_max = col_num
else:
if col_num < span_min:
span_min = col_num
if col_num > span_max:
span_max = col_num
if row_num in self.comments:
# Calculate spans for comments.
for col_num in range(self.dim_colmin, self.dim_colmax + 1):
if (row_num in self.comments
and col_num in self.comments[row_num]):
if span_min is None:
span_min = col_num
span_max = col_num
else:
if col_num < span_min:
span_min = col_num
if col_num > span_max:
span_max = col_num
if ((row_num + 1) % 16 == 0) or row_num == self.dim_rowmax:
                span_index = row_num // 16
if span_min is not None:
span_min += 1
span_max += 1
spans[span_index] = "%s:%s" % (span_min, span_max)
span_min = None
self.row_spans = spans
def _write_row(self, row, spans, properties=None, empty_row=False):
# Write the <row> element.
xf_index = 0
if properties:
height, cell_format, hidden, level, collapsed = properties
else:
height, cell_format, hidden, level, collapsed = None, None, 0, 0, 0
if height is None:
height = self.default_row_height
attributes = [('r', row + 1)]
# Get the cell_format index.
if cell_format:
xf_index = cell_format._get_xf_index()
# Add row attributes where applicable.
if spans:
attributes.append(('spans', spans))
if xf_index:
attributes.append(('s', xf_index))
if cell_format:
attributes.append(('customFormat', 1))
if height != self.original_row_height:
attributes.append(('ht', height))
if hidden:
attributes.append(('hidden', 1))
if height != self.original_row_height:
attributes.append(('customHeight', 1))
if level:
attributes.append(('outlineLevel', level))
if collapsed:
attributes.append(('collapsed', 1))
if self.excel_version == 2010:
attributes.append(('x14ac:dyDescent', '0.25'))
if empty_row:
self._xml_empty_tag_unencoded('row', attributes)
else:
self._xml_start_tag_unencoded('row', attributes)
def _write_empty_row(self, row, spans, properties=None):
        # Write an empty <row> element.
self._write_row(row, spans, properties, empty_row=True)
def _write_cell(self, row, col, cell):
# Write the <cell> element.
# Note. This is the innermost loop so efficiency is important.
error_codes = ['#DIV/0!', '#N/A', '#NAME?', '#NULL!',
'#NUM!', '#REF!', '#VALUE!']
cell_range = xl_rowcol_to_cell_fast(row, col)
attributes = [('r', cell_range)]
if cell.format:
# Add the cell format index.
xf_index = cell.format._get_xf_index()
attributes.append(('s', xf_index))
elif row in self.set_rows and self.set_rows[row][1]:
# Add the row format.
row_xf = self.set_rows[row][1]
attributes.append(('s', row_xf._get_xf_index()))
elif col in self.col_formats:
# Add the column format.
col_xf = self.col_formats[col]
attributes.append(('s', col_xf._get_xf_index()))
# Write the various cell types.
if type(cell).__name__ == 'Number':
# Write a number.
self._xml_number_element(cell.number, attributes)
elif type(cell).__name__ == 'String':
# Write a string.
string = cell.string
if not self.optimization:
# Write a shared string.
self._xml_string_element(string, attributes)
else:
# Write an optimized in-line string.
# Escape control characters. See SharedString.pm for details.
string = re.sub('(_x[0-9a-fA-F]{4}_)', r'_x005F\1', string)
string = re.sub(r'([\x00-\x08\x0B-\x1F])',
lambda match: "_x%04X_" %
ord(match.group(1)), string)
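                # Worked example (illustrative): "\x01" becomes "_x0001_",
                # while a literal "_x0001_" in the input is first protected
                # as "_x005F_x0001_" so the escaping is reversible.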
# Write any rich strings without further tags.
if re.search('^<r>', string) and re.search('</r>$', string):
self._xml_rich_inline_string(string, attributes)
else:
# Add attribute to preserve leading or trailing whitespace.
preserve = 0
                    if re.search(r'^\s', string) or re.search(r'\s$', string):
preserve = 1
self._xml_inline_string(string, preserve, attributes)
elif type(cell).__name__ == 'Formula':
# Write a formula. First check the formula value type.
value = cell.value
            if isinstance(cell.value, bool):
attributes.append(('t', 'b'))
if cell.value:
value = 1
else:
value = 0
elif isinstance(cell.value, str_types):
if cell.value in error_codes:
attributes.append(('t', 'e'))
else:
attributes.append(('t', 'str'))
self._xml_formula_element(cell.formula, value, attributes)
elif type(cell).__name__ == 'ArrayFormula':
            # Write an array formula.
# First check if the formula value is a string.
try:
float(cell.value)
except ValueError:
attributes.append(('t', 'str'))
# Write an array formula.
self._xml_start_tag('c', attributes)
self._write_cell_array_formula(cell.formula, cell.range)
self._write_cell_value(cell.value)
self._xml_end_tag('c')
elif type(cell).__name__ == 'Blank':
            # Write an empty cell.
self._xml_empty_tag('c', attributes)
elif type(cell).__name__ == 'Boolean':
# Write a boolean cell.
attributes.append(('t', 'b'))
self._xml_start_tag('c', attributes)
self._write_cell_value(cell.boolean)
self._xml_end_tag('c')
def _write_cell_value(self, value):
# Write the cell value <v> element.
if value is None:
value = ''
self._xml_data_element('v', value)
def _write_cell_array_formula(self, formula, cell_range):
# Write the cell array formula <f> element.
attributes = [
('t', 'array'),
('ref', cell_range)
]
self._xml_data_element('f', formula, attributes)
def _write_sheet_pr(self):
# Write the <sheetPr> element for Sheet level properties.
attributes = []
if (not self.fit_page
and not self.filter_on
and not self.tab_color
and not self.outline_changed
and not self.vba_codename):
return
if self.vba_codename:
attributes.append(('codeName', self.vba_codename))
if self.filter_on:
attributes.append(('filterMode', 1))
if (self.fit_page
or self.tab_color
or self.outline_changed):
self._xml_start_tag('sheetPr', attributes)
self._write_tab_color()
self._write_outline_pr()
self._write_page_set_up_pr()
self._xml_end_tag('sheetPr')
else:
self._xml_empty_tag('sheetPr', attributes)
def _write_page_set_up_pr(self):
# Write the <pageSetUpPr> element.
if not self.fit_page:
return
attributes = [('fitToPage', 1)]
self._xml_empty_tag('pageSetUpPr', attributes)
def _write_tab_color(self):
# Write the <tabColor> element.
color = self.tab_color
if not color:
return
attributes = [('rgb', color)]
self._xml_empty_tag('tabColor', attributes)
def _write_outline_pr(self):
# Write the <outlinePr> element.
attributes = []
if not self.outline_changed:
return
if self.outline_style:
attributes.append(("applyStyles", 1))
if not self.outline_below:
attributes.append(("summaryBelow", 0))
if not self.outline_right:
attributes.append(("summaryRight", 0))
if not self.outline_on:
attributes.append(("showOutlineSymbols", 0))
self._xml_empty_tag('outlinePr', attributes)
def _write_row_breaks(self):
# Write the <rowBreaks> element.
page_breaks = self._sort_pagebreaks(self.hbreaks)
if not page_breaks:
return
count = len(page_breaks)
attributes = [
('count', count),
('manualBreakCount', count),
]
self._xml_start_tag('rowBreaks', attributes)
for row_num in page_breaks:
self._write_brk(row_num, 16383)
self._xml_end_tag('rowBreaks')
def _write_col_breaks(self):
# Write the <colBreaks> element.
page_breaks = self._sort_pagebreaks(self.vbreaks)
if not page_breaks:
return
count = len(page_breaks)
attributes = [
('count', count),
('manualBreakCount', count),
]
self._xml_start_tag('colBreaks', attributes)
for col_num in page_breaks:
self._write_brk(col_num, 1048575)
self._xml_end_tag('colBreaks')
def _write_brk(self, brk_id, brk_max):
# Write the <brk> element.
attributes = [
('id', brk_id),
('max', brk_max),
('man', 1)]
self._xml_empty_tag('brk', attributes)
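    # Illustrative output (a sketch): a manual row break after row 3 is
    # written by _write_brk(3, 16383) as
    #   <brk id="3" max="16383" man="1"/>
    # where "max" is the index of the last possible column (XFD).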
def _write_merge_cells(self):
# Write the <mergeCells> element.
merged_cells = self.merge
count = len(merged_cells)
if not count:
return
attributes = [('count', count)]
self._xml_start_tag('mergeCells', attributes)
for merged_range in merged_cells:
# Write the mergeCell element.
self._write_merge_cell(merged_range)
self._xml_end_tag('mergeCells')
def _write_merge_cell(self, merged_range):
# Write the <mergeCell> element.
(row_min, col_min, row_max, col_max) = merged_range
# Convert the merge dimensions to a cell range.
cell_1 = xl_rowcol_to_cell(row_min, col_min)
cell_2 = xl_rowcol_to_cell(row_max, col_max)
ref = cell_1 + ':' + cell_2
attributes = [('ref', ref)]
self._xml_empty_tag('mergeCell', attributes)
def _write_hyperlinks(self):
# Process any stored hyperlinks in row/col order and write the
# <hyperlinks> element. The attributes are different for internal
# and external links.
hlink_refs = []
display = None
# Sort the hyperlinks into row order.
row_nums = sorted(self.hyperlinks.keys())
# Exit if there are no hyperlinks to process.
if not row_nums:
return
# Iterate over the rows.
for row_num in row_nums:
# Sort the hyperlinks into column order.
col_nums = sorted(self.hyperlinks[row_num].keys())
# Iterate over the columns.
for col_num in col_nums:
# Get the link data for this cell.
link = self.hyperlinks[row_num][col_num]
link_type = link["link_type"]
# If the cell isn't a string then we have to add the url as
# the string to display.
                if (self.table
                        and row_num in self.table
                        and col_num in self.table[row_num]):
cell = self.table[row_num][col_num]
if type(cell).__name__ != 'String':
display = link["url"]
if link_type == 1:
# External link with rel file relationship.
self.rel_count += 1
hlink_refs.append([link_type,
row_num,
col_num,
self.rel_count,
link["str"],
display,
link["tip"]])
# Links for use by the packager.
self.external_hyper_links.append(['/hyperlink',
link["url"], 'External'])
else:
# Internal link with rel file relationship.
hlink_refs.append([link_type,
row_num,
col_num,
link["url"],
link["str"],
link["tip"]])
# Write the hyperlink elements.
self._xml_start_tag('hyperlinks')
for args in hlink_refs:
link_type = args.pop(0)
if link_type == 1:
self._write_hyperlink_external(*args)
elif link_type == 2:
self._write_hyperlink_internal(*args)
self._xml_end_tag('hyperlinks')
def _write_hyperlink_external(self, row, col, id_num, location=None,
display=None, tooltip=None):
# Write the <hyperlink> element for external links.
ref = xl_rowcol_to_cell(row, col)
r_id = 'rId' + str(id_num)
attributes = [
('ref', ref),
('r:id', r_id)]
if location is not None:
attributes.append(('location', location))
if display is not None:
attributes.append(('display', display))
if tooltip is not None:
attributes.append(('tooltip', tooltip))
self._xml_empty_tag('hyperlink', attributes)
def _write_hyperlink_internal(self, row, col, location=None, display=None,
tooltip=None):
# Write the <hyperlink> element for internal links.
ref = xl_rowcol_to_cell(row, col)
attributes = [
('ref', ref),
('location', location)]
if tooltip is not None:
attributes.append(('tooltip', tooltip))
attributes.append(('display', display))
self._xml_empty_tag('hyperlink', attributes)
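    # Illustrative output (a sketch): an internal link in cell A1 that
    # points at Sheet2!A1 is written as
    #   <hyperlink ref="A1" location="Sheet2!A1" display="Sheet2!A1"/>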
def _write_auto_filter(self):
# Write the <autoFilter> element.
if not self.autofilter_ref:
return
attributes = [('ref', self.autofilter_ref)]
if self.filter_on:
# Autofilter defined active filters.
self._xml_start_tag('autoFilter', attributes)
self._write_autofilters()
self._xml_end_tag('autoFilter')
else:
# Autofilter defined without active filters.
self._xml_empty_tag('autoFilter', attributes)
def _write_autofilters(self):
# Function to iterate through the columns that form part of an
# autofilter range and write the appropriate filters.
(col1, col2) = self.filter_range
for col in range(col1, col2 + 1):
# Skip if column doesn't have an active filter.
if col not in self.filter_cols:
continue
# Retrieve the filter tokens and write the autofilter records.
tokens = self.filter_cols[col]
filter_type = self.filter_type[col]
# Filters are relative to first column in the autofilter.
self._write_filter_column(col - col1, filter_type, tokens)
def _write_filter_column(self, col_id, filter_type, filters):
# Write the <filterColumn> element.
attributes = [('colId', col_id)]
self._xml_start_tag('filterColumn', attributes)
if filter_type == 1:
# Type == 1 is the new XLSX style filter.
self._write_filters(filters)
else:
# Type == 0 is the classic "custom" filter.
self._write_custom_filters(filters)
self._xml_end_tag('filterColumn')
def _write_filters(self, filters):
# Write the <filters> element.
if len(filters) == 1 and filters[0] == 'blanks':
# Special case for blank cells only.
self._xml_empty_tag('filters', [('blank', 1)])
else:
# General case.
self._xml_start_tag('filters')
for autofilter in filters:
self._write_filter(autofilter)
self._xml_end_tag('filters')
def _write_filter(self, val):
# Write the <filter> element.
attributes = [('val', val)]
self._xml_empty_tag('filter', attributes)
def _write_custom_filters(self, tokens):
# Write the <customFilters> element.
if len(tokens) == 2:
# One filter expression only.
self._xml_start_tag('customFilters')
self._write_custom_filter(*tokens)
self._xml_end_tag('customFilters')
else:
# Two filter expressions.
attributes = []
# Check if the "join" operand is "and" or "or".
if tokens[2] == 0:
attributes = [('and', 1)]
else:
attributes = [('and', 0)]
# Write the two custom filters.
self._xml_start_tag('customFilters', attributes)
self._write_custom_filter(tokens[0], tokens[1])
self._write_custom_filter(tokens[3], tokens[4])
self._xml_end_tag('customFilters')
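    # Illustrative example (a sketch): tokens of [4, 3000] produce a single
    #   <customFilter operator="greaterThan" val="3000"/>
    # while a five-token list writes two expressions, with tokens[2] == 0
    # meaning "and" (written as and="1") and any other value meaning "or"
    # (written as and="0").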
def _write_custom_filter(self, operator, val):
# Write the <customFilter> element.
attributes = []
operators = {
1: 'lessThan',
2: 'equal',
3: 'lessThanOrEqual',
4: 'greaterThan',
5: 'notEqual',
6: 'greaterThanOrEqual',
22: 'equal',
}
        # Convert the operator from a number to a descriptive string.
        if operator in operators:
            operator = operators[operator]
        else:
            warn("Unknown operator = %s" % operator)
# The 'equal' operator is the default attribute and isn't stored.
if not operator == 'equal':
attributes.append(('operator', operator))
attributes.append(('val', val))
self._xml_empty_tag('customFilter', attributes)
def _write_sheet_protection(self):
# Write the <sheetProtection> element.
attributes = []
if not self.protect_options:
return
options = self.protect_options
if options['password']:
attributes.append(('password', options['password']))
if options['sheet']:
attributes.append(('sheet', 1))
if options['content']:
attributes.append(('content', 1))
if not options['objects']:
attributes.append(('objects', 1))
if not options['scenarios']:
attributes.append(('scenarios', 1))
if options['format_cells']:
attributes.append(('formatCells', 0))
if options['format_columns']:
attributes.append(('formatColumns', 0))
if options['format_rows']:
attributes.append(('formatRows', 0))
if options['insert_columns']:
attributes.append(('insertColumns', 0))
if options['insert_rows']:
attributes.append(('insertRows', 0))
if options['insert_hyperlinks']:
attributes.append(('insertHyperlinks', 0))
if options['delete_columns']:
attributes.append(('deleteColumns', 0))
if options['delete_rows']:
attributes.append(('deleteRows', 0))
if not options['select_locked_cells']:
attributes.append(('selectLockedCells', 1))
if options['sort']:
attributes.append(('sort', 0))
if options['autofilter']:
attributes.append(('autoFilter', 0))
if options['pivot_tables']:
attributes.append(('pivotTables', 0))
if not options['select_unlocked_cells']:
attributes.append(('selectUnlockedCells', 1))
self._xml_empty_tag('sheetProtection', attributes)
def _write_drawings(self):
# Write the <drawing> elements.
if not self.drawing:
return
self.rel_count += 1
self._write_drawing(self.rel_count)
def _write_drawing(self, drawing_id):
# Write the <drawing> element.
r_id = 'rId' + str(drawing_id)
attributes = [('r:id', r_id)]
self._xml_empty_tag('drawing', attributes)
def _write_legacy_drawing(self):
# Write the <legacyDrawing> element.
if not self.has_vml:
return
# Increment the relationship id for any drawings or comments.
self.rel_count += 1
r_id = 'rId' + str(self.rel_count)
attributes = [('r:id', r_id)]
self._xml_empty_tag('legacyDrawing', attributes)
def _write_legacy_drawing_hf(self):
# Write the <legacyDrawingHF> element.
if not self.has_header_vml:
return
# Increment the relationship id for any drawings or comments.
self.rel_count += 1
r_id = 'rId' + str(self.rel_count)
attributes = [('r:id', r_id)]
self._xml_empty_tag('legacyDrawingHF', attributes)
def _write_data_validations(self):
# Write the <dataValidations> element.
validations = self.validations
count = len(validations)
if not count:
return
attributes = [('count', count)]
self._xml_start_tag('dataValidations', attributes)
for validation in validations:
# Write the dataValidation element.
self._write_data_validation(validation)
self._xml_end_tag('dataValidations')
def _write_data_validation(self, options):
# Write the <dataValidation> element.
sqref = ''
attributes = []
# Set the cell range(s) for the data validation.
for cells in options['cells']:
# Add a space between multiple cell ranges.
if sqref != '':
sqref += ' '
(row_first, col_first, row_last, col_last) = cells
# Swap last row/col for first row/col as necessary
if row_first > row_last:
(row_first, row_last) = (row_last, row_first)
if col_first > col_last:
(col_first, col_last) = (col_last, col_first)
# If the first and last cell are the same write a single cell.
if (row_first == row_last) and (col_first == col_last):
sqref += xl_rowcol_to_cell(row_first, col_first)
else:
sqref += xl_range(row_first, col_first, row_last, col_last)
if options['validate'] != 'none':
attributes.append(('type', options['validate']))
if options['criteria'] != 'between':
attributes.append(('operator', options['criteria']))
if 'error_type' in options:
if options['error_type'] == 1:
attributes.append(('errorStyle', 'warning'))
if options['error_type'] == 2:
attributes.append(('errorStyle', 'information'))
if options['ignore_blank']:
attributes.append(('allowBlank', 1))
if not options['dropdown']:
attributes.append(('showDropDown', 1))
if options['show_input']:
attributes.append(('showInputMessage', 1))
if options['show_error']:
attributes.append(('showErrorMessage', 1))
if 'error_title' in options:
attributes.append(('errorTitle', options['error_title']))
if 'error_message' in options:
attributes.append(('error', options['error_message']))
if 'input_title' in options:
attributes.append(('promptTitle', options['input_title']))
if 'input_message' in options:
attributes.append(('prompt', options['input_message']))
attributes.append(('sqref', sqref))
if options['validate'] == 'none':
self._xml_empty_tag('dataValidation', attributes)
else:
self._xml_start_tag('dataValidation', attributes)
# Write the formula1 element.
self._write_formula_1(options['value'])
# Write the formula2 element.
if options['maximum'] is not None:
self._write_formula_2(options['maximum'])
self._xml_end_tag('dataValidation')
def _write_formula_1(self, formula):
# Write the <formula1> element.
        if isinstance(formula, list):
formula = self._csv_join(*formula)
formula = '"%s"' % formula
else:
# Check if the formula is a number.
try:
float(formula)
except ValueError:
# Not a number. Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
self._xml_data_element('formula1', formula)
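    # Illustrative behaviour (not from the original source): a list such as
    # ['a', 'b'] is joined and quoted to '"a,b"', the string '=SUM(A1:A2)'
    # is written as 'SUM(A1:A2)', and a plain number passes through as-is.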
def _write_formula_2(self, formula):
# Write the <formula2> element.
# Check if the formula is a number.
try:
float(formula)
except ValueError:
# Not a number. Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
self._xml_data_element('formula2', formula)
def _write_conditional_formats(self):
# Write the Worksheet conditional formats.
ranges = sorted(self.cond_formats.keys())
if not ranges:
return
for cond_range in ranges:
self._write_conditional_formatting(cond_range,
self.cond_formats[cond_range])
def _write_conditional_formatting(self, cond_range, params):
# Write the <conditionalFormatting> element.
attributes = [('sqref', cond_range)]
self._xml_start_tag('conditionalFormatting', attributes)
for param in params:
# Write the cfRule element.
self._write_cf_rule(param)
self._xml_end_tag('conditionalFormatting')
def _write_cf_rule(self, params):
# Write the <cfRule> element.
attributes = [('type', params['type'])]
if 'format' in params and params['format'] is not None:
attributes.append(('dxfId', params['format']))
attributes.append(('priority', params['priority']))
if params['type'] == 'cellIs':
attributes.append(('operator', params['criteria']))
self._xml_start_tag('cfRule', attributes)
if 'minimum' in params and 'maximum' in params:
self._write_formula(params['minimum'])
self._write_formula(params['maximum'])
else:
self._write_formula(params['value'])
self._xml_end_tag('cfRule')
elif params['type'] == 'aboveAverage':
if re.search('below', params['criteria']):
attributes.append(('aboveAverage', 0))
if re.search('equal', params['criteria']):
attributes.append(('equalAverage', 1))
if re.search('[123] std dev', params['criteria']):
match = re.search('([123]) std dev', params['criteria'])
attributes.append(('stdDev', match.group(1)))
self._xml_empty_tag('cfRule', attributes)
elif params['type'] == 'top10':
if 'criteria' in params and params['criteria'] == '%':
attributes.append(('percent', 1))
if 'direction' in params:
attributes.append(('bottom', 1))
rank = params['value'] or 10
attributes.append(('rank', rank))
self._xml_empty_tag('cfRule', attributes)
elif params['type'] == 'duplicateValues':
self._xml_empty_tag('cfRule', attributes)
elif params['type'] == 'uniqueValues':
self._xml_empty_tag('cfRule', attributes)
elif (params['type'] == 'containsText'
or params['type'] == 'notContainsText'
or params['type'] == 'beginsWith'
or params['type'] == 'endsWith'):
attributes.append(('operator', params['criteria']))
attributes.append(('text', params['value']))
self._xml_start_tag('cfRule', attributes)
self._write_formula(params['formula'])
self._xml_end_tag('cfRule')
elif params['type'] == 'timePeriod':
attributes.append(('timePeriod', params['criteria']))
self._xml_start_tag('cfRule', attributes)
self._write_formula(params['formula'])
self._xml_end_tag('cfRule')
elif (params['type'] == 'containsBlanks'
or params['type'] == 'notContainsBlanks'
or params['type'] == 'containsErrors'
or params['type'] == 'notContainsErrors'):
self._xml_start_tag('cfRule', attributes)
self._write_formula(params['formula'])
self._xml_end_tag('cfRule')
elif params['type'] == 'colorScale':
self._xml_start_tag('cfRule', attributes)
self._write_color_scale(params)
self._xml_end_tag('cfRule')
elif params['type'] == 'dataBar':
self._xml_start_tag('cfRule', attributes)
self._write_data_bar(params)
self._xml_end_tag('cfRule')
elif params['type'] == 'expression':
self._xml_start_tag('cfRule', attributes)
self._write_formula(params['criteria'])
self._xml_end_tag('cfRule')
def _write_formula(self, formula):
# Write the <formula> element.
# Check if the formula is a number.
try:
float(formula)
except ValueError:
# Not a number. Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
self._xml_data_element('formula', formula)
def _write_color_scale(self, param):
# Write the <colorScale> element.
self._xml_start_tag('colorScale')
self._write_cfvo(param['min_type'], param['min_value'])
if param['mid_type'] is not None:
self._write_cfvo(param['mid_type'], param['mid_value'])
self._write_cfvo(param['max_type'], param['max_value'])
self._write_color('rgb', param['min_color'])
if param['mid_color'] is not None:
self._write_color('rgb', param['mid_color'])
self._write_color('rgb', param['max_color'])
self._xml_end_tag('colorScale')
def _write_data_bar(self, param):
# Write the <dataBar> element.
self._xml_start_tag('dataBar')
self._write_cfvo(param['min_type'], param['min_value'])
self._write_cfvo(param['max_type'], param['max_value'])
self._write_color('rgb', param['bar_color'])
self._xml_end_tag('dataBar')
def _write_cfvo(self, cf_type, val):
# Write the <cfvo> element.
attributes = [('type', cf_type), ('val', val)]
self._xml_empty_tag('cfvo', attributes)
def _write_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self._xml_empty_tag('color', attributes)
def _write_selections(self):
# Write the <selection> elements.
for selection in self.selections:
self._write_selection(*selection)
def _write_selection(self, pane, active_cell, sqref):
# Write the <selection> element.
attributes = []
if pane:
attributes.append(('pane', pane))
if active_cell:
attributes.append(('activeCell', active_cell))
if sqref:
attributes.append(('sqref', sqref))
self._xml_empty_tag('selection', attributes)
def _write_panes(self):
# Write the frozen or split <pane> elements.
panes = self.panes
        if not panes:
return
if panes[4] == 2:
self._write_split_panes(*panes)
else:
self._write_freeze_panes(*panes)
def _write_freeze_panes(self, row, col, top_row, left_col, pane_type):
# Write the <pane> element for freeze panes.
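        # Worked example (a sketch): freezing at row=1, col=0 with the
        # default pane_type of 0 produces ySplit="1", topLeftCell="A2",
        # activePane="bottomLeft" and state="frozen".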
attributes = []
y_split = row
x_split = col
top_left_cell = xl_rowcol_to_cell(top_row, left_col)
active_pane = ''
state = ''
active_cell = ''
sqref = ''
# Move user cell selection to the panes.
if self.selections:
(_, active_cell, sqref) = self.selections[0]
self.selections = []
# Set the active pane.
if row and col:
active_pane = 'bottomRight'
row_cell = xl_rowcol_to_cell(row, 0)
col_cell = xl_rowcol_to_cell(0, col)
self.selections.append(['topRight', col_cell, col_cell])
self.selections.append(['bottomLeft', row_cell, row_cell])
self.selections.append(['bottomRight', active_cell, sqref])
elif col:
active_pane = 'topRight'
self.selections.append(['topRight', active_cell, sqref])
else:
active_pane = 'bottomLeft'
self.selections.append(['bottomLeft', active_cell, sqref])
# Set the pane type.
if pane_type == 0:
state = 'frozen'
elif pane_type == 1:
state = 'frozenSplit'
else:
state = 'split'
if x_split:
attributes.append(('xSplit', x_split))
if y_split:
attributes.append(('ySplit', y_split))
attributes.append(('topLeftCell', top_left_cell))
attributes.append(('activePane', active_pane))
attributes.append(('state', state))
self._xml_empty_tag('pane', attributes)
def _write_split_panes(self, row, col, top_row, left_col, pane_type):
# Write the <pane> element for split panes.
attributes = []
has_selection = 0
active_pane = ''
active_cell = ''
sqref = ''
y_split = row
x_split = col
# Move user cell selection to the panes.
if self.selections:
(_, active_cell, sqref) = self.selections[0]
self.selections = []
has_selection = 1
# Convert the row and col to 1/20 twip units with padding.
if y_split:
y_split = int(20 * y_split + 300)
if x_split:
x_split = self._calculate_x_split_width(x_split)
        # For non-explicit topLeft definitions, estimate the cell offset
        # based on the pixel dimensions. This is only a workaround and
        # doesn't take adjusted cell dimensions into account.
if top_row == row and left_col == col:
top_row = int(0.5 + (y_split - 300) / 20 / 15)
left_col = int(0.5 + (x_split - 390) / 20 / 3 * 4 / 64)
top_left_cell = xl_rowcol_to_cell(top_row, left_col)
# If there is no selection set the active cell to the top left cell.
if not has_selection:
active_cell = top_left_cell
sqref = top_left_cell
# Set the Cell selections.
if row and col:
active_pane = 'bottomRight'
row_cell = xl_rowcol_to_cell(top_row, 0)
col_cell = xl_rowcol_to_cell(0, left_col)
self.selections.append(['topRight', col_cell, col_cell])
self.selections.append(['bottomLeft', row_cell, row_cell])
self.selections.append(['bottomRight', active_cell, sqref])
elif col:
active_pane = 'topRight'
self.selections.append(['topRight', active_cell, sqref])
else:
active_pane = 'bottomLeft'
self.selections.append(['bottomLeft', active_cell, sqref])
# Format splits to the same precision as Excel.
if x_split:
attributes.append(('xSplit', "%.15g" % x_split))
if y_split:
attributes.append(('ySplit', "%.15g" % y_split))
attributes.append(('topLeftCell', top_left_cell))
if has_selection:
attributes.append(('activePane', active_pane))
self._xml_empty_tag('pane', attributes)
def _calculate_x_split_width(self, width):
# Convert column width from user units to pane split width.
max_digit_width = 7 # For Calabri 11.
padding = 5
# Convert to pixels.
if width < 1:
pixels = int(width * (max_digit_width + padding) + 0.5)
else:
pixels = int(width * max_digit_width + 0.5) + padding
# Convert to points.
points = pixels * 3 / 4
# Convert to twips (twentieths of a point).
twips = points * 20
# Add offset/padding.
width = twips + 390
return width
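    # Worked example (illustrative): the default width of 8.43 gives
    # int(8.43 * 7 + 0.5) + 5 = 64 pixels, 64 * 3 / 4 = 48 points,
    # 48 * 20 = 960 twips, and 960 + 390 = 1350 for the xSplit value.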
def _write_table_parts(self):
# Write the <tableParts> element.
tables = self.tables
count = len(tables)
# Return if worksheet doesn't contain any tables.
if not count:
return
attributes = [('count', count,)]
self._xml_start_tag('tableParts', attributes)
for _ in tables:
# Write the tablePart element.
self.rel_count += 1
self._write_table_part(self.rel_count)
self._xml_end_tag('tableParts')
def _write_table_part(self, r_id):
# Write the <tablePart> element.
r_id = 'rId' + str(r_id)
attributes = [('r:id', r_id,)]
self._xml_empty_tag('tablePart', attributes)
def _write_ext_sparklines(self):
# Write the <extLst> element and sparkline sub-elements.
sparklines = self.sparklines
count = len(sparklines)
# Return if worksheet doesn't contain any sparklines.
if not count:
return
# Write the extLst element.
self._xml_start_tag('extLst')
# Write the ext element.
self._write_ext()
# Write the x14:sparklineGroups element.
self._write_sparkline_groups()
# Write the sparkline elements.
for sparkline in reversed(sparklines):
# Write the x14:sparklineGroup element.
self._write_sparkline_group(sparkline)
# Write the x14:colorSeries element.
self._write_color_series(sparkline['series_color'])
# Write the x14:colorNegative element.
self._write_color_negative(sparkline['negative_color'])
# Write the x14:colorAxis element.
self._write_color_axis()
# Write the x14:colorMarkers element.
self._write_color_markers(sparkline['markers_color'])
# Write the x14:colorFirst element.
self._write_color_first(sparkline['first_color'])
# Write the x14:colorLast element.
self._write_color_last(sparkline['last_color'])
# Write the x14:colorHigh element.
self._write_color_high(sparkline['high_color'])
# Write the x14:colorLow element.
self._write_color_low(sparkline['low_color'])
if sparkline['date_axis']:
self._xml_data_element('xm:f', sparkline['date_axis'])
self._write_sparklines(sparkline)
self._xml_end_tag('x14:sparklineGroup')
self._xml_end_tag('x14:sparklineGroups')
self._xml_end_tag('ext')
self._xml_end_tag('extLst')
def _write_sparklines(self, sparkline):
# Write the <x14:sparklines> element and <x14:sparkline> sub-elements.
# Write the sparkline elements.
self._xml_start_tag('x14:sparklines')
for i in range(sparkline['count']):
spark_range = sparkline['ranges'][i]
location = sparkline['locations'][i]
self._xml_start_tag('x14:sparkline')
self._xml_data_element('xm:f', spark_range)
self._xml_data_element('xm:sqref', location)
self._xml_end_tag('x14:sparkline')
self._xml_end_tag('x14:sparklines')
def _write_ext(self):
# Write the <ext> element.
schema = 'http://schemas.microsoft.com/office/'
xmlns_x_14 = schema + 'spreadsheetml/2009/9/main'
uri = '{05C60535-1F16-4fd2-B633-F4F36F0B64E0}'
attributes = [
('xmlns:x14', xmlns_x_14),
('uri', uri),
]
self._xml_start_tag('ext', attributes)
def _write_sparkline_groups(self):
# Write the <x14:sparklineGroups> element.
xmlns_xm = 'http://schemas.microsoft.com/office/excel/2006/main'
attributes = [('xmlns:xm', xmlns_xm)]
self._xml_start_tag('x14:sparklineGroups', attributes)
def _write_sparkline_group(self, options):
# Write the <x14:sparklineGroup> element.
#
# Example for order.
#
# <x14:sparklineGroup
# manualMax="0"
# manualMin="0"
# lineWeight="2.25"
# type="column"
# dateAxis="1"
# displayEmptyCellsAs="span"
# markers="1"
# high="1"
# low="1"
# first="1"
# last="1"
# negative="1"
# displayXAxis="1"
# displayHidden="1"
# minAxisType="custom"
# maxAxisType="custom"
# rightToLeft="1">
#
empty = options.get('empty')
attributes = []
if options.get('max') is not None:
if options['max'] == 'group':
options['cust_max'] = 'group'
else:
attributes.append(('manualMax', options['max']))
options['cust_max'] = 'custom'
if options.get('min') is not None:
if options['min'] == 'group':
options['cust_min'] = 'group'
else:
attributes.append(('manualMin', options['min']))
options['cust_min'] = 'custom'
# Ignore the default type attribute (line).
if options['type'] != 'line':
attributes.append(('type', options['type']))
if options.get('weight'):
attributes.append(('lineWeight', options['weight']))
if options.get('date_axis'):
attributes.append(('dateAxis', 1))
if empty:
attributes.append(('displayEmptyCellsAs', empty))
if options.get('markers'):
attributes.append(('markers', 1))
if options.get('high'):
attributes.append(('high', 1))
if options.get('low'):
attributes.append(('low', 1))
if options.get('first'):
attributes.append(('first', 1))
if options.get('last'):
attributes.append(('last', 1))
if options.get('negative'):
attributes.append(('negative', 1))
if options.get('axis'):
attributes.append(('displayXAxis', 1))
if options.get('hidden'):
attributes.append(('displayHidden', 1))
if options.get('cust_min'):
attributes.append(('minAxisType', options['cust_min']))
if options.get('cust_max'):
attributes.append(('maxAxisType', options['cust_max']))
if options.get('reverse'):
attributes.append(('rightToLeft', 1))
self._xml_start_tag('x14:sparklineGroup', attributes)
def _write_spark_color(self, element, color):
# Helper function for the sparkline color functions below.
attributes = []
if color.get('rgb'):
attributes.append(('rgb', color['rgb']))
if color.get('theme'):
attributes.append(('theme', color['theme']))
if color.get('tint'):
attributes.append(('tint', color['tint']))
self._xml_empty_tag(element, attributes)
def _write_color_series(self, color):
# Write the <x14:colorSeries> element.
self._write_spark_color('x14:colorSeries', color)
def _write_color_negative(self, color):
# Write the <x14:colorNegative> element.
self._write_spark_color('x14:colorNegative', color)
def _write_color_axis(self):
# Write the <x14:colorAxis> element.
self._write_spark_color('x14:colorAxis', {'rgb': 'FF000000'})
def _write_color_markers(self, color):
# Write the <x14:colorMarkers> element.
self._write_spark_color('x14:colorMarkers', color)
def _write_color_first(self, color):
# Write the <x14:colorFirst> element.
self._write_spark_color('x14:colorFirst', color)
def _write_color_last(self, color):
# Write the <x14:colorLast> element.
self._write_spark_color('x14:colorLast', color)
def _write_color_high(self, color):
# Write the <x14:colorHigh> element.
self._write_spark_color('x14:colorHigh', color)
def _write_color_low(self, color):
# Write the <x14:colorLow> element.
self._write_spark_color('x14:colorLow', color)
def _write_phonetic_pr(self):
# Write the <phoneticPr> element.
attributes = [
('fontId', '0'),
('type', 'noConversion'),
]
self._xml_empty_tag('phoneticPr', attributes)
|
{
"content_hash": "b7c652483a75ab574044f757c8aaac68",
"timestamp": "",
"source": "github",
"line_count": 6478,
"max_line_length": 79,
"avg_line_length": 33.67057733868478,
"alnum_prop": 0.5219422514418801,
"repo_name": "jkyeung/XlsxWriter",
"id": "89c0f792ddc064249f158002361064d30677c81b",
"size": "218348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/worksheet.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._dedicated_hosts_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_host_group_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DedicatedHostsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_03_01.aio.ComputeManagementClient`'s
:attr:`dedicated_hosts` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
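    # A minimal usage sketch (hedged: the resource names are placeholders
    # and the credential/client wiring follows the usual azure-identity /
    # azure-mgmt-compute pattern rather than anything defined in this file):
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.compute.v2021_03_01.aio import ComputeManagementClient
    #
    #   client = ComputeManagementClient(DefaultAzureCredential(),
    #                                    "<subscription-id>")
    #   host = await client.dedicated_hosts.get("my_rg", "my_group", "host1")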
async def _create_or_update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHost, IO],
**kwargs: Any
) -> _models.DedicatedHost:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "DedicatedHost")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: _models.DedicatedHost,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DedicatedHost]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
        :param host_name: The name of the dedicated host. Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host operation. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DedicatedHost]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
        :param host_name: The name of the dedicated host. Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHost, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.DedicatedHost]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
        :param host_name: The name of the dedicated host. Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host operation. Is either a
         model type or an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
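    # Usage sketch for begin_create_or_update (hedged: "eastus" and the SKU
    # name are illustrative placeholders, not values taken from this file):
    #
    #   from azure.mgmt.compute.v2021_03_01 import models
    #
    #   poller = await client.dedicated_hosts.begin_create_or_update(
    #       "my_rg", "my_group", "host1",
    #       models.DedicatedHost(location="eastus",
    #                            sku=models.Sku(name="DSv3-Type1")))
    #   host = await poller.result()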
async def _update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHostUpdate, IO],
**kwargs: Any
) -> _models.DedicatedHost:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "DedicatedHostUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@overload
async def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: _models.DedicatedHostUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DedicatedHost]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
        :param host_name: The name of the dedicated host. Required.
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DedicatedHost]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
        :param host_name: The name of the dedicated host. Required.
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHostUpdate, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.DedicatedHost]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host. Required.
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation. Is either a
model type or an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial( # type: ignore
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
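# Illustrative usage (not part of the generated client): a minimal sketch of
# driving begin_update from application code with the usual aio setup. The
# subscription ID, resource names, and tag values below are placeholders, and
# the dict form of `parameters` relies on the serializer accepting a dict in
# place of a DedicatedHostUpdate model.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.compute.aio import ComputeManagementClient

async def update_host_tags():
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, "<subscription-id>") as client:
            poller = await client.dedicated_hosts.begin_update(
                resource_group_name="my-rg",
                host_group_name="my-host-group",
                host_name="my-host",
                parameters={"tags": {"env": "test"}},  # serialized as DedicatedHostUpdate
            )
            host = await poller.result()  # waits for the LRO to finish
            print(host.name, host.provisioning_state)

asyncio.run(update_host_tags())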
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a dedicated host.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host. Required.
:type host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
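# Illustrative usage (not part of the generated client): deleting a host and
# waiting for the LRO to finish. The poller's result is None for this
# operation; a failed deletion surfaces as an HttpResponseError. Resource
# names are placeholders.
async def delete_host(client):  # client: an aio ComputeManagementClient
    poller = await client.dedicated_hosts.begin_delete(
        "my-rg", "my-host-group", "my-host"
    )
    await poller.result()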
@distributed_trace_async
async def get(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
expand: Optional[Union[str, _models.InstanceViewTypes]] = None,
**kwargs: Any
) -> _models.DedicatedHost:
"""Retrieves information about a dedicated host.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host. Required.
:type host_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve
the list of instance views of the dedicated host. 'UserData' is not supported for a dedicated
host. Known values are: "instanceView" and "userData". Default value is None.
:type expand: str or ~azure.mgmt.compute.v2021_03_01.models.InstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHost or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
request = build_get_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
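# Illustrative usage (not part of the generated client): fetching a host
# together with its instance view via the expand parameter documented above.
# Resource names are placeholders, and the sketch assumes the DedicatedHost
# model exposes instance_view/statuses attributes.
async def show_instance_view(client):  # client: an aio ComputeManagementClient
    host = await client.dedicated_hosts.get(
        "my-rg", "my-host-group", "my-host", expand="instanceView"
    )
    if host.instance_view is not None:
        for status in host.instance_view.statuses or []:
            print(status.code)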
@distributed_trace
def list_by_host_group(
self, resource_group_name: str, host_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.DedicatedHost"]:
"""Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink
property in the response to get the next page of dedicated hosts.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHost or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHostListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_host_group_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_host_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DedicatedHostListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_host_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts"} # type: ignore
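# Illustrative usage (not part of the generated client): the AsyncItemPaged
# returned above drives prepare_request/get_next/extract_data transparently,
# so callers just iterate. Resource names are placeholders.
async def print_hosts(client):  # client: an aio ComputeManagementClient
    async for host in client.dedicated_hosts.list_by_host_group("my-rg", "my-host-group"):
        print(host.name)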
|
{
"content_hash": "7475867bb91fe5615a19e9dc355503c4",
"timestamp": "",
"source": "github",
"line_count": 787,
"max_line_length": 206,
"avg_line_length": 47.326556543837356,
"alnum_prop": 0.6418675830961714,
"repo_name": "Azure/azure-sdk-for-python",
"id": "dccb98b1fcd4f136d27a771c8713ac4fbe465bb4",
"size": "37746",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/aio/operations/_dedicated_hosts_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Unit tests to cover the common module."""
import unittest
import warnings
import fake_filesystem
import fake_tempfile
import mock
import suds
import yaml
import googleads.common
import googleads.errors
class CommonTest(unittest.TestCase):
"""Tests for the googleads.common module."""
# A dictionary with all the required OAuth 2.0 keys
_OAUTH_DICT = {'client_id': 'a', 'client_secret': 'b', 'refresh_token': 'c'}
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.tempfile = fake_tempfile.FakeTempfileModule(self.filesystem)
self.fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.fake_proxy = mock.Mock()
self.fake_proxy.return_value = mock.Mock()
self.fake_proxy.return_value.proxy_host = 'ahost'
self.fake_proxy.return_value.proxy_port = 'aport'
def _CreateYamlFile(self, data, insert_oauth2_key=None):
"""Return the filename of a yaml file created for testing."""
yaml_file = self.tempfile.NamedTemporaryFile(delete=False)
with self.fake_open(yaml_file.name, 'w') as yaml_handle:
for key in data:
if key is insert_oauth2_key:
data[key].update(self._OAUTH_DICT)
yaml_handle.write(yaml.dump({key: data[key]}))
return yaml_file.name
def testLoadFromStorage_missingFile(self):
with mock.patch('googleads.common.open', self.fake_open, create=True):
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
'yaml_filename', 'woo', [], [])
def testLoadFromStorage_missingOAuthKey(self):
yaml_fname = self._CreateYamlFile({'woo': {}})
with mock.patch('googleads.common.open', self.fake_open, create=True):
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'woo', [], [])
def testLoadFromStorage_passesWithNoRequiredKeys(self):
yaml_fname = self._CreateYamlFile({'woo': {}}, 'woo')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(yaml_fname, 'woo', [], [])
mock_client.assert_called_once_with('a', 'b', 'c', None, True, None)
self.assertEqual({'oauth2_client': mock_client.return_value,
'https_proxy': None}, rval)
def testLoadFromStorage_passesWithProxy(self):
yaml_fname = self._CreateYamlFile({'adwords': {},
'proxy_info': {
'host': 'ahost',
'port': 'aport'
}}, 'adwords')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
with mock.patch('httplib2.ProxyInfo', self.fake_proxy):
rval = googleads.common.LoadFromStorage(yaml_fname, 'adwords', [], [])
mock_client.assert_called_once_with('a', 'b', 'c',
self.fake_proxy.return_value,
True, None)
self.assertEqual({'oauth2_client': mock_client.return_value,
'https_proxy': 'ahost:aport'}, rval)
def testLoadFromStorage_failsWithMisconfiguredProxy(self):
yaml_fname = self._CreateYamlFile({'adwords': {},
'proxy_info': {'host': 'ahost'}},
'adwords')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient'):
with mock.patch('googleads.common.open', self.fake_open, create=True):
with mock.patch('httplib2.ProxyInfo', self.fake_proxy):
self.assertRaises(googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'adwords', [], [])
def testLoadFromStorage_missingRequiredKey(self):
with mock.patch('googleads.common.open', self.fake_open, create=True):
# Both keys are missing.
yaml_fname = self._CreateYamlFile({'two': {}}, 'two')
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'two', ['needed', 'keys'], [])
# One key is missing.
yaml_fname = self._CreateYamlFile({'three': {'needed': 'd'}}, 'three')
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'three', ['needed', 'keys'], [])
def testLoadFromStorage(self):
# No optional keys present.
yaml_fname = self._CreateYamlFile({'one': {'needed': 'd', 'keys': 'e'}},
'one')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
yaml_fname, 'one', ['needed', 'keys'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None, True, None)
self.assertEqual({'oauth2_client': mock_client.return_value,
'needed': 'd', 'keys': 'e',
'https_proxy': None}, rval)
# The optional key is present.
yaml_fname = self._CreateYamlFile({'one': {'needed': 'd', 'keys': 'e',
'other': 'f'}}, 'one')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
yaml_fname, 'one', ['needed', 'keys'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None, True, None)
self.assertEqual({'oauth2_client': mock_client.return_value,
'needed': 'd', 'keys': 'e', 'other': 'f',
'https_proxy': None}, rval)
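# For reference, a sketch of the on-disk YAML shape these tests construct: a
# product section holding the OAuth2 keys (plus any required/optional keys)
# and an optional top-level proxy_info section. The values are the same
# placeholders the tests use; the combination of sections is illustrative.
import yaml

example_config = {
    'adwords': {
        'client_id': 'a', 'client_secret': 'b', 'refresh_token': 'c',
        'needed': 'd', 'keys': 'e',
    },
    'proxy_info': {'host': 'ahost', 'port': 'aport'},
}
print(yaml.dump(example_config))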
def testLoadFromStorage_relativePath(self):
fake_os = fake_filesystem.FakeOsModule(self.filesystem)
yaml_contents = {'one': {'needed': 'd', 'keys': 'e'}}
yaml_contents['one'].update(self._OAUTH_DICT)
self.filesystem.CreateFile('/home/test/yaml/googleads.yaml',
contents=yaml.dump(yaml_contents))
fake_os.chdir('/home/test')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.os', fake_os):
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
'yaml/googleads.yaml', 'one', ['needed', 'keys'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None, True, None)
self.assertEqual({'oauth2_client': mock_client.return_value,
'needed': 'd', 'keys': 'e',
'https_proxy': None}, rval)
def testLoadFromStorage_warningWithUnrecognizedKey(self):
yaml_fname = self._CreateYamlFile(
{'kval': {'Im': 'here', 'whats': 'this?'}}, 'kval')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with warnings.catch_warnings(record=True) as captured_warnings:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
yaml_fname, 'kval', ['Im'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None, True, None)
self.assertEqual({'oauth2_client': mock_client.return_value,
'Im': 'here', 'https_proxy': None}, rval)
self.assertEqual(len(captured_warnings), 1)
def testGenerateLibSig(self):
my_name = 'Joseph'
self.assertEqual(
' (%s, %s, %s)' % (my_name, googleads.common._COMMON_LIB_SIG,
googleads.common._PYTHON_VERSION),
googleads.common.GenerateLibSig(my_name))
def testPackForSuds(self):
factory = mock.Mock()
# Test that anything other than list, tuple, and dict pass right through.
self.assertEqual('input', googleads.common._PackForSuds('input', factory))
self.assertEqual(set([1]),
googleads.common._PackForSuds(set([1]), factory))
# Test that lists not containing dicts with xsi types return the same
# values, and test that the input list was not modified.
input_list = ['1', set([3]), {'moo': 'cow'}]
self.assertEqual(['1', set([3]), {'moo': 'cow'}],
googleads.common._PackForSuds(input_list, factory))
self.assertEqual(['1', set([3]), {'moo': 'cow'}], input_list)
# Test that dicts without xsi types return the same values, and test that
# the input dict was not modified
input_dict = {'1': 'moo', frozenset([2]): ['val']}
self.assertEqual({'1': 'moo', frozenset([2]): ['val']},
googleads.common._PackForSuds(input_dict, factory))
self.assertEqual({'1': 'moo', frozenset([2]): ['val']}, input_dict)
# Now it gets interesting... Test that a dictionary with xsi type gets
# changed into an object. Test that the input dict is unmodified.
input_dict = {'xsi_type': 'EliteCampaign', 'name': 'Sales', 'id': 123456,
'metadata': {'a': 'b'}}
factory.create.return_value = mock.MagicMock()
factory.create.return_value.__iter__.return_value = iter(
[('id', 0), ('name', 1), ('metadata', 2), ('Campaign.Type', 3),
('status', 4)])
rval = googleads.common._PackForSuds(input_dict, factory)
factory.create.assert_called_once_with('EliteCampaign')
self.assertEqual('Sales', rval.name)
self.assertEqual(123456, rval.id)
self.assertEqual({'a': 'b'}, rval.metadata)
self.assertEqual('EliteCampaign', getattr(rval, 'Campaign.Type'))
self.assertEqual(None, rval.status)
self.assertEqual({'xsi_type': 'EliteCampaign', 'name': 'Sales',
'id': 123456, 'metadata': {'a': 'b'}}, input_dict)
# Test that this all works recursively. Nest dictionaries in dictionaries in
# lists in classes.
factory = mock.Mock()
factory.create.side_effect = [mock.MagicMock(), mock.MagicMock()]
input_list = [{'xsi_type': 'EliteCampaign', 'name': 'Sales', 'id': None,
'metadata': {'xsi_type': 'metadata', 'a': {'b': 'c'}}},
{'i do not have': 'a type'}]
rval = googleads.common._PackForSuds(input_list, factory)
factory.create.assert_any_call('EliteCampaign')
factory.create.assert_any_call('metadata')
self.assertIsInstance(rval, list)
self.assertEqual('Sales', rval[0].name)
self.assertIsInstance(rval[0].id, suds.null)
self.assertEqual({'b': 'c'}, rval[0].metadata.a)
self.assertEqual({'i do not have': 'a type'}, rval[1])
self.assertEqual(
[{'xsi_type': 'EliteCampaign', 'name': 'Sales', 'id': None,
'metadata': {'xsi_type': 'metadata', 'a': {'b': 'c'}}},
{'i do not have': 'a type'}], input_list)
def testPackForSuds_secondNamespace(self):
factory = mock.Mock()
factory.create.side_effect = [suds.TypeNotFound(''), mock.MagicMock()]
input_list = {'xsi_type': 'EliteCampaign', 'name': 'Sales'}
rval = googleads.common._PackForSuds(input_list, factory)
factory.create.assert_any_call('EliteCampaign')
factory.create.assert_any_call('ns0:EliteCampaign')
self.assertEqual('Sales', rval.name)
class SudsServiceProxyTest(unittest.TestCase):
"""Tests for the googleads.common.SudsServiceProxy class."""
def testSudsServiceProxy(self):
header_handler = mock.Mock()
port = mock.Mock()
port.methods = ('SoapMethod',)
services = mock.Mock()
services.ports = [port]
client = mock.Mock()
client.wsdl.services = [services]
suds_service_wrapper = googleads.common.SudsServiceProxy(
client, header_handler)
self.assertEqual(suds_service_wrapper.SoapMethod,
suds_service_wrapper._method_proxies['SoapMethod'])
self.assertEqual(suds_service_wrapper.NotSoapMethod,
client.service.NotSoapMethod)
with mock.patch('googleads.common._PackForSuds') as mock_pack_for_suds:
mock_pack_for_suds.return_value = 'modified_test'
suds_service_wrapper.SoapMethod('test')
mock_pack_for_suds.assert_called_once_with('test', client.factory)
client.service.SoapMethod.assert_called_once_with('modified_test')
header_handler.SetHeaders.assert_called_once_with(client)
class HeaderHandlerTest(unittest.TestCase):
"""Tests for the googleads.common.HeaderHeader class."""
def testSetHeaders(self):
"""For coverage."""
self.assertRaises(
NotImplementedError, googleads.common.HeaderHandler().SetHeaders,
mock.Mock())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "873f0f42e06ea364fcaf4715a5fff642",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 80,
"avg_line_length": 45.806338028169016,
"alnum_prop": 0.6151894842032439,
"repo_name": "richardfergie/googleads-python-lib",
"id": "06cc0393fe9aed50aa44eb10a21e0ca144786779",
"size": "13627",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/common_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
"""Provides the web interface for a set of alerts and their graphs."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from google.appengine.ext import ndb
from dashboard import alerts
from dashboard import chart_handler
from dashboard import update_test_suites
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import alert_group
from dashboard.models import anomaly
from dashboard.models import page_state
# This is the max number of alerts to query at once. This is used in cases
# when we may want to query many more alerts than are actually displayed.
_QUERY_LIMIT = 5000
class GroupReportHandler(chart_handler.ChartHandler):
"""Request handler for requests for group report page."""
def get(self):
"""Renders the UI for the group report page."""
self.RenderStaticHtml('group_report.html')
def post(self):
"""Returns dynamic data for /group_report with some set of alerts.
The set of alerts is determined by the sid, keys, bug ID, AlertGroup ID,
or revision given.
Request parameters:
keys: A comma-separated list of urlsafe Anomaly keys (optional).
bug_id: A bug number on the Chromium issue tracker (optional).
rev: A revision number (optional).
sid: A hash of a group of keys from /short_uri (optional).
group_id: An AlertGroup ID (optional).
Outputs:
JSON for the /group_report page XHR request.
"""
bug_id = self.request.get('bug_id')
rev = self.request.get('rev')
keys = self.request.get('keys')
hash_code = self.request.get('sid')
group_id = self.request.get('group_id')
# sid takes precedence.
if hash_code:
state = ndb.Key(page_state.PageState, hash_code).get()
if state:
keys = json.loads(state.value)
elif keys:
keys = keys.split(',')
try:
alert_list = None
if bug_id:
try:
alert_list, _, _ = anomaly.Anomaly.QueryAsync(
bug_id=bug_id, limit=_QUERY_LIMIT).get_result()
except ValueError:
raise request_handler.InvalidInputError(
'Invalid bug ID "%s".' % bug_id)
elif keys:
alert_list = GetAlertsForKeys(keys)
elif rev:
alert_list = GetAlertsAroundRevision(rev)
elif group_id:
alert_list = GetAlertsForGroupID(group_id)
else:
raise request_handler.InvalidInputError('No anomalies specified.')
alert_dicts = alerts.AnomalyDicts(
[a for a in alert_list if a.key.kind() == 'Anomaly'])
values = {
'alert_list': alert_dicts,
'test_suites': update_test_suites.FetchCachedTestSuites(),
}
if bug_id:
values['bug_id'] = bug_id
if keys:
values['selected_keys'] = keys
self.GetDynamicVariables(values)
self.response.out.write(json.dumps(values))
except request_handler.InvalidInputError as error:
self.response.out.write(json.dumps({'error': str(error)}))
def GetAlertsAroundRevision(rev):
"""Gets the alerts whose revision range includes the given revision.
Args:
rev: A revision number, as a string.
Returns:
list of anomaly.Anomaly
"""
if not _IsInt(rev):
raise request_handler.InvalidInputError('Invalid rev "%s".' % rev)
rev = int(rev)
# We can't make a query that has two inequality filters on two different
# properties (start_revision and end_revision). Therefore we first query
# Anomaly entities based on one of these, then filter the resulting list.
anomaly_query = anomaly.Anomaly.query(anomaly.Anomaly.end_revision >= rev)
anomaly_query = anomaly_query.order(anomaly.Anomaly.end_revision)
anomalies = anomaly_query.fetch(limit=_QUERY_LIMIT)
return [a for a in anomalies if a.start_revision <= rev]
def GetAlertsForKeys(keys):
"""Get alerts for |keys|.
Query for anomalies with overlapping revision ranges. The |keys|
parameter for group_report is a comma-separated list of urlsafe strings
for Keys for Anomaly entities. (Each key corresponds to an alert)
Args:
keys: Comma-separated list of urlsafe strings for Anomaly keys.
Returns:
list of anomaly.Anomaly
"""
urlsafe_keys = keys
try:
keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
# Errors that can be thrown here include ProtocolBufferDecodeError
# in google.net.proto.ProtocolBuffer. We want to catch any errors here
# because they're almost certainly urlsafe key decoding errors.
except Exception:
raise request_handler.InvalidInputError('Invalid Anomaly key given.')
requested_anomalies = utils.GetMulti(keys)
for i, anomaly_entity in enumerate(requested_anomalies):
if anomaly_entity is None:
raise request_handler.InvalidInputError(
'No Anomaly found for key %s.' % urlsafe_keys[i])
if not requested_anomalies:
raise request_handler.InvalidInputError('No anomalies found.')
# Just an optimization because we can't fetch anomalies directly based
# on revisions. Apply some filters to reduce unrelated anomalies.
subscriptions = []
for anomaly_entity in requested_anomalies:
subscriptions.extend(anomaly_entity.subscription_names)
subscriptions = list(set(subscriptions))
min_range = utils.MinimumAlertRange(requested_anomalies)
if min_range:
anomalies, _, _ = anomaly.Anomaly.QueryAsync(
subscriptions=subscriptions, limit=_QUERY_LIMIT).get_result()
# Filter out anomalies that have been marked as invalid or ignored.
# Include all anomalies with an overlapping revision range that have
# been associated with a bug, or are not yet triaged.
requested_anomalies_set = set([a.key for a in requested_anomalies])
def _IsValidAlert(a):
if a.key in requested_anomalies_set:
return False
return a.bug_id is None or a.bug_id > 0
anomalies = [a for a in anomalies if _IsValidAlert(a)]
anomalies = _GetOverlaps(anomalies, min_range[0], min_range[1])
anomalies = requested_anomalies + anomalies
else:
anomalies = requested_anomalies
return anomalies
def GetAlertsForGroupID(group_id):
"""Get alerts for AlertGroup.
Args:
group_id: AlertGroup ID
Returns:
list of anomaly.Anomaly
"""
group = alert_group.AlertGroup.GetByID(group_id)
if not group:
raise request_handler.InvalidInputError(
'Invalid AlertGroup ID "%s".' % group_id)
return ndb.get_multi(group.anomalies)
def _IsInt(x):
"""Returns True if the input can be parsed as an int."""
try:
int(x)
return True
except ValueError:
return False
def _GetOverlaps(anomalies, start, end):
"""Gets the minimum range for the list of anomalies.
Args:
anomalies: The list of anomalies.
start: The start revision.
end: The end revision.
Returns:
A list of anomalies.
"""
return [a for a in anomalies
if a.start_revision <= end and a.end_revision >= start]
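# Self-contained check of the interval-overlap predicate used above:
# [a_start, a_end] intersects [start, end] iff a_start <= end and
# a_end >= start (touching endpoints count as overlapping).
def _overlaps(a_start, a_end, start, end):
    return a_start <= end and a_end >= start

assert _overlaps(5, 10, 8, 12)       # partial overlap
assert _overlaps(5, 10, 10, 12)      # shared endpoint
assert not _overlaps(5, 10, 11, 12)  # disjoint ranges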
|
{
"content_hash": "198a8c7e0541e015c4d5a6b51e4f6257",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 78,
"avg_line_length": 32.17972350230415,
"alnum_prop": 0.6932550479736503,
"repo_name": "endlessm/chromium-browser",
"id": "51b9365bc19d408ce1c3bb53e23f4649f39b65df",
"size": "7146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/dashboard/dashboard/group_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import json
import select
from systemd import journal
## JsonReader modifies the standard journal.Reader class
## so that the entries returned by the _Python_ journal.Reader
## can be (trivially) converted into JSON.
##
class JsonReader(journal.Reader):
def _convert_field(self, key, value):
if isinstance(value, (list, tuple)):
return value[0]
else:
return value
#j = journal.Reader() ## read _everything_ (presumes running as root)
j = JsonReader()
j.seek_tail() ## advance to the end of the journal.
j.get_next() ## make sure we've (pre)consumed everything.
p = select.poll()
p.register(j, j.get_events())
while True:
p.poll()
for entry in j:
rec = json.dumps(entry, separators=(',',':'))
print(rec)
## loops forever, use ^C to exit.
## Local Variables:
## mode: python
## End:
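## A defensive variant (assumption: depending on the python-systemd version,
## some journal fields arrive as non-string types such as uuid.UUID, which
## json.dumps rejects). Passing default=str makes the serializer fall back
## to str() for anything it cannot encode natively:
##
##     rec = json.dumps(entry, separators=(',',':'), default=str)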
|
{
"content_hash": "1ea51782723ea304c3e3631044a46bc2",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 23.7027027027027,
"alnum_prop": 0.6339794754846066,
"repo_name": "egustafson/sandbox",
"id": "a8ee4a3751296796fd7e549b257b966492b11d12",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/journald-systemd/tail_journal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "65426"
},
{
"name": "Assembly",
"bytes": "2103"
},
{
"name": "C",
"bytes": "94748"
},
{
"name": "C++",
"bytes": "52883"
},
{
"name": "Dockerfile",
"bytes": "873"
},
{
"name": "FreeMarker",
"bytes": "195"
},
{
"name": "Game Maker Language",
"bytes": "24204"
},
{
"name": "Go",
"bytes": "128092"
},
{
"name": "Groovy",
"bytes": "584"
},
{
"name": "HTML",
"bytes": "2491"
},
{
"name": "Java",
"bytes": "232698"
},
{
"name": "JavaScript",
"bytes": "278"
},
{
"name": "Lex",
"bytes": "2806"
},
{
"name": "Lua",
"bytes": "809"
},
{
"name": "M4",
"bytes": "1718"
},
{
"name": "Makefile",
"bytes": "22166"
},
{
"name": "Perl",
"bytes": "25945"
},
{
"name": "Python",
"bytes": "131732"
},
{
"name": "Roff",
"bytes": "1455"
},
{
"name": "Ruby",
"bytes": "5870"
},
{
"name": "Scala",
"bytes": "2130"
},
{
"name": "Shell",
"bytes": "7117"
},
{
"name": "Tcl",
"bytes": "4561"
},
{
"name": "TeX",
"bytes": "63201"
},
{
"name": "Yacc",
"bytes": "924"
}
],
"symlink_target": ""
}
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
import itertools
from tempfile import mkdtemp
import shutil
import pytest
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from scipy.sparse.csgraph import connected_components
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.tests.test_dist_metrics import METRICS_DEFAULT_PARAMS
from sklearn.utils._testing import assert_almost_equal, create_memmap_backed_data
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster._agglomerative import (
_hc_cut,
_TREE_BUILDERS,
linkage_tree,
_fix_connectivity,
)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics import DistanceMetric
from sklearn.metrics.pairwise import (
PAIRED_DISTANCES,
cosine_distances,
manhattan_distances,
pairwise_distances,
)
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster._hierarchical_fast import (
average_merge,
max_merge,
mst_linkage_core,
)
from sklearn.utils._fast_dict import IntFloatDict
from sklearn.utils._testing import assert_array_equal
from sklearn.datasets import make_moons, make_circles
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
with pytest.raises(ValueError):
linkage_tree(X, linkage="foo")
with pytest.raises(ValueError):
linkage_tree(X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = tree_builder(
X.T, connectivity=connectivity
)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
with pytest.raises(ValueError):
tree_builder(X.T, connectivity=np.ones((4, 4)))
# Check that fitting with no samples raises an error
with pytest.raises(ValueError):
tree_builder(X.T[:0], connectivity=connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# Specify a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
with pytest.warns(UserWarning):
children, n_nodes, n_leaves, parent = ward_tree(this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
with pytest.warns(UserWarning):
children, n_nodes, n_leaves, parent = tree_builder(
this_X.T, n_clusters=10
)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(
X.T, connectivity=connectivity
)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
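# Quick standalone check of the node-count identity asserted throughout
# these tests: a full agglomeration of n leaves performs n - 1 merges
# (one row of `children` per merge), so the tree has 2 * n - 1 nodes.
# The data shape here is an arbitrary small choice.
def test_node_count_identity():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    children, _, n_leaves, _ = ward_tree(X)
    assert n_leaves == X.shape[0]
    assert len(children) == n_leaves - 1
    assert len(children) + n_leaves == 2 * X.shape[0] - 1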
def test_zero_cosine_linkage_tree():
# Check that zero vectors in X produce an error when
# 'cosine' affinity is used
X = np.array([[0, 1], [0, 0]])
msg = "Cosine affinity cannot be used when X contains zero vectors"
with pytest.raises(ValueError, match=msg):
linkage_tree(X, affinity="cosine")
@pytest.mark.parametrize("n_clusters, distance_threshold", [(None, 0.5), (10, None)])
@pytest.mark.parametrize("compute_distances", [True, False])
@pytest.mark.parametrize("linkage", ["ward", "complete", "average", "single"])
def test_agglomerative_clustering_distances(
n_clusters, compute_distances, distance_threshold, linkage
):
# Check that when `compute_distances` is True or `distance_threshold` is
# given, the fitted model has an attribute `distances_`.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = AgglomerativeClustering(
n_clusters=n_clusters,
connectivity=connectivity,
linkage=linkage,
distance_threshold=distance_threshold,
compute_distances=compute_distances,
)
clustering.fit(X)
if compute_distances or (distance_threshold is not None):
assert hasattr(clustering, "distances_")
n_children = clustering.children_.shape[0]
n_nodes = n_children + 1
assert clustering.distances_.shape == (n_nodes - 1,)
else:
assert not hasattr(clustering, "distances_")
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average", "single"):
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage
)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity,
memory=tempdir,
linkage=linkage,
)
clustering.fit(X)
labels = clustering.labels_
assert np.size(np.unique(labels)) == 10
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage
)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_, labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert np.size(np.unique(clustering.labels_)) == 10
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(connectivity.toarray()[:10, :10]),
linkage=linkage,
)
with pytest.raises(ValueError):
clustering.fit(X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
metric="manhattan",
linkage="ward",
)
with pytest.raises(ValueError):
clustering.fit(X)
# Test using another metric than euclidean works with linkage complete
for metric in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
metric=metric,
linkage="complete",
)
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10, connectivity=None, metric=metric, linkage="complete"
)
clustering2.fit(X)
assert_almost_equal(
normalized_mutual_info_score(clustering2.labels_, clustering.labels_), 1
)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage="complete"
)
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity,
metric="precomputed",
linkage="complete",
)
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_agglomerative_clustering_memory_mapped():
"""AgglomerativeClustering must work on mem-mapped dataset.
Non-regression test for issue #19875.
"""
rng = np.random.RandomState(0)
Xmm = create_memmap_backed_data(rng.randn(50, 100))
AgglomerativeClustering(metric="euclidean", linkage="single").fit(Xmm)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert np.size(np.unique(agglo.labels_)) == 5
X_red = agglo.transform(X)
assert X_red.shape[1] == 5
X_full = agglo.inverse_transform(X_red)
assert np.unique(X_full[0]).size == 5
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
with pytest.raises(ValueError):
agglo.fit(X[:0])
def test_single_linkage_clustering():
# Check that we get the correct result in two emblematic cases
moons, moon_labels = make_moons(noise=0.05, random_state=42)
clustering = AgglomerativeClustering(n_clusters=2, linkage="single")
clustering.fit(moons)
assert_almost_equal(
normalized_mutual_info_score(clustering.labels_, moon_labels), 1
)
circles, circle_labels = make_circles(factor=0.5, noise=0.025, random_state=42)
clustering = AgglomerativeClustering(n_clusters=2, linkage="single")
clustering.fit(circles)
assert_almost_equal(
normalized_mutual_info_score(clustering.labels_, circle_labels), 1
)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert (co_clust[0] == co_clust[1]).all()
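# Standalone illustration of why the co-clustering comparison above is
# invariant to a relabelling of the clusters: [0, 0, 1] and [1, 1, 0]
# describe the same partition even though the raw label arrays differ.
def test_assess_same_labelling_permutation_invariance():
    cut1 = np.array([0, 0, 1])
    cut2 = np.array([1, 1, 0])  # same partition, labels permuted
    assess_same_labelling(cut1, cut2)  # passes: co-cluster matrices match
    assert not (cut1 == cut2).all()  # even though the labels differ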
def test_sparse_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non-sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = 0.1 * rng.normal(size=(n, p))
X -= 4.0 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(int, copy=False)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](
X, connectivity=connectivity
)
# Sort the order of child nodes per row for consistency
children.sort(axis=1)
assert_array_equal(
children,
children_,
"linkage tree differs from scipy impl for linkage: " + linkage,
)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
with pytest.raises(ValueError):
_hc_cut(n_leaves + 1, children, n_leaves)
# Make sure our custom mst_linkage_core gives
# the same results as scipy's builtin
@pytest.mark.parametrize("seed", range(5))
def test_vector_scikit_single_vs_scipy_single(seed):
n_samples, n_features, n_clusters = 10, 5, 3
rng = np.random.RandomState(seed)
X = 0.1 * rng.normal(size=(n_samples, n_features))
X -= 4.0 * np.arange(n_samples)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method="single")
children_scipy = out[:, :2].astype(int)
children, _, n_leaves, _ = _TREE_BUILDERS["single"](X)
# Sort the order of child nodes per row for consistency
children.sort(axis=1)
assert_array_equal(
children,
children_scipy,
"linkage tree differs from scipy impl for single linkage.",
)
cut = _hc_cut(n_clusters, children, n_leaves)
cut_scipy = _hc_cut(n_clusters, children_scipy, n_leaves)
assess_same_labelling(cut, cut_scipy)
# TODO: Remove filterwarnings in 1.3 when wminkowski is removed
@pytest.mark.filterwarnings("ignore:WMinkowskiDistance:FutureWarning:sklearn")
@pytest.mark.parametrize("metric_param_grid", METRICS_DEFAULT_PARAMS)
def test_mst_linkage_core_memory_mapped(metric_param_grid):
"""The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset.
Non-regression test for issue #19875.
"""
rng = np.random.RandomState(seed=1)
X = rng.normal(size=(20, 4))
Xmm = create_memmap_backed_data(X)
metric, param_grid = metric_param_grid
keys = param_grid.keys()
for vals in itertools.product(*param_grid.values()):
kwargs = dict(zip(keys, vals))
distance_metric = DistanceMetric.get_metric(metric, **kwargs)
mst = mst_linkage_core(X, distance_metric)
mst_mm = mst_linkage_core(Xmm, distance_metric)
np.testing.assert_equal(mst, mst_mm)
def test_identical_points():
# Ensure identical points are handled correctly when using mst with
# a sparse connectivity matrix
X = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1], [2, 2, 2], [2, 2, 2]])
true_labels = np.array([0, 0, 1, 1, 2, 2])
connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False)
connectivity = 0.5 * (connectivity + connectivity.T)
connectivity, n_components = _fix_connectivity(X, connectivity, "euclidean")
for linkage in ("single", "average", "average", "ward"):
clustering = AgglomerativeClustering(
n_clusters=3, linkage=linkage, connectivity=connectivity
)
clustering.fit(X)
assert_almost_equal(
normalized_mutual_info_score(clustering.labels_, true_labels), 1
)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array(
[
(0.014, 0.120),
(0.014, 0.099),
(0.014, 0.097),
(0.017, 0.153),
(0.017, 0.153),
(0.018, 0.153),
(0.018, 0.153),
(0.018, 0.153),
(0.018, 0.153),
(0.018, 0.153),
(0.018, 0.153),
(0.018, 0.153),
(0.018, 0.152),
(0.018, 0.149),
(0.018, 0.144),
]
)
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage="ward"
)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = 0.1 * rng.normal(size=(n, p))
X -= 4.0 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = 0.1 * rng.normal(size=(n, p))
X -= 4.0 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity, return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ["average", "complete", "single"]:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage, return_distance=True
)[-1]
unstructured_items = linkage_tree(X, linkage=linkage, return_distance=True)[
-1
]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array(
[
[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355],
]
)
# truth
linkage_X_ward = np.array(
[
[3.0, 4.0, 0.36265956, 2.0],
[1.0, 5.0, 1.77045373, 2.0],
[0.0, 2.0, 2.55760419, 2.0],
[6.0, 8.0, 9.10208346, 4.0],
[7.0, 9.0, 24.7784379, 6.0],
]
)
linkage_X_complete = np.array(
[
[3.0, 4.0, 0.36265956, 2.0],
[1.0, 5.0, 1.77045373, 2.0],
[0.0, 2.0, 2.55760419, 2.0],
[6.0, 8.0, 6.96742194, 4.0],
[7.0, 9.0, 18.77445997, 6.0],
]
)
linkage_X_average = np.array(
[
[3.0, 4.0, 0.36265956, 2.0],
[1.0, 5.0, 1.77045373, 2.0],
[0.0, 2.0, 2.55760419, 2.0],
[6.0, 8.0, 6.55832839, 4.0],
[7.0, 9.0, 15.44089605, 6.0],
]
)
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X, return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ["complete", "average", "single"]
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for linkage, X_truth in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage, return_distance=True
)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage="ward")
with pytest.warns(UserWarning):
w.fit(x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp, copy=False))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50, dtype=np.intp)[::2]
other_values = np.full(50, 0.5)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False)
)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is small, the full tree should be built,
# that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert n_nodes == n_samples - 1
# When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
# we should stop when there are n_clusters clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert n_nodes == n_samples - n_clusters
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert ignore_warnings(linkage_func)(X, connectivity=connectivity)[1] == 5
def test_affinity_passed_to_fix_connectivity():
# Test that the affinity parameter is actually passed to the pairwise
# function
size = 2
rng = np.random.RandomState(0)
X = rng.randn(size, size)
mask = np.array([True, False, False, True])
connectivity = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
class FakeAffinity:
def __init__(self):
self.counter = 0
def increment(self, *args, **kwargs):
self.counter += 1
return self.counter
fa = FakeAffinity()
linkage_tree(X, connectivity=connectivity, affinity=fa.increment)
assert fa.counter == 3
@pytest.mark.parametrize("linkage", ["ward", "complete", "average"])
def test_agglomerative_clustering_with_distance_threshold(linkage):
# Check that we obtain the correct number of clusters with
# agglomerative clustering with distance_threshold.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
# test when distance threshold is set to 10
distance_threshold = 10
for conn in [None, connectivity]:
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=distance_threshold,
connectivity=conn,
linkage=linkage,
)
clustering.fit(X)
clusters_produced = clustering.labels_
num_clusters_produced = len(np.unique(clustering.labels_))
# test if the clusters produced match the point in the linkage tree
# where the distance exceeds the threshold
tree_builder = _TREE_BUILDERS[linkage]
children, n_components, n_leaves, parent, distances = tree_builder(
X, connectivity=conn, n_clusters=None, return_distance=True
)
num_clusters_at_threshold = (
np.count_nonzero(distances >= distance_threshold) + 1
)
# test number of clusters produced
assert num_clusters_at_threshold == num_clusters_produced
# test clusters produced
clusters_at_threshold = _hc_cut(
n_clusters=num_clusters_produced, children=children, n_leaves=n_leaves
)
assert np.array_equiv(clusters_produced, clusters_at_threshold)
def test_small_distance_threshold():
rng = np.random.RandomState(0)
n_samples = 10
X = rng.randint(-300, 300, size=(n_samples, 3))
# this should result in all data in their own clusters, given that
# their pairwise distances are bigger than .1 (which may not be the case
# with a different random seed).
clustering = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.0, linkage="single"
).fit(X)
# check that the pairwise distances are indeed all larger than .1
all_distances = pairwise_distances(X, metric="minkowski", p=2)
np.fill_diagonal(all_distances, np.inf)
assert np.all(all_distances > 0.1)
assert clustering.n_clusters_ == n_samples
def test_cluster_distances_with_distance_threshold():
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randint(-10, 10, size=(n_samples, 3))
# check the distances within the clusters and with other clusters
distance_threshold = 4
clustering = AgglomerativeClustering(
n_clusters=None, distance_threshold=distance_threshold, linkage="single"
).fit(X)
labels = clustering.labels_
D = pairwise_distances(X, metric="minkowski", p=2)
# to avoid taking the 0 diagonal in min()
np.fill_diagonal(D, np.inf)
for label in np.unique(labels):
in_cluster_mask = labels == label
max_in_cluster_distance = (
D[in_cluster_mask][:, in_cluster_mask].min(axis=0).max()
)
min_out_cluster_distance = (
D[in_cluster_mask][:, ~in_cluster_mask].min(axis=0).min()
)
# single data point clusters only have that inf diagonal here
if in_cluster_mask.sum() > 1:
assert max_in_cluster_distance < distance_threshold
assert min_out_cluster_distance >= distance_threshold
@pytest.mark.parametrize("linkage", ["ward", "complete", "average"])
@pytest.mark.parametrize(
("threshold", "y_true"), [(0.5, [1, 0]), (1.0, [1, 0]), (1.5, [0, 0])]
)
def test_agglomerative_clustering_with_distance_threshold_edge_case(
linkage, threshold, y_true
):
# test boundary case of distance_threshold matching the distance
X = [[0], [1]]
clusterer = AgglomerativeClustering(
n_clusters=None, distance_threshold=threshold, linkage=linkage
)
y_pred = clusterer.fit_predict(X)
assert adjusted_rand_score(y_true, y_pred) == 1
def test_dist_threshold_invalid_parameters():
X = [[0], [1]]
with pytest.raises(ValueError, match="Exactly one of "):
AgglomerativeClustering(n_clusters=None, distance_threshold=None).fit(X)
with pytest.raises(ValueError, match="Exactly one of "):
AgglomerativeClustering(n_clusters=2, distance_threshold=1).fit(X)
X = [[0], [1]]
with pytest.raises(ValueError, match="compute_full_tree must be True if"):
AgglomerativeClustering(
n_clusters=None, distance_threshold=1, compute_full_tree=False
).fit(X)
def test_invalid_shape_precomputed_dist_matrix():
# Check that an error is raised when affinity='precomputed'
# and a non square matrix is passed (PR #16257).
rng = np.random.RandomState(0)
X = rng.rand(5, 3)
with pytest.raises(
ValueError,
match=r"Distance matrix should be square, got matrix of shape \(5, 3\)",
):
AgglomerativeClustering(metric="precomputed", linkage="complete").fit(X)
def test_precomputed_connectivity_affinity_with_2_connected_components():
"""Check that connecting components works when connectivity and
affinity are both precomputed and the number of connected components is
greater than 1. Non-regression test for #16151.
"""
connectivity_matrix = np.array(
[
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
]
)
# ensure that connectivity_matrix has two connected components
assert connected_components(connectivity_matrix)[0] == 2
rng = np.random.RandomState(0)
X = rng.randn(5, 10)
X_dist = pairwise_distances(X)
clusterer_precomputed = AgglomerativeClustering(
affinity="precomputed", connectivity=connectivity_matrix, linkage="complete"
)
msg = "Completing it to avoid stopping the tree early"
with pytest.warns(UserWarning, match=msg):
clusterer_precomputed.fit(X_dist)
clusterer = AgglomerativeClustering(
connectivity=connectivity_matrix, linkage="complete"
)
with pytest.warns(UserWarning, match=msg):
clusterer.fit(X)
assert_array_equal(clusterer.labels_, clusterer_precomputed.labels_)
assert_array_equal(clusterer.children_, clusterer_precomputed.children_)
# TODO(1.4): Remove
def test_deprecate_affinity():
rng = np.random.RandomState(42)
X = rng.randn(50, 10)
af = AgglomerativeClustering(affinity="euclidean")
msg = (
"Attribute `affinity` was deprecated in version 1.2 and will be removed in 1.4."
" Use `metric` instead"
)
with pytest.warns(FutureWarning, match=msg):
af.fit(X)
with pytest.warns(FutureWarning, match=msg):
af.fit_predict(X)
af = AgglomerativeClustering(metric="euclidean", affinity="euclidean")
msg = "Both `affinity` and `metric` attributes were set. Attribute"
with pytest.raises(ValueError, match=msg):
af.fit(X)
with pytest.raises(ValueError, match=msg):
af.fit_predict(X)
|
{
"content_hash": "2300c98d35eb69934e5558233ed7f5a9",
"timestamp": "",
"source": "github",
"line_count": 913,
"max_line_length": 88,
"avg_line_length": 35.96276013143483,
"alnum_prop": 0.6391240786989096,
"repo_name": "anntzer/scikit-learn",
"id": "b8a478d444873b8211d9e3c9d2a8283fae739321",
"size": "32834",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "sklearn/cluster/tests/test_hierarchical.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "667491"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10429261"
},
{
"name": "Shell",
"bytes": "43325"
}
],
"symlink_target": ""
}
|
'''Utilities for validation system.'''
# cpennello 2014-08-21 From cr.util.
def kooljoin(word, seq):
    '''Comma-join a sequence of strings, inserting word before the final
    sequence element if there are more than two elements. Omits commas and
    uses only the word to join the sequence elements if there are fewer
    than three elements.
    '''
word += ' '
if len(seq) < 3: return (' ' + word).join(seq)
seq = list(seq)
seq[-1] = word + seq[-1]
return ', '.join(seq)
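# Usage sketch (doctest-style, not part of the original module):
#   >>> kooljoin('and', ['spam', 'eggs'])
#   'spam and eggs'
#   >>> kooljoin('and', ['spam', 'eggs', 'ham'])
#   'spam, eggs, and ham'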
|
{
"content_hash": "3fa61b98b8d49f0317fc4b517e419ad7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 72,
"avg_line_length": 31.733333333333334,
"alnum_prop": 0.6722689075630253,
"repo_name": "crunchyroll/pyvalid",
"id": "3bc7fbdbc49e5a91c29c847238acc0c1fe18b4ca",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "valid/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18572"
}
],
"symlink_target": ""
}
|
from quantum.tests.unit.linuxbridge import test_linuxbridge_plugin
from quantum.tests.unit.openvswitch import test_agent_scheduler
class LbAgentSchedulerTestCase(
test_agent_scheduler.OvsAgentSchedulerTestCase):
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
class LbL3AgentNotifierTestCase(
test_agent_scheduler.OvsL3AgentNotifierTestCase):
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
class LbDhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase):
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
|
{
"content_hash": "82bdbfcfe105c426f9a619abfa80ce49",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 32.88235294117647,
"alnum_prop": 0.817531305903399,
"repo_name": "linvictor88/vse-lbaas-driver",
"id": "4506f60688276d9b5bd23ca64918ede70d5b55a6",
"size": "1150",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "quantum/tests/unit/linuxbridge/test_agent_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4400210"
},
{
"name": "Shell",
"bytes": "9109"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_sys_db
short_description: BIG-IP sys db module
description:
- Displays or modifies bigdb database entries.
version_added: "2.4"
author:
- "Eric Jacob (@erjac77)"
options:
name:
description:
- Specifies unique name for the component.
required: true
value:
description:
- Specifies the value to which you want to set the specified database entry.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Disable SYS DB Setup Utility Wizard
f5bigip_sys_db:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: 'setup.run'
value: 'false'
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import AnsibleF5Error
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
# reset_to_default=dict(type='bool'),
value=dict(type='str')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
del argument_spec['partition']
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpSysDb(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'read': self._api.tm.sys.dbs.db.load,
'update': self._api.tm.sys.dbs.db.update,
'exists': self._api.tm.sys.dbs.db.exists
}
def _create(self):
raise AnsibleF5Error("%s does not support create" % self.__class__.__name__)
def _delete(self):
raise AnsibleF5Error("%s does not support delete" % self.__class__.__name__)
def flush(self):
result = dict()
has_changed = self._present()
result.update(dict(changed=has_changed))
return result
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpSysDb(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
|
{
"content_hash": "fff5ab0b4df012971286a6ba283da1b7",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 110,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.629154078549849,
"repo_name": "erjac77/ansible-module-f5bigip",
"id": "3ce22d43c2d2deb0afeb03b4fe119f80b1a622f8",
"size": "3293",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "library/f5bigip_sys_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1183958"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import imageio
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="cocostuff, cityscapes", default=None, type=str)
parser.add_argument(
'--outdir', help="output dir for json files", default=None, type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted",
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def convert_coco_stuff_mat(data_dir, out_dir):
"""Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO."""
sets = ['train', 'val']
categories = []
json_name = 'coco_stuff_%s.json'
ann_dict = {}
for data_set in sets:
file_list = os.path.join(data_dir, '%s.txt')
images = []
with open(file_list % data_set) as f:
for img_id, img_name in enumerate(f):
img_name = img_name.replace('coco', 'COCO').strip('\n')
image = {}
mat_file = os.path.join(
data_dir, 'annotations/%s.mat' % img_name)
data = h5py.File(mat_file, 'r')
labelMap = data.get('S')
if len(categories) == 0:
labelNames = data.get('names')
for idx, n in enumerate(labelNames):
categories.append(
{"id": idx, "name": ''.join(chr(i) for i in data[
n[0]])})
ann_dict['categories'] = categories
imageio.imsave(
os.path.join(data_dir, img_name + '.png'), labelMap)
image['width'] = labelMap.shape[0]
image['height'] = labelMap.shape[1]
image['file_name'] = img_name
image['seg_file_name'] = img_name
image['id'] = img_id
images.append(image)
ann_dict['images'] = images
print("Num images: %s" % len(images))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
# for Cityscapes
def getLabelID(self, instID):
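    # Cityscapes encodes "instance" pixels as labelId * 1000 + instanceIndex;
    # ids below 1000 are bare label ids, so recover the label id accordingly.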
if (instID < 1000):
return instID
else:
return int(instID / 1000)
def convert_cityscapes_instance_only(
data_dir, out_dir):
"""Convert from cityscapes format to COCO instance seg format - polygons"""
sets = [
'gtFine_val',
# 'gtFine_train',
# 'gtFine_test',
# 'gtCoarse_train',
# 'gtCoarse_val',
# 'gtCoarse_train_extra'
]
ann_dirs = [
'gtFine_trainvaltest/gtFine/val',
# 'gtFine_trainvaltest/gtFine/train',
# 'gtFine_trainvaltest/gtFine/test',
# 'gtCoarse/train',
# 'gtCoarse/train_extra',
# 'gtCoarse/val'
]
json_name = 'instancesonly_filtered_%s.json'
ends_in = '%s_polygons.json'
img_id = 0
ann_id = 0
cat_id = 1
category_dict = {}
category_instancesonly = [
'person',
'rider',
'car',
'truck',
'bus',
'train',
'motorcycle',
'bicycle',
]
for data_set, ann_dir in zip(sets, ann_dirs):
print('Starting %s' % data_set)
ann_dict = {}
images = []
annotations = []
ann_dir = os.path.join(data_dir, ann_dir)
for root, _, files in os.walk(ann_dir):
for filename in files:
if filename.endswith(ends_in % data_set.split('_')[0]):
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
json_ann = json.load(open(os.path.join(root, filename)))
image = {}
image['id'] = img_id
img_id += 1
image['width'] = json_ann['imgWidth']
image['height'] = json_ann['imgHeight']
image['file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
image['seg_file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + \
'%s_instanceIds.png' % data_set.split('_')[0]
images.append(image)
fullname = os.path.join(root, image['seg_file_name'])
objects = cs.instances2dict_with_polygons(
[fullname], verbose=False)[fullname]
for object_cls in objects:
if object_cls not in category_instancesonly:
continue # skip non-instance categories
for obj in objects[object_cls]:
                            if obj['contours'] == []:
                                print('Warning: empty contours.')
                                continue  # skip objects with empty contours
                            len_p = [len(p) for p in obj['contours']]
                            if min(len_p) <= 4:
                                print('Warning: invalid contours.')
                                continue  # skip objects with degenerate contours
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = obj['contours']
if object_cls not in category_dict:
category_dict[object_cls] = cat_id
cat_id += 1
ann['category_id'] = category_dict[object_cls]
ann['iscrowd'] = 0
ann['area'] = obj['pixelCount']
ann['bbox'] = bboxs_util.xyxy_to_xywh(
segms_util.polys_to_boxes(
[ann['segmentation']])).tolist()[0]
annotations.append(ann)
ann_dict['images'] = images
categories = [{"id": category_dict[name], "name": name} for name in
category_dict]
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
if __name__ == '__main__':
args = parse_args()
if args.dataset == "cityscapes_instance_only":
convert_cityscapes_instance_only(args.datadir, args.outdir)
elif args.dataset == "cocostuff":
convert_coco_stuff_mat(args.datadir, args.outdir)
else:
print("Dataset not supported: %s" % args.dataset)
|
{
"content_hash": "51f7d0400d4ec25cbe8242ea11894b21",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 80,
"avg_line_length": 37.58910891089109,
"alnum_prop": 0.49861714737258,
"repo_name": "facebookresearch/Detectron",
"id": "3583eca1aef66f37dbc9621ff006cde5885233f2",
"size": "8287",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/convert_cityscapes_to_coco.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2781"
},
{
"name": "CMake",
"bytes": "34081"
},
{
"name": "Cuda",
"bytes": "1685"
},
{
"name": "Dockerfile",
"bytes": "742"
},
{
"name": "MATLAB",
"bytes": "1821"
},
{
"name": "Makefile",
"bytes": "487"
},
{
"name": "Python",
"bytes": "684254"
}
],
"symlink_target": ""
}
|
from itertools import chain
import xadmin
from django import forms
from django.db.models import ManyToManyField
from django.forms.utils import flatatt
from django.template import loader
from django.utils.encoding import force_text
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from xadmin.util import vendor, DJANGO_11
from xadmin.views import BaseAdminPlugin, ModelFormAdminView
class SelectMultipleTransfer(forms.SelectMultiple):
@property
def media(self):
return vendor('xadmin.widget.select-transfer.js', 'xadmin.widget.select-transfer.css')
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(SelectMultipleTransfer, self).__init__(attrs, choices)
def render_opt(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
return u'<option value="%s">%s</option>' % (
escape(option_value), conditional_escape(force_text(option_label))), bool(option_value in selected_choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = ''
if self.is_stacked:
attrs['class'] += 'stacked'
if value is None:
value = []
if DJANGO_11:
final_attrs = self.build_attrs(attrs, extra_attrs={'name': name})
else:
final_attrs = self.build_attrs(attrs, name=name)
selected_choices = set(force_text(v) for v in value)
available_output = []
chosen_output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
available_output.append(u'<optgroup label="%s">' %
escape(force_text(option_value)))
for option in option_label:
output, selected = self.render_opt(
selected_choices, *option)
if selected:
chosen_output.append(output)
else:
available_output.append(output)
available_output.append(u'</optgroup>')
else:
output, selected = self.render_opt(
selected_choices, option_value, option_label)
if selected:
chosen_output.append(output)
else:
available_output.append(output)
context = {
'verbose_name': self.verbose_name,
'attrs': attrs,
'field_id': attrs['id'],
'flatatts': flatatt(final_attrs),
'available_options': u'\n'.join(available_output),
'chosen_options': u'\n'.join(chosen_output),
}
return mark_safe(loader.render_to_string('xadmin/forms/transfer.html', context))
class SelectMultipleDropdown(forms.SelectMultiple):
@property
def media(self):
return vendor('multiselect.js', 'multiselect.css', 'xadmin.widget.multiselect.js')
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = 'selectmultiple selectdropdown'
return super(SelectMultipleDropdown, self).render(name, value, attrs, choices)
class M2MSelectPlugin(BaseAdminPlugin):
def init_request(self, *args, **kwargs):
return hasattr(self.admin_view, 'style_fields') and \
(
'm2m_transfer' in self.admin_view.style_fields.values() or
'm2m_dropdown' in self.admin_view.style_fields.values()
)
def get_field_style(self, attrs, db_field, style, **kwargs):
if style == 'm2m_transfer' and isinstance(db_field, ManyToManyField):
return {'widget': SelectMultipleTransfer(db_field.verbose_name, False), 'help_text': ''}
if style == 'm2m_dropdown' and isinstance(db_field, ManyToManyField):
return {'widget': SelectMultipleDropdown, 'help_text': ''}
return attrs
xadmin.site.register_plugin(M2MSelectPlugin, ModelFormAdminView)
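# Usage sketch (hypothetical model and admin class; not part of this module):
# the plugin only activates when a ModelFormAdminView declares one of the m2m
# styles in style_fields, e.g.:
#
#   class BookAdmin(object):
#       style_fields = {'authors': 'm2m_transfer'}   # or 'm2m_dropdown'
#   xadmin.site.register(Book, BookAdmin)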
|
{
"content_hash": "7a2473b044c3843ca018b5eca5119e32",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 119,
"avg_line_length": 39.03669724770642,
"alnum_prop": 0.6072855464159812,
"repo_name": "LennonChin/Django-Practices",
"id": "38e0c917da19e774344e5265b4132751658cb62c",
"size": "4270",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "MxShop/extra_apps/xadmin/plugins/multiselect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "513444"
},
{
"name": "HTML",
"bytes": "501361"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "1810740"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "1739514"
}
],
"symlink_target": ""
}
|
import cherrypy
from girder.models.setting import Setting
from girder.settings import SettingKey
from pytest_girder.assertions import assertStatusOk
from pytest_girder.utils import getResponseBody
from girder.utility.webroot import WebrootBase
def testEscapeJavascript():
# Don't escape alphanumeric characters
alphaNumString = 'abcxyz0189ABCXYZ'
assert WebrootBase._escapeJavascript(alphaNumString) == alphaNumString
# Do escape everything else
dangerString = 'ab\'"<;>\\YZ'
assert WebrootBase._escapeJavascript(dangerString) == \
'ab\\u0027\\u0022\\u003C\\u003B\\u003E\\u005CYZ'
def testAccessWebRoot(server):
"""
Requests the webroot and tests the existence of several
elements in the returned html
"""
# Check webroot default settings
defaultEmailAddress = Setting().getDefault(SettingKey.CONTACT_EMAIL_ADDRESS)
defaultBrandName = Setting().getDefault(SettingKey.BRAND_NAME)
resp = server.request(path='/', method='GET', isJson=False, prefix='')
assertStatusOk(resp)
body = getResponseBody(resp)
assert WebrootBase._escapeJavascript(defaultEmailAddress) in body
assert '<title>%s</title>' % defaultBrandName in body
assert 'girder_app.min.js' in body
assert 'girder_lib.min.js' in body
# Change webroot settings
Setting().set(SettingKey.CONTACT_EMAIL_ADDRESS, 'foo@bar.com')
Setting().set(SettingKey.BRAND_NAME, 'FooBar')
resp = server.request(path='/', method='GET', isJson=False, prefix='')
assertStatusOk(resp)
body = getResponseBody(resp)
assert WebrootBase._escapeJavascript('foo@bar.com') in body
assert '<title>FooBar</title>' in body
# Remove webroot settings
Setting().unset(SettingKey.CONTACT_EMAIL_ADDRESS)
Setting().unset(SettingKey.BRAND_NAME)
resp = server.request(path='/', method='GET', isJson=False, prefix='')
assertStatusOk(resp)
body = getResponseBody(resp)
assert WebrootBase._escapeJavascript(defaultEmailAddress) in body
assert '<title>%s</title>' % defaultBrandName in body
def testWebRootProperlyHandlesCustomStaticPublicPath(server):
cherrypy.config['server']['static_public_path'] = 'http://my-cdn-url.com/static'
resp = server.request(path='/', method='GET', isJson=False, prefix='')
assertStatusOk(resp)
body = getResponseBody(resp)
assert 'href="http://my-cdn-url.com/static/built/Girder_Favicon.png"' in body
# Same assertion should hold true for Swagger
resp = server.request(path='/', method='GET', isJson=False)
assertStatusOk(resp)
body = getResponseBody(resp)
assert 'href="http://my-cdn-url.com/static/built/Girder_Favicon.png"' in body
cherrypy.config['server']['static_public_path'] = '/static'
def testWebRootTemplateFilename():
"""
Test WebrootBase.templateFilename attribute after initialization
and after setting a custom template path.
"""
webroot = WebrootBase(templatePath='/girder/base_template.mako')
assert webroot.templateFilename == 'base_template.mako'
webroot.setTemplatePath('/plugin/custom_template.mako')
assert webroot.templateFilename == 'custom_template.mako'
|
{
"content_hash": "63548012fda91e381f0928b76eae2b2d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 84,
"avg_line_length": 37.294117647058826,
"alnum_prop": 0.7233438485804417,
"repo_name": "jbeezley/girder",
"id": "3cd0af908b0a70a7bdd8a9017ed38bc5aad8761c",
"size": "3194",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/test_webroot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "27843"
},
{
"name": "CSS",
"bytes": "54063"
},
{
"name": "Dockerfile",
"bytes": "2025"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "136378"
},
{
"name": "JavaScript",
"bytes": "1121709"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Python",
"bytes": "1986658"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "2205"
}
],
"symlink_target": ""
}
|
import logging
from django.conf import settings
from django_rq import job
from webhooks.senders.base import Senderable
from ..models import WebhookTarget
# For use with custom user models: names the attribute on the owner object that is serialized into the webhook payload
WEBHOOK_OWNER_FIELD = getattr(settings, "WEBHOOK_OWNER_FIELD", "username")
# List the attempts as an iterable of integers.
# Each number represents the amount of time to be slept between attempts.
# The first number should always be 0 so no time is wasted.
WEBHOOK_ATTEMPTS = getattr(settings, "WEBHOOK_ATTEMPTS", (0, 15, 30, 60))
logger = logging.getLogger(__name__)
class DjangoRQSenderable(Senderable):
def notify(self, message):
logger.info(message)
@job
def worker(wrapped, dkwargs, hash_value=None, *args, **kwargs):
"""
This is an asynchronous sender callable that uses the Django ORM to store
webhooks. Redis is used to handle the message queue.
dkwargs argument requires the following key/values:
:event: A string representing an event.
    kwargs argument requires the following key/values:
    :owner: The user who created/owns the event
    :identifier: The identifier of the WebhookTarget to deliver to
    """
if "event" not in dkwargs:
msg = "djwebhooks.decorators.redis_hook requires an 'event' argument in the decorator."
raise TypeError(msg)
event = dkwargs['event']
if "owner" not in kwargs:
msg = "djwebhooks.senders.redis_callable requires an 'owner' argument in the decorated function."
raise TypeError(msg)
owner = kwargs['owner']
if "identifier" not in kwargs:
msg = "djwebhooks.senders.orm_callable requires an 'identifier' argument in the decorated function."
raise TypeError(msg)
identifier = kwargs['identifier']
senderobj = DjangoRQSenderable(
wrapped, dkwargs, hash_value, WEBHOOK_ATTEMPTS, *args, **kwargs
)
# Add the webhook object just so it's around
# TODO - error handling if this can't be found
senderobj.webhook_target = WebhookTarget.objects.get(
event=event,
owner=owner,
identifier=identifier
)
# Get the target url and add it
senderobj.url = senderobj.webhook_target.target_url
    # Get the payload. This overrides the senderobj.payload property.
senderobj.payload = senderobj.get_payload()
# Get the creator and add it to the payload.
senderobj.payload['owner'] = getattr(kwargs['owner'], WEBHOOK_OWNER_FIELD)
# get the event and add it to the payload
senderobj.payload['event'] = dkwargs['event']
return senderobj.send()
def redisq_callable(wrapped, dkwargs, hash_value=None, *args, **kwargs):
logger.debug("Starting async")
job = worker(wrapped, dkwargs, hash_value, *args, **kwargs)
logger.debug("Ending async")
return job
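# Usage sketch (hypothetical decorator and model; not part of this module):
# redisq_callable is meant to back a webhook decorator, and the decorated
# function must be called with the 'owner' and 'identifier' kwargs the worker
# requires, while the decorator supplies 'event' via dkwargs:
#
#   @hook(event="order.shipped")          # 'hook' is a hypothetical decorator
#   def ship_order(order, owner=None, identifier=None):
#       return {"order_id": order.pk}
#
#   ship_order(order, owner=request.user, identifier="store-1")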
|
{
"content_hash": "2793c364630659956f3443222912753e",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 108,
"avg_line_length": 31.96590909090909,
"alnum_prop": 0.6910771418414504,
"repo_name": "pydanny/dj-webhooks",
"id": "15823ee1a010ed0be306a3d5c9b893280dd4a6c7",
"size": "2837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djwebhooks/senders/redisq.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1331"
},
{
"name": "Python",
"bytes": "43824"
}
],
"symlink_target": ""
}
|
'''
@author Fabio Zadrozny
'''
import sys
import os
try:
import __builtin__ #@UnusedImport
BUILTIN_MOD = '__builtin__'
except ImportError:
BUILTIN_MOD = 'builtins'
if sys.platform.find('java') == -1:
HAS_WX = False
import unittest
try:
import _pydev_imports_tipper
except:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import _pydev_imports_tipper
import inspect
class Test(unittest.TestCase):
def p(self, t):
for a in t:
sys.stdout.write('%s\n' % (a,))
def testImports3(self):
tip = _pydev_imports_tipper.GenerateTip('os')
ret = self.assertIn('path', tip)
self.assertEquals('', ret[2])
def testImports2(self):
try:
tip = _pydev_imports_tipper.GenerateTip('OpenGL.GLUT')
self.assertIn('glutDisplayFunc', tip)
self.assertIn('glutInitDisplayMode', tip)
except ImportError:
pass
def testImports4(self):
try:
tip = _pydev_imports_tipper.GenerateTip('mx.DateTime.mxDateTime.mxDateTime')
self.assertIn('now', tip)
except ImportError:
pass
def testImports5(self):
tip = _pydev_imports_tipper.GenerateTip('%s.list' % BUILTIN_MOD)
s = self.assertIn('sort', tip)
self.CheckArgs(
s,
'(cmp=None, key=None, reverse=False)',
'(self, object cmp, object key, bool reverse)',
'(self, cmp: object, key: object, reverse: bool)',
'(key=None, reverse=False)',
)
def testImports2a(self):
tips = _pydev_imports_tipper.GenerateTip('%s.RuntimeError' % BUILTIN_MOD)
self.assertIn('__doc__', tips)
def testImports2b(self):
try:
file
except:
pass
else:
tips = _pydev_imports_tipper.GenerateTip('%s' % BUILTIN_MOD)
t = self.assertIn('file' , tips)
self.assert_('->' in t[1].strip() or 'file' in t[1])
def testImports2c(self):
try:
file # file is not available on py 3
except:
pass
else:
tips = _pydev_imports_tipper.GenerateTip('%s.file' % BUILTIN_MOD)
t = self.assertIn('readlines' , tips)
self.assert_('->' in t[1] or 'sizehint' in t[1])
def testImports(self):
'''
You can print_ the results to check...
'''
if HAS_WX:
tip = _pydev_imports_tipper.GenerateTip('wxPython.wx')
self.assertIn('wxApp' , tip)
tip = _pydev_imports_tipper.GenerateTip('wxPython.wx.wxApp')
try:
tip = _pydev_imports_tipper.GenerateTip('qt')
self.assertIn('QWidget' , tip)
self.assertIn('QDialog' , tip)
tip = _pydev_imports_tipper.GenerateTip('qt.QWidget')
self.assertIn('rect' , tip)
self.assertIn('rect' , tip)
self.assertIn('AltButton' , tip)
tip = _pydev_imports_tipper.GenerateTip('qt.QWidget.AltButton')
self.assertIn('__xor__' , tip)
tip = _pydev_imports_tipper.GenerateTip('qt.QWidget.AltButton.__xor__')
self.assertIn('__class__' , tip)
except ImportError:
pass
tip = _pydev_imports_tipper.GenerateTip(BUILTIN_MOD)
# for t in tip[1]:
# print_ t
self.assertIn('object' , tip)
self.assertIn('tuple' , tip)
self.assertIn('list' , tip)
self.assertIn('RuntimeError' , tip)
self.assertIn('RuntimeWarning' , tip)
# Remove cmp as it's not available on py 3
#t = self.assertIn('cmp' , tip)
#self.CheckArgs(t, '(x, y)', '(object x, object y)', '(x: object, y: object)') #args
t = self.assertIn('isinstance' , tip)
self.CheckArgs(t, '(object, class_or_type_or_tuple)', '(object o, type typeinfo)', '(o: object, typeinfo: type)') #args
t = self.assertIn('compile' , tip)
self.CheckArgs(t, '(source, filename, mode)', '()', '(o: object, name: str, val: object)') #args
t = self.assertIn('setattr' , tip)
self.CheckArgs(t, '(object, name, value)', '(object o, str name, object val)', '(o: object, name: str, val: object)') #args
try:
import compiler
compiler_module = 'compiler'
except ImportError:
try:
import ast
compiler_module = 'ast'
except ImportError:
compiler_module = None
if compiler_module is not None: #Not available in iron python
tip = _pydev_imports_tipper.GenerateTip(compiler_module)
if compiler_module == 'compiler':
self.assertArgs('parse', '(buf, mode)', tip)
self.assertArgs('walk', '(tree, visitor, walker, verbose)', tip)
self.assertIn('parseFile' , tip)
else:
self.assertArgs('parse', '(source, filename, mode)', tip)
self.assertArgs('walk', '(node)', tip)
self.assertIn('parse' , tip)
def CheckArgs(self, t, *expected):
for x in expected:
if x == t[2]:
return
self.fail('Found: %s. Expected: %s' % (t[2], expected))
def assertArgs(self, tok, args, tips):
for a in tips[1]:
if tok == a[0]:
self.assertEquals(args, a[2])
return
        raise AssertionError('%s not in %s' % (tok, tips))
def assertIn(self, tok, tips):
for a in tips[1]:
if tok == a[0]:
return a
raise AssertionError('%s not in %s' % (tok, tips))
def testSearch(self):
s = _pydev_imports_tipper.Search('inspect.ismodule')
(f, line, col), foundAs = s
self.assert_(line > 0)
def testDotNetLibraries(self):
if sys.platform == 'cli':
tip = _pydev_imports_tipper.GenerateTip('System.Drawing')
self.assertIn('Brushes' , tip)
tip = _pydev_imports_tipper.GenerateTip('System.Drawing.Brushes')
self.assertIn('Aqua' , tip)
def testInspect(self):
class C(object):
def metA(self, a, b):
pass
obj = C.metA
if inspect.ismethod (obj):
pass
# print_ obj.im_func
# print_ inspect.getargspec(obj.im_func)
def suite():
s = unittest.TestSuite()
s.addTest(Test("testImports5"))
unittest.TextTestRunner(verbosity=2).run(s)
if __name__ == '__main__':
if sys.platform.find('java') == -1:
# suite()
unittest.main()
else:
sys.stdout.write('Not running python tests in platform: %s\n' % (sys.platform,))
|
{
"content_hash": "4e3c85d5377219c7d83cfe2ed4a20a6c",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 135,
"avg_line_length": 34.222727272727276,
"alnum_prop": 0.48638597423296587,
"repo_name": "liangazhou/django-rdp",
"id": "830cb49dfec15c0efa8cd1e537ad259ed04898f5",
"size": "7529",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "packages/PyDev/plugins/org.python.pydev_4.4.0.201510052309/pysrc/tests/test_simpleTipper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22310"
},
{
"name": "CSS",
"bytes": "5463444"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "Groff",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "439341404"
},
{
"name": "JavaScript",
"bytes": "19561573"
},
{
"name": "PHP",
"bytes": "94083"
},
{
"name": "Perl",
"bytes": "9844"
},
{
"name": "Python",
"bytes": "8069"
},
{
"name": "Shell",
"bytes": "11480"
},
{
"name": "XSLT",
"bytes": "224454"
}
],
"symlink_target": ""
}
|
import yaml
import json
import requests
import sys
import webbrowser
from fastscore.suite import Connect
from os.path import expanduser
from ..colors import tcol
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2.rfc6749.clients import WebApplicationClient
from os import environ
if sys.version_info >= (3, 0):
    from http.server import HTTPServer, BaseHTTPRequestHandler
    raw_input = input  # keep the py2 builtin name available on py3
else:
    from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
CONFIG_PATH = expanduser('~/.fastscore.oauth2')
DEFAULT_CONFIG = {
'client-id': None,
'requires-client-secret': True,
'client-secret': None,
'has-discovery-endpoint': False,
'discovery-endpoint': None,
'authorization-url': None,
'token-url': None,
'scope': 'email'
}
REDIRECT_URI = 'http://localhost:1234/auth_callback'
def read_config():
try:
with open(CONFIG_PATH, 'r') as f:
return yaml.load(f, Loader=yaml.BaseLoader)
except IOError:
return DEFAULT_CONFIG
def update_config(new_config):
with open(CONFIG_PATH, 'w') as f:
yaml.dump(new_config, stream=f)
def login_oauth2(connect, verbose, discover_endpoint=None, client_id=None, client_secret=None):
conf = read_config()
    if discover_endpoint is not None:
        conf["discovery-endpoint"] = discover_endpoint
        if verbose:
            print("Using Discovery Endpoint: " + discover_endpoint)
if client_id is not None:
conf["client-id"] = client_id
        if verbose:
            print("Using Client ID: " + client_id)
if client_secret is not None:
conf["client-secret"] = client_secret
        if verbose:
            print("Using Client Secret: " + client_secret)
if discover_endpoint is not None and client_id is not None:
conf["requires-client-secret"] = False
conf["has-discovery-endpoint"] = True
        if verbose:
            print("Fetching authorization_url, token_url, scope from discovery endpoint")
        conf["authorization-url"], conf["token-url"], conf["scope"] = fetch_discovery_endpoint(conf["discovery-endpoint"])
        if verbose:
            print("Got authorization-url: " + conf["authorization-url"])
            print("Got token-url: " + conf["token-url"])
            print("Got scope: " + conf["scope"])
else:
        if verbose and (discover_endpoint is not None and client_id is None):
            print(tcol.WARNING + "Additional config needed: client_id not provided" + tcol.ENDC)
        if verbose and (client_id is not None and discover_endpoint is None):
            print(tcol.WARNING + "Additional config needed: discover_endpoint not provided" + tcol.ENDC)
conf = prompt_info(conf, verbose)
update_config(conf)
token = get_token(conf["client-id"], conf["client-secret"], conf["authorization-url"], conf["token-url"], conf["scope"])
connect.set_oauth_secret(token['access_token'])
connect.dump('.fastscore')
    print(tcol.OKGREEN + 'Authentication successful' + tcol.ENDC)
def fetch_discovery_endpoint(discovery_endpoint):
try:
ds = json.loads(requests.get(discovery_endpoint, verify=False).text)
authorization_url = ds["authorization_endpoint"]
token_url = ds["token_endpoint"]
scope = ds["scopes_supported"][0] # Take any valid scope
return authorization_url, token_url, scope
except Exception as e:
        print(tcol.FAIL + "Unable to fetch config from discovery endpoint" + tcol.ENDC)
sys.exit(1)
def prompt_info(conf, verbose):
def prompt(item, old_value):
i = raw_input(item + ' [' + str(old_value) + ']:')
return i if i else old_value
print "Please make sure http://localhost:1234/auth_callback is a whitelisted redirect_uri in your OAuth provider settings."
conf["client-id"] = prompt("Client ID", conf["client-id"])
conf["requires-client-secret"] = prompt("Does your OAuth provider require a Client Secret to perform an Authorization Code Grant Flow? [Y/N]", "Y" if conf["requires-client-secret"] else "N")
while conf["requires-client-secret"] not in ["Y", "N"]:
conf["requires-client-secret"] = prompt("Invalid input. Please enter \"Y\" or \"N\"", "")
conf["requires-client-secret"] = conf["requires-client-secret"] == "Y"
if conf["requires-client-secret"]:
conf["client-secret"] = prompt("Client Secret", conf["client-secret"])
conf["has-discovery-endpoint"] = prompt("Does your OAuth provider have a discovery endpoint? [Y/N]", "Y" if conf["has-discovery-endpoint"] else "N")
while conf["has-discovery-endpoint"] not in ["Y", "N"]:
conf["has-discovery-endpoint"] = prompt("Invalid input. Please enter \"Y\" or \"N\"", "")
conf["has-discovery-endpoint"] = conf["has-discovery-endpoint"] == "Y"
if conf["has-discovery-endpoint"]:
conf["discovery-endpoint"] = prompt("Discovery Endpoint", conf["discovery-endpoint"])
        if verbose:
            print("Fetching authorization_url, token_url, scope from discovery endpoint")
        conf["authorization-url"], conf["token-url"], conf["scope"] = fetch_discovery_endpoint(conf["discovery-endpoint"])
        if verbose:
            print("Got authorization-url: " + conf["authorization-url"])
            print("Got token-url: " + conf["token-url"])
            print("Got scope: " + conf["scope"])
else:
print """
Since your OAuth provider does not have a discovery endpoint,
you will need to provide the following information manually."""
conf["authorization-url"] = prompt("Authorization Endpoint", conf["authorization-url"])
conf["token-url"] = prompt("Token Endpoint", conf["token-url"])
conf["scope"] = prompt("Authorization Sope", conf["scope"])
return conf
TOKEN=None
class Handler(BaseHTTPRequestHandler, object):
def __init__(self, *args, **kwargs):
super(Handler, self).__init__(*args, **kwargs)
def do_GET(self):
global TOKEN
if self.path == "/":
# Redirect to Authorization URL
self.send_response(302)
self.send_header('Location', self.authorization_url)
self.end_headers()
elif self.path.startswith("/auth_callback"):
# Retrieve Auth token
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write('Authentication successful. You may now close this tab'.encode('utf-8'))
TOKEN = self.provider.fetch_token(self.token_url, client_secret=self.client_secret, client_id=self.client_id, authorization_response=self.path, verify=False, auth=False, headers={'Cookie': self.headers.get('Cookie')})
else:
self.send_error(404, 'Unexpected path, please login at http://localhost:1234')
self.finish()
def log_message(self, format, *args):
return
def get_token(
client_id=None,
client_secret=None,
authorization_url=None,
token_url=None,
scope='email'
):
Handler.client_id = client_id
Handler.client_secret = client_secret
Handler.token_url = token_url
environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
Handler.provider = OAuth2Session(client_id, redirect_uri=REDIRECT_URI, scope=[scope])
Handler.authorization_url, Handler.oauth_state = Handler.provider.authorization_url(authorization_url)
server = HTTPServer(('0.0.0.0', 1234), Handler)
print "Please open http://localhost:1234/ in your browser to authenticate."
webbrowser.open("http://localhost:1234/")
while TOKEN is None:
server.handle_request()
server.server_close()
return TOKEN
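# Usage sketch (assumed proxy URL and provider endpoints): run the interactive
# browser login and persist the OAuth token on the Connect instance.
#
#   connect = Connect("https://dashboard:8000")
#   login_oauth2(connect, verbose=True,
#                discover_endpoint="https://provider/.well-known/openid-configuration",
#                client_id="my-client-id")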
|
{
"content_hash": "13476c330c12bbb59e39bdb3c367bbb8",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 233,
"avg_line_length": 41.33510638297872,
"alnum_prop": 0.6454767726161369,
"repo_name": "opendatagroup/fastscore-cli",
"id": "77921b8d7716274e660a79f467ca6a464581877d",
"size": "7771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/auth/oauth2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1124"
},
{
"name": "Python",
"bytes": "173186"
}
],
"symlink_target": ""
}
|
import asyncio
import unittest
from unittest import mock
from aiohttp import CIMultiDict
from aiohttp.web import (
MsgType, Request, WebSocketResponse, HTTPMethodNotAllowed, HTTPBadRequest)
from aiohttp.protocol import RawRequestMessage, HttpVersion11
from aiohttp import errors, websocket
class TestWebWebSocket(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def make_request(self, method, path, headers=None):
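        # The default headers below form a minimal, valid websocket upgrade
        # handshake (RFC 6455) so WebSocketResponse.prepare() can succeed.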
self.app = mock.Mock()
if headers is None:
headers = CIMultiDict(
{'HOST': 'server.example.com',
'UPGRADE': 'websocket',
'CONNECTION': 'Upgrade',
'SEC-WEBSOCKET-KEY': 'dGhlIHNhbXBsZSBub25jZQ==',
'ORIGIN': 'http://example.com',
'SEC-WEBSOCKET-PROTOCOL': 'chat, superchat',
'SEC-WEBSOCKET-VERSION': '13'})
message = RawRequestMessage(method, path, HttpVersion11, headers,
False, False)
self.payload = mock.Mock()
self.transport = mock.Mock()
self.reader = mock.Mock()
self.writer = mock.Mock()
self.app.loop = self.loop
req = Request(self.app, message, self.payload,
self.transport, self.reader, self.writer)
return req
def test_nonstarted_ping(self):
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
ws.ping()
def test_nonstarted_pong(self):
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
ws.pong()
def test_nonstarted_send_str(self):
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
ws.send_str('string')
def test_nonstarted_send_bytes(self):
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
ws.send_bytes(b'bytes')
def test_nonstarted_close(self):
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(ws.close())
def test_nonstarted_receive_str(self):
@asyncio.coroutine
def go():
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
yield from ws.receive_str()
self.loop.run_until_complete(go())
def test_nonstarted_receive_bytes(self):
@asyncio.coroutine
def go():
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
yield from ws.receive_bytes()
self.loop.run_until_complete(go())
def test_receive_str_nonstring(self):
@asyncio.coroutine
def go():
req = self.make_request('GET', '/')
ws = WebSocketResponse()
yield from ws.prepare(req)
@asyncio.coroutine
def receive():
return websocket.Message(websocket.MSG_BINARY, b'data', b'')
ws.receive = receive
with self.assertRaises(TypeError):
yield from ws.receive_str()
self.loop.run_until_complete(go())
def test_receive_bytes_nonsbytes(self):
@asyncio.coroutine
def go():
req = self.make_request('GET', '/')
ws = WebSocketResponse()
yield from ws.prepare(req)
@asyncio.coroutine
def receive():
return websocket.Message(websocket.MSG_TEXT, 'data', b'')
ws.receive = receive
with self.assertRaises(TypeError):
yield from ws.receive_bytes()
self.loop.run_until_complete(go())
def test_send_str_nonstring(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
with self.assertRaises(TypeError):
ws.send_str(b'bytes')
def test_send_bytes_nonbytes(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
with self.assertRaises(TypeError):
ws.send_bytes('string')
def test_write(self):
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
ws.write(b'data')
def test_can_prepare_ok(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse(protocols=('chat',))
self.assertEqual((True, 'chat'), ws.can_prepare(req))
def test_can_prepare_unknown_protocol(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.assertEqual((True, None), ws.can_prepare(req))
def test_can_prepare_invalid_method(self):
req = self.make_request('POST', '/')
ws = WebSocketResponse()
self.assertEqual((False, None), ws.can_prepare(req))
def test_can_prepare_without_upgrade(self):
req = self.make_request('GET', '/',
headers=CIMultiDict({}))
ws = WebSocketResponse()
self.assertEqual((False, None), ws.can_prepare(req))
def test_can_prepare_started(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
with self.assertRaisesRegex(RuntimeError, 'Already started'):
ws.can_prepare(req)
def test_closed_after_ctor(self):
ws = WebSocketResponse()
self.assertFalse(ws.closed)
self.assertIsNone(ws.close_code)
def test_send_str_closed(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
self.loop.run_until_complete(ws.close())
with self.assertRaises(RuntimeError):
ws.send_str('string')
def test_send_bytes_closed(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
self.loop.run_until_complete(ws.close())
with self.assertRaises(RuntimeError):
ws.send_bytes(b'bytes')
def test_ping_closed(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
self.loop.run_until_complete(ws.close())
with self.assertRaises(RuntimeError):
ws.ping()
def test_pong_closed(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
self.loop.run_until_complete(ws.close())
with self.assertRaises(RuntimeError):
ws.pong()
def test_close_idempotent(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
writer = mock.Mock()
ws._writer = writer
self.assertTrue(
self.loop.run_until_complete(ws.close(code=1, message='message1')))
self.assertTrue(ws.closed)
self.assertFalse(
self.loop.run_until_complete(ws.close(code=2, message='message2')))
def test_start_invalid_method(self):
req = self.make_request('POST', '/')
ws = WebSocketResponse()
with self.assertRaises(HTTPMethodNotAllowed):
self.loop.run_until_complete(ws.prepare(req))
def test_start_without_upgrade(self):
req = self.make_request('GET', '/',
headers=CIMultiDict({}))
ws = WebSocketResponse()
with self.assertRaises(HTTPBadRequest):
self.loop.run_until_complete(ws.prepare(req))
def test_wait_closed_before_start(self):
@asyncio.coroutine
def go():
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
yield from ws.close()
self.loop.run_until_complete(go())
def test_write_eof_not_started(self):
@asyncio.coroutine
def go():
ws = WebSocketResponse()
with self.assertRaises(RuntimeError):
yield from ws.write_eof()
self.loop.run_until_complete(go())
def test_write_eof_idempotent(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
self.loop.run_until_complete(ws.close())
@asyncio.coroutine
def go():
yield from ws.write_eof()
yield from ws.write_eof()
yield from ws.write_eof()
self.loop.run_until_complete(go())
def test_receive_exc_in_reader(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
exc = ValueError()
res = asyncio.Future(loop=self.loop)
res.set_exception(exc)
ws._reader.read.return_value = res
@asyncio.coroutine
def go():
msg = yield from ws.receive()
            self.assertEqual(msg.tp, MsgType.error)
self.assertIs(msg.data, exc)
self.assertIs(ws.exception(), exc)
self.loop.run_until_complete(go())
def test_receive_cancelled(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
res = asyncio.Future(loop=self.loop)
res.set_exception(asyncio.CancelledError())
ws._reader.read.return_value = res
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, ws.receive())
def test_receive_timeouterror(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
res = asyncio.Future(loop=self.loop)
res.set_exception(asyncio.TimeoutError())
ws._reader.read.return_value = res
self.assertRaises(
asyncio.TimeoutError,
self.loop.run_until_complete, ws.receive())
def test_receive_client_disconnected(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
exc = errors.ClientDisconnectedError()
res = asyncio.Future(loop=self.loop)
res.set_exception(exc)
ws._reader.read.return_value = res
@asyncio.coroutine
def go():
msg = yield from ws.receive()
self.assertTrue(ws.closed)
            self.assertEqual(msg.tp, MsgType.close)
self.assertIs(msg.data, None)
self.assertIs(ws.exception(), None)
self.loop.run_until_complete(go())
def test_multiple_receive_on_close_connection(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
self.loop.run_until_complete(ws.close())
self.loop.run_until_complete(ws.receive())
self.loop.run_until_complete(ws.receive())
self.loop.run_until_complete(ws.receive())
self.loop.run_until_complete(ws.receive())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, ws.receive())
def test_concurrent_receive(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
ws._waiting = True
self.assertRaises(
RuntimeError, self.loop.run_until_complete, ws.receive())
def test_close_exc(self):
req = self.make_request('GET', '/')
reader = self.reader.set_parser.return_value = mock.Mock()
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
exc = ValueError()
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_exception(exc)
self.loop.run_until_complete(ws.close())
self.assertTrue(ws.closed)
self.assertIs(ws.exception(), exc)
ws._closed = False
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_exception(asyncio.CancelledError())
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, ws.close())
self.assertEqual(ws.close_code, 1006)
def test_close_exc2(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
self.loop.run_until_complete(ws.prepare(req))
exc = ValueError()
self.writer.close.side_effect = exc
ws._writer = self.writer
self.loop.run_until_complete(ws.close())
self.assertTrue(ws.closed)
self.assertIs(ws.exception(), exc)
ws._closed = False
self.writer.close.side_effect = asyncio.CancelledError()
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, ws.close())
def test_start_twice_idempotent(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse()
with self.assertWarns(DeprecationWarning):
impl1 = ws.start(req)
impl2 = ws.start(req)
self.assertIs(impl1, impl2)
def test_can_start_ok(self):
req = self.make_request('GET', '/')
ws = WebSocketResponse(protocols=('chat',))
with self.assertWarns(DeprecationWarning):
self.assertEqual((True, 'chat'), ws.can_start(req))
|
{
"content_hash": "5674d01bc60d46b06c793fc6565f334e",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 79,
"avg_line_length": 33.61576354679803,
"alnum_prop": 0.5914419695193435,
"repo_name": "noodle-learns-programming/aiohttp",
"id": "bc8a7fb11a06e45e5652fb1941fb59b73e133cc3",
"size": "13648",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_web_websocket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1404"
},
{
"name": "PowerShell",
"bytes": "3361"
},
{
"name": "Python",
"bytes": "910198"
}
],
"symlink_target": ""
}
|