code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import numpy as np
from matplotlib import pyplot as plt
from .interval import Interval
class Pbox(object):
def __init__(self, left=None, right=None, steps=200, shape=None, mean_left=None, mean_right=None, var_left=None, var_right=None, interpolation='linear'):
    """Construct a p-box (probability box) from left/right CDF-bound data.

    left, right: Interval or array-like quantile bounds.  A missing side
        defaults to a copy of the other; both missing gives the vacuous
        box (-inf, inf).
    steps: number of discretisation steps; bounds of a different length
        are resampled via interpolate().
    shape: optional distribution-family tag (e.g. 'normal').
    mean_left/mean_right, var_left/var_right: optional moment bounds,
        intersected with the moments computed from the discretised data.
    interpolation: resampling scheme passed to interpolate().
    """
    # Mirror a single provided bound onto the other side.
    if (left is not None) and (right is None):
        right = left
    # No information at all -> the vacuous p-box.
    if left is None and right is None:
        left = -np.inf
        right = np.inf
    # An Interval bound collapses to its corresponding endpoint.
    if isinstance(left, Interval):
        left = np.array([left.left()])
    if isinstance(right, Interval):
        right = np.array([right.right()])
    # Resample each bound onto the `steps` grid.
    # NOTE(review): len() fails on a bare scalar/float bound (e.g. the
    # vacuous -np.inf case above); confirm callers always pass sequences.
    # Also note left=False is passed for the left bound (and vice versa)
    # -- presumably intentional for interp_outer(); confirm.
    if len(left) != steps:
        left = interpolate(left, interpolation=interpolation, left=False, steps=steps)
    if len(right) != steps:
        right = interpolate(right, interpolation=interpolation, left=True, steps=steps)
    self.left = left
    self.right = right
    self.steps = steps
    self.n = self.steps  # alias used by the moment computations
    self.shape = shape
    # Start from vacuous moment bounds, tighten from the data, then
    # intersect with any caller-supplied bounds.
    self.mean_left = -np.inf
    self.mean_right = np.inf
    self.var_left = 0
    self.var_right = np.inf
    self._computemoments()
    if shape is not None: self.shape = shape
    if mean_left is not None: self.mean_left = np.max([mean_left, self.mean_left])
    if mean_right is not None: self.mean_right = np.min([mean_right, self.mean_right])
    if var_left is not None: self.var_left = np.max([var_left, self.var_left])
    if var_right is not None: self.var_right = np.min([var_right, self.var_right])
    self._checkmoments()
def __repr__(self):
if self.mean_left == self.mean_right:
mean_text = f'{round(self.mean_left, 4)}'
else:
mean_text = f'[{round(self.mean_left, 4)}, {round(self.mean_right, 4)}]'
if self.var_left == self.var_right:
var_text = f'{round(self.var_left, 4)}'
else:
var_text = f'[{round(self.var_left, 4)}, {round(self.var_right, 4)}]'
range_text = f'[{round(np.min([self.left, self.right]), 4), round(np.max([self.left, self.right]), 4)}'
if self.shape is None:
shape_text = ' '
else:
shape_text = f' {self.shape}' # space to start; see below lacking space
return f'Pbox: ~{shape_text}(range={range_text}, mean={mean_text}, var={var_text})'
def __iter__(self):
for val in np.array([self.left,self.right]).flatten():
yield val
def __neg__(self):
    """Negate the p-box: bounds swap sides and flip, mean bounds negate
    and swap, and the variance bounds are unchanged."""
    known_shapes = ['uniform', 'normal', 'cauchy', 'triangular', 'skew-normal']
    new_shape = self.shape if self.shape in known_shapes else ''
    negated_left = -np.flip(self.right)
    negated_right = -np.flip(self.left)
    return Pbox(
        left=negated_left,
        right=negated_right,
        shape=new_shape,
        mean_left=-self.mean_right,
        mean_right=-self.mean_left,
        var_left=self.var_left,
        var_right=self.var_right
    )
# Operator overloads delegating to the named methods with the default
# Frechet ('f') dependence assumption.
# NOTE(review): __rlt__/__rle__/__rgt__/__rge__ are not part of Python's
# data model (reflected comparisons dispatch to the mirrored operator on
# the other operand), so those four methods are never called by the
# interpreter -- confirm whether they are invoked directly anywhere.
def __lt__(self, other):
    return self.lt(other, method='f')

def __rlt__(self, other):
    return self.ge(other, method='f')

def __le__(self, other):
    return self.le(other, method='f')

def __rle__(self, other):
    return self.gt(other, method='f')

def __gt__(self, other):
    return self.gt(other, method='f')

def __rgt__(self, other):
    return self.le(other, method='f')

def __ge__(self, other):
    return self.ge(other, method='f')

def __rge__(self, other):
    return self.lt(other, method='f')

def __and__(self, other):
    return self.logicaland(other, method='f')

def __rand__(self, other):
    return self.logicaland(other, method='f')

def __or__(self, other):
    return self.logicalor(other, method='f')

def __ror__(self, other):
    return self.logicalor(other, method='f')

def __add__(self, other):
    return self.add(other, method='f')

def __radd__(self, other):
    return self.add(other, method='f')

def __sub__(self, other):
    return self.sub(other, method='f')

def __rsub__(self, other):
    # other - self == (-self) + other; rebinding `self` is local only.
    self = - self
    return self.add(other, method='f')

def __mul__(self, other):
    return self.mul(other, method='f')

def __rmul__(self, other):
    return self.mul(other, method='f')

def __truediv__(self, other):
    return self.div(other, method='f')

def __rtruediv__(self, other):
    # other / self == other * (1/self).
    try:
        return other * self.recip()
    except:  # NOTE(review): bare except silently swallows all errors
        return NotImplemented
### Local functions ###
def _computemoments(self):  # should we compute mean if it is a Cauchy, var if it's a t distribution?
    """Tighten the stored moment bounds from the discretised CDF bounds.

    The mean is bracketed by the means of the two bound arrays.  The
    variance upper bound is found by scanning candidate distributions
    that follow the left bound below a split index and the right bound
    above it.
    """
    self.mean_left = np.max([self.mean_left, np.mean(self.left)])
    self.mean_right = np.min([self.mean_right, np.mean(self.right)])
    # The variance scan only makes sense on a bounded support.
    if not (np.any(self.left <= -np.inf) or np.any(np.inf <= self.right)):
        V, JJ = 0, 0
        j = np.array(range(self.n))
        # NOTE(review): the scan index J runs from -1 to n-2, and only
        # var_right is updated (var_left is left at its prior value);
        # confirm both against the reference implementation.
        for J in np.array(range(self.n)) - 1:
            # Candidate mixture: left bound strictly below J, right bound
            # from J upward.
            ud = [*self.left[j < J], *self.right[J <= j]]
            v = sideVariance(ud)
            if V < v:
                JJ = J
                V = v
        self.var_right = V
def _checkmoments(self):
    """Intersect the stored moment bounds with the observed (dw) moments.

    If the intersection would be empty, fall back to the observed value.
    Uses the module-level helpers left()/right()/dwMean()/dwVariance().
    """
    a = Interval(self.mean_left, self.mean_right)  # mean(x) as stored
    b = dwMean(self)                               # mean observed from the bounds
    self.mean_left = np.max([left(a), left(b)])
    self.mean_right = np.min([right(a), right(b)])
    if self.mean_right < self.mean_left:
        # Empty intersection: use the observed mean.
        self.mean_left = left(b)
        self.mean_right = right(b)
    a = Interval(self.var_left, self.var_right)  # var(x) as stored
    b = dwVariance(self)                         # variance observed from the bounds
    self.var_left = np.max([left(a), left(b)])
    self.var_right = np.min([right(a), right(b)])
    if self.var_right < self.var_left:
        # Empty intersection: use the observed variance.
        self.var_left = left(b)
        self.var_right = right(b)
### Public functions ###
# "%<%" <- lt <- function(x,y) prob.pbox(frechetconv.pbox(x,negate(y),'+'),0);
# "%>%" <- gt <- function(x,y) xprob.pbox(frechetconv.pbox(y,negate(x),'+'),0)
# "%<=%" <- lte <- function(x,y) xprob.pbox(frechetconv.pbox(x,negate(y),'+'),0);
# "%>=%" <- gte <- function(x,y) prob.pbox(frechetconv.pbox(y,negate(x),'+'),0)
# "%&%" <- function(x,y) and.pbox(x,y);
# "%|%" <- function(x,y) or.pbox(x,y)
def lt(self, other, method='f'):
    """Probability (interval) that self < other, via P(self - other <= 0)."""
    b = self.add(-other, method)
    return(b.get_probability(0))  # return (self.add(-other, method)).get_probability(0)

def le(self, other, method='f'):
    """Probability (interval) that self <= other."""
    b = self.add(-other, method)
    return(b.get_probability(0))  # how is the "or equal to" affecting the calculation?

def gt(self, other, method='f'):
    """Probability (interval) that self > other, via P(other - self <= 0)."""
    # Rebinds the local name only; the caller's object is unchanged.
    self = - self
    b = self.add(other, method)
    return(b.get_probability(0))  # maybe 1-prob ?

def ge(self, other, method='f'):
    """Probability (interval) that self >= other."""
    self = - self
    b = self.add(other, method)
    return(b.get_probability(0))
#pmin.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- frechetconv.pbox(m, each, 'pmin')
# m
# }
#
#pmax.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- frechetconv.pbox(m, each, 'pmax')
# m
# }
#
#pminI.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- conv.pbox(m, each, 'pmin')
# m
# }
#
#pmaxI.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- conv.pbox(m, each, 'pmax')
# m
# }
def logicaland(self, other, method='f'):  # conjunction
    """Conjunction (AND) of two event probabilities held as p-boxes.

    'i' assumes independence (a * b); the default 'f' returns the Frechet
    envelope env(max(0, a + b - 1), min(a, b)).
    NOTE(review): the 'f' branch calls self.min/self.max and applies the
    built-in max to a Pbox expression; neither method is defined in this
    file as shown -- confirm.
    """
    if method=='i': return(self.mul(other,method)) # independence a * b
    # else if method=='p': return(self.min(other,method)) # perfect min(a, b)
    # else if method=='o': return(max(self.add(other,method)-1, 0)) # opposite max(a + b - 1, 0)
    # else if method=='+': return(self.min(other,method)) # positive env(a * b, min(a, b))
    # else if method=='-': return(self.min(other,method)) # negative env(max(a + b - 1, 0), a * b)
    # otherwise method=='f' :
    return(env(max(0, self.add(other,method) - 1), self.min(other,method)))

def logicalor(self, other, method='f'):  # disjunction
    """Disjunction (OR) of two event probabilities held as p-boxes.

    'i' assumes independence (1 - (1-a)(1-b)); the default 'f' returns the
    Frechet envelope env(max(a, b), min(1, a + b)).
    """
    if method=='i': return(1 - (1-self) * (1-other)) # independent 1 - (1 - a) * (1 - b)
    # else if method=='p': return(self.max(other,method)) # perfect max(a, b)
    # else if method=='o': return(min(self.add(other,method),1)) # opposite min(1, a + b)
    # else if method=='+': return(env(,min(self.add(other,method),1)) # positive env(max(a, b), 1 - (1 - a) * (1 - b))
    # else if method=='-': return() # negative env(1 - (1 - a) * (1 - b), min(1, a + b))
    # otherwise method=='f' :
    return(env(self.max(other,method), min(self.add(other,method),1)))
def env(self, other):
    """Envelope of this p-box with another: pointwise min of left bounds
    and max of right bounds.  Interval operands are promoted to Pbox;
    anything else yields NotImplemented."""
    if other.__class__.__name__ == 'Interval':
        other = Pbox(other, steps=self.steps)
    if other.__class__.__name__ != 'Pbox':
        return NotImplemented
    if self.steps != other.steps:
        raise ArithmeticError("Both Pboxes must have the same number of steps")
    envelope_left = np.minimum(self.left, other.left)
    envelope_right = np.maximum(self.right, other.right)
    return Pbox(
        left=envelope_left,
        right=envelope_right,
        steps=self.steps
    )
def add(self, other, method='f'):
    """Add `other` (Pbox, Interval, or number) to this p-box.

    method: 'f' Frechet (no dependence assumption), 'p' perfect,
    'o' opposite, 'i' independent.
    Returns a new Pbox; raises ArithmeticError for an unknown method or
    mismatched step counts; returns NotImplemented for unsupported
    operand types.
    """
    if method not in ['f', 'p', 'o', 'i']:
        raise ArithmeticError("Calculation method unkown")
    # Promote an Interval operand to a (degenerate) Pbox on the same grid.
    if other.__class__.__name__ == 'Interval':
        other = Pbox(other, steps=self.steps)
    if other.__class__.__name__ == 'Pbox':
        if self.steps != other.steps:
            raise ArithmeticError("Both Pboxes must have the same number of steps")
        if method == 'f':
            # Frechet convolution: for each quantile level take the
            # extreme over all ways of splitting the level between the
            # two operands.
            nleft = np.empty(self.steps)
            nright = np.empty(self.steps)
            for i in range(0, self.steps):
                j = np.array(range(i, self.steps))
                k = np.array(range(self.steps - 1, i - 1, -1))
                nleft[i] = np.min(self.right[j] + other.right[k])
                jj = np.array(range(0, i + 1))
                kk = np.array(range(i, -1, -1))
                nright[i] = np.max(self.left[jj] + other.left[kk])
        elif method == 'p':
            # Perfect dependence: add matching quantiles.
            nleft = self.left + other.left
            nright = self.right + other.right
        elif method == 'o':
            # Opposite dependence: add opposing quantiles.
            nleft = self.left + np.flip(other.left)
            nright = self.right + np.flip(other.right)
        elif method == 'i':
            # Independence: all pairwise sums, sorted.
            # NOTE(review): this produces steps**2 values while the Pbox
            # below is built with steps=self.steps, so the constructor's
            # interpolate() resamples them -- confirm intended.
            nleft = []
            nright = []
            for i in self.left:
                for j in other.left:
                    nleft.append(i + j)
            for ii in self.right:
                for jj in other.right:
                    nright.append(ii + jj)
            nleft.sort()
            nright.sort()
        return Pbox(
            left=nleft,
            right=nright,
            steps=self.steps
        )
    else:
        try:
            # Try adding a constant: bounds and mean bounds shift,
            # variance bounds are unchanged.
            if self.shape in ['uniform', 'normal', 'cauchy', 'triangular', 'skew-normal']:
                s = self.shape
            else:
                s = ''
            return Pbox(
                left=self.left + other,
                right=self.right + other,
                shape=s,
                mean_left=self.mean_left + other,
                mean_right=self.mean_right + other,
                var_left=self.var_left,
                var_right=self.var_right,
                steps=self.steps
            )
        except:
            return NotImplemented
def sub(self, other, method='f'):
    """Subtract `other` from this p-box, implemented as self + (-other).

    Negating the subtrahend turns perfect dependence into opposite
    dependence and vice versa, so 'p' and 'o' are swapped first.
    """
    dependence_swap = {'o': 'p', 'p': 'o'}
    method = dependence_swap.get(method, method)
    return self.add(-other, method)
def mul(self, other, method='f'):
    """Multiply this p-box by `other` (Pbox, Interval, or number).

    method: 'f' Frechet (no dependence assumption), 'p' perfect,
    'o' opposite, 'i' independent.  Mirrors add() with '+' replaced by '*'.
    NOTE(review): for bounds containing negative values the min/max
    pairing used here may not bracket the product -- confirm the intended
    domain (the R pba source restricts multiplication similarly).
    """
    if method not in ['f', 'p', 'o', 'i']:
        raise ArithmeticError("Calculation method unkown")
    # Promote an Interval operand to a (degenerate) Pbox on the same grid.
    if other.__class__.__name__ == 'Interval':
        other = Pbox(other, steps=self.steps)
    if other.__class__.__name__ == 'Pbox':
        if self.steps != other.steps:
            raise ArithmeticError("Both Pboxes must have the same number of steps")
        if method == 'f':
            # Frechet convolution over products of quantiles.
            nleft = np.empty(self.steps)
            nright = np.empty(self.steps)
            for i in range(0, self.steps):
                j = np.array(range(i, self.steps))
                k = np.array(range(self.steps - 1, i - 1, -1))
                nleft[i] = np.min(self.right[j] * other.right[k])
                jj = np.array(range(0, i + 1))
                kk = np.array(range(i, -1, -1))
                nright[i] = np.max(self.left[jj] * other.left[kk])
        elif method == 'p':
            # Perfect dependence: multiply matching quantiles.
            nleft = self.left * other.left
            nright = self.right * other.right
        elif method == 'o':
            # Opposite dependence: multiply opposing quantiles.
            nleft = self.left * np.flip(other.left)
            nright = self.right * np.flip(other.right)
        elif method == 'i':
            # Independence: all pairwise products, sorted (resampled by
            # the Pbox constructor below).
            nleft = []
            nright = []
            for i in self.left:
                for j in other.left:
                    nleft.append(i * j)
            for ii in self.right:
                for jj in other.right:
                    nright.append(ii * jj)
            nleft.sort()
            nright.sort()
        return Pbox(
            left=nleft,
            right=nright,
            steps=self.steps
        )
    else:
        try:
            # Try scaling by a constant: bounds and mean bounds scale.
            # NOTE(review): variance bounds are NOT rescaled (var should
            # scale by other**2), and a negative constant would not swap
            # the bounds -- mirrors add() but worth confirming.
            if self.shape in ['uniform', 'normal', 'cauchy', 'triangular', 'skew-normal']:
                s = self.shape
            else:
                s = ''
            return Pbox(
                left=self.left * other,
                right=self.right * other,
                shape=s,
                mean_left=self.mean_left * other,
                mean_right=self.mean_right * other,
                var_left=self.var_left,
                var_right=self.var_right,
                steps=self.steps
            )
        except:
            return NotImplemented
def div(self, other, method='f'):
    """Divide this p-box by `other`, implemented as self * (1/other).

    Taking the reciprocal turns perfect dependence into opposite
    dependence and vice versa, so 'p' and 'o' are swapped first.
    """
    if method == 'p':
        method = 'o'
    elif method == 'o':
        method = 'p'
    return self.mul(1 / other, method)
def recip(self):
    """Reciprocal p-box 1/x: each bound is inverted and flipped, and the
    two bounds swap roles (the reciprocal of the right bound becomes the
    new left bound)."""
    inverted_left = 1 / np.flip(self.right)
    inverted_right = 1 / np.flip(self.left)
    return Pbox(left=inverted_left, right=inverted_right, steps=self.steps)
def show(self, now=True, **kwargs):
    """Plot the two CDF bounds of the p-box.

    now: if True, call plt.show(); otherwise return the pyplot module so
        the caller can keep adding to the figure.
    kwargs: forwarded to both plt.plot() calls.
    """
    # If you want to know why numpy is the WORST thing about Python
    # see the get_x code
    left, right = self.get_x()
    y = self.get_y()
    plt.plot(left, y, **kwargs)
    plt.plot(right, y, **kwargs)
    if now:
        plt.show()
    else:
        return plt
def get_interval(self, *args):
    """Interval of values covering a given probability mass.

    get_interval(c): central interval with coverage c (c == 1 returns the
    full support); get_interval(p1, p2): interval between cumulative
    probabilities p1 and p2.  Raises for more than two arguments.
    """
    if len(args) == 1:
        if args[0] == 1:
            # Asking for whole p-box bounds.
            return Interval(min(self.left), max(self.right))
        p1 = (1 - args[0]) / 2
        p2 = 1 - p1
    elif len(args) == 2:
        p1 = args[0]
        p2 = args[1]
    else:
        raise Exception('Too many inputs')
    # Cumulative grid padded with 0 and 1 (same layout as get_y()).
    y = np.append(np.insert(np.linspace(0, 1, self.steps), 0, 0), 1)
    # Walk inward from each end to the requested probability levels.
    y1 = 0
    while y[y1] < p1:
        y1 += 1
    y2 = len(y) - 1
    while y[y2] > p2:
        y2 -= 1
    # NOTE(review): y1/y2 index the padded grid (length steps + 2) but are
    # used directly into left/right (length steps) -- possible off-by-one
    # or IndexError at the extremes; confirm.
    x1 = self.left[y1]
    x2 = self.right[y2]
    return Interval(x1, x2)
def get_probability(self, val):
    """Interval bound on the CDF at `val`, i.e. bounds on P(X <= val)."""
    # Cumulative grid padded with 0 and 1 (same layout as get_y()).
    p = np.append(np.insert(np.linspace(0, 1, self.steps), 0, 0), 1)
    # Upper probability from the left (upper CDF) bound...
    i = 0
    while i < self.steps and self.left[i] < val:
        i += 1
    ub = p[i]
    # ...lower probability from the right (lower CDF) bound.
    j = 0
    while j < self.steps and self.right[j] < val:
        j += 1
    lb = p[j]
    return Interval(lb, ub)
def support(self):
return np.linspace(0,1,self.steps)
def get_x(self):
# returns the x values for plotting
left = np.append(np.insert(self.left,0,min(self.left)),max(self.right))
right = np.append(np.insert(self.right,0,min(self.left)),max(self.right))
return left, right
def get_y(self):
# returns y values for plotting
return np.append(np.insert(np.linspace(0,1,self.steps),0,0),1)
# Public functions
# Functions
def env_int(*args):
    """Interval envelope of a mix of scalars and iterables: the smallest
    interval containing every argument."""
    lows = [min(arg) if is_iterable(arg) else arg for arg in args]
    highs = [max(arg) if is_iterable(arg) else arg for arg in args]
    return Interval(min(lows), max(highs))
def left(imp):
    """Left (lower) endpoint of an Interval/Pbox, min of an iterable, or
    the value itself for a scalar.

    NOTE(review): `pbox` is not imported in this file as shown, and
    Pbox.left is an array attribute rather than a callable -- confirm
    `imp.left()` against the actual class definitions.
    """
    if isinstance(imp, Interval) or isinstance(imp, pbox.Pbox):
        return imp.left()
    elif is_iterable(imp):
        return min(imp)
    else:
        return imp
def right(imp):
    """Right (upper) endpoint of an Interval/Pbox, max of an iterable, or
    the value itself for a scalar.

    NOTE(review): `pbox` is not imported in this file as shown -- see left().
    """
    if isinstance(imp, Interval) or isinstance(imp, pbox.Pbox):
        return imp.right()
    elif is_iterable(imp):
        return max(imp)
    else:
        return imp
def left_list(implist, verbose=False):
    """Array of left endpoints of every item in `implist` (a scalar is
    wrapped into an array unchanged)."""
    if not is_iterable(implist):
        return np.array(implist)
    return np.array(list(map(left, implist)))

def right_list(implist, verbose=False):
    """Array of right endpoints of every item in `implist` (a scalar is
    wrapped into an array unchanged)."""
    if not is_iterable(implist):
        return np.array(implist)
    return np.array(list(map(right, implist)))
def qleftquantiles(pp, x, p):  # if first p is not zero, the left tail will be -Inf
    """Left-bound quantiles at levels `pp`, given quantile values `x`
    known at cumulative probabilities `p`."""
    return [max(left_list(x)[right_list(p) <= P]) for P in pp]

def qrightquantiles(pp, x, p):  # if last p is not one, the right tail will be Inf
    """Right-bound quantiles at levels `pp`, given quantile values `x`
    known at cumulative probabilities `p`."""
    return [min(right_list(x)[P <= left_list(p)]) for P in pp]
def quantiles(x, p, steps=200):
    """Build a Pbox from quantile values `x` at cumulative probabilities `p`.

    NOTE(review): ii()/jj() and the `pbox` module reference are not
    defined in this file as shown -- confirm the missing imports.
    """
    left = qleftquantiles(ii(steps=steps), x, p)
    right = qrightquantiles(jj(steps=steps), x, p)
    return pbox.Pbox(left=left, right=right)  # quantiles are in x and the associated cumulative probabilities are in p
def interp_step(u, steps=200):
    """Step-interpolate the sorted values of `u` onto a `steps`-point grid.

    Each output point repeats the nearest-below input value, so every
    input value appears a (nearly) equal number of times.
    """
    u = np.sort(u)
    # Evenly spaced fractional indices into u; the stop value sits just
    # below len(u) so the floored index never reaches len(u).
    seq = np.linspace(start=0, stop=len(u) - 0.00001, num=steps, endpoint=True)
    # BUG FIX: the original called trunc(), which is never imported in this
    # module (NameError at runtime).  int() truncates identically for
    # these non-negative values.
    idx = np.array([int(seq_val) for seq_val in idx_source(seq)])
    return u[idx]

def idx_source(seq):
    """Trivial pass-through kept for readability of the index derivation."""
    return seq
def interp_cubicspline(vals, steps=200):
    """Cubic-spline interpolate the sorted `vals` onto a `steps`-point grid.

    The input values are placed at equally spaced cumulative-probability
    knots (1/n, 2/n, ..., 1) and the spline is evaluated at the matching
    `steps`-point grid.
    """
    # BUG FIX: the original referenced `interp.CubicSpline`, but `interp`
    # is never imported in this module (NameError at runtime).  Bind the
    # intended scipy module locally.
    from scipy import interpolate as interp
    vals = np.sort(vals)  # sort
    vals_steps = np.array(range(len(vals))) + 1
    vals_steps = vals_steps / len(vals_steps)
    steps = np.array(range(steps)) + 1
    steps = steps / len(steps)
    interped = interp.CubicSpline(vals_steps, vals)
    return interped(steps)
def interp_left(u, steps=200):
    """Resample left-bound values `u` onto a `steps` grid via outer quantiles.

    NOTE(review): ii() is not defined in this file as shown -- confirm.
    """
    p = np.array(range(len(u))) / (len(u) - 1)
    pp, x = ii(steps=steps), u
    return qleftquantiles(pp, x, p)

def interp_right(d, steps=200):
    """Resample right-bound values `d` onto a `steps` grid via outer quantiles.

    NOTE(review): jj() is not defined in this file as shown -- confirm.
    """
    p = np.array(range(len(d))) / (len(d) - 1)
    pp, x = jj(steps=steps), d
    return qrightquantiles(pp, x, p)
def interp_outer(x, left, steps=200):
    """Dispatch to the left- or right-bound outer interpolation."""
    interp_fn = interp_left if left else interp_right
    return interp_fn(x, steps=steps)
def interp_linear(V, steps=200):
    """Piecewise-linear resampling of the values `V` onto `steps` points.

    Builds an oversampled polyline through consecutive values, then picks
    `steps` evenly spaced samples from it.
    """
    m = len(V) - 1
    # Degenerate inputs: a single value repeats; a single step returns the range.
    if m == 0: return np.repeat(V, steps)
    if steps == 1: return np.array([min(V), max(V)])
    d = 1 / m
    # NOTE(review): the literal 200 here duplicates the default value of
    # `steps` -- presumably an oversampling factor, but confirm it should
    # not track the `steps` argument instead.
    n = round(d * steps * 200)
    if n == 0:
        c = V
    else:
        # Oversample each segment [V[i], V[i+1]] with n points.
        c = []
        for i in range(m):
            v = V[i]
            w = V[i + 1]
            c.extend(np.linspace(start=v, stop=w, num=n))
    # Pick `steps` evenly spaced samples from the oversampled curve.
    u = [c[round((len(c) - 1) * (k + 0) / (steps - 1))] for k in range(steps)]
    return np.array(u)
def interpolate(u, interpolation='linear', left=True, steps=200):
    """Resample `u` onto a `steps` grid using the named scheme.

    interpolation: 'outer', 'spline', 'step', or anything else for the
    default linear scheme.  `left` is only used by the 'outer' scheme to
    choose the bound direction.
    """
    if interpolation == 'outer':
        return interp_outer(u, left, steps=steps)
    dispatch = {
        'spline': interp_cubicspline,
        'step': interp_step,
    }
    chosen = dispatch.get(interpolation, interp_linear)
    return chosen(u, steps=steps)
def sideVariance(w, mu=None):
    """Population variance of `w` about `mu` (defaults to mean(w)), floored
    at zero to guard against negative rounding residue."""
    arr = w if isinstance(w, np.ndarray) else np.array(w)
    center = np.mean(arr) if mu is None else mu
    return max(0, np.mean((arr - center) ** 2))
def dwMean(pbox):
    """Interval bound on the mean of a p-box: [mean(right bound), mean(left bound)].

    NOTE(review): the parameter name shadows the `pbox` module referenced
    elsewhere in this file.
    """
    return Interval(np.mean(pbox.right), np.mean(pbox.left))
def dwVariance(pbox):
    """Interval bound on the variance of a p-box (port of the R pba dwVariance).

    Returns Interval(lower, upper) on the variance of any distribution
    lying between the two CDF bounds.
    """
    # Unbounded support: the variance can be anything non-negative.
    if np.any(np.isinf(pbox.left)) or np.any(np.isinf(pbox.right)):
        return Interval(0, np.inf)
    # Degenerate box (both bounds constant): the maximum variance of a
    # random variable confined to [a, b] is (b - a)**2 / 4.
    # BUG FIX: the original computed `** (2 / 4)`, i.e. a square root; in
    # the R source this expression parses as (right - left)^2 / 4.
    if np.all(pbox.right[0] == pbox.right) and np.all(pbox.left[0] == pbox.left):
        return Interval(0, (pbox.right[0] - pbox.left[0]) ** 2 / 4)
    # Upper variance: scan mixtures of the two bounds from the right.
    vr = sideVariance(pbox.left, np.mean(pbox.left))
    w = np.copy(pbox.left)
    n = len(pbox.left)
    for i in reversed(range(n)):
        w[i] = pbox.right[i]
        v = sideVariance(w, np.mean(w))
        if np.isnan(vr) or np.isnan(v):
            vr = np.inf
        elif vr < v:
            vr = v
    # Lower variance: zero if a constant fits inside the box, otherwise
    # scan monotone rearrangements for the smallest spread.
    if pbox.left[n - 1] <= pbox.right[0]:
        vl = 0.0
    else:
        # NOTE(review): `x` is assigned but unused below; the R source may
        # intend sideVariance(x, ...).  Left as-is to preserve behavior.
        x = pbox.right
        vl = sideVariance(w, np.mean(w))
        for i in reversed(range(n)):
            w[i] = pbox.left[i]
            here = w[i]
            if 1 < i:
                # Propagate the new value left to keep w non-decreasing.
                for j in reversed(range(i - 1)):
                    if w[i] < w[j]:
                        w[j] = here
            v = sideVariance(w, np.mean(w))
            if np.isnan(vl) or np.isnan(v):
                vl = 0
            elif v < vl:
                vl = v
    return Interval(vl, vr)
def straddles(x):
    """True if the interval/p-box/scalar `x` contains zero (endpoints included)."""
    return (left(x) <= 0) and (0 <= right(x))  # includes zero

def straddlingzero(x):
    """True if `x` strictly straddles zero (endpoints excluded)."""
    return (left(x) < 0) and (0 < right(x))  # neglects zero as an endpoint
def env(x, y):
    """Envelope of two p-boxes: delegates to x.env(y)."""
    return x.env(y)
def pnt(a):
    """Point estimate of `a`: the midpoint of the mean interval for a Pbox,
    applied elementwise to lists, identity for anything else.

    NOTE(review): `pba` is not imported in this file as shown, and
    isinstance() would be preferable to the exact type() comparison --
    confirm.
    """
    if type(a) == pba.pbox.Pbox:
        return (a.mean_left + a.mean_right) / 2
    elif type(a) == list:
        return [pnt(b) for b in a]
    else:
        return (a)
def rng(a):
    """Representative range of `a`: the central 95% interval for a Pbox,
    applied elementwise to lists, identity for anything else.

    NOTE(review): `pba` is not imported in this file as shown -- confirm
    the intended module reference (same issue as pnt()).
    """
    if type(a) == pba.pbox.Pbox:
        # return pba.Interval(a.mean_left, a.mean_right)
        # return pba.Interval(a.mean_left-np.sqrt(a.var_right),
        #                     a.mean_right+np.sqrt(a.var_right))
        return a.get_interval(0.025, 0.975)
    elif type(a) == list:
        # BUG FIX: the original recursed into pnt() (the point-estimate
        # helper) instead of rng(), so lists were collapsed to midpoints
        # rather than ranges.
        return [rng(b) for b in a]
    else:
        return (a)
def pltem(ax, t, y, simple=True):
    """Plot the lower/upper envelope of a sequence of uncertain values `y`
    against `t` on the given matplotlib axes.

    Only the `simple` branch is implemented: each element of y is reduced
    to its rng() interval and the two endpoint traces are plotted.
    """
    if simple:
        y = [rng(v) for v in y]
        y1 = [v.left() for v in y]
        y2 = [v.right() for v in y]
        ax.plot(t, y1)
        ax.plot(t, y2)
    else:
        # Non-simple rendering was never implemented.
        pass
|
[
"numpy.minimum",
"numpy.maximum",
"matplotlib.pyplot.show",
"numpy.copy",
"matplotlib.pyplot.plot",
"numpy.flip",
"numpy.empty",
"numpy.isinf",
"numpy.isnan",
"numpy.any",
"numpy.sort",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.min",
"numpy.linspace",
"numpy.all",
"numpy.repeat"
] |
[((18611, 18621), 'numpy.sort', 'np.sort', (['u'], {}), '(u)\n', (18618, 18621), True, 'import numpy as np\n'), ((18830, 18843), 'numpy.sort', 'np.sort', (['vals'], {}), '(vals)\n', (18837, 18843), True, 'import numpy as np\n'), ((20015, 20026), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (20023, 20026), True, 'import numpy as np\n'), ((20978, 20996), 'numpy.copy', 'np.copy', (['pbox.left'], {}), '(pbox.left)\n', (20985, 20996), True, 'import numpy as np\n'), ((9387, 9420), 'numpy.minimum', 'np.minimum', (['self.left', 'other.left'], {}), '(self.left, other.left)\n', (9397, 9420), True, 'import numpy as np\n'), ((9438, 9473), 'numpy.maximum', 'np.maximum', (['self.right', 'other.right'], {}), '(self.right, other.right)\n', (9448, 9473), True, 'import numpy as np\n'), ((15480, 15507), 'matplotlib.pyplot.plot', 'plt.plot', (['left', 'y'], {}), '(left, y, **kwargs)\n', (15488, 15507), True, 'from matplotlib import pyplot as plt\n'), ((15514, 15542), 'matplotlib.pyplot.plot', 'plt.plot', (['right', 'y'], {}), '(right, y, **kwargs)\n', (15522, 15542), True, 'from matplotlib import pyplot as plt\n'), ((16700, 16729), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (16711, 16729), True, 'import numpy as np\n'), ((17792, 17809), 'numpy.array', 'np.array', (['implist'], {}), '(implist)\n', (17800, 17809), True, 'import numpy as np\n'), ((17952, 17969), 'numpy.array', 'np.array', (['implist'], {}), '(implist)\n', (17960, 17969), True, 'import numpy as np\n'), ((19618, 19637), 'numpy.repeat', 'np.repeat', (['V', 'steps'], {}), '(V, steps)\n', (19627, 19637), True, 'import numpy as np\n'), ((20468, 20479), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (20476, 20479), True, 'import numpy as np\n'), ((20504, 20514), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (20511, 20514), True, 'import numpy as np\n'), ((20533, 20555), 'numpy.mean', 'np.mean', (['((w - mu) ** 2)'], {}), '((w - mu) ** 2)\n', (20540, 20555), True, 'import 
numpy as np\n'), ((20596, 20615), 'numpy.mean', 'np.mean', (['pbox.right'], {}), '(pbox.right)\n', (20603, 20615), True, 'import numpy as np\n'), ((20617, 20635), 'numpy.mean', 'np.mean', (['pbox.left'], {}), '(pbox.left)\n', (20624, 20635), True, 'import numpy as np\n'), ((20771, 20806), 'numpy.all', 'np.all', (['(pbox.right[0] == pbox.right)'], {}), '(pbox.right[0] == pbox.right)\n', (20777, 20806), True, 'import numpy as np\n'), ((20811, 20844), 'numpy.all', 'np.all', (['(pbox.left[0] == pbox.left)'], {}), '(pbox.left[0] == pbox.left)\n', (20817, 20844), True, 'import numpy as np\n'), ((20950, 20968), 'numpy.mean', 'np.mean', (['pbox.left'], {}), '(pbox.left)\n', (20957, 20968), True, 'import numpy as np\n'), ((1253, 1288), 'numpy.max', 'np.max', (['[mean_left, self.mean_left]'], {}), '([mean_left, self.mean_left])\n', (1259, 1288), True, 'import numpy as np\n'), ((1342, 1379), 'numpy.min', 'np.min', (['[mean_right, self.mean_right]'], {}), '([mean_right, self.mean_right])\n', (1348, 1379), True, 'import numpy as np\n'), ((1429, 1462), 'numpy.max', 'np.max', (['[var_left, self.var_left]'], {}), '([var_left, self.var_left])\n', (1435, 1462), True, 'import numpy as np\n'), ((1514, 1549), 'numpy.min', 'np.min', (['[var_right, self.var_right]'], {}), '([var_right, self.var_right])\n', (1520, 1549), True, 'import numpy as np\n'), ((15569, 15579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15577, 15579), True, 'from matplotlib import pyplot as plt\n'), ((20674, 20693), 'numpy.isinf', 'np.isinf', (['pbox.left'], {}), '(pbox.left)\n', (20682, 20693), True, 'import numpy as np\n'), ((20705, 20725), 'numpy.isinf', 'np.isinf', (['pbox.right'], {}), '(pbox.right)\n', (20713, 20725), True, 'import numpy as np\n'), ((21111, 21121), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (21118, 21121), True, 'import numpy as np\n'), ((21135, 21147), 'numpy.isnan', 'np.isnan', (['vr'], {}), '(vr)\n', (21143, 21147), True, 'import numpy as np\n'), ((21151, 21162), 
'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (21159, 21162), True, 'import numpy as np\n'), ((21350, 21360), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (21357, 21360), True, 'import numpy as np\n'), ((2405, 2438), 'numpy.array', 'np.array', (['[self.left, self.right]'], {}), '([self.left, self.right])\n', (2413, 2438), True, 'import numpy as np\n'), ((4740, 4758), 'numpy.mean', 'np.mean', (['self.left'], {}), '(self.left)\n', (4747, 4758), True, 'import numpy as np\n'), ((4812, 4831), 'numpy.mean', 'np.mean', (['self.right'], {}), '(self.right)\n', (4819, 4831), True, 'import numpy as np\n'), ((4851, 4879), 'numpy.any', 'np.any', (['(self.left <= -np.inf)'], {}), '(self.left <= -np.inf)\n', (4857, 4879), True, 'import numpy as np\n'), ((4883, 4911), 'numpy.any', 'np.any', (['(np.inf <= self.right)'], {}), '(np.inf <= self.right)\n', (4889, 4911), True, 'import numpy as np\n'), ((10141, 10161), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (10149, 10161), True, 'import numpy as np\n'), ((10187, 10207), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (10195, 10207), True, 'import numpy as np\n'), ((12861, 12881), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (12869, 12881), True, 'import numpy as np\n'), ((12907, 12927), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (12915, 12927), True, 'import numpy as np\n'), ((16053, 16082), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (16064, 16082), True, 'import numpy as np\n'), ((16377, 16406), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (16388, 16406), True, 'import numpy as np\n'), ((17080, 17109), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (17091, 17109), True, 'import numpy as np\n'), ((19886, 19921), 'numpy.linspace', 'np.linspace', ([], {'start': 'v', 'stop': 'w', 'num': 'n'}), 
'(start=v, stop=w, num=n)\n', (19897, 19921), True, 'import numpy as np\n'), ((21631, 21641), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (21638, 21641), True, 'import numpy as np\n'), ((21659, 21671), 'numpy.isnan', 'np.isnan', (['vl'], {}), '(vl)\n', (21667, 21671), True, 'import numpy as np\n'), ((21675, 21686), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (21683, 21686), True, 'import numpy as np\n'), ((2680, 2699), 'numpy.flip', 'np.flip', (['self.right'], {}), '(self.right)\n', (2687, 2699), True, 'import numpy as np\n'), ((2722, 2740), 'numpy.flip', 'np.flip', (['self.left'], {}), '(self.left)\n', (2729, 2740), True, 'import numpy as np\n'), ((10407, 10445), 'numpy.min', 'np.min', (['(self.right[j] + other.right[k])'], {}), '(self.right[j] + other.right[k])\n', (10413, 10445), True, 'import numpy as np\n'), ((10584, 10622), 'numpy.max', 'np.max', (['(self.left[jj] + other.left[kk])'], {}), '(self.left[jj] + other.left[kk])\n', (10590, 10622), True, 'import numpy as np\n'), ((13127, 13165), 'numpy.min', 'np.min', (['(self.right[j] * other.right[k])'], {}), '(self.right[j] * other.right[k])\n', (13133, 13165), True, 'import numpy as np\n'), ((13304, 13342), 'numpy.max', 'np.max', (['(self.left[jj] * other.left[kk])'], {}), '(self.left[jj] * other.left[kk])\n', (13310, 13342), True, 'import numpy as np\n'), ((15160, 15179), 'numpy.flip', 'np.flip', (['self.right'], {}), '(self.right)\n', (15167, 15179), True, 'import numpy as np\n'), ((15205, 15223), 'numpy.flip', 'np.flip', (['self.left'], {}), '(self.left)\n', (15212, 15223), True, 'import numpy as np\n'), ((2028, 2059), 'numpy.min', 'np.min', (['[self.left, self.right]'], {}), '([self.left, self.right])\n', (2034, 2059), True, 'import numpy as np\n'), ((2071, 2102), 'numpy.max', 'np.max', (['[self.left, self.right]'], {}), '([self.left, self.right])\n', (2077, 2102), True, 'import numpy as np\n'), ((10826, 10845), 'numpy.flip', 'np.flip', (['other.left'], {}), '(other.left)\n', (10833, 10845), True, 
'import numpy as np\n'), ((10884, 10904), 'numpy.flip', 'np.flip', (['other.right'], {}), '(other.right)\n', (10891, 10904), True, 'import numpy as np\n'), ((13546, 13565), 'numpy.flip', 'np.flip', (['other.left'], {}), '(other.left)\n', (13553, 13565), True, 'import numpy as np\n'), ((13604, 13624), 'numpy.flip', 'np.flip', (['other.right'], {}), '(other.right)\n', (13611, 13624), True, 'import numpy as np\n')]
|
from django.contrib import admin
from .models import Bulletin

# Expose the Bulletin model in the Django admin site with the default
# ModelAdmin options.
admin.site.register(Bulletin)
|
[
"django.contrib.admin.site.register"
] |
[((64, 93), 'django.contrib.admin.site.register', 'admin.site.register', (['Bulletin'], {}), '(Bulletin)\n', (83, 93), False, 'from django.contrib import admin\n')]
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recommendation_model_launcher."""
import os
from absl import flags
import tensorflow as tf
from model import input_pipeline
from model import recommendation_model_launcher as launcher
from google.protobuf import text_format
FLAGS = flags.FLAGS
FAKE_MOVIE_GENRE_VOCAB = [
'UNK',
'Comedy',
'Drama',
'Romance',
'Animation',
'Children'
]
TEST_INPUT_CONFIG = """
activity_feature_groups {
features {
feature_name: "context_movie_id"
feature_type: INT
vocab_size: 3952
embedding_dim: 8
feature_length: 5
}
features {
feature_name: "context_movie_rating"
feature_type: FLOAT
feature_length: 5
}
encoder_type: CNN
}
activity_feature_groups {
features {
feature_name: "context_movie_genre"
feature_type: STRING
vocab_name: "movie_genre_vocab.txt"
vocab_size: 19
embedding_dim: 8
feature_length: 8
}
encoder_type: BOW
}
label_feature {
feature_name: "label_movie_id"
feature_type: INT
vocab_size: 3952
embedding_dim: 8
feature_length: 1
}
"""
EXAMPLE1 = text_format.Parse(
"""
features {
feature {
key: "context_movie_id"
value {
int64_list {
value: [1, 2, 0, 0, 0]
}
}
}
feature {
key: "context_movie_rating"
value {
float_list {
value: [3.5, 4.0, 0.0, 0.0, 0.0]
}
}
}
feature {
key: "context_movie_genre"
value {
bytes_list {
value: [
"Animation", "Children", "Comedy", "Comedy", "Romance", "UNK", "UNK", "UNK"
]
}
}
}
feature {
key: "label_movie_id"
value {
int64_list {
value: [3]
}
}
}
}""", tf.train.Example())
class RecommendationModelLauncherTest(tf.test.TestCase):
def _AssertSparseTensorValueEqual(self, a, b):
    """Assert two sparse-tensor values match in indices, values and shape."""
    self.assertAllEqual(a.indices, b.indices)
    self.assertAllEqual(a.values, b.values)
    self.assertAllEqual(a.dense_shape, b.dense_shape)
def _assertInputDetail(self, input_details, index, name, shape):
    """Assert the TFLite input detail at `index` has the given name and shape."""
    self.assertEqual(name, input_details[index]['name'])
    self.assertEqual(shape, input_details[index]['shape'])
def setUp(self):
    """Write the input-config, vocab and single-example TFRecord fixtures
    to a temp dir, then point the launcher FLAGS at them."""
    super().setUp()
    self.tmp_dir = self.create_tempdir()
    self.test_input_config_file = os.path.join(self.tmp_dir,
                                               'input_config.pbtxt')
    self.test_movie_genre_vocab_file = os.path.join(self.tmp_dir,
                                                    'movie_genre_vocab.txt')
    self.test_input_data_file = os.path.join(self.tmp_dir,
                                             'test_input_data.tfrecord')
    # Materialise the fixtures defined at module level.
    with open(self.test_input_config_file, 'w', encoding='utf-8') as f:
        f.write(TEST_INPUT_CONFIG)
    with open(self.test_movie_genre_vocab_file, 'w', encoding='utf-8') as f:
        for item in FAKE_MOVIE_GENRE_VOCAB:
            f.write(item + '\n')
    with tf.io.TFRecordWriter(self.test_input_data_file) as file_writer:
        file_writer.write(EXAMPLE1.SerializeToString())
    self.test_model_dir = os.path.join(self.tmp_dir, 'test_model_dir')
    # Launcher configuration consumed by the code under test.
    FLAGS.training_data_filepattern = self.test_input_data_file
    FLAGS.testing_data_filepattern = self.test_input_data_file
    FLAGS.input_config_file = self.test_input_config_file
    FLAGS.model_dir = self.test_model_dir
    FLAGS.hidden_layer_dims = [8, 4]
    FLAGS.eval_top_k = [1, 5]
    FLAGS.num_predictions = 5
    FLAGS.conv_num_filter_ratios = [2, 4]
    FLAGS.conv_kernel_size = 4
    FLAGS.lstm_num_units = 16
def testModelTrainEvalExport(self):
"""Verifies that model can be trained and evaluated."""
tf.io.gfile.mkdir(FLAGS.model_dir)
input_config = launcher.load_input_config()
model_config = launcher.prepare_model_config()
dataset = input_pipeline.get_input_dataset(
data_filepattern=self.test_input_data_file,
input_config=input_config,
vocab_file_dir=self.tmp_dir,
batch_size=8)
model = launcher.build_keras_model(input_config, model_config)
launcher.train_and_eval(
model=model,
model_dir=FLAGS.model_dir,
train_input_dataset=dataset,
eval_input_dataset=dataset,
steps_per_epoch=2,
epochs=2,
eval_steps=1)
self.assertTrue(os.path.exists(self.test_model_dir))
summaries_dir = os.path.join(self.test_model_dir, 'summaries')
self.assertTrue(os.path.exists(summaries_dir))
export_dir = os.path.join(FLAGS.model_dir, 'export')
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
launcher.save_model(
checkpoint_path=latest_checkpoint,
export_dir=export_dir,
input_config=input_config,
model_config=model_config)
savedmodel_path = os.path.join(export_dir, 'saved_model.pb')
self.assertTrue(os.path.exists(savedmodel_path))
imported = tf.saved_model.load(export_dir, tags=None)
infer = imported.signatures['serving_default']
context_movie_id = tf.range(5, dtype=tf.int32)
context_movie_rating = tf.range(5, dtype=tf.float32)
context_movie_genre = tf.range(8, dtype=tf.int32)
predictions = infer(context_movie_id=context_movie_id,
context_movie_rating=context_movie_rating,
context_movie_genre=context_movie_genre)
self.assertAllEqual([5], predictions['top_prediction_ids'].shape)
self.assertAllEqual([5], predictions['top_prediction_scores'].shape)
launcher.export_tflite(export_dir)
tflite_model_path = os.path.join(export_dir, 'model.tflite')
self.assertTrue(os.path.exists(tflite_model_path))
f = open(tflite_model_path, 'rb')
interpreter = tf.lite.Interpreter(model_content=f.read())
interpreter.allocate_tensors()
inference_signature = interpreter.get_signature_list()['serving_default']
self.assertAllEqual(
['context_movie_genre', 'context_movie_id', 'context_movie_rating'],
inference_signature['inputs'])
self.assertAllEqual(['top_prediction_ids', 'top_prediction_scores'],
inference_signature['outputs'])
serving_name_to_tenors = {
'serving_default_context_movie_id:0': context_movie_id,
'serving_default_context_movie_rating:0': context_movie_rating,
'serving_default_context_movie_genre:0': context_movie_genre
}
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
indice_to_tensors = {}
for input_detail in input_details:
indice_to_tensors[input_detail['index']] = serving_name_to_tenors[
input_detail['name']]
for index, tensor in indice_to_tensors.items():
interpreter.set_tensor(index, tensor)
interpreter.invoke()
tflite_top_predictions_ids = interpreter.get_tensor(
output_details[0]['index'])
tflite_top_prediction_scores = interpreter.get_tensor(
output_details[1]['index'])
self.assertAllEqual([5], tflite_top_predictions_ids.shape)
self.assertAllEqual([5], tflite_top_prediction_scores.shape)
if __name__ == '__main__':
launcher.define_flags()
tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow.range",
"model.recommendation_model_launcher.prepare_model_config",
"tensorflow.train.Example",
"model.input_pipeline.get_input_dataset",
"tensorflow.saved_model.load",
"tensorflow.io.TFRecordWriter",
"os.path.exists",
"model.recommendation_model_launcher.define_flags",
"model.recommendation_model_launcher.export_tflite",
"model.recommendation_model_launcher.train_and_eval",
"tensorflow.train.latest_checkpoint",
"model.recommendation_model_launcher.save_model",
"tensorflow.io.gfile.mkdir",
"os.path.join",
"model.recommendation_model_launcher.load_input_config",
"model.recommendation_model_launcher.build_keras_model"
] |
[((2677, 2695), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (2693, 2695), True, 'import tensorflow as tf\n'), ((8043, 8066), 'model.recommendation_model_launcher.define_flags', 'launcher.define_flags', ([], {}), '()\n', (8064, 8066), True, 'from model import recommendation_model_launcher as launcher\n'), ((8069, 8083), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (8081, 8083), True, 'import tensorflow as tf\n'), ((3249, 3297), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""input_config.pbtxt"""'], {}), "(self.tmp_dir, 'input_config.pbtxt')\n", (3261, 3297), False, 'import os\n'), ((3384, 3435), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""movie_genre_vocab.txt"""'], {}), "(self.tmp_dir, 'movie_genre_vocab.txt')\n", (3396, 3435), False, 'import os\n'), ((3520, 3574), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""test_input_data.tfrecord"""'], {}), "(self.tmp_dir, 'test_input_data.tfrecord')\n", (3532, 3574), False, 'import os\n'), ((4027, 4071), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""test_model_dir"""'], {}), "(self.tmp_dir, 'test_model_dir')\n", (4039, 4071), False, 'import os\n'), ((4603, 4637), 'tensorflow.io.gfile.mkdir', 'tf.io.gfile.mkdir', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (4620, 4637), True, 'import tensorflow as tf\n'), ((4657, 4685), 'model.recommendation_model_launcher.load_input_config', 'launcher.load_input_config', ([], {}), '()\n', (4683, 4685), True, 'from model import recommendation_model_launcher as launcher\n'), ((4705, 4736), 'model.recommendation_model_launcher.prepare_model_config', 'launcher.prepare_model_config', ([], {}), '()\n', (4734, 4736), True, 'from model import recommendation_model_launcher as launcher\n'), ((4751, 4901), 'model.input_pipeline.get_input_dataset', 'input_pipeline.get_input_dataset', ([], {'data_filepattern': 'self.test_input_data_file', 'input_config': 'input_config', 'vocab_file_dir': 'self.tmp_dir', 'batch_size': '(8)'}), 
'(data_filepattern=self.test_input_data_file,\n input_config=input_config, vocab_file_dir=self.tmp_dir, batch_size=8)\n', (4783, 4901), False, 'from model import input_pipeline\n'), ((4943, 4997), 'model.recommendation_model_launcher.build_keras_model', 'launcher.build_keras_model', (['input_config', 'model_config'], {}), '(input_config, model_config)\n', (4969, 4997), True, 'from model import recommendation_model_launcher as launcher\n'), ((5002, 5173), 'model.recommendation_model_launcher.train_and_eval', 'launcher.train_and_eval', ([], {'model': 'model', 'model_dir': 'FLAGS.model_dir', 'train_input_dataset': 'dataset', 'eval_input_dataset': 'dataset', 'steps_per_epoch': '(2)', 'epochs': '(2)', 'eval_steps': '(1)'}), '(model=model, model_dir=FLAGS.model_dir,\n train_input_dataset=dataset, eval_input_dataset=dataset,\n steps_per_epoch=2, epochs=2, eval_steps=1)\n', (5025, 5173), True, 'from model import recommendation_model_launcher as launcher\n'), ((5300, 5346), 'os.path.join', 'os.path.join', (['self.test_model_dir', '"""summaries"""'], {}), "(self.test_model_dir, 'summaries')\n", (5312, 5346), False, 'import os\n'), ((5415, 5454), 'os.path.join', 'os.path.join', (['FLAGS.model_dir', '"""export"""'], {}), "(FLAGS.model_dir, 'export')\n", (5427, 5454), False, 'import os\n'), ((5479, 5522), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (5505, 5522), True, 'import tensorflow as tf\n'), ((5527, 5663), 'model.recommendation_model_launcher.save_model', 'launcher.save_model', ([], {'checkpoint_path': 'latest_checkpoint', 'export_dir': 'export_dir', 'input_config': 'input_config', 'model_config': 'model_config'}), '(checkpoint_path=latest_checkpoint, export_dir=\n export_dir, input_config=input_config, model_config=model_config)\n', (5546, 5663), True, 'from model import recommendation_model_launcher as launcher\n'), ((5714, 5756), 'os.path.join', 'os.path.join', (['export_dir', 
'"""saved_model.pb"""'], {}), "(export_dir, 'saved_model.pb')\n", (5726, 5756), False, 'import os\n'), ((5825, 5867), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['export_dir'], {'tags': 'None'}), '(export_dir, tags=None)\n', (5844, 5867), True, 'import tensorflow as tf\n'), ((5942, 5969), 'tensorflow.range', 'tf.range', (['(5)'], {'dtype': 'tf.int32'}), '(5, dtype=tf.int32)\n', (5950, 5969), True, 'import tensorflow as tf\n'), ((5997, 6026), 'tensorflow.range', 'tf.range', (['(5)'], {'dtype': 'tf.float32'}), '(5, dtype=tf.float32)\n', (6005, 6026), True, 'import tensorflow as tf\n'), ((6053, 6080), 'tensorflow.range', 'tf.range', (['(8)'], {'dtype': 'tf.int32'}), '(8, dtype=tf.int32)\n', (6061, 6080), True, 'import tensorflow as tf\n'), ((6419, 6453), 'model.recommendation_model_launcher.export_tflite', 'launcher.export_tflite', (['export_dir'], {}), '(export_dir)\n', (6441, 6453), True, 'from model import recommendation_model_launcher as launcher\n'), ((6478, 6518), 'os.path.join', 'os.path.join', (['export_dir', '"""model.tflite"""'], {}), "(export_dir, 'model.tflite')\n", (6490, 6518), False, 'import os\n'), ((3882, 3929), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['self.test_input_data_file'], {}), '(self.test_input_data_file)\n', (3902, 3929), True, 'import tensorflow as tf\n'), ((5243, 5278), 'os.path.exists', 'os.path.exists', (['self.test_model_dir'], {}), '(self.test_model_dir)\n', (5257, 5278), False, 'import os\n'), ((5367, 5396), 'os.path.exists', 'os.path.exists', (['summaries_dir'], {}), '(summaries_dir)\n', (5381, 5396), False, 'import os\n'), ((5777, 5808), 'os.path.exists', 'os.path.exists', (['savedmodel_path'], {}), '(savedmodel_path)\n', (5791, 5808), False, 'import os\n'), ((6539, 6572), 'os.path.exists', 'os.path.exists', (['tflite_model_path'], {}), '(tflite_model_path)\n', (6553, 6572), False, 'import os\n')]
|
from __future__ import absolute_import
import unittest
from src.solver import Solver
class TestSolver(unittest.TestCase):
def test_ctor(self):
solver = Solver("", "", False)
self.assertEqual(solver.name, "")
self.assertEqual(solver.quiet_mode, False)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"src.solver.Solver"
] |
[((317, 332), 'unittest.main', 'unittest.main', ([], {}), '()\n', (330, 332), False, 'import unittest\n'), ((168, 189), 'src.solver.Solver', 'Solver', (['""""""', '""""""', '(False)'], {}), "('', '', False)\n", (174, 189), False, 'from src.solver import Solver\n')]
|
import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
user=int(input("What do you choose? Type 0 for Rock, 1 for Paper, 2 for Scissors.\n"))
temp={0:"print(rock)",1:"print(paper)",2:"print(scissors)"}
if(user in [0,1,2]):
print("You chose:")
eval(temp[user])
print("Computer chose:")
comp=random.randint(0,2)
eval(temp[comp])
if((user==0 and comp==2) or (user==1 and comp==0) or (user==2 and comp==1)):
print("You win :)")
elif(user==comp):
print("It's a draw :|")
else:
print("You lose :(")
else:
print("Wrong choice.")
|
[
"random.randint"
] |
[((588, 608), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (602, 608), False, 'import random\n')]
|
from __future__ import division
from iotbx.option_parser import option_parser
from scitbx.array_family import flex
import sys
from libtbx.utils import Sorry
from mmtbx import scaling
import math
from iotbx import data_plots
def plotit(fobs,
sigma,
fcalc,
alpha,
beta,
epsilon,
centric,
out,
limit=5.0,
steps=1000,
plot_title="Outlier plot"):
fobs_a = flex.double( [fobs] )
fcalc_a = flex.double( [fcalc] )
epsilon_a = flex.double( [epsilon] )
alpha_a = flex.double( [alpha] )
beta_a = flex.double( [beta] )
centric_a = flex.bool ( [centric] )
p_calc = scaling.likelihood_ratio_outlier_test(
fobs_a,
None,
fcalc_a,
epsilon_a,
centric_a,
alpha_a,
beta_a)
print >> out
print >> out,"#Input parameters: "
print >> out,"#Title : ", plot_title
print >> out,"#F-calc : ", fcalc
print >> out,"#F-obs : ", fobs
print >> out,"#epsilon : ", epsilon
print >> out,"#alpha : ", alpha
print >> out,"#beta : ", beta
print >> out,"#centric : ", centric
mode = p_calc.posterior_mode()[0]
snd_der = math.sqrt(1.0/ math.fabs( p_calc.posterior_mode_snd_der()[0] ) )
print >> out,"#A Gaussian approximation of the likelihood function"
print >> out,"#could be constructed as follows with: "
print >> out,"# exp[-(fobs-mode)**2/(2*stdev**2)] /(sqrt(2 pi) stdev)"
print >> out,"#with"
print >> out,"#mode = ", mode
print >> out,"#stdev = ", snd_der
print >> out
print >> out,"#The log likelihood values for the mode and "
print >> out,"#observed values are"
print >> out,"#Log[P(fobs)] : ", p_calc.log_likelihood()[0]
print >> out,"#Log[P(mode)] : ", p_calc.posterior_mode_log_likelihood()[0]
print >> out,"#Their difference is:"
print >> out,"#delta : ", p_calc.log_likelihood()[0]-p_calc.posterior_mode_log_likelihood()[0]
print >> out,"#"
mean_fobs = p_calc.mean_fobs()
print >> out,"#mean f_obs : ", mean_fobs[0], " (first moment)"
low_limit = mode-snd_der*limit
if low_limit<0:
low_limit=0
high_limit = mode+limit*snd_der
if fobs < low_limit:
low_limit = fobs-2.0*snd_der
if low_limit<0:
low_limit=0
if fobs > high_limit:
high_limit = fobs+2.0*snd_der
fobs_a = flex.double( range(steps) )*(
high_limit-low_limit)/float(steps)+low_limit
fcalc_a = flex.double( [fcalc]*steps )
epsilon_a = flex.double( [epsilon]*steps )
alpha_a = flex.double( [alpha]*steps )
beta_a = flex.double( [beta]*steps )
centric_a = flex.bool ( [centric]*steps )
p_calc = scaling.likelihood_ratio_outlier_test(
fobs_a,
None,
fcalc_a,
epsilon_a,
centric_a,
alpha_a,
beta_a)
ll = p_calc.log_likelihood() #-p_calc.posterior_mode_log_likelihood()
ll = flex.exp( ll )
if (sigma is None) or (sigma <=0 ):
sigma=fobs/30.0
obs_gauss = (fobs_a - fobs)/float(sigma)
obs_gauss = flex.exp( -obs_gauss*obs_gauss/2.0 ) /(
math.sqrt(2.0*math.pi*sigma*sigma))
max_ll = flex.max( ll )*1.10
truncate_mask = flex.bool( obs_gauss >= max_ll )
obs_gauss = obs_gauss.set_selected( truncate_mask, max_ll )
ccp4_loggraph_plot = data_plots.plot_data(
plot_title=plot_title,
x_label = 'Fobs',
y_label = 'P(Fobs)',
x_data = fobs_a,
y_data = ll,
y_legend = 'P(Fobs|Fcalc,alpha,beta)',
comments = 'Fobs=%5.2f, sigma=%5.2f, Fcalc=%5.2f'%(fobs,sigma,fcalc) )
ccp4_loggraph_plot.add_data(
y_data = obs_gauss,
y_legend = "P(Fobs|<Fobs>,sigma)"
)
data_plots.plot_data_loggraph(ccp4_loggraph_plot,out)
def run(args):
command_line = (option_parser(
usage="mmtbx.p-plotter [options]",
description="produces a gnuplot plot")
.option(None, "--fobs",
action="store",
type="float",
help="F obs",
metavar="FLOAT")
.option( None, "--sigma",
action="store",
type="float",
help="sigma Fobs",
metavar="FLOAT")
.option(None, "--fcalc",
action="store",
type="float",
help="F calc",
metavar="FLOAT")
.option(None, "--alpha",
action="store",
type="float",
help="alpha",
metavar="FLOAT")
.option(None, "--beta",
action="store",
type="float",
help="beta")
.option(None, "--epsilon",
action="store",
type="float",
help="epsilon")
.option(None, "--centric",
action="store_true",
default=False,
help="centricity flag")
.option(None, "--limit",
action="store",
type="float",
default=10,
help="plotting limit")
.option(None, "--steps",
action="store",
type="int",
default=1000,
help="number of steps")
).process(args=args)
if command_line.options.fobs is None:
raise Sorry("please provide fobs")
if command_line.options.fcalc is None:
raise Sorry("please provide fcalc")
if command_line.options.epsilon is None:
raise Sorry("please provide epsilon")
if command_line.options.alpha is None:
raise Sorry("please provide alpha")
if command_line.options.beta is None:
raise Sorry("please provide beta")
#print dir(command_line.options)
plottery = plotit( command_line.options.fobs,
command_line.options.sigma,
command_line.options.fcalc,
command_line.options.alpha,
command_line.options.beta,
command_line.options.epsilon,
command_line.options.centric,
sys.stdout,
command_line.options.limit,
command_line.options.steps)
if (__name__=="__main__"):
run(sys.argv[0:])
|
[
"math.sqrt",
"mmtbx.scaling.likelihood_ratio_outlier_test",
"scitbx.array_family.flex.max",
"iotbx.data_plots.plot_data_loggraph",
"iotbx.data_plots.plot_data",
"iotbx.option_parser.option_parser",
"scitbx.array_family.flex.bool",
"scitbx.array_family.flex.double",
"scitbx.array_family.flex.exp",
"libtbx.utils.Sorry"
] |
[((469, 488), 'scitbx.array_family.flex.double', 'flex.double', (['[fobs]'], {}), '([fobs])\n', (480, 488), False, 'from scitbx.array_family import flex\n'), ((505, 525), 'scitbx.array_family.flex.double', 'flex.double', (['[fcalc]'], {}), '([fcalc])\n', (516, 525), False, 'from scitbx.array_family import flex\n'), ((542, 564), 'scitbx.array_family.flex.double', 'flex.double', (['[epsilon]'], {}), '([epsilon])\n', (553, 564), False, 'from scitbx.array_family import flex\n'), ((581, 601), 'scitbx.array_family.flex.double', 'flex.double', (['[alpha]'], {}), '([alpha])\n', (592, 601), False, 'from scitbx.array_family import flex\n'), ((618, 637), 'scitbx.array_family.flex.double', 'flex.double', (['[beta]'], {}), '([beta])\n', (629, 637), False, 'from scitbx.array_family import flex\n'), ((654, 674), 'scitbx.array_family.flex.bool', 'flex.bool', (['[centric]'], {}), '([centric])\n', (663, 674), False, 'from scitbx.array_family import flex\n'), ((691, 794), 'mmtbx.scaling.likelihood_ratio_outlier_test', 'scaling.likelihood_ratio_outlier_test', (['fobs_a', 'None', 'fcalc_a', 'epsilon_a', 'centric_a', 'alpha_a', 'beta_a'], {}), '(fobs_a, None, fcalc_a, epsilon_a,\n centric_a, alpha_a, beta_a)\n', (728, 794), False, 'from mmtbx import scaling\n'), ((2470, 2498), 'scitbx.array_family.flex.double', 'flex.double', (['([fcalc] * steps)'], {}), '([fcalc] * steps)\n', (2481, 2498), False, 'from scitbx.array_family import flex\n'), ((2513, 2543), 'scitbx.array_family.flex.double', 'flex.double', (['([epsilon] * steps)'], {}), '([epsilon] * steps)\n', (2524, 2543), False, 'from scitbx.array_family import flex\n'), ((2558, 2586), 'scitbx.array_family.flex.double', 'flex.double', (['([alpha] * steps)'], {}), '([alpha] * steps)\n', (2569, 2586), False, 'from scitbx.array_family import flex\n'), ((2601, 2628), 'scitbx.array_family.flex.double', 'flex.double', (['([beta] * steps)'], {}), '([beta] * steps)\n', (2612, 2628), False, 'from scitbx.array_family import flex\n'), ((2643, 
2671), 'scitbx.array_family.flex.bool', 'flex.bool', (['([centric] * steps)'], {}), '([centric] * steps)\n', (2652, 2671), False, 'from scitbx.array_family import flex\n'), ((2686, 2789), 'mmtbx.scaling.likelihood_ratio_outlier_test', 'scaling.likelihood_ratio_outlier_test', (['fobs_a', 'None', 'fcalc_a', 'epsilon_a', 'centric_a', 'alpha_a', 'beta_a'], {}), '(fobs_a, None, fcalc_a, epsilon_a,\n centric_a, alpha_a, beta_a)\n', (2723, 2789), False, 'from mmtbx import scaling\n'), ((2898, 2910), 'scitbx.array_family.flex.exp', 'flex.exp', (['ll'], {}), '(ll)\n', (2906, 2910), False, 'from scitbx.array_family import flex\n'), ((3159, 3189), 'scitbx.array_family.flex.bool', 'flex.bool', (['(obs_gauss >= max_ll)'], {}), '(obs_gauss >= max_ll)\n', (3168, 3189), False, 'from scitbx.array_family import flex\n'), ((3279, 3507), 'iotbx.data_plots.plot_data', 'data_plots.plot_data', ([], {'plot_title': 'plot_title', 'x_label': '"""Fobs"""', 'y_label': '"""P(Fobs)"""', 'x_data': 'fobs_a', 'y_data': 'll', 'y_legend': '"""P(Fobs|Fcalc,alpha,beta)"""', 'comments': "('Fobs=%5.2f, sigma=%5.2f, Fcalc=%5.2f' % (fobs, sigma, fcalc))"}), "(plot_title=plot_title, x_label='Fobs', y_label=\n 'P(Fobs)', x_data=fobs_a, y_data=ll, y_legend=\n 'P(Fobs|Fcalc,alpha,beta)', comments=\n 'Fobs=%5.2f, sigma=%5.2f, Fcalc=%5.2f' % (fobs, sigma, fcalc))\n", (3299, 3507), False, 'from iotbx import data_plots\n'), ((3632, 3686), 'iotbx.data_plots.plot_data_loggraph', 'data_plots.plot_data_loggraph', (['ccp4_loggraph_plot', 'out'], {}), '(ccp4_loggraph_plot, out)\n', (3661, 3686), False, 'from iotbx import data_plots\n'), ((3029, 3067), 'scitbx.array_family.flex.exp', 'flex.exp', (['(-obs_gauss * obs_gauss / 2.0)'], {}), '(-obs_gauss * obs_gauss / 2.0)\n', (3037, 3067), False, 'from scitbx.array_family import flex\n'), ((3073, 3113), 'math.sqrt', 'math.sqrt', (['(2.0 * math.pi * sigma * sigma)'], {}), '(2.0 * math.pi * sigma * sigma)\n', (3082, 3113), False, 'import math\n'), ((3121, 3133), 
'scitbx.array_family.flex.max', 'flex.max', (['ll'], {}), '(ll)\n', (3129, 3133), False, 'from scitbx.array_family import flex\n'), ((5686, 5714), 'libtbx.utils.Sorry', 'Sorry', (['"""please provide fobs"""'], {}), "('please provide fobs')\n", (5691, 5714), False, 'from libtbx.utils import Sorry\n'), ((5766, 5795), 'libtbx.utils.Sorry', 'Sorry', (['"""please provide fcalc"""'], {}), "('please provide fcalc')\n", (5771, 5795), False, 'from libtbx.utils import Sorry\n'), ((5849, 5880), 'libtbx.utils.Sorry', 'Sorry', (['"""please provide epsilon"""'], {}), "('please provide epsilon')\n", (5854, 5880), False, 'from libtbx.utils import Sorry\n'), ((5932, 5961), 'libtbx.utils.Sorry', 'Sorry', (['"""please provide alpha"""'], {}), "('please provide alpha')\n", (5937, 5961), False, 'from libtbx.utils import Sorry\n'), ((6012, 6040), 'libtbx.utils.Sorry', 'Sorry', (['"""please provide beta"""'], {}), "('please provide beta')\n", (6017, 6040), False, 'from libtbx.utils import Sorry\n'), ((3720, 3812), 'iotbx.option_parser.option_parser', 'option_parser', ([], {'usage': '"""mmtbx.p-plotter [options]"""', 'description': '"""produces a gnuplot plot"""'}), "(usage='mmtbx.p-plotter [options]', description=\n 'produces a gnuplot plot')\n", (3733, 3812), False, 'from iotbx.option_parser import option_parser\n')]
|
# Link: https://github.com/nicolas3470/Python-synchronization/blob/master/q06.py
from threading import Lock, Condition
from utils import Thread, execution_manager
from barbershop import Barber, Customer
def delay():
time.sleep(random.randint(0, 2))
class BarberShop:
def __init__(self, num_seats):
self.barbers_ready = False
self.num_seats = num_seats
self.open_seats = num_seats
self.shop_mutex = Lock()
self.barber_condition = Condition(self.shop_mutex)
self.customer_condition = Condition(self.shop_mutex)
def barber_ready(self):
# Barber is ready
# Signal one of the waiting customers
with self.shop_mutex:
while self.open_seats == self.num_seats:
self.barbers_ready = False
self.customer_condition.wait()
self.barbers_ready = True
self.open_seats += 1
self.barber_condition.notify()
def customer_enter(self):
# Return True, if customer sat
# Else, return false, customer left
with self.shop_mutex:
if self.open_seats > 0:
self.open_seats -= 1
return True
else:
return False
def customer_sit(self):
# Sit until Barber is ready
with self.shop_mutex:
self.customer_condition.notify()
while not self.barbers_ready:
self.barber_condition.wait()
self.barbers_ready = False
num_barbers = 3
num_customers = 6
num_seats = 3
barbershop = BarberShop(num_seats)
#execution_manager()
[Thread(Barber, i) for i in range(num_barbers)]
[Thread(Customer, i) for i in range(num_customers)]
|
[
"threading.Lock",
"threading.Condition",
"utils.Thread"
] |
[((1631, 1648), 'utils.Thread', 'Thread', (['Barber', 'i'], {}), '(Barber, i)\n', (1637, 1648), False, 'from utils import Thread, execution_manager\n'), ((1679, 1698), 'utils.Thread', 'Thread', (['Customer', 'i'], {}), '(Customer, i)\n', (1685, 1698), False, 'from utils import Thread, execution_manager\n'), ((443, 449), 'threading.Lock', 'Lock', ([], {}), '()\n', (447, 449), False, 'from threading import Lock, Condition\n'), ((482, 508), 'threading.Condition', 'Condition', (['self.shop_mutex'], {}), '(self.shop_mutex)\n', (491, 508), False, 'from threading import Lock, Condition\n'), ((543, 569), 'threading.Condition', 'Condition', (['self.shop_mutex'], {}), '(self.shop_mutex)\n', (552, 569), False, 'from threading import Lock, Condition\n')]
|
""" test automol.graph
"""
import numpy
import automol
from automol import graph
C8H13O_CGR = (
{0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, None), 7: ('C', 1, None), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, None), frozenset({5, 7}): (1, None)})
C8H13O_RGR = (
{0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, None), 7: ('C', 1, None), 8: ('O', 0, None)},
{frozenset({1, 4}): (2, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (2, None), frozenset({5, 7}): (1, None)})
C8H13O_SGR = (
{0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, False), 7: ('C', 1, False), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)})
C3H3_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)})
C3H3_RGRS = (
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (2, None),
frozenset({2, 0}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (2, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (2, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)}),
)
C2_CGR = ({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (1, None)})
C2_RGRS = (
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (1, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (2, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (3, None)}),
)
CH2FH2H_CGR_IMP = (
{0: ('F', 0, None), 1: ('C', 2, None), 2: ('H', 1, None),
3: ('H', 0, None)},
{frozenset({0, 1}): (1, None)})
CH2FH2H_CGR_EXP = (
{0: ('F', 0, None), 1: ('C', 0, None), 2: ('H', 0, None),
3: ('H', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None), frozenset({2, 6}): (1, None)})
C2H2CL2F2_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)})
C2H2CL2F2_SGRS = (
({0: ('C', 1, False), 1: ('C', 1, False), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)}),
({0: ('C', 1, False), 1: ('C', 1, True), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, False), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, True), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)})
)
C3H3CL2F3_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)})
C3H3CL2F3_SGRS = (
({0: ('C', 1, None), 1: ('C', 1, False), 2: ('C', 1, False),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, True), 2: ('C', 1, True),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, False), 1: ('C', 1, False), 2: ('C', 1, True),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, False), 1: ('C', 1, True), 2: ('C', 1, False),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, False), 2: ('C', 1, True),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, True), 2: ('C', 1, False),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
)
C3H5N3_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, None), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, None)})
# The six stereoisomer graphs of C3H5N3: all combinations of bond stereo
# parities (True/False/None) on bonds {1,4}, {0,3} and {2,5} that the
# expansion produces.  Same (atom dict, bond dict) encoding as C3H5N3_CGR.
C3H5N3_SGRS = (
    ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
      3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
     {frozenset({1, 4}): (1, False), frozenset({1, 2}): (1, None),
      frozenset({0, 3}): (1, False), frozenset({0, 2}): (1, None),
      frozenset({2, 5}): (1, None)}),
    ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
      3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
     {frozenset({1, 4}): (1, True), frozenset({1, 2}): (1, None),
      frozenset({0, 3}): (1, False), frozenset({0, 2}): (1, None),
      frozenset({2, 5}): (1, False)}),
    ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
      3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
     {frozenset({1, 4}): (1, True), frozenset({1, 2}): (1, None),
      frozenset({0, 3}): (1, False), frozenset({0, 2}): (1, None),
      frozenset({2, 5}): (1, True)}),
    ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
      3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
     {frozenset({1, 4}): (1, False), frozenset({1, 2}): (1, None),
      frozenset({0, 3}): (1, True), frozenset({0, 2}): (1, None),
      frozenset({2, 5}): (1, False)}),
    ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
      3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
     {frozenset({1, 4}): (1, False), frozenset({1, 2}): (1, None),
      frozenset({0, 3}): (1, True), frozenset({0, 2}): (1, None),
      frozenset({2, 5}): (1, True)}),
    ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
      3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
     {frozenset({1, 4}): (1, True), frozenset({1, 2}): (1, None),
      frozenset({0, 3}): (1, True), frozenset({0, 2}): (1, None),
      frozenset({2, 5}): (1, None)}),
)
# The eight stereoisomer graphs of C8H13O: all 2**3 combinations of the
# atom parities on stereocenters 6 and 7 and the bond parity on {3, 5}.
C8H13O_SGRS = (
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, False), 7: ('C', 1, False), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, False), 7: ('C', 1, False), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, False), 7: ('C', 1, True), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, False), 7: ('C', 1, True), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, True), 7: ('C', 1, False), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, True), 7: ('C', 1, False), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, True), 7: ('C', 1, True), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
    ({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
      3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
      6: ('C', 1, True), 7: ('C', 1, True), 8: ('O', 0, None)},
     {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
      frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
      frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
      frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
)
def test__from_data():
    """ test automol.graph.from_data

    Rebuilds each graph from its component dictionaries (via the getters)
    and checks that the reconstruction round-trips exactly.
    """
    # connectivity graph: symbols, bond keys, implicit hydrogen counts
    cgr = automol.graph.from_data(
        atm_sym_dct=graph.atom_symbols(C8H13O_CGR),
        bnd_keys=graph.bond_keys(C8H13O_CGR),
        atm_imp_hyd_vlc_dct=(
            graph.atom_implicit_hydrogen_valences(C8H13O_CGR)),
    )
    assert cgr == C8H13O_CGR
    # resonance graph: additionally carries bond orders
    rgr = automol.graph.from_data(
        atm_sym_dct=graph.atom_symbols(C8H13O_RGR),
        bnd_keys=graph.bond_keys(C8H13O_RGR),
        atm_imp_hyd_vlc_dct=(
            graph.atom_implicit_hydrogen_valences(C8H13O_RGR)),
        bnd_ord_dct=graph.bond_orders(C8H13O_RGR),
    )
    assert rgr == C8H13O_RGR
    # stereo graph: additionally carries atom and bond stereo parities
    sgr = automol.graph.from_data(
        atm_sym_dct=graph.atom_symbols(C8H13O_SGR),
        bnd_keys=graph.bond_keys(C8H13O_SGR),
        atm_imp_hyd_vlc_dct=(
            graph.atom_implicit_hydrogen_valences(C8H13O_SGR)),
        atm_ste_par_dct=graph.atom_stereo_parities(C8H13O_SGR),
        bnd_ste_par_dct=graph.bond_stereo_parities(C8H13O_SGR)
    )
    assert sgr == C8H13O_SGR
def test__set_atom_implicit_hydrogen_valences():
    """ test graph.set_atom_implicit_hydrogen_valences
    """
    # zeroing every implicit hydrogen count leaves just symbols + bonds
    zero_dct = {key: 0 for key in graph.atom_keys(C8H13O_CGR)}
    cgr = graph.set_atom_implicit_hydrogen_valences(C8H13O_CGR, zero_dct)
    assert cgr == automol.graph.from_data(
        graph.atom_symbols(C8H13O_CGR), graph.bond_keys(C8H13O_CGR))
def test__string():
    """ test graph.string and graph.from_string

    Serialization must round-trip every stereoisomer graph.
    """
    for sgr in C8H13O_SGRS:
        sgr_str = automol.graph.string(sgr)
        assert automol.graph.from_string(sgr_str) == sgr
def test__without_bond_orders():
    """ test graph.without_bond_orders

    Dropping bond orders from the resonance graph recovers the
    connectivity graph.
    """
    cgr = graph.without_bond_orders(C8H13O_RGR)
    assert cgr == C8H13O_CGR
def test__without_stereo_parities():
    """ test graph.without_stereo_parities

    Dropping stereo parities from the stereo graph recovers the
    connectivity graph.
    """
    cgr = graph.without_stereo_parities(C8H13O_SGR)
    assert cgr == C8H13O_CGR
def test__electron_count():
    """ test graph.electron_count
    """
    nelec = graph.electron_count(C8H13O_CGR)
    assert nelec == 69
def test__atom_count():
    """ test graph.atom_count
    """
    # 22 atoms counting implicit hydrogens; 9 heavy atoms without them
    assert graph.atom_count(C8H13O_CGR) == 22
    assert graph.atom_count(C8H13O_CGR, with_implicit=False) == 9
def test__heavy_atom_count():
    """ test graph.heavy_atom_count
    """
    # making hydrogens explicit must not change the heavy-atom count
    cgr = graph.explicit(C8H13O_CGR)
    assert graph.heavy_atom_count(cgr) == 9
def test__atoms_neighbor_atom_keys():
    """ test graph.atoms_neighbor_atom_keys
    """
    assert graph.atoms_neighbor_atom_keys(C8H13O_CGR) == {
        0: frozenset({3}),
        1: frozenset({4}),
        2: frozenset({6}),
        3: frozenset({0, 5}),
        4: frozenset({1, 6}),
        5: frozenset({3, 7}),
        6: frozenset({2, 4, 7}),
        7: frozenset({8, 5, 6}),
        8: frozenset({7})
    }
def test__atoms_second_degree_neighbor_atom_keys():
    """ test graph.atoms_second_degree_neighbor_atom_keys
    """
    assert graph.atoms_second_degree_neighbor_atom_keys(C8H13O_CGR) == {
        0: frozenset({5}),
        1: frozenset({6}),
        2: frozenset({4, 7}),
        3: frozenset({7}),
        4: frozenset({2, 7}),
        5: frozenset({0, 8, 6}),
        6: frozenset({8, 1, 5}),
        7: frozenset({2, 3, 4}),
        8: frozenset({5, 6}),
    }
def test__atoms_bond_keys():
    """ test graph.atoms_bond_keys
    """
    assert graph.atoms_bond_keys(C8H13O_CGR) == {
        0: frozenset({frozenset({0, 3})}),
        1: frozenset({frozenset({1, 4})}),
        2: frozenset({frozenset({2, 6})}),
        3: frozenset({frozenset({3, 5}), frozenset({0, 3})}),
        4: frozenset({frozenset({1, 4}), frozenset({4, 6})}),
        5: frozenset({frozenset({3, 5}), frozenset({5, 7})}),
        6: frozenset({frozenset({6, 7}), frozenset({4, 6}),
                      frozenset({2, 6})}),
        7: frozenset({frozenset({6, 7}), frozenset({5, 7}),
                      frozenset({8, 7})}),
        8: frozenset({frozenset({8, 7})})
    }
# # bond properties
def test__bonds_neighbor_atom_keys():
    """ test graph.bonds_neighbor_atom_keys
    """
    # for each bond, the atoms adjacent to either end (excluding the bond)
    assert graph.bonds_neighbor_atom_keys(C8H13O_CGR) == {
        frozenset({1, 4}): frozenset({6}),
        frozenset({4, 6}): frozenset({1, 2, 7}),
        frozenset({2, 6}): frozenset({4, 7}),
        frozenset({0, 3}): frozenset({5}),
        frozenset({6, 7}): frozenset({8, 2, 4, 5}),
        frozenset({8, 7}): frozenset({5, 6}),
        frozenset({3, 5}): frozenset({0, 7}),
        frozenset({5, 7}): frozenset({8, 3, 6})
    }
# # other properties
def test__branch():
    """ test graph.branch
    """
    # the branch from atom 6 through bond {6, 4} is the 6-4-1 chain
    assert graph.branch(C8H13O_CGR, 6, frozenset({6, 4})) == (
        {1: ('C', 2, None), 4: ('C', 1, None), 6: ('C', 1, None)},
        {frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None)}
    )
def test__connected_components():
    """ test graph.connected_components
    """
    gra1 = C3H3_CGR
    natms1 = automol.formula.atom_count(graph.formula(C3H3_CGR))
    # shift the second component's keys past the first component's atoms
    # so the two graphs can be unioned without key collisions
    gra2 = graph.transform_keys(C2_CGR, lambda key: key + natms1)
    cmp_gras = graph.connected_components(graph.union(gra1, gra2))
    assert cmp_gras in [(gra1, gra2), (gra2, gra1)]
def test__subgraph():
    """ test graph.subgraph
    """
    # induced subgraph on atoms {1, 2} keeps only the bond between them
    assert graph.subgraph(C3H3_CGR, (1, 2)) == (
        {1: ('C', 1, None), 2: ('C', 1, None)},
        {frozenset({1, 2}): (1, None)})
def test__bond_induced_subgraph():
    """ test graph.bond_induced_subgraph
    """
    # subgraph induced by a bond list keeps the bonds and their atoms
    assert graph.bond_induced_subgraph(
        C3H3_CGR, [frozenset({0, 1}), frozenset({1, 2})]) == (
            {0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
            {frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None)})
# # transformations
def test__relabel():
    """ test graph.relabel
    """
    # relabeling maps atom keys and rewrites bond keys consistently
    assert graph.relabel(C3H3_CGR, {0: 10, 1: 11, 2: 12}) == (
        {10: ('C', 1, None), 11: ('C', 1, None), 12: ('C', 1, None)},
        {frozenset({10, 11}): (1, None), frozenset({11, 12}): (1, None),
         frozenset({12, 10}): (1, None)})
def test__remove_atoms():
    """ test graph.remove_atoms
    """
    # removing atom 0 also removes its incident bonds
    assert graph.remove_atoms(C3H3_CGR, (0,)) == (
        {1: ('C', 1, None), 2: ('C', 1, None)},
        {frozenset({1, 2}): (1, None)})
def test__remove_bonds():
    """ test graph.remove_bonds
    """
    # removing a bond keeps all atoms in place
    assert graph.remove_bonds(C3H3_CGR, [frozenset({1, 2})]) == (
        {0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
        {frozenset({0, 1}): (1, None), frozenset({2, 0}): (1, None)})
# implicit/explicit hydrogen functions
# # atom properties
def test__atom_explicit_hydrogen_valences():
    """ test graph.atom_explicit_hydrogen_valences
    """
    # counts of explicit hydrogen neighbors per atom (H atoms have zero)
    assert graph.atom_explicit_hydrogen_valences(CH2FH2H_CGR_EXP) == {
        0: 0, 1: 2, 2: 1, 3: 0, 4: 0, 5: 0, 6: 0
    }
def test__atom_explicit_hydrogen_keys():
    """ test graph.atom_explicit_hydrogen_keys
    """
    # explicit hydrogen neighbor keys per atom
    assert graph.atom_explicit_hydrogen_keys(CH2FH2H_CGR_EXP) == {
        0: frozenset(),
        1: frozenset({4, 5}),
        2: frozenset({6}),
        3: frozenset(),
        4: frozenset(),
        5: frozenset(),
        6: frozenset()
    }
# # other properties
def test__backbone_keys():
    """ test graph.backbone_keys
    """
    bbn_keys = graph.backbone_keys(CH2FH2H_CGR_EXP)
    assert bbn_keys == frozenset({0, 1, 2, 3})
def test__explicit_hydrogen_keys():
    """ test graph.explicit_hydrogen_keys
    """
    hyd_keys = graph.explicit_hydrogen_keys(CH2FH2H_CGR_EXP)
    assert hyd_keys == frozenset({4, 5, 6})
def test__explicit():
    """ test graph.explicit
    """
    exp_cgr = graph.explicit(CH2FH2H_CGR_IMP)
    assert exp_cgr == CH2FH2H_CGR_EXP
def test__implicit():
    """ test graph.implicit

    implicit() inverts explicit(): round-tripping recovers the original.
    """
    imp_cgr = graph.implicit(graph.explicit(CH2FH2H_CGR_IMP))
    assert imp_cgr == CH2FH2H_CGR_IMP
# # comparisons
def test__backbone_isomorphic():
    """ test graph.backbone_isomorphic
    """
    assert graph.backbone_isomorphic(CH2FH2H_CGR_IMP, CH2FH2H_CGR_EXP)
    cgr = C8H13O_CGR
    natms = len(graph.atoms(cgr))
    # isomorphism must be invariant under random relabelings
    for _ in range(10):
        pmt = numpy.random.permutation(natms)
        relabeled = graph.relabel(cgr, dict(enumerate(pmt)))
        assert graph.backbone_isomorphic(cgr, relabeled)
def test__backbone_isomorphism():
    """ test graph.backbone_isomorphism
    """
    cgr = C8H13O_CGR
    natms = len(graph.atoms(cgr))
    # the isomorphism recovered from a relabeled copy is the relabeling
    for _ in range(10):
        pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
        cgr_pmt = graph.relabel(cgr, pmt_dct)
        iso_dct = graph.backbone_isomorphism(cgr, cgr_pmt)
        assert iso_dct == pmt_dct
def test__backbone_unique():
    """ test graph.backbone_unique
    """
    unique_rgrs = graph.backbone_unique(C3H3_RGRS)
    assert unique_rgrs == C3H3_RGRS[:2]
# chemistry library
def test__atom_element_valences():
    """ test graph.atom_element_valences
    """
    # element valences: C -> 4, O -> 2
    assert graph.atom_element_valences(C8H13O_CGR) == {
        0: 4, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4, 7: 4, 8: 2}
def test__atom_lone_pair_counts():
    """ test graph.atom_lone_pair_counts
    """
    # only the oxygen carries lone pairs
    assert graph.atom_lone_pair_counts(C8H13O_CGR) == {
        0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 2}
def test__atom_bond_valences():
    """ test graph.atom_bond_valences
    """
    assert graph.atom_bond_valences(C8H13O_CGR) == {
        0: 4, 1: 3, 2: 4, 3: 3, 4: 3, 5: 3, 6: 4, 7: 4, 8: 1}
def test__atom_unsaturated_valences():
    """ test graph.atom_unsaturated_valences
    """
    # element valence minus bond valence
    assert graph.atom_unsaturated_valences(C8H13O_CGR) == {
        0: 0, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1, 6: 0, 7: 0, 8: 1}
def test__unsaturated_atom_keys():
    """ test graph.unsaturated_atom_keys
    """
    unsat_keys = graph.unsaturated_atom_keys(C8H13O_CGR)
    assert unsat_keys == frozenset({1, 3, 4, 5, 8})
def test__maximum_spin_multiplicity():
    """ test graph.maximum_spin_multiplicity
    """
    mult = graph.maximum_spin_multiplicity(C2_CGR)
    assert mult == 7
def test__possible_spin_multiplicities():
    """ test graph.possible_spin_multiplicities
    """
    mults = graph.possible_spin_multiplicities(C2_CGR)
    assert mults == (1, 3, 5, 7)
# miscellaneous
def test__bond_symmetry_numbers():
    """ test graph.bond_symmetry_numbers
    """
    # bonds to CH3-like terminal groups get symmetry number 3
    assert graph.bond_symmetry_numbers(C8H13O_CGR) == {
        frozenset({1, 4}): 1, frozenset({4, 6}): 1, frozenset({2, 6}): 3,
        frozenset({0, 3}): 3, frozenset({6, 7}): 1, frozenset({8, 7}): 1,
        frozenset({3, 5}): 1, frozenset({5, 7}): 1}
# resonance graph library
# # atom properties
def test__resonance_dominant_atom_hybridizations():
    """ test graph.resonance_dominant_atom_hybridizations
    """
    assert graph.resonance_dominant_atom_hybridizations(C3H3_CGR) == {
        0: 2, 1: 2, 2: 2}
    assert graph.resonance_dominant_atom_hybridizations(C8H13O_CGR) == {
        0: 3, 1: 2, 2: 3, 3: 2, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3}
    # NOTE(review): the result below is printed, not asserted — consider
    # pinning the expected value (graph includes a dummy 'X' atom)
    cgr = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('O', 0, None),
            3: ('H', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
            6: ('X', 0, None)},
           {frozenset({1, 4}): (1, None), frozenset({1, 2}): (1, None),
            frozenset({0, 3}): (1, None), frozenset({0, 1}): (1, None),
            frozenset({2, 5}): (1, None)})
    print(graph.resonance_dominant_atom_hybridizations(cgr))
def test__resonance_dominant_atom_centered_cumulene_keys():
    """ test graph.resonance_dominant_atom_centered_cumulene_keys
    """
    # odd-length cumulene chain: ends {1, 4}, center atom 5
    cgr = ({0: ('C', 1, None), 1: ('C', 2, None), 2: ('C', 0, None),
            3: ('C', 0, None), 4: ('C', 1, None), 5: ('C', 0, None),
            6: ('C', 0, None)},
           {frozenset({4, 6}): (1, None), frozenset({0, 2}): (1, None),
            frozenset({2, 4}): (1, None), frozenset({5, 6}): (1, None),
            frozenset({3, 5}): (1, None), frozenset({1, 3}): (1, None)})
    assert (graph.resonance_dominant_atom_centered_cumulene_keys(cgr) ==
            frozenset({(frozenset({1, 4}), 5)}))
def test__resonance_dominant_bond_centered_cumulene_keys():
    """ test graph.resonance_dominant_bond_centered_cumulene_keys
    """
    # even-length cumulene chain: ends {1, 4}, center bond {3, 5}
    cgr = ({0: ('C', 1, None), 1: ('C', 2, None), 2: ('C', 0, None),
            3: ('C', 0, None), 4: ('C', 1, None), 5: ('C', 0, None)},
           {frozenset({4, 5}): (1, None), frozenset({0, 2}): (1, None),
            frozenset({2, 4}): (1, None), frozenset({3, 5}): (1, None),
            frozenset({1, 3}): (1, None)})
    assert (graph.resonance_dominant_bond_centered_cumulene_keys(cgr) ==
            frozenset({(frozenset({1, 4}), frozenset({3, 5}))}))
def test__resonance_dominant_radical_atom_keys():
    """ test graph.resonance_dominant_radical_atom_keys
    """
    assert graph.resonance_dominant_radical_atom_keys(C3H3_CGR) == frozenset(
        {0, 1, 2})
    assert graph.resonance_dominant_radical_atom_keys(C8H13O_CGR) == frozenset(
        {8})
def test__sigma_radical_atom_keys():
    """ test graph.sigma_radical_atom_keys
    """
    # CCC#[C]
    gra = ({0: ('C', 3, None), 1: ('C', 0, None), 2: ('C', 2, None),
            3: ('C', 0, None)},
           {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
            frozenset({2, 3}): (1, None)})
    assert graph.sigma_radical_atom_keys(gra) == frozenset({1})
    # [C]#CC(CC)(CCC#[C])CC#[C]
    gra = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 3, None),
            3: ('C', 0, None), 4: ('C', 0, None), 5: ('C', 0, None),
            6: ('C', 2, None), 7: ('C', 0, None), 8: ('C', 2, None),
            9: ('C', 2, None), 10: ('C', 2, None), 11: ('C', 0, None)},
           {frozenset({8, 4}): (1, None), frozenset({3, 7}): (1, None),
            frozenset({2, 6}): (1, None), frozenset({0, 4}): (1, None),
            frozenset({8, 10}): (1, None), frozenset({9, 11}): (1, None),
            frozenset({1, 5}): (1, None), frozenset({9, 5}): (1, None),
            frozenset({11, 7}): (1, None), frozenset({10, 11}): (1, None),
            frozenset({11, 6}): (1, None)})
    assert graph.sigma_radical_atom_keys(gra) == frozenset({0, 1, 3})
# # bond properties
def test__resonance_dominant_bond_orders():
    """ test graph.resonance_dominant_bond_orders
    """
    # each bond can be single or double across the dominant resonances
    assert graph.resonance_dominant_bond_orders(C3H3_CGR) == {
        frozenset({0, 1}): frozenset({1, 2}),
        frozenset({0, 2}): frozenset({1, 2}),
        frozenset({1, 2}): frozenset({1, 2})
    }
# # transformations
def test__resonances():
    """ test graph.resonances
    """
    rgrs = graph.resonances(C3H3_CGR)
    assert rgrs == C3H3_RGRS
def test__subresonances():
    """ test graph.subresonances
    """
    sub_rgrs = graph.subresonances(C2_RGRS[1])
    assert sub_rgrs == C2_RGRS[1:]
def test__dominant_resonances():
    """ test graph.dominant_resonances
    """
    dom_rgrs = graph.dominant_resonances(C3H3_CGR)
    assert dom_rgrs == C3H3_RGRS[1:]
def test__dominant_resonance():
    """ test graph.dominant_resonance
    """
    dom_rgr = graph.dominant_resonance(C3H3_CGR)
    assert dom_rgr == C3H3_RGRS[1]
def test__rotational_bond_keys():
    """ test graph.rotational_bond_keys
    """
    # butene-like chain: only the central single bond is a rotor
    cgr = ({0: ('C', 2, None), 1: ('C', 2, None), 2: ('C', 1, None),
            3: ('C', 1, None)},
           {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
            frozenset({2, 3}): (1, None)})
    cgr = automol.graph.explicit(cgr)
    assert (automol.graph.rotational_bond_keys(cgr) ==
            frozenset({frozenset({2, 3})}))
    # butane-like chain: all three C-C bonds rotate
    cgr = ({0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 2, None),
            3: ('C', 2, None)},
           {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
            frozenset({2, 3}): (1, None)})
    assert (automol.graph.rotational_bond_keys(cgr) ==
            frozenset({frozenset({0, 2}), frozenset({1, 3}),
                       frozenset({2, 3})}))
    # with_h_rotors=False excludes the terminal methyl rotors
    assert (automol.graph.rotational_bond_keys(cgr, with_h_rotors=False) ==
            frozenset({frozenset({2, 3})}))
# stereo graph library
def test__stereogenic_atom_keys():
    """ test graph.stereogenic_atom_keys
    """
    assert graph.stereogenic_atom_keys(C8H13O_CGR) == frozenset({6, 7})
    assert graph.stereogenic_atom_keys(C3H3CL2F3_CGR) == frozenset({1, 2})
    # secondary alcohol carbon: four distinct substituents
    cgr = ({0: ('C', 2, None), 1: ('C', 3, None), 2: ('C', 1, None),
            3: ('O', 1, None)},
           {frozenset({0, 2}): (1, None), frozenset({2, 3}): (1, None),
            frozenset({1, 2}): (1, None)})
    assert graph.stereogenic_atom_keys(cgr) == frozenset({2})
def test__stereogenic_bond_keys():
    """ test graph.stereogenic_bond_keys
    """
    # NOTE(review): debug prints left in — safe to remove
    print(graph.stereogenic_bond_keys(C8H13O_CGR))
    print(graph.stereogenic_bond_keys(C3H5N3_CGR))
    assert graph.stereogenic_bond_keys(C8H13O_CGR) == frozenset(
        {frozenset({3, 5})})
    assert graph.stereogenic_bond_keys(C3H5N3_CGR) == frozenset(
        {frozenset({1, 4}), frozenset({0, 3})})
def test__stereomers():
    """ test graph.stereomers

    Each connectivity graph expands to its full tuple of stereoisomers.
    """
    cases = ((C2H2CL2F2_CGR, C2H2CL2F2_SGRS),
             (C3H3CL2F3_CGR, C3H3CL2F3_SGRS),
             (C3H5N3_CGR, C3H5N3_SGRS),
             (C8H13O_CGR, C8H13O_SGRS))
    for cgr, sgrs in cases:
        assert graph.stereomers(cgr) == sgrs
def test__to_index_based_stereo():
    """ test graph.to_index_based_stereo / graph.from_index_based_stereo

    The index-based stereo representation must round-trip for every
    stereoisomer of every test species.
    """
    all_sgrs = C2H2CL2F2_SGRS + C3H3CL2F3_SGRS + C3H5N3_SGRS + C8H13O_SGRS
    for sgr in all_sgrs:
        sgr = graph.explicit(sgr)
        idx_sgr = graph.to_index_based_stereo(sgr)
        assert graph.from_index_based_stereo(idx_sgr) == sgr
def test__ring_systems():
    """ test graph.ring_systems
    """
    ich = automol.smiles.inchi('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')
    gra = automol.inchi.graph(ich)
    # three fused-ring systems, sorted by size
    rsys = sorted(graph.ring_systems(gra), key=graph.atom_count)
    assert [graph.atom_count(rsy) for rsy in rsys] == [7, 12, 21]
# ISOBUTANE
# Explicit-hydrogen graph of isobutane: atoms 0-2 are the methyl carbons,
# 3 the central carbon, 4-12 the methyl hydrogens, 13 the central hydrogen.
C4H10_GRA = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, None),
              3: ('C', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
              6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
              9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
              12: ('H', 0, None), 13: ('H', 0, None)},
             {frozenset({0, 3}): (1, None), frozenset({0, 4}): (1, None),
              frozenset({0, 5}): (1, None), frozenset({0, 6}): (1, None),
              frozenset({1, 3}): (1, None), frozenset({1, 7}): (1, None),
              frozenset({8, 1}): (1, None), frozenset({1, 9}): (1, None),
              frozenset({2, 3}): (1, None), frozenset({2, 10}): (1, None),
              frozenset({2, 11}): (1, None), frozenset({2, 12}): (1, None),
              frozenset({3, 13}): (1, None)})
def test__equivalent_atoms():
    """ test graph.equivalent_atoms
    """
    # the central carbon and its hydrogen are each in a class of their own
    assert graph.equivalent_atoms(C4H10_GRA, 3) == {3}
    assert graph.equivalent_atoms(C4H10_GRA, 13) == {13}
    # the three terminal (methyl) carbons are interchangeable
    methyl_carbons = {0, 1, 2}
    for key in (0, 1, 2):
        assert graph.equivalent_atoms(C4H10_GRA, key) == methyl_carbons
    # the nine terminal (methyl) hydrogens are interchangeable
    methyl_hydrogens = {4, 5, 6, 7, 8, 9, 10, 11, 12}
    for key in (4, 5, 6, 11, 12):
        assert graph.equivalent_atoms(C4H10_GRA, key) == methyl_hydrogens
def test__equivalent_bonds():
    """ test graph.equivalent_bonds
    """
    # the three methyl-to-central-carbon bonds are interchangeable
    assert graph.equivalent_bonds(C4H10_GRA, (2, 3)) == {
        (0, 3), (1, 3), (2, 3)}
def test__vmat__vmatrix():
    """ test graph.vmat.vmatrix
    """
    ich = automol.smiles.inchi('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')
    gra = automol.inchi.graph(ich)
    # the z-matrix keys must cover every atom of the graph exactly once
    atm_keys = graph.atom_keys(gra)
    _, zma_keys = graph.vmat.vmatrix(gra)
    assert set(zma_keys) == atm_keys
# FC=CC=CF + [OH] => FC=C[CH]C(O)F
# Transition-state graph: the (0.1, None) bond order marks the forming
# O-C bond; bond stereo parities mark the reactant double bonds.
C4H5F2O_TSG = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, None),
                3: ('C', 0, None), 4: ('F', 0, None), 5: ('F', 0, None),
                6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
                9: ('H', 0, None), 10: ('O', 0, None), 11: ('H', 0, None)},
               {frozenset({8, 2}): (1, None), frozenset({2, 10}): (0.1, None),
                frozenset({0, 6}): (1, None), frozenset({1, 7}): (1, None),
                frozenset({9, 3}): (1, None), frozenset({0, 1}): (1, None),
                frozenset({0, 2}): (1, True), frozenset({2, 4}): (1, None),
                frozenset({3, 5}): (1, None), frozenset({10, 11}): (1, None),
                frozenset({1, 3}): (1, False)})
# FC=C(C(O)F)C(O)F + [OH] => FC(O)[C](C(O)F)C(O)F
# Transition-state graph: the (0.1, None) bond order marks the forming
# O-C bond; atoms 2 and 3 carry stereo parities.
C4H5F3O2_TSG = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, False),
                 3: ('C', 0, True), 4: ('F', 0, None), 5: ('F', 0, None),
                 6: ('F', 0, None), 7: ('O', 0, None), 8: ('O', 0, None),
                 9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
                 12: ('H', 0, None), 13: ('H', 0, None), 14: ('O', 0, None),
                 15: ('H', 0, None)},
                {frozenset({12, 7}): (1, None), frozenset({2, 10}): (1, None),
                 frozenset({1, 2}): (1, None), frozenset({0, 1}): (1, True),
                 frozenset({3, 6}): (1, None), frozenset({2, 7}): (1, None),
                 frozenset({2, 5}): (1, None), frozenset({0, 4}): (1, None),
                 frozenset({8, 3}): (1, None), frozenset({0, 14}): (0.1, None),
                 frozenset({8, 13}): (1, None), frozenset({14, 15}): (1, None),
                 frozenset({11, 3}): (1, None), frozenset({1, 3}): (1, None),
                 frozenset({0, 9}): (1, None)})
def test__ts__nonconserved_atom_stereo_keys():
    """ test graph.ts.nonconserved_atom_stereo_keys
    """
    # returns (created stereocenters, destroyed stereocenters)
    assert graph.ts.nonconserved_atom_stereo_keys(C4H5F2O_TSG) == (
        (frozenset({2}), frozenset()))
    assert graph.ts.nonconserved_atom_stereo_keys(C4H5F3O2_TSG) == (
        (frozenset({0}), frozenset()))
def test__ts__nonconserved_bond_stereo_keys():
    """ test graph.ts.nonconserved_bond_stereo_keys
    """
    # returns (created stereo bonds, destroyed stereo bonds)
    assert graph.ts.nonconserved_bond_stereo_keys(C4H5F2O_TSG) == (
        (frozenset({frozenset({0, 1})}), frozenset({frozenset({0, 2})})))
    assert graph.ts.nonconserved_bond_stereo_keys(C4H5F3O2_TSG) == (
        (frozenset(), frozenset({frozenset({0, 1})})))
def test__ts__compatible_reverse_stereomers():
    """ test graph.ts.compatible_reverse_stereomers

    Reversing twice must reproduce the original stereo TS graph among
    the compatible candidates.
    """
    for tsg in (C4H5F2O_TSG, C4H5F3O2_TSG):
        for ste_tsg in graph.ts.stereomers(tsg):
            round_trip_tsgs = [
                s
                for r in graph.ts.compatible_reverse_stereomers(ste_tsg)
                for s in graph.ts.compatible_reverse_stereomers(r)]
            assert any(s == ste_tsg for s in round_trip_tsgs)
if __name__ == '__main__':
    # run the most recently exercised tests directly as a script
    test__equivalent_atoms()
    test__equivalent_bonds()
|
[
"automol.graph.transform_keys",
"automol.graph.atom_symbols",
"automol.graph.from_index_based_stereo",
"automol.graph.stereogenic_bond_keys",
"automol.graph.atom_stereo_parities",
"automol.graph.string",
"automol.graph.subgraph",
"automol.graph.resonance_dominant_radical_atom_keys",
"automol.graph.to_index_based_stereo",
"automol.graph.bond_orders",
"automol.graph.ts.stereomers",
"automol.graph.bond_keys",
"automol.graph.equivalent_bonds",
"automol.graph.resonances",
"automol.graph.atom_element_valences",
"automol.graph.ts.nonconserved_atom_stereo_keys",
"automol.smiles.inchi",
"automol.graph.atom_keys",
"automol.graph.atoms_neighbor_atom_keys",
"automol.graph.atom_lone_pair_counts",
"automol.graph.atom_explicit_hydrogen_valences",
"automol.graph.possible_spin_multiplicities",
"automol.graph.relabel",
"automol.graph.dominant_resonances",
"automol.graph.atom_explicit_hydrogen_keys",
"automol.graph.resonance_dominant_atom_centered_cumulene_keys",
"automol.graph.remove_atoms",
"automol.graph.electron_count",
"automol.graph.connected_components",
"automol.inchi.graph",
"automol.graph.atom_unsaturated_valences",
"automol.graph.backbone_isomorphism",
"automol.graph.union",
"automol.graph.resonance_dominant_bond_orders",
"automol.graph.equivalent_atoms",
"automol.graph.dominant_resonance",
"automol.graph.stereogenic_atom_keys",
"automol.graph.set_atom_implicit_hydrogen_valences",
"automol.graph.ring_systems",
"automol.graph.stereomers",
"automol.graph.atom_implicit_hydrogen_valences",
"automol.graph.formula",
"automol.graph.backbone_keys",
"numpy.random.permutation",
"automol.graph.atoms_bond_keys",
"automol.graph.explicit",
"automol.graph.vmat.vmatrix",
"automol.graph.bonds_neighbor_atom_keys",
"automol.graph.atom_count",
"automol.graph.explicit_hydrogen_keys",
"automol.graph.atom_bond_valences",
"automol.graph.ts.compatible_reverse_stereomers",
"automol.graph.atoms",
"automol.graph.rotational_bond_keys",
"automol.graph.sigma_radical_atom_keys",
"automol.graph.without_stereo_parities",
"automol.graph.atoms_second_degree_neighbor_atom_keys",
"automol.graph.bond_symmetry_numbers",
"automol.graph.resonance_dominant_bond_centered_cumulene_keys",
"automol.graph.maximum_spin_multiplicity",
"automol.graph.backbone_unique",
"automol.graph.backbone_isomorphic",
"automol.graph.subresonances",
"automol.graph.without_bond_orders",
"automol.graph.heavy_atom_count",
"automol.graph.resonance_dominant_atom_hybridizations",
"automol.graph.unsaturated_atom_keys",
"automol.graph.bond_stereo_parities",
"automol.graph.ts.nonconserved_bond_stereo_keys"
] |
[((14521, 14548), 'automol.graph.atom_keys', 'graph.atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (14536, 14548), False, 'from automol import graph\n'), ((14559, 14656), 'automol.graph.set_atom_implicit_hydrogen_valences', 'graph.set_atom_implicit_hydrogen_valences', (['C8H13O_CGR', '{atm_key: (0) for atm_key in atm_keys}'], {}), '(C8H13O_CGR, {atm_key: (0) for\n atm_key in atm_keys})\n', (14600, 14656), False, 'from automol import graph\n'), ((15651, 15677), 'automol.graph.explicit', 'graph.explicit', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15665, 15677), False, 'from automol import graph\n'), ((18334, 18386), 'automol.graph.transform_keys', 'graph.transform_keys', (['gra2', '(lambda x: x + gra1_natms)'], {}), '(gra2, lambda x: x + gra1_natms)\n', (18354, 18386), False, 'from automol import graph\n'), ((18398, 18421), 'automol.graph.union', 'graph.union', (['gra1', 'gra2'], {}), '(gra1, gra2)\n', (18409, 18421), False, 'from automol import graph\n'), ((18437, 18468), 'automol.graph.connected_components', 'graph.connected_components', (['gra'], {}), '(gra)\n', (18463, 18468), False, 'from automol import graph\n'), ((21207, 21266), 'automol.graph.backbone_isomorphic', 'graph.backbone_isomorphic', (['CH2FH2H_CGR_IMP', 'CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_IMP, CH2FH2H_CGR_EXP)\n', (21232, 21266), False, 'from automol import graph\n'), ((28420, 28447), 'automol.graph.explicit', 'automol.graph.explicit', (['cgr'], {}), '(cgr)\n', (28442, 28447), False, 'import automol\n'), ((31128, 31187), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4"""'], {}), "('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')\n", (31148, 31187), False, 'import automol\n'), ((31198, 31222), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {}), '(ich)\n', (31217, 31222), False, 'import automol\n'), ((33578, 33637), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4"""'], {}), 
"('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')\n", (33598, 33637), False, 'import automol\n'), ((33648, 33672), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {}), '(ich)\n', (33667, 33672), False, 'import automol\n'), ((33691, 33714), 'automol.graph.vmat.vmatrix', 'graph.vmat.vmatrix', (['gra'], {}), '(gra)\n', (33709, 33714), False, 'from automol import graph\n'), ((36416, 36448), 'automol.graph.ts.stereomers', 'graph.ts.stereomers', (['C4H5F2O_TSG'], {}), '(C4H5F2O_TSG)\n', (36435, 36448), False, 'from automol import graph\n'), ((36689, 36722), 'automol.graph.ts.stereomers', 'graph.ts.stereomers', (['C4H5F3O2_TSG'], {}), '(C4H5F3O2_TSG)\n', (36708, 36722), False, 'from automol import graph\n'), ((15061, 15098), 'automol.graph.without_bond_orders', 'graph.without_bond_orders', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (15086, 15098), False, 'from automol import graph\n'), ((15214, 15255), 'automol.graph.without_stereo_parities', 'graph.without_stereo_parities', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (15243, 15255), False, 'from automol import graph\n'), ((15339, 15371), 'automol.graph.electron_count', 'graph.electron_count', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15359, 15371), False, 'from automol import graph\n'), ((15457, 15485), 'automol.graph.atom_count', 'graph.atom_count', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15473, 15485), False, 'from automol import graph\n'), ((15503, 15552), 'automol.graph.atom_count', 'graph.atom_count', (['C8H13O_CGR'], {'with_implicit': '(False)'}), '(C8H13O_CGR, with_implicit=False)\n', (15519, 15552), False, 'from automol import graph\n'), ((15689, 15716), 'automol.graph.heavy_atom_count', 'graph.heavy_atom_count', (['cgr'], {}), '(cgr)\n', (15711, 15716), False, 'from automol import graph\n'), ((15825, 15867), 'automol.graph.atoms_neighbor_atom_keys', 'graph.atoms_neighbor_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15855, 15867), False, 'from automol import graph\n'), ((16259, 16315), 
'automol.graph.atoms_second_degree_neighbor_atom_keys', 'graph.atoms_second_degree_neighbor_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (16303, 16315), False, 'from automol import graph\n'), ((16691, 16724), 'automol.graph.atoms_bond_keys', 'graph.atoms_bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (16712, 16724), False, 'from automol import graph\n'), ((17423, 17465), 'automol.graph.bonds_neighbor_atom_keys', 'graph.bonds_neighbor_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (17453, 17465), False, 'from automol import graph\n'), ((18298, 18321), 'automol.graph.formula', 'graph.formula', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (18311, 18321), False, 'from automol import graph\n'), ((18592, 18624), 'automol.graph.subgraph', 'graph.subgraph', (['C3H3_CGR', '(1, 2)'], {}), '(C3H3_CGR, (1, 2))\n', (18606, 18624), False, 'from automol import graph\n'), ((19141, 19193), 'automol.graph.relabel', 'graph.relabel', (['C3H3_CGR', '{(0): 10, (1): 11, (2): 12}'], {}), '(C3H3_CGR, {(0): 10, (1): 11, (2): 12})\n', (19154, 19193), False, 'from automol import graph\n'), ((19457, 19491), 'automol.graph.remove_atoms', 'graph.remove_atoms', (['C3H3_CGR', '(0,)'], {}), '(C3H3_CGR, (0,))\n', (19475, 19491), False, 'from automol import graph\n'), ((20032, 20086), 'automol.graph.atom_explicit_hydrogen_valences', 'graph.atom_explicit_hydrogen_valences', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20069, 20086), False, 'from automol import graph\n'), ((20256, 20306), 'automol.graph.atom_explicit_hydrogen_keys', 'graph.atom_explicit_hydrogen_keys', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20289, 20306), False, 'from automol import graph\n'), ((20596, 20632), 'automol.graph.backbone_keys', 'graph.backbone_keys', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20615, 20632), False, 'from automol import graph\n'), ((20759, 20804), 'automol.graph.explicit_hydrogen_keys', 'graph.explicit_hydrogen_keys', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20787, 
20804), False, 'from automol import graph\n'), ((20928, 20959), 'automol.graph.explicit', 'graph.explicit', (['CH2FH2H_CGR_IMP'], {}), '(CH2FH2H_CGR_IMP)\n', (20942, 20959), False, 'from automol import graph\n'), ((21305, 21321), 'automol.graph.atoms', 'graph.atoms', (['cgr'], {}), '(cgr)\n', (21316, 21321), False, 'from automol import graph\n'), ((21432, 21459), 'automol.graph.relabel', 'graph.relabel', (['cgr', 'pmt_dct'], {}), '(cgr, pmt_dct)\n', (21445, 21459), False, 'from automol import graph\n'), ((21475, 21514), 'automol.graph.backbone_isomorphic', 'graph.backbone_isomorphic', (['cgr', 'cgr_pmt'], {}), '(cgr, cgr_pmt)\n', (21500, 21514), False, 'from automol import graph\n'), ((21636, 21652), 'automol.graph.atoms', 'graph.atoms', (['cgr'], {}), '(cgr)\n', (21647, 21652), False, 'from automol import graph\n'), ((21763, 21790), 'automol.graph.relabel', 'graph.relabel', (['cgr', 'pmt_dct'], {}), '(cgr, pmt_dct)\n', (21776, 21790), False, 'from automol import graph\n'), ((21943, 21975), 'automol.graph.backbone_unique', 'graph.backbone_unique', (['C3H3_RGRS'], {}), '(C3H3_RGRS)\n', (21964, 21975), False, 'from automol import graph\n'), ((22110, 22149), 'automol.graph.atom_element_valences', 'graph.atom_element_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22137, 22149), False, 'from automol import graph\n'), ((22314, 22353), 'automol.graph.atom_lone_pair_counts', 'graph.atom_lone_pair_counts', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22341, 22353), False, 'from automol import graph\n'), ((22512, 22548), 'automol.graph.atom_bond_valences', 'graph.atom_bond_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22536, 22548), False, 'from automol import graph\n'), ((22721, 22764), 'automol.graph.atom_unsaturated_valences', 'graph.atom_unsaturated_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22752, 22764), False, 'from automol import graph\n'), ((22929, 22968), 'automol.graph.unsaturated_atom_keys', 'graph.unsaturated_atom_keys', (['C8H13O_CGR'], {}), 
'(C8H13O_CGR)\n', (22956, 22968), False, 'from automol import graph\n'), ((23113, 23152), 'automol.graph.maximum_spin_multiplicity', 'graph.maximum_spin_multiplicity', (['C2_CGR'], {}), '(C2_CGR)\n', (23144, 23152), False, 'from automol import graph\n'), ((23269, 23311), 'automol.graph.possible_spin_multiplicities', 'graph.possible_spin_multiplicities', (['C2_CGR'], {}), '(C2_CGR)\n', (23303, 23311), False, 'from automol import graph\n'), ((23441, 23480), 'automol.graph.bond_symmetry_numbers', 'graph.bond_symmetry_numbers', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (23468, 23480), False, 'from automol import graph\n'), ((23863, 23917), 'automol.graph.resonance_dominant_atom_hybridizations', 'graph.resonance_dominant_atom_hybridizations', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (23907, 23917), False, 'from automol import graph\n'), ((23960, 24016), 'automol.graph.resonance_dominant_atom_hybridizations', 'graph.resonance_dominant_atom_hybridizations', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (24004, 24016), False, 'from automol import graph\n'), ((24452, 24501), 'automol.graph.resonance_dominant_atom_hybridizations', 'graph.resonance_dominant_atom_hybridizations', (['cgr'], {}), '(cgr)\n', (24496, 24501), False, 'from automol import graph\n'), ((25038, 25095), 'automol.graph.resonance_dominant_atom_centered_cumulene_keys', 'graph.resonance_dominant_atom_centered_cumulene_keys', (['cgr'], {}), '(cgr)\n', (25090, 25095), False, 'from automol import graph\n'), ((25622, 25679), 'automol.graph.resonance_dominant_bond_centered_cumulene_keys', 'graph.resonance_dominant_bond_centered_cumulene_keys', (['cgr'], {}), '(cgr)\n', (25674, 25679), False, 'from automol import graph\n'), ((25875, 25927), 'automol.graph.resonance_dominant_radical_atom_keys', 'graph.resonance_dominant_radical_atom_keys', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (25917, 25927), False, 'from automol import graph\n'), ((25972, 26026), 'automol.graph.resonance_dominant_radical_atom_keys', 
'graph.resonance_dominant_radical_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (26014, 26026), False, 'from automol import graph\n'), ((26385, 26419), 'automol.graph.sigma_radical_atom_keys', 'graph.sigma_radical_atom_keys', (['gra'], {}), '(gra)\n', (26414, 26419), False, 'from automol import graph\n'), ((27170, 27204), 'automol.graph.sigma_radical_atom_keys', 'graph.sigma_radical_atom_keys', (['gra'], {}), '(gra)\n', (27199, 27204), False, 'from automol import graph\n'), ((27364, 27410), 'automol.graph.resonance_dominant_bond_orders', 'graph.resonance_dominant_bond_orders', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (27400, 27410), False, 'from automol import graph\n'), ((27654, 27680), 'automol.graph.resonances', 'graph.resonances', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (27670, 27680), False, 'from automol import graph\n'), ((27775, 27806), 'automol.graph.subresonances', 'graph.subresonances', (['C2_RGRS[1]'], {}), '(C2_RGRS[1])\n', (27794, 27806), False, 'from automol import graph\n'), ((27915, 27950), 'automol.graph.dominant_resonances', 'graph.dominant_resonances', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (27940, 27950), False, 'from automol import graph\n'), ((28059, 28093), 'automol.graph.dominant_resonance', 'graph.dominant_resonance', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (28083, 28093), False, 'from automol import graph\n'), ((28460, 28499), 'automol.graph.rotational_bond_keys', 'automol.graph.rotational_bond_keys', (['cgr'], {}), '(cgr)\n', (28494, 28499), False, 'import automol\n'), ((28776, 28815), 'automol.graph.rotational_bond_keys', 'automol.graph.rotational_bond_keys', (['cgr'], {}), '(cgr)\n', (28810, 28815), False, 'import automol\n'), ((28936, 28996), 'automol.graph.rotational_bond_keys', 'automol.graph.rotational_bond_keys', (['cgr'], {'with_h_rotors': '(False)'}), '(cgr, with_h_rotors=False)\n', (28970, 28996), False, 'import automol\n'), ((29164, 29203), 'automol.graph.stereogenic_atom_keys', 'graph.stereogenic_atom_keys', (['C8H13O_CGR'], {}), 
'(C8H13O_CGR)\n', (29191, 29203), False, 'from automol import graph\n'), ((29236, 29278), 'automol.graph.stereogenic_atom_keys', 'graph.stereogenic_atom_keys', (['C3H3CL2F3_CGR'], {}), '(C3H3CL2F3_CGR)\n', (29263, 29278), False, 'from automol import graph\n'), ((29528, 29560), 'automol.graph.stereogenic_atom_keys', 'graph.stereogenic_atom_keys', (['cgr'], {}), '(cgr)\n', (29555, 29560), False, 'from automol import graph\n'), ((29675, 29714), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (29702, 29714), False, 'from automol import graph\n'), ((29726, 29765), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C3H5N3_CGR'], {}), '(C3H5N3_CGR)\n', (29753, 29765), False, 'from automol import graph\n'), ((29778, 29817), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (29805, 29817), False, 'from automol import graph\n'), ((29872, 29911), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C3H5N3_CGR'], {}), '(C3H5N3_CGR)\n', (29899, 29911), False, 'from automol import graph\n'), ((30049, 30080), 'automol.graph.stereomers', 'graph.stereomers', (['C2H2CL2F2_CGR'], {}), '(C2H2CL2F2_CGR)\n', (30065, 30080), False, 'from automol import graph\n'), ((30110, 30141), 'automol.graph.stereomers', 'graph.stereomers', (['C3H3CL2F3_CGR'], {}), '(C3H3CL2F3_CGR)\n', (30126, 30141), False, 'from automol import graph\n'), ((30171, 30199), 'automol.graph.stereomers', 'graph.stereomers', (['C3H5N3_CGR'], {}), '(C3H5N3_CGR)\n', (30187, 30199), False, 'from automol import graph\n'), ((30226, 30254), 'automol.graph.stereomers', 'graph.stereomers', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (30242, 30254), False, 'from automol import graph\n'), ((30390, 30409), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30404, 30409), False, 'from automol import graph\n'), ((30428, 30460), 
'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30455, 30460), False, 'from automol import graph\n'), ((30568, 30587), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30582, 30587), False, 'from automol import graph\n'), ((30606, 30638), 'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30633, 30638), False, 'from automol import graph\n'), ((30743, 30762), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30757, 30762), False, 'from automol import graph\n'), ((30781, 30813), 'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30808, 30813), False, 'from automol import graph\n'), ((30918, 30937), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30932, 30937), False, 'from automol import graph\n'), ((30956, 30988), 'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30983, 30988), False, 'from automol import graph\n'), ((31241, 31264), 'automol.graph.ring_systems', 'graph.ring_systems', (['gra'], {}), '(gra)\n', (31259, 31264), False, 'from automol import graph\n'), ((32304, 32340), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(3)'], {}), '(C4H10_GRA, 3)\n', (32326, 32340), False, 'from automol import graph\n'), ((32382, 32419), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(13)'], {}), '(C4H10_GRA, 13)\n', (32404, 32419), False, 'from automol import graph\n'), ((32462, 32498), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(0)'], {}), '(C4H10_GRA, 0)\n', (32484, 32498), False, 'from automol import graph\n'), ((32523, 32559), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(1)'], {}), '(C4H10_GRA, 1)\n', (32545, 32559), False, 'from automol import graph\n'), ((32584, 32620), 
'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(2)'], {}), '(C4H10_GRA, 2)\n', (32606, 32620), False, 'from automol import graph\n'), ((32670, 32706), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(4)'], {}), '(C4H10_GRA, 4)\n', (32692, 32706), False, 'from automol import graph\n'), ((32804, 32840), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(5)'], {}), '(C4H10_GRA, 5)\n', (32826, 32840), False, 'from automol import graph\n'), ((32938, 32974), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(6)'], {}), '(C4H10_GRA, 6)\n', (32960, 32974), False, 'from automol import graph\n'), ((33072, 33109), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(11)'], {}), '(C4H10_GRA, 11)\n', (33094, 33109), False, 'from automol import graph\n'), ((33208, 33245), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(12)'], {}), '(C4H10_GRA, 12)\n', (33230, 33245), False, 'from automol import graph\n'), ((33420, 33461), 'automol.graph.equivalent_bonds', 'graph.equivalent_bonds', (['C4H10_GRA', '(2, 3)'], {}), '(C4H10_GRA, (2, 3))\n', (33442, 33461), False, 'from automol import graph\n'), ((33743, 33763), 'automol.graph.atom_keys', 'graph.atom_keys', (['gra'], {}), '(gra)\n', (33758, 33763), False, 'from automol import graph\n'), ((35710, 35761), 'automol.graph.ts.nonconserved_atom_stereo_keys', 'graph.ts.nonconserved_atom_stereo_keys', (['C4H5F2O_TSG'], {}), '(C4H5F2O_TSG)\n', (35748, 35761), False, 'from automol import graph\n'), ((35817, 35869), 'automol.graph.ts.nonconserved_atom_stereo_keys', 'graph.ts.nonconserved_atom_stereo_keys', (['C4H5F3O2_TSG'], {}), '(C4H5F3O2_TSG)\n', (35855, 35869), False, 'from automol import graph\n'), ((36034, 36085), 'automol.graph.ts.nonconserved_bond_stereo_keys', 'graph.ts.nonconserved_bond_stereo_keys', (['C4H5F2O_TSG'], {}), '(C4H5F2O_TSG)\n', (36072, 36085), False, 'from 
automol import graph\n'), ((36176, 36228), 'automol.graph.ts.nonconserved_bond_stereo_keys', 'graph.ts.nonconserved_bond_stereo_keys', (['C4H5F3O2_TSG'], {}), '(C4H5F3O2_TSG)\n', (36214, 36228), False, 'from automol import graph\n'), ((13481, 13511), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (13499, 13511), False, 'from automol import graph\n'), ((13530, 13557), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (13545, 13557), False, 'from automol import graph\n'), ((13601, 13650), 'automol.graph.atom_implicit_hydrogen_valences', 'graph.atom_implicit_hydrogen_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (13638, 13650), False, 'from automol import graph\n'), ((13744, 13774), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13762, 13774), False, 'from automol import graph\n'), ((13793, 13820), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13808, 13820), False, 'from automol import graph\n'), ((13864, 13913), 'automol.graph.atom_implicit_hydrogen_valences', 'graph.atom_implicit_hydrogen_valences', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13901, 13913), False, 'from automol import graph\n'), ((13936, 13965), 'automol.graph.bond_orders', 'graph.bond_orders', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13953, 13965), False, 'from automol import graph\n'), ((14058, 14088), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14076, 14088), False, 'from automol import graph\n'), ((14107, 14134), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14122, 14134), False, 'from automol import graph\n'), ((14178, 14227), 'automol.graph.atom_implicit_hydrogen_valences', 'graph.atom_implicit_hydrogen_valences', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14215, 14227), False, 'from automol import graph\n'), ((14254, 14292), 
'automol.graph.atom_stereo_parities', 'graph.atom_stereo_parities', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14280, 14292), False, 'from automol import graph\n'), ((14318, 14356), 'automol.graph.bond_stereo_parities', 'graph.bond_stereo_parities', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14344, 14356), False, 'from automol import graph\n'), ((14712, 14742), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (14730, 14742), False, 'from automol import graph\n'), ((14744, 14771), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (14759, 14771), False, 'from automol import graph\n'), ((21065, 21096), 'automol.graph.explicit', 'graph.explicit', (['CH2FH2H_CGR_IMP'], {}), '(CH2FH2H_CGR_IMP)\n', (21079, 21096), False, 'from automol import graph\n'), ((21806, 21846), 'automol.graph.backbone_isomorphism', 'graph.backbone_isomorphism', (['cgr', 'cgr_pmt'], {}), '(cgr, cgr_pmt)\n', (21832, 21846), False, 'from automol import graph\n'), ((30483, 30521), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (30512, 30521), False, 'from automol import graph\n'), ((30661, 30699), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (30690, 30699), False, 'from automol import graph\n'), ((30836, 30874), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (30865, 30874), False, 'from automol import graph\n'), ((31011, 31049), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (31040, 31049), False, 'from automol import graph\n'), ((14927, 14952), 'automol.graph.string', 'automol.graph.string', (['sgr'], {}), '(sgr)\n', (14947, 14952), False, 'import automol\n'), ((21380, 21411), 'numpy.random.permutation', 'numpy.random.permutation', (['natms'], {}), '(natms)\n', (21404, 21411), False, 
'import numpy\n'), ((21711, 21742), 'numpy.random.permutation', 'numpy.random.permutation', (['natms'], {}), '(natms)\n', (21735, 21742), False, 'import numpy\n'), ((36506, 36553), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['ste_tsg'], {}), '(ste_tsg)\n', (36544, 36553), False, 'from automol import graph\n'), ((36575, 36616), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['r'], {}), '(r)\n', (36613, 36616), False, 'from automol import graph\n'), ((36780, 36827), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['ste_tsg'], {}), '(ste_tsg)\n', (36818, 36827), False, 'from automol import graph\n'), ((36849, 36890), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['r'], {}), '(r)\n', (36887, 36890), False, 'from automol import graph\n')]
|
import botbowl
from botbowl.core import Action, Agent
import numpy as np
from copy import deepcopy
import random
import time
from botbowl.core.model import Team
# When True, emit verbose debugging output during the search.
PRINT = False
# Setup-phase action types excluded from normal in-game action selection.
# NOTE(review): this constant is not referenced anywhere in this module —
# presumably consumed elsewhere; confirm before removing.
IGNORE_IN_GAME = [botbowl.ActionType.PLACE_PLAYER, botbowl.ActionType.END_SETUP, botbowl.ActionType.SETUP_FORMATION_SPREAD,
                  botbowl.ActionType.SETUP_FORMATION_LINE, botbowl.ActionType.SETUP_FORMATION_WEDGE, botbowl.ActionType.SETUP_FORMATION_ZONE]
class Node:
    """One node of the MCTS tree: a game Action plus visit statistics.

    Attributes:
        parent: the Node this one was expanded from (None for the root).
        children: child Nodes, lazily filled by extract_children().
        action: the botbowl Action that leads to this node.
        C: exploration constant for the UCT formula.
        n_wins / n_sims: accumulated score and visit count.
    """

    def __init__(self, action=None, parent=None, C=4):
        self.parent = parent
        self.children = []
        self.action = action
        self.evaluations = []
        self.C = C
        self.n_wins = 0
        self.n_sims = 0

    def UTC(self, root):
        """Return this node's UCT score relative to *root*.

        Unvisited nodes score +infinity so they are always tried first.
        """
        if self.n_sims == 0:
            return float('inf')
        exploitation = self.n_wins / self.n_sims
        exploration = self.C * np.sqrt(np.log(root.n_sims) / self.n_sims)
        return exploitation + exploration

    def extract_children(self, game: botbowl.Game):
        """Populate ``self.children`` with one child per available action
        variant (per player, per position, or bare) and return self."""
        for choice in game.get_available_actions():
            kind = choice.action_type
            self.children.extend(
                Node(Action(kind, player=p), parent=self) for p in choice.players)
            self.children.extend(
                Node(Action(kind, position=pos), parent=self) for pos in choice.positions)
            if not choice.players and not choice.positions:
                self.children.append(Node(Action(kind), parent=self))
        return self
class SearchBot(botbowl.Agent):
    """Monte-Carlo tree search agent for botbowl.

    Each call to act() runs ``budget`` MCTS iterations on a deep copy of
    the game, selecting children by UCT (Node.UTC) and scoring random
    rollouts by the final winner. Several game phases (coin toss,
    kick/receive, setup, ball placement) are short-circuited with
    hand-written policies before the search runs.
    """

    def __init__(self, name, budget=10, time_budget=5, seed=None):
        # budget: number of MCTS iterations per act() call.
        # time_budget: wall-clock budget in seconds; currently unused (the
        #   time-based loop in act() is commented out).
        # seed: accepted but never used — NOTE(review): randomness is unseeded.
        super().__init__(name)
        self.my_team = None
        self.budget = budget
        self.time_budget = time_budget
        self.path = []  # nodes visited along the current selection path
        self.last_action = None

    def new_game(self, game, team):
        """Record which team this agent controls for the new game."""
        print("NEW GAME woop woop")
        self.my_team = team

    def end_game(self, game: botbowl.Game):
        """Called by the framework when the game ends; nothing to clean up."""
        # game._end_game()
        print("END GAME")
        pass

    def selection(self, node: Node) -> Node:
        """Return the child of *node* with the highest UCT score."""
        return node.children[np.argmax([n.UTC(node) for n in node.children])]

    def rollout(self, game: botbowl.Game, node: Node):
        """Play random legal actions from *node* until the game ends, then
        score the outcome and revert the game to its pre-rollout step.

        Returns 10 for a win, -5 for a loss, -1 for a draw.

        NOTE(review): ``win == self`` compares the winner against this agent,
        but the rollout runs on a deep copy whose agents were replaced /
        copied in act() — confirm this comparison can ever be true.
        """
        step_before_rollout = game.get_step()
        if PRINT:
            print(
                f'condition 1: {not game.state.game_over and len(node.children) == 0}')
        while not game.state.game_over and len(node.children) == 0:
            # Lazily expand this node's children and pick one at random.
            action = np.random.choice(
                node.extract_children(game).children).action
            # if True:
            #     print('---------------->', action)
            # PLACE_PLAYER actions are skipped rather than stepped.
            if action.action_type != botbowl.ActionType.PLACE_PLAYER:
                game.step(action)
        win = game.get_winner()
        if PRINT:
            print(f'winner: {win}')
        if win == None:
            # Draw. NOTE(review): original comment said "score is zero" but
            # the value used is -1 — a mild draw penalty.
            score = -1
        elif win == self:
            score = 10
        else:
            score = -5
        game.revert(step_before_rollout)  # not sure if necessary
        return score

    def expand(self, game: botbowl.Game, node: Node):
        """Apply *node*'s action, append it to the search path, and
        generate its children."""
        game.step(node.action)
        self.path.append(node)
        node.extract_children(game=game)

    def backpropagate(self, score, node: Node):
        """Add *score* and one visit to every node on the current path,
        plus *node* itself."""
        for n in range(len(self.path)):
            self.path[n].n_sims += 1
            self.path[n].n_wins += score
        node.n_sims += 1
        node.n_wins += score

    def act(self, game: botbowl.Game):
        """Choose the next Action: scripted replies for special phases,
        otherwise an MCTS search over a forward-model copy of the game."""
        # Search on a copy so the real game state is never mutated.
        game_copy = deepcopy(game)
        game_copy.enable_forward_model()
        game_copy.home_agent.human = True
        game_copy.away_agent.human = True
        root_step = game_copy.get_step()
        root_node = Node()
        available_actions = [
            elem.action_type for elem in game_copy.get_available_actions()]
        if PRINT:
            print(available_actions)
        # if we only have one action, return it, no need to choose what the best action can be
        # if len(available_actions) == 1:
        #     return Action(available_actions[0])
        # handle placing ball randomly on board
        if len(available_actions) == 1:
            if available_actions[0] == botbowl.ActionType.PLACE_BALL:
                if PRINT:
                    print(
                        f'positions: {game_copy.get_available_actions()[0].positions}')
                return Action(botbowl.ActionType.PLACE_BALL, position=np.random.choice(game.get_available_actions()[0].positions))
            # else:
            #     print(f'single action is: {available_actions[0]}')
            #     input()
        # handle heads or tail: pick at random, no search needed
        if botbowl.ActionType.HEADS in available_actions or botbowl.ActionType.TAILS in available_actions:
            return np.random.choice([Action(botbowl.ActionType.HEADS), Action(botbowl.ActionType.TAILS)])
        # handle kick or receive: hard-coded to KICK for now
        if botbowl.ActionType.KICK in available_actions or botbowl.ActionType.RECEIVE in available_actions:
            # return np.random.choice([Action(botbowl.ActionType.KICK), Action(botbowl.ActionType.RECEIVE)])
            return Action(botbowl.ActionType.KICK)  # TODO remove
        # handle the action to setup the bot team
        if botbowl.ActionType.PLACE_PLAYER in available_actions or botbowl.ActionType.END_SETUP in available_actions or botbowl.ActionType.SETUP_FORMATION_SPREAD in available_actions or botbowl.ActionType.SETUP_FORMATION_WEDGE in available_actions:
            available_actions.remove(botbowl.ActionType.PLACE_PLAYER)
            # The loop returns on the first iteration, so this acts as
            # "if any player is already on the pitch, end setup".
            for elem in game_copy.get_players_on_pitch(team=self.my_team):
                return Action(botbowl.ActionType.END_SETUP)
            available_actions.remove(botbowl.ActionType.END_SETUP)
            # Otherwise pick one of the remaining formation actions at random.
            return Action(np.random.choice(available_actions))
        # If we just started a move and the ball is loose, run a random
        # player toward the ball's position.
        if game.get_ball().on_ground and botbowl.ActionType.MOVE in available_actions and self.last_action == botbowl.ActionType.START_MOVE:
            return Action(botbowl.ActionType.MOVE, game.get_ball().position,
                          player=np.random.choice(game.get_players_on_pitch(team=self.my_team)))
        root_node.extract_children(game=game_copy)
        start = time.time()
        for i in range(self.budget):
            # while time.time() - start < self.time_budget:
            # selection of node
            node = self.selection(root_node)
            self.path = [root_node]
            while True:
                if node.n_sims == 0:
                    # Unvisited leaf: simulate from here and propagate the score.
                    score = self.rollout(game=game_copy, node=node)
                    self.backpropagate(score=score, node=node)
                    break
                else:
                    # Already visited: step deeper and keep selecting by UCT.
                    self.expand(game=game_copy, node=node)
                    node = self.selection(node)
            # if time.time() - start >= self.time_budget:
            #     break
        game_copy.revert(root_step)
        # Final choice: the root child with the highest accumulated score.
        self.last_action = root_node.children[np.argmax(
            [n.n_wins for n in root_node.children])].action
        return self.last_action
# Register the bot with the botbowl framework under a unique name so it can
# be selected as an agent (import side effect).
botbowl.register_bot('MCTS-bot-budget-10', SearchBot)
|
[
"copy.deepcopy",
"numpy.log",
"numpy.argmax",
"time.time",
"botbowl.core.Action",
"numpy.random.choice",
"botbowl.register_bot"
] |
[((7138, 7191), 'botbowl.register_bot', 'botbowl.register_bot', (['"""MCTS-bot-budget-10"""', 'SearchBot'], {}), "('MCTS-bot-budget-10', SearchBot)\n", (7158, 7191), False, 'import botbowl\n'), ((3561, 3575), 'copy.deepcopy', 'deepcopy', (['game'], {}), '(game)\n', (3569, 3575), False, 'from copy import deepcopy\n'), ((6246, 6257), 'time.time', 'time.time', ([], {}), '()\n', (6255, 6257), False, 'import time\n'), ((5180, 5211), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.KICK'], {}), '(botbowl.ActionType.KICK)\n', (5186, 5211), False, 'from botbowl.core import Action, Agent\n'), ((5695, 5731), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.END_SETUP'], {}), '(botbowl.ActionType.END_SETUP)\n', (5701, 5731), False, 'from botbowl.core import Action, Agent\n'), ((5825, 5860), 'numpy.random.choice', 'np.random.choice', (['available_actions'], {}), '(available_actions)\n', (5841, 5860), True, 'import numpy as np\n'), ((6996, 7045), 'numpy.argmax', 'np.argmax', (['[n.n_wins for n in root_node.children]'], {}), '([n.n_wins for n in root_node.children])\n', (7005, 7045), True, 'import numpy as np\n'), ((4841, 4873), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.HEADS'], {}), '(botbowl.ActionType.HEADS)\n', (4847, 4873), False, 'from botbowl.core import Action, Agent\n'), ((4875, 4907), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.TAILS'], {}), '(botbowl.ActionType.TAILS)\n', (4881, 4907), False, 'from botbowl.core import Action, Agent\n'), ((1122, 1170), 'botbowl.core.Action', 'Action', (['action_choice.action_type'], {'player': 'player'}), '(action_choice.action_type, player=player)\n', (1128, 1170), False, 'from botbowl.core import Action, Agent\n'), ((1302, 1354), 'botbowl.core.Action', 'Action', (['action_choice.action_type'], {'position': 'position'}), '(action_choice.action_type, position=position)\n', (1308, 1354), False, 'from botbowl.core import Action, Agent\n'), ((1513, 1546), 'botbowl.core.Action', 'Action', 
(['action_choice.action_type'], {}), '(action_choice.action_type)\n', (1519, 1546), False, 'from botbowl.core import Action, Agent\n'), ((816, 835), 'numpy.log', 'np.log', (['root.n_sims'], {}), '(root.n_sims)\n', (822, 835), True, 'import numpy as np\n')]
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016-2017 the HERA Collaboration
# Licensed under the BSD License.
"Files."
__all__ = str('''
File
FileInstance
FileEvent
''').split()
import sys
import datetime
import json
import os.path
import re
from flask import flash, redirect, render_template, url_for
from sqlalchemy.engine.row import Row
from . import app, db, logger
from .dbutil import NotNull, SQLAlchemyError
from .webutil import ServerError, json_api, login_required, optional_arg, required_arg
from .observation import Observation
from .store import Store
def infer_file_obsid(parent_dirs, name, info):
    """Infer the obsid associated with a candidate new file *name*.

    The strategy is chosen by the ``obsid_inference_mode`` config setting:

    - "none" (default) refuses to infer anything;
    - "hera" copies the obsid from already-registered files whose names
      share the same "zen.JD" prefix, failing unless exactly one candidate
      obsid is found;
    - "_testing" (internal) derives a GPS time from the JD embedded in
      the file name.

    Raises a ServerError whenever the obsid cannot be determined *with
    certainty*.
    """
    mode = app.config.get('obsid_inference_mode', 'none')

    if mode == 'none':
        raise ServerError('refusing to try to infer the obsid of candidate new file \"%s\"', name)

    if mode == 'hera':
        pieces = name.split('.')
        if len(pieces) < 4:
            raise ServerError(
                'need to infer obsid of HERA file \"%s\", but its name looks weird', name)

        prefix = '.'.join(pieces[:3])
        query = (db.session.query(File.obsid)
                 .filter(File.name.like(prefix + '.%'))
                 .group_by(File.obsid))
        candidates = list(query)

        # Demand a unique answer -- anything else is ambiguous.
        if len(candidates) != 1:
            raise ServerError('need to infer obsid of HERA file \"%s\", but got %d candidate '
                              'obsids from similarly-named files', name, len(candidates))

        return candidates[0]

    if mode == '_testing':
        pieces = name.split('.')
        if len(pieces) < 4:
            raise ServerError(
                'need to infer obsid of _testing file \"%s\", but its name looks weird', name)

        jd = float(pieces[1] + '.' + pieces[2])

        from astropy.time import Time
        from math import floor
        return int(floor(Time(jd, format='jd', scale='utc').gps))

    raise ServerError('configuration problem: unknown "obsid_inference_mode" setting %r', mode)
class File (db.Model):
    """A File describes a data product generated by HERA.

    The information described in a File structure never changes, and is
    universal between Librarians. Actual "instances" of files come and go, but
    a File record should never be deleted. The only exception to this is the
    "source" column, which is Librarian-dependent.

    A File may represent an actual flat file or a directory tree. The latter
    use case is important for MIRIAD data files, which are directories, and
    which we want to store in their native form for rapid analysis.

    File names are unique. Here, the "name" is a Unix 'basename', i.e. it
    contains no directory components or slashes. Every new file must have a
    unique new name.

    """
    __tablename__ = 'file'

    # Unix basename; unique, so it is the primary key (see class docstring).
    name = db.Column(db.String(256), primary_key=True)
    # File "type" string as supplied at registration time.
    type = NotNull(db.String(32))
    create_time = NotNull(db.DateTime)   # rounded to integer seconds
    # Observation this file belongs to; nullable for obsid-less products.
    obsid = db.Column(db.BigInteger, db.ForeignKey(Observation.obsid), nullable=True)
    # Size in bytes; _validate() enforces non-negativity.
    size = NotNull(db.BigInteger)
    # 32-hex-digit MD5 digest, normalized by hera_librarian.utils.
    md5 = NotNull(db.String(32))
    # Librarian-dependent origin label (the one mutable-ish column).
    source = NotNull(db.String(64))

    observation = db.relationship('Observation', back_populates='files')
    instances = db.relationship('FileInstance', back_populates='file')
    events = db.relationship('FileEvent', back_populates='file')
def __init__(self, name, type, obsid, source, size, md5, create_time=None):
if create_time is None:
# We round our times to whole seconds so that they can be
# accurately represented as integer Unix times, just in case
# floating-point rounding could sneak in as an issue.
create_time = datetime.datetime.utcnow().replace(microsecond=0)
from hera_librarian import utils
md5 = utils.normalize_and_validate_md5(md5)
self.name = name
self.type = type
self.create_time = create_time
self.obsid = obsid
self.source = source
self.size = size
self.md5 = md5
self._validate()
@property
def name_as_json(self):
import json
return json.dumps(self.name)
def _validate(self):
"""Check that this object's fields follow our invariants.
"""
from hera_librarian import utils
if '/' in self.name:
raise ValueError('illegal file name "%s": names may not contain "/"' % self.name)
utils.normalize_and_validate_md5(self.md5)
if not (self.size >= 0): # catches NaNs, just in case ...
raise ValueError('illegal size %d of file "%s": negative' % (self.size, self.name))
    @classmethod
    def get_inferring_info(cls, store, store_path, source_name, info=None, null_obsid=False):
        """Get a File instance based on a file currently located in a store. We infer
        the file's properties and those of any dependent database records
        (Observation, ObservingSession), which means that we can only do this
        for certain kinds of files whose formats we understand.

        If new File and Observation records need to be created in the DB, that
        is done. If *info* is given, we use it; otherwise we SSH into the
        store to gather the info ourselves.

        If *null_obsid* is True, the entry is expected and required to have a
        null obsid. If False (the default), the file must have an obsid --
        either explicitly specified, or inferred from the file contents.

        Parameters: *store* is a Store object, *store_path* the file's path
        within it, *source_name* the originating Librarian/source label, and
        *info* an optional dict with at least 'size', 'md5' and 'type' keys
        (and optionally 'obsid').

        Returns the (possibly pre-existing) File record. Raises ServerError
        on info-gathering failure, obsid problems, M&C rejection, or a
        failed database commit.
        """
        parent_dirs = os.path.dirname(store_path)
        name = os.path.basename(store_path)

        prev = cls.query.get(name)
        if prev is not None:
            # If there's already a record for this File name, then its corresponding
            # Observation etc must already be available. Let's leave well enough alone:
            return prev

        # Darn. We're going to have to create the File, and maybe its
        # Observation too. Get to it.

        if info is None:
            try:
                info = store.get_info_for_path(store_path)
            except Exception as e:
                raise ServerError('cannot register %s:%s: %s', store.name, store_path, e)

        # These three keys are mandatory in *info*; required_arg raises if absent.
        size = required_arg(info, int, 'size')
        md5 = required_arg(info, str, 'md5')
        type = required_arg(info, str, 'type')

        from .observation import Observation
        from . import mc_integration as MC

        obsid = optional_arg(info, int, 'obsid')

        if null_obsid:
            if obsid is not None:
                raise ServerError('new file %s is expected to have a null obsid, but it has %r',
                                  name, obsid)
        else:
            if obsid is None:
                # Our UV data files embed their obsids in a way that we can
                # extract robustly, but we want to be able to ingest new files
                # that don't necessarily have obsid information embedded. We used
                # to do this by guessing from the JD in the filename, but that
                # proved to be unreliable (as you might guess). So we now have a
                # configurable scheme to make this possible; the only implemented
                # technique still looks at filenames, but does it in a somewhat
                # better-justified way where it requires preexisting files to have
                # an assigned obsid that it can copy.
                obsid = infer_file_obsid(parent_dirs, name, info)

            obs = Observation.query.get(obsid)
            if obs is None:
                # The other piece of the puzzle is that we used to sometimes
                # create new Observation records based on data that we tried to
                # infer from standalone files. Now that the we have an on-site M&C
                # system that records the canonical metadata for observations,
                # that mode is deprecated. On-site, we only create Observations
                # from M&C. Off-site, we only get them from uploads from other
                # Librarians.
                MC.create_observation_record(obsid)

        if isinstance(obsid, Row):
            # convert from Row object to integer (infer_file_obsid's "hera"
            # mode returns a single-column SQLAlchemy Row)
            obsid = obsid._asdict()["obsid"]

        fobj = File(name, type, obsid, source_name, size, md5)

        # Give M&C veto power over the new record before we persist it.
        if MC.is_file_record_invalid(fobj):
            raise ServerError('new file %s (obsid %s) rejected by M&C; see M&C error logs for the reason',
                              name, obsid)

        db.session.add(fobj)

        try:
            db.session.commit()
        except SQLAlchemyError:
            # Roll back so the session stays usable, log, and surface a 500.
            db.session.rollback()
            app.log_exception(sys.exc_info())
            raise ServerError('failed to add new file %s to database; see logs for details', name)

        MC.note_file_created(fobj)
        return fobj
def delete_instances(self, mode='standard', restrict_to_store=None):
"""DANGER ZONE! Delete instances of this file on all stores!
We have a safety interlock: each FileInstance has a "deletion_policy"
flag that specifies, well, the internal policy about whether it can be
deleted. The default is that no deletions are allowed.
Of course, this command will only execute deletions that are allowed
under the policy. It returns status information about how many
deletions actually occurred.
If `mode` is "noop", the logic is exercised but the deletions are not
run. Currently, the only other allowed mode is "standard".
If `restrict_to_store` is not None, it should be a Store class
instance. Only instances kept on the specified store will be deleted
-- all other instances will be kept.
"""
if mode == 'standard':
noop = False
elif mode == 'noop':
noop = True
else:
raise ServerError('unexpected deletion operations mode %r' % (mode,))
n_deleted = 0
n_kept = 0
n_error = 0
# If we make Librarian files read-only, we'll need to chmod them back
# to writeable before we can blow them away -- this wouldn't be
# necessary if we only stored flat files, but we store directories
# too.
pmode = app.config.get('permissions_mode', 'readonly')
need_chmod = (pmode == 'readonly')
for inst in self.instances:
# Currently, the policy is just binary: allowed, or not. Be very
# careful about changing the logic here, since this is the core of
# the safety interlock that prevents us from accidentally blowing
# away the entire data archive! Don't be That Guy or That Gal!
if inst.deletion_policy != DeletionPolicy.ALLOWED:
n_kept += 1
continue
# Implement the `restrict_to_store` feature. We could move this
# into the SQL query (implicit in `self.instances` above) but meh,
# this keeps things more uniform regarding n_kept etc.
if restrict_to_store is not None and inst.store != restrict_to_store.id:
n_kept += 1
continue
# OK. If we've gotten here, we are 100% sure that it is OK to delete
# this instance.
store = inst.store_object
try:
if noop:
logger.info('NOOP-delete call matched instance "%s"', inst.descriptive_name())
else:
logger.info('attempting to delete instance "%s"', inst.descriptive_name())
store._delete(inst.store_path, chmod_before=need_chmod)
except Exception as e:
# This could happen if we can't SSH to the store or something.
# Safest course of action seems to be to not modify the database
# or anything else.
n_error += 1
logger.warn('failed to delete instance "%s": %s', inst.descriptive_name(), e)
continue
# Looks like we succeeded in blowing it away.
if not noop:
db.session.add(self.make_instance_deletion_event(inst, store))
db.session.delete(inst)
n_deleted += 1
if not noop:
try:
db.session.commit()
except SQLAlchemyError:
db.session.rollback()
app.log_exception(sys.exc_info())
raise ServerError(
'deleted instances but failed to update database! DB/FS consistency broken!')
return {
'n_deleted': n_deleted,
'n_kept': n_kept,
'n_error': n_error,
}
@property
def create_time_unix(self):
import calendar
return calendar.timegm(self.create_time.timetuple())
@property
def create_time_astropy(self):
from astropy.time import Time
return Time(self.create_time)
def to_dict(self):
"""Note that 'source' is not a propagated quantity, and that we explicitly
include the null 'obsid' if that is the case.
"""
return dict(
name=self.name,
type=self.type,
create_time=self.create_time_unix,
obsid=self.obsid,
size=self.size,
md5=self.md5
)
@classmethod
def from_dict(cls, source, info):
name = required_arg(info, str, 'name')
type = required_arg(info, str, 'type')
ctime_unix = required_arg(info, int, 'create_time')
size = required_arg(info, int, 'size')
md5 = required_arg(info, str, 'md5')
# obsid needs special handling: it must be present, but it can be None.
try:
obsid = info['obsid']
except KeyError:
raise ServerError('required parameter "obsid" not provided')
if obsid is not None and not isinstance(obsid, int):
raise ServerError('parameter "obsid" should be an integer or None, but got %r', obsid)
return cls(name, type, obsid, source, size, md5, datetime.datetime.fromtimestamp(ctime_unix))
def make_generic_event(self, type, **kwargs):
"""Create a new FileEvent record relating to this file. The new event is not
added or committed to the database.
"""
return FileEvent(self.name, type, kwargs)
def make_instance_creation_event(self, instance, store):
return self.make_generic_event('create_instance',
store_name=store.name,
parent_dirs=instance.parent_dirs)
def make_instance_deletion_event(self, instance, store):
return self.make_generic_event('delete_instance',
store_name=store.name,
parent_dirs=instance.parent_dirs)
def make_copy_launched_event(self, connection_name, remote_store_path):
return self.make_generic_event('launch_copy',
connection_name=connection_name,
remote_store_path=remote_store_path)
def make_copy_finished_event(self, connection_name, remote_store_path,
error_code, error_message, duration=None,
average_rate=None):
extras = {}
if duration is not None:
extras['duration'] = duration # seconds
if average_rate is not None:
extras['average_rate'] = average_rate # kilobytes/sec
return self.make_generic_event('copy_finished',
connection_name=connection_name,
remote_store_path=remote_store_path,
error_code=error_code,
error_message=error_message,
**extras)
class DeletionPolicy (object):
"""A simple enumeration of symbolic constants for the "deletion_policy"
column in the FileInstance table.
"""
DISALLOWED = 0
ALLOWED = 1
def __init__(self): assert False, 'instantiation of enum not allowed'
@classmethod
def parse_safe(cls, text):
if text == 'disallowed':
return cls.DISALLOWED
if text == 'allowed':
return cls.ALLOWED
logger.warn('unrecognized deletion policy %r; using DISALLOWED', text)
return cls.DISALLOWED
@classmethod
def textualize(cls, value):
if value == cls.DISALLOWED:
return 'disallowed'
if value == cls.ALLOWED:
return 'allowed'
return '???(%r)' % (value, )
class FileInstance (db.Model):
"""A FileInstance is a copy of a File that lives on one of this Librarian's
stores.
Because the File record knows the key attributes of the file that we're
instantiating (size, MD5 sum), a FileInstance record only needs to keep
track of the location of this instance: its store, its parent directory,
and the file name (which, because File names are unique, is a foreign key
into the File table).
Even though File names are unique, for organizational purposes they are
sorted into directories when instantiated in actual stores. In current
practice this is generally done by JD although this is not baked into the
design.
"""
__tablename__ = 'file_instance'
store = db.Column(db.BigInteger, db.ForeignKey(Store.id), primary_key=True)
parent_dirs = db.Column(db.String(128), primary_key=True)
name = db.Column(db.String(256), db.ForeignKey(File.name), primary_key=True)
deletion_policy = NotNull(db.Integer, default=DeletionPolicy.DISALLOWED)
file = db.relationship('File', back_populates='instances')
store_object = db.relationship('Store', back_populates='instances')
name_index = db.Index('file_instance_name', name)
def __init__(self, store_obj, parent_dirs, name, deletion_policy=DeletionPolicy.DISALLOWED):
if '/' in name:
raise ValueError('illegal file name "%s": names may not contain "/"' % name)
self.store = store_obj.id
self.parent_dirs = parent_dirs
self.name = name
self.deletion_policy = deletion_policy
@property
def store_name(self):
return self.store_object.name
@property
def store_path(self):
import os.path
return os.path.join(self.parent_dirs, self.name)
def full_path_on_store(self):
import os.path
return os.path.join(self.store_object.path_prefix, self.parent_dirs, self.name)
def descriptive_name(self):
return self.store_name + ':' + self.store_path
@property
def deletion_policy_text(self):
return DeletionPolicy.textualize(self.deletion_policy)
def to_dict(self):
return dict(
store_name=self.store_object.name,
store_ssh_host=self.store_object.ssh_host,
parent_dirs=self.parent_dirs,
name=self.name,
deletion_policy=self.deletion_policy_text,
full_path_on_store=self.full_path_on_store()
)
class FileEvent (db.Model):
"""A FileEvent is a something that happens to a File on this Librarian.
Note that events are per-File, not per-FileInstance. One reason for this
is that FileInstance records may get deleted, and we want to be able to track
history even after that happens.
On the other hand, FileEvents are private per Librarian. They are not
synchronized from one Librarian to another and are not globally unique.
The nature of a FileEvent payload is defined by its type. We suggest
JSON-encoded text. The payload is limited to 512 bytes so there's only so
much you can carry.
"""
__tablename__ = 'file_event'
id = db.Column(db.BigInteger, primary_key=True)
name = db.Column(db.String(256), db.ForeignKey(File.name))
time = NotNull(db.DateTime)
type = db.Column(db.String(64))
payload = db.Column(db.Text)
file = db.relationship('File', back_populates='events')
name_index = db.Index('file_event_name', name)
def __init__(self, name, type, payload_struct):
if '/' in name:
raise ValueError('illegal file name "%s": names may not contain "/"' % name)
self.name = name
self.time = datetime.datetime.utcnow().replace(microsecond=0)
self.type = type
self.payload = json.dumps(payload_struct)
@property
def payload_json(self):
return json.loads(self.payload)
# RPC endpoints
@app.route('/api/create_file_event', methods=['GET', 'POST'])
@json_api
def create_file_event(args, sourcename=None):
"""Create a FileEvent record for a File.
We enforce basically no structure on the event data.
"""
file_name = required_arg(args, str, 'file_name')
type = required_arg(args, str, 'type')
payload = required_arg(args, dict, 'payload')
file = File.query.get(file_name)
if file is None:
raise ServerError('no known file "%s"', file_name)
event = file.make_generic_event(type, **payload)
db.session.add(event)
try:
db.session.commit()
except SQLAlchemyError:
db.session.rollback()
app.log_exception(sys.exc_info())
raise ServerError('failed to add event to database -- see server logs for details')
return {}
@app.route('/api/locate_file_instance', methods=['GET', 'POST'])
@json_api
def locate_file_instance(args, sourcename=None):
"""Tell the caller where to find an instance of the named file.
"""
file_name = required_arg(args, str, 'file_name')
file = File.query.get(file_name)
if file is None:
raise ServerError('no known file "%s"', file_name)
for inst in file.instances:
return {
'full_path_on_store': inst.full_path_on_store(),
'store_name': inst.store_name,
'store_path': inst.store_path,
'store_ssh_host': inst.store_object.ssh_host,
}
raise ServerError('no instances of file "%s" on this librarian', file_name)
@app.route('/api/set_one_file_deletion_policy', methods=['GET', 'POST'])
@json_api
def set_one_file_deletion_policy(args, sourcename=None):
"""Set the deletion policy of one instance of a file.
The "one instance" restriction is just a bit of a sanity-check to throw up
barriers against deleting all instances of a file if more than one
instance actually exists.
If the optional 'restrict_to_store' argument is supplied, only instances
on the specified store will be modified. This is useful when clearing out
a store for deactivation (see also the "offload" functionality). Note that
the "one instance" limit still applies.
"""
file_name = required_arg(args, str, 'file_name')
deletion_policy = required_arg(args, str, 'deletion_policy')
restrict_to_store = optional_arg(args, str, 'restrict_to_store')
if restrict_to_store is not None:
from .store import Store
restrict_to_store = Store.get_by_name(restrict_to_store) # ServerError if lookup fails
file = File.query.get(file_name)
if file is None:
raise ServerError('no known file "%s"', file_name)
deletion_policy = DeletionPolicy.parse_safe(deletion_policy)
for inst in file.instances:
# We could do this filter in SQL but it's easier to just do it this way;
# you can't call filter() on `file.instances`.
if restrict_to_store is not None and inst.store != restrict_to_store.id:
continue
inst.deletion_policy = deletion_policy
break # just one!
else:
raise ServerError('no instances of file "%s" on this librarian', file_name)
db.session.add(file.make_generic_event('instance_deletion_policy_changed',
store_name=inst.store_object.name,
parent_dirs=inst.parent_dirs,
new_policy=deletion_policy))
try:
db.session.commit()
except SQLAlchemyError:
db.session.rollback()
app.log_exception(sys.exc_info())
raise ServerError('failed to commit changes to the database')
return {}
@app.route('/api/delete_file_instances', methods=['GET', 'POST'])
@json_api
def delete_file_instances(args, sourcename=None):
"""DANGER ZONE! Delete instances of the named file on all stores!
See File.delete_instances for a description of the safety interlocks.
"""
file_name = required_arg(args, str, 'file_name')
mode = optional_arg(args, str, 'mode', 'standard')
restrict_to_store = optional_arg(args, str, 'restrict_to_store')
if restrict_to_store is not None:
from .store import Store
restrict_to_store = Store.get_by_name(restrict_to_store) # ServerError if lookup fails
file = File.query.get(file_name)
if file is None:
raise ServerError('no known file "%s"', file_name)
return file.delete_instances(mode=mode, restrict_to_store=restrict_to_store)
@app.route('/api/delete_file_instances_matching_query', methods=['GET', 'POST'])
@json_api
def delete_file_instances_matching_query(args, sourcename=None):
"""DANGER ZONE! Delete instances of lots of files on the store!
See File.delete_instances for a description of the safety interlocks.
"""
query = required_arg(args, str, 'query')
mode = optional_arg(args, str, 'mode', 'standard')
restrict_to_store = optional_arg(args, str, 'restrict_to_store')
if restrict_to_store is not None:
from .store import Store
restrict_to_store = Store.get_by_name(restrict_to_store) # ServerError if lookup fails
from .search import compile_search
query = compile_search(query, query_type='files')
stats = {}
for file in query:
stats[file.name] = file.delete_instances(mode=mode, restrict_to_store=restrict_to_store)
return {
'stats': stats,
}
# Web user interface
@app.route('/files/<string:name>')
@login_required
def specific_file(name):
file = File.query.get(name)
if file is None:
flash('No such file "%s" known' % name)
return redirect(url_for('index'))
instances = list(FileInstance.query.filter(FileInstance.name == name))
events = sorted(file.events, key=lambda e: e.time, reverse=True)
return render_template(
'file-individual.html',
title='%s File %s' % (file.type, file.name),
file=file,
instances=instances,
events=events,
)
|
[
"flask.flash",
"json.loads",
"astropy.time.Time",
"hera_librarian.utils.normalize_and_validate_md5",
"json.dumps",
"datetime.datetime.utcnow",
"flask.url_for",
"flask.render_template",
"datetime.datetime.fromtimestamp",
"sys.exc_info"
] |
[((26869, 27004), 'flask.render_template', 'render_template', (['"""file-individual.html"""'], {'title': "('%s File %s' % (file.type, file.name))", 'file': 'file', 'instances': 'instances', 'events': 'events'}), "('file-individual.html', title='%s File %s' % (file.type,\n file.name), file=file, instances=instances, events=events)\n", (26884, 27004), False, 'from flask import flash, redirect, render_template, url_for\n'), ((4109, 4146), 'hera_librarian.utils.normalize_and_validate_md5', 'utils.normalize_and_validate_md5', (['md5'], {}), '(md5)\n', (4141, 4146), False, 'from hera_librarian import utils\n'), ((4444, 4465), 'json.dumps', 'json.dumps', (['self.name'], {}), '(self.name)\n', (4454, 4465), False, 'import json\n'), ((4745, 4787), 'hera_librarian.utils.normalize_and_validate_md5', 'utils.normalize_and_validate_md5', (['self.md5'], {}), '(self.md5)\n', (4777, 4787), False, 'from hera_librarian import utils\n'), ((13282, 13304), 'astropy.time.Time', 'Time', (['self.create_time'], {}), '(self.create_time)\n', (13286, 13304), False, 'from astropy.time import Time\n'), ((20882, 20908), 'json.dumps', 'json.dumps', (['payload_struct'], {}), '(payload_struct)\n', (20892, 20908), False, 'import json\n'), ((20967, 20991), 'json.loads', 'json.loads', (['self.payload'], {}), '(self.payload)\n', (20977, 20991), False, 'import json\n'), ((26630, 26669), 'flask.flash', 'flash', (['(\'No such file "%s" known\' % name)'], {}), '(\'No such file "%s" known\' % name)\n', (26635, 26669), False, 'from flask import flash, redirect, render_template, url_for\n'), ((14443, 14486), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ctime_unix'], {}), '(ctime_unix)\n', (14474, 14486), False, 'import datetime\n'), ((26694, 26710), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (26701, 26710), False, 'from flask import flash, redirect, render_template, url_for\n'), ((20784, 20810), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), 
'()\n', (20808, 20810), False, 'import datetime\n'), ((21707, 21721), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (21719, 21721), False, 'import sys\n'), ((24622, 24636), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (24634, 24636), False, 'import sys\n'), ((2166, 2200), 'astropy.time.Time', 'Time', (['jd'], {'format': '"""jd"""', 'scale': '"""utc"""'}), "(jd, format='jd', scale='utc')\n", (2170, 2200), False, 'from astropy.time import Time\n'), ((4003, 4029), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4027, 4029), False, 'import datetime\n'), ((8980, 8994), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8992, 8994), False, 'import sys\n'), ((12772, 12786), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12784, 12786), False, 'import sys\n')]
|
import logging
class LoggerFileClass:
LOGGER_TYPE_FILE = "file"
LOGGER_TYPE_MONGO_DB = "mongo_db"
LOG_FILE_NM = "LoggerFile.log"
"""
Description: This class is used to do logging of events, events like user action, exception, error.
"""
def __init__(self, logger_nm):
"""
Description: This function is used to create logger object. Initialization of logger done here.
:param logger_nm: name of logger
"""
# Get Logger
self.logger = logging.getLogger(logger_nm)
# Creating Log formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
file_handler = logging.FileHandler(self.LOG_FILE_NM)
file_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
def add_debug_log(self, msg):
"""
Description : Use to add debug log
:param msg: Log message
:return:
"""
self.logger.debug(msg)
def add_info_log(self, msg):
"""
Description : Use to add info log
:param msg: Log message
:return:
"""
self.logger.info(msg)
def add_warning_log(self, msg):
"""
Description : Use to add warning log
:param msg: Log message
:return:
"""
self.logger.warning(msg)
def add_exception_log(self, msg):
"""
Description : Use to add exception log
:param msg: Log message
:return:
"""
self.logger.exception(msg)
def add_error_log(self, msg):
"""
Description : Use to add error log
:param msg: Log message
:return:
"""
self.logger.error(msg)
|
[
"logging.Formatter",
"logging.FileHandler",
"logging.getLogger"
] |
[((518, 546), 'logging.getLogger', 'logging.getLogger', (['logger_nm'], {}), '(logger_nm)\n', (535, 546), False, 'import logging\n'), ((600, 667), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s:%(name)s:%(message)s"""'], {}), "('%(asctime)s:%(levelname)s:%(name)s:%(message)s')\n", (617, 667), False, 'import logging\n'), ((692, 729), 'logging.FileHandler', 'logging.FileHandler', (['self.LOG_FILE_NM'], {}), '(self.LOG_FILE_NM)\n', (711, 729), False, 'import logging\n')]
|
import math
import matplotlib
import numpy as np
from typing import Sequence
from PIL import Image
from io import BytesIO
from contextlib import contextmanager
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from figpptx.slide_editor import SlideTransformer, Box
def to_image(arg, **kwargs):
if isinstance(arg, matplotlib.figure.Figure):
return fig_to_image(arg, **kwargs)
elif isinstance(arg, Axes):
is_tight = kwargs.pop("is_tight", True)
return ax_to_image(arg, is_tight, **kwargs)
elif isinstance(arg, Artist):
return artists_to_image(arg)
elif isinstance(arg, Image.Image):
return arg.copy()
if isinstance(arg, Sequence):
if all(isinstance(elem, Artist) for elem in arg):
return artists_to_image(arg)
else:
raise ValueError("All elements must be ``Artist``.")
raise ValueError(f"``{arg}`` cannot be converted to image.")
def fig_to_image(fig, **kwargs):
"""Convert ``matplotlib.Figure`` to ``PIL.Image``.
Args:
kwargs (str):
Keyword parameters for ``Figure.savefig`` except ``fname``.
"""
# Ref: https://stackoverflow.com/questions/8598673/how-to-save-a-pylab-figure-into-in-memory-file-which-can-be-read-into-pil-image/8598881 # NOQA
kwargs["format"] = kwargs.get("format", "png")
kwargs["transparent"] = kwargs.get("transparent", True)
buf = BytesIO()
fig.savefig(buf, **kwargs)
buf.seek(0)
image = Image.open(buf).copy()
buf.close()
return image
def ax_to_image(ax, is_tight=True, **kwargs):
"""Convert ``matplotlib.Axes`` to ``PIL.Image``."""
kwargs["transparent"] = kwargs.get("transparent", True)
fig = ax.figure
artists = fig.get_children() # [TODO] Check ``get_axes`` is more apt?
with _store_visibility(artists):
for artist in artists:
if artist is not ax:
artist.set_visible(False)
image = fig_to_image(fig, **kwargs)
if is_tight:
image = _crop_image(image, ax)
bbox = ax.get_tightbbox(fig.canvas.get_renderer())
xmin, xmax = math.floor(bbox.xmin), math.ceil(bbox.xmax)
ymin, ymax = math.floor(bbox.ymin), math.ceil(bbox.ymax)
image = image.crop([xmin, ymin, xmax, ymax])
return image
def artists_to_image(artists, is_tight=True, **kwargs):
if isinstance(artists, Artist):
artists = [artists]
if not artists:
raise ValueError("``Empty Collection of Artists.``")
# Check whether the all belongs to the same figure.
figures = [artist.get_figure() for artist in artists]
figures = [figure for figure in figures if figure]
figures = set(figures)
if not figures:
raise ValueError("Figure does not exist.")
elif 1 < len(figures):
ValueError("All the ``Artists`` must belong to the same Figure.")
figure = list(figures)[0]
target_pairs = sum([_get_artist_pairs(artist) for artist in artists], [])
target_ids = {id(pair[0]) for pair in target_pairs}
pairs = _get_artist_pairs(figure)
leaf_artists = [artist for artist, has_child in pairs if not has_child]
with _store_visibility(leaf_artists):
for artist in leaf_artists:
if id(artist) not in target_ids:
artist.set_visible(False)
image = fig_to_image(figure, **kwargs)
if is_tight:
image = _crop_image(image, artists)
return image
def _get_artist_pairs(root):
result = list()
def _inner(artist):
children = artist.get_children()
has_child = True if children else False
for child in children:
_inner(child)
pair = (artist, has_child)
result.append(pair)
_inner(root)
return result
def _get_bbox(image):
"""
(2020-01-12)
``Image.getbbox()`` does not seem to work intendedly. (Really?)
So, substitution is implemented.
"""
assert image.mode == "RGBA"
width, height = image.size
array = np.array(image)
alpha = array[:, :, -1]
ys, xs = np.where(alpha != 0)
xmin, xmax = np.min(xs) - 1, np.max(xs) + 1
ymin, ymax = np.min(ys) - 1, np.max(ys) + 1
xmin = np.clip(xmin, 0, width)
xmax = np.clip(xmax, 0, width)
ymin = np.clip(ymin, 0, height)
ymax = np.clip(ymax, 0, height)
return xmin, ymin, xmax, ymax
def _crop_image(fig_image, artist):
"""Crop the ``fig_image`` so that only ROI of ``target`` remains."""
width, height = fig_image.size
from figpptx import artist_misc
transformer = SlideTransformer(0, 0, size=(width, height), offset=(0, 0))
if isinstance(artist, Axes):
fig = artist_misc.to_figure(artist)
renderer = fig.canvas.get_renderer()
bbox = artist.get_tightbbox(renderer)
vertices = transformer.transform(bbox)
box = Box.from_vertices(vertices)
elif isinstance(artist, Artist):
box = transformer.get_box(artist)
elif isinstance(artist, Sequence):
boxes = [transformer.get_box(elem) for elem in artist]
box = Box.union(boxes)
else:
raise ValueError("Argument Error.", artist)
xmin, xmax = math.floor(box.left), math.ceil(box.left + box.width)
ymin, ymax = math.floor(box.top), math.ceil(box.top + box.height)
xmin, xmax = max(0, xmin), min(xmax, width - 1)
ymin, ymax = max(0, ymin), min(ymax, height - 1)
image = fig_image.crop([xmin, ymin, xmax + 1, ymax + 1])
return image
@contextmanager
def _store_visibility(artists):
stored = dict()
for artist in artists:
stored[id(artist)] = artist.get_visible()
def _restore():
for artist in artists:
artist.set_visible(stored[id(artist)])
try:
yield
except Exception as e:
_restore()
raise e
else:
_restore()
if __name__ == "__main__":
pass
|
[
"io.BytesIO",
"math.ceil",
"figpptx.slide_editor.Box.from_vertices",
"math.floor",
"numpy.clip",
"figpptx.artist_misc.to_figure",
"PIL.Image.open",
"numpy.min",
"numpy.where",
"numpy.array",
"numpy.max",
"figpptx.slide_editor.Box.union",
"figpptx.slide_editor.SlideTransformer"
] |
[((1432, 1441), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1439, 1441), False, 'from io import BytesIO\n'), ((4024, 4039), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4032, 4039), True, 'import numpy as np\n'), ((4081, 4101), 'numpy.where', 'np.where', (['(alpha != 0)'], {}), '(alpha != 0)\n', (4089, 4101), True, 'import numpy as np\n'), ((4209, 4232), 'numpy.clip', 'np.clip', (['xmin', '(0)', 'width'], {}), '(xmin, 0, width)\n', (4216, 4232), True, 'import numpy as np\n'), ((4244, 4267), 'numpy.clip', 'np.clip', (['xmax', '(0)', 'width'], {}), '(xmax, 0, width)\n', (4251, 4267), True, 'import numpy as np\n'), ((4279, 4303), 'numpy.clip', 'np.clip', (['ymin', '(0)', 'height'], {}), '(ymin, 0, height)\n', (4286, 4303), True, 'import numpy as np\n'), ((4315, 4339), 'numpy.clip', 'np.clip', (['ymax', '(0)', 'height'], {}), '(ymax, 0, height)\n', (4322, 4339), True, 'import numpy as np\n'), ((4576, 4635), 'figpptx.slide_editor.SlideTransformer', 'SlideTransformer', (['(0)', '(0)'], {'size': '(width, height)', 'offset': '(0, 0)'}), '(0, 0, size=(width, height), offset=(0, 0))\n', (4592, 4635), False, 'from figpptx.slide_editor import SlideTransformer, Box\n'), ((4683, 4712), 'figpptx.artist_misc.to_figure', 'artist_misc.to_figure', (['artist'], {}), '(artist)\n', (4704, 4712), False, 'from figpptx import artist_misc\n'), ((4865, 4892), 'figpptx.slide_editor.Box.from_vertices', 'Box.from_vertices', (['vertices'], {}), '(vertices)\n', (4882, 4892), False, 'from figpptx.slide_editor import SlideTransformer, Box\n'), ((5185, 5205), 'math.floor', 'math.floor', (['box.left'], {}), '(box.left)\n', (5195, 5205), False, 'import math\n'), ((5207, 5238), 'math.ceil', 'math.ceil', (['(box.left + box.width)'], {}), '(box.left + box.width)\n', (5216, 5238), False, 'import math\n'), ((5256, 5275), 'math.floor', 'math.floor', (['box.top'], {}), '(box.top)\n', (5266, 5275), False, 'import math\n'), ((5277, 5308), 'math.ceil', 'math.ceil', (['(box.top + box.height)'], {}), 
'(box.top + box.height)\n', (5286, 5308), False, 'import math\n'), ((1501, 1516), 'PIL.Image.open', 'Image.open', (['buf'], {}), '(buf)\n', (1511, 1516), False, 'from PIL import Image\n'), ((2143, 2164), 'math.floor', 'math.floor', (['bbox.xmin'], {}), '(bbox.xmin)\n', (2153, 2164), False, 'import math\n'), ((2166, 2186), 'math.ceil', 'math.ceil', (['bbox.xmax'], {}), '(bbox.xmax)\n', (2175, 2186), False, 'import math\n'), ((2208, 2229), 'math.floor', 'math.floor', (['bbox.ymin'], {}), '(bbox.ymin)\n', (2218, 2229), False, 'import math\n'), ((2231, 2251), 'math.ceil', 'math.ceil', (['bbox.ymax'], {}), '(bbox.ymax)\n', (2240, 2251), False, 'import math\n'), ((4119, 4129), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (4125, 4129), True, 'import numpy as np\n'), ((4135, 4145), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (4141, 4145), True, 'import numpy as np\n'), ((4167, 4177), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (4173, 4177), True, 'import numpy as np\n'), ((4183, 4193), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (4189, 4193), True, 'import numpy as np\n'), ((5088, 5104), 'figpptx.slide_editor.Box.union', 'Box.union', (['boxes'], {}), '(boxes)\n', (5097, 5104), False, 'from figpptx.slide_editor import SlideTransformer, Box\n')]
|
# Eitaa PyKit
# v1.1
import requests
from bs4 import BeautifulSoup
class Eitaa(object):
def __init__(self, token):
self.token = token
def send_message(self, chat_id, text, pin=False, view_delete=-1):
r = requests.post(
f"https://eitaayar.ir/api/{self.token}/sendMessage",
data={
'chat_id': chat_id,
'text': text,
'pin': int(pin),
'viewCountForDelete': view_delete,
}
)
if bool(r.json()['ok']):
return True
else:
return r.json()
def send_file(self, chat_id, caption, file, pin=False, view_delete=-1):
r = requests.post(
f"https://eitaayar.ir/api/{self.token}/sendFile",
data={
'chat_id': chat_id,
'caption': caption,
'pin': int(pin),
'viewCountForDelete': view_delete,
},
files={
'file': open(file, 'rb'),
}
)
if bool(r.json()['ok']):
return True
else:
return r.json()
@staticmethod
def get_info(channel_id):
r = requests.get(f"https://eitaa.com/{channel_id}")
soup = BeautifulSoup(r.text, 'html.parser')
channel_name = soup.find('div', attrs={'class': 'tgme_page_title'}).text
channel_image_url = soup.find(
'img', attrs={'class': 'tgme_page_photo_image'})['src']
users_count = (str(soup.find('div', attrs={
'style': 'display: block;text-align: center;font-weight: bold'}).text).split(' '))[0]
desc = soup.find('div', attrs={'class': 'text-center'}).text
result = {
'name': " ".join(channel_name.split()),
'image_url': channel_image_url,
'users': users_count,
'desc': desc,
}
return result
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((1248, 1295), 'requests.get', 'requests.get', (['f"""https://eitaa.com/{channel_id}"""'], {}), "(f'https://eitaa.com/{channel_id}')\n", (1260, 1295), False, 'import requests\n'), ((1312, 1348), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (1325, 1348), False, 'from bs4 import BeautifulSoup\n')]
|
# -*- coding: utf-8 -*-
from django.core.management import CommandError, call_command
from django.test import TestCase
from django.test.utils import override_settings
from six import StringIO
try:
from unittest import mock
except ImportError:
import mock
class ResetSchemaExceptionsTests(TestCase):
"""Tests if reset_schema command raises exceptions."""
def test_should_raise_CommandError_when_router_does_not_exist(self):
with self.assertRaisesRegexp(CommandError, 'Unknown database router non-existing_router'):
call_command('reset_schema', '--router=non-existing_router')
@override_settings(DATABASES={
'default': {
'ENGINE': 'django.db.backends.mysql',
},
})
def test_should_raise_CommandError_when_database_ENGINE_different_thant_postgresql(self):
with self.assertRaisesRegexp(CommandError, 'This command can be used only with PostgreSQL databases.'):
call_command('reset_schema')
@override_settings(DATABASES={
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test',
'USER': 'test',
'PASSWORD': '<PASSWORD>',
'HOST': 'localhost',
},
})
class ResetSchemaTests(TestCase):
"""Tests for reset_chema command."""
def test_should_drop_schema_and_create_new_one(self):
m_cursor = mock.Mock()
m_router = mock.Mock()
m_router.cursor.return_value = mock.Mock(
__enter__=mock.Mock(return_value=m_cursor),
__exit__=mock.Mock(return_value=False),
)
expected_calls = [
mock.call('DROP SCHEMA test_public CASCADE'),
mock.call('CREATE SCHEMA test_public'),
]
with mock.patch('django_extensions.management.commands.reset_schema.connections', {'default': m_router}):
call_command('reset_schema', '--noinput', '--schema=test_public')
m_cursor.execute.assert_has_calls(expected_calls, any_order=False)
@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('django_extensions.management.commands.reset_schema.input')
def test_should_cancel_reset_schema_and_print_info_if_input_is_different_than_yes(self, m_input, m_stdout):
m_input.return_value = 'no'
call_command('reset_schema')
self.assertEqual("Reset cancelled.\n", m_stdout.getvalue())
|
[
"mock.call",
"mock.patch",
"django.core.management.call_command",
"mock.Mock",
"django.test.utils.override_settings"
] |
[((990, 1171), 'django.test.utils.override_settings', 'override_settings', ([], {'DATABASES': "{'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME':\n 'test', 'USER': 'test', 'PASSWORD': '<PASSWORD>', 'HOST': 'localhost'}}"}), "(DATABASES={'default': {'ENGINE':\n 'django.db.backends.postgresql_psycopg2', 'NAME': 'test', 'USER':\n 'test', 'PASSWORD': '<PASSWORD>', 'HOST': 'localhost'}})\n", (1007, 1171), False, 'from django.test.utils import override_settings\n'), ((621, 706), 'django.test.utils.override_settings', 'override_settings', ([], {'DATABASES': "{'default': {'ENGINE': 'django.db.backends.mysql'}}"}), "(DATABASES={'default': {'ENGINE': 'django.db.backends.mysql'}}\n )\n", (638, 706), False, 'from django.test.utils import override_settings\n'), ((2004, 2051), 'mock.patch', 'mock.patch', (['"""sys.stdout"""'], {'new_callable': 'StringIO'}), "('sys.stdout', new_callable=StringIO)\n", (2014, 2051), False, 'import mock\n'), ((2057, 2127), 'mock.patch', 'mock.patch', (['"""django_extensions.management.commands.reset_schema.input"""'], {}), "('django_extensions.management.commands.reset_schema.input')\n", (2067, 2127), False, 'import mock\n'), ((1371, 1382), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1380, 1382), False, 'import mock\n'), ((1402, 1413), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1411, 1413), False, 'import mock\n'), ((2285, 2313), 'django.core.management.call_command', 'call_command', (['"""reset_schema"""'], {}), "('reset_schema')\n", (2297, 2313), False, 'from django.core.management import CommandError, call_command\n'), ((554, 614), 'django.core.management.call_command', 'call_command', (['"""reset_schema"""', '"""--router=non-existing_router"""'], {}), "('reset_schema', '--router=non-existing_router')\n", (566, 614), False, 'from django.core.management import CommandError, call_command\n'), ((958, 986), 'django.core.management.call_command', 'call_command', (['"""reset_schema"""'], {}), "('reset_schema')\n", (970, 986), 
False, 'from django.core.management import CommandError, call_command\n'), ((1621, 1665), 'mock.call', 'mock.call', (['"""DROP SCHEMA test_public CASCADE"""'], {}), "('DROP SCHEMA test_public CASCADE')\n", (1630, 1665), False, 'import mock\n'), ((1679, 1717), 'mock.call', 'mock.call', (['"""CREATE SCHEMA test_public"""'], {}), "('CREATE SCHEMA test_public')\n", (1688, 1717), False, 'import mock\n'), ((1743, 1846), 'mock.patch', 'mock.patch', (['"""django_extensions.management.commands.reset_schema.connections"""', "{'default': m_router}"], {}), "('django_extensions.management.commands.reset_schema.connections',\n {'default': m_router})\n", (1753, 1846), False, 'import mock\n'), ((1856, 1921), 'django.core.management.call_command', 'call_command', (['"""reset_schema"""', '"""--noinput"""', '"""--schema=test_public"""'], {}), "('reset_schema', '--noinput', '--schema=test_public')\n", (1868, 1921), False, 'from django.core.management import CommandError, call_command\n'), ((1486, 1518), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'm_cursor'}), '(return_value=m_cursor)\n', (1495, 1518), False, 'import mock\n'), ((1541, 1570), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (1550, 1570), False, 'import mock\n')]
|
from flask import (
Flask,
render_template,
make_response,
redirect,
url_for,
request,
)
import json
import datetime
import glob
import random
# set random seed 42 for reproducibility (important to maintain stable word lists)
random.seed(42)
app = Flask(__name__)
###############################################################################
# DATA
###############################################################################
print("Loading data...")
data_dir = "data/"
# if not glob.glob(data_dir):
# data_dir = "../data/"
# if not glob.glob(data_dir):
# data_dir = "webapp/data/"
# print(f"data_dir: {data_dir}")
# load other_wordles.json file
with open(f"{data_dir}other_wordles.json", "r") as f:
other_wordles = json.load(f)
def load_characters(lang):
if not glob.glob(f"{data_dir}languages/{lang}/{lang}_characters.txt"):
characters = set()
with open(f"{data_dir}languages/{lang}/{lang}_5words.txt", "r") as f:
for line in f:
characters.update(line.strip())
with open(f"{data_dir}languages/{lang}/{lang}_characters.txt", "w") as f:
# sort characters
characters = sorted(characters)
# write char per newline
for char in characters:
f.write(char + "\n")
with open(f"{data_dir}languages/{lang}/{lang}_characters.txt", "r") as f:
characters = [line.strip() for line in f]
return characters
language_codes = [f.split("/")[-1] for f in glob.glob(f"{data_dir}/languages/*")]
language_characters = {lang: load_characters(lang) for lang in language_codes}
def load_words(lang):
"""loads the words and does some basic QA"""
_5words = []
with open(f"{data_dir}/languages/{lang}/{lang}_5words.txt", "r") as f:
for line in f:
_5words.append(line.strip())
# QA
_5words = [word.lower() for word in _5words if len(word) == 5 and word.isalpha()]
# remove words without correct characters
_5words = [
word
for word in _5words
if all([char in language_characters[lang] for char in word])
]
# we don't want words in order, so if .txt is not pre-shuffled, shuffle
last_letter = ""
n_in_order = 0
for word in _5words:
letter = word[0]
# check if sorted
if letter <= last_letter:
n_in_order += 1
last_letter = letter
# if 80% of words are in order, then we consider the list sorted and we shuffle it deterministically
if n_in_order / len(_5words) > 0.8:
random.shuffle(_5words)
print(f"{lang} words are sorted, shuffling")
return _5words
def load_supplemental_words(lang):
"""loads the supplemental words file if it exists"""
try:
with open(f"{data_dir}languages/{lang}/{lang}_5words_supplement.txt", "r") as f:
supplemental_words = [line.strip() for line in f]
supplemental_words = [
word
for word in supplemental_words
if all([char in language_characters[lang] for char in word])
]
except FileNotFoundError:
supplemental_words = []
return supplemental_words
def load_language_config(lang):
try:
with open(f"{data_dir}languages/{lang}/language_config.json", "r") as f:
language_config = json.load(f)
return language_config
except:
# english is fallback (not ideal but better than empty...)
with open(f"{data_dir}default_language_config.json", "r") as f:
language_config = json.load(f)
return language_config
def load_keyboard(lang):
try:
with open(f"{data_dir}languages/{lang}/{lang}_keyboard.json", "r") as f:
keyboard = json.load(f)
return keyboard
except:
return []
def get_todays_idx():
n_days = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).days
idx = n_days - 18992 + 195
return idx
language_codes_5words = {l_code: load_words(l_code) for l_code in language_codes}
language_codes_5words_supplements = {
l_code: load_supplemental_words(l_code) for l_code in language_codes
}
language_configs = {l_code: load_language_config(l_code) for l_code in language_codes}
keyboards = {k: load_keyboard(k) for k in language_codes}
def load_languages():
"""returns a dict of language codes mapped to their english name and native name"""
languages = {}
# for each language folder, get the language config.name and config.name_natove
for lang in language_codes:
language_config = language_configs[lang]
languages[lang] = {
"language_name": language_config["name"],
"language_name_native": language_config["name_native"],
"language_code": lang,
}
return languages
languages = load_languages()
# status
with open("../scripts/out/status_list.txt", "r") as f:
status_list = [line.strip() for line in f]
status_list_str = ""
for status in status_list:
status_list_str += (
f"<option value='{status}'>{status}{' '*(20-len(status))}</option>"
)
status_list_str += (
"<a href='https://github.com/Hugo0/wordle' target='_blank'>more at Github</a>"
)
# print stats about how many languages we have
print("\n***********************************************")
print(f" STATS")
print(f"- {len(languages)} languages")
print(
f"- {len([k for (k, v) in language_codes_5words_supplements.items() if v !=[]])} languages with supplemental words"
)
print(
f"- The language with least words is {min(language_codes_5words, key=lambda k: len(language_codes_5words[k]))}, with {len(language_codes_5words[min(language_codes_5words, key=lambda k: len(language_codes_5words[k]))])} words"
)
print(
f"- The language with most words is {max(language_codes_5words, key=lambda k: len(language_codes_5words[k]))}, with {len(language_codes_5words[max(language_codes_5words, key=lambda k: len(language_codes_5words[k]))])} words"
)
print(
f"- Average number of words per language is {sum(len(language_codes_5words[l_code]) for l_code in language_codes)/len(language_codes):.2f}"
)
print(
f"- Average length of supplemental words per language is {sum(len(language_codes_5words_supplements[l_code]) for l_code in language_codes)/len(language_codes):.2f}"
)
print(f"- There are {len(other_wordles)} other wordles")
print(f"***********************************************\n")
###############################################################################
# CLASSES
###############################################################################
class Language:
"""Holds the attributes of a language"""
def __init__(self, language_code, word_list):
self.language_code = language_code
self.word_list = word_list
self.word_list_supplement = language_codes_5words_supplements[language_code]
todays_idx = get_todays_idx()
self.daily_word = word_list[todays_idx % len(word_list)]
self.todays_idx = todays_idx
self.config = language_configs[language_code]
self.characters = language_characters[language_code]
# remove chars that aren't used to reduce bloat a bit
characters_used = []
for word in self.word_list:
characters_used += list(word)
characters_used = list(set(characters_used))
self.characters = [char for char in self.characters if char in characters_used]
self.keyboard = keyboards[language_code]
if self.keyboard == []: # if no keyboard defined, then use available chars
# keyboard of ten characters per row
for i, c in enumerate(self.characters):
if i % 10 == 0:
self.keyboard.append([])
self.keyboard[-1].append(c)
self.keyboard[-1].insert(0, "⇨")
self.keyboard[-1].append("⌫")
# Deal with bottom row being too crammed:
if len(self.keyboard[-1]) == 11:
popped_c = self.keyboard[-1].pop(1)
self.keyboard[-2].insert(-1, popped_c)
if len(self.keyboard[-1]) == 12:
popped_c = self.keyboard[-2].pop(0)
self.keyboard[-3].insert(-1, popped_c)
popped_c = self.keyboard[-1].pop(2)
self.keyboard[-2].insert(-1, popped_c)
popped_c = self.keyboard[-1].pop(2)
self.keyboard[-2].insert(-1, popped_c)
###############################################################################
# ROUTES
###############################################################################
# before request, redirect to https (unless localhost)
@app.before_request
def before_request():
print("BEFORE REQUEST")
if (
request.url.startswith("http://")
and not "localhost" in request.url
and not "127.0.0" in request.url
):
url = request.url.replace("http://", "https://", 1)
code = 301
return redirect(url, code=code)
@app.route("/")
def index():
return render_template(
"index.html",
languages=languages,
language_codes=language_codes,
todays_idx=get_todays_idx(),
other_wordles=other_wordles,
)
@app.route("/stats")
def stats():
return status_list_str
# sitemap
@app.route("/sitemap.xml")
def site_map():
response = make_response(
render_template(
"sitemap.xml", languages=languages, base_url="https://wordle.global"
)
)
response.headers["Content-Type"] = "application/xml"
return response
# arbitrary app route
@app.route("/<lang_code>")
def language(lang_code):
if lang_code not in language_codes:
return "Language not found"
word_list = language_codes_5words[lang_code]
language = Language(lang_code, word_list)
return render_template("game.html", language=language)
if __name__ == "__main__":
app.run()
|
[
"json.load",
"flask.redirect",
"random.shuffle",
"flask.Flask",
"datetime.datetime",
"flask.request.url.replace",
"datetime.datetime.utcnow",
"random.seed",
"flask.render_template",
"glob.glob",
"flask.request.url.startswith"
] |
[((252, 267), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (263, 267), False, 'import random\n'), ((275, 290), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (280, 290), False, 'from flask import Flask, render_template, make_response, redirect, url_for, request\n'), ((764, 776), 'json.load', 'json.load', (['f'], {}), '(f)\n', (773, 776), False, 'import json\n'), ((9932, 9979), 'flask.render_template', 'render_template', (['"""game.html"""'], {'language': 'language'}), "('game.html', language=language)\n", (9947, 9979), False, 'from flask import Flask, render_template, make_response, redirect, url_for, request\n'), ((817, 879), 'glob.glob', 'glob.glob', (['f"""{data_dir}languages/{lang}/{lang}_characters.txt"""'], {}), "(f'{data_dir}languages/{lang}/{lang}_characters.txt')\n", (826, 879), False, 'import glob\n'), ((1524, 1560), 'glob.glob', 'glob.glob', (['f"""{data_dir}/languages/*"""'], {}), "(f'{data_dir}/languages/*')\n", (1533, 1560), False, 'import glob\n'), ((2580, 2603), 'random.shuffle', 'random.shuffle', (['_5words'], {}), '(_5words)\n', (2594, 2603), False, 'import random\n'), ((8854, 8887), 'flask.request.url.startswith', 'request.url.startswith', (['"""http://"""'], {}), "('http://')\n", (8876, 8887), False, 'from flask import Flask, render_template, make_response, redirect, url_for, request\n'), ((8993, 9038), 'flask.request.url.replace', 'request.url.replace', (['"""http://"""', '"""https://"""', '(1)'], {}), "('http://', 'https://', 1)\n", (9012, 9038), False, 'from flask import Flask, render_template, make_response, redirect, url_for, request\n'), ((9073, 9097), 'flask.redirect', 'redirect', (['url'], {'code': 'code'}), '(url, code=code)\n', (9081, 9097), False, 'from flask import Flask, render_template, make_response, redirect, url_for, request\n'), ((9483, 9573), 'flask.render_template', 'render_template', (['"""sitemap.xml"""'], {'languages': 'languages', 'base_url': '"""https://wordle.global"""'}), "('sitemap.xml', 
languages=languages, base_url=\n 'https://wordle.global')\n", (9498, 9573), False, 'from flask import Flask, render_template, make_response, redirect, url_for, request\n'), ((3350, 3362), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3359, 3362), False, 'import json\n'), ((3759, 3771), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3768, 3771), False, 'import json\n'), ((3864, 3890), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3888, 3890), False, 'import datetime\n'), ((3893, 3922), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (3910, 3922), False, 'import datetime\n'), ((3575, 3587), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3584, 3587), False, 'import json\n')]
|
import pytest
import numpy as np
from numpy.testing import assert_, run_module_suite
from qutip import (smesolve, mesolve, photocurrent_mesolve, liouvillian,
QobjEvo, spre, spost, destroy, coherent, parallel_map,
qeye, fock_dm, general_stochastic, ket2dm, num)
def f(t, args):
return args["a"] * t
@pytest.mark.slow
def test_smesolve_homodyne_methods():
"Stochastic: smesolve: homodyne methods with single jump operator"
def arccoth(x):
return 0.5*np.log((1.+x)/(x-1.))
th = 0.1 # Interaction parameter
alpha = np.cos(th)
beta = np.sin(th)
gamma = 1.
N = 30 # number of Fock states
Id = qeye(N)
a = destroy(N)
s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag())
x = (a + a.dag()) * 2**-0.5
H = Id
c_op = [gamma**0.5 * a]
sc_op = [s]
e_op = [x, x*x]
rho0 = fock_dm(N,0) # initial vacuum state
T = 3. # final time
# number of time steps for which we save the expectation values
N_store = 121
Nsub = 10
tlist = np.linspace(0, T, N_store)
ddt = (tlist[1]-tlist[0])
#### Analytic solution
y0 = 0.5
A = (gamma**2 + alpha**2 * (beta**2 + 4*gamma) - 2*alpha*beta*gamma)**0.5
B = arccoth((-4*alpha**2*y0 + alpha*beta - gamma)/A)
y_an = (alpha*beta - gamma + A / np.tanh(0.5*A*tlist - B))/(4*alpha**2)
list_methods_tol = [['euler-maruyama', 2e-2],
['pc-euler', 2e-3],
['pc-euler-2', 2e-3],
['platen', 1e-3],
['milstein', 1e-3],
['milstein-imp', 1e-3],
['rouchon', 1e-3],
['taylor1.5', 1e-4],
['taylor1.5-imp', 1e-4],
['explicit1.5', 1e-4],
['taylor2.0', 1e-4]]
for n_method in list_methods_tol:
sol = smesolve(H, rho0, tlist, c_op, sc_op, e_op,
nsubsteps=Nsub, method='homodyne', solver = n_method[0])
sol2 = smesolve(H, rho0, tlist, c_op, sc_op, e_op, store_measurement=0,
nsubsteps=Nsub, method='homodyne', solver = n_method[0],
noise = sol.noise)
sol3 = smesolve(H, rho0, tlist, c_op, sc_op, e_op,
nsubsteps=Nsub*5, method='homodyne',
solver = n_method[0], tol=1e-8)
err = 1/T * np.sum(np.abs(y_an - \
(sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
err3 = 1/T * np.sum(np.abs(y_an - \
(sol3.expect[1]-sol3.expect[0]*sol3.expect[0].conj())))*ddt
print(n_method[0], ': deviation =', err, ', tol =', n_method[1])
assert_(err < n_method[1])
# 5* more substep should decrease the error
assert_(err3 < err)
# just to check that noise is not affected by smesolve
assert_(np.all(sol.noise == sol2.noise))
assert_(np.all(sol.expect[0] == sol2.expect[0]))
sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler',
store_measurement=1)
sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler',
store_measurement=0)
sol3 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=11, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler')
# sol and sol2 have the same seed, sol3 differ.
assert_(np.all(sol.noise == sol2.noise))
assert_(np.all(sol.noise != sol3.noise))
assert_(not np.all(sol.measurement[0] == 0.+0j))
assert_(np.all(sol2.measurement[0] == 0.+0j))
sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([1,2]),
ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler')
sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([2,1]),
ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler')
# sol and sol2 have the seed of traj 1 and 2 reversed.
assert_(np.all(sol.noise[0,:,:,:] == sol2.noise[1,:,:,:]))
assert_(np.all(sol.noise[1,:,:,:] == sol2.noise[0,:,:,:]))
def test_smesolve_photocurrent():
"Stochastic: photocurrent_mesolve"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
res = photocurrent_mesolve(H, psi0, times, [], sc_ops, e_ops, args={"a":2},
ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True,
map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops))
for m in res.measurement]))
def test_smesolve_homodyne():
"Stochastic: smesolve: homodyne, time-dependent H"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'pc-euler',
'pc-euler-2',
'platen',
'milstein',
'milstein-imp',
'rouchon',
'taylor15',
'taylor15-imp',
'explicit15']
for solver in list_methods_tol:
res = smesolve(H, psi0, times, [], sc_ops, e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2},
method='homodyne', store_measurement=True,
solver=solver, map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops))
for m in res.measurement]))
@pytest.mark.slow
def test_smesolve_heterodyne():
"Stochastic: smesolve: heterodyne, time-dependent H"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a, f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'pc-euler',
'pc-euler-2',
'platen',
'milstein',
'milstein-imp',
'rouchon',
'taylor15',
'taylor15-imp',
'explicit15']
for solver in list_methods_tol:
res = smesolve(H, psi0, times, [], sc_ops, e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2},
method='heterodyne', store_measurement=True,
solver=solver, map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops), 2)
for m in res.measurement]))
@pytest.mark.slow
def test_general_stochastic():
"Stochastic: general_stochastic"
"Reproduce smesolve homodyne"
tol = 0.025
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 50
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
L = liouvillian(QobjEvo([[a.dag() * a,f]], args={"a":2}), c_ops = sc_ops)
L.compile()
sc_opsM = [QobjEvo(spre(op) + spost(op.dag())) for op in sc_ops]
[op.compile() for op in sc_opsM]
e_opsM = [spre(op) for op in e_ops]
def d1(t, vec):
return L.mul_vec(t,vec)
def d2(t, vec):
out = []
for op in sc_opsM:
out.append(op.mul_vec(t,vec)-op.expect(t,vec)*vec)
return np.stack(out)
times = np.linspace(0, 0.5, 13)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'platen',
'explicit15']
for solver in list_methods_tol:
res = general_stochastic(ket2dm(psi0),times,d1,d2,len_d2=2, e_ops=e_opsM,
normalize=False, ntraj=ntraj, nsubsteps=nsubsteps,
solver=solver)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
def f_dargs(a, args):
return args["expect_op_3"] - 1
def test_ssesolve_feedback():
"Stochastic: ssesolve: time-dependent H with feedback"
tol = 0.01
N = 4
ntraj = 10
nsubsteps = 100
a = destroy(N)
H = [num(N)]
psi0 = coherent(N, 2.5)
sc_ops = [[a + a.dag(), f_dargs]]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag()), qeye(N)]
times = np.linspace(0, 10, 101)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops,
args={"expect_op_3":qeye(N)})
res = smesolve(H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, noise=1,
ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne',
map_func=parallel_map, args={"expect_op_3":qeye(N)})
print(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
if __name__ == "__main__":
run_module_suite()
|
[
"qutip.num",
"qutip.coherent",
"numpy.sin",
"qutip.destroy",
"qutip.fock_dm",
"numpy.testing.run_module_suite",
"numpy.linspace",
"qutip.photocurrent_mesolve",
"qutip.mesolve",
"qutip.spre",
"numpy.stack",
"qutip.smesolve",
"numpy.tanh",
"qutip.qeye",
"numpy.testing.assert_",
"numpy.cos",
"numpy.all",
"numpy.log",
"qutip.ket2dm",
"numpy.array",
"numpy.sqrt"
] |
[((583, 593), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (589, 593), True, 'import numpy as np\n'), ((605, 615), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (611, 615), True, 'import numpy as np\n'), ((692, 699), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (696, 699), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((708, 718), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (715, 718), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((889, 902), 'qutip.fock_dm', 'fock_dm', (['N', '(0)'], {}), '(N, 0)\n', (896, 902), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((1085, 1111), 'numpy.linspace', 'np.linspace', (['(0)', 'T', 'N_store'], {}), '(0, T, N_store)\n', (1096, 1111), True, 'import numpy as np\n'), ((3065, 3207), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist[:2]', 'c_op', 'sc_op', 'e_op'], {'noise': '(10)', 'ntraj': '(2)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': '"""euler"""', 'store_measurement': '(1)'}), "(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,\n nsubsteps=Nsub, method='homodyne', solver='euler', store_measurement=1)\n", (3073, 3207), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((3255, 3397), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist[:2]', 'c_op', 'sc_op', 'e_op'], {'noise': '(10)', 'ntraj': '(2)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': '"""euler"""', 'store_measurement': '(0)'}), "(H, rho0, tlist[:2], c_op, sc_op, e_op, 
noise=10, ntraj=2,\n nsubsteps=Nsub, method='homodyne', solver='euler', store_measurement=0)\n", (3263, 3397), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((3445, 3566), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist[:2]', 'c_op', 'sc_op', 'e_op'], {'noise': '(11)', 'ntraj': '(2)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': '"""euler"""'}), "(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=11, ntraj=2,\n nsubsteps=Nsub, method='homodyne', solver='euler')\n", (3453, 3566), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4495, 4505), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (4502, 4505), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4544, 4560), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (4552, 4560), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4694, 4717), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (4705, 4717), True, 'import numpy as np\n'), ((4732, 4785), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (4739, 4785), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4795, 4955), 'qutip.photocurrent_mesolve', 'photocurrent_mesolve', (['H', 'psi0', 'times', '[]', 'sc_ops', 'e_ops'], 
{'args': "{'a': 2}", 'ntraj': 'ntraj', 'nsubsteps': 'nsubsteps', 'store_measurement': '(True)', 'map_func': 'parallel_map'}), "(H, psi0, times, [], sc_ops, e_ops, args={'a': 2},\n ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True, map_func=\n parallel_map)\n", (4815, 4955), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((5425, 5435), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (5432, 5435), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((5474, 5490), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (5482, 5490), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((5624, 5647), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (5635, 5647), True, 'import numpy as np\n'), ((5662, 5715), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (5669, 5715), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((6863, 6873), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (6870, 6873), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((6913, 6929), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (6921, 6929), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, 
coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((7063, 7086), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (7074, 7086), True, 'import numpy as np\n'), ((7101, 7154), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (7108, 7154), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8319, 8329), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (8326, 8329), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8368, 8384), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (8376, 8384), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8969, 8992), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)', '(13)'], {}), '(0, 0.5, 13)\n', (8980, 8992), True, 'import numpy as np\n'), ((9007, 9060), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (9014, 9060), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9809, 9819), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (9816, 9819), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9849, 9865), 'qutip.coherent', 'coherent', (['N', '(2.5)'], {}), '(N, 2.5)\n', (9857, 9865), 
False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9986, 10009), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (9997, 10009), True, 'import numpy as np\n'), ((10486, 10504), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (10502, 10504), False, 'from numpy.testing import assert_, run_module_suite\n'), ((1951, 2054), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist', 'c_op', 'sc_op', 'e_op'], {'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': 'n_method[0]'}), "(H, rho0, tlist, c_op, sc_op, e_op, nsubsteps=Nsub, method=\n 'homodyne', solver=n_method[0])\n", (1959, 2054), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((2090, 2231), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist', 'c_op', 'sc_op', 'e_op'], {'store_measurement': '(0)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': 'n_method[0]', 'noise': 'sol.noise'}), "(H, rho0, tlist, c_op, sc_op, e_op, store_measurement=0, nsubsteps=\n Nsub, method='homodyne', solver=n_method[0], noise=sol.noise)\n", (2098, 2231), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((2292, 2410), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist', 'c_op', 'sc_op', 'e_op'], {'nsubsteps': '(Nsub * 5)', 'method': '"""homodyne"""', 'solver': 'n_method[0]', 'tol': '(1e-08)'}), "(H, rho0, tlist, c_op, sc_op, e_op, nsubsteps=Nsub * 5, method=\n 'homodyne', solver=n_method[0], tol=1e-08)\n", (2300, 2410), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, 
fock_dm, general_stochastic, ket2dm, num\n'), ((2778, 2804), 'numpy.testing.assert_', 'assert_', (['(err < n_method[1])'], {}), '(err < n_method[1])\n', (2785, 2804), False, 'from numpy.testing import assert_, run_module_suite\n'), ((2865, 2884), 'numpy.testing.assert_', 'assert_', (['(err3 < err)'], {}), '(err3 < err)\n', (2872, 2884), False, 'from numpy.testing import assert_, run_module_suite\n'), ((3647, 3678), 'numpy.all', 'np.all', (['(sol.noise == sol2.noise)'], {}), '(sol.noise == sol2.noise)\n', (3653, 3678), True, 'import numpy as np\n'), ((3692, 3723), 'numpy.all', 'np.all', (['(sol.noise != sol3.noise)'], {}), '(sol.noise != sol3.noise)\n', (3698, 3723), True, 'import numpy as np\n'), ((3790, 3831), 'numpy.all', 'np.all', (['(sol2.measurement[0] == 0.0 + 0.0j)'], {}), '(sol2.measurement[0] == 0.0 + 0.0j)\n', (3796, 3831), True, 'import numpy as np\n'), ((4220, 4275), 'numpy.all', 'np.all', (['(sol.noise[0, :, :, :] == sol2.noise[1, :, :, :])'], {}), '(sol.noise[0, :, :, :] == sol2.noise[1, :, :, :])\n', (4226, 4275), True, 'import numpy as np\n'), ((4283, 4338), 'numpy.all', 'np.all', (['(sol.noise[1, :, :, :] == sol2.noise[0, :, :, :])'], {}), '(sol.noise[1, :, :, :] == sol2.noise[0, :, :, :])\n', (4289, 4338), True, 'import numpy as np\n'), ((6140, 6322), 'qutip.smesolve', 'smesolve', (['H', 'psi0', 'times', '[]', 'sc_ops', 'e_ops'], {'ntraj': 'ntraj', 'nsubsteps': 'nsubsteps', 'args': "{'a': 2}", 'method': '"""homodyne"""', 'store_measurement': '(True)', 'solver': 'solver', 'map_func': 'parallel_map'}), "(H, psi0, times, [], sc_ops, e_ops, ntraj=ntraj, nsubsteps=\n nsubsteps, args={'a': 2}, method='homodyne', store_measurement=True,\n solver=solver, map_func=parallel_map)\n", (6148, 6322), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((7579, 7763), 'qutip.smesolve', 'smesolve', (['H', 'psi0', 'times', '[]', 
'sc_ops', 'e_ops'], {'ntraj': 'ntraj', 'nsubsteps': 'nsubsteps', 'args': "{'a': 2}", 'method': '"""heterodyne"""', 'store_measurement': '(True)', 'solver': 'solver', 'map_func': 'parallel_map'}), "(H, psi0, times, [], sc_ops, e_ops, ntraj=ntraj, nsubsteps=\n nsubsteps, args={'a': 2}, method='heterodyne', store_measurement=True,\n solver=solver, map_func=parallel_map)\n", (7587, 7763), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8720, 8728), 'qutip.spre', 'spre', (['op'], {}), '(op)\n', (8724, 8728), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8942, 8955), 'numpy.stack', 'np.stack', (['out'], {}), '(out)\n', (8950, 8955), True, 'import numpy as np\n'), ((9830, 9836), 'qutip.num', 'num', (['N'], {}), '(N)\n', (9833, 9836), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9964, 9971), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (9968, 9971), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((511, 540), 'numpy.log', 'np.log', (['((1.0 + x) / (x - 1.0))'], {}), '((1.0 + x) / (x - 1.0))\n', (517, 540), True, 'import numpy as np\n'), ((2964, 2995), 'numpy.all', 'np.all', (['(sol.noise == sol2.noise)'], {}), '(sol.noise == sol2.noise)\n', (2970, 2995), True, 'import numpy as np\n'), ((3013, 3052), 'numpy.all', 'np.all', (['(sol.expect[0] == sol2.expect[0])'], {}), '(sol.expect[0] == sol2.expect[0])\n', (3019, 3052), True, 'import numpy as np\n'), ((3741, 3781), 'numpy.all', 'np.all', (['(sol.measurement[0] == 0.0 + 
0.0j)'], {}), '(sol.measurement[0] == 0.0 + 0.0j)\n', (3747, 3781), True, 'import numpy as np\n'), ((3892, 3908), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3900, 3908), True, 'import numpy as np\n'), ((4053, 4069), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (4061, 4069), True, 'import numpy as np\n'), ((4575, 4589), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (4582, 4589), True, 'import numpy as np\n'), ((5505, 5519), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (5512, 5519), True, 'import numpy as np\n'), ((6944, 6958), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (6951, 6958), True, 'import numpy as np\n'), ((8399, 8413), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (8406, 8413), True, 'import numpy as np\n'), ((9243, 9255), 'qutip.ket2dm', 'ket2dm', (['psi0'], {}), '(psi0)\n', (9249, 9255), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((1355, 1383), 'numpy.tanh', 'np.tanh', (['(0.5 * A * tlist - B)'], {}), '(0.5 * A * tlist - B)\n', (1362, 1383), True, 'import numpy as np\n'), ((4595, 4609), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (4602, 4609), True, 'import numpy as np\n'), ((5525, 5539), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (5532, 5539), True, 'import numpy as np\n'), ((6964, 6978), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (6971, 6978), True, 'import numpy as np\n'), ((8419, 8433), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (8426, 8433), True, 'import numpy as np\n'), ((8623, 8631), 'qutip.spre', 'spre', (['op'], {}), '(op)\n', (8627, 8631), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((10105, 10112), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', 
(10109, 10112), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((10321, 10328), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (10325, 10328), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n')]
|
"""
===============================
A demo of rooms environment
===============================
Illustration of NRooms environment
.. video:: ../../video_plot_rooms.mp4
:width: 600
"""
# sphinx_gallery_thumbnail_path = 'thumbnails/video_plot_rooms.jpg'
from rlberry.envs.benchmarks.grid_exploration.nroom import NRoom
from rlberry.agents.dynprog import ValueIterationAgent
env = NRoom(
nrooms=9,
remove_walls=False,
room_size=9,
initial_state_distribution="center",
include_traps=True,
)
horizon = env.observation_space.n
agent = ValueIterationAgent(env, gamma=0.999, horizon=horizon)
print("fitting...")
info = agent.fit()
print(info)
env.enable_rendering()
for _ in range(10):
state = env.reset()
for tt in range(horizon):
# action = agent.policy(state)
action = env.action_space.sample()
next_s, _, done, _ = env.step(action)
if done:
break
state = next_s
env.render()
video = env.save_video("_video/video_plot_rooms.mp4")
|
[
"rlberry.agents.dynprog.ValueIterationAgent",
"rlberry.envs.benchmarks.grid_exploration.nroom.NRoom"
] |
[((387, 497), 'rlberry.envs.benchmarks.grid_exploration.nroom.NRoom', 'NRoom', ([], {'nrooms': '(9)', 'remove_walls': '(False)', 'room_size': '(9)', 'initial_state_distribution': '"""center"""', 'include_traps': '(True)'}), "(nrooms=9, remove_walls=False, room_size=9, initial_state_distribution\n ='center', include_traps=True)\n", (392, 497), False, 'from rlberry.envs.benchmarks.grid_exploration.nroom import NRoom\n'), ((559, 613), 'rlberry.agents.dynprog.ValueIterationAgent', 'ValueIterationAgent', (['env'], {'gamma': '(0.999)', 'horizon': 'horizon'}), '(env, gamma=0.999, horizon=horizon)\n', (578, 613), False, 'from rlberry.agents.dynprog import ValueIterationAgent\n')]
|
from typing import Dict, List, Union
from typeguard import check_argument_types
import tensorflow as tf
import numpy as np
from neuralmonkey.decoders.autoregressive import AutoregressiveDecoder
from neuralmonkey.decoders.sequence_labeler import SequenceLabeler
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import BaseRunner
# Decoder types whose training cross-entropies this runner can report.
SupportedDecoders = Union[AutoregressiveDecoder, SequenceLabeler]
class XentRunner(BaseRunner[SupportedDecoders]):
    """Runner that reports the decoder's training cross-entropy ("xent")."""
    # pylint: disable=too-few-public-methods
    # Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
    class Executable(BaseRunner.Executable["XentRunner"]):
        def collect_results(self, results: List[Dict]) -> None:
            # Average the per-result cross-entropy arrays elementwise, then
            # report the overall mean as the single "xent" loss.
            xents = np.mean([res["xents"] for res in results], axis=0)
            self.set_runner_result(outputs=xents.tolist(),
                                   losses=[float(np.mean(xents))])
    # pylint: enable=too-few-public-methods
    def __init__(self,
                 output_series: str,
                 decoder: SupportedDecoders) -> None:
        check_argument_types()
        super().__init__(output_series, decoder)
    @tensor
    def fetches(self) -> Dict[str, tf.Tensor]:
        # Fetch the decoder's training cross-entropies under the "xents" key
        # consumed by Executable.collect_results above.
        return {"xents": self.decoder.train_xents}
    @property
    def loss_names(self) -> List[str]:
        return ["xent"]
|
[
"numpy.mean",
"typeguard.check_argument_types"
] |
[((1083, 1105), 'typeguard.check_argument_types', 'check_argument_types', ([], {}), '()\n', (1103, 1105), False, 'from typeguard import check_argument_types\n'), ((739, 789), 'numpy.mean', 'np.mean', (["[res['xents'] for res in results]"], {'axis': '(0)'}), "([res['xents'] for res in results], axis=0)\n", (746, 789), True, 'import numpy as np\n'), ((898, 912), 'numpy.mean', 'np.mean', (['xents'], {}), '(xents)\n', (905, 912), True, 'import numpy as np\n')]
|
import json

from ged4py import GedcomReader

# GEDCOM export to convert into a Django fixture of parent/child relationships.
path = r"C:\Python\Python38\Django\familysite\ca1z66_78236416fprf45ca4e51z3.ged"


def _db_pk(gedcom_xref):
    """Return the local ``Individual`` pk for a GEDCOM xref id, or ``None``.

    NOTE(review): ``Individual`` is not imported in this script -- it appears
    to be intended for a Django shell where the model is already in scope;
    confirm before running standalone.
    """
    try:
        return Individual.objects.get(gedcom_id__exact=gedcom_xref).id
    except Individual.DoesNotExist:
        return None


entries = []  # one dict per fixture record; serialized once at the end
with GedcomReader(path) as parser:
    for indi in parser.records0("INDI"):
        gedcom_id = indi.__dict__['xref_id']
        father = indi.father
        mother = indi.mother
        # Compute the child's role on every record so that a missing SEX tag
        # can no longer leak the previous individual's value (the old code
        # left ``child`` stale or unbound in that case).
        if indi.sex == 'F':
            child = "Дочь"
        elif indi.sex == 'M':
            child = "Сын"
        else:
            child = ""
        individual_1_id = _db_pk(gedcom_id)
        for parent, rel_type, parent_role in (
                (father, "Child-Father", "Отец"),
                (mother, "Child-Mother", "Мать")):
            if not parent:
                continue
            # Looking the parent pk up per entry also fixes the old bug where
            # a failed lookup silently reused a value from a prior iteration.
            parent_pk = _db_pk(parent.__dict__['xref_id'])
            entries.append({
                "model": "familyroster.Relationship",
                # Sequential pk: the old code reused ``i + 1`` for both the
                # father and mother rows, producing duplicate primary keys.
                "pk": len(entries) + 1,
                "fields": {
                    "relationship_type": rel_type,
                    # f-string keeps the old output shape ("None" if unknown).
                    "individual_1_id": f"{individual_1_id}",
                    "individual_1_role_id": f"{child}",
                    "individual_2_id": f"{parent_pk}",
                    "individual_2_role_id": parent_role,
                },
            })

# json.dump guarantees well-formed output; the old manual string building
# dropped a comma when the last individual had both parents and never escaped
# special characters inside values.
with open("relationship_father_mother.json", "w", encoding="UTF-8") as f:
    json.dump(entries, f, ensure_ascii=False, indent=4)
|
[
"ged4py.GedcomReader"
] |
[((133, 151), 'ged4py.GedcomReader', 'GedcomReader', (['path'], {}), '(path)\n', (145, 151), False, 'from ged4py import GedcomReader\n'), ((278, 296), 'ged4py.GedcomReader', 'GedcomReader', (['path'], {}), '(path)\n', (290, 296), False, 'from ged4py import GedcomReader\n')]
|
from PIL import Image
from image import crop_to_bin

# Sprite sheet: 8 columns (81 px apart) x 5 rows (130 px apart).
img = Image.open('ui.png')
# Tile label for every cell in scan order (column by column, top to bottom);
# an empty string marks a cell that should not be exported.
card_name_dict = ["7s", "5p", "5s", "5z", "6m",
                  "6p", "6s", "6z", "7m", "7p",
                  "5m", "7z", "8m", "8p", "8s",
                  "9m", "9p", "9s", "", "2z",
                  "0p", "1s", "1z", "2m", "2p",
                  "0s", "2s", "3p", "3s", "3z",
                  "1m", "0m", "4m", "4s", "4z",
                  "1p", "3m", "4p", "", ""]
# Cell origins in the same order the labels are listed.
cells = [(col, row) for col in range(0, 647, 81) for row in range(0, 649, 130)]
for card_name, (x, y) in zip(card_name_dict, cells):
    if card_name == "":
        continue
    # Crop box is (left, top, right, bottom), with a 7 px top margin.
    card_img = img.crop((x, y + 7, x + 80, y + 122))
    card_img.save("cards\%s.png" % str(card_name))
    # Also export a binarised copy of the same tile.
    binary_img = crop_to_bin(card_img)
    binary_img.save("bin\%s.png" % str(card_name))
|
[
"image.crop_to_bin",
"PIL.Image.open"
] |
[((59, 79), 'PIL.Image.open', 'Image.open', (['"""ui.png"""'], {}), "('ui.png')\n", (69, 79), False, 'from PIL import Image\n'), ((788, 809), 'image.crop_to_bin', 'crop_to_bin', (['crop_img'], {}), '(crop_img)\n', (799, 809), False, 'from image import crop_to_bin\n')]
|
# %%
from operator import index
import os
# Notebook-style bootstrap: normalise the working directory so relative data
# paths resolve.
try:
    os.chdir(os.path.join(os.getcwd(), '.'))
    print(os.getcwd())
# NOTE(review): bare except silently swallows everything (incl. SystemExit);
# kept as-is since this is throwaway notebook boilerplate.
except:
    pass
# %%
from IPython import get_ipython
# %%
import pandas as pd
import numpy as np
import os
#%%
# Word table with 'peso' (weight) and 'count' columns, and -- presumably --
# boolean indicator columns per letter ('r') and per letter+position ('r1');
# confirm against the CSV schema.
wordle = pd.read_csv('./data/wordle.csv.zip', sep=',', encoding="utf-8", index_col=0)
wordle.sort_values(['peso', 'count'], ascending=False, inplace=True)
w = wordle
w.head(30).sort_values(['count'], ascending=False)
#%%
# Wordle feedback entered by hand:
#   sim: letters known to be in the word; nao: letters known to be absent;
#   posicao_sim: letters confirmed at a position; posicao_nao: letters
#   excluded from a position.
sim = 'ri'
nao = 'aseght'
posicao_sim = ['r','','','','']
posicao_nao = ['','i','i','','']
r = wordle
# Keep words containing every confirmed letter...
for c in sim:
    r = r[r[c]]
# ...and drop words containing any excluded letter.
for c in nao:
    r = r[~r[c]]
p = 0
# Require confirmed letters at their exact 1-based positions.
for c in posicao_sim:
    p += 1
    if c != '':
        r = r[r[c+str(p)]]
p = 0
# Exclude letters from the positions where they were already tried.
for chars in posicao_nao:
    p += 1
    for c in chars:
        r = r[~r[c+str(p)]]
print('Quantidade de palavras possíveis', r.shape[0])
print('Top 30')
r.head(30).sort_values(['count'], ascending=False)
# %%
|
[
"pandas.read_csv",
"os.getcwd"
] |
[((240, 316), 'pandas.read_csv', 'pd.read_csv', (['"""./data/wordle.csv.zip"""'], {'sep': '""","""', 'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('./data/wordle.csv.zip', sep=',', encoding='utf-8', index_col=0)\n", (251, 316), True, 'import pandas as pd\n'), ((102, 113), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (111, 113), False, 'import os\n'), ((73, 84), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (82, 84), False, 'import os\n')]
|
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# Declarative base shared by the models in this module.
Base = declarative_base()
class Server(Base):
    """A server record: network address, hostname and power state."""
    __tablename__ = 'example_server'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    ip = Column(String, nullable=False)
    hostname = Column(String, nullable=False)
    # NOTE(review): server_default is a DDL literal; whether the string
    # 'False' parses as boolean false is backend-dependent -- confirm against
    # the target database.
    power_on = Column(Boolean, server_default='False')
|
[
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
] |
[((160, 178), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (176, 178), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((248, 281), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (254, 281), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((292, 322), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (298, 322), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((338, 368), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (344, 368), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((384, 423), 'sqlalchemy.Column', 'Column', (['Boolean'], {'server_default': '"""False"""'}), "(Boolean, server_default='False')\n", (390, 423), False, 'from sqlalchemy import Column, Integer, String, Boolean\n')]
|
from django.conf import settings
from django.urls import path
from django.conf.urls.static import static
from . import views
# Class-based view routes. The static() suffix serves files from STATIC_ROOT
# at STATIC_URL; per Django docs it yields an empty pattern list unless DEBUG
# is enabled, so it is a development-only convenience.
urlpatterns = [
    path('generic', views.GenericClassView.as_view(), name='generic_view'),
    path('template', views.TemplateClassView.as_view(), name='template_view'),
    path('list', views.ProtectedClassView.as_view(), name='protected_view'),
    path('redirect', views.RedirectClassView.as_view(), name='redirect_view'),
    path('contact', views.ContactView.as_view(), name='contact_view'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"django.conf.urls.static.static"
] |
[((529, 592), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (535, 592), False, 'from django.conf.urls.static import static\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-11-07 10:15
from __future__ import unicode_literals
from django.db import migrations
import norduniclient as nc
def forwards_func(apps, schema_editor):
    """Backfill affiliation flags on Organization nodes, defaulting to False.

    Uses the historical models via ``apps`` (migration-safe) to find all
    Organization node handles, then sets any unset affiliation flag on the
    corresponding graph-database node.
    """
    NodeType = apps.get_model('noclook', 'NodeType')
    NodeHandle = apps.get_model('noclook', 'NodeHandle')
    organization_type = NodeType.objects.get_or_create(type='Organization', slug='organization')[0]  # organization
    all_organizations = NodeHandle.objects.filter(node_type=organization_type)
    # Affiliation flags every organization node should carry explicitly.
    check_fields = [
        'affiliation_customer',
        'affiliation_end_customer',
        'affiliation_provider',
        'affiliation_partner',
        'affiliation_host_user',
        'affiliation_site_owner',
    ]
    for organization in all_organizations:
        # Fetch the graph-side node for this handle.
        orgnode = nc.get_node_model(nc.graphdb.manager, organization.handle_id)
        #orgnode = organization.get_node()
        for field in check_fields:
            org_val = orgnode.data.get(field, None)
            # NOTE(review): this also re-writes flags already stored as a
            # falsy value (e.g. False) -- presumably idempotent; confirm
            # add_property semantics.
            if not org_val:
                orgnode.add_property(field, False)
def reverse_func(apps, schema_editor):
    # Intentional no-op: reversing leaves any backfilled flags in place.
    pass
class Migration(migrations.Migration):
    """Data migration: ensure affiliation flags exist on organization nodes."""
    dependencies = [
        ('noclook', '0018_orgtypes_20191030_1256'),
    ]
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
|
[
"django.db.migrations.RunPython",
"norduniclient.get_node_model"
] |
[((809, 870), 'norduniclient.get_node_model', 'nc.get_node_model', (['nc.graphdb.manager', 'organization.handle_id'], {}), '(nc.graphdb.manager, organization.handle_id)\n', (826, 870), True, 'import norduniclient as nc\n'), ((1279, 1328), 'django.db.migrations.RunPython', 'migrations.RunPython', (['forwards_func', 'reverse_func'], {}), '(forwards_func, reverse_func)\n', (1299, 1328), False, 'from django.db import migrations\n')]
|
from utils.utils import mount_message
class GameServer:
    """Client-side facade that talks to the game server via a pair of queues."""

    def __init__(self, queue_send, queue_receive):
        self.queue_send = queue_send
        self.queue_receive = queue_receive

    def _return_function(self, send):
        """Wrap ``send`` in a zero-argument callable that enqueues it."""
        def deferred_send():
            self.queue_send.put(send)
        return deferred_send

    def create_room(self, name_player, name_room, password):
        """Request room creation; return an error string on bad input, else None."""
        if name_player == "":
            return "Name Player must be a string no empty"
        if name_room == "":
            return "Name Room must be a string no empty"
        payload = mount_message("menu_create", (name_player, name_room, password))
        self.queue_send.put(payload)
        return None

    def enter_room(self, name_player, id_room, password):
        """Request joining a room; return an error string on bad input, else None."""
        if name_player == "":
            return "Name Player must be a string no empty"
        if id_room == "":
            return "Id Room must be a string no empty"
        payload = mount_message("menu_enter", (name_player, id_room, password))
        self.queue_send.put(payload)
        return None

    def exit_room(self):
        """Return a callable that, when invoked, sends the room-exit message."""
        return self._return_function({
            "type": "room",
            "subtype": "exit"
        })

    def end(self):
        """Send the shutdown message, then spin until the queue is drained."""
        self.queue_send.put({
            "type": "menu",
            "subtype": "end"
        })
        while not self.queue_send.empty():
            continue
|
[
"utils.utils.mount_message"
] |
[((568, 632), 'utils.utils.mount_message', 'mount_message', (['"""menu_create"""', '(name_player, name_room, password)'], {}), "('menu_create', (name_player, name_room, password))\n", (581, 632), False, 'from utils.utils import mount_message\n'), ((914, 975), 'utils.utils.mount_message', 'mount_message', (['"""menu_enter"""', '(name_player, id_room, password)'], {}), "('menu_enter', (name_player, id_room, password))\n", (927, 975), False, 'from utils.utils import mount_message\n')]
|
import os
from django.conf import settings
from factory import DjangoModelFactory, Sequence, SubFactory
from factory.fuzzy import FuzzyInteger, FuzzyChoice
from mii_sorter.models import Episode, Season, Serie, Movie, RegexRenaming
class SerieFactory(DjangoModelFactory):
    """Factory for Serie rows; get_or_create keyed on name."""
    class Meta:
        model = Serie
        django_get_or_create = ('name',)
    # Name drawn from a small fixed pool, so duplicates reuse the same row.
    name = FuzzyChoice(['Serie1', 'Serie2', 'Serie3'])
class SeasonFactory(DjangoModelFactory):
    """Factory for Season rows; unique per (serie, number)."""
    class Meta:
        model = Season
        django_get_or_create = ('serie', 'number',)
    number = FuzzyInteger(low=1, high=10)
    serie = SubFactory(SerieFactory)
class EpisodeFactory(DjangoModelFactory):
    """Factory for Episode rows; unique per (season, number)."""
    class Meta:
        model = Episode
        django_get_or_create = ('season', 'number',)
    number = FuzzyInteger(low=1, high=24)
    season = SubFactory(SeasonFactory)
    # Distinct fake media path under the configured destination folder.
    file_path = Sequence(lambda n: os.path.join(settings.DESTINATION_FOLDER, 'serie_%s.mkv' % n))
    file_size = FuzzyInteger(low=100, high=1000)
class MovieFactory(DjangoModelFactory):
    """Factory for Movie rows; get_or_create keyed on title."""
    class Meta:
        model = Movie
        django_get_or_create = ('title', )
    # Distinct fake media path under the configured destination folder.
    folder_path = Sequence(lambda n: os.path.join(settings.DESTINATION_FOLDER, 'movie_%s.mkv' % n))
    file_size = FuzzyInteger(low=100, high=1000)
    title = FuzzyChoice(['Movie1', 'Movie2', 'Movie3'])
    year = FuzzyInteger(low=2000, high=2010)
    # Sequential stand-in for an IMDb identifier.
    imdb_id = Sequence(lambda n: '%s' % n)
    rating = 5.0
    seen = False
    indexed = False
class RegexRenamingFactory(DjangoModelFactory):
    """Factory for RegexRenaming rows; empty old/new patterns by default."""
    class Meta:
        model = RegexRenaming
    old = ''
    new = ''
|
[
"factory.fuzzy.FuzzyInteger",
"factory.SubFactory",
"factory.fuzzy.FuzzyChoice",
"factory.Sequence",
"os.path.join"
] |
[((367, 410), 'factory.fuzzy.FuzzyChoice', 'FuzzyChoice', (["['Serie1', 'Serie2', 'Serie3']"], {}), "(['Serie1', 'Serie2', 'Serie3'])\n", (378, 410), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((559, 587), 'factory.fuzzy.FuzzyInteger', 'FuzzyInteger', ([], {'low': '(1)', 'high': '(10)'}), '(low=1, high=10)\n', (571, 587), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((600, 624), 'factory.SubFactory', 'SubFactory', (['SerieFactory'], {}), '(SerieFactory)\n', (610, 624), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n'), ((776, 804), 'factory.fuzzy.FuzzyInteger', 'FuzzyInteger', ([], {'low': '(1)', 'high': '(24)'}), '(low=1, high=24)\n', (788, 804), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((818, 843), 'factory.SubFactory', 'SubFactory', (['SeasonFactory'], {}), '(SeasonFactory)\n', (828, 843), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n'), ((958, 990), 'factory.fuzzy.FuzzyInteger', 'FuzzyInteger', ([], {'low': '(100)', 'high': '(1000)'}), '(low=100, high=1000)\n', (970, 990), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((1231, 1263), 'factory.fuzzy.FuzzyInteger', 'FuzzyInteger', ([], {'low': '(100)', 'high': '(1000)'}), '(low=100, high=1000)\n', (1243, 1263), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((1276, 1319), 'factory.fuzzy.FuzzyChoice', 'FuzzyChoice', (["['Movie1', 'Movie2', 'Movie3']"], {}), "(['Movie1', 'Movie2', 'Movie3'])\n", (1287, 1319), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((1331, 1364), 'factory.fuzzy.FuzzyInteger', 'FuzzyInteger', ([], {'low': '(2000)', 'high': '(2010)'}), '(low=2000, high=2010)\n', (1343, 1364), False, 'from factory.fuzzy import FuzzyInteger, FuzzyChoice\n'), ((1379, 1407), 'factory.Sequence', 'Sequence', (["(lambda n: '%s' % n)"], {}), "(lambda n: '%s' % n)\n", (1387, 1407), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n'), 
((879, 940), 'os.path.join', 'os.path.join', (['settings.DESTINATION_FOLDER', "('serie_%s.mkv' % n)"], {}), "(settings.DESTINATION_FOLDER, 'serie_%s.mkv' % n)\n", (891, 940), False, 'import os\n'), ((1152, 1213), 'os.path.join', 'os.path.join', (['settings.DESTINATION_FOLDER', "('movie_%s.mkv' % n)"], {}), "(settings.DESTINATION_FOLDER, 'movie_%s.mkv' % n)\n", (1164, 1213), False, 'import os\n')]
|
# Form / Message Bar
# Use message bars to indicate relevant status information.
# #form #message_bar
# ---
from h2o_wave import site, ui
page = site['/demo']
# One form card demonstrating each supported message_bar type.
page['example'] = ui.form_card(
    box='1 1 4 -1',
    items=[
        ui.message_bar(type='blocked', text='This action is blocked.'),
        # Fixed: trailing period added for consistency with the other bars.
        ui.message_bar(type='error', text='This is an error message.'),
        ui.message_bar(type='warning', text='This is a warning message.'),
        ui.message_bar(type='info', text='This is an information message.'),
        # Fixed grammar: 'an success' -> 'a success'.
        ui.message_bar(type='success', text='This is a success message.'),
        ui.message_bar(type='danger', text='This is a danger message.'),
    ]
)
# Push the updated card to the Wave server.
page.save()
|
[
"h2o_wave.ui.message_bar"
] |
[((233, 295), 'h2o_wave.ui.message_bar', 'ui.message_bar', ([], {'type': '"""blocked"""', 'text': '"""This action is blocked."""'}), "(type='blocked', text='This action is blocked.')\n", (247, 295), False, 'from h2o_wave import site, ui\n'), ((305, 366), 'h2o_wave.ui.message_bar', 'ui.message_bar', ([], {'type': '"""error"""', 'text': '"""This is an error message"""'}), "(type='error', text='This is an error message')\n", (319, 366), False, 'from h2o_wave import site, ui\n'), ((376, 441), 'h2o_wave.ui.message_bar', 'ui.message_bar', ([], {'type': '"""warning"""', 'text': '"""This is a warning message."""'}), "(type='warning', text='This is a warning message.')\n", (390, 441), False, 'from h2o_wave import site, ui\n'), ((451, 518), 'h2o_wave.ui.message_bar', 'ui.message_bar', ([], {'type': '"""info"""', 'text': '"""This is an information message."""'}), "(type='info', text='This is an information message.')\n", (465, 518), False, 'from h2o_wave import site, ui\n'), ((528, 594), 'h2o_wave.ui.message_bar', 'ui.message_bar', ([], {'type': '"""success"""', 'text': '"""This is an success message."""'}), "(type='success', text='This is an success message.')\n", (542, 594), False, 'from h2o_wave import site, ui\n'), ((604, 667), 'h2o_wave.ui.message_bar', 'ui.message_bar', ([], {'type': '"""danger"""', 'text': '"""This is a danger message."""'}), "(type='danger', text='This is a danger message.')\n", (618, 667), False, 'from h2o_wave import site, ui\n')]
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from future.utils import iteritems, itervalues
import abc
from future.utils import with_metaclass
import weakref
from ngraph.util.names import NameableValue
from ngraph.op_graph.op_graph import AssignableTensorOp, TensorValueOp
from ngraph.transformers.base import Transformer
from ngraph.transformers.base import DeviceTensor as BaseDeviceTensorView
from ngraph.transformers.base import Computation as BaseDeviceComputation
from ngraph.transformers.exop import ExecutionState
from ngraph.transformers.passes.exopdelegate import ExOpGraphOpAccessor
from ngraph.util.trace_events import TraceEventTracker
class DeviceComputation(BaseDeviceComputation):
    """
    A callable that can run computations on the device.
    """
    def __init__(self, transformer, computation_op, **kwargs):
        super(DeviceComputation, self).__init__(transformer, computation_op, **kwargs)
    def generate_profile(self, profiler_start, profiler_stop):
        """Serialize one profiled pass over the exop block as trace events.

        ``profiler_start``/``profiler_stop`` are parallel per-exop timestamp
        sequences; values are scaled by 1e6 (presumably seconds to
        microseconds -- confirm the profiler's unit) before being recorded.
        """
        tracker = TraceEventTracker(self.computation_op.name)
        start = iter(profiler_start)
        stop = iter(profiler_stop)
        for exop in self.computation_decl.exop_block:
            start_time = next(start) * 1e6
            duration = (next(stop) * 1e6) - start_time
            # Record each input's producing exop so the trace shows data deps.
            args = {}
            count = 0
            for input_decl in exop.input_decls:
                args["input{}".format(count)] = input_decl.source_output_decl.exop.name
                count += 1
            args['name'] = exop.name
            tracker.add_operation("ExOp", exop.op.short_name, 0, 0, start_time, duration, args)
        tracker.serialize_to_file()
class DeviceBuffer(NameableValue):
    """A named region of device storage from which device tensors are served."""
    def __init__(self, transformer, buffer, **kwargs):
        super(DeviceBuffer, self).__init__(name=buffer.buffer_name, **kwargs)
        self.transformer = transformer
        self.device_computation = transformer.device_computation
        self.size = buffer.size
        # Cache of DeviceTensors keyed by tensor_decl.buffer_key.
        self.device_tensors = dict()
    def device_tensor(self, tensor_decl, offset=0):
        """
        Get a device tensor based at offset.
        Args:
            tensor_decl: A TensorDecl.
            offset: Byte offset to this buffer.
                NOTE(review): currently unused by this implementation.
        Returns:
            The device tensor.
        """
        if tensor_decl.is_compile_only:
            raise ValueError("Allocating compile-only tensor")
        # One DeviceTensor per buffer_key, created lazily and cached.
        device_tensor = self.device_tensors.get(tensor_decl.buffer_key, None)
        if device_tensor is None:
            device_tensor = self.transformer.make_device_tensor(self.device_computation,
                                                                tensor_decl)
            self.device_tensors[tensor_decl.buffer_key] = device_tensor
        return device_tensor
    def codegen(self):
        # Intentionally a no-op at this level.
        pass
class DeviceTensor(with_metaclass(abc.ABCMeta, object)):
    """
    Something that can provide storage.
    Arguments:
        transformer: The transformer associated with this device buffer.
        computation: The computation.
        tensor_decl: The associated TensorDecl.
    Attributes:
        transformer: The transformer associated with this device buffer.
        bytes: Size of storage.
        dtype: Alignment of storage.
        views: All direct tensor views of this buffer.
    """
    def __init__(self, transformer, device_computation, tensor_decl, **kwargs):
        super(DeviceTensor, self).__init__(**kwargs)
        self.transformer = transformer
        self.name = tensor_decl.variable_name
        # Compile-only tensors never receive real device storage.
        if tensor_decl.is_compile_only:
            raise ValueError("Storage allocation for compile-only tensor")
        self.tensor_decl = tensor_decl
        self.device_computation = device_computation
        # Weak cache of views keyed by tensor_view_decl.key: entries disappear
        # automatically once nothing else holds the view.
        self.__views = weakref.WeakValueDictionary()
    @property
    def is_persistent(self):
        # Delegated to the declaration.
        return self.tensor_decl.is_persistent
    @property
    def size(self):
        return self.tensor_decl.size
    @property
    def buffer_pool_offset(self):
        return self.tensor_decl.buffer_pool_offset
    @property
    def element_type(self):
        return self.tensor_decl.element_type
    @property
    def views(self):
        """
        Returns: Iterator over currently-alive views of the buffer.
        """
        return self.__views.values()
    def transform_allocate(self):
        """
        Generate allocation code.
        """
        self.transform_allocate_views()
    def transform_allocate_views(self):
        """Generate code for allocating views"""
        for view in self.views:
            view.transform_allocate()
    def device_tensor_view(self, tensor_view_decl):
        """
        Returns a DeviceTensorView for tensor_view_decl.
        Arguments:
            tensor_view_decl: The view of the tensor.
        Returns: A DeviceTensorView.
        """
        # Serve from the weak cache when the view is still alive.
        tensor_view = self.__views.get(tensor_view_decl.key, None)
        if tensor_view is not None:
            return tensor_view
        tensor_view = self.make_device_tensor_view(tensor_view_decl)
        self.__views[tensor_view_decl.key] = tensor_view
        return tensor_view
    @abc.abstractmethod
    def make_device_tensor_view(self, tensor_view_decl):
        """
        Creates a DeviceTensorView for tensor_view_decl.
        Arguments:
            tensor_view_decl: The view of the tensor.
        Returns: A DeviceTensorView.
        """
class DeviceTensorView(BaseDeviceTensorView):
    """
    Extends DeviceBuffer with exop behavior.
    Arguments:
        device_tensor:
            The device tensor associated with this view.
        tensor_view_decl:
            The description of the tensor view to create.
    """
    def __init__(self, device_tensor, tensor_view_decl, **kwargs):
        super(DeviceTensorView, self).__init__(device_tensor.transformer,
                                               device_tensor,
                                               tensor_view_decl.tensor_description,
                                               **kwargs)
        self.name = tensor_view_decl.name
        self.device_tensor = device_tensor
        self.tensor_view_decl = tensor_view_decl
    def transform_allocate(self):
        # Legacy entry point: no longer supported for exop-based views.
        raise ValueError("Deprecated API")
class ExecutionGraphTransformer(Transformer):
    """
    Transformer that allocates storage and builds executables from the
    execution-graph (exop) representation of a computation.

    Device buffers, tensors, tensor views, and computations are cached in
    dicts keyed by their declaration objects, so each device-side object
    is created (and its codegen run) at most once.
    """

    # Count of computations added across all instances.
    computation_count = 0

    def __init__(self, **kwargs):
        super(ExecutionGraphTransformer, self).__init__(**kwargs)
        self.execution_state = ExecutionState(self)
        # Caches: declaration object -> device-side object.
        self.device_buffers = dict()
        self.device_tensors = dict()
        self.device_tensor_views = dict()
        self.device_computations = dict()
        # Pending initial values: device tensor view -> host tensor.
        self.device_initializations = dict()

    @property
    def use_exop(self):
        """
        Returns: True if this transformer uses the execution graph.
        """
        return True

    def run_registered_graph_passes(self, computation_decl, **kwargs):
        # Apply every registered graph pass to the computation's exop graph.
        op_accessor = ExOpGraphOpAccessor()
        for graph_pass in self.graph_passes:
            graph_pass.wrapped_do_pass(op_accessor=op_accessor,
                                       computation_decl=computation_decl,
                                       **kwargs)

    @abc.abstractmethod
    def make_device_tensor(self, computation, tensor_decl):
        """
        Make a DeviceTensor.

        Arguments:
            computation: The device computation owning the tensor.
            tensor_decl: A TensorDecl.

        Returns: A DeviceTensor.
        """

    def initialize_allocations(self):
        """
        Initializes allocation caches.

        Raises:
            ValueError: always; not supported by this transformer.
        """
        raise ValueError()

    def get_op_tensor_view(self, op):
        """
        Returns the tensor view for this op.

        Args:
            op: A computation graph op; must be an AssignableTensorOp.

        Returns:
            A device tensor view.

        Raises:
            ValueError: if op is not an AssignableTensorOp.
        """
        if isinstance(op, AssignableTensorOp):
            tensor_decl = self.execution_state.get_op_tensor(op)
            return self.device_tensor_view(tensor_decl.values[0].tensor_view_decl)
        else:
            raise ValueError()

    def get_tensor_view_value(self, op, host_tensor=None):
        """
        Returns the contents of the tensor view for op.

        Args:
            op: The computation graph op.
            host_tensor: Optional tensor to copy value into.

        Returns:
            A NumPy tensor with the elements associated with op.
        """
        return self.get_op_tensor_view(op).get(host_tensor)

    def load_computation(self, computation_decl):
        """
        Load a computation and associated storage into the current execution state.

        Args:
            computation_decl: A ComputationDecl for the computation.

        Returns:
            An executable for the computation.
        """
        self.device_computation = computation_decl.device_computation
        exop_block = computation_decl.exop_block
        self.start_allocate_computation(computation_decl)
        # Allocate views for returned values and for every exop's operands.
        for input_decl in itervalues(computation_decl.op_returns):
            self.device_tensor_view(input_decl.tensor_view_decl)
        for exop in exop_block:
            for input_decl in exop.input_decls:
                self.device_tensor_view(input_decl.tensor_view_decl)
            for input_decl in exop.write_args:
                self.device_tensor_view(input_decl.tensor_view_decl)
            for output_decl in exop.output_decls:
                self.device_tensor_view(output_decl.tensor_view_decl)
        # Make sure we have values for ops that got optimized out
        for input_decl in computation_decl.returns.input_decls:
            output_decl = input_decl.source_output_decl
            if isinstance(output_decl.exop.op, TensorValueOp):
                # Fix: use computation_decl directly. The original read
                # `exop.computation_decl` from the stale loop variable of the
                # loop above, which raises NameError when exop_block is empty.
                tensor_decl = computation_decl.get_tensor_decl(
                    op=output_decl.exop.op.value_tensor)
                self.device_tensor_view(
                    tensor_decl.get_tensor_view(output_decl.exop.op.tensor_description()))
            else:
                self.device_tensor_view(output_decl.tensor_view_decl)
        # Parameters always get their root views allocated.
        for param in computation_decl.computation_op.parameters:
            tensor_decl = computation_decl.get_tensor_decl(op=param.tensor)
            self.device_tensor_view(tensor_decl.root_tensor_view_decl)
        self.finish_allocate_computation(computation_decl)
        self.start_define_computation(computation_decl)
        for exop in exop_block:
            self.generate_exop(exop)
        self.finish_define_computation(computation_decl)
        executor = self.finish_load_computation(computation_decl)
        self.run_device_tensor_initializations()
        return executor

    # Subclass hook points around allocation / definition / loading.
    def start_allocate_computation(self, computation):
        pass

    def finish_allocate_computation(self, computation):
        pass

    def start_define_computation(self, computation):
        pass

    def finish_define_computation(self, computation):
        pass

    def finish_load_computation(self, computation):
        pass

    def make_device_buffer(self, buffer):
        # Factory hook; subclasses may return a device-specific buffer type.
        return DeviceBuffer(self, buffer)

    def device_buffer(self, exop_buffer):
        """
        Return the storage associated with buffer, creating if necessary.

        Args:
            exop_buffer: The buffer declaration.

        Returns:
            The cached or newly created device buffer.
        """
        device_buffer = self.device_buffers.get(exop_buffer, None)
        if device_buffer is None:
            device_buffer = self.make_device_buffer(exop_buffer)
            self.device_buffers[exop_buffer] = device_buffer
            device_buffer.codegen()
        return device_buffer

    def device_tensor_from_tensor_decl(self, tensor_decl):
        """
        Returns the device tensor, creating if necessary.

        Args:
            tensor_decl: The tensor declaration.

        Returns:
            The cached or newly created device tensor.
        """
        device_tensor = self.device_tensors.get(tensor_decl, None)
        if device_tensor is None:
            # buffer = tensor.buffer
            buffer = tensor_decl
            device_buffer = self.device_buffer(buffer)
            device_tensor = device_buffer.device_tensor(tensor_decl)
            self.device_tensors[tensor_decl] = device_tensor
            device_tensor.codegen()
        return device_tensor

    def device_tensor_view(self, tensor_view_decl):
        """
        Returns the device_tensor, creating if necessary.

        Args:
            tensor_view_decl: The tensor view.

        Returns:
            The device tensor view, or None for compile-only tensors.
        """
        if tensor_view_decl.tensor_decl.is_compile_only:
            # Compile-only tensors never get device storage.
            return None
        device_tensor_view = self.device_tensor_views.get(tensor_view_decl, None)
        if device_tensor_view is None:
            tensor_decl = tensor_view_decl.tensor_decl
            device_tensor = self.device_tensor_from_tensor_decl(tensor_decl)
            device_tensor_view = device_tensor.device_tensor_view(tensor_view_decl)
            self.device_tensor_views[tensor_view_decl] = device_tensor_view
            device_tensor_view.codegen()
            if tensor_decl.initial_value is not None \
                    or tensor_decl.is_persistent \
                    or tensor_decl.is_input:
                # Ensure the root view exists; initial values are written
                # through the root view of the tensor.
                init_device_tensor_view = self.device_tensor_view(
                    tensor_decl.root_tensor_view_decl)
                if tensor_decl.initial_value is not None:
                    self.add_device_tensor_initialization(init_device_tensor_view,
                                                          tensor_decl.initial_value)
        return device_tensor_view

    def add_device_tensor_initialization(self, device_tensor_view, host_tensor):
        # Defer the copy until run_device_tensor_initializations().
        self.device_initializations[device_tensor_view] = host_tensor

    def run_device_tensor_initializations(self):
        # Flush all pending initial values to the device, then clear the queue.
        for device_tensor_view, host_tensor in iteritems(self.device_initializations):
            device_tensor_view[()] = host_tensor
        self.device_initializations = dict()

    def host_to_device(self, device_computation, parameters, args):
        # Copy positional args into the root views of the parameter tensors.
        computation_decl = device_computation.computation_decl
        for op, arg in zip(parameters, args):
            tensor_decl = computation_decl.get_tensor_decl(op=op.tensor)
            device_tensor = self.device_tensor_view(tensor_decl.root_tensor_view_decl)
            device_tensor[()] = arg

    def device_to_host(self, device_computation, op, tensor=None):
        # Read op's value back from the device, optionally into `tensor`.
        computation_decl = device_computation.computation_decl
        if isinstance(op, AssignableTensorOp):
            tensor_decl = computation_decl.get_tensor_decl(op=op)
            device_tensor = self.device_tensor_view(tensor_decl.root_tensor_view_decl)
        else:
            tensor_view = computation_decl.op_returns[op.tensor].tensor_view_decl
            device_tensor = self.device_tensor_view(tensor_view)
        return device_tensor.get(tensor)

    def add_computation(self, computation_op):
        """
        Adds a computation to the transformer.

        Arguments:
            computation_op: A computation Op.

        Returns:
            Callable.
        """
        device_computation = self.device_computations.get(computation_op, None)
        if device_computation is not None:
            return device_computation
        execution_graph = self.execution_state.make_execution_graph(computation_op)
        computation_decl = execution_graph.computation_decl
        self.run_registered_graph_passes(computation_decl=computation_decl)
        ExecutionGraphTransformer.computation_count += 1
        device_computation = self.make_computation(computation_op)
        computation_decl.device_computation = device_computation
        device_computation.computation_decl = computation_decl
        self.device_computations[computation_op] = device_computation
        device_computation.executor = self.load_computation(computation_decl)
        return device_computation
|
[
"ngraph.transformers.exop.ExecutionState",
"future.utils.itervalues",
"ngraph.transformers.passes.exopdelegate.ExOpGraphOpAccessor",
"ngraph.util.trace_events.TraceEventTracker",
"future.utils.with_metaclass",
"future.utils.iteritems",
"weakref.WeakValueDictionary"
] |
[((3541, 3576), 'future.utils.with_metaclass', 'with_metaclass', (['abc.ABCMeta', 'object'], {}), '(abc.ABCMeta, object)\n', (3555, 3576), False, 'from future.utils import with_metaclass\n'), ((1740, 1783), 'ngraph.util.trace_events.TraceEventTracker', 'TraceEventTracker', (['self.computation_op.name'], {}), '(self.computation_op.name)\n', (1757, 1783), False, 'from ngraph.util.trace_events import TraceEventTracker\n'), ((4474, 4503), 'weakref.WeakValueDictionary', 'weakref.WeakValueDictionary', ([], {}), '()\n', (4501, 4503), False, 'import weakref\n'), ((7125, 7145), 'ngraph.transformers.exop.ExecutionState', 'ExecutionState', (['self'], {}), '(self)\n', (7139, 7145), False, 'from ngraph.transformers.exop import ExecutionState\n'), ((7596, 7617), 'ngraph.transformers.passes.exopdelegate.ExOpGraphOpAccessor', 'ExOpGraphOpAccessor', ([], {}), '()\n', (7615, 7617), False, 'from ngraph.transformers.passes.exopdelegate import ExOpGraphOpAccessor\n'), ((9609, 9648), 'future.utils.itervalues', 'itervalues', (['computation_decl.op_returns'], {}), '(computation_decl.op_returns)\n', (9619, 9648), False, 'from future.utils import iteritems, itervalues\n'), ((14360, 14398), 'future.utils.iteritems', 'iteritems', (['self.device_initializations'], {}), '(self.device_initializations)\n', (14369, 14398), False, 'from future.utils import iteritems, itervalues\n')]
|
from mars_profiling.report.presentation.core import FrequencyTable
from mars_profiling.report.presentation.flavours.html import templates
class HTMLFrequencyTable(FrequencyTable):
    """HTML flavour of the frequency table presentation element."""

    def render(self):
        # Render the shared frequency-table template with this element's content.
        template = templates.template("frequency_table.html")
        return template.render(**self.content)
|
[
"mars_profiling.report.presentation.flavours.html.templates.template"
] |
[((219, 261), 'mars_profiling.report.presentation.flavours.html.templates.template', 'templates.template', (['"""frequency_table.html"""'], {}), "('frequency_table.html')\n", (237, 261), False, 'from mars_profiling.report.presentation.flavours.html import templates\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 1 15:45:38 2022
@author: erri
"""
import numpy as np
import os
# Run identifier; selects which DoD folder to read.
run = 'q07_1'
# File name of the filtered DoD (DEM of Difference) export.
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
# Input path is resolved relative to the current working directory:
# <cwd>/DoDs/DoD_<run>/<DoD_name>
home_dir = os.getcwd()
DoDs_dir = os.path.join(home_dir, 'DoDs')
DoD_path = os.path.join(DoDs_dir, 'DoD_' + run, DoD_name)
# Load the tab-separated grid and replace the -999 no-data marker with NaN.
DoD = np.loadtxt(DoD_path, delimiter='\t')
array = np.where(DoD==-999, np.nan, DoD)
def morph_quantities(array):
    """
    Compute morphological quantities from a DoD (DEM of Difference) array.

    Input:
        array: 2D numpy array with np.nan instead of -999 (no-data cells).
            Positive values are deposition, negative values are scour.

    Returns a 14-tuple:
        tot_vol, sum_vol, dep_vol, sco_vol,
        morph_act_area, morph_act_area_dep, morph_act_area_sco,
        act_width_mean, act_width_mean_dep, act_width_mean_sco,
        act_thickness, act_thickness_dep, act_thickness_sco, bri

    Volumes are sums of cell values [L]: multiply by the cell area to get
    real volumes. Areas are cell counts [-]. Widths are mean cell counts
    per cross section (axis 0) [-]. Thicknesses are mean |cell value| over
    active cells [L]. bri is the nan-aware standard deviation of the DoD.
    """
    # Total volume matrix (nan -> 0), deposition-only and scour-only matrices.
    vol_array = np.where(np.isnan(array), 0, array)
    dep_array = (vol_array > 0) * vol_array
    sco_array = (vol_array < 0) * vol_array

    # Volumes as algebraic sums of cell values [L].
    tot_vol = np.sum(vol_array)          # net volume
    sum_vol = np.sum(np.abs(vol_array))  # |scour| + deposition
    dep_vol = np.sum(dep_array)          # deposition volume
    sco_vol = np.sum(sco_array)          # scour volume (negative)

    # Nature array: -1 = scour, 0 = no change, 1 = deposition (nan preserved).
    nature_array = np.where(array > 0, 1, array)
    nature_array = np.where(nature_array < 0, -1, nature_array)

    # Activity maps: nan -> 0 so no-data cells never count as active.
    tot_act_array = np.where(np.isnan(nature_array), 0, nature_array)
    dep_act_array = tot_act_array * (tot_act_array > 0)
    sco_act_array = tot_act_array * (tot_act_array < 0)

    # Active areas in number of cells [-].
    morph_act_area = np.count_nonzero(abs(tot_act_array))
    morph_act_area_dep = np.sum(dep_act_array)
    morph_act_area_sco = np.sum(abs(sco_act_array))

    # Crosswise active widths (cells per cross section) and their means.
    act_width_array = np.array([np.nansum(abs(tot_act_array), axis=0)])
    act_width_array_dep = np.array([np.nansum(dep_act_array, axis=0)])
    act_width_array_sco = np.array([np.nansum(abs(sco_act_array), axis=0)])
    act_width_mean = np.nanmean(act_width_array)
    act_width_mean_dep = np.nanmean(act_width_array_dep)
    act_width_mean_sco = np.nanmean(act_width_array_sco)

    # Active thicknesses: mean |value| over active cells only (zeros -> nan).
    # (Computed once each; the original recomputed the two nanmeans twice and
    # kept a dead re-assignment of vol_array.)
    dep_array = np.where(dep_array == 0, np.nan, dep_array)
    sco_array = np.where(sco_array == 0, np.nan, sco_array)
    act_thickness_dep = np.nanmean(np.abs(dep_array))
    act_thickness_sco = np.nanmean(np.abs(sco_array))
    act_thickness = act_thickness_dep + act_thickness_sco

    # Bed Relief Index: nan-aware standard deviation of the DoD.
    bri = np.nanstd(array)

    return (tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area,
            morph_act_area_dep, morph_act_area_sco, act_width_mean,
            act_width_mean_dep, act_width_mean_sco, act_thickness,
            act_thickness_dep, act_thickness_sco, bri)
# Compute all morphological quantities for the loaded DoD.
tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area, morph_act_area_dep, morph_act_area_sco, act_width_mean, act_width_mean_dep, act_width_mean_sco, act_thickness, act_thickness_dep, act_thickness_sco, bri = morph_quantities(array)
|
[
"numpy.nansum",
"numpy.sum",
"numpy.abs",
"os.getcwd",
"numpy.nanstd",
"numpy.isnan",
"numpy.where",
"numpy.loadtxt",
"os.path.join",
"numpy.nanmean"
] |
[((204, 215), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (213, 215), False, 'import os\n'), ((227, 257), 'os.path.join', 'os.path.join', (['home_dir', '"""DoDs"""'], {}), "(home_dir, 'DoDs')\n", (239, 257), False, 'import os\n'), ((269, 315), 'os.path.join', 'os.path.join', (['DoDs_dir', "('DoD_' + run)", 'DoD_name'], {}), "(DoDs_dir, 'DoD_' + run, DoD_name)\n", (281, 315), False, 'import os\n'), ((322, 358), 'numpy.loadtxt', 'np.loadtxt', (['DoD_path'], {'delimiter': '"""\t"""'}), "(DoD_path, delimiter='\\t')\n", (332, 358), True, 'import numpy as np\n'), ((368, 402), 'numpy.where', 'np.where', (['(DoD == -999)', 'np.nan', 'DoD'], {}), '(DoD == -999, np.nan, DoD)\n', (376, 402), True, 'import numpy as np\n'), ((1063, 1080), 'numpy.sum', 'np.sum', (['vol_array'], {}), '(vol_array)\n', (1069, 1080), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.sum', 'np.sum', (['dep_array'], {}), '(dep_array)\n', (1282, 1293), True, 'import numpy as np\n'), ((1376, 1393), 'numpy.sum', 'np.sum', (['sco_array'], {}), '(sco_array)\n', (1382, 1393), True, 'import numpy as np\n'), ((1536, 1565), 'numpy.where', 'np.where', (['(array > 0)', '(1)', 'array'], {}), '(array > 0, 1, array)\n', (1544, 1565), True, 'import numpy as np\n'), ((1583, 1627), 'numpy.where', 'np.where', (['(nature_array < 0)', '(-1)', 'nature_array'], {}), '(nature_array < 0, -1, nature_array)\n', (1591, 1627), True, 'import numpy as np\n'), ((2288, 2309), 'numpy.sum', 'np.sum', (['dep_act_array'], {}), '(dep_act_array)\n', (2294, 2309), True, 'import numpy as np\n'), ((3050, 3077), 'numpy.nanmean', 'np.nanmean', (['act_width_array'], {}), '(act_width_array)\n', (3060, 3077), True, 'import numpy as np\n'), ((3173, 3204), 'numpy.nanmean', 'np.nanmean', (['act_width_array_dep'], {}), '(act_width_array_dep)\n', (3183, 3204), True, 'import numpy as np\n'), ((3305, 3336), 'numpy.nanmean', 'np.nanmean', (['act_width_array_sco'], {}), '(act_width_array_sco)\n', (3315, 3336), True, 'import numpy as np\n'), ((3524, 
3567), 'numpy.where', 'np.where', (['(vol_array == 0)', 'np.nan', 'vol_array'], {}), '(vol_array == 0, np.nan, vol_array)\n', (3532, 3567), True, 'import numpy as np\n'), ((3580, 3623), 'numpy.where', 'np.where', (['(dep_array == 0)', 'np.nan', 'dep_array'], {}), '(dep_array == 0, np.nan, dep_array)\n', (3588, 3623), True, 'import numpy as np\n'), ((3636, 3679), 'numpy.where', 'np.where', (['(sco_array == 0)', 'np.nan', 'sco_array'], {}), '(sco_array == 0, np.nan, sco_array)\n', (3644, 3679), True, 'import numpy as np\n'), ((4120, 4136), 'numpy.nanstd', 'np.nanstd', (['array'], {}), '(array)\n', (4129, 4136), True, 'import numpy as np\n'), ((689, 704), 'numpy.isnan', 'np.isnan', (['array'], {}), '(array)\n', (697, 704), True, 'import numpy as np\n'), ((1162, 1179), 'numpy.abs', 'np.abs', (['vol_array'], {}), '(vol_array)\n', (1168, 1179), True, 'import numpy as np\n'), ((1698, 1720), 'numpy.isnan', 'np.isnan', (['nature_array'], {}), '(nature_array)\n', (1706, 1720), True, 'import numpy as np\n'), ((3870, 3887), 'numpy.abs', 'np.abs', (['dep_array'], {}), '(dep_array)\n', (3876, 3887), True, 'import numpy as np\n'), ((3989, 4006), 'numpy.abs', 'np.abs', (['sco_array'], {}), '(sco_array)\n', (3995, 4006), True, 'import numpy as np\n'), ((2692, 2724), 'numpy.nansum', 'np.nansum', (['dep_act_array'], {'axis': '(0)'}), '(dep_act_array, axis=0)\n', (2701, 2724), True, 'import numpy as np\n'), ((3709, 3726), 'numpy.abs', 'np.abs', (['dep_array'], {}), '(dep_array)\n', (3715, 3726), True, 'import numpy as np\n'), ((3741, 3758), 'numpy.abs', 'np.abs', (['sco_array'], {}), '(sco_array)\n', (3747, 3758), True, 'import numpy as np\n')]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author <NAME> (DreamHost)
import sys
import mock
import unittest2 as unittest
from quantum.db import migration
from quantum.db.migration import cli
class TestDbMigration(unittest.TestCase):
    """Unit tests for migration.should_run plugin filtering."""

    def test_should_run_plugin_in_list(self):
        # A plugin runs when present in the configured list, and only then.
        self.assertTrue(migration.should_run('foo', ['foo', 'bar']))
        self.assertFalse(migration.should_run('foo', ['bar']))

    def test_should_run_plugin_wildcard(self):
        # The '*' entry matches any plugin name.
        self.assertTrue(migration.should_run('foo', ['*']))
class TestCli(unittest.TestCase):
    """Unit tests for the alembic CLI wrapper in quantum.db.migration.cli."""

    def setUp(self):
        # Stub out do_alembic_command so no real alembic invocation happens.
        self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
        self.do_alembic_cmd = self.do_alembic_cmd_p.start()

    def tearDown(self):
        self.do_alembic_cmd_p.stop()
        cli.CONF.reset()

    def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs=None):
        """Run cli.main with argv and verify do_alembic_command's call.

        Fix: exp_kwargs defaulted to a mutable {} (shared across calls);
        use None as the default instead.
        """
        if exp_kwargs is None:
            exp_kwargs = {}
        with mock.patch.object(sys, 'argv', argv):
            cli.main()
        self.do_alembic_cmd.assert_has_calls(
            [mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)]
        )

    def test_stamp(self):
        # 'stamp' forwards the revision and honors --sql.
        self._main_test_helper(
            ['prog', 'stamp', 'foo'], 'stamp', ('foo',), {'sql': False})
        self._main_test_helper(
            ['prog', 'stamp', 'foo', '--sql'], 'stamp', ('foo',), {'sql': True})

    def test_current(self):
        self._main_test_helper(['prog', 'current'], 'current')

    def test_history(self):
        self._main_test_helper(['prog', 'history'], 'history')

    def test_check_migration(self):
        # 'check_migration' maps onto alembic's 'branches' command.
        self._main_test_helper(['prog', 'check_migration'], 'branches')

    def test_database_sync_revision(self):
        self._main_test_helper(
            ['prog', 'revision', '--autogenerate', '-m', 'message'],
            'revision',
            (),
            {'message': 'message', 'sql': False, 'autogenerate': True}
        )
        self._main_test_helper(
            ['prog', 'revision', '--sql', '-m', 'message'],
            'revision',
            (),
            {'message': 'message', 'sql': True, 'autogenerate': False}
        )

    def test_upgrade(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--sql', 'head'], 'upgrade', ('head',), {'sql': True})
        # --delta N becomes a relative '+N' revision.
        self._main_test_helper(
            ['prog', 'upgrade', '--delta', '3'], 'upgrade', ('+3',), {'sql': False})

    def test_downgrade(self):
        self._main_test_helper(
            ['prog', 'downgrade', '--sql', 'folsom'], 'downgrade', ('folsom',), {'sql': True})
        # --delta N becomes a relative '-N' revision.
        self._main_test_helper(
            ['prog', 'downgrade', '--delta', '2'], 'downgrade', ('-2',), {'sql': False})
|
[
"quantum.db.migration.should_run",
"mock.patch.object",
"quantum.db.migration.cli.main",
"mock.call",
"quantum.db.migration.cli.CONF.reset"
] |
[((1269, 1313), 'mock.patch.object', 'mock.patch.object', (['cli', '"""do_alembic_command"""'], {}), "(cli, 'do_alembic_command')\n", (1286, 1313), False, 'import mock\n'), ((1444, 1460), 'quantum.db.migration.cli.CONF.reset', 'cli.CONF.reset', ([], {}), '()\n', (1458, 1460), False, 'from quantum.db.migration import cli\n'), ((964, 1007), 'quantum.db.migration.should_run', 'migration.should_run', (['"""foo"""', "['foo', 'bar']"], {}), "('foo', ['foo', 'bar'])\n", (984, 1007), False, 'from quantum.db import migration\n'), ((1034, 1070), 'quantum.db.migration.should_run', 'migration.should_run', (['"""foo"""', "['bar']"], {}), "('foo', ['bar'])\n", (1054, 1070), False, 'from quantum.db import migration\n'), ((1144, 1178), 'quantum.db.migration.should_run', 'migration.should_run', (['"""foo"""', "['*']"], {}), "('foo', ['*'])\n", (1164, 1178), False, 'from quantum.db import migration\n'), ((1553, 1589), 'mock.patch.object', 'mock.patch.object', (['sys', '"""argv"""', 'argv'], {}), "(sys, 'argv', argv)\n", (1570, 1589), False, 'import mock\n'), ((1603, 1613), 'quantum.db.migration.cli.main', 'cli.main', ([], {}), '()\n', (1611, 1613), False, 'from quantum.db.migration import cli\n'), ((1681, 1736), 'mock.call', 'mock.call', (['mock.ANY', 'func_name', '*exp_args'], {}), '(mock.ANY, func_name, *exp_args, **exp_kwargs)\n', (1690, 1736), False, 'import mock\n')]
|
# pylint: disable=no-self-use,unused-variable,expression-not-assigned
from unittest.mock import Mock, patch
import log
import pytest
from expecter import expect
from gitman import cli
from gitman.common import _Config
from gitman.exceptions import ScriptFailure, UncommittedChanges
class TestMain:
    """Unit tests for the top-level arguments."""

    def test_main(self):
        """Verify the top-level command can be run."""
        command = Mock(return_value=True)
        cli.main([], command)
        command.assert_called_once_with()

    def test_main_fail(self):
        """Verify error in commands are detected."""
        failing = Mock(return_value=False)
        with pytest.raises(SystemExit):
            cli.main([], failing)

    def test_main_help(self):
        """Verify the help text can be displayed."""
        with pytest.raises(SystemExit):
            cli.main(["--help"])

    def test_main_none(self):
        """Verify it's an error to specify no command."""
        with pytest.raises(SystemExit):
            cli.main([])

    def test_main_interrupt(self):
        """Verify a command can be interrupted."""
        interrupted = Mock(side_effect=KeyboardInterrupt)
        with pytest.raises(SystemExit):
            cli.main([], interrupted)

    def test_main_error(self):
        """Verify runtime errors are handled."""
        for error in (UncommittedChanges, ScriptFailure):
            with pytest.raises(SystemExit):
                cli.main([], Mock(side_effect=error))
class TestInit:
    """Unit tests for the `init` command."""

    @patch("gitman.commands.init")
    def test_install(self, init_command):
        """Verify the 'install' command can be run."""
        cli.main(["init"])
        init_command.assert_called_once_with(force=False)
class TestInstall:
    """Unit tests for the `install` command."""

    # Keyword arguments gitman.commands.install receives when no flag is set.
    DEFAULTS = dict(
        root=None,
        depth=5,
        force=False,
        force_interactive=False,
        fetch=False,
        clean=False,
        skip_changes=False,
        skip_default_group=False,
    )

    @patch("gitman.commands.install")
    def test_install(self, mock_install):
        """Verify the 'install' command can be run."""
        cli.main(["install"])
        mock_install.assert_called_once_with(**self.DEFAULTS)

    @patch("gitman.commands.install")
    def test_install_root(self, mock_install):
        """Verify the project's root can be specified."""
        cli.main(["install", "--root", "mock/path/to/root"])
        mock_install.assert_called_once_with(
            **dict(self.DEFAULTS, root="mock/path/to/root"))

    @patch("gitman.commands.install")
    def test_install_force(self, mock_install):
        """Verify dependencies can be force-installed."""
        cli.main(["install", "--force"])
        mock_install.assert_called_once_with(**dict(self.DEFAULTS, force=True))

    @patch("gitman.commands.install")
    def test_install_fetch(self, mock_install):
        """Verify fetching can be enabled."""
        cli.main(["install", "--fetch"])
        mock_install.assert_called_once_with(**dict(self.DEFAULTS, fetch=True))

    @patch("gitman.commands.install")
    def test_install_clean(self, mock_install):
        """Verify dependency cleaning can be enabled."""
        cli.main(["install", "--clean"])
        mock_install.assert_called_once_with(**dict(self.DEFAULTS, clean=True))

    @patch("gitman.commands.install")
    def test_install_specific_sources(self, mock_install):
        """Verify individual dependencies can be installed."""
        cli.main(["install", "foo", "bar"])
        mock_install.assert_called_once_with("foo", "bar", **self.DEFAULTS)

    @patch("gitman.commands.install")
    def test_install_with_depth(self, mock_update):
        """Verify the 'install' command can be limited by depth."""
        cli.main(["install", "--depth", "10"])
        mock_update.assert_called_once_with(**dict(self.DEFAULTS, depth=10))

    @patch("gitman.commands.install", Mock())
    def test_install_with_depth_invalid(self):
        """Verify depths below 1 are rejected."""
        for bad_depth in ("0", "-1"):
            with pytest.raises(SystemExit):
                cli.main(["install", "--depth", bad_depth])
class TestUpdate:
    """Unit tests for the `update` command."""

    # Keyword arguments gitman.commands.update receives when no flag is set.
    DEFAULTS = dict(
        root=None,
        depth=5,
        force=False,
        force_interactive=False,
        clean=False,
        recurse=False,
        lock=None,
        skip_changes=False,
    )

    @patch("gitman.commands.update")
    def test_update(self, mock_update):
        """Verify the 'update' command can be run."""
        cli.main(["update"])
        mock_update.assert_called_once_with(**self.DEFAULTS)

    @patch("gitman.commands.update")
    def test_update_recursive(self, mock_update):
        """Verify the 'update' command can be run recursively."""
        cli.main(["update", "--all"])
        mock_update.assert_called_once_with(**dict(self.DEFAULTS, recurse=True))

    @patch("gitman.commands.update")
    def test_update_no_lock(self, mock_update):
        """Verify the 'update' command can disable locking."""
        cli.main(["update", "--skip-lock"])
        mock_update.assert_called_once_with(**dict(self.DEFAULTS, lock=False))

    @patch("gitman.commands.update")
    def test_update_skip_changes(self, mock_update):
        """Verify the 'update' command with skip changes option."""
        cli.main(["update", "--skip-changes"])
        mock_update.assert_called_once_with(
            **dict(self.DEFAULTS, skip_changes=True))

    @patch("gitman.commands.update")
    def test_update_force(self, mock_update):
        """Verify the 'update' command with force option."""
        cli.main(["update", "--force"])
        mock_update.assert_called_once_with(**dict(self.DEFAULTS, force=True))

    @patch("gitman.commands.update")
    def test_update_force_interactive(self, mock_update):
        """Verify the 'update' command with force-interactive option."""
        cli.main(["update", "--force-interactive"])
        mock_update.assert_called_once_with(
            **dict(self.DEFAULTS, force_interactive=True))

    @patch("gitman.commands.update")
    def test_update_specific_sources(self, mock_install):
        """Verify individual dependencies can be installed."""
        cli.main(["update", "foo", "bar"])
        mock_install.assert_called_once_with("foo", "bar", **self.DEFAULTS)

    @patch("gitman.commands.update")
    def test_update_with_depth(self, mock_update):
        """Verify the 'update' command can be limited by depth."""
        cli.main(["update", "--depth", "10"])
        mock_update.assert_called_once_with(**dict(self.DEFAULTS, depth=10))
class TestList:
    """Unit tests for the `list` command."""

    @patch("gitman.commands.display")
    def test_list(self, display_command):
        """Verify the 'list' command can be run."""
        cli.main(["list"])
        display_command.assert_called_once_with(
            root=None, depth=5, allow_dirty=True)

    @patch("gitman.commands.display")
    def test_list_root(self, display_command):
        """Verify the project's root can be specified."""
        cli.main(["list", "--root", "mock/path/to/root"])
        display_command.assert_called_once_with(
            root="mock/path/to/root", depth=5, allow_dirty=True)

    @patch("gitman.commands.display")
    def test_list_no_dirty(self, display_command):
        """Verify the 'list' command can be set to fail when dirty."""
        cli.main(["list", "--fail-if-dirty"])
        display_command.assert_called_once_with(
            root=None, depth=5, allow_dirty=False)

    @patch("gitman.commands.display")
    def test_update_with_depth(self, display_command):
        """Verify the 'list' command can be limited by depth."""
        cli.main(["list", "--depth", "10"])
        display_command.assert_called_once_with(
            root=None, depth=10, allow_dirty=True)
def describe_lock():
    """Tests for the `lock` command."""

    @patch("gitman.commands.lock")
    def with_no_arguments(lock_command):
        cli.main(["lock"])
        lock_command.assert_called_once_with(root=None)

    @patch("gitman.commands.lock")
    def with_dependencies(lock_command):
        cli.main(["lock", "foo", "bar"])
        lock_command.assert_called_once_with("foo", "bar", root=None)
class TestUninstall:
    """Unit tests for the `uninstall` command."""

    @patch("gitman.commands.delete")
    def test_uninstall(self, delete_command):
        """Verify the 'uninstall' command can be run."""
        cli.main(["uninstall"])
        delete_command.assert_called_once_with(
            root=None, force=False, keep_location=False)

    @patch("gitman.commands.delete")
    def test_uninstall_root(self, delete_command):
        """Verify the project's root can be specified."""
        cli.main(["uninstall", "--root", "mock/path/to/root"])
        delete_command.assert_called_once_with(
            root="mock/path/to/root", force=False, keep_location=False)

    @patch("gitman.commands.delete")
    def test_uninstall_force(self, delete_command):
        """Verify the 'uninstall' command can be forced."""
        cli.main(["uninstall", "--force"])
        delete_command.assert_called_once_with(
            root=None, force=True, keep_location=False)

    @patch("gitman.commands.delete")
    def test_uninstall_keep_location(self, delete_command):
        """Verify the 'uninstall' command can be run with keep_location."""
        cli.main(["uninstall", "--keep-location"])
        delete_command.assert_called_once_with(
            root=None, force=False, keep_location=True)
def describe_show():
    """Tests for the `show` command."""

    @patch("gitman.commands.show")
    def with_no_arguments(show_command):
        cli.main(["show"])
        show_command.assert_called_once_with(root=None)

    @patch("gitman.commands.show")
    def with_root(show_command):
        cli.main(["show", "--root", "mock/root"])
        show_command.assert_called_once_with(root="mock/root")

    @patch("gitman.commands.show")
    def with_names(show_command):
        cli.main(["show", "foo", "bar"])
        show_command.assert_called_once_with("foo", "bar", root=None)

    @patch("gitman.commands.show")
    def with_config(show_command):
        cli.main(["show", "--config"])
        show_command.assert_called_once_with("__config__", root=None)

    @patch("gitman.commands.show")
    def with_log(show_command):
        cli.main(["show", "--log"])
        show_command.assert_called_once_with("__log__", root=None)
def describe_edit():
    """Tests for the 'edit' command."""

    @patch("gitman.commands.edit")
    def with_no_arguments(mock_edit):
        """Bare 'edit' invokes edit() with the default root."""
        cli.main(["edit"])
        mock_edit.assert_called_once_with(root=None)

    @patch("gitman.commands.edit")
    def with_root(mock_edit):
        """'--root' is forwarded to edit()."""
        cli.main(["edit", "--root", "mock/root"])
        mock_edit.assert_called_once_with(root="mock/root")
def describe_logging():
    """Tests for verbosity configuration via the -v/-q flags."""

    argument_verbosity = [
        (None, 0),
        ("-v", 1),
        ("-vv", 2),
        ("-vvv", 3),
        ("-vvvv", 4),
        ("-vvvvv", 4),  # extra v's beyond four map to the same level
        ("-q", -1),
    ]

    @pytest.mark.parametrize("argument,verbosity", argument_verbosity)
    def at_each_level(argument, verbosity):
        """Each flag combination yields the expected _Config.verbosity."""

        def fake_command(*args, **kwargs):
            # Emit records at several levels so filtering is exercised.
            log.debug(args)
            log.debug(kwargs)
            log.warning("warning")
            log.error("error")
            return True

        cli.main([argument] if argument else [], fake_command)

        expect(_Config.verbosity) == verbosity
|
[
"log.debug",
"unittest.mock.Mock",
"expecter.expect",
"gitman.cli.main",
"unittest.mock.patch",
"pytest.raises",
"log.warning",
"pytest.mark.parametrize",
"log.error"
] |
[((1594, 1623), 'unittest.mock.patch', 'patch', (['"""gitman.commands.init"""'], {}), "('gitman.commands.init')\n", (1599, 1623), False, 'from unittest.mock import Mock, patch\n'), ((1876, 1908), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (1881, 1908), False, 'from unittest.mock import Mock, patch\n'), ((2325, 2357), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (2330, 2357), False, 'from unittest.mock import Mock, patch\n'), ((2828, 2860), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (2833, 2860), False, 'from unittest.mock import Mock, patch\n'), ((3296, 3328), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (3301, 3328), False, 'from unittest.mock import Mock, patch\n'), ((3752, 3784), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (3757, 3784), False, 'from unittest.mock import Mock, patch\n'), ((4219, 4251), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (4224, 4251), False, 'from unittest.mock import Mock, patch\n'), ((4745, 4777), 'unittest.mock.patch', 'patch', (['"""gitman.commands.install"""'], {}), "('gitman.commands.install')\n", (4750, 4777), False, 'from unittest.mock import Mock, patch\n'), ((5626, 5657), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (5631, 5657), False, 'from unittest.mock import Mock, patch\n'), ((6056, 6087), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (6061, 6087), False, 'from unittest.mock import Mock, patch\n'), ((6516, 6547), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (6521, 6547), False, 'from 
unittest.mock import Mock, patch\n'), ((6979, 7010), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (6984, 7010), False, 'from unittest.mock import Mock, patch\n'), ((7453, 7484), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (7458, 7484), False, 'from unittest.mock import Mock, patch\n'), ((7906, 7937), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (7911, 7937), False, 'from unittest.mock import Mock, patch\n'), ((8395, 8426), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (8400, 8426), False, 'from unittest.mock import Mock, patch\n'), ((8905, 8936), 'unittest.mock.patch', 'patch', (['"""gitman.commands.update"""'], {}), "('gitman.commands.update')\n", (8910, 8936), False, 'from unittest.mock import Mock, patch\n'), ((9440, 9472), 'unittest.mock.patch', 'patch', (['"""gitman.commands.display"""'], {}), "('gitman.commands.display')\n", (9445, 9472), False, 'from unittest.mock import Mock, patch\n'), ((9681, 9713), 'unittest.mock.patch', 'patch', (['"""gitman.commands.display"""'], {}), "('gitman.commands.display')\n", (9686, 9713), False, 'from unittest.mock import Mock, patch\n'), ((10001, 10033), 'unittest.mock.patch', 'patch', (['"""gitman.commands.display"""'], {}), "('gitman.commands.display')\n", (10006, 10033), False, 'from unittest.mock import Mock, patch\n'), ((10290, 10322), 'unittest.mock.patch', 'patch', (['"""gitman.commands.display"""'], {}), "('gitman.commands.display')\n", (10295, 10322), False, 'from unittest.mock import Mock, patch\n'), ((10595, 10624), 'unittest.mock.patch', 'patch', (['"""gitman.commands.lock"""'], {}), "('gitman.commands.lock')\n", (10600, 10624), False, 'from unittest.mock import Mock, patch\n'), ((10739, 10768), 'unittest.mock.patch', 'patch', (['"""gitman.commands.lock"""'], {}), 
"('gitman.commands.lock')\n", (10744, 10768), False, 'from unittest.mock import Mock, patch\n'), ((10984, 11015), 'unittest.mock.patch', 'patch', (['"""gitman.commands.delete"""'], {}), "('gitman.commands.delete')\n", (10989, 11015), False, 'from unittest.mock import Mock, patch\n'), ((11272, 11303), 'unittest.mock.patch', 'patch', (['"""gitman.commands.delete"""'], {}), "('gitman.commands.delete')\n", (11277, 11303), False, 'from unittest.mock import Mock, patch\n'), ((11612, 11643), 'unittest.mock.patch', 'patch', (['"""gitman.commands.delete"""'], {}), "('gitman.commands.delete')\n", (11617, 11643), False, 'from unittest.mock import Mock, patch\n'), ((11919, 11950), 'unittest.mock.patch', 'patch', (['"""gitman.commands.delete"""'], {}), "('gitman.commands.delete')\n", (11924, 11950), False, 'from unittest.mock import Mock, patch\n'), ((12280, 12309), 'unittest.mock.patch', 'patch', (['"""gitman.commands.show"""'], {}), "('gitman.commands.show')\n", (12285, 12309), False, 'from unittest.mock import Mock, patch\n'), ((12424, 12453), 'unittest.mock.patch', 'patch', (['"""gitman.commands.show"""'], {}), "('gitman.commands.show')\n", (12429, 12453), False, 'from unittest.mock import Mock, patch\n'), ((12590, 12619), 'unittest.mock.patch', 'patch', (['"""gitman.commands.show"""'], {}), "('gitman.commands.show')\n", (12595, 12619), False, 'from unittest.mock import Mock, patch\n'), ((12755, 12784), 'unittest.mock.patch', 'patch', (['"""gitman.commands.show"""'], {}), "('gitman.commands.show')\n", (12760, 12784), False, 'from unittest.mock import Mock, patch\n'), ((12919, 12948), 'unittest.mock.patch', 'patch', (['"""gitman.commands.show"""'], {}), "('gitman.commands.show')\n", (12924, 12948), False, 'from unittest.mock import Mock, patch\n'), ((13096, 13125), 'unittest.mock.patch', 'patch', (['"""gitman.commands.edit"""'], {}), "('gitman.commands.edit')\n", (13101, 13125), False, 'from unittest.mock import Mock, patch\n'), ((13240, 13269), 'unittest.mock.patch', 
'patch', (['"""gitman.commands.edit"""'], {}), "('gitman.commands.edit')\n", (13245, 13269), False, 'from unittest.mock import Mock, patch\n'), ((13610, 13675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""argument,verbosity"""', 'argument_verbosity'], {}), "('argument,verbosity', argument_verbosity)\n", (13633, 13675), False, 'import pytest\n'), ((458, 481), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (462, 481), False, 'from unittest.mock import Mock, patch\n'), ((491, 518), 'gitman.cli.main', 'cli.main', (['[]', 'mock_function'], {}), '([], mock_function)\n', (499, 518), False, 'from gitman import cli\n'), ((1726, 1744), 'gitman.cli.main', 'cli.main', (["['init']"], {}), "(['init'])\n", (1734, 1744), False, 'from gitman import cli\n'), ((2014, 2035), 'gitman.cli.main', 'cli.main', (["['install']"], {}), "(['install'])\n", (2022, 2035), False, 'from gitman import cli\n'), ((2471, 2523), 'gitman.cli.main', 'cli.main', (["['install', '--root', 'mock/path/to/root']"], {}), "(['install', '--root', 'mock/path/to/root'])\n", (2479, 2523), False, 'from gitman import cli\n'), ((2975, 3007), 'gitman.cli.main', 'cli.main', (["['install', '--force']"], {}), "(['install', '--force'])\n", (2983, 3007), False, 'from gitman import cli\n'), ((3431, 3463), 'gitman.cli.main', 'cli.main', (["['install', '--fetch']"], {}), "(['install', '--fetch'])\n", (3439, 3463), False, 'from gitman import cli\n'), ((3898, 3930), 'gitman.cli.main', 'cli.main', (["['install', '--clean']"], {}), "(['install', '--clean'])\n", (3906, 3930), False, 'from gitman import cli\n'), ((4382, 4417), 'gitman.cli.main', 'cli.main', (["['install', 'foo', 'bar']"], {}), "(['install', 'foo', 'bar'])\n", (4390, 4417), False, 'from gitman import cli\n'), ((4906, 4944), 'gitman.cli.main', 'cli.main', (["['install', '--depth', '10']"], {}), "(['install', '--depth', '10'])\n", (4914, 4944), False, 'from gitman import cli\n'), ((5267, 5273), 
'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (5271, 5273), False, 'from unittest.mock import Mock, patch\n'), ((5760, 5780), 'gitman.cli.main', 'cli.main', (["['update']"], {}), "(['update'])\n", (5768, 5780), False, 'from gitman import cli\n'), ((6212, 6241), 'gitman.cli.main', 'cli.main', (["['update', '--all']"], {}), "(['update', '--all'])\n", (6220, 6241), False, 'from gitman import cli\n'), ((6667, 6702), 'gitman.cli.main', 'cli.main', (["['update', '--skip-lock']"], {}), "(['update', '--skip-lock'])\n", (6675, 6702), False, 'from gitman import cli\n'), ((7140, 7178), 'gitman.cli.main', 'cli.main', (["['update', '--skip-changes']"], {}), "(['update', '--skip-changes'])\n", (7148, 7178), False, 'from gitman import cli\n'), ((7600, 7631), 'gitman.cli.main', 'cli.main', (["['update', '--force']"], {}), "(['update', '--force'])\n", (7608, 7631), False, 'from gitman import cli\n'), ((8077, 8120), 'gitman.cli.main', 'cli.main', (["['update', '--force-interactive']"], {}), "(['update', '--force-interactive'])\n", (8085, 8120), False, 'from gitman import cli\n'), ((8556, 8590), 'gitman.cli.main', 'cli.main', (["['update', 'foo', 'bar']"], {}), "(['update', 'foo', 'bar'])\n", (8564, 8590), False, 'from gitman import cli\n'), ((9063, 9100), 'gitman.cli.main', 'cli.main', (["['update', '--depth', '10']"], {}), "(['update', '--depth', '10'])\n", (9071, 9100), False, 'from gitman import cli\n'), ((9572, 9590), 'gitman.cli.main', 'cli.main', (["['list']"], {}), "(['list'])\n", (9580, 9590), False, 'from gitman import cli\n'), ((9824, 9873), 'gitman.cli.main', 'cli.main', (["['list', '--root', 'mock/path/to/root']"], {}), "(['list', '--root', 'mock/path/to/root'])\n", (9832, 9873), False, 'from gitman import cli\n'), ((10161, 10198), 'gitman.cli.main', 'cli.main', (["['list', '--fail-if-dirty']"], {}), "(['list', '--fail-if-dirty'])\n", (10169, 10198), False, 'from gitman import cli\n'), ((10447, 10482), 'gitman.cli.main', 'cli.main', (["['list', '--depth', '10']"], {}), 
"(['list', '--depth', '10'])\n", (10455, 10482), False, 'from gitman import cli\n'), ((10666, 10684), 'gitman.cli.main', 'cli.main', (["['lock']"], {}), "(['lock'])\n", (10674, 10684), False, 'from gitman import cli\n'), ((10810, 10842), 'gitman.cli.main', 'cli.main', (["['lock', 'foo', 'bar']"], {}), "(['lock', 'foo', 'bar'])\n", (10818, 10842), False, 'from gitman import cli\n'), ((11127, 11150), 'gitman.cli.main', 'cli.main', (["['uninstall']"], {}), "(['uninstall'])\n", (11135, 11150), False, 'from gitman import cli\n'), ((11421, 11475), 'gitman.cli.main', 'cli.main', (["['uninstall', '--root', 'mock/path/to/root']"], {}), "(['uninstall', '--root', 'mock/path/to/root'])\n", (11429, 11475), False, 'from gitman import cli\n'), ((11764, 11798), 'gitman.cli.main', 'cli.main', (["['uninstall', '--force']"], {}), "(['uninstall', '--force'])\n", (11772, 11798), False, 'from gitman import cli\n'), ((12095, 12137), 'gitman.cli.main', 'cli.main', (["['uninstall', '--keep-location']"], {}), "(['uninstall', '--keep-location'])\n", (12103, 12137), False, 'from gitman import cli\n'), ((12351, 12369), 'gitman.cli.main', 'cli.main', (["['show']"], {}), "(['show'])\n", (12359, 12369), False, 'from gitman import cli\n'), ((12487, 12528), 'gitman.cli.main', 'cli.main', (["['show', '--root', 'mock/root']"], {}), "(['show', '--root', 'mock/root'])\n", (12495, 12528), False, 'from gitman import cli\n'), ((12654, 12686), 'gitman.cli.main', 'cli.main', (["['show', 'foo', 'bar']"], {}), "(['show', 'foo', 'bar'])\n", (12662, 12686), False, 'from gitman import cli\n'), ((12820, 12850), 'gitman.cli.main', 'cli.main', (["['show', '--config']"], {}), "(['show', '--config'])\n", (12828, 12850), False, 'from gitman import cli\n'), ((12981, 13008), 'gitman.cli.main', 'cli.main', (["['show', '--log']"], {}), "(['show', '--log'])\n", (12989, 13008), False, 'from gitman import cli\n'), ((13167, 13185), 'gitman.cli.main', 'cli.main', (["['edit']"], {}), "(['edit'])\n", (13175, 13185), False, 'from 
gitman import cli\n'), ((13303, 13344), 'gitman.cli.main', 'cli.main', (["['edit', '--root', 'mock/root']"], {}), "(['edit', '--root', 'mock/root'])\n", (13311, 13344), False, 'from gitman import cli\n'), ((13916, 13966), 'gitman.cli.main', 'cli.main', (['([argument] if argument else [])', 'function'], {}), '([argument] if argument else [], function)\n', (13924, 13966), False, 'from gitman import cli\n'), ((665, 690), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (678, 690), False, 'import pytest\n'), ((840, 865), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (853, 865), False, 'import pytest\n'), ((879, 899), 'gitman.cli.main', 'cli.main', (["['--help']"], {}), "(['--help'])\n", (887, 899), False, 'from gitman import cli\n'), ((1002, 1027), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1015, 1027), False, 'import pytest\n'), ((1041, 1053), 'gitman.cli.main', 'cli.main', (['[]'], {}), '([])\n', (1049, 1053), False, 'from gitman import cli\n'), ((1154, 1179), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1167, 1179), False, 'import pytest\n'), ((1337, 1362), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1350, 1362), False, 'import pytest\n'), ((1440, 1465), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1453, 1465), False, 'import pytest\n'), ((5385, 5410), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (5398, 5410), False, 'import pytest\n'), ((5424, 5461), 'gitman.cli.main', 'cli.main', (["['install', '--depth', '0']"], {}), "(['install', '--depth', '0'])\n", (5432, 5461), False, 'from gitman import cli\n'), ((5475, 5500), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (5488, 5500), False, 'import pytest\n'), ((5514, 5552), 'gitman.cli.main', 'cli.main', (["['install', '--depth', '-1']"], {}), "(['install', '--depth', '-1'])\n", (5522, 5552), 
False, 'from gitman import cli\n'), ((13771, 13786), 'log.debug', 'log.debug', (['args'], {}), '(args)\n', (13780, 13786), False, 'import log\n'), ((13799, 13816), 'log.debug', 'log.debug', (['kwargs'], {}), '(kwargs)\n', (13808, 13816), False, 'import log\n'), ((13829, 13851), 'log.warning', 'log.warning', (['"""warning"""'], {}), "('warning')\n", (13840, 13851), False, 'import log\n'), ((13864, 13882), 'log.error', 'log.error', (['"""error"""'], {}), "('error')\n", (13873, 13882), False, 'import log\n'), ((13975, 14000), 'expecter.expect', 'expect', (['_Config.verbosity'], {}), '(_Config.verbosity)\n', (13981, 14000), False, 'from expecter import expect\n'), ((717, 741), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (721, 741), False, 'from unittest.mock import Mock, patch\n'), ((1206, 1241), 'unittest.mock.Mock', 'Mock', ([], {'side_effect': 'KeyboardInterrupt'}), '(side_effect=KeyboardInterrupt)\n', (1210, 1241), False, 'from unittest.mock import Mock, patch\n'), ((1389, 1425), 'unittest.mock.Mock', 'Mock', ([], {'side_effect': 'UncommittedChanges'}), '(side_effect=UncommittedChanges)\n', (1393, 1425), False, 'from unittest.mock import Mock, patch\n'), ((1492, 1523), 'unittest.mock.Mock', 'Mock', ([], {'side_effect': 'ScriptFailure'}), '(side_effect=ScriptFailure)\n', (1496, 1523), False, 'from unittest.mock import Mock, patch\n')]
|
# SPDX-FileCopyrightText: 2018 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_l3gd20`
====================================================
Adafruit Triple-Axis Gyroscope Breakout - L3GD20
This is a CircuitPython driver for the L3GD20 three-axis gyroscope
module.
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* `L3GD20H Triple-Axis Gyro Breakout Board <https://www.adafruit.com/product/1032>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
# imports
from micropython import const
from adafruit_register.i2c_struct import Struct

try:
    from struct import unpack
except ImportError:
    # MicroPython exposes the struct module under the name "ustruct".
    from ustruct import unpack

__version__ = "2.3.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_L3GD20.git"
# Full-scale range selections (written to CTRL_REG4 by L3GD20.__init__)
L3DS20_RANGE_250DPS = const(0)
L3DS20_RANGE_500DPS = const(1)
L3DS20_RANGE_2000DPS = const(2)
# Output data-rate selections (upper bits of CTRL_REG1)
L3DS20_RATE_100HZ = const(0x00)
L3DS20_RATE_200HZ = const(0x40)
L3DS20_RATE_400HZ = const(0x80)
L3DS20_RATE_800HZ = const(0xC0)
# Device register addresses
_L3GD20_REGISTER_CTRL_REG1 = const(0x20)
_L3GD20_REGISTER_CTRL_REG4 = const(0x23)
# _L3GD20_REGISTER_OUT_X_L = const(0x28)
# OUT_X_L (0x28) with bit 7 (0x80) set — presumably the I2C auto-increment
# flag for multi-byte reads; see the I2C gyro_raw Struct below.
_L3GD20_REGISTER_OUT_X_L_X80 = const(0xA8)
# OUT_X_L (0x28) with bit 6 (0x40) set — used for the SPI multi-byte read.
_L3GD20_REGISTER_OUT_X_L_X40 = const(0x68)
# WHO_AM_I register and the two accepted chip ids (L3GD20 and L3GD20H).
_ID_REGISTER = const(0x0F)
_L3GD20_CHIP_ID = const(0xD4)
_L3GD20H_CHIP_ID = const(0xD7)
# Sensitivity (scale factor per LSB) applied for each full-scale range.
_L3GD20_SENSITIVITY_250DPS = 0.00875  ## Roughly 22/256 for fixed point match
_L3GD20_SENSITIVITY_500DPS = 0.0175  ## Roughly 45/256
_L3GD20_SENSITIVITY_2000DPS = 0.070  ## Roughly 18/256
# pylint: disable=no-member
class L3GD20:
    """
    Driver for the L3GD20 3-axis Gyroscope sensor.
    :param int rng: a range value one of L3DS20_RANGE_250DPS (default), L3DS20_RANGE_500DPS, or
    L3DS20_RANGE_2000DPS
    :param int rate: a rate value one of L3DS20_RATE_100HZ (default), L3DS20_RATE_200HZ,
    L3DS20_RATE_400HZ, or L3DS20_RATE_800HZ
    """
    def __init__(self, rng=L3DS20_RANGE_250DPS, rate=L3DS20_RATE_100HZ):
        """Verify the chip id, validate *rng*, and program the control registers.

        Uses ``read_register``/``write_register``, which must be supplied by a
        transport subclass (see L3GD20_I2C and L3GD20_SPI).

        :raises RuntimeError: if the id register matches neither the L3GD20
            nor the L3GD20H chip id
        :raises ValueError: if *rng* is not one of the three range constants
        """
        chip_id = self.read_register(_ID_REGISTER)
        if chip_id not in (_L3GD20_CHIP_ID, _L3GD20H_CHIP_ID):
            raise RuntimeError(
                "bad chip id (%x != %x or %x)"
                % (chip_id, _L3GD20_CHIP_ID, _L3GD20H_CHIP_ID)
            )
        if rng not in (L3DS20_RANGE_250DPS, L3DS20_RANGE_500DPS, L3DS20_RANGE_2000DPS):
            raise ValueError(
                "Range value must be one of L3DS20_RANGE_250DPS, "
                "L3DS20_RANGE_500DPS, or L3DS20_RANGE_2000DPS"
            )
        # Set CTRL_REG1 (0x20)
        # ====================================================================
        # BIT  Symbol    Description                                   Default
        # ---  ------    --------------------------------------------- -------
        # 7-6  DR1#0     Output data rate
        # 5-4  BW1#0     Bandwidth selection
        #   3  PD        0 = Power-down mode, 1 = normal#sleep mode
        #   2  ZEN       Z-axis enable (0 = disabled, 1 = enabled)
        #   1  YEN       Y-axis enable (0 = disabled, 1 = enabled)
        #   0  XEN       X-axis enable (0 = disabled, 1 = enabled)
        # Switch to normal mode and enable all three channels
        self.write_register(_L3GD20_REGISTER_CTRL_REG1, rate | 0x0F)
        # Set CTRL_REG2 (0x21)
        # ====================================================================
        # BIT  Symbol    Description                                   Default
        # ---  ------    --------------------------------------------- -------
        # 5-4  HPM1#0    High-pass filter mode selection
        # 3-0  HPCF3..0  High-pass filter cutoff frequency selection
        # Nothing to do ... keep default values
        # ------------------------------------------------------------------
        # Set CTRL_REG3 (0x22)
        # ====================================================================
        # BIT  Symbol    Description                                   Default
        # ---  ------    --------------------------------------------- -------
        #   7  I1_Int1   Interrupt enable on INT1 (0=disable,1=enable)
        #   6  I1_Boot   Boot status on INT1 (0=disable,1=enable)
        #   5  H-Lactive Interrupt active config on INT1 (0=high,1=low)
        #   4  PP_OD     Push-Pull#Open-Drain (0=PP, 1=OD)
        #   3  I2_DRDY   Data ready on DRDY#INT2 (0=disable,1=enable)
        #   2  I2_WTM    FIFO wtrmrk int on DRDY#INT2 (0=dsbl,1=enbl)
        #   1  I2_ORun   FIFO overrun int on DRDY#INT2 (0=dsbl,1=enbl)
        #   0  I2_Empty  FIFI empty int on DRDY#INT2 (0=dsbl,1=enbl)
        # Nothing to do ... keep default values
        # -----------------------------------------------------------------
        # Set CTRL_REG4 (0x23)
        # ====================================================================
        # BIT  Symbol    Description                                   Default
        # ---  ------    --------------------------------------------- -------
        #   7  BDU       Block Data Update (0=continuous, 1=LSB#MSB)
        #   6  BLE       Big#Little-Endian (0=Data LSB, 1=Data MSB)
        # 5-4  FS1#0     Full scale selection
        #                                 00 = 250 dps
        #                                 01 = 500 dps
        #                                 10 = 2000 dps
        #                                 11 = 2000 dps
        #   0  SIM       SPI Mode (0=4-wire, 1=3-wire)
        # Adjust resolution if requested
        # (rng was validated above, so exactly one branch below runs;
        # self.scale is the per-LSB factor applied by the gyro property)
        if rng == L3DS20_RANGE_250DPS:
            self.scale = _L3GD20_SENSITIVITY_250DPS
            self.write_register(_L3GD20_REGISTER_CTRL_REG4, 0x00)
        if rng == L3DS20_RANGE_500DPS:
            self.scale = _L3GD20_SENSITIVITY_500DPS
            self.write_register(_L3GD20_REGISTER_CTRL_REG4, 0x10)
        if rng == L3DS20_RANGE_2000DPS:
            self.scale = _L3GD20_SENSITIVITY_2000DPS
            self.write_register(_L3GD20_REGISTER_CTRL_REG4, 0x20)
        # ------------------------------------------------------------------
        # Set CTRL_REG5 (0x24)
        # ====================================================================
        # BIT  Symbol    Description                                   Default
        # ---  ------    --------------------------------------------- -------
        #   7  BOOT      Reboot memory content (0=normal, 1=reboot)
        #   6  FIFO_EN   FIFO enable (0=FIFO disable, 1=enable)
        #   4  HPen      High-pass filter enable (0=disable,1=enable)
        # 3-2  INT1_SEL  INT1 Selection config
        # 1-0  OUT_SEL   Out selection config
        # Nothing to do ... keep default values
        # ------------------------------------------------------------------
    @property
    def gyro(self):
        """
        x, y, z angular momentum tuple floats, rescaled appropriately for
        range selected
        """
        # gyro_raw is supplied by the transport subclass; each raw axis is
        # multiplied by the sensitivity chosen in __init__.
        raw = self.gyro_raw
        return tuple(self.scale * v for v in raw)
class L3GD20_I2C(L3GD20):
    """
    Driver for L3GD20 Gyroscope using I2C communications
    :param ~busio.I2C i2c: initialized busio I2C class
    :param int rng: the optional range value: L3DS20_RANGE_250DPS(default), L3DS20_RANGE_500DPS, or
    L3DS20_RANGE_2000DPS
    :param address: the optional device address, 0x6B is the default address
    """
    # NOTE(review): the docstring below says rad/s, but the sensitivity
    # constants applied by L3GD20.gyro are expressed in degrees-per-second
    # per LSB — confirm the intended unit.
    gyro_raw = Struct(_L3GD20_REGISTER_OUT_X_L_X80, "<hhh")
    """Gives the raw gyro readings, in units of rad/s."""
    def __init__(
        self, i2c, rng=L3DS20_RANGE_250DPS, address=0x6B, rate=L3DS20_RATE_100HZ
    ):
        """Create the I2C transport, then run the shared L3GD20 setup."""
        import adafruit_bus_device.i2c_device as i2c_device  # pylint: disable=import-outside-toplevel
        self.i2c_device = i2c_device.I2CDevice(i2c, address)
        # Reusable two-byte scratch buffer: [register, value].
        self.buffer = bytearray(2)
        super().__init__(rng, rate)
    def write_register(self, register, value):
        """
        Update a register with a byte value
        :param int register: which device register to write
        :param value: a byte to write
        """
        self.buffer[0] = register
        self.buffer[1] = value
        with self.i2c_device as i2c:
            i2c.write(self.buffer)
    def read_register(self, register):
        """
        Returns a byte value from a register
        :param register: the register to read a byte
        """
        self.buffer[0] = register
        with self.i2c_device as i2c:
            # Send the register address, then read one byte back into the
            # same buffer; out_end/in_start keep the address byte intact.
            i2c.write_then_readinto(self.buffer, self.buffer, out_end=1, in_start=1)
        return self.buffer[1]
class L3GD20_SPI(L3GD20):
    """
    Driver for L3GD20 Gyroscope using SPI communications
    :param ~busio.SPI spi_busio: initialized busio SPI class
    :param ~digitalio.DigitalInOut cs: digital in/out to use as chip select signal
    :param int rng: the optional range value: L3DS20_RANGE_250DPS(default), L3DS20_RANGE_500DPS, or
    L3DS20_RANGE_2000DPS
    :param baudrate: spi baud rate default is 100000
    """
    def __init__(
        self,
        spi_busio,
        cs,
        rng=L3DS20_RANGE_250DPS,
        baudrate=100000,
        rate=L3DS20_RATE_100HZ,
    ):  # pylint: disable=too-many-arguments
        import adafruit_bus_device.spi_device as spi_device  # pylint: disable=import-outside-toplevel
        self._spi = spi_device.SPIDevice(spi_busio, cs, baudrate=baudrate)
        # Reusable scratch buffers: one byte for register I/O, six bytes
        # for a full x/y/z sample.
        self._spi_bytearray1 = bytearray(1)
        self._spi_bytearray6 = bytearray(6)
        super().__init__(rng, rate)
    def write_register(self, register, value):
        """
        Low level register writing over SPI, writes one 8-bit value
        :param int register: which device register to write
        :param value: a byte to write
        """
        payload = bytes([register & 0x7F, value & 0xFF])  # bit 7 low = write
        with self._spi as spi:
            spi.write(payload)
    def read_register(self, register):
        """
        Low level register reading over SPI, returns a list of values
        :param register: the register to read a byte
        """
        buf = self._spi_bytearray1
        buf[0] = (register | 0x80) & 0xFF  # bit 7 high = read single
        with self._spi as spi:
            spi.write(buf)
            spi.readinto(buf)
        return buf[0]
    def read_bytes(self, register, buffer):
        """
        Low level register stream reading over SPI, returns a list of values
        :param register: the register to read bytes
        :param bytearray buffer: buffer to fill with data from stream
        """
        cmd = self._spi_bytearray1
        cmd[0] = (register | 0x80) & 0xFF  # bit 7 high = read
        with self._spi as spi:
            spi.write(cmd)
            spi.readinto(buffer)
    @property
    def gyro_raw(self):
        """Gives the raw gyro readings, in units of rad/s."""
        sample = self._spi_bytearray6
        self.read_bytes(_L3GD20_REGISTER_OUT_X_L_X40, sample)
        return unpack("<hhh", sample)
|
[
"adafruit_register.i2c_struct.Struct",
"micropython.const",
"adafruit_bus_device.spi_device.SPIDevice",
"ustruct.unpack",
"adafruit_bus_device.i2c_device.I2CDevice"
] |
[((1171, 1179), 'micropython.const', 'const', (['(0)'], {}), '(0)\n', (1176, 1179), False, 'from micropython import const\n'), ((1202, 1210), 'micropython.const', 'const', (['(1)'], {}), '(1)\n', (1207, 1210), False, 'from micropython import const\n'), ((1234, 1242), 'micropython.const', 'const', (['(2)'], {}), '(2)\n', (1239, 1242), False, 'from micropython import const\n'), ((1264, 1272), 'micropython.const', 'const', (['(0)'], {}), '(0)\n', (1269, 1272), False, 'from micropython import const\n'), ((1296, 1305), 'micropython.const', 'const', (['(64)'], {}), '(64)\n', (1301, 1305), False, 'from micropython import const\n'), ((1328, 1338), 'micropython.const', 'const', (['(128)'], {}), '(128)\n', (1333, 1338), False, 'from micropython import const\n'), ((1360, 1370), 'micropython.const', 'const', (['(192)'], {}), '(192)\n', (1365, 1370), False, 'from micropython import const\n'), ((1402, 1411), 'micropython.const', 'const', (['(32)'], {}), '(32)\n', (1407, 1411), False, 'from micropython import const\n'), ((1443, 1452), 'micropython.const', 'const', (['(35)'], {}), '(35)\n', (1448, 1452), False, 'from micropython import const\n'), ((1528, 1538), 'micropython.const', 'const', (['(168)'], {}), '(168)\n', (1533, 1538), False, 'from micropython import const\n'), ((1571, 1581), 'micropython.const', 'const', (['(104)'], {}), '(104)\n', (1576, 1581), False, 'from micropython import const\n'), ((1599, 1608), 'micropython.const', 'const', (['(15)'], {}), '(15)\n', (1604, 1608), False, 'from micropython import const\n'), ((1630, 1640), 'micropython.const', 'const', (['(212)'], {}), '(212)\n', (1635, 1640), False, 'from micropython import const\n'), ((1661, 1671), 'micropython.const', 'const', (['(215)'], {}), '(215)\n', (1666, 1671), False, 'from micropython import const\n'), ((7732, 7776), 'adafruit_register.i2c_struct.Struct', 'Struct', (['_L3GD20_REGISTER_OUT_X_L_X80', '"""<hhh"""'], {}), "(_L3GD20_REGISTER_OUT_X_L_X80, '<hhh')\n", (7738, 7776), False, 'from 
adafruit_register.i2c_struct import Struct\n'), ((8072, 8106), 'adafruit_bus_device.i2c_device.I2CDevice', 'i2c_device.I2CDevice', (['i2c', 'address'], {}), '(i2c, address)\n', (8092, 8106), True, 'import adafruit_bus_device.i2c_device as i2c_device\n'), ((9630, 9684), 'adafruit_bus_device.spi_device.SPIDevice', 'spi_device.SPIDevice', (['spi_busio', 'cs'], {'baudrate': 'baudrate'}), '(spi_busio, cs, baudrate=baudrate)\n', (9650, 9684), True, 'import adafruit_bus_device.spi_device as spi_device\n'), ((11495, 11517), 'ustruct.unpack', 'unpack', (['"""<hhh"""', 'buffer'], {}), "('<hhh', buffer)\n", (11501, 11517), False, 'from ustruct import unpack\n')]
|
from __future__ import print_function, absolute_import, unicode_literals, division
import six
from six.moves import (zip, filter, map, reduce, input, range)
from IPython.core.display import Image as display_image
from .dot import (
render_nx_as_dot, clear_formatting,
format_graph_for_lifespan,
format_graph_for_worm_counts,
format_graph_for_true_counts,
format_graph_for_moved
)
from ..subgraph import nearby, neartime
def look(graph, target, jumps, ref=False, ctype='lifespan'):
    """
    In *graph*, a waldo.network.Graph, around *target*, show the network out to
    an (undirected) distance of *jumps*. Optionally show a colored reference.
    """
    subgraph = nearby(graph, target, jumps)
    # Map each ctype to its formatter; an unknown ctype leaves the subgraph
    # unformatted, matching the original if/elif chain's fall-through.
    formatters = {
        'lifespan': lambda g: format_graph_for_lifespan(
            g, ref=ref, focus=graph.where_is(target)),
        'worm_count': lambda g: format_graph_for_worm_counts(g, ref=ref),
        'true_count': lambda g: format_graph_for_true_counts(g, ref=ref),
        'moved_bool': lambda g: format_graph_for_moved(g, ref=ref),
    }
    formatter = formatters.get(ctype)
    if formatter is not None:
        formatter(subgraph)
    return display_image(render_nx_as_dot(subgraph))
def save_graphs(ex_id, graph, target, jumps, ref=False):
    """
    In *graph*, a waldo.network.Graph, around *target*, show the network out to
    an (undirected) distance of *jumps*. Optionally show a colored reference.

    Renders the subgraph four times (lifespan, worm counts, true counts,
    moved), writing each to a '<ex_id>_*.gv' file, and returns an IPython
    image of the last rendering only.
    """
    # NOTE(review): unlike look(), the *ref* parameter is accepted but never
    # forwarded to any format_graph_for_* call — confirm whether that is
    # intentional.
    subgraph = nearby(graph, target, jumps)
    format_graph_for_lifespan(subgraph, focus=graph.where_is(target))
    of = '{eid}_lifespan.gv'.format(eid=ex_id)
    print(of)
    temp_file = render_nx_as_dot(subgraph, output_file=of)
    clear_formatting(subgraph)
    format_graph_for_worm_counts(subgraph)
    of = '{eid}_worm_counts.gv'.format(eid=ex_id)
    temp_file = render_nx_as_dot(subgraph, output_file=of)
    clear_formatting(subgraph)
    format_graph_for_true_counts(subgraph)
    of = '{eid}_true_counts.gv'.format(eid=ex_id)
    temp_file = render_nx_as_dot(subgraph, output_file=of)
    clear_formatting(subgraph)
    # NOTE(review): the "moved" formatting is written to a file named
    # '*_seed_counts.gv' — possibly a copy/paste slip in the filename.
    format_graph_for_moved(subgraph)
    of = '{eid}_seed_counts.gv'.format(eid=ex_id)
    temp_file = render_nx_as_dot(subgraph, output_file=of)
    clear_formatting(subgraph)
    return display_image(temp_file)
def look2(graph, target, jumps, ref=False):
    """
    In *graph*, a waldo.network.Graph, around *target*, show the network out to
    an (undirected) distance of *jumps*, formatted by worm counts. Optionally
    show a colored reference.
    """
    sub = nearby(graph, target, jumps)
    format_graph_for_worm_counts(sub, ref=ref)
    return display_image(render_nx_as_dot(sub))
def look_time(graph, fstart, fend, ref=False):
    """Show the subnetwork of *graph* between frames *fstart* and *fend*.

    Renders the time-restricted subgraph to dot and returns an IPython image.
    """
    subgraph = neartime(graph, fstart, fend)
    # NOTE(review): unlike the other viewers here, *ref* is forwarded to
    # render_nx_as_dot() rather than to a format_graph_for_* helper —
    # confirm render_nx_as_dot actually accepts a `ref` keyword.
    temp_file = render_nx_as_dot(subgraph, ref=ref)
    return display_image(temp_file)
|
[
"IPython.core.display.Image"
] |
[((1148, 1172), 'IPython.core.display.Image', 'display_image', (['temp_file'], {}), '(temp_file)\n', (1161, 1172), True, 'from IPython.core.display import Image as display_image\n'), ((2234, 2258), 'IPython.core.display.Image', 'display_image', (['temp_file'], {}), '(temp_file)\n', (2247, 2258), True, 'from IPython.core.display import Image as display_image\n'), ((2630, 2654), 'IPython.core.display.Image', 'display_image', (['temp_file'], {}), '(temp_file)\n', (2643, 2654), True, 'from IPython.core.display import Image as display_image\n'), ((2811, 2835), 'IPython.core.display.Image', 'display_image', (['temp_file'], {}), '(temp_file)\n', (2824, 2835), True, 'from IPython.core.display import Image as display_image\n')]
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import math
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
try:
log2 = math.log2
except:
def log2(v):
return math.log(v, 2)
def mkLed(numports=8):
if log2(numports) % 1.0 != 0.0:
raise ValueError('numports must be power of 2')
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
idata = [ m.Input('idata_%02d' % i, 32) for i in range(numports) ]
ivalid = [ m.Input('ivalid_%02d' % i) for i in range(numports) ]
odata = m.OutputReg('odata', 32, initval=0)
ovalid = m.OutputReg('ovalid', initval=0)
seq = Seq(m, 'seq', clk, rst)
pdata = idata
pvalid = ivalid
ndata = []
nvalid = []
for s in range( int(log2(numports)) ):
for i in range( numports >> (s+1) ):
td = m.TmpReg(32, initval=0)
tv = m.TmpReg(initval=0)
ndata.append(td)
nvalid.append(tv)
cond = AndList(pvalid[i*2], pvalid[i*2+1])
seq.If(cond)(
td(pdata[i*2] + pdata[i*2+1])
)
seq(
tv(cond)
)
pdata = ndata
pvalid = nvalid
ndata = []
nvalid = []
seq(
odata(pdata[-1])
)
seq(
ovalid(pvalid[-1])
)
seq.make_always()
return m
def mkTest():
m = Module('test')
# target instance
led = mkLed()
# copy paras and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
idata = [ p for k, p in sorted(ports.items(), key=lambda x:x[0]) if k.startswith('idata') ]
ivalid = [ p for k, p in sorted(ports.items(), key=lambda x:x[0]) if k.startswith('ivalid') ]
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
reset_stmt = []
for d in idata:
reset_stmt.append( d(0) )
for v in ivalid:
reset_stmt.append( v(0) )
simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, reset_stmt, period=100)
nclk = simulation.next_clock
init.add(
Delay(1000),
nclk(clk),
[ d(0) for d in idata ],
[ v(0) for v in ivalid ],
nclk(clk),
[ d(i+1) for i, d in enumerate(idata) ],
[ v(1) for v in ivalid ],
nclk(clk),
[ d(1) for d in idata ],
[ v(0) for v in ivalid ],
nclk(clk),
[ d(i+10) for i, d in enumerate(idata) ],
[ v(1) for v in ivalid ],
nclk(clk),
[ v(0) for v in ivalid ],
[ nclk(clk) for _ in range(10) ],
Systask('finish'),
)
return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('tmp.v')
print(verilog)
|
[
"math.log",
"os.path.abspath"
] |
[((385, 399), 'math.log', 'math.log', (['v', '(2)'], {}), '(v, 2)\n', (393, 399), False, 'import math\n'), ((260, 285), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (275, 285), False, 'import os\n')]
|
"""
Utilities for working with datasets in
`YOLO format <https://github.com/AlexeyAB/darknet>`_.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.core.labels as fol
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
class YOLOSampleParser(foud.ImageDetectionSampleParser):
"""Parser for samples in
`YOLO format <https://github.com/AlexeyAB/darknet>`_.
This implementation supports samples that are
``(image_or_path, anno_txt_path)`` tuples, where:
- ``image_or_path`` is either an image that can be converted to numpy
format via ``np.asarray()`` or the path to an image on disk
- ``anno_txt_path`` is the path to a YOLO labels TXT file on disk. Or,
for unlabeled images, ``anno_txt_path`` can be ``None``.
See :class:`fiftyone.types.dataset_types.YOLODataset` for format details.
Args:
classes (None): a list of class label strings. If provided, it is
assumed that the ``target`` values are class IDs that should be
mapped to label strings via ``classes[target]``
"""
def __init__(self, classes=None):
super().__init__(
label_field=None,
bounding_box_field=None,
confidence_field=None,
attributes_field=None,
classes=classes,
normalized=True,
)
def _parse_label(self, target, img=None):
if target is None:
return None
return load_yolo_annotations(target, self.classes)
class YOLODatasetImporter(foud.LabeledImageDatasetImporter):
"""Importer for YOLO datasets stored on disk.
See :class:`fiftyone.types.dataset_types.YOLODataset` for format details.
Args:
dataset_dir: the dataset directory
skip_unlabeled (False): whether to skip unlabeled images when importing
shuffle (False): whether to randomly shuffle the order in which the
samples are imported
seed (None): a random seed to use when shuffling
max_samples (None): a maximum number of samples to import. By default,
all samples are imported
"""
def __init__(
self,
dataset_dir,
skip_unlabeled=False,
shuffle=False,
seed=None,
max_samples=None,
):
super().__init__(
dataset_dir,
skip_unlabeled=skip_unlabeled,
shuffle=shuffle,
seed=seed,
max_samples=max_samples,
)
self._classes = None
self._info = None
self._uuids_to_image_paths = None
self._uuids_to_labels_paths = None
self._uuids = None
self._iter_uuids = None
self._num_samples = None
def __iter__(self):
self._iter_uuids = iter(self._uuids)
return self
def __len__(self):
return self._num_samples
def __next__(self):
uuid = next(self._iter_uuids)
try:
image_path = self._uuids_to_image_paths[uuid]
except KeyError:
raise ValueError("No image found for sample '%s'" % uuid)
labels_path = self._uuids_to_labels_paths.get(uuid, None)
if labels_path:
# Labeled image
detections = load_yolo_annotations(labels_path, self._classes)
else:
# Unlabeled image
detections = None
return image_path, None, detections
@property
def has_dataset_info(self):
return True
@property
def has_image_metadata(self):
return False
@property
def label_cls(self):
return fol.Detections
def setup(self):
classes_path = os.path.join(self.dataset_dir, "obj.names")
if os.path.exists(classes_path):
classes = _read_file_lines(classes_path)
else:
classes = None
info = {}
if classes is not None:
info["classes"] = classes
images_path = os.path.join(self.dataset_dir, "images.txt")
if os.path.exists(images_path):
images = _read_file_lines(images_path)
else:
images = []
uuids = []
uuids_to_image_paths = {}
uuids_to_labels_paths = {}
for image in images:
uuid = os.path.splitext(os.path.basename(image))[0]
uuids.append(uuid)
uuids_to_image_paths[uuid] = os.path.join(self.dataset_dir, image)
labels_path = os.path.join(
self.dataset_dir, os.path.splitext(image)[0] + ".txt"
)
if os.path.exists(labels_path):
uuids_to_labels_paths[uuid] = labels_path
if self.skip_unlabeled:
uuids = list(uuids_to_labels_paths.keys())
self._classes = classes
self._info = info
self._uuids = self._preprocess_list(uuids)
self._uuids_to_image_paths = uuids_to_image_paths
self._uuids_to_labels_paths = uuids_to_labels_paths
self._num_samples = len(self._uuids)
def get_dataset_info(self):
return self._info
class YOLODatasetExporter(foud.LabeledImageDatasetExporter):
"""Exporter that writes YOLO datasets to disk.
See :class:`fiftyone.types.dataset_types.YOLODataset` for format details.
Args:
export_dir: the directory to write the export
classes (None): the list of possible class labels. If not provided,
this list will be extracted when :meth:`log_collection` is called,
if possible
image_format (None): the image format to use when writing in-memory
images to disk. By default, ``fiftyone.config.default_image_ext``
is used
"""
def __init__(self, export_dir, classes=None, image_format=None):
if image_format is None:
image_format = fo.config.default_image_ext
super().__init__(export_dir)
self.classes = classes
self.image_format = image_format
self._classes = None
self._dynamic_classes = classes is None
self._labels_map_rev = None
self._obj_names_path = None
self._images_path = None
self._data_dir = None
self._images = None
self._filename_maker = None
self._writer = None
@property
def requires_image_metadata(self):
return False
@property
def label_cls(self):
return fol.Detections
def setup(self):
self._obj_names_path = os.path.join(self.export_dir, "obj.names")
self._images_path = os.path.join(self.export_dir, "images.txt")
self._data_dir = os.path.join(self.export_dir, "data")
self._classes = {}
self._labels_map_rev = {}
self._images = []
self._filename_maker = fou.UniqueFilenameMaker(
output_dir=self._data_dir,
default_ext=self.image_format,
ignore_exts=True,
)
self._writer = YOLOAnnotationWriter()
etau.ensure_dir(self._data_dir)
self._parse_classes()
def log_collection(self, sample_collection):
if self.classes is None:
if sample_collection.default_classes:
self.classes = sample_collection.default_classes
self._parse_classes()
self._dynamic_classes = False
elif sample_collection.classes:
self.classes = next(iter(sample_collection.classes.values()))
self._parse_classes()
self._dynamic_classes = False
elif "classes" in sample_collection.info:
self.classes = sample_collection.info["classes"]
self._parse_classes()
self._dynamic_classes = False
def export_sample(self, image_or_path, detections, metadata=None):
out_image_path = self._export_image_or_path(
image_or_path, self._filename_maker
)
if detections is None:
return
self._images.append(os.path.relpath(out_image_path, self.export_dir))
out_labels_path = os.path.splitext(out_image_path)[0] + ".txt"
self._writer.write(
detections,
out_labels_path,
self._labels_map_rev,
dynamic_classes=self._dynamic_classes,
)
def close(self, *args):
if self._dynamic_classes:
classes = _to_classes(self._labels_map_rev)
else:
classes = self.classes
_write_file_lines(classes, self._obj_names_path)
_write_file_lines(self._images, self._images_path)
def _parse_classes(self):
if self.classes is not None:
self._labels_map_rev = _to_labels_map_rev(self.classes)
class YOLOAnnotationWriter(object):
"""Class for writing annotations in YOLO format.
See :class:`fiftyone.types.dataset_types.YOLODataset` for format details.
"""
def write(
self, detections, txt_path, labels_map_rev, dynamic_classes=False
):
"""Writes the detections to disk.
Args:
detections: a :class:`fiftyone.core.labels.Detections` instance
txt_path: the path to write the annotation TXT file
labels_map_rev: a dictionary mapping class label strings to target
integers
dynamic_classes (False): whether to dynamically add labels to
labels_map_rev
"""
rows = []
for detection in detections.detections:
row = _make_yolo_row(detection, labels_map_rev, dynamic_classes)
rows.append(row)
_write_file_lines(rows, txt_path)
def load_yolo_annotations(txt_path, classes):
"""Loads the YOLO annotations from the given TXT file.
See :class:`fiftyone.types.dataset_types.YOLODataset` for format details.
Args:
txt_path: the path to the annotations TXT file
classes: the list of class label strings
Returns:
a :class:`fiftyone.core.detections.Detections` instance
"""
detections = []
for row in _read_file_lines(txt_path):
detection = _parse_yolo_row(row, classes)
detections.append(detection)
return fol.Detections(detections=detections)
def _parse_yolo_row(row, classes):
target, xc, yc, w, h = row.split()
try:
label = classes[int(target)]
except:
label = str(target)
bounding_box = [
(float(xc) - 0.5 * float(w)),
(float(yc) - 0.5 * float(h)),
float(w),
float(h),
]
return fol.Detection(label=label, bounding_box=bounding_box)
def _make_yolo_row(detection, labels_map_rev, dynamic_classes):
label = detection.label
if dynamic_classes and label not in labels_map_rev:
target = len(labels_map_rev)
labels_map_rev[label] = target
else:
target = labels_map_rev[label]
xtl, ytl, w, h = detection.bounding_box
xc = xtl + 0.5 * w
yc = ytl + 0.5 * h
return "%d %f %f %f %f" % (target, xc, yc, w, h)
def _read_file_lines(path):
with open(path, "r") as f:
lines = [l.strip() for l in f.read().splitlines()]
return [l for l in lines if l]
def _write_file_lines(lines, outpath):
etau.write_file("\n".join(lines), outpath)
def _to_labels_map_rev(classes):
return {c: i for i, c in enumerate(classes)}
def _to_classes(labels_map_rev):
targets_to_labels = {v: k for k, v in labels_map_rev.items()}
classes = []
for target in range(max(targets_to_labels.keys()) + 1):
if target in targets_to_labels:
classes.append(targets_to_labels[target])
else:
classes.append(str(target))
return classes
|
[
"fiftyone.core.utils.UniqueFilenameMaker",
"fiftyone.core.labels.Detection",
"os.path.basename",
"os.path.exists",
"fiftyone.core.labels.Detections",
"os.path.relpath",
"os.path.splitext",
"eta.core.utils.ensure_dir",
"os.path.join"
] |
[((10264, 10301), 'fiftyone.core.labels.Detections', 'fol.Detections', ([], {'detections': 'detections'}), '(detections=detections)\n', (10278, 10301), True, 'import fiftyone.core.labels as fol\n'), ((10617, 10670), 'fiftyone.core.labels.Detection', 'fol.Detection', ([], {'label': 'label', 'bounding_box': 'bounding_box'}), '(label=label, bounding_box=bounding_box)\n', (10630, 10670), True, 'import fiftyone.core.labels as fol\n'), ((3774, 3817), 'os.path.join', 'os.path.join', (['self.dataset_dir', '"""obj.names"""'], {}), "(self.dataset_dir, 'obj.names')\n", (3786, 3817), False, 'import os\n'), ((3829, 3857), 'os.path.exists', 'os.path.exists', (['classes_path'], {}), '(classes_path)\n', (3843, 3857), False, 'import os\n'), ((4065, 4109), 'os.path.join', 'os.path.join', (['self.dataset_dir', '"""images.txt"""'], {}), "(self.dataset_dir, 'images.txt')\n", (4077, 4109), False, 'import os\n'), ((4121, 4148), 'os.path.exists', 'os.path.exists', (['images_path'], {}), '(images_path)\n', (4135, 4148), False, 'import os\n'), ((6571, 6613), 'os.path.join', 'os.path.join', (['self.export_dir', '"""obj.names"""'], {}), "(self.export_dir, 'obj.names')\n", (6583, 6613), False, 'import os\n'), ((6642, 6685), 'os.path.join', 'os.path.join', (['self.export_dir', '"""images.txt"""'], {}), "(self.export_dir, 'images.txt')\n", (6654, 6685), False, 'import os\n'), ((6711, 6748), 'os.path.join', 'os.path.join', (['self.export_dir', '"""data"""'], {}), "(self.export_dir, 'data')\n", (6723, 6748), False, 'import os\n'), ((6869, 6973), 'fiftyone.core.utils.UniqueFilenameMaker', 'fou.UniqueFilenameMaker', ([], {'output_dir': 'self._data_dir', 'default_ext': 'self.image_format', 'ignore_exts': '(True)'}), '(output_dir=self._data_dir, default_ext=self.\n image_format, ignore_exts=True)\n', (6892, 6973), True, 'import fiftyone.core.utils as fou\n'), ((7071, 7102), 'eta.core.utils.ensure_dir', 'etau.ensure_dir', (['self._data_dir'], {}), '(self._data_dir)\n', (7086, 7102), True, 'import 
eta.core.utils as etau\n'), ((4494, 4531), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'image'], {}), '(self.dataset_dir, image)\n', (4506, 4531), False, 'import os\n'), ((4673, 4700), 'os.path.exists', 'os.path.exists', (['labels_path'], {}), '(labels_path)\n', (4687, 4700), False, 'import os\n'), ((8087, 8135), 'os.path.relpath', 'os.path.relpath', (['out_image_path', 'self.export_dir'], {}), '(out_image_path, self.export_dir)\n', (8102, 8135), False, 'import os\n'), ((8164, 8196), 'os.path.splitext', 'os.path.splitext', (['out_image_path'], {}), '(out_image_path)\n', (8180, 8196), False, 'import os\n'), ((4393, 4416), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (4409, 4416), False, 'import os\n'), ((4607, 4630), 'os.path.splitext', 'os.path.splitext', (['image'], {}), '(image)\n', (4623, 4630), False, 'import os\n')]
|
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import gzip
import logging
import os
import pickle
import re
import shutil
import time
import numpy as np
from IProgress import ProgressBar, Percentage
from cameo import fba
from cameo.flux_analysis.analysis import n_carbon
from cobra.core.reaction import Reaction
from marsi import config
__all__ = ['data_dir', 'log_dir', 'pickle_large', 'unpickle_large', 'frange', 'src_dir', 'internal_data_dir']
data_dir = os.path.join(config.prj_dir, "data")
models_dir = os.path.join(config.prj_dir, "models")
log_dir = os.path.join(config.prj_dir, "log")
src_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)))
internal_data_dir = os.path.join(src_dir, 'io', 'files')
INCHI_KEY_TYPE = np.dtype("a27")
BIOMASS_RE = re.compile("biomass", re.IGNORECASE)
MAX_BYTES = 2 ** 31 - 1
logger = logging.getLogger(__name__)
def pickle_large(obj, file_path, progress=False):
with open(file_path, 'wb') as model_handler:
bytes_out = pickle.dumps(obj)
output_size = len(bytes_out)
if progress:
pbar = ProgressBar(maxval=output_size, widgets=["Writing ", Percentage()])
for idx in pbar(range(0, output_size, MAX_BYTES)):
model_handler.write(bytes_out[idx:idx + MAX_BYTES])
else:
for idx in range(0, output_size, MAX_BYTES):
model_handler.write(bytes_out[idx:idx + MAX_BYTES])
def unpickle_large(file_path, progress=False):
input_size = os.path.getsize(file_path)
logger.debug("Input size: %f bytes" % input_size)
with open(file_path, 'rb') as file_handler:
bytes_in = bytearray(0)
if progress:
pbar = ProgressBar(maxval=input_size, widgets=["Loading ", Percentage()])
for _ in pbar(range(0, input_size, MAX_BYTES)):
bytes_in += file_handler.read(MAX_BYTES)
else:
for _ in range(0, input_size, MAX_BYTES):
bytes_in += file_handler.read(MAX_BYTES)
return pickle.loads(bytes_in)
def frange(start, stop=None, steps=10):
"""
Float range generator.
Generates *steps* equally separated between *start* and *stop*.
If *stop* is None, the values are between 0 and *start*
Parameters
----------
start : float
The initial value.
stop : float
The final value.
steps : int
Number of values to generate.
Returns
-------
generator
A generator that yields float.
"""
if stop is None:
stop = start
start = 0
# Python 2 division of int returns int
start = float(start)
stop = float(stop)
step_size = (stop - start) / float(steps)
logger.debug("Step size %f" % step_size)
for i in range(steps):
logger.debug("Iteration %i: %f" % (i + 1, i * step_size))
yield start + i * step_size
def unique(l):
"""
Removes repeated values from a list.
Parameters
----------
l: list
Returns
-------
list
The same list with only unique values.
"""
s = set()
n = 0
for x in l:
if x not in s:
s.add(x)
l[n] = x
n += 1
del l[n:]
def timing(debug=False): # pragma: no cover
def function_wrapper(func):
if debug:
def debug_wrap_func(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
stop = time.time()
if config.log.level >= config.Level.DEBUG:
print('%s function took %0.3f ms' % (func.__name__, (stop - start) * 1000.0))
return ret
return debug_wrap_func
else:
def wrap_func(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
stop = time.time()
print('%s function took %0.3f ms' % (func.__name__, (stop - start) * 1000.0))
return ret
return wrap_func
return function_wrapper
def default_carbon_sources(model):
solution = fba(model)
carbon_sources = []
for ex in model.exchanges:
assert isinstance(ex, Reaction)
if ex.lower_bound < 0 and solution[ex.id] < 0 < n_carbon(ex):
logger.debug("Found carbon source: %s")
carbon_sources.append(ex)
return carbon_sources
def gunzip(file):
assert file[-3:] == ".gz"
in_name = file
out_name = file[0:-3]
with gzip.open(in_name, 'rb') as f_in, open(out_name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def search_metabolites(model, species_id, ignore_external=True):
if ignore_external:
return model.metabolites.query(lambda mid: mid[:-2] == species_id and mid[-2:] != "_e", attribute='id')
else:
return model.metabolites.query(lambda mid: mid[:-2] == species_id, attribute='id')
|
[
"pickle.loads",
"gzip.open",
"pickle.dumps",
"IProgress.Percentage",
"os.path.getsize",
"os.path.dirname",
"numpy.dtype",
"time.time",
"cameo.fba",
"cameo.flux_analysis.analysis.n_carbon",
"shutil.copyfileobj",
"os.path.join",
"logging.getLogger",
"re.compile"
] |
[((1086, 1122), 'os.path.join', 'os.path.join', (['config.prj_dir', '"""data"""'], {}), "(config.prj_dir, 'data')\n", (1098, 1122), False, 'import os\n'), ((1136, 1174), 'os.path.join', 'os.path.join', (['config.prj_dir', '"""models"""'], {}), "(config.prj_dir, 'models')\n", (1148, 1174), False, 'import os\n'), ((1185, 1220), 'os.path.join', 'os.path.join', (['config.prj_dir', '"""log"""'], {}), "(config.prj_dir, 'log')\n", (1197, 1220), False, 'import os\n'), ((1309, 1345), 'os.path.join', 'os.path.join', (['src_dir', '"""io"""', '"""files"""'], {}), "(src_dir, 'io', 'files')\n", (1321, 1345), False, 'import os\n'), ((1364, 1379), 'numpy.dtype', 'np.dtype', (['"""a27"""'], {}), "('a27')\n", (1372, 1379), True, 'import numpy as np\n'), ((1394, 1430), 're.compile', 're.compile', (['"""biomass"""', 're.IGNORECASE'], {}), "('biomass', re.IGNORECASE)\n", (1404, 1430), False, 'import re\n'), ((1466, 1493), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1483, 1493), False, 'import logging\n'), ((2114, 2140), 'os.path.getsize', 'os.path.getsize', (['file_path'], {}), '(file_path)\n', (2129, 2140), False, 'import os\n'), ((2637, 2659), 'pickle.loads', 'pickle.loads', (['bytes_in'], {}), '(bytes_in)\n', (2649, 2659), False, 'import pickle\n'), ((4717, 4727), 'cameo.fba', 'fba', (['model'], {}), '(model)\n', (4720, 4727), False, 'from cameo import fba\n'), ((1260, 1285), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1275, 1285), False, 'import os\n'), ((1615, 1632), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (1627, 1632), False, 'import pickle\n'), ((5115, 5139), 'gzip.open', 'gzip.open', (['in_name', '"""rb"""'], {}), "(in_name, 'rb')\n", (5124, 5139), False, 'import gzip\n'), ((5188, 5219), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (5206, 5219), False, 'import shutil\n'), ((4004, 4015), 'time.time', 'time.time', ([], {}), '()\n', (4013, 4015), False, 
'import time\n'), ((4083, 4094), 'time.time', 'time.time', ([], {}), '()\n', (4092, 4094), False, 'import time\n'), ((4396, 4407), 'time.time', 'time.time', ([], {}), '()\n', (4405, 4407), False, 'import time\n'), ((4475, 4486), 'time.time', 'time.time', ([], {}), '()\n', (4484, 4486), False, 'import time\n'), ((4880, 4892), 'cameo.flux_analysis.analysis.n_carbon', 'n_carbon', (['ex'], {}), '(ex)\n', (4888, 4892), False, 'from cameo.flux_analysis.analysis import n_carbon\n'), ((1763, 1775), 'IProgress.Percentage', 'Percentage', ([], {}), '()\n', (1773, 1775), False, 'from IProgress import ProgressBar, Percentage\n'), ((2368, 2380), 'IProgress.Percentage', 'Percentage', ([], {}), '()\n', (2378, 2380), False, 'from IProgress import ProgressBar, Percentage\n')]
|
from PyKaraokeSearch import search_clubdam, ClubDamSearchQuery
if __name__ == '__main__':
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('keyword', type=str)
parser.add_argument('-o', '--output_path', type=str)
parser.add_argument('-t', '--timeout', type=float)
args = parser.parse_args()
keyword = args.keyword
output_path = args.output_path
timeout = args.timeout
result = search_clubdam(ClubDamSearchQuery(keyword=keyword), timeout=timeout)
with open(output_path if output_path else 0, 'w') as fp:
json.dump(result, fp, ensure_ascii=False)
|
[
"json.dump",
"argparse.ArgumentParser",
"PyKaraokeSearch.ClubDamSearchQuery"
] |
[((141, 166), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (164, 166), False, 'import argparse\n'), ((474, 509), 'PyKaraokeSearch.ClubDamSearchQuery', 'ClubDamSearchQuery', ([], {'keyword': 'keyword'}), '(keyword=keyword)\n', (492, 509), False, 'from PyKaraokeSearch import search_clubdam, ClubDamSearchQuery\n'), ((598, 639), 'json.dump', 'json.dump', (['result', 'fp'], {'ensure_ascii': '(False)'}), '(result, fp, ensure_ascii=False)\n', (607, 639), False, 'import json\n')]
|
from flask import Flask, render_template
from formulario import CadastroForm
app = Flask (__name__)
app.config['SECRET_KEY'] = 'dkshvfdikhgolfhvljh'
@app.route('/')
def home():
return render_template('home.html')
@app.route('/cadastro', methods=["GET", "POST"])
def Cadastro():
formulario=CadastroForm()
return render_template(
'cadastro.html',
formulario=formulario
)
@app.route('/login')
def Login():
return render_template('login.html')
if (__name__ =='__main__'):
app.run(debug=True, port = 5001)
|
[
"formulario.CadastroForm",
"flask.Flask",
"flask.render_template"
] |
[((90, 105), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (95, 105), False, 'from flask import Flask, render_template\n'), ((199, 227), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (214, 227), False, 'from flask import Flask, render_template\n'), ((313, 327), 'formulario.CadastroForm', 'CadastroForm', ([], {}), '()\n', (325, 327), False, 'from formulario import CadastroForm\n'), ((340, 395), 'flask.render_template', 'render_template', (['"""cadastro.html"""'], {'formulario': 'formulario'}), "('cadastro.html', formulario=formulario)\n", (355, 395), False, 'from flask import Flask, render_template\n'), ((477, 506), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (492, 506), False, 'from flask import Flask, render_template\n')]
|
from __future__ import division
import random
import threading
import numpy as np
# Major -> Mixolydian (-) | Lydian (-) 0
# Dorian -> Minor (-) | Mixolydian (+) 1
# Phyrgian -> Locrian (-) | Minor (+) 2
# Lydian -> Mixolydian (-) | Major (+) 3
# Mixolydian -> Dorian (-) | Major (+) 4
# Minor -> Phyrgian (-) | Dorian (+) 5
# Locrian -> Locrian (-) | Phyrgian (+) 6
SCALE_ORDER = (6, 2, 5, 1, 4, 3, 0)
class MinMax(object):
def __init__(self, min_, max_):
self.min = min_
self.max = max_
@property
def diff(self):
return self.max - self.min
@property
def minmax(self):
return (self.min, self.max)
def norm(self, n):
return (n - self.min) / self.max
E_RANGE = MinMax(-50.0, 50.0)
D_RANGE = MinMax(-50.0, 50.0)
C_RANGE = MinMax(-50.0, 50.0)
T_RANGE = MinMax(60, 180)
V_RANGE = MinMax(70, 127)
class LifeState(object):
def __init__(self, inputs, debug=False):
self.debug = debug
self.inputs = inputs
self.update_rate = 3 # secs
self.energy = 0
self.disposition = 0
self.chaos = 0
self.update()
def update(self):
if len(self.inputs) > 0:
ranges = zip(*(E_RANGE.minmax, D_RANGE.minmax, C_RANGE.minmax))
states = [(input.energy,
input.disposition,
input.chaos)
for input in self.inputs]
states = np.clip(states, *ranges)
energies, dispositions, chaoses = zip(*states.tolist)
self.energy = np.mean(energies)
self.disposition = np.mean(dispositions)
self.chaos = np.mean(chaoses)
if self.debug:
msg = ['State update',
"Energy: {}".format(self.energy),
"Disposition: {}".format(self.disposition),
"Chaos: {}".format(self.chaos),
'']
print('\n'.join(msg))
threading.Timer(self.update_rate, self.update).start()
def get_tempo(self, old_tempo): # Energy +/- Chaos
# f(en) = {log(en + 1) * (T - 90) / log(E + 1) + 90 | en >= 0
# (e^(en + E) - 1) * (90 - t) / (e^E - 1) + 60 | en <= 0}
# en = energy, E = max_energy, T = max_tempo, t = min_tempo
energy = self.energy
chaos = self.chaos
if energy >= 0:
_a = np.log10(energy + 1)
_b = T_RANGE.max - 90
_c = np.log10(E_RANGE.max + 1)
_d = 90
else:
_a = np.exp(energy + E_RANGE.max) - 1
_b = 90 - T_RANGE.min
_c = np.exp(E_RANGE.max) - 1
_d = 60
tempo = _a * _b / _c + _d
tempo += chaos / C_RANGE.diff
# Tempo can only change by at most 20 bpm
tempo = np.clip(tempo, old_tempo - 20, old_tempo + 20)
return tempo
def get_key(self, old_key): # Disposition
disposition = self.disposition
d_ratio = D_RANGE.norm(disposition)
if 0 <= d_ratio < 0.05:
target_scale = 0
elif 0.05 <= d_ratio < 0.12:
target_scale = 1
elif 0.12 <= d_ratio < 0.3:
target_scale = 2
elif 0.3 <= d_ratio < 0.4:
target_scale = 3
elif 0.4 <= d_ratio < 0.7:
target_scale = random.choice((4, 5))
elif 0.7 <= d_ratio <= 1:
target_scale = 6
scale_rank = SCALE_ORDER.index(old_key[1])
direction = np.sign(target_scale - scale_rank)
scale = SCALE_ORDER[scale_rank + direction]
root = 0
key = (root, scale)
return key
def get_octave(self, old_octave):
octave = old_octave
if random.random() < 0.1:
octave = old_octave + random.choice((-1, 1))
if abs(octave) > 1:
octave = old_octave
return octave
def get_volume(self, _old_volume): # Energy +/- Chaos
energy = self.energy
chaos = self.chaos
e_ratio = E_RANGE.norm(energy)
volume = e_ratio * (V_RANGE.diff) + V_RANGE.min
volume += chaos / C_RANGE.diff
return int(volume)
def get_dissonance(self, _old_dissonance): # Disposition
disposition = self.disposition
d_ratio = D_RANGE.norm(disposition)
if 0 <= d_ratio < 0.1:
dissonance = 0.2
elif 0.1 <= d_ratio < 0.9:
dissonance = 0.1
elif 0.9 <= d_ratio <= 1:
dissonance = 0.05
return dissonance
def get_length_ratio(self, _old_length_ratio): # Energy +/- Chaos
energy = self.energy
e_ratio = E_RANGE.norm(energy)
if 0 <= e_ratio < 0.1:
length_ratio = (1, 2)
elif 0.1 <= e_ratio < 0.6:
length_ratio = (1, 4)
elif 0.6 <= e_ratio < 0.8:
length_ratio = (1, 3)
elif 0.8 <= e_ratio < 0.9:
length_ratio = (1, 2)
elif 0.9 <= e_ratio <= 1:
length_ratio = (2, 1)
return length_ratio
|
[
"threading.Timer",
"random.choice",
"numpy.clip",
"random.random",
"numpy.mean",
"numpy.exp",
"numpy.sign",
"numpy.log10"
] |
[((2880, 2926), 'numpy.clip', 'np.clip', (['tempo', '(old_tempo - 20)', '(old_tempo + 20)'], {}), '(tempo, old_tempo - 20, old_tempo + 20)\n', (2887, 2926), True, 'import numpy as np\n'), ((3557, 3591), 'numpy.sign', 'np.sign', (['(target_scale - scale_rank)'], {}), '(target_scale - scale_rank)\n', (3564, 3591), True, 'import numpy as np\n'), ((1486, 1510), 'numpy.clip', 'np.clip', (['states', '*ranges'], {}), '(states, *ranges)\n', (1493, 1510), True, 'import numpy as np\n'), ((1604, 1621), 'numpy.mean', 'np.mean', (['energies'], {}), '(energies)\n', (1611, 1621), True, 'import numpy as np\n'), ((1653, 1674), 'numpy.mean', 'np.mean', (['dispositions'], {}), '(dispositions)\n', (1660, 1674), True, 'import numpy as np\n'), ((1700, 1716), 'numpy.mean', 'np.mean', (['chaoses'], {}), '(chaoses)\n', (1707, 1716), True, 'import numpy as np\n'), ((2463, 2483), 'numpy.log10', 'np.log10', (['(energy + 1)'], {}), '(energy + 1)\n', (2471, 2483), True, 'import numpy as np\n'), ((2535, 2560), 'numpy.log10', 'np.log10', (['(E_RANGE.max + 1)'], {}), '(E_RANGE.max + 1)\n', (2543, 2560), True, 'import numpy as np\n'), ((3790, 3805), 'random.random', 'random.random', ([], {}), '()\n', (3803, 3805), False, 'import random\n'), ((2037, 2083), 'threading.Timer', 'threading.Timer', (['self.update_rate', 'self.update'], {}), '(self.update_rate, self.update)\n', (2052, 2083), False, 'import threading\n'), ((2612, 2640), 'numpy.exp', 'np.exp', (['(energy + E_RANGE.max)'], {}), '(energy + E_RANGE.max)\n', (2618, 2640), True, 'import numpy as np\n'), ((2696, 2715), 'numpy.exp', 'np.exp', (['E_RANGE.max'], {}), '(E_RANGE.max)\n', (2702, 2715), True, 'import numpy as np\n'), ((3847, 3869), 'random.choice', 'random.choice', (['(-1, 1)'], {}), '((-1, 1))\n', (3860, 3869), False, 'import random\n'), ((3400, 3421), 'random.choice', 'random.choice', (['(4, 5)'], {}), '((4, 5))\n', (3413, 3421), False, 'import random\n')]
|
import pstree
from pstree import Tree
EXAMPLE_TREE = (
Tree("init", Tree("amd")
, Tree("2*[strsd]")
, Tree("atd")
, Tree("crond")
, Tree("deskguide_apple")
, Tree("eth0")
, Tree("gdm", Tree("gdm", Tree("X")
, Tree("gnome-session", Tree("Gnome")
, Tree("ssh-agent")
, Tree("true"))))
, Tree("geyes_applet")
, Tree("gkb_applet")
, Tree("gnome-name-serv")
, Tree("gnome-terminal", Tree("bash", Tree("vim"))
, Tree("bash")
, Tree("bash", Tree("pstree"))
, Tree("bash", Tree("ssh"))
, Tree("bash", Tree("mozilla-bin", Tree("mozilla-bin", Tree("3*[mozilla-bin]"))))
, Tree("gnome-pty-helper"))
, Tree("gpm")
, Tree("gweather")
, Tree("kapm-idled"))
)
def pstree_demo():
    """Print EXAMPLE_TREE in the default pstree style, followed by a blank line."""
    pstree.print_tree(EXAMPLE_TREE)
    print()
def ps_tree_demo_vertical():
    """Render EXAMPLE_TREE into a buffer, then print it rotated vertically."""
    import io

    buffer = io.StringIO()
    pstree.print_tree(EXAMPLE_TREE, stream=buffer)

    import transpose

    rotated = transpose.transpose(buffer.getvalue(), char_map=pstree.BASIC_CHAR_MAP)
    print(*rotated, sep="\n")
# Demo entry point: the vertical (transposed) rendering is shown by default.
if __name__ == "__main__":
    # pstree_demo()
    ps_tree_demo_vertical()
|
[
"pstree.print_tree",
"io.StringIO",
"pstree.Tree"
] |
[((70, 81), 'pstree.Tree', 'Tree', (['"""amd"""'], {}), "('amd')\n", (74, 81), False, 'from pstree import Tree\n'), ((95, 112), 'pstree.Tree', 'Tree', (['"""2*[strsd]"""'], {}), "('2*[strsd]')\n", (99, 112), False, 'from pstree import Tree\n'), ((126, 137), 'pstree.Tree', 'Tree', (['"""atd"""'], {}), "('atd')\n", (130, 137), False, 'from pstree import Tree\n'), ((151, 164), 'pstree.Tree', 'Tree', (['"""crond"""'], {}), "('crond')\n", (155, 164), False, 'from pstree import Tree\n'), ((178, 201), 'pstree.Tree', 'Tree', (['"""deskguide_apple"""'], {}), "('deskguide_apple')\n", (182, 201), False, 'from pstree import Tree\n'), ((215, 227), 'pstree.Tree', 'Tree', (['"""eth0"""'], {}), "('eth0')\n", (219, 227), False, 'from pstree import Tree\n'), ((513, 533), 'pstree.Tree', 'Tree', (['"""geyes_applet"""'], {}), "('geyes_applet')\n", (517, 533), False, 'from pstree import Tree\n'), ((547, 565), 'pstree.Tree', 'Tree', (['"""gkb_applet"""'], {}), "('gkb_applet')\n", (551, 565), False, 'from pstree import Tree\n'), ((579, 602), 'pstree.Tree', 'Tree', (['"""gnome-name-serv"""'], {}), "('gnome-name-serv')\n", (583, 602), False, 'from pstree import Tree\n'), ((1032, 1043), 'pstree.Tree', 'Tree', (['"""gpm"""'], {}), "('gpm')\n", (1036, 1043), False, 'from pstree import Tree\n'), ((1057, 1073), 'pstree.Tree', 'Tree', (['"""gweather"""'], {}), "('gweather')\n", (1061, 1073), False, 'from pstree import Tree\n'), ((1087, 1105), 'pstree.Tree', 'Tree', (['"""kapm-idled"""'], {}), "('kapm-idled')\n", (1091, 1105), False, 'from pstree import Tree\n'), ((1134, 1165), 'pstree.print_tree', 'pstree.print_tree', (['EXAMPLE_TREE'], {}), '(EXAMPLE_TREE)\n', (1151, 1165), False, 'import pstree\n'), ((1237, 1250), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1248, 1250), False, 'import io\n'), ((1255, 1301), 'pstree.print_tree', 'pstree.print_tree', (['EXAMPLE_TREE'], {'stream': 'stream'}), '(EXAMPLE_TREE, stream=stream)\n', (1272, 1301), False, 'import pstree\n'), ((701, 713), 
'pstree.Tree', 'Tree', (['"""bash"""'], {}), "('bash')\n", (705, 713), False, 'from pstree import Tree\n'), ((993, 1017), 'pstree.Tree', 'Tree', (['"""gnome-pty-helper"""'], {}), "('gnome-pty-helper')\n", (997, 1017), False, 'from pstree import Tree\n'), ((265, 274), 'pstree.Tree', 'Tree', (['"""X"""'], {}), "('X')\n", (269, 274), False, 'from pstree import Tree\n'), ((652, 663), 'pstree.Tree', 'Tree', (['"""vim"""'], {}), "('vim')\n", (656, 663), False, 'from pstree import Tree\n'), ((763, 777), 'pstree.Tree', 'Tree', (['"""pstree"""'], {}), "('pstree')\n", (767, 777), False, 'from pstree import Tree\n'), ((828, 839), 'pstree.Tree', 'Tree', (['"""ssh"""'], {}), "('ssh')\n", (832, 839), False, 'from pstree import Tree\n'), ((334, 347), 'pstree.Tree', 'Tree', (['"""Gnome"""'], {}), "('Gnome')\n", (338, 347), False, 'from pstree import Tree\n'), ((407, 424), 'pstree.Tree', 'Tree', (['"""ssh-agent"""'], {}), "('ssh-agent')\n", (411, 424), False, 'from pstree import Tree\n'), ((484, 496), 'pstree.Tree', 'Tree', (['"""true"""'], {}), "('true')\n", (488, 496), False, 'from pstree import Tree\n'), ((930, 953), 'pstree.Tree', 'Tree', (['"""3*[mozilla-bin]"""'], {}), "('3*[mozilla-bin]')\n", (934, 953), False, 'from pstree import Tree\n')]
|
#!/usr/bin/env python3
# coding:utf-8
# Day-of-year calculator: the ordinal of year/month/day equals its distance
# from Dec 31 of the previous year (same idea as 004_11.py, different API).
from datetime import date

year, month, day = (
    int(part)
    for part in input("input year/month/day(e.g. 2019/1/1): ").split('/')
)
# date subtraction yields a timedelta; `.days` is the day-of-year.
print((date(year, month, day) - date(year - 1, 12, 31)).days)
|
[
"datetime.date"
] |
[((308, 336), 'datetime.date', 'date', (['(year_input - 1)', '(12)', '(31)'], {}), '(year_input - 1, 12, 31)\n', (312, 336), False, 'from datetime import date\n'), ((259, 299), 'datetime.date', 'date', (['year_input', 'month_input', 'day_input'], {}), '(year_input, month_input, day_input)\n', (263, 299), False, 'from datetime import date\n')]
|
import torch
from torch.nn.functional import one_hot
import h5py
import shutil
import numpy as np
from pathlib import Path
from tqdm import tqdm
from time import time
from utils.metrics import calc_ece, calc_nll_brier, BrierLoss
from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor
class CnnRunner(BaseRunner):
    """Train/validate/test runner for a single CNN in a distributed job.

    Extends BaseRunner with an optimizer, LR scheduler, a weighted sum of
    losses, optional FGSM adversarial training, and checkpointing of the
    best-scoring model.
    """
    def __init__(self, loader, model, optim, lr_scheduler, num_epoch, loss_with_weight,
                 val_metric, test_metric, logger, model_path, rank, adv_training):
        # loss_with_weight: iterable of (loss_fn, weight) pairs summed in _calc_loss.
        self.num_epoch = num_epoch
        self.epoch = 0  # resumed from a checkpoint by self.load() below
        self.loss_with_weight = loss_with_weight
        self.adv_training = adv_training
        self.val_metric = val_metric
        self.test_metric = test_metric
        self.optim = optim
        self.lr_scheduler = lr_scheduler
        self.best_score = 0.
        self.save_kwargs = {}  # extra fields merged into every checkpoint by save()
        self.world_size = torch.distributed.get_world_size()
        super().__init__(loader, model, logger, model_path, rank)
        self.load()  # resume from "<model_path>/model.pth" if it exists
    def _calc_loss(self, img, label):
        """Forward `img` through the model and return the weighted sum of losses."""
        self.model.train()
        output = self.model(img.cuda(non_blocking=True))
        label = label.cuda(non_blocking=True)
        loss_ = 0
        for loss, w in self.loss_with_weight:
            _loss = w * loss(output, label)
            loss_ += _loss
        return loss_
    def fgsm(self, img, label):
        """One FGSM step: perturb `img` along the sign of its loss gradient.

        Uses the first configured loss and a fixed step size of 0.01; the
        returned tensor is detached and moved back to the CPU.
        """
        step_size = 0.01
        # loss_fn = torch.nn.CrossEntropyLoss()
        loss_fn = self.loss_with_weight[0][0]
        img = img.cuda()
        img.requires_grad = True
        self.model.eval()
        self.model.zero_grad()
        output = self.model(img)
        loss = loss_fn(output, label.cuda())
        loss.backward()
        grad_sign = img.grad.sign()
        img_new = img + step_size * grad_sign
        return img_new.cpu().detach()
    def _train_a_batch(self, batch):
        """Run one optimization step on `batch`; return the all-reduced loss value.

        `batch` is assumed to be a mutable [img, label] pair — TODO confirm
        against the loader.
        """
        if self.adv_training:
            img_new = self.fgsm(*batch)
            batch[0] = img_new  # replace clean images with adversarial ones in place
        loss = self._calc_loss(*batch)
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        _loss = reduce_tensor(loss, True).item()
        return _loss
    @torch.no_grad()
    def _valid_a_batch(self, img, label, with_output=False):
        """Evaluate one batch with the validation metric (gradients disabled).

        Returns the metric, or [metric, raw model output] when `with_output`.
        """
        output = self.model(img.cuda(non_blocking=True))
        label = label.cuda(non_blocking=True)
        result = self.val_metric(output, label)
        if with_output:
            result = [result, output]
        return result
    def train(self):
        """Train from the resumed epoch up to num_epoch, validating after each epoch.

        Only rank 0 drives the progress bar; every rank trains.
        NOTE(review): an empty train loader would leave `i` unbound in the
        epoch-summary log line below.
        """
        self.log("Start to train", 'debug')
        for epoch in range(self.epoch, self.num_epoch):
            self.model.train()
            loader = self.loader.load("train")
            if self.rank == 0:
                t_iter = tqdm(loader, total=self.loader.len,
                              desc=f"[Train {epoch}]")
            else:
                t_iter = loader
            losses = 0
            times = []
            for i, batch in enumerate(t_iter):
                t = time()
                loss = self._train_a_batch(batch)
                times += [time() - t]
                losses += loss
                if self.rank == 0:
                    t_iter.set_postfix(loss=f"{loss:.4} / {losses/(i+1):.4}")
            self.log(f"[Train] epoch:{epoch} loss:{losses/(i+1)}", 'info')
            print("Batch Training Time : ", np.mean(times))
            self.lr_scheduler.step()
            self.val(epoch)
    def val(self, epoch):
        """Compute mean validation score gathered across ranks; rank 0 checkpoints."""
        loader = self.loader.load('val')
        v_iter = loader
        metrics = []
        self.model.eval()
        for batch in v_iter:
            _metric = self._valid_a_batch(*batch, with_output=False)
            metrics += [gather_tensor(_metric).cpu().numpy()]
        acc = np.concatenate(metrics).mean()
        self.log(f"[Val] {epoch} Score: {acc}", 'info')
        if self.rank == 0:
            self.save(epoch, acc, **self.save_kwargs)
    def test(self, is_seg):
        """Evaluate the saved 'model.pth' checkpoint on the test split.

        Segmentation (`is_seg` true): logs mean IoU and writes one HDF5 file
        per sample under <model_path>/infer. Classification: logs ACC/ECE/
        NLL/Brier and writes all outputs and labels to <model_path>/output.h5.
        """
        self.load('model.pth')
        loader = self.loader.load('test')
        if self.rank == 0:
            t_iter = tqdm(loader, total=self.loader.len)
        else:
            t_iter = loader
        outputs = []
        labels = []
        metrics = []
        self.model.eval()
        for img, label in t_iter:
            _metric, output = self._valid_a_batch(img, label, with_output=True)
            labels += [gather_tensor(label).cpu().numpy()]
            outputs += [gather_tensor(output).cpu().numpy()]
            metrics += [gather_tensor(_metric).cpu().numpy()]
        if is_seg:
            met = np.concatenate(metrics).mean()
            self.log(f"[Test] MeanIOU: {met:.2f}", 'info')
            save_path = Path(self.model_path) / 'infer'
            save_path.mkdir(parents=True, exist_ok=True)
            index = 0
            for out, label in zip(outputs, labels):
                for i in range(label.shape[0]):
                    l = label[i]
                    o = out[i]
                    with h5py.File(f"{save_path}/{index}.h5", 'w') as h:
                        h.create_dataset('output', data=o)
                        h.create_dataset('label', data=l)
                    index += 1
        else:
            labels = np.concatenate(labels)
            outputs = np.concatenate(outputs, axis=0)
            acc = (outputs.argmax(1) == labels).mean() * 100
            ece = calc_ece(outputs, labels)
            # NOTE(review): self.num_classes is never set in __init__ —
            # presumably supplied by a subclass or restored via load(); confirm.
            nll, brier = calc_nll_brier(outputs, labels, self.num_classes)
            log = f"[Test] ACC: {acc:.2f}, ECE : {ece:.2f}, "
            log += f"NLL : {nll:.2f}, Brier : {brier:.2f}"
            self.log(log, 'info')
            with h5py.File(f"{self.model_path}/output.h5", 'w') as h:
                h.create_dataset('output', data=outputs)
                h.create_dataset('label', data=labels)
    def save(self, epoch, metric, file_name="model", **kwargs):
        """Write a checkpoint; also copy it to best.pth when `metric` is a new best."""
        torch.save({"epoch": epoch,
                    "param": self.model.state_dict(),
                    "optimizer": self.optim.state_dict(),
                    "score": metric,
                    "best": self.best_score,
                    "lr_schdlr": self.lr_scheduler.state_dict(),
                    **kwargs}, f"{self.model_path}/{file_name}.pth")
        cond = metric >= self.best_score
        if cond:
            self.log(f"{self.best_score} -------------------> {metric}", 'debug')
            self.best_score = metric
            shutil.copy2(f"{self.model_path}/{file_name}.pth",
                         f"{self.model_path}/best.pth")
        self.log(f"Model has saved at {epoch} epoch.", 'debug')
    def load(self, file_name="model.pth"):
        """Restore runner state from <model_path>/<file_name>, if the file exists.

        Known checkpoint keys are routed to their objects; any other key is
        written straight into the instance dict (mirrors save()'s **kwargs).
        """
        self.log(self.model_path, 'debug')
        if (self.model_path / file_name).exists():
            self.log(f"Loading {self.model_path} File", 'debug')
            ckpoint = torch.load(f"{self.model_path}/{file_name}", map_location='cpu')
            for key, value in ckpoint.items():
                if key == 'param':
                    self.model.load_state_dict(value)
                elif key == 'optimizer':
                    self.optim.load_state_dict(value)
                elif key == 'lr_schdlr':
                    self.lr_scheduler.load_state_dict(value)
                elif key == 'epoch':
                    self.epoch = value + 1  # resume from the epoch after the saved one
                elif key == 'best':
                    self.best_score = value
                else:
                    self.__dict__[key] = value
            self.log(f"Model Type : {file_name}, epoch : {self.epoch}", 'debug')
        else:
            self.log("Failed to load, not existing file", 'debug')
    def get_lr(self):
        """Return the current learning rate of the first optimizer param group."""
        return self.lr_scheduler.optimizer.param_groups[0]['lr']
|
[
"tqdm.tqdm",
"h5py.File",
"runners.base_runner.gather_tensor",
"runners.base_runner.reduce_tensor",
"shutil.copy2",
"torch.load",
"time.time",
"pathlib.Path",
"numpy.mean",
"torch.distributed.get_world_size",
"utils.metrics.calc_nll_brier",
"torch.no_grad",
"utils.metrics.calc_ece",
"numpy.concatenate"
] |
[((2154, 2169), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2167, 2169), False, 'import torch\n'), ((883, 917), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (915, 917), False, 'import torch\n'), ((4048, 4083), 'tqdm.tqdm', 'tqdm', (['loader'], {'total': 'self.loader.len'}), '(loader, total=self.loader.len)\n', (4052, 4083), False, 'from tqdm import tqdm\n'), ((5194, 5216), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5208, 5216), True, 'import numpy as np\n'), ((5239, 5270), 'numpy.concatenate', 'np.concatenate', (['outputs'], {'axis': '(0)'}), '(outputs, axis=0)\n', (5253, 5270), True, 'import numpy as np\n'), ((5350, 5375), 'utils.metrics.calc_ece', 'calc_ece', (['outputs', 'labels'], {}), '(outputs, labels)\n', (5358, 5375), False, 'from utils.metrics import calc_ece, calc_nll_brier, BrierLoss\n'), ((5401, 5450), 'utils.metrics.calc_nll_brier', 'calc_nll_brier', (['outputs', 'labels', 'self.num_classes'], {}), '(outputs, labels, self.num_classes)\n', (5415, 5450), False, 'from utils.metrics import calc_ece, calc_nll_brier, BrierLoss\n'), ((6408, 6493), 'shutil.copy2', 'shutil.copy2', (['f"""{self.model_path}/{file_name}.pth"""', 'f"""{self.model_path}/best.pth"""'], {}), "(f'{self.model_path}/{file_name}.pth',\n f'{self.model_path}/best.pth')\n", (6420, 6493), False, 'import shutil\n'), ((6808, 6872), 'torch.load', 'torch.load', (['f"""{self.model_path}/{file_name}"""'], {'map_location': '"""cpu"""'}), "(f'{self.model_path}/{file_name}', map_location='cpu')\n", (6818, 6872), False, 'import torch\n'), ((2094, 2119), 'runners.base_runner.reduce_tensor', 'reduce_tensor', (['loss', '(True)'], {}), '(loss, True)\n', (2107, 2119), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((2722, 2782), 'tqdm.tqdm', 'tqdm', (['loader'], {'total': 'self.loader.len', 'desc': 'f"""[Train {epoch}]"""'}), "(loader, total=self.loader.len, desc=f'[Train {epoch}]')\n", 
(2726, 2782), False, 'from tqdm import tqdm\n'), ((2976, 2982), 'time.time', 'time', ([], {}), '()\n', (2980, 2982), False, 'from time import time\n'), ((3335, 3349), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (3342, 3349), True, 'import numpy as np\n'), ((3730, 3753), 'numpy.concatenate', 'np.concatenate', (['metrics'], {}), '(metrics)\n', (3744, 3753), True, 'import numpy as np\n'), ((4662, 4683), 'pathlib.Path', 'Path', (['self.model_path'], {}), '(self.model_path)\n', (4666, 4683), False, 'from pathlib import Path\n'), ((5623, 5669), 'h5py.File', 'h5py.File', (['f"""{self.model_path}/output.h5"""', '"""w"""'], {}), "(f'{self.model_path}/output.h5', 'w')\n", (5632, 5669), False, 'import h5py\n'), ((4548, 4571), 'numpy.concatenate', 'np.concatenate', (['metrics'], {}), '(metrics)\n', (4562, 4571), True, 'import numpy as np\n'), ((3059, 3065), 'time.time', 'time', ([], {}), '()\n', (3063, 3065), False, 'from time import time\n'), ((4963, 5004), 'h5py.File', 'h5py.File', (['f"""{save_path}/{index}.h5"""', '"""w"""'], {}), "(f'{save_path}/{index}.h5', 'w')\n", (4972, 5004), False, 'import h5py\n'), ((3678, 3700), 'runners.base_runner.gather_tensor', 'gather_tensor', (['_metric'], {}), '(_metric)\n', (3691, 3700), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((4352, 4372), 'runners.base_runner.gather_tensor', 'gather_tensor', (['label'], {}), '(label)\n', (4365, 4372), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((4412, 4433), 'runners.base_runner.gather_tensor', 'gather_tensor', (['output'], {}), '(output)\n', (4425, 4433), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((4473, 4495), 'runners.base_runner.gather_tensor', 'gather_tensor', (['_metric'], {}), '(_metric)\n', (4486, 4495), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n')]
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import unittest
import mock
from google.appengine.ext import ndb
from dashboard import find_anomalies
from dashboard import find_change_points
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import alert_group
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
from dashboard.models import histogram
from dashboard.models.subscription import Subscription
from dashboard.models.subscription import VISIBILITY
from tracing.value.diagnostics import reserved_infos
from dashboard.sheriff_config_client import SheriffConfigClient
# pylint: disable=too-many-lines
# Sample time series.
# (revision, value) pairs fed into graph_data.Row fixtures by the tests below.
_TEST_ROW_DATA = [
    (241105, 2136.7),
    (241116, 2140.3),
    (241151, 2149.1),
    (241154, 2147.2),
    (241156, 2130.6),
    (241160, 2136.2),
    (241188, 2146.7),
    (241201, 2141.8),
    (241226, 2140.6),
    (241247, 2128.1),
    (241249, 2134.2),
    (241254, 2130.0),
    (241262, 2136.0),
    (241268, 2142.6),
    (241271, 2149.1),
    (241282, 2156.6),
    (241294, 2125.3),
    (241298, 2155.5),
    (241303, 2148.5),
    (241317, 2146.2),
    (241323, 2123.3),
    (241330, 2121.5),
    (241342, 2141.2),
    (241355, 2145.2),
    (241371, 2136.3),
    (241386, 2144.0),
    (241405, 2138.1),
    (241420, 2147.6),
    (241432, 2140.7),
    (241441, 2132.2),
    (241452, 2138.2),
    (241455, 2139.3),
    (241471, 2134.0),
    (241488, 2137.2),
    (241503, 2152.5),
    (241520, 2136.3),
    (241524, 2139.3),
    (241529, 2143.5),
    (241532, 2145.5),
    (241535, 2147.0),
    # The values step up from ~2147 to ~2184 here, giving the change-point
    # detection a clear regression around revision 241537.
    (241537, 2184.1),
    (241546, 2180.8),
    (241553, 2181.5),
    (241559, 2176.8),
    (241566, 2174.0),
    (241577, 2182.8),
    (241579, 2184.8),
    (241582, 2190.5),
    (241584, 2183.1),
    (241609, 2178.3),
    (241620, 2178.1),
    (241645, 2190.8),
    (241653, 2177.7),
    (241666, 2185.3),
    (241697, 2173.8),
    (241716, 2172.1),
    (241735, 2172.5),
    (241757, 2174.7),
    (241766, 2196.7),
    (241782, 2184.1),
]
def _MakeSampleChangePoint(x_value, median_before, median_after):
  """Builds a find_change_points.ChangePoint stub for use in these tests.

  Only the revision number and the medians before/after matter to the tests;
  every other field is filled with a placeholder.
  """
  placeholder_fields = {
      'window_start': 1,
      'window_end': 8,
      'size_before': None,
      'size_after': None,
      'relative_change': None,
      'std_dev_before': None,
      't_statistic': None,
      'degrees_of_freedom': None,
      'p_value': None,
  }
  return find_change_points.ChangePoint(
      x_value=x_value,
      median_before=median_before,
      median_after=median_after,
      extended_start=x_value,
      extended_end=x_value,
      **placeholder_fields)
class EndRevisionMatcher(object):
  """Equality matcher that compares an anomaly only by its end revision."""

  def __init__(self, end_revision):
    """Stores the end revision to compare against."""
    self._end_revision = end_revision

  def __eq__(self, rhs):
    """Returns True when `rhs` ends at the stored revision."""
    return rhs.end_revision == self._end_revision

  def __repr__(self):
    """Human-readable form shown when an assertion on this matcher fails."""
    return '<IsEndRevision {:d}>'.format(self._end_revision)
class ModelMatcher(object):
  """Equality matcher that compares an ndb entity (or stub) by its name."""

  def __init__(self, name):
    """Stores the entity name to compare against."""
    self._name = name

  def __eq__(self, rhs):
    """Returns True when `rhs` carries the stored name (key id, else .name)."""
    entity_name = rhs.key.string_id() if rhs.key else rhs.name
    return entity_name == self._name

  def __repr__(self):
    """Human-readable form shown when an assertion on this matcher fails."""
    return '<IsModel {}>'.format(self._name)
@ndb.tasklet
def _MockTasklet(*_):
  """No-op ndb tasklet: ignores all arguments and yields a None result."""
  raise ndb.Return(None)
@mock.patch.object(SheriffConfigClient, '__init__',
mock.MagicMock(return_value=None))
class ProcessAlertsTest(testing_common.TestCase):
def setUp(self):
super(ProcessAlertsTest, self).setUp()
self.SetCurrentUser('<EMAIL>', is_admin=True)
def _AddDataForTests(self, stats=None, masters=None):
if not masters:
masters = ['ChromiumGPU']
testing_common.AddTests(masters, ['linux-release'], {
'scrolling_benchmark': {
'ref': {},
},
})
for m in masters:
ref = utils.TestKey('%s/linux-release/scrolling_benchmark/ref' % m).get()
ref.units = 'ms'
for i in range(9000, 10070, 5):
# Internal-only data should be found.
test_container_key = utils.GetTestContainerKey(ref.key)
r = graph_data.Row(
id=i + 1,
value=float(i * 3),
parent=test_container_key,
internal_only=True)
if stats:
for s in stats:
setattr(r, s, i)
r.put()
def _DataSeries(self):
return [(r.revision, r, r.value) for r in list(graph_data.Row.query())]
@mock.patch.object(find_anomalies.find_change_points, 'FindChangePoints',
mock.MagicMock(return_value=[
_MakeSampleChangePoint(10011, 50, 100),
_MakeSampleChangePoint(10041, 200, 100),
_MakeSampleChangePoint(10061, 0, 100),
]))
@mock.patch.object(find_anomalies.email_sheriff, 'EmailSheriff')
def testProcessTest(self, mock_email_sheriff):
self._AddDataForTests()
test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
test = utils.TestKey(test_path).get()
test.UpdateSheriff()
test.put()
alert_group_key1 = alert_group.AlertGroup(
name='scrolling_benchmark',
subscription_name='sheriff1',
status=alert_group.AlertGroup.Status.untriaged,
active=True,
revision=alert_group.RevisionRange(
repository='chromium', start=10000, end=10070),
).put()
alert_group_key2 = alert_group.AlertGroup(
name='scrolling_benchmark',
subscription_name='sheriff2',
status=alert_group.AlertGroup.Status.untriaged,
active=True,
revision=alert_group.RevisionRange(
repository='chromium', start=10000, end=10070),
).put()
s1 = Subscription(name='sheriff1', visibility=VISIBILITY.PUBLIC)
s2 = Subscription(name='sheriff2', visibility=VISIBILITY.PUBLIC)
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([s1, s2], None))) as m:
find_anomalies.ProcessTests([test.key])
self.assertEqual(m.call_args_list, [mock.call(test.key.id())])
self.ExecuteDeferredTasks('default')
expected_calls = [
mock.call(
[ModelMatcher('sheriff1'),
ModelMatcher('sheriff2')],
ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
EndRevisionMatcher(10011)),
mock.call(
[ModelMatcher('sheriff1'),
ModelMatcher('sheriff2')],
ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
EndRevisionMatcher(10041)),
mock.call(
[ModelMatcher('sheriff1'),
ModelMatcher('sheriff2')],
ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
EndRevisionMatcher(10061))
]
self.assertEqual(expected_calls, mock_email_sheriff.call_args_list)
anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(len(anomalies), 3)
for a in anomalies:
self.assertEqual(a.groups, [alert_group_key1, alert_group_key2])
def AnomalyExists(anomalies, test, percent_changed, direction,
start_revision, end_revision, subscription_names,
internal_only, units, absolute_delta, statistic):
for a in anomalies:
if (a.test == test and a.percent_changed == percent_changed
and a.direction == direction and a.start_revision == start_revision
and a.end_revision == end_revision
and a.subscription_names == subscription_names
and a.internal_only == internal_only and a.units == units
and a.absolute_delta == absolute_delta
and a.statistic == statistic):
return True
return False
self.assertTrue(
AnomalyExists(
anomalies,
test.key,
percent_changed=100,
direction=anomaly.UP,
start_revision=10007,
end_revision=10011,
subscription_names=['sheriff1', 'sheriff2'],
internal_only=False,
units='ms',
absolute_delta=50,
statistic='avg'))
self.assertTrue(
AnomalyExists(
anomalies,
test.key,
percent_changed=-50,
direction=anomaly.DOWN,
start_revision=10037,
end_revision=10041,
subscription_names=['sheriff1', 'sheriff2'],
internal_only=False,
units='ms',
absolute_delta=-100,
statistic='avg'))
self.assertTrue(
AnomalyExists(
anomalies,
test.key,
percent_changed=sys.float_info.max,
direction=anomaly.UP,
start_revision=10057,
end_revision=10061,
internal_only=False,
units='ms',
subscription_names=['sheriff1', 'sheriff2'],
absolute_delta=100,
statistic='avg'))
# This is here just to verify that AnomalyExists returns False sometimes.
self.assertFalse(
AnomalyExists(
anomalies,
test.key,
percent_changed=100,
direction=anomaly.DOWN,
start_revision=10037,
end_revision=10041,
subscription_names=['sheriff1', 'sheriff2'],
internal_only=False,
units='ms',
absolute_delta=500,
statistic='avg'))
@mock.patch.object(find_anomalies, '_ProcessTestStat')
def testProcessTest_SkipsClankInternal(self, mock_process_stat):
mock_process_stat.side_effect = _MockTasklet
self._AddDataForTests(masters=['ClankInternal'])
test_path = 'ClankInternal/linux-release/scrolling_benchmark/ref'
test = utils.TestKey(test_path).get()
a = anomaly.Anomaly(
test=test.key,
start_revision=10061,
end_revision=10062,
statistic='avg')
a.put()
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([], None))) as m:
find_anomalies.ProcessTests([test.key])
self.assertEqual(m.call_args_list, [])
self.ExecuteDeferredTasks('default')
self.assertFalse(mock_process_stat.called)
@mock.patch.object(find_anomalies, '_ProcessTestStat')
def testProcessTest_UsesLastAlert_Avg(self, mock_process_stat):
mock_process_stat.side_effect = _MockTasklet
self._AddDataForTests()
test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
test = utils.TestKey(test_path).get()
a = anomaly.Anomaly(
test=test.key,
start_revision=10061,
end_revision=10062,
statistic='avg')
a.put()
test.UpdateSheriff()
test.put()
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([], None))):
find_anomalies.ProcessTests([test.key])
self.ExecuteDeferredTasks('default')
query = graph_data.Row.query(projection=['revision', 'timestamp', 'value'])
query = query.filter(graph_data.Row.revision > 10062)
query = query.filter(
graph_data.Row.parent_test == utils.OldStyleTestKey(test.key))
row_data = query.fetch()
rows = [(r.revision, r, r.value) for r in row_data]
mock_process_stat.assert_called_with(mock.ANY, mock.ANY, mock.ANY, rows,
None)
anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(len(anomalies), 1)
@mock.patch.object(find_anomalies, '_ProcessTestStat')
def testProcessTest_SkipsLastAlert_NotAvg(self, mock_process_stat):
self._AddDataForTests(stats=('count',))
test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
test = utils.TestKey(test_path).get()
a = anomaly.Anomaly(
test=test.key,
start_revision=10061,
end_revision=10062,
statistic='count')
a.put()
test.UpdateSheriff()
test.put()
@ndb.tasklet
def _AssertParams(config, test_entity, stat, rows, ref_rows):
del config
del test_entity
del stat
del ref_rows
assert rows[0][0] < a.end_revision
mock_process_stat.side_effect = _AssertParams
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([], None))):
find_anomalies.ProcessTests([test.key])
self.ExecuteDeferredTasks('default')
@mock.patch.object(
find_anomalies.find_change_points, 'FindChangePoints',
mock.MagicMock(return_value=[_MakeSampleChangePoint(10011, 100, 50)]))
def testProcessTest_ImprovementMarkedAsImprovement(self):
self._AddDataForTests()
test = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
test.improvement_direction = anomaly.DOWN
test.UpdateSheriff()
test.put()
s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([s], None))) as m:
find_anomalies.ProcessTests([test.key])
self.assertEqual(m.call_args_list, [mock.call(test.key.id())])
anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(len(anomalies), 1)
self.assertTrue(anomalies[0].is_improvement)
@mock.patch('logging.error')
def testProcessTest_NoSheriff_ErrorLogged(self, mock_logging_error):
self._AddDataForTests()
ref = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([], None))):
find_anomalies.ProcessTests([ref.key])
mock_logging_error.assert_called_with('No subscription for %s',
ref.key.string_id())
@mock.patch.object(find_anomalies.find_change_points, 'FindChangePoints',
mock.MagicMock(return_value=[
_MakeSampleChangePoint(10026, 55.2, 57.8),
_MakeSampleChangePoint(10041, 45.2, 37.8),
]))
@mock.patch.object(find_anomalies.email_sheriff, 'EmailSheriff')
def testProcessTest_FiltersOutImprovements(self, mock_email_sheriff):
self._AddDataForTests()
test = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
test.improvement_direction = anomaly.UP
test.UpdateSheriff()
test.put()
s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([s], None))) as m:
find_anomalies.ProcessTests([test.key])
self.assertEqual(m.call_args_list, [mock.call(test.key.id())])
self.ExecuteDeferredTasks('default')
mock_email_sheriff.assert_called_once_with(
[ModelMatcher('sheriff')],
ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
EndRevisionMatcher(10041))
@mock.patch.object(find_anomalies.find_change_points, 'FindChangePoints',
mock.MagicMock(return_value=[
_MakeSampleChangePoint(10011, 50, 100),
]))
@mock.patch.object(find_anomalies.email_sheriff, 'EmailSheriff')
def testProcessTest_InternalOnlyTest(self, mock_email_sheriff):
self._AddDataForTests()
test = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
test.internal_only = True
test.UpdateSheriff()
test.put()
s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([s], None))) as m:
find_anomalies.ProcessTests([test.key])
self.assertEqual(m.call_args_list, [mock.call(test.key.id())])
self.ExecuteDeferredTasks('default')
expected_calls = [
mock.call(
[ModelMatcher('sheriff')],
ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
EndRevisionMatcher(10011))
]
self.assertEqual(expected_calls, mock_email_sheriff.call_args_list)
anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(len(anomalies), 1)
self.assertEqual(test.key, anomalies[0].test)
self.assertEqual(100, anomalies[0].percent_changed)
self.assertEqual(anomaly.UP, anomalies[0].direction)
self.assertEqual(10007, anomalies[0].start_revision)
self.assertEqual(10011, anomalies[0].end_revision)
self.assertTrue(anomalies[0].internal_only)
def testProcessTest_CreatesAnAnomaly_RefMovesToo_BenchmarkDuration(self):
testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
'foo': {
'benchmark_duration': {
'ref': {}
}
},
})
ref = utils.TestKey(
'ChromiumGPU/linux-release/foo/benchmark_duration/ref').get()
non_ref = utils.TestKey(
'ChromiumGPU/linux-release/foo/benchmark_duration').get()
test_container_key = utils.GetTestContainerKey(ref.key)
test_container_key_non_ref = utils.GetTestContainerKey(non_ref.key)
for row in _TEST_ROW_DATA:
graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
graph_data.Row(
id=row[0], value=row[1], parent=test_container_key_non_ref).put()
ref.UpdateSheriff()
ref.put()
s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([s], None))) as m:
find_anomalies.ProcessTests([ref.key])
self.assertEqual(m.call_args_list, [mock.call(ref.key.id())])
new_anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(1, len(new_anomalies))
def testProcessTest_AnomaliesMatchRefSeries_NoAlertCreated(self):
# Tests that a Anomaly entity is not created if both the test and its
# corresponding ref build series have the same data.
testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
'scrolling_benchmark': {
'ref': {}
},
})
ref = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
non_ref = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark').get()
test_container_key = utils.GetTestContainerKey(ref.key)
test_container_key_non_ref = utils.GetTestContainerKey(non_ref.key)
for row in _TEST_ROW_DATA:
graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
graph_data.Row(
id=row[0], value=row[1], parent=test_container_key_non_ref).put()
ref.UpdateSheriff()
ref.put()
non_ref.UpdateSheriff()
non_ref.put()
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([], None))):
find_anomalies.ProcessTests([non_ref.key])
new_anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(0, len(new_anomalies))
def testProcessTest_AnomalyDoesNotMatchRefSeries_AlertCreated(self):
# Tests that an Anomaly entity is created when non-ref series goes up, but
# the ref series stays flat.
testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
'scrolling_benchmark': {
'ref': {}
},
})
ref = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
non_ref = utils.TestKey(
'ChromiumGPU/linux-release/scrolling_benchmark').get()
test_container_key = utils.GetTestContainerKey(ref.key)
test_container_key_non_ref = utils.GetTestContainerKey(non_ref.key)
for row in _TEST_ROW_DATA:
graph_data.Row(id=row[0], value=2125.375, parent=test_container_key).put()
graph_data.Row(
id=row[0], value=row[1], parent=test_container_key_non_ref).put()
ref.UpdateSheriff()
ref.put()
non_ref.UpdateSheriff()
non_ref.put()
s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
with mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([s], None))) as m:
find_anomalies.ProcessTests([non_ref.key])
self.assertEqual(m.call_args_list, [mock.call(non_ref.key.id())])
new_anomalies = anomaly.Anomaly.query().fetch()
self.assertEqual(len(new_anomalies), 1)
def testProcessTest_CreatesAnAnomaly(self):
  # Processing a series containing a step should create exactly one UP
  # anomaly whose start/end revisions match the assertions below.
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling_benchmark': {
          'ref': {}
      },
  })
  ref_test = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  container_key = utils.GetTestContainerKey(ref_test.key)
  for row in _TEST_ROW_DATA:
    graph_data.Row(id=row[0], value=row[1], parent=container_key).put()
  ref_test.UpdateSheriff()
  ref_test.put()
  subscription = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  match_mock = mock.MagicMock(return_value=([subscription], None))
  with mock.patch.object(SheriffConfigClient, 'Match', match_mock) as m:
    find_anomalies.ProcessTests([ref_test.key])
    self.assertEqual(m.call_args_list, [mock.call(ref_test.key.id())])
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.UP, new_anomalies[0].direction)
  self.assertEqual(241533, new_anomalies[0].start_revision)
  self.assertEqual(241546, new_anomalies[0].end_revision)
def testProcessTest_RefineAnomalyPlacement_OffByOneBefore(self):
  # Regression test for crbug/1041688: the anomaly for a step down must be
  # placed at the first low row (729764), not one sample earlier.
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'],
      {'blink_perf.layout': {
          'nested-percent-height-tables': {}
      }})
  test = utils.TestKey(
      'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables'
  ).get()
  test_container_key = utils.GetTestContainerKey(test.key)
  # (revision, value) pairs; values hover near ~480 and drop to ~430 at
  # revision 729764.
  sample_data = [
      (728446, 480.2504),
      (728462, 487.685),
      (728469, 486.6389),
      (728480, 477.6597),
      (728492, 471.2238),
      (728512, 480.4379),
      (728539, 464.5573),
      (728594, 489.0594),
      (728644, 484.4796),
      (728714, 489.5986),
      (728751, 489.474),
      (728788, 481.9336),
      (728835, 484.089),
      (728869, 485.4287),
      (728883, 476.8234),
      (728907, 487.4736),
      (728938, 490.601),
      (728986, 483.5039),
      (729021, 485.176),
      (729066, 484.5855),
      (729105, 483.9114),
      (729119, 483.559),
      (729161, 477.6875),
      (729201, 484.9668),
      (729240, 480.7091),
      (729270, 484.5506),
      (729292, 495.1445),
      (729309, 479.9111),
      (729329, 479.8815),
      (729391, 487.5683),
      (729430, 476.7355),
      (729478, 487.7251),
      (729525, 493.1012),
      (729568, 497.7565),
      (729608, 499.6481),
      (729642, 496.1591),
      (729658, 493.4581),
      (729687, 486.1097),
      (729706, 478.036),
      (729730, 480.4222),  # In crbug/1041688 this was the original placement.
      (729764, 421.0342),  # We instead should be setting it here.
      (729795, 428.0284),
      (729846, 433.8261),
      (729883, 429.49),
      (729920, 436.3342),
      (729975, 434.3996),
      (730011, 428.3672),
      (730054, 436.309),
      (730094, 435.3792),
      (730128, 433.0537),
  ]
  for row in sample_data:
    graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  with mock.patch.object(SheriffConfigClient, 'Match',
                         mock.MagicMock(return_value=([s], None))) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.test_path)])
  # Exactly one DOWN anomaly, ending at the first low row.
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.DOWN, new_anomalies[0].direction)
  self.assertEqual(729731, new_anomalies[0].start_revision)
  self.assertEqual(729764, new_anomalies[0].end_revision)
def testProcessTest_RefineAnomalyPlacement_OffByOneStable(self):
  # Regression test for crbug/1041688: the anomaly for a step up after a
  # long stable stretch must end at the first high row (734673), not at the
  # preceding stable row.
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'], {
          'memory.desktop': {
              ('memory:chrome:all_processes:'
               'reported_by_chrome:v8:effective_size_avg'): {}
          }
      })
  test = utils.TestKey(
      ('ChromiumPerf/linux-perf/memory.desktop/'
       'memory:chrome:all_processes:reported_by_chrome:v8:effective_size_avg'
      )).get()
  test_container_key = utils.GetTestContainerKey(test.key)
  # (revision, value) pairs: two nearly-flat segments (~1381.2K then
  # ~1381.7K) followed by a jump to ~1529.9K at revision 734673.
  sample_data = [
      (733480, 1381203.0),
      (733494, 1381220.0),
      (733504, 1381212.0),
      (733524, 1381220.0),
      (733538, 1381211.0),
      (733544, 1381212.0),
      (733549, 1381220.0),
      (733563, 1381220.0),
      (733581, 1381220.0),
      (733597, 1381212.0),
      (733611, 1381228.0),
      (733641, 1381212.0),
      (733675, 1381204.0),
      (733721, 1381212.0),
      (733766, 1381211.0),
      (733804, 1381204.0),
      (733835, 1381219.0),
      (733865, 1381211.0),
      (733885, 1381219.0),
      (733908, 1381204.0),
      (733920, 1381211.0),
      (733937, 1381220.0),
      (734091, 1381211.0),
      (734133, 1381219.0),
      (734181, 1381204.0),
      (734211, 1381720.0),
      (734248, 1381712.0),
      (734277, 1381696.0),
      (734311, 1381704.0),
      (734341, 1381703.0),
      (734372, 1381704.0),
      (734405, 1381703.0),
      (734431, 1381711.0),
      (734456, 1381720.0),
      (734487, 1381703.0),
      (734521, 1381704.0),
      (734554, 1381726.0),
      (734598, 1381704.0),
      (734630, 1381703.0),  # In crbug/1041688 this is where it was placed.
      (734673, 1529888.0),  # This is where it should be.
      (734705, 1529888.0),
      (734739, 1529860.0),
      (734770, 1529860.0),
      (734793, 1529888.0),
      (734829, 1529860.0),
  ]
  for row in sample_data:
    graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  with mock.patch.object(SheriffConfigClient, 'Match',
                         mock.MagicMock(return_value=([s], None))) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.test_path)])
  # Exactly one UP anomaly, ending at the first high row.
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.UP, new_anomalies[0].direction)
  self.assertEqual(734631, new_anomalies[0].start_revision)
  self.assertEqual(734673, new_anomalies[0].end_revision)
def testProcessTest_RefineAnomalyPlacement_MinSize0Max2Elements(self):
  # With min_segment_size=0 and a small max_window_size, a single-revision
  # step (100 -> 155 at revision 7001) is still detected and pinned to the
  # exact revision where the value changed.
  testing_common.AddTests(['ChromiumPerf'], ['linux-perf'],
                          {'sizes': {
                              'method_count': {}
                          }})
  test = utils.TestKey(('ChromiumPerf/linux-perf/sizes/method_count')).get()
  container_key = utils.GetTestContainerKey(test.key)
  custom_config = {
      'max_window_size': 10,
      'min_absolute_change': 50,
      'min_relative_change': 0,
      'min_segment_size': 0,
  }
  anomaly_config.AnomalyConfig(
      config=custom_config, patterns=[test.test_path]).put()
  test.UpdateSheriff()
  test.put()
  self.assertEqual(custom_config, anomaly_config.GetAnomalyConfigDict(test))
  # Eleven flat rows at 100, then three rows at 155.
  sample_data = [(rev, 100) for rev in range(6990, 7001)]
  sample_data += [(rev, 155) for rev in range(7001, 7004)]
  for revision, value in sample_data:
    graph_data.Row(id=revision, value=value, parent=container_key).put()
  test.UpdateSheriff()
  test.put()
  subscription = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  match_mock = mock.MagicMock(return_value=([subscription], None))
  with mock.patch.object(SheriffConfigClient, 'Match', match_mock) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.test_path)])
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.UP, new_anomalies[0].direction)
  self.assertEqual(7001, new_anomalies[0].start_revision)
  self.assertEqual(7001, new_anomalies[0].end_revision)
def testProcessTest_MultipleChangePoints(self):
  # A series with more than one step should yield one anomaly per change
  # point, each covering its own revision range.
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'],
      {'blink_perf.layout': {
          'nested-percent-height-tables': {}
      }})
  test = utils.TestKey(
      'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables'
  ).get()
  test_container_key = utils.GetTestContainerKey(test.key)
  # (revision, value) pairs containing two upward shifts (roughly ~14M ->
  # ~16M and ~16M -> ~19.6M); the assertions below pin the detected ranges.
  sample_data = [
      (804863, 13830765),
      (804867, 16667862),
      (804879, 13929296),
      (804891, 13823876),
      (804896, 13908794),
      (804900, 13899281),
      (804907, 14901462),
      (804921, 13890597),
      (804935, 13969113),
      (804946, 13996520),
      (804957, 13913104),
      (805143, 16770364),
      (805175, 14858529),
      (805179, 14013942),
      (805185, 14857516),
      (805195, 14895168),
      (805196, 14944037),
      (805205, 13919484),
      (805211, 15736581),
      (805231, 14730142),
      (805236, 13892102),
      (805247, 14808876),
      (805253, 14903648),
      (805262, 13896626),
      (805276, 15797878),
      (805281, 14542593),
      (805285, 15733168),
      (805290, 13882841),
      (805302, 15727394),
      (805314, 15758058),
      (805333, 16074960),
      (805345, 16142162),
      (805359, 16138912),
      (805384, 17914289),
      (805412, 18368834),
      (805428, 18055197),
      (805457, 19673614),
      (805482, 19705606),
      (805502, 19609089),
      (805509, 19576745),
      (805531, 19600059),
      (805550, 19702969),
      (805564, 19660953),
      (805584, 19830273),
      (805600, 19800662),
      (805606, 19493150),
      (805620, 19700545),
      (805624, 19623731),
      (805628, 19683921),
      (805634, 19660001),
  ]
  for row in sample_data:
    graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  with mock.patch.object(SheriffConfigClient, 'Match',
                         mock.MagicMock(return_value=([s], None))) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.test_path)])
  # Two UP anomalies with non-overlapping revision ranges.
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(2, len(new_anomalies))
  self.assertEqual(anomaly.UP, new_anomalies[0].direction)
  self.assertEqual(805429, new_anomalies[0].start_revision)
  self.assertEqual(805457, new_anomalies[0].end_revision)
  self.assertEqual(805315, new_anomalies[1].start_revision)
  self.assertEqual(805428, new_anomalies[1].end_revision)
def testProcessTest__RefineAnomalyPlacement_BalancedEstimator1(self):
  # Placement test for the balanced estimator: a noisy bimodal series
  # (~1.97M/2.01M) steps up to ~2.23M at revision 818440; the anomaly must
  # be placed exactly there.
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'],
      {'blink_perf.layout': {
          'nested-percent-height-tables': {}
      }})
  test = utils.TestKey(
      'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables'
  ).get()
  test_container_key = utils.GetTestContainerKey(test.key)
  # (revision, value) pairs.
  sample_data = [
      (818289, 2009771),
      (818290, 1966080),
      (818291, 1966080),
      (818293, 1966080),
      (818294, 2053461),
      (818296, 2009771),
      (818298, 1966080),
      (818301, 2009771),
      (818303, 2009771),
      (818305, 2009771),
      (818306, 2009771),
      (818307, 1966080),
      (818308, 2009771),
      (818309, 2009771),
      (818310, 1966080),
      (818311, 2009771),
      (818312, 1966080),
      (818317, 1966080),
      (818318, 1966080),
      (818320, 2053461),
      (818322, 2009771),
      (818326, 1966080),
      (818331, 1966080),
      (818335, 1966080),
      (818340, 2009771),
      (818347, 2009771),
      (818350, 1966080),
      (818353, 1966080),
      (818354, 2009771),
      (818361, 2009771),
      (818362, 1966080),
      (818374, 2009771),
      (818379, 2009771),
      (818382, 2053461),
      (818389, 2009771),
      (818402, 1966080),
      (818409, 2009771),
      (818416, 1966080),
      (818420, 1966080),
      (818430, 2009771),
      (818440, 2228224),
      (818450, 2228224),
      (818461, 2228224),
      (818469, 2228224),
      (818481, 2228224),
      (818498, 2271915),
      (818514, 2228224),
      (818531, 2271915),
      (818571, 2271915),
      (818583, 2271915),
  ]
  for row in sample_data:
    graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  with mock.patch.object(SheriffConfigClient, 'Match',
                         mock.MagicMock(return_value=([s], None))) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.test_path)])
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.UP, new_anomalies[0].direction)
  self.assertEqual(818431, new_anomalies[0].start_revision)
  self.assertEqual(818440, new_anomalies[0].end_revision)
def testProcessTest__RefineAnomalyPlacement_BalancedEstimator2(self):
  # Placement test for the balanced estimator: the series contains a weak
  # dip (794132) followed by the true step down (794154); the anomaly must
  # land on the stronger change point.
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'],
      {'blink_perf.layout': {
          'nested-percent-height-tables': {}
      }})
  test = utils.TestKey(
      'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables'
  ).get()
  test_container_key = utils.GetTestContainerKey(test.key)
  # (revision, value) pairs: values sit near ~136-137 and drop to ~107-111
  # at revision 794154.
  sample_data = [
      (793468, 136.5382),
      (793486, 137.7192),
      (793495, 137.4038),
      (793504, 137.4919),
      (793505, 137.4465),
      (793518, 136.9279),
      (793525, 137.3501),
      (793528, 136.9622),
      (793543, 137.1027),
      (793550, 137.7351),
      (793555, 137.1511),
      (793559, 137.2094),
      (793560, 136.5192),
      (793565, 138.1536),
      (793580, 137.4172),
      (793590, 136.8746),
      (793601, 137.5016),
      (793609, 137.0773),
      (793625, 137.4702),
      (793646, 135.9019),
      (793657, 137.2827),
      (793702, 136.5978),
      (793712, 136.0732),
      (793721, 132.1820),
      (793742, 122.1631),
      (793760, 136.3152),
      (793774, 136.9616),
      (793788, 136.8438),
      (794016, 136.3022),
      (794024, 136.3495),
      (794027, 136.3145),
      (794036, 136.5502),
      (794043, 136.3861),
      (794051, 136.2035),
      (794059, 136.2348),
      (794066, 136.2594),
      (794074, 135.9686),
      (794088, 136.7375),
      (794107, 136.5570),
      (794132, 129.9924),  # This one is a potential change point - but weak
      (794143, 135.8275),
      (794154, 107.2502),  # This is a better change point
      (794158, 108.3948),
      (794160, 107.3564),
      (794196, 107.9707),
      (794236, 111.3168),
      (794268, 108.7905),
      (794281, 111.1065),
      (794319, 109.7699),
      (794320, 109.8082),
  ]
  for row in sample_data:
    graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  s = Subscription(name='sheriff', visibility=VISIBILITY.PUBLIC)
  with mock.patch.object(SheriffConfigClient, 'Match',
                         mock.MagicMock(return_value=([s], None))) as m:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(m.call_args_list, [mock.call(test.test_path)])
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.DOWN, new_anomalies[0].direction)
  self.assertEqual(794144, new_anomalies[0].start_revision)
  self.assertEqual(794154, new_anomalies[0].end_revision)
def testProcessTest__RefineAnomalyPlacement_OnePassEDivisive(self):
  # NOTE(review): this test only inserts rows and updates the sheriff; it
  # never calls find_anomalies.ProcessTests, and asserts that no anomalies
  # exist afterwards. Presumably it guards against one-pass E-Divisive
  # flagging 1608562683 when analysis starts mid-series - confirm intent.
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'],
      {'blink_perf.layout': {
          'nested-percent-height-tables': {}
      }})
  test = utils.TestKey(
      'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables'
  ).get()
  test_container_key = utils.GetTestContainerKey(test.key)
  # 1608562683 will be anomaly if we run E-Divisive from 1608525044.
  sample_data = [
      (1608404024, 246272),
      (1608407660, 249344),
      (1608417360, 246272),
      (1608422547, 246784),
      (1608434678, 248832),
      (1608440108, 248320),
      (1608442260, 250880),
      (1608452306, 248832),
      (1608457404, 247296),
      (1608459374, 247296),
      (1608463502, 249344),
      (1608469894, 247296),
      (1608471945, 247296),
      (1608477313, 246272),
      (1608481014, 248832),
      (1608484511, 247296),
      (1608486532, 246784),
      (1608488082, 248832),
      (1608491972, 246784),
      (1608493895, 248832),
      (1608495366, 248320),
      (1608498927, 252416),
      (1608501293, 246784),
      (1608505924, 246272),
      (1608507885, 246784),
      (1608509593, 250368),
      (1608512971, 246784),
      (1608515075, 246272),
      (1608519889, 247296),
      (1608521956, 254464),
      (1608525044, 247296),
      (1608526992, 244736),
      (1608528640, 245760),
      (1608530391, 246784),
      (1608531986, 245760),
      (1608533763, 245760),
      (1608538109, 246272),
      (1608539988, 246784),
      (1608545280, 251392),
      (1608547200, 251026),
      (1608550736, 248320),
      (1608552820, 248832),
      (1608554780, 251392),
      (1608560589, 247296),
      (1608562683, 251904),
      (1608564319, 268800),
      (1608566089, 263168),
      (1608567823, 266240),
      (1608569370, 266752),
      (1608570921, 264192),
  ]
  for row in sample_data:
    graph_data.Row(id=row[0], value=row[1], parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  # No anomalies should have been created by the steps above.
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(0, len(new_anomalies))
def testMakeAnomalyEntity_NoRefBuild(self):
  # When the suite has no 'ref' or '*_ref' sibling, the created alert has
  # no ref_test set.
  testing_common.AddTests(['ChromiumPerf'], ['linux'], {
      'page_cycler_v2': {
          'cnn': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  suite_test = utils.TestKey('ChromiumPerf/linux/page_cycler_v2').get()
  testing_common.AddRows(suite_test.test_path, [100, 200, 300, 400])
  change_point = _MakeSampleChangePoint(10011, 50, 100)
  alert = find_anomalies._MakeAnomalyEntity(
      change_point, suite_test, 'avg', self._DataSeries(), {}).get_result()
  self.assertIsNone(alert.ref_test)
def testMakeAnomalyEntity_RefBuildSlash(self):
  # A child test literally named 'ref' is resolved as the alert's ref_test.
  testing_common.AddTests(['ChromiumPerf'], ['linux'], {
      'page_cycler_v2': {
          'ref': {},
          'cnn': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  suite_test = utils.TestKey('ChromiumPerf/linux/page_cycler_v2').get()
  testing_common.AddRows(suite_test.test_path, [100, 200, 300, 400])
  change_point = _MakeSampleChangePoint(10011, 50, 100)
  alert = find_anomalies._MakeAnomalyEntity(
      change_point, suite_test, 'avg', self._DataSeries(), {}).get_result()
  self.assertEqual(alert.ref_test.string_id(),
                   'ChromiumPerf/linux/page_cycler_v2/ref')
def testMakeAnomalyEntity_RefBuildUnderscore(self):
  # A sibling named '<name>_ref' is resolved as the ref_test, and no
  # display range is set on the alert.
  testing_common.AddTests(['ChromiumPerf'], ['linux'], {
      'page_cycler_v2': {
          'cnn': {},
          'cnn_ref': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  cnn_test = utils.TestKey('ChromiumPerf/linux/page_cycler_v2/cnn').get()
  testing_common.AddRows(cnn_test.test_path, [100, 200, 300, 400])
  change_point = _MakeSampleChangePoint(10011, 50, 100)
  alert = find_anomalies._MakeAnomalyEntity(
      change_point, cnn_test, 'avg', self._DataSeries(), {}).get_result()
  self.assertEqual(alert.ref_test.string_id(),
                   'ChromiumPerf/linux/page_cycler_v2/cnn_ref')
  self.assertIsNone(alert.display_start)
  self.assertIsNone(alert.display_end)
def testMakeAnomalyEntity_RevisionRanges(self):
  # NOTE(review): the expected display_start/display_end (203/302) should
  # correspond to the r_commit_pos values stamped on the rows below -
  # confirm against _MakeAnomalyEntity's display-range logic.
  testing_common.AddTests(['ClankInternal'], ['linux'], {
      'page_cycler_v2': {
          'cnn': {},
          'cnn_ref': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  cnn_test = utils.TestKey('ClankInternal/linux/page_cycler_v2/cnn').get()
  testing_common.AddRows(cnn_test.test_path, [100, 200, 300, 400])
  for row in graph_data.Row.query():
    # Different enough to ensure it is picked up properly.
    row.r_commit_pos = int(row.value) + 2
    row.put()
  change_point = _MakeSampleChangePoint(300, 50, 100)
  alert = find_anomalies._MakeAnomalyEntity(
      change_point, cnn_test, 'avg', self._DataSeries(), {}).get_result()
  self.assertEqual(alert.display_start, 203)
  self.assertEqual(alert.display_end, 302)
def testMakeAnomalyEntity_AddsOwnership(self):
  # OWNERS, BUG_COMPONENTS and INFO_BLURB diagnostics attached to the suite
  # should be copied into the alert's ownership dict.
  data_samples = [{
      'type': 'GenericSet',
      'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
      'values': ['<EMAIL>', '<EMAIL>']
  }, {
      'type': 'GenericSet',
      'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
      'values': ['abc']
  }, {
      'type': 'GenericSet',
      'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb828',
      'values': ['This is an info blurb.']
  }]
  test_key = utils.TestKey('ChromiumPerf/linux/page_cycler_v2/cnn')
  testing_common.AddTests(['ChromiumPerf'], ['linux'], {
      'page_cycler_v2': {
          'cnn': {},
          'cnn_ref': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  test = test_key.get()
  testing_common.AddRows(test.test_path, [100, 200, 300, 400])
  suite_key = utils.TestKey('ChromiumPerf/linux/page_cycler_v2')
  diagnostic_names = [
      reserved_infos.OWNERS.name,
      reserved_infos.BUG_COMPONENTS.name,
      reserved_infos.INFO_BLURB.name,
  ]
  # Attach one sparse diagnostic per sample, valid over all revisions.
  for sample, diagnostic_name in zip(data_samples, diagnostic_names):
    histogram.SparseDiagnostic(
        data=sample,
        test=suite_key,
        start_revision=1,
        end_revision=sys.maxsize,
        id=sample['guid'],
        name=diagnostic_name).put()
  alert = find_anomalies._MakeAnomalyEntity(
      _MakeSampleChangePoint(10011, 50, 100), test, 'avg', self._DataSeries(),
      {}).get_result()
  self.assertEqual(alert.ownership['component'], 'abc')
  self.assertListEqual(alert.ownership['emails'],
                       ['<EMAIL>', '<EMAIL>'])
  self.assertEqual(alert.ownership['info_blurb'], 'This is an info blurb.')
def testMakeAnomalyEntity_AlertGrouping(self):
  # An ALERT_GROUPING diagnostic attached to the suite should be copied
  # onto the created anomaly as its alert_grouping list.
  data_sample = {
      'type': 'GenericSet',
      'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
      'values': ['group123', 'group234']
  }
  testing_common.AddTests(['ChromiumPerf'], ['linux'], {
      'page_cycler_v2': {
          'cnn': {},
          'cnn_ref': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  test = utils.TestKey('ChromiumPerf/linux/page_cycler_v2/cnn').get()
  testing_common.AddRows(test.test_path, [100, 200, 300, 400])
  suite_key = utils.TestKey('ChromiumPerf/linux/page_cycler_v2')
  entity = histogram.SparseDiagnostic(
      data=data_sample,
      test=suite_key,
      start_revision=1,
      end_revision=sys.maxsize,
      id=data_sample['guid'],
      name=reserved_infos.ALERT_GROUPING.name)
  # Store the diagnostic once. (The original test called put() twice on the
  # same entity - a redundant duplicate write to the same key.)
  entity.put()
  alert = find_anomalies._MakeAnomalyEntity(
      _MakeSampleChangePoint(10011, 50, 100), test, 'avg', self._DataSeries(),
      {}).get_result()
  self.assertEqual(alert.alert_grouping, ['group123', 'group234'])
if __name__ == '__main__':
  # Allow running this test module directly via the unittest runner.
  unittest.main()
|
[
"dashboard.models.anomaly.Anomaly.query",
"dashboard.common.testing_common.AddRows",
"dashboard.common.utils.GetTestContainerKey",
"dashboard.common.utils.OldStyleTestKey",
"dashboard.models.anomaly_config.AnomalyConfig",
"dashboard.common.testing_common.AddTests",
"dashboard.models.subscription.Subscription",
"unittest.main",
"dashboard.find_anomalies.ProcessTests",
"dashboard.models.anomaly_config.GetAnomalyConfigDict",
"dashboard.models.histogram.SparseDiagnostic",
"google.appengine.ext.ndb.Return",
"mock.patch.object",
"dashboard.models.graph_data.Row",
"mock.call",
"dashboard.models.alert_group.RevisionRange",
"mock.patch",
"dashboard.models.graph_data.Row.query",
"dashboard.find_change_points.ChangePoint",
"dashboard.models.anomaly.Anomaly",
"dashboard.common.utils.TestKey",
"mock.MagicMock"
] |
[((2595, 2927), 'dashboard.find_change_points.ChangePoint', 'find_change_points.ChangePoint', ([], {'x_value': 'x_value', 'median_before': 'median_before', 'median_after': 'median_after', 'window_start': '(1)', 'window_end': '(8)', 'size_before': 'None', 'size_after': 'None', 'relative_change': 'None', 'std_dev_before': 'None', 't_statistic': 'None', 'degrees_of_freedom': 'None', 'p_value': 'None', 'extended_start': 'x_value', 'extended_end': 'x_value'}), '(x_value=x_value, median_before=median_before,\n median_after=median_after, window_start=1, window_end=8, size_before=\n None, size_after=None, relative_change=None, std_dev_before=None,\n t_statistic=None, degrees_of_freedom=None, p_value=None, extended_start\n =x_value, extended_end=x_value)\n', (2625, 2927), False, 'from dashboard import find_change_points\n'), ((4038, 4054), 'google.appengine.ext.ndb.Return', 'ndb.Return', (['None'], {}), '(None)\n', (4048, 4054), False, 'from google.appengine.ext import ndb\n'), ((5536, 5599), 'mock.patch.object', 'mock.patch.object', (['find_anomalies.email_sheriff', '"""EmailSheriff"""'], {}), "(find_anomalies.email_sheriff, 'EmailSheriff')\n", (5553, 5599), False, 'import mock\n'), ((10219, 10272), 'mock.patch.object', 'mock.patch.object', (['find_anomalies', '"""_ProcessTestStat"""'], {}), "(find_anomalies, '_ProcessTestStat')\n", (10236, 10272), False, 'import mock\n'), ((11015, 11068), 'mock.patch.object', 'mock.patch.object', (['find_anomalies', '"""_ProcessTestStat"""'], {}), "(find_anomalies, '_ProcessTestStat')\n", (11032, 11068), False, 'import mock\n'), ((12260, 12313), 'mock.patch.object', 'mock.patch.object', (['find_anomalies', '"""_ProcessTestStat"""'], {}), "(find_anomalies, '_ProcessTestStat')\n", (12277, 12313), False, 'import mock\n'), ((14070, 14097), 'mock.patch', 'mock.patch', (['"""logging.error"""'], {}), "('logging.error')\n", (14080, 14097), False, 'import mock\n'), ((14883, 14946), 'mock.patch.object', 'mock.patch.object', 
(['find_anomalies.email_sheriff', '"""EmailSheriff"""'], {}), "(find_anomalies.email_sheriff, 'EmailSheriff')\n", (14900, 14946), False, 'import mock\n'), ((15993, 16056), 'mock.patch.object', 'mock.patch.object', (['find_anomalies.email_sheriff', '"""EmailSheriff"""'], {}), "(find_anomalies.email_sheriff, 'EmailSheriff')\n", (16010, 16056), False, 'import mock\n'), ((4128, 4161), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (4142, 4161), False, 'import mock\n'), ((45510, 45525), 'unittest.main', 'unittest.main', ([], {}), '()\n', (45523, 45525), False, 'import unittest\n'), ((4439, 4532), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (['masters', "['linux-release']", "{'scrolling_benchmark': {'ref': {}}}"], {}), "(masters, ['linux-release'], {'scrolling_benchmark':\n {'ref': {}}})\n", (4462, 4532), False, 'from dashboard.common import testing_common\n'), ((6466, 6525), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff1"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff1', visibility=VISIBILITY.PUBLIC)\n", (6478, 6525), False, 'from dashboard.models.subscription import Subscription\n'), ((6535, 6594), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff2"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff2', visibility=VISIBILITY.PUBLIC)\n", (6547, 6594), False, 'from dashboard.models.subscription import Subscription\n'), ((10564, 10657), 'dashboard.models.anomaly.Anomaly', 'anomaly.Anomaly', ([], {'test': 'test.key', 'start_revision': '(10061)', 'end_revision': '(10062)', 'statistic': '"""avg"""'}), "(test=test.key, start_revision=10061, end_revision=10062,\n statistic='avg')\n", (10579, 10657), False, 'from dashboard.models import anomaly\n'), ((11332, 11425), 'dashboard.models.anomaly.Anomaly', 'anomaly.Anomaly', ([], {'test': 'test.key', 'start_revision': '(10061)', 'end_revision': '(10062)', 'statistic': 
'"""avg"""'}), "(test=test.key, start_revision=10061, end_revision=10062,\n statistic='avg')\n", (11347, 11425), False, 'from dashboard.models import anomaly\n'), ((11735, 11802), 'dashboard.models.graph_data.Row.query', 'graph_data.Row.query', ([], {'projection': "['revision', 'timestamp', 'value']"}), "(projection=['revision', 'timestamp', 'value'])\n", (11755, 11802), False, 'from dashboard.models import graph_data\n'), ((12547, 12642), 'dashboard.models.anomaly.Anomaly', 'anomaly.Anomaly', ([], {'test': 'test.key', 'start_revision': '(10061)', 'end_revision': '(10062)', 'statistic': '"""count"""'}), "(test=test.key, start_revision=10061, end_revision=10062,\n statistic='count')\n", (12562, 12642), False, 'from dashboard.models import anomaly\n'), ((13623, 13681), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (13635, 13681), False, 'from dashboard.models.subscription import Subscription\n'), ((15232, 15290), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (15244, 15290), False, 'from dashboard.models.subscription import Subscription\n'), ((16323, 16381), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (16335, 16381), False, 'from dashboard.models.subscription import Subscription\n'), ((17440, 17550), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumGPU']", "['linux-release']", "{'foo': {'benchmark_duration': {'ref': {}}}}"], {}), "(['ChromiumGPU'], ['linux-release'], {'foo': {\n 'benchmark_duration': {'ref': {}}}})\n", (17463, 17550), False, 'from dashboard.common import testing_common\n'), ((17828, 17862), 
'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['ref.key'], {}), '(ref.key)\n', (17853, 17862), False, 'from dashboard.common import utils\n'), ((17896, 17934), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['non_ref.key'], {}), '(non_ref.key)\n', (17921, 17934), False, 'from dashboard.common import utils\n'), ((18189, 18247), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (18201, 18247), False, 'from dashboard.models.subscription import Subscription\n'), ((18793, 18895), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumGPU']", "['linux-release']", "{'scrolling_benchmark': {'ref': {}}}"], {}), "(['ChromiumGPU'], ['linux-release'], {\n 'scrolling_benchmark': {'ref': {}}})\n", (18816, 18895), False, 'from dashboard.common import testing_common\n'), ((19137, 19171), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['ref.key'], {}), '(ref.key)\n', (19162, 19171), False, 'from dashboard.common import utils\n'), ((19205, 19243), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['non_ref.key'], {}), '(non_ref.key)\n', (19230, 19243), False, 'from dashboard.common import utils\n'), ((19995, 20097), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumGPU']", "['linux-release']", "{'scrolling_benchmark': {'ref': {}}}"], {}), "(['ChromiumGPU'], ['linux-release'], {\n 'scrolling_benchmark': {'ref': {}}})\n", (20018, 20097), False, 'from dashboard.common import testing_common\n'), ((20339, 20373), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['ref.key'], {}), '(ref.key)\n', (20364, 20373), False, 'from dashboard.common import utils\n'), ((20407, 20445), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', 
(['non_ref.key'], {}), '(non_ref.key)\n', (20432, 20445), False, 'from dashboard.common import utils\n'), ((20748, 20806), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (20760, 20806), False, 'from dashboard.models.subscription import Subscription\n'), ((21207, 21309), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumGPU']", "['linux-release']", "{'scrolling_benchmark': {'ref': {}}}"], {}), "(['ChromiumGPU'], ['linux-release'], {\n 'scrolling_benchmark': {'ref': {}}})\n", (21230, 21309), False, 'from dashboard.common import testing_common\n'), ((21459, 21493), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['ref.key'], {}), '(ref.key)\n', (21484, 21493), False, 'from dashboard.common import utils\n'), ((21650, 21708), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (21662, 21708), False, 'from dashboard.models.subscription import Subscription\n'), ((22305, 22428), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'blink_perf.layout': {'nested-percent-height-tables': {}}}"], {}), "(['ChromiumPerf'], ['linux-perf'], {\n 'blink_perf.layout': {'nested-percent-height-tables': {}}})\n", (22328, 22428), False, 'from dashboard.common import testing_common\n'), ((22607, 22642), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (22632, 22642), False, 'from dashboard.common import utils\n'), ((24308, 24366), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (24320, 24366), False, 'from 
dashboard.models.subscription import Subscription\n'), ((24968, 25131), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'memory.desktop': {\n 'memory:chrome:all_processes:reported_by_chrome:v8:effective_size_avg': {}}\n }"], {}), "(['ChromiumPerf'], ['linux-perf'], {'memory.desktop':\n {'memory:chrome:all_processes:reported_by_chrome:v8:effective_size_avg':\n {}}})\n", (24991, 25131), False, 'from dashboard.common import testing_common\n'), ((25406, 25441), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (25431, 25441), False, 'from dashboard.common import utils\n'), ((27008, 27066), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (27020, 27066), False, 'from dashboard.models.subscription import Subscription\n'), ((27672, 27767), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'sizes': {'method_count': {}}}"], {}), "(['ChromiumPerf'], ['linux-perf'], {'sizes': {\n 'method_count': {}}})\n", (27695, 27767), False, 'from dashboard.common import testing_common\n'), ((27957, 27992), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (27982, 27992), False, 'from dashboard.common import utils\n'), ((28843, 28901), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (28855, 28901), False, 'from dashboard.models.subscription import Subscription\n'), ((29480, 29603), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'blink_perf.layout': {'nested-percent-height-tables': {}}}"], {}), "(['ChromiumPerf'], 
['linux-perf'], {\n 'blink_perf.layout': {'nested-percent-height-tables': {}}})\n", (29503, 29603), False, 'from dashboard.common import testing_common\n'), ((29782, 29817), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (29807, 29817), False, 'from dashboard.common import utils\n'), ((31399, 31457), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (31411, 31457), False, 'from dashboard.models.subscription import Subscription\n'), ((32184, 32307), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'blink_perf.layout': {'nested-percent-height-tables': {}}}"], {}), "(['ChromiumPerf'], ['linux-perf'], {\n 'blink_perf.layout': {'nested-percent-height-tables': {}}})\n", (32207, 32307), False, 'from dashboard.common import testing_common\n'), ((32486, 32521), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (32511, 32521), False, 'from dashboard.common import utils\n'), ((34053, 34111), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (34065, 34111), False, 'from dashboard.models.subscription import Subscription\n'), ((34716, 34839), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'blink_perf.layout': {'nested-percent-height-tables': {}}}"], {}), "(['ChromiumPerf'], ['linux-perf'], {\n 'blink_perf.layout': {'nested-percent-height-tables': {}}})\n", (34739, 34839), False, 'from dashboard.common import testing_common\n'), ((35018, 35053), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (35043, 35053), 
False, 'from dashboard.common import utils\n'), ((36719, 36777), 'dashboard.models.subscription.Subscription', 'Subscription', ([], {'name': '"""sheriff"""', 'visibility': 'VISIBILITY.PUBLIC'}), "(name='sheriff', visibility=VISIBILITY.PUBLIC)\n", (36731, 36777), False, 'from dashboard.models.subscription import Subscription\n'), ((37382, 37505), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux-perf']", "{'blink_perf.layout': {'nested-percent-height-tables': {}}}"], {}), "(['ChromiumPerf'], ['linux-perf'], {\n 'blink_perf.layout': {'nested-percent-height-tables': {}}})\n", (37405, 37505), False, 'from dashboard.common import testing_common\n'), ((37684, 37719), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['test.key'], {}), '(test.key)\n', (37709, 37719), False, 'from dashboard.common import utils\n'), ((39611, 39729), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux']", "{'page_cycler_v2': {'cnn': {}, 'yahoo': {}, 'nytimes': {}}}"], {}), "(['ChromiumPerf'], ['linux'], {'page_cycler_v2': {\n 'cnn': {}, 'yahoo': {}, 'nytimes': {}}})\n", (39634, 39729), False, 'from dashboard.common import testing_common\n'), ((39859, 39919), 'dashboard.common.testing_common.AddRows', 'testing_common.AddRows', (['test.test_path', '[100, 200, 300, 400]'], {}), '(test.test_path, [100, 200, 300, 400])\n', (39881, 39919), False, 'from dashboard.common import testing_common\n'), ((40166, 40295), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux']", "{'page_cycler_v2': {'ref': {}, 'cnn': {}, 'yahoo': {}, 'nytimes': {}}}"], {}), "(['ChromiumPerf'], ['linux'], {'page_cycler_v2': {\n 'ref': {}, 'cnn': {}, 'yahoo': {}, 'nytimes': {}}})\n", (40189, 40295), False, 'from dashboard.common import testing_common\n'), ((40437, 40497), 'dashboard.common.testing_common.AddRows', 'testing_common.AddRows', 
(['test.test_path', '[100, 200, 300, 400]'], {}), '(test.test_path, [100, 200, 300, 400])\n', (40459, 40497), False, 'from dashboard.common import testing_common\n'), ((40822, 40955), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux']", "{'page_cycler_v2': {'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}}"], {}), "(['ChromiumPerf'], ['linux'], {'page_cycler_v2': {\n 'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}})\n", (40845, 40955), False, 'from dashboard.common import testing_common\n'), ((41101, 41161), 'dashboard.common.testing_common.AddRows', 'testing_common.AddRows', (['test.test_path', '[100, 200, 300, 400]'], {}), '(test.test_path, [100, 200, 300, 400])\n', (41123, 41161), False, 'from dashboard.common import testing_common\n'), ((41570, 41704), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ClankInternal']", "['linux']", "{'page_cycler_v2': {'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}}"], {}), "(['ClankInternal'], ['linux'], {'page_cycler_v2': {\n 'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}})\n", (41593, 41704), False, 'from dashboard.common import testing_common\n'), ((41851, 41911), 'dashboard.common.testing_common.AddRows', 'testing_common.AddRows', (['test.test_path', '[100, 200, 300, 400]'], {}), '(test.test_path, [100, 200, 300, 400])\n', (41873, 41911), False, 'from dashboard.common import testing_common\n'), ((41927, 41949), 'dashboard.models.graph_data.Row.query', 'graph_data.Row.query', ([], {}), '()\n', (41947, 41949), False, 'from dashboard.models import graph_data\n'), ((42799, 42853), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2/cnn"""'], {}), "('ChromiumPerf/linux/page_cycler_v2/cnn')\n", (42812, 42853), False, 'from dashboard.common import utils\n'), ((42858, 42991), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux']", 
"{'page_cycler_v2': {'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}}"], {}), "(['ChromiumPerf'], ['linux'], {'page_cycler_v2': {\n 'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}})\n", (42881, 42991), False, 'from dashboard.common import testing_common\n'), ((43091, 43151), 'dashboard.common.testing_common.AddRows', 'testing_common.AddRows', (['test.test_path', '[100, 200, 300, 400]'], {}), '(test.test_path, [100, 200, 300, 400])\n', (43113, 43151), False, 'from dashboard.common import testing_common\n'), ((43169, 43219), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2"""'], {}), "('ChromiumPerf/linux/page_cycler_v2')\n", (43182, 43219), False, 'from dashboard.common import utils\n'), ((43233, 43410), 'dashboard.models.histogram.SparseDiagnostic', 'histogram.SparseDiagnostic', ([], {'data': 'data_samples[0]', 'test': 'suite_key', 'start_revision': '(1)', 'end_revision': 'sys.maxsize', 'id': "data_samples[0]['guid']", 'name': 'reserved_infos.OWNERS.name'}), "(data=data_samples[0], test=suite_key,\n start_revision=1, end_revision=sys.maxsize, id=data_samples[0]['guid'],\n name=reserved_infos.OWNERS.name)\n", (43259, 43410), False, 'from dashboard.models import histogram\n'), ((43483, 43668), 'dashboard.models.histogram.SparseDiagnostic', 'histogram.SparseDiagnostic', ([], {'data': 'data_samples[1]', 'test': 'suite_key', 'start_revision': '(1)', 'end_revision': 'sys.maxsize', 'id': "data_samples[1]['guid']", 'name': 'reserved_infos.BUG_COMPONENTS.name'}), "(data=data_samples[1], test=suite_key,\n start_revision=1, end_revision=sys.maxsize, id=data_samples[1]['guid'],\n name=reserved_infos.BUG_COMPONENTS.name)\n", (43509, 43668), False, 'from dashboard.models import histogram\n'), ((43741, 43922), 'dashboard.models.histogram.SparseDiagnostic', 'histogram.SparseDiagnostic', ([], {'data': 'data_samples[2]', 'test': 'suite_key', 'start_revision': '(1)', 'end_revision': 'sys.maxsize', 'id': "data_samples[2]['guid']", 
'name': 'reserved_infos.INFO_BLURB.name'}), "(data=data_samples[2], test=suite_key,\n start_revision=1, end_revision=sys.maxsize, id=data_samples[2]['guid'],\n name=reserved_infos.INFO_BLURB.name)\n", (43767, 43922), False, 'from dashboard.models import histogram\n'), ((44583, 44716), 'dashboard.common.testing_common.AddTests', 'testing_common.AddTests', (["['ChromiumPerf']", "['linux']", "{'page_cycler_v2': {'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}}"], {}), "(['ChromiumPerf'], ['linux'], {'page_cycler_v2': {\n 'cnn': {}, 'cnn_ref': {}, 'yahoo': {}, 'nytimes': {}}})\n", (44606, 44716), False, 'from dashboard.common import testing_common\n'), ((44862, 44922), 'dashboard.common.testing_common.AddRows', 'testing_common.AddRows', (['test.test_path', '[100, 200, 300, 400]'], {}), '(test.test_path, [100, 200, 300, 400])\n', (44884, 44922), False, 'from dashboard.common import testing_common\n'), ((44940, 44990), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2"""'], {}), "('ChromiumPerf/linux/page_cycler_v2')\n", (44953, 44990), False, 'from dashboard.common import utils\n'), ((45004, 45183), 'dashboard.models.histogram.SparseDiagnostic', 'histogram.SparseDiagnostic', ([], {'data': 'data_sample', 'test': 'suite_key', 'start_revision': '(1)', 'end_revision': 'sys.maxsize', 'id': "data_sample['guid']", 'name': 'reserved_infos.ALERT_GROUPING.name'}), "(data=data_sample, test=suite_key, start_revision\n =1, end_revision=sys.maxsize, id=data_sample['guid'], name=\n reserved_infos.ALERT_GROUPING.name)\n", (45030, 45183), False, 'from dashboard.models import histogram\n'), ((6738, 6777), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (6765, 6777), False, 'from dashboard import find_anomalies\n'), ((10837, 10876), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (10864, 10876), False, 'from 
dashboard import find_anomalies\n'), ((11641, 11680), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (11668, 11680), False, 'from dashboard import find_anomalies\n'), ((13106, 13145), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (13133, 13145), False, 'from dashboard import find_anomalies\n'), ((13820, 13859), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (13847, 13859), False, 'from dashboard import find_anomalies\n'), ((14421, 14459), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[ref.key]'], {}), '([ref.key])\n', (14448, 14459), False, 'from dashboard import find_anomalies\n'), ((15429, 15468), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (15456, 15468), False, 'from dashboard import find_anomalies\n'), ((16520, 16559), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (16547, 16559), False, 'from dashboard import find_anomalies\n'), ((18386, 18424), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[ref.key]'], {}), '([ref.key])\n', (18413, 18424), False, 'from dashboard import find_anomalies\n'), ((19668, 19710), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[non_ref.key]'], {}), '([non_ref.key])\n', (19695, 19710), False, 'from dashboard import find_anomalies\n'), ((20945, 20987), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[non_ref.key]'], {}), '([non_ref.key])\n', (20972, 20987), False, 'from dashboard import find_anomalies\n'), ((21847, 21885), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[ref.key]'], {}), '([ref.key])\n', (21874, 21885), False, 'from dashboard import find_anomalies\n'), 
((24505, 24544), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (24532, 24544), False, 'from dashboard import find_anomalies\n'), ((27205, 27244), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (27232, 27244), False, 'from dashboard import find_anomalies\n'), ((28325, 28366), 'dashboard.models.anomaly_config.GetAnomalyConfigDict', 'anomaly_config.GetAnomalyConfigDict', (['test'], {}), '(test)\n', (28360, 28366), False, 'from dashboard.models import anomaly_config\n'), ((29040, 29079), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (29067, 29079), False, 'from dashboard import find_anomalies\n'), ((31596, 31635), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (31623, 31635), False, 'from dashboard import find_anomalies\n'), ((34250, 34289), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (34277, 34289), False, 'from dashboard import find_anomalies\n'), ((36916, 36955), 'dashboard.find_anomalies.ProcessTests', 'find_anomalies.ProcessTests', (['[test.key]'], {}), '([test.key])\n', (36943, 36955), False, 'from dashboard import find_anomalies\n'), ((4806, 4840), 'dashboard.common.utils.GetTestContainerKey', 'utils.GetTestContainerKey', (['ref.key'], {}), '(ref.key)\n', (4831, 4840), False, 'from dashboard.common import utils\n'), ((5756, 5780), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['test_path'], {}), '(test_path)\n', (5769, 5780), False, 'from dashboard.common import utils\n'), ((6679, 6724), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s1, s2], None)'}), '(return_value=([s1, s2], None))\n', (6693, 6724), False, 'import mock\n'), ((7657, 7680), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), 
'()\n', (7678, 7680), False, 'from dashboard.models import anomaly\n'), ((10524, 10548), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['test_path'], {}), '(test_path)\n', (10537, 10548), False, 'from dashboard.common import utils\n'), ((10784, 10823), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([], None)'}), '(return_value=([], None))\n', (10798, 10823), False, 'import mock\n'), ((11292, 11316), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['test_path'], {}), '(test_path)\n', (11305, 11316), False, 'from dashboard.common import utils\n'), ((11593, 11632), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([], None)'}), '(return_value=([], None))\n', (11607, 11632), False, 'import mock\n'), ((11925, 11956), 'dashboard.common.utils.OldStyleTestKey', 'utils.OldStyleTestKey', (['test.key'], {}), '(test.key)\n', (11946, 11956), False, 'from dashboard.common import utils\n'), ((12184, 12207), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (12205, 12207), False, 'from dashboard.models import anomaly\n'), ((12507, 12531), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['test_path'], {}), '(test_path)\n', (12520, 12531), False, 'from dashboard.common import utils\n'), ((13058, 13097), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([], None)'}), '(return_value=([], None))\n', (13072, 13097), False, 'import mock\n'), ((13447, 13513), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (13460, 13513), False, 'from dashboard.common import utils\n'), ((13766, 13806), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (13780, 13806), False, 'import mock\n'), ((13945, 13968), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (13966, 13968), False, 'from dashboard.models 
import anomaly\n'), ((14207, 14273), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (14220, 14273), False, 'from dashboard.common import utils\n'), ((14373, 14412), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([], None)'}), '(return_value=([], None))\n', (14387, 14412), False, 'import mock\n'), ((15058, 15124), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (15071, 15124), False, 'from dashboard.common import utils\n'), ((15375, 15415), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (15389, 15415), False, 'import mock\n'), ((16162, 16228), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (16175, 16228), False, 'from dashboard.common import utils\n'), ((16466, 16506), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (16480, 16506), False, 'import mock\n'), ((16964, 16987), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (16985, 16987), False, 'from dashboard.models import anomaly\n'), ((17623, 17692), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/foo/benchmark_duration/ref"""'], {}), "('ChromiumGPU/linux-release/foo/benchmark_duration/ref')\n", (17636, 17692), False, 'from dashboard.common import utils\n'), ((17722, 17787), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/foo/benchmark_duration"""'], {}), "('ChromiumGPU/linux-release/foo/benchmark_duration')\n", (17735, 17787), False, 'from dashboard.common import utils\n'), ((18332, 18372), 
'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (18346, 18372), False, 'import mock\n'), ((18513, 18536), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (18534, 18536), False, 'from dashboard.models import anomaly\n'), ((18938, 19004), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (18951, 19004), False, 'from dashboard.common import utils\n'), ((19034, 19096), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark')\n", (19047, 19096), False, 'from dashboard.common import utils\n'), ((19620, 19659), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([], None)'}), '(return_value=([], None))\n', (19634, 19659), False, 'import mock\n'), ((19731, 19754), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (19752, 19754), False, 'from dashboard.models import anomaly\n'), ((20140, 20206), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (20153, 20206), False, 'from dashboard.common import utils\n'), ((20236, 20298), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark')\n", (20249, 20298), False, 'from dashboard.common import utils\n'), ((20891, 20931), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (20905, 20931), False, 'import mock\n'), ((21080, 21103), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (21101, 21103), False, 'from dashboard.models import anomaly\n'), 
((21352, 21418), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumGPU/linux-release/scrolling_benchmark/ref"""'], {}), "('ChromiumGPU/linux-release/scrolling_benchmark/ref')\n", (21365, 21418), False, 'from dashboard.common import utils\n'), ((21793, 21833), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (21807, 21833), False, 'import mock\n'), ((21974, 21997), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (21995, 21997), False, 'from dashboard.models import anomaly\n'), ((22474, 22566), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables"""'], {}), "(\n 'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables')\n", (22487, 22566), False, 'from dashboard.common import utils\n'), ((24451, 24491), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (24465, 24491), False, 'import mock\n'), ((24635, 24658), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (24656, 24658), False, 'from dashboard.models import anomaly\n'), ((25218, 25352), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux-perf/memory.desktop/memory:chrome:all_processes:reported_by_chrome:v8:effective_size_avg"""'], {}), "(\n 'ChromiumPerf/linux-perf/memory.desktop/memory:chrome:all_processes:reported_by_chrome:v8:effective_size_avg'\n )\n", (25231, 25352), False, 'from dashboard.common import utils\n'), ((27151, 27191), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (27165, 27191), False, 'import mock\n'), ((27335, 27358), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (27356, 27358), False, 'from dashboard.models import anomaly\n'), ((27864, 27923), 'dashboard.common.utils.TestKey', 
'utils.TestKey', (['"""ChromiumPerf/linux-perf/sizes/method_count"""'], {}), "('ChromiumPerf/linux-perf/sizes/method_count')\n", (27877, 27923), False, 'from dashboard.common import utils\n'), ((28156, 28233), 'dashboard.models.anomaly_config.AnomalyConfig', 'anomaly_config.AnomalyConfig', ([], {'config': 'custom_config', 'patterns': '[test.test_path]'}), '(config=custom_config, patterns=[test.test_path])\n', (28184, 28233), False, 'from dashboard.models import anomaly_config\n'), ((28986, 29026), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (29000, 29026), False, 'import mock\n'), ((29170, 29193), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (29191, 29193), False, 'from dashboard.models import anomaly\n'), ((29649, 29741), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables"""'], {}), "(\n 'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables')\n", (29662, 29741), False, 'from dashboard.common import utils\n'), ((31542, 31582), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (31556, 31582), False, 'import mock\n'), ((31726, 31749), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (31747, 31749), False, 'from dashboard.models import anomaly\n'), ((32353, 32445), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables"""'], {}), "(\n 'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables')\n", (32366, 32445), False, 'from dashboard.common import utils\n'), ((34196, 34236), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (34210, 34236), False, 'import mock\n'), ((34380, 34403), 'dashboard.models.anomaly.Anomaly.query', 
'anomaly.Anomaly.query', ([], {}), '()\n', (34401, 34403), False, 'from dashboard.models import anomaly\n'), ((34885, 34977), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables"""'], {}), "(\n 'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables')\n", (34898, 34977), False, 'from dashboard.common import utils\n'), ((36862, 36902), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([s], None)'}), '(return_value=([s], None))\n', (36876, 36902), False, 'import mock\n'), ((37046, 37069), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (37067, 37069), False, 'from dashboard.models import anomaly\n'), ((37551, 37643), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables"""'], {}), "(\n 'ChromiumPerf/linux-perf/blink_perf.layout/nested-percent-height-tables')\n", (37564, 37643), False, 'from dashboard.common import utils\n'), ((39484, 39507), 'dashboard.models.anomaly.Anomaly.query', 'anomaly.Anomaly.query', ([], {}), '()\n', (39505, 39507), False, 'from dashboard.models import anomaly\n'), ((39798, 39848), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2"""'], {}), "('ChromiumPerf/linux/page_cycler_v2')\n", (39811, 39848), False, 'from dashboard.common import utils\n'), ((40376, 40426), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2"""'], {}), "('ChromiumPerf/linux/page_cycler_v2')\n", (40389, 40426), False, 'from dashboard.common import utils\n'), ((41036, 41090), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2/cnn"""'], {}), "('ChromiumPerf/linux/page_cycler_v2/cnn')\n", (41049, 41090), False, 'from dashboard.common import utils\n'), ((41785, 41840), 'dashboard.common.utils.TestKey', 'utils.TestKey', 
(['"""ClankInternal/linux/page_cycler_v2/cnn"""'], {}), "('ClankInternal/linux/page_cycler_v2/cnn')\n", (41798, 41840), False, 'from dashboard.common import utils\n'), ((44797, 44851), 'dashboard.common.utils.TestKey', 'utils.TestKey', (['"""ChromiumPerf/linux/page_cycler_v2/cnn"""'], {}), "('ChromiumPerf/linux/page_cycler_v2/cnn')\n", (44810, 44851), False, 'from dashboard.common import utils\n'), ((4602, 4663), 'dashboard.common.utils.TestKey', 'utils.TestKey', (["('%s/linux-release/scrolling_benchmark/ref' % m)"], {}), "('%s/linux-release/scrolling_benchmark/ref' % m)\n", (4615, 4663), False, 'from dashboard.common import utils\n'), ((5160, 5182), 'dashboard.models.graph_data.Row.query', 'graph_data.Row.query', ([], {}), '()\n', (5180, 5182), False, 'from dashboard.models import graph_data\n'), ((17972, 18038), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (17986, 18038), False, 'from dashboard.models import graph_data\n'), ((18051, 18125), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key_non_ref'}), '(id=row[0], value=row[1], parent=test_container_key_non_ref)\n', (18065, 18125), False, 'from dashboard.models import graph_data\n'), ((19281, 19347), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (19295, 19347), False, 'from dashboard.models import graph_data\n'), ((19360, 19434), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key_non_ref'}), '(id=row[0], value=row[1], parent=test_container_key_non_ref)\n', (19374, 19434), False, 'from dashboard.models import graph_data\n'), ((20483, 20551), 'dashboard.models.graph_data.Row', 'graph_data.Row', 
([], {'id': 'row[0]', 'value': '(2125.375)', 'parent': 'test_container_key'}), '(id=row[0], value=2125.375, parent=test_container_key)\n', (20497, 20551), False, 'from dashboard.models import graph_data\n'), ((20564, 20638), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key_non_ref'}), '(id=row[0], value=row[1], parent=test_container_key_non_ref)\n', (20578, 20638), False, 'from dashboard.models import graph_data\n'), ((21531, 21597), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (21545, 21597), False, 'from dashboard.models import graph_data\n'), ((24187, 24253), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (24201, 24253), False, 'from dashboard.models import graph_data\n'), ((24587, 24612), 'mock.call', 'mock.call', (['test.test_path'], {}), '(test.test_path)\n', (24596, 24612), False, 'import mock\n'), ((26887, 26953), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (26901, 26953), False, 'from dashboard.models import graph_data\n'), ((27287, 27312), 'mock.call', 'mock.call', (['test.test_path'], {}), '(test.test_path)\n', (27296, 27312), False, 'import mock\n'), ((28722, 28788), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (28736, 28788), False, 'from dashboard.models import graph_data\n'), ((29122, 29147), 'mock.call', 'mock.call', (['test.test_path'], {}), '(test.test_path)\n', (29131, 29147), False, 'import mock\n'), ((31278, 31344), 
'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (31292, 31344), False, 'from dashboard.models import graph_data\n'), ((31678, 31703), 'mock.call', 'mock.call', (['test.test_path'], {}), '(test.test_path)\n', (31687, 31703), False, 'import mock\n'), ((33932, 33998), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (33946, 33998), False, 'from dashboard.models import graph_data\n'), ((34332, 34357), 'mock.call', 'mock.call', (['test.test_path'], {}), '(test.test_path)\n', (34341, 34357), False, 'import mock\n'), ((36598, 36664), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (36612, 36664), False, 'from dashboard.models import graph_data\n'), ((36998, 37023), 'mock.call', 'mock.call', (['test.test_path'], {}), '(test.test_path)\n', (37007, 37023), False, 'import mock\n'), ((39351, 39417), 'dashboard.models.graph_data.Row', 'graph_data.Row', ([], {'id': 'row[0]', 'value': 'row[1]', 'parent': 'test_container_key'}), '(id=row[0], value=row[1], parent=test_container_key)\n', (39365, 39417), False, 'from dashboard.models import graph_data\n'), ((6043, 6115), 'dashboard.models.alert_group.RevisionRange', 'alert_group.RevisionRange', ([], {'repository': '"""chromium"""', 'start': '(10000)', 'end': '(10070)'}), "(repository='chromium', start=10000, end=10070)\n", (6068, 6115), False, 'from dashboard.models import alert_group\n'), ((6357, 6429), 'dashboard.models.alert_group.RevisionRange', 'alert_group.RevisionRange', ([], {'repository': '"""chromium"""', 'start': '(10000)', 'end': '(10070)'}), "(repository='chromium', start=10000, end=10070)\n", (6382, 6429), False, 
'from dashboard.models import alert_group\n')]
|
from pyflakes.checker import Checker
import sys
import ast
import os
from pathlib import Path
from filecmp import dircmp
import subprocess
from pytest import raises
import pytest
from ..removestar import (names_to_replace, star_imports, get_names,
get_names_from_dir, get_names_dynamically, fix_code,
get_mod_filename, replace_imports,
is_noqa_comment_allowing_star_import,
ExternalModuleError)
code_mod1 = """\
a = 1
aa = 2
b = 3
"""
mod1_names = {'a', 'aa', 'b'}
code_mod2 = """\
b = 1
c = 2
cc = 3
"""
mod2_names = {'b', 'c', 'cc'}
code_mod3 = """\
name = 0
"""
mod3_names = {'name'}
code_mod4 = """\
from .mod1 import *
from .mod2 import *
from .mod3 import name
def func():
return a + b + c + d + d + name
"""
mod4_names = {'a', 'aa', 'b', 'c', 'cc', 'name', 'func'}
code_mod4_fixed = """\
from .mod1 import a
from .mod2 import b, c
from .mod3 import name
def func():
return a + b + c + d + d + name
"""
code_mod5 = """\
from module.mod1 import *
from module.mod2 import *
from module.mod3 import name
def func():
return a + b + c + d + d + name
"""
mod5_names = {'a', 'aa', 'b', 'c', 'cc', 'name', 'func'}
code_mod5_fixed = """\
from module.mod1 import a
from module.mod2 import b, c
from module.mod3 import name
def func():
return a + b + c + d + d + name
"""
code_mod6 = """\
from os.path import *
isfile(join('a', 'b'))
"""
code_mod6_fixed = """\
from os.path import isfile, join
isfile(join('a', 'b'))
"""
code_mod7 = """\
from .mod6 import *
"""
code_mod7_fixed = ""
mod7_names = {'isfile', 'join'}
code_mod8 = """\
a = 1
b = 2
c = 3
__all__ = ['a']
__all__ += ['b']
"""
mod8_names = {'a', 'b'}
code_mod9 = """\
from .mod8 import *
def func():
return a + b
"""
code_mod9_fixed = """\
from .mod8 import a, b
def func():
return a + b
"""
mod9_names = {'a', 'b', 'func'}
code_submod1 = """\
from ..mod1 import *
from ..mod2 import *
from ..mod3 import name
from .submod3 import *
def func():
return a + b + c + d + d + e + name
"""
submod1_names = {'a', 'aa', 'b', 'c', 'cc', 'e', 'name', 'func'}
code_submod1_fixed = """\
from ..mod1 import a
from ..mod2 import b, c
from ..mod3 import name
from .submod3 import e
def func():
return a + b + c + d + d + e + name
"""
code_submod2 = """\
from module.mod1 import *
from module.mod2 import *
from module.mod3 import name
from module.submod.submod3 import *
def func():
return a + b + c + d + d + e + name
"""
submod2_names = {'a', 'aa', 'b', 'c', 'cc', 'e', 'name', 'func'}
code_submod2_fixed = """\
from module.mod1 import a
from module.mod2 import b, c
from module.mod3 import name
from module.submod.submod3 import e
def func():
return a + b + c + d + d + e + name
"""
code_submod3 = """\
e = 1
"""
submod3_names = {'e'}
code_submod4 = """\
from . import *
func()
"""
submod4_names = {'func'}
code_submod4_fixed = """\
from . import func
func()
"""
code_submod_init = """\
from .submod1 import func
"""
submod_names = {'func'}
# An actual import adds submod1 and submod3 to the submod namespace, since
# they are imported submodule names. The static code does not yet support
# these. If any other imports happen first, like 'import submod.submod2',
# those would be included as well.
submod_dynamic_names = {'submod1', 'submod3', 'func'}
code_bad_syntax = """\
from mod
"""
code_mod_unfixable = """\
from .mod1 import *;
from .mod2 import\t*
def func():
return a + c
"""
mod_unfixable_names = {'a', 'aa', 'b', 'c', 'cc', 'func'}
code_mod_commented_unused_star = """\
from .mod1 import * # comment about mod1
from .mod2 import * # noqa
"""
mod_commented_unused_star_names = {'a', 'aa', 'b', 'c', 'cc'}
code_mod_commented_unused_star_fixed = """\
# comment about mod1
from .mod2 import * # noqa
"""
code_mod_commented_star = """\
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
from .mod3 import * # generic comment
def func():
return a + c + name
"""
mod_commented_star_names = {'a', 'aa', 'b', 'c', 'cc', 'name', 'func'}
code_mod_commented_star_fixed = """\
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
from .mod3 import name # generic comment
def func():
return a + c + name
"""
code_submod_recursive_init = """\
from .submod1 import *
"""
submod_recursive_names = {'a', 'b'}
submod_recursive_dynamic_names = {'submod1', 'a', 'b'}
code_submod_recursive_submod1 = """\
a = 1
b = 2
"""
submod_recursive_submod1_names = {'a', 'b'}
code_submod_recursive_submod2 = """\
from . import *
def func():
return a + 1
"""
submod_recursive_submod2_names = {'a', 'b', 'func'}
submod_recursive_submod2_dynamic_names = {'a', 'b', 'func', 'submod1'}
code_submod_recursive_submod2_fixed = """\
from . import a
def func():
return a + 1
"""
def create_module(module):
    """Write the fixture package to disk under *module*.

    Creates ``module`` itself, plus the ``submod`` and ``submod_recursive``
    subpackages, populating each directory from the ``code_*`` constants
    defined above.
    """
    def _write_tree(directory, contents):
        # One directory at a time: make it, then dump each source file.
        os.makedirs(directory)
        for filename, source in contents.items():
            with open(directory/filename, 'w') as f:
                f.write(source)

    _write_tree(module, {
        'mod1.py': code_mod1,
        'mod2.py': code_mod2,
        'mod3.py': code_mod3,
        'mod4.py': code_mod4,
        'mod5.py': code_mod5,
        'mod6.py': code_mod6,
        'mod7.py': code_mod7,
        'mod8.py': code_mod8,
        'mod9.py': code_mod9,
        '__init__.py': '',
        'mod_bad.py': code_bad_syntax,
        'mod_unfixable.py': code_mod_unfixable,
        'mod_commented_unused_star.py': code_mod_commented_unused_star,
        'mod_commented_star.py': code_mod_commented_star,
    })
    _write_tree(module/'submod', {
        '__init__.py': code_submod_init,
        'submod1.py': code_submod1,
        'submod2.py': code_submod2,
        'submod3.py': code_submod3,
        'submod4.py': code_submod4,
    })
    _write_tree(module/'submod_recursive', {
        '__init__.py': code_submod_recursive_init,
        'submod1.py': code_submod_recursive_submod1,
        'submod2.py': code_submod_recursive_submod2,
    })
def test_names_to_replace():
    """names_to_replace() reports exactly the undefined names in a module."""
    def undefined(code):
        # Run pyflakes over the parsed source and collect what it flags.
        return names_to_replace(Checker(ast.parse(code)))

    fully_defined = [code_mod1, code_mod2, code_mod3, code_mod7, code_mod8,
                     code_submod3, code_submod_init, code_submod_recursive_init,
                     code_submod_recursive_submod1]
    for code in fully_defined:
        assert undefined(code) == set()

    for code in (code_mod4, code_mod5):
        assert undefined(code) == {'a', 'b', 'c', 'd'}
    for code in (code_submod1, code_submod2):
        assert undefined(code) == {'a', 'b', 'c', 'd', 'e'}

    assert undefined(code_submod4) == {'func'}
    assert undefined(code_mod6) == {'isfile', 'join'}
    assert undefined(code_submod_recursive_submod2) == {'a'}
    assert undefined(code_mod9) == {'a', 'b'}
    assert undefined(code_mod_unfixable) == {'a', 'c'}
    # 'noqa'-commented star imports suppress nothing here: the unused-star
    # module defines no undefined names at all.
    assert undefined(code_mod_commented_unused_star) == set()
    assert undefined(code_mod_commented_star) == {'a', 'c', 'name'}
def test_star_imports():
    """star_imports() lists star-imported module names in source order."""
    def stars_of(code):
        return star_imports(Checker(ast.parse(code)))

    for code in (code_mod1, code_mod2, code_mod3, code_mod8, code_submod3,
                 code_submod_init, code_submod_recursive_submod1):
        assert stars_of(code) == []

    assert stars_of(code_mod4) == ['.mod1', '.mod2']
    assert stars_of(code_mod5) == ['module.mod1', 'module.mod2']
    assert stars_of(code_mod6) == ['os.path']
    assert stars_of(code_mod7) == ['.mod6']
    assert stars_of(code_mod9) == ['.mod8']
    assert stars_of(code_submod1) == ['..mod1', '..mod2', '.submod3']
    assert stars_of(code_submod2) == ['module.mod1', 'module.mod2',
                                      'module.submod.submod3']
    # ``from . import *`` is reported as the bare relative module '.'.
    for code in (code_submod4, code_submod_recursive_submod2):
        assert stars_of(code) == ['.']
    assert stars_of(code_submod_recursive_init) == ['.submod1']
    # Unfixable and commented star imports are still *detected*.
    assert stars_of(code_mod_unfixable) == ['.mod1', '.mod2']
    assert stars_of(code_mod_commented_unused_star) == ['.mod1', '.mod2']
    assert stars_of(code_mod_commented_star) == ['.mod1', '.mod2', '.mod3']
def test_get_names():
    """get_names() returns a module's top-level names, keeping unresolved
    star imports in the ``mod.*`` form."""
    # TODO: Remove the imported name 'name' from the mod4/mod5/submod cases.
    cases = [
        (code_mod1, {'a', 'aa', 'b'}),
        (code_mod2, {'b', 'c', 'cc'}),
        (code_mod3, {'name'}),
        (code_mod4, {'.mod1.*', '.mod2.*', 'name', 'func'}),
        (code_mod5, {'module.mod1.*', 'module.mod2.*', 'name', 'func'}),
        (code_mod6, {'os.path.*'}),
        (code_submod_init, {'func'}),
        (code_submod1, {'..mod1.*', '..mod2.*', '.submod3.*', 'name', 'func'}),
        (code_submod2, {'module.mod1.*', 'module.mod2.*',
                        'module.submod.submod3.*', 'name', 'func'}),
        (code_submod3, {'e'}),
        (code_submod4, {'..*'}),
        (code_mod_unfixable, {'.mod1.*', '.mod2.*', 'func'}),
        (code_mod_commented_unused_star, {'.mod1.*', '.mod2.*'}),
        (code_mod_commented_star, {'.mod1.*', '.mod2.*', '.mod3.*', 'func'}),
        (code_submod_recursive_init, {'.submod1.*'}),
        (code_submod_recursive_submod1, {'a', 'b'}),
        (code_submod_recursive_submod2, {'..*', 'func'}),
    ]
    for code, expected in cases:
        assert get_names(code) == expected
    # Unparsable source propagates as SyntaxError.
    raises(SyntaxError, lambda: get_names(code_bad_syntax))
@pytest.mark.parametrize('relative', [True, False])
def test_get_names_from_dir(tmpdir, relative):
    """Exercise get_names_from_dir() against every fixture module.

    Runs once with a relative directory path and once with an absolute one,
    resolving both relative and absolute module names from the package root,
    from ``submod``, and from ``submod_recursive``.

    Fix vs. the previous revision: the four ``submod`` arguments in the
    ``submod_recursive`` section below (the mod6/mod7 allow_dynamic checks and
    the two mod_commented_* checks) were copy-paste leftovers and now
    correctly use ``submod_recursive``.
    """
    directory = tmpdir/'module'
    create_module(directory)
    if relative:
        chdir = tmpdir
        directory = Path('module')
    else:
        chdir = '.'
    curdir = os.path.abspath('.')
    try:
        os.chdir(chdir)
        # --- Resolved from the package root ---
        assert get_names_from_dir('.mod1', directory) == mod1_names
        assert get_names_from_dir('.mod2', directory) == mod2_names
        assert get_names_from_dir('.mod3', directory) == mod3_names
        assert get_names_from_dir('.mod4', directory) == mod4_names
        assert get_names_from_dir('.mod5', directory) == mod5_names
        # mod6/mod7 star-import an external module, which needs a dynamic
        # import; with allow_dynamic=False they must refuse instead.
        assert get_names_from_dir('.mod6', directory) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('.mod6', directory, allow_dynamic=False))
        assert get_names_from_dir('.mod7', directory) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('.mod7', directory, allow_dynamic=False))
        assert get_names_from_dir('.mod8', directory) == mod8_names
        assert get_names_from_dir('.mod9', directory) == mod9_names
        assert get_names_from_dir('.mod_unfixable', directory) == mod_unfixable_names
        assert get_names_from_dir('.mod_commented_unused_star', directory) == mod_commented_unused_star_names
        assert get_names_from_dir('.mod_commented_star', directory) == mod_commented_star_names
        assert get_names_from_dir('.submod', directory) == submod_names
        assert get_names_from_dir('.submod.submod1', directory) == submod1_names
        assert get_names_from_dir('.submod.submod2', directory) == submod2_names
        assert get_names_from_dir('.submod.submod3', directory) == submod3_names
        assert get_names_from_dir('.submod.submod4', directory) == submod4_names
        assert get_names_from_dir('.submod_recursive', directory) == submod_recursive_names
        assert get_names_from_dir('.submod_recursive.submod1', directory) == submod_recursive_submod1_names
        assert get_names_from_dir('.submod_recursive.submod2', directory) == submod_recursive_submod2_names
        assert get_names_from_dir('module.mod1', directory) == mod1_names
        assert get_names_from_dir('module.mod2', directory) == mod2_names
        assert get_names_from_dir('module.mod3', directory) == mod3_names
        assert get_names_from_dir('module.mod4', directory) == mod4_names
        assert get_names_from_dir('module.mod5', directory) == mod5_names
        assert get_names_from_dir('module.mod6', directory) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod6', directory, allow_dynamic=False))
        assert get_names_from_dir('module.mod7', directory) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod7', directory, allow_dynamic=False))
        assert get_names_from_dir('module.mod8', directory) == mod8_names
        assert get_names_from_dir('module.mod9', directory) == mod9_names
        assert get_names_from_dir('module.mod_unfixable', directory) == mod_unfixable_names
        assert get_names_from_dir('module.mod_commented_unused_star', directory) == mod_commented_unused_star_names
        assert get_names_from_dir('module.mod_commented_star', directory) == mod_commented_star_names
        assert get_names_from_dir('module.submod', directory) == submod_names
        assert get_names_from_dir('module.submod.submod1', directory) == submod1_names
        assert get_names_from_dir('module.submod.submod2', directory) == submod2_names
        assert get_names_from_dir('module.submod.submod3', directory) == submod3_names
        assert get_names_from_dir('module.submod.submod4', directory) == submod4_names
        assert get_names_from_dir('module.submod_recursive', directory) == submod_recursive_names
        assert get_names_from_dir('module.submod_recursive.submod1', directory) == submod_recursive_submod1_names
        assert get_names_from_dir('module.submod_recursive.submod2', directory) == submod_recursive_submod2_names
        # --- Resolved from the submod subpackage ---
        submod = directory/'submod'
        assert get_names_from_dir('..submod', submod) == submod_names
        assert get_names_from_dir('.', submod) == submod_names
        assert get_names_from_dir('.submod1', submod) == submod1_names
        assert get_names_from_dir('.submod2', submod) == submod2_names
        assert get_names_from_dir('.submod3', submod) == submod3_names
        assert get_names_from_dir('.submod4', submod) == submod4_names
        assert get_names_from_dir('..mod1', submod) == mod1_names
        assert get_names_from_dir('..mod2', submod) == mod2_names
        assert get_names_from_dir('..mod3', submod) == mod3_names
        assert get_names_from_dir('..mod4', submod) == mod4_names
        assert get_names_from_dir('..mod5', submod) == mod5_names
        assert get_names_from_dir('..mod6', submod) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('..mod6', submod, allow_dynamic=False))
        assert get_names_from_dir('..mod7', submod) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('..mod7', submod, allow_dynamic=False))
        assert get_names_from_dir('..mod8', submod) == mod8_names
        assert get_names_from_dir('..mod9', submod) == mod9_names
        assert get_names_from_dir('..mod_unfixable', submod) == mod_unfixable_names
        assert get_names_from_dir('..mod_commented_unused_star', submod) == mod_commented_unused_star_names
        assert get_names_from_dir('..mod_commented_star', submod) == mod_commented_star_names
        assert get_names_from_dir('..submod_recursive', submod) == submod_recursive_names
        assert get_names_from_dir('..submod_recursive.submod1', submod) == submod_recursive_submod1_names
        assert get_names_from_dir('..submod_recursive.submod2', submod) == submod_recursive_submod2_names
        assert get_names_from_dir('module.mod1', submod) == mod1_names
        assert get_names_from_dir('module.mod2', submod) == mod2_names
        assert get_names_from_dir('module.mod3', submod) == mod3_names
        assert get_names_from_dir('module.mod4', submod) == mod4_names
        assert get_names_from_dir('module.mod5', submod) == mod5_names
        assert get_names_from_dir('module.mod6', submod) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod6', submod, allow_dynamic=False))
        assert get_names_from_dir('module.mod7', submod) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod7', submod, allow_dynamic=False))
        assert get_names_from_dir('module.mod8', submod) == mod8_names
        assert get_names_from_dir('module.mod9', submod) == mod9_names
        assert get_names_from_dir('module.mod_unfixable', submod) == mod_unfixable_names
        assert get_names_from_dir('module.mod_commented_unused_star', submod) == mod_commented_unused_star_names
        assert get_names_from_dir('module.mod_commented_star', submod) == mod_commented_star_names
        assert get_names_from_dir('module.submod', submod) == submod_names
        assert get_names_from_dir('module.submod.submod1', submod) == submod1_names
        assert get_names_from_dir('module.submod.submod2', submod) == submod2_names
        assert get_names_from_dir('module.submod.submod3', submod) == submod3_names
        assert get_names_from_dir('module.submod.submod4', submod) == submod4_names
        assert get_names_from_dir('module.submod_recursive', submod) == submod_recursive_names
        assert get_names_from_dir('module.submod_recursive.submod1', submod) == submod_recursive_submod1_names
        assert get_names_from_dir('module.submod_recursive.submod2', submod) == submod_recursive_submod2_names
        # --- Resolved from the submod_recursive subpackage ---
        submod_recursive = directory/'submod_recursive'
        assert get_names_from_dir('..submod', submod_recursive) == submod_names
        assert get_names_from_dir('..submod.submod1', submod_recursive) == submod1_names
        assert get_names_from_dir('..submod.submod2', submod_recursive) == submod2_names
        assert get_names_from_dir('..submod.submod3', submod_recursive) == submod3_names
        assert get_names_from_dir('..submod.submod4', submod_recursive) == submod4_names
        assert get_names_from_dir('..mod1', submod_recursive) == mod1_names
        assert get_names_from_dir('..mod2', submod_recursive) == mod2_names
        assert get_names_from_dir('..mod3', submod_recursive) == mod3_names
        assert get_names_from_dir('..mod4', submod_recursive) == mod4_names
        assert get_names_from_dir('..mod5', submod_recursive) == mod5_names
        assert get_names_from_dir('..mod6', submod_recursive) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('..mod6', submod_recursive, allow_dynamic=False))
        assert get_names_from_dir('..mod7', submod_recursive) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('..mod7', submod_recursive, allow_dynamic=False))
        assert get_names_from_dir('..mod8', submod_recursive) == mod8_names
        assert get_names_from_dir('..mod9', submod_recursive) == mod9_names
        assert get_names_from_dir('..mod_unfixable', submod_recursive) == mod_unfixable_names
        assert get_names_from_dir('..mod_commented_unused_star', submod_recursive) == mod_commented_unused_star_names
        assert get_names_from_dir('..mod_commented_star', submod_recursive) == mod_commented_star_names
        assert get_names_from_dir('.', submod_recursive) == submod_recursive_names
        assert get_names_from_dir('..submod_recursive', submod_recursive) == submod_recursive_names
        assert get_names_from_dir('.submod1', submod_recursive) == submod_recursive_submod1_names
        assert get_names_from_dir('.submod2', submod_recursive) == submod_recursive_submod2_names
        assert get_names_from_dir('module.mod1', submod_recursive) == mod1_names
        assert get_names_from_dir('module.mod2', submod_recursive) == mod2_names
        assert get_names_from_dir('module.mod3', submod_recursive) == mod3_names
        assert get_names_from_dir('module.mod4', submod_recursive) == mod4_names
        assert get_names_from_dir('module.mod5', submod_recursive) == mod5_names
        assert get_names_from_dir('module.mod6', submod_recursive) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod6', submod_recursive, allow_dynamic=False))
        assert get_names_from_dir('module.mod7', submod_recursive) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod7', submod_recursive, allow_dynamic=False))
        assert get_names_from_dir('module.mod8', submod_recursive) == mod8_names
        assert get_names_from_dir('module.mod9', submod_recursive) == mod9_names
        assert get_names_from_dir('module.mod_unfixable', submod_recursive) == mod_unfixable_names
        assert get_names_from_dir('module.mod_commented_unused_star', submod_recursive) == mod_commented_unused_star_names
        assert get_names_from_dir('module.mod_commented_star', submod_recursive) == mod_commented_star_names
        assert get_names_from_dir('module.submod', submod_recursive) == submod_names
        assert get_names_from_dir('module.submod.submod1', submod_recursive) == submod1_names
        assert get_names_from_dir('module.submod.submod2', submod_recursive) == submod2_names
        assert get_names_from_dir('module.submod.submod3', submod_recursive) == submod3_names
        assert get_names_from_dir('module.submod.submod4', submod_recursive) == submod4_names
        assert get_names_from_dir('module.submod_recursive', submod_recursive) == submod_recursive_names
        assert get_names_from_dir('module.submod_recursive.submod1', submod_recursive) == submod_recursive_submod1_names
        assert get_names_from_dir('module.submod_recursive.submod2', submod_recursive) == submod_recursive_submod2_names
        # --- Error cases ---
        raises(ExternalModuleError, lambda: get_names_from_dir('os.path', directory))
        raises(ExternalModuleError, lambda: get_names_from_dir('os.path', submod))
        raises(RuntimeError, lambda: get_names_from_dir('.mod_bad', directory))
        raises(RuntimeError, lambda: get_names_from_dir('module.mod_bad', directory))
        raises(RuntimeError, lambda: get_names_from_dir('.mod_doesnt_exist', directory))
        raises(RuntimeError, lambda: get_names_from_dir('module.mod_doesnt_exist', directory))
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(curdir)
def test_get_names_dynamically(tmpdir):
    """get_names_dynamically() actually imports the module, so submodules
    imported as a side effect show up in the result sets.

    Fix vs. the previous revision: ``sys_path = sys.path`` only aliased the
    list, so ``sys.path.insert(...)`` mutated the saved "backup" too and the
    restore in ``finally`` was a no-op, leaking ``str(tmpdir)`` into
    ``sys.path`` for the rest of the session. We now save a copy.
    """
    os_path = get_names_dynamically('os.path')
    assert 'isfile' in os_path
    assert 'join' in os_path
    directory = tmpdir/'module'
    create_module(directory)
    sys_path = sys.path.copy()  # copy, not alias: insert() below mutates in place
    try:
        sys.path.insert(0, str(tmpdir))
        assert get_names_dynamically('module.mod1') == mod1_names
        assert get_names_dynamically('module.mod2') == mod2_names
        assert get_names_dynamically('module.mod3') == mod3_names
        assert get_names_dynamically('module.mod4') == mod4_names
        assert get_names_dynamically('module.mod5') == mod5_names
        assert get_names_dynamically('module.mod6') == os_path
        assert get_names_dynamically('module.mod7') == os_path
        assert get_names_dynamically('module.mod8') == mod8_names
        assert get_names_dynamically('module.mod9') == mod9_names
        assert get_names_dynamically('module.mod_unfixable') == mod_unfixable_names
        assert get_names_dynamically('module.mod_commented_unused_star') == mod_commented_unused_star_names
        assert get_names_dynamically('module.mod_commented_star') == mod_commented_star_names
        assert get_names_dynamically('module.submod') == submod_dynamic_names
        assert get_names_dynamically('module.submod.submod1') == submod1_names
        assert get_names_dynamically('module.submod.submod2') == submod2_names
        assert get_names_dynamically('module.submod.submod3') == submod3_names
        raises(RuntimeError, lambda: get_names_dynamically('module.submod.submod4'))
        assert get_names_dynamically('module.submod_recursive') == submod_recursive_dynamic_names
        assert get_names_dynamically('module.submod_recursive.submod1') == submod_recursive_submod1_names
        assert get_names_dynamically('module.submod_recursive.submod2') == submod_recursive_submod2_dynamic_names
        # Doesn't actually import because of the undefined name 'd'
        # assert get_names_dynamically('module.submod.submod4') == submod4_names
    finally:
        sys.path = sys_path
    raises(RuntimeError, lambda: get_names_dynamically('notarealmodule'))
def test_fix_code(tmpdir, capsys):
    """End-to-end test of fix_code() over every fixture module.

    Each fix_code() call is immediately followed by capsys.readouterr() so
    that stdout/stderr assertions apply only to that call — the drain order
    is load-bearing; do not reorder these statements.
    """
    # TODO: Test the verbose and quiet flags
    directory = tmpdir/'module'
    create_module(directory)
    # Modules without star imports come back unchanged and silent.
    assert fix_code(code_mod1, file=directory/'mod1.py') == code_mod1
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_mod2, file=directory/'mod2.py') == code_mod2
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_mod3, file=directory/'mod3.py') == code_mod3
    out, err = capsys.readouterr()
    assert not out
    assert not err
    # mod4: fixed, but with warnings about the ambiguous 'b' (defined in both
    # mod1 and mod2) and the unresolvable name 'd'.
    assert fix_code(code_mod4, file=directory/'mod4.py') == code_mod4_fixed
    out, err = capsys.readouterr()
    assert not out
    assert 'Warning' in err
    assert str(directory/'mod4.py') in err
    assert "'b'" in err
    assert "'a'" not in err
    assert "'.mod1'" in err
    assert "'.mod2'" in err
    assert "Using '.mod2'" in err
    assert "could not find import for 'd'" in err
    # mod5: same as mod4 but with absolute import names in the warnings.
    assert fix_code(code_mod5, file=directory/'mod5.py') == code_mod5_fixed
    out, err = capsys.readouterr()
    assert not out
    assert 'Warning' in err
    assert str(directory/'mod5.py') in err
    assert "'b'" in err
    assert "'a'" not in err
    assert "'module.mod1'" in err
    assert "'module.mod2'" in err
    assert "Using 'module.mod2'" in err
    assert "could not find import for 'd'" in err
    # mod6/mod7 star-import an external module: works dynamically, raises
    # NotImplementedError when dynamic importing is disabled.
    # NOTE(review): the leading `assert` on the `raises(...)` calls is
    # redundant — pytest.raises already fails the test if nothing is raised.
    assert fix_code(code_mod6, file=directory/'mod6.py') == code_mod6_fixed
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert raises(NotImplementedError, lambda: fix_code(code_mod6, file=directory/'mod6.py', allow_dynamic=False))
    assert fix_code(code_mod7, file=directory/'mod7.py') == code_mod7_fixed
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert raises(NotImplementedError, lambda: fix_code(code_mod7, file=directory/'mod7.py', allow_dynamic=False))
    assert fix_code(code_mod8, file=directory/'mod8.py') == code_mod8
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_mod9, file=directory/'mod9.py') == code_mod9_fixed
    out, err = capsys.readouterr()
    assert not out
    assert not err
    # Unfixable star imports (';' / embedded tab) are left as-is with a warning.
    assert fix_code(code_mod_unfixable, file=directory/'mod_unfixable.py') == code_mod_unfixable
    out, err = capsys.readouterr()
    assert not out
    assert 'Warning' in err
    assert 'Could not find the star imports for' in err
    for mod in ["'.mod1'", "'.mod2'"]:
        assert mod in err
    # Removing a star import with an inline comment keeps the comment but warns.
    assert fix_code(code_mod_commented_unused_star, file=directory/'mod_commented_unused_star.py') == code_mod_commented_unused_star_fixed
    out, err = capsys.readouterr()
    assert not out
    assert 'Warning' in err
    assert ("The removed star import statement for '.mod1' had an inline "
            "comment which may not make sense without the import") in err
    assert fix_code(code_mod_commented_star, file=directory/'mod_commented_star.py') == code_mod_commented_star_fixed
    out, err = capsys.readouterr()
    assert not out
    assert not err
    # Same scenarios resolved from inside the submod subpackage.
    submod = directory/'submod'
    assert fix_code(code_submod_init, file=submod/'__init__.py') == code_submod_init
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_submod1, file=submod/'submod1.py') == code_submod1_fixed
    out, err = capsys.readouterr()
    assert not out
    assert 'Warning' in err
    assert str(submod/'submod1.py') in err
    assert "'b'" in err
    assert "'a'" not in err
    assert "'..mod1'" in err
    assert "'..mod2'" in err
    assert "'.mod1'" not in err
    assert "'.mod2'" not in err
    assert "Using '..mod2'" in err
    assert "could not find import for 'd'" in err
    assert fix_code(code_submod2, file=submod/'submod2.py') == code_submod2_fixed
    out, err = capsys.readouterr()
    assert not out
    assert 'Warning' in err
    assert str(submod/'submod2.py') in err
    assert "'b'" in err
    assert "'a'" not in err
    assert "'module.mod1'" in err
    assert "'module.mod2'" in err
    assert "'module.submod.submod3'" not in err
    assert "'module.submod.mod1'" not in err
    assert "'module.submod.mod2'" not in err
    assert "Using 'module.mod2'" in err
    assert "could not find import for 'd'" in err
    assert fix_code(code_submod3, file=submod/'submod3.py') == code_submod3
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_submod4, file=submod/'submod4.py') == code_submod4_fixed
    out, err = capsys.readouterr()
    assert not out
    assert not err
    # Recursive subpackage cases.
    submod_recursive = directory/'submod_recursive'
    # TODO: It's not actually useful to test this
    assert fix_code(code_submod_recursive_init, file=submod_recursive/'__init__.py') == ""
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_submod_recursive_submod1, file=submod_recursive/'submod1.py') == code_submod_recursive_submod1
    out, err = capsys.readouterr()
    assert not out
    assert not err
    assert fix_code(code_submod_recursive_submod2, file=submod_recursive/'submod2.py') == code_submod_recursive_submod2_fixed
    out, err = capsys.readouterr()
    assert not out
    assert not err
    # Syntactically invalid input raises RuntimeError and prints nothing.
    raises(RuntimeError, lambda: fix_code(code_bad_syntax, file=directory/'mod_bad.py'))
    out, err = capsys.readouterr()
    assert not out
    assert not err
def touch(f):
    """Create an empty file at path *f*, truncating any existing content."""
    open(f, 'w').close()
@pytest.mark.parametrize('relative', [True, False])
def test_get_mod_filename(tmpdir, relative):
    """Check get_mod_filename() resolution from the package root, a
    subpackage, and a sub-subpackage, with relative and absolute paths.

    Fix vs. the previous revision: from ``subsubmod`` the not-found checks
    tested '..notreal' twice; the third level is now '...notreal' to match
    the '.', '..', '...' pattern of the surrounding assertions.
    """
    if relative:
        chdir = tmpdir
        tmpdir = Path('.')
    else:
        chdir = '.'
    curdir = os.path.abspath('.')
    try:
        os.chdir(chdir)
        # Build a three-level package: module / submod / submod.
        module = tmpdir/'module'
        os.makedirs(module)
        touch(module/'__init__.py')
        touch(module/'mod1.py')
        submod = module/'submod'
        os.makedirs(submod)
        touch(submod/'__init__.py')
        touch(submod/'mod1.py')
        subsubmod = submod/'submod'
        os.makedirs(subsubmod)
        touch(subsubmod/'__init__.py')
        touch(subsubmod/'mod1.py')
        def _test(mod, directory, expected):
            # Compare as absolute paths so relative/absolute runs agree.
            result = os.path.abspath(get_mod_filename(mod, directory))
            assert result == os.path.abspath(expected)
        # --- From the package root ---
        _test('.', module, module/'__init__.py')
        _test('.mod1', module, module/'mod1.py')
        _test('.submod', module, submod/'__init__.py')
        _test('.submod.mod1', module, submod/'mod1.py')
        _test('.submod.submod', module, subsubmod/'__init__.py')
        _test('.submod.submod.mod1', module, subsubmod/'mod1.py')
        raises(RuntimeError, lambda: get_mod_filename('.notreal', module))
        _test('module', module, module/'__init__.py')
        _test('module.mod1', module, module/'mod1.py')
        _test('module.submod', module, submod/'__init__.py')
        _test('module.submod.mod1', module, submod/'mod1.py')
        _test('module.submod.submod', module, subsubmod/'__init__.py')
        _test('module.submod.submod.mod1', module, subsubmod/'mod1.py')
        raises(RuntimeError, lambda: get_mod_filename('module.notreal', module))
        raises(RuntimeError, lambda: get_mod_filename('module.submod.notreal', module))
        raises(ExternalModuleError, lambda: get_mod_filename('notreal.notreal', module))
        # --- From the submod subpackage ---
        _test('..', submod, module/'__init__.py')
        _test('..mod1', submod, module/'mod1.py')
        _test('.', submod, submod/'__init__.py')
        _test('.mod1', submod, submod/'mod1.py')
        _test('..submod', submod, submod/'__init__.py')
        _test('..submod.mod1', submod, submod/'mod1.py')
        _test('.submod', submod, subsubmod/'__init__.py')
        _test('.submod.mod1', submod, subsubmod/'mod1.py')
        _test('..submod.submod', submod, subsubmod/'__init__.py')
        _test('..submod.submod.mod1', submod, subsubmod/'mod1.py')
        raises(RuntimeError, lambda: get_mod_filename('.notreal', submod))
        raises(RuntimeError, lambda: get_mod_filename('..notreal', submod))
        _test('module', submod, module/'__init__.py')
        _test('module.mod1', submod, module/'mod1.py')
        _test('module.submod', submod, submod/'__init__.py')
        _test('module.submod.mod1', submod, submod/'mod1.py')
        _test('module.submod.submod', submod, subsubmod/'__init__.py')
        _test('module.submod.submod.mod1', submod, subsubmod/'mod1.py')
        raises(RuntimeError, lambda: get_mod_filename('module.notreal', submod))
        raises(RuntimeError, lambda: get_mod_filename('module.submod.notreal', submod))
        raises(ExternalModuleError, lambda: get_mod_filename('notreal.notreal', submod))
        # --- From the subsubmod subpackage ---
        _test('...', subsubmod, module/'__init__.py')
        _test('...mod1', subsubmod, module/'mod1.py')
        _test('..', subsubmod, submod/'__init__.py')
        _test('..mod1', subsubmod, submod/'mod1.py')
        _test('...submod', subsubmod, submod/'__init__.py')
        _test('...submod.mod1', subsubmod, submod/'mod1.py')
        _test('.', subsubmod, subsubmod/'__init__.py')
        _test('.mod1', subsubmod, subsubmod/'mod1.py')
        _test('...submod.submod', subsubmod, subsubmod/'__init__.py')
        _test('...submod.submod.mod1', subsubmod, subsubmod/'mod1.py')
        _test('..submod', subsubmod, subsubmod/'__init__.py')
        _test('..submod.mod1', subsubmod, subsubmod/'mod1.py')
        raises(RuntimeError, lambda: get_mod_filename('.notreal', subsubmod))
        raises(RuntimeError, lambda: get_mod_filename('..notreal', subsubmod))
        raises(RuntimeError, lambda: get_mod_filename('...notreal', subsubmod))
        _test('module', subsubmod, module/'__init__.py')
        _test('module.mod1', subsubmod, module/'mod1.py')
        _test('module.submod', subsubmod, submod/'__init__.py')
        _test('module.submod.mod1', subsubmod, submod/'mod1.py')
        _test('module.submod.submod', subsubmod, subsubmod/'__init__.py')
        _test('module.submod.submod.mod1', subsubmod, subsubmod/'mod1.py')
        raises(RuntimeError, lambda: get_mod_filename('module.notreal', subsubmod))
        raises(RuntimeError, lambda: get_mod_filename('module.submod.notreal', subsubmod))
        raises(ExternalModuleError, lambda: get_mod_filename('notreal.notreal', subsubmod))
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(curdir)
def test_replace_imports():
    """replace_imports() swaps each star import for the explicit names given
    in ``repls`` and returns code without matching star imports unchanged."""
    # The verbose and quiet flags are already tested in test_fix_code
    # Modules with no applicable replacements must come back byte-identical.
    for code in [code_mod1, code_mod2, code_mod3, code_mod8, code_submod3,
                 code_submod_init, code_submod_recursive_submod1, code_mod_unfixable]:
        assert replace_imports(code, repls={}, verbose=False, quiet=True) == code

    # (input, replacements, quiet flag, expected output) — kept in the same
    # order as the original one-assert-per-module layout.
    cases = [
        (code_mod4, {'.mod1': ['a'], '.mod2': ['b', 'c']}, True, code_mod4_fixed),
        (code_mod5, {'module.mod1': ['a'], 'module.mod2': ['b', 'c']}, True, code_mod5_fixed),
        (code_mod6, {'os.path': ['isfile', 'join']}, False, code_mod6_fixed),
        (code_mod7, {'.mod6': []}, False, code_mod7_fixed),
        (code_mod9, {'.mod8': ['a', 'b']}, False, code_mod9_fixed),
        (code_submod1, {'..mod1': ['a'], '..mod2': ['b', 'c'], '.submod3': ['e']}, True, code_submod1_fixed),
        (code_submod2, {'module.mod1': ['a'], 'module.mod2': ['b', 'c'],
                        'module.submod.submod3': ['e']}, True, code_submod2_fixed),
        (code_submod4, {'.': ['func']}, True, code_submod4_fixed),
    ]
    for original, repls, quiet, expected in cases:
        assert replace_imports(original, repls=repls, verbose=False, quiet=quiet) == expected

    # Default verbosity/quietness.
    assert replace_imports(code_submod_recursive_submod2, repls={'.': ['a']}) == code_submod_recursive_submod2_fixed
    assert replace_imports(code_mod_unfixable, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}) == code_mod_unfixable
    assert replace_imports(code_mod_commented_unused_star, repls={'.mod1': [], '.mod2': []}) == code_mod_commented_unused_star_fixed
    assert replace_imports(code_mod_commented_star, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}) == code_mod_commented_star_fixed
@pytest.mark.parametrize('verbose_enabled, verbose_kwarg', [
    (False, {}), # Default is False
    (False, {'verbose': False}),
    (True, {'verbose': True}),
], ids=['implicit no verbose', 'explicit no verbose', 'explicit verbose'])
# Each case supplies the kwargs for replace_imports(), the expected rewritten
# code, and the verbose messages expected on stderr (listed pre-sorted, since
# the assertion below sorts the captured lines before comparing).
@pytest.mark.parametrize('kwargs, fixed_code, verbose_messages', [
    (dict(code=code_mod4, repls={'.mod1': ['a'], '.mod2': ['b', 'c']}),
     code_mod4_fixed, [
         "Replacing 'from .mod1 import *' with 'from .mod1 import a'",
         "Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"
     ]),
    (dict(code=code_mod4, repls={'.mod1': ['a'], '.mod2': ['b', 'c']}, file='directory/mod4.py'),
     code_mod4_fixed, [
         "directory/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'",
         "directory/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"
     ]),
    (dict(code=code_mod_commented_star, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}),
     code_mod_commented_star_fixed, [
         "Replacing 'from .mod3 import *' with 'from .mod3 import name'",
         "Retaining 'from .mod1 import *' due to noqa comment",
         "Retaining 'from .mod2 import *' due to noqa comment"
     ]),
    (dict(code=code_mod_commented_star, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}, file='directory/mod_commented_star.py'),
     code_mod_commented_star_fixed, [
         "directory/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'",
         "directory/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment",
         "directory/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment"
     ]),
], ids=[
    'mod4 without file',
    'mod4 with file',
    'mod_commented_star without file',
    'mod_commented_star with file'
])
def test_replace_imports_verbose_messages(kwargs, fixed_code, verbose_messages, verbose_enabled, verbose_kwarg, capsys):
    """Verbose mode reports each replacement or retention on stderr, prefixed
    with the file name when one is given; without verbose, stderr is empty."""
    assert replace_imports(**kwargs, **verbose_kwarg) == fixed_code
    _, err = capsys.readouterr()
    if verbose_enabled:
        # Sorted comparison makes the test independent of emission order.
        assert sorted(err.splitlines()) == verbose_messages
    else:
        assert err == ''
def test_replace_imports_warnings(capsys):
    """Warnings for unfixable star imports and for dropped inline comments go
    to stderr, carry a file-name prefix when a file is given, and are fully
    suppressed by quiet=True."""
    def run(code, **kwargs):
        # Apply replace_imports and capture the stderr lines it emitted.
        result = replace_imports(code, **kwargs)
        _, err = capsys.readouterr()
        return result, set(err.splitlines())

    # Unfixable star imports: one warning per module, prefixed with the file.
    result, errors = run(code_mod_unfixable, file='module/mod_unfixable.py',
                         repls={'.mod1': ['a'], '.mod2': ['c']})
    assert result == code_mod_unfixable
    assert errors == {
        "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod1'",
        "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod2'"
    }

    # Without a file name the prefix is omitted.
    result, errors = run(code_mod_unfixable, file=None,
                         repls={'.mod1': ['a'], '.mod2': ['c']})
    assert result == code_mod_unfixable
    assert errors == {
        "Warning: Could not find the star imports for '.mod1'",
        "Warning: Could not find the star imports for '.mod2'"
    }

    # quiet=True silences the warnings entirely.
    result, errors = run(code_mod_unfixable, quiet=True,
                         repls={'.mod1': ['a'], '.mod2': ['c']})
    assert result == code_mod_unfixable
    assert errors == set()

    # Removing a star import that carried an inline comment warns that the
    # surviving comment may no longer make sense.
    result, errors = run(code_mod_commented_unused_star,
                         file='module/mod_commented_unused_star.py',
                         repls={'.mod1': [], '.mod2': []})
    assert result == code_mod_commented_unused_star_fixed
    assert errors == {
        "Warning: module/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import",
    }

    result, errors = run(code_mod_commented_unused_star, file=None,
                         repls={'.mod1': [], '.mod2': []})
    assert result == code_mod_commented_unused_star_fixed
    assert errors == {
        "Warning: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import",
    }

    result, errors = run(code_mod_commented_unused_star, quiet=True,
                         repls={'.mod1': [], '.mod2': []})
    assert result == code_mod_commented_unused_star_fixed
    assert errors == set()
def test_replace_imports_line_wrapping():
    """Rewritten import statements are wrapped to max_line_length, with the
    imported names aligned to the opening parenthesis."""
    code = """\
from reallyreallylongmodulename import *
print(longname1, longname2, longname3, longname4, longname5, longname6,
      longname7, longname8, longname9)
"""
    code_fixed = """\
{imp}
print(longname1, longname2, longname3, longname4, longname5, longname6,
      longname7, longname8, longname9)
"""
    repls = {'reallyreallylongmodulename': ['longname1', 'longname2', 'longname3', 'longname4', 'longname5', 'longname6', 'longname7', 'longname8', 'longname9']}
    # Default maximum line length wraps onto two aligned lines.
    assert replace_imports(code, repls) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5,
                                        longname6, longname7, longname8, longname9)''')
    # Make sure the first line has at least one imported name.
    # There's no point to doing
    #
    # from mod import (
    #     name,
    #
    # if we are aligning the names to the opening parenthesis anyway.
    assert replace_imports(code, repls, max_line_length=49) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
                                        longname2,
                                        longname3,
                                        longname4,
                                        longname5,
                                        longname6,
                                        longname7,
                                        longname8,
                                        longname9)''')
    # 50 and 51 give the same one-name-per-line layout as 49.
    assert replace_imports(code, repls, max_line_length=50) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
                                        longname2,
                                        longname3,
                                        longname4,
                                        longname5,
                                        longname6,
                                        longname7,
                                        longname8,
                                        longname9)''')
    assert replace_imports(code, repls, max_line_length=51) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
                                        longname2,
                                        longname3,
                                        longname4,
                                        longname5,
                                        longname6,
                                        longname7,
                                        longname8,
                                        longname9)''')
    assert replace_imports(code, repls, max_line_length=120) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5, longname6, longname7,
                                        longname8, longname9)''')
    # The fully unwrapped statement is exactly 136 characters wide.
    assert len("from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9") == 136
    assert replace_imports(code, repls, max_line_length=137) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
    assert replace_imports(code, repls, max_line_length=136) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
    # One column below the unwrapped width forces parenthesized wrapping again.
    assert replace_imports(code, repls, max_line_length=135) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8,
                                        longname9)''')
    assert replace_imports(code, repls, max_line_length=200) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
    assert replace_imports(code, repls, max_line_length=float('inf')) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
# Run every comment through the identity, upper-case and lower-case transforms
# to prove the detector is case-insensitive.
@pytest.mark.parametrize('case_permutation', [
    str,
    str.upper,
    str.lower,
], ids=['same case', 'upper case', 'lower case'])
@pytest.mark.parametrize('allows_star, comment', [
    (True, '# noqa'),
    (True, '#noqa'),
    (True, '# noqa '),
    (False, '# noqa foo bar'),
    (False, '# noqa:'),
    (False, '# noqa :'),
    (True, '# noqa: F401'),
    (True, '#noqa:F401'),
    (True, '# noqa: F401 '),
    (True, '#\tnoqa:\tF401\t'),
    (True, '# noqa: F403'),
    (True, '# noqa: A1,F403,A1'),
    (True, '# noqa: A1 F401 A1'),
    (True, '# noqa: A1, F401, A1'),
    (True, '# noqa: A1 , F401 , A1'),
    (False, '# generic comment'),
    (False, '#'),
    (False, ''),
    (False, '# foo: F401'),
    (False, '# F401'),
    (False, '# noqa F401'), # missing : after noqa
])
def test_is_noqa_comment_allowing_star_import(case_permutation, allows_star, comment):
    """A star import is retained for a bare '# noqa' or a '# noqa:' code list
    that mentions F401/F403, regardless of the comment's letter case."""
    candidate = case_permutation(comment)
    assert is_noqa_comment_allowing_star_import(candidate) is allows_star
def _dirs_equal(cmp):
if cmp.diff_files:
return False
if not cmp.subdirs:
return True
return all(_dirs_equal(c) for c in cmp.subdirs.values())
def test_cli(tmpdir):
    """End-to-end test of the command line interface via subprocess.

    Runs ``python -m removestar`` against a scratch copy of the fixture
    module, checking the default diff output, --quiet, --verbose,
    --no-dynamic-importing and in-place (-i) modes against an untouched
    reference copy of the same tree.
    """
    from ..__main__ import __file__
    # TODO: Test the verbose and quiet flags
    directory_orig = tmpdir/'orig'/'module'
    directory = tmpdir/'module'
    create_module(directory)
    create_module(directory_orig)
    cmp = dircmp(directory, directory_orig)
    assert _dirs_equal(cmp)
    # Make sure we are running the command for the right file
    p = subprocess.run([sys.executable, '-m', 'removestar', '--_this-file', 'none'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
    assert p.stderr == ''
    assert p.stdout == __file__
    # Default mode: diffs go to stdout, warnings/errors to stderr.
    p = subprocess.run([sys.executable, '-m', 'removestar', directory],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
    # Expected stderr lines; compared as sets since order is not guaranteed.
    warnings = set(f"""\
Warning: {directory}/submod/submod1.py: 'b' comes from multiple modules: '..mod1', '..mod2'. Using '..mod2'.
Warning: {directory}/submod/submod1.py: could not find import for 'd'
Warning: {directory}/submod/submod2.py: 'b' comes from multiple modules: 'module.mod1', 'module.mod2'. Using 'module.mod2'.
Warning: {directory}/submod/submod2.py: could not find import for 'd'
Warning: {directory}/mod4.py: 'b' comes from multiple modules: '.mod1', '.mod2'. Using '.mod2'.
Warning: {directory}/mod4.py: could not find import for 'd'
Warning: {directory}/mod5.py: 'b' comes from multiple modules: 'module.mod1', 'module.mod2'. Using 'module.mod2'.
Warning: {directory}/mod5.py: could not find import for 'd'
Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod1'
Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod2'
Warning: {directory}/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import
""".splitlines())
    error = f"Error with {directory}/mod_bad.py: SyntaxError: invalid syntax (mod_bad.py, line 1)"
    assert set(p.stderr.splitlines()) == warnings.union({error})
    # Expected unified-diff fragments printed on stdout, one per fixed file.
    diffs = [
        f"""\
--- original/{directory}/mod4.py
+++ fixed/{directory}/mod4.py
@@ -1,5 +1,5 @@
-from .mod1 import *
-from .mod2 import *
+from .mod1 import a
+from .mod2 import b, c
from .mod3 import name
\n\
def func():\
""",
        f"""\
--- original/{directory}/mod5.py
+++ fixed/{directory}/mod5.py
@@ -1,5 +1,5 @@
-from module.mod1 import *
-from module.mod2 import *
+from module.mod1 import a
+from module.mod2 import b, c
from module.mod3 import name
\n\
def func():\
""",
        f"""\
--- original/{directory}/mod6.py
+++ fixed/{directory}/mod6.py
@@ -1,2 +1,2 @@
-from os.path import *
+from os.path import isfile, join
isfile(join('a', 'b'))\
""",
        f"""\
--- original/{directory}/mod7.py
+++ fixed/{directory}/mod7.py
@@ -1 +0,0 @@
-from .mod6 import *\
""",
        f"""\
--- original/{directory}/mod9.py
+++ fixed/{directory}/mod9.py
@@ -1,4 +1,4 @@
-from .mod8 import *
+from .mod8 import a, b
\n\
def func():
    return a + b\
""",
        f"""\
--- original/{directory}/mod_commented_unused_star.py
+++ fixed/{directory}/mod_commented_unused_star.py
@@ -1,2 +1,2 @@
-from .mod1 import * # comment about mod1
+# comment about mod1
from .mod2 import * # noqa\
""",
        f"""\
--- original/{directory}/mod_commented_star.py
+++ fixed/{directory}/mod_commented_star.py
@@ -1,6 +1,6 @@
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
-from .mod3 import * # generic comment
+from .mod3 import name # generic comment
\n\
def func():\
""",
        f"""\
--- original/{directory}/submod/submod1.py
+++ fixed/{directory}/submod/submod1.py
@@ -1,7 +1,7 @@
-from ..mod1 import *
-from ..mod2 import *
+from ..mod1 import a
+from ..mod2 import b, c
from ..mod3 import name
-from .submod3 import *
+from .submod3 import e
\n\
def func():
    return a + b + c + d + d + e + name\
""",
        f"""\
--- original/{directory}/submod/submod2.py
+++ fixed/{directory}/submod/submod2.py
@@ -1,7 +1,7 @@
-from module.mod1 import *
-from module.mod2 import *
+from module.mod1 import a
+from module.mod2 import b, c
from module.mod3 import name
-from module.submod.submod3 import *
+from module.submod.submod3 import e
\n\
def func():
    return a + b + c + d + d + e + name\
""",
        f"""\
--- original/{directory}/submod/submod4.py
+++ fixed/{directory}/submod/submod4.py
@@ -1,3 +1,3 @@
-from . import *
+from . import func
\n\
func()\
""",
        f"""\
--- original/{directory}/submod_recursive/submod2.py
+++ fixed/{directory}/submod_recursive/submod2.py
@@ -1,4 +1,4 @@
-from . import *
+from . import a
\n\
def func():
    return a + 1\
""",
    ]
    unchanged = ['__init__.py', 'mod_bad.py', 'mod_unfixable.py']
    for d in diffs:
        assert d in p.stdout, p.stdout
    for mod_path in unchanged:
        # NOTE(review): missing f-string prefix — this checks for the literal
        # text '{directory}/{mod_path}', so the assertion is vacuous and never
        # actually verifies that unchanged files produce no diff.
        assert '--- original/{directory}/{mod_path}' not in p.stdout
    # Without -i the tree on disk must be untouched.
    cmp = dircmp(directory, directory_orig)
    assert _dirs_equal(cmp)
    # --quiet: diffs still printed, stderr empty.
    p = subprocess.run([sys.executable, '-m', 'removestar', '--quiet', directory],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
    assert p.stderr == ''
    for d in diffs:
        assert d in p.stdout
    cmp = dircmp(directory, directory_orig)
    assert _dirs_equal(cmp)
    # --verbose: per-replacement messages are added to the warnings/errors.
    p = subprocess.run([sys.executable, '-m', 'removestar', '--verbose', directory],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       encoding='utf-8')
    changes = set(f"""\
{directory}/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'
{directory}/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'
{directory}/mod5.py: Replacing 'from module.mod1 import *' with 'from module.mod1 import a'
{directory}/mod5.py: Replacing 'from module.mod2 import *' with 'from module.mod2 import b, c'
{directory}/mod6.py: Replacing 'from os.path import *' with 'from os.path import isfile, join'
{directory}/mod7.py: Replacing 'from .mod6 import *' with ''
{directory}/mod9.py: Replacing 'from .mod8 import *' with 'from .mod8 import a, b'
{directory}/mod_commented_unused_star.py: Replacing 'from .mod1 import *' with ''
{directory}/mod_commented_unused_star.py: Retaining 'from .mod2 import *' due to noqa comment
{directory}/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'
{directory}/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment
{directory}/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment
{directory}/submod/submod1.py: Replacing 'from ..mod1 import *' with 'from ..mod1 import a'
{directory}/submod/submod1.py: Replacing 'from ..mod2 import *' with 'from ..mod2 import b, c'
{directory}/submod/submod1.py: Replacing 'from .submod3 import *' with 'from .submod3 import e'
{directory}/submod/submod4.py: Replacing 'from . import *' with 'from . import func'
{directory}/submod/submod2.py: Replacing 'from module.mod1 import *' with 'from module.mod1 import a'
{directory}/submod/submod2.py: Replacing 'from module.mod2 import *' with 'from module.mod2 import b, c'
{directory}/submod/submod2.py: Replacing 'from module.submod.submod3 import *' with 'from module.submod.submod3 import e'
{directory}/submod_recursive/submod2.py: Replacing 'from . import *' with 'from . import a'
""".splitlines())
    assert set(p.stderr.splitlines()) == changes.union({error}).union(warnings)
    for d in diffs:
        assert d in p.stdout, p.stdout
    cmp = dircmp(directory, directory_orig)
    assert _dirs_equal(cmp)
    # --no-dynamic-importing: external modules (os.path) cannot be resolved.
    p = subprocess.run([sys.executable, '-m', 'removestar', '--no-dynamic-importing', directory],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       encoding='utf-8')
    static_error = set(f"""\
Error with {directory}/mod6.py: Static determination of external module imports is not supported.
Error with {directory}/mod7.py: Static determination of external module imports is not supported.
""".splitlines())
    assert set(p.stderr.splitlines()) == {error}.union(static_error).union(warnings)
    for d in diffs:
        if 'mod6' in d:
            assert d not in p.stdout
        else:
            assert d in p.stdout, p.stdout
    cmp = dircmp(directory, directory_orig)
    assert _dirs_equal(cmp)
    # Test --quiet hides both errors
    p = subprocess.run([sys.executable, '-m', 'removestar', '--quiet', '--no-dynamic-importing', directory],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       encoding='utf-8')
    assert p.stderr == ''
    for d in diffs:
        if 'mod6' in d:
            assert d not in p.stdout
        else:
            assert d in p.stdout, p.stdout
    cmp = dircmp(directory, directory_orig)
    assert _dirs_equal(cmp)
    # XXX: This modifies directory, so keep it at the end of the test
    p = subprocess.run([sys.executable, '-m', 'removestar', '--quiet', '-i', directory],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
    assert p.stderr == ''
    assert p.stdout == ''
    # In-place mode must change exactly the fixable files.
    cmp = dircmp(directory, directory_orig)
    assert not _dirs_equal(cmp)
    assert cmp.diff_files == ['mod4.py', 'mod5.py', 'mod6.py', 'mod7.py', 'mod9.py', 'mod_commented_star.py', 'mod_commented_unused_star.py']
    assert cmp.subdirs['submod'].diff_files == ['submod1.py', 'submod2.py', 'submod4.py']
    assert cmp.subdirs['submod_recursive'].diff_files == ['submod2.py']
    # Each rewritten file now matches its expected fixed fixture; unfixable
    # and syntactically-broken files are left alone.
    with open(directory/'mod4.py') as f:
        assert f.read() == code_mod4_fixed
    with open(directory/'mod5.py') as f:
        assert f.read() == code_mod5_fixed
    with open(directory/'mod6.py') as f:
        assert f.read() == code_mod6_fixed
    with open(directory/'mod7.py') as f:
        assert f.read() == code_mod7_fixed
    with open(directory/'mod9.py') as f:
        assert f.read() == code_mod9_fixed
    with open(directory/'mod_commented_unused_star.py') as f:
        assert f.read() == code_mod_commented_unused_star_fixed
    with open(directory/'mod_commented_star.py') as f:
        assert f.read() == code_mod_commented_star_fixed
    with open(directory/'submod'/'submod1.py') as f:
        assert f.read() == code_submod1_fixed
    with open(directory/'submod'/'submod2.py') as f:
        assert f.read() == code_submod2_fixed
    with open(directory/'submod'/'submod4.py') as f:
        assert f.read() == code_submod4_fixed
    with open(directory/'submod_recursive'/'submod2.py') as f:
        assert f.read() == code_submod_recursive_submod2_fixed
    with open(directory/'mod_bad.py') as f:
        assert f.read() == code_bad_syntax
    with open(directory/'mod_unfixable.py') as f:
        assert f.read() == code_mod_unfixable
    # Test error on nonexistent file
    p = subprocess.run([sys.executable, '-m', 'removestar', directory/'notarealfile.py'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       encoding='utf-8')
    assert p.stderr == f'Error: {directory}/notarealfile.py: no such file or directory\n'
    assert p.stdout == ''
|
[
"subprocess.run",
"os.path.abspath",
"os.makedirs",
"pathlib.Path",
"pytest.mark.parametrize",
"ast.parse",
"os.chdir",
"filecmp.dircmp"
] |
[((11458, 11508), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""relative"""', '[True, False]'], {}), "('relative', [True, False])\n", (11481, 11508), False, 'import pytest\n'), ((31880, 31930), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""relative"""', '[True, False]'], {}), "('relative', [True, False])\n", (31903, 31930), False, 'import pytest\n'), ((38702, 38912), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""verbose_enabled, verbose_kwarg"""', "[(False, {}), (False, {'verbose': False}), (True, {'verbose': True})]"], {'ids': "['implicit no verbose', 'explicit no verbose', 'explicit verbose']"}), "('verbose_enabled, verbose_kwarg', [(False, {}), (\n False, {'verbose': False}), (True, {'verbose': True})], ids=[\n 'implicit no verbose', 'explicit no verbose', 'explicit verbose'])\n", (38725, 38912), False, 'import pytest\n'), ((47281, 47865), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""allows_star, comment"""', "[(True, '# noqa'), (True, '#noqa'), (True, '# noqa '), (False,\n '# noqa foo bar'), (False, '# noqa:'), (False, '# noqa :'), (True,\n '# noqa: F401'), (True, '#noqa:F401'), (True, '# noqa: F401 '), (\n True, '#\\tnoqa:\\tF401\\t'), (True, '# noqa: F403'), (True,\n '# noqa: A1,F403,A1'), (True, '# noqa: A1 F401 A1'), (True,\n '# noqa: A1, F401, A1'), (True, '# noqa: A1 , F401 , A1'), (False,\n '# generic comment'), (False, '#'), (False, ''), (False, '# foo: F401'),\n (False, '# F401'), (False, '# noqa F401')]"], {}), "('allows_star, comment', [(True, '# noqa'), (True,\n '#noqa'), (True, '# noqa '), (False, '# noqa foo bar'), (False,\n '# noqa:'), (False, '# noqa :'), (True, '# noqa: F401'), (True,\n '#noqa:F401'), (True, '# noqa: F401 '), (True, '#\\tnoqa:\\tF401\\t'),\n (True, '# noqa: F403'), (True, '# noqa: A1,F403,A1'), (True,\n '# noqa: A1 F401 A1'), (True, '# noqa: A1, F401, A1'), (True,\n '# noqa: A1 , F401 , A1'), (False, '# generic comment'), (False, '#'),\n (False, ''), (False, '# foo: 
F401'), (False, '# F401'), (False,\n '# noqa F401')])\n", (47304, 47865), False, 'import pytest\n'), ((4886, 4905), 'os.makedirs', 'os.makedirs', (['module'], {}), '(module)\n', (4897, 4905), False, 'import os\n'), ((6006, 6025), 'os.makedirs', 'os.makedirs', (['submod'], {}), '(submod)\n', (6017, 6025), False, 'import os\n'), ((6464, 6493), 'os.makedirs', 'os.makedirs', (['submod_recursive'], {}), '(submod_recursive)\n', (6475, 6493), False, 'import os\n'), ((11735, 11755), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (11750, 11755), False, 'import os\n'), ((32086, 32106), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (32101, 32106), False, 'import os\n'), ((48548, 48581), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (48554, 48581), False, 'from filecmp import dircmp\n'), ((48681, 48827), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', '--_this-file', 'none']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', '--_this-file', 'none'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')\n", (48695, 48827), False, 'import subprocess\n'), ((48910, 49044), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', directory]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', directory], stdout=\n subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')\n", (48924, 49044), False, 'import subprocess\n'), ((53081, 53114), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (53087, 53114), False, 'from filecmp import dircmp\n'), ((53152, 53296), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', '--quiet', directory]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 
'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', '--quiet', directory],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')\n", (53166, 53296), False, 'import subprocess\n'), ((53401, 53434), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (53407, 53434), False, 'from filecmp import dircmp\n'), ((53472, 53618), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', '--verbose', directory]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', '--verbose', directory],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')\n", (53486, 53618), False, 'import subprocess\n'), ((55678, 55711), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (55684, 55711), False, 'from filecmp import dircmp\n'), ((55750, 55914), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', '--no-dynamic-importing', directory]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar',\n '--no-dynamic-importing', directory], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, encoding='utf-8')\n", (55764, 55914), False, 'import subprocess\n'), ((56428, 56461), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (56434, 56461), False, 'from filecmp import dircmp\n'), ((56536, 56711), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', '--quiet', '--no-dynamic-importing',\n directory]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', '--quiet',\n '--no-dynamic-importing', directory], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, encoding='utf-8')\n", (56550, 56711), False, 'import subprocess\n'), ((56923, 
56956), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (56929, 56956), False, 'from filecmp import dircmp\n'), ((57064, 57219), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', '--quiet', '-i', directory]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', '--quiet', '-i',\n directory], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=\n 'utf-8')\n", (57078, 57219), False, 'import subprocess\n'), ((57296, 57329), 'filecmp.dircmp', 'dircmp', (['directory', 'directory_orig'], {}), '(directory, directory_orig)\n', (57302, 57329), False, 'from filecmp import dircmp\n'), ((58976, 59133), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'removestar', directory / 'notarealfile.py']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "([sys.executable, '-m', 'removestar', directory /\n 'notarealfile.py'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n encoding='utf-8')\n", (58990, 59133), False, 'import subprocess\n'), ((11677, 11691), 'pathlib.Path', 'Path', (['"""module"""'], {}), "('module')\n", (11681, 11691), False, 'from pathlib import Path\n'), ((11773, 11788), 'os.chdir', 'os.chdir', (['chdir'], {}), '(chdir)\n', (11781, 11788), False, 'import os\n'), ((24253, 24269), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (24261, 24269), False, 'import os\n'), ((32033, 32042), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (32037, 32042), False, 'from pathlib import Path\n'), ((32124, 32139), 'os.chdir', 'os.chdir', (['chdir'], {}), '(chdir)\n', (32132, 32139), False, 'import os\n'), ((32182, 32201), 'os.makedirs', 'os.makedirs', (['module'], {}), '(module)\n', (32193, 32201), False, 'import os\n'), ((32311, 32330), 'os.makedirs', 'os.makedirs', (['submod'], {}), '(submod)\n', (32322, 32330), False, 'import os\n'), ((32443, 32465), 
'os.makedirs', 'os.makedirs', (['subsubmod'], {}), '(subsubmod)\n', (32454, 32465), False, 'import os\n'), ((36739, 36755), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (36747, 36755), False, 'import os\n'), ((7457, 7480), 'ast.parse', 'ast.parse', (['code_submod4'], {}), '(code_submod4)\n', (7466, 7480), False, 'import ast\n'), ((7550, 7570), 'ast.parse', 'ast.parse', (['code_mod6'], {}), '(code_mod6)\n', (7559, 7570), False, 'import ast\n'), ((7650, 7690), 'ast.parse', 'ast.parse', (['code_submod_recursive_submod2'], {}), '(code_submod_recursive_submod2)\n', (7659, 7690), False, 'import ast\n'), ((7757, 7777), 'ast.parse', 'ast.parse', (['code_mod9'], {}), '(code_mod9)\n', (7766, 7777), False, 'import ast\n'), ((7849, 7878), 'ast.parse', 'ast.parse', (['code_mod_unfixable'], {}), '(code_mod_unfixable)\n', (7858, 7878), False, 'import ast\n'), ((7950, 7991), 'ast.parse', 'ast.parse', (['code_mod_commented_unused_star'], {}), '(code_mod_commented_unused_star)\n', (7959, 7991), False, 'import ast\n'), ((8058, 8092), 'ast.parse', 'ast.parse', (['code_mod_commented_star'], {}), '(code_mod_commented_star)\n', (8067, 8092), False, 'import ast\n'), ((8418, 8438), 'ast.parse', 'ast.parse', (['code_mod4'], {}), '(code_mod4)\n', (8427, 8438), False, 'import ast\n'), ((8514, 8534), 'ast.parse', 'ast.parse', (['code_mod5'], {}), '(code_mod5)\n', (8523, 8534), False, 'import ast\n'), ((8622, 8642), 'ast.parse', 'ast.parse', (['code_mod6'], {}), '(code_mod6)\n', (8631, 8642), False, 'import ast\n'), ((8711, 8731), 'ast.parse', 'ast.parse', (['code_mod7'], {}), '(code_mod7)\n', (8720, 8731), False, 'import ast\n'), ((8798, 8818), 'ast.parse', 'ast.parse', (['code_mod9'], {}), '(code_mod9)\n', (8807, 8818), False, 'import ast\n'), ((8885, 8908), 'ast.parse', 'ast.parse', (['code_submod1'], {}), '(code_submod1)\n', (8894, 8908), False, 'import ast\n'), ((8998, 9021), 'ast.parse', 'ast.parse', (['code_submod2'], {}), '(code_submod2)\n', (9007, 9021), False, 'import 
ast\n'), ((9283, 9320), 'ast.parse', 'ast.parse', (['code_submod_recursive_init'], {}), '(code_submod_recursive_init)\n', (9292, 9320), False, 'import ast\n'), ((9390, 9419), 'ast.parse', 'ast.parse', (['code_mod_unfixable'], {}), '(code_mod_unfixable)\n', (9399, 9419), False, 'import ast\n'), ((9495, 9536), 'ast.parse', 'ast.parse', (['code_mod_commented_unused_star'], {}), '(code_mod_commented_unused_star)\n', (9504, 9536), False, 'import ast\n'), ((9612, 9646), 'ast.parse', 'ast.parse', (['code_mod_commented_star'], {}), '(code_mod_commented_star)\n', (9621, 9646), False, 'import ast\n'), ((7070, 7085), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (7079, 7085), False, 'import ast\n'), ((7200, 7215), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (7209, 7215), False, 'import ast\n'), ((7351, 7366), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (7360, 7366), False, 'import ast\n'), ((8339, 8354), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (8348, 8354), False, 'import ast\n'), ((9201, 9216), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (9210, 9216), False, 'import ast\n'), ((32686, 32711), 'os.path.abspath', 'os.path.abspath', (['expected'], {}), '(expected)\n', (32701, 32711), False, 'import os\n')]
|
from json import JSONDecodeError
from logging import getLogger
from httpx import HTTPStatusError, Request, RequestError, Response
logger = getLogger(__name__)
def log_request(request: Request) -> None:
    """Event hook that logs every outgoing HTTP request at DEBUG level."""
    message = f"Request event hook: {request.method} {request.url} - Waiting for response"
    logger.debug(message)
def log_response(response: Response) -> None:
    """Event hook that logs the status code of each completed HTTP response."""
    request = response.request
    message = (
        f"Response event hook: {request.method} {request.url}"
        f" - Status {response.status_code}"
    )
    logger.debug(message)
def raise_on_4xx_5xx(response: Response) -> None:
    """Hook to raise an error on http responses with codes indicating an error.

    If the error payload contains a ``"message"`` field, it is re-raised as a
    RuntimeError carrying that message; otherwise the original HTTPStatusError
    propagates unchanged.
    """
    try:
        response.raise_for_status()
    except RequestError as exc:
        # NOTE(review): raise_for_status() is documented to raise
        # HTTPStatusError; this branch looks defensive — confirm it is reachable.
        logger.debug(f"An error occurred while requesting {exc.request.url!r}.")
        raise exc
    except HTTPStatusError as exc:
        response.read()  # without this, you can get a ResponseNotRead error
        # Prefer the structured error body; fall back to raw text when the
        # server did not return valid JSON.
        try:
            parsed_response = exc.response.json()
        except JSONDecodeError:
            parsed_response = {"_raw": exc.response.text}
        debug_message = (
            f"Error response {exc.response.status_code} "
            f"while requesting {exc.request.url!r}. "
            f"Response: {parsed_response}. "
        )
        if "message" in parsed_response:
            error_message = parsed_response["message"]
            logger.debug(f"{debug_message} Message: {error_message}")
            # Surface the server-provided message to the caller.
            raise RuntimeError(error_message) from exc
        logger.debug(debug_message)
        raise exc
|
[
"logging.getLogger"
] |
[((141, 160), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (150, 160), False, 'from logging import getLogger\n')]
|
# Prefer an installed setuptools; on legacy environments that ship without
# it, bootstrap via ez_setup and retry the import.
try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages
import sys
# Interpreter version flags — presumably kept for parity with the shared
# pyexcel setup template; not referenced elsewhere in this file.
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
# Package metadata consumed by setup() at the bottom of the file.
NAME = 'pyexcel-gantt'
AUTHOR = 'C.W.'
VERSION = '0.0.1'
EMAIL = '<EMAIL>'
LICENSE = 'New BSD'
DESCRIPTION = (
    'draws gantt chart using frappe-gantt.js for pyexcel data' +
    ''
)
URL = 'https://github.com/pyexcel/pyexcel-gantt'
DOWNLOAD_URL = '%s/archive/0.0.1.tar.gz' % URL
# Files concatenated into the long description.
FILES = ['README.rst', 'CHANGELOG.rst']
KEYWORDS = [
    'python'
]
CLASSIFIERS = [
    'Topic :: Office/Business',
    'Topic :: Utilities',
    'Topic :: Software Development :: Libraries',
    'Programming Language :: Python',
    'Intended Audience :: Developers',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: BSD License',
]
INSTALL_REQUIRES = [
    'jinja2',
]
PACKAGES = find_packages(exclude=['ez_setup', 'examples', 'tests'])
EXTRAS_REQUIRE = {
}
def read_files(*files):
    """Concatenate the contents of *files*, appending a newline after each."""
    return "".join(read(name) + "\n" for name in files)
def read(afile):
    """Read a single file for setup, with doctest blocks filtered out."""
    with open(afile, 'r') as opened_file:
        return "".join(filter_out_test_code(opened_file))
def filter_out_test_code(file_handle):
    """Yield lines from *file_handle*, dropping ``.. testcode:`` blocks and
    any line containing a Sphinx substitution (``|version|`` / ``|today|``).

    A testcode block runs from its directive line through every following
    indented or blank line; the first flush-left non-blank line ends it and
    is yielded normally.
    """
    in_test_block = False
    for raw in file_handle.readlines():
        if raw.startswith('.. testcode:'):
            in_test_block = True
        elif in_test_block:
            # Stay inside the block while lines are indented or blank.
            if raw.startswith(' ') or not raw.strip():
                continue
            in_test_block = False
            yield raw
        elif not any(marker in raw for marker in ('|version|', '|today|')):
            yield raw
if __name__ == '__main__':
    # Standard setuptools entry point: all metadata comes from the
    # module-level constants defined above.
    setup(
        name=NAME,
        author=AUTHOR,
        version=VERSION,
        author_email=EMAIL,
        description=DESCRIPTION,
        url=URL,
        download_url=DOWNLOAD_URL,
        long_description=read_files(*FILES),
        license=LICENSE,
        keywords=KEYWORDS,
        extras_require=EXTRAS_REQUIRE,
        tests_require=['nose'],
        install_requires=INSTALL_REQUIRES,
        packages=PACKAGES,
        include_package_data=True,
        zip_safe=False,
        classifiers=CLASSIFIERS
    )
|
[
"ez_setup.use_setuptools",
"setuptools.find_packages"
] |
[((1184, 1240), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['ez_setup', 'examples', 'tests']"}), "(exclude=['ez_setup', 'examples', 'tests'])\n", (1197, 1240), False, 'from setuptools import setup, find_packages\n'), ((117, 133), 'ez_setup.use_setuptools', 'use_setuptools', ([], {}), '()\n', (131, 133), False, 'from ez_setup import use_setuptools\n')]
|
import numpy as np
import torch
# Area of the pairwise intersection rectangles.
def intersect(box_a, box_b):
    """Compute pairwise intersection areas between two sets of boxes.

    Args:
        box_a: (tensor) bounding boxes, Shape: [A, 4].
        box_b: (tensor) bounding boxes, Shape: [B, 4].
    Return:
        (tensor) intersection area, Shape: [A, B].
    """
    num_a, num_b = box_a.size(0), box_b.size(0)
    # Broadcast the corner coordinates to [A, B, 2] and clip the overlap
    # so disjoint boxes contribute zero area.
    lower_right = torch.min(box_a[:, 2:].unsqueeze(1).expand(num_a, num_b, 2),
                            box_b[:, 2:].unsqueeze(0).expand(num_a, num_b, 2))
    upper_left = torch.max(box_a[:, :2].unsqueeze(1).expand(num_a, num_b, 2),
                           box_b[:, :2].unsqueeze(0).expand(num_a, num_b, 2))
    side = (lower_right - upper_left).clamp(min=0)
    return side[..., 0] * side[..., 1]
def jaccard(box_a, box_b):
    """Compute pairwise IoU between two sets of boxes.

    Args:
        box_a: (tensor) bounding boxes, Shape: [A, 4].
        box_b: (tensor) bounding boxes, Shape: [B, 4].
    Return:
        ious: (tensor) Shape: [A, B]
    """
    overlap = intersect(box_a, box_b)
    # Expand each box's area to [A, B] so the union is formed elementwise.
    area_a = ((box_a[:, 2] - box_a[:, 0]) *
              (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(overlap)
    area_b = ((box_b[:, 2] - box_b[:, 0]) *
              (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(overlap)
    union = area_a + area_b - overlap
    return overlap / union
def _matrix_nms(bboxes, cate_labels, cate_scores, kernel='gaussian', sigma=2.0):
    """Matrix NMS for multi-class bboxes.
    Args:
        bboxes (Tensor): shape (n, 4)
        cate_labels (Tensor): shape (n), mask labels in descending order
        cate_scores (Tensor): shape (n), mask scores in descending order
        kernel (str): 'linear' or 'gaussian'
        sigma (float): std in gaussian method
    Returns:
        Tensor: cate_scores_update, tensors of shape (n)
    """
    n_samples = len(cate_labels)
    if n_samples == 0:
        return []
    # Pairwise n x n IoU matrix between every pair of boxes.
    iou_matrix = jaccard(bboxes, bboxes)  # shape: [n_samples, n_samples]
    iou_matrix = iou_matrix.triu(diagonal=1)  # keep only the upper triangle (each pair once)
    # label_specific matrix.
    cate_labels_x = cate_labels.expand(n_samples, n_samples)  # shape: [n_samples, n_samples]
    # Entry (i, j) is 1 when boxes i and j share a class id; only
    # same-class predictions suppress each other.
    label_matrix = (cate_labels_x == cate_labels_x.transpose(1, 0)).float().triu(diagonal=1)  # shape: [n_samples, n_samples]
    # IoU compensation
    # Zero out cross-class IoUs, keep same-class ones, then take the
    # column-wise maximum IoU for each prediction.
    compensate_iou, _ = (iou_matrix * label_matrix).max(0)  # shape: [n_samples, ]
    compensate_iou = compensate_iou.expand(n_samples, n_samples).transpose(1, 0)  # shape: [n_samples, n_samples]
    # IoU decay
    # Cross-class IoUs are zeroed; same-class IoUs are kept.
    decay_iou = iou_matrix * label_matrix  # shape: [n_samples, n_samples]
    # matrix nms
    if kernel == 'gaussian':
        decay_matrix = torch.exp(-1 * sigma * (decay_iou ** 2))
        compensate_matrix = torch.exp(-1 * sigma * (compensate_iou ** 2))
        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
    elif kernel == 'linear':
        decay_matrix = (1-decay_iou)/(1-compensate_iou)
        decay_coefficient, _ = decay_matrix.min(0)
    else:
        raise NotImplementedError
    # Decay each score by its per-prediction suppression coefficient.
    cate_scores_update = cate_scores * decay_coefficient
    return cate_scores_update
def matrix_nms(bboxes,
               scores,
               score_threshold,
               post_threshold,
               nms_top_k,
               keep_top_k,
               use_gaussian=False,
               gaussian_sigma=2.):
    """Score-threshold, Matrix-NMS and top-k filter a set of detections.

    Args:
        bboxes (Tensor): shape (n, 4).
        scores (Tensor): shape (n, num_classes) per-class scores.
        score_threshold (float): pre-NMS score cut-off.
        post_threshold (float): post-NMS score cut-off.
        nms_top_k (int): max detections kept before NMS (<= 0 keeps all).
        keep_top_k (int): max detections kept after NMS.
        use_gaussian (bool): use the gaussian decay kernel instead of linear.
        gaussian_sigma (float): sigma of the gaussian kernel.

    Returns:
        (bboxes, scores, labels) tensors, or a single (1, 6) tensor filled
        with -1.0 when no detection survives either threshold.
    """
    inds = (scores > score_threshold)
    cate_scores = scores[inds]
    if len(cate_scores) == 0:
        # Sentinel result: nothing passed the pre-NMS threshold.
        return torch.zeros((1, 6), device=bboxes.device) - 1.0
    inds = inds.nonzero()
    cate_labels = inds[:, 1]
    bboxes = bboxes[inds[:, 0]]
    # sort and keep top nms_top_k
    sort_inds = torch.argsort(cate_scores, descending=True)
    if nms_top_k > 0 and len(sort_inds) > nms_top_k:
        sort_inds = sort_inds[:nms_top_k]
    bboxes = bboxes[sort_inds, :]
    cate_scores = cate_scores[sort_inds]
    cate_labels = cate_labels[sort_inds]
    # Matrix NMS
    kernel = 'gaussian' if use_gaussian else 'linear'
    cate_scores = _matrix_nms(bboxes, cate_labels, cate_scores, kernel=kernel, sigma=gaussian_sigma)
    # filter.
    keep = cate_scores >= post_threshold
    if keep.sum() == 0:
        # Sentinel result: everything was suppressed by Matrix NMS.
        return torch.zeros((1, 6), device=bboxes.device) - 1.0
    bboxes = bboxes[keep, :]
    cate_scores = cate_scores[keep]
    cate_labels = cate_labels[keep]
    # sort and keep keep_top_k
    sort_inds = torch.argsort(cate_scores, descending=True)
    if len(sort_inds) > keep_top_k:
        sort_inds = sort_inds[:keep_top_k]
    bboxes = bboxes[sort_inds, :]
    cate_scores = cate_scores[sort_inds]
    cate_labels = cate_labels[sort_inds]
    cate_scores = cate_scores.unsqueeze(1)
    cate_labels = cate_labels.unsqueeze(1).float()
    # pred = torch.cat([cate_labels, cate_scores, bboxes], 1)
    # return pred
    return bboxes, cate_scores, cate_labels.type(torch.int32)
# yolo_box
# https://github.com/miemie2013/Pytorch-PPYOLO/blob/master/model/head.py
def yolo_box(conv_output, anchors, stride, num_classes, scale_x_y, im_size, clip_bbox, conf_thresh, use_gpu=False):
    """Decode one YOLO head output into image-space boxes and class scores.

    Args:
        conv_output: raw head tensor; permuted NCHW -> NHWC below, so it is
            expected as (batch, C, H, W) with C = anchors * (5 + num_classes).
        anchors: per-scale anchor (w, h) pairs.
        stride: feature-map stride of this head.
        num_classes: number of classes.
        scale_x_y: Grid-Sensitive scaling factor for the xy offsets.
        im_size: numpy array of original image sizes; rows read as (h, w)
            — see _im_size_h / _im_size_w below.
        clip_bbox: clip decoded boxes to the image rectangle.
        conf_thresh: unused here (the masking code is commented out).
        use_gpu: move anchors to CUDA when available.

    Returns:
        (pred_xyxy, pred_scores): boxes (batch, H*W*anchors, 4) and
        scores (batch, H*W*anchors, num_classes).
    """
    conv_output = conv_output.permute(0, 2, 3, 1)
    conv_shape = conv_output.shape
    batch_size = conv_shape[0]
    output_size = conv_shape[1]
    anchor_per_scale = len(anchors)
    # Split the last axis into (dxdy, dwdh, objectness, class logits).
    conv_output = conv_output.reshape((batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes))
    conv_raw_dxdy = conv_output[:, :, :, :, 0:2]
    conv_raw_dwdh = conv_output[:, :, :, :, 2:4]
    conv_raw_conf = conv_output[:, :, :, :, 4:5]
    conv_raw_prob = conv_output[:, :, :, :, 5: ]
    # Build the per-cell grid offsets (np.newaxis is None, valid on tensors).
    rows = torch.arange(0, output_size, dtype=torch.float32, device=conv_raw_dxdy.device)
    cols = torch.arange(0, output_size, dtype=torch.float32, device=conv_raw_dxdy.device)
    rows = rows[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis].repeat((1, output_size, 1, 1, 1))
    cols = cols[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis].repeat((1, 1, output_size, 1, 1))
    offset = torch.cat([rows, cols], dim=-1)
    offset = offset.repeat((batch_size, 1, 1, anchor_per_scale, 1))
    # Grid Sensitive
    pred_xy = (scale_x_y * torch.sigmoid(conv_raw_dxdy) + offset - (scale_x_y - 1.0) * 0.5 ) * stride
    # _anchors = torch.Tensor(anchors, device=conv_raw_dxdy.device) # RuntimeError: legacy constructor for device type: cpu was passed device type: cuda, but device type must be: cpu
    _anchors = torch.Tensor(anchors)
    if use_gpu and torch.cuda.is_available():
        _anchors = _anchors.cuda()
    pred_wh = (torch.exp(conv_raw_dwdh) * _anchors)
    pred_xyxy = torch.cat([pred_xy - pred_wh / 2, pred_xy + pred_wh / 2], dim=-1)  # top-left xy + bottom-right xy
    pred_conf = torch.sigmoid(conv_raw_conf)
    # mask = (pred_conf > conf_thresh).float()
    pred_prob = torch.sigmoid(conv_raw_prob)
    pred_scores = pred_conf * pred_prob
    # pred_scores = pred_scores * mask
    # pred_xyxy = pred_xyxy * mask
    # the actual axis order used in Paddle (kept for reference)
    # pred_xyxy = pred_xyxy.permute(0, 3, 1, 2, 4)
    # pred_scores = pred_scores.permute(0, 3, 1, 2, 4)
    pred_xyxy = pred_xyxy.reshape((batch_size, output_size*output_size*anchor_per_scale, 4))
    pred_scores = pred_scores.reshape((batch_size, pred_xyxy.shape[1], num_classes))
    # Rescale boxes from feature-map units into original-image pixels.
    im_size = torch.from_numpy(im_size)
    _im_size_h = im_size[:, 0:1]
    _im_size_w = im_size[:, 1:2]
    _im_size = torch.cat([_im_size_w, _im_size_h], 1)
    _im_size = _im_size.unsqueeze(1)
    _im_size = _im_size.repeat((1, pred_xyxy.shape[1], 1))
    pred_x0y0 = pred_xyxy[:, :, 0:2] / output_size / stride * _im_size
    pred_x1y1 = pred_xyxy[:, :, 2:4] / output_size / stride * _im_size
    if clip_bbox:
        # Clamp each corner into [0, image size].
        x0 = pred_x0y0[:, :, 0:1]
        y0 = pred_x0y0[:, :, 1:2]
        x1 = pred_x1y1[:, :, 0:1]
        y1 = pred_x1y1[:, :, 1:2]
        x0 = torch.where(x0 < 0, x0 * 0, x0)
        y0 = torch.where(y0 < 0, y0 * 0, y0)
        x1 = torch.where(x1 > _im_size[:, :, 0:1], _im_size[:, :, 0:1], x1)
        y1 = torch.where(y1 > _im_size[:, :, 1:2], _im_size[:, :, 1:2], y1)
        pred_xyxy = torch.cat([x0, y0, x1, y1], -1)
    else:
        pred_xyxy = torch.cat([pred_x0y0, pred_x1y1], -1)
    return pred_xyxy, pred_scores
|
[
"torch.where",
"torch.argsort",
"torch.cat",
"torch.exp",
"torch.sigmoid",
"torch.clamp",
"torch.Tensor",
"torch.arange",
"torch.cuda.is_available",
"torch.zeros",
"torch.from_numpy"
] |
[((608, 643), 'torch.clamp', 'torch.clamp', (['(max_xy - min_xy)'], {'min': '(0)'}), '(max_xy - min_xy, min=0)\n', (619, 643), False, 'import torch\n'), ((3757, 3800), 'torch.argsort', 'torch.argsort', (['cate_scores'], {'descending': '(True)'}), '(cate_scores, descending=True)\n', (3770, 3800), False, 'import torch\n'), ((4477, 4520), 'torch.argsort', 'torch.argsort', (['cate_scores'], {'descending': '(True)'}), '(cate_scores, descending=True)\n', (4490, 4520), False, 'import torch\n'), ((5678, 5756), 'torch.arange', 'torch.arange', (['(0)', 'output_size'], {'dtype': 'torch.float32', 'device': 'conv_raw_dxdy.device'}), '(0, output_size, dtype=torch.float32, device=conv_raw_dxdy.device)\n', (5690, 5756), False, 'import torch\n'), ((5768, 5846), 'torch.arange', 'torch.arange', (['(0)', 'output_size'], {'dtype': 'torch.float32', 'device': 'conv_raw_dxdy.device'}), '(0, output_size, dtype=torch.float32, device=conv_raw_dxdy.device)\n', (5780, 5846), False, 'import torch\n'), ((6062, 6093), 'torch.cat', 'torch.cat', (['[rows, cols]'], {'dim': '(-1)'}), '([rows, cols], dim=-1)\n', (6071, 6093), False, 'import torch\n'), ((6486, 6507), 'torch.Tensor', 'torch.Tensor', (['anchors'], {}), '(anchors)\n', (6498, 6507), False, 'import torch\n'), ((6658, 6723), 'torch.cat', 'torch.cat', (['[pred_xy - pred_wh / 2, pred_xy + pred_wh / 2]'], {'dim': '(-1)'}), '([pred_xy - pred_wh / 2, pred_xy + pred_wh / 2], dim=-1)\n', (6667, 6723), False, 'import torch\n'), ((6758, 6786), 'torch.sigmoid', 'torch.sigmoid', (['conv_raw_conf'], {}), '(conv_raw_conf)\n', (6771, 6786), False, 'import torch\n'), ((6850, 6878), 'torch.sigmoid', 'torch.sigmoid', (['conv_raw_prob'], {}), '(conv_raw_prob)\n', (6863, 6878), False, 'import torch\n'), ((7312, 7337), 'torch.from_numpy', 'torch.from_numpy', (['im_size'], {}), '(im_size)\n', (7328, 7337), False, 'import torch\n'), ((7419, 7457), 'torch.cat', 'torch.cat', (['[_im_size_w, _im_size_h]', '(1)'], {}), '([_im_size_w, _im_size_h], 1)\n', (7428, 7457), 
False, 'import torch\n'), ((2753, 2791), 'torch.exp', 'torch.exp', (['(-1 * sigma * decay_iou ** 2)'], {}), '(-1 * sigma * decay_iou ** 2)\n', (2762, 2791), False, 'import torch\n'), ((2822, 2865), 'torch.exp', 'torch.exp', (['(-1 * sigma * compensate_iou ** 2)'], {}), '(-1 * sigma * compensate_iou ** 2)\n', (2831, 2865), False, 'import torch\n'), ((6527, 6552), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6550, 6552), False, 'import torch\n'), ((6604, 6628), 'torch.exp', 'torch.exp', (['conv_raw_dwdh'], {}), '(conv_raw_dwdh)\n', (6613, 6628), False, 'import torch\n'), ((7864, 7895), 'torch.where', 'torch.where', (['(x0 < 0)', '(x0 * 0)', 'x0'], {}), '(x0 < 0, x0 * 0, x0)\n', (7875, 7895), False, 'import torch\n'), ((7909, 7940), 'torch.where', 'torch.where', (['(y0 < 0)', '(y0 * 0)', 'y0'], {}), '(y0 < 0, y0 * 0, y0)\n', (7920, 7940), False, 'import torch\n'), ((7954, 8016), 'torch.where', 'torch.where', (['(x1 > _im_size[:, :, 0:1])', '_im_size[:, :, 0:1]', 'x1'], {}), '(x1 > _im_size[:, :, 0:1], _im_size[:, :, 0:1], x1)\n', (7965, 8016), False, 'import torch\n'), ((8030, 8092), 'torch.where', 'torch.where', (['(y1 > _im_size[:, :, 1:2])', '_im_size[:, :, 1:2]', 'y1'], {}), '(y1 > _im_size[:, :, 1:2], _im_size[:, :, 1:2], y1)\n', (8041, 8092), False, 'import torch\n'), ((8113, 8144), 'torch.cat', 'torch.cat', (['[x0, y0, x1, y1]', '(-1)'], {}), '([x0, y0, x1, y1], -1)\n', (8122, 8144), False, 'import torch\n'), ((8175, 8212), 'torch.cat', 'torch.cat', (['[pred_x0y0, pred_x1y1]', '(-1)'], {}), '([pred_x0y0, pred_x1y1], -1)\n', (8184, 8212), False, 'import torch\n'), ((3570, 3611), 'torch.zeros', 'torch.zeros', (['(1, 6)'], {'device': 'bboxes.device'}), '((1, 6), device=bboxes.device)\n', (3581, 3611), False, 'import torch\n'), ((4280, 4321), 'torch.zeros', 'torch.zeros', (['(1, 6)'], {'device': 'bboxes.device'}), '((1, 6), device=bboxes.device)\n', (4291, 4321), False, 'import torch\n'), ((6210, 6238), 'torch.sigmoid', 'torch.sigmoid', 
(['conv_raw_dxdy'], {}), '(conv_raw_dxdy)\n', (6223, 6238), False, 'import torch\n')]
|
#!/usr/bin/env python
#
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the Mumble Developers nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (unicode_literals, print_function, division)
import os
import platform
import sys
allowed_prefixes = ('qt', 'qtbase')
def dirToQrc(of, dirName, alreadyProcessedLangs):
    """Write a <file> entry to *of* for each allowed .qm file in *dirName*.

    Only files whose prefix is in ``allowed_prefixes`` are emitted, and a
    language already present in *alreadyProcessedLangs* is skipped.
    Returns the list of languages processed from this directory.
    """
    handled = []
    base_dir = os.path.abspath(dirName)
    for entry in os.listdir(dirName):
        stem, ext = os.path.splitext(entry)
        if ext.lower() != '.qm':
            continue
        underscore = stem.rfind('_')
        if underscore == -1:
            continue
        prefix = stem[:underscore]
        lang = stem[underscore + 1:]
        # Handle en_US-style locale names: an all-uppercase tail means the
        # real language code starts one underscore earlier.
        if lang.upper() == lang:
            underscore = prefix.rfind('_')
            prefix = stem[:underscore]
            lang = stem[underscore + 1:]
        if lang in alreadyProcessedLangs:
            continue
        if prefix not in allowed_prefixes:
            continue
        full_path = os.path.join(base_dir, entry)
        if platform.system() == 'Windows':
            full_path = full_path.replace('\\', '/')
        of.write(' <file alias="{0}">{1}</file>\n'.format(entry, full_path))
        handled.append(lang)
    return handled
def main():
    # python generate-mumble_qt-qrc.py <output-fn> [inputs...]
    output = sys.argv[1]
    inputs = sys.argv[2:]
    # Emit a Qt resource file listing every translation found in the
    # input directories; each language is taken from the first directory
    # that provides it.
    of = open(output, 'w')
    of.write('<!DOCTYPE RCC><RCC version="1.0">\n')
    of.write('<qresource>\n')
    processedLangs = []
    for dirName in inputs:
        newlyProcssedLangs = dirToQrc(of, dirName, processedLangs)
        processedLangs.extend(newlyProcssedLangs)
    of.write('</qresource>\n')
    of.write('</RCC>\n')
    of.close()
if __name__ == '__main__':
    # Script entry point; arguments are parsed inside main().
    main()
|
[
"os.path.abspath",
"os.path.splitext",
"platform.system",
"os.path.join",
"os.listdir"
] |
[((1797, 1821), 'os.path.abspath', 'os.path.abspath', (['dirName'], {}), '(dirName)\n', (1812, 1821), False, 'import os\n'), ((1829, 1848), 'os.listdir', 'os.listdir', (['dirName'], {}), '(dirName)\n', (1839, 1848), False, 'import os\n'), ((1883, 1903), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (1899, 1903), False, 'import os\n'), ((2410, 2438), 'os.path.join', 'os.path.join', (['absDirName', 'fn'], {}), '(absDirName, fn)\n', (2422, 2438), False, 'import os\n'), ((2444, 2461), 'platform.system', 'platform.system', ([], {}), '()\n', (2459, 2461), False, 'import platform\n')]
|
import os
import torch
import pickle
import pytest
import tempfile
import h5py
import numpy as np
from timeit import timeit
from tianshou.data import Batch, SegmentTree, \
ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer
from tianshou.data.utils.converter import to_hdf5
if __name__ == '__main__':
from env import MyTestEnv
else: # pytest
from test.base.env import MyTestEnv
def test_replaybuffer(size=10, bufsize=20):
    """Exercise ReplayBuffer add/sample/update, dtype handling and errors."""
    env = MyTestEnv(size)
    buf = ReplayBuffer(bufsize)
    buf.update(buf)
    assert str(buf) == buf.__class__.__name__ + '()'
    obs = env.reset()
    action_list = [1] * 5 + [0] * 10 + [1] * 10
    for i, a in enumerate(action_list):
        obs_next, rew, done, info = env.step(a)
        buf.add(obs, [a], rew, done, obs_next, info)
        obs = obs_next
        assert len(buf) == min(bufsize, i + 1)
    with pytest.raises(ValueError):
        buf._add_to_buffer('rew', np.array([1, 2, 3]))
    # List actions are stored as an object-dtype array.  The builtin
    # ``object`` replaces ``np.object``, which was deprecated in NumPy 1.20
    # and removed in NumPy 1.24.
    assert buf.act.dtype == object
    assert isinstance(buf.act[0], list)
    data, indice = buf.sample(bufsize * 2)
    assert (indice < len(buf)).all()
    assert (data.obs < size).all()
    assert (0 <= data.done).all() and (data.done <= 1).all()
    # Heterogeneous scalar/str/dict fields in a fresh buffer.
    b = ReplayBuffer(size=10)
    b.add(1, 1, 1, 'str', 1, {'a': 3, 'b': {'c': 5.0}})
    assert b.obs[0] == 1
    assert b.done[0] == 'str'
    assert np.all(b.obs[1:] == 0)
    assert np.all(b.done[1:] == np.array(None))
    assert b.info.a[0] == 3 and b.info.a.dtype == np.integer
    assert np.all(b.info.a[1:] == 0)
    assert b.info.b.c[0] == 5.0 and b.info.b.c.dtype == np.inexact
    assert np.all(b.info.b.c[1:] == 0.0)
    with pytest.raises(IndexError):
        b[22]
    b = ListReplayBuffer()
    with pytest.raises(NotImplementedError):
        b.sample(0)
def test_ignore_obs_next(size=10):
    # Issue 82
    # With ignore_obs_next=True the buffer must reconstruct obs_next from
    # the following obs rather than storing it.
    buf = ReplayBuffer(size, ignore_obs_next=True)
    for i in range(size):
        buf.add(obs={'mask1': np.array([i, 1, 1, 0, 0]),
                     'mask2': np.array([i + 4, 0, 1, 0, 0]),
                     'mask': i},
                act={'act_id': i,
                     'position_id': i + 3},
                rew=i,
                done=i % 3 == 0,
                info={'if': i})
    indice = np.arange(len(buf))
    orig = np.arange(len(buf))
    data = buf[indice]
    data2 = buf[indice]
    assert isinstance(data, Batch)
    assert isinstance(data2, Batch)
    # Indexing must not mutate the index array.
    assert np.allclose(indice, orig)
    # Repeated reads are stable and obs_next is derived correctly.
    assert np.allclose(data.obs_next.mask, data2.obs_next.mask)
    assert np.allclose(data.obs_next.mask, [0, 2, 3, 3, 5, 6, 6, 8, 9, 9])
    # The same invariants must hold with frame stacking enabled.
    buf.stack_num = 4
    data = buf[indice]
    data2 = buf[indice]
    assert np.allclose(data.obs_next.mask, data2.obs_next.mask)
    assert np.allclose(data.obs_next.mask, np.array([
        [0, 0, 0, 0], [1, 1, 1, 2], [1, 1, 2, 3], [1, 1, 2, 3],
        [4, 4, 4, 5], [4, 4, 5, 6], [4, 4, 5, 6],
        [7, 7, 7, 8], [7, 7, 8, 9], [7, 7, 8, 9]]))
    assert np.allclose(data.info['if'], data2.info['if'])
    assert np.allclose(data.info['if'], np.array([
        [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3],
        [4, 4, 4, 4], [4, 4, 4, 5], [4, 4, 5, 6],
        [7, 7, 7, 7], [7, 7, 7, 8], [7, 7, 8, 9]]))
    assert data.obs_next
def test_stack(size=5, bufsize=9, stack_num=4):
    """Frame stacking, sample_avail and save_only_last_obs behaviours."""
    env = MyTestEnv(size)
    buf = ReplayBuffer(bufsize, stack_num=stack_num)
    buf2 = ReplayBuffer(bufsize, stack_num=stack_num, sample_avail=True)
    buf3 = ReplayBuffer(bufsize, stack_num=stack_num, save_only_last_obs=True)
    obs = env.reset(1)
    for i in range(16):
        obs_next, rew, done, info = env.step(1)
        buf.add(obs, 1, rew, done, None, info)
        buf2.add(obs, 1, rew, done, None, info)
        # buf3 receives a list of frames but should keep only the last one.
        buf3.add([None, None, obs], 1, rew, done, [None, obs], info)
        obs = obs_next
        if done:
            obs = env.reset(1)
    indice = np.arange(len(buf))
    assert np.allclose(buf.get(indice, 'obs')[..., 0], [
        [1, 1, 1, 2], [1, 1, 2, 3], [1, 2, 3, 4],
        [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3],
        [1, 2, 3, 4], [4, 4, 4, 4], [1, 1, 1, 1]])
    assert np.allclose(buf.get(indice, 'obs'), buf3.get(indice, 'obs'))
    assert np.allclose(buf.get(indice, 'obs'), buf3.get(indice, 'obs_next'))
    # sample_avail restricts sampling to indices with a full stack.
    _, indice = buf2.sample(0)
    assert indice.tolist() == [2, 6]
    _, indice = buf2.sample(1)
    assert indice in [2, 6]
    with pytest.raises(IndexError):
        buf[bufsize * 2]
def test_priortized_replaybuffer(size=32, bufsize=15):
    """Sampling sizes and weight updates of PrioritizedReplayBuffer."""
    env = MyTestEnv(size)
    buf = PrioritizedReplayBuffer(bufsize, 0.5, 0.5)
    obs = env.reset()
    action_list = [1] * 5 + [0] * 10 + [1] * 10
    for i, a in enumerate(action_list):
        obs_next, rew, done, info = env.step(a)
        # Random priority around zero on every add.
        buf.add(obs, a, rew, done, obs_next, info, np.random.randn() - 0.5)
        obs = obs_next
        data, indice = buf.sample(len(buf) // 2)
        if len(buf) // 2 == 0:
            assert len(data) == len(buf)
        else:
            assert len(data) == len(buf) // 2
        assert len(buf) == min(bufsize, i + 1)
    data, indice = buf.sample(len(buf) // 2)
    # Updated weights are stored raised to the buffer's alpha exponent.
    buf.update_weight(indice, -data.weight / 2)
    assert np.allclose(
        buf.weight[indice], np.abs(-data.weight / 2) ** buf._alpha)
def test_update():
    """update() should copy the source buffer's contents in insertion order."""
    source = ReplayBuffer(4, stack_num=2)
    target = ReplayBuffer(4, stack_num=2)
    for step in range(5):
        source.add(
            obs=np.array([step]),
            act=float(step),
            rew=step * step,
            done=step % 2 == 0,
            info={'incident': 'found'},
        )
    # Five adds into a size-4 buffer wrapped once; the target is still empty.
    assert len(source) > len(target)
    target.update(source)
    assert len(source) == len(target)
    assert (target[0].obs == source[1].obs).all()
    assert (target[-1].obs == source[0].obs).all()
def test_segtree():
    """SegmentTree single/batch updates, reduce() and prefix-sum lookups."""
    realop = np.sum
    # small test
    actual_len = 8
    tree = SegmentTree(actual_len)  # 1-15. 8-15 are leaf nodes
    assert len(tree) == actual_len
    assert np.all([tree[i] == 0. for i in range(actual_len)])
    with pytest.raises(IndexError):
        tree[actual_len]
    naive = np.zeros([actual_len])
    for _ in range(1000):
        # random choose a place to perform single update
        index = np.random.randint(actual_len)
        value = np.random.rand()
        naive[index] = value
        tree[index] = value
        for i in range(actual_len):
            for j in range(i + 1, actual_len):
                ref = realop(naive[i:j])
                out = tree.reduce(i, j)
                assert np.allclose(ref, out), (ref, out)
    assert np.allclose(tree.reduce(start=1), realop(naive[1:]))
    assert np.allclose(tree.reduce(end=-1), realop(naive[:-1]))
    # batch setitem
    for _ in range(1000):
        index = np.random.choice(actual_len, size=4)
        value = np.random.rand(4)
        naive[index] = value
        tree[index] = value
        assert np.allclose(realop(naive), tree.reduce())
        for i in range(10):
            left = np.random.randint(actual_len)
            right = np.random.randint(left + 1, actual_len + 1)
            assert np.allclose(realop(naive[left:right]),
                               tree.reduce(left, right))
    # large test
    actual_len = 16384
    tree = SegmentTree(actual_len)
    naive = np.zeros([actual_len])
    for _ in range(1000):
        index = np.random.choice(actual_len, size=64)
        value = np.random.rand(64)
        naive[index] = value
        tree[index] = value
        assert np.allclose(realop(naive), tree.reduce())
        for i in range(10):
            left = np.random.randint(actual_len)
            right = np.random.randint(left + 1, actual_len + 1)
            assert np.allclose(realop(naive[left:right]),
                               tree.reduce(left, right))
    # test prefix-sum-idx
    actual_len = 8
    tree = SegmentTree(actual_len)
    naive = np.random.rand(actual_len)
    tree[np.arange(actual_len)] = naive
    for _ in range(1000):
        scalar = np.random.rand() * naive.sum()
        index = tree.get_prefix_sum_idx(scalar)
        assert naive[:index].sum() <= scalar <= naive[:index + 1].sum()
    # corner case: all-ones leaves.  The builtin ``int`` replaces ``np.int``,
    # which was deprecated in NumPy 1.20 and removed in NumPy 1.24.
    naive = np.ones(actual_len, dtype=int)
    tree[np.arange(actual_len)] = naive
    for scalar in range(actual_len):
        index = tree.get_prefix_sum_idx(scalar * 1.)
        assert naive[:index].sum() <= scalar <= naive[:index + 1].sum()
    tree = SegmentTree(10)
    tree[np.arange(3)] = np.array([0.1, 0, 0.1])
    assert np.allclose(tree.get_prefix_sum_idx(
        np.array([0, .1, .1 + 1e-6, .2 - 1e-6])), [0, 0, 2, 2])
    with pytest.raises(AssertionError):
        tree.get_prefix_sum_idx(.2)
    # test large prefix-sum-idx
    actual_len = 16384
    tree = SegmentTree(actual_len)
    naive = np.random.rand(actual_len)
    tree[np.arange(actual_len)] = naive
    for _ in range(1000):
        scalar = np.random.rand() * naive.sum()
        index = tree.get_prefix_sum_idx(scalar)
        assert naive[:index].sum() <= scalar <= naive[:index + 1].sum()
# profile
if __name__ == '__main__':
    # Micro-benchmark: segment-tree prefix-sum sampling vs numpy's
    # weighted np.random.choice over the same distribution.
    size = 100000
    bsz = 64
    naive = np.random.rand(size)
    tree = SegmentTree(size)
    tree[np.arange(size)] = naive
    # Baseline: numpy weighted sampling over the raw probability vector.
    def sample_npbuf():
        return np.random.choice(size, bsz, p=naive / naive.sum())
    # Candidate: prefix-sum lookup through the segment tree.
    def sample_tree():
        scalar = np.random.rand(bsz) * tree.reduce()
        return tree.get_prefix_sum_idx(scalar)
    print('npbuf', timeit(sample_npbuf, setup=sample_npbuf, number=1000))
    print('tree', timeit(sample_tree, setup=sample_tree, number=1000))
def test_pickle():
    """Buffers must round-trip through pickle with data and meta intact."""
    size = 100
    vbuf = ReplayBuffer(size, stack_num=2)
    lbuf = ListReplayBuffer()
    pbuf = PrioritizedReplayBuffer(size, 0.6, 0.4)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    rew = torch.tensor([1.]).to(device)
    for i in range(4):
        vbuf.add(obs=Batch(index=np.array([i])), act=0, rew=rew, done=0)
    for i in range(3):
        lbuf.add(obs=Batch(index=np.array([i])), act=1, rew=rew, done=0)
    for i in range(5):
        pbuf.add(obs=Batch(index=np.array([i])),
                 act=2, rew=rew, done=0, weight=np.random.rand())
    # save & load
    _vbuf = pickle.loads(pickle.dumps(vbuf))
    _lbuf = pickle.loads(pickle.dumps(lbuf))
    _pbuf = pickle.loads(pickle.dumps(pbuf))
    assert len(_vbuf) == len(vbuf) and np.allclose(_vbuf.act, vbuf.act)
    assert len(_lbuf) == len(lbuf) and np.allclose(_lbuf.act, lbuf.act)
    assert len(_pbuf) == len(pbuf) and np.allclose(_pbuf.act, pbuf.act)
    # make sure the meta var is identical
    assert _vbuf.stack_num == vbuf.stack_num
    assert np.allclose(_pbuf.weight[np.arange(len(_pbuf))],
                       pbuf.weight[np.arange(len(pbuf))])
def test_hdf5():
    """Buffers must round-trip through save_hdf5/load_hdf5; unpicklable
    values must raise from to_hdf5."""
    size = 100
    buffers = {
        "array": ReplayBuffer(size, stack_num=2),
        "list": ListReplayBuffer(),
        "prioritized": PrioritizedReplayBuffer(size, 0.6, 0.4)
    }
    buffer_types = {k: b.__class__ for k, b in buffers.items()}
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    rew = torch.tensor([1.]).to(device)
    for i in range(4):
        kwargs = {
            'obs': Batch(index=np.array([i])),
            'act': i,
            'rew': rew,
            'done': 0,
            'info': {"number": {"n": i}, 'extra': None},
        }
        buffers["array"].add(**kwargs)
        buffers["list"].add(**kwargs)
        buffers["prioritized"].add(weight=np.random.rand(), **kwargs)
    # save each buffer to its own temp file
    paths = {}
    for k, buf in buffers.items():
        f, path = tempfile.mkstemp(suffix='.hdf5')
        os.close(f)
        buf.save_hdf5(path)
        paths[k] = path
    # load replay buffer
    _buffers = {k: buffer_types[k].load_hdf5(paths[k]) for k in paths.keys()}
    # compare data and internal bookkeeping of original vs reloaded buffers
    for k in buffers.keys():
        assert len(_buffers[k]) == len(buffers[k])
        assert np.allclose(_buffers[k].act, buffers[k].act)
        assert _buffers[k].stack_num == buffers[k].stack_num
        assert _buffers[k]._maxsize == buffers[k]._maxsize
        assert _buffers[k]._index == buffers[k]._index
        assert np.all(_buffers[k]._indices == buffers[k]._indices)
    for k in ["array", "prioritized"]:
        assert isinstance(buffers[k].get(0, "info"), Batch)
        assert isinstance(_buffers[k].get(0, "info"), Batch)
    for k in ["array"]:
        assert np.all(
            buffers[k][:].info.number.n == _buffers[k][:].info.number.n)
        assert np.all(
            buffers[k][:].info.extra == _buffers[k][:].info.extra)
    for path in paths.values():
        os.remove(path)
    # raise exception when value cannot be pickled
    data = {"not_supported": lambda x: x*x}
    grp = h5py.Group
    with pytest.raises(NotImplementedError):
        to_hdf5(data, grp)
    # ndarray with data type not supported by HDF5 that cannot be pickled
    data = {"not_supported": np.array(lambda x: x*x)}
    grp = h5py.Group
    with pytest.raises(RuntimeError):
        to_hdf5(data, grp)
if __name__ == '__main__':
    # Run the whole suite directly (pytest discovers these when imported).
    test_hdf5()
    test_replaybuffer()
    test_ignore_obs_next()
    test_stack()
    test_pickle()
    test_segtree()
    test_priortized_replaybuffer()
    # Stress run with a much larger environment and buffer.
    test_priortized_replaybuffer(233333, 200000)
    test_update()
|
[
"os.remove",
"numpy.abs",
"numpy.allclose",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"os.close",
"timeit.timeit",
"tianshou.data.ListReplayBuffer",
"numpy.random.randn",
"tianshou.data.PrioritizedReplayBuffer",
"tianshou.data.SegmentTree",
"pytest.raises",
"numpy.random.choice",
"pickle.dumps",
"torch.cuda.is_available",
"test.base.env.MyTestEnv",
"numpy.all",
"tianshou.data.ReplayBuffer",
"tempfile.mkstemp",
"tianshou.data.utils.converter.to_hdf5",
"numpy.zeros",
"numpy.array",
"numpy.random.rand",
"torch.tensor"
] |
[((453, 468), 'test.base.env.MyTestEnv', 'MyTestEnv', (['size'], {}), '(size)\n', (462, 468), False, 'from test.base.env import MyTestEnv\n'), ((479, 500), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {}), '(bufsize)\n', (491, 500), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((1208, 1229), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', ([], {'size': '(10)'}), '(size=10)\n', (1220, 1229), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((1352, 1374), 'numpy.all', 'np.all', (['(b.obs[1:] == 0)'], {}), '(b.obs[1:] == 0)\n', (1358, 1374), True, 'import numpy as np\n'), ((1495, 1520), 'numpy.all', 'np.all', (['(b.info.a[1:] == 0)'], {}), '(b.info.a[1:] == 0)\n', (1501, 1520), True, 'import numpy as np\n'), ((1599, 1628), 'numpy.all', 'np.all', (['(b.info.b.c[1:] == 0.0)'], {}), '(b.info.b.c[1:] == 0.0)\n', (1605, 1628), True, 'import numpy as np\n'), ((1687, 1705), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (1703, 1705), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((1833, 1873), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['size'], {'ignore_obs_next': '(True)'}), '(size, ignore_obs_next=True)\n', (1845, 1873), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((2410, 2435), 'numpy.allclose', 'np.allclose', (['indice', 'orig'], {}), '(indice, orig)\n', (2421, 2435), True, 'import numpy as np\n'), ((2447, 2499), 'numpy.allclose', 'np.allclose', (['data.obs_next.mask', 'data2.obs_next.mask'], {}), '(data.obs_next.mask, data2.obs_next.mask)\n', (2458, 2499), True, 'import numpy as np\n'), ((2511, 2574), 'numpy.allclose', 'np.allclose', (['data.obs_next.mask', '[0, 2, 3, 3, 5, 6, 6, 8, 9, 9]'], {}), '(data.obs_next.mask, [0, 2, 3, 3, 5, 6, 6, 8, 9, 9])\n', 
(2522, 2574), True, 'import numpy as np\n'), ((2655, 2707), 'numpy.allclose', 'np.allclose', (['data.obs_next.mask', 'data2.obs_next.mask'], {}), '(data.obs_next.mask, data2.obs_next.mask)\n', (2666, 2707), True, 'import numpy as np\n'), ((2939, 2985), 'numpy.allclose', 'np.allclose', (["data.info['if']", "data2.info['if']"], {}), "(data.info['if'], data2.info['if'])\n", (2950, 2985), True, 'import numpy as np\n'), ((3288, 3303), 'test.base.env.MyTestEnv', 'MyTestEnv', (['size'], {}), '(size)\n', (3297, 3303), False, 'from test.base.env import MyTestEnv\n'), ((3314, 3356), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {'stack_num': 'stack_num'}), '(bufsize, stack_num=stack_num)\n', (3326, 3356), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((3368, 3429), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {'stack_num': 'stack_num', 'sample_avail': '(True)'}), '(bufsize, stack_num=stack_num, sample_avail=True)\n', (3380, 3429), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((3441, 3508), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {'stack_num': 'stack_num', 'save_only_last_obs': '(True)'}), '(bufsize, stack_num=stack_num, save_only_last_obs=True)\n', (3453, 3508), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((4484, 4499), 'test.base.env.MyTestEnv', 'MyTestEnv', (['size'], {}), '(size)\n', (4493, 4499), False, 'from test.base.env import MyTestEnv\n'), ((4510, 4552), 'tianshou.data.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['bufsize', '(0.5)', '(0.5)'], {}), '(bufsize, 0.5, 0.5)\n', (4533, 4552), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5255, 5283), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(4)'], {'stack_num': '(2)'}), '(4, 
stack_num=2)\n', (5267, 5283), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5295, 5323), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(4)'], {'stack_num': '(2)'}), '(4, stack_num=2)\n', (5307, 5323), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5741, 5764), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (5752, 5764), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5964, 5986), 'numpy.zeros', 'np.zeros', (['[actual_len]'], {}), '([actual_len])\n', (5972, 5986), True, 'import numpy as np\n'), ((7109, 7132), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (7120, 7132), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((7145, 7167), 'numpy.zeros', 'np.zeros', (['[actual_len]'], {}), '([actual_len])\n', (7153, 7167), True, 'import numpy as np\n'), ((7710, 7733), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (7721, 7733), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((7746, 7772), 'numpy.random.rand', 'np.random.rand', (['actual_len'], {}), '(actual_len)\n', (7760, 7772), True, 'import numpy as np\n'), ((8042, 8069), 'numpy.ones', 'np.ones', (['actual_len', 'np.int'], {}), '(actual_len, np.int)\n', (8049, 8069), True, 'import numpy as np\n'), ((8283, 8298), 'tianshou.data.SegmentTree', 'SegmentTree', (['(10)'], {}), '(10)\n', (8294, 8298), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((8324, 8347), 'numpy.array', 'np.array', (['[0.1, 0, 0.1]'], {}), '([0.1, 0, 0.1])\n', (8332, 8347), True, 'import numpy as np\n'), ((8602, 8625), 
'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (8613, 8625), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((8638, 8664), 'numpy.random.rand', 'np.random.rand', (['actual_len'], {}), '(actual_len)\n', (8652, 8664), True, 'import numpy as np\n'), ((9528, 9559), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['size'], {'stack_num': '(2)'}), '(size, stack_num=2)\n', (9540, 9559), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((9571, 9589), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (9587, 9589), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((9601, 9640), 'tianshou.data.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['size', '(0.6)', '(0.4)'], {}), '(size, 0.6, 0.4)\n', (9624, 9640), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((864, 889), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (877, 889), False, 'import pytest\n'), ((1638, 1663), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (1651, 1663), False, 'import pytest\n'), ((1715, 1749), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1728, 1749), False, 'import pytest\n'), ((2751, 2905), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [1, 1, 1, 2], [1, 1, 2, 3], [1, 1, 2, 3], [4, 4, 4, 5], [4, \n 4, 5, 6], [4, 4, 5, 6], [7, 7, 7, 8], [7, 7, 8, 9], [7, 7, 8, 9]]'], {}), '([[0, 0, 0, 0], [1, 1, 1, 2], [1, 1, 2, 3], [1, 1, 2, 3], [4, 4, 4,\n 5], [4, 4, 5, 6], [4, 4, 5, 6], [7, 7, 7, 8], [7, 7, 8, 9], [7, 7, 8, 9]])\n', (2759, 2905), True, 'import numpy as np\n'), ((3026, 3180), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3], [4, 4, 4, 4], 
[4, \n 4, 4, 5], [4, 4, 5, 6], [7, 7, 7, 7], [7, 7, 7, 8], [7, 7, 8, 9]]'], {}), '([[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3], [4, 4, 4,\n 4], [4, 4, 4, 5], [4, 4, 5, 6], [7, 7, 7, 7], [7, 7, 7, 8], [7, 7, 8, 9]])\n', (3034, 3180), True, 'import numpy as np\n'), ((4365, 4390), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (4378, 4390), False, 'import pytest\n'), ((5900, 5925), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (5913, 5925), False, 'import pytest\n'), ((6086, 6115), 'numpy.random.randint', 'np.random.randint', (['actual_len'], {}), '(actual_len)\n', (6103, 6115), True, 'import numpy as np\n'), ((6132, 6148), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6146, 6148), True, 'import numpy as np\n'), ((6617, 6653), 'numpy.random.choice', 'np.random.choice', (['actual_len'], {'size': '(4)'}), '(actual_len, size=4)\n', (6633, 6653), True, 'import numpy as np\n'), ((6670, 6687), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (6684, 6687), True, 'import numpy as np\n'), ((7210, 7247), 'numpy.random.choice', 'np.random.choice', (['actual_len'], {'size': '(64)'}), '(actual_len, size=64)\n', (7226, 7247), True, 'import numpy as np\n'), ((7264, 7282), 'numpy.random.rand', 'np.random.rand', (['(64)'], {}), '(64)\n', (7278, 7282), True, 'import numpy as np\n'), ((7782, 7803), 'numpy.arange', 'np.arange', (['actual_len'], {}), '(actual_len)\n', (7791, 7803), True, 'import numpy as np\n'), ((8079, 8100), 'numpy.arange', 'np.arange', (['actual_len'], {}), '(actual_len)\n', (8088, 8100), True, 'import numpy as np\n'), ((8308, 8320), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (8317, 8320), True, 'import numpy as np\n'), ((8469, 8498), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (8482, 8498), False, 'import pytest\n'), ((8674, 8695), 'numpy.arange', 'np.arange', (['actual_len'], {}), '(actual_len)\n', (8683, 8695), True, 'import 
numpy as np\n'), ((9000, 9020), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (9014, 9020), True, 'import numpy as np\n'), ((9036, 9053), 'tianshou.data.SegmentTree', 'SegmentTree', (['size'], {}), '(size)\n', (9047, 9053), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((9664, 9689), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9687, 9689), False, 'import torch\n'), ((10114, 10132), 'pickle.dumps', 'pickle.dumps', (['vbuf'], {}), '(vbuf)\n', (10126, 10132), False, 'import pickle\n'), ((10159, 10177), 'pickle.dumps', 'pickle.dumps', (['lbuf'], {}), '(lbuf)\n', (10171, 10177), False, 'import pickle\n'), ((10204, 10222), 'pickle.dumps', 'pickle.dumps', (['pbuf'], {}), '(pbuf)\n', (10216, 10222), False, 'import pickle\n'), ((10263, 10295), 'numpy.allclose', 'np.allclose', (['_vbuf.act', 'vbuf.act'], {}), '(_vbuf.act, vbuf.act)\n', (10274, 10295), True, 'import numpy as np\n'), ((10335, 10367), 'numpy.allclose', 'np.allclose', (['_lbuf.act', 'lbuf.act'], {}), '(_lbuf.act, lbuf.act)\n', (10346, 10367), True, 'import numpy as np\n'), ((10407, 10439), 'numpy.allclose', 'np.allclose', (['_pbuf.act', 'pbuf.act'], {}), '(_pbuf.act, pbuf.act)\n', (10418, 10439), True, 'import numpy as np\n'), ((10712, 10743), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['size'], {'stack_num': '(2)'}), '(size, stack_num=2)\n', (10724, 10743), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((10761, 10779), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (10777, 10779), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((10804, 10843), 'tianshou.data.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['size', '(0.6)', '(0.4)'], {}), '(size, 0.6, 0.4)\n', (10827, 10843), False, 'from tianshou.data import Batch, 
SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((10937, 10962), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10960, 10962), False, 'import torch\n'), ((11466, 11498), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".hdf5"""'}), "(suffix='.hdf5')\n", (11482, 11498), False, 'import tempfile\n'), ((11507, 11518), 'os.close', 'os.close', (['f'], {}), '(f)\n', (11515, 11518), False, 'import os\n'), ((11785, 11829), 'numpy.allclose', 'np.allclose', (['_buffers[k].act', 'buffers[k].act'], {}), '(_buffers[k].act, buffers[k].act)\n', (11796, 11829), True, 'import numpy as np\n'), ((12020, 12071), 'numpy.all', 'np.all', (['(_buffers[k]._indices == buffers[k]._indices)'], {}), '(_buffers[k]._indices == buffers[k]._indices)\n', (12026, 12071), True, 'import numpy as np\n'), ((12271, 12338), 'numpy.all', 'np.all', (['(buffers[k][:].info.number.n == _buffers[k][:].info.number.n)'], {}), '(buffers[k][:].info.number.n == _buffers[k][:].info.number.n)\n', (12277, 12338), True, 'import numpy as np\n'), ((12367, 12428), 'numpy.all', 'np.all', (['(buffers[k][:].info.extra == _buffers[k][:].info.extra)'], {}), '(buffers[k][:].info.extra == _buffers[k][:].info.extra)\n', (12373, 12428), True, 'import numpy as np\n'), ((12483, 12498), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (12492, 12498), False, 'import os\n'), ((12625, 12659), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (12638, 12659), False, 'import pytest\n'), ((12669, 12687), 'tianshou.data.utils.converter.to_hdf5', 'to_hdf5', (['data', 'grp'], {}), '(data, grp)\n', (12676, 12687), False, 'from tianshou.data.utils.converter import to_hdf5\n'), ((12791, 12816), 'numpy.array', 'np.array', (['(lambda x: x * x)'], {}), '(lambda x: x * x)\n', (12799, 12816), True, 'import numpy as np\n'), ((12846, 12873), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (12859, 12873), False, 
'import pytest\n'), ((12883, 12901), 'tianshou.data.utils.converter.to_hdf5', 'to_hdf5', (['data', 'grp'], {}), '(data, grp)\n', (12890, 12901), False, 'from tianshou.data.utils.converter import to_hdf5\n'), ((925, 944), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (933, 944), True, 'import numpy as np\n'), ((1407, 1421), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (1415, 1421), True, 'import numpy as np\n'), ((5183, 5207), 'numpy.abs', 'np.abs', (['(-data.weight / 2)'], {}), '(-data.weight / 2)\n', (5189, 5207), True, 'import numpy as np\n'), ((6849, 6878), 'numpy.random.randint', 'np.random.randint', (['actual_len'], {}), '(actual_len)\n', (6866, 6878), True, 'import numpy as np\n'), ((6899, 6942), 'numpy.random.randint', 'np.random.randint', (['(left + 1)', '(actual_len + 1)'], {}), '(left + 1, actual_len + 1)\n', (6916, 6942), True, 'import numpy as np\n'), ((7444, 7473), 'numpy.random.randint', 'np.random.randint', (['actual_len'], {}), '(actual_len)\n', (7461, 7473), True, 'import numpy as np\n'), ((7494, 7537), 'numpy.random.randint', 'np.random.randint', (['(left + 1)', '(actual_len + 1)'], {}), '(left + 1, actual_len + 1)\n', (7511, 7537), True, 'import numpy as np\n'), ((7856, 7872), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7870, 7872), True, 'import numpy as np\n'), ((8404, 8448), 'numpy.array', 'np.array', (['[0, 0.1, 0.1 + 1e-06, 0.2 - 1e-06]'], {}), '([0, 0.1, 0.1 + 1e-06, 0.2 - 1e-06])\n', (8412, 8448), True, 'import numpy as np\n'), ((8748, 8764), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8762, 8764), True, 'import numpy as np\n'), ((9067, 9082), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (9076, 9082), True, 'import numpy as np\n'), ((9351, 9404), 'timeit.timeit', 'timeit', (['sample_npbuf'], {'setup': 'sample_npbuf', 'number': '(1000)'}), '(sample_npbuf, setup=sample_npbuf, number=1000)\n', (9357, 9404), False, 'from timeit import timeit\n'), ((9428, 9479), 
'timeit.timeit', 'timeit', (['sample_tree'], {'setup': 'sample_tree', 'number': '(1000)'}), '(sample_tree, setup=sample_tree, number=1000)\n', (9434, 9479), False, 'from timeit import timeit\n'), ((9711, 9730), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (9723, 9730), False, 'import torch\n'), ((10984, 11003), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (10996, 11003), False, 'import torch\n'), ((4762, 4779), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4777, 4779), True, 'import numpy as np\n'), ((5368, 5381), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (5376, 5381), True, 'import numpy as np\n'), ((6393, 6414), 'numpy.allclose', 'np.allclose', (['ref', 'out'], {}), '(ref, out)\n', (6404, 6414), True, 'import numpy as np\n'), ((9240, 9259), 'numpy.random.rand', 'np.random.rand', (['bsz'], {}), '(bsz)\n', (9254, 9259), True, 'import numpy as np\n'), ((10053, 10069), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10067, 10069), True, 'import numpy as np\n'), ((11358, 11374), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (11372, 11374), True, 'import numpy as np\n'), ((1930, 1955), 'numpy.array', 'np.array', (['[i, 1, 1, 0, 0]'], {}), '([i, 1, 1, 0, 0])\n', (1938, 1955), True, 'import numpy as np\n'), ((1987, 2016), 'numpy.array', 'np.array', (['[i + 4, 0, 1, 0, 0]'], {}), '([i + 4, 0, 1, 0, 0])\n', (1995, 2016), True, 'import numpy as np\n'), ((11087, 11100), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (11095, 11100), True, 'import numpy as np\n'), ((9797, 9810), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (9805, 9810), True, 'import numpy as np\n'), ((9893, 9906), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (9901, 9906), True, 'import numpy as np\n'), ((9989, 10002), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (9997, 10002), True, 'import numpy as np\n')]
|
import torch
import torch.nn.functional as F
from .. import BaseModel, register_model
from cogdl.utils import (
add_remaining_self_loops,
remove_self_loops,
row_normalization,
symmetric_normalization,
to_undirected,
spmm,
dropout_adj,
)
def get_adj(row, col, asymm_norm=False, set_diag=True, remove_diag=False):
    """Build a weighted sparse adjacency from (row, col) index vectors.

    Optionally adds (or removes) self-loops, then attaches per-edge
    weights via row or symmetric normalization.
    """
    edge_index = torch.stack([row, col])
    edge_attr = torch.ones(edge_index.shape[1]).to(edge_index.device)

    if set_diag:
        # Guarantee a self-loop on every node before normalizing.
        edge_index, edge_attr = add_remaining_self_loops(edge_index, edge_attr)
    elif remove_diag:
        # NOTE(review): edge_attr is not filtered together with edge_index,
        # so its length may no longer match once self-loops are dropped —
        # confirm against cogdl's remove_self_loops contract.
        edge_index, _ = remove_self_loops(edge_index)

    num_nodes = int(torch.max(edge_index)) + 1
    normalize = symmetric_normalization if asymm_norm else row_normalization
    edge_attr = normalize(num_nodes, edge_index, edge_attr)
    return edge_index, edge_attr
@register_model("sign")
class MLP(BaseModel):
    """SIGN-style model: propagated node features are precomputed once
    (``num_propagations`` hops over the normalized adjacency and its
    transpose), concatenated with the raw features, cached, and fed to a
    plain MLP classifier.
    """

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--num-features', type=int)
        parser.add_argument("--num-classes", type=int)
        parser.add_argument('--hidden-size', type=int, default=512)
        parser.add_argument('--num-layers', type=int, default=3)
        parser.add_argument('--dropout', type=float, default=0.3)
        parser.add_argument('--dropedge-rate', type=float, default=0.2)
        parser.add_argument('--directed', action='store_true')
        parser.add_argument('--num-propagations', type=int, default=1)
        parser.add_argument('--asymm-norm', action='store_true')
        parser.add_argument('--set-diag', action='store_true')
        parser.add_argument('--remove-diag', action='store_true')
        # fmt: on

    @classmethod
    def build_model_from_args(cls, args):
        """Instantiate the model from parsed command-line arguments."""
        # BUG FIX: the previous positional call passed args.directed into the
        # `dropedge_rate` slot and args.dropedge_rate into `undirected`
        # (the __init__ parameter order is dropout, dropedge_rate, undirected).
        # Keyword arguments make the mapping explicit and correct.
        return cls(
            args.num_features,
            args.hidden_size,
            args.num_classes,
            args.num_layers,
            dropout=args.dropout,
            dropedge_rate=args.dropedge_rate,
            # --directed means "keep the graph directed", i.e. do NOT
            # symmetrize edges during preprocessing.
            undirected=not args.directed,
            num_propagations=args.num_propagations,
            asymm_norm=args.asymm_norm,
            set_diag=args.set_diag,
            remove_diag=args.remove_diag,
        )

    def __init__(
        self,
        num_features,
        hidden_size,
        num_classes,
        num_layers,
        dropout,
        dropedge_rate,
        undirected,
        num_propagations,
        asymm_norm,
        set_diag,
        remove_diag,
    ):
        super(MLP, self).__init__()
        self.dropout = dropout
        self.dropedge_rate = dropedge_rate
        self.undirected = undirected
        self.num_propagations = num_propagations
        self.asymm_norm = asymm_norm
        self.set_diag = set_diag
        self.remove_diag = remove_diag

        # Input width = raw features + num_propagations hops in each
        # direction (adjacency and its transpose), concatenated.
        self.lins = torch.nn.ModuleList()
        self.lins.append(torch.nn.Linear((1 + 2 * self.num_propagations) * num_features, hidden_size))
        self.bns = torch.nn.ModuleList()
        self.bns.append(torch.nn.BatchNorm1d(hidden_size))
        for _ in range(num_layers - 2):
            self.lins.append(torch.nn.Linear(hidden_size, hidden_size))
            self.bns.append(torch.nn.BatchNorm1d(hidden_size))
        self.lins.append(torch.nn.Linear(hidden_size, num_classes))

        # Propagated features are computed once and cached here.
        self.cache_x = None

    def reset_parameters(self):
        """Re-initialize all linear and batch-norm layers."""
        for lin in self.lins:
            lin.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def _preprocessing(self, x, edge_index):
        """Precompute the concatenated multi-hop feature matrix.

        Propagates ``x`` over both the normalized adjacency and its
        transpose, collecting every intermediate hop.
        """
        num_nodes = x.shape[0]
        op_embedding = [x]

        # Randomly drop edges once, before building both adjacencies.
        edge_index, _ = dropout_adj(edge_index, drop_rate=self.dropedge_rate)
        row, col = edge_index

        if self.undirected:
            edge_index = to_undirected(edge_index, num_nodes)
            row, col = edge_index

        # Forward adjacency: propagate num_propagations hops.
        edge_index, edge_attr = get_adj(
            row, col, asymm_norm=self.asymm_norm, set_diag=self.set_diag, remove_diag=self.remove_diag
        )
        nx = x
        for _ in range(self.num_propagations):
            nx = spmm(edge_index, edge_attr, nx)
            op_embedding.append(nx)

        # Transposed adjacency: same, with row/col swapped.
        edge_index, edge_attr = get_adj(
            col, row, asymm_norm=self.asymm_norm, set_diag=self.set_diag, remove_diag=self.remove_diag
        )
        nx = x
        for _ in range(self.num_propagations):
            nx = spmm(edge_index, edge_attr, nx)
            op_embedding.append(nx)

        return torch.cat(op_embedding, dim=1)

    def forward(self, x, edge_index):
        """Return log-probabilities for every node."""
        if self.cache_x is None:
            # Propagation is graph-only work; do it once and reuse.
            self.cache_x = self._preprocessing(x, edge_index)
        x = self.cache_x
        for i, lin in enumerate(self.lins[:-1]):
            x = lin(x)
            x = self.bns[i](x)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[-1](x)
        return torch.log_softmax(x, dim=-1)

    def node_classification_loss(self, data, mask=None):
        """Cross-entropy loss over ``mask`` (defaults to the train mask)."""
        if mask is None:
            mask = data.train_mask
        # Use the training-only edge set when one is provided.
        edge_index = data.edge_index_train if hasattr(data, "edge_index_train") and self.training else data.edge_index
        pred = self.forward(data.x, edge_index)
        return self.loss_fn(pred[mask], data.y[mask])

    def predict(self, data):
        """Return predictions for all nodes of ``data``."""
        return self.forward(data.x, data.edge_index)
|
[
"cogdl.utils.dropout_adj",
"torch.log_softmax",
"torch.ones",
"torch.stack",
"torch.nn.ModuleList",
"torch.nn.BatchNorm1d",
"cogdl.utils.to_undirected",
"torch.cat",
"torch.nn.functional.dropout",
"cogdl.utils.symmetric_normalization",
"cogdl.utils.spmm",
"torch.max",
"torch.nn.Linear",
"torch.nn.functional.relu",
"cogdl.utils.add_remaining_self_loops",
"cogdl.utils.row_normalization",
"cogdl.utils.remove_self_loops"
] |
[((360, 383), 'torch.stack', 'torch.stack', (['[row, col]'], {}), '([row, col])\n', (371, 383), False, 'import torch\n'), ((503, 550), 'cogdl.utils.add_remaining_self_loops', 'add_remaining_self_loops', (['edge_index', 'edge_attr'], {}), '(edge_index, edge_attr)\n', (527, 550), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((718, 769), 'cogdl.utils.row_normalization', 'row_normalization', (['num_nodes', 'edge_index', 'edge_attr'], {}), '(num_nodes, edge_index, edge_attr)\n', (735, 769), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((800, 857), 'cogdl.utils.symmetric_normalization', 'symmetric_normalization', (['num_nodes', 'edge_index', 'edge_attr'], {}), '(num_nodes, edge_index, edge_attr)\n', (823, 857), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((2804, 2825), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (2823, 2825), False, 'import torch\n'), ((2948, 2969), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (2967, 2969), False, 'import torch\n'), ((3662, 3715), 'cogdl.utils.dropout_adj', 'dropout_adj', (['edge_index'], {'drop_rate': 'self.dropedge_rate'}), '(edge_index, drop_rate=self.dropedge_rate)\n', (3673, 3715), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((4545, 4575), 'torch.cat', 'torch.cat', (['op_embedding'], {'dim': '(1)'}), '(op_embedding, dim=1)\n', (4554, 4575), False, 'import torch\n'), ((4977, 5005), 'torch.log_softmax', 'torch.log_softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (4994, 5005), False, 'import torch\n'), ((400, 431), 'torch.ones', 'torch.ones', 
(['edge_index.shape[1]'], {}), '(edge_index.shape[1])\n', (410, 431), False, 'import torch\n'), ((597, 626), 'cogdl.utils.remove_self_loops', 'remove_self_loops', (['edge_index'], {}), '(edge_index)\n', (614, 626), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((648, 669), 'torch.max', 'torch.max', (['edge_index'], {}), '(edge_index)\n', (657, 669), False, 'import torch\n'), ((2851, 2927), 'torch.nn.Linear', 'torch.nn.Linear', (['((1 + 2 * self.num_propagations) * num_features)', 'hidden_size'], {}), '((1 + 2 * self.num_propagations) * num_features, hidden_size)\n', (2866, 2927), False, 'import torch\n'), ((2994, 3027), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (3014, 3027), False, 'import torch\n'), ((3229, 3270), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (3244, 3270), False, 'import torch\n'), ((3800, 3836), 'cogdl.utils.to_undirected', 'to_undirected', (['edge_index', 'num_nodes'], {}), '(edge_index, num_nodes)\n', (3813, 3836), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((4127, 4158), 'cogdl.utils.spmm', 'spmm', (['edge_index', 'edge_attr', 'nx'], {}), '(edge_index, edge_attr, nx)\n', (4131, 4158), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((4461, 4492), 'cogdl.utils.spmm', 'spmm', (['edge_index', 'edge_attr', 'nx'], {}), '(edge_index, edge_attr, nx)\n', (4465, 4492), False, 'from cogdl.utils import add_remaining_self_loops, remove_self_loops, row_normalization, symmetric_normalization, to_undirected, spmm, dropout_adj\n'), ((4854, 4863), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (4860, 
4863), True, 'import torch.nn.functional as F\n'), ((4880, 4932), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (4889, 4932), True, 'import torch.nn.functional as F\n'), ((3098, 3139), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (3113, 3139), False, 'import torch\n'), ((3169, 3202), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (3189, 3202), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import os
import shutil
from tempfile import mkstemp
from typing import Optional
import cx_Oracle
import psycopg2
from psycopg2.extras import execute_values
from pyinterprod import logger
from pyinterprod.utils.kvdb import KVdb
from pyinterprod.utils.pg import url2dict
def import_similarity_comments(swp_url: str, ipr_url: str):
    """Import Swiss-Prot SIMILARITY comments into PostgreSQL.

    Drops and recreates the ``protein_similarity`` table, streams the
    comments from the SPTR Oracle schema, then builds the lookup indexes.

    :param swp_url: Oracle connection string for the SPTR schema.
    :param ipr_url: PostgreSQL connection URL for the InterPro database.
    """
    logger.info("populating")
    pg_con = psycopg2.connect(**url2dict(ipr_url))
    # try/finally so connections are released even if a statement fails.
    try:
        with pg_con.cursor() as pg_cur:
            pg_cur.execute("DROP TABLE IF EXISTS protein_similarity")
            pg_cur.execute(
                """
                CREATE TABLE protein_similarity (
                    comment_id INTEGER NOT NULL,
                    comment_text TEXT NOT NULL,
                    protein_acc VARCHAR(15) NOT NULL
                )
                """
            )

            ora_con = cx_Oracle.connect(swp_url)
            try:
                ora_cur = ora_con.cursor()
                # DENSE_RANK assigns the same comment_id to identical texts,
                # so each distinct comment maps to one ID.
                ora_cur.execute(
                    """
                    SELECT
                        DENSE_RANK() OVER (ORDER BY TEXT),
                        TEXT,
                        ACCESSION
                    FROM (
                        SELECT E.ACCESSION, NVL(B.TEXT, SS.TEXT) AS TEXT
                        FROM SPTR.DBENTRY E
                        INNER JOIN SPTR.COMMENT_BLOCK B
                            ON E.DBENTRY_ID = B.DBENTRY_ID
                            AND B.COMMENT_TOPICS_ID = 34 -- SIMILARITY comments
                        LEFT OUTER JOIN SPTR.COMMENT_STRUCTURE S
                            ON B.COMMENT_BLOCK_ID = S.COMMENT_BLOCK_ID
                            AND S.CC_STRUCTURE_TYPE_ID = 1 -- TEXT structure
                        LEFT OUTER JOIN SPTR.COMMENT_SUBSTRUCTURE SS
                            ON S.COMMENT_STRUCTURE_ID = SS.COMMENT_STRUCTURE_ID
                        WHERE E.ENTRY_TYPE = 0 -- Swiss-Prot
                            AND E.MERGE_STATUS != 'R' -- not 'Redundant'
                            AND E.DELETED = 'N' -- not deleted
                            AND E.FIRST_PUBLIC IS NOT NULL -- published
                    )
                    """
                )
                # Stream the Oracle cursor straight into PostgreSQL.
                sql = "INSERT INTO protein_similarity VALUES %s"
                execute_values(pg_cur, sql, ora_cur, page_size=1000)
            finally:
                # Closing the connection also releases its cursors.
                ora_con.close()

            pg_cur.execute(
                """
                CREATE INDEX protein_similarity_comment_idx
                ON protein_similarity (comment_id)
                """
            )
            pg_cur.execute(
                """
                CREATE INDEX protein_similarity_protein_idx
                ON protein_similarity (protein_acc)
                """
            )

        pg_con.commit()
    finally:
        pg_con.close()
    logger.info("complete")
def import_protein_names(swp_url: str, ipr_url: str, database: str,
                         tmpdir: Optional[str] = None):
    """Import protein names from Swiss-Prot (Oracle) into PostgreSQL.

    Builds two tables: ``protein_name`` (one row per distinct name text)
    and ``protein2name`` (protein accession -> name_id), and also writes
    the accession -> name_id mapping to the KVdb file ``database``.

    :param swp_url: Oracle connection string for the SPTR schema.
    :param ipr_url: PostgreSQL connection URL for the InterPro database.
    :param database: destination path for the KVdb file.
    :param tmpdir: optional directory for the temporary KVdb file.
    """
    logger.info("populating protein2name")
    # Reserve a unique temporary path, then remove the empty file so
    # KVdb can create it itself.
    fd, tmp_database = mkstemp(dir=tmpdir)
    os.close(fd)
    os.remove(tmp_database)
    pg_con = psycopg2.connect(**url2dict(ipr_url))
    with pg_con.cursor() as pg_cur:
        pg_cur.execute("DROP TABLE IF EXISTS protein_name")
        pg_cur.execute("DROP TABLE IF EXISTS protein2name")
        pg_cur.execute(
            """
            CREATE TABLE protein_name (
                name_id INTEGER NOT NULL
                    CONSTRAINT protein_name_pkey PRIMARY KEY,
                text TEXT NOT NULL
            )
            """
        )
        pg_cur.execute(
            """
            CREATE TABLE protein2name (
                protein_acc VARCHAR(15) NOT NULL
                    CONSTRAINT protein2name_pkey PRIMARY KEY,
                name_id INTEGER NOT NULL
            )
            """
        )
        ora_con = cx_Oracle.connect(swp_url)
        ora_cur = ora_con.cursor()
        # ROW_NUMBER with R = 1 keeps a single, highest-priority name per
        # protein (RecName before AltName/SubName, then the manual order).
        ora_cur.execute(
            """
            SELECT ACCESSION, DESCR
            FROM (
                SELECT
                    E.ACCESSION,
                    D.DESCR,
                    ROW_NUMBER() OVER (
                        PARTITION BY E.ACCESSION
                        ORDER BY CV.DESC_ID, -- 1=RecName, 2=AltName, 3=SubName
                        CV.ORDER_IN, -- Swiss-Prot manual order
                        D.DESCR -- TrEMBL alphabetic order
                    ) R
                FROM SPTR.DBENTRY E
                INNER JOIN SPTR.DBENTRY_2_DESC D
                    ON E.DBENTRY_ID = D.DBENTRY_ID
                    AND D.DESC_ID IN (1,4,11,13,16,23,25,28,35) --Full description section
                INNER JOIN SPTR.CV_DESC CV
                    ON D.DESC_ID = CV.DESC_ID
                WHERE E.ENTRY_TYPE IN (0, 1) -- Swiss-Prot/TrEMBL
                    AND E.MERGE_STATUS != 'R' -- not 'Redundant'
                    AND E.DELETED = 'N' -- not deleted
                    AND E.FIRST_PUBLIC IS NOT NULL -- published
            )
            WHERE R = 1 -- one name per protein
            """
        )
        # names: description text -> sequential name_id (assigned on
        # first sight); values: pending (accession, name_id) batch.
        names = {}
        values = []
        i = 0
        with KVdb(tmp_database, True) as namesdb:
            for protein_acc, text in ora_cur:
                try:
                    name_id = names[text]
                except KeyError:
                    # First occurrence of this text: allocate the next ID.
                    name_id = names[text] = len(names) + 1
                values.append((protein_acc, name_id))
                namesdb[protein_acc] = name_id
                i += 1
                # Every 100k rows: flush the KVdb and the INSERT batch.
                if not i % 100000:
                    namesdb.sync()
                    execute_values(
                        cur=pg_cur,
                        sql="INSERT INTO protein2name VALUES %s",
                        argslist=values,
                        page_size=1000
                    )
                    values = []
                    # Progress log every 10M rows.
                    if not i % 10000000:
                        logger.info(f"{i:>12,}")
        ora_cur.close()
        ora_con.close()
        logger.info(f"{i:>12,}")
        # Insert the final partial batch, if any.
        if values:
            execute_values(
                cur=pg_cur,
                sql="INSERT INTO protein2name VALUES %s",
                argslist=values,
                page_size=1000
            )
        logger.info("populating protein_name")
        execute_values(
            cur=pg_cur,
            sql="INSERT INTO protein_name VALUES %s",
            argslist=((name_id, text) for text, name_id in names.items()),
            page_size=1000
        )
        logger.info("analyzing tables")
        pg_cur.execute("ANALYZE protein2name")
        pg_cur.execute("ANALYZE protein_name")
    pg_con.commit()
    pg_con.close()
    # Move the KVdb from the temporary location to its final path.
    logger.info("copying database")
    shutil.copyfile(tmp_database, database)
    logger.info(f"disk usage: {os.path.getsize(tmp_database)/1024**2:.0f} MB")
    os.remove(tmp_database)
    logger.info("complete")
def import_proteins(ora_url: str, pg_url: str):
    """Copy the InterPro protein table from Oracle into PostgreSQL.

    Drops and recreates the ``protein`` table, streams every row of
    INTERPRO.PROTEIN, and rebuilds the lookup indexes.

    :param ora_url: Oracle connection string for the InterPro schema.
    :param pg_url: PostgreSQL connection URL for the InterPro database.
    """
    logger.info("populating")
    pg_con = psycopg2.connect(**url2dict(pg_url))
    # try/finally so connections are released even if a statement fails.
    try:
        with pg_con.cursor() as pg_cur:
            pg_cur.execute("DROP TABLE IF EXISTS protein")
            pg_cur.execute(
                """
                CREATE TABLE protein (
                    accession VARCHAR(15) NOT NULL
                        CONSTRAINT protein_pkey PRIMARY KEY,
                    identifier VARCHAR(16) NOT NULL,
                    length INTEGER NOT NULL,
                    taxon_id INTEGER NOT NULL,
                    is_fragment BOOLEAN NOT NULL,
                    is_reviewed BOOLEAN NOT NULL
                )
                """
            )

            ora_con = cx_Oracle.connect(ora_url)
            try:
                ora_cur = ora_con.cursor()
                ora_cur.execute(
                    """
                    SELECT PROTEIN_AC, NAME, LEN, TAX_ID, FRAGMENT, DBCODE
                    FROM INTERPRO.PROTEIN
                    """
                )
                # Map Oracle flags to booleans:
                #   FRAGMENT = 'Y' -> is_fragment
                #   DBCODE   = 'S' -> is_reviewed (Swiss-Prot)
                sql = "INSERT INTO protein VALUES %s"
                execute_values(pg_cur, sql, (
                    (row[0], row[1], row[2], row[3], row[4] == 'Y', row[5] == 'S')
                    for row in ora_cur
                ), page_size=1000)
            finally:
                # Closing the connection also releases its cursors.
                ora_con.close()

            pg_cur.execute(
                """
                CREATE UNIQUE INDEX protein_identifier_uidx
                ON protein (identifier)
                """
            )
            pg_cur.execute(
                """
                CREATE INDEX protein_reviewed_idx
                ON protein (is_reviewed)
                """
            )
            pg_cur.execute(
                """
                CREATE INDEX protein_taxon_idx
                ON protein (taxon_id)
                """
            )

        pg_con.commit()
    finally:
        pg_con.close()
    logger.info("complete")
|
[
"os.remove",
"tempfile.mkstemp",
"pyinterprod.utils.kvdb.KVdb",
"os.path.getsize",
"pyinterprod.logger.info",
"os.close",
"pyinterprod.utils.pg.url2dict",
"shutil.copyfile",
"cx_Oracle.connect",
"psycopg2.extras.execute_values"
] |
[((363, 388), 'pyinterprod.logger.info', 'logger.info', (['"""populating"""'], {}), "('populating')\n", (374, 388), False, 'from pyinterprod import logger\n'), ((2547, 2570), 'pyinterprod.logger.info', 'logger.info', (['"""complete"""'], {}), "('complete')\n", (2558, 2570), False, 'from pyinterprod import logger\n'), ((2701, 2739), 'pyinterprod.logger.info', 'logger.info', (['"""populating protein2name"""'], {}), "('populating protein2name')\n", (2712, 2739), False, 'from pyinterprod import logger\n'), ((2763, 2782), 'tempfile.mkstemp', 'mkstemp', ([], {'dir': 'tmpdir'}), '(dir=tmpdir)\n', (2770, 2782), False, 'from tempfile import mkstemp\n'), ((2787, 2799), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (2795, 2799), False, 'import os\n'), ((2804, 2827), 'os.remove', 'os.remove', (['tmp_database'], {}), '(tmp_database)\n', (2813, 2827), False, 'import os\n'), ((6427, 6458), 'pyinterprod.logger.info', 'logger.info', (['"""copying database"""'], {}), "('copying database')\n", (6438, 6458), False, 'from pyinterprod import logger\n'), ((6463, 6502), 'shutil.copyfile', 'shutil.copyfile', (['tmp_database', 'database'], {}), '(tmp_database, database)\n', (6478, 6502), False, 'import shutil\n'), ((6586, 6609), 'os.remove', 'os.remove', (['tmp_database'], {}), '(tmp_database)\n', (6595, 6609), False, 'import os\n'), ((6614, 6637), 'pyinterprod.logger.info', 'logger.info', (['"""complete"""'], {}), "('complete')\n", (6625, 6637), False, 'from pyinterprod import logger\n'), ((6692, 6717), 'pyinterprod.logger.info', 'logger.info', (['"""populating"""'], {}), "('populating')\n", (6703, 6717), False, 'from pyinterprod import logger\n'), ((8366, 8389), 'pyinterprod.logger.info', 'logger.info', (['"""complete"""'], {}), "('complete')\n", (8377, 8389), False, 'from pyinterprod import logger\n'), ((825, 851), 'cx_Oracle.connect', 'cx_Oracle.connect', (['swp_url'], {}), '(swp_url)\n', (842, 851), False, 'import cx_Oracle\n'), ((2057, 2109), 'psycopg2.extras.execute_values', 
'execute_values', (['pg_cur', 'sql', 'ora_cur'], {'page_size': '(1000)'}), '(pg_cur, sql, ora_cur, page_size=1000)\n', (2071, 2109), False, 'from psycopg2.extras import execute_values\n'), ((3587, 3613), 'cx_Oracle.connect', 'cx_Oracle.connect', (['swp_url'], {}), '(swp_url)\n', (3604, 3613), False, 'import cx_Oracle\n'), ((5744, 5768), 'pyinterprod.logger.info', 'logger.info', (['f"""{i:>12,}"""'], {}), "(f'{i:>12,}')\n", (5755, 5768), False, 'from pyinterprod import logger\n'), ((5990, 6028), 'pyinterprod.logger.info', 'logger.info', (['"""populating protein_name"""'], {}), "('populating protein_name')\n", (6001, 6028), False, 'from pyinterprod import logger\n'), ((6252, 6283), 'pyinterprod.logger.info', 'logger.info', (['"""analyzing tables"""'], {}), "('analyzing tables')\n", (6263, 6283), False, 'from pyinterprod import logger\n'), ((7321, 7347), 'cx_Oracle.connect', 'cx_Oracle.connect', (['ora_url'], {}), '(ora_url)\n', (7338, 7347), False, 'import cx_Oracle\n'), ((7606, 7738), 'psycopg2.extras.execute_values', 'execute_values', (['pg_cur', 'sql', "((row[0], row[1], row[2], row[3], row[4] == 'Y', row[5] == 'S') for row in\n ora_cur)"], {'page_size': '(1000)'}), "(pg_cur, sql, ((row[0], row[1], row[2], row[3], row[4] == 'Y',\n row[5] == 'S') for row in ora_cur), page_size=1000)\n", (7620, 7738), False, 'from psycopg2.extras import execute_values\n'), ((421, 438), 'pyinterprod.utils.pg.url2dict', 'url2dict', (['ipr_url'], {}), '(ipr_url)\n', (429, 438), False, 'from pyinterprod.utils.pg import url2dict\n'), ((2861, 2878), 'pyinterprod.utils.pg.url2dict', 'url2dict', (['ipr_url'], {}), '(ipr_url)\n', (2869, 2878), False, 'from pyinterprod.utils.pg import url2dict\n'), ((4890, 4914), 'pyinterprod.utils.kvdb.KVdb', 'KVdb', (['tmp_database', '(True)'], {}), '(tmp_database, True)\n', (4894, 4914), False, 'from pyinterprod.utils.kvdb import KVdb\n'), ((5801, 5906), 'psycopg2.extras.execute_values', 'execute_values', ([], {'cur': 'pg_cur', 'sql': '"""INSERT INTO 
protein2name VALUES %s"""', 'argslist': 'values', 'page_size': '(1000)'}), "(cur=pg_cur, sql='INSERT INTO protein2name VALUES %s',\n argslist=values, page_size=1000)\n", (5815, 5906), False, 'from psycopg2.extras import execute_values\n'), ((6750, 6766), 'pyinterprod.utils.pg.url2dict', 'url2dict', (['pg_url'], {}), '(pg_url)\n', (6758, 6766), False, 'from pyinterprod.utils.pg import url2dict\n'), ((5344, 5449), 'psycopg2.extras.execute_values', 'execute_values', ([], {'cur': 'pg_cur', 'sql': '"""INSERT INTO protein2name VALUES %s"""', 'argslist': 'values', 'page_size': '(1000)'}), "(cur=pg_cur, sql='INSERT INTO protein2name VALUES %s',\n argslist=values, page_size=1000)\n", (5358, 5449), False, 'from psycopg2.extras import execute_values\n'), ((6534, 6563), 'os.path.getsize', 'os.path.getsize', (['tmp_database'], {}), '(tmp_database)\n', (6549, 6563), False, 'import os\n'), ((5662, 5686), 'pyinterprod.logger.info', 'logger.info', (['f"""{i:>12,}"""'], {}), "(f'{i:>12,}')\n", (5673, 5686), False, 'from pyinterprod import logger\n')]
|
# API endpoints for uploaded media, including avatars
from flask import Blueprint, current_app, jsonify, request, send_from_directory, send_file
from flask_jwt_extended import get_jwt_identity, jwt_required
from liblio import db, jwt, liblio_uploads
from liblio.error import APIError
from liblio.models import Upload, Avatar
from liblio.helpers import decode_printable_id
from . import API_PATH
# BLUEPRINT_PATH="{api}/".format(api=API_PATH)
# Media access is top-level
BLUEPRINT_PATH="/"
# Blueprint serving uploaded media and avatars from the site root.
blueprint = Blueprint('media', __name__, url_prefix=BLUEPRINT_PATH)
### Routes
@blueprint.route('media/<media_fid>')
def get_media(media_fid):
    """Serve an uploaded media file, looked up by its printable Flake ID.

    Raises APIError(404) when no upload matches the ID.
    """
    flake = decode_printable_id(media_fid)
    media = Upload.query.filter_by(flake=flake).first()
    if media is None:
        raise APIError(404, "Media not found")
    return send_file(liblio_uploads.path(media.filename))
@blueprint.route('avatars/<media_fid>')
def get_avatar(media_fid):
    """Serve an uploaded avatar image, looked up by its printable Flake ID.

    Raises APIError(404) when no avatar matches the ID.
    """
    flake = decode_printable_id(media_fid)
    avatar = Avatar.query.filter_by(flake=flake).first()
    if avatar is None:
        raise APIError(404, "Media not found")
    return send_file(liblio_uploads.path(avatar.filename))
|
[
"liblio.helpers.decode_printable_id",
"flask.Blueprint",
"liblio.error.APIError",
"liblio.liblio_uploads.path",
"flask.send_file"
] |
[((506, 561), 'flask.Blueprint', 'Blueprint', (['"""media"""', '__name__'], {'url_prefix': 'BLUEPRINT_PATH'}), "('media', __name__, url_prefix=BLUEPRINT_PATH)\n", (515, 561), False, 'from flask import Blueprint, current_app, jsonify, request, send_from_directory, send_file\n'), ((809, 844), 'liblio.liblio_uploads.path', 'liblio_uploads.path', (['media.filename'], {}), '(media.filename)\n', (828, 844), False, 'from liblio import db, jwt, liblio_uploads\n'), ((860, 875), 'flask.send_file', 'send_file', (['path'], {}), '(path)\n', (869, 875), False, 'from flask import Blueprint, current_app, jsonify, request, send_from_directory, send_file\n'), ((900, 932), 'liblio.error.APIError', 'APIError', (['(404)', '"""Media not found"""'], {}), "(404, 'Media not found')\n", (908, 932), False, 'from liblio.error import APIError\n'), ((1174, 1210), 'liblio.liblio_uploads.path', 'liblio_uploads.path', (['avatar.filename'], {}), '(avatar.filename)\n', (1193, 1210), False, 'from liblio import db, jwt, liblio_uploads\n'), ((1226, 1241), 'flask.send_file', 'send_file', (['path'], {}), '(path)\n', (1235, 1241), False, 'from flask import Blueprint, current_app, jsonify, request, send_from_directory, send_file\n'), ((1266, 1298), 'liblio.error.APIError', 'APIError', (['(404)', '"""Media not found"""'], {}), "(404, 'Media not found')\n", (1274, 1298), False, 'from liblio.error import APIError\n'), ((727, 757), 'liblio.helpers.decode_printable_id', 'decode_printable_id', (['media_fid'], {}), '(media_fid)\n', (746, 757), False, 'from liblio.helpers import decode_printable_id\n'), ((1091, 1121), 'liblio.helpers.decode_printable_id', 'decode_printable_id', (['media_fid'], {}), '(media_fid)\n', (1110, 1121), False, 'from liblio.helpers import decode_printable_id\n')]
|
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi
from src.CONSTS import BATCH_SIZE_PRED
def get_train_val_test_data():
    """Merge the A2D and KOP datasets, write train/val/test CSV splits and
    persist the (min, max) pCHEMBL bounds of the training set for later
    target normalization."""
    create_folder('predictor_data/train_data/')
    create_folder('predictor_data/test_data/')

    frames = [pd.read_csv('predictor_data/data_clean_a2d.csv'),
              pd.read_csv('predictor_data/data_clean_kop.csv')]
    df_data = pd.concat(frames)
    df_data.loc[:, 'Data'] = df_data.SMILES.map(standardize_smi)

    # 10% held out for test, then 10% of the remainder for validation.
    df_train, df_test = train_test_split(df_data, test_size=0.1, random_state=43)
    df_train, df_val = train_test_split(df_train, test_size=0.1, random_state=43)

    df_train.to_csv('predictor_data/train_data/df_train.csv', index=False)
    df_test.to_csv('predictor_data/test_data/df_test.csv', index=False)
    df_val.to_csv('predictor_data/test_data/df_val.csv', index=False)

    # 2nd / 98th percentiles of the training targets serve as normalization bounds.
    max_y = np.quantile(df_train.pCHEMBL.values, 0.98)
    min_y = np.quantile(df_train.pCHEMBL.values, 0.02)
    with open('predictor_data/train_data/y_max_min.pkl', 'wb') as f:
        pickle.dump((min_y, max_y), f)
def get_val_data():
    """Encode the validation split and pickle it as stacked (X, y) arrays,
    with targets min-max normalized by the stored training bounds."""
    df_val = pd.read_csv('predictor_data/test_data/df_val.csv')
    with open('predictor_data/train_data/y_max_min.pkl', 'rb') as handle:
        y_min, y_max = pickle.load(handle)

    xs = []
    ys = []
    for _, row in df_val.iterrows():
        xs.append(get_encoded_smi(row.Data))
        ys.append((row.pCHEMBL - y_min) / (y_max - y_min))

    encoded = (np.vstack(xs), np.vstack(ys))
    with open('predictor_data/test_data/Xy_val.pkl', 'wb') as f:
        pickle.dump(encoded, f)
def data_iterator_train():
    """Endless generator over the training split.

    Each epoch reshuffles the rows, then yields (X, y) batches of
    BATCH_SIZE_PRED encoded SMILES; any final partial batch is flushed
    before the next epoch begins.
    """
    df_train = pd.read_csv('predictor_data/train_data/df_train.csv')
    with open('predictor_data/train_data/y_max_min.pkl', 'rb') as handle:
        y_min, y_max = pickle.load(handle)

    while True:
        # Fresh shuffle every pass over the data.
        df_train = df_train.sample(frac=1).reset_index(drop=True)
        batch_x, batch_y = [], []
        for _, row in df_train.iterrows():
            batch_x.append(get_encoded_smi(row.Data))
            batch_y.append((row.pCHEMBL - y_min) / (y_max - y_min))
            if len(batch_x) >= BATCH_SIZE_PRED:
                yield (np.vstack(batch_x), np.vstack(batch_y))
                batch_x, batch_y = [], []
        # Flush the leftover partial batch of this epoch, if any.
        if batch_x:
            yield (np.vstack(batch_x), np.vstack(batch_y))
def data_iterator_test(test_df_path):
    """Generator over a test CSV: yields (X, y) batches of BATCH_SIZE_PRED
    encoded SMILES with min-max normalized targets, then a final partial
    batch if one remains.

    Args:
        test_df_path: path to a CSV with 'Data' (SMILES string) and
            'pCHEMBL' columns.

    Yields:
        (np.ndarray, np.ndarray): stacked encoded inputs and targets.
    """
    df_test = pd.read_csv(test_df_path)
    with open('predictor_data/train_data/y_max_min.pkl', 'rb') as handle:
        y_min, y_max = pickle.load(handle)
    x = []
    y = []
    for _, row in df_test.iterrows():
        x.append(get_encoded_smi(row.Data))
        _y = (row.pCHEMBL - y_min) / (y_max - y_min)
        y.append(_y)
        if len(x) >= BATCH_SIZE_PRED:
            yield (np.vstack(x), np.vstack(y))
            x = []
            y = []
    # Flush the trailing partial batch. (Fix: removed the dead `x = []` /
    # `y = []` resets that followed this yield — the function ends here.)
    if x:
        yield (np.vstack(x), np.vstack(y))
if __name__ == "__main__":
get_train_val_test_data()
get_val_data()
# df_train = pd.read_csv('predictor_data/train_data/df_train.csv')
# for x, y in data_iterator_test('predictor_data/test_data/df_test.csv'):
# breakpoint()
# print(x.shape)
# # for x, y in data_iterator_train():
# # print(x.shape)
|
[
"src.data_process_utils.get_encoded_smi",
"pickle.dump",
"numpy.quantile",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"src.data_process_utils.create_folder",
"pickle.load",
"pandas.concat",
"numpy.vstack"
] |
[((265, 308), 'src.data_process_utils.create_folder', 'create_folder', (['"""predictor_data/train_data/"""'], {}), "('predictor_data/train_data/')\n", (278, 308), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((313, 355), 'src.data_process_utils.create_folder', 'create_folder', (['"""predictor_data/test_data/"""'], {}), "('predictor_data/test_data/')\n", (326, 355), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((372, 420), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/data_clean_a2d.csv"""'], {}), "('predictor_data/data_clean_a2d.csv')\n", (383, 420), True, 'import pandas as pd\n'), ((437, 485), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/data_clean_kop.csv"""'], {}), "('predictor_data/data_clean_kop.csv')\n", (448, 485), True, 'import pandas as pd\n'), ((500, 533), 'pandas.concat', 'pd.concat', (['[df_data_1, df_data_2]'], {}), '([df_data_1, df_data_2])\n', (509, 533), True, 'import pandas as pd\n'), ((663, 720), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_data'], {'test_size': '(0.1)', 'random_state': '(43)'}), '(df_data, test_size=0.1, random_state=43)\n', (679, 720), False, 'from sklearn.model_selection import train_test_split\n'), ((755, 813), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_train'], {'test_size': '(0.1)', 'random_state': '(43)'}), '(df_train, test_size=0.1, random_state=43)\n', (771, 813), False, 'from sklearn.model_selection import train_test_split\n'), ((1044, 1086), 'numpy.quantile', 'np.quantile', (['df_train.pCHEMBL.values', '(0.98)'], {}), '(df_train.pCHEMBL.values, 0.98)\n', (1055, 1086), True, 'import numpy as np\n'), ((1099, 1141), 'numpy.quantile', 'np.quantile', (['df_train.pCHEMBL.values', '(0.02)'], {}), '(df_train.pCHEMBL.values, 0.02)\n', (1110, 1141), True, 'import numpy as np\n'), ((1290, 1340), 'pandas.read_csv', 'pd.read_csv', 
(['"""predictor_data/test_data/df_val.csv"""'], {}), "('predictor_data/test_data/df_val.csv')\n", (1301, 1340), True, 'import pandas as pd\n'), ((1821, 1874), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/train_data/df_train.csv"""'], {}), "('predictor_data/train_data/df_train.csv')\n", (1832, 1874), True, 'import pandas as pd\n'), ((2570, 2595), 'pandas.read_csv', 'pd.read_csv', (['test_df_path'], {}), '(test_df_path)\n', (2581, 2595), True, 'import pandas as pd\n'), ((1224, 1254), 'pickle.dump', 'pickle.dump', (['(min_y, max_y)', 'f'], {}), '((min_y, max_y), f)\n', (1235, 1254), False, 'import pickle\n'), ((1438, 1457), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1449, 1457), False, 'import pickle\n'), ((1649, 1661), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (1658, 1661), True, 'import numpy as np\n'), ((1663, 1675), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (1672, 1675), True, 'import numpy as np\n'), ((1755, 1776), 'pickle.dump', 'pickle.dump', (['_data', 'f'], {}), '(_data, f)\n', (1766, 1776), False, 'import pickle\n'), ((1972, 1991), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1983, 1991), False, 'import pickle\n'), ((2693, 2712), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2704, 2712), False, 'import pickle\n'), ((1534, 1559), 'src.data_process_utils.get_encoded_smi', 'get_encoded_smi', (['row.Data'], {}), '(row.Data)\n', (1549, 1559), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((2790, 2815), 'src.data_process_utils.get_encoded_smi', 'get_encoded_smi', (['row.Data'], {}), '(row.Data)\n', (2805, 2815), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((2168, 2193), 'src.data_process_utils.get_encoded_smi', 'get_encoded_smi', (['row.Data'], {}), '(row.Data)\n', (2183, 2193), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((3040, 
3052), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (3049, 3052), True, 'import numpy as np\n'), ((3054, 3066), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (3063, 3066), True, 'import numpy as np\n'), ((2450, 2462), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2459, 2462), True, 'import numpy as np\n'), ((2464, 2476), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2473, 2476), True, 'import numpy as np\n'), ((2948, 2960), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2957, 2960), True, 'import numpy as np\n'), ((2962, 2974), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2971, 2974), True, 'import numpy as np\n'), ((2342, 2354), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2351, 2354), True, 'import numpy as np\n'), ((2356, 2368), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2365, 2368), True, 'import numpy as np\n')]
|
__author__ = "arunrajms"
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from views import DiscoveryRuleList
from views import DiscoveryRuleDetailList
# URL routes: list view at '' and a detail view keyed by <id>.
# NOTE(review): both routes share name='Full_Pools', so reverse()/{% url %}
# can only resolve one of them — the detail route's name looks like a
# copy-paste; confirm the intended name before changing (callers unknown).
# NOTE(review): patterns() is a deprecated Django (<= 1.9) API; left as-is
# because the file's implicit `from views import ...` pins that era.
urlpatterns = patterns('',
    url(r'^$',DiscoveryRuleList.as_view(),name='Full_Pools'),
    url(r'^(?P<id>\w+)/$',DiscoveryRuleDetailList.as_view(),name='Full_Pools'),
)
|
[
"views.DiscoveryRuleDetailList.as_view",
"views.DiscoveryRuleList.as_view"
] |
[((298, 325), 'views.DiscoveryRuleList.as_view', 'DiscoveryRuleList.as_view', ([], {}), '()\n', (323, 325), False, 'from views import DiscoveryRuleList\n'), ((373, 406), 'views.DiscoveryRuleDetailList.as_view', 'DiscoveryRuleDetailList.as_view', ([], {}), '()\n', (404, 406), False, 'from views import DiscoveryRuleDetailList\n')]
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.errors import CheckException
from datadog_checks.mcache.mcache import InvalidConfigError
from common import (PORT, SERVICE_CHECK, HOST)
def test_bad_config(check):
    """An empty configuration must make the check raise InvalidConfigError."""
    empty_instance = {}
    with pytest.raises(InvalidConfigError):
        check.check(empty_instance)
def test_service_ko(check, aggregator):
    """
    When the service is unreachable, exactly one CRITICAL service check
    must be emitted, carrying the instance tags.
    """
    instance = {'url': "{}".format(HOST), 'port': PORT, 'tags': ["foo:bar"]}
    expected_tags = ["host:{}".format(HOST), "port:{}".format(PORT), "foo:bar"]

    with pytest.raises(CheckException):
        check.check(instance)

    service_checks = aggregator.service_checks(SERVICE_CHECK)
    assert len(service_checks) == 1
    sc = service_checks[0]
    assert sc.status == check.CRITICAL
    assert sc.tags == expected_tags
|
[
"pytest.raises"
] |
[((402, 435), 'pytest.raises', 'pytest.raises', (['InvalidConfigError'], {}), '(InvalidConfigError)\n', (415, 435), False, 'import pytest\n'), ((672, 701), 'pytest.raises', 'pytest.raises', (['CheckException'], {}), '(CheckException)\n', (685, 701), False, 'import pytest\n')]
|
import os
import sys
import re
import datetime
import calendar
import boto3
import botocore
# AWS clients and configuration, resolved once per Lambda cold start.
s3client = boto3.client('s3')
BUCKET_NAME = os.getenv('bucket_name') # S3 bucket of transaction emails
ddbclient = boto3.client('dynamodb')
TABLE_NAME = os.getenv('table_name') # DynamoDB table of transaction data
NUM_DIGITS = 4 # Last digits of credit card
WS = '(?:\s| )*' # Whitespace regex
def lambda_handler(event, context):
    """Entry point: fetch the SES-delivered email from S3, parse the
    transaction it describes, and persist the fields to DynamoDB."""
    ses_notification = event['Records'][0]['ses']
    message_id = ses_notification['mail']['messageId']
    try:
        email = s3client.get_object(Bucket=BUCKET_NAME, Key=message_id)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != '404':
            raise
        # Email object missing from the bucket; nothing to process.
        print('The object does not exist. Key: ' + message_id)
        sys.exit(1)
    contents = email['Body'].read().decode('utf-8')
    last_digits, date, amount, payee = parse(contents)
    save_to_db(message_id, last_digits, date, amount, payee)
def parse(contents):
    """Extract (last_digits, date, amount, payee) from the alert email body.

    Exits with status 0 when the email is not a spending-limit alert.
    """
    if 'Your purchase exceeds the amount you set' not in contents:
        sys.exit(0)
    # The card digits immediately follow "account number ending with".
    after_account = re.split(r'account{0}number{0}ending{0}with{0}'.format(WS), contents, 1)[1]
    last_digits = after_account[:NUM_DIGITS]
    # Walk the labeled fields in document order: Merchant, Amount, Date.
    tail = re.split(r'Merchant:{0}'.format(WS), contents, 1)[1]
    pieces = re.split(r'{0}Amount:{0}\$'.format(WS), tail, 1)
    payee = pieces[0]
    pieces = re.split(r'{0}Date:{0}'.format(WS), pieces[1], 1)
    amount = pieces[0]
    raw_date = re.split(r'{0}Wasn\'t'.format(WS), pieces[1], 1)[0]
    return (last_digits, format_date(raw_date), amount, payee)
def format_date(date):
    '''Convert a date like "December 25, 2020" to ISO 8601 (RFC 3339
    "full-date") format, e.g. "2020-12-25".

    Fixes in this revision:
    - December was never matched: the loop ran `range(12)` but December is
      `calendar.month_name[12]`, so it always fell through to the fallback.
    - The fallback `datetime.date.month` was a descriptor object, not a
      month number; an unknown month now maps to 0 so the failure stays
      visible in the output.
    - Month and day are zero-padded, as RFC 3339 requires.
    '''
    # Whitespace in HTML-sourced emails may be plain spaces, NBSP characters,
    # or literal "&nbsp;" entities — accept all three.
    month_name, remainder = re.split(r'(?:\s|&nbsp;|\xa0)+', date, maxsplit=1)
    day, year = re.split(r',(?:\s|&nbsp;|\xa0)*', remainder, maxsplit=1)
    # calendar.month_name[1..12] are 'January'..'December'; index 0 is ''.
    try:
        month = list(calendar.month_name).index(month_name)
    except ValueError:
        month = 0  # Unknown month name.
    return '{0}-{1:02d}-{2:02d}'.format(year, month, int(day))
def save_to_db(message_id, last_digits, date, amount, payee):
    """Write one transaction record to DynamoDB, keyed by message_id."""
    item = {
        'message_id': {'S': message_id},
        'last_digits': {'S': last_digits},
        'amount': {'S': amount},
        'payee': {'S': payee},
        'date': {'S': date},
    }
    # The condition prevents overwriting an existing record for the same
    # message (a re-delivered email fails the conditional write).
    ddbclient.put_item(TableName=TABLE_NAME,
                       Item=item,
                       ConditionExpression='attribute_not_exists(message_id)')
|
[
"sys.exit",
"os.getenv",
"boto3.client"
] |
[((106, 124), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (118, 124), False, 'import boto3\n'), ((139, 163), 'os.getenv', 'os.getenv', (['"""bucket_name"""'], {}), "('bucket_name')\n", (148, 163), False, 'import os\n'), ((211, 235), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {}), "('dynamodb')\n", (223, 235), False, 'import boto3\n'), ((249, 272), 'os.getenv', 'os.getenv', (['"""table_name"""'], {}), "('table_name')\n", (258, 272), False, 'import os\n'), ((1347, 1358), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1355, 1358), False, 'import sys\n'), ((971, 982), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (979, 982), False, 'import sys\n')]
|
import mock
from octoprint_discordremote import DiscordRemotePlugin
from unittests.discordremotetestcase import DiscordRemoteTestCase
def mock_global_get_boolean(array):
    """Stand-in for settings.global_get_boolean: every webcam transform
    (vertical flip, horizontal flip, 90-degree rotation) is disabled."""
    known = {
        str(['webcam', 'flipV']): False,
        str(['webcam', 'flipH']): False,
        str(['webcam', 'rotate90']): False,
    }
    return known[str(array)]
class TestCommand(DiscordRemoteTestCase):
    """Tests for DiscordRemotePlugin snapshot, name and time helpers.

    Fixes in this revision:
    - test_get_print_time_spent asserted get_print_time_remaining() for the
      `printTime: None` case (copy-paste from the remaining-time test); it
      now asserts get_print_time_spent().
    - The duplicated plugin setup and snapshot assertions were factored
      into private helpers.
    """

    # Reference image used by the snapshot tests.
    TEST_PATTERN_PATH = "unittests/test_pattern.png"

    def _make_settings_plugin(self, global_get_value):
        """Build a plugin whose mocked settings.global_get returns global_get_value."""
        plugin = DiscordRemotePlugin()
        plugin._settings = mock.Mock()
        plugin._settings.global_get = mock.Mock()
        plugin._settings.global_get.return_value = global_get_value
        plugin._settings.global_get_boolean = mock_global_get_boolean
        plugin._logger = mock.Mock()
        return plugin

    def _make_printer_plugin(self, current_data):
        """Build a plugin whose mocked printer.get_current_data returns current_data."""
        plugin = DiscordRemotePlugin()
        plugin._printer = mock.Mock()
        plugin._printer.get_current_data = mock.Mock()
        plugin._printer.get_current_data.return_value = current_data
        return plugin

    def _assert_single_snapshot(self, snapshots, expected_data):
        """Assert snapshots is exactly one ("snapshot.png", data) pair with expected_data."""
        self.assertIsNotNone(snapshots)
        self.assertEqual(1, len(snapshots))
        snapshot = snapshots[0]
        self.assertEqual(2, len(snapshot))
        self.assertEqual("snapshot.png", snapshot[0])
        snapshot_data = snapshot[1].read()
        self.assertEqual(len(expected_data), len(snapshot_data))
        self.assertEqual([expected_data], [snapshot_data])

    def test_plugin_get_snapshot_http(self):
        plugin = self._make_settings_plugin("http://ValidSnapshot")
        with open(self.TEST_PATTERN_PATH, "rb") as f:
            file_data = f.read()
        with mock.patch("requests.get") as mock_requests_get:
            mock_requests_get.return_value = mock.Mock()
            mock_requests_get.return_value.content = file_data
            snapshots = plugin.get_snapshot()
        self._assert_single_snapshot(snapshots, file_data)

    def test_plugin_get_snapshot_file(self):
        plugin = self._make_settings_plugin("file://unittests/test_pattern.png")
        with open(self.TEST_PATTERN_PATH, "rb") as f:
            file_data = f.read()
        snapshots = plugin.get_snapshot()
        self._assert_single_snapshot(snapshots, file_data)

    def test_plugin_get_printer_name(self):
        plugin = self._make_settings_plugin("DiscordBot")
        self.assertEqual("DiscordBot", plugin.get_printer_name())
        # With no configured name the plugin falls back to "OctoPrint".
        plugin._settings.global_get.return_value = None
        self.assertEqual("OctoPrint", plugin.get_printer_name())

    def test_get_print_time_spent(self):
        plugin = self._make_printer_plugin({})
        self.assertEqual('Unknown', plugin.get_print_time_spent())
        plugin._printer.get_current_data.return_value = {'progress': {}}
        self.assertEqual('Unknown', plugin.get_print_time_spent())
        plugin._printer.get_current_data.return_value = {'progress': {'printTime': None}}
        # Bug fix: previously asserted get_print_time_remaining() here.
        self.assertEqual('Unknown', plugin.get_print_time_spent())
        plugin._printer.get_current_data.return_value = {'progress': {'printTime': 1234}}
        self.assertEqual('20 minutes and 34 seconds', plugin.get_print_time_spent())

    def test_get_print_time_remaining(self):
        plugin = self._make_printer_plugin({})
        self.assertEqual('Unknown', plugin.get_print_time_remaining())
        plugin._printer.get_current_data.return_value = {'progress': {}}
        self.assertEqual('Unknown', plugin.get_print_time_remaining())
        plugin._printer.get_current_data.return_value = {'progress': {'printTimeLeft': None}}
        self.assertEqual('Unknown', plugin.get_print_time_remaining())
        plugin._printer.get_current_data.return_value = {'progress': {'printTimeLeft': 1234}}
        self.assertEqual('20 minutes and 34 seconds', plugin.get_print_time_remaining())
|
[
"mock.patch",
"mock.Mock",
"octoprint_discordremote.DiscordRemotePlugin"
] |
[((436, 457), 'octoprint_discordremote.DiscordRemotePlugin', 'DiscordRemotePlugin', ([], {}), '()\n', (455, 457), False, 'from octoprint_discordremote import DiscordRemotePlugin\n'), ((485, 496), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (494, 496), False, 'import mock\n'), ((535, 546), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (544, 546), False, 'import mock\n'), ((716, 727), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (725, 727), False, 'import mock\n'), ((1520, 1541), 'octoprint_discordremote.DiscordRemotePlugin', 'DiscordRemotePlugin', ([], {}), '()\n', (1539, 1541), False, 'from octoprint_discordremote import DiscordRemotePlugin\n'), ((1569, 1580), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1578, 1580), False, 'import mock\n'), ((1619, 1630), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1628, 1630), False, 'import mock\n'), ((1813, 1824), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1822, 1824), False, 'import mock\n'), ((2397, 2418), 'octoprint_discordremote.DiscordRemotePlugin', 'DiscordRemotePlugin', ([], {}), '()\n', (2416, 2418), False, 'from octoprint_discordremote import DiscordRemotePlugin\n'), ((2446, 2457), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2455, 2457), False, 'import mock\n'), ((2496, 2507), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2505, 2507), False, 'import mock\n'), ((2847, 2868), 'octoprint_discordremote.DiscordRemotePlugin', 'DiscordRemotePlugin', ([], {}), '()\n', (2866, 2868), False, 'from octoprint_discordremote import DiscordRemotePlugin\n'), ((2895, 2906), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2904, 2906), False, 'import mock\n'), ((2950, 2961), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2959, 2961), False, 'import mock\n'), ((3631, 3652), 'octoprint_discordremote.DiscordRemotePlugin', 'DiscordRemotePlugin', ([], {}), '()\n', (3650, 3652), False, 'from octoprint_discordremote import DiscordRemotePlugin\n'), ((3679, 3690), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3688, 3690), False, 'import mock\n'), 
((3734, 3745), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3743, 3745), False, 'import mock\n'), ((836, 862), 'mock.patch', 'mock.patch', (['"""requests.get"""'], {}), "('requests.get')\n", (846, 862), False, 'import mock\n'), ((930, 941), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (939, 941), False, 'import mock\n')]
|
################
## adaline.py ##
################
# original implementation
# <NAME>, Python Machine Learning, 3rd Edition
#############
## imports ##
#############
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from rktools.monitors import ProgressBar
###############################################################################
## AdalineGD ##
###############################################################################
class AdalineGD(BaseEstimator, ClassifierMixin):
    """
    The ADAptive LInear NEuron classifier, trained with full-batch
    gradient descent.

    Parameters
    ----------
    * lr: float
        Learning rate (between 0.0 and 1.0)
    * n_epochs: int
        Passes over the training dataset.
    * random_state: int
        Random number generator seed for random weight initialization.

    Attributes
    -----------
    * w_: 1d-array
        Weights after fitting; w_[0] is the bias unit.
    * cost_: list
        Sum-of-squares cost function value in each epoch. The convergence
        criterion is the value of the cost function J, not the per-epoch
        error count.
    """

    def __init__(self, lr=0.01, n_epochs=50, random_state=1):
        self.lr = lr
        self.n_epochs = n_epochs
        self.random_state = random_state

    def init_weights(self, n_features):
        """Initialize the weights to small Gaussian values (extra slot for the bias)."""
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + n_features)

    def fit(self, X, y):
        """
        Fit training data with full-batch gradient descent.

        Parameters
        ----------
        * X : {array-like}, shape = [n_examples, n_features]
            Training vectors.
        * y : array-like, shape = [n_examples]
            Target values.

        Returns
        -------
        * self : object
        """
        # Validate once, then train on the validated arrays.
        # (Bug fix: the raw X/y were previously used for training even
        # though check_X_y may have converted/cleaned them into self.X_/self.y_.)
        self.X_, self.y_ = check_X_y(X, y)
        X, y = self.X_, self.y_

        # Store the classes seen during fit
        self.classes_ = unique_labels(y)

        self.init_weights(X.shape[1])
        self.cost_ = []

        progress_bar = ProgressBar(max_value=self.n_epochs, desc="AdalineGD Epoch:")
        for _ in range(self.n_epochs):
            net_input = self.net_input(X)
            output = self.activation(net_input)  # identity here; kept for symmetry
            errors = y - output
            # Full-batch update: the whole training set contributes to each step,
            # instead of one sample x_i at a time.
            self.w_[1:] += self.lr * X.T.dot(errors)
            self.w_[0] += self.lr * errors.sum()
            # cost = J(W) = 1/2 * SSE, with SSE = sum of error^2
            self.cost_.append((errors ** 2).sum() / 2.0)
            progress_bar.update(1)
        progress_bar.close()
        return self

    def net_input(self, X):
        """Calculate the net input w^T x + bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Compute linear activation (identity).

        Conceptual placeholder: substituting a sigmoid here would turn the
        model into a logistic regression classifier.
        """
        return X

    def predict(self, X):
        """Return the class label (+1/-1) after the unit step."""
        # Raise an error if not fitted
        check_is_fitted(self)
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

# End AdalineGD
################################################################################
## AdalineSGD ##
################################################################################
class AdalineSGD(BaseEstimator, ClassifierMixin):
    """
    ADAptive LInear NEuron classifier trained with stochastic gradient
    descent (one weight update per training example).

    Parameters
    ------------
    * lr : float
        Learning rate (between 0.0 and 1.0)
    * n_epochs : int
        Passes over the training dataset.
    * shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent cycles.
    * random_state : int
        Random number generator seed for random weight initialization.

    Attributes
    -----------
    * w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    * cost_ : list
        Cost averaged over all training examples in each epoch.
    """

    def __init__(self, lr=0.01, n_epochs=10, shuffle=True, random_state=1):
        self.lr = lr
        self.n_epochs = n_epochs
        self.w_initialized = False
        self.shuffle = shuffle
        self.random_state = random_state

    def init_weights(self, n_features):
        """Initialize weights to small random numbers (extra slot for the bias)."""
        self.rgen = np.random.RandomState(self.random_state)
        self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=1 + n_features)
        self.w_initialized = True

    def fit(self, X, y):
        """
        Fit training data, updating the weights after every example.

        Parameters
        ----------
        X : {array-like}, shape = [n_examples, n_features]
            Training vectors.
        y : array-like, shape = [n_examples]
            Target values.

        Returns
        -------
        self : object
        """
        # Validate once, then train on the validated arrays.
        # (Bug fix: the raw X/y were previously used for training even
        # though check_X_y may have converted/cleaned them into self.X_/self.y_.)
        self.X_, self.y_ = check_X_y(X, y)
        X, y = self.X_, self.y_

        # Store the classes seen during fit
        self.classes_ = unique_labels(y)

        self.init_weights(X.shape[1])
        self.cost_ = []

        progress_bar = ProgressBar(max_value=self.n_epochs, desc="AdalineSGD Epoch:")
        for _ in range(self.n_epochs):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            # One weight update per example; track the average cost per epoch.
            epoch_costs = [self._update_weights(xi, target)
                           for xi, target in zip(X, y)]
            self.cost_.append(sum(epoch_costs) / len(y))
            progress_bar.update(1)
        progress_bar.close()
        return self

    def partial_fit(self, X, y):
        """Fit training data without reinitializing the weights (online learning)."""
        if not self.w_initialized:
            self.init_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self

    def _shuffle(self, X, y):
        """Return X and y re-ordered by a common random permutation."""
        r = self.rgen.permutation(len(y))
        return X[r], y[r]

    def _update_weights(self, xi, target):
        """Apply the Adaline learning rule for one example; return its cost."""
        output = self.activation(self.net_input(xi))
        error = target - output
        self.w_[1:] += self.lr * xi.dot(error)
        self.w_[0] += self.lr * error
        return 0.5 * error ** 2

    def net_input(self, X):
        """Calculate the net input w^T x + bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Compute linear activation (identity)."""
        return X

    def predict(self, X):
        """Return the class label (+1/-1) after the unit step."""
        check_is_fitted(self)
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

# End of AdalineSGD
|
[
"rktools.monitors.ProgressBar",
"sklearn.utils.validation.check_X_y",
"numpy.random.RandomState",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.multiclass.unique_labels",
"numpy.dot"
] |
[((1768, 1808), 'numpy.random.RandomState', 'np.random.RandomState', (['self.random_state'], {}), '(self.random_state)\n', (1789, 1808), True, 'import numpy as np\n'), ((2423, 2438), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (2432, 2438), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((2508, 2524), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y'], {}), '(y)\n', (2521, 2524), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((2644, 2705), 'rktools.monitors.ProgressBar', 'ProgressBar', ([], {'max_value': 'self.n_epochs', 'desc': '"""AdalineGD Epoch:"""'}), "(max_value=self.n_epochs, desc='AdalineGD Epoch:')\n", (2655, 2705), False, 'from rktools.monitors import ProgressBar\n'), ((4335, 4356), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (4350, 4356), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((5894, 5934), 'numpy.random.RandomState', 'np.random.RandomState', (['self.random_state'], {}), '(self.random_state)\n', (5915, 5934), True, 'import numpy as np\n'), ((6583, 6598), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (6592, 6598), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((6668, 6684), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y'], {}), '(y)\n', (6681, 6684), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((6806, 6868), 'rktools.monitors.ProgressBar', 'ProgressBar', ([], {'max_value': 'self.n_epochs', 'desc': '"""AdalineSGD Epoch:"""'}), "(max_value=self.n_epochs, desc='AdalineSGD Epoch:')\n", (6817, 6868), False, 'from rktools.monitors import ProgressBar\n'), ((8777, 8798), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (8792, 8798), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((3547, 3569), 'numpy.dot', 
'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (3553, 3569), True, 'import numpy as np\n'), ((8481, 8503), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (8487, 8503), True, 'import numpy as np\n')]
|
# Sum of numbers that can be written as the sum of their fifth power.
from __future__ import print_function
import timeit
# Python 2 compatibility: rebind range to the lazy xrange when it exists.
try:
    range = xrange
except NameError:
    # Python 3: range is already lazy, nothing to do.
    pass
# Wall-clock timer for the whole script.
start = timeit.default_timer()
def euler_30():
    """Return the sum of all numbers that equal the sum of the fifth
    powers of their digits (Project Euler, problem 30).

    Upper bound: a d-digit number's digit-power sum is at most
    ``d * 9**5``; for d = 7 that is 413343, a 6-digit value, so no
    number with seven or more digits can qualify. Hence
    ``6 * 9**5 = 354294`` is a safe limit — the original scan up to
    1000000 inspected numbers that can never match and returned the
    same result.
    """
    # Precompute the fifth power of every digit character once,
    # instead of re-raising each digit to the fifth power per number.
    fifth = {str(d): d ** 5 for d in range(10)}
    limit = 6 * 9 ** 5
    return sum(i for i in range(2, limit + 1)
               if sum(fifth[ch] for ch in str(i)) == i)
# Report the answer, then the elapsed wall-clock time.
print("Answer: {}".format(euler_30()))
stop = timeit.default_timer()
print("Time: {0:9.5f}".format(stop - start))
|
[
"timeit.default_timer"
] |
[((186, 208), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (206, 208), False, 'import timeit\n'), ((413, 435), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (433, 435), False, 'import timeit\n')]
|
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
import json
import os.path
import sys
from io import StringIO
from tempfile import TemporaryDirectory
from unittest import TestCase
import requests
from git import Repo
from testcontainers.compose import DockerCompose
from testcontainers.core.waiting_utils import wait_container_is_ready
@wait_container_is_ready()
def wait_for(url):
    """Poll *url* until it answers with a success status code.

    ``wait_container_is_ready`` retries this function until it stops
    raising, so a failed GET simply triggers another attempt.
    """
    requests.get(url).raise_for_status()
class EndToEndTest(TestCase):
    """Base fixture for e2e tests.

    Redirects stdout into an in-memory buffer so tests can assert on
    the app's output.
    """

    def setUp(self):
        # Capture everything printed from here on; read it via ``output``.
        sys.stdout = self._buffer = StringIO()

    @property
    def output(self):
        """Everything the app wrote to stdout so far.

        :rtype: str
        """
        return self._buffer.getvalue()
class HTTPDTest(EndToEndTest):
    """Spins up the HTTPD compose environment for the whole test class.

    :ivar httpd: The container's driver.
    """

    httpd = None

    @classmethod
    def setUpClass(cls):
        # Define, launch, then block until the server answers.
        cls.httpd = DockerCompose(filepath='tests/e2e/images/httpd', pull=True)
        cls.httpd.start()
        wait_for('http://localhost:8080/')

    @classmethod
    def tearDownClass(cls):
        cls.httpd.stop()
class JenkinsTest(EndToEndTest):
    """Spawns a container with a simple installation for tests to work over.

    :ivar jenkins: The container's driver.
    """

    jenkins = None

    @classmethod
    def setUpClass(cls):
        # Define, launch, then block until the Jenkins REST API answers.
        cls.jenkins = DockerCompose(filepath='tests/e2e/images/jenkins', pull=True)
        cls.jenkins.start()
        wait_for('http://localhost:8080/api/json')

    @classmethod
    def tearDownClass(cls):
        cls.jenkins.stop()
class ZuulTest(EndToEndTest):
    """Spawns a container with a simple installation for tests to work
    over. The installation follows the guide described here:
    `Zuul Quick-Start
    <https://zuul-ci.org/docs/zuul/latest/tutorials/quick-start.html>`_.

    :ivar dir: A directory where zuul's repository is cloned into.
    :ivar zuul: The container's driver.
    """

    dir = None
    zuul = None

    @classmethod
    def setUpClass(cls):
        # Clone Zuul to obtain the example docker-compose description.
        cls.dir = TemporaryDirectory()
        Repo.clone_from('https://opendev.org/zuul/zuul', cls.dir.name)
        compose_dir = os.path.join(cls.dir.name, 'doc/source/examples')
        cls.zuul = DockerCompose(
            filepath=compose_dir,
            compose_file_name=['docker-compose.yaml'],
            pull=True
        )
        # Launch, then block until the Zuul API answers.
        cls.zuul.start()
        wait_for('http://localhost:9000/api')

    @classmethod
    def tearDownClass(cls):
        cls.zuul.stop()
        cls.dir.cleanup()
class ElasticSearchTest(EndToEndTest):
    """Spawns a container with a simple installation for tests to work over.

    :ivar elasticsearch: The container's driver.
    """

    elasticsearch = None

    @classmethod
    def setUpClass(cls):
        # Define the image
        cls.elasticsearch = DockerCompose(
            filepath='tests/e2e/images/elasticsearch',
            pull=True
        )
        # Launch the container
        cls.elasticsearch.start()
        # Wait for ElasticSearch to be ready
        wait_for('http://localhost:9200')
        # Prepare the database. Each response is now checked so a broken
        # setup fails fast here, with a clear HTTP error, instead of
        # surfacing later as confusing test failures (matches wait_for's
        # raise-on-error style).
        # Create the index
        requests.put(
            'http://localhost:9200/jenkins'
        ).raise_for_status()
        # It is a big mapping, increase the number of possible fields
        requests.put(
            'http://localhost:9200/jenkins/_settings',
            json={
                'index.mapping.total_fields.limit': 2000
            }
        ).raise_for_status()
        # Load the mapping. The file is only needed for this one request,
        # so keep the handle open for the shortest possible time.
        jenkins_mapping = 'tests/e2e/images/elasticsearch/jenkins.mapping.json'
        with open(jenkins_mapping, 'r', encoding='utf-8') as mapping:
            requests.put(
                'http://localhost:9200/jenkins/_mapping',
                json=json.load(mapping)
            ).raise_for_status()

    @classmethod
    def tearDownClass(cls):
        cls.elasticsearch.stop()
|
[
"io.StringIO",
"json.load",
"tempfile.TemporaryDirectory",
"testcontainers.core.waiting_utils.wait_container_is_ready",
"git.Repo.clone_from",
"requests.get",
"requests.put",
"testcontainers.compose.DockerCompose"
] |
[((902, 927), 'testcontainers.core.waiting_utils.wait_container_is_ready', 'wait_container_is_ready', ([], {}), '()\n', (925, 927), False, 'from testcontainers.core.waiting_utils import wait_container_is_ready\n'), ((962, 979), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (974, 979), False, 'import requests\n'), ((1198, 1208), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1206, 1208), False, 'from io import StringIO\n'), ((1551, 1610), 'testcontainers.compose.DockerCompose', 'DockerCompose', ([], {'filepath': '"""tests/e2e/images/httpd"""', 'pull': '(True)'}), "(filepath='tests/e2e/images/httpd', pull=True)\n", (1564, 1610), False, 'from testcontainers.compose import DockerCompose\n'), ((2131, 2192), 'testcontainers.compose.DockerCompose', 'DockerCompose', ([], {'filepath': '"""tests/e2e/images/jenkins"""', 'pull': '(True)'}), "(filepath='tests/e2e/images/jenkins', pull=True)\n", (2144, 2192), False, 'from testcontainers.compose import DockerCompose\n'), ((2972, 2992), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (2990, 2992), False, 'from tempfile import TemporaryDirectory\n'), ((3002, 3064), 'git.Repo.clone_from', 'Repo.clone_from', (['"""https://opendev.org/zuul/zuul"""', 'cls.dir.name'], {}), "('https://opendev.org/zuul/zuul', cls.dir.name)\n", (3017, 3064), False, 'from git import Repo\n'), ((3822, 3889), 'testcontainers.compose.DockerCompose', 'DockerCompose', ([], {'filepath': '"""tests/e2e/images/elasticsearch"""', 'pull': '(True)'}), "(filepath='tests/e2e/images/elasticsearch', pull=True)\n", (3835, 3889), False, 'from testcontainers.compose import DockerCompose\n'), ((4300, 4345), 'requests.put', 'requests.put', (['"""http://localhost:9200/jenkins"""'], {}), "('http://localhost:9200/jenkins')\n", (4312, 4345), False, 'import requests\n'), ((4463, 4572), 'requests.put', 'requests.put', (['"""http://localhost:9200/jenkins/_settings"""'], {'json': "{'index.mapping.total_fields.limit': 2000}"}), 
"('http://localhost:9200/jenkins/_settings', json={\n 'index.mapping.total_fields.limit': 2000})\n", (4475, 4572), False, 'import requests\n'), ((4789, 4807), 'json.load', 'json.load', (['mapping'], {}), '(mapping)\n', (4798, 4807), False, 'import json\n')]
|
import numpy as np
from odeintw import odeintw
from epidemioptim.environments.models.base_model import BaseModel
from epidemioptim.utils import *
PATH_TO_DATA = get_repo_path() + '/data/jane_model_data/ScenarioPlanFranceOne16.xlsx'
PATH_TO_HOME_MATRIX = get_repo_path() + '/data/jane_model_data/contactHome.txt'
PATH_TO_SCHOOL_MATRIX = get_repo_path() + '/data/jane_model_data/contactSchool.txt'
PATH_TO_WORK_MATRIX = get_repo_path() + '/data/jane_model_data/contactWork.txt'
PATH_TO_OTHER_MATRIX = get_repo_path() + '/data/jane_model_data/contactOtherPlaces.txt'
PATH_TO_COMORBIDITY_MATRIX = get_repo_path() + '/data/jane_model_data/coMorbidity.txt'
# ODE model
def vaccination_model(y: tuple,
                      t: float,
                      A: tuple,
                      alpha: tuple,
                      beta: tuple,
                      c: tuple,
                      delta: tuple,
                      epsilon: float,
                      gamma: tuple,
                      kappa: tuple,
                      nu: float,
                      omega: tuple,
                      p1: tuple,
                      p2: tuple,
                      p3: tuple,
                      rho: float,
                      sigma: float,
                      sigma2: float
                      ):
    """
    Right-hand side of the age-structured SEIV ODE system (16 age groups).

    Parameters
    ----------
    y: array-like
        State matrix, one row per age group (it is transposed internally),
        columns in order:
        [S1..S4, E21..E23, E31..E33, E41..E43, V11, V21, V31, V41,
         V12, V22, V32, V42, I2, I3, I4, CV11, CV21, CV31, CV41,
         CV12, CV22, CV32, CV42]
        Si: susceptible individuals with i level of immunity
        Eji: individuals in the latent state heading to class j, stage i
        Vdi: vaccinated people with d dose(s), i being the immunity level
        Ii: symptomatic infected individuals with i level of severity
        CV*: cumulative vaccination counters (first/second dose); their
        derivatives are returned so the integrator tracks totals.
    t: float
        Timestep. Not referenced in the equations; required by the
        integrator's callback signature.
    A: tuple
        Per capita activity counts of individuals in age group n.
    alpha: tuple
        Susceptibility of individuals from Sin (i immunity status, n age group).
    beta: tuple
        Infectivity of the I2/I3/I4 classes.
    c: tuple
        Mixing matrix between individuals in age group a and age group n,
        modified given mitigation strategy, PPE, social distancing, hand
        washing compliance (k-value).
    delta: tuple
        Disease-induced mortality rate of infected individuals from Ijm.
    epsilon: float
        Relative susceptibility factor applied to vaccinated individuals.
    gamma: tuple
        Recovery rate of infected individuals from Ijm.
    kappa: tuple
        Rates of progress through the pre-infectious period of infection.
    nu: float
        Accepted for interface compatibility; not used in the equations
        below (no reference in the body).
    omega: tuple
        Waning rate of immunity of individuals from Sin.
    p1: tuple
        Probability to go to mild class for an age group.
    p2: tuple
        Probability to go to moderate class for an age group.
    p3: tuple
        Probability to go to severe class for an age group.
    rho: float
        Vaccination efficacy for the first dose.
    sigma: float
        First-dose vaccination rate.
    sigma2: float
        Second-dose vaccination rate.

    Returns
    -------
    ndarray
        Derivatives, same shape as ``y`` (one row per age group).
    """
    # Unpack the per-compartment rows; each name is a length-16 vector
    # (one entry per age group).
    origin = y.T
    S1, S2, S3, S4 = origin[0], origin[1], origin[2], origin[3]
    E21, E22, E23 = origin[4], origin[5], origin[6]
    E31, E32, E33 = origin[7], origin[8], origin[9]
    E41, E42, E43 = origin[10], origin[11], origin[12]
    V11, V21, V31, V41 = origin[13], origin[14], origin[15], origin[16]
    V12, V22, V32, V42 = origin[17], origin[18], origin[19], origin[20]
    I2, I3, I4 = origin[21], origin[22], origin[23]
    # Infect calculation: T is the total living population per age group.
    T = S1 + S2 + S3 + S4 + E21 + E22 + E23 + E31 + E32 + E33 + E41 + E42 + E43 + V11 + V21 + V31 + V41 + V12 + V22 + V32 + V42 + I2 + I3 + I4
    # VOC and infectivity calculation: force of infection per age group.
    Xm = sum(np.multiply((beta), np.array((I2,I3,I4)).T).T)
    Ym = np.divide(Xm, T)
    infect = np.dot(np.array(c).T, Ym)
    # Protection from severe disease (new qq): shift vaccinated outcomes
    # toward milder classes.
    qq = 0.3
    pv2 = [(1-0.5*qq)*p2[1], (1-qq)*p2[2]+1/2*qq*p2[1], qq*p2[2]]
    # NOTE(review): pv3[0] is 0*p2[1] — numerically zero either way, but
    # p3[1] looks like the intended factor; confirm with the authors.
    pv3 = [0*p2[1], (1-qq)*p3[2]+1/2*qq*p3[1], qq*p3[2]]
    pv2 = np.array(pv2)
    pv3 = np.array(pv3)
    # Susceptible compartments: infection outflow, waning inflow from the
    # next immunity level, vaccination outflow, recovery inflow.
    dS1dt = - sum(p1)*alpha[0]*A[0]*S1*infect + omega[1]*S2 - sigma*rho*S1 + omega[1]*V11
    dS2dt = - sum(p2)*alpha[1]*A[1]*S2*infect + omega[2]*S3 - omega[1]*S2 - sigma*rho*S2 + gamma[1]*I2 + omega[2]*V21
    dS3dt = - (p3[1]+p3[2])*alpha[2]*A[2]*S3*infect + omega[3]*S4 - omega[2]*S3 - sigma*rho*S3 + gamma[2]*I3 + omega[3]*(V31+V41+V12+V22+V32+V42)
    dS4dt = - omega[3]*S4 - sigma*rho*S4 + gamma[3]*I4
    # Exposed compartments: three sequential latent stages per destination.
    # To I2
    dE21dt = p1[0]*alpha[0]*A[0]*S1*infect + p2[0]*alpha[1]*A[1]*S2*infect + pv2[0]*epsilon*alpha[1]*A[1]*V11*infect - kappa[1]*E21
    dE22dt = kappa[1]*E21 - kappa[2]*E22
    dE23dt = kappa[2]*E22 - kappa[3]*E23
    # To I3
    dE31dt = p1[1]*alpha[0]*A[0]*S1*infect + p2[1]*alpha[1]*A[1]*S2*infect + p3[1]*alpha[2]*A[2]*S3*infect + pv2[1]*epsilon*alpha[1]*A[1]*V11*infect + pv3[1]*epsilon*alpha[2]*A[2]*V21*infect - kappa[1]*E31
    dE32dt = kappa[1]*E31 - kappa[2]*E32
    dE33dt = kappa[2]*E32 - kappa[3]*E33
    # To I4
    dE41dt = p1[2]*alpha[0]*A[0]*S1*infect + p2[2]*alpha[1]*A[1]*S2*infect + p3[2]*alpha[2]*A[2]*S3*infect + pv2[2]*epsilon*alpha[1]*A[1]*V11*infect + pv3[2]*epsilon*alpha[2]*A[2]*V21*infect - kappa[1]*E41
    dE42dt = kappa[1]*E41 - kappa[2]*E42
    dE43dt = kappa[2]*E42 - kappa[3]*E43
    # Vaccinated compartments: first dose in, second dose out, breakthrough
    # infections and waning.
    dV11dt = sigma*rho*S1 - sigma2*rho*V11 - sum(pv2)*epsilon*alpha[1]*A[1]*V11*infect - omega[1]*V11
    dV21dt = sigma*rho*S2 - sigma2*rho*V21 - sum(pv3)*epsilon*alpha[2]*A[2]*V21*infect - omega[2]*V21
    dV31dt = sigma*rho*S3 - sigma2*rho*V31 - omega[3]*V31
    dV41dt = sigma*rho*S4 - sigma2*rho*V41 - omega[3]*V41
    dV12dt = sigma2*rho*V11 - omega[3]*V12
    dV22dt = sigma2*rho*V21 - omega[3]*V22
    dV32dt = sigma2*rho*V31 - omega[3]*V32
    dV42dt = sigma2*rho*V41 - omega[3]*V42
    # From S to V: cumulative first-dose counters (monotone, never drain).
    dCV11dt = sigma*rho*S1
    dCV21dt = sigma*rho*S2
    dCV31dt = sigma*rho*S3
    dCV41dt = sigma*rho*S4
    # From V1 to V2: cumulative second-dose counters.
    dCV12dt = sigma2*rho*V11
    dCV22dt = sigma2*rho*V21
    dCV32dt = sigma2*rho*V31
    dCV42dt = sigma2*rho*V41
    # Infected compartments: inflow from last latent stage, death, recovery.
    dI2dt = kappa[3]*E23 - delta[1]*I2 - gamma[1]*I2
    dI3dt = kappa[3]*E33 - delta[2]*I3 - gamma[2]*I3
    dI4dt = kappa[3]*E43 - delta[3]*I4 - gamma[3]*I4
    dydt = np.array((dS1dt, dS2dt, dS3dt, dS4dt, dE21dt, dE22dt, dE23dt, dE31dt, dE32dt, dE33dt, dE41dt, dE42dt, dE43dt, dV11dt, dV21dt, dV31dt, dV41dt, dV12dt, dV22dt, dV32dt, dV42dt, dI2dt, dI3dt, dI4dt, dCV11dt, dCV21dt, dCV31dt, dCV41dt, dCV12dt, dCV22dt, dCV32dt, dCV42dt))
    return dydt.T
class HeffernanOdeModel16(BaseModel):
    """Age-structured (16 age groups) COVID-19 vaccination model.

    Wraps the ``vaccination_model`` ODE system: loads population, contact
    and comorbidity data from the repository's data files, manages the
    vaccination schedule and mitigation steps, and exposes the
    ``BaseModel`` interface (``reset`` / ``run_n_steps``).
    """
    def __init__(self,
                 stochastic=False,
                 range_delay=None
                 ):
        # Groups and raw data
        self._age_groups = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75+']
        # NOTE(review): relies on ``pd`` being in scope via the star import
        # from epidemioptim.utils — confirm.
        self._pop_size = pd.read_excel(PATH_TO_DATA, sheet_name='population', skiprows=3, usecols=(2,2))['Unnamed: 2']
        self.pop_size = dict(zip(self._age_groups, (self._pop_size)))
        # Calendar (in days) of mitigation-strategy changes.
        self.step_list = [0,71,73,76,91,121,152,153,173,182,185,201,213,239,244,274,290,295,303,305,335,349,353,366,369,370,377,381,384,391,397,398,
                          402,404,405,409,412,418,419,425,426,431,433,440,447,454,456,459,461,465,468,472,475,481,482,486,488,489,494,496,497,501,503,
                          510,517,524,531,546,552,578,609,639,661,670,677,717,731,762,768,775,782,789,790,796,821]
        # Matrices
        self.p1 = get_text_file_data(PATH_TO_COMORBIDITY_MATRIX)
        self.p2 = get_text_file_data(PATH_TO_COMORBIDITY_MATRIX)
        # p3 reuses p1's rows with the first entry zeroed.
        self.p3 = [[0] + sub[1:] for sub in self.p1]
        self.work = get_text_file_data(PATH_TO_WORK_MATRIX)
        self.other = get_text_file_data(PATH_TO_OTHER_MATRIX)
        self.home = get_text_file_data(PATH_TO_HOME_MATRIX)
        self.school = get_text_file_data(PATH_TO_SCHOOL_MATRIX)
        self.perturbations_matrices = get_perturbations_matrices(PATH_TO_DATA)
        self.contact_modifiers = get_contact_modifiers(PATH_TO_DATA)
        self.transition_matrices = get_transition_matrices(self.pop_size, self.home, self.school, self.work, self.other)
        # Vaccination data
        self.whovaccinated = get_target_population()
        self._vaccination_coverage = get_coverage(PATH_TO_DATA)
        self.vaccination_coverage = self._compute_delta_coverage()
        self.active_vaccination = vaccination_active(PATH_TO_DATA)
        self.mitigation_windows = mitigation_time(self.step_list)
        # Monthly dose availability and per-age-group coverage targets.
        self.number_doses = [1679218,3008288,6026744,12000000,12000000,12000000,12000000,12000000,12000000,0,0]
        self.coverage_threshold = [0, 0, 0, 0, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.8, 0.8, 0.8, 0.8, 0.8]
        # Cumulative first- and second-dose counts, per age group.
        self.dCV1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.dCV2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.vacStep = 0
        self.newstep = 0
        self.nbrConf = 0
        # Tracking variables
        self.step = 0
        self.t = 0
        self.k = 1
        self.stochastic = stochastic
        self._all_internal_params_distribs = dict()
        self._all_initial_state_distribs = dict()
        # Initialize distributions of parameters and initial conditions for all regions
        self.define_params_and_initial_state_distributions()
        # Sample initial conditions and initial model parameters
        internal_params_labels = ['A', 'alpha', 'beta', 'c', 'delta', 'epsilon', 'gamma', 'kappa', 'nu', 'omega', 'p1', 'p2', 'p3', 'rho', 'sigma', 'sigma2']
        # Define ODE model
        self.internal_model = vaccination_model
        super().__init__(internal_states_labels=['S1', 'S2', 'S3', 'S4', 'E21', 'E22', 'E23', 'E31', 'E32', 'E33', 'E41', 'E42', 'E43',
                'V11', 'V21', 'V31', 'V41', 'V12', 'V22', 'V32', 'V42', 'I2', 'I3', 'I4', 'CV11', 'CV21', 'CV31', 'CV41', 'CV12', 'CV22', 'CV32', 'CV42'],
                         internal_params_labels=internal_params_labels,
                         stochastic=stochastic,
                         range_delay=range_delay)
    def define_params_and_initial_state_distributions(self):
        """
        Extract and define distributions of parameters for all age groups
        """
        # NOTE(review): the same parameter set is stored under every age-group
        # key (the loop body does not depend on ``i`` beyond the key) —
        # presumably intentional; _sample_model_params only reads '0-4'.
        for i in self._age_groups:
            self._all_internal_params_distribs[i] = dict(A=np.array(calculate_A_and_c(0, 1, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)[0]),
                                                         alpha=np.array(duplicate_data([1, 2/3, 1/3, 0], 16)).T,
                                                         beta=np.array(duplicate_data([0.04, 0.08, 0.008], 16)),
                                                         c=calculate_A_and_c(0, 1, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)[1],
                                                         delta=np.array([[0.0, 0.0, 0.0, 0.00001],
                                                                   [0.0, 0.0, 0.0, 0.00001],
                                                                   [0.0, 0.0, 0.0, 0.00001],
                                                                   [0.0, 0.0, 0.0, 0.00001],
                                                                   [0.0, 0.0, 0.0, 0.00005],
                                                                   [0.0, 0.0, 0.0, 0.00005],
                                                                   [0.0, 0.0, 0.0, 0.0002],
                                                                   [0.0, 0.0, 0.0, 0.0002],
                                                                   [0.0, 0.0, 0.0, 0.0005],
                                                                   [0.0, 0.0, 0.0, 0.0005],
                                                                   [0.0, 0.0, 0.0, 0.002],
                                                                   [0.0, 0.0, 0.0, 0.002],
                                                                   [0.0, 0.0, 0.0, 0.007],
                                                                   [0.0, 0.0, 0.0, 0.007],
                                                                   [0.0, 0.0, 0.0, 0.019],
                                                                   [0.0, 0.0, 0.0, 0.083]]).T,
                                                         epsilon=1-0.559,
                                                         gamma=np.array(duplicate_data([0, 0.2, 0.1, 1/15], 16)).T,
                                                         kappa=np.array(duplicate_data([0, 1/1.5, 1/1.5, 1/1.5], 16)).T,
                                                         nu=0,
                                                         omega=np.array(duplicate_data([0, 1/365, 1/365, 1/365], 16)).T,
                                                         p1=np.array(self.p1).T,
                                                         p2=np.array(self.p2).T,
                                                         p3=np.array(self.p3).T,
                                                         rho=0.894,
                                                         sigma=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
                                                         sigma2=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
                                                         )
            # All compartments start empty (Dirac at 0); infected seeds are
            # injected later in _sample_initial_state.
            self._all_initial_state_distribs[i] = dict(S20=DiracDist(params=0, stochastic=self.stochastic),
                                                       S30=DiracDist(params=0, stochastic=self.stochastic),
                                                       S40=DiracDist(params=0, stochastic=self.stochastic),
                                                       E210=DiracDist(params=0, stochastic=self.stochastic),
                                                       E220=DiracDist(params=0, stochastic=self.stochastic),
                                                       E230=DiracDist(params=0, stochastic=self.stochastic),
                                                       E310=DiracDist(params=0, stochastic=self.stochastic),
                                                       E320=DiracDist(params=0, stochastic=self.stochastic),
                                                       E330=DiracDist(params=0, stochastic=self.stochastic),
                                                       E410=DiracDist(params=0, stochastic=self.stochastic),
                                                       E420=DiracDist(params=0, stochastic=self.stochastic),
                                                       E430=DiracDist(params=0, stochastic=self.stochastic),
                                                       V110=DiracDist(params=0, stochastic=self.stochastic),
                                                       V210=DiracDist(params=0, stochastic=self.stochastic),
                                                       V310=DiracDist(params=0, stochastic=self.stochastic),
                                                       V410=DiracDist(params=0, stochastic=self.stochastic),
                                                       V120=DiracDist(params=0, stochastic=self.stochastic),
                                                       V220=DiracDist(params=0, stochastic=self.stochastic),
                                                       V320=DiracDist(params=0, stochastic=self.stochastic),
                                                       V420=DiracDist(params=0, stochastic=self.stochastic),
                                                       I20=DiracDist(params=0, stochastic=self.stochastic),
                                                       I30=DiracDist(params=0, stochastic=self.stochastic),
                                                       I40=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV110=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV210=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV310=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV410=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV120=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV220=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV320=DiracDist(params=0, stochastic=self.stochastic),
                                                       CV420=DiracDist(params=0, stochastic=self.stochastic))
    def reset(self, delay=None) -> None:
        """
        Resets the model parameters, and state, add delay.

        Parameters
        ----------
        delay: int, optional
            Number of days the model should be run for before the start of the episode.
            Default is 0.
        """
        self._sample_model_params()
        self._sample_initial_state()
        self._reset_state()
        # Clear vaccination counters and tracking variables.
        self.dCV1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.dCV2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.vacStep = 0
        self.step = 0
        self.t = 0
        self.k = 1
        if self.stochastic:
            if delay is not None:
                self.delay(random=False, delay=delay)
            else:
                self.delay()
    def _reset_state(self):
        """
        Resets model state to initial state.
        """
        self.current_state = dict()
        for i in self._age_groups:
            self.current_state[i] = dict(zip(self.internal_states_labels, np.array([self.initial_state[i]['{}0'.format(s)] for s in self.internal_states_labels])))
    def _get_model_params(self) -> tuple:
        """
        Get current parameters of the model

        Returns
        -------
        tuple
            tuple of the model parameters in the order of the list of labels
        """
        return tuple([self.current_internal_params[k] for k in self.internal_params_labels])
    def _get_current_state(self):
        """
        Get current state in the order of state labels.
        """
        # One row per age group, columns ordered like internal_states_labels.
        state = []
        for i in self._age_groups:
            state.append([self.current_state[i]['{}'.format(s)] for s in self.internal_states_labels])
        return state
    def convert_states(self, state):
        """Re-key a per-compartment state dict into a per-age-group dict
        of compartment values (inverse layout of _get_current_state)."""
        # NOTE(review): the ``tolist()`` result below is discarded — this
        # loop is a no-op as written; confirm whether conversion was meant.
        for i in state.keys():
            state[i].tolist()
        true_state = dict()
        for i in self._age_groups:
            true_state[i] = dict()
        grp=0
        for i in true_state.keys():
            for j in state.keys():
                true_state[i][j] = state[j][grp]
            grp+=1
        return true_state
    def _sample_initial_state(self):
        """
        Samples an initial model state from its distribution (Dirac distributions if self.stochastic is False).
        """
        self.initial_state = dict()
        for i in self._age_groups:
            self.initial_state[i] = dict()
            for k in self._all_initial_state_distribs[i].keys():
                self.initial_state[i][k] = self._all_initial_state_distribs[i][k].sample()
            # Seed the epidemic: 10 mild and 1 moderate case split evenly
            # across the six 20-49 age groups.
            if i in ['20-24', '25-29', '30-34', '35-39', '40-44', '45-49']:
                self.initial_state[i]['I20'] = 10/6
                self.initial_state[i]['I30'] = 1/6
            # S10 is computed from other states, as the sum of all states equals the population size N
            self.initial_state[i]['S10'] = self.pop_size[i] - np.sum([self.initial_state[i]['{}0'.format(s)] for s in self.internal_states_labels[1:]])
    def _sample_model_params(self):
        """
        Samples parameters of the model from their distribution (Dirac distributions if self.stochastic is False).
        """
        self.initial_internal_params = dict()
        # Parameters are shared across age groups, so '0-4' is representative.
        for k in self._all_internal_params_distribs['0-4'].keys():
            self.initial_internal_params[k] = self._all_internal_params_distribs['0-4'][k]
        self._reset_model_params()
    def _set_current_state(self, current_state):
        """
        Set current state to given values.

        Parameters
        ----------
        current_state: 1D nd.array
            State the current state should be set to.
        """
        self.current_state = dict(zip(self.internal_states_labels, current_state.T))
    def _compute_delta_coverage(self):
        """
        Compute the goal coverage for each month regarding the Excel sheet
        """
        maxcoverage = [x*100 for x in self._vaccination_coverage]
        _deltaCoverage = list(range(len(maxcoverage)))
        _deltaCoverage[0] = maxcoverage[0]
        # Use the month-over-month increment when coverage changes.
        for i in range(1, len(maxcoverage)):
            if maxcoverage[i] != maxcoverage[i-1]:
                _deltaCoverage[i] = maxcoverage[i] - maxcoverage[i-1]
            else:
                _deltaCoverage[i] = maxcoverage[i-1]
        # Avoid exact zeros (they would zero out the vaccination rate).
        for i in range(len(_deltaCoverage)):
            if _deltaCoverage[i] == 0:
                _deltaCoverage[i] = 10e-6
        # Hold the last 13 entries at the value 14 steps from the end.
        for i in range(1,14):
            _deltaCoverage[-i] = _deltaCoverage[-14]
        return _deltaCoverage
    def compute_sigma(self):
        """
        Computes sigma, the vaccination rate, regarding for each group if they are eligible to vaccination during a time period
        """
        mwl = self.mitigation_windows[self.step]
        lowVP = 1
        pi = lowVP*(self.vaccination_coverage[self.vacStep]/100)
        classes = ['S1', 'S2', 'S3', 'S4']
        popGrp = ['S1', 'S2', 'S3', 'S4', 'E21', 'E22', 'E23', 'E31', 'E32', 'E33', 'E41', 'E42',
                  'E43', 'V11', 'V21', 'V31', 'V41', 'V12', 'V22', 'V32', 'V42', 'I2', 'I3', 'I4']
        sigma = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        wcv, Ntot = 0, 0
        # Total population and the vaccinable (susceptible, targeted) pool.
        for n in range(16):
            Ntot += sum([self.current_state[self._age_groups[n]]['{}'.format(s)] for s in popGrp])
            if self.whovaccinated[self.vacStep][n] == 1:
                wcv += sum([self.current_state[self._age_groups[n]]['{}'.format(y)] for y in classes])
        g = (pi*Ntot/wcv)
        if g>1:
            g=0.999999999
        for k in range(16):
            if self.whovaccinated[self.vacStep+1][k] == 1:
                # NOTE(review): relies on ``math`` being in scope via the
                # star import from epidemioptim.utils — confirm.
                sigma[k] = 1/mwl*(-math.log(1-g))
            if sigma[k] < 0:
                sigma[k] = 0
        # Stop vaccinating groups that reached their coverage threshold.
        for f in range(16):
            size = sum([self.current_state[self._age_groups[f]]['{}'.format(s)] for s in popGrp])
            if self.dCV1[f]/size >= self.coverage_threshold[f]/0.8944:
                sigma[f] = 0
        # No first doses after day 670.
        if self.t > 670:
            return [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        return sigma
    def politic_decision_module(self):
        """
        Change contact matrices and k values regarding the number of cases (I4 + half of I3)
        4 scenarios:
            - no lockdown, holiday
            - no holiday, lockdown
            - holiday, lockdown
            - no holiday, no lockdown
        """
        # Evaluated at every date (timestep).
        total_I4 = 0
        for f in range(16):
            total_I4 += (sum([self.current_state[self._age_groups[f]]['I3']])*0.5 + sum([self.current_state[self._age_groups[f]]['I4']]))
        if total_I4 > 37000:
            if self.t > 531:
                if self.t < 609:
                    # NOTE(review): this assignment is immediately overwritten
                    # by ``self.newstep = 4`` below — confirm intended.
                    self.newstep = 19
            self.k = 0.3
            self.newstep = 4
            self.nbrConf += 1
            #print(self.t, self.nbrConf) # print the number of lockdowns
        elif total_I4 < 37000:
            if self.t > 531:
                if self.t < 609:
                    self.k = 0.6
                    self.newstep = 27
                elif self.t > 609:
                    self.k = 0.55
                    self.newstep = 22
            else:
                self.k = 0.6
                self.newstep = 22
    def run_n_steps(self, current_state=None, n=1, labelled_states=False):
        """
        Runs the model for n steps

        Parameters
        ----------
        current_state: 1D nd.array
            Current model state.
        n: int
            Number of steps the model should be run for.
        labelled_states: bool
            Whether the result should be a dict with state labels or a nd array.

        Returns
        -------
        dict or 2D nd.array
            Returns a dict if labelled_states is True, where keys are state labels.
            Returns an array of size (n, n_states) of the last n model states.
        """
        if current_state is None:
            current_state = np.array(self._get_current_state())
        for f in range(16):
            # Update the number of people vaccinated with 1 or 2 doses
            self.dCV1[f] = sum([self.current_state[self._age_groups[f]]['{}'.format(s)] for s in ['CV11', 'CV21', 'CV31', 'CV41']])
            self.dCV2[f] = sum([self.current_state[self._age_groups[f]]['{}'.format(s)] for s in ['CV12', 'CV22', 'CV32', 'CV42']])
        # On mitigation-calendar dates, refresh activity/contact parameters.
        if(self.t == self.step_list[self.step]):
            self.k = k_value(self.t)
            A_c = calculate_A_and_c(self.step, self.k, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)
            self.current_internal_params['A'], self.current_internal_params['c'] = np.array(A_c[0]), A_c[1]
            self.current_internal_params['nu'] = nu_value(self.t)
            if self.t > 369:
                # To uncomment if we want to run the initial model
                # sigma = self.compute_sigma()
                # self.current_internal_params['sigma'] = np.array(sigma)
                self.current_internal_params['sigma2'] = np.array(duplicate_data(1/42, 16))
                self.k = k_value(self.t)
                ##### TO COMMENT ####
                # if we want to run the initial model
                self.politic_decision_module()
                A_c = calculate_A_and_c(self.newstep, self.k, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)
                self.current_internal_params['A'], self.current_internal_params['c'] = np.array(A_c[0]), A_c[1]
                ##### END #####
                self.vacStep += 1
            self.step += 1
        # Use the odeint library to run the ODE model
        # (args works because _get_model_params already returns a tuple —
        # the surrounding parentheses do not create one).
        z = odeintw(self.internal_model, current_state, np.linspace(0, n, n + 1), args=(self._get_model_params()))
        self._set_current_state(current_state=z[-1].copy()) # save new current state
        self.t += 1
        self.current_state = self.convert_states(self.current_state)
        #print(self.nbrConf)
        # format results
        if labelled_states:
            return self._convert_to_labelled_states(np.atleast_2d(z[1:]))
        else:
            return np.atleast_2d(z[1:])
if __name__ == '__main__':
    # Get model (deterministic run)
    model = HeffernanOdeModel16(stochastic=False)
    labels = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75+']
    # Run simulation one day at a time and accumulate the raw states.
    simulation_horizon = 821
    model_states = []
    for i in range(simulation_horizon):
        model_state = model.run_n_steps()
        model_states += model_state.tolist()
    # Plot
    time = np.arange(simulation_horizon)
    plot_preds(t=time,states=np.array(model_states).transpose()[23], title="Évolution du nombre de cas incident sévères (I$_4$) de COVID-19 avec vaccination")
    # Plot hospitalizations (I4) and cases (I4 + half of I3)
    # NOTE(review): despite the names, i4tot accumulates I4 + 0.5*I3 and
    # castot accumulates pure I4 (columns 23 = I4, 22 = I3) — the plot
    # labels below are consistent with the values, not the names.
    i4tot = []
    castot = []
    for i in model_states:
        tot = 0
        tat = 0
        for j in i:
            tat += j[23]
            tot += j[22]*0.5 + j[23]
        i4tot.append(tot)
        castot.append(tat)
    plt.plot(time, np.array(i4tot), label='I$_4$ + 0.5*I$_3$')
    plt.plot(time, np.array(castot), label='I$_4$', color='red')
    # plt.plot(np.linspace(142, 579, (579-142)), (np.array(get_incidence())), label='Données SIDEP')
    plt.axvline(x=370, label='Beginning of vaccination campaign', color='red', linewidth=1, linestyle='--')
    plt.axvline(x=631, label='Fin de la première dose', linewidth=1, linestyle='--')
    # plt.xlabel("Temps (en jours)")
    # plt.ylabel(r'Nombre de personnes hospitalisées')
    # plt.legend()
    # plt.title("Évolution du nombre de cas incident modérés et sévères (I$_3$ + I$_4$) de COVID-19 avec vaccination")
    plt.show()
|
[
"numpy.divide",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.atleast_2d"
] |
[((3881, 3897), 'numpy.divide', 'np.divide', (['Xm', 'T'], {}), '(Xm, T)\n', (3890, 3897), True, 'import numpy as np\n'), ((4130, 4143), 'numpy.array', 'np.array', (['pv2'], {}), '(pv2)\n', (4138, 4143), True, 'import numpy as np\n'), ((4154, 4167), 'numpy.array', 'np.array', (['pv3'], {}), '(pv3)\n', (4162, 4167), True, 'import numpy as np\n'), ((6454, 6733), 'numpy.array', 'np.array', (['(dS1dt, dS2dt, dS3dt, dS4dt, dE21dt, dE22dt, dE23dt, dE31dt, dE32dt, dE33dt,\n dE41dt, dE42dt, dE43dt, dV11dt, dV21dt, dV31dt, dV41dt, dV12dt, dV22dt,\n dV32dt, dV42dt, dI2dt, dI3dt, dI4dt, dCV11dt, dCV21dt, dCV31dt, dCV41dt,\n dCV12dt, dCV22dt, dCV32dt, dCV42dt)'], {}), '((dS1dt, dS2dt, dS3dt, dS4dt, dE21dt, dE22dt, dE23dt, dE31dt,\n dE32dt, dE33dt, dE41dt, dE42dt, dE43dt, dV11dt, dV21dt, dV31dt, dV41dt,\n dV12dt, dV22dt, dV32dt, dV42dt, dI2dt, dI3dt, dI4dt, dCV11dt, dCV21dt,\n dCV31dt, dCV41dt, dCV12dt, dCV22dt, dCV32dt, dCV42dt))\n', (6462, 6733), True, 'import numpy as np\n'), ((28047, 28076), 'numpy.arange', 'np.arange', (['simulation_horizon'], {}), '(simulation_horizon)\n', (28056, 28076), True, 'import numpy as np\n'), ((28546, 28561), 'numpy.array', 'np.array', (['i4tot'], {}), '(i4tot)\n', (28554, 28561), True, 'import numpy as np\n'), ((28609, 28625), 'numpy.array', 'np.array', (['castot'], {}), '(castot)\n', (28617, 28625), True, 'import numpy as np\n'), ((3918, 3929), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3926, 3929), True, 'import numpy as np\n'), ((27128, 27152), 'numpy.linspace', 'np.linspace', (['(0)', 'n', '(n + 1)'], {}), '(0, n, n + 1)\n', (27139, 27152), True, 'import numpy as np\n'), ((27551, 27571), 'numpy.atleast_2d', 'np.atleast_2d', (['z[1:]'], {}), '(z[1:])\n', (27564, 27571), True, 'import numpy as np\n'), ((26091, 26107), 'numpy.array', 'np.array', (['A_c[0]'], {}), '(A_c[0])\n', (26099, 26107), True, 'import numpy as np\n'), ((27496, 27516), 'numpy.atleast_2d', 'np.atleast_2d', (['z[1:]'], {}), '(z[1:])\n', (27509, 27516), True, 'import 
numpy as np\n'), ((3845, 3867), 'numpy.array', 'np.array', (['(I2, I3, I4)'], {}), '((I2, I3, I4))\n', (3853, 3867), True, 'import numpy as np\n'), ((13621, 13716), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0])\n', (13629, 13716), True, 'import numpy as np\n'), ((13778, 13873), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0])\n', (13786, 13873), True, 'import numpy as np\n'), ((26899, 26915), 'numpy.array', 'np.array', (['A_c[0]'], {}), '(A_c[0])\n', (26907, 26915), True, 'import numpy as np\n'), ((11258, 11679), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 1e-05], [\n 0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 5e-05], [0.0, 0.0, 0.0, 5e-05],\n [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, 0.0, \n 0.0005], [0.0, 0.0, 0.0, 0.0005], [0.0, 0.0, 0.0, 0.002], [0.0, 0.0, \n 0.0, 0.002], [0.0, 0.0, 0.0, 0.007], [0.0, 0.0, 0.0, 0.007], [0.0, 0.0,\n 0.0, 0.019], [0.0, 0.0, 0.0, 0.083]]'], {}), '([[0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, \n 1e-05], [0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 5e-05], [0.0, 0.0, 0.0,\n 5e-05], [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, \n 0.0, 0.0005], [0.0, 0.0, 0.0, 0.0005], [0.0, 0.0, 0.0, 0.002], [0.0, \n 0.0, 0.0, 0.002], [0.0, 0.0, 0.0, 0.007], [0.0, 0.0, 0.0, 0.007], [0.0,\n 0.0, 0.0, 0.019], [0.0, 0.0, 0.0, 0.083]])\n', (11266, 11679), True, 'import numpy as np\n'), ((13307, 13324), 'numpy.array', 'np.array', (['self.p1'], {}), '(self.p1)\n', (13315, 13324), True, 'import numpy as np\n'), ((13388, 13405), 'numpy.array', 'np.array', (['self.p2'], {}), '(self.p2)\n', (13396, 13405), True, 'import numpy as np\n'), ((13469, 
13486), 'numpy.array', 'np.array', (['self.p3'], {}), '(self.p3)\n', (13477, 13486), True, 'import numpy as np\n'), ((28106, 28128), 'numpy.array', 'np.array', (['model_states'], {}), '(model_states)\n', (28114, 28128), True, 'import numpy as np\n')]
|
from allauth.account.views import LoginView, LogoutView
from django.conf import settings
from django.conf.urls import include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.urls import path
from django.views.generic import TemplateView
from bar.views import MenuView
from camps.views import CampDetailView, CampListView, CampRedirectView
from feedback.views import FeedbackCreate
from info.views import CampInfoView
from people.views import PeopleView
from sponsors.views import SponsorsView
from villages.views import (
VillageDeleteView,
VillageDetailView,
VillageListView,
VillageUpdateView,
)
# require 2fa token entry (if enabled on admin account) when logging into /admin by using allauth login form
admin.site.login = login_required(admin.site.login)
urlpatterns = [
    path("o/", include("oauth2_provider.urls", namespace="oauth2_provider")),
    # NOTE: the next three includes all hang off the same "profile/" prefix;
    # Django tries patterns in order, so earlier includes win on overlapping URLs.
    path("profile/", include("allauth.urls")),
    path("profile/", include("allauth_2fa.urls")),
    path("profile/", include("profiles.urls", namespace="profiles")),
    path("tickets/", include("tickets.urls", namespace="tickets")),
    path("shop/", include("shop.urls", namespace="shop")),
    path("news/", include("news.urls", namespace="news")),
    # Static template-only pages.
    path(
        "contact/", TemplateView.as_view(template_name="contact.html"), name="contact"
    ),
    path("conduct/", TemplateView.as_view(template_name="coc.html"), name="conduct"),
    # Login/logout go through allauth so 2FA (configured above) is honoured.
    path("login/", LoginView.as_view(), name="account_login"),
    path("logout/", LogoutView.as_view(), name="account_logout"),
    path(
        "privacy-policy/",
        TemplateView.as_view(template_name="legal/privacy_policy.html"),
        name="privacy-policy",
    ),
    path(
        "general-terms-and-conditions/",
        TemplateView.as_view(template_name="legal/general_terms_and_conditions.html"),
        name="general-terms",
    ),
    path("admin/", admin.site.urls),
    # We don't need CSRF checks for the API
    # path("api/", csrf_exempt(GraphQLView.as_view(graphiql=True))),
    path("camps/", CampListView.as_view(), name="camp_list"),
    path("token/", include("tokens.urls", namespace="tokens")),
    path("maps/", include("maps.urls", namespace="maps")),
    # camp redirect views here
    # Each redirect sends a camp-less URL to the same page for a concrete camp;
    # the "page" kwarg names the URL pattern the view redirects to.
    path(
        "",
        CampRedirectView.as_view(),
        kwargs={"page": "camp_detail"},
        name="camp_detail_redirect",
    ),
    path(
        "program/",
        CampRedirectView.as_view(),
        kwargs={"page": "program:schedule_index"},
        name="schedule_index_redirect",
    ),
    path(
        "info/",
        CampRedirectView.as_view(),
        kwargs={"page": "info"},
        name="info_redirect",
    ),
    path(
        "sponsors/",
        CampRedirectView.as_view(),
        kwargs={"page": "sponsors"},
        name="sponsors_redirect",
    ),
    path(
        "villages/",
        CampRedirectView.as_view(),
        kwargs={"page": "village_list"},
        name="village_list_redirect",
    ),
    path(
        "wishlist/",
        CampRedirectView.as_view(),
        kwargs={"page": "wishlist:list"},
        name="wish_list_redirect",
    ),
    path(
        "backoffice/",
        CampRedirectView.as_view(),
        kwargs={"page": "backoffice:index"},
        name="backoffice_redirect",
    ),
    path(
        "phonebook/",
        CampRedirectView.as_view(),
        kwargs={"page": "phonebook:list"},
        name="phone_book_redirect",
    ),
    path("people/", PeopleView.as_view(), name="people"),
    # camp specific urls below here
    path(
        "<slug:camp_slug>/",
        include(
            [
                path("", CampDetailView.as_view(), name="camp_detail"),
                path("info/", CampInfoView.as_view(), name="info"),
                path("program/", include("program.urls", namespace="program")),
                path("sponsors/", SponsorsView.as_view(), name="sponsors"),
                path("bar/menu/", MenuView.as_view(), name="menu"),
                path(
                    "villages/",
                    include(
                        [
                            path("", VillageListView.as_view(), name="village_list"),
                            path(
                                "<slug:slug>/delete/",
                                VillageDeleteView.as_view(),
                                name="village_delete",
                            ),
                            path(
                                "<slug:slug>/edit/",
                                VillageUpdateView.as_view(),
                                name="village_update",
                            ),
                            # this has to be the last url in the list
                            path(
                                "<slug:slug>/",
                                VillageDetailView.as_view(),
                                name="village_detail",
                            ),
                        ]
                    ),
                ),
                path("teams/", include("teams.urls", namespace="teams")),
                path("rideshare/", include("rideshare.urls", namespace="rideshare")),
                path("backoffice/", include("backoffice.urls", namespace="backoffice")),
                path("feedback/", FeedbackCreate.as_view(), name="feedback"),
                path("economy/", include("economy.urls", namespace="economy")),
                path("wishlist/", include("wishlist.urls", namespace="wishlist")),
                path("facilities/", include("facilities.urls", namespace="facilities")),
                path("phonebook/", include("phonebook.urls", namespace="phonebook")),
            ]
        ),
    ),
]
# Mount django-debug-toolbar under /__debug__/ in development only.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
[
"django.contrib.auth.decorators.login_required",
"people.views.PeopleView.as_view",
"villages.views.VillageDetailView.as_view",
"django.conf.urls.include",
"django.urls.path",
"sponsors.views.SponsorsView.as_view",
"allauth.account.views.LoginView.as_view",
"camps.views.CampDetailView.as_view",
"camps.views.CampListView.as_view",
"bar.views.MenuView.as_view",
"villages.views.VillageUpdateView.as_view",
"info.views.CampInfoView.as_view",
"villages.views.VillageListView.as_view",
"django.views.generic.TemplateView.as_view",
"camps.views.CampRedirectView.as_view",
"allauth.account.views.LogoutView.as_view",
"feedback.views.FeedbackCreate.as_view",
"villages.views.VillageDeleteView.as_view"
] |
[((799, 831), 'django.contrib.auth.decorators.login_required', 'login_required', (['admin.site.login'], {}), '(admin.site.login)\n', (813, 831), False, 'from django.contrib.auth.decorators import login_required\n'), ((1927, 1958), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1931, 1958), False, 'from django.urls import path\n'), ((864, 924), 'django.conf.urls.include', 'include', (['"""oauth2_provider.urls"""'], {'namespace': '"""oauth2_provider"""'}), "('oauth2_provider.urls', namespace='oauth2_provider')\n", (871, 924), False, 'from django.conf.urls import include\n'), ((948, 971), 'django.conf.urls.include', 'include', (['"""allauth.urls"""'], {}), "('allauth.urls')\n", (955, 971), False, 'from django.conf.urls import include\n'), ((995, 1022), 'django.conf.urls.include', 'include', (['"""allauth_2fa.urls"""'], {}), "('allauth_2fa.urls')\n", (1002, 1022), False, 'from django.conf.urls import include\n'), ((1046, 1092), 'django.conf.urls.include', 'include', (['"""profiles.urls"""'], {'namespace': '"""profiles"""'}), "('profiles.urls', namespace='profiles')\n", (1053, 1092), False, 'from django.conf.urls import include\n'), ((1116, 1160), 'django.conf.urls.include', 'include', (['"""tickets.urls"""'], {'namespace': '"""tickets"""'}), "('tickets.urls', namespace='tickets')\n", (1123, 1160), False, 'from django.conf.urls import include\n'), ((1181, 1219), 'django.conf.urls.include', 'include', (['"""shop.urls"""'], {'namespace': '"""shop"""'}), "('shop.urls', namespace='shop')\n", (1188, 1219), False, 'from django.conf.urls import include\n'), ((1240, 1278), 'django.conf.urls.include', 'include', (['"""news.urls"""'], {'namespace': '"""news"""'}), "('news.urls', namespace='news')\n", (1247, 1278), False, 'from django.conf.urls import include\n'), ((1311, 1361), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""contact.html"""'}), 
"(template_name='contact.html')\n", (1331, 1361), False, 'from django.views.generic import TemplateView\n'), ((1406, 1452), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""coc.html"""'}), "(template_name='coc.html')\n", (1426, 1452), False, 'from django.views.generic import TemplateView\n'), ((1490, 1509), 'allauth.account.views.LoginView.as_view', 'LoginView.as_view', ([], {}), '()\n', (1507, 1509), False, 'from allauth.account.views import LoginView, LogoutView\n'), ((1554, 1574), 'allauth.account.views.LogoutView.as_view', 'LogoutView.as_view', ([], {}), '()\n', (1572, 1574), False, 'from allauth.account.views import LoginView, LogoutView\n'), ((1645, 1708), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""legal/privacy_policy.html"""'}), "(template_name='legal/privacy_policy.html')\n", (1665, 1708), False, 'from django.views.generic import TemplateView\n'), ((1807, 1884), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""legal/general_terms_and_conditions.html"""'}), "(template_name='legal/general_terms_and_conditions.html')\n", (1827, 1884), False, 'from django.views.generic import TemplateView\n'), ((2092, 2114), 'camps.views.CampListView.as_view', 'CampListView.as_view', ([], {}), '()\n', (2112, 2114), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((2154, 2196), 'django.conf.urls.include', 'include', (['"""tokens.urls"""'], {'namespace': '"""tokens"""'}), "('tokens.urls', namespace='tokens')\n", (2161, 2196), False, 'from django.conf.urls import include\n'), ((2217, 2255), 'django.conf.urls.include', 'include', (['"""maps.urls"""'], {'namespace': '"""maps"""'}), "('maps.urls', namespace='maps')\n", (2224, 2255), False, 'from django.conf.urls import include\n'), ((2319, 2345), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (2343, 2345), False, 
'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((2469, 2495), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (2493, 2495), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((2630, 2656), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (2654, 2656), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((2767, 2793), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (2791, 2793), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((2912, 2938), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (2936, 2938), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((3065, 3091), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (3089, 3091), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((3218, 3244), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (3242, 3244), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((3374, 3400), 'camps.views.CampRedirectView.as_view', 'CampRedirectView.as_view', ([], {}), '()\n', (3398, 3400), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((3508, 3528), 'people.views.PeopleView.as_view', 'PeopleView.as_view', ([], {}), '()\n', (3526, 3528), False, 'from people.views import PeopleView\n'), ((5832, 5859), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (5839, 5859), False, 'from django.conf.urls import include\n'), ((3677, 3701), 'camps.views.CampDetailView.as_view', 'CampDetailView.as_view', ([], {}), '()\n', (3699, 3701), False, 'from camps.views import CampDetailView, CampListView, CampRedirectView\n'), ((3754, 
3776), 'info.views.CampInfoView.as_view', 'CampInfoView.as_view', ([], {}), '()\n', (3774, 3776), False, 'from info.views import CampInfoView\n'), ((3825, 3869), 'django.conf.urls.include', 'include', (['"""program.urls"""'], {'namespace': '"""program"""'}), "('program.urls', namespace='program')\n", (3832, 3869), False, 'from django.conf.urls import include\n'), ((3906, 3928), 'sponsors.views.SponsorsView.as_view', 'SponsorsView.as_view', ([], {}), '()\n', (3926, 3928), False, 'from sponsors.views import SponsorsView\n'), ((3982, 4000), 'bar.views.MenuView.as_view', 'MenuView.as_view', ([], {}), '()\n', (3998, 4000), False, 'from bar.views import MenuView\n'), ((5080, 5120), 'django.conf.urls.include', 'include', (['"""teams.urls"""'], {'namespace': '"""teams"""'}), "('teams.urls', namespace='teams')\n", (5087, 5120), False, 'from django.conf.urls import include\n'), ((5158, 5206), 'django.conf.urls.include', 'include', (['"""rideshare.urls"""'], {'namespace': '"""rideshare"""'}), "('rideshare.urls', namespace='rideshare')\n", (5165, 5206), False, 'from django.conf.urls import include\n'), ((5245, 5295), 'django.conf.urls.include', 'include', (['"""backoffice.urls"""'], {'namespace': '"""backoffice"""'}), "('backoffice.urls', namespace='backoffice')\n", (5252, 5295), False, 'from django.conf.urls import include\n'), ((5332, 5356), 'feedback.views.FeedbackCreate.as_view', 'FeedbackCreate.as_view', ([], {}), '()\n', (5354, 5356), False, 'from feedback.views import FeedbackCreate\n'), ((5409, 5453), 'django.conf.urls.include', 'include', (['"""economy.urls"""'], {'namespace': '"""economy"""'}), "('economy.urls', namespace='economy')\n", (5416, 5453), False, 'from django.conf.urls import include\n'), ((5490, 5536), 'django.conf.urls.include', 'include', (['"""wishlist.urls"""'], {'namespace': '"""wishlist"""'}), "('wishlist.urls', namespace='wishlist')\n", (5497, 5536), False, 'from django.conf.urls import include\n'), ((5575, 5625), 'django.conf.urls.include', 
'include', (['"""facilities.urls"""'], {'namespace': '"""facilities"""'}), "('facilities.urls', namespace='facilities')\n", (5582, 5625), False, 'from django.conf.urls import include\n'), ((5663, 5711), 'django.conf.urls.include', 'include', (['"""phonebook.urls"""'], {'namespace': '"""phonebook"""'}), "('phonebook.urls', namespace='phonebook')\n", (5670, 5711), False, 'from django.conf.urls import include\n'), ((4163, 4188), 'villages.views.VillageListView.as_view', 'VillageListView.as_view', ([], {}), '()\n', (4186, 4188), False, 'from villages.views import VillageDeleteView, VillageDetailView, VillageListView, VillageUpdateView\n'), ((4333, 4360), 'villages.views.VillageDeleteView.as_view', 'VillageDeleteView.as_view', ([], {}), '()\n', (4358, 4360), False, 'from villages.views import VillageDeleteView, VillageDetailView, VillageListView, VillageUpdateView\n'), ((4567, 4594), 'villages.views.VillageUpdateView.as_view', 'VillageUpdateView.as_view', ([], {}), '()\n', (4592, 4594), False, 'from villages.views import VillageDeleteView, VillageDetailView, VillageListView, VillageUpdateView\n'), ((4866, 4893), 'villages.views.VillageDetailView.as_view', 'VillageDetailView.as_view', ([], {}), '()\n', (4891, 4893), False, 'from villages.views import VillageDeleteView, VillageDetailView, VillageListView, VillageUpdateView\n')]
|
# from matplotlib import rc
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
from particle.statistics import (
calculate_avg_vel,
calculate_l1_convergence,
moving_average,
)
from particle.processing import (
get_master_yaml,
get_parameter_range,
match_parameters,
load_traj_data,
)
# Standard plotting choices
# rc("text", usetex=True)
sns.set(style="white", context="talk")

# Parameters used to select runs from the experiment history. Keys left
# commented out are not constrained when matching runs; "gamma" is filled in
# per iteration of the loop below. Set "particle_count" explicitly when
# analysing runs with a count other than the 600 default.
search_parameters = {
    # "scaling": "Local",
    # "D": 0.25,
    # "phi": "Gamma",
    # "dt": 0.005,
    # "G": "Smooth",
    # "option": "numba",
    # "initial_dist_x": "one_cluster",
    # "T_end": 200.0,
    # "initial_dist_v": "pos_normal_dn",
    # "particle_count": 600,
}

# os.chdir("D:/InteractingParticleSystems/noisysystem_temp")
os.chdir("E:/")
# os.chdir("/Volumes/Extreme SSD/InteractingParticleSystems/noisysystem_temp")

# Path to YAML file relative to current directory
yaml_path = "./TimestepExperiments/LowGammaLoweringTimestepLowParticles"
# "../Experiments/one_cluster_low_gamma_ten_runs"
history = get_master_yaml(yaml_path)

fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 3), sharex=True)
cm = plt.get_cmap("coolwarm")
# Diverging colour scale centred on gamma = 0.05.
cNorm = colors.DivergingNorm(vmin=0.01, vcenter=0.05, vmax=0.25)
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
gammas = get_parameter_range("gamma", history)

for gamma in gammas:
    search_parameters["gamma"] = gamma
    file_names = match_parameters(search_parameters, history)
    for idx, file_name in enumerate(file_names):
        print(file_name)
        t, x, v = load_traj_data(file_name, data_path="Experiments/Data.nosync/")
        error = calculate_l1_convergence(t, x, v)
        avg_vel = calculate_avg_vel(t, x, v)
        if idx == 0:
            # Allocate per-run storage once the trajectory length is known.
            avg_vel_store = np.zeros((len(file_names), len(avg_vel)))
            error_store = np.zeros((len(file_names), len(error)))
        # Faint individual runs (zorder 1); the smoothed mean is drawn on top
        # after the loop.
        ax1.plot(
            t,
            error,
            color=scalarMap.to_rgba(history[file_name]["gamma"]),
            label=f"{history[file_name]['gamma']}",
            alpha=0.1,
            zorder=1,
        )
        ax2.plot(
            t,
            avg_vel,
            color=scalarMap.to_rgba(history[file_name]["gamma"]),
            label=f"{history[file_name]['gamma']}",
            alpha=0.1,
            zorder=1,
        )
        error_store[idx, :] = error
        avg_vel_store[idx, :] = avg_vel

# Expected equilibrium l1 error, keyed by particle count.
expected_errors = {
    "480": 7.52,
    "600": 6.69,
    "700": 6.26,
    "1000": 5.25,
}
# BUG FIX: "particle_count" is commented out of search_parameters above, so
# the original direct indexing raised KeyError here. Fall back to 600, the
# default this experiment series used (see the commented entry above).
particle_count = search_parameters.get("particle_count", 600)
exp_error = expected_errors[str(particle_count)]
ax1.plot([0, t[-1]], [exp_error, exp_error], "k--", alpha=0.2)
# Moving average of window 20 shortens the series by 19 samples, hence t[19:].
ax1.plot(
    t[19:], moving_average(np.mean(error_store, axis=0), n=20), "r",
)
ax2.plot([0, t[-1]], [1, 1], "k--", alpha=0.2)
ax2.plot(
    t[19:], moving_average(np.mean(avg_vel_store, axis=0), n=20), "r",
)
print(
    f"Final difference in distance is {moving_average(np.mean(error_store, axis=0), n=20)[-1] - exp_error}"
)
print(
    f"Final difference in velocity is {1 - moving_average(np.mean(avg_vel_store, axis=0), n=20)[-1]}"
)
ax1.set(xlabel="Time", ylabel=r"$\ell^1$ Error")
ax2.set(xlabel="Time", ylabel=r"$\bar{M}^N(t)$")
# cbar = fig.colorbar(scalarMap, ticks=np.arange(0, max(gammas), 0.05))
# cbar.set_label(r"Interaction $\gamma$", rotation=270)
# cbar.ax.get_yaxis().labelpad = 15
plt.subplots_adjust(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)
plt.tight_layout()
plt.show()
# fig.savefig(f"OneClusterVaryGamma_longrun_log.jpg", dpi=300)
|
[
"matplotlib.pyplot.tight_layout",
"particle.processing.get_parameter_range",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"matplotlib.cm.ScalarMappable",
"particle.processing.load_traj_data",
"particle.statistics.calculate_l1_convergence",
"matplotlib.pyplot.subplots",
"matplotlib.colors.DivergingNorm",
"particle.processing.match_parameters",
"particle.statistics.calculate_avg_vel",
"numpy.mean",
"particle.processing.get_master_yaml",
"matplotlib.pyplot.subplots_adjust",
"seaborn.set",
"os.chdir"
] |
[((465, 503), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'context': '"""talk"""'}), "(style='white', context='talk')\n", (472, 503), True, 'import seaborn as sns\n'), ((878, 893), 'os.chdir', 'os.chdir', (['"""E:/"""'], {}), "('E:/')\n", (886, 893), False, 'import os\n'), ((1157, 1183), 'particle.processing.get_master_yaml', 'get_master_yaml', (['yaml_path'], {}), '(yaml_path)\n', (1172, 1183), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((1203, 1251), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 3)', 'sharex': '(True)'}), '(1, 2, figsize=(10, 3), sharex=True)\n', (1215, 1251), True, 'import matplotlib.pyplot as plt\n'), ((1257, 1281), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (1269, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1346), 'matplotlib.colors.DivergingNorm', 'colors.DivergingNorm', ([], {'vmin': '(0.01)', 'vcenter': '(0.05)', 'vmax': '(0.25)'}), '(vmin=0.01, vcenter=0.05, vmax=0.25)\n', (1310, 1346), True, 'import matplotlib.colors as colors\n'), ((1359, 1400), 'matplotlib.cm.ScalarMappable', 'mplcm.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cm'}), '(norm=cNorm, cmap=cm)\n', (1379, 1400), True, 'import matplotlib.cm as mplcm\n'), ((1410, 1447), 'particle.processing.get_parameter_range', 'get_parameter_range', (['"""gamma"""', 'history'], {}), "('gamma', history)\n", (1429, 1447), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((4107, 4184), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.07)', 'right': '(0.97)', 'bottom': '(0.15)', 'top': '(0.9)', 'wspace': '(0.23)'}), '(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)\n', (4126, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4185, 4203), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), 
'()\n', (4201, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1772), 'particle.processing.match_parameters', 'match_parameters', (['search_parameters', 'history'], {}), '(search_parameters, history)\n', (1744, 1772), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((1865, 1928), 'particle.processing.load_traj_data', 'load_traj_data', (['file_name'], {'data_path': '"""Experiments/Data.nosync/"""'}), "(file_name, data_path='Experiments/Data.nosync/')\n", (1879, 1928), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((1945, 1978), 'particle.statistics.calculate_l1_convergence', 'calculate_l1_convergence', (['t', 'x', 'v'], {}), '(t, x, v)\n', (1969, 1978), False, 'from particle.statistics import calculate_avg_vel, calculate_l1_convergence, moving_average\n'), ((1997, 2023), 'particle.statistics.calculate_avg_vel', 'calculate_avg_vel', (['t', 'x', 'v'], {}), '(t, x, v)\n', (2014, 2023), False, 'from particle.statistics import calculate_avg_vel, calculate_l1_convergence, moving_average\n'), ((3443, 3471), 'numpy.mean', 'np.mean', (['error_store'], {'axis': '(0)'}), '(error_store, axis=0)\n', (3450, 3471), True, 'import numpy as np\n'), ((3571, 3601), 'numpy.mean', 'np.mean', (['avg_vel_store'], {'axis': '(0)'}), '(avg_vel_store, axis=0)\n', (3578, 3601), True, 'import numpy as np\n'), ((3678, 3706), 'numpy.mean', 'np.mean', (['error_store'], {'axis': '(0)'}), '(error_store, axis=0)\n', (3685, 3706), True, 'import numpy as np\n'), ((3798, 3828), 'numpy.mean', 'np.mean', (['avg_vel_store'], {'axis': '(0)'}), '(avg_vel_store, axis=0)\n', (3805, 3828), True, 'import numpy as np\n')]
|
import argparse
import ast
import functools
import os
from datetime import datetime

import paddle
from paddle.static import InputSpec

from utils.se_resnet_vd import SE_ResNet_vd
from utils.utility import add_arguments, print_arguments
# Command line interface: input shape, output directory, and the checkpoint
# directory to export from.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('input_shape', str, '(None, 1, 257, 257)', '数据输入的形状')
add_arg('save_model', str, 'models/', '模型保存的路径')
add_arg('resume', str, 'models/epoch_49', '导出模型文件夹路径')
args = parser.parse_args()
print_arguments(args)

model = SE_ResNet_vd()
# SECURITY FIX: the original called eval() on a CLI argument, which executes
# arbitrary code. ast.literal_eval accepts only Python literals such as the
# expected "(None, 1, 257, 257)" tuple and raises ValueError on anything else.
input_shape = ast.literal_eval(args.input_shape)
paddle.summary(model, input_size=input_shape)

# Restore the trained weights from the checkpoint directory.
model.set_state_dict(paddle.load(os.path.join(args.resume, 'model.pdparams')))
print('[%s] 成功加载模型参数和优化方法参数' % datetime.now())

# Export the inference model. exist_ok=True removes the race between the
# original's exists() check and makedirs() call.
infer_dir = os.path.join(args.save_model, 'infer')
os.makedirs(infer_dir, exist_ok=True)
save_path = os.path.join(args.save_model, 'infer/model')
# list(input_shape) generalises the original element-by-element [shape[0..3]]
# construction to input shapes of any rank.
paddle.jit.save(layer=model,
                path=save_path,
                input_spec=[InputSpec(shape=list(input_shape), dtype=paddle.float32)])
print('[%s] 模型导出成功:%s' % (datetime.now(), save_path))
|
[
"functools.partial",
"argparse.ArgumentParser",
"utils.utility.print_arguments",
"datetime.datetime.now",
"paddle.summary",
"utils.se_resnet_vd.SE_ResNet_vd",
"os.path.join",
"paddle.static.InputSpec"
] |
[((236, 280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (259, 280), False, 'import argparse\n'), ((291, 341), 'functools.partial', 'functools.partial', (['add_arguments'], {'argparser': 'parser'}), '(add_arguments, argparser=parser)\n', (308, 341), False, 'import functools\n'), ((590, 611), 'utils.utility.print_arguments', 'print_arguments', (['args'], {}), '(args)\n', (605, 611), False, 'from utils.utility import add_arguments, print_arguments\n'), ((621, 635), 'utils.se_resnet_vd.SE_ResNet_vd', 'SE_ResNet_vd', ([], {}), '()\n', (633, 635), False, 'from utils.se_resnet_vd import SE_ResNet_vd\n'), ((673, 718), 'paddle.summary', 'paddle.summary', (['model'], {'input_size': 'input_shape'}), '(model, input_size=input_shape)\n', (687, 718), False, 'import paddle\n'), ((753, 796), 'os.path.join', 'os.path.join', (['args.resume', '"""model.pdparams"""'], {}), "(args.resume, 'model.pdparams')\n", (765, 796), False, 'import os\n'), ((845, 859), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (857, 859), False, 'from datetime import datetime\n'), ((879, 917), 'os.path.join', 'os.path.join', (['args.save_model', '"""infer"""'], {}), "(args.save_model, 'infer')\n", (891, 917), False, 'import os\n'), ((936, 974), 'os.path.join', 'os.path.join', (['args.save_model', '"""infer"""'], {}), "(args.save_model, 'infer')\n", (948, 974), False, 'import os\n'), ((1026, 1070), 'os.path.join', 'os.path.join', (['args.save_model', '"""infer/model"""'], {}), "(args.save_model, 'infer/model')\n", (1038, 1070), False, 'import os\n'), ((1100, 1207), 'paddle.static.InputSpec', 'InputSpec', ([], {'shape': '[input_shape[0], input_shape[1], input_shape[2], input_shape[3]]', 'dtype': 'paddle.float32'}), '(shape=[input_shape[0], input_shape[1], input_shape[2],\n input_shape[3]], dtype=paddle.float32)\n', (1109, 1207), False, 'from paddle.static import InputSpec\n'), ((1246, 1260), 'datetime.datetime.now', 
'datetime.now', ([], {}), '()\n', (1258, 1260), False, 'from datetime import datetime\n'), ((1262, 1306), 'os.path.join', 'os.path.join', (['args.save_model', '"""infer/model"""'], {}), "(args.save_model, 'infer/model')\n", (1274, 1306), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.resources.types import merchant_center_link
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
# Registers this module's message types with the proto-plus marshal for the
# google.ads.googleads.v4 package. NOTE(review): this file appears to be
# protoc/proto-plus generated -- prefer regenerating over hand-editing.
__protobuf__ = proto.module(
    package="google.ads.googleads.v4.services",
    marshal="google.ads.googleads.v4",
    manifest={
        "ListMerchantCenterLinksRequest",
        "ListMerchantCenterLinksResponse",
        "GetMerchantCenterLinkRequest",
        "MutateMerchantCenterLinkRequest",
        "MerchantCenterLinkOperation",
        "MutateMerchantCenterLinkResponse",
        "MutateMerchantCenterLinkResult",
    },
)
class ListMerchantCenterLinksRequest(proto.Message):
    r"""Request message for
    [MerchantCenterLinkService.ListMerchantCenterLinks][google.ads.googleads.v4.services.MerchantCenterLinkService.ListMerchantCenterLinks].

    Attributes:
        customer_id (str):
            Required. The ID of the customer onto which
            to apply the Merchant Center link list
            operation.
    """
    # Proto field 1: the customer whose links are listed.
    customer_id = proto.Field(proto.STRING, number=1)
class ListMerchantCenterLinksResponse(proto.Message):
    r"""Response message for
    [MerchantCenterLinkService.ListMerchantCenterLinks][google.ads.googleads.v4.services.MerchantCenterLinkService.ListMerchantCenterLinks].

    Attributes:
        merchant_center_links (Sequence[google.ads.googleads.v4.resources.types.MerchantCenterLink]):
            Merchant Center links available for the
            requested customer
    """
    # Repeated proto field 1: zero or more links for the requested customer.
    merchant_center_links = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=merchant_center_link.MerchantCenterLink,
    )
class GetMerchantCenterLinkRequest(proto.Message):
    r"""Request message for
    [MerchantCenterLinkService.GetMerchantCenterLink][google.ads.googleads.v4.services.MerchantCenterLinkService.GetMerchantCenterLink].

    Attributes:
        resource_name (str):
            Required. Resource name of the Merchant
            Center link.
    """
    # Proto field 1: full resource name of the link to fetch.
    resource_name = proto.Field(proto.STRING, number=1)
class MutateMerchantCenterLinkRequest(proto.Message):
    r"""Request message for
    [MerchantCenterLinkService.MutateMerchantCenterLink][google.ads.googleads.v4.services.MerchantCenterLinkService.MutateMerchantCenterLink].

    Attributes:
        customer_id (str):
            Required. The ID of the customer being
            modified.
        operation (google.ads.googleads.v4.services.types.MerchantCenterLinkOperation):
            Required. The operation to perform on the
            link
    """
    # Proto field 1: the customer being modified.
    customer_id = proto.Field(proto.STRING, number=1)
    # Proto field 2: the single update/remove operation to apply
    # (string message reference resolved via the proto.module manifest).
    operation = proto.Field(
        proto.MESSAGE, number=2, message="MerchantCenterLinkOperation",
    )
class MerchantCenterLinkOperation(proto.Message):
    r"""A single update on a Merchant Center link.

    Attributes:
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            FieldMask that determines which resource
            fields are modified in an update.
        update (google.ads.googleads.v4.resources.types.MerchantCenterLink):
            Update operation: The merchant center link is
            expected to have a valid resource name.
        remove (str):
            Remove operation: A resource name for the removed merchant
            center link is expected, in this format:
            ``customers/{customer_id}/merchantCenterLinks/{merchant_center_id}``
    """
    # Proto field 3: which fields of "update" are applied.
    update_mask = proto.Field(
        proto.MESSAGE, number=3, message=field_mask.FieldMask,
    )
    # "update" (field 1) and "remove" (field 2) share the "operation" oneof:
    # setting one clears the other.
    update = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="operation",
        message=merchant_center_link.MerchantCenterLink,
    )
    remove = proto.Field(proto.STRING, number=2, oneof="operation")
class MutateMerchantCenterLinkResponse(proto.Message):
    r"""Response message for Merchant Center link mutate.

    Attributes:
        result (google.ads.googleads.v4.services.types.MutateMerchantCenterLinkResult):
            Result for the mutate.
    """
    # Proto field 2: outcome of the single mutate operation.
    result = proto.Field(
        proto.MESSAGE, number=2, message="MutateMerchantCenterLinkResult",
    )
class MutateMerchantCenterLinkResult(proto.Message):
    r"""The result for the Merchant Center link mutate.
    Attributes:
        resource_name (str):
            Returned for successful operations.
    """
    resource_name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"proto.RepeatedField",
"proto.module",
"proto.Field"
] |
[((796, 1152), 'proto.module', 'proto.module', ([], {'package': '"""google.ads.googleads.v4.services"""', 'marshal': '"""google.ads.googleads.v4"""', 'manifest': "{'ListMerchantCenterLinksRequest', 'ListMerchantCenterLinksResponse',\n 'GetMerchantCenterLinkRequest', 'MutateMerchantCenterLinkRequest',\n 'MerchantCenterLinkOperation', 'MutateMerchantCenterLinkResponse',\n 'MutateMerchantCenterLinkResult'}"}), "(package='google.ads.googleads.v4.services', marshal=\n 'google.ads.googleads.v4', manifest={'ListMerchantCenterLinksRequest',\n 'ListMerchantCenterLinksResponse', 'GetMerchantCenterLinkRequest',\n 'MutateMerchantCenterLinkRequest', 'MerchantCenterLinkOperation',\n 'MutateMerchantCenterLinkResponse', 'MutateMerchantCenterLinkResult'})\n", (808, 1152), False, 'import proto\n'), ((1639, 1674), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (1650, 1674), False, 'import proto\n'), ((2140, 2238), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.MESSAGE'], {'number': '(1)', 'message': 'merchant_center_link.MerchantCenterLink'}), '(proto.MESSAGE, number=1, message=merchant_center_link.\n MerchantCenterLink)\n', (2159, 2238), False, 'import proto\n'), ((2635, 2670), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (2646, 2670), False, 'import proto\n'), ((3201, 3236), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (3212, 3236), False, 'import proto\n'), ((3253, 3328), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(2)', 'message': '"""MerchantCenterLinkOperation"""'}), "(proto.MESSAGE, number=2, message='MerchantCenterLinkOperation')\n", (3264, 3328), False, 'import proto\n'), ((4069, 4135), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'message': 'field_mask.FieldMask'}), '(proto.MESSAGE, number=3, message=field_mask.FieldMask)\n', (4080, 4135), False, 'import proto\n'), 
((4164, 4273), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(1)', 'oneof': '"""operation"""', 'message': 'merchant_center_link.MerchantCenterLink'}), "(proto.MESSAGE, number=1, oneof='operation', message=\n merchant_center_link.MerchantCenterLink)\n", (4175, 4273), False, 'import proto\n'), ((4321, 4375), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)', 'oneof': '"""operation"""'}), "(proto.STRING, number=2, oneof='operation')\n", (4332, 4375), False, 'import proto\n'), ((4653, 4731), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(2)', 'message': '"""MutateMerchantCenterLinkResult"""'}), "(proto.MESSAGE, number=2, message='MutateMerchantCenterLinkResult')\n", (4664, 4731), False, 'import proto\n'), ((4981, 5016), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (4992, 5016), False, 'import proto\n')]
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for the converter dag dependency to circuit and circuit to dag
dependency."""
import unittest
from qiskit.converters.dagdependency_to_circuit import dagdependency_to_circuit
from qiskit.converters.circuit_to_dagdependency import circuit_to_dagdependency
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
class TestCircuitToDagCanonical(QiskitTestCase):
    """Round-trip tests between QuantumCircuit and DAGDependency."""

    def test_circuit_and_dag_canonical(self):
        """A circuit converted to a DAG dependency and back is unchanged."""
        qubits = QuantumRegister(3)
        clbits = ClassicalRegister(3)
        original = QuantumCircuit(qubits, clbits)
        original.h(qubits[0])
        original.h(qubits[1])
        original.measure(qubits[0], clbits[0])
        original.measure(qubits[1], clbits[1])
        # Classically-conditioned gate exercises the dependency tracking.
        original.x(qubits[0]).c_if(clbits, 0x3)
        original.measure(qubits[0], clbits[0])
        original.measure(qubits[1], clbits[1])
        original.measure(qubits[2], clbits[2])
        round_tripped = dagdependency_to_circuit(
            circuit_to_dagdependency(original))
        self.assertEqual(round_tripped, original)
# Allow running this test module directly, outside the full test suite.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
[
"unittest.main",
"qiskit.QuantumCircuit",
"qiskit.converters.circuit_to_dagdependency.circuit_to_dagdependency",
"qiskit.ClassicalRegister",
"qiskit.converters.dagdependency_to_circuit.dagdependency_to_circuit",
"qiskit.QuantumRegister"
] |
[((1698, 1724), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1711, 1724), False, 'import unittest\n'), ((1090, 1108), 'qiskit.QuantumRegister', 'QuantumRegister', (['(3)'], {}), '(3)\n', (1105, 1108), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1122, 1142), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(3)'], {}), '(3)\n', (1139, 1142), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1164, 1186), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (1178, 1186), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1515, 1551), 'qiskit.converters.circuit_to_dagdependency.circuit_to_dagdependency', 'circuit_to_dagdependency', (['circuit_in'], {}), '(circuit_in)\n', (1539, 1551), False, 'from qiskit.converters.circuit_to_dagdependency import circuit_to_dagdependency\n'), ((1574, 1614), 'qiskit.converters.dagdependency_to_circuit.dagdependency_to_circuit', 'dagdependency_to_circuit', (['dag_dependency'], {}), '(dag_dependency)\n', (1598, 1614), False, 'from qiskit.converters.dagdependency_to_circuit import dagdependency_to_circuit\n')]
|
#!/usr/bin/env python3
"""Tool for determing and diplaying controller version and build info."""
import argparse
import re
import os
import subprocess
from subprocess import check_output
from datetime import datetime
from pipes import quote
def cmp(a, b):
    """Three-way compare (Python 2 ``cmp``): -1 if a < b, 0 if equal, 1 if a > b."""
    return int(a > b) - int(a < b)
def normalize(s):
    """Coerce *s* to an int when possible; otherwise return it unchanged."""
    try:
        return int(s)
    except ValueError:
        return s
def dotted_cmp(A, B):
    """Compare two dotted identifier strings component by component.

    Numeric components compare numerically (via ``normalize``), others as
    strings.  When one string is a prefix of the other, the shorter one
    compares as smaller, matching semver pre-release precedence
    (``alpha`` < ``alpha.2``).  Returns -1, 0 or 1.
    """
    # BUG FIX: the original called '.'.split(A) / '.'.split(B), which splits
    # the literal string "." and therefore compared ['.'] to ['.'] for every
    # input, making all dotted comparisons return 0.
    a = A.split('.')
    b = B.split('.')
    i = 0
    while True:
        if i >= len(a) and i >= len(b):
            return 0
        elif i >= len(a):
            # a exhausted first: fewer fields rank lower (semver 2.0 §11),
            # as version_selftest expects ('alpha' < 'alpha.2').
            return -1
        elif i >= len(b):
            return 1
        # NOTE(review): mixed numeric/alphanumeric components at the same
        # position (e.g. '1' vs 'a') would raise TypeError on Python 3 —
        # preserved from the original; confirm inputs never mix them.
        c = cmp(normalize(a[i]), normalize(b[i]))
        if c:
            return c
        i += 1
def pre_cmp(a, b):
    """Compare two pre-release strings; an empty pre-release ranks highest.

    A release with no pre-release part ('') sorts above any pre-release,
    per semver precedence.  Returns -1, 0 or 1.
    """
    if a == '' and b == '':
        return 0
    if a == '':
        return 1
    if b == '':
        return -1
    return dotted_cmp(a, b)
def version_selftest():
    """Sanity checks for the Version class; raises Exception on any failure."""
    # This list is written in its expected sort order.
    expected_order = [
        Version('0.1.1+builda.x.y'),
        Version('0.1.1'),
        Version('1.2.3-alpha'),
        Version('1.2.3-alpha.2'),
        Version('1.2.3-alpha.10'),
        Version('1.2.3-beta'),
        Version('1.2.3'),
        Version('10.0.0'),
    ]
    if expected_order != sorted(expected_order):
        print(expected_order)
        print(sorted(expected_order))
        raise Exception("Self-test: version sorting broken")
    if Version('10.2.3') < Version('1.1.0'):
        raise Exception("Self-test: version sorting broken")
    # Equality/inequality checks: (lhs, rhs, expected-equal).
    for lhs, rhs, same in (
            ('10.2.3', '1.1.0', False),
            ('1.1.0-alpha', '1.1.0', False),
            ('1.1.0', '1.1.0', True),
    ):
        if (Version(lhs) == Version(rhs)) != same:
            raise Exception("Self-test: version eq broken")
class NotVersionError(ValueError):
    """Raised when a string cannot be parsed as a semver version."""
class Version(object):
    """A semver 2.0 version: major.minor.patch[-prerelease][+build].

    ``major``/``minor``/``patch`` are stored as ints so comparisons are
    numeric; ``pre`` and ``build`` are strings ('' when absent).
    """

    def __init__(self, ver_str, prefix=''):
        """Parse *ver_str*; *prefix* (e.g. 'v') must precede the digits.

        Raises NotVersionError if the string is not valid semver.
        """
        # See semver.org 2.0
        pre_pat = r'-(?P<prerel>[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)'
        build_pat = r'\+(?P<build>[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)'
        ver_pat = r'(?P<major>[0-9]+)\.(?P<minor>[0-9]+)\.(?P<patch>[0-9]+)'
        ver_regx = re.compile('{}{}(?:{})?(?:{})?'
                              .format(prefix, ver_pat, pre_pat, build_pat))
        m = ver_regx.match(ver_str)
        if not m:
            raise NotVersionError(
                "Version {} is not in proper semver format".format(ver_str))
        # BUG FIX: the numeric components were previously kept as strings,
        # making comparisons lexicographic (so '9' > '10'). The regex
        # guarantees pure digits, so int() is always safe.
        self.major = int(m.group('major'))
        self.minor = int(m.group('minor'))
        self.patch = int(m.group('patch'))
        self.pre = m.group('prerel') or ''
        self.build = m.group('build') or ''

    def __lt__(self, other):
        """Define comparison operator (semver precedence; build is ignored)."""
        output = cmp(self.major, other.major) or \
            cmp(self.minor, other.minor) or \
            cmp(self.patch, other.patch) or \
            pre_cmp(self.pre, other.pre)
        return True if output == -1 else False

    def __eq__(self, other):
        """Define comparison operator (build metadata does not affect equality)."""
        output = cmp(self.major, other.major) or \
            cmp(self.minor, other.minor) or \
            cmp(self.patch, other.patch) or \
            pre_cmp(self.pre, other.pre)
        return True if output == 0 else False

    def __gt__(self, other):
        """Define comparison operator."""
        output = cmp(self.major, other.major) or \
            cmp(self.minor, other.minor) or \
            cmp(self.patch, other.patch) or \
            pre_cmp(self.pre, other.pre)
        return True if output == 1 else False

    def __str__(self):
        """Render back to canonical 'M.m.p[-pre][+build]' form."""
        s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
        if self.pre:
            s += '-'
            s += self.pre
        if self.build:
            s += '+'
            s += self.build
        return s

    def __repr__(self):
        """Represent the Version object."""
        return self.__str__()
class VersionInfo(object):
    """Represent all relevant version info.
    Determine the following information for a build:
    - semver
    - git SHA
    - buildinfo
    """

    def __init__(self, version_file):
        """Determine version, SHA, and buildinfo.

        *version_file* is an open file object whose content is a bare
        'major.minor.patch' string.  Runs ``git`` via subprocess to
        cross-check the working tree's tags, and raises Exception when the
        version file disagrees with the repository's tags.
        """
        data = version_file.read().strip()
        self._ver_file = Version(data)
        # The version file may only hold major.minor.patch — no pre-release
        # or build metadata.
        if self._ver_file.pre or self._ver_file.build:
            raise Exception("{} contains more than major.minor.patch"
                            .format(version_file.name))
        self._vers = {
            'major': self._ver_file.major,
            'minor': self._ver_file.minor,
            'patch': self._ver_file.patch,
            'tag': None,
        }
        # If this commit has any version tags, they must be <= the version
        # listed in the version-file. The version or a pre-release of the
        # version must be represented
        tags = check_output(['git', 'tag', '-l', '--contains', 'HEAD'
                              ]).decode().splitlines()
        tag_versions = []
        for t in tags:
            try:
                # Tags look like 'vX.Y.Z...'; non-version tags are ignored.
                ver = Version(t, prefix='v')
                if ver > self._ver_file:
                    raise Exception(
                        "next-version.txt appears to need updating." +
                        "This commit has a version tag ({}) that is newer."
                        .format(ver)
                    )
                tag_versions.append(ver)
            except NotVersionError:
                pass
        # Also ensure that the version file is the same major.minor.patch as
        # the newest tag
        if tag_versions:
            tag_ver = sorted(tag_versions)[-1]
            self._vers['tag'] = tag_ver
            if tag_ver.major != self._ver_file.major or \
                    tag_ver.minor != self._ver_file.minor or \
                    tag_ver.patch != self._ver_file.patch:
                raise Exception(
                    "next-version.txt appears to need updating." +
                    "This commit has a version tag ({}) that doesn't match."
                    .format(tag_ver)
                )
        # If the there is a tag matching the version file, it must point at the
        # current commit.
        as_tag = 'v' + data
        check_tag = check_output(['git', 'tag', '-l', as_tag]).decode().strip()
        if check_tag != "":
            # Make sure v$VERSION is a tag pointing at HEAD
            rc = subprocess.call(
                'git tag -l --contains HEAD | grep -q "^{}$"'.format(as_tag),
                shell=True)
            if rc != 0:
                raise Exception("next-version.txt appears to need updating." +
                                "There is already a tag with that version, " +
                                "and it isn't the current commit")
        # Add extra info
        self._vers['sha'] = check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
        self._vers['version_str'] = self.version_str()
        self._vers['build_info'] = self.buildinfo()

    def version_str(self):
        """
        Return the canonical version string for this build.
        Will start with a v, and will have
        """
        # Tagged builds use the tag verbatim; untagged ones are
        # work-in-progress builds of the next version.
        if self._vers['tag']:
            return self.format("{tag}")
        else:
            return self.format("{major}.{minor}.{patch}-WIP")

    def buildinfo(self):
        """Return a string that meets the semver buildinfo format."""
        # sha = check_output(['git', 'rev-parse', 'HEAD'])
        # Short unique description of HEAD, with '-dirty' when the working
        # tree has uncommitted changes.
        sha_info = check_output([
            'git', 'describe', '--match=DO_NOT_CREATE_THIS_TAG',
            '--always', '--dirty']).decode().strip()
        datestr = datetime.now().strftime('%Y%m%d%H%M%S')
        ci_info = self._ci_info()
        if ci_info:
            return "n{build_num}-{build_id}".format(**ci_info)
        else:
            # This is a developer build
            return "{}-{}-{}".format(
                os.environ['USER'],
                sha_info,
                datestr)

    def format(self, template):
        """
        Return a formatted string using the provided template.
        Template should contain version properties
        """
        return template.format(**self._vers)

    def _ci_info(self):
        """Return build metadata from CI env vars, or None for local builds.

        Supports Travis and GitLab; raises for any other CI environment.
        """
        if os.environ.get('CI', None):
            # This is a CI build. True for travis and gitlab at least.
            if os.environ.get('TRAVIS', None):
                info = {
                    'build_num': os.environ['TRAVIS_BUILD_NUMBER'],
                    'build_id': os.environ['TRAVIS_BUILD_ID'],
                    'slug': os.environ['TRAVIS_REPO_SLUG'],
                }
                # Travis build
                ci_tag = os.environ.get('TRAVIS_TAG', None)
                if ci_tag:
                    try:
                        # The CI tag, when it parses as a version, must be
                        # the newest tag we found in the repo.
                        ci_ver = Version(ci_tag, prefix='v')
                        if ci_ver != self._vers['tag']:
                            raise Exception("CI tag doesn't match newest tag")
                    except NotVersionError:
                        pass
                    info['tag'] = os.environ['TRAVIS_TAG']
                return info
            elif os.environ.get('CI_SERVER_NAME', None):
                info = {
                    'build_num': os.environ['CI_BUILD_REF'],
                    'build_id': os.environ['CI_BUILD_ID'],
                    'slug': os.environ['CI_PROJECT_PATH'],
                }
                # GitLab build
                ci_tag = os.environ.get('CI_COMMIT_REF_NAME', None)
                if ci_tag:
                    try:
                        ci_ver = Version(ci_tag, prefix='v')
                        if ci_ver != self._vers['tag']:
                            raise Exception("CI tag doesn't match newest tag")
                    except NotVersionError:
                        pass
                    info['tag'] = os.environ['CI_COMMIT_REF_NAME']
                return info
            else:
                raise Exception("This CI build isn't handled")
        return None
def print_docker_build_args(args):
    """Print '--label key=value' arguments for a docker build command."""
    info = VersionInfo(args.version_file)
    # OVERRIDE_VERSION can be passed in as a config parameter to the Jenkins
    # build job. This will override the version label in the Docker image.
    version = info.version_str()
    override = os.environ.get('OVERRIDE_VERSION', None)
    if override:
        version = override
    labels = {
        'scm_version': info.format('{sha}'),
        'version': version,
    }
    parts = []
    for key, value in labels.items():
        parts += ['--label', quote('{}={}'.format(key, value))]
    print(' '.join(parts))
def main():
    """Entry point for Version Tool."""
    version_selftest()
    parser = argparse.ArgumentParser(description='Version Tool')
    parser.add_argument('--version-file',
                        type=argparse.FileType('r'),
                        default='next-version.txt')
    subparsers = parser.add_subparsers(help='sub-command help')

    def add_printer(cmd, tmpl, **kwargs):
        # Register a sub-command that prints one formatted version property.
        def print_func(args):
            ver = VersionInfo(args.version_file)
            print(ver.format(tmpl))
        sub = subparsers.add_parser(cmd, **kwargs)
        sub.set_defaults(func=print_func)
        return sub

    # (command, template, help text) for the simple printer sub-commands.
    printers = (
        ('version', '{version_str}', 'print the version number'),
        ('build-info', '{build_info}', 'print the build-info'),
        ('major', '{major}', 'print the major version number'),
        ('minor', '{minor}', 'print the minor version number'),
        ('patch', '{patch}', 'print the patch version number'),
    )
    for cmd, tmpl, help_text in printers:
        add_printer(cmd, tmpl, help=help_text)
    sub = subparsers.add_parser('docker-build-args',
                                help='"--label" args to pass to docker build')
    sub.set_defaults(func=print_docker_build_args)
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
|
[
"argparse.ArgumentParser",
"subprocess.check_output",
"os.environ.get",
"datetime.datetime.now",
"argparse.FileType"
] |
[((10617, 10657), 'os.environ.get', 'os.environ.get', (['"""OVERRIDE_VERSION"""', 'None'], {}), "('OVERRIDE_VERSION', None)\n", (10631, 10657), False, 'import os\n'), ((11042, 11093), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Version Tool"""'}), "(description='Version Tool')\n", (11065, 11093), False, 'import argparse\n'), ((8511, 8537), 'os.environ.get', 'os.environ.get', (['"""CI"""', 'None'], {}), "('CI', None)\n", (8525, 8537), False, 'import os\n'), ((8625, 8655), 'os.environ.get', 'os.environ.get', (['"""TRAVIS"""', 'None'], {}), "('TRAVIS', None)\n", (8639, 8655), False, 'import os\n'), ((11165, 11187), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (11182, 11187), False, 'import argparse\n'), ((7922, 7936), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7934, 7936), False, 'from datetime import datetime\n'), ((8949, 8983), 'os.environ.get', 'os.environ.get', (['"""TRAVIS_TAG"""', 'None'], {}), "('TRAVIS_TAG', None)\n", (8963, 8983), False, 'import os\n'), ((9409, 9447), 'os.environ.get', 'os.environ.get', (['"""CI_SERVER_NAME"""', 'None'], {}), "('CI_SERVER_NAME', None)\n", (9423, 9447), False, 'import os\n'), ((9729, 9771), 'os.environ.get', 'os.environ.get', (['"""CI_COMMIT_REF_NAME"""', 'None'], {}), "('CI_COMMIT_REF_NAME', None)\n", (9743, 9771), False, 'import os\n'), ((5161, 5217), 'subprocess.check_output', 'check_output', (["['git', 'tag', '-l', '--contains', 'HEAD']"], {}), "(['git', 'tag', '-l', '--contains', 'HEAD'])\n", (5173, 5217), False, 'from subprocess import check_output\n'), ((6536, 6578), 'subprocess.check_output', 'check_output', (["['git', 'tag', '-l', as_tag]"], {}), "(['git', 'tag', '-l', as_tag])\n", (6548, 6578), False, 'from subprocess import check_output\n'), ((7127, 7169), 'subprocess.check_output', 'check_output', (["['git', 'rev-parse', 'HEAD']"], {}), "(['git', 'rev-parse', 'HEAD'])\n", (7139, 7169), False, 'from subprocess import check_output\n'), 
((7771, 7865), 'subprocess.check_output', 'check_output', (["['git', 'describe', '--match=DO_NOT_CREATE_THIS_TAG', '--always', '--dirty']"], {}), "(['git', 'describe', '--match=DO_NOT_CREATE_THIS_TAG',\n '--always', '--dirty'])\n", (7783, 7865), False, 'from subprocess import check_output\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from Cryptodome import Random
from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import pad, unpad
class AESHandler(object):
    """Static helpers wrapping PyCryptodome AES in GCM, CTR and CBC modes.

    All inputs and outputs are ``bytes``.  Failed GCM authentication is
    reported by returning ``b""`` rather than raising.
    """

    @staticmethod
    def generate_iv():
        """Return one AES block (16 bytes) of random data for use as an IV."""
        return Random.new().read(AES.block_size)

    @staticmethod
    def generate_key():
        """Return a freshly generated 256-bit AES key."""
        return Random.get_random_bytes(32)

    @staticmethod
    def aes_gcm_encrypt_with_iv(plain_text: bytes, hdr: bytes, key: bytes, iv: bytes):
        """Encrypt under AES-GCM with a caller-chosen nonce.

        *hdr* is authenticated but not encrypted.
        Returns (mac_tag, cipher_text).
        """
        aes = AES.new(key=key, mode=AES.MODE_GCM, nonce=iv)
        aes.update(hdr)
        encrypted, tag = aes.encrypt_and_digest(plain_text)
        return tag, encrypted

    @staticmethod
    def aes_gcm_decrypt_with_iv(cipher_text: bytes, hdr: bytes, mac_tag: bytes, key: bytes, iv: bytes):
        """Decrypt and verify AES-GCM data; returns b"" if verification fails."""
        aes = AES.new(key=key, mode=AES.MODE_GCM, nonce=iv)
        aes.update(hdr)
        try:
            return aes.decrypt_and_verify(cipher_text, mac_tag)
        except (ValueError, KeyError):
            return b""

    @staticmethod
    def aes_gcm_encrypt(plain_text: bytes, hdr: bytes, key: bytes):
        """Encrypt under AES-GCM with an auto-generated nonce.

        Returns (nonce, mac_tag, cipher_text).
        """
        aes = AES.new(key=key, mode=AES.MODE_GCM)
        aes.update(hdr)
        encrypted, tag = aes.encrypt_and_digest(plain_text)
        return aes.nonce, tag, encrypted

    @staticmethod
    def aes_gcm_decrypt(cipher_text: bytes, hdr: bytes, nonce: bytes, mac_tag: bytes, key: bytes):
        """Decrypt and verify AES-GCM data; returns b"" if verification fails."""
        aes = AES.new(key=key, mode=AES.MODE_GCM, nonce=nonce)
        aes.update(hdr)
        try:
            return aes.decrypt_and_verify(cipher_text, mac_tag)
        except (ValueError, KeyError):
            return b""

    @staticmethod
    def aes_ctr_encrypt(plain_text: bytes, key: bytes):
        """Encrypt under AES-CTR; returns (nonce, cipher_text)."""
        aes = AES.new(key=key, mode=AES.MODE_CTR)
        encrypted = aes.encrypt(plain_text)
        return aes.nonce, encrypted

    @staticmethod
    def aes_ctr_decrypt(cipher_text: bytes, nonce: bytes, key: bytes):
        """Decrypt AES-CTR data produced by :meth:`aes_ctr_encrypt`."""
        aes = AES.new(key=key, mode=AES.MODE_CTR, nonce=nonce)
        return aes.decrypt(cipher_text)

    @staticmethod
    def aes_cbc_encrypt(plain_text: bytes, key: bytes):
        """Encrypt under AES-CBC with PKCS#7 padding; returns (iv, cipher_text)."""
        iv = AESHandler.generate_iv()
        aes = AES.new(key=key, mode=AES.MODE_CBC, iv=iv)
        return aes.IV, aes.encrypt(pad(plain_text, AES.block_size))

    @staticmethod
    def aes_cbc_decrypt(cipher_text: bytes, iv: bytes, key: bytes):
        """Decrypt AES-CBC data and strip the PKCS#7 padding."""
        aes = AES.new(key=key, mode=AES.MODE_CBC, iv=iv)
        return unpad(aes.decrypt(cipher_text), AES.block_size)
|
[
"Cryptodome.Cipher.AES.new",
"Cryptodome.Random.new",
"Cryptodome.Random.get_random_bytes",
"Cryptodome.Util.Padding.pad"
] |
[((334, 361), 'Cryptodome.Random.get_random_bytes', 'Random.get_random_bytes', (['(32)'], {}), '(32)\n', (357, 361), False, 'from Cryptodome import Random\n'), ((504, 549), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_GCM', 'nonce': 'iv'}), '(key=key, mode=AES.MODE_GCM, nonce=iv)\n', (511, 549), False, 'from Cryptodome.Cipher import AES\n'), ((822, 867), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_GCM', 'nonce': 'iv'}), '(key=key, mode=AES.MODE_GCM, nonce=iv)\n', (829, 867), False, 'from Cryptodome.Cipher import AES\n'), ((1221, 1256), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_GCM'}), '(key=key, mode=AES.MODE_GCM)\n', (1228, 1256), False, 'from Cryptodome.Cipher import AES\n'), ((1560, 1608), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_GCM', 'nonce': 'nonce'}), '(key=key, mode=AES.MODE_GCM, nonce=nonce)\n', (1567, 1608), False, 'from Cryptodome.Cipher import AES\n'), ((1950, 1985), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_CTR'}), '(key=key, mode=AES.MODE_CTR)\n', (1957, 1985), False, 'from Cryptodome.Cipher import AES\n'), ((2205, 2253), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_CTR', 'nonce': 'nonce'}), '(key=key, mode=AES.MODE_CTR, nonce=nonce)\n', (2212, 2253), False, 'from Cryptodome.Cipher import AES\n'), ((2459, 2501), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_CBC', 'iv': 'iv'}), '(key=key, mode=AES.MODE_CBC, iv=iv)\n', (2466, 2501), False, 'from Cryptodome.Cipher import AES\n'), ((2680, 2722), 'Cryptodome.Cipher.AES.new', 'AES.new', ([], {'key': 'key', 'mode': 'AES.MODE_CBC', 'iv': 'iv'}), '(key=key, mode=AES.MODE_CBC, iv=iv)\n', (2687, 2722), False, 'from Cryptodome.Cipher import AES\n'), ((243, 255), 'Cryptodome.Random.new', 'Random.new', ([], {}), '()\n', (253, 255), False, 'from Cryptodome import Random\n'), 
((2543, 2574), 'Cryptodome.Util.Padding.pad', 'pad', (['plain_text', 'AES.block_size'], {}), '(plain_text, AES.block_size)\n', (2546, 2574), False, 'from Cryptodome.Util.Padding import pad, unpad\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-24 15:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4: adds the boolean flag
    # 'has_sector_specific_values' (default True) to the Sector model.

    dependencies = [
        ('climatemodels', '0074_remove_inputdatainformation_climate_data_sets'),
    ]

    operations = [
        migrations.AddField(
            model_name='sector',
            name='has_sector_specific_values',
            field=models.BooleanField(default=True),
        ),
    ]
|
[
"django.db.models.BooleanField"
] |
[((452, 485), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (471, 485), False, 'from django.db import migrations, models\n')]
|
from train_lstm import JigsawLSTMModel, CONFIG
from pre_process import encode_sentence
import numpy as np
import torch
import pandas as pd
from tqdm import tqdm
import gc
import ast,emoji, string, re
from torch.utils.data import Dataset, DataLoader
# PyTorch Lightning
import pytorch_lightning as pl
# Checkpoints for the five cross-validation folds; their predictions are
# averaged during inference.
MODEL_PATHS = [
    '../models/checkpoints/lstm/fold_0_lstm.ckpt',
    '../models/checkpoints/lstm/fold_1_lstm.ckpt',
    '../models/checkpoints/lstm/fold_2_lstm.ckpt',
    '../models/checkpoints/lstm/fold_3_lstm.ckpt',
    '../models/checkpoints/lstm/fold_4_lstm.ckpt'
]
class JigsawEncodedDataset(Dataset):
    """Dataset over a dataframe column of pre-encoded token-id arrays.

    *df* must have an ``encoded`` column holding numpy integer arrays,
    looked up positionally via ``.loc[idx]``.
    """

    def __init__(self, df):
        self.X = df["encoded"]

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        encoded = self.X.loc[idx].astype(np.int32)
        return {"encoding": torch.from_numpy(encoded)}
def valid_fn(model, dataloader1, dataloader2, device):
    """Run *model* over both validation loaders without gradient updates.

    dataloader1 feeds the "more toxic" texts, dataloader2 the "less toxic"
    ones.  Returns (less_toxic_preds, more_toxic_preds) as flat numpy arrays.
    """
    model.eval()
    model.freeze()
    model = model.to(device)

    def collect(dataloader):
        # Predict over one loader, flattening each batch to a 1-D array.
        preds = []
        for step, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
            _, outputs = model(batch['encoding'].to(device))
            preds.append(outputs.view(-1).cpu().detach().numpy())
        return preds

    more_toxic = collect(dataloader1)
    less_toxic = collect(dataloader2)
    gc.collect()
    return np.concatenate(less_toxic), np.concatenate(more_toxic)
def inference(model_paths, dataloader1, dataloader2, device):
    """Average per-fold predictions and print P(less_toxic < more_toxic).

    Loads one LSTM checkpoint per path, scores both loaders with each, and
    averages the fold predictions before comparing.
    """
    lt_folds, mt_folds = [], []
    for i, ckpt in enumerate(model_paths):
        model = JigsawLSTMModel.load_from_checkpoint(
            checkpoint_path=ckpt,
            n_classes=CONFIG['num_classes'],
            vocab_size=CONFIG['vocab_size'], embedding_dim=CONFIG['embedding_dim'], hidden_dim=CONFIG['hidden_dim'], num_layers=CONFIG['num_layers']
        )
        print(f"Getting predictions for model {i+1}")
        lt_preds, mt_preds = valid_fn(model, dataloader1, dataloader2, device)
        lt_folds.append(lt_preds)
        mt_folds.append(mt_preds)
    final_preds1 = np.mean(np.array(lt_folds), axis=0)
    final_preds2 = np.mean(np.array(mt_folds), axis=0)
    print(f'val is : {(final_preds1 < final_preds2).mean()}')
if __name__ == "__main__":
    def _decode(s):
        """Parse a space-separated string of token ids into an int ndarray."""
        # FIX: np.fromstring(s, dtype=int, sep=' ') is deprecated and warns
        # on every call; split + np.array is the supported equivalent and
        # yields the same result (including an empty array for '').
        return np.array(s.split(), dtype=int)

    # df1 holds the more-toxic validation texts, df2 the less-toxic ones.
    df1 = pd.read_csv("../input/jigsaw-toxic-severity-rating/validation_data_more_toxic.csv")
    df1['encoded'] = df1['encoded'].apply(_decode)
    df2 = pd.read_csv("../input/jigsaw-toxic-severity-rating/validation_data_less_toxic.csv")
    df2['encoded'] = df2['encoded'].apply(_decode)
    test_dataset1 = JigsawEncodedDataset(df1)
    test_loader1 = DataLoader(test_dataset1, batch_size=CONFIG['valid_batch_size'],
                              num_workers=8, shuffle=False, pin_memory=True)
    test_dataset2 = JigsawEncodedDataset(df2)
    test_loader2 = DataLoader(test_dataset2, batch_size=CONFIG['valid_batch_size'],
                              num_workers=8, shuffle=False, pin_memory=True)
    inference(MODEL_PATHS, test_loader1, test_loader2, CONFIG['device'])
|
[
"train_lstm.JigsawLSTMModel.load_from_checkpoint",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"gc.collect",
"numpy.mean",
"numpy.array",
"numpy.fromstring",
"numpy.concatenate"
] |
[((1535, 1547), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1545, 1547), False, 'import gc\n'), ((2256, 2278), 'numpy.array', 'np.array', (['final_preds1'], {}), '(final_preds1)\n', (2264, 2278), True, 'import numpy as np\n'), ((2298, 2327), 'numpy.mean', 'np.mean', (['final_preds1'], {'axis': '(0)'}), '(final_preds1, axis=0)\n', (2305, 2327), True, 'import numpy as np\n'), ((2347, 2369), 'numpy.array', 'np.array', (['final_preds2'], {}), '(final_preds2)\n', (2355, 2369), True, 'import numpy as np\n'), ((2389, 2418), 'numpy.mean', 'np.mean', (['final_preds2'], {'axis': '(0)'}), '(final_preds2, axis=0)\n', (2396, 2418), True, 'import numpy as np\n'), ((2521, 2609), 'pandas.read_csv', 'pd.read_csv', (['"""../input/jigsaw-toxic-severity-rating/validation_data_more_toxic.csv"""'], {}), "(\n '../input/jigsaw-toxic-severity-rating/validation_data_more_toxic.csv')\n", (2532, 2609), True, 'import pandas as pd\n'), ((2703, 2791), 'pandas.read_csv', 'pd.read_csv', (['"""../input/jigsaw-toxic-severity-rating/validation_data_less_toxic.csv"""'], {}), "(\n '../input/jigsaw-toxic-severity-rating/validation_data_less_toxic.csv')\n", (2714, 2791), True, 'import pandas as pd\n'), ((2940, 3055), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset1'], {'batch_size': "CONFIG['valid_batch_size']", 'num_workers': '(8)', 'shuffle': '(False)', 'pin_memory': '(True)'}), "(test_dataset1, batch_size=CONFIG['valid_batch_size'],\n num_workers=8, shuffle=False, pin_memory=True)\n", (2950, 3055), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3146, 3261), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset2'], {'batch_size': "CONFIG['valid_batch_size']", 'num_workers': '(8)', 'shuffle': '(False)', 'pin_memory': '(True)'}), "(test_dataset2, batch_size=CONFIG['valid_batch_size'],\n num_workers=8, shuffle=False, pin_memory=True)\n", (3156, 3261), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1559, 1583), 'numpy.concatenate', 'np.concatenate', 
(['LT_PREDS'], {}), '(LT_PREDS)\n', (1573, 1583), True, 'import numpy as np\n'), ((1584, 1608), 'numpy.concatenate', 'np.concatenate', (['MT_PREDS'], {}), '(MT_PREDS)\n', (1598, 1608), True, 'import numpy as np\n'), ((1768, 2012), 'train_lstm.JigsawLSTMModel.load_from_checkpoint', 'JigsawLSTMModel.load_from_checkpoint', ([], {'checkpoint_path': 'path', 'n_classes': "CONFIG['num_classes']", 'vocab_size': "CONFIG['vocab_size']", 'embedding_dim': "CONFIG['embedding_dim']", 'hidden_dim': "CONFIG['hidden_dim']", 'num_layers': "CONFIG['num_layers']"}), "(checkpoint_path=path, n_classes=CONFIG\n ['num_classes'], vocab_size=CONFIG['vocab_size'], embedding_dim=CONFIG[\n 'embedding_dim'], hidden_dim=CONFIG['hidden_dim'], num_layers=CONFIG[\n 'num_layers'])\n", (1804, 2012), False, 'from train_lstm import JigsawLSTMModel, CONFIG\n'), ((2655, 2691), 'numpy.fromstring', 'np.fromstring', (['x'], {'dtype': 'int', 'sep': '""" """'}), "(x, dtype=int, sep=' ')\n", (2668, 2691), True, 'import numpy as np\n'), ((2837, 2873), 'numpy.fromstring', 'np.fromstring', (['x'], {'dtype': 'int', 'sep': '""" """'}), "(x, dtype=int, sep=' ')\n", (2850, 2873), True, 'import numpy as np\n')]
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core import signals
from indico.core.logger import Logger
logger = Logger.get('events.notes')
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
    """Reassign note revisions from the merged-away user to the target user."""
    # Imported lazily to avoid a circular import at module load time.
    from indico.modules.events.notes.models.notes import EventNoteRevision
    EventNoteRevision.find(user_id=source.id).update({EventNoteRevision.user_id: target.id})
@signals.event_management.get_cloners.connect
def _get_attachment_cloner(sender, **kwargs):
    """Register the note cloner with the event-cloning machinery."""
    # Imported lazily to avoid a circular import at module load time.
    from indico.modules.events.notes.clone import NoteCloner
    return NoteCloner
|
[
"indico.core.logger.Logger.get",
"indico.modules.events.notes.models.notes.EventNoteRevision.find"
] |
[((336, 362), 'indico.core.logger.Logger.get', 'Logger.get', (['"""events.notes"""'], {}), "('events.notes')\n", (346, 362), False, 'from indico.core.logger import Logger\n'), ((518, 559), 'indico.modules.events.notes.models.notes.EventNoteRevision.find', 'EventNoteRevision.find', ([], {'user_id': 'source.id'}), '(user_id=source.id)\n', (540, 559), False, 'from indico.modules.events.notes.models.notes import EventNoteRevision\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/4 16:04
# @Email : <EMAIL>
# @Software: PyCharm
# @License: BSD 3-Clause
from itertools import combinations_with_replacement
import torch.nn.functional as F
import numpy as np
import os
import torch
from numpy import random
from torch import nn
from torch.nn import Module
from torch.utils import tensorboard
from cams.cam3d import GradCAM3dpp, GradCAM3d
from cams.nnn import Indexes
class Moudle1(Module):
    """Small 3-D conv model: 1x1x1 conv -> adaptive avg-pool -> MLP head.

    Takes a (batch, 1, D, H, W) tensor and produces (batch, 22) outputs.
    """

    def __init__(self, *args):
        super(Moudle1, self).__init__()
        in_channels, num_classes = 1, 22
        conv_channels = 1
        pool_shape = (10, 10, 10)
        # Flattened feature count after pooling: channels * x * y * x
        # (kept exactly as in the original sizing computation).
        self.link = conv_channels * pool_shape[0] * pool_shape[1] * pool_shape[0]
        self.model_conv = nn.Sequential(
            nn.Conv3d(in_channels, conv_channels, 1, stride=1, padding=0),
        )
        self.model_sigmod = nn.Sigmoid()
        self.avgpool = nn.AdaptiveAvgPool3d(pool_shape)
        self.model_Linear = nn.Sequential(
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(self.link, num_classes),
            nn.ReLU(True),
        )

    def forward(self, x, t=1):
        """Forward pass; ``t == 0`` enables verbose shape printing (debug)."""
        debug = (t == 0)
        x = self.model_conv(x)
        if debug:
            print("conv out", x.shape)
            # NOTE: the sigmoid is only applied on the debug path, matching
            # the original behavior.
            x = self.model_sigmod(x)
        x = self.avgpool(x)
        if debug:
            print("avgpool", x.shape)
        x = torch.flatten(x, start_dim=1, end_dim=-1)
        if debug:
            print("flatten", x.shape)
        x = self.model_Linear(x)
        if debug:
            print("linear", x.shape)
        return x
def run(train, test=None):
    """Fit a fresh Moudle1 on ``train`` and report the loss on ``test``.

    ``train``/``test`` are (inputs, targets) tensor pairs; ``test`` falls
    back to the training data when omitted. Per-step loss is streamed to
    the module-level tensorboard ``writer``. Returns the trained model.
    """
    if test is None:
        test = train
    inputs, targets = train
    net = Moudle1()
    dev = torch.device('cuda:0')
    # dev = torch.device('cpu')
    net.to(dev)
    learning_rate = 1e-4
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)  # generic optimiser (e.g. SGD, Adam)
    criterion = torch.nn.CrossEntropyLoss(reduction='mean')  # measures how close output is to target
    # criterion = torch.nn.MSELoss(reduction='mean')
    for step in range(20000):
        inputs = inputs.to(dev)
        targets = targets.to(dev)
        # step 0 triggers the model's verbose debug path (t == 0)
        prediction = net(inputs, step)
        loss = criterion(prediction, targets)
        if loss.item() < 0.001:
            break
        print(step, loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar('loss', loss.item(), global_step=step)
    eval_x, eval_y = test
    eval_x = eval_x.to(dev)
    eval_y = eval_y.to(dev)
    prediction = net(eval_x)
    final_loss = criterion(prediction, eval_y)
    print(final_loss.item())
    writer.close()
    return net
# Fix both the NumPy and the Torch RNG seeds so runs are reproducible.
random.seed(0)
torch.random.manual_seed(0)
def get():
    """Build one synthetic batch of 120 sparse 10x10x10 volumes.

    Every volume receives two voxels set to 1.0: one at a position
    (i, j, k) taken from combinations_with_replacement(range(1, 9), 3)
    and one at a random position. The label is i + j + k - 3.

    Returns (x, y): x is float32 of shape (120, 1, 10, 10, 10) scaled so
    its maximum is 1.0; y is float32 of shape (120, 1).
    """
    volumes = random.random((120, 10, 10, 10)) + 0.00001
    positions = list(combinations_with_replacement(range(1, 9), 3))
    labels = []
    for idx, pos in enumerate(positions):
        i, j, k = pos
        print(idx, pos)
        volumes[idx, i, j, k] = 1.0
        r1, r2, r3 = random.randint(1, 9, 3)
        volumes[idx, r1, r2, r3] = 1.0
        labels.append(i + j + k - 3)
    x = torch.tensor(volumes).unsqueeze(dim=1)
    y = torch.tensor(labels).reshape((-1, 1))
    x = x.type(torch.float32)
    y = y.type(torch.float32)
    return x / torch.max(x), y
def del_files(path_file):
    """Recursively delete every file under ``path_file``.

    The directory itself is kept, and so are (now empty) subdirectories:
    the recursion removes files only, it never calls rmdir.
    """
    for entry in os.listdir(path_file):
        full_path = os.path.join(path_file, entry)
        if os.path.isdir(full_path):
            # descend into the subdirectory and clear it out
            del_files(full_path)
        else:
            os.remove(full_path)
# Shared TensorBoard writer used by run() above and the CAM dump below.
writer = tensorboard.SummaryWriter(log_dir="/home/iap13/wcx/tb/exp1", flush_secs=10)
# Build 10 synthetic batches of 120 samples each -> 1200 samples total.
data = [get() for i in range(10)]
x, y = zip(*data)
x = torch.cat(x, dim=0)
y = torch.cat(y, dim=0)
y_ = torch.zeros((1200, 22))
y = y.type(torch.long).squeeze()
# NOTE(review): index_fill along dim 1 sets whole columns indexed by y to 1
# for EVERY row -- this is not a per-row one-hot encoding; y_ is also never
# used below. Confirm intent.
y_ = torch.index_fill(y_, 1, y, torch.tensor(1))
# model = run((x, y), None)
# torch.save(model.state_dict(), "model_dict")
# Reload previously trained weights instead of retraining.
model = Moudle1()
model.load_state_dict(torch.load("model_dict"))
device = torch.device('cpu')
model.to(device)
model.eval()
# Grad-CAM++ over the last layer of the conv stack.
target_layer = model.model_conv[-1]
# wrapped_model = GradCAM3d(model, target_layer)
wrapped_model = GradCAM3dpp(model, target_layer)
# wrapped_model = SmoothGradCAMpp(model, target_layer)
x = x.to(device)
y = y.to(device)
# for i in range(0, 1):
#     xi = x[i]
#     yi = y[i]
#
#     tensor_shown = xi.unsqueeze(0)
#
#     cams, idx = wrapped_model.forward(tensor_shown)
#     cams = cams.squeeze().cpu().numpy()
#     xi = xi.squeeze().cpu().numpy()
#     for t in range(10):
#         writer.add_images('countdown%d'%i,
#                           cams[t],
#                           global_step=t,
#                           dataformats='HW')
# writer.close()
# Dump the CAM slices of sample 2 as a 10-step image sequence.
i=2
xi = x[i]
yi = y[i]
tensor_shown = xi.unsqueeze(0)
cams, idx = wrapped_model.forward(tensor_shown)
cams = cams.squeeze().cpu().numpy()
xi = xi.squeeze().cpu().numpy()
for t in range(10):
    writer.add_images('countdown%d'%i,
                      cams[t],
                      global_step=t,
                      dataformats='HW')
writer.close()
# model = Moudle1()
# writer.add_graph(model.eval(),x)
# writer.close()
|
[
"torch.nn.Dropout",
"os.remove",
"numpy.random.seed",
"torch.cat",
"numpy.random.randint",
"cams.cam3d.GradCAM3dpp",
"torch.device",
"os.path.join",
"torch.flatten",
"torch.nn.Conv3d",
"torch.load",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Linear",
"torch.zeros",
"torch.random.manual_seed",
"torch.max",
"os.listdir",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.ReLU",
"os.path.isdir",
"torch.nn.CrossEntropyLoss",
"numpy.random.random",
"torch.tensor"
] |
[((3557, 3571), 'numpy.random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3568, 3571), False, 'from numpy import random\n'), ((3572, 3599), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(0)'], {}), '(0)\n', (3596, 3599), False, 'import torch\n'), ((5152, 5227), 'torch.utils.tensorboard.SummaryWriter', 'tensorboard.SummaryWriter', ([], {'log_dir': '"""/home/iap13/wcx/tb/exp1"""', 'flush_secs': '(10)'}), "(log_dir='/home/iap13/wcx/tb/exp1', flush_secs=10)\n", (5177, 5227), False, 'from torch.utils import tensorboard\n'), ((5286, 5305), 'torch.cat', 'torch.cat', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (5295, 5305), False, 'import torch\n'), ((5310, 5329), 'torch.cat', 'torch.cat', (['y'], {'dim': '(0)'}), '(y, dim=0)\n', (5319, 5329), False, 'import torch\n'), ((5336, 5359), 'torch.zeros', 'torch.zeros', (['(1200, 22)'], {}), '((1200, 22))\n', (5347, 5359), False, 'import torch\n'), ((5595, 5614), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5607, 5614), False, 'import torch\n'), ((5748, 5780), 'cams.cam3d.GradCAM3dpp', 'GradCAM3dpp', (['model', 'target_layer'], {}), '(model, target_layer)\n', (5759, 5780), False, 'from cams.cam3d import GradCAM3dpp, GradCAM3d\n'), ((2355, 2377), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2367, 2377), False, 'import torch\n'), ((2574, 2617), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (2599, 2617), False, 'import torch\n'), ((4699, 4714), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (4711, 4714), False, 'import torch\n'), ((4921, 4942), 'os.listdir', 'os.listdir', (['path_file'], {}), '(path_file)\n', (4931, 4942), False, 'import os\n'), ((5425, 5440), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (5437, 5440), False, 'import torch\n'), ((5559, 5583), 'torch.load', 'torch.load', (['"""model_dict"""'], {}), "('model_dict')\n", (5569, 5583), False, 'import torch\n'), ((1060, 
1072), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1070, 1072), False, 'from torch import nn\n'), ((1521, 1582), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(AvgPool3d_x, AvgPool3d_y, AvgPool3d_z)'], {}), '((AvgPool3d_x, AvgPool3d_y, AvgPool3d_z))\n', (1541, 1582), False, 'from torch import nn\n'), ((3621, 3653), 'numpy.random.random', 'random.random', (['(120, 10, 10, 10)'], {}), '((120, 10, 10, 10))\n', (3634, 3653), False, 'from numpy import random\n'), ((4549, 4572), 'numpy.random.randint', 'random.randint', (['(1)', '(9)', '(3)'], {}), '(1, 9, 3)\n', (4563, 4572), False, 'from numpy import random\n'), ((4855, 4867), 'torch.max', 'torch.max', (['x'], {}), '(x)\n', (4864, 4867), False, 'import torch\n'), ((4977, 5003), 'os.path.join', 'os.path.join', (['path_file', 'i'], {}), '(path_file, i)\n', (4989, 5003), False, 'import os\n'), ((5044, 5065), 'os.path.isdir', 'os.path.isdir', (['f_path'], {}), '(f_path)\n', (5057, 5065), False, 'import os\n'), ((838, 881), 'torch.nn.Conv3d', 'nn.Conv3d', (['D_in', 'D2', '(1)'], {'stride': '(1)', 'padding': '(0)'}), '(D_in, D2, 1, stride=1, padding=0)\n', (847, 881), False, 'from torch import nn\n'), ((1123, 1136), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1130, 1136), False, 'from torch import nn\n'), ((1150, 1162), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1160, 1162), False, 'from torch import nn\n'), ((1176, 1206), 'torch.nn.Linear', 'nn.Linear', (['self.link', 'dens_out'], {}), '(self.link, dens_out)\n', (1185, 1206), False, 'from torch import nn\n'), ((1220, 1233), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1227, 1233), False, 'from torch import nn\n'), ((1873, 1914), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)', 'end_dim': '(-1)'}), '(x, start_dim=1, end_dim=-1)\n', (1886, 1914), False, 'import torch\n'), ((2124, 2165), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)', 'end_dim': '(-1)'}), '(x, start_dim=1, end_dim=-1)\n', 
(2137, 2165), False, 'import torch\n'), ((4750, 4765), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (4762, 4765), False, 'import torch\n'), ((5123, 5140), 'os.remove', 'os.remove', (['f_path'], {}), '(f_path)\n', (5132, 5140), False, 'import os\n')]
|
import logging
# Configure root logging at import time: DEBUG level, with file/line
# context in every message.
logging.basicConfig(
  level=logging.DEBUG,
  format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
from sanskrit_data.schema import common
class RootAnalysis(common.JsonObject):
  """Schema-bearing model describing the analysis of a root."""
  # BUGFIX: the original referenced the bare name `JsonObject`, which is
  # never imported in this module (only `common` is) and raises NameError
  # when the class body executes; the base schema is
  # common.JsonObject.schema.
  # NOTE(review): "root": "string" and "item" (vs the JSON-Schema keyword
  # "items") look non-standard -- confirm what
  # recursively_merge_json_schemas actually accepts before tightening.
  schema = common.recursively_merge_json_schemas(common.JsonObject.schema, ({
    "type": "object",
    "description": "Analysis of any root.",
    "properties": {
      common.TYPE_FIELD: {
        "enum": ["RootAnalysis"]
      },
      "root": "string",
      "pratyayas": {
        "type": "array",
        "item": "string"
      },
    },
  }))
class Praatipadika(common.JsonObject):
  """Schema-bearing model for a prAtipadika (nominal stem)."""
  # BUGFIX: `JsonObject` was referenced unqualified, but only `common` is
  # imported here, so the class body raised NameError; the base schema is
  # common.JsonObject.schema.
  schema = common.recursively_merge_json_schemas(common.JsonObject.schema, ({
    "type": "object",
    "description": "A prAtipadika.",
    "properties": {
      common.TYPE_FIELD: {
        "enum": ["Praatipadika"]
      },
      "root": "string",
      "prakaara": "string",
      "linga": "string",
      "rootAnalysis": RootAnalysis.schema,
    },
  }))
|
[
"logging.basicConfig",
"sanskrit_data.schema.common.recursively_merge_json_schemas"
] |
[((15, 137), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s """'}), "(level=logging.DEBUG, format=\n '%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s ')\n", (34, 137), False, 'import logging\n'), ((233, 493), 'sanskrit_data.schema.common.recursively_merge_json_schemas', 'common.recursively_merge_json_schemas', (['JsonObject.schema', "{'type': 'object', 'description': 'Analysis of any root.', 'properties': {\n common.TYPE_FIELD: {'enum': ['RootAnalysis']}, 'root': 'string',\n 'pratyayas': {'type': 'array', 'item': 'string'}}}"], {}), "(JsonObject.schema, {'type': 'object',\n 'description': 'Analysis of any root.', 'properties': {common.\n TYPE_FIELD: {'enum': ['RootAnalysis']}, 'root': 'string', 'pratyayas':\n {'type': 'array', 'item': 'string'}}})\n", (270, 493), False, 'from sanskrit_data.schema import common\n'), ((617, 898), 'sanskrit_data.schema.common.recursively_merge_json_schemas', 'common.recursively_merge_json_schemas', (['JsonObject.schema', "{'type': 'object', 'description': 'A prAtipadika.', 'properties': {common.\n TYPE_FIELD: {'enum': ['Praatipadika']}, 'root': 'string', 'prakaara':\n 'string', 'linga': 'string', 'rootAnalysis': RootAnalysis.schema}}"], {}), "(JsonObject.schema, {'type': 'object',\n 'description': 'A prAtipadika.', 'properties': {common.TYPE_FIELD: {\n 'enum': ['Praatipadika']}, 'root': 'string', 'prakaara': 'string',\n 'linga': 'string', 'rootAnalysis': RootAnalysis.schema}})\n", (654, 898), False, 'from sanskrit_data.schema import common\n')]
|
from wtforms import Form, BooleanField, StringField, validators
# from wtforms import HiddenField
from wtforms import PasswordField
class LoginForm(Form):
    """Login form: a username and a password, each 4-40 characters."""
    login = StringField('Username', validators=[validators.Length(min=4, max=40)])
    password = PasswordField('password', validators=[validators.Length(min=4, max=40)])
class TaskForm(Form):
    """Task entry form: a task name (2-255 chars) plus a done flag."""
    taskname = StringField('Task', validators=[validators.Length(min=2, max=255)])
    # The InputRequired validator stays disabled, as in the original;
    # the checkbox simply defaults to checked.
    status = BooleanField('Status', default=True)
|
[
"wtforms.BooleanField",
"wtforms.validators.Length"
] |
[((415, 451), 'wtforms.BooleanField', 'BooleanField', (['"""Status"""'], {'default': '(True)'}), "('Status', default=True)\n", (427, 451), False, 'from wtforms import Form, BooleanField, StringField, validators\n'), ((194, 226), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(4)', 'max': '(40)'}), '(min=4, max=40)\n', (211, 226), False, 'from wtforms import Form, BooleanField, StringField, validators\n'), ((271, 303), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(4)', 'max': '(40)'}), '(min=4, max=40)\n', (288, 303), False, 'from wtforms import Form, BooleanField, StringField, validators\n'), ((366, 399), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(2)', 'max': '(255)'}), '(min=2, max=255)\n', (383, 399), False, 'from wtforms import Form, BooleanField, StringField, validators\n')]
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import pandas as pd
from QUANTAXIS.QAMarket.common import exchange_code
from QUANTAXIS.QAUtil import (
QA_util_log_info,
QA_util_random_with_topic,
QA_util_to_json_from_pandas
)
from QUANTAXIS.QAUtil.QAParameter import AMOUNT_MODEL, ORDER_STATUS, ORDER_DIRECTION, ORDER_MODEL
from QUANTAXIS.QAUtil.QADate import QA_util_stamp2datetime
"""
重新定义Order模式
在2017-12-15的Account-remake-version 分支中
Bid类全部被更名为Order类
用于和 bid_ask 区分
by yutiansut@2017/12/15
@2018/1/9
需要重新考虑 order的重复创建耗时问题?
order_frame 是一个管理性面板 但是还是需要一个缓存dict?
@2018/05/25
不建议保存两份变量, 维护起来很麻烦,容易出错。
"""
class QA_Order():
    '''
    Represents a single order record.
    '''
    def __init__(
        self,
        price=None,
        date=None,
        datetime=None,
        sending_time=None,
        trade_time=False,
        amount=0,
        market_type=None,
        frequence=None,
        towards=None,
        code=None,
        user=None,
        account_cookie=None,
        strategy=None,
        order_model=None,
        money=None,
        amount_model=AMOUNT_MODEL.BY_AMOUNT,
        broker=None,
        order_id=None,
        trade_id=False,
        _status=ORDER_STATUS.NEW,
        callback=False,
        commission_coeff=0.00025,
        tax_coeff=0.001,
        exchange_id=None,
        *args,
        **kwargs
    ):
        '''
        A QA_Order object represents one order request, with the fields:

        - price            limit price (for limit orders)
        - date             order date (daily-bar backtests)
        - datetime         current time (minute-bar and live trading)
        - sending_time     submission time (minute-bar and live trading)
        - trade_time       fill times [list] (appended to on every fill)
        - amount           requested quantity
        - frequence        bar frequency for backtests (DAY/1min/5min/15min/30min/...)
        - towards          buy/sell direction
        - code             instrument code
        - user             user who placed the order
        - account_cookie   identifier of the placing account
        - strategy         strategy id
        - order_model      pricing model (limit/market/next-bar/...), str, e.g. 'limit'
        - money            order value
        - amount_model     sizing model (by quantity / by total value), str, 'by_amount'
        - order_id         order id
        - trade_id         trade (fill) id
        - _status          internally maintained order status
        - callback         function invoked whenever the order status changes
                           (think of it as an automatically executed OnOrderAction)
        - commission_coeff commission coefficient
        - tax_coeff        stamp-tax coefficient (stocks)
        - exchange_id      exchange id (mainly for live futures trading)

        :param args: type tuple
        :param kwargs: type dict

        # 2018-08-12: turn the order into a state machine.
        # Previously the order only carried information; now it owns its
        # own status-transition methods.

        NEW = 100
        SUCCESS_ALL = 200
        SUCCESS_PART = 203 # partially filled; an intermediate state, the remainder stays queued
        QUEUED = 300 # alive in the order queue, waiting to be filled
        CANCEL = 400
        CANCEL_PART = 402 # partially cancelled (filled in part, remainder cancelled; terminal state)
        SETTLED = 500
        FAILED = 600
        '''
        self.price = price
        self.datetime = None
        # TODO: move this datetime handling into a Util helper
        if datetime is None and date is not None:
            self.date = date
            self.datetime = '{} 09:31:00'.format(self.date)
        elif date is None and datetime is not None:
            self.date = datetime[0:10]
            self.datetime = datetime
        elif date is not None and datetime is not None:
            self.date = date
            self.datetime = datetime
        else:
            pass
        self.sending_time = self.datetime if sending_time is None else sending_time # submission time
        self.trade_time = trade_time if trade_time else [] # fill times
        self.amount = amount # requested quantity
        self.trade_amount = 0 # filled quantity
        self.cancel_amount = 0 # cancelled quantity
        self.towards = towards # side
        self.code = code # instrument code
        self.user = user # ordering user
        self.market_type = market_type # market category
        self.frequence = frequence # bar frequency of the order (backtests)
        self.account_cookie = account_cookie
        self.strategy = strategy
        self.type = market_type # see below
        self.order_model = order_model
        self.amount_model = amount_model
        self.order_id = QA_util_random_with_topic(
            topic='Order'
        ) if order_id is None else order_id
        self.realorder_id = self.order_id
        self.commission_coeff = commission_coeff
        self.tax_coeff = tax_coeff
        self.trade_id = trade_id if trade_id else []
        self.trade_price = 0 # average fill price
        self.broker = broker
        self.callback = callback # callback fired on order events
        self.money = money # cash required by the order
        self.reason = None # reason list
        self.exchange_id = exchange_id
        self.time_condition = 'GFD' # good for the current day
        self._status = _status
        self.exchange_code = exchange_code
        # Multi-account / hierarchical-account support, added 2018/11/12
        self.mainacc_id = None if 'mainacc_id' not in kwargs.keys(
        ) else kwargs['mainacc_id']
        self.subacc_id = None if 'subacc_id' not in kwargs.keys(
        ) else kwargs['subacc_id']
    @property
    def pending_amount(self):
        # Quantity still outstanding: requested minus cancelled minus filled.
        return self.amount - self.cancel_amount - self.trade_amount
    def __repr__(self):
        '''
        Formatted string representation of the order.
        :return: str
        '''
        return '< QA_Order realorder_id {} datetime:{} code:{} amount:{} price:{} towards:{} btype:{} order_id:{} account:{} status:{} >'.format(
            self.realorder_id,
            self.datetime,
            self.code,
            self.amount,
            self.price,
            self.towards,
            self.type,
            self.order_id,
            self.account_cookie,
            self.status
        )
    @property
    def status(self):
        # The following are all terminal states, driven by external actions.
        if self._status in [ORDER_STATUS.FAILED,
                            ORDER_STATUS.NEXT,
                            ORDER_STATUS.SETTLED,
                            ORDER_STATUS.CANCEL_ALL,
                            ORDER_STATUS.CANCEL_PART]:
            return self._status
        # NOTE(review): if none of the branches below matches (only possible
        # when trade_amount < 0) this property falls through and returns None.
        if self.pending_amount <= 0:
            self._status = ORDER_STATUS.SUCCESS_ALL
            return self._status
        elif self.pending_amount > 0 and self.trade_amount > 0:
            self._status = ORDER_STATUS.SUCCESS_PART
            return self._status
        elif self.trade_amount == 0:
            self._status = ORDER_STATUS.QUEUED
            return self._status
    def get_exchange(self, code):
        # Look up the exchange id for an instrument code (lower-cased key).
        return self.exchange_code[code.lower()]
    def create(self):
        """Create the order (state -> NEW).
        """
        # Called at creation time, before the order enters the queue.
        self._status = ORDER_STATUS.NEW
    def cancel(self):
        """Cancel the order.

        Cancels whatever has not been filled yet; the resulting state is
        CANCEL_ALL when nothing was filled, CANCEL_PART otherwise.
        """
        self.cancel_amount = self.amount - self.trade_amount
        if self.trade_amount == 0:
            # nothing filled yet: cancel the whole order
            self._status = ORDER_STATUS.CANCEL_ALL
        else:
            # partially filled: cancel the remainder
            self._status = ORDER_STATUS.CANCEL_PART
    def failed(self, reason=None):
        """Mark the order as failed (it was never accepted by the broker).

        Arguments:
            reason {str} -- failure reason
        """
        # Creation failed (e.g. invalid order / off-exchange reject /
        # price above limit-up / price below limit-down / comms failure).
        self._status = ORDER_STATUS.FAILED
        self.reason = str(reason)
    def trade(self, trade_id, trade_price, trade_amount, trade_time):
        """Apply one fill to the order.

        Arguments:
            trade_id     -- fill id; duplicates are silently ignored
            trade_price  -- fill price (coerced to float)
            trade_amount -- fill quantity (coerced to int); < 1 sets NEXT
            trade_time   -- fill time (coerced to str)

        Raises RuntimeError when the order is not in a fillable state.
        """
        if self.status in [ORDER_STATUS.SUCCESS_PART, ORDER_STATUS.QUEUED]:
            trade_amount = int(trade_amount)
            trade_id = str(trade_id)
            if trade_amount < 1:
                self._status = ORDER_STATUS.NEXT
            else:
                if trade_id not in self.trade_id:
                    trade_price = float(trade_price)
                    trade_time = str(trade_time)
                    self.trade_id.append(trade_id)
                    # running volume-weighted average fill price
                    self.trade_price = (
                        self.trade_price * self.trade_amount +
                        trade_price * trade_amount
                    ) / (
                        self.trade_amount + trade_amount
                    )
                    self.trade_amount += trade_amount
                    self.trade_time.append(trade_time)
                    self.callback(
                        self.code,
                        trade_id,
                        self.order_id,
                        self.realorder_id,
                        trade_price,
                        trade_amount,
                        self.towards,
                        trade_time
                    )
                else:
                    pass
        else:
            raise RuntimeError(
                'ORDER STATUS {} CANNNOT TRADE'.format(self.status)
            )
    def queued(self, realorder_id):
        # The broker accepted the order: record its real id and mark QUEUED.
        self.realorder_id = realorder_id
        self._status = ORDER_STATUS.QUEUED
    def settle(self):
        # Daily settlement: terminal state.
        self._status = ORDER_STATUS.SETTLED
    def get(self, key, exception=None):
        # Fetch attribute `key` by name, returning `exception` on any failure.
        try:
            if key is None:
                print("key is none , return none!")
                return None
            return eval('self.{}'.format(key))
        except Exception as e:
            return exception
    # TODO: consider removing this and reading the attribute directly
    def callingback(self):
        """Return the registered callback, if any.

        Returns:
            the callback when one is set, otherwise None (implicit)
        """
        if self.callback:
            return self.callback
    def info(self):
        '''
        :return: all attributes of this order, as a dict
        '''
        return vars(self)
    # convert the object into a one-row DataFrame
    def to_df(self):
        return pd.DataFrame([
            vars(self),
        ])
    # TODO: consider removing this and calling vars() directly
    def to_dict(self):
        '''
        Convert the object's attributes into a dict.
        :return: dict
        '''
        return vars(self)
    def to_otgdict(self):
        """Example of the OTG payload produced below:
        {
            "aid": "insert_order",  # required: place-order request
            # required: must match the logged-in user name, or be one of its
            # sub-accounts (e.g. user1 or user1.some_unit)
            "user_id": account_cookie,
            # required: order id, unique per account, max 512 bytes
            "order_id": order_id if order_id else QA.QA_util_random_with_topic('QAOTG'),
            "exchange_id": exchange_id,  # required: target exchange
            "instrument_id": code,  # required: instrument code
            "direction": order_direction,  # required: buy/sell direction
            # required: open/close flag; may be omitted only when the
            # instrument has no open/close mechanism (e.g. stocks)
            "offset": order_offset,
            "volume": volume,  # required: number of lots
            "price_type": "LIMIT",  # required: price type
            "limit_price": price,  # required when price_type == LIMIT
            "volume_condition": "ANY",
            "time_condition": "GFD",
        }
        """
        # NOTE(review): relies on self.direction / self.offset, which are only
        # set by from_otgformat(), not by __init__ -- confirm call order.
        return {
            "aid": "insert_order", # required: place-order request
            # required: must match the logged-in user name, or be one of its
            # sub-accounts (e.g. user1 or user1.some_unit)
            "user_id": self.account_cookie,
            # required: order id, unique per account, max 512 bytes
            "order_id": self.order_id,
            "exchange_id": self.exchange_id, # required: target exchange
            "instrument_id": self.code, # required: instrument code
            "direction": self.direction, # required: buy/sell direction
            # required: open/close flag; may be omitted only when the
            # instrument has no open/close mechanism (e.g. stocks)
            "offset": self.offset,
            "volume": self.amount, # required: number of lots
            "price_type": self.order_model, # required: price type
            "limit_price": self.price, # required when price_type == LIMIT
            "volume_condition": "ANY",
            "time_condition": "GFD",
        }
    def to_qatradegatway(self):
        # NOTE(review): self.direction / self.offset are only set by
        # from_otgformat(), not by __init__ -- confirm call order.
        direction = 'BUY' if self.direction > 0 else 'SELL'
        return {
            'topic': 'sendorder',
            'account_cookie': self.account_cookie,
            'strategy_id': self.strategy,
            'order_direction': direction,
            'code': self.code.lower(),
            'price': self.price,
            'order_time': self.sending_time,
            'exchange_id': self.get_exchange(self.code),
            'order_offset': self.offset,
            'volume': self.amount,
            'order_id': self.order_id
        }
    def from_otgformat(self, otgOrder):
        """Populate this order from an OTG-format order dict and return self.

        Arguments:
            otgOrder {dict} -- e.g.:

        {'seqno': 6,
        'user_id': '106184',
        'order_id': 'WDRB_QA01_FtNlyBem',
        'exchange_id': 'SHFE',
        'instrument_id': 'rb1905',
        'direction': 'SELL',
        'offset': 'OPEN',
        'volume_orign': 50, # (total ordered lots)
        'price_type': 'LIMIT', # (price type: ANY = market, LIMIT = limit)
        'limit_price': 3432.0, # (limit price, only valid when price_type = LIMIT)
        'time_condition': 'GFD', # (time condition: IOC = immediate-or-cancel, GFS = good for session, GFD = good for day, GTC = good till cancel, GFA = good for auction)
        'volume_condition': 'ANY', # (volume condition: ANY = any amount, MIN = minimum amount, ALL = all or nothing)
        'insert_date_time': 1545656460000000000, # (submission time, Beijing time, in ns since the unix epoch 1970-01-01 00:00:00 GMT)
        'exchange_order_id': ' 3738',
        'status': 'FINISHED', # (order state: ALIVE = live, FINISHED = done)
        'volume_left': 0,
        'last_msg': '全部成交报单已提交'} # (order status message)
        """
        self.order_id = otgOrder.get('order_id')
        self.account_cookie = otgOrder.get('user_id')
        self.exchange_id = otgOrder.get('exchange_id')
        self.code = str(otgOrder.get('instrument_id')).upper()
        self.offset = otgOrder.get('offset')
        self.direction = otgOrder.get('direction')
        self.towards = eval('ORDER_DIRECTION.{}_{}'.format(
            self.direction,
            self.offset
        ))
        self.amount = otgOrder.get('volume_orign')
        self.trade_amount = self.amount - otgOrder.get('volume_left')
        self.price = otgOrder.get('limit_price')
        self.order_model = eval(
            'ORDER_MODEL.{}'.format(otgOrder.get('price_type'))
        )
        self.time_condition = otgOrder.get('time_condition')
        self.datetime = QA_util_stamp2datetime(
            int(otgOrder.get('insert_date_time'))
        )
        self.sending_time = self.datetime
        self.volume_condition = otgOrder.get('volume_condition')
        self.message = otgOrder.get('last_msg')
        self._status = ORDER_STATUS.NEW
        # last_msg mentioning "cancelled" or "rejected" means the order failed
        if '已撤单' in self.message or '拒绝' in self.message:
            self._status = ORDER_STATUS.FAILED
        self.realorder_id = otgOrder.get('exchange_order_id')
        return self
    def from_dict(self, order_dict):
        '''
        Populate this object's fields from a plain dict.
        :param order_dict: dict
        :return: self QA_Order
        '''
        try:
            # QA_util_log_info('QA_ORDER CHANGE: from {} change to {}'.format(
            #     self.order_id, order['order_id']))
            self.price = order_dict['price']
            self.date = order_dict['date']
            self.datetime = order_dict['datetime']
            self.sending_time = order_dict['sending_time'] # submission time
            self.trade_time = order_dict['trade_time']
            self.amount = order_dict['amount']
            self.frequence = order_dict['frequence']
            self.market_type = order_dict['market_type']
            self.towards = order_dict['towards']
            self.code = order_dict['code']
            self.user = order_dict['user']
            self.account_cookie = order_dict['account_cookie']
            self.strategy = order_dict['strategy']
            self.type = order_dict['type']
            self.order_model = order_dict['order_model']
            self.amount_model = order_dict['amount_model']
            self.order_id = order_dict['order_id']
            self.realorder_id = order_dict['realorder_id']
            self.trade_id = order_dict['trade_id']
            self.callback = order_dict['callback']
            self.commission_coeff = order_dict['commission_coeff']
            self.tax_coeff = order_dict['tax_coeff']
            self.money = order_dict['money']
            self._status = order_dict['_status']
            self.cancel_amount = order_dict['cancel_amount']
            self.trade_amount = order_dict['trade_amount']
            self.trade_price = order_dict['trade_price']
            self.reason = order_dict['reason']
            return self
        except Exception as e:
            QA_util_log_info('Failed to tran from dict {}'.format(e))
class QA_OrderQueue(): # also the order tree ?? what's the tree means?
    """A day-level pending-order book.

    Two plain dicts back it:
    - order_list: every order placed today, keyed by order_id
    - deal_list:  the historical archive; settle() folds order_list into it

    Metaphor: jot each order down in a notebook, tick the filled ones,
    cross out the cancelled ones, and count the remaining lines to see
    what is still pending. The notebook is order_list.
    """

    def __init__(self):
        # Reworked for performance: two dicts instead of a DataFrame.
        self.order_list = {}
        self.deal_list = {}

    def __repr__(self):
        return '< QA_ORDERQueue >'

    def __call__(self):
        return self.order_list

    def insert_order(self, order):
        """Register `order` (a QA_Order) in the book and hand it back.

        Prints a diagnostic and yields None when `order` is None.
        """
        if order is None:
            print('QAERROR Wrong for get None type while insert order to Queue')
            return None
        self.order_list[order.order_id] = order
        return order

    def update_order(self, order):
        # Overwrite (or add) the stored order under its own id.
        self.order_list[order.order_id] = order

    @property
    def order_ids(self):
        # Ids of every order tracked today.
        return list(self.order_list.keys())

    @property
    def len(self):
        # Number of orders currently tracked.
        return len(self.order_list)

    def settle(self):
        """Daily settlement: archive today's orders and start a fresh book."""
        self.deal_list.update(self.order_list)
        self.order_list = {}

    @property
    def pending(self):
        '''
        Orders that are still alive and waiting to be filled.

        Status codes: 600 rejected, 200 fully filled, 203 partially
        filled, 300 queued, 400 cancelled, 500 server cancel / daily
        settlement.

        Lifecycles:
        created(100) -> rejected(600)
        created(100) -> queued(300) -> filled(200) -> settled(500) -> dead
        created(100) -> queued(300) -> partial(203) -> unfilled(300) -> settled(500) -> dead
        created(100) -> queued(300) -> user cancel(400) -> settled(500) -> dead

        :return: list of live orders ([] on any lookup failure)
        '''
        try:
            alive = (
                ORDER_STATUS.QUEUED,
                ORDER_STATUS.NEXT,
                ORDER_STATUS.SUCCESS_PART
            )
            return [order for order in self.order_list.values()
                    if order.status in alive]
        except:
            return []

    @property
    def failed(self):
        try:
            return [order for order in self.order_list.values()
                    if order.status in (ORDER_STATUS.FAILED,)]
        except:
            return []

    @property
    def canceled(self):
        try:
            gone = (ORDER_STATUS.CANCEL_ALL, ORDER_STATUS.CANCEL_PART)
            return [order for order in self.order_list.values()
                    if order.status in gone]
        except:
            return []

    @property
    def untrade(self):
        try:
            return [order for order in self.order_list.values()
                    if order.status in (ORDER_STATUS.QUEUED,)]
        except:
            return []

    # TODO: order-queue bookkeeping
    def set_status(self, order_id, new_status):
        try:
            if order_id in self.order_ids:
                self.order_list[order_id].status = new_status
        except:
            return None

    def to_df(self):
        try:
            return pd.concat([order.to_df() for order in self.order_list.values()])
        except:
            pass
if __name__ == '__main__':
    # Smoke test: build a default order and dump its attributes.
    ax = QA_Order()
    print(ax.info())
    print(ax.to_df())
|
[
"QUANTAXIS.QAUtil.QA_util_random_with_topic"
] |
[((5386, 5426), 'QUANTAXIS.QAUtil.QA_util_random_with_topic', 'QA_util_random_with_topic', ([], {'topic': '"""Order"""'}), "(topic='Order')\n", (5411, 5426), False, 'from QUANTAXIS.QAUtil import QA_util_log_info, QA_util_random_with_topic, QA_util_to_json_from_pandas\n')]
|
"""
Python implementation of the MSD radix sort algorithm.
It used the binary representation of the integers to sort
them...
https://en.wikipedia.org/wiki/Radix_sort
"""
from __future__ import annotations
def msd_radix_sort(list_of_ints: list[int]) -> list[int]:
"""
Implementation of the MSD radix sort algorithm. Only works
with positive integers
:param list_of_ints: A list of integers
:return: Returns the sorted list
>>> msd_radix_sort([40, 12, 1, 100, 4,87])
[1, 4, 12, 40, 100]
>>> msd_radix_sort([])
[]
>>> msd_radix_sort([123, 345, 123, 80])
[80, 123, 123, 345]
>>> msd_radix_sort([1209, 834598, 1, 540402, 45])
[1, 45, 1209, 540402, 834598]
>>> msd_radix_sort([-1, 34, 45])
Traceback (most recent call last):
...
ValueError: All numbers must be positive
"""
if not list_of_ints:
return []
if min(list_of_ints) < 0:
raise ValueError("All numbers must be positive")
most_bits = max(len(bin(x)[2:]) for x in list_of_ints)
return _msd_radix_sort(list_of_ints, most_bits)
def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]:
"""
Sort the given list based on the bit at bit_position. Numbers with a
0 at that position will be at the start of the list, numbers with a
1 at the end.
:param list_of_ints: A list of integers
:param bit_position: the position of the bit that gets compared
:return: Returns a partially sorted list
>>> _msd_radix_sort([45, 2, 32], 1)
[2, 32, 45]
>>> _msd_radix_sort([10, 4, 12,45,98,96,110], 2)
[4, 12, 10]
"""
if bit_position == 0 or len(list_of_ints) in [0, 1]:
return list_of_ints
zeros = list()
ones = list()
# Split numbers based on bit at bit_position from the right
for number in list_of_ints:
if (number >> (bit_position - 1)) & 1:
# number has a one at bit bit_position
ones.append(number)
else:
# number has a zero at bit bit_position
zeros.append(number)
# recursively split both lists further
zeros = _msd_radix_sort(zeros, bit_position - 1)
ones = _msd_radix_sort(ones, bit_position - 1)
# recombine lists
res = zeros
res.extend(ones)
return res
def msd_radix_sort_inplace(list_of_ints: list[int]):
"""
Inplace implementation of the MSD radix sort algorithm.
Sorts based on the binary representation of the integers.
>>> lst = [1, 345, 23, 89, 0, 3]
>>> msd_radix_sort_inplace(lst)
>>> lst == sorted(lst)
True
>>> lst = [1, 43, 0, 0, 0, 24, 3, 3]
>>> msd_radix_sort_inplace(lst)
>>> lst == sorted(lst)
True
>>> lst = []
>>> msd_radix_sort_inplace(lst)
>>> lst == []
True
>>> lst = [-1, 34, 23, 4, -42]
>>> msd_radix_sort_inplace(lst)
Traceback (most recent call last):
...
ValueError: All numbers must be positive
"""
length = len(list_of_ints)
if not list_of_ints or length == 1:
return
if min(list_of_ints) < 0:
raise ValueError("All numbers must be positive")
most_bits = max(len(bin(x)[2:]) for x in list_of_ints)
_msd_radix_sort_inplace(list_of_ints, most_bits, 0, length)
def _msd_radix_sort_inplace(
list_of_ints: list[int], bit_position: int, begin_index: int, end_index: int
):
"""
Sort the given list based on the bit at bit_position. Numbers with a
0 at that position will be at the start of the list, numbers with a
1 at the end.
>>> lst = [45, 2, 32, 24, 534, 2932]
>>> _msd_radix_sort_inplace(lst, 1, 0, 3)
>>> lst == [32, 2, 45, 24, 534, 2932]
True
>>> lst = [0, 2, 1, 3, 12, 10, 4, 90, 54, 2323, 756]
>>> _msd_radix_sort_inplace(lst, 2, 4, 7)
>>> lst == [0, 2, 1, 3, 12, 4, 10, 90, 54, 2323, 756]
True
"""
if bit_position == 0 or end_index - begin_index <= 1:
return
bit_position -= 1
i = begin_index
j = end_index - 1
while i <= j:
changed = False
if not ((list_of_ints[i] >> bit_position) & 1):
# found zero at the beginning
i += 1
changed = True
if (list_of_ints[j] >> bit_position) & 1:
# found one at the end
j -= 1
changed = True
if changed:
continue
list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i]
j -= 1
if not j == i:
i += 1
_msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i)
_msd_radix_sort_inplace(list_of_ints, bit_position, i, end_index)
# When executed as a script, run the embedded doctests.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
[
"doctest.testmod"
] |
[((4684, 4701), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4699, 4701), False, 'import doctest\n')]
|
#!/usr/bin/python
# this script will update the versions in plist and installer files to match that in resource.h
import plistlib, os, datetime, fileinput, glob, sys, string
# Absolute directory holding this script; all paths below are relative to it.
scriptpath = os.path.dirname(os.path.realpath(__file__))
def replacestrs(filename, s, r):
  """Replace every occurrence of s with r in all files matching filename.

  The files are rewritten in place: fileinput with inplace=1 redirects
  stdout into the file currently being processed, so each (possibly
  rewritten) line lands back where it came from.
  """
  matched = glob.glob(filename)
  for text in fileinput.input(matched, inplace=1):
    sys.stdout.write(text.replace(s, r))
def update_plist(plistpath, CFBundleGetInfoString, CFBundleVersion):
  """Rewrite the version-related keys of the plist at plistpath.

  NOTE(review): plistlib.readPlist/writePlist were removed in Python 3.9;
  this script assumes an older interpreter -- confirm before upgrading.
  """
  plist = plistlib.readPlist(plistpath)
  plist['CFBundleGetInfoString'] = CFBundleGetInfoString
  plist['CFBundleVersion'] = CFBundleVersion
  plist['CFBundleShortVersionString'] = CFBundleVersion
  plistlib.writePlist(plist, plistpath)
  # Presumably restores the legacy "//Apple Computer//" DOCTYPE that
  # writePlist replaces -- verify against the consuming tooling.
  replacestrs(plistpath, "//Apple//", "//Apple Computer//");
def main():
  """Read the version from JuceLibraryCode/AppConfig.h and propagate it
  into the macOS plists, the Mac installer project and the Windows
  installer script."""
  FullVersionStr = ""
  # Scan AppConfig.h (inplace=0: read-only) for the version define.
  # NOTE(review): str.lstrip strips a *character set*, not a prefix; this
  # works only because version strings start with characters (digits)
  # outside that set -- confirm before reusing this pattern.
  for line in fileinput.input(scriptpath + "/JuceLibraryCode/AppConfig.h",inplace=0):
    if "#define JucePlugin_Version " in line:
      FullVersionStr = line.lstrip("#define JucePlugin_Version ").strip()
  today = datetime.date.today()
  CFBundleGetInfoString = FullVersionStr + ", Copyright MatthieuBrucher, " + str(today.year)
  CFBundleVersion = FullVersionStr
  print("update_version.py - setting version to " + FullVersionStr)
  print("Updating plist version info...")
  import glob
  for plistpath in glob.glob(scriptpath + "/Builds/MacOSX/*.plist"):
    update_plist(plistpath, CFBundleGetInfoString, CFBundleVersion)
  print("Updating Mac Installer version info...")
  plistpath = scriptpath + "/installer/ATKAutoSwell.pkgproj"
  # NOTE(review): readPlist/writePlist removed in Python 3.9 -- see update_plist.
  installer = plistlib.readPlist(plistpath)
  for x in installer['PACKAGES']:
    x['PACKAGE_SETTINGS']['VERSION'] = FullVersionStr
  plistlib.writePlist(installer, plistpath)
  replacestrs(plistpath, "//Apple//", "//Apple Computer//");
  print("Updating Windows Installer version info...")
  # Rewrite the AppVersion line of the InnoSetup script in place.
  for line in fileinput.input(scriptpath + "/installer/ATKAutoSwell.iss",inplace=1):
    if "AppVersion" in line:
      line="AppVersion=" + FullVersionStr + "\n"
    sys.stdout.write(line)
# Entry point: only run when invoked directly.
if __name__ == '__main__':
  main()
|
[
"sys.stdout.write",
"fileinput.input",
"os.path.realpath",
"plistlib.readPlist",
"datetime.date.today",
"glob.glob",
"plistlib.writePlist"
] |
[((205, 231), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((277, 296), 'glob.glob', 'glob.glob', (['filename'], {}), '(filename)\n', (286, 296), False, 'import glob\n'), ((314, 347), 'fileinput.input', 'fileinput.input', (['files'], {'inplace': '(1)'}), '(files, inplace=1)\n', (329, 347), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((485, 514), 'plistlib.readPlist', 'plistlib.readPlist', (['plistpath'], {}), '(plistpath)\n', (503, 514), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((675, 712), 'plistlib.writePlist', 'plistlib.writePlist', (['plist', 'plistpath'], {}), '(plist, plistpath)\n', (694, 712), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((824, 895), 'fileinput.input', 'fileinput.input', (["(scriptpath + '/JuceLibraryCode/AppConfig.h')"], {'inplace': '(0)'}), "(scriptpath + '/JuceLibraryCode/AppConfig.h', inplace=0)\n", (839, 895), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((1031, 1052), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1050, 1052), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((1330, 1378), 'glob.glob', 'glob.glob', (["(scriptpath + '/Builds/MacOSX/*.plist')"], {}), "(scriptpath + '/Builds/MacOSX/*.plist')\n", (1339, 1378), False, 'import glob\n'), ((1577, 1606), 'plistlib.readPlist', 'plistlib.readPlist', (['plistpath'], {}), '(plistpath)\n', (1595, 1606), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((1703, 1744), 'plistlib.writePlist', 'plistlib.writePlist', (['installer', 'plistpath'], {}), '(installer, plistpath)\n', (1722, 1744), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((1880, 1950), 'fileinput.input', 'fileinput.input', (["(scriptpath + '/installer/ATKAutoSwell.iss')"], {'inplace': '(1)'}), 
"(scriptpath + '/installer/ATKAutoSwell.iss', inplace=1)\n", (1895, 1950), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((382, 404), 'sys.stdout.write', 'sys.stdout.write', (['line'], {}), '(line)\n', (398, 404), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n'), ((2033, 2055), 'sys.stdout.write', 'sys.stdout.write', (['line'], {}), '(line)\n', (2049, 2055), False, 'import plistlib, os, datetime, fileinput, glob, sys, string\n')]
|
from django.contrib import admin
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.utils.translation import ugettext as _
from pagedown.widgets import AdminPagedownWidget
from explorer.models import *
from status.models import *
from stickers.models import *
from highscore.models import *
class MissionAdmin(admin.ModelAdmin):
    # Change-list columns for Mission entries.
    list_display = ('name','number')
class ChallengeAdmin(admin.ModelAdmin):
    # Change-list columns for Challenge entries, including the parent mission.
    list_display = ('name','number','mission')
# Define a new FlatPageAdmin
class FlatPageAdmin(FlatPageAdmin):
    """Custom flatpages admin: collapses the rarely-used fields under an
    "Advanced options" section and preselects the current site for the
    ``sites`` field.
    """
    fieldsets = (
        (None, {'fields': ('url', 'title', 'content', 'sites')}),
        (_('Advanced options'), {
            'classes': ('collapse', ),
            'fields': (
                'enable_comments',
                'registration_required',
                'template_name',
            ),
        }),
    )

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        # Default the "sites" m2m field to the current site.
        if db_field.name == "sites":
            kwargs["initial"] = [Site.objects.get_current()]
        # BUG FIX: previously delegated to formfield_for_foreignkey(),
        # handing a ManyToManyField to the ForeignKey hook; delegate to the
        # matching many-to-many hook instead.
        return super(FlatPageAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
class FactAdmin(admin.ModelAdmin):
    # Change-list columns for Fact entries.
    list_display = ('tagline','category','published')
class BodyAdmin(admin.ModelAdmin):
    # Change-list columns for Body entries.
    list_display = ('name','icon','active')
# Register the app models with the admin site, using the custom ModelAdmin
# classes declared above where one exists.
admin.site.register(Mission, MissionAdmin)
admin.site.register(Challenge, ChallengeAdmin)
admin.site.register(Sticker)
admin.site.register(PersonSticker)
admin.site.register(Body, BodyAdmin)
admin.site.register(Score)
admin.site.register(LevelScore)
admin.site.register(Fact, FactAdmin)
admin.site.register(Season)
# Re-register FlatPageAdmin
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
|
[
"django.contrib.sites.models.Site.objects.get_current",
"django.utils.translation.ugettext",
"django.contrib.admin.site.register",
"django.contrib.admin.site.unregister"
] |
[((1402, 1444), 'django.contrib.admin.site.register', 'admin.site.register', (['Mission', 'MissionAdmin'], {}), '(Mission, MissionAdmin)\n', (1421, 1444), False, 'from django.contrib import admin\n'), ((1445, 1491), 'django.contrib.admin.site.register', 'admin.site.register', (['Challenge', 'ChallengeAdmin'], {}), '(Challenge, ChallengeAdmin)\n', (1464, 1491), False, 'from django.contrib import admin\n'), ((1492, 1520), 'django.contrib.admin.site.register', 'admin.site.register', (['Sticker'], {}), '(Sticker)\n', (1511, 1520), False, 'from django.contrib import admin\n'), ((1521, 1555), 'django.contrib.admin.site.register', 'admin.site.register', (['PersonSticker'], {}), '(PersonSticker)\n', (1540, 1555), False, 'from django.contrib import admin\n'), ((1556, 1592), 'django.contrib.admin.site.register', 'admin.site.register', (['Body', 'BodyAdmin'], {}), '(Body, BodyAdmin)\n', (1575, 1592), False, 'from django.contrib import admin\n'), ((1593, 1619), 'django.contrib.admin.site.register', 'admin.site.register', (['Score'], {}), '(Score)\n', (1612, 1619), False, 'from django.contrib import admin\n'), ((1620, 1651), 'django.contrib.admin.site.register', 'admin.site.register', (['LevelScore'], {}), '(LevelScore)\n', (1639, 1651), False, 'from django.contrib import admin\n'), ((1652, 1688), 'django.contrib.admin.site.register', 'admin.site.register', (['Fact', 'FactAdmin'], {}), '(Fact, FactAdmin)\n', (1671, 1688), False, 'from django.contrib import admin\n'), ((1689, 1716), 'django.contrib.admin.site.register', 'admin.site.register', (['Season'], {}), '(Season)\n', (1708, 1716), False, 'from django.contrib import admin\n'), ((1746, 1777), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['FlatPage'], {}), '(FlatPage)\n', (1767, 1777), False, 'from django.contrib import admin\n'), ((1778, 1822), 'django.contrib.admin.site.register', 'admin.site.register', (['FlatPage', 'FlatPageAdmin'], {}), '(FlatPage, FlatPageAdmin)\n', (1797, 1822), False, 'from 
django.contrib import admin\n'), ((732, 753), 'django.utils.translation.ugettext', '_', (['"""Advanced options"""'], {}), "('Advanced options')\n", (733, 753), True, 'from django.utils.translation import ugettext as _\n'), ((1107, 1133), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (1131, 1133), False, 'from django.contrib.sites.models import Site\n')]
|
import sys
from taurus.external.qt import Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.display import TaurusLabel
from taurus.qt.qtgui.input import TaurusWheelEdit
from taurus.qt.qtgui.application import TaurusApplication
# Demo: a horizontal panel with label / value / editor / unit widgets that
# all resolve their model relative to the panel's model.
app = TaurusApplication(sys.argv)
panel = TaurusWidget()
layout = Qt.QHBoxLayout()
panel.setLayout(layout)
w1 = TaurusLabel()
w2 = TaurusLabel()
w3 = TaurusWheelEdit()
w4 = TaurusLabel()
layout.addWidget(w1)
layout.addWidget(w2)
layout.addWidget(w3)
layout.addWidget(w4)
# Child widgets inherit the panel's model and only carry relative fragments.
w1.setUseParentModel(True)
w2.setUseParentModel(True)
w3.setUseParentModel(True)
w4.setUseParentModel(True)
panel.setModel('sys/taurustest/1')
w1.setModel('/position#label')
w2.setModel('/position')
w3.setModel('/position')
w4.setModel('/position#unit')
panel.show()
sys.exit(app.exec_())
|
[
"taurus.qt.qtgui.input.TaurusWheelEdit",
"taurus.qt.qtgui.container.TaurusWidget",
"taurus.qt.qtgui.display.TaurusLabel",
"taurus.qt.qtgui.application.TaurusApplication",
"taurus.external.qt.Qt.QHBoxLayout"
] |
[((259, 286), 'taurus.qt.qtgui.application.TaurusApplication', 'TaurusApplication', (['sys.argv'], {}), '(sys.argv)\n', (276, 286), False, 'from taurus.qt.qtgui.application import TaurusApplication\n'), ((296, 310), 'taurus.qt.qtgui.container.TaurusWidget', 'TaurusWidget', ([], {}), '()\n', (308, 310), False, 'from taurus.qt.qtgui.container import TaurusWidget\n'), ((320, 336), 'taurus.external.qt.Qt.QHBoxLayout', 'Qt.QHBoxLayout', ([], {}), '()\n', (334, 336), False, 'from taurus.external.qt import Qt\n'), ((367, 380), 'taurus.qt.qtgui.display.TaurusLabel', 'TaurusLabel', ([], {}), '()\n', (378, 380), False, 'from taurus.qt.qtgui.display import TaurusLabel\n'), ((386, 399), 'taurus.qt.qtgui.display.TaurusLabel', 'TaurusLabel', ([], {}), '()\n', (397, 399), False, 'from taurus.qt.qtgui.display import TaurusLabel\n'), ((405, 422), 'taurus.qt.qtgui.input.TaurusWheelEdit', 'TaurusWheelEdit', ([], {}), '()\n', (420, 422), False, 'from taurus.qt.qtgui.input import TaurusWheelEdit\n'), ((428, 441), 'taurus.qt.qtgui.display.TaurusLabel', 'TaurusLabel', ([], {}), '()\n', (439, 441), False, 'from taurus.qt.qtgui.display import TaurusLabel\n')]
|
# Autogenerated by onnx-model-maker. Don't modify it manually.
import onnx
import onnx.helper
import onnx.numpy_helper
from onnx_model_maker import omm
from onnx_model_maker import onnx_mm_export
from onnx_model_maker.ops.op_helper import _add_input
@onnx_mm_export("v3.GRU")
def GRU(X, W, R, B=None, sequence_lens=None, initial_h=None, **kwargs):
  """Create an ONNX GRU node, check it, and append it to the shared model
  graph. Optional inputs that are None are handled by _add_input.
  Returns the created NodeProto.
  """
  _inputs = []
  for i in (X, W, R, B, sequence_lens, initial_h):
    _add_input(i, _inputs)
  # Per-op counter gives every GRU node and its outputs a unique name.
  idx = omm.op_counter["GRU"]
  omm.op_counter["GRU"] += 1
  node = onnx.helper.make_node("GRU",
                               _inputs, [f'_t_GRU_{idx}_Y', f'_t_GRU_{idx}_Y_h'],
                               name=f"GRU_{idx}",
                               **kwargs)
  onnx.checker.check_node(node, omm.ctx)
  omm.model.graph.node.append(node)
  return node
|
[
"onnx_model_maker.omm.model.graph.node.append",
"onnx.helper.make_node",
"onnx.checker.check_node",
"onnx_model_maker.ops.op_helper._add_input",
"onnx_model_maker.onnx_mm_export"
] |
[((254, 278), 'onnx_model_maker.onnx_mm_export', 'onnx_mm_export', (['"""v3.GRU"""'], {}), "('v3.GRU')\n", (268, 278), False, 'from onnx_model_maker import onnx_mm_export\n'), ((513, 625), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""GRU"""', '_inputs', "[f'_t_GRU_{idx}_Y', f'_t_GRU_{idx}_Y_h']"], {'name': 'f"""GRU_{idx}"""'}), "('GRU', _inputs, [f'_t_GRU_{idx}_Y',\n f'_t_GRU_{idx}_Y_h'], name=f'GRU_{idx}', **kwargs)\n", (534, 625), False, 'import onnx\n'), ((717, 755), 'onnx.checker.check_node', 'onnx.checker.check_node', (['node', 'omm.ctx'], {}), '(node, omm.ctx)\n', (740, 755), False, 'import onnx\n'), ((758, 791), 'onnx_model_maker.omm.model.graph.node.append', 'omm.model.graph.node.append', (['node'], {}), '(node)\n', (785, 791), False, 'from onnx_model_maker import omm\n'), ((421, 443), 'onnx_model_maker.ops.op_helper._add_input', '_add_input', (['i', '_inputs'], {}), '(i, _inputs)\n', (431, 443), False, 'from onnx_model_maker.ops.op_helper import _add_input\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import copy
import inspect
import itertools
import operator
from collections.abc import Iterable
from functools import reduce
from math import ceil, log
import numpy as np
from ...config import options
from ...core.operand import OperandStage
from ...serialize import KeyField, AnyField, BoolField, Int32Field
from ..core import Tensor, TensorOrder
from ..array_utils import get_array_module, as_same_device, device, cp
from ..utils import check_out_param, validate_axis
from ..operands import TensorHasInput, TensorOperandMixin
from ..datasource import tensor as astensor
def numel(x, **kwargs):
    """Element count of ``x`` computed with x's own array module (numpy,
    cupy, ...) so it works on any backend; extra kwargs (axis, keepdims,
    dtype, ...) are forwarded to ``sum``.
    """
    module = get_array_module(x)
    ones = module.ones_like(x)
    return module.sum(ones, **kwargs)
def nannumel(x, **kwargs):
    """Count of non-NaN elements of ``x``: total size minus the number of
    NaNs, computed with x's own array module; extra kwargs (axis,
    keepdims, ...) are forwarded to ``sum``.
    """
    # Initializer 1 makes the product safe for 0-d inputs (empty shape),
    # where reduce() without an initial value raises TypeError.
    x_size = reduce(operator.mul, x.shape, 1)
    xp = get_array_module(x)
    return x_size - xp.sum(xp.isnan(x), **kwargs)
class TensorReductionMixin(TensorOperandMixin):
    """Shared tile/execute machinery for tensor reduction operands.

    Subclasses supply ``_func_name`` (the numpy/cupy function to call) and
    override the classmethod hooks below where needed. Tiling performs a
    per-chunk map stage followed by a tree of combine stages and a final
    aggregation stage.
    """
    __slots__ = ()
    @classmethod
    def _is_cum(cls):
        # Overridden to True by cumulative reductions (see TensorCumReductionMixin).
        return False
    @classmethod
    def _calc_order(cls, a, out):
        # Result memory order follows `out` when given, else the input.
        return out.order if out is not None else a.order
    @classmethod
    def _is_sparse(cls, input_sparse, shape):
        return False
    def _call(self, a, out):
        # Build the output tensor for this reduction; validates `out` if given.
        a = astensor(a)
        if out is not None and not isinstance(out, Tensor):
            raise TypeError(f'out should be Tensor object, got {type(out)} instead')
        axis = getattr(self, 'axis', None)
        keepdims = getattr(self, 'keepdims', None)
        order = self._calc_order(a, out)
        if self._is_cum():
            # Cumulative reductions keep the (possibly raveled) input shape.
            if axis is None:
                a, axis = a.ravel(), 0
                setattr(self, '_axis', axis)
            shape = a.shape
        else:
            axis = list(range(len(a.shape))) if axis is None else axis
            if not isinstance(axis, Iterable):
                axis = (validate_axis(a.ndim, axis),)
            axis = set(axis)
            # Reduced axes become 1 (keepdims) or are dropped entirely.
            shape = tuple(s if i not in axis else 1 for i, s in enumerate(a.shape)
                          if keepdims or i not in axis)
        self.sparse = self._is_sparse(a.issparse(), shape)
        t = self.new_tensor([a], shape, order=order)
        if out is None:
            return t
        check_out_param(out, t, 'same_kind')
        out_shape, out_dtype = out.shape, out.dtype
        # if `out` is specified, use out's dtype and shape
        if out_shape != t.shape:
            if out.ndim > t.ndim:
                raise ValueError('output has too many dimensions')
            raise ValueError(f'output shape should be {t.shape}, got {out_shape}')
        setattr(self, 'dtype', out_dtype)
        out.data = t.data
        return out
    def _new_chunks(self, inputs, kws=None, **kw):
        chunks = super()._new_chunks(inputs, kws=kws, **kw)
        setattr(self, '_input', getattr(self, '_inputs')[0])
        return chunks
    def _new_tileables(self, inputs, kws=None, **kw):
        tensors = super()._new_tileables(inputs, kws=kws, **kw)
        setattr(self, '_input', getattr(self, '_inputs')[0])
        return tensors
    def __call__(self, a, out=None):
        return self._call(a, out=out)
    @staticmethod
    def _reduced_shape(shape, axes):
        # Shape after reduction with keepdims semantics (reduced axes -> 1).
        return tuple(1 if i in axes else s for i, s in enumerate(shape))
    @staticmethod
    def _reduced_nsplits(nsplits, axes):
        # Chunk splits after reduction: reduced axes collapse to size-1 chunks.
        return tuple((1,) * len(c) if i in axes else c
                     for i, c in enumerate(nsplits))
    @staticmethod
    def _concatenate_shape(tensor, combine_block):
        return tuple(builtins.sum(nsplit[i] for i in cb)
                     for nsplit, cb in zip(tensor.nsplits, combine_block))
    @staticmethod
    def _combine_split(ax, combine_size, chunk_shape):
        # Group chunk indices along axis `ax` into runs of `combine_size`.
        if ax not in combine_size:
            return tuple((i,) for i in range(chunk_shape[ax]))
        else:
            size = combine_size[ax]
            shape = chunk_shape[ax]
            index = tuple(range(shape))
            return tuple(index[i:i + size] for i in range(0, shape, size))
    def _get_op_kw(self):
        # Extra kwargs forwarded to per-stage operand constructors; hook for subclasses.
        return None
    @classmethod
    def get_axis(cls, axis):
        return tuple(axis) if axis is not None else axis
    @classmethod
    def get_arg_axis(cls, axis, ndim):
        # None means a full (flattened) arg-reduction; otherwise a single axis.
        return None if len(axis) == ndim or ndim == 1 else axis[0]
    @classmethod
    def _tree_reduction(cls, tensor, axis):
        # Repeatedly combine groups of chunks until one aggregation remains.
        op = tensor.op
        kw = getattr(op, '_get_op_kw')() or {}
        keepdims = op.keepdims
        combine_size = op.combine_size or options.combine_size
        if isinstance(combine_size, dict):
            combine_size = dict((ax, combine_size.get(ax)) for ax in axis)
        else:
            assert isinstance(combine_size, int)
            # Spread the total fan-in evenly over the reduced axes (min 2).
            n = builtins.max(int(combine_size ** (1.0 / (len(axis) or 1))), 2)
            combine_size = dict((ax, n) for ax in axis)
        # Number of combine rounds needed to collapse the largest axis.
        times = 1
        for i, n in enumerate(tensor.chunk_shape):
            if i in combine_size and combine_size[i] != 1:
                times = int(builtins.max(times, ceil(log(n, combine_size[i]))))
        for i in range(times - 1):
            [tensor] = cls._partial_reduction(tensor, axis, op.dtype, True, combine_size, OperandStage.combine)
        return cls._partial_reduction(tensor, axis, op.dtype, keepdims, combine_size, OperandStage.agg, kw)
    @classmethod
    def _partial_reduction(cls, tensor, axis, dtype, keepdims, combine_size, stage, kw=None):
        # One round of the tree: concatenate each chunk group, reduce it.
        from ..merge.concatenate import TensorConcatenate
        kw = kw or {}
        axes = sorted(combine_size.keys())
        op_type = type(tensor.op)
        combine_blocks = [cls._combine_split(i, combine_size, tensor.chunk_shape)
                          for i in range(tensor.ndim)]
        combine_blocks_idxes = [range(len(blocks)) for blocks in combine_blocks]
        chunks = []
        for combine_block_idx, combine_block in zip(itertools.product(*combine_blocks_idxes),
                                                    itertools.product(*combine_blocks)):
            chks = [tensor.cix[idx] for idx in itertools.product(*combine_block)]
            if len(chks) > 1:
                op = TensorConcatenate(axis=axes, dtype=chks[0].dtype)
                chk = op.new_chunk(chks, shape=cls._concatenate_shape(tensor, combine_block),
                                   order=tensor.order)
            else:
                chk = chks[0]
            shape = tuple(s if i not in combine_size else 1
                          for i, s in enumerate(chk.shape) if keepdims or i not in combine_size)
            agg_op = op_type(stage=stage, axis=axis, dtype=dtype, keepdims=keepdims, **kw)
            chunk = agg_op.new_chunk([chk], shape=shape,
                                     index=tuple(idx for i, idx in enumerate(combine_block_idx)
                                                 if keepdims or i not in combine_size),
                                     order=tensor.order)
            chunks.append(chunk)
        nsplits = [
            tuple(c.shape[i] for c in chunks if builtins.all(idx == 0 for j, idx in enumerate(c.index) if j != i))
            for i in range(len(chunks[0].shape))]
        shape = tuple(builtins.sum(nsplit) for nsplit in nsplits)
        agg_op = op_type(stage=stage, axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size, **kw)
        return agg_op.new_tensors([tensor], shape, order=tensor.order,
                                  chunks=chunks, nsplits=nsplits)
    @classmethod
    def tile(cls, op):
        in_tensor = op.inputs[0]
        out_tensor = op.outputs[0]
        axis = tuple(range(in_tensor.ndim)) if op.axis is None else op.axis
        if isinstance(axis, int):
            axis = (axis,)
        axis = tuple(validate_axis(in_tensor.ndim, ax) for ax in axis)
        if len(in_tensor.chunks) == 1:
            # Single-chunk fast path: reduce in place, no tree needed.
            c = in_tensor.chunks[0]
            new_op = op.copy().reset_key()
            setattr(new_op, '_axis', axis)
            shape = list(cls._reduced_shape(c.shape, axis))
            nsplits = list(cls._reduced_nsplits(in_tensor.nsplits, axis))
            chunk_index = list(c.index)
            if not op.keepdims and axis:
                # Drop the reduced dimensions from shape/nsplits/index.
                for ax in axis:
                    shape[ax] = None
                    nsplits[ax] = None
                    chunk_index[ax] = None
                shape = tuple(s for s in shape if s is not None)
                nsplits = tuple(ns for ns in nsplits if ns is not None)
                chunk_index = tuple(i for i in chunk_index if i is not None)
            chunks = new_op.new_chunks([c], shape=shape, index=chunk_index, order=out_tensor.order)
            return op.copy().new_tensors(op.inputs, op.outputs[0].shape, order=out_tensor.order,
                                         chunks=chunks, nsplits=nsplits)
        # Map stage: reduce every chunk independently with keepdims=True.
        chunks = []
        kw = getattr(op, '_get_op_kw')() or {}
        for c in in_tensor.chunks:
            chunk_op = type(op)(stage=OperandStage.map, axis=axis, dtype=op.dtype, keepdims=True,
                                combine_size=op.combine_size, **kw)
            chunks.append(chunk_op.new_chunk([c], shape=cls._reduced_shape(c.shape, axis),
                                             order=out_tensor.order, index=c.index))
        new_op = op.copy()
        tensor = new_op.new_tensor(op.inputs, cls._reduced_shape(in_tensor.shape, axis),
                                   order=out_tensor.order,
                                   nsplits=cls._reduced_nsplits(in_tensor.nsplits, axis), chunks=chunks)
        return cls._tree_reduction(tensor, axis)
    @classmethod
    def execute_agg(cls, ctx, op):
        (input_chunk,), device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
        axis = cls.get_axis(op.axis)
        func_name = getattr(cls, '_func_name', None)
        reduce_func = getattr(xp, func_name)
        out = op.outputs[0]
        with device(device_id):
            if input_chunk.size == 0 and op.keepdims:
                # input chunk is empty, when keepdims is True, return itself
                ret = input_chunk
            elif "dtype" in inspect.getfullargspec(reduce_func).args:
                ret = reduce_func(input_chunk, axis=axis,
                                  dtype=op.dtype,
                                  keepdims=bool(op.keepdims))
            else:
                # Some backend functions don't accept dtype (e.g. argmin-like).
                ret = reduce_func(input_chunk, axis=axis,
                                  keepdims=bool(op.keepdims))
            if hasattr(ret, 'astype'):
                # for non-object dtype
                ret = ret.astype(op.dtype, order=out.order.value, copy=False)
            ctx[out.key] = ret
    @classmethod
    def execute_one_chunk(cls, ctx, op):
        cls.execute_agg(ctx, op)
    @classmethod
    def execute(cls, ctx, op):
        # Dispatch to the handler for this operand's stage.
        if op.stage == OperandStage.map:
            return cls.execute_map(ctx, op)
        elif op.stage == OperandStage.combine:
            return cls.execute_combine(ctx, op)
        elif op.stage == OperandStage.agg:
            return cls.execute_agg(ctx, op)
        else:
            return cls.execute_one_chunk(ctx, op)
class TensorArgReductionMixin(TensorReductionMixin):
    """Mixin for index-returning reductions (argmin/argmax and nan variants).

    Map stages carry ``(vals, arg)`` pairs so that combine/agg stages can
    select the winning global index; per-chunk offsets translate local
    indices into global ones.
    """
    __slots__ = ()
    @staticmethod
    def _get_arg_axis(axis, ndim):
        # Normalize axis; `ravel` marks a full (flattened) arg-reduction.
        if axis is None:
            axis = tuple(range(ndim))
            ravel = True
        elif isinstance(axis, int):
            axis = validate_axis(ndim, axis)
            axis = (axis,)
            ravel = ndim == 1
        else:
            raise TypeError("axis must be either `None` or int, "
                            f"got '{axis}'")
        return axis, ravel
    @staticmethod
    def _get_offset(tensor, axis, chunk, ravel):
        # Element offset of `chunk` within the full tensor, per dimension.
        nsplits = tensor.nsplits
        offset = tuple(builtins.sum(split[:idx]) for split, idx in zip(nsplits, chunk.index))
        if not ravel:
            offset = offset[axis[0]]
        return offset
    @classmethod
    def _calc_order(cls, a, out):
        return out.order if out is not None else TensorOrder.C_ORDER
    @classmethod
    def tile(cls, op):
        in_tensor = op.inputs[0]
        out_tensor = op.outputs[0]
        axis, ravel = cls._get_arg_axis(op.axis, in_tensor.ndim)
        chunks = []
        for c in in_tensor.chunks:
            offset = cls._get_offset(in_tensor, axis, c, ravel)
            chunk_op = type(op)(stage=OperandStage.map, axis=axis, dtype=op.dtype,
                                offset=offset, total_shape=in_tensor.shape,
                                combine_size=op.combine_size)
            chunk = chunk_op.new_chunk([c], shape=cls._reduced_shape(c.shape, axis),
                                       index=c.index, order=out_tensor.order)
            chunks.append(chunk)
        new_op = op.copy()
        tensor = new_op.new_tensor(op.inputs, cls._reduced_shape(in_tensor.shape, axis),
                                   order=out_tensor.order,
                                   nsplits=cls._reduced_nsplits(in_tensor.nsplits, axis), chunks=chunks)
        return cls._tree_reduction(tensor, axis)
    @classmethod
    def execute_agg(cls, ctx, op):
        # Pick the final global index from the candidate (vals, arg) pair.
        axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim)
        (vals, arg), device_id, xp = as_same_device(
            ctx[op.inputs[0].key], device=op.device, ret_extra=True)
        func_name = getattr(cls, '_func_name')
        arg_func = getattr(xp, func_name)
        with device(device_id):
            if xp.any(xp.isnan(vals)) and 'nan' in func_name:
                raise ValueError("All NaN slice encountered")
            if axis is None:
                local_args = arg_func(vals, axis=axis)
                arg = arg.ravel()[local_args]
            else:
                # Fancy-index the stored global args with the local winners.
                local_args = arg_func(vals, axis=axis)
                inds = np.ogrid[tuple(map(slice, local_args.shape))]
                if xp != np:
                    inds = [xp.asarray(it) for it in inds]
                inds.insert(axis, local_args)
                arg = arg[tuple(inds)]
            ctx[op.outputs[0].key] = arg
    @classmethod
    def execute_map(cls, ctx, op):
        # Compute per-chunk (vals, arg); translate arg to global indices.
        arg_axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim)
        (in_chunk,), device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
        func_name = getattr(cls, '_func_name')
        agg_func_name = getattr(cls, '_agg_func_name')
        arg_func = getattr(xp, func_name)
        agg_func_name = getattr(xp, agg_func_name)
        offset = op.offset
        chunk = op.outputs[0]
        with device(device_id):
            vals = agg_func_name(in_chunk, axis=arg_axis)
            if hasattr(vals, 'reshape'):
                vals = vals.reshape(chunk.shape)
            try:
                arg = arg_func(in_chunk, axis=arg_axis)
                if hasattr(arg, 'reshape'):
                    arg = arg.reshape(chunk.shape)
            except ValueError:
                # handle all NaN
                arg = arg_func(xp.where(xp.isnan(in_chunk), np.inf, in_chunk),
                               axis=arg_axis).reshape(chunk.shape)
            if arg_axis is None:
                if xp == cp:
                    # we need to copy to do cpu computation, then copy back to gpu
                    # cuz unravel_index and ravel_multi_index are not implemented in cupy
                    in_chunk = in_chunk.get()
                total_shape = op.total_shape
                ind = np.unravel_index(arg.ravel()[0], in_chunk.shape)
                total_ind = tuple(o + i for (o, i) in zip(offset, ind))
                res = np.ravel_multi_index(total_ind, total_shape)
                if xp == cp:
                    # copy back
                    with xp.cuda.Device(in_chunk.device.id):
                        arg[:] = xp.asarray(res)
                else:
                    arg[:] = res
            else:
                # Axis reduction: local indices shift by the chunk offset.
                arg += offset
            ctx[op.outputs[0].key] = (vals, arg)
    @classmethod
    def execute_combine(cls, ctx, op):
        # Merge concatenated (vals, arg) pairs, keeping the winners.
        axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim)
        (vals, arg), device_id, xp = as_same_device(
            ctx[op.inputs[0].key], device=op.device, ret_extra=True)
        func_name = getattr(cls, '_func_name')
        arg_func = getattr(xp, func_name)
        with device(device_id):
            if axis is None:
                local_args = arg_func(vals, axis=axis).reshape(op.outputs[0].shape)
                vals = vals.ravel()[local_args]
                arg = arg.ravel()[local_args]
            else:
                local_args = arg_func(vals, axis=axis)
                inds = np.ogrid[tuple(map(slice, local_args.shape))]
                if xp != np:
                    inds = [xp.asarray(it) for it in inds]
                inds.insert(axis, local_args)
                inds_tuple = tuple(inds)
                vals = vals[inds_tuple].reshape(op.outputs[0].shape)
                arg = arg[inds_tuple].reshape(op.outputs[0].shape)
            ctx[op.outputs[0].key] = (vals, arg)
class TensorCumReductionMixin(TensorReductionMixin):
    """Mixin for cumulative reductions (cumsum/cumprod).

    Tiling computes the cumulative result inside every chunk, then folds
    the last slice of each preceding chunk along the axis into each chunk
    with the binary companion op (e.g. add for cumsum).
    """
    __slots__ = ()
    @classmethod
    def _is_cum(cls):
        return True
    @staticmethod
    def _get_op_types():
        # Returns (cumulative op type, binary companion op type); subclass hook.
        raise NotImplementedError
    @classmethod
    def tile(cls, op):
        from ..indexing.slice import TensorSlice
        in_tensor = op.inputs[0]
        out_tensor = op.outputs[0]
        axis = op.axis
        if not isinstance(axis, int):
            raise ValueError("axis must be a integer")
        axis = validate_axis(in_tensor.ndim, axis)
        if axis is None:
            raise NotImplementedError
        op_type, bin_op_type = getattr(op, '_get_op_types')()
        # Stage 1: per-chunk cumulative results.
        chunks = []
        for c in in_tensor.chunks:
            chunk_op = op_type(axis=op.axis, dtype=op.dtype)
            chunks.append(chunk_op.new_chunk([c], shape=c.shape,
                                             index=c.index, order=out_tensor.order))
        inter_tensor = copy.copy(in_tensor)
        inter_tensor._chunks = chunks
        # Slice selecting only the last element along the cum axis.
        slc = [slice(None) if i != axis else slice(-1, None)
               for i in range(in_tensor.ndim)]
        # Stage 2: add the trailing totals of all preceding chunks.
        output_chunks = []
        for chunk in chunks:
            if chunk.index[axis] == 0:
                output_chunks.append(chunk)
                continue
            to_cum_chunks = []
            for i in range(chunk.index[axis]):
                to_cum_index = chunk.index[:axis] + (i,) + chunk.index[axis + 1:]
                shape = chunk.shape[:axis] + (1,) + chunk.shape[axis + 1:]
                to_cum_chunk = inter_tensor.cix[to_cum_index]
                slice_op = TensorSlice(slices=slc, dtype=chunk.dtype)
                sliced_chunk = slice_op.new_chunk([to_cum_chunk], shape=shape,
                                                  index=to_cum_index, order=out_tensor.order)
                to_cum_chunks.append(sliced_chunk)
            to_cum_chunks.append(chunk)
            bin_op = bin_op_type(args=to_cum_chunks, dtype=chunk.dtype)
            output_chunk = bin_op.new_chunk(to_cum_chunks, shape=chunk.shape,
                                            index=chunk.index, order=out_tensor.order)
            output_chunks.append(output_chunk)
        new_op = op.copy()
        return new_op.new_tensors(op.inputs, in_tensor.shape, order=out_tensor.order,
                                  chunks=output_chunks, nsplits=in_tensor.nsplits)
    @classmethod
    def execute(cls, ctx, op):
        (x,), device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
        func_name = getattr(cls, '_func_name')
        cum_func = getattr(xp, func_name)
        if xp != np:
            func = getattr(xp, cum_func.__name__)
        else:
            func = cum_func
        with device(device_id):
            ctx[op.outputs[0].key] = func(x, axis=op.axis, dtype=op.dtype)
class TensorReduction(TensorHasInput):
    """Base operand for (non-cumulative) tensor reductions; declares the
    serialized fields and read-only accessors shared by all of them."""
    _input = KeyField('input')
    _out = KeyField('out')
    _axis = AnyField('axis')  # can be None or int or tuple of ints, just infer the data
    _keepdims = BoolField('keepdims')
    _combine_size = AnyField('combine_size')
    @property
    def axis(self):
        return getattr(self, '_axis', None)
    @property
    def keepdims(self):
        return getattr(self, '_keepdims', None)
    @property
    def combine_size(self):
        return getattr(self, '_combine_size', None)
    def _rewrite_stage(self, stage):
        # Subclasses without a map/combine handler fall back to the agg handler.
        if stage == OperandStage.map and not hasattr(self, 'execute_map'):
            return OperandStage.agg
        elif stage == OperandStage.combine and not hasattr(self, 'execute_combine'):
            return OperandStage.agg
        return stage
class TensorCumReduction(TensorHasInput):
    """Base operand for cumulative reductions (cumsum/cumprod); the axis is
    a single required integer, unlike TensorReduction's flexible axis."""
    _input = KeyField('input')
    _axis = Int32Field('axis')
    @property
    def axis(self):
        return getattr(self, '_axis', None)
|
[
"inspect.getfullargspec",
"copy.copy",
"numpy.ravel_multi_index",
"itertools.product",
"functools.reduce",
"math.log",
"builtins.sum"
] |
[((1376, 1405), 'functools.reduce', 'reduce', (['operator.mul', 'x.shape'], {}), '(operator.mul, x.shape)\n', (1382, 1405), False, 'from functools import reduce\n'), ((18610, 18630), 'copy.copy', 'copy.copy', (['in_tensor'], {}), '(in_tensor)\n', (18619, 18630), False, 'import copy\n'), ((6480, 6520), 'itertools.product', 'itertools.product', (['*combine_blocks_idxes'], {}), '(*combine_blocks_idxes)\n', (6497, 6520), False, 'import itertools\n'), ((6574, 6608), 'itertools.product', 'itertools.product', (['*combine_blocks'], {}), '(*combine_blocks)\n', (6591, 6608), False, 'import itertools\n'), ((4152, 4187), 'builtins.sum', 'builtins.sum', (['(nsplit[i] for i in cb)'], {}), '(nsplit[i] for i in cb)\n', (4164, 4187), False, 'import builtins\n'), ((7778, 7798), 'builtins.sum', 'builtins.sum', (['nsplit'], {}), '(nsplit)\n', (7790, 7798), False, 'import builtins\n'), ((12382, 12407), 'builtins.sum', 'builtins.sum', (['split[:idx]'], {}), '(split[:idx])\n', (12394, 12407), False, 'import builtins\n'), ((16217, 16261), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['total_ind', 'total_shape'], {}), '(total_ind, total_shape)\n', (16237, 16261), True, 'import numpy as np\n'), ((6658, 6691), 'itertools.product', 'itertools.product', (['*combine_block'], {}), '(*combine_block)\n', (6675, 6691), False, 'import itertools\n'), ((10742, 10777), 'inspect.getfullargspec', 'inspect.getfullargspec', (['reduce_func'], {}), '(reduce_func)\n', (10764, 10777), False, 'import inspect\n'), ((5635, 5658), 'math.log', 'log', (['n', 'combine_size[i]'], {}), '(n, combine_size[i])\n', (5638, 5658), False, 'from math import ceil, log\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class GetBucketObjectResult:
    """
    A collection of values returned by getBucketObject.
    """
    def __init__(__self__, body=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_length=None, content_type=None, etag=None, expiration=None, expires=None, last_modified=None, metadata=None, server_side_encryption=None, sse_kms_key_id=None, storage_class=None, tags=None, version_id=None, website_redirect_location=None, id=None):
        # Each entry is (attribute name, supplied value, expected type).
        # Validation mirrors the generated per-argument checks: a falsy value
        # (None, '', 0, {}) skips the isinstance test, and every argument is
        # bound as an attribute of the same name in declaration order.
        specs = [
            ('body', body, str),
            ('cache_control', cache_control, str),
            ('content_disposition', content_disposition, str),
            ('content_encoding', content_encoding, str),
            ('content_language', content_language, str),
            ('content_length', content_length, float),
            ('content_type', content_type, str),
            ('etag', etag, str),
            ('expiration', expiration, str),
            ('expires', expires, str),
            ('last_modified', last_modified, str),
            ('metadata', metadata, dict),
            ('server_side_encryption', server_side_encryption, str),
            ('sse_kms_key_id', sse_kms_key_id, str),
            ('storage_class', storage_class, str),
            ('tags', tags, dict),
            ('version_id', version_id, str),
            ('website_redirect_location', website_redirect_location, str),
            ('id', id, str),
        ]
        for attr_name, value, expected_type in specs:
            if value and not isinstance(value, expected_type):
                raise TypeError('Expected argument %s to be a %s'
                                % (attr_name, expected_type.__name__))
            setattr(__self__, attr_name, value)
async def get_bucket_object(bucket=None,key=None,range=None,tags=None,version_id=None,opts=None):
    """
    The S3 object data source allows access to the metadata and
    _optionally_ (see below) content of an object stored inside S3 bucket.

    > **Note:** The content of an object (`body` field) is available only for objects which have a human-readable `Content-Type` (`text/*` and `application/json`). This is to prevent printing unsafe characters and potentially downloading large amount of data which would be thrown away in favour of metadata.
    """
    # Invoke arguments use the provider's camelCase key names.
    __args__ = {
        'bucket': bucket,
        'key': key,
        'range': range,
        'tags': tags,
        'versionId': version_id,
    }
    __ret__ = await pulumi.runtime.invoke('aws:s3/getBucketObject:getBucketObject', __args__, opts=opts)

    # Map the raw invoke result onto the typed result object.
    get = __ret__.get
    return GetBucketObjectResult(
        body=get('body'),
        cache_control=get('cacheControl'),
        content_disposition=get('contentDisposition'),
        content_encoding=get('contentEncoding'),
        content_language=get('contentLanguage'),
        content_length=get('contentLength'),
        content_type=get('contentType'),
        etag=get('etag'),
        expiration=get('expiration'),
        expires=get('expires'),
        last_modified=get('lastModified'),
        metadata=get('metadata'),
        server_side_encryption=get('serverSideEncryption'),
        sse_kms_key_id=get('sseKmsKeyId'),
        storage_class=get('storageClass'),
        tags=get('tags'),
        version_id=get('versionId'),
        website_redirect_location=get('websiteRedirectLocation'),
        id=get('id'))
|
[
"pulumi.runtime.invoke"
] |
[((7594, 7682), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""aws:s3/getBucketObject:getBucketObject"""', '__args__'], {'opts': 'opts'}), "('aws:s3/getBucketObject:getBucketObject', __args__,\n opts=opts)\n", (7615, 7682), False, 'import pulumi\n')]
|
###
# Copyright (c) 2005, <NAME>
# Copyright (c) 2010-2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
# Translation helper bound to this plugin's message catalogue.
_ = PluginInternationalization('RSS')

def configure(advanced):
    """Interactive configuration hook run by supybot-wizard."""
    # This will be called by supybot to configure this module.  advanced is
    # a bool that specifies whether the user identified themself as an advanced
    # user or not.  You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('RSS', True)
class FeedNames(registry.SpaceSeparatedListOfStrings):
    """Registry value holding the configured feed names.

    Uses a canonical-name set so lookups are case-insensitive.
    """
    List = callbacks.CanonicalNameSet
class FeedItemSortOrder(registry.OnlySomeStrings):
    """Valid values include 'asInFeed', 'oldestFirst', 'newestFirst'."""
    # Full accepted set; 'outdatedFirst'/'updatedFirst' sort by update time.
    validStrings = ('asInFeed', 'oldestFirst', 'newestFirst', 'outdatedFirst',
                    'updatedFirst')
RSS = conf.registerPlugin('RSS')
conf.registerGlobalValue(RSS, 'feeds',
FeedNames([], _("""Determines what feeds should be accessible as
commands.""")))
########
# Format
conf.registerChannelValue(RSS, 'headlineSeparator',
registry.StringSurroundedBySpaces('|', _("""Determines what string is
used to separate headlines in new feeds.""")))
conf.registerChannelValue(RSS, 'format',
registry.String(_('$date: $title <$link>'), _("""The format the bot
will use for displaying headlines of a RSS feed that is triggered
manually. In addition to fields defined by feedparser ($published
(the entry date), $title, $link, $description, $id, etc.), the following
variables can be used: $feed_name, $date (parsed date, as defined in
supybot.reply.format.time)""")))
conf.registerChannelValue(RSS, 'announceFormat',
registry.String(_('News from $feed_name: $title <$link>'),
_("""The format the bot will use for displaying headlines of a RSS feed
that is announced. See supybot.plugins.RSS.format for the available
variables.""")))
###########
# Announces
conf.registerChannelValue(RSS, 'announce',
registry.SpaceSeparatedSetOfStrings([], _("""Determines which RSS feeds
should be announced in the channel; valid input is a list of strings
(either registered RSS feeds or RSS feed URLs) separated by spaces.""")))
conf.registerGlobalValue(RSS, 'waitPeriod',
registry.PositiveInteger(1800, _("""Indicates how many seconds the bot will
wait between retrieving RSS feeds; requests made within this period will
return cached results.""")))
conf.registerGlobalValue(RSS, 'sortFeedItems',
FeedItemSortOrder('asInFeed', _("""Determines whether feed items should be
sorted by their publication/update timestamp or kept in the same order as
they appear in a feed.""")))
conf.registerChannelValue(RSS, 'notice',
registry.Boolean(False, _("""Determines whether announces will be sent
as notices instead of privmsgs.""")))
conf.registerChannelValue(RSS, 'maximumAnnounceHeadlines',
registry.PositiveInteger(5, _("""Indicates how many new news entries may
be sent at the same time. Extra entries will be discarded.""")))
####################
# Headlines filtering
conf.registerChannelValue(RSS, 'defaultNumberOfHeadlines',
registry.PositiveInteger(1, _("""Indicates how many headlines an rss feed
will output by default, if no number is provided.""")))
conf.registerChannelValue(RSS, 'initialAnnounceHeadlines',
registry.Integer(5, _("""Indicates how many headlines an rss feed
will output when it is first added to announce for a channel.""")))
conf.registerChannelValue(RSS, 'keywordWhitelist',
registry.SpaceSeparatedSetOfStrings([], _("""Space separated list of
strings, lets you filter headlines to those containing one or more items
in this whitelist.""")))
conf.registerChannelValue(RSS, 'keywordBlacklist',
registry.SpaceSeparatedSetOfStrings([], _("""Space separated list of
strings, lets you filter headlines to those not containing any items
in this blacklist.""")))
def register_feed_config(name, url=''):
    """Register the per-feed configuration variables for feed *name*.

    Adds *name* to the global feed list and creates its URL value plus a
    feed-specific group with 'format', 'announceFormat', and 'waitPeriod'
    overrides.
    """
    RSS.feeds().add(name)
    conf.registerGlobalValue(RSS.feeds, name,
        registry.String(url, _("""The URL for the feed %s. Note that because
        announced lines are cached, you may need to reload this plugin after
        changing this option.""" % name)))
    feed_group = conf.registerGroup(RSS.feeds, name)
    conf.registerChannelValue(feed_group, 'format',
        registry.String('', _("""Feed-specific format. Defaults to
        supybot.plugins.RSS.format if empty.""")))
    conf.registerChannelValue(feed_group, 'announceFormat',
        registry.String('', _("""Feed-specific announce format.
        Defaults to supybot.plugins.RSS.announceFormat if empty.""")))
    conf.registerGlobalValue(feed_group, 'waitPeriod',
        registry.NonNegativeInteger(0, _("""If set to a non-zero
        value, overrides supybot.plugins.RSS.waitPeriod for this
        particular feed.""")))
# Re-register configuration for feeds persisted from previous runs.
for name in RSS.feeds():
    register_feed_config(name)

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
[
"supybot.i18n.PluginInternationalization",
"supybot.conf.registerGroup",
"supybot.conf.registerPlugin"
] |
[((1793, 1826), 'supybot.i18n.PluginInternationalization', 'PluginInternationalization', (['"""RSS"""'], {}), "('RSS')\n", (1819, 1826), False, 'from supybot.i18n import PluginInternationalization, internationalizeDocstring\n'), ((2554, 2580), 'supybot.conf.registerPlugin', 'conf.registerPlugin', (['"""RSS"""'], {}), "('RSS')\n", (2573, 2580), True, 'import supybot.conf as conf\n'), ((2187, 2219), 'supybot.conf.registerPlugin', 'conf.registerPlugin', (['"""RSS"""', '(True)'], {}), "('RSS', True)\n", (2206, 2219), True, 'import supybot.conf as conf\n'), ((5986, 6021), 'supybot.conf.registerGroup', 'conf.registerGroup', (['RSS.feeds', 'name'], {}), '(RSS.feeds, name)\n', (6004, 6021), True, 'import supybot.conf as conf\n')]
|
# Modules let you split big problems into smaller ones.
# Example: everything could live in this one file, but in a really large
# program the helper functions can be pulled out into their own module.
import Aula22uteis

num = int(input("Digite um valor: "))
fat = Aula22uteis.fatorial(num)
print(f"O fatorial de {num} é {fat}")
print(f'O dobro de {num} é {Aula22uteis.dobro(num)}')

# To avoid prefixing every name with "Aula22uteis.", the names can be
# imported directly, as in the alternative below.
# NOTE: that style is discouraged because it can cause name clashes.
"""
from Aula22uteis import fatorial, dobro
num = int(input("Digite um valor: "))
fat = fatorial(num)
print(f"O fatorial de {num} é {fat}")
print(f'O dobro de {num} é {dobro(num)}')
"""
|
[
"Aula22uteis.dobro",
"Aula22uteis.fatorial"
] |
[((258, 283), 'Aula22uteis.fatorial', 'Aula22uteis.fatorial', (['num'], {}), '(num)\n', (278, 283), False, 'import Aula22uteis\n'), ((351, 373), 'Aula22uteis.dobro', 'Aula22uteis.dobro', (['num'], {}), '(num)\n', (368, 373), False, 'import Aula22uteis\n')]
|
import requests
from uniswap import Uniswap
# Ethereum JSON-RPC endpoint used for read-only price queries.
PROVIDER = 'https://eth.elara.cybernode.ai/'
# Zero address / no key: no wallet is needed for read-only lookups.
address = "0x0000000000000000000000000000000000000000"
private_key = None

uniswap_wrapper = Uniswap(address, private_key, version=2, provider=PROVIDER)  # pass version=2 to use Uniswap v2

eth = "0x0000000000000000000000000000000000000000"
gol = "0xF4ecdBa8ba4144Ff3a2d8792Cad9051431Aa4F64"

# ETH price for one GOL output unit, scaled down from wei.
# NOTE(review): this performs a network call at import time.
ETH_GGOL_PRICE = uniswap_wrapper.get_eth_token_output_price(gol, 1) / (1*10**18)
# Total GGOL supply converted from base units (presumably 9 decimals — verify).
GGOL_SUPPLY = 14_406_844_988_437 / 10**9
def get_data(timeout=10):
    """Fetch CoinGecko's market-data document for Ethereum.

    Args:
        timeout: seconds to wait for the HTTP response (default 10).
            BUG FIX: the original call had no timeout, so ``requests.get``
            could block forever on a stalled connection.

    Returns:
        dict: the decoded JSON response body.
    """
    response = requests.get('https://api.coingecko.com/api/v3/coins/ethereum',
                            timeout=timeout)
    return response.json()
def get_gol_market_data():
    """Build a GGOL market-data summary derived from ETH market data."""
    eth_data = get_data()
    price = get_current_price(eth_data)['current_price']
    cap = get_market_cap(price)['market_cap']
    changes = eth_data['market_data']

    market_data = {
        "current_price": price,
        "market_cap": cap,
        "market_cap_rank": None,
    }
    # Pass through the percentage-change figures unchanged.
    for period in ("24h", "7d", "30d"):
        key = "price_change_percentage_" + period
        market_data[key] = changes[key]
    return {"market_data": market_data}
def get_current_price(resp):
    """Convert ETH quotes in *resp* into per-GGOL quotes.

    The scale factor is the ETH price of one whole GGOL (base-unit price
    times 10**9).
    """
    scale = ETH_GGOL_PRICE * 10**9
    eth_quotes = resp['market_data']["current_price"]
    return {
        "current_price": {
            "usd": eth_quotes['usd'] * scale,
            "btc": eth_quotes['btc'] * scale,
            "eth": scale,
        }
    }
def get_market_cap(current_price):
    """Scale per-token prices by the total GGOL supply to get market caps."""
    caps = {currency: current_price[currency] * GGOL_SUPPLY
            for currency in ("usd", "btc", "eth")}
    return {"market_cap": caps}
|
[
"uniswap.Uniswap",
"requests.get"
] |
[((186, 245), 'uniswap.Uniswap', 'Uniswap', (['address', 'private_key'], {'version': '(2)', 'provider': 'PROVIDER'}), '(address, private_key, version=2, provider=PROVIDER)\n', (193, 245), False, 'from uniswap import Uniswap\n'), ((535, 598), 'requests.get', 'requests.get', (['"""https://api.coingecko.com/api/v3/coins/ethereum"""'], {}), "('https://api.coingecko.com/api/v3/coins/ethereum')\n", (547, 598), False, 'import requests\n')]
|
# -*- coding: utf-8 -*-
import pickle
import torch
from torch.autograd import Variable
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
from dataset_gen import DatasetGenerator
# Load the pickled (positive, negative) sample lists.
file = 'train'
with open(file, mode='rb') as f:
    pos, neg = pickle.load(f)

# First 50 positive samples plus all negatives form the training set;
# remaining positives are held out for testing.
train = []
train.extend(pos[:50])
train.extend(neg)
test = []
test.extend(pos[50:])

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 200, 100, 2

# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
print(x)
print(y)

# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
# BUG FIX: the original constructed torch.nn.NLLLoss(weight=[5, 1]) and
# discarded the result — and a plain list is not a valid `weight` (it must
# be a Tensor).  The unused, invalid construction has been removed.
# `reduction='sum'` replaces the deprecated `size_average=False`.
loss_fn = torch.nn.MSELoss(reduction='sum')

datasets = DatasetGenerator()
# batch = datasets.next_batch(N)

# Use the optim package to define an Optimizer that will update the weights
# of the model for us (Adam here).
learning_rate = 1e-2
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)

    # Compute and print loss; .item() extracts the Python scalar
    # (the original `loss.data[0]` is deprecated and fails on modern PyTorch).
    loss = loss_fn(y_pred, y)
    print(t, loss.item())

    # Zero the gradients accumulated from the previous step.
    optimizer.zero_grad()

    # Backward pass: gradient of the loss w.r.t. model parameters.
    loss.backward()

    # Apply one optimizer update.
    optimizer.step()
|
[
"torch.nn.MSELoss",
"torch.nn.ReLU",
"torch.randn",
"dataset_gen.DatasetGenerator",
"torch.nn.NLLLoss",
"pickle.load",
"torch.nn.Linear"
] |
[((814, 845), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {'weight': '[5, 1]'}), '(weight=[5, 1])\n', (830, 845), False, 'import torch\n'), ((856, 892), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (872, 892), False, 'import torch\n'), ((1191, 1209), 'dataset_gen.DatasetGenerator', 'DatasetGenerator', ([], {}), '()\n', (1207, 1209), False, 'from dataset_gen import DatasetGenerator\n'), ((290, 304), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (301, 304), False, 'import pickle\n'), ((542, 562), 'torch.randn', 'torch.randn', (['N', 'D_in'], {}), '(N, D_in)\n', (553, 562), False, 'import torch\n'), ((577, 598), 'torch.randn', 'torch.randn', (['N', 'D_out'], {}), '(N, D_out)\n', (588, 598), False, 'import torch\n'), ((734, 758), 'torch.nn.Linear', 'torch.nn.Linear', (['D_in', 'H'], {}), '(D_in, H)\n', (749, 758), False, 'import torch\n'), ((764, 779), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (777, 779), False, 'import torch\n'), ((785, 810), 'torch.nn.Linear', 'torch.nn.Linear', (['H', 'D_out'], {}), '(H, D_out)\n', (800, 810), False, 'import torch\n')]
|
"""
The REST API is using the Python package 'klein', which makes it possible to
create HTTP routes and handlers with Twisted in a similar style to Flask:
https://github.com/twisted/klein
"""
import json
from klein import Klein
from logzero import logger
from neo.Network.NodeLeader import NodeLeader
from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB
from neo.Core.Blockchain import Blockchain
from neocore.UInt160 import UInt160
from neocore.UInt256 import UInt256
from neo.Settings import settings
from neo.api.utils import cors_header
API_URL_PREFIX = "/v1"
class RestApi:
    # Klein application object; the @app.route decorators below register
    # handlers against it.
    app = Klein()
    # NotificationDB singleton; bound in __init__.
    notif = None

    def __init__(self):
        self.notif = NotificationDB.instance()
#
# REST API Routes
#
    @app.route('/')
    def home(self, request):
        """Landing page: static HTML listing every REST endpoint plus
        pagination notes and a sample JSON response."""
        endpoints_html = """<ul>
          <li><pre>{apiPrefix}/notifications/block/<height></pre> <em>notifications by block</em></li>
          <li><pre>{apiPrefix}/notifications/addr/<addr></pre><em>notifications by address</em></li>
          <li><pre>{apiPrefix}/notifications/tx/<hash></pre><em>notifications by tx</em></li>
          <li><pre>{apiPrefix}/notifications/contract/<hash></pre><em>notifications by contract</em></li>
          <li><pre>{apiPrefix}/tokens</pre><em>lists all NEP5 Tokens</em></li>
          <li><pre>{apiPrefix}/token/<contract_hash></pre><em>list an NEP5 Token</em></li>
          <li><pre>{apiPrefix}/status</pre> <em>current block height and version</em></li>
        </ul>
        """.format(apiPrefix=API_URL_PREFIX)

        return """<html>
        <style>body {padding:20px;max-width:800px;pre { background-color:#eee; }</style>
        <body>
        <p>
        <h2>REST API for NEO %s</h2>
        (see also <a href="https://github.com/CityOfZion/neo-python">neo-python</a>, <a href="https://github.com/CityOfZion/neo-python/blob/development/api-server.py">api-server.py</a>)
        </p>
        <hr/>

        <h2>endpoints:</h2>
        <p>%s</p>
        <div>
        <hr/>
        <h3>pagination</h3>
        <p>results are offered in page size of 500</p>
        <p>you may request a different page by specifying the <code>page</code> query string param, for example:</p>
        <pre>/block/123456?page=3</pre>
        <p>page index starts at 0, so the 2nd page would be <code>?page=1</code></p>
        <hr/>
        <h3>sample output</h3>
        <pre>
{
    "page_len": 1000,
    "total": 4,
    "page": 0,
    "current_height": 982506,
    "results": [
        {
            "type": "SmartContract.Runtime.Notify",
            "contract": "400cbed5b41014788d939eaf6286e336e7140f8c",
            "tx": "d0805fd7ec19a4a414374ae3720447d2576659053eb7588b85a0f9f1fd629791",
            "block": 928119,
            "addr_from": "AU<KEY>",
            "amount": 1,
            "addr_to": "AL<KEY>",
            "notify_type": "transfer"
        },
        {
            "type": "SmartContract.Runtime.Notify",
            "contract": "<KEY>",
            "tx": "667df082eaa16ce2b07e48e214eb019b3e9450e76daea4f5b0450578a07836ef",
            "block": 936352,
            "addr_from": "<KEY>",
            "amount": 1,
            "addr_to": "<KEY>",
            "notify_type": "transfer"
        },
        {
            "type": "SmartContract.Runtime.Notify",
            "contract": "<KEY>",
            "tx": "eda792e7814e128eecda992f78a11577ee0604827de4aa91ffcda4616c889191",
            "block": 939449,
            "addr_from": "<KEY>",
            "amount": 1,
            "addr_to": "<KEY>",
            "notify_type": "transfer"
        },
        {
            "type": "SmartContract.Runtime.Notify",
            "contract": "<KEY>",
            "tx": "6d0f1decbf3874d08d41f2cc9e8672cd3507c962668c15793e3dd3e01fc3551c",
            "block": 942369,
            "addr_from": "ALULT5WpeiHnEXYFe72Yq7nRB3ZBmsBypq",
            "amount": 1,
            "addr_to": "APaGQT4dx4gUDApVPnbtZvChJ8UKRsZBdt",
            "notify_type": "transfer"
        }
    ],
    "message": ""
}</pre>
        </div>
        </body>
        </html>""" % (settings.net_name, endpoints_html)
    @app.route('%s/notifications/block/<int:block>' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_by_block(self, request, block):
        """Return the JSON notifications recorded for block height *block*.

        Heights beyond the current chain tip, and lookup failures, return a
        JSON error envelope instead.
        """
        request.setHeader('Content-Type', 'application/json')
        try:
            if int(block) > Blockchain.Default().Height:
                return self.format_message("Higher than current block")
            else:
                notifications = self.notif.get_by_block(block)
        except Exception as e:
            logger.info("Could not get notifications for block %s %s" % (block, e))
            return self.format_message("Could not get notifications for block %s because %s " % (block, e))
        return self.format_notifications(request, notifications)
    @app.route('%s/notifications/addr/<string:address>' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_by_addr(self, request, address):
        """Return the JSON notifications associated with *address*."""
        request.setHeader('Content-Type', 'application/json')
        try:
            notifications = self.notif.get_by_addr(address)
        except Exception as e:
            logger.info("Could not get notifications for address %s " % address)
            return self.format_message("Could not get notifications for address %s because %s" % (address, e))
        return self.format_notifications(request, notifications)
    @app.route('%s/notifications/tx/<string:tx_hash>' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_by_tx(self, request, tx_hash):
        """Return the notifications emitted by the transaction *tx_hash*.

        Notifications are indexed by block, so the tx's block is scanned and
        only entries whose tx hash matches are kept.
        """
        request.setHeader('Content-Type', 'application/json')
        bc = Blockchain.Default()  # type: Blockchain
        notifications = []
        try:
            hash = UInt256.ParseString(tx_hash)
            tx, height = bc.GetTransaction(hash)
            # NOTE(review): queries height - 1; presumably GetTransaction
            # reports a 1-based height relative to the notification index —
            # confirm against NotificationDB.
            block_notifications = self.notif.get_by_block(height - 1)
            for n in block_notifications:
                if n.tx_hash == tx.Hash:
                    notifications.append(n)
        except Exception as e:
            logger.info("Could not get tx with hash %s because %s " % (tx_hash, e))
            return self.format_message("Could not get tx with hash %s because %s " % (tx_hash, e))
        return self.format_notifications(request, notifications)
    @app.route('%s/notifications/contract/<string:contract_hash>' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_by_contract(self, request, contract_hash):
        """Return the JSON notifications emitted by contract *contract_hash*."""
        request.setHeader('Content-Type', 'application/json')
        try:
            hash = UInt160.ParseString(contract_hash)
            notifications = self.notif.get_by_contract(hash)
        except Exception as e:
            logger.info("Could not get notifications for contract %s " % contract_hash)
            return self.format_message("Could not get notifications for contract hash %s because %s" % (contract_hash, e))
        return self.format_notifications(request, notifications)
    @app.route('%s/tokens' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_tokens(self, request):
        """Return all NEP5 token registrations known to the notification DB."""
        request.setHeader('Content-Type', 'application/json')
        notifications = self.notif.get_tokens()
        return self.format_notifications(request, notifications)
    @app.route('%s/token/<string:contract_hash>' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_token(self, request, contract_hash):
        """Return the single NEP5 token registered under *contract_hash*."""
        request.setHeader('Content-Type', 'application/json')
        try:
            uint160 = UInt160.ParseString(contract_hash)
            contract_event = self.notif.get_token(uint160)
            if not contract_event:
                return self.format_message("Could not find contract with hash %s" % contract_hash)
            notifications = [contract_event]
        except Exception as e:
            logger.info("Could not get contract with hash %s because %s " % (contract_hash, e))
            return self.format_message("Could not get contract with hash %s because %s " % (contract_hash, e))
        return self.format_notifications(request, notifications)
    @app.route('%s/status' % API_URL_PREFIX, methods=['GET'])
    @cors_header
    def get_status(self, request):
        """Return current block height, node version name, and peer count."""
        request.setHeader('Content-Type', 'application/json')
        return json.dumps({
            'current_height': Blockchain.Default().Height,
            'version': settings.VERSION_NAME,
            'num_peers': len(NodeLeader.Instance().Peers)
        }, indent=4, sort_keys=True)
def format_notifications(self, request, notifications, show_none=False):
notif_len = len(notifications)
page_len = 500
page = 0
message = ''
if b'page' in request.args:
try:
page = int(request.args[b'page'][0])
except Exception as e:
print("could not get page: %s" % e)
start = page_len * page
end = start + page_len
if start > notif_len:
message = 'page greater than result length'
notifications = notifications[start:end]
return json.dumps({
'current_height': Blockchain.Default().Height + 1,
'message': message,
'total': notif_len,
'results': None if show_none else [n.ToJson() for n in notifications],
'page': page,
'page_len': page_len
}, indent=4, sort_keys=True)
def format_message(self, message):
return json.dumps({
'current_height': Blockchain.Default().Height + 1,
'message': message,
'total': 0,
'results': None,
'page': 0,
'page_len': 0
}, indent=4, sort_keys=True)
|
[
"neo.Network.NodeLeader.NodeLeader.Instance",
"neo.Core.Blockchain.Blockchain.Default",
"neocore.UInt256.UInt256.ParseString",
"logzero.logger.info",
"neo.Implementations.Notifications.LevelDB.NotificationDB.NotificationDB.instance",
"klein.Klein",
"neocore.UInt160.UInt160.ParseString"
] |
[((626, 633), 'klein.Klein', 'Klein', ([], {}), '()\n', (631, 633), False, 'from klein import Klein\n'), ((697, 722), 'neo.Implementations.Notifications.LevelDB.NotificationDB.NotificationDB.instance', 'NotificationDB.instance', ([], {}), '()\n', (720, 722), False, 'from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB\n'), ((6080, 6100), 'neo.Core.Blockchain.Blockchain.Default', 'Blockchain.Default', ([], {}), '()\n', (6098, 6100), False, 'from neo.Core.Blockchain import Blockchain\n'), ((6180, 6208), 'neocore.UInt256.UInt256.ParseString', 'UInt256.ParseString', (['tx_hash'], {}), '(tx_hash)\n', (6199, 6208), False, 'from neocore.UInt256 import UInt256\n'), ((7003, 7037), 'neocore.UInt160.UInt160.ParseString', 'UInt160.ParseString', (['contract_hash'], {}), '(contract_hash)\n', (7022, 7037), False, 'from neocore.UInt160 import UInt160\n'), ((7944, 7978), 'neocore.UInt160.UInt160.ParseString', 'UInt160.ParseString', (['contract_hash'], {}), '(contract_hash)\n', (7963, 7978), False, 'from neocore.UInt160 import UInt160\n'), ((5032, 5103), 'logzero.logger.info', 'logger.info', (["('Could not get notifications for block %s %s' % (block, e))"], {}), "('Could not get notifications for block %s %s' % (block, e))\n", (5043, 5103), False, 'from logzero import logger\n'), ((5609, 5677), 'logzero.logger.info', 'logger.info', (["('Could not get notifications for address %s ' % address)"], {}), "('Could not get notifications for address %s ' % address)\n", (5620, 5677), False, 'from logzero import logger\n'), ((6498, 6569), 'logzero.logger.info', 'logger.info', (["('Could not get tx with hash %s because %s ' % (tx_hash, e))"], {}), "('Could not get tx with hash %s because %s ' % (tx_hash, e))\n", (6509, 6569), False, 'from logzero import logger\n'), ((7142, 7217), 'logzero.logger.info', 'logger.info', (["('Could not get notifications for contract %s ' % contract_hash)"], {}), "('Could not get notifications for contract %s ' % contract_hash)\n", 
(7153, 7217), False, 'from logzero import logger\n'), ((8260, 8348), 'logzero.logger.info', 'logger.info', (["('Could not get contract with hash %s because %s ' % (contract_hash, e))"], {}), "('Could not get contract with hash %s because %s ' % (\n contract_hash, e))\n", (8271, 8348), False, 'from logzero import logger\n'), ((4807, 4827), 'neo.Core.Blockchain.Blockchain.Default', 'Blockchain.Default', ([], {}), '()\n', (4825, 4827), False, 'from neo.Core.Blockchain import Blockchain\n'), ((8756, 8776), 'neo.Core.Blockchain.Blockchain.Default', 'Blockchain.Default', ([], {}), '()\n', (8774, 8776), False, 'from neo.Core.Blockchain import Blockchain\n'), ((8860, 8881), 'neo.Network.NodeLeader.NodeLeader.Instance', 'NodeLeader.Instance', ([], {}), '()\n', (8879, 8881), False, 'from neo.Network.NodeLeader import NodeLeader\n'), ((9557, 9577), 'neo.Core.Blockchain.Blockchain.Default', 'Blockchain.Default', ([], {}), '()\n', (9575, 9577), False, 'from neo.Core.Blockchain import Blockchain\n'), ((9931, 9951), 'neo.Core.Blockchain.Blockchain.Default', 'Blockchain.Default', ([], {}), '()\n', (9949, 9951), False, 'from neo.Core.Blockchain import Blockchain\n')]
|
from eng_to_ipa import transcribe
import sqlite3
import re
from os.path import join, abspath, dirname
# Module-level connection to the bundled CMU pronunciation database,
# resolved relative to this file.
_DB_PATH = join(abspath(dirname(__file__)),
                "../eng_to_ipa/resources/CMU_dict.db")
conn = sqlite3.connect(_DB_PATH)
c = conn.cursor()
def create_dictionary_table():
    """Create the ``eng_ipa`` table, resetting it when it already exists.

    Fix: the original only *dropped* the table on
    ``sqlite3.OperationalError`` (raised when the table exists) and never
    recreated it, leaving the database without the table. We now recreate
    it after the drop so callers always get a fresh, empty table.
    """
    create_sql = """CREATE TABLE eng_ipa
                    (id INTEGER PRIMARY KEY,
                    word text NOT NULL,
                    phonemes text NOT NULL,
                    ipa text NOT NULL
                    )"""
    try:
        c.execute(create_sql)
        conn.commit()
    except sqlite3.OperationalError:
        # Table already exists: drop and recreate it.
        c.execute("DROP TABLE eng_ipa;")
        c.execute(create_sql)
        conn.commit()
create_dictionary_table()
def insert_dictionary_values():
    """takes the prepared data and places it into the database

    Parses cmudict-0.7b.txt into (word, phonemes, ipa) triples and bulk
    inserts them into the ``eng_ipa`` table.

    Fix: the source path previously mixed literal backslashes with a
    forward slash ('..\\eng_to_ipa\\...'), which only resolved on Windows;
    forward slashes work on every platform (Windows included).
    """
    dictionary_data = []
    source_path = join(abspath(dirname(__file__)),
                       "../eng_to_ipa/resources/CMU_source_files/cmudict-0.7b.txt")
    with open(source_path, encoding="UTF-8") as source_file:
        for line in source_file.readlines():
            # Strip alternate-pronunciation markers such as "(2)" from the word.
            word = re.sub(r"\(\d\)", "", line.split(" ")[0]).lower()
            phonemes = line.split(" ")[1].replace("\n", "").lower()
            ipa = transcribe.cmu_to_ipa([[phonemes]], stress_marking="both")[0][0]
            dictionary_data.append((str(word), str(phonemes), str(ipa)))
    c.executemany("INSERT INTO eng_ipa(word, phonemes, ipa) VALUES (?, ?, ?)", dictionary_data)
    conn.commit()
if __name__ == "__main__":
    # create_dictionary_table()
    # insert_dictionary_values()
    # test
    # Ad-hoc sanity check: list entries whose IPA transcription, with the
    # stress marks (ˌ and ˈ) stripped, contains the cluster "nstr".
    c.execute("SELECT * FROM eng_ipa WHERE "
              "REPLACE(REPLACE(ipa, 'ˌ', ''), 'ˈ', '') "
              "LIKE \"%nstr%\"")
    for r in c.fetchall():
        print(str(r))
|
[
"os.path.dirname",
"eng_to_ipa.transcribe.cmu_to_ipa"
] |
[((139, 156), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (146, 156), False, 'from os.path import join, abspath, dirname\n'), ((830, 847), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (837, 847), False, 'from os.path import join, abspath, dirname\n'), ((1165, 1223), 'eng_to_ipa.transcribe.cmu_to_ipa', 'transcribe.cmu_to_ipa', (['[[phonemes]]'], {'stress_marking': '"""both"""'}), "([[phonemes]], stress_marking='both')\n", (1186, 1223), False, 'from eng_to_ipa import transcribe\n')]
|
import gzip
import os
from mup.converters.utils import readFile, skipHeader
class Converter(object):
    """Abstract base class for markup converters.

    Subclasses set ``name`` and implement :meth:`supports` and
    :meth:`_doConvert`.
    """
    name = 'Unnamed'
    # Set to True if this converter wraps the reference implementation for the
    # markup it supports
    reference = False
    # Set to True if this converter uses online tools
    online = False

    def supports(self, filepath):
        """Return whether this converter can handle ``filepath``."""
        raise NotImplementedError

    def convert(self, filename):
        """Read ``filename`` (transparently decompressing ``.gz`` files),
        skip its header and return the converted output.

        Fix: the original never closed the file handle, leaking it on
        every call; we now close it once the contents have been read.
        """
        ext = os.path.splitext(filename)[1]
        if ext == '.gz':
            fl = gzip.open(filename, 'rb')
        else:
            fl = open(filename, 'rb')
        try:
            src = readFile(fl)
        finally:
            fl.close()
        src = skipHeader(src)
        return self._doConvert(src)

    def _doConvert(self, txt):
        """Convert the raw source ``txt``; implemented by subclasses."""
        raise NotImplementedError
|
[
"mup.converters.utils.readFile",
"gzip.open",
"os.path.splitext",
"mup.converters.utils.skipHeader"
] |
[((605, 617), 'mup.converters.utils.readFile', 'readFile', (['fl'], {}), '(fl)\n', (613, 617), False, 'from mup.converters.utils import readFile, skipHeader\n'), ((633, 648), 'mup.converters.utils.skipHeader', 'skipHeader', (['src'], {}), '(src)\n', (643, 648), False, 'from mup.converters.utils import readFile, skipHeader\n'), ((441, 467), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (457, 467), False, 'import os\n'), ((513, 538), 'gzip.open', 'gzip.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (522, 538), False, 'import gzip\n')]
|
""" This is algos.euler.views module.
This module provides the view of the REST API from the Euler algo.
"""
from algos.euler.models import training_samples as ts
from algos.euler.models import predictions
from algos.euler.serializers import PredictionSerializer, PriceChangeSerializer
from rest_framework.response import Response
def get_predicted_changes(query_params):
    """ Get a list of predicted profitable changes.

    Honours the optional 'instrument', 'start' and 'end' filters and a
    comma-separated 'order_by' (defaulting to date ordering); returns a
    DRF Response with the serialized predictions.
    """
    order_param = query_params.get('order_by')
    order = order_param.split(',') if order_param is not None else ['date']
    all_predictions = predictions.get_predictions(
        instrument=query_params.get('instrument'),
        start=query_params.get('start'),
        end=query_params.get('end'),
        order_by=order)
    serialized = PredictionSerializer(all_predictions, many=True)
    return Response(serialized.data)
def get_actual_changes(query_params):
    """ Get a list of actual profitable changes.

    Honours the optional 'instrument', 'start' and 'end' filters and a
    comma-separated 'order_by' query parameter (defaulting to date
    ordering); returns a DRF Response with the serialized price changes.
    """
    order = query_params.get('order_by')
    if order is not None:
        order = order.split(',')
    else:
        order = ['date']
    all_changes = ts.get_samples(
        instrument=query_params.get('instrument'),
        start=query_params.get('start'),
        end=query_params.get('end'),
        order_by=order)
    serializer = PriceChangeSerializer(all_changes, many=True)
    return Response(serializer.data)
def clean_predictions():
    """ Clean up predictions made by Euler for a non trading day.

    Deletes predictions dated on or before the last known sample date
    whose date matches no sample (i.e. non-trading days).

    Fix: guard against an empty sample table — the original indexed
    ``all_dates[-1]`` and raised IndexError when no samples existed.
    """
    all_dates = sorted({sample.date for sample in ts.get_all(['date'])})
    if not all_dates:
        # No training samples yet: nothing to clean.
        return
    last_day = all_dates[-1]
    predictions.Prediction.objects.filter(date__lte=last_day).exclude(
        date__in=all_dates).delete()
|
[
"algos.euler.serializers.PriceChangeSerializer",
"algos.euler.models.training_samples.get_all",
"rest_framework.response.Response",
"algos.euler.serializers.PredictionSerializer",
"algos.euler.models.predictions.Prediction.objects.filter"
] |
[((794, 842), 'algos.euler.serializers.PredictionSerializer', 'PredictionSerializer', (['all_predictions'], {'many': '(True)'}), '(all_predictions, many=True)\n', (814, 842), False, 'from algos.euler.serializers import PredictionSerializer, PriceChangeSerializer\n'), ((854, 879), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (862, 879), False, 'from rest_framework.response import Response\n'), ((1317, 1362), 'algos.euler.serializers.PriceChangeSerializer', 'PriceChangeSerializer', (['all_changes'], {'many': '(True)'}), '(all_changes, many=True)\n', (1338, 1362), False, 'from algos.euler.serializers import PredictionSerializer, PriceChangeSerializer\n'), ((1374, 1399), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1382, 1399), False, 'from rest_framework.response import Response\n'), ((1538, 1558), 'algos.euler.models.training_samples.get_all', 'ts.get_all', (["['date']"], {}), "(['date'])\n", (1548, 1558), True, 'from algos.euler.models import training_samples as ts\n'), ((1692, 1749), 'algos.euler.models.predictions.Prediction.objects.filter', 'predictions.Prediction.objects.filter', ([], {'date__lte': 'last_day'}), '(date__lte=last_day)\n', (1729, 1749), False, 'from algos.euler.models import predictions\n')]
|
"""
# Copyright 2021 21CN Corporation Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# -*- coding: utf-8 -*-
import os
import re
import uuid
import zipfile
from io import IOBase
from pathlib import Path
import jwt
from jwt import PyJWTError
from config import jwt_public_key, base_dir
from core.log import logger
# Generic operation result labels.
FAILURE = 'Failure'
SUCCESS = 'Success'
FAILURE_JSON = '{"code": 500}'
# State labels used across the service for long-running tasks.
QUEUED = 'queued'
SAVING = 'saving'
DEACTIVATED = 'deactivated'
UPLOADING = 'uploading'
ACTIVE = 'active'
KILLED = 'killed'
DOWNLOADING = 'downloading'
COMPRESSING = 'compressing'
WAITING = 'waiting'
PUSHING = 'pushing'
UPLOADED = 'uploaded'
ERROR = 'error'
# Application-instance lifecycle states.
INSTANTIATING = 'Instantiating'
INSTANTIATED = 'Instantiated'
TERMINATED = 'Terminated'
TERMINATING = 'Terminating'
# On-disk locations derived from the configured base directory.
APP_PACKAGE_DIR = base_dir + '/package'
RC_FILE_DIR = base_dir + '/config'
LOG = logger
# Validation regexes: dotted-quad IPv4 address and canonical lowercase UUID.
_IPV4_PATTERN = '^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}' \
                '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
_UUID_PATTERN = '^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$'
APP_INS_ERR_MDG = 'appInstanceId is required'
def create_dir(path):
    """
    Create directory ``path`` (including parents).

    Returns True when the directory exists afterwards, False on failure.

    Fix: the original logged and then returned True on *any* OSError
    (e.g. permission denied), falsely reporting success; ``exist_ok=True``
    keeps the already-exists case a success while real failures now
    return False.
    """
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as exception:
        LOG.error(exception, exc_info=True)
        return False
    return True
def exists_path(path):
    """
    Return True when ``path`` exists on the filesystem.
    """
    return Path(path).exists()
def delete_dir(path):
    """
    Recursively delete ``path``: a plain file is removed directly, a
    directory is emptied depth-first and then removed. No-op when the
    path does not exist.
    """
    if not exists_path(path):
        return
    if os.path.isfile(path):
        os.remove(path)
        return
    for entry in os.listdir(path):
        child = path + '/' + entry
        if os.path.isfile(child):
            os.remove(child)
        else:
            delete_dir(child)
    os.rmdir(path)
def unzip(file, target):
    """
    Extract every member of the zip archive ``file`` into directory
    ``target`` (created if missing).

    Args:
        file: path to the zip archive
        target: destination directory
    """
    create_dir(target)
    with zipfile.ZipFile(file) as zip_file:
        # extractall() extracts each member, exactly like the per-member loop.
        zip_file.extractall(target)
def validate_access_token(access_token):
    """
    Validate a JWT access token.

    A missing token (None) is accepted. A decodable token must carry the
    'authorities', 'userId' and 'user_name' claims to be accepted.
    """
    if access_token is None:
        return True
    try:
        payload = jwt.decode(access_token, jwt_public_key, algorithms=['RS256'])
        if 'authorities' not in payload:
            return False
        if 'userId' not in payload:
            return False
        if 'user_name' not in payload:
            return False
    except PyJWTError:
        LOG.debug("skip accessToken check")
        # test, change false future
        # NOTE(review): an undecodable/invalid token is currently *accepted*
        # (deliberate, per the comment above); this should become
        # `return False` once testing is over -- confirm before hardening.
        return True
    return True
def validate_ipv4_address(host_ip):
    """
    Match ``host_ip`` against the dotted-quad IPv4 pattern.

    Returns False for None; otherwise a match object (truthy) on success
    or None on failure.
    """
    if host_ip is None:
        return False
    return re.match(_IPV4_PATTERN, host_ip)
def gen_uuid():
    """
    Generate a fresh UUID4 as a 32-character lowercase hex string
    (i.e. the canonical form without dashes).
    """
    return uuid.uuid4().hex
def validate_uuid(param):
    """
    Match ``param`` against the canonical lowercase UUID pattern.

    Returns False (after logging) for None; otherwise a match object on
    success or None on failure.
    """
    if param is None:
        LOG.error('param require')
        return False
    return re.match(_UUID_PATTERN, param)
def validate_input_params(param):
    """
    Validate the common request parameters (token, host ip, tenant).

    Args:
        param: request message carrying accessToken, hostIp and tenantId
    Returns:
        the host ip on success, otherwise None
    """
    access_token = param.accessToken
    host_ip = param.hostIp
    # Lazy guards: each validator only runs if the previous ones passed,
    # exactly like the original early-return chain.
    guards = (
        (lambda: validate_access_token(access_token), 'accessToken not valid'),
        (lambda: validate_ipv4_address(host_ip), 'hostIp not match ipv4'),
        (lambda: param.tenantId, 'tenantId is required'),
    )
    for check, message in guards:
        if not check():
            LOG.error(message)
            return None
    return host_ip
class StreamReader(IOBase):
    """File-like adapter exposing an iterator of request chunks as a
    read-only, non-seekable byte stream.

    Each item yielded by ``request_iter`` must expose a ``.content``
    bytes attribute -- presumably streamed upload chunks; confirm
    against the callers.
    """
    def __init__(self, request_iter):
        self.data_iter = request_iter  # source iterator of chunk messages
        self.is_end = False  # latched once the iterator is exhausted
    def read(self, size) -> bytes:
        # NOTE(review): `size` is ignored -- each call returns one whole
        # chunk regardless of the requested byte count, so callers must
        # tolerate short/long reads. Confirm this is intentional.
        if self.is_end:
            return bytes()
        request = next(self.data_iter, None)
        if request is None:
            self.is_end = True
            return bytes()
        return request.content
    def fileno(self) -> int:
        # Not backed by an OS-level file descriptor.
        raise IOError('not a file')
    def isatty(self) -> bool:
        return False
    def readable(self) -> bool:
        return True
    def seek(self, __offset: int, __whence: int = ...) -> int:
        # Forward-only stream: seeking is unsupported.
        raise IOError('can not seek!')
    def seekable(self) -> bool:
        return False
|
[
"os.remove",
"uuid.uuid4",
"zipfile.ZipFile",
"os.makedirs",
"pathlib.Path",
"os.path.isfile",
"jwt.decode",
"os.rmdir",
"os.listdir",
"re.compile"
] |
[((1919, 1929), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1923, 1929), False, 'from pathlib import Path\n'), ((2056, 2076), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2070, 2076), False, 'import os\n'), ((2130, 2146), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2140, 2146), False, 'import os\n'), ((2306, 2320), 'os.rmdir', 'os.rmdir', (['path'], {}), '(path)\n', (2314, 2320), False, 'import os\n'), ((3274, 3299), 're.compile', 're.compile', (['_IPV4_PATTERN'], {}), '(_IPV4_PATTERN)\n', (3284, 3299), False, 'import re\n'), ((3577, 3602), 're.compile', 're.compile', (['_UUID_PATTERN'], {}), '(_UUID_PATTERN)\n', (3587, 3602), False, 'import re\n'), ((1672, 1689), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1683, 1689), False, 'import os\n'), ((2086, 2101), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (2095, 2101), False, 'import os\n'), ((2194, 2219), 'os.path.isfile', 'os.path.isfile', (['file_data'], {}), '(file_data)\n', (2208, 2219), False, 'import os\n'), ((2459, 2480), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file'], {}), '(file)\n', (2474, 2480), False, 'import zipfile\n'), ((2755, 2817), 'jwt.decode', 'jwt.decode', (['access_token', 'jwt_public_key'], {'algorithms': "['RS256']"}), "(access_token, jwt_public_key, algorithms=['RS256'])\n", (2765, 2817), False, 'import jwt\n'), ((2233, 2253), 'os.remove', 'os.remove', (['file_data'], {}), '(file_data)\n', (2242, 2253), False, 'import os\n'), ((3402, 3414), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3412, 3414), False, 'import uuid\n')]
|
import xgboost as xgb
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
import mlrun
import mlrun.frameworks.xgboost as mlrun_xgboost
from mlrun.frameworks._ml_common.utils import AlgorithmFunctionality, ModelType
from ..functions import MLFunctions
class XGBoostFunctions(MLFunctions):
    """MLFunctions suite exercising mlrun's XGBoost autologging on every
    AlgorithmFunctionality combination (single/multi output,
    classification/regression)."""
    @staticmethod
    def train(
        context: mlrun.MLClientCtx, algorithm_functionality: str, model_name: str = None
    ):
        """Build a model matching ``algorithm_functionality``, attach mlrun
        autologging and fit it on a generated dataset (datasets come from
        the MLFunctions base class -- not visible here)."""
        algorithm_functionality = AlgorithmFunctionality(algorithm_functionality)
        model = XGBoostFunctions._get_model(
            algorithm_functionality=algorithm_functionality
        )
        x_train, x_test, y_train, y_test = XGBoostFunctions._get_dataset(
            algorithm_functionality=algorithm_functionality, for_training=True
        )
        mlrun_xgboost.apply_mlrun(
            model=model, model_name=model_name, x_test=x_test, y_test=y_test
        )
        model.fit(x_train, y_train)
    @staticmethod
    def evaluate(
        context: mlrun.MLClientCtx, algorithm_functionality: str, model_path: str
    ):
        """Load the model logged at ``model_path`` via mlrun and run a
        prediction pass over a freshly generated evaluation dataset."""
        algorithm_functionality = AlgorithmFunctionality(algorithm_functionality)
        x, y = XGBoostFunctions._get_dataset(
            algorithm_functionality=algorithm_functionality, for_training=False
        )
        model_handler = mlrun_xgboost.apply_mlrun(model_path=model_path, y_test=y)
        model = model_handler.model
        model.predict(x)
    @staticmethod
    def _get_model(algorithm_functionality: AlgorithmFunctionality) -> ModelType:
        """Return an XGBoost estimator wrapped to match the requested
        functionality: plain XGBClassifier/XGBRegressor for single-output
        tasks, MultiOutputClassifier/MultiOutputRegressor for multi-output
        ones, with an extra OneVsRest layer for multi-output multi-class."""
        if algorithm_functionality.is_classification():
            if algorithm_functionality.is_single_output():
                return xgb.XGBClassifier()
            # Multi-output: binary targets need no OneVsRest wrapping.
            if algorithm_functionality.is_binary_classification():
                return MultiOutputClassifier(xgb.XGBClassifier())
            return MultiOutputClassifier(OneVsRestClassifier(xgb.XGBClassifier()))
        if algorithm_functionality.is_single_output():
            return xgb.XGBRegressor()
        return MultiOutputRegressor(xgb.XGBRegressor())
|
[
"mlrun.frameworks._ml_common.utils.AlgorithmFunctionality",
"xgboost.XGBRegressor",
"mlrun.frameworks.xgboost.apply_mlrun",
"xgboost.XGBClassifier"
] |
[((531, 578), 'mlrun.frameworks._ml_common.utils.AlgorithmFunctionality', 'AlgorithmFunctionality', (['algorithm_functionality'], {}), '(algorithm_functionality)\n', (553, 578), False, 'from mlrun.frameworks._ml_common.utils import AlgorithmFunctionality, ModelType\n'), ((866, 961), 'mlrun.frameworks.xgboost.apply_mlrun', 'mlrun_xgboost.apply_mlrun', ([], {'model': 'model', 'model_name': 'model_name', 'x_test': 'x_test', 'y_test': 'y_test'}), '(model=model, model_name=model_name, x_test=x_test,\n y_test=y_test)\n', (891, 961), True, 'import mlrun.frameworks.xgboost as mlrun_xgboost\n'), ((1176, 1223), 'mlrun.frameworks._ml_common.utils.AlgorithmFunctionality', 'AlgorithmFunctionality', (['algorithm_functionality'], {}), '(algorithm_functionality)\n', (1198, 1223), False, 'from mlrun.frameworks._ml_common.utils import AlgorithmFunctionality, ModelType\n'), ((1384, 1442), 'mlrun.frameworks.xgboost.apply_mlrun', 'mlrun_xgboost.apply_mlrun', ([], {'model_path': 'model_path', 'y_test': 'y'}), '(model_path=model_path, y_test=y)\n', (1409, 1442), True, 'import mlrun.frameworks.xgboost as mlrun_xgboost\n'), ((2053, 2071), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (2069, 2071), True, 'import xgboost as xgb\n'), ((2108, 2126), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (2124, 2126), True, 'import xgboost as xgb\n'), ((1743, 1762), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (1760, 1762), True, 'import xgboost as xgb\n'), ((1875, 1894), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (1892, 1894), True, 'import xgboost as xgb\n'), ((1957, 1976), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (1974, 1976), True, 'import xgboost as xgb\n')]
|
from django.contrib import messages
from django.urls import reverse, resolve
def warn_no_phone_number(get_response):
    """Middleware factory: flash a warning (with a link to the profile
    editor) for authenticated non-staff users who have no phone number,
    except when they are already on the edit-profile page.

    Fix: the original built ``current_url`` with ``":".join(...)`` *before*
    checking ``resolved.url_name is not None``, so resolving an unnamed URL
    raised TypeError (joining None). The join now happens only after the
    check, mirroring warn_not_read_guide's ordering.
    """
    def middleware(request):
        if request.user.is_authenticated and not (request.user.is_staff or request.user.is_superuser) \
                and request.user.phone_number is None:
            resolved = resolve(request.path_info)
            if resolved.url_name is not None:
                current_url = ":".join([resolved.namespace, resolved.url_name])
                if current_url != "accounts:edit_profile":
                    edit_profile = reverse("accounts:edit_profile")
                    messages.add_message(request, messages.ERROR,
                                         "Innan du registrerar dig på jobb måste du "
                                         "<a href='%s'>lägga till ett telefonnummer.</a>" % edit_profile)
        response = get_response(request)
        return response
    return middleware
def warn_not_read_guide(get_response):
    """Middleware factory: flash a warning (with a link to the job guide)
    for authenticated non-staff users who have not read the guide, except
    when they are already on the guide page."""
    def middleware(request):
        if request.user.is_authenticated and not (request.user.is_staff or request.user.is_superuser) \
                and not request.user.read_guide:
            resolved = resolve(request.path_info)
            # Skip unnamed URLs and the guide page itself.
            if resolved.url_name is not None and resolved.url_name != "guide":
                guide = reverse("guide")
                messages.add_message(request, messages.ERROR,
                                     "Innan du registrerar dig på jobb måste du "
                                     "<a href='%s'>läsa jobbguiden.</a>" % guide)
        response = get_response(request)
        return response
    return middleware
|
[
"django.urls.reverse",
"django.urls.resolve",
"django.contrib.messages.add_message"
] |
[((331, 357), 'django.urls.resolve', 'resolve', (['request.path_info'], {}), '(request.path_info)\n', (338, 357), False, 'from django.urls import reverse, resolve\n'), ((1172, 1198), 'django.urls.resolve', 'resolve', (['request.path_info'], {}), '(request.path_info)\n', (1179, 1198), False, 'from django.urls import reverse, resolve\n'), ((555, 587), 'django.urls.reverse', 'reverse', (['"""accounts:edit_profile"""'], {}), "('accounts:edit_profile')\n", (562, 587), False, 'from django.urls import reverse, resolve\n'), ((605, 767), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.ERROR', '("Innan du registrerar dig på jobb måste du <a href=\'%s\'>lägga till ett telefonnummer.</a>"\n % edit_profile)'], {}), '(request, messages.ERROR, \n "Innan du registrerar dig på jobb måste du <a href=\'%s\'>lägga till ett telefonnummer.</a>"\n % edit_profile)\n', (625, 767), False, 'from django.contrib import messages\n'), ((1303, 1319), 'django.urls.reverse', 'reverse', (['"""guide"""'], {}), "('guide')\n", (1310, 1319), False, 'from django.urls import reverse, resolve\n'), ((1337, 1479), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.ERROR', '("Innan du registrerar dig på jobb måste du <a href=\'%s\'>läsa jobbguiden.</a>"\n % guide)'], {}), '(request, messages.ERROR, \n "Innan du registrerar dig på jobb måste du <a href=\'%s\'>läsa jobbguiden.</a>"\n % guide)\n', (1357, 1479), False, 'from django.contrib import messages\n')]
|
from abc import ABC
import itertools
from collections.abc import Iterator
from typing import TypeVar
from bolinette import core
# Type variable for the attribute type filtered by the reflection helpers.
_T = TypeVar("_T")
class Properties(ABC):
    """Base helper exposing attribute reflection over a wrapped ``parent``
    object."""
    def __init__(self, parent):
        self.parent = parent  # object whose (class) attributes are inspected
    @staticmethod
    def _get_cls_attributes_of_type(
        obj: type, attr_type: type[_T]
    ) -> Iterator[tuple[str, _T]]:
        """Yield (name, attribute) pairs of ``attr_type`` declared on ``obj``
        and, recursively, on all of its base classes (base-class attributes
        are yielded before the class's own)."""
        parent_attrs = (
            Properties._get_cls_attributes_of_type(parent, attr_type)
            for parent in obj.__bases__
        )
        return itertools.chain(
            *parent_attrs,
            (
                (name, attribute)
                for name, attribute in vars(obj).items()
                if isinstance(attribute, attr_type)
            )
        )
    @staticmethod
    def _get_attributes_of_type(obj, attr_type):
        """Yield (name, attribute) pairs of ``attr_type`` found directly on
        ``obj`` (no base-class traversal)."""
        return (
            (name, attribute)
            for name, attribute in vars(obj).items()
            if isinstance(attribute, attr_type)
        )
    def get_instantiable(
        self, of_type: type[core.abc.T_Instance]
    ) -> Iterator[tuple[str, "core.InstantiableAttribute[core.abc.T_Instance]"]]:
        """Yield the parent's InstantiableAttribute class attributes whose
        declared type is ``of_type``."""
        attrs = self._get_cls_attributes_of_type(
            type(self.parent), core.InstantiableAttribute
        )
        return ((name, attr) for name, attr in attrs if attr.type == of_type)
|
[
"typing.TypeVar"
] |
[((135, 148), 'typing.TypeVar', 'TypeVar', (['"""_T"""'], {}), "('_T')\n", (142, 148), False, 'from typing import TypeVar\n')]
|
#!/usr/bin/env python2
"""Cleanup interactive transcript received on standard input.
This mostly consists in pretty-pretting JSON messages and sorting their
fields, to permit text-based comparisons against reference transcripts.
Usage: python cleanup.py fname.clean < fname.dirty
"""
import json
import sys
import re
import os
def cleanup_json(js):
    """Recursively normalize a parsed JSON message in place.

    Full "fname" paths are reduced to their basename (backslashes
    normalized first) and "location" values are blanked, so transcripts
    recorded on different machines compare equal.

    Python-2 only: relies on dict.itervalues() (see the shebang above).
    """
    if isinstance(js, dict):
        if "fname" in js:
            js["fname"] = os.path.basename(js["fname"].replace('\\', '/'))
        if "location" in js:
            js["location"] = "<location removed>"
        for v in js.itervalues():
            cleanup_json(v)
    elif isinstance(js, list):
        for v in js:
            cleanup_json(v)
def cleanup_one(line):
    """Normalise a single transcript line for diff-friendly comparison:
    mask unicode-id tokens, then canonicalise JSON payloads (sorted keys,
    scrubbed via cleanup_json). Non-JSON lines pass through unchanged."""
    line = re.sub(r"\b(?<![\\])u[0-9]+\b", "u[...]", line)
    try:
        payload = json.loads(line)
        if payload.get("kind") == "protocol-info":
            payload = {"kind": payload.get("kind"), "rest": "[...]"}  # Drop message body
        if payload.get("kind") == "message" and payload.get("level") == "progress":
            return ""  # Drop full message
        cleanup_json(payload)
        return json.dumps(payload, ensure_ascii=False, sort_keys=True) + "\n"
    except Exception:
        return line
def main():
    """Read the dirty transcript from stdin, clean each line and write the
    result to the file named by sys.argv[1] (Python-2 only)."""
    # Writing to stdout converts newlines, which confuses diff on Windows, so
    # write to a file instead. There's no reasonable way to do this in a Python
    # 2/3 compatible way, so the following is Python-2 only.
    lines = [line.decode("utf-8") for line in sys.stdin]
    with open(sys.argv[1], mode="wb") as out:
        for line in lines:
            out.write(cleanup_one(line).encode("utf-8"))
        out.flush()
if __name__ == '__main__':
    main()
|
[
"re.sub",
"json.loads",
"json.dumps"
] |
[((740, 790), 're.sub', 're.sub', (['"""\\\\b(?<![\\\\\\\\])u[0-9]+\\\\b"""', '"""u[...]"""', 'line'], {}), "('\\\\b(?<![\\\\\\\\])u[0-9]+\\\\b', 'u[...]', line)\n", (746, 790), False, 'import re\n'), ((810, 826), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (820, 826), False, 'import json\n'), ((1108, 1158), 'json.dumps', 'json.dumps', (['js'], {'ensure_ascii': '(False)', 'sort_keys': '(True)'}), '(js, ensure_ascii=False, sort_keys=True)\n', (1118, 1158), False, 'import json\n')]
|
# Sample usage of mealoptimizer
import mealoptimizer as mp
# Show the foods the optimizer knows about before composing a meal.
mp.print_foods_in_database()
# Make a list of things you want in a meal
food_list = [ 'whole milk','almonds','banana','apple' ]
# Set your target macro ratios ( in percentages )
protein_carb_fat_ratio_percentages = [ 30, 50, 20 ] # must add to 100%
# Set Calorie target
calorie_target = 300 # Cals
# Get protein, carbohydrate and fat amounts in grams for the set calorie target
protein_carb_fat_amounts_gram = mp.get_macro_weight_targets( calorie_target, protein_carb_fat_ratio_percentages )
# Find best combination of the food amounts so that you are as close as possible your target protein, carbohydrate and fat amounts
food_amounts_gram = mp.optimize_food_amounts( food_list, protein_carb_fat_amounts_gram )
# See the actual macro ratio
mp.print_meal_analysis( food_list, food_amounts_gram )
|
[
"mealoptimizer.get_macro_weight_targets",
"mealoptimizer.print_meal_analysis",
"mealoptimizer.print_foods_in_database",
"mealoptimizer.optimize_food_amounts"
] |
[((61, 89), 'mealoptimizer.print_foods_in_database', 'mp.print_foods_in_database', ([], {}), '()\n', (87, 89), True, 'import mealoptimizer as mp\n'), ((475, 554), 'mealoptimizer.get_macro_weight_targets', 'mp.get_macro_weight_targets', (['calorie_target', 'protein_carb_fat_ratio_percentages'], {}), '(calorie_target, protein_carb_fat_ratio_percentages)\n', (502, 554), True, 'import mealoptimizer as mp\n'), ((709, 775), 'mealoptimizer.optimize_food_amounts', 'mp.optimize_food_amounts', (['food_list', 'protein_carb_fat_amounts_gram'], {}), '(food_list, protein_carb_fat_amounts_gram)\n', (733, 775), True, 'import mealoptimizer as mp\n'), ((808, 860), 'mealoptimizer.print_meal_analysis', 'mp.print_meal_analysis', (['food_list', 'food_amounts_gram'], {}), '(food_list, food_amounts_gram)\n', (830, 860), True, 'import mealoptimizer as mp\n')]
|
import torch
import torch.nn.functional as F
import numpy
from babyai.rl.utils import DictList
# dictionary that defines what head is required for each extra info used for auxiliary supervision
# Head specs: 'binary' = single-logit classifier, 'multiclassN' = N-way
# classifier, 'continuous01' = regressor with outputs in [0, 1].
required_heads = {'seen_state': 'binary',
                  'see_door': 'binary',
                  'see_obj': 'binary',
                  'obj_in_instr': 'binary',
                  'in_front_of_what': 'multiclass9',  # multi class classifier with 9 possible classes
                  'visit_proportion': 'continuous01',  # continous regressor with outputs in [0, 1]
                  'bot_action': 'binary'
                  }
class ExtraInfoCollector:
    '''
    This class, used in rl.algos.base, allows connecting the extra information from the environment, and the
    corresponding predictions using the specific heads in the model. It transforms them so that they are easy to use
    to evaluate losses
    '''
    def __init__(self, aux_info, shape, device):
        # aux_info: list of extra-info keys (must be keys of required_heads)
        # shape: (T, P) collection shape -- presumably (timesteps, processes);
        # confirm against rl.algos.base.
        self.aux_info = aux_info
        self.shape = shape
        self.device = device
        self.collected_info = dict()
        self.extra_predictions = dict()
        for info in self.aux_info:
            self.collected_info[info] = torch.zeros(*shape, device=self.device)
            if required_heads[info] == 'binary' or required_heads[info].startswith('continuous'):
                # we predict one number only
                self.extra_predictions[info] = torch.zeros(*shape, 1, device=self.device)
            elif required_heads[info].startswith('multiclass'):
                # means that this is a multi-class classification and we need to predict the whole proba distr
                n_classes = int(required_heads[info].replace('multiclass', ''))
                self.extra_predictions[info] = torch.zeros(*shape, n_classes, device=self.device)
            else:
                raise ValueError("{} not supported".format(required_heads[info]))
    def process(self, env_info):
        """Filter each per-process info dict down to the tracked keys and
        transpose the tuple-of-dicts into a dict-of-lists."""
        # env_info is now a tuple of dicts
        env_info = [{k: v for k, v in dic.items() if k in self.aux_info} for dic in env_info]
        env_info = {k: [env_info[_][k] for _ in range(len(env_info))] for k in env_info[0].keys()}
        # env_info is now a dict of lists
        return env_info
    def fill_dictionaries(self, index, env_info, extra_predictions):
        """Store the environment values and model predictions for timestep
        ``index`` (long dtype for multiclass targets, float otherwise)."""
        for info in self.aux_info:
            dtype = torch.long if required_heads[info].startswith('multiclass') else torch.float
            self.collected_info[info][index] = torch.tensor(env_info[info], dtype=dtype, device=self.device)
            self.extra_predictions[info][index] = extra_predictions[info]
    def end_collection(self, exps):
        """Flatten the collected tensors to per-sample layout and attach
        them to ``exps`` as DictLists."""
        collected_info = dict()
        extra_predictions = dict()
        for info in self.aux_info:
            # T x P -> P x T -> P * T
            collected_info[info] = self.collected_info[info].transpose(0, 1).reshape(-1)
            if required_heads[info] == 'binary' or required_heads[info].startswith('continuous'):
                # T x P x 1 -> P x T x 1 -> P * T
                extra_predictions[info] = self.extra_predictions[info].transpose(0, 1).reshape(-1)
            elif type(required_heads[info]) == int:
                # NOTE(review): required_heads values are strings, so this
                # int branch looks unreachable -- multiclass predictions are
                # never flattened here; confirm whether 'multiclass' was meant.
                # T x P x k -> P x T x k -> (P * T) x k
                k = required_heads[info]  # number of classes
                extra_predictions[info] = self.extra_predictions[info].transpose(0, 1).reshape(-1, k)
        # convert the dicts to DictLists, and add them to the exps DictList.
        exps.collected_info = DictList(collected_info)
        exps.extra_predictions = DictList(extra_predictions)
        return exps
class SupervisedLossUpdater:
    '''
    This class, used by PPO, allows the evaluation of the supervised loss when using extra information from the
    environment. It also handles logging accuracies/L2 distances/etc...
    '''
    def __init__(self, aux_info, supervised_loss_coef, recurrence, device):
        # supervised_loss_coef: per-aux-info loss weights, aligned with aux_info
        self.aux_info = aux_info
        self.supervised_loss_coef = supervised_loss_coef
        self.recurrence = recurrence
        self.device = device
        # Per-epoch logs (one entry per batch) and per-batch accumulators.
        self.log_supervised_losses = []
        self.log_supervised_accuracies = []
        self.log_supervised_L2_losses = []
        self.log_supervised_prevalences = []
        self.batch_supervised_loss = 0
        self.batch_supervised_accuracy = 0
        self.batch_supervised_L2_loss = 0
        self.batch_supervised_prevalence = 0
    def init_epoch(self):
        """Reset the per-epoch logs."""
        self.log_supervised_losses = []
        self.log_supervised_accuracies = []
        self.log_supervised_L2_losses = []
        self.log_supervised_prevalences = []
    def init_batch(self):
        """Reset the per-batch accumulators."""
        self.batch_supervised_loss = 0
        self.batch_supervised_accuracy = 0
        self.batch_supervised_L2_loss = 0
        self.batch_supervised_prevalence = 0
    def eval_subbatch(self, extra_predictions, sb):
        """Compute the weighted supervised loss for one sub-batch and fold
        the diagnostics (accuracy, L2, prevalence) into the batch
        accumulators; returns the differentiable loss tensor."""
        supervised_loss = torch.tensor(0., device=self.device)
        supervised_accuracy = torch.tensor(0., device=self.device)
        supervised_L2_loss = torch.tensor(0., device=self.device)
        supervised_prevalence = torch.tensor(0., device=self.device)
        binary_classification_tasks = 0
        classification_tasks = 0
        regression_tasks = 0
        for pos, info in enumerate(self.aux_info):
            coef = self.supervised_loss_coef[pos]
            pred = extra_predictions[info]
            # dict.__getitem__ bypasses DictList's own indexing on purpose.
            target = dict.__getitem__(sb.collected_info, info)
            if required_heads[info] == 'binary':
                binary_classification_tasks += 1
                classification_tasks += 1
                supervised_loss += coef * F.binary_cross_entropy_with_logits(pred.reshape(-1), target)
                # Logit > 0 corresponds to a positive prediction.
                supervised_accuracy += ((pred.reshape(-1) > 0).float() == target).float().mean()
                supervised_prevalence += target.mean()
            elif required_heads[info].startswith('continuous'):
                regression_tasks += 1
                mse = F.mse_loss(pred.reshape(-1), target)
                supervised_loss += coef * mse
                supervised_L2_loss += mse
            elif required_heads[info].startswith('multiclass'):
                classification_tasks += 1
                supervised_accuracy += (pred.argmax(1).float() == target).float().mean()
                supervised_loss += coef * F.cross_entropy(pred, target.long())
            else:
                raise ValueError("{} not supported".format(required_heads[info]))
        # Average the diagnostics over tasks; -1 marks "no task of this kind".
        if binary_classification_tasks > 0:
            supervised_prevalence /= binary_classification_tasks
        else:
            supervised_prevalence = torch.tensor(-1)
        if classification_tasks > 0:
            supervised_accuracy /= classification_tasks
        else:
            supervised_accuracy = torch.tensor(-1)
        if regression_tasks > 0:
            supervised_L2_loss /= regression_tasks
        else:
            supervised_L2_loss = torch.tensor(-1)
        self.batch_supervised_loss += supervised_loss.item()
        self.batch_supervised_accuracy += supervised_accuracy.item()
        self.batch_supervised_L2_loss += supervised_L2_loss.item()
        self.batch_supervised_prevalence += supervised_prevalence.item()
        return supervised_loss
    def update_batch_values(self):
        """Average the batch accumulators over the recurrence steps."""
        self.batch_supervised_loss /= self.recurrence
        self.batch_supervised_accuracy /= self.recurrence
        self.batch_supervised_L2_loss /= self.recurrence
        self.batch_supervised_prevalence /= self.recurrence
    def update_epoch_logs(self):
        """Append the averaged batch values to the per-epoch logs."""
        self.log_supervised_losses.append(self.batch_supervised_loss)
        self.log_supervised_accuracies.append(self.batch_supervised_accuracy)
        self.log_supervised_L2_losses.append(self.batch_supervised_L2_loss)
        self.log_supervised_prevalences.append(self.batch_supervised_prevalence)
    def end_training(self, logs):
        """Write the epoch means of all supervised metrics into ``logs``."""
        logs["supervised_loss"] = numpy.mean(self.log_supervised_losses)
        logs["supervised_accuracy"] = numpy.mean(self.log_supervised_accuracies)
        logs["supervised_L2_loss"] = numpy.mean(self.log_supervised_L2_losses)
        logs["supervised_prevalence"] = numpy.mean(self.log_supervised_prevalences)
        return logs
|
[
"torch.zeros",
"numpy.mean",
"babyai.rl.utils.DictList",
"torch.tensor"
] |
[((3547, 3571), 'babyai.rl.utils.DictList', 'DictList', (['collected_info'], {}), '(collected_info)\n', (3555, 3571), False, 'from babyai.rl.utils import DictList\n'), ((3605, 3632), 'babyai.rl.utils.DictList', 'DictList', (['extra_predictions'], {}), '(extra_predictions)\n', (3613, 3632), False, 'from babyai.rl.utils import DictList\n'), ((4934, 4971), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (4946, 4971), False, 'import torch\n'), ((5001, 5038), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (5013, 5038), False, 'import torch\n'), ((5067, 5104), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (5079, 5104), False, 'import torch\n'), ((5136, 5173), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (5148, 5173), False, 'import torch\n'), ((7960, 7998), 'numpy.mean', 'numpy.mean', (['self.log_supervised_losses'], {}), '(self.log_supervised_losses)\n', (7970, 7998), False, 'import numpy\n'), ((8037, 8079), 'numpy.mean', 'numpy.mean', (['self.log_supervised_accuracies'], {}), '(self.log_supervised_accuracies)\n', (8047, 8079), False, 'import numpy\n'), ((8117, 8158), 'numpy.mean', 'numpy.mean', (['self.log_supervised_L2_losses'], {}), '(self.log_supervised_L2_losses)\n', (8127, 8158), False, 'import numpy\n'), ((8199, 8242), 'numpy.mean', 'numpy.mean', (['self.log_supervised_prevalences'], {}), '(self.log_supervised_prevalences)\n', (8209, 8242), False, 'import numpy\n'), ((1208, 1247), 'torch.zeros', 'torch.zeros', (['*shape'], {'device': 'self.device'}), '(*shape, device=self.device)\n', (1219, 1247), False, 'import torch\n'), ((2519, 2580), 'torch.tensor', 'torch.tensor', (['env_info[info]'], {'dtype': 'dtype', 'device': 'self.device'}), '(env_info[info], dtype=dtype, device=self.device)\n', (2531, 2580), False, 'import torch\n'), ((6661, 
6677), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (6673, 6677), False, 'import torch\n'), ((6819, 6835), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (6831, 6835), False, 'import torch\n'), ((6967, 6983), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (6979, 6983), False, 'import torch\n'), ((1438, 1480), 'torch.zeros', 'torch.zeros', (['*shape', '(1)'], {'device': 'self.device'}), '(*shape, 1, device=self.device)\n', (1449, 1480), False, 'import torch\n'), ((1783, 1833), 'torch.zeros', 'torch.zeros', (['*shape', 'n_classes'], {'device': 'self.device'}), '(*shape, n_classes, device=self.device)\n', (1794, 1833), False, 'import torch\n')]
|
import random as rd
import numpy as np
import networkx as nx
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn import metrics
from numpy import linalg as LA
import matplotlib.pyplot as plt
from sklearn.cluster import SpectralClustering
DG= nx.DiGraph()  # directed Cluster subgraph, populated by initial_network(); used for motif isomorphism tests
G=nx.Graph()  # undirected counterpart of DG; used to find common neighbours when counting motifs
Cluster=[14,15,16,17,18,19,20,21,22,26,122,125,56,57,58,64,65,67,68,71,98,47,49,50,59,61,93,94,42,7,8,9,10,12,13,23,24,25,28,29,30,31,33,34,35,36,37,38,39,40,41,43,44,48,60,90,123,62,63,92,95,99]  # node ids of the original 128-node graph retained for analysis (62 nodes)
'''
Compute purity for clustering
'''
def purity(y_true,y_pred):
    """Clustering purity: each predicted cluster is credited with its dominant true class."""
    cm = metrics.cluster.contingency_matrix(y_true, y_pred)
    dominant_per_cluster = np.amax(cm, axis=0)
    return np.sum(dominant_per_cluster) / np.sum(cm)
'''
compute precision, recall and F score
for clustering
'''
def precision(y_true,y_pred):
    """Pairwise clustering precision: TP / (TP + FP) over all unordered item pairs.

    A pair is a true positive when both labelings group it together, a false
    positive when only the prediction does. Returns 0 when no pair is predicted
    together at all.
    """
    true_pos = 0.0
    false_pos = 0.0
    total = len(y_true)
    for a in range(total):
        for b in range(a + 1, total):
            if y_pred[a] == y_pred[b]:
                if y_true[a] == y_true[b]:
                    true_pos += 1
                else:
                    false_pos += 1
    denom = true_pos + false_pos
    if denom == 0:
        return 0
    return true_pos / denom
def recall(y_true,y_pred):
    """Pairwise clustering recall: TP / (TP + FN) over all unordered item pairs.

    A pair is a false negative when the true labeling groups it together but
    the prediction splits it. Returns 0 when the true labeling groups no
    pair together at all.
    """
    true_pos = 0.0
    false_neg = 0.0
    total = len(y_true)
    for a in range(total):
        for b in range(a + 1, total):
            if y_true[a] == y_true[b]:
                if y_pred[a] == y_pred[b]:
                    true_pos += 1
                else:
                    false_neg += 1
    denom = true_pos + false_neg
    if denom == 0:
        return 0
    return true_pos / denom
def F_score(y_true,y_pred,beta):
    """Weighted F-measure of pairwise precision and recall (beta > 1 favours recall)."""
    prec = precision(y_true, y_pred)
    rec = recall(y_true, y_pred)
    if prec == 0 and rec == 0:
        return 0
    weight = beta * beta
    return ((weight + 1) * prec * rec) / (weight * prec + rec)
'''
test whether 3 nodes form motif instance of M6
'''
def is_motif(i,j,k):
    """Return True iff nodes i, j, k induce motif M6 in DG: 0->1, 0->2, 1<->2."""
    global DG
    induced = DG.subgraph([i, j, k])
    # Edge list implicitly adds the three nodes 0, 1, 2.
    pattern = nx.DiGraph([(0, 1), (0, 2), (1, 2), (2, 1)])
    return nx.is_isomorphic(induced, pattern)
'''
compute total motif instances of M6
'''
def count_motif(i,j):
    """Count M6 motif instances that contain the undirected pair (i, j).

    Candidate third nodes are the common neighbours of i and j in G.
    """
    global G
    common_neighbours = set(G[i]) & set(G[j])
    return sum(1 for k in common_neighbours if is_motif(i, j, k))
'''
initialize the network and regularize the adjacency matrix
'''
def initial_network(f1,f2):
    """Build the motif adjacency matrix of the Cluster-induced subgraph.

    f1: open node-name file (one name per line; exactly 128 lines are read)
    f2: open edge-list file ("u v" per line, original node ids)

    Side effects: populates module globals DG (directed) and G (undirected)
    with the Cluster-induced subgraph, relabelled to compact ids 0..n-1.
    Returns the symmetric n x n matrix A with A[i, j] = number of M6 motif
    instances containing the pair (i, j).
    """
    global DG,Cluster,G
    dic=dict()  # original node id -> compact index 0..n-1
    n=len(Cluster)
    for i in range(n):
        dic[Cluster[i]]=i
    DG1=nx.DiGraph()  # the full 128-node directed graph read from the files
    nn=128
    for i in range(nn):
        names=f1.readline()  # NOTE(review): stored name keeps its trailing newline
        DG1.add_node(i,name=names,color=0)
    for line in f2:
        strli=line.split()
        a=int(strli[0])
        b=int(strli[1])
        DG1.add_edge(a,b)
    H=DG1.subgraph(Cluster)  # restrict to the retained nodes
    for u in H.nodes():
        # Mirror every node/edge into both the directed and undirected globals.
        DG.add_node(dic[u],name=H.nodes[u]['name'],color=0)
        G.add_node(dic[u],name=H.nodes[u]['name'],color=0)
    for e in H.edges():
        DG.add_edge(dic[e[0]],dic[e[1]])
        G.add_edge(dic[e[0]],dic[e[1]])
    A=np.zeros((n, n))
    for i in range(n):
        for j in range(i+1,n):
            num=count_motif(i,j)  # motif count is symmetric in (i, j)
            A[i,j]=num
            A[j,i]=num
    return A
'''
regularize the motif adjacency matrix
'''
def regularize_matrix(A, tau):
    """Regularize a (square) motif adjacency matrix by adding tau/n to every entry.

    A: n x n adjacency matrix (numpy array); not modified in place.
    tau: regularization strength.
    Returns a new matrix A + (tau / n) * ones((n, n)).

    Fix: the size n is taken from A itself (A.shape[0]) instead of the module
    global `Cluster`, removing a fragile hidden coupling; for the matrices
    produced by initial_network the two are identical.
    """
    n = A.shape[0]
    return A + (tau / n) * np.ones((n, n))
'''
construct multihop matrix
'''
def multihop_matrix(A, k):
    """Replace each nonzero entry A[i, j] with (A + A^2 + ... + A^k)[i, j].

    A: square numpy matrix; modified in place and also returned.
    k: number of hops (k >= 1; k == 1 leaves A unchanged).

    Fix: the original filled the nonzero entries with an O(N^2) Python double
    loop; a boolean mask does the same assignment vectorized. The mask is
    computed before any write, which is equivalent because each entry is
    rewritten exactly once and only originally-nonzero positions change.
    """
    print("hop number: " + str(k))
    reachable = A != 0
    S = A.copy()          # running sum of matrix powers A^1 .. A^k
    power = A
    for _ in range(k - 1):
        power = np.matmul(power, A)
        S = S + power
    A[reachable] = S[reachable]
    return A
'''
multihop spectral clustering
'''
def spectral_community_detect(A, groups):
    """Cluster the precomputed affinity matrix A into `groups` communities.

    Returns the integer label array produced by spectral clustering
    (fixed random_state for reproducibility).
    """
    clusterer = SpectralClustering(
        n_clusters=groups,
        eigen_solver='arpack',
        random_state=56,
        affinity='precomputed',
    )
    return clusterer.fit(A).labels_
'''
evaluate the community
'''
def evaluate_community(y_true, y_pred):
    """Score a predicted partition against ground truth.

    Returns a 4-tuple: (purity, NMI, ARI, pairwise F1).
    """
    return (
        purity(y_true, y_pred),
        normalized_mutual_info_score(y_true, y_pred, average_method='arithmetic'),
        adjusted_rand_score(y_true, y_pred),
        F_score(y_true, y_pred, 1),
    )
def main():
    """Run the full pipeline: load graph, build motif matrix, cluster, evaluate.

    Fixes: the node/edge files were opened but never closed (handle leak) --
    now wrapped in a `with` block; the local result variable `purity` shadowed
    the module-level purity() function and is renamed.
    """
    global G, DG
    # Ground-truth labelings of the 62 retained nodes at two granularities.
    Class1 = [1,1,1,2,2,2,2,2,2,3,4,4,5,5,5,5,5,6,6,5,5,3,3,3,7,8,7,7,3,9,10,10,10,11,11,12,12,12,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,7,5,4,7,6,7,7,6]
    Class2 = [1,1,1,1,1,1,1,1,1,2,3,3,4,4,4,4,4,5,5,4,4,2,2,2,6,5,6,6,2,7,7,7,7,7,7,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,6,4,3,6,5,6,6,5]
    # Context managers guarantee the files are closed even if parsing fails.
    with open('bay-nodes.txt', 'r') as F1, open('bay-edges.txt', 'r') as F2:
        A = initial_network(F1, F2)
    tau = 2.5  # regularization strength
    A = regularize_matrix(A, tau)
    q = 2      # number of hops for the multihop matrix
    k = 4      # number of communities to extract
    A = multihop_matrix(A, q)
    pred = spectral_community_detect(A, k)
    pur, nmi, ari, f1 = evaluate_community(Class1, pred)
    print("class1 Purity: " + str(pur))
    print("class1 NMI: " + str(nmi))
    print("class1 ARI: " + str(ari))
    print("class1 F1: " + str(f1))
    print("#########################")
    pur, nmi, ari, f1 = evaluate_community(Class2, pred)
    print("class2 Purity: " + str(pur))
    print("class2 NMI: " + str(nmi))
    print("class2 ARI: " + str(ari))
    print("class2 F1: " + str(f1))
main()
|
[
"sklearn.metrics.cluster.contingency_matrix",
"numpy.sum",
"sklearn.cluster.SpectralClustering",
"sklearn.metrics.cluster.adjusted_rand_score",
"numpy.zeros",
"numpy.ones",
"numpy.amax",
"networkx.Graph",
"numpy.matmul",
"networkx.is_isomorphic",
"networkx.DiGraph",
"sklearn.metrics.cluster.normalized_mutual_info_score"
] |
[((337, 349), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (347, 349), True, 'import networkx as nx\n'), ((353, 363), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (361, 363), True, 'import networkx as nx\n'), ((655, 705), 'sklearn.metrics.cluster.contingency_matrix', 'metrics.cluster.contingency_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (689, 705), False, 'from sklearn import metrics\n'), ((1980, 1992), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1990, 1992), True, 'import networkx as nx\n'), ((2141, 2164), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['H', 'M6'], {}), '(H, M6)\n', (2157, 2164), True, 'import networkx as nx\n'), ((2648, 2660), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2658, 2660), True, 'import networkx as nx\n'), ((3187, 3203), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (3195, 3203), True, 'import numpy as np\n'), ((3487, 3502), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (3494, 3502), True, 'import numpy as np\n'), ((3690, 3706), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (3698, 3706), True, 'import numpy as np\n'), ((4036, 4142), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'groups', 'eigen_solver': '"""arpack"""', 'random_state': '(56)', 'affinity': '"""precomputed"""'}), "(n_clusters=groups, eigen_solver='arpack', random_state=\n 56, affinity='precomputed')\n", (4054, 4142), False, 'from sklearn.cluster import SpectralClustering\n'), ((4328, 4401), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['y_true', 'y_pred'], {'average_method': '"""arithmetic"""'}), "(y_true, y_pred, average_method='arithmetic')\n", (4356, 4401), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((4410, 4445), 'sklearn.metrics.cluster.adjusted_rand_score', 'adjusted_rand_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4429, 4445), False, 'from 
sklearn.metrics.cluster import adjusted_rand_score\n'), ((759, 784), 'numpy.sum', 'np.sum', (['contigency_matrix'], {}), '(contigency_matrix)\n', (765, 784), True, 'import numpy as np\n'), ((3757, 3775), 'numpy.matmul', 'np.matmul', (['Ctmp', 'A'], {}), '(Ctmp, A)\n', (3766, 3775), True, 'import numpy as np\n'), ((724, 758), 'numpy.amax', 'np.amax', (['contigency_matrix'], {'axis': '(0)'}), '(contigency_matrix, axis=0)\n', (731, 758), True, 'import numpy as np\n')]
|
# Copyright (C) 2020 Intel Corporation
import requests
import json
import base64
import urllib.parse
import hashlib ## python3
import time
import random
from collections import OrderedDict
import argparse
import sys
def main(args):
    """POST the chosen device/local options to the policy FastCGI endpoint and report timing."""
    payload = OrderedDict()
    payload['device'] = args.device
    payload['local'] = args.local  # '1' = local inference, '0' = remote inference
    endpoint = 'http://localhost:8080/cgi-bin/fcgi_policy'
    # note: http_proxy must be set, otherwise the link cannot be reached
    started = time.time()
    response = requests.post(endpoint, data=payload)
    elapsed = time.time() - started
    if response.status_code == 200:
        print(response.content.decode('utf-8'))
        print("processing time is:", elapsed)
    else:
        print("the error number is:", response.status_code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(usage = "it's usage tip.", description = "help info.")
parser.add_argument("-d", "--device", choices=['CPU', 'GPU'], default = "CPU", help = "choose CPU device or GPU device")
parser.add_argument("-l", "--local", choices=['0', '1'], default = "1", help = "choose local inference or not. 1 for local inference 0 for remote inference")
args = parser.parse_args()
main(args)
|
[
"collections.OrderedDict",
"requests.post",
"argparse.ArgumentParser",
"time.time"
] |
[((249, 262), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (260, 262), False, 'from collections import OrderedDict\n'), ((396, 407), 'time.time', 'time.time', ([], {}), '()\n', (405, 407), False, 'import time\n'), ((536, 567), 'requests.post', 'requests.post', (['url'], {'data': 'params'}), '(url, data=params)\n', (549, 567), False, 'import requests\n'), ((854, 928), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""it\'s usage tip."""', 'description': '"""help info."""'}), '(usage="it\'s usage tip.", description=\'help info.\')\n', (877, 928), False, 'import argparse\n'), ((592, 603), 'time.time', 'time.time', ([], {}), '()\n', (601, 603), False, 'import time\n')]
|