language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | scipy/optimize/_linesearch.py | {
"start": 434,
"end": 27216
} | class ____(RuntimeWarning):
pass
def _check_c1_c2(c1, c2):
if not (0 < c1 < c2 < 1):
raise ValueError("'c1' and 'c2' do not satisfy"
"'0 < c1 < c2 < 1'.")
#------------------------------------------------------------------------------
# Minpack's Wolfe line and scalar searches
#------------------------------------------------------------------------------
def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
old_fval=None, old_old_fval=None,
args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
xtol=1e-14):
"""
As `scalar_search_wolfe1` but do a line search to direction `pk`
Parameters
----------
f : callable
Function `f(x)`
fprime : callable
Gradient of `f`
xk : array_like
Current point
pk : array_like
Search direction
gfk : array_like, optional
Gradient of `f` at point `xk`
old_fval : float, optional
Value of `f` at point `xk`
old_old_fval : float, optional
Value of `f` at point preceding `xk`
The rest of the parameters are the same as for `scalar_search_wolfe1`.
Returns
-------
stp, f_count, g_count, fval, old_fval
As in `line_search_wolfe1`
gval : array
Gradient of `f` at the final point
Notes
-----
Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``.
"""
if gfk is None:
gfk = fprime(xk, *args)
gval = [gfk]
gc = [0]
fc = [0]
def phi(s):
fc[0] += 1
return f(xk + s*pk, *args)
def derphi(s):
gval[0] = fprime(xk + s*pk, *args)
gc[0] += 1
return np.dot(gval[0], pk)
derphi0 = np.dot(gfk, pk)
stp, fval, old_fval = scalar_search_wolfe1(
phi, derphi, old_fval, old_old_fval, derphi0,
c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
return stp, fc[0], gc[0], fval, old_fval, gval[0]
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9,
amax=50, amin=1e-8, xtol=1e-14):
"""
Scalar function search for alpha that satisfies strong Wolfe conditions
alpha > 0 is assumed to be a descent direction.
Parameters
----------
phi : callable phi(alpha)
Function at point `alpha`
derphi : callable phi'(alpha)
Objective function derivative. Returns a scalar.
phi0 : float, optional
Value of phi at 0
old_phi0 : float, optional
Value of phi at previous point
derphi0 : float, optional
Value derphi at 0
c1 : float, optional
Parameter for Armijo condition rule.
c2 : float, optional
Parameter for curvature condition rule.
amax, amin : float, optional
Maximum and minimum step size
xtol : float, optional
Relative tolerance for an acceptable step.
Returns
-------
alpha : float
Step size, or None if no suitable step was found
phi : float
Value of `phi` at the new point `alpha`
phi0 : float
Value of `phi` at `alpha=0`
Notes
-----
Uses routine DCSRCH from MINPACK.
Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described in [1]_.
References
----------
.. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization.
In Springer Series in Operations Research and Financial Engineering.
(Springer Series in Operations Research and Financial Engineering).
Springer Nature.
"""
_check_c1_c2(c1, c2)
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None:
derphi0 = derphi(0.)
if old_phi0 is not None and derphi0 != 0:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
if alpha1 < 0:
alpha1 = 1.0
else:
alpha1 = 1.0
maxiter = 100
dcsrch = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax)
stp, phi1, phi0, task = dcsrch(
alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter
)
return stp, phi1, phi0
line_search = line_search_wolfe1
#------------------------------------------------------------------------------
# Pure-Python Wolfe line and scalar searches
#------------------------------------------------------------------------------
# Note: `line_search_wolfe2` is the public `scipy.optimize.line_search`
def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,
extra_condition=None, maxiter=10):
"""Find alpha that satisfies strong Wolfe conditions.
Parameters
----------
f : callable f(x,*args)
Objective function.
myfprime : callable f'(x,*args)
Objective function gradient.
xk : ndarray
Starting point.
pk : ndarray
Search direction. The search direction must be a descent direction
for the algorithm to converge.
gfk : ndarray, optional
Gradient value for x=xk (xk being the current parameter
estimate). Will be recomputed if omitted.
old_fval : float, optional
Function value for x=xk. Will be recomputed if omitted.
old_old_fval : float, optional
Function value for the point preceding x=xk.
args : tuple, optional
Additional arguments passed to objective function.
c1 : float, optional
Parameter for Armijo condition rule.
c2 : float, optional
Parameter for curvature condition rule.
amax : float, optional
Maximum step size
extra_condition : callable, optional
A callable of the form ``extra_condition(alpha, x, f, g)``
returning a boolean. Arguments are the proposed step ``alpha``
and the corresponding ``x``, ``f`` and ``g`` values. The line search
accepts the value of ``alpha`` only if this
callable returns ``True``. If the callable returns ``False``
for the step length, the algorithm will continue with
new iterates. The callable is only called for iterates
satisfying the strong Wolfe conditions.
maxiter : int, optional
Maximum number of iterations to perform.
Returns
-------
alpha : float or None
Alpha for which ``x_new = x0 + alpha * pk``,
or None if the line search algorithm did not converge.
fc : int
Number of function evaluations made.
gc : int
Number of gradient evaluations made.
new_fval : float or None
New function value ``f(x_new)=f(x0+alpha*pk)``,
or None if the line search algorithm did not converge.
old_fval : float
Old function value ``f(x0)``.
new_slope : float or None
The local slope along the search direction at the
new value ``<myfprime(x_new), pk>``,
or None if the line search algorithm did not converge.
Notes
-----
Uses the line search algorithm to enforce strong Wolfe
conditions. See Wright and Nocedal, 'Numerical Optimization',
1999, pp. 59-61.
The search direction `pk` must be a descent direction (e.g.
``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe
conditions. If the search direction is not a descent direction (e.g.
``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import line_search
An objective function and its gradient are defined.
>>> def obj_func(x):
... return (x[0])**2+(x[1])**2
>>> def obj_grad(x):
... return [2*x[0], 2*x[1]]
We can find alpha that satisfies strong Wolfe conditions.
>>> start_point = np.array([1.8, 1.7])
>>> search_gradient = np.array([-1.0, -1.0])
>>> line_search(obj_func, obj_grad, start_point, search_gradient)
(1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])
"""
fc = [0]
gc = [0]
gval = [None]
gval_alpha = [None]
def phi(alpha):
fc[0] += 1
return f(xk + alpha * pk, *args)
fprime = myfprime
def derphi(alpha):
gc[0] += 1
gval[0] = fprime(xk + alpha * pk, *args) # store for later use
gval_alpha[0] = alpha
return np.dot(gval[0], pk)
if gfk is None:
gfk = fprime(xk, *args)
derphi0 = np.dot(gfk, pk)
if extra_condition is not None:
# Add the current gradient as argument, to avoid needless
# re-evaluation
def extra_condition2(alpha, phi):
if gval_alpha[0] != alpha:
derphi(alpha)
x = xk + alpha * pk
return extra_condition(alpha, x, phi, gval[0])
else:
extra_condition2 = None
alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
extra_condition2, maxiter=maxiter)
if derphi_star is None:
warn('The line search algorithm did not converge',
LineSearchWarning, stacklevel=2)
else:
# derphi_star is a number (derphi) -- so use the most recently
# calculated gradient used in computing it derphi = gfk*pk
# this is the gradient at the next step no need to compute it
# again in the outer loop.
derphi_star = gval[0]
return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
def scalar_search_wolfe2(phi, derphi, phi0=None,
old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9, amax=None,
extra_condition=None, maxiter=10):
"""Find alpha that satisfies strong Wolfe conditions.
alpha > 0 is assumed to be a descent direction.
Parameters
----------
phi : callable phi(alpha)
Objective scalar function.
derphi : callable phi'(alpha)
Objective function derivative. Returns a scalar.
phi0 : float, optional
Value of phi at 0.
old_phi0 : float, optional
Value of phi at previous point.
derphi0 : float, optional
Value of derphi at 0
c1 : float, optional
Parameter for Armijo condition rule.
c2 : float, optional
Parameter for curvature condition rule.
amax : float, optional
Maximum step size.
extra_condition : callable, optional
A callable of the form ``extra_condition(alpha, phi_value)``
returning a boolean. The line search accepts the value
of ``alpha`` only if this callable returns ``True``.
If the callable returns ``False`` for the step length,
the algorithm will continue with new iterates.
The callable is only called for iterates satisfying
the strong Wolfe conditions.
maxiter : int, optional
Maximum number of iterations to perform.
Returns
-------
alpha_star : float or None
Best alpha, or None if the line search algorithm did not converge.
phi_star : float
phi at alpha_star.
phi0 : float
phi at 0.
derphi_star : float or None
derphi at alpha_star, or None if the line search algorithm
did not converge.
Notes
-----
Uses the line search algorithm to enforce strong Wolfe
conditions. See Wright and Nocedal, 'Numerical Optimization',
1999, pp. 59-61.
"""
_check_c1_c2(c1, c2)
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None:
derphi0 = derphi(0.)
alpha0 = 0
if old_phi0 is not None and derphi0 != 0:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
else:
alpha1 = 1.0
if alpha1 < 0:
alpha1 = 1.0
if amax is not None:
alpha1 = min(alpha1, amax)
phi_a1 = phi(alpha1)
#derphi_a1 = derphi(alpha1) evaluated below
phi_a0 = phi0
derphi_a0 = derphi0
if extra_condition is None:
def extra_condition(alpha, phi):
return True
for i in range(maxiter):
if alpha1 == 0 or (amax is not None and alpha0 > amax):
# alpha1 == 0: This shouldn't happen. Perhaps the increment has
# slipped below machine precision?
alpha_star = None
phi_star = phi0
phi0 = old_phi0
derphi_star = None
if alpha1 == 0:
msg = 'Rounding errors prevent the line search from converging'
else:
msg = "The line search algorithm could not find a solution " + \
f"less than or equal to amax: {amax}"
warn(msg, LineSearchWarning, stacklevel=2)
break
not_first_iteration = i > 0
if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
((phi_a1 >= phi_a0) and not_first_iteration):
alpha_star, phi_star, derphi_star = \
_zoom(alpha0, alpha1, phi_a0,
phi_a1, derphi_a0, phi, derphi,
phi0, derphi0, c1, c2, extra_condition)
break
derphi_a1 = derphi(alpha1)
if (abs(derphi_a1) <= -c2*derphi0):
if extra_condition(alpha1, phi_a1):
alpha_star = alpha1
phi_star = phi_a1
derphi_star = derphi_a1
break
if (derphi_a1 >= 0):
alpha_star, phi_star, derphi_star = \
_zoom(alpha1, alpha0, phi_a1,
phi_a0, derphi_a1, phi, derphi,
phi0, derphi0, c1, c2, extra_condition)
break
alpha2 = 2 * alpha1 # increase by factor of two on each iteration
if amax is not None:
alpha2 = min(alpha2, amax)
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi(alpha1)
derphi_a0 = derphi_a1
else:
# stopping test maxiter reached
alpha_star = alpha1
phi_star = phi_a1
derphi_star = None
warn('The line search algorithm did not converge',
LineSearchWarning, stacklevel=2)
return alpha_star, phi_star, phi0, derphi_star
def _cubicmin(a, fa, fpa, b, fb, c, fc):
"""
Finds the minimizer for a cubic polynomial that goes through the
points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
If no minimizer can be found, return None.
"""
# f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
C = fpa
db = b - a
dc = c - a
denom = (db * dc) ** 2 * (db - dc)
d1 = np.empty((2, 2))
d1[0, 0] = dc ** 2
d1[0, 1] = -db ** 2
d1[1, 0] = -dc ** 3
d1[1, 1] = db ** 3
[A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
fc - fa - C * dc]).flatten())
A /= denom
B /= denom
radical = B * B - 3 * A * C
xmin = a + (-B + np.sqrt(radical)) / (3 * A)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _quadmin(a, fa, fpa, b, fb):
"""
Finds the minimizer for a quadratic polynomial that goes through
the points (a,fa), (b,fb) with derivative at a of fpa.
"""
# f(x) = B*(x-a)^2 + C*(x-a) + D
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
D = fa
C = fpa
db = b - a * 1.0
B = (fb - D - C * db) / (db * db)
xmin = a - C / (2.0 * B)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
phi, derphi, phi0, derphi0, c1, c2, extra_condition):
"""Zoom stage of approximate linesearch satisfying strong Wolfe conditions.
Part of the optimization algorithm in `scalar_search_wolfe2`.
Notes
-----
Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
'Numerical Optimization', 1999, pp. 61.
"""
maxiter = 10
i = 0
delta1 = 0.2 # cubic interpolant check
delta2 = 0.1 # quadratic interpolant check
phi_rec = phi0
a_rec = 0
while True:
# interpolate to find a trial step length between a_lo and
# a_hi Need to choose interpolation here. Use cubic
# interpolation and then if the result is within delta *
# dalpha or outside of the interval bounded by a_lo or a_hi
# then use quadratic interpolation, if the result is still too
# close, then use bisection
dalpha = a_hi - a_lo
if dalpha < 0:
a, b = a_hi, a_lo
else:
a, b = a_lo, a_hi
# minimizer of cubic interpolant
# (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
#
# if the result is too close to the end points (or out of the
# interval), then use quadratic interpolation with phi_lo,
# derphi_lo and phi_hi if the result is still too close to the
# end points (or out of the interval) then use bisection
if (i > 0):
cchk = delta1 * dalpha
a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
a_rec, phi_rec)
if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
qchk = delta2 * dalpha
a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
a_j = a_lo + 0.5*dalpha
# Check new value of a_j
phi_aj = phi(a_j)
if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_j
phi_hi = phi_aj
else:
derphi_aj = derphi(a_j)
if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
a_star = a_j
val_star = phi_aj
valprime_star = derphi_aj
break
if derphi_aj*(a_hi - a_lo) >= 0:
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_lo
phi_hi = phi_lo
else:
phi_rec = phi_lo
a_rec = a_lo
a_lo = a_j
phi_lo = phi_aj
derphi_lo = derphi_aj
i += 1
if (i > maxiter):
# Failed to find a conforming step size
a_star = None
val_star = None
valprime_star = None
break
return a_star, val_star, valprime_star
#------------------------------------------------------------------------------
# Armijo line and scalar searches
#------------------------------------------------------------------------------
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""Minimize over alpha, the function ``f(xk+alpha pk)``.
Parameters
----------
f : callable
Function to be minimized.
xk : array_like
Current point.
pk : array_like
Search direction.
gfk : array_like
Gradient of `f` at point `xk`.
old_fval : float
Value of `f` at point `xk`.
args : tuple, optional
Optional arguments.
c1 : float, optional
Value to control stopping criterion.
alpha0 : scalar, optional
Value of `alpha` at start of the optimization.
Returns
-------
alpha
f_count
f_val_at_alpha
Notes
-----
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1*pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval # compute f(xk) -- done in past loop
derphi0 = np.dot(gfk, pk)
alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
alpha0=alpha0)
return alpha, fc[0], phi1
def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""
Compatibility wrapper for `line_search_armijo`
"""
r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
alpha0=alpha0)
return r[0], r[1], 0, r[2]
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
"""Minimize over alpha, the function ``phi(alpha)``.
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
alpha > 0 is assumed to be a descent direction.
Returns
-------
alpha
phi1
"""
phi_a0 = phi(alpha0)
if phi_a0 <= phi0 + c1*alpha0*derphi0:
return alpha0, phi_a0
# Otherwise, compute the minimizer of a quadratic interpolant:
alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
phi_a1 = phi(alpha1)
if (phi_a1 <= phi0 + c1*alpha1*derphi0):
return alpha1, phi_a1
# Otherwise, loop with cubic interpolation until we find an alpha which
# satisfies the first Wolfe condition (since we are backtracking, we will
# assume that the value of alpha is not too small and satisfies the second
# condition.
while alpha1 > amin: # we are assuming alpha>0 is a descent direction
factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
a = a / factor
b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
b = b / factor
alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
phi_a2 = phi(alpha2)
if (phi_a2 <= phi0 + c1*alpha2*derphi0):
return alpha2, phi_a2
if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
alpha2 = alpha1 / 2.0
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi_a2
# Failed to find a suitable step length
return None, phi_a1
#------------------------------------------------------------------------------
# Non-monotone line search for DF-SANE
#------------------------------------------------------------------------------
def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
gamma=1e-4, tau_min=0.1, tau_max=0.5):
"""
Nonmonotone backtracking line search as described in [1]_
Parameters
----------
f : callable
Function returning a tuple ``(f, F)`` where ``f`` is the value
of a merit function and ``F`` the residual.
x_k : ndarray
Initial position.
d : ndarray
Search direction.
prev_fs : float
List of previous merit function values. Should have ``len(prev_fs) <= M``
where ``M`` is the nonmonotonicity window parameter.
eta : float
Allowed merit function increase, see [1]_
gamma, tau_min, tau_max : float, optional
Search parameters, see [1]_
Returns
-------
alpha : float
Step length
xp : ndarray
Next position
fp : float
Merit function value at next position
Fp : ndarray
Residual at next position
References
----------
[1] "Spectral residual method without gradient information for solving
large-scale nonlinear systems of equations." W. La Cruz,
J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
"""
f_k = prev_fs[-1]
f_bar = max(prev_fs)
alpha_p = 1
alpha_m = 1
alpha = 1
while True:
xp = x_k + alpha_p * d
fp, Fp = f(xp)
if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
alpha = alpha_p
break
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
xp = x_k - alpha_m * d
fp, Fp = f(xp)
if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
alpha = -alpha_m
break
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
return alpha, xp, fp, Fp
def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
gamma=1e-4, tau_min=0.1, tau_max=0.5,
nu=0.85):
"""
Nonmonotone line search from [1]
Parameters
----------
f : callable
Function returning a tuple ``(f, F)`` where ``f`` is the value
of a merit function and ``F`` the residual.
x_k : ndarray
Initial position.
d : ndarray
Search direction.
f_k : float
Initial merit function value.
C, Q : float
Control parameters. On the first iteration, give values
Q=1.0, C=f_k
eta : float
Allowed merit function increase, see [1]_
nu, gamma, tau_min, tau_max : float, optional
Search parameters, see [1]_
Returns
-------
alpha : float
Step length
xp : ndarray
Next position
fp : float
Merit function value at next position
Fp : ndarray
Residual at next position
C : float
New value for the control parameter C
Q : float
New value for the control parameter Q
References
----------
.. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
search and its application to the spectral residual
method'', IMA J. Numer. Anal. 29, 814 (2009).
"""
alpha_p = 1
alpha_m = 1
alpha = 1
while True:
xp = x_k + alpha_p * d
fp, Fp = f(xp)
if fp <= C + eta - gamma * alpha_p**2 * f_k:
alpha = alpha_p
break
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
xp = x_k - alpha_m * d
fp, Fp = f(xp)
if fp <= C + eta - gamma * alpha_m**2 * f_k:
alpha = -alpha_m
break
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
# Update C and Q
Q_next = nu * Q + 1
C = (nu * Q * (C + eta) + fp) / Q_next
Q = Q_next
return alpha, xp, fp, Fp, C, Q
| LineSearchWarning |
python | redis__redis-py | redis/commands/search/aggregation.py | {
"start": 1690,
"end": 1897
} | class ____:
"""
This special class is used to indicate sort direction.
"""
DIRSTRING: Optional[str] = None
def __init__(self, field: str) -> None:
self.field = field
| SortDirection |
python | pandas-dev__pandas | pandas/tests/io/formats/test_format.py | {
"start": 72440,
"end": 74245
} | class ____:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
result = fmt._Timedelta64Formatter(x).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt._Timedelta64Formatter(x[1:2]).get_result()
assert result[0].strip() == "1 days"
result = fmt._Timedelta64Formatter(x).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt._Timedelta64Formatter(x[1:2]).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
result = fmt._Timedelta64Formatter(-x).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "-1 days"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
result = fmt._Timedelta64Formatter(y).get_result()
assert result[0].strip() == "0 days 00:00:00"
assert result[1].strip() == "0 days 00:00:01"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
result = fmt._Timedelta64Formatter(-y).get_result()
assert result[0].strip() == "0 days 00:00:00"
assert result[1].strip() == "-1 days +23:59:59"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [NaT], unit="D")._values
result = fmt._Timedelta64Formatter(x).get_result()
assert result[0].strip() == "0 days"
x = pd.to_timedelta(list(range(1)), unit="D")._values
result = fmt._Timedelta64Formatter(x).get_result()
assert result[0].strip() == "0 days"
| TestTimedelta64Formatter |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_diff_between_exclusive_threshold_range.py | {
"start": 919,
"end": 5697
} | class ____(DataProfilerProfileMetricProvider):
metric_name = "data_profiler.profile_numeric_columns_diff_between_threshold_range"
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - too complex
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_diff = metrics.get("data_profiler.profile_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_diff["data_stats"]
requested_columns = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
for stat, bounds in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
between_bounds = is_value_between_bounds(
diff_val, bounds["lower"], bounds["upper"], inclusive=False
)
if not between_bounds:
requested_columns[col][stat] = {
"lower_bound": bounds["lower"],
"upper_bound": bounds["upper"],
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_diff_between_threshold_range"
):
dependencies["data_profiler.profile_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsDiffBetweenThresholdRange |
python | walkccc__LeetCode | solutions/1905. Count Sub Islands/1905.py | {
"start": 0,
"end": 592
} | class ____:
def countSubIslands(
self,
grid1: list[list[int]],
grid2: list[list[int]],
) -> int:
m = len(grid2)
n = len(grid2[0])
def dfs(i: int, j: int) -> int:
if i < 0 or i == m or j < 0 or j == n:
return 1
if grid2[i][j] != 1:
return 1
grid2[i][j] = 2 # Mark 2 as visited.
return (dfs(i + 1, j) & dfs(i - 1, j) &
dfs(i, j + 1) & dfs(i, j - 1) & grid1[i][j])
ans = 0
for i in range(m):
for j in range(n):
if grid2[i][j] == 1:
ans += dfs(i, j)
return ans
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/citation_page_location_param.py | {
"start": 253,
"end": 540
} | class ____(TypedDict, total=False):
cited_text: Required[str]
document_index: Required[int]
document_title: Required[Optional[str]]
end_page_number: Required[int]
start_page_number: Required[int]
type: Required[Literal["page_location"]]
| CitationPageLocationParam |
python | Textualize__textual | tests/animations/test_tabs_underline_animation.py | {
"start": 239,
"end": 1983
} | class ____(App[None]):
def compose(self) -> ComposeResult:
with TabbedContent():
for _ in range(10):
yield Label("Hey!")
async def test_tabs_underline_animates_on_full() -> None:
"""The underline takes some time to move when animated."""
app = TabbedContentApp()
app.animation_level = "full"
animations: list[str] = []
async with app.run_test() as pilot:
animator = app.animator
animator._record_animation = animations.append
app.query_one(Tabs).action_previous_tab()
await pilot.pause()
assert "highlight_start" in animations
assert "highlight_end" in animations
async def test_tabs_underline_animates_on_basic() -> None:
"""The underline takes some time to move when animated."""
app = TabbedContentApp()
app.animation_level = "basic"
animations: list[str] = []
async with app.run_test() as pilot:
animator = app.animator
animator._record_animation = animations.append
app.query_one(Tabs).action_previous_tab()
await pilot.pause()
assert "highlight_start" in animations
assert "highlight_end" in animations
async def test_tabs_underline_does_not_animate_on_none() -> None:
"""The underline jumps to its final position when not animated."""
app = TabbedContentApp()
app.animation_level = "none"
animations: list[str] = []
async with app.run_test() as pilot:
animator = app.animator
animator._record_animation = animations.append
app.query_one(Tabs).action_previous_tab()
await pilot.pause()
assert "highlight_start" not in animations
assert "highlight_end" not in animations
| TabbedContentApp |
python | mahmoud__glom | glom/matching.py | {
"start": 14708,
"end": 17528
} | class ____:
""":attr:`~glom.M` is similar to :attr:`~glom.T`, a stand-in for the
current target, but where :attr:`~glom.T` allows for attribute and
key access and method calls, :attr:`~glom.M` allows for comparison
operators.
If a comparison succeeds, the target is returned unchanged.
If a comparison fails, :class:`~glom.MatchError` is thrown.
Some examples:
>>> glom(1, M > 0)
1
>>> glom(0, M == 0)
0
>>> glom('a', M != 'b') == 'a'
True
:attr:`~glom.M` by itself evaluates the current target for truthiness.
For example, `M | Val(None)` is a simple idiom for normalizing all falsey values to None:
>>> from glom import Val
>>> glom([0, False, "", None], [M | Val(None)])
[None, None, None, None]
For convenience, ``&`` and ``|`` operators are overloaded to
construct :attr:`~glom.And` and :attr:`~glom.Or` instances.
>>> glom(1.0, (M > 0) & float)
1.0
.. note::
Python's operator overloading may make for concise code,
but it has its limits.
Because bitwise operators (``&`` and ``|``) have higher precedence
than comparison operators (``>``, ``<``, etc.), expressions must
be parenthesized.
>>> M > 0 & float
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for &: 'int' and 'type'
Similarly, because of special handling around ternary
comparisons (``1 < M < 5``) are implemented via
short-circuiting evaluation, they also cannot be captured by
:data:`M`.
"""
__slots__ = ()
def __call__(self, spec):
"""wrap a sub-spec in order to apply comparison operators to the result"""
if not isinstance(spec, type(T)):
# TODO: open this up for other specs so we can do other
# checks, like function calls
raise TypeError("M() only accepts T-style specs, not %s" % type(spec).__name__)
return _MSubspec(spec)
def __eq__(self, other):
return _MExpr(self, '=', other)
def __ne__(self, other):
return _MExpr(self, '!', other)
def __gt__(self, other):
return _MExpr(self, '>', other)
def __lt__(self, other):
return _MExpr(self, '<', other)
def __ge__(self, other):
return _MExpr(self, 'g', other)
def __le__(self, other):
return _MExpr(self, 'l', other)
def __and__(self, other):
return And(self, other)
__rand__ = __and__
def __or__(self, other):
return Or(self, other)
def __invert__(self):
return Not(self)
def __repr__(self):
return "M"
def glomit(self, target, spec):
if target:
return target
raise MatchError("{0!r} not truthy", target)
M = _MType()
| _MType |
python | google__pytype | pytype/overlays/typed_dict.py | {
"start": 7893,
"end": 9556
} | class ____(abstract.PyTDClass):
"""A template for typed dicts."""
def __init__(self, props, base_cls, ctx):
self.props = props
self._base_cls = base_cls # TypedDictBuilder for constructing subclasses
super().__init__(props.name, ctx.convert.dict_type.pytd_cls, ctx)
self.init_method = self._make_init(props)
def __repr__(self):
return f"TypedDictClass({self.name})"
def _make_init(self, props):
# __init__ method for type checking signatures.
# We construct this here and pass it to TypedDictClass because we need
# access to abstract.SimpleFunction.
sig = function.Signature.from_param_names(
f"{props.name}.__init__",
props.fields.keys(),
kind=pytd.ParameterKind.KWONLY,
)
sig.annotations = dict(props.fields)
sig.defaults = {
k: self.ctx.new_unsolvable(self.ctx.root_node) for k in props.optional
}
return abstract.SimpleFunction(sig, self.ctx)
def _new_instance(self, container, node, args):
self.init_method.match_and_map_args(node, args, None)
ret = TypedDict(self.props, self.ctx)
for k, v in args.namedargs.items():
ret.set_str_item(node, k, v)
ret.cls = self
return ret
def instantiate_value(self, node, container):
args = function.Args(())
for name, typ in self.props.fields.items():
args.namedargs[name] = typ.instantiate(node)
return self._new_instance(container, node, args)
def instantiate(self, node, container=None):
return self.instantiate_value(node, container).to_variable(node)
def make_class(self, *args, **kwargs):
return self._base_cls.make_class(*args, **kwargs)
| TypedDictClass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 354989,
"end": 355756
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateNotificationRestrictionSetting"""
__schema__ = github_schema
__field_names__ = ("owner_id", "setting_value", "client_mutation_id")
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
"""The ID of the owner on which to set the restrict notifications
setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(NotificationRestrictionSettingValue), graphql_name="settingValue")
"""The value for the restrict notifications setting."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateNotificationRestrictionSettingInput |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/ExampleApp.py | {
"start": 9273,
"end": 21662
} | class ____(QtWidgets.QMainWindow):
# update qtLibCombo item order to match bindings in the UI file and recreate
# the templates files if you change bindings.
bindings = {'PyQt6': 0, 'PySide6': 1, 'PyQt5': 2, 'PySide2': 3}
modules = tuple(m.name for m in pkgutil.iter_modules())
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.ui = ui_template.Ui_Form()
self.cw = QtWidgets.QWidget()
self.setCentralWidget(self.cw)
self.ui.setupUi(self.cw)
self.setWindowTitle("PyQtGraph Examples")
self.codeBtn = QtWidgets.QPushButton('Run Edited Code')
self.codeLayout = QtWidgets.QGridLayout()
self.ui.codeView.setLayout(self.codeLayout)
self.hl = PythonHighlighter(self.ui.codeView.document())
app = QtWidgets.QApplication.instance()
policy = QtWidgets.QSizePolicy.Policy.Expanding
self.codeLayout.addItem(QtWidgets.QSpacerItem(100,100, policy, policy), 0, 0)
self.codeLayout.addWidget(self.codeBtn, 1, 1)
self.codeBtn.hide()
textFil = self.ui.exampleFilter
self.curListener = None
self.ui.exampleFilter.setFocus()
self.ui.qtLibCombo.addItems(self.bindings.keys())
self.ui.qtLibCombo.setCurrentIndex(self.bindings[QT_LIB])
def onComboChanged(searchType):
if self.curListener is not None:
self.curListener.disconnect()
self.curListener = textFil.textChanged
# In case the regex was invalid before switching to title search,
# ensure the "invalid" color is reset
self.ui.exampleFilter.setStyleSheet('')
if searchType == 'Content Search':
self.curListener.connect(self.filterByContent)
else:
self.hl.searchText = None
self.curListener.connect(self.filterByTitle)
# Fire on current text, too
self.curListener.emit(textFil.text())
self.ui.searchFiles.currentTextChanged.connect(onComboChanged)
onComboChanged(self.ui.searchFiles.currentText())
self.itemCache = []
self.populateTree(self.ui.exampleTree.invisibleRootItem(), utils.examples_)
self.ui.exampleTree.expandAll()
self.resize(1000,500)
self.show()
self.ui.splitter.setSizes([250,750])
self.oldText = self.ui.codeView.toPlainText()
self.ui.loadBtn.clicked.connect(self.loadFile)
self.ui.exampleTree.currentItemChanged.connect(self.showFile)
self.ui.exampleTree.itemDoubleClicked.connect(self.loadFile)
self.ui.codeView.textChanged.connect(self.onTextChange)
self.codeBtn.clicked.connect(self.runEditedCode)
self.updateCodeViewTabWidth(self.ui.codeView.font())
def event(self, event: Optional[QtCore.QEvent]):
if event is None:
return super().event(None)
if event.type() in [
QtCore.QEvent.Type.ApplicationPaletteChange,
]:
app = pg.mkQApp()
try:
darkMode = app.styleHints().colorScheme() == QtCore.Qt.ColorScheme.Dark
except AttributeError:
palette = app.palette()
windowTextLightness = palette.color(QtGui.QPalette.ColorRole.WindowText).lightness()
windowLightness = palette.color(QtGui.QPalette.ColorRole.Window).lightness()
darkMode = windowTextLightness > windowLightness
app.setProperty('darkMode', darkMode)
self.hl = PythonHighlighter(self.ui.codeView.document())
return super().event(event)
def updateCodeViewTabWidth(self,font):
"""
Change the codeView tabStopDistance to 4 spaces based on the size of the current font
"""
fm = QtGui.QFontMetrics(font)
tabWidth = fm.horizontalAdvance(' ' * 4)
# the default value is 80 pixels! that's more than 2x what we want.
self.ui.codeView.setTabStopDistance(tabWidth)
def showEvent(self, event) -> None:
super(ExampleLoader, self).showEvent(event)
disabledColor = QColor(QtCore.Qt.GlobalColor.red)
for name, idx in self.bindings.items():
disableBinding = name not in self.modules
if disableBinding:
item = self.ui.qtLibCombo.model().item(idx)
item.setData(disabledColor, QtCore.Qt.ItemDataRole.ForegroundRole)
item.setEnabled(False)
item.setToolTip(f'{item.text()} is not installed')
def onTextChange(self):
"""
textChanged fires when the highlighter is reassigned the same document.
Prevent this from showing "run edited code" by checking for actual
content change
"""
newText = self.ui.codeView.toPlainText()
if newText != self.oldText:
self.oldText = newText
self.codeEdited()
def filterByTitle(self, text):
self.showExamplesByTitle(self.getMatchingTitles(text))
self.hl.setDocument(self.ui.codeView.document())
def filterByContent(self, text=None):
# If the new text isn't valid regex, fail early and highlight the search filter red to indicate a problem
# to the user
validRegex = True
try:
re.compile(text)
self.ui.exampleFilter.setStyleSheet('')
except re.error:
colors = DarkThemeColors if app.property('darkMode') else LightThemeColors
errorColor = pg.mkColor(colors.Red)
validRegex = False
errorColor.setAlpha(100)
# Tuple prints nicely :)
self.ui.exampleFilter.setStyleSheet(f'background: rgba{errorColor.getRgb()}')
if not validRegex:
return
checkDict = unnestedDict(utils.examples_)
self.hl.searchText = text
# Need to reapply to current document
self.hl.setDocument(self.ui.codeView.document())
titles = []
text = text.lower()
for kk, vv in checkDict.items():
if isinstance(vv, Namespace):
vv = vv.filename
filename = os.path.join(path, vv)
contents = self.getExampleContent(filename).lower()
if text in contents:
titles.append(kk)
self.showExamplesByTitle(titles)
def getMatchingTitles(self, text, exDict=None, acceptAll=False):
if exDict is None:
exDict = utils.examples_
text = text.lower()
titles = []
for kk, vv in exDict.items():
matched = acceptAll or text in kk.lower()
if isinstance(vv, dict):
titles.extend(self.getMatchingTitles(text, vv, acceptAll=matched))
elif matched:
titles.append(kk)
return titles
def showExamplesByTitle(self, titles):
QTWI = QtWidgets.QTreeWidgetItemIterator
flag = QTWI.IteratorFlag.NoChildren
treeIter = QTWI(self.ui.exampleTree, flag)
item = treeIter.value()
while item is not None:
parent = item.parent()
show = (item.childCount() or item.text(0) in titles)
item.setHidden(not show)
# If all children of a parent are gone, hide it
if parent:
hideParent = True
for ii in range(parent.childCount()):
if not parent.child(ii).isHidden():
hideParent = False
break
parent.setHidden(hideParent)
treeIter += 1
item = treeIter.value()
def simulate_black_mode(self):
"""
used to simulate MacOS "black mode" on other platforms
intended for debug only, as it manage only the QPlainTextEdit
"""
# first, a dark background
c = QtGui.QColor('#171717')
p = self.ui.codeView.palette()
p.setColor(QtGui.QPalette.ColorGroup.Active, QtGui.QPalette.ColorRole.Base, c)
p.setColor(QtGui.QPalette.ColorGroup.Inactive, QtGui.QPalette.ColorRole.Base, c)
self.ui.codeView.setPalette(p)
# then, a light font
f = QtGui.QTextCharFormat()
f.setForeground(QtGui.QColor('white'))
self.ui.codeView.setCurrentCharFormat(f)
# finally, override application automatic detection
app = QtWidgets.QApplication.instance()
app.setProperty('darkMode', True)
def populateTree(self, root, examples):
bold_font = None
for key, val in examples.items():
item = QtWidgets.QTreeWidgetItem([key])
self.itemCache.append(item) # PyQt 4.9.6 no longer keeps references to these wrappers,
# so we need to make an explicit reference or else the .file
# attribute will disappear.
if isinstance(val, OrderedDict):
self.populateTree(item, val)
elif isinstance(val, Namespace):
item.file = val.filename
if 'recommended' in val:
if bold_font is None:
bold_font = item.font(0)
bold_font.setBold(True)
item.setFont(0, bold_font)
else:
item.file = val
root.addChild(item)
def currentFile(self):
item = self.ui.exampleTree.currentItem()
if hasattr(item, 'file'):
return os.path.join(path, item.file)
return None
def loadFile(self, *, edited=False):
# make *edited* keyword-only so it is not confused for extra arguments
# sent by ui signals
qtLib = self.ui.qtLibCombo.currentText()
env = dict(os.environ, PYQTGRAPH_QT_LIB=qtLib)
example_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.dirname(os.path.dirname(example_path))
env['PYTHONPATH'] = f'{path}'
if edited:
proc = subprocess.Popen([sys.executable, '-'], stdin=subprocess.PIPE, cwd=example_path, env=env)
code = self.ui.codeView.toPlainText().encode('UTF-8')
proc.stdin.write(code)
proc.stdin.close()
else:
fn = self.currentFile()
if fn is None:
return
subprocess.Popen([sys.executable, fn], cwd=path, env=env)
def showFile(self):
fn = self.currentFile()
text = self.getExampleContent(fn)
self.ui.codeView.setPlainText(text)
self.ui.loadedFileLabel.setText(fn)
self.codeBtn.hide()
@lru_cache(100)
def getExampleContent(self, filename):
if filename is None:
self.ui.codeView.clear()
return
if os.path.isdir(filename):
filename = os.path.join(filename, '__main__.py')
with open(filename, "r") as currentFile:
text = currentFile.read()
return text
def codeEdited(self):
self.codeBtn.show()
def runEditedCode(self):
self.loadFile(edited=True)
def keyPressEvent(self, event):
super().keyPressEvent(event)
if not (event.modifiers() & QtCore.Qt.KeyboardModifier.ControlModifier):
return
key = event.key()
Key = QtCore.Qt.Key
# Allow quick navigate to search
if key == Key.Key_F:
self.ui.exampleFilter.setFocus()
event.accept()
return
if key not in [Key.Key_Plus, Key.Key_Minus, Key.Key_Underscore, Key.Key_Equal, Key.Key_0]:
return
font = self.ui.codeView.font()
oldSize = font.pointSize()
if key == Key.Key_Plus or key == Key.Key_Equal:
font.setPointSize(oldSize + max(oldSize*.15, 1))
elif key == Key.Key_Minus or key == Key.Key_Underscore:
newSize = oldSize - max(oldSize*.15, 1)
font.setPointSize(max(newSize, 1))
elif key == Key.Key_0:
# Reset to original size
font.setPointSize(10)
self.ui.codeView.setFont(font)
self.updateCodeViewTabWidth(font)
event.accept()
def main():
app = pg.mkQApp()
loader = ExampleLoader()
loader.ui.exampleTree.setCurrentIndex(
loader.ui.exampleTree.model().index(0,0)
)
pg.exec()
if __name__ == '__main__':
main()
| ExampleLoader |
python | readthedocs__readthedocs.org | readthedocs/organizations/querysets.py | {
"start": 5561,
"end": 5659
} | class ____(SettingsOverrideObject):
_default_class = BaseOrganizationQuerySet
| OrganizationQuerySet |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_tpu.py | {
"start": 4911,
"end": 10083
} | class ____(BoringModel):
def validation_step(self, *args, **kwargs):
out = super().validation_step(*args, **kwargs)
self.log("val_loss", out["x"])
return out
@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_model_tpu_early_stop(tmp_path):
model = CustomBoringModel()
trainer = Trainer(
callbacks=[EarlyStopping(monitor="val_loss")],
default_root_dir=tmp_path,
enable_progress_bar=False,
max_epochs=2,
limit_train_batches=2,
limit_val_batches=2,
accelerator="tpu",
devices="auto",
)
trainer.fit(model)
trainer.test(model, dataloaders=DataLoader(RandomDataset(32, 2000), batch_size=32))
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_tpu_grad_norm(tmp_path):
"""Test if grad_norm works on TPU."""
trainer_options = {
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 4,
"accelerator": "tpu",
"devices": 1,
"limit_train_batches": 0.4,
"limit_val_batches": 0.4,
"gradient_clip_val": 0.5,
}
model = BoringModel()
tpipes.run_model_test(trainer_options, model, with_hpc=False)
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_tpu_clip_grad_by_value(tmp_path):
"""Test if clip_gradients by value works on TPU."""
trainer_options = {
"default_root_dir": tmp_path,
"enable_progress_bar": False,
"max_epochs": 4,
"accelerator": "tpu",
"devices": 1,
"limit_train_batches": 3,
"limit_val_batches": 3,
"gradient_clip_val": 0.5,
"gradient_clip_algorithm": "value",
}
model = BoringModel()
tpipes.run_model_test(trainer_options, model, with_hpc=False)
@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_dataloaders_passed_to_fit(tmp_path):
"""Test if dataloaders passed to trainer works on TPU."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, accelerator="tpu", devices="auto")
trainer.fit(model, train_dataloaders=model.train_dataloader(), val_dataloaders=model.val_dataloader())
@pytest.mark.parametrize("devices", [[1, 8], "9, ", [9], [-1], 2, 10])
def test_tpu_misconfiguration(devices, tpu_available):
with pytest.raises(ValueError, match="`devices` can only be"):
Trainer(accelerator="tpu", devices=devices)
@pytest.mark.skipif(XLAAccelerator.is_available(), reason="test requires missing TPU")
@mock.patch("lightning.fabric.accelerators.xla._using_pjrt", return_value=True)
def test_exception_when_no_tpu_found(_, xla_available):
"""Test if exception is thrown when xla devices are not available."""
with pytest.raises(MisconfigurationException, match="XLAAccelerator` can not run on your system"):
Trainer(accelerator="tpu", devices=8)
@pytest.mark.parametrize("devices", [1, 4, [1]])
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_accelerator_set_when_using_tpu(devices):
"""Test if the accelerator is set to `tpu` when devices is not None."""
assert isinstance(Trainer(accelerator="tpu", devices=devices).accelerator, XLAAccelerator)
@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_if_test_works_with_checkpoint_false(tmp_path):
"""Ensure that model trains properly when `enable_checkpointing` is set to False."""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(
max_epochs=1,
accelerator="tpu",
devices="auto",
default_root_dir=tmp_path,
fast_dev_run=True,
enable_checkpointing=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
def wrap_launch_function(fn, strategy, *args, **kwargs):
# the launcher does not manage this automatically. explanation available in:
# https://github.com/Lightning-AI/pytorch-lightning/pull/14926#discussion_r982976718
strategy.setup_environment()
return fn(*args, **kwargs)
def xla_launch(fn):
# TODO: the accelerator should be optional to just launch processes, but this requires lazy initialization
accelerator = XLAAccelerator()
strategy = XLAStrategy(
accelerator=accelerator,
parallel_devices=XLAAccelerator.get_parallel_devices(XLAAccelerator.auto_device_count()),
)
launcher = _XLALauncher(strategy=strategy)
wrapped = partial(wrap_launch_function, fn, strategy)
return launcher.launch(wrapped, strategy)
def tpu_sync_dist_fn(strategy):
sync = _Sync(strategy.reduce, _should=True, _op=torch.distributed.ReduceOp.SUM)
value = torch.tensor([1.0])
value = sync(value)
assert value.item() == strategy.world_size
@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_tpu_sync_dist():
"""Test tpu spawn sync dist operation."""
xla_launch(tpu_sync_dist_fn)
| CustomBoringModel |
python | openai__gym | tests/envs/utils_envs.py | {
"start": 1086,
"end": 1382
} | class ____(gym.Env):
"""Environment that has neither human- nor rgb-rendering"""
metadata = {"render_modes": ["ascii"], "render_fps": 4}
def __init__(self, render_mode=None):
assert render_mode in self.metadata["render_modes"]
self.render_mode = render_mode
| NoHumanNoRGB |
python | tensorflow__tensorflow | tensorflow/python/trackable/base.py | {
"start": 4036,
"end": 5364
} | class ____(object):
"""Tensor wrapper for managing update UIDs in `Variables`.
When supplied as an initial value, objects of this type let a `Variable`
(`Variable`, `ResourceVariable`, etc.) know the UID of the restore the initial
value came from. This allows deferred restorations to be sequenced in the
order the user specified them, and lets us fall back on assignment if an
initial value is not set (e.g. due to a custom getter interfering).
See comments in _add_variable_with_custom_getter for more information about
how `CheckpointInitialValue` is used.
"""
def __init__(self, checkpoint_position, shape=None, shard_info=None):
if shard_info:
full_shape_str = " ".join("%d" % d for d in shape) + " "
slice_spec = ":".join(
"%d,%d" % (o, s) for o, s in zip(shard_info.offset, shard_info.shape))
shape_and_slice = full_shape_str + slice_spec
else:
shape_and_slice = ""
self.wrapped_value = checkpoint_position.value_tensors(
{VARIABLE_VALUE_KEY: shape_and_slice})[VARIABLE_VALUE_KEY]
self._checkpoint_position = checkpoint_position
def __tf_tensor__(self, dtype=None, name=None):
del dtype
del name
return self.wrapped_value
@property
def checkpoint_position(self):
return self._checkpoint_position
| CheckpointInitialValue |
python | ray-project__ray | python/ray/data/_internal/execution/operators/aggregate_num_rows.py | {
"start": 301,
"end": 1971
} | class ____(PhysicalOperator):
"""Count number of rows in input bundles.
This operator aggregates the number of rows in input bundles using the bundles'
block metadata. It outputs a single row with the specified column name.
"""
def __init__(
self,
input_dependencies,
data_context: DataContext,
column_name: str,
):
super().__init__(
"AggregateNumRows",
input_dependencies,
data_context,
)
self._column_name = column_name
self._num_rows = 0
self._has_outputted = False
self._estimated_num_output_bundles = 1
self._estimated_output_num_rows = 1
def has_next(self) -> bool:
return self._inputs_complete and not self._has_outputted
def _get_next_inner(self) -> RefBundle:
assert self._inputs_complete
builder = DelegatingBlockBuilder()
builder.add({self._column_name: self._num_rows})
block = builder.build()
block_ref = ray.put(block)
metadata = BlockAccessor.for_block(block).get_metadata()
schema = BlockAccessor.for_block(block).schema()
bundle = RefBundle([(block_ref, metadata)], owns_blocks=True, schema=schema)
self._has_outputted = True
return bundle
def get_stats(self) -> StatsDict:
return {}
def _add_input_inner(self, refs, input_index) -> None:
assert refs.num_rows() is not None
self._num_rows += refs.num_rows()
def throttling_disabled(self) -> bool:
return True
def implements_accurate_memory_accounting(self) -> bool:
return True
| AggregateNumRows |
python | kamyu104__LeetCode-Solutions | Python/count-subarrays-of-length-three-with-a-condition.py | {
"start": 37,
"end": 258
} | class ____(object):
def countSubarrays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum((nums[i-1]+nums[i+1])*2 == nums[i] for i in xrange(1, len(nums)-1))
| Solution |
python | fluentpython__example-code-2e | 17-it-generator/sentence_gen.py | {
"start": 144,
"end": 484
} | class ____:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self):
for word in self.words: # <1>
yield word # <2>
# <3>
# done! <4>
# end::SENTENCE_GEN[]
| Sentence |
python | catalyst-team__catalyst | catalyst/contrib/losses/contrastive.py | {
"start": 73,
"end": 1739
} | class ____(nn.Module):
"""The Contrastive embedding loss.
It has been proposed in `Dimensionality Reduction
by Learning an Invariant Mapping`_.
.. _Dimensionality Reduction by Learning an Invariant Mapping:
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=1.0, reduction="mean"):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or "none"
def forward(
self,
embeddings_left: torch.Tensor,
embeddings_right: torch.Tensor,
distance_true,
) -> torch.Tensor:
"""Forward propagation method for the contrastive loss.
Args:
embeddings_left: left objects embeddings
embeddings_right: right objects embeddings
distance_true: true distances
Returns:
torch.Tensor: loss
"""
# euclidian distance
diff = embeddings_left - embeddings_right
distance_pred = torch.sqrt(torch.sum(torch.pow(diff, 2), 1))
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(
distance_pred, 2
) + distance_true * torch.pow(margin_distance, 2)
if self.reduction == "mean":
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == "sum":
loss = torch.sum(loss)
return loss
| ContrastiveEmbeddingLoss |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 5727,
"end": 5989
} | class ____(PydanticTypeError):
code = 'enum'
def __str__(self) -> str:
permitted = ', '.join(repr(v.value) for v in self.enum_values) # type: ignore
return f'value is not a valid enumeration member; permitted: {permitted}'
| EnumMemberError |
python | apache__airflow | providers/apprise/src/airflow/providers/apprise/hooks/apprise.py | {
"start": 1247,
"end": 7441
} | class ____(BaseHook):
"""
Use Apprise(https://github.com/caronc/apprise) to interact with notification services.
The complete list of notification services supported by Apprise can be found at:
https://github.com/caronc/apprise/wiki#notification-services.
:param apprise_conn_id: :ref:`Apprise connection id <howto/connection:apprise>`
that has services configured in the `config` field.
"""
conn_name_attr = "apprise_conn_id"
default_conn_name = "apprise_default"
conn_type = "apprise"
hook_name = "Apprise"
def __init__(self, apprise_conn_id: str = default_conn_name) -> None:
super().__init__()
self.apprise_conn_id = apprise_conn_id
def get_config_from_conn(self, conn: Connection):
config = conn.extra_dejson["config"]
return json.loads(config) if isinstance(config, str) else config
def set_config_from_conn(self, conn: Connection, apprise_obj: apprise.Apprise):
"""Set config from connection to apprise object."""
config_object = self.get_config_from_conn(conn=conn)
if isinstance(config_object, list):
for config in config_object:
apprise_obj.add(config["path"], tag=config.get("tag", None))
elif isinstance(config_object, dict):
apprise_obj.add(config_object["path"], tag=config_object.get("tag", None))
else:
raise ValueError(
f"Only types of dict or list[dict] are expected in Apprise connections,"
f" got {type(config_object)}"
)
def notify(
self,
body: str,
title: str | None = None,
notify_type: NotifyType = NotifyType.INFO,
body_format: NotifyFormat = NotifyFormat.TEXT,
tag: str | Iterable[str] = "all",
attach: AppriseAttachment | None = None,
interpret_escapes: bool | None = None,
config: AppriseConfig | None = None,
):
r"""
Send message to plugged-in services.
:param body: Specify the message body
:param title: Specify the message title. (optional)
:param notify_type: Specify the message type (default=info). Possible values are "info",
"success", "failure", and "warning"
:param body_format: Specify the input message format (default=text). Possible values are "text",
"html", and "markdown".
:param tag: Specify one or more tags to filter which services to notify
:param attach: Specify one or more file attachment locations
:param interpret_escapes: Enable interpretation of backslash escapes. For example, this would convert
sequences such as \n and \r to their respective ascii new-line and carriage return characters
:param config: Specify one or more configuration
"""
title = title or ""
apprise_obj = apprise.Apprise()
if config:
apprise_obj.add(config)
else:
conn = self.get_connection(self.apprise_conn_id)
self.set_config_from_conn(conn=conn, apprise_obj=apprise_obj)
apprise_obj.notify(
body=body,
title=title,
notify_type=notify_type,
body_format=body_format,
tag=tag,
attach=attach,
interpret_escapes=interpret_escapes,
)
async def async_notify(
self,
body: str,
title: str | None = None,
notify_type: NotifyType = NotifyType.INFO,
body_format: NotifyFormat = NotifyFormat.TEXT,
tag: str | Iterable[str] = "all",
attach: AppriseAttachment | None = None,
interpret_escapes: bool | None = None,
config: AppriseConfig | None = None,
):
r"""
Send message to plugged-in services asynchronously.
:param body: Specify the message body
:param title: Specify the message title. (optional)
:param notify_type: Specify the message type (default=info). Possible values are "info",
"success", "failure", and "warning"
:param body_format: Specify the input message format (default=text). Possible values are "text",
"html", and "markdown".
:param tag: Specify one or more tags to filter which services to notify
:param attach: Specify one or more file attachment locations
:param interpret_escapes: Enable interpretation of backslash escapes. For example, this would convert
sequences such as \n and \r to their respective ascii new-line and carriage return characters
:param config: Specify one or more configuration
"""
title = title or ""
apprise_obj = apprise.Apprise()
if config:
apprise_obj.add(config)
else:
conn = await get_async_connection(self.apprise_conn_id)
self.set_config_from_conn(conn=conn, apprise_obj=apprise_obj)
await apprise_obj.async_notify(
body=body,
title=title,
notify_type=notify_type,
body_format=body_format,
tag=tag,
attach=attach,
interpret_escapes=interpret_escapes,
)
def get_conn(self) -> None:
raise NotImplementedError()
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField
return {
"config": PasswordField(
lazy_gettext("config"),
widget=BS3PasswordFieldWidget(),
description='format example - {"path": "service url", "tag": "alerts"} or '
'[{"path": "service url", "tag": "alerts"},'
' {"path": "service url", "tag": "alerts"}]',
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {
"hidden_fields": ["host", "schema", "login", "password", "port", "extra"],
"relabeling": {},
}
| AppriseHook |
python | getsentry__sentry | tests/sentry/tasks/test_assemble.py | {
"start": 1390,
"end": 6918
} | class ____(BaseAssembleTest):
def test_wrong_dif(self) -> None:
content1 = b"foo"
fileobj1 = ContentFile(content1)
content2 = b"bar"
fileobj2 = ContentFile(content2)
content3 = b"baz"
fileobj3 = ContentFile(content3)
total_checksum = sha1(content2 + content1 + content3).hexdigest()
# The order here is on purpose because we check for the order of checksums
blob1 = FileBlob.from_file_with_organization(fileobj1, self.organization)
blob3 = FileBlob.from_file_with_organization(fileobj3, self.organization)
blob2 = FileBlob.from_file_with_organization(fileobj2, self.organization)
chunks = [blob2.checksum, blob1.checksum, blob3.checksum]
assemble_dif(
project_id=self.project.id, name="foo.sym", checksum=total_checksum, chunks=chunks
)
status, _ = get_assemble_status(AssembleTask.DIF, self.project.id, total_checksum)
assert status == ChunkFileState.ERROR
def test_dif(self) -> None:
sym_file = self.load_fixture("crash.sym")
blob1 = FileBlob.from_file_with_organization(ContentFile(sym_file), self.organization)
total_checksum = sha1(sym_file).hexdigest()
assemble_dif(
project_id=self.project.id,
name="crash.sym",
checksum=total_checksum,
chunks=[blob1.checksum],
)
status, _ = get_assemble_status(AssembleTask.DIF, self.project.id, total_checksum)
assert status == ChunkFileState.OK
dif = ProjectDebugFile.objects.filter(
project_id=self.project.id, checksum=total_checksum
).get()
assert dif.file.headers == {"Content-Type": "text/x-breakpad"}
def test_assemble_from_files(self) -> None:
files = []
file_checksum = sha1()
for _ in range(8):
blob = os.urandom(1024 * 1024 * 8)
hash = sha1(blob).hexdigest()
file_checksum.update(blob)
files.append((io.BytesIO(blob), hash))
# upload all blobs
FileBlob.from_files(files, organization=self.organization)
# find all blobs
for reference, checksum in files:
file_blob = FileBlob.objects.get(checksum=checksum)
ref_bytes = reference.getvalue()
with file_blob.getfile() as f:
assert f.read(len(ref_bytes)) == ref_bytes
FileBlobOwner.objects.filter(blob=file_blob, organization_id=self.organization.id).get()
rv = assemble_file(
AssembleTask.DIF,
self.project,
"testfile",
file_checksum.hexdigest(),
[x[1] for x in files],
"dummy.type",
)
assert rv is not None
f, tmp = rv
tmp.close()
assert f.checksum == file_checksum.hexdigest()
assert f.type == "dummy.type"
# upload all blobs a second time
for f, _ in files:
f.seek(0)
FileBlob.from_files(files, organization=self.organization)
# assemble a second time
rv = assemble_file(
AssembleTask.DIF,
self.project,
"testfile",
file_checksum.hexdigest(),
[x[1] for x in files],
"dummy.type",
)
assert rv is not None
f, tmp = rv
tmp.close()
assert f.checksum == file_checksum.hexdigest()
def test_assemble_duplicate_blobs(self) -> None:
files = []
file_checksum = sha1()
blob = os.urandom(1024 * 1024 * 8)
hash = sha1(blob).hexdigest()
for _ in range(8):
file_checksum.update(blob)
files.append((io.BytesIO(blob), hash))
# upload all blobs
FileBlob.from_files(files, organization=self.organization)
# find all blobs
for reference, checksum in files:
file_blob = FileBlob.objects.get(checksum=checksum)
ref_bytes = reference.getvalue()
with file_blob.getfile() as f:
assert f.read(len(ref_bytes)) == ref_bytes
FileBlobOwner.objects.filter(blob=file_blob, organization_id=self.organization.id).get()
rv = assemble_file(
AssembleTask.DIF,
self.project,
"testfile",
file_checksum.hexdigest(),
[x[1] for x in files],
"dummy.type",
)
assert rv is not None
f, tmp = rv
tmp.close()
assert f.checksum == file_checksum.hexdigest()
assert f.type == "dummy.type"
def test_assemble_debug_id_override(self) -> None:
sym_file = self.load_fixture("crash.sym")
blob1 = FileBlob.from_file_with_organization(ContentFile(sym_file), self.organization)
total_checksum = sha1(sym_file).hexdigest()
assemble_dif(
project_id=self.project.id,
name="crash.sym",
checksum=total_checksum,
chunks=[blob1.checksum],
debug_id="67e9247c-814e-392b-a027-dbde6748fcbf-beef",
)
status, _ = get_assemble_status(AssembleTask.DIF, self.project.id, total_checksum)
assert status == ChunkFileState.OK
dif = ProjectDebugFile.objects.filter(
project_id=self.project.id, checksum=total_checksum
).get()
assert dif.file.headers == {"Content-Type": "text/x-breakpad"}
assert dif.debug_id == "67e9247c-814e-392b-a027-dbde6748fcbf-beef"
| AssembleDifTest |
python | doocs__leetcode | solution/3100-3199/3196.Maximize Total Cost of Alternating Subarrays/Solution.py | {
"start": 0,
"end": 354
} | class ____:
def maximumTotalCost(self, nums: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= len(nums):
return 0
ans = nums[i] + dfs(i + 1, 1)
if j == 1:
ans = max(ans, -nums[i] + dfs(i + 1, 0))
return ans
return dfs(0, 0)
| Solution |
python | crytic__slither | slither/tools/mutator/mutators/MVIV.py | {
"start": 269,
"end": 2648
} | class ____(AbstractMutator): # pylint: disable=too-few-public-methods
NAME = "MVIV"
HELP = "variable initialization using a value"
def _mutate(self) -> Dict:
result: Dict = {}
variable: Variable
# Create fault for state variables declaration
for variable in self.contract.state_variables_declared:
if variable.initialized:
# Cannot remove the initialization of constant variables
if variable.is_constant:
continue
if isinstance(variable.expression, Literal):
# Get the string
start = variable.source_mapping.start
stop = variable.expression.source_mapping.start
old_str = variable.source_mapping.content
new_str = old_str[: old_str.find("=")]
line_no = variable.node_initialization.source_mapping.lines
if not line_no[0] in self.dont_mutate_line:
create_patch_with_line(
result,
self.in_file,
start,
stop + variable.expression.source_mapping.length,
old_str,
new_str,
line_no[0],
)
for function in self.contract.functions_and_modifiers_declared:
for variable in function.local_variables:
if variable.initialized and isinstance(variable.expression, Literal):
start = variable.source_mapping.start
stop = variable.expression.source_mapping.start
old_str = variable.source_mapping.content
new_str = old_str[: old_str.find("=")]
line_no = variable.source_mapping.lines
if not line_no[0] in self.dont_mutate_line:
create_patch_with_line(
result,
self.in_file,
start,
stop + variable.expression.source_mapping.length,
old_str,
new_str,
line_no[0],
)
return result
| MVIV |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_permission_details.py | {
"start": 1628,
"end": 3178
} | class ____(UserDetailsTest):
method = "GET"
def test_superuser_with_permission(self) -> None:
self.login_as(self.superuser, superuser=True)
self.add_user_permission(self.superuser, "broadcasts.admin")
self.get_success_response("me", "broadcasts.admin", status_code=204)
def test_superuser_without_permission(self) -> None:
self.login_as(self.superuser, superuser=True)
self.get_error_response("me", "broadcasts.admin", status_code=404)
@override_options({"staff.ga-rollout": True})
@patch.object(StaffPermission, "has_permission", wraps=StaffPermission().has_permission)
def test_staff_with_permission(self, mock_has_permission: MagicMock) -> None:
self.login_as(self.staff_user, staff=True)
self.add_user_permission(self.staff_user, "broadcasts.admin")
self.get_success_response("me", "broadcasts.admin", status_code=204)
# ensure we fail the scope check and call is_active_staff
assert mock_has_permission.call_count == 1
@override_options({"staff.ga-rollout": True})
@patch.object(StaffPermission, "has_permission", wraps=StaffPermission().has_permission)
def test_staff_without_permission(self, mock_has_permission: MagicMock) -> None:
self.login_as(self.staff_user, staff=True)
self.get_error_response("me", "broadcasts.admin", status_code=404)
# ensure we fail the scope check and call is_active_staff
assert mock_has_permission.call_count == 1
@control_silo_test
| UserPermissionDetailsGetTest |
python | ray-project__ray | python/ray/serve/_private/application_state.py | {
"start": 3169,
"end": 3959
} | class ____:
status: ApplicationStatus
message: str = ""
deployment_timestamp: float = 0
def debug_string(self):
return json.dumps(asdict(self), indent=4)
def to_proto(self):
return ApplicationStatusInfoProto(
status=f"APPLICATION_STATUS_{self.status.name}",
message=self.message,
deployment_timestamp=self.deployment_timestamp,
)
@classmethod
def from_proto(cls, proto: ApplicationStatusInfoProto):
status = ApplicationStatusProto.Name(proto.status)[len("APPLICATION_STATUS_") :]
return cls(
status=ApplicationStatus(status),
message=proto.message,
deployment_timestamp=proto.deployment_timestamp,
)
@dataclass(eq=True)
| ApplicationStatusInfo |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_ops_test.py | {
"start": 17364,
"end": 17456
} | class ____(test.TestCase, _LUMatrixInverse):
use_static_shape = False
| LUMatrixInverseDynamic |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/composition.py | {
"start": 2493,
"end": 4670
} | class ____:
"""The return value for an output when invoking a node in a composition function."""
node_name: str
output_name: str
node_type: str
def __init__(self, node_name: str, output_name: str, node_type: str):
self.node_name = check.str_param(node_name, "node_name")
self.output_name = check.str_param(output_name, "output_name")
self.node_type = check.str_param(node_type, "node_type")
def __iter__(self) -> NoReturn:
raise DagsterInvariantViolationError(
f'Attempted to iterate over an {self.__class__.__name__}. This object represents the output "{self.output_name}" '
f'from the op/graph "{self.node_name}". Consider defining multiple Outs if you seek to pass '
"different parts of this output to different op/graph."
)
def __getitem__(self, idx: object) -> NoReturn:
raise DagsterInvariantViolationError(
f'Attempted to index in to an {self.__class__.__name__}. This object represents the output "{self.output_name}" '
f"from the {self.describe_node()}. Consider defining multiple Outs if you seek to pass "
f"different parts of this output to different {self.node_type}s."
)
def describe_node(self) -> str:
return f"{self.node_type} '{self.node_name}'"
def alias(self, _) -> NoReturn:
raise DagsterInvariantViolationError(
f"In {current_context().source} {current_context().name}, attempted to call alias method for {self.__class__.__name__}. This object "
f'represents the output "{self.output_name}" from the already invoked {self.describe_node()}. Consider '
"checking the location of parentheses."
)
def with_hooks(self, _) -> NoReturn:
raise DagsterInvariantViolationError(
f"In {current_context().source} {current_context().name}, attempted to call hook method for {self.__class__.__name__}. This object "
f'represents the output "{self.output_name}" from the already invoked {self.describe_node()}. Consider '
"checking the location of parentheses."
)
| InvokedNodeOutputHandle |
python | pandas-dev__pandas | pandas/tests/series/methods/test_interpolate.py | {
"start": 1742,
"end": 31108
} | class ____:
@pytest.mark.xfail(reason="EA.fillna does not handle 'linear' method")
def test_interpolate_period_values(self):
orig = Series(date_range("2012-01-01", periods=5))
ser = orig.copy()
ser[2] = pd.NaT
# period cast
ser_per = ser.dt.to_period("D")
res_per = ser_per.interpolate()
expected_per = orig.dt.to_period("D")
tm.assert_series_equal(res_per, expected_per)
def test_interpolate(self, datetime_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
# Set data between Tuesday and Thursday to NaN for 2 consecutive weeks.
# Linear interpolation should fill in the missing values correctly,
# as the index is equally-spaced within each week.
ts_copy[1:4] = np.nan
ts_copy[6:9] = np.nan
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.nan
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.nan])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
def test_interpolate_cubicspline(self):
pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline").loc[1:3]
tm.assert_series_equal(result, expected)
def test_interpolate_pchip(self):
pytest.importorskip("scipy")
ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s.loc[49:51]
def test_interpolate_akima(self):
pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s.loc[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s.loc[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s.loc[1:3], expected)
def test_interpolate_from_derivatives(self):
pytest.importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s.loc[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no("scipy")
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.default_rng(2).random(30)))
s.loc[::3] = np.random.default_rng(2).standard_normal(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
bad = isna(s)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no("scipy")
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
def test_interp_quad(self):
pytest.importorskip("scipy")
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
pytest.importorskip("scipy")
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25.0])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25.0])
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected)
# quadratic
# GH #15662.
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
# cubic
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
result = s.interpolate(method="cubic")
tm.assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
s.interpolate(limit=limit, method=method, **kwargs)
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])
msg = "Can not interpolate with method=nonexistent_method"
if invalid_method is None:
msg = "'method' should be a string, not None"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)
# When an invalid method and invalid limit (such as -1) are
# provided, the error message reflects the invalid method.
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method, limit=-1)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
tm.assert_series_equal(result, expected)
def test_interp_unlimited(self):
# these test are for issue #16282 default Limit=None is unlimited
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
result = s.interpolate(method="linear", limit_direction="backward")
tm.assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
r"Invalid limit_direction: expecting one of \['forward', "
r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
# raises an error even if no limit is specified.
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_direction="abc")
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit_area="inside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="inside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
result = s.interpolate(method="linear", limit_area="outside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="backward"
)
tm.assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_area="abc")
@pytest.mark.parametrize(
"data, kwargs",
(
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "pad", "limit_area": "inside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "pad", "limit_area": "inside", "limit": 1},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "pad", "limit_area": "outside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "pad", "limit_area": "outside", "limit": 1},
),
(
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
{"method": "pad", "limit_area": "outside", "limit": 1},
),
(
range(5),
{"method": "pad", "limit_area": "outside", "limit": 1},
),
),
)
def test_interp_limit_area_with_pad(self, data, kwargs):
# GH26796
s = Series(data)
msg = "Can not interpolate with method=pad"
with pytest.raises(ValueError, match=msg):
s.interpolate(**kwargs)
@pytest.mark.parametrize(
"data, kwargs",
(
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "bfill", "limit_area": "inside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "bfill", "limit_area": "inside", "limit": 1},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "bfill", "limit_area": "outside"},
),
(
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
{"method": "bfill", "limit_area": "outside", "limit": 1},
),
),
)
def test_interp_limit_area_with_backfill(self, data, kwargs):
# GH26796
s = Series(data)
msg = "Can not interpolate with method=bfill"
with pytest.raises(ValueError, match=msg):
s.interpolate(**kwargs)
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series(
[1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]
)
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These test are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These test are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5.0, 7.0, 7.0, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 5.0, 5.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 5.0, 5.0, 7.0, 7.0, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_all_good(self):
pytest.importorskip("scipy")
s = Series([1, 2, 3])
result = s.interpolate(method="polynomial", order=1)
tm.assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no("scipy"))]
)
def test_interp_multiIndex(self, check_scipy):
idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
tm.assert_series_equal(result, expected)
msg = "Only `method=linear` interpolation is supported on MultiIndexes"
if check_scipy:
with pytest.raises(ValueError, match=msg):
s.interpolate(method="polynomial", order=1)
def test_interp_nonmono_raise(self):
pytest.importorskip("scipy")
s = Series([1, np.nan, 3], index=[0, 2, 1])
msg = "krogh interpolation requires that the index be monotonic"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="krogh")
@pytest.mark.parametrize("method", ["nearest", "pad"])
def test_interp_datetime64(self, method, tz_naive_fixture):
pytest.importorskip("scipy")
df = Series(
[1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
)
if method == "nearest":
result = df.interpolate(method=method)
expected = Series(
[1.0, 1.0, 3.0],
index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture),
)
tm.assert_series_equal(result, expected)
else:
msg = "Can not interpolate with method=pad"
with pytest.raises(ValueError, match=msg):
df.interpolate(method=method)
def test_interp_pad_datetime64tz_values(self):
# GH#27628 missing.interpolate_2d should handle datetimetz values
dti = date_range("2015-04-05", periods=3, tz="US/Central")
ser = Series(dti)
ser[1] = pd.NaT
msg = "Can not interpolate with method=pad"
with pytest.raises(ValueError, match=msg):
ser.interpolate(method="pad")
def test_interp_limit_no_nans(self):
# GH 7173
s = Series([1.0, 2.0, 3.0])
result = s.interpolate(limit=1)
expected = s
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["polynomial", "spline"])
def test_no_order(self, method):
# see GH-10633, GH-24014
pytest.importorskip("scipy")
s = Series([0, 1, np.nan, 3])
msg = "You must specify the order of the spline or polynomial"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method)
@pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
def test_interpolate_spline_invalid_order(self, order):
pytest.importorskip("scipy")
s = Series([0, 1, np.nan, 3])
msg = "order needs to be specified and greater than 0"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="spline", order=order)
def test_spline(self):
pytest.importorskip("scipy")
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method="spline", order=1)
expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result, expected)
def test_spline_extrapolate(self):
pytest.importorskip("scipy")
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method="spline", order=1, ext=3)
expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
tm.assert_series_equal(result3, expected3)
result1 = s.interpolate(method="spline", order=1, ext=0)
expected1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result1, expected1)
def test_spline_smooth(self):
pytest.importorskip("scipy")
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (
s.interpolate(method="spline", order=3, s=0)[5]
!= s.interpolate(method="spline", order=3)[5]
)
def test_spline_interpolation(self):
# Explicit cast to float to avoid implicit cast when setting np.nan
pytest.importorskip("scipy")
s = Series(np.arange(10) ** 2, dtype="float")
s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan
result1 = s.interpolate(method="spline", order=1)
expected1 = s.interpolate(method="spline", order=1)
tm.assert_series_equal(result1, expected1)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method="time")
expected = Series([1.0, 2.0, 3.0], index=pd.to_timedelta([1, 2, 3]))
tm.assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method="time")
expected = Series([1.0, 1.666667, 3.0], index=pd.to_timedelta([1, 2, 4]))
tm.assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# GH#1646
rng = date_range("1/1/2000", "1/20/2000", freq="D")
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
ts[::2] = np.nan
result = ts.interpolate(method="values")
exp = ts.interpolate()
tm.assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = date_range("1/1/2012", periods=4, freq="12D")
ts = Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method="time")
index = date_range("1/1/2012", periods=4, freq="12h")
ts = Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method="time")
tm.assert_numpy_array_equal(result.values, exp.values)
@pytest.mark.parametrize(
"ind",
[
["a", "b", "c", "d"],
pd.period_range(start="2019-01-01", periods=4),
pd.interval_range(start=0, end=4),
],
)
def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
# gh 21662
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
method, kwargs = interp_methods_ind
if method == "pchip":
pytest.importorskip("scipy")
if method == "linear":
result = df[0].interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
tm.assert_series_equal(result, expected)
else:
expected_error = (
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
with pytest.raises(ValueError, match=expected_error):
df[0].interpolate(method=method, **kwargs)
def test_interpolate_timedelta_index(self, request, interp_methods_ind):
"""
Tests for non numerical index types - object, period, timedelta
Note that all methods except time, index, nearest and values
are tested here.
"""
# gh 21662
pytest.importorskip("scipy")
ind = pd.timedelta_range(start=1, periods=4)
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
method, kwargs = interp_methods_ind
if method in {"cubic", "zero"}:
request.applymarker(
pytest.mark.xfail(
reason=f"{method} interpolation is not supported for TimedeltaIndex"
)
)
result = df[0].interpolate(method=method, **kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ascending, expected_values",
[(True, [1, 2, 3, 9, 10]), (False, [10, 9, 3, 2, 1])],
)
def test_interpolate_unsorted_index(self, ascending, expected_values):
# GH 21037
ts = Series(data=[10, 9, np.nan, 2, 1], index=[10, 9, 3, 2, 1])
result = ts.sort_index(ascending=ascending).interpolate(method="index")
expected = Series(data=expected_values, index=expected_values, dtype=float)
tm.assert_series_equal(result, expected)
def test_interpolate_asfreq_raises(self):
ser = Series(["a", None, "b"], dtype=object)
msg = "Can not interpolate with method=asfreq"
with pytest.raises(ValueError, match=msg):
ser.interpolate(method="asfreq")
def test_interpolate_fill_value(self):
# GH#54920
pytest.importorskip("scipy")
ser = Series([np.nan, 0, 1, np.nan, 3, np.nan])
result = ser.interpolate(method="nearest", fill_value=0)
expected = Series([np.nan, 0, 1, 1, 3, 0])
tm.assert_series_equal(result, expected)
| TestSeriesInterpolateData |
python | ipython__ipython | tests/test_zzz_autoreload.py | {
"start": 22933,
"end": 23184
} | class ____(object):
def __init__(self, x):
self.x = x
def bar(self, y):
return self.x + y
@property
def quux(self):
return 42
def zzz(self):
'''This method will be deleted below'''
return 99
| Baz |
python | ray-project__ray | python/ray/tune/tests/test_experiment.py | {
"start": 264,
"end": 3150
} | class ____(unittest.TestCase):
def tearDown(self):
ray.shutdown()
def setUp(self):
def train_fn(config):
for i in range(100):
ray.tune.report(dict(timesteps_total=i))
register_trainable("f1", train_fn)
def testConvertExperimentFromExperiment(self):
exp1 = Experiment(
**{"name": "foo", "run": "f1", "config": {"script_min_iter_time_s": 0}}
)
result = _convert_to_experiment_list(exp1)
self.assertEqual(len(result), 1)
self.assertEqual(type(result), list)
def testConvertExperimentNone(self):
result = _convert_to_experiment_list(None)
self.assertEqual(len(result), 0)
self.assertEqual(type(result), list)
def testConvertExperimentList(self):
exp1 = Experiment(
**{"name": "foo", "run": "f1", "config": {"script_min_iter_time_s": 0}}
)
result = _convert_to_experiment_list([exp1, exp1])
self.assertEqual(len(result), 2)
self.assertEqual(type(result), list)
def testConvertExperimentJSON(self):
experiment = {
"name": {"run": "f1", "config": {"script_min_iter_time_s": 0}},
"named": {"run": "f1", "config": {"script_min_iter_time_s": 0}},
}
result = _convert_to_experiment_list(experiment)
self.assertEqual(len(result), 2)
self.assertEqual(type(result), list)
def testConvertExperimentIncorrect(self):
self.assertRaises(TuneError, lambda: _convert_to_experiment_list("hi"))
def testFuncTrainableCheckpointConfigValidation(self):
"""Raise an error when trying to specify checkpoint_at_end/checkpoint_frequency
with a function trainable."""
with self.assertRaises(ValueError):
Experiment(
name="foo",
run="f1", # Will point to a wrapped function trainable
checkpoint_config=CheckpointConfig(checkpoint_at_end=True),
)
with self.assertRaises(ValueError):
Experiment(
name="foo",
run="f1",
checkpoint_config=CheckpointConfig(checkpoint_frequency=1),
)
with self.assertRaises(ValueError):
Experiment(
name="foo",
run=lambda config: 1,
checkpoint_config=CheckpointConfig(checkpoint_at_end=True),
)
def testInvalidExperimentConfig(self):
with self.assertRaises(ValueError):
Experiment(name="foo", run="f1", config="invalid")
class InvalidClass:
def to_dict(self):
return {"valid": 1}
with self.assertRaises(ValueError):
Experiment(name="foo", run="f1", config=InvalidClass())
Experiment(name="foo", run="f1", config=InvalidClass().to_dict())
| ExperimentTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 12245,
"end": 13179
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_stored_info_type(self, mock_hook):
mock_hook.return_value.delete_stored_info_type.return_value = mock.MagicMock()
operator = CloudDLPDeleteStoredInfoTypeOperator(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
task_id="id",
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_stored_info_type.assert_called_once_with(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPDeleteStoredInfoTypeOperator |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 11876,
"end": 12621
} | class ____:
"""Test En_US phone provider methods"""
def test_basic_phone_number(self, faker, num_samples):
pattern_no_whitespaces: Pattern = re.compile(
r"\d{9}",
)
pattern_dashes: Pattern = re.compile(r"\d{3}-\d{3}-\d{4}")
pattern_parens: Pattern = re.compile(r"\(\d{3}\)\d{3}-\d{4}")
patterns = [pattern_no_whitespaces, pattern_dashes, pattern_parens]
for _ in range(num_samples):
phone_number = faker.basic_phone_number()
pattern_is_found = False
for pattern in patterns:
if re.match(pattern, phone_number):
pattern_is_found = True
break
assert pattern_is_found
| TestEnUs |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_legacy_tests.py | {
"start": 26351,
"end": 28338
} | class ____(TestCase):
def test_valid_path(self):
paths = [os.path.dirname(__file__)]
class Schema:
option = c.ListOfPaths()
self.get_config(Schema, {'option': paths})
def test_missing_path(self):
paths = [os.path.join("does", "not", "exist", "i", "hope")]
class Schema:
option = c.ListOfPaths()
with self.expect_error(
option=f"The path '{paths[0]}' isn't an existing file or directory."
):
self.get_config(Schema, {'option': paths})
def test_non_path(self):
paths = [os.path.dirname(__file__), None]
class Schema:
option = c.ListOfPaths()
with self.expect_error(
option="Expected type: <class 'str'> but received: <class 'NoneType'>"
):
self.get_config(Schema, {'option': paths})
def test_empty_list(self):
class Schema:
option = c.ListOfPaths()
conf = self.get_config(Schema, {'option': []})
self.assertEqual(conf['option'], [])
def test_non_list(self):
paths = os.path.dirname(__file__)
class Schema:
option = c.ListOfPaths()
with self.expect_error(option="Expected a list of items, but a <class 'str'> was given."):
self.get_config(Schema, {'option': paths})
def test_file(self):
paths = [__file__]
class Schema:
option = c.ListOfPaths()
self.get_config(Schema, {'option': paths})
@tempdir()
def test_paths_localized_to_config(self, base_path):
with open(os.path.join(base_path, 'foo'), 'w') as f:
f.write('hi')
class Schema:
watch = c.ListOfPaths()
conf = self.get_config(
Schema,
{'watch': ['foo']},
config_file_path=os.path.join(base_path, 'mkdocs.yml'),
)
self.assertEqual(conf['watch'], [os.path.join(base_path, 'foo')])
| ListOfPathsTest |
python | streamlit__streamlit | lib/tests/streamlit/runtime/caching/common_cache_test.py | {
"start": 25119,
"end": 29298
} | class ____(unittest.TestCase):
# The number of threads to run our tests on
NUM_THREADS = 50
def setUp(self):
mock_runtime = MagicMock(spec=Runtime)
mock_runtime.cache_storage_manager = MemoryCacheStorageManager()
Runtime._instance = mock_runtime
def tearDown(self):
# Some of these tests reach directly into CALL_STACK data and twiddle it.
# Reset default values on teardown.
# Clear caches
st.cache_data.clear()
st.cache_resource.clear()
# And some tests create widgets, and can result in DuplicateWidgetID
# errors on subsequent runs.
ctx = script_run_context.get_script_run_ctx()
if ctx is not None:
ctx.widget_ids_this_run.clear()
ctx.widget_user_keys_this_run.clear()
super().tearDown()
@parameterized.expand(
[("cache_data", cache_data), ("cache_resource", cache_resource)]
)
def test_get_cache(self, _, cache_decorator):
"""Accessing a cached value is safe from multiple threads."""
cached_func_call_count = [0]
@cache_decorator
def foo():
cached_func_call_count[0] += 1
return 42
def call_foo(_: int) -> None:
assert foo() == 42
# Call foo from multiple threads and assert no errors.
call_on_threads(call_foo, self.NUM_THREADS)
# The cached function should only be called once (see `test_compute_value_only_once`).
assert cached_func_call_count[0] == 1
@parameterized.expand(
[("cache_data", cache_data), ("cache_resource", cache_resource)]
)
def test_compute_value_only_once(self, _, cache_decorator):
"""Cached values should be computed only once, even if multiple sessions read from an
unwarmed cache simultaneously.
"""
cached_func_call_count = [0]
@cache_decorator
def foo():
assert cached_func_call_count[0] == 0, (
"A cached value was computed multiple times!"
)
cached_func_call_count[0] += 1
# Sleep to "guarantee" that our other threads try to access the
# cached data while it's being computed. (The other threads should
# block on cache computation, so this function should only
# be called a single time.)
time.sleep(0.25)
return 42
def call_foo(_: int) -> None:
assert foo() == 42
call_on_threads(call_foo, num_threads=self.NUM_THREADS, timeout=0.5)
@parameterized.expand(
[
("cache_data", cache_data, cache_data.clear),
("cache_resource", cache_resource, cache_resource.clear),
]
)
def test_clear_all_caches(self, _, cache_decorator, clear_cache_func):
"""Clearing all caches is safe to call from multiple threads."""
@cache_decorator
def foo():
return 42
# Populate the cache
foo()
def clear_caches(_: int) -> None:
clear_cache_func()
# Clear the cache from a bunch of threads and assert no errors.
call_on_threads(clear_caches, self.NUM_THREADS)
# Sanity check: ensure we can still call our cached function.
assert foo() == 42
@parameterized.expand(
[("cache_data", cache_data), ("cache_resource", cache_resource)]
)
def test_clear_single_cache(self, _, cache_decorator):
"""It's safe to clear a single function cache from multiple threads."""
@cache_decorator
def foo():
return 42
# Populate the cache
foo()
def clear_foo(_: int) -> None:
foo.clear()
# Clear it from a bunch of threads and assert no errors.
call_on_threads(clear_foo, self.NUM_THREADS)
# Sanity check: ensure we can still call our cached function.
assert foo() == 42
def test_arrow_replay():
"""Regression test for https://github.com/streamlit/streamlit/issues/6103"""
at = AppTest.from_file("test_data/arrow_replay.py").run()
assert not at.exception
| CommonCacheThreadingTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec24.py | {
"start": 1342,
"end": 1736
} | class ____:
@cache
def in_class(self, a: int, b: str) -> str: ...
reveal_type(not_in_class, expected_text="_wrapped_cache[Any, (a: int, b: str), str]")
not_in_class(1, "")
a = A()
reveal_type(a.in_class, expected_text="_wrapped_cache[A, (a: int, b: str), str]")
a.in_class(1, "")
reveal_type(A.in_class, expected_text="_callable_cache[(A, a: int, b: str), str]")
A.in_class(a, 1, "")
| A |
python | ray-project__ray | python/ray/tune/tests/test_trial_scheduler_resource_changing.py | {
"start": 679,
"end": 6613
} | class ____(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tune_controller, *_ = create_execution_test_objects(
resources={"CPU": 8, "GPU": 8},
reuse_actors=False,
tune_controller_cls=MockTuneController,
storage=mock_storage_context(),
)
def tearDown(self) -> None:
shutil.rmtree(self.tmpdir)
def _prepareTrials(self, scheduler, base_pgf):
trial1 = Trial("mock", config=dict(num=1), stub=True)
trial1.placement_group_factory = base_pgf
trial2 = Trial("mock", config=dict(num=2), stub=True)
trial2.placement_group_factory = base_pgf
trial3 = Trial("mock", config=dict(num=3), stub=True)
trial3.placement_group_factory = base_pgf
trial4 = Trial("mock", config=dict(num=4), stub=True)
trial4.placement_group_factory = base_pgf
self.tune_controller._trials = [trial1, trial2, trial3, trial4]
scheduler.on_trial_add(self.tune_controller, trial1)
scheduler.on_trial_add(self.tune_controller, trial2)
scheduler.on_trial_add(self.tune_controller, trial3)
scheduler.on_trial_add(self.tune_controller, trial4)
trial1.status = Trial.RUNNING
trial2.status = Trial.RUNNING
trial3.status = Trial.RUNNING
trial4.status = Trial.RUNNING
return trial1, trial2, trial3, trial4
def _allocateAndAssertNewResources(self, trial, scheduler, target_pgf, metric=1):
result = {"metric": metric, "training_iteration": 4}
trial.run_metadata.last_result = result
decision = scheduler.on_trial_result(self.tune_controller, trial, result)
assert decision == TrialScheduler.PAUSE
trial.status = Trial.PENDING
scheduler.choose_trial_to_run(self.tune_controller)
assert trial.placement_group_factory == target_pgf
trial.status = Trial.RUNNING
def testAllocateFreeResources(self):
scheduler = ResourceChangingScheduler(
resources_allocation_function=DistributeResources(add_bundles=False)
)
base_pgf = PlacementGroupFactory([{"CPU": 1, "GPU": 0}])
trial1, trial2, trial3, trial4 = self._prepareTrials(scheduler, base_pgf)
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 2}])
)
self._allocateAndAssertNewResources(
trial2, scheduler, PlacementGroupFactory([{"CPU": 2}])
)
trial4.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 3}])
)
trial3.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 4}])
)
trial2.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 8}])
)
def testAllocateFreeResourcesWithIncreaseBy(self):
scheduler = ResourceChangingScheduler(
resources_allocation_function=DistributeResources(
add_bundles=False, increase_by={"CPU": 2, "GPU": 2}
)
)
base_pgf = PlacementGroupFactory([{"CPU": 2, "GPU": 2}])
trial1, trial2, trial3, trial4 = self._prepareTrials(scheduler, base_pgf)
decision = scheduler.on_trial_result(
self.tune_controller, trial1, {"metric": 1, "training_iteration": 4}
)
assert decision == TrialScheduler.CONTINUE
trial4.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 4, "GPU": 4}])
)
trial3.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial2, scheduler, PlacementGroupFactory([{"CPU": 4, "GPU": 4}])
)
trial2.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 8, "GPU": 8}])
)
def testAllocateFreeResourcesWithIncreaseByTimes(self):
scheduler = ResourceChangingScheduler(
resources_allocation_function=DistributeResources(
add_bundles=False, increase_by={"GPU": 2}, increase_by_times=2
)
)
base_pgf = PlacementGroupFactory([{"CPU": 1, "GPU": 2}])
trial1, trial2, trial3, trial4 = self._prepareTrials(scheduler, base_pgf)
decision = scheduler.on_trial_result(
self.tune_controller, trial1, {"metric": 1, "training_iteration": 4}
)
assert decision == TrialScheduler.CONTINUE
trial4.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 1, "GPU": 4}])
)
trial3.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial2, scheduler, PlacementGroupFactory([{"CPU": 1, "GPU": 4}])
)
trial2.status = Trial.TERMINATED
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 1, "GPU": 6}])
)
def testDeallocateResources(self):
scheduler = ResourceChangingScheduler(
resources_allocation_function=DistributeResources(
add_bundles=False, increase_by={"GPU": 2}
)
)
base_pgf = PlacementGroupFactory([{"CPU": 1, "GPU": 2}])
trial1, trial2, trial3, trial4 = self._prepareTrials(scheduler, base_pgf)
trial1.placement_group_factory = PlacementGroupFactory([{"CPU": 1, "GPU": 4}])
trial4.status = Trial.PENDING
self._allocateAndAssertNewResources(
trial1, scheduler, PlacementGroupFactory([{"CPU": 1, "GPU": 2}])
)
| TestUniformResourceAllocation |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 1992,
"end": 2083
} | class ____:
def f(self):
super = print
super(C, self)
import builtins
| C |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 90307,
"end": 90494
} | class ____(Union):
_fields_ = [
('vgpuSchedDataWithARR', c_nvmlVgpuSchedDataWithARR_t),
('vgpuSchedData', c_nvmlVgpuSchedData_t),
]
| c_nvmlVgpuSchedulerParams_t |
python | pyparsing__pyparsing | tests/test_simple_unit.py | {
"start": 17340,
"end": 18326
} | class ____(PyparsingExpressionTestCase):
markup_convert_map = {
"*": "B",
"_": "U",
"/": "I",
}
# do not make staticmethod
# @staticmethod
def markup_convert(t):
htmltag = TestTransformStringUsingParseActions.markup_convert_map[
t.markup_symbol
]
return f"<{htmltag}>{t.body}</{htmltag}>"
tests = [
PyparsingTest(
desc="Use transform_string to convert simple markup to HTML",
expr=(
pp.one_of(markup_convert_map)("markup_symbol")
+ "("
+ pp.CharsNotIn(")")("body")
+ ")"
).add_parse_action(markup_convert),
text="Show in *(bold), _(underscore), or /(italic) type",
expected_list=[
"Show in <B>bold</B>, <U>underscore</U>, or <I>italic</I> type"
],
parse_fn="transform_string",
),
]
| TestTransformStringUsingParseActions |
python | spack__spack | lib/spack/spack/solver/input_analysis.py | {
"start": 702,
"end": 2211
} | class ____:
"""Returns information needed to set up an ASP problem"""
def unreachable(self, *, pkg_name: str, when_spec: spack.spec.Spec) -> bool:
"""Returns true if the context can determine that the condition cannot ever
be met on pkg_name.
"""
raise NotImplementedError
def candidate_targets(self) -> List[spack.vendor.archspec.cpu.Microarchitecture]:
"""Returns a list of targets that are candidate for concretization"""
raise NotImplementedError
def possible_dependencies(
self,
*specs: Union[spack.spec.Spec, str],
allowed_deps: dt.DepFlag,
transitive: bool = True,
strict_depflag: bool = False,
expand_virtuals: bool = True,
) -> PossibleGraph:
"""Returns the set of possible dependencies, and the set of possible virtuals.
Runtime packages, which may be injected by compilers, needs to be added to specs if
the dependency is not explicit in the package.py recipe.
Args:
transitive: return transitive dependencies if True, only direct dependencies if False
allowed_deps: dependency types to consider
strict_depflag: if True, only the specific dep type is considered, if False any
deptype that intersects with allowed deptype is considered
expand_virtuals: expand virtual dependencies into all possible implementations
"""
raise NotImplementedError
| PossibleDependencyGraph |
python | bokeh__bokeh | src/bokeh/core/property/singletons.py | {
"start": 1487,
"end": 1837
} | class ____:
""" Indicates no value set, which is not the same as setting ``None``. """
def __copy__(self) -> UndefinedType:
return self
def __str__(self) -> str:
return "Undefined"
def __repr__(self) -> str:
return "Undefined"
Undefined = UndefinedType()
Optional: TypeAlias = T | UndefinedType
| UndefinedType |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 45979,
"end": 46663
} | class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.config = config
self.hidden_size = config.memory_attention_hidden_size
self.intermediate_size = config.memory_attention_mlp_hidden_size
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size)
self.dropout = nn.Dropout(config.memory_attention_dropout)
self.act_fn = ACT2FN[config.memory_attention_mlp_hidden_act]
def forward(self, x):
return self.down_proj(self.dropout(self.act_fn(self.up_proj(x))))
| EdgeTamVideoMemoryAttentionMLP |
python | psf__requests | src/requests/cookies.py | {
"start": 2894,
"end": 4999
} | class ____:
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `http.cookiejar` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookiejar` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: http.cookiejar.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, "_original_response") and response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get("Cookie")
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
| MockResponse |
python | matplotlib__matplotlib | lib/matplotlib/cm.py | {
"start": 1932,
"end": 8134
} | class ____(Mapping):
r"""
Container for colormaps that are known to Matplotlib by name.
The universal registry instance is `matplotlib.colormaps`. There should be
no need for users to instantiate `.ColormapRegistry` themselves.
Read access uses a dict-like interface mapping names to `.Colormap`\s::
import matplotlib as mpl
cmap = mpl.colormaps['viridis']
Returned `.Colormap`\s are copies, so that their modification does not
change the global definition of the colormap.
Additional colormaps can be added via `.ColormapRegistry.register`::
mpl.colormaps.register(my_colormap)
To get a list of all registered colormaps, you can do::
from matplotlib import colormaps
list(colormaps)
"""
def __init__(self, cmaps):
self._cmaps = cmaps
self._builtin_cmaps = tuple(cmaps)
def __getitem__(self, item):
cmap = _api.check_getitem(self._cmaps, colormap=item, _error_cls=KeyError)
return cmap.copy()
def __iter__(self):
return iter(self._cmaps)
def __len__(self):
return len(self._cmaps)
def __str__(self):
return ('ColormapRegistry; available colormaps:\n' +
', '.join(f"'{name}'" for name in self))
def __call__(self):
"""
Return a list of the registered colormap names.
This exists only for backward-compatibility in `.pyplot` which had a
``plt.colormaps()`` method. The recommended way to get this list is
now ``list(colormaps)``.
"""
return list(self)
def register(self, cmap, *, name=None, force=False):
"""
Register a new colormap.
The colormap name can then be used as a string argument to any ``cmap``
parameter in Matplotlib. It is also available in ``pyplot.get_cmap``.
The colormap registry stores a copy of the given colormap, so that
future changes to the original colormap instance do not affect the
registered colormap. Think of this as the registry taking a snapshot
of the colormap at registration.
Parameters
----------
cmap : matplotlib.colors.Colormap
The colormap to register.
name : str, optional
The name for the colormap. If not given, ``cmap.name`` is used.
force : bool, default: False
If False, a ValueError is raised if trying to overwrite an already
registered name. True supports overwriting registered colormaps
other than the builtin colormaps.
"""
_api.check_isinstance(colors.Colormap, cmap=cmap)
name = name or cmap.name
if name in self:
if not force:
# don't allow registering an already existing cmap
# unless explicitly asked to
raise ValueError(
f'A colormap named "{name}" is already registered.')
elif name in self._builtin_cmaps:
# We don't allow overriding a builtin.
raise ValueError("Re-registering the builtin cmap "
f"{name!r} is not allowed.")
# Warn that we are updating an already existing colormap
_api.warn_external(f"Overwriting the cmap {name!r} "
"that was already in the registry.")
self._cmaps[name] = cmap.copy()
# Someone may set the extremes of a builtin colormap and want to register it
# with a different name for future lookups. The object would still have the
# builtin name, so we should update it to the registered name
if self._cmaps[name].name != name:
self._cmaps[name].name = name
def unregister(self, name):
"""
Remove a colormap from the registry.
You cannot remove built-in colormaps.
If the named colormap is not registered, returns with no error, raises
if you try to de-register a default colormap.
.. warning::
Colormap names are currently a shared namespace that may be used
by multiple packages. Use `unregister` only if you know you
have registered that name before. In particular, do not
unregister just in case to clean the name before registering a
new colormap.
Parameters
----------
name : str
The name of the colormap to be removed.
Raises
------
ValueError
If you try to remove a default built-in colormap.
"""
if name in self._builtin_cmaps:
raise ValueError(f"cannot unregister {name!r} which is a builtin "
"colormap.")
self._cmaps.pop(name, None)
def get_cmap(self, cmap):
"""
Return a color map specified through *cmap*.
Parameters
----------
cmap : str or `~matplotlib.colors.Colormap` or None
- if a `.Colormap`, return it
- if a string, look it up in ``mpl.colormaps``
- if None, return the Colormap defined in :rc:`image.cmap`
Returns
-------
Colormap
"""
# get the default color map
if cmap is None:
return self[mpl.rcParams["image.cmap"]]
# if the user passed in a Colormap, simply return it
if isinstance(cmap, colors.Colormap):
return cmap
if isinstance(cmap, str):
_api.check_in_list(sorted(_colormaps), cmap=cmap)
# otherwise, it must be a string so look it up
return self[cmap]
raise TypeError(
'get_cmap expects None or an instance of a str or Colormap . ' +
f'you passed {cmap!r} of type {type(cmap)}'
)
# public access to the colormaps should be via `matplotlib.colormaps`. For now,
# we still create the registry here, but that should stay an implementation
# detail.
_colormaps = ColormapRegistry(_gen_cmap_registry())
globals().update(_colormaps)
_multivar_colormaps = ColormapRegistry(multivar_cmaps)
_bivar_colormaps = ColormapRegistry(bivar_cmaps)
| ColormapRegistry |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 10985,
"end": 11120
} | class ____(desc_sig_element, _sig_element=True):
"""Node for punctuation in a signature."""
classes = ['p']
| desc_sig_punctuation |
python | viewflow__viewflow | tests/forms/test_forms_renderers.py | {
"start": 2777,
"end": 3156
} | class ____(forms.Form):
username = forms.CharField()
promocode = forms.CharField(widget=forms.HiddenInput, required=False)
def clean_promocode(self):
promo = self.cleaned_data.get("promocode", "")
if promo:
raise forms.ValidationError("Promocode must be empty")
def clean(self):
raise forms.ValidationError("Form error")
| TestForm |
python | pytorch__pytorch | test/onnx/torchlib/ops_test_common.py | {
"start": 10203,
"end": 25827
} | class ____(RuntimeError):
"""ONNX Runtime Aborted."""
def _ort_session_run(serialized_model: bytes, ort_inputs: Mapping[str, Any]):
"""Run a model with ONNX Runtime."""
# Disable all ORT optimizations
session_options = onnxruntime.SessionOptions()
session_options.graph_optimization_level = (
onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
)
session = ort.InferenceSession(
serialized_model, session_options, providers=("CPUExecutionProvider",)
)
return session.run(None, ort_inputs)
def _ort_session_run_return_dict(
serialized_model: bytes, ort_inputs: Mapping[str, Any], return_dict
) -> None:
"""Run a model with ONNX Runtime and store the results in return_dict."""
try:
return_dict["results"] = _ort_session_run(serialized_model, ort_inputs)
return_dict["error"] = None
except Exception as e: # pylint: disable=broad-except
return_dict["results"] = None
return_dict["error"] = e
def _safe_ort_session_run(serialized_model: bytes, ort_inputs: Mapping[str, Any]):
"""Run a model with ONNX Runtime in a separate process.
Args:
serialized_model: Serialized ONNX model proto.
ort_inputs: Inputs to the model.
Returns:
The inference result.
Raises:
OrtAbortedError if the process did not execute successfully.
"""
manager = multiprocessing.Manager()
return_dict = manager.dict()
process = multiprocessing.Process(
target=_ort_session_run_return_dict,
args=(serialized_model, ort_inputs, return_dict),
)
process.start()
process.join()
process.close()
if not return_dict:
raise OrtAbortedError
if return_dict["error"] is not None:
raise return_dict["error"]
return return_dict["results"]
def _format_model_and_input_information(onnx_model, inputs):
return (
f"Inputs:\n{pprint.pformat(inputs)}\nModel:\n{onnx.printer.to_text(onnx_model)}"
)
_TORCH_DTYPE_TO_ONNX_STRING = {
torch.bool: "tensor(bool)",
torch.uint8: "tensor(uint8)",
torch.int8: "tensor(int8)",
torch.int16: "tensor(int16)",
torch.int32: "tensor(int32)",
torch.int64: "tensor(int64)",
torch.float16: "tensor(float16)",
torch.float32: "tensor(float)",
torch.float64: "tensor(double)",
torch.complex64: "tensor(complex64)",
torch.complex128: "tensor(complex128)",
torch.bfloat16: "tensor(bfloat16)",
}
_TORCH_DTYPE_TO_ONNX: dict[torch.dtype, ir.DataType] = {
torch.bfloat16: ir.DataType.BFLOAT16,
torch.bool: ir.DataType.BOOL,
torch.complex128: ir.DataType.COMPLEX128,
torch.complex64: ir.DataType.COMPLEX64,
torch.float16: ir.DataType.FLOAT16,
torch.float32: ir.DataType.FLOAT,
torch.float64: ir.DataType.DOUBLE,
torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
torch.int16: ir.DataType.INT16,
torch.int32: ir.DataType.INT32,
torch.int64: ir.DataType.INT64,
torch.int8: ir.DataType.INT8,
torch.uint8: ir.DataType.UINT8,
torch.uint16: ir.DataType.UINT16,
torch.uint32: ir.DataType.UINT32,
torch.uint64: ir.DataType.UINT64,
}
def dtype_op_schema_compatible(dtype: torch.dtype, schema: onnx.defs.OpSchema) -> bool:
"""Checks if the dtype is compatible with the schema.
When a dtype is "compatible" with the schema, it means we can use the dtype
to create sample inputs by OpInfo to test the ONNX function and expect outputs to match.
Args:
dtype: The torch dtype used to create sample inputs by OpInfo.
schema: The ONNX schema of the function.
Returns:
True if the dtype is compatible with the schema.
"""
if not schema.inputs:
# If there are no inputs, we can't check compatibility. Assume it is compatible.
# e.g. aten_randn has only attributes.
return True
if schema.inputs[0].name not in {"self", "input"}:
# If the name of the first input is not "self" or "input",
# it is usually an input that is not of the same type as the output.
# We assume support in this case.
#
# For example, `aten_ones(size: IntType, dtype: int = FLOAT.dtype)`
# has the first input as `size`, which is an integer, but it can support
# any dtype.
return True
# Otherwise we check the type constraints of the first input.
# For example, when dtype=torch.float32, and the op being tested has the schema
# ```
# OpSchema(
# name='aten_abs',
# domain='pkg.onnxscript.torch_lib',
# since_version=1,
# doc='abs(Tensor self) -> Tensor',
# type_constraints=[OpSchema.TypeConstraintParam(type_param_str='TReal',
# allowed_type_strs=['tensor(float)', 'tensor(int8)', 'tensor(int16)',
# 'tensor(int32)', 'tensor(int64)', 'tensor(float16)', 'tensor(double)',
# 'tensor(bfloat16)'], description='')],
# inputs=[OpSchema.FormalParameter(name='self', type_str='TReal',
# description='', param_option=<FormalParameterOption.Single: 0>,
# is_homogeneous=True, min_arity=1,
# differentiation_category=<DifferentiationCategory.Unknown: 0>)],
# outputs=[OpSchema.FormalParameter(name='return_val',
# type_str='TReal', description='',
# param_option=<FormalParameterOption.Single: 0>, is_homogeneous=True,
# min_arity=1, differentiation_category=<DifferentiationCategory.Unknown: 0>)],
# attributes={}
# )
# ```
# we see the first input type is "TReal", corresponding to the type constraint
# with allowed types ['tensor(float)', 'tensor(int8)', 'tensor(int16)',
# 'tensor(int32)', 'tensor(int64)', 'tensor(float16)', 'tensor(double)',
# 'tensor(bfloat16)'].
# Since torch.float32 (tensor(float)) is in the allowed types, we return True.
first_input_type_name = schema.inputs[0].type_str
# Find the type constraint for the first input by matching the parameter name
first_input_type_constraint = next(
(
x
for x in schema.type_constraints
if first_input_type_name in x.type_param_str
),
None,
)
assert first_input_type_constraint is not None
allowed_type_strs = first_input_type_constraint.allowed_type_strs
# Here we consider seq(tensor(float)) compatible with tensor(float) as well
return any(
_TORCH_DTYPE_TO_ONNX_STRING[dtype] in type_str for type_str in allowed_type_strs
)
def graph_executor(
test_name: str,
outputs: Sequence[Any],
opset_version: int = TEST_OPSET_VERSION,
) -> Callable[[Callable[..., Any], tuple[Any], dict[str, Any]], None]:
"""Eagerly executes a function."""
def _capture_graph_and_evaluate_torch_script_evaluator(
function: Callable, args, kwargs
) -> tuple[Any, onnx.ModelProto]:
"""Captures the graph of a function and evaluates it using TorchScriptEvaluator."""
# Initialize the ONNX graph
graph = ir.Graph(
(),
(),
nodes=(),
opset_imports={"": opset_version, "pkg.torch.onnx": 1},
name="main_graph",
)
opset = onnxscript.values.Opset("", opset_version)
tracer = _building.OpRecorder(opset, {})
ort_inputs = {}
onnxscript_args: list[Any] = []
onnxscript_kwargs = {}
for i, arg in enumerate(args):
if isinstance(arg, np.ndarray):
input_name = f"input_{i}"
input = _tensors.SymbolicTensor(
opset=opset,
name=input_name,
shape=ir.Shape(arg.shape),
type=ir.TensorType(_TORCH_DTYPE_TO_ONNX[torch.tensor(arg).dtype]),
)
graph.inputs.append(input)
onnxscript_args.append(input)
ort_inputs[input_name] = arg
elif isinstance(arg, (list, tuple)):
# str is also a sequence but we do not want to treat it as a tensor
sequence_input = []
for j, subarg in enumerate(arg):
if isinstance(subarg, np.ndarray):
input_name = f"input_{i}_{j}"
tensor = torch.tensor(subarg)
input = _tensors.SymbolicTensor(
opset=opset,
name=input_name,
shape=ir.Shape(tensor.shape),
type=ir.TensorType(_TORCH_DTYPE_TO_ONNX[tensor.dtype]),
)
graph.inputs.append(input)
sequence_input.append(input)
ort_inputs[input_name] = subarg
else:
# Include non-numpy inputs as-is
# For example, it could be a None value that we want to keep
sequence_input.append(subarg)
onnxscript_args.append(sequence_input)
else:
onnxscript_args.append(arg)
for key, value in kwargs.items():
if isinstance(value, np.ndarray):
input = _tensors.SymbolicTensor(
opset=opset,
name=key,
shape=ir.Shape(torch.tensor(value).shape),
type=ir.TensorType(_TORCH_DTYPE_TO_ONNX[torch.tensor(value).dtype]),
)
graph.inputs.append(input)
ort_inputs[key] = value
onnxscript_kwargs[key] = input
else:
onnxscript_kwargs[key] = value
with onnxscript.evaluator.default_as(tracer):
symbolic_outputs = function(*onnxscript_args, **onnxscript_kwargs)
if not isinstance(symbolic_outputs, Sequence):
symbolic_outputs = (symbolic_outputs,)
# We need to set the size of the output tensors for the ONNX model to be valid
for output, symbolic_output in zip(outputs, symbolic_outputs):
if isinstance(output, Sequence):
# Output is a sequence
elem_dtype = _TORCH_DTYPE_TO_ONNX[output[0].dtype]
symbolic_output.type = ir.SequenceType(ir.TensorType(elem_dtype))
continue
output = (
output
if isinstance(output, torch.Tensor)
else torch.tensor(output, device="cpu")
)
symbolic_output.shape = ir.Shape(output.shape)
symbolic_output.dtype = _TORCH_DTYPE_TO_ONNX[output.dtype]
graph.outputs.extend(symbolic_outputs)
graph.extend(tracer.nodes)
onnx_model = ir.Model(graph, ir_version=10, producer_name="torch_test")
for identifier, onnxscript_function in tracer.functions.items():
if identifier in onnx_model.functions:
continue
if isinstance(onnxscript_function, ir.Function):
ir_function = onnxscript_function
else:
# TODO: Get IR function directly when onnxscript is updated
proto = onnxscript_function.to_function_proto()
ir_function = ir.serde.deserialize_function(proto)
onnx_model.functions[identifier] = ir_function
_ir_passes.add_opset_imports(onnx_model)
# Make sure the model is valid
model_proto = ir.to_proto(onnx_model)
try:
onnx.checker.check_model(model_proto, full_check=True)
except (onnx.checker.ValidationError, onnx.shape_inference.InferenceError) as e:
raise AssertionError(f"ONNX model is invalid. Model:\n{onnx_model}") from e
model_proto = onnx.shape_inference.infer_shapes(model_proto, data_prop=True)
try:
if (
os.environ.get("CATCH_ORT_SEGFAULT") == "1"
or os.environ.get("CREATE_REPRODUCTION_REPORT") == "1"
):
# Use an individual process to run ONNX Runtime to catch segfaults
return _safe_ort_session_run(
model_proto.SerializeToString(), ort_inputs
), model_proto
return _ort_session_run(
model_proto.SerializeToString(), ort_inputs
), model_proto
except (
# pylint: disable=c-extension-no-member
onnxruntime.capi.onnxruntime_pybind11_state.Fail,
onnxruntime.capi.onnxruntime_pybind11_state.RuntimeException,
onnxruntime.capi.onnxruntime_pybind11_state.InvalidArgument,
onnxruntime.capi.onnxruntime_pybind11_state.InvalidGraph,
onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented,
# pylint: enable=c-extension-no-member
) as e:
if os.environ.get("CREATE_REPRODUCTION_REPORT") == "1":
error_reproduction.create_reproduction_report(
test_name,
model_proto,
ort_inputs,
e,
"test/onnx/torchlib/test_ops.py",
)
raise RuntimeError(
"ONNX Runtime failed to evaluate:\n"
+ _format_model_and_input_information(model_proto, ort_inputs)
) from e
except OrtAbortedError as e:
if os.environ.get("CREATE_REPRODUCTION_REPORT") == "1":
# Save the model and inputs to a file for reproduction
error_reproduction.create_reproduction_report(
test_name,
model_proto,
ort_inputs,
e,
"test/onnx/torchlib/test_ops.py",
)
raise OrtAbortedError(
"ONNX Runtime aborted:\n"
+ _format_model_and_input_information(model_proto, ort_inputs)
) from e
except Exception as e:
if os.environ.get("CREATE_REPRODUCTION_REPORT") == "1":
error_reproduction.create_reproduction_report(
test_name,
model_proto,
ort_inputs,
e,
"test/onnx/torchlib/test_ops.py",
)
raise
return _capture_graph_and_evaluate_torch_script_evaluator
@contextlib.contextmanager
def normal_xfail_skip_test_behaviors(
test_behavior: Optional[str] = None, reason: Optional[str] = None
):
"""This context manager is used to handle the different behaviors of xfail and skip.
Args:
test_behavior (optional[str]): From DecorateMeta name, can be 'skip', 'xfail', or None.
reason (optional[str]): The reason for the failure or skip.
Raises:
e: Any exception raised by the test case if it's not an expected failure.
"""
# We need to skip as soon as possible, as SegFault might also be a case.
if test_behavior == "skip":
pytest.skip(reason=reason)
try:
yield
# We could use `except (AssertionError, RuntimeError, ...) as e:`, but it needs
# to go over all test cases to find the right exception type.
except Exception: # pylint: disable=broad-exception-caught
if test_behavior is None:
raise
if test_behavior == "xfail":
pytest.xfail(reason=reason)
else:
if test_behavior == "xfail":
pytest.fail("Test unexpectedly passed")
| OrtAbortedError |
python | huggingface__transformers | src/transformers/models/idefics3/modeling_idefics3.py | {
"start": 1726,
"end": 3360
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Idefics causal language model (or autoregressive) outputs.
"""
)
| Idefics3BaseModelOutputWithPast |
python | astropy__astropy | astropy/io/votable/dataorigin.py | {
"start": 2983,
"end": 5852
} | class ____:
"""Data class storing the basic provenance for a Dataset.
Notes
-----
DatasetOrigin is dedicated to a specific Element in a VOTable.
These ``<INFO>`` Elements describe a Resource, a TableElement or are Global.
Attributes
----------
ivoid : list
IVOID of underlying data collection (default: None)
citation : list
Dataset identifier that can be used for citation (default: None)
reference_url : list
Dataset landing page (default: None)
resource_version : list
Dataset version (default: None)
rights_uri : list
Licence URI (default: None)
rights : list
Licence or Copyright text (default: None)
creator : list
The person(s) mainly involved in the creation of the resource (default: None)
editor : list
Editor name of the reference article (default: None)
article : list
Bibcode or DOI of a reference article (default: None)
cites : list
An Identifier (ivoid, DOI, bibcode) of second resource (default: None)
is_derived_from : list
An Identifier (ivoid, DOI, bibcode) of second resource (default: None)
original_date : list
Date of the original resource from which the present resource is derived (default: None)
publication_date : list
Date of first publication in the data centre (default: None)
last_update_date : list
Last data centre update (default: None)
infos : list[astropy.io.votable.tree.Info]
list of ``<INFO>`` used by DataOrigin (default: None)
"""
def __init__(self, votable_element: astropy.io.votable.tree.Element = None):
"""
Constructor
Parameters
----------
votable_element: astropy.io.votable.tree.Element, optional
indicates the VOTable element
"""
self.ivoid = None
self.citation = None
self.reference_url = None
self.resource_version = None
self.rights_uri = None
self.rights = None
self.creator = None
self.editor = None
self.article = None
self.cites = None
self.is_derived_from = None
self.original_date = None
self.publication_date = None
self.last_update_date = None
self.__vo_elt = votable_element
self.infos = []
def get_votable_element(self) -> astropy.io.votable.tree.Element:
"""
Get the VOTable element
Returns
-------
astropy.io.votable.tree.Element
"""
return self.__vo_elt
def __str__(self) -> str:
s = []
for info_name in DATAORIGIN_INFO:
info = getattr(self, info_name)
if info:
s.append(f"{info_name}: {','.join(info)}")
return "\n".join(s)
| DatasetOrigin |
python | walkccc__LeetCode | solutions/1337. The K Weakest Rows in a Matrix/1337.py | {
"start": 0,
"end": 191
} | class ____:
def kWeakestRows(self, mat: list[list[int]], k: int) -> list[int]:
rowSums = [(sum(row), i) for i, row in enumerate(mat)]
return [i for _, i in sorted(rowSums)[:k]]
| Solution |
python | apache__airflow | airflow-core/src/airflow/timetables/interval.py | {
"start": 1316,
"end": 4798
} | class ____(Timetable):
"""
Basis for timetable implementations that schedule data intervals.
This kind of timetable classes create periodic data intervals from an
underlying schedule representation (e.g. a cron expression, or a timedelta
instance), and schedule a DagRun at the end of each interval.
"""
def _skip_to_latest(self, earliest: DateTime | None) -> DateTime:
"""
Bound the earliest time a run can be scheduled.
This is called when ``catchup=False``. See docstring of subclasses for
exact skipping behaviour of a schedule.
"""
raise NotImplementedError()
def _align_to_next(self, current: DateTime) -> DateTime:
"""
Align given time to the next scheduled time.
For fixed schedules (e.g. every midnight); this finds the next time that
aligns to the declared time, if the given time does not align. If the
schedule is not fixed (e.g. every hour), the given time is returned.
"""
raise NotImplementedError()
def _align_to_prev(self, current: DateTime) -> DateTime:
"""
Align given time to the previous scheduled time.
For fixed schedules (e.g. every midnight); this finds the prev time that
aligns to the declared time, if the given time does not align. If the
schedule is not fixed (e.g. every hour), the given time is returned.
It is not enough to use ``_get_prev(_align_to_next())``, since when a
DAG's schedule changes, this alternative would make the first scheduling
after the schedule change remain the same.
"""
raise NotImplementedError()
def _get_next(self, current: DateTime) -> DateTime:
"""Get the first schedule after the current time."""
raise NotImplementedError()
def _get_prev(self, current: DateTime) -> DateTime:
"""Get the last schedule before the current time."""
raise NotImplementedError()
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
earliest = restriction.earliest
if not restriction.catchup:
earliest = self._skip_to_latest(earliest)
elif earliest is not None:
earliest = self._align_to_next(earliest)
if last_automated_data_interval is None:
# First run; schedule the run at the first available time matching
# the schedule, and retrospectively create a data interval for it.
if earliest is None:
return None
start = earliest
else: # There's a previous run.
# Alignment is needed when DAG has new schedule interval.
align_last_data_interval_end = self._align_to_prev(last_automated_data_interval.end)
if earliest is not None:
# Catchup is False or DAG has new start date in the future.
# Make sure we get the later one.
start = max(align_last_data_interval_end, earliest)
else:
# Data interval starts from the end of the previous interval.
start = align_last_data_interval_end
if restriction.latest is not None and start > restriction.latest:
return None
end = self._get_next(start)
return DagRunInfo.interval(start=start, end=end)
| _DataIntervalTimetable |
python | anthropics__anthropic-sdk-python | src/anthropic/types/message_count_tokens_params.py | {
"start": 547,
"end": 6749
} | class ____(TypedDict, total=False):
messages: Required[Iterable[MessageParam]]
"""Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
"""
model: Required[ModelParam]
"""
The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
"""
system: Union[str, Iterable[TextBlockParam]]
"""System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
"""
thinking: ThinkingConfigParam
"""Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
"""
tool_choice: ToolChoiceParam
"""How the model should use the provided tools.
The model can use a specific tool, any available tool, decide by itself, or not
use tools at all.
"""
tools: Iterable[MessageCountTokensToolParam]
"""Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
"""
| MessageCountTokensParams |
python | pandas-dev__pandas | pandas/tests/indexes/base_class/test_indexing.py | {
"start": 2158,
"end": 3687
} | class ____:
@pytest.mark.slow # to_flat_index takes a while
def test_get_loc_tuple_monotonic_above_size_cutoff(self, monkeypatch):
# Go through the libindex path for which using
# _bin_search vs ndarray.searchsorted makes a difference
with monkeypatch.context():
monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 100)
lev = list("ABCD")
dti = pd.date_range("2016-01-01", periods=10)
mi = pd.MultiIndex.from_product([lev, range(5), dti])
oidx = mi.to_flat_index()
loc = len(oidx) // 2
tup = oidx[loc]
res = oidx.get_loc(tup)
assert res == loc
def test_get_loc_nan_object_dtype_nonmonotonic_nonunique(self):
# case that goes through _maybe_get_bool_indexer
idx = Index(["foo", np.nan, None, "foo", 1.0, None], dtype=object)
# we dont raise KeyError on nan
res = idx.get_loc(np.nan)
assert res == 1
# we only match on None, not on np.nan
res = idx.get_loc(None)
expected = np.array([False, False, True, False, False, True])
tm.assert_numpy_array_equal(res, expected)
# we don't match at all on mismatched NA
with pytest.raises(KeyError, match="NaT"):
idx.get_loc(NaT)
def test_getitem_boolean_ea_indexer():
# GH#45806
ser = pd.Series([True, False, pd.NA], dtype="boolean")
result = ser.index[ser]
expected = Index([0])
tm.assert_index_equal(result, expected)
| TestGetLoc |
python | kamyu104__LeetCode-Solutions | Python/divide-a-string-into-groups-of-size-k.py | {
"start": 38,
"end": 294
} | class ____(object):
def divideString(self, s, k, fill):
"""
:type s: str
:type k: int
:type fill: str
:rtype: List[str]
"""
return [s[i:i+k] + fill*(i+k-len(s)) for i in xrange(0, len(s), k)]
| Solution |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 5150,
"end": 5633
} | class ____(Enum):
EXTRINSIC: str = "extrinsic"
GAIL: str = "gail"
CURIOSITY: str = "curiosity"
RND: str = "rnd"
def to_settings(self) -> type:
_mapping = {
RewardSignalType.EXTRINSIC: RewardSignalSettings,
RewardSignalType.GAIL: GAILSettings,
RewardSignalType.CURIOSITY: CuriositySettings,
RewardSignalType.RND: RNDSettings,
}
return _mapping[self]
@attr.s(auto_attribs=True)
| RewardSignalType |
python | django__django | tests/timezones/forms.py | {
"start": 378,
"end": 528
} | class ____(forms.ModelForm):
class Meta:
model = Event
fields = "__all__"
localized_fields = "__all__"
| EventLocalizedModelForm |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 53670,
"end": 58935
} | class ____(Patch):
"""A scale-free ellipse."""
def __str__(self):
pars = (self._center[0], self._center[1],
self.width, self.height, self.angle)
fmt = "Ellipse(xy=(%s, %s), width=%s, height=%s, angle=%s)"
return fmt % pars
@_docstring.interpd
def __init__(self, xy, width, height, *, angle=0, **kwargs):
"""
Parameters
----------
xy : (float, float)
xy coordinates of ellipse centre.
width : float
Total length (diameter) of horizontal axis.
height : float
Total length (diameter) of vertical axis.
angle : float, default: 0
Rotation in degrees anti-clockwise.
Notes
-----
Valid keyword arguments are:
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self._center = xy
self._width, self._height = width, height
self._angle = angle
self._path = Path.unit_circle()
# Required for EllipseSelector with axes aspect ratio != 1
# The patch is defined in data coordinates and when changing the
# selector with square modifier and not in data coordinates, we need
# to correct for the aspect ratio difference between the data and
# display coordinate systems.
self._aspect_ratio_correction = 1.0
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""
Notes
-----
This cannot be called until after this has been added to an Axes,
otherwise unit conversion will fail. This makes it very important to
call the accessor method and not directly access the transformation
member variable.
"""
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5 * self._aspect_ratio_correction) \
.rotate_deg(self.angle) \
.scale(1, 1 / self._aspect_ratio_correction) \
.translate(*center)
def get_path(self):
"""Return the path of the ellipse."""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def set_center(self, xy):
"""
Set the center of the ellipse.
Parameters
----------
xy : (float, float)
"""
self._center = xy
self.stale = True
def get_center(self):
"""Return the center of the ellipse."""
return self._center
center = property(get_center, set_center)
def set_width(self, width):
"""
Set the width of the ellipse.
Parameters
----------
width : float
"""
self._width = width
self.stale = True
def get_width(self):
"""
Return the width of the ellipse.
"""
return self._width
width = property(get_width, set_width)
def set_height(self, height):
"""
Set the height of the ellipse.
Parameters
----------
height : float
"""
self._height = height
self.stale = True
def get_height(self):
"""Return the height of the ellipse."""
return self._height
height = property(get_height, set_height)
def set_angle(self, angle):
"""
Set the angle of the ellipse.
Parameters
----------
angle : float
"""
self._angle = angle
self.stale = True
def get_angle(self):
"""Return the angle of the ellipse."""
return self._angle
angle = property(get_angle, set_angle)
def get_corners(self):
"""
Return the corners of the ellipse bounding box.
The bounding box orientation is moving anti-clockwise from the
lower left corner defined before rotation.
"""
return self.get_patch_transform().transform(
[(-1, -1), (1, -1), (1, 1), (-1, 1)])
def get_vertices(self):
"""
Return the vertices coordinates of the ellipse.
The definition can be found `here <https://en.wikipedia.org/wiki/Ellipse>`_
.. versionadded:: 3.8
"""
if self.width < self.height:
ret = self.get_patch_transform().transform([(0, 1), (0, -1)])
else:
ret = self.get_patch_transform().transform([(1, 0), (-1, 0)])
return [tuple(x) for x in ret]
def get_co_vertices(self):
"""
Return the co-vertices coordinates of the ellipse.
The definition can be found `here <https://en.wikipedia.org/wiki/Ellipse>`_
.. versionadded:: 3.8
"""
if self.width < self.height:
ret = self.get_patch_transform().transform([(1, 0), (-1, 0)])
else:
ret = self.get_patch_transform().transform([(0, 1), (0, -1)])
return [tuple(x) for x in ret]
| Ellipse |
python | run-llama__llama_index | llama-index-core/tests/tools/test_query_engine_tool.py | {
"start": 252,
"end": 1524
} | class ____(CustomQueryEngine):
"""Custom query engine."""
def custom_query(self, query_str: str) -> str:
"""Query."""
return "custom_" + query_str
def test_query_engine_tool() -> None:
"""Test query engine tool."""
query_engine = MockQueryEngine() # type: ignore[call-arg]
query_tool = QueryEngineTool.from_defaults(query_engine)
# make sure both input formats work given function schema that assumes defaults
response = query_tool("hello world")
assert str(response) == "custom_hello world"
response = query_tool(input="foo")
assert str(response) == "custom_foo"
fn_schema_cls = cast(Type[BaseModel], query_tool.metadata.fn_schema)
fn_schema_obj = cast(BaseModel, fn_schema_cls(input="bar"))
response = query_tool(**fn_schema_obj.model_dump())
assert str(response) == "custom_bar"
# test resolve input errors
query_tool = QueryEngineTool.from_defaults(query_engine)
response = query_tool(tmp="hello world")
assert str(response) == "custom_{'tmp': 'hello world'}"
with pytest.raises(ValueError):
query_tool = QueryEngineTool.from_defaults(
query_engine, resolve_input_errors=False
)
response = query_tool(tmp="hello world")
| MockQueryEngine |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/auto-table.py | {
"start": 1831,
"end": 2454
} | class ____(LabeledBox):
DEFAULT_CSS = """
#issue-info {
height: auto;
border-bottom: dashed #632CA6;
}
#statuses-box {
height: 1fr;
width: auto;
}
"""
def __init__(self):
self.__info = Label("test")
super().__init__(
"",
ScrollableContainer(
Horizontal(self.__info, id="issue-info"),
Horizontal(*[Status(str(i)) for i in range(4)], id="statuses-box"),
id="issues-box",
),
)
@property
def info(self) -> Label:
return self.__info
| Rendering |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_ops.py | {
"start": 530,
"end": 1351
} | class ____:
@pytest.fixture
def rng(self, freq):
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
return bdate_range(START, END, freq=freq)
def test_comparison(self, rng):
d = rng[10]
comp = rng > d
assert comp[11]
assert not comp[9]
def test_copy(self, rng):
cp = rng.copy()
tm.assert_index_equal(cp, rng)
def test_identical(self, rng):
t1 = rng.copy()
t2 = rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename("foo")
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
| TestBusinessDatetimeIndex |
python | nryoung__algorithms | algorithms/data_structures/digraph.py | {
"start": 260,
"end": 2596
} | class ____():
def __init__(self):
self.__adj = {}
self.__v_count = 0
self.__e_count = 0
def vertex_count(self):
"""
Returns the number of vertices in the graph.
Worst Case Complexity: O(1)
"""
return self.__v_count
def edge_count(self):
"""
Returns the number of edges in the graph.
Worst Case Complexity: O(1)
"""
return self.__e_count
def add_edge(self, src, dest):
"""
Adds an undirected edge 'src'-'dest' to the graph.
Worst Case Complexity O(1)
"""
if src in self.__adj:
self.__adj[src].append(dest)
else:
self.__adj[src] = [dest]
self.__v_count += 1
if dest in self.__adj:
pass
else:
self.__adj[dest] = []
self.__v_count += 1
self.__e_count += 1
def adj(self, src):
"""
Returns the vertices adjacent to vertex 'src'.
Worst Case Complexity: O(1)
"""
return self.__adj[src]
def outdegree(self, src):
"""
Returns the degree of the vertex 'src'
Worst Case Complexity: O(1)
"""
if src in self.__adj:
return len(self.__adj[src])
else:
raise LookupError("This vertex is not in the graph.")
def vertices(self):
"""
Returns an iterable of all the vertices in the graph.
Worst Case Complexity: O(V)
"""
return self.__adj.keys()
def reverse(self):
"""
Returns the reverse of this digraph
Worst Case Complexity: O(V+E)
"""
digraph_reversed = Digraph()
old_vertices = self.vertices()
for src in old_vertices:
for dest in self.adj(src):
digraph_reversed.add_edge(dest, src)
return digraph_reversed
def __str__(self):
s = []
s.append("{0} vertices and {1} edges \n".format(self.__v_count,
self.__e_count))
for key in self.vertices():
s.append("{0}: ".format(key))
for val in self.adj(key):
s.append("{0} ".format(val))
s.append("\n")
return "".join(s)
| Digraph |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/general_tests/utils_tests/test_enum.py | {
"start": 66,
"end": 341
} | class ____(Enum):
VAL = "VAL"
def test_enum_value():
assert is_enum_value(None) is False
assert is_enum_value(1) is False
assert is_enum_value("foo") is False
assert is_enum_value("VAL") is False
assert is_enum_value(APythonEnum.VAL) is True
| APythonEnum |
python | openai__openai-python | src/openai/types/responses/response_function_shell_call_output_content_param.py | {
"start": 723,
"end": 1083
} | class ____(TypedDict, total=False):
outcome: Required[Outcome]
"""The exit or timeout outcome associated with this chunk."""
stderr: Required[str]
"""Captured stderr output for this chunk of the shell call."""
stdout: Required[str]
"""Captured stdout output for this chunk of the shell call."""
| ResponseFunctionShellCallOutputContentParam |
python | tensorflow__tensorflow | tensorflow/core/function/capture/capture_container.py | {
"start": 1111,
"end": 1807
} | class ____(py_collections.OrderedDict):
"""A dict with a mutation flag."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mutated = True
def pop(self, key, default=None):
self._mutated = True
return super().pop(key, default)
def __setitem__(self, key, value):
self._mutated = True
return super().__setitem__(key, value)
def __delitem__(self, key):
self._mutated = True
return super().__delitem__(key)
def clear(self):
self._mutated = True
return super().clear()
@property
def mutated(self):
return self._mutated
@mutated.setter
def mutated(self, value):
self._mutated = value
| MutationAwareDict |
python | catalyst-team__catalyst | tests/catalyst/callbacks/test_wrapper.py | {
"start": 393,
"end": 2264
} | class ____(unittest.TestCase):
def test_enabled(self):
runner = Mock(loader_key="train", epoch=1)
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
events = (
"on_loader_start",
"on_loader_end",
"on_experiment_start",
"on_experiment_end",
"on_epoch_start",
"on_epoch_end",
"on_batch_start",
"on_batch_end",
"on_exception",
)
for event in events:
for order in orders:
callback = RaiserCallback(order, event)
wrapper = CallbackWrapper(callback, enable_callback=True)
with self.assertRaises(Dummy):
wrapper.__getattribute__(event)(runner)
def test_disabled(self):
runner = Mock(loader_key="train", epoch=1)
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
events = (
"on_loader_start",
"on_loader_end",
"on_experiment_start",
"on_experiment_end",
"on_epoch_start",
"on_epoch_end",
"on_batch_start",
"on_batch_end",
"on_exception",
)
for event in events:
for order in orders:
callback = RaiserCallback(order, event)
wrapper = CallbackWrapper(callback, enable_callback=False)
wrapper.__getattribute__(event)(runner)
| TestWrapperCallback |
python | pypa__hatch | tests/backend/builders/plugin/test_interface.py | {
"start": 406,
"end": 797
} | class ____:
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert isinstance(builder.plugin_manager, PluginManager)
def test_reuse(self, isolation):
plugin_manager = PluginManager()
builder = MockBuilder(str(isolation), plugin_manager=plugin_manager)
assert builder.plugin_manager is plugin_manager
| TestPluginManager |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 31186,
"end": 32128
} | class ____(
roles.BinaryElementRole[Any], elements.CompilerColumnElement
):
"""produce a wrapping element for a case-insensitive portion of
an ILIKE construct.
The construct usually renders the ``lower()`` function, but on
PostgreSQL will pass silently with the assumption that "ILIKE"
is being used.
.. versionadded:: 2.0
"""
__visit_name__ = "ilike_case_insensitive_operand"
__slots__ = "element", "comparator"
def __init__(self, element):
self.element = element
self.comparator = element.comparator
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
def _with_binary_element_type(self, type_):
return ilike_case_insensitive(
self.element._with_binary_element_type(type_)
)
| ilike_case_insensitive |
python | python-pillow__Pillow | src/PIL/ImageFile.py | {
"start": 15162,
"end": 16053
} | class ____(ImageFile, metaclass=abc.ABCMeta):
"""
Base class for stub image loaders.
A stub loader is an image loader that can identify files of a
certain format, but relies on external code to load the file.
"""
@abc.abstractmethod
def _open(self) -> None:
pass
def load(self) -> Image.core.PixelAccess | None:
loader = self._load()
if loader is None:
msg = f"cannot find loader for this {self.format} file"
raise OSError(msg)
image = loader.load(self)
assert image is not None
# become the other object (!)
self.__class__ = image.__class__ # type: ignore[assignment]
self.__dict__ = image.__dict__
return image.load()
@abc.abstractmethod
def _load(self) -> StubHandler | None:
"""(Hook) Find actual image loader."""
pass
| StubImageFile |
python | django__django | tests/managers_regress/models.py | {
"start": 1049,
"end": 1173
} | class ____(models.Model):
comment = models.CharField(max_length=50)
class Meta:
abstract = True
| AbstractBase3 |
python | huggingface__transformers | src/transformers/models/bark/modeling_bark.py | {
"start": 6745,
"end": 10324
} | class ____(BarkSelfAttention):
"""
Bark flash attention module. This module inherits from `BarkSelfAttention` as the weights of the module stays
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(new_shape)
# Flash attention requires the input to have the shape
# batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features)
return tensor
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
# re-assemble all head outputs side by side
# (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size)
tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,))
return tensor
def forward(
self,
hidden_states,
attention_mask=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
batch_size, query_len, _ = hidden_states.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if past_key_values is not None:
key, value = past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position})
target_dtype = get_target_dtype(query, self) # if the query is in float32, this is the dtype to cast to for FA
attn_output = _flash_attention_forward(
query,
key,
value,
attention_mask,
query_len,
dropout=self.dropout if self.training else 0.0,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
is_causal=self.is_causal,
target_dtype=target_dtype,
)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return attn_output, None
BARK_ATTENTION_CLASSES = {
"eager": BarkSelfAttention,
"flash_attention_2": BarkSelfFlashAttention2,
}
| BarkSelfFlashAttention2 |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_typing.py | {
"start": 2490,
"end": 2994
} | class ____:
"""Class to test self referential variable typing.
This regressed, reported in: https://github.com/pylint-dev/pylint/issues/5342
"""
def self_referential_optional_within_method(self) -> None:
variable: Optional[MySecondClass] = self
print(variable)
def correct_inner_typing_method(self) -> bool:
def inner_method(self, other: MySecondClass) -> bool:
return self == other
return inner_method(self, MySecondClass())
| MySecondClass |
python | kubernetes-client__python | kubernetes/client/models/v1_validating_webhook_configuration_list.py | {
"start": 383,
"end": 7373
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ValidatingWebhookConfiguration]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ValidatingWebhookConfigurationList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ValidatingWebhookConfigurationList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ValidatingWebhookConfigurationList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ValidatingWebhookConfigurationList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ValidatingWebhookConfigurationList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ValidatingWebhookConfigurationList. # noqa: E501
List of ValidatingWebhookConfiguration. # noqa: E501
:return: The items of this V1ValidatingWebhookConfigurationList. # noqa: E501
:rtype: list[V1ValidatingWebhookConfiguration]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ValidatingWebhookConfigurationList.
List of ValidatingWebhookConfiguration. # noqa: E501
:param items: The items of this V1ValidatingWebhookConfigurationList. # noqa: E501
:type: list[V1ValidatingWebhookConfiguration]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ValidatingWebhookConfigurationList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ValidatingWebhookConfigurationList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ValidatingWebhookConfigurationList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ValidatingWebhookConfigurationList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ValidatingWebhookConfigurationList. # noqa: E501
:return: The metadata of this V1ValidatingWebhookConfigurationList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ValidatingWebhookConfigurationList.
:param metadata: The metadata of this V1ValidatingWebhookConfigurationList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ValidatingWebhookConfigurationList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ValidatingWebhookConfigurationList):
return True
return self.to_dict() != other.to_dict()
| V1ValidatingWebhookConfigurationList |
python | sympy__sympy | sympy/physics/biomechanics/curve.py | {
"start": 2297,
"end": 10350
} | class ____(CharacteristicCurveFunction):
r"""Tendon force-length curve based on De Groote et al., 2016 [1]_.
Explanation
===========
Gives the normalized tendon force produced as a function of normalized
tendon length.
The function is defined by the equation:
$fl^T = c_0 \exp{c_3 \left( \tilde{l}^T - c_1 \right)} - c_2$
with constant values of $c_0 = 0.2$, $c_1 = 0.995$, $c_2 = 0.25$, and
$c_3 = 33.93669377311689$.
While it is possible to change the constant values, these were carefully
selected in the original publication to give the characteristic curve
specific and required properties. For example, the function produces no
force when the tendon is in an unstrained state. It also produces a force
of 1 normalized unit when the tendon is under a 5% strain.
Examples
========
The preferred way to instantiate :class:`TendonForceLengthDeGroote2016` is using
the :meth:`~.with_defaults` constructor because this will automatically
populate the constants within the characteristic curve equation with the
floating point values from the original publication. This constructor takes
a single argument corresponding to normalized tendon length. We'll create a
:class:`~.Symbol` called ``l_T_tilde`` to represent this.
>>> from sympy import Symbol
>>> from sympy.physics.biomechanics import TendonForceLengthDeGroote2016
>>> l_T_tilde = Symbol('l_T_tilde')
>>> fl_T = TendonForceLengthDeGroote2016.with_defaults(l_T_tilde)
>>> fl_T
TendonForceLengthDeGroote2016(l_T_tilde, 0.2, 0.995, 0.25,
33.93669377311689)
It's also possible to populate the four constants with your own values too.
>>> from sympy import symbols
>>> c0, c1, c2, c3 = symbols('c0 c1 c2 c3')
>>> fl_T = TendonForceLengthDeGroote2016(l_T_tilde, c0, c1, c2, c3)
>>> fl_T
TendonForceLengthDeGroote2016(l_T_tilde, c0, c1, c2, c3)
You don't just have to use symbols as the arguments, it's also possible to
use expressions. Let's create a new pair of symbols, ``l_T`` and
``l_T_slack``, representing tendon length and tendon slack length
respectively. We can then represent ``l_T_tilde`` as an expression, the
ratio of these.
>>> l_T, l_T_slack = symbols('l_T l_T_slack')
>>> l_T_tilde = l_T/l_T_slack
>>> fl_T = TendonForceLengthDeGroote2016.with_defaults(l_T_tilde)
>>> fl_T
TendonForceLengthDeGroote2016(l_T/l_T_slack, 0.2, 0.995, 0.25,
33.93669377311689)
To inspect the actual symbolic expression that this function represents,
we can call the :meth:`~.doit` method on an instance. We'll use the keyword
argument ``evaluate=False`` as this will keep the expression in its
canonical form and won't simplify any constants.
>>> fl_T.doit(evaluate=False)
-0.25 + 0.2*exp(33.93669377311689*(l_T/l_T_slack - 0.995))
The function can also be differentiated. We'll differentiate with respect
to l_T using the ``diff`` method on an instance with the single positional
argument ``l_T``.
>>> fl_T.diff(l_T)
6.787338754623378*exp(33.93669377311689*(l_T/l_T_slack - 0.995))/l_T_slack
References
==========
.. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation
of direct collocation optimal control problem formulations for
solving the muscle redundancy problem, Annals of biomedical
engineering, 44(10), (2016) pp. 2922-2936
"""
@classmethod
def with_defaults(cls, l_T_tilde):
r"""Recommended constructor that will use the published constants.
Explanation
===========
Returns a new instance of the tendon force-length function using the
four constant values specified in the original publication.
These have the values:
$c_0 = 0.2$
$c_1 = 0.995$
$c_2 = 0.25$
$c_3 = 33.93669377311689$
Parameters
==========
l_T_tilde : Any (sympifiable)
Normalized tendon length.
"""
c0 = Float('0.2')
c1 = Float('0.995')
c2 = Float('0.25')
c3 = Float('33.93669377311689')
return cls(l_T_tilde, c0, c1, c2, c3)
@classmethod
def eval(cls, l_T_tilde, c0, c1, c2, c3):
"""Evaluation of basic inputs.
Parameters
==========
l_T_tilde : Any (sympifiable)
Normalized tendon length.
c0 : Any (sympifiable)
The first constant in the characteristic equation. The published
value is ``0.2``.
c1 : Any (sympifiable)
The second constant in the characteristic equation. The published
value is ``0.995``.
c2 : Any (sympifiable)
The third constant in the characteristic equation. The published
value is ``0.25``.
c3 : Any (sympifiable)
The fourth constant in the characteristic equation. The published
value is ``33.93669377311689``.
"""
pass
def _eval_evalf(self, prec):
"""Evaluate the expression numerically using ``evalf``."""
return self.doit(deep=False, evaluate=False)._eval_evalf(prec)
def doit(self, deep=True, evaluate=True, **hints):
"""Evaluate the expression defining the function.
Parameters
==========
deep : bool
Whether ``doit`` should be recursively called. Default is ``True``.
evaluate : bool.
Whether the SymPy expression should be evaluated as it is
constructed. If ``False``, then no constant folding will be
conducted which will leave the expression in a more numerically-
stable for values of ``l_T_tilde`` that correspond to a sensible
operating range for a musculotendon. Default is ``True``.
**kwargs : dict[str, Any]
Additional keyword argument pairs to be recursively passed to
``doit``.
"""
l_T_tilde, *constants = self.args
if deep:
hints['evaluate'] = evaluate
l_T_tilde = l_T_tilde.doit(deep=deep, **hints)
c0, c1, c2, c3 = [c.doit(deep=deep, **hints) for c in constants]
else:
c0, c1, c2, c3 = constants
if evaluate:
return c0*exp(c3*(l_T_tilde - c1)) - c2
return c0*exp(c3*UnevaluatedExpr(l_T_tilde - c1)) - c2
def fdiff(self, argindex=1):
"""Derivative of the function with respect to a single argument.
Parameters
==========
argindex : int
The index of the function's arguments with respect to which the
derivative should be taken. Argument indexes start at ``1``.
Default is ``1``.
"""
l_T_tilde, c0, c1, c2, c3 = self.args
if argindex == 1:
return c0*c3*exp(c3*UnevaluatedExpr(l_T_tilde - c1))
elif argindex == 2:
return exp(c3*UnevaluatedExpr(l_T_tilde - c1))
elif argindex == 3:
return -c0*c3*exp(c3*UnevaluatedExpr(l_T_tilde - c1))
elif argindex == 4:
return Integer(-1)
elif argindex == 5:
return c0*(l_T_tilde - c1)*exp(c3*UnevaluatedExpr(l_T_tilde - c1))
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Inverse function.
Parameters
==========
argindex : int
Value to start indexing the arguments at. Default is ``1``.
"""
return TendonForceLengthInverseDeGroote2016
def _latex(self, printer):
"""Print a LaTeX representation of the function defining the curve.
Parameters
==========
printer : Printer
The printer to be used to print the LaTeX string representation.
"""
l_T_tilde = self.args[0]
_l_T_tilde = printer._print(l_T_tilde)
return r'\operatorname{fl}^T \left( %s \right)' % _l_T_tilde
| TendonForceLengthDeGroote2016 |
python | tensorflow__tensorflow | tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py | {
"start": 9792,
"end": 15984
} | class ____(tf.test.Benchmark):
def _report(self, label, start, num_iters, device, batch_size, data_format,
num_replicas=1):
resnet50_test_util.report(self, label, start, num_iters, device, batch_size,
data_format, num_replicas)
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
# TODO(b/141475121): We need some way to check which batch sizes would
# work using a public API.
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
# Avoid OOM errors with larger batch sizes, which seem to cause errors
# later on even if caught.
#
# TODO(allenl): Base this on device memory; memory limit information
# during the test seems to exclude the amount TensorFlow has allocated,
# which isn't useful.
if 'K20' in device.physical_device_desc:
return (16,)
# Quardro P1000.
if 'P1000' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with context.execution_mode(execution_mode):
device, data_format = device_and_format
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = resnet50_test_util.random_batch(batch_size, data_format)
for _ in range(num_burn):
model(images, training=False).cpu()
if execution_mode:
context.async_wait()
gc.collect()
start = time.time()
for _ in range(num_iters):
model(images, training=False).cpu()
if execution_mode:
context.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply(
'eager_apply', resnet50_test_util.device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async',
resnet50_test_util.device_and_data_format(),
defun=False,
execution_mode=context.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply(
'eager_apply_with_defun',
resnet50_test_util.device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with context.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = resnet50_test_util.random_batch(
batch_size, data_format)
model = resnet50.ResNet50(data_format)
# TODO(b/161911585): tf_to_corert MLIR lowering pipeline should handle
# case when momentum is not set.
optimizer = tf.keras.optimizers.SGD(0.1, 0.1)
apply_grads = apply_gradients
if defun:
model.call = tf.function(model.call)
apply_grads = tf.function(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in range(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
context.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in range(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
context.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train(
'eager_train', MockIterator,
resnet50_test_util.device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
resnet50_test_util.device_and_data_format(),
defun=False,
execution_mode=context.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
resnet50_test_util.device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return iter(ds)
self._benchmark_eager_train(
'eager_train_dataset',
make_iterator,
resnet50_test_util.device_and_data_format(),
defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return iter(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
resnet50_test_util.device_and_data_format(), defun=True)
if __name__ == '__main__':
tf.compat.v1.enable_eager_execution()
tf.test.main()
| ResNet50Benchmarks |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI045.py | {
"start": 1170,
"end": 1269
} | class ____:
def __iter__(self) -> collections.abc.Iterator:
...
| CollectionsIteratorReturn |
python | pypa__setuptools | setuptools/tests/test_egg_info.py | {
"start": 1108,
"end": 43802
} | class ____:
setup_script = DALS(
"""
from setuptools import setup
setup(
name='foo',
py_modules=['hello'],
entry_points={'console_scripts': ['hi = hello.run']},
zip_safe=False,
)
"""
)
def _create_project(self):
path.build({
'setup.py': self.setup_script,
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
})
@staticmethod
def _extract_mv_version(pkg_info_lines: list[str]) -> tuple[int, int]:
version_str = pkg_info_lines[0].split(' ')[1]
major, minor = map(int, version_str.split('.')[:2])
return major, minor
def test_egg_info_save_version_info_setup_empty(self, tmpdir_cwd, env):
"""
When the egg_info section is empty or not present, running
save_version_info should add the settings to the setup.cfg
in a deterministic order.
"""
setup_cfg = os.path.join(env.paths['home'], 'setup.cfg')
dist = Distribution()
ei = egg_info(dist)
ei.initialize_options()
ei.save_version_info(setup_cfg)
with open(setup_cfg, 'r', encoding="utf-8") as f:
content = f.read()
assert '[egg_info]' in content
assert 'tag_build =' in content
assert 'tag_date = 0' in content
expected_order = (
'tag_build',
'tag_date',
)
self._validate_content_order(content, expected_order)
@staticmethod
def _validate_content_order(content, expected):
"""
Assert that the strings in expected appear in content
in order.
"""
pattern = '.*'.join(expected)
flags = re.MULTILINE | re.DOTALL
assert re.search(pattern, content, flags)
def test_egg_info_save_version_info_setup_defaults(self, tmpdir_cwd, env):
"""
When running save_version_info on an existing setup.cfg
with the 'default' values present from a previous run,
the file should remain unchanged.
"""
setup_cfg = os.path.join(env.paths['home'], 'setup.cfg')
path.build({
setup_cfg: DALS(
"""
[egg_info]
tag_build =
tag_date = 0
"""
),
})
dist = Distribution()
ei = egg_info(dist)
ei.initialize_options()
ei.save_version_info(setup_cfg)
with open(setup_cfg, 'r', encoding="utf-8") as f:
content = f.read()
assert '[egg_info]' in content
assert 'tag_build =' in content
assert 'tag_date = 0' in content
expected_order = (
'tag_build',
'tag_date',
)
self._validate_content_order(content, expected_order)
def test_expected_files_produced(self, tmpdir_cwd, env):
self._create_project()
self._run_egg_info_command(tmpdir_cwd, env)
actual = os.listdir('foo.egg-info')
expected = [
'PKG-INFO',
'SOURCES.txt',
'dependency_links.txt',
'entry_points.txt',
'not-zip-safe',
'top_level.txt',
]
assert sorted(actual) == expected
def test_handling_utime_error(self, tmpdir_cwd, env):
dist = Distribution()
ei = egg_info(dist)
utime_patch = mock.patch('os.utime', side_effect=OSError("TEST"))
mkpath_patch = mock.patch(
'setuptools.command.egg_info.egg_info.mkpath', return_val=None
)
with utime_patch, mkpath_patch:
import distutils.errors
msg = r"Cannot update time stamp of directory 'None'"
with pytest.raises(distutils.errors.DistutilsFileError, match=msg):
ei.run()
def test_license_is_a_string(self, tmpdir_cwd, env):
setup_config = DALS(
"""
[metadata]
name=foo
version=0.0.1
license=file:MIT
"""
)
setup_script = DALS(
"""
from setuptools import setup
setup()
"""
)
path.build({
'setup.py': setup_script,
'setup.cfg': setup_config,
})
# This command should fail with a ValueError, but because it's
# currently configured to use a subprocess, the actual traceback
# object is lost and we need to parse it from stderr
with pytest.raises(AssertionError) as exc:
self._run_egg_info_command(tmpdir_cwd, env)
# The only argument to the assertion error should be a traceback
# containing a ValueError
assert 'ValueError' in exc.value.args[0]
def test_rebuilt(self, tmpdir_cwd, env):
"""Ensure timestamps are updated when the command is re-run."""
self._create_project()
self._run_egg_info_command(tmpdir_cwd, env)
timestamp_a = os.path.getmtime('foo.egg-info')
# arbitrary sleep just to handle *really* fast systems
time.sleep(0.001)
self._run_egg_info_command(tmpdir_cwd, env)
timestamp_b = os.path.getmtime('foo.egg-info')
assert timestamp_a != timestamp_b
def test_manifest_template_is_read(self, tmpdir_cwd, env):
self._create_project()
path.build({
'MANIFEST.in': DALS(
"""
recursive-include docs *.rst
"""
),
'docs': {
'usage.rst': "Run 'hi'",
},
})
self._run_egg_info_command(tmpdir_cwd, env)
egg_info_dir = os.path.join('.', 'foo.egg-info')
sources_txt = os.path.join(egg_info_dir, 'SOURCES.txt')
with open(sources_txt, encoding="utf-8") as f:
assert 'docs/usage.rst' in f.read().split('\n')
def _setup_script_with_requires(self, requires, use_setup_cfg=False):
setup_script = DALS(
"""
from setuptools import setup
setup(name='foo', zip_safe=False, %s)
"""
) % ('' if use_setup_cfg else requires)
setup_config = requires if use_setup_cfg else ''
path.build({
'setup.py': setup_script,
'setup.cfg': setup_config,
})
mismatch_marker = f"python_version<'{sys.version_info[0]}'"
# Alternate equivalent syntax.
mismatch_marker_alternate = f'python_version < "{sys.version_info[0]}"'
invalid_marker = "<=>++"
class RequiresTestHelper:
@staticmethod
def parametrize(*test_list, **format_dict):
idlist = []
argvalues = []
for test in test_list:
test_params = test.lstrip().split('\n\n', 3)
name_kwargs = test_params.pop(0).split('\n')
if len(name_kwargs) > 1:
val = name_kwargs[1].strip()
install_cmd_kwargs = ast.literal_eval(val)
else:
install_cmd_kwargs = {}
name = name_kwargs[0].strip()
setup_py_requires, setup_cfg_requires, expected_requires = [
DALS(a).format(**format_dict) for a in test_params
]
for id_, requires, use_cfg in (
(name, setup_py_requires, False),
(name + '_in_setup_cfg', setup_cfg_requires, True),
):
idlist.append(id_)
marks = ()
if requires.startswith('@xfail\n'):
requires = requires[7:]
marks = pytest.mark.xfail
argvalues.append(
pytest.param(
requires,
use_cfg,
expected_requires,
install_cmd_kwargs,
marks=marks,
)
)
return pytest.mark.parametrize(
(
"requires",
"use_setup_cfg",
"expected_requires",
"install_cmd_kwargs",
),
argvalues,
ids=idlist,
)
@RequiresTestHelper.parametrize(
# Format of a test:
#
# id
# install_cmd_kwargs [optional]
#
# requires block (when used in setup.py)
#
# requires block (when used in setup.cfg)
#
# expected contents of requires.txt
"""
install_requires_deterministic
install_requires=["wheel>=0.5", "pytest"]
[options]
install_requires =
wheel>=0.5
pytest
wheel>=0.5
pytest
""",
"""
install_requires_ordered
install_requires=["pytest>=3.0.2,!=10.9999"]
[options]
install_requires =
pytest>=3.0.2,!=10.9999
pytest!=10.9999,>=3.0.2
""",
"""
install_requires_with_marker
install_requires=["barbazquux;{mismatch_marker}"],
[options]
install_requires =
barbazquux; {mismatch_marker}
[:{mismatch_marker_alternate}]
barbazquux
""",
"""
install_requires_with_extra
{'cmd': ['egg_info']}
install_requires=["barbazquux [test]"],
[options]
install_requires =
barbazquux [test]
barbazquux[test]
""",
"""
install_requires_with_extra_and_marker
install_requires=["barbazquux [test]; {mismatch_marker}"],
[options]
install_requires =
barbazquux [test]; {mismatch_marker}
[:{mismatch_marker_alternate}]
barbazquux[test]
""",
"""
setup_requires_with_markers
setup_requires=["barbazquux;{mismatch_marker}"],
[options]
setup_requires =
barbazquux; {mismatch_marker}
""",
"""
extras_require_with_extra
{'cmd': ['egg_info']}
extras_require={{"extra": ["barbazquux [test]"]}},
[options.extras_require]
extra = barbazquux [test]
[extra]
barbazquux[test]
""",
"""
extras_require_with_extra_and_marker_in_req
extras_require={{"extra": ["barbazquux [test]; {mismatch_marker}"]}},
[options.extras_require]
extra =
barbazquux [test]; {mismatch_marker}
[extra]
[extra:{mismatch_marker_alternate}]
barbazquux[test]
""",
# FIXME: ConfigParser does not allow : in key names!
"""
extras_require_with_marker
extras_require={{":{mismatch_marker}": ["barbazquux"]}},
@xfail
[options.extras_require]
:{mismatch_marker} = barbazquux
[:{mismatch_marker}]
barbazquux
""",
"""
extras_require_with_marker_in_req
extras_require={{"extra": ["barbazquux; {mismatch_marker}"]}},
[options.extras_require]
extra =
barbazquux; {mismatch_marker}
[extra]
[extra:{mismatch_marker_alternate}]
barbazquux
""",
"""
extras_require_with_empty_section
extras_require={{"empty": []}},
[options.extras_require]
empty =
[empty]
""",
# Format arguments.
invalid_marker=invalid_marker,
mismatch_marker=mismatch_marker,
mismatch_marker_alternate=mismatch_marker_alternate,
)
def test_requires(
self,
tmpdir_cwd,
env,
requires,
use_setup_cfg,
expected_requires,
install_cmd_kwargs,
):
self._setup_script_with_requires(requires, use_setup_cfg)
self._run_egg_info_command(tmpdir_cwd, env, **install_cmd_kwargs)
egg_info_dir = os.path.join('.', 'foo.egg-info')
requires_txt = os.path.join(egg_info_dir, 'requires.txt')
if os.path.exists(requires_txt):
with open(requires_txt, encoding="utf-8") as fp:
install_requires = fp.read()
else:
install_requires = ''
assert install_requires.lstrip() == expected_requires
assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == []
def test_install_requires_unordered_disallowed(self, tmpdir_cwd, env):
"""
Packages that pass unordered install_requires sequences
should be rejected as they produce non-deterministic
builds. See #458.
"""
req = 'install_requires={"fake-factory==0.5.2", "pytz"}'
self._setup_script_with_requires(req)
with pytest.raises(AssertionError):
self._run_egg_info_command(tmpdir_cwd, env)
def test_extras_require_with_invalid_marker(self, tmpdir_cwd, env):
tmpl = 'extras_require={{":{marker}": ["barbazquux"]}},'
req = tmpl.format(marker=self.invalid_marker)
self._setup_script_with_requires(req)
with pytest.raises(AssertionError):
self._run_egg_info_command(tmpdir_cwd, env)
assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == []
def test_extras_require_with_invalid_marker_in_req(self, tmpdir_cwd, env):
tmpl = 'extras_require={{"extra": ["barbazquux; {marker}"]}},'
req = tmpl.format(marker=self.invalid_marker)
self._setup_script_with_requires(req)
with pytest.raises(AssertionError):
self._run_egg_info_command(tmpdir_cwd, env)
assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == []
def test_provides_extra(self, tmpdir_cwd, env):
self._setup_script_with_requires('extras_require={"foobar": ["barbazquux"]},')
environ = os.environ.copy().update(
HOME=env.paths['home'],
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
env=environ,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
assert 'Provides-Extra: foobar' in pkg_info_lines
assert 'Metadata-Version: 2.4' in pkg_info_lines
def test_doesnt_provides_extra(self, tmpdir_cwd, env):
self._setup_script_with_requires(
"""install_requires=["spam ; python_version<'3.6'"]"""
)
environ = os.environ.copy().update(
HOME=env.paths['home'],
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
env=environ,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_text = fp.read()
assert 'Provides-Extra:' not in pkg_info_text
@pytest.mark.parametrize(
('files', 'license_in_sources'),
[
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE
"""
),
'LICENSE': "Test license",
},
True,
), # with license
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = INVALID_LICENSE
"""
),
'LICENSE': "Test license",
},
False,
), # with an invalid license
(
{
'setup.cfg': DALS(
"""
"""
),
'LICENSE': "Test license",
},
True,
), # no license_file attribute, LICENSE auto-included
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE
"""
),
'MANIFEST.in': "exclude LICENSE",
'LICENSE': "Test license",
},
True,
), # manifest is overwritten by license_file
pytest.param(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICEN[CS]E*
"""
),
'LICENSE': "Test license",
},
True,
id="glob_pattern",
),
],
)
def test_setup_cfg_license_file(self, tmpdir_cwd, env, files, license_in_sources):
self._create_project()
path.build(files)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
sources_text = Path(egg_info_dir, "SOURCES.txt").read_text(encoding="utf-8")
if license_in_sources:
assert 'LICENSE' in sources_text
else:
assert 'LICENSE' not in sources_text
# for invalid license test
assert 'INVALID_LICENSE' not in sources_text
@pytest.mark.parametrize(
('files', 'incl_licenses', 'excl_licenses'),
[
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
LICENSE-ABC
LICENSE-XYZ
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
},
['LICENSE-ABC', 'LICENSE-XYZ'],
[],
), # with licenses
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files = LICENSE-ABC, LICENSE-XYZ
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
},
['LICENSE-ABC', 'LICENSE-XYZ'],
[],
), # with commas
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
LICENSE-ABC
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
},
['LICENSE-ABC'],
['LICENSE-XYZ'],
), # with one license
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
},
[],
['LICENSE-ABC', 'LICENSE-XYZ'],
), # empty
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files = LICENSE-XYZ
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
},
['LICENSE-XYZ'],
['LICENSE-ABC'],
), # on same line
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
LICENSE-ABC
INVALID_LICENSE
"""
),
'LICENSE-ABC': "Test license",
},
['LICENSE-ABC'],
['INVALID_LICENSE'],
), # with an invalid license
(
{
'setup.cfg': DALS(
"""
"""
),
'LICENSE': "Test license",
},
['LICENSE'],
[],
), # no license_files attribute, LICENSE auto-included
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files = LICENSE
"""
),
'MANIFEST.in': "exclude LICENSE",
'LICENSE': "Test license",
},
['LICENSE'],
[],
), # manifest is overwritten by license_files
(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
LICENSE-ABC
LICENSE-XYZ
"""
),
'MANIFEST.in': "exclude LICENSE-XYZ",
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
# manifest is overwritten by license_files
},
['LICENSE-ABC', 'LICENSE-XYZ'],
[],
),
pytest.param(
{
'setup.cfg': "",
'LICENSE-ABC': "ABC license",
'COPYING-ABC': "ABC copying",
'NOTICE-ABC': "ABC notice",
'AUTHORS-ABC': "ABC authors",
'LICENCE-XYZ': "XYZ license",
'LICENSE': "License",
'INVALID-LICENSE': "Invalid license",
},
[
'LICENSE-ABC',
'COPYING-ABC',
'NOTICE-ABC',
'AUTHORS-ABC',
'LICENCE-XYZ',
'LICENSE',
],
['INVALID-LICENSE'],
# ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
id="default_glob_patterns",
),
pytest.param(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
LICENSE*
"""
),
'LICENSE-ABC': "ABC license",
'NOTICE-XYZ': "XYZ notice",
},
['LICENSE-ABC'],
['NOTICE-XYZ'],
id="no_default_glob_patterns",
),
pytest.param(
{
'setup.cfg': DALS(
"""
[metadata]
license_files =
LICENSE-ABC
LICENSE*
"""
),
'LICENSE-ABC': "ABC license",
},
['LICENSE-ABC'],
[],
id="files_only_added_once",
),
pytest.param(
{
'setup.cfg': DALS(
"""
[metadata]
license_files = **/LICENSE
"""
),
'LICENSE': "ABC license",
'LICENSE-OTHER': "Don't include",
'vendor': {'LICENSE': "Vendor license"},
},
['LICENSE', 'vendor/LICENSE'],
['LICENSE-OTHER'],
id="recursive_glob",
),
],
)
def test_setup_cfg_license_files(
self, tmpdir_cwd, env, files, incl_licenses, excl_licenses
):
self._create_project()
path.build(files)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
sources_text = Path(egg_info_dir, "SOURCES.txt").read_text(encoding="utf-8")
sources_lines = [line.strip() for line in sources_text.splitlines()]
for lf in incl_licenses:
assert sources_lines.count(lf) == 1
for lf in excl_licenses:
assert sources_lines.count(lf) == 0
@pytest.mark.parametrize(
('files', 'incl_licenses', 'excl_licenses'),
[
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file =
license_files =
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
},
[],
['LICENSE-ABC', 'LICENSE-XYZ'],
), # both empty
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file =
LICENSE-ABC
LICENSE-XYZ
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-XYZ': "XYZ license",
# license_file is still singular
},
[],
['LICENSE-ABC', 'LICENSE-XYZ'],
),
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE-ABC
license_files =
LICENSE-XYZ
LICENSE-PQR
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-PQR': "PQR license",
'LICENSE-XYZ': "XYZ license",
},
['LICENSE-ABC', 'LICENSE-PQR', 'LICENSE-XYZ'],
[],
), # combined
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE-ABC
license_files =
LICENSE-ABC
LICENSE-XYZ
LICENSE-PQR
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-PQR': "PQR license",
'LICENSE-XYZ': "XYZ license",
# duplicate license
},
['LICENSE-ABC', 'LICENSE-PQR', 'LICENSE-XYZ'],
[],
),
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE-ABC
license_files =
LICENSE-XYZ
"""
),
'LICENSE-ABC': "ABC license",
'LICENSE-PQR': "PQR license",
'LICENSE-XYZ': "XYZ license",
# combined subset
},
['LICENSE-ABC', 'LICENSE-XYZ'],
['LICENSE-PQR'],
),
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE-ABC
license_files =
LICENSE-XYZ
LICENSE-PQR
"""
),
'LICENSE-PQR': "Test license",
# with invalid licenses
},
['LICENSE-PQR'],
['LICENSE-ABC', 'LICENSE-XYZ'],
),
(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE-ABC
license_files =
LICENSE-PQR
LICENSE-XYZ
"""
),
'MANIFEST.in': "exclude LICENSE-ABC\nexclude LICENSE-PQR",
'LICENSE-ABC': "ABC license",
'LICENSE-PQR': "PQR license",
'LICENSE-XYZ': "XYZ license",
# manifest is overwritten
},
['LICENSE-ABC', 'LICENSE-PQR', 'LICENSE-XYZ'],
[],
),
pytest.param(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE*
"""
),
'LICENSE-ABC': "ABC license",
'NOTICE-XYZ': "XYZ notice",
},
['LICENSE-ABC'],
['NOTICE-XYZ'],
id="no_default_glob_patterns",
),
pytest.param(
{
'setup.cfg': DALS(
"""
[metadata]
license_file = LICENSE*
license_files =
NOTICE*
"""
),
'LICENSE-ABC': "ABC license",
'NOTICE-ABC': "ABC notice",
'AUTHORS-ABC': "ABC authors",
},
['LICENSE-ABC', 'NOTICE-ABC'],
['AUTHORS-ABC'],
id="combined_glob_patterrns",
),
],
)
def test_setup_cfg_license_file_license_files(
self, tmpdir_cwd, env, files, incl_licenses, excl_licenses
):
self._create_project()
path.build(files)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
sources_text = Path(egg_info_dir, "SOURCES.txt").read_text(encoding="utf-8")
sources_lines = [line.strip() for line in sources_text.splitlines()]
for lf in incl_licenses:
assert sources_lines.count(lf) == 1
for lf in excl_licenses:
assert sources_lines.count(lf) == 0
def test_license_file_attr_pkg_info(self, tmpdir_cwd, env):
"""All matched license files should have a corresponding License-File."""
self._create_project()
path.build({
"setup.cfg": DALS(
"""
[metadata]
license_files =
NOTICE*
LICENSE*
**/LICENSE
"""
),
"LICENSE-ABC": "ABC license",
"LICENSE-XYZ": "XYZ license",
"NOTICE": "included",
"IGNORE": "not include",
"vendor": {'LICENSE': "Vendor license"},
})
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
license_file_lines = [
line for line in pkg_info_lines if line.startswith('License-File:')
]
# Only 'NOTICE', LICENSE-ABC', and 'LICENSE-XYZ' should have been matched
# Also assert that order from license_files is keeped
assert len(license_file_lines) == 4
assert "License-File: NOTICE" == license_file_lines[0]
assert "License-File: LICENSE-ABC" in license_file_lines[1:]
assert "License-File: LICENSE-XYZ" in license_file_lines[1:]
assert "License-File: vendor/LICENSE" in license_file_lines[3]
def test_metadata_version(self, tmpdir_cwd, env):
"""Make sure latest metadata version is used by default."""
self._setup_script_with_requires("")
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
# Update metadata version if changed
assert self._extract_mv_version(pkg_info_lines) == (2, 4)
def test_long_description_content_type(self, tmpdir_cwd, env):
# Test that specifying a `long_description_content_type` keyword arg to
# the `setup` function results in writing a `Description-Content-Type`
# line to the `PKG-INFO` file in the `<distribution>.egg-info`
# directory.
# `Description-Content-Type` is described at
# https://github.com/pypa/python-packaging-user-guide/pull/258
self._setup_script_with_requires(
"""long_description_content_type='text/markdown',"""
)
environ = os.environ.copy().update(
HOME=env.paths['home'],
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
env=environ,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
expected_line = 'Description-Content-Type: text/markdown'
assert expected_line in pkg_info_lines
assert 'Metadata-Version: 2.4' in pkg_info_lines
def test_long_description(self, tmpdir_cwd, env):
# Test that specifying `long_description` and `long_description_content_type`
# keyword args to the `setup` function results in writing
# the description in the message payload of the `PKG-INFO` file
# in the `<distribution>.egg-info` directory.
self._setup_script_with_requires(
"long_description='This is a long description\\nover multiple lines',"
"long_description_content_type='text/markdown',"
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
assert 'Metadata-Version: 2.4' in pkg_info_lines
assert '' == pkg_info_lines[-1] # last line should be empty
long_desc_lines = pkg_info_lines[pkg_info_lines.index('') :]
assert 'This is a long description' in long_desc_lines
assert 'over multiple lines' in long_desc_lines
def test_project_urls(self, tmpdir_cwd, env):
# Test that specifying a `project_urls` dict to the `setup`
# function results in writing multiple `Project-URL` lines to
# the `PKG-INFO` file in the `<distribution>.egg-info`
# directory.
# `Project-URL` is described at https://packaging.python.org
# /specifications/core-metadata/#project-url-multiple-use
self._setup_script_with_requires(
"""project_urls={
'Link One': 'https://example.com/one/',
'Link Two': 'https://example.com/two/',
},"""
)
environ = os.environ.copy().update(
HOME=env.paths['home'],
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
env=environ,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
expected_line = 'Project-URL: Link One, https://example.com/one/'
assert expected_line in pkg_info_lines
expected_line = 'Project-URL: Link Two, https://example.com/two/'
assert expected_line in pkg_info_lines
assert self._extract_mv_version(pkg_info_lines) >= (1, 2)
def test_license(self, tmpdir_cwd, env):
"""Test single line license."""
self._setup_script_with_requires("license='MIT',")
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
assert 'License: MIT' in pkg_info_lines
def test_license_escape(self, tmpdir_cwd, env):
"""Test license is escaped correctly if longer than one line."""
self._setup_script_with_requires(
"license='This is a long license text \\nover multiple lines',"
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
assert 'License: This is a long license text ' in pkg_info_lines
assert ' over multiple lines' in pkg_info_lines
assert 'text \n over multiple' in '\n'.join(pkg_info_lines)
def test_python_requires_egg_info(self, tmpdir_cwd, env):
self._setup_script_with_requires("""python_requires='>=2.7.12',""")
environ = os.environ.copy().update(
HOME=env.paths['home'],
)
environment.run_setup_py(
cmd=['egg_info'],
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
env=environ,
)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
assert 'Requires-Python: >=2.7.12' in pkg_info_lines
assert self._extract_mv_version(pkg_info_lines) >= (1, 2)
def test_manifest_maker_warning_suppression(self):
fixtures = [
"standard file not found: should have one of foo.py, bar.py",
"standard file 'setup.py' not found",
]
for msg in fixtures:
assert manifest_maker._should_suppress_warning(msg)
def test_egg_info_includes_setup_py(self, tmpdir_cwd):
self._create_project()
dist = Distribution({"name": "foo", "version": "0.0.1"})
dist.script_name = "non_setup.py"
egg_info_instance = egg_info(dist)
egg_info_instance.finalize_options()
egg_info_instance.run()
assert 'setup.py' in egg_info_instance.filelist.files
with open(egg_info_instance.egg_info + "/SOURCES.txt", encoding="utf-8") as f:
sources = f.read().split('\n')
assert 'setup.py' in sources
def _run_egg_info_command(self, tmpdir_cwd, env, cmd=None, output=None):
environ = os.environ.copy().update(
HOME=env.paths['home'],
)
if cmd is None:
cmd = [
'egg_info',
]
code, data = environment.run_setup_py(
cmd=cmd,
pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
data_stream=1,
env=environ,
)
assert not code, data
if output:
assert output in data
def test_egg_info_tag_only_once(self, tmpdir_cwd, env):
self._create_project()
path.build({
'setup.cfg': DALS(
"""
[egg_info]
tag_build = dev
tag_date = 0
tag_svn_revision = 0
"""
),
})
self._run_egg_info_command(tmpdir_cwd, env)
egg_info_dir = os.path.join('.', 'foo.egg-info')
with open(os.path.join(egg_info_dir, 'PKG-INFO'), encoding="utf-8") as fp:
pkg_info_lines = fp.read().split('\n')
assert 'Version: 0.0.0.dev0' in pkg_info_lines
| TestEggInfo |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_model.py | {
"start": 3175,
"end": 7223
} | class ____:
@patch.object(SageMakerHook, "create_model_package_group")
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_execute(self, conn_mock, create_group_mock):
image = "257758044811.dkr.ecr.us-east-2.amazonaws.com/sagemaker-xgboost:1.2-1"
model = "s3://your-bucket-name/model.tar.gz"
group = "group-name"
op = SageMakerRegisterModelVersionOperator(
task_id="test",
image_uri=image,
model_url=model,
package_group_name=group,
model_approval=ApprovalStatus.APPROVED,
)
op.execute(None)
create_group_mock.assert_called_once_with("group-name", "")
conn_mock().create_model_package.assert_called_once()
args_dict = conn_mock().create_model_package.call_args.kwargs
assert args_dict["InferenceSpecification"]["Containers"][0]["Image"] == image
assert args_dict["InferenceSpecification"]["Containers"][0]["ModelDataUrl"] == model
assert args_dict["ModelPackageGroupName"] == group
assert args_dict["ModelApprovalStatus"] == "Approved"
@pytest.mark.parametrize("group_created", [True, False])
@patch.object(SageMakerHook, "create_model_package_group")
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_group_deleted_if_error_when_adding_model(self, conn_mock, create_group_mock, group_created):
group = "group-name"
op = SageMakerRegisterModelVersionOperator(
task_id="test",
image_uri="257758044811.dkr.ecr.us-east-2.amazonaws.com/sagemaker-xgboost:1.2-1",
model_url="s3://your-bucket-name/model.tar.gz",
package_group_name=group,
model_approval=ApprovalStatus.APPROVED,
)
create_group_mock.return_value = group_created
conn_mock().create_model_package.side_effect = ClientError(
error_response={"Error": {"Code": "ohno"}}, operation_name="empty"
)
with pytest.raises(ClientError):
op.execute(None)
if group_created:
# delete group if it was created and there was an error in the second step (create model package)
conn_mock().delete_model_package_group.assert_called_once_with(ModelPackageGroupName=group)
else:
# if the group already existed, we don't want to delete it in case of error on second step
conn_mock().delete_model_package_group.assert_not_called()
@patch.object(SageMakerHook, "create_model_package_group")
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_can_override_parameters_using_extras(self, conn_mock, _):
response_type = ["test/test"]
op = SageMakerRegisterModelVersionOperator(
task_id="test",
image_uri="257758044811.dkr.ecr.us-east-2.amazonaws.com/sagemaker-xgboost:1.2-1",
model_url="s3://your-bucket-name/model.tar.gz",
package_group_name="group-name",
extras={"InferenceSpecification": {"SupportedResponseMIMETypes": response_type}},
)
op.execute(None)
conn_mock().create_model_package.assert_called_once()
args_dict = conn_mock().create_model_package.call_args.kwargs
assert args_dict["InferenceSpecification"]["SupportedResponseMIMETypes"] == response_type
def test_template_fields(self):
response_type = ["test/test"]
op = SageMakerRegisterModelVersionOperator(
task_id="test",
image_uri="257758044811.dkr.ecr.us-east-2.amazonaws.com/sagemaker-xgboost:1.2-1",
model_url="s3://your-bucket-name/model.tar.gz",
package_group_name="group-name",
extras={"InferenceSpecification": {"SupportedResponseMIMETypes": response_type}},
)
validate_template_fields(op)
| TestSageMakerRegisterModelVersionOperator |
python | huggingface__transformers | src/transformers/models/aimv2/modular_aimv2.py | {
"start": 24631,
"end": 28128
} | class ____(CLIPModel):
_supports_flash_attn = True
def __init__(self, config: Aimv2Config):
PreTrainedModel.__init__(self, config)
self.projection_dim = config.projection_dim
self.vision_embed_dim = config.vision_config.hidden_size
self.text_embed_dim = config.text_config.hidden_size
self.vision_model = Aimv2VisionModel._from_config(config.vision_config)
self.text_model = Aimv2TextModel._from_config(config.text_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
self.max_log_logit_scale = math.log(config.max_logit_scale)
self.post_init()
@auto_docstring
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Aimv2Output:
r"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Aimv2Model
>>> model = Aimv2Model.from_pretrained("apple/aimv2-large-patch14-224-lit")
>>> processor = AutoProcessor.from_pretrained("apple/aimv2-large-patch14-224-lit")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
**kwargs,
)
text_outputs: BaseModelOutputWithPooling = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
**kwargs,
)
image_embeds = vision_outputs.pooler_output
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs.pooler_output
text_embeds = self.text_projection(text_embeds)
# normalized features
image_embeds = image_embeds / _get_vector_norm(image_embeds)
text_embeds = text_embeds / _get_vector_norm(text_embeds)
logit_scale = self.logit_scale.clamp(0.0, self.max_log_logit_scale).exp().to(text_embeds.device)
logits_per_text = (logit_scale * text_embeds) @ image_embeds.t()
logits_per_image = logits_per_text.t()
return Aimv2Output(
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
__all__ = [
"Aimv2Config",
"Aimv2VisionConfig",
"Aimv2TextConfig",
"Aimv2VisionModel",
"Aimv2Model",
"Aimv2PreTrainedModel",
"Aimv2TextModel",
]
| Aimv2Model |
python | doocs__leetcode | solution/2300-2399/2384.Largest Palindromic Number/Solution.py | {
"start": 0,
"end": 475
} | class ____:
def largestPalindromic(self, num: str) -> str:
cnt = Counter(num)
ans = ''
for i in range(9, -1, -1):
v = str(i)
if cnt[v] % 2:
ans = v
cnt[v] -= 1
break
for i in range(10):
v = str(i)
if cnt[v]:
cnt[v] //= 2
s = cnt[v] * v
ans = s + ans + s
return ans.strip('0') or '0'
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 165142,
"end": 167144
} | class ____(Request):
"""
Get histogram data of all the scalar metrics and variants in the task
:param task: Task ID
:type task: str
:param metric:
:type metric: str
:param variant:
:type variant: str
"""
_service = "events"
_action = "vector_metrics_iter_histogram"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"metric": {"description": "", "type": "string"},
"task": {"description": "Task ID", "type": "string"},
"variant": {"description": "", "type": "string"},
},
"required": ["task", "metric", "variant"],
"type": "object",
}
def __init__(self, task: str, metric: str, variant: str, **kwargs: Any) -> None:
super(VectorMetricsIterHistogramRequest, self).__init__(**kwargs)
self.task = task
self.metric = metric
self.variant = variant
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metric")
def metric(self) -> str:
return self._property_metric
@metric.setter
def metric(self, value: str) -> None:
if value is None:
self._property_metric = None
return
self.assert_isinstance(value, "metric", six.string_types)
self._property_metric = value
@schema_property("variant")
def variant(self) -> str:
return self._property_variant
@variant.setter
def variant(self, value: str) -> None:
if value is None:
self._property_variant = None
return
self.assert_isinstance(value, "variant", six.string_types)
self._property_variant = value
| VectorMetricsIterHistogramRequest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/mapper.py | {
"start": 156646,
"end": 169199
} | class ____(Exception):
pass
def configure_mappers() -> None:
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far across all :class:`_orm.registry`
collections.
The configure step is used to reconcile and initialize the
:func:`_orm.relationship` linkages between mapped classes, as well as to
invoke configuration events such as the
:meth:`_orm.MapperEvents.before_configured` and
:meth:`_orm.MapperEvents.after_configured`, which may be used by ORM
extensions or user-defined extension hooks.
Mapper configuration is normally invoked automatically, the first time
mappings from a particular :class:`_orm.registry` are used, as well as
whenever mappings are used and additional not-yet-configured mappers have
been constructed. The automatic configuration process however is local only
to the :class:`_orm.registry` involving the target mapper and any related
:class:`_orm.registry` objects which it may depend on; this is
equivalent to invoking the :meth:`_orm.registry.configure` method
on a particular :class:`_orm.registry`.
By contrast, the :func:`_orm.configure_mappers` function will invoke the
configuration process on all :class:`_orm.registry` objects that
exist in memory, and may be useful for scenarios where many individual
:class:`_orm.registry` objects that are nonetheless interrelated are
in use.
.. versionchanged:: 1.4
As of SQLAlchemy 1.4.0b2, this function works on a
per-:class:`_orm.registry` basis, locating all :class:`_orm.registry`
objects present and invoking the :meth:`_orm.registry.configure` method
on each. The :meth:`_orm.registry.configure` method may be preferred to
limit the configuration of mappers to those local to a particular
:class:`_orm.registry` and/or declarative base class.
Points at which automatic configuration is invoked include when a mapped
class is instantiated into an instance, as well as when ORM queries
are emitted using :meth:`.Session.query` or :meth:`_orm.Session.execute`
with an ORM-enabled statement.
The mapper configure process, whether invoked by
:func:`_orm.configure_mappers` or from :meth:`_orm.registry.configure`,
provides several event hooks that can be used to augment the mapper
configuration step. These hooks include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` or :meth:`_orm.registry.configure` does any
work; this can be used to establish additional options, properties, or
related mappings before the operation proceeds.
* :meth:`.RegistryEvents.before_configured` - Like
:meth:`.MapperEvents.before_configured`, but local to a specific
:class:`_orm.registry`.
.. versionadded:: 2.1 - added :meth:`.RegistryEvents.before_configured`
* :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`_orm.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` or :meth:`_orm.registry.configure` is
complete; at this stage, all :class:`_orm.Mapper` objects that fall
within the scope of the configuration operation will be fully configured.
Note that the calling application may still have other mappings that
haven't been produced yet, such as if they are in modules as yet
unimported, and may also have mappings that are still to be configured,
if they are in other :class:`_orm.registry` collections not part of the
current scope of configuration.
* :meth:`.RegistryEvents.after_configured` - Like
:meth:`.MapperEvents.after_configured`, but local to a specific
:class:`_orm.registry`.
.. versionadded:: 2.1 - added :meth:`.RegistryEvents.after_configured`
"""
_configure_registries(_all_registries(), cascade=True)
def _configure_registries(
registries: Set[_RegistryType], cascade: bool
) -> None:
for reg in registries:
if reg._new_mappers:
break
else:
return
with _CONFIGURE_MUTEX:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
for reg in registries:
if reg._new_mappers:
break
else:
return
Mapper.dispatch._for_class(Mapper).before_configured() # type: ignore # noqa: E501
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
registries_configured = list(
_do_configure_registries(registries, cascade)
)
finally:
_already_compiling = False
for reg in registries_configured:
reg.dispatch.after_configured(reg)
Mapper.dispatch._for_class(Mapper).after_configured() # type: ignore
@util.preload_module("sqlalchemy.orm.decl_api")
def _do_configure_registries(
registries: Set[_RegistryType], cascade: bool
) -> Iterator[registry]:
registry = util.preloaded.orm_decl_api.registry
orig = set(registries)
for reg in registry._recurse_with_dependencies(registries):
if reg._new_mappers:
reg.dispatch.before_configured(reg)
has_skip = False
for mapper in reg._mappers_to_configure():
run_configure = None
for fn in mapper.dispatch.before_mapper_configured:
run_configure = fn(mapper, mapper.class_)
if run_configure is EXT_SKIP:
has_skip = True
break
if run_configure is EXT_SKIP:
continue
if getattr(mapper, "_configure_failed", False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Triggering mapper: '%s'. "
"Original exception was: %s"
% (mapper, mapper._configure_failed)
)
e._configure_failed = mapper._configure_failed # type: ignore
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(mapper, mapper.class_)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, "_configure_failed"):
mapper._configure_failed = exc
raise
if reg._new_mappers:
yield reg
if not has_skip:
reg._new_mappers = False
if not cascade and reg._dependencies.difference(orig):
raise sa_exc.InvalidRequestError(
"configure was called with cascade=False but "
"additional registries remain"
)
@util.preload_module("sqlalchemy.orm.decl_api")
def _dispose_registries(registries: Set[_RegistryType], cascade: bool) -> None:
registry = util.preloaded.orm_decl_api.registry
orig = set(registries)
for reg in registry._recurse_with_dependents(registries):
if not cascade and reg._dependents.difference(orig):
raise sa_exc.InvalidRequestError(
"Registry has dependent registries that are not disposed; "
"pass cascade=True to clear these also"
)
while reg._managers:
try:
manager, _ = reg._managers.popitem()
except KeyError:
# guard against race between while and popitem
pass
else:
reg._dispose_manager_and_mapper(manager)
reg._dependents.clear()
for dep in reg._dependencies:
dep._dependents.discard(reg)
reg._dependencies.clear()
# this wasn't done in the 1.3 clear_mappers() and in fact it
# was a bug, as it could cause configure_mappers() to invoke
# the "before_configured" event even though mappers had all been
# disposed.
reg._new_mappers = False
def reconstructor(fn: _Fn) -> _Fn:
"""Decorate a method as the 'reconstructor' hook.
Designates a single method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
.. tip::
The :func:`_orm.reconstructor` decorator makes use of the
:meth:`_orm.InstanceEvents.load` event hook, which can be
used directly.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
.. seealso::
:meth:`.InstanceEvents.load`
"""
fn.__sa_reconstructor__ = True # type: ignore[attr-defined]
return fn
def validates(
*names: str, include_removes: bool = False, include_backrefs: bool = True
) -> Callable[[_Fn], _Fn]:
r"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionchanged:: 2.0.16 This paramter inadvertently defaulted to
``False`` for releases 2.0.0 through 2.0.15. Its correct default
of ``True`` is restored in 2.0.16.
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
def wrap(fn: _Fn) -> _Fn:
fn.__sa_validators__ = names # type: ignore[attr-defined]
fn.__sa_validation_opts__ = { # type: ignore[attr-defined]
"include_removes": include_removes,
"include_backrefs": include_backrefs,
}
return fn
return wrap
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.mapper
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.mapper
if instrumenting_mapper:
instrumenting_mapper._check_configure()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
| _OptGetColumnsNotAvailable |
python | getsentry__sentry | src/sentry/models/organizationmemberteamreplica.py | {
"start": 355,
"end": 1349
} | class ____(BaseModel):
"""
Identifies relationships between organization members and the teams they are on.
"""
__relocation_scope__ = RelocationScope.Excluded
id = BoundedAutoField(primary_key=True)
team_id = HybridCloudForeignKey("sentry.Team", on_delete="CASCADE")
organization_id = HybridCloudForeignKey("sentry.Organization", on_delete="CASCADE")
organizationmember_id = BoundedBigIntegerField(db_index=True)
organizationmemberteam_id = BoundedBigIntegerField(db_index=True)
is_active = models.BooleanField(db_default=True)
role = models.CharField(max_length=32, null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_organizationmember_teamsreplica"
unique_together = (("team_id", "organizationmember_id", "organization_id"),)
__repr__ = sane_repr("team_id", "organizationmember_id", "organization_id")
| OrganizationMemberTeamReplica |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_checkbox01.py | {
"start": 315,
"end": 1625
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("checkbox01.xlsx")
def test_create_file_with_insert_checkbox(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox("E9", False)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_insert_checkbox_and_manual_format(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"checkbox": True})
worksheet.insert_checkbox("E9", False, cell_format)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_boolean_and_format(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"checkbox": True})
worksheet.write("E9", False, cell_format)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 49124,
"end": 49707
} | class ____:
xlCubeAttribute = 4 # from enum XlCubeFieldSubType
xlCubeCalculatedMeasure = 5 # from enum XlCubeFieldSubType
xlCubeHierarchy = 1 # from enum XlCubeFieldSubType
xlCubeKPIGoal = 7 # from enum XlCubeFieldSubType
xlCubeKPIStatus = 8 # from enum XlCubeFieldSubType
xlCubeKPITrend = 9 # from enum XlCubeFieldSubType
xlCubeKPIValue = 6 # from enum XlCubeFieldSubType
xlCubeKPIWeight = 10 # from enum XlCubeFieldSubType
xlCubeMeasure = 2 # from enum XlCubeFieldSubType
xlCubeSet = 3 # from enum XlCubeFieldSubType
| CubeFieldSubType |
python | pdm-project__pdm | src/pdm/builders/sdist.py | {
"start": 102,
"end": 611
} | class ____(EnvBuilder):
"""Build sdist in isolated env with managed Python."""
@wrap_error
def build(self, out_dir: str, metadata_directory: str | None = None) -> str:
if self.isolated:
self.install(self._requires, shared=True)
requires = self._hook.get_requires_for_build_sdist(self.config_settings)
self.install(requires)
filename = self._hook.build_sdist(out_dir, self.config_settings)
return os.path.join(out_dir, filename)
| SdistBuilder |
python | pytorch__pytorch | test/functorch/test_vmap.py | {
"start": 194136,
"end": 225965
} | class ____(TestCase):
def _reset_random(self, generator, orig_state, use_generator, seed):
return (
generator.set_state(orig_state)
if use_generator
else torch.manual_seed(seed)
)
def _get_image(self, batched_input, batch_size, device):
if batched_input == "first":
return torch.ones([batch_size, 3, 3, 14, 14], device=device)
if batched_input == "last":
return torch.ones([3, 3, 14, 14, batch_size], device=device)
assert batched_input == "none"
return torch.ones([3, 3, 14, 14], device=device)
def _assert_all_slices_equal(self, tensor):
expected = tensor[0]
self.assertTrue((tensor == expected).all())
def _assert_all_slices_unique(self, tensor):
B0 = tensor.shape[0]
slices_equal = vmap(vmap(lambda x, y: (x == y).all(), (0, None)), (None, 0))(
tensor, tensor
)
assert slices_equal.shape == (B0, B0)
slices_equal.diagonal().zero_()
self.assertEqual(slices_equal, torch.zeros_like(slices_equal))
def _assert_throws_in_error_mode(self, fn, args, in_dims):
with self.assertRaisesRegex(
RuntimeError, r"called random operation while in randomness error mode"
):
vmap(fn, in_dims=in_dims, randomness="error")(*args)
def _assert_throws_in_different_mode_inplace(self, fn, args, in_dims):
with self.assertRaisesRegex(
RuntimeError, r"different inplace randomness on an unbatched tensor"
):
vmap(fn, in_dims=in_dims, randomness="different")(*args)
def _assert_throws_in_same_mode_batched(self, fn, args, in_dims):
with self.assertRaisesRegex(
RuntimeError,
r"Vmap does not currently support same randomness with a batched tensor input",
):
vmap(fn, in_dims=in_dims, randomness="same")(*args)
def _in_dims(self, *batched_strings):
def get_in_dim(batched_string):
if batched_string == "first":
return 0
if batched_string == "last":
return -1
assert batched_string == "none"
return None
batched_strings = batched_strings + (
"first",
) # for the always batched as first dim dummy argument
return tuple(get_in_dim(batched_string) for batched_string in batched_strings)
@parametrize("randomness", ["same", "different", "error"])
@parametrize("use_generator", [True, False])
def test_factory_ops(self, device, randomness, use_generator):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = (
{"device": device, "generator": generator}
if use_generator
else {"device": device}
)
ops = [
lambda _, shape: torch.randn(shape, **kwargs),
lambda _, shape: torch.rand(shape, **kwargs),
lambda _, shape: torch.randint(100, shape, **kwargs),
lambda _, shape: torch.randint(5, 100, shape, **kwargs),
lambda _, shape: torch.normal(0.0, 1.0, shape, **kwargs),
]
B0 = 4
shape = (3, 3)
seed = 1234567
for op in ops:
passed = torch.randn(B0, device=device)
if randomness == "error":
self._assert_throws_in_error_mode(
op, (passed, shape), in_dims=(0, None)
)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=(0, None), randomness=randomness)(
passed, shape
)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
expected = op(passed, [B0, *shape])
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
expected = op(passed, shape)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize("randomness", ["same", "different", "error"])
@parametrize("use_generator", [True, False])
def test_randperm(self, device, randomness, use_generator):
# needs a special case because randperm doesn't take a batch size
B0 = 4
seed = 1234567
passed = torch.randn(B0, device=device)
torch.manual_seed(seed)
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = (
{"device": device, "generator": generator}
if use_generator
else {"device": device}
)
if randomness == "error":
with self.assertRaisesRegex(
RuntimeError, r"called random operation while in randomness error mode"
):
vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(
passed
)
return
vmap_result = vmap(
lambda _: torch.randperm(10, **kwargs), randomness=randomness
)(passed)
generator = generator.set_state(orig_state)
torch.manual_seed(seed)
if randomness == "different":
for i in range(B0):
expected = torch.randperm(10, **kwargs)
# RNG differs between eager and via dynamo trace on CUDA
if TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda":
self._assert_all_slices_unique(vmap_result)
else:
self.assertEqual(vmap_result[i], expected)
else:
expected = torch.randperm(10, **kwargs)
# RNG differs between eager and via dynamo trace on CUDA
if TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda":
self._assert_all_slices_equal(vmap_result)
else:
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
def test_dropout(self, device, randomness, batched_input):
def op(t, ignored):
return torch.nn.functional.dropout(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
if randomness == "error":
with self.assertRaisesRegex(
RuntimeError, r"called random operation while in randomness error mode"
):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
passed, always_batched
)
# Check that the randomness is within bounds...
# ideally this is close to 0.5
p_estimate = vmap_result.mean() / 2
self.assertTrue(p_estimate < 0.75)
self.assertTrue(p_estimate > 0.25)
if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
def test_alpha_dropout(self, device, randomness, batched_input):
def op(t, ignored):
return torch.nn.functional.alpha_dropout(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
if randomness == "error":
with self.assertRaisesRegex(
RuntimeError, r"called random operation while in randomness error mode"
):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
# I have no clue how to actually test correctness of alpha dropout because the docs
# seem wrong: https://github.com/pytorch/pytorch/issues/74004
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
passed, always_batched
)
if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
@parametrize("dim", [2, 3])
def test_feature_dropout(self, device, randomness, batched_input, dim):
def op(t, ignored):
f = (
torch.nn.functional.dropout2d
if dim == 2
else torch.nn.functional.dropout3d
)
return f(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
if dim == 3:
unsqueeze_dim = -2 if batched_input == "last" else -1
passed = passed.unsqueeze(unsqueeze_dim)
in_dims = self._in_dims(batched_input)
if randomness == "error":
with self.assertRaisesRegex(
RuntimeError, r"called random operation while in randomness error mode"
):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
passed, always_batched
)
# Check the "feature" pattern
dims = [-1, -2] if dim == 2 else [-1, -2, -3]
planes_numel = (
2
* vmap_result.numel()
/ (vmap_result.shape[0] * vmap_result.shape[1] * vmap_result.shape[2])
)
planes = vmap_result.sum(dims)
result = (planes == 0) ^ (planes == planes_numel)
self.assertEqual(result, torch.ones_like(result, dtype=torch.bool))
if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
def test_feature_alpha_dropout(self, device, randomness, batched_input):
def op(t, ignored):
return torch.nn.functional.feature_alpha_dropout(
torch.ones_like(t), training=True
)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
unsqueeze_dim = -2 if batched_input == "last" else -1
passed = passed.unsqueeze(unsqueeze_dim)
in_dims = self._in_dims(batched_input)
if randomness == "error":
with self.assertRaisesRegex(
RuntimeError, r"called random operation while in randomness error mode"
):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
passed, always_batched
)
# I have no clue how to actually test correctness of alpha dropout because the docs
# seem wrong: https://github.com/pytorch/pytorch/issues/74004
# Check the "feature" pattern
dims = [-1, -2, -3]
planes = vmap_result.sum(dims)
max_elt = planes.max()
min_elt = planes.min()
result = (planes == min_elt) ^ (planes == max_elt)
self.assertEqual(result, torch.ones_like(result, dtype=torch.bool))
if randomness == "different":
self._assert_all_slices_unique(vmap_result)
return
assert randomness == "same"
self._assert_all_slices_equal(vmap_result)
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
def test_like_functions(self, device, randomness, batched_input):
seed = 1234567
supported_ops = [
lambda t, _: torch.randint_like(t, 20),
lambda t, _: torch.randint_like(t, 0, 20),
lambda t, _: torch.rand_like(t),
lambda t, _: torch.randn_like(t),
]
B0 = 4
for op in supported_ops:
always_batched = torch.randn(B0)
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
if randomness == "error":
with self.assertRaisesRegex(
RuntimeError,
r"called random operation while in randomness error mode",
):
vmap(op, in_dims=in_dims, randomness=randomness)(
passed, always_batched
)
return
torch.manual_seed(seed)
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(
passed, always_batched
)
torch.manual_seed(seed)
if batched_input == "last":
passed = passed.movedim(-1, 0)
if randomness == "different":
if batched_input == "none":
passed = passed.expand(B0, *passed.shape)
expected = op(passed, 0)
self._assert_all_slices_unique(vmap_result)
# RNG differs between eager and via dynamo trace on CUDA
if not (TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda"):
self.assertEqual(expected, vmap_result)
return
assert randomness == "same"
if batched_input != "none":
passed = passed[0]
expected = op(passed, 0)
self._assert_all_slices_equal(vmap_result)
# RNG differs between eager and via dynamo trace on CUDA
if not (TEST_WITH_TORCHDYNAMO and torch.device(device).type == "cuda"):
for i in range(B0):
self.assertEqual(expected, vmap_result[i])
@parametrize("use_generator", [True, False])
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
def test_random_unary_inplace(
self, device, use_generator, randomness, batched_input
):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {"generator": generator} if use_generator else {}
ops = [
lambda t, _: t.random_(**kwargs),
lambda t, _: t.random_(100, **kwargs),
lambda t, _: t.random_(-5, 100, **kwargs),
lambda t, _: t.normal_(**kwargs),
lambda t, _: t.bernoulli_(**kwargs),
lambda t, _: t.cauchy_(**kwargs),
lambda t, _: t.exponential_(**kwargs),
lambda t, _: t.geometric_(0.5, **kwargs),
lambda t, _: t.log_normal_(**kwargs),
lambda t, _: t.uniform_(**kwargs),
]
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input)
for op in ops:
# because of in place updates, clone inputs
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
passed_expected = passed.clone()
if randomness == "error":
self._assert_throws_in_error_mode(
op, (passed, always_batched), in_dims=in_dims
)
return
if randomness == "different" and batched_input == "none":
self._assert_throws_in_different_mode_inplace(
op, (passed, always_batched), in_dims=in_dims
)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
passed, always_batched
)
if batched_input == "last":
passed_expected = passed_expected.movedim(-1, 0)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
expected = op(passed_expected, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
if batched_input != "none":
passed_expected = passed_expected[
0
].clone() # bug in pytorch, normal_ on views doesn't work
expected = op(passed_expected, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize("use_generator", [True, False])
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
@parametrize("batched_probability", ["first", "last", "none"])
def test_bernoulli_in_place(
self, device, use_generator, randomness, batched_input, batched_probability
):
B0 = 4
seed = 1234567
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {"generator": generator} if use_generator else {}
in_dims = self._in_dims(batched_input, batched_probability)
def op(t, p, ignored):
return t.bernoulli_(p, **kwargs)
# because of in place updates, clone inputs
always_batched = torch.randn(B0, device=device)
input = self._get_image(batched_input, B0, device)
input_expected = input.clone()
probability = self._get_image(batched_probability, B0, device) - 0.5
if randomness == "error":
self._assert_throws_in_error_mode(
op, (input, probability, always_batched), in_dims=in_dims
)
return
if randomness == "same" and batched_probability != "none":
self._assert_throws_in_same_mode_batched(
op, (input, probability, always_batched), in_dims=in_dims
)
return
if batched_input == "none" and batched_probability != "none":
regex = r"there exists a Tensor `other` in extra_args that has more elements than `self`"
with self.assertRaisesRegex(RuntimeError, regex):
vmap(op, in_dims=in_dims, randomness=randomness)(
input, probability, always_batched
)
return
if randomness == "different" and batched_input == "none":
self._assert_throws_in_different_mode_inplace(
op, (input, probability, always_batched), in_dims=in_dims
)
return
self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
input, probability, always_batched
)
self._reset_random(generator, orig_state, use_generator, seed)
if batched_input == "last":
input_expected = input_expected.movedim(-1, 0)
if batched_probability == "last":
probability = probability.movedim(-1, 0)
if randomness == "different":
expected = op(input_expected, probability, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
if batched_input != "none":
input_expected = input_expected[0]
expected = op(input_expected, probability, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize("use_generator", [True, False])
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
@parametrize("batched_other", ["first", "last", "none"])
def test_random_binary_out_of_place(
self, device, use_generator, randomness, batched_input, batched_other
):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {"generator": generator} if use_generator else {}
ops = [
lambda t, o, _: torch.normal(t, o, **kwargs),
lambda t, o, _: torch.binomial(t, (o - 0.5), **kwargs),
]
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input, batched_other)
for op in ops:
always_batched = torch.randn(B0, device=device)
input = self._get_image(batched_input, B0, device)
other = self._get_image(batched_other, B0, device)
if randomness == "error":
self._assert_throws_in_error_mode(
op, (input, other, always_batched), in_dims=in_dims
)
return
if randomness == "same" and (
batched_input != "none" or batched_other != "none"
):
self._assert_throws_in_same_mode_batched(
op, (input, other, always_batched), in_dims=in_dims
)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
input, other, always_batched
)
if batched_input == "last":
input = input.movedim(-1, 0)
if batched_other == "last":
other = other.movedim(-1, 0)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
if batched_input == "none":
input = input.expand(B0, *input.shape)
expected = op(input, other, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
assert batched_input == "none" and batched_other == "none"
expected = op(input, other, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize("use_generator", [True, False])
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_input", ["first", "last", "none"])
def test_random_unary_out_of_place(
self, device, use_generator, randomness, batched_input
):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {"generator": generator} if use_generator else {}
ops = [
lambda t, _: torch.normal(0.0, torch.abs(t), **kwargs),
lambda t, _: torch.normal(t, 1.0, **kwargs),
lambda t, _: torch.bernoulli(t - 0.5, **kwargs),
lambda t, _: torch.bernoulli(t, 0.5, **kwargs),
lambda t, _: torch._standard_gamma(t, **kwargs),
lambda t, _: torch._sample_dirichlet(t, **kwargs),
lambda t, _: torch.poisson(t, **kwargs),
]
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input)
for op in ops:
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
if randomness == "error":
self._assert_throws_in_error_mode(
op, (passed, always_batched), in_dims=in_dims
)
return
if randomness == "same" and batched_input != "none":
self._assert_throws_in_same_mode_batched(
op, (passed, always_batched), in_dims=in_dims
)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
passed, always_batched
)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
if batched_input == "none":
passed = passed.expand(B0, *passed.shape)
if batched_input == "last":
passed = passed.movedim(-1, 0)
expected = op(passed, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
expected = op(passed, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize("use_generator", [True, False])
@parametrize("randomness", ["error", "same", "different"])
@parametrize("batched_call", [True, False])
@parametrize("batched_input", ["first", "last", "none"])
def test_multinomial(
    self, device, use_generator, randomness, batched_call, batched_input
):
    # torch.multinomial only accepts 1-D or 2-D input, so the test image must
    # be flattened down to the layout multinomial expects before vmapping.
    def flatten_input(input, batch_call, batch_location):
        # Decide the rank the flattened tensor must have:
        if batch_call and batch_location != "none":
            final_size = 3  # [B0, B, N]
        elif not batch_call and batch_location == "none":
            final_size = 1  # [N]
        else:
            final_size = 2  # [B0, N] or [B, N]
        # Flatten everything after the leading batch dim(s) into one axis.
        start_idx = final_size - 1
        end_idx = -1
        if batch_location == "last":
            # The vmap dim sits at the end, so flatten one dim earlier and
            # keep the trailing dim intact.
            start_idx -= 1
            end_idx -= (
                1  # gets to correct final size because using negative indices
            )
        ret = input.flatten(start_idx, end_idx)
        assert ret.dim() == final_size
        return ret

    def op(input, _):
        # `kwargs` is closed over; it is assigned below before `op` is called.
        return torch.multinomial(input, 10, **kwargs)

    generator = torch.Generator(device=device)
    orig_state = generator.get_state()
    kwargs = {"generator": generator} if use_generator else {}
    B0 = 4  # vmap batch size
    seed = 1234567
    in_dims = self._in_dims(batched_input)
    always_batched = torch.randn(B0, device=device)
    passed = self._get_image(batched_input, B0, device)
    passed = flatten_input(passed, batched_call, batched_input)
    if randomness == "error":
        # Error mode: the random op under vmap must raise.
        self._assert_throws_in_error_mode(
            op, (passed, always_batched), in_dims=in_dims
        )
        return
    if randomness == "same" and batched_input != "none":
        # "same" randomness is incompatible with a batched random input.
        self._assert_throws_in_same_mode_batched(
            op, (passed, always_batched), in_dims=in_dims
        )
        return
    # Reset RNG state so the vmap run and the reference run draw from
    # identical randomness.
    generator = self._reset_random(generator, orig_state, use_generator, seed)
    vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(
        passed, always_batched
    )
    generator = self._reset_random(generator, orig_state, use_generator, seed)
    if randomness == "different":
        # Reference: move/materialize the vmap dim to the front, collapse the
        # two leading batch dims for the batched-call case, run eagerly, then
        # restore the original leading shape for comparison.
        if batched_input == "none":
            passed = passed.expand(B0, *passed.shape)
        if batched_input == "last":
            passed = passed.movedim(-1, 0)
        orig_passed_size = passed.shape[:2] if batched_call else passed.shape[:1]
        passed = passed.flatten(0, 1) if batched_call else passed
        expected = op(passed, always_batched)
        expected = expected.reshape(*orig_passed_size, 10)
        self._assert_all_slices_unique(vmap_result)
        self.assertEqual(vmap_result, expected)
    else:
        # "same" randomness: every batch slice must equal the unbatched run.
        expected = op(passed, always_batched)
        self._assert_all_slices_equal(vmap_result)
        for i in range(B0):
            self.assertEqual(vmap_result[i], expected)
def test_unsupported_random(self, device):
    """Random ops that vmap cannot support must raise, even in "same" mode.

    Covers `out=` variants (which mutate a pre-existing tensor) and in-place
    randomness such as rrelu.
    """
    base = torch.randn(3, device=device)
    target = base.abs()
    other = base.abs()

    def randn_into_out(arg):
        return torch.randn(3, device=device, out=target)

    def normal_into_out(arg0, arg1):
        return torch.normal(base, target, out=base)

    def rrelu_inplace_randomness(arg):
        return torch.rrelu(base)

    with self.assertRaisesRegex(RuntimeError, "calling out variants"):
        vmap(randn_into_out, randomness="same")(base)

    with self.assertRaisesRegex(RuntimeError, "calling out variants"):
        vmap(normal_into_out, randomness="same")(other, other)

    with self.assertRaisesRegex(RuntimeError, "do not yet support"):
        vmap(rrelu_inplace_randomness, randomness="same")(other)
@parametrize("in_dim", [0, 1, 2])
@parametrize("out_dim", [0, 1, 2])
def test_chunk_vmap(self, in_dim, out_dim):
    """chunk_vmap with "different" randomness yields unique slices for any chunk count."""
    inp = torch.randn(4, 5, 6)

    def noisy_sin(t):
        return t.sin() + torch.rand_like(t)

    for n_chunks in (1, 2, 3, 4, 7, 10, 16):
        batched_fn = chunk_vmap(
            noisy_sin,
            in_dims=in_dim,
            out_dims=out_dim,
            randomness="different",
            chunks=n_chunks,
        )
        self._assert_all_slices_unique(batched_fn(inp))
@parametrize("in_dim", [0, 1, 2])
@parametrize("out_dim", [0, 1, 2])
def test_vmap_chunksize(self, in_dim, out_dim):
    """vmap's `chunk_size` argument preserves unique per-slice randomness."""
    inp = torch.randn(4, 5, 6)

    def noisy_sin(t):
        return t.sin() + torch.rand_like(t)

    for size in (1, 2, 3, 4, 7, 10, 16, 100):
        batched_fn = vmap(
            noisy_sin,
            in_dims=in_dim,
            out_dims=out_dim,
            randomness="different",
            chunk_size=size,
        )
        self._assert_all_slices_unique(batched_fn(inp))
def test_jacfwd_with_random(self):
    """jacfwd must honor the `randomness` parameter.

    Behavioral checks for randomness modes live above; this only verifies
    that jacfwd forwards the setting.
    """
    inp = torch.rand(3, 4)
    # Default randomness is "error": any random op must raise.
    with self.assertRaisesRegex(
        RuntimeError, r"called random operation while in randomness error mode"
    ):
        jacfwd(torch.bernoulli)(inp)
    # `inp` isn't batched, so use bernoulli since it doesn't do inplace
    # randomness; both non-error modes should succeed.
    for mode in ("same", "different"):
        jacfwd(torch.bernoulli, randomness=mode)(inp)
@parametrize("randomness", ["error", "same", "different"])
def test_dropout_unbatched(self, device, randomness):
    """Dropout applied to an unbatched argument runs under vmap.

    We only verify that `same` and `different` randomness do not raise
    (and that `error` mode does).
    Ref: https://github.com/pytorch/pytorch/issues/92283
    """
    batched = torch.randn(3, device=device)
    unbatched = torch.randn(1, 3, device=device)

    def fn(a, b):
        # output from dropout should be a Tensor[B, 1, 3] (B=3)
        return a + torch.nn.functional.dropout(b, p=0.5).mean(1)

    if randomness == "error":
        ctx = self.assertRaises(RuntimeError)
    else:
        ctx = contextlib.nullcontext()
    with ctx:
        vmap(fn, in_dims=(0, None), randomness=randomness)(batched, unbatched)
@markDynamoStrictTest
| TestRandomness |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 15466,
"end": 15567
} | class ____(PydanticValueError):
msg_template = 'value is not a valid IPv4 address'
| IPv4AddressError |
python | readthedocs__readthedocs.org | readthedocs/integrations/migrations/0007_update-provider-data.py | {
"start": 417,
"end": 651
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("integrations", "0006_set-default-value-provider-data"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_utils.py | {
"start": 20915,
"end": 23779
} | class ____(object):
def __init__(
self,
name,
operator_type="kubernetes",
flow_name=None,
is_mapper_node=False,
flow_contains_foreach=False,
):
self.name = name
self._is_mapper_node = is_mapper_node
self._operator_args = None
self._operator_type = operator_type
self._flow_name = flow_name
self._flow_contains_foreach = flow_contains_foreach
@property
def is_mapper_node(self):
return self._is_mapper_node
def set_operator_args(self, **kwargs):
self._operator_args = kwargs
return self
def _make_sensor(self):
TaskSensor = _get_sensor(self._operator_type)
return TaskSensor(
task_id=self.name,
**_parse_sensor_args(self._operator_type, self._operator_args)
)
def to_dict(self):
return {
"name": self.name,
"is_mapper_node": self._is_mapper_node,
"operator_type": self._operator_type,
"operator_args": self._operator_args,
}
@classmethod
def from_dict(cls, task_dict, flow_name=None, flow_contains_foreach=False):
op_args = {} if "operator_args" not in task_dict else task_dict["operator_args"]
is_mapper_node = (
False if "is_mapper_node" not in task_dict else task_dict["is_mapper_node"]
)
return cls(
task_dict["name"],
is_mapper_node=is_mapper_node,
operator_type=(
task_dict["operator_type"]
if "operator_type" in task_dict
else "kubernetes"
),
flow_name=flow_name,
flow_contains_foreach=flow_contains_foreach,
).set_operator_args(**op_args)
def _kubernetes_task(self):
MetaflowKubernetesOperator = get_metaflow_kubernetes_operator()
k8s_args = _kubernetes_pod_operator_args(self._operator_args)
return MetaflowKubernetesOperator(
flow_name=self._flow_name,
flow_contains_foreach=self._flow_contains_foreach,
**k8s_args
)
def _kubernetes_mapper_task(self):
MetaflowKubernetesOperator = get_metaflow_kubernetes_operator()
k8s_args = _kubernetes_pod_operator_args(self._operator_args)
return MetaflowKubernetesOperator.partial(
flow_name=self._flow_name,
flow_contains_foreach=self._flow_contains_foreach,
**k8s_args
)
def to_task(self):
if self._operator_type == "kubernetes":
if not self.is_mapper_node:
return self._kubernetes_task()
else:
return self._kubernetes_mapper_task()
elif self._operator_type in SensorNames.get_supported_sensors():
return self._make_sensor()
| AirflowTask |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 124591,
"end": 127093
} | class ____(Request):
"""
Archive tasks.
If a task is queued it will first be dequeued and then archived.
:param tasks: List of task ids
:type tasks: Sequence[str]
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "archive"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"tasks": {
"description": "List of task ids",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks, status_reason=None, status_message=None, **kwargs):
super(ArchiveRequest, self).__init__(**kwargs)
self.tasks = tasks
self.status_reason = status_reason
self.status_message = status_message
@schema_property("tasks")
def tasks(self):
return self._property_tasks
@tasks.setter
def tasks(self, value):
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| ArchiveRequest |
python | celery__celery | celery/utils/collections.py | {
"start": 20274,
"end": 20871
} | class ____:
"""Mixin for classes supporting the ``evict`` method."""
Empty = Empty
def evict(self) -> None:
"""Force evict until maxsize is enforced."""
self._evict(range=count)
def _evict(self, limit: int = 100, range=range) -> None:
try:
[self._evict1() for _ in range(limit)]
except IndexError:
pass
def _evict1(self) -> None:
if self._evictcount <= self.maxsize:
raise IndexError()
try:
self._pop_to_evict()
except self.Empty:
raise IndexError()
| Evictable |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zenloop/source_zenloop/streams.py | {
"start": 5118,
"end": 6215
} | class ____(ChildStreamMixin, IncrementalZenloopStream):
# API Doc: https://docs.zenloop.com/reference#get-answers
primary_key = "id"
has_date_param = True
parent_stream_class = Surveys
extra_params = {
"page": "1",
"order_type": "desc",
"order_by": "inserted_at",
"date_shortcut": "custom",
"date_to": datetime.today().strftime("%Y-%m-%d"),
}
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
# take optional survey_id if entered
if self.survey_id:
return f"surveys/{self.survey_id}/answers"
# slice all survey_id's if nothing provided
else:
return f"surveys/{stream_slice['survey_slice']}/answers"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
response_json = response.json()
# select answers and surveys to be able to link answer to a survey
yield from response_json.get("answers", [])
| Answers |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_organization_seer_setup_check.py | {
"start": 267,
"end": 686
} | class ____(APITestCase, SnubaTestCase):
endpoint = "sentry-api-0-organization-seer-setup-check"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def get_response(self, organization_slug, **kwargs):
url = f"/api/0/organizations/{organization_slug}/seer/setup-check/"
return self.client.get(url, format="json", **kwargs)
| OrganizationSeerSetupCheckTestBase |
python | kamyu104__LeetCode-Solutions | Python/jewels-and-stones.py | {
"start": 33,
"end": 251
} | class ____(object):
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
lookup = set(J)
return sum(s in lookup for s in S)
| Solution |
python | streamlit__streamlit | lib/tests/streamlit/runtime/secrets_test.py | {
"start": 1717,
"end": 2423
} | class ____(unittest.TestCase):
def test_changing_message(self):
messages = SecretErrorMessages()
assert (
messages.get_missing_attr_message("attr")
== 'st.secrets has no attribute "attr". Did you forget to add it to secrets.toml, '
"mount it to secret directory, or the app settings on Streamlit Cloud? More info: "
"https://docs.streamlit.io/deploy/streamlit-community-cloud/deploy-your-app/secrets-management"
)
messages.set_missing_attr_message(
lambda attr: "Missing attribute message",
)
assert messages.get_missing_attr_message([""]) == "Missing attribute message"
| TestSecretErrorMessages |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 80533,
"end": 83062
} | class ____(
ScaledTMAConfigMixin, CUDAConfigHeuristic
):
"""
Scaled TMA template heuristic for CUDA:
main loop scaling variants (BlockWise1x128, BlockWise1x32, BlockWise1x16, BlockWise128x128)
"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use scaled_persistent_mm_configs for TMA
self.mm_configs = self.scaled_persistent_mm_configs
def _get_template_configs_impl(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> Generator[dict[str, Any], None, None]:
"""
Generate main loop scaling kernel inputs.
"""
mat_a, mat_b, scale_a, scale_b = kernel_inputs._input_nodes
scale_a_size, scale_b_size = scale_a.get_size(), scale_b.get_size()
scale_option_a, scale_option_b = get_scaling_options(
mat_a, mat_b, scale_a_size, scale_b_size
)
tile_size_a = get_tile_size(scale_option_a)
tile_size_b = get_tile_size(scale_option_b)
# Get base scaled MM template configs from superclass
for template_kwargs in super()._get_template_configs_impl(
kernel_inputs,
op_name,
):
# Add scaling-specific options for main loop scaling variants
# Inductor templates require compile-time constants passed in as tl.constexpr values.
# In cases in which the block size (BLOCK_*) is smaller than the tile size (128, 32, 16),
# scales must be broadcasted to BLOCK_* (rather than to a tile_sizextile_size chunk).
template_kwargs["TILE_SIZE_A"] = tile_size_a
template_kwargs["TILE_SIZE_B"] = tile_size_b
template_kwargs["MIN_BLOCK_TILE_AM"] = min(
template_kwargs["BLOCK_M"], tile_size_a
)
template_kwargs["MIN_BLOCK_TILE_AK"] = min(
template_kwargs["BLOCK_K"], tile_size_a
)
template_kwargs["MIN_BLOCK_TILE_BK"] = min(
template_kwargs["BLOCK_K"], tile_size_b
)
template_kwargs["MIN_BLOCK_TILE_BN"] = min(
template_kwargs["BLOCK_N"], tile_size_b
)
yield template_kwargs
@register_template_heuristic(
blackwell_ws_persistent_device_tma_mm_template.uid, # regular Blackwell MM template + scaling epilogue from ScaledMMConfigMixin
"cuda",
register=torch.version.hip is None,
op_name="scaled_mm",
)
| CUDAScaledTMAMainLoopScalingTemplateConfigHeuristic |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/partial7.py | {
"start": 235,
"end": 645
} | class ____(TypedDict):
c: list[str]
a: int
b: NotRequired[str]
def func1(**kwargs: Unpack[TD1]) -> None:
print(f"a: {kwargs['a']}, b: {kwargs.get('b')}, c: {kwargs['c']}")
func1_1 = partial(func1, c=["a", "b"], a=2)
func1_1(b="2")
func1_2 = partial(func1, a=2, b="", c=["a", "b"])
func1_2(a=2, b="2")
func1_3 = partial(func1, c=["a", "b"])
# This should generate an error.
func1_3(b="2")
| TD1 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor6.py | {
"start": 1002,
"end": 1115
} | class ____: ...
_T1 = TypeVar("_T1", bound="Optional[Model]")
_T2 = TypeVar("_T2", bound="Optional[Model]")
| Model |
python | pandas-dev__pandas | pandas/core/arrays/base.py | {
"start": 89563,
"end": 90118
} | class ____(ExtensionArray):
@overload
def any(self, *, skipna: Literal[True] = ...) -> bool: ...
@overload
def any(self, *, skipna: bool) -> bool | NAType: ...
def any(self, *, skipna: bool = True) -> bool | NAType:
raise AbstractMethodError(self)
@overload
def all(self, *, skipna: Literal[True] = ...) -> bool: ...
@overload
def all(self, *, skipna: bool) -> bool | NAType: ...
def all(self, *, skipna: bool = True) -> bool | NAType:
raise AbstractMethodError(self)
| ExtensionArraySupportsAnyAll |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.