language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
django__django
django/contrib/auth/middleware.py
{ "start": 10703, "end": 11208 }
class ____(RemoteUserMiddleware): """ Middleware for web-server provided authentication on logon pages. Like RemoteUserMiddleware but keeps the user authenticated even if the ``request.META`` key is not found in the request. Useful for setups when the external authentication is only expected to happen on some "logon" URL and the rest of the application wants to use Django's authentication mechanism. """ force_logout_if_no_header = False
PersistentRemoteUserMiddleware
python
doocs__leetcode
solution/2400-2499/2466.Count Ways To Build Good Strings/Solution.py
{ "start": 0, "end": 384 }
class ____: def countGoodStrings(self, low: int, high: int, zero: int, one: int) -> int: @cache def dfs(i): if i > high: return 0 ans = 0 if low <= i <= high: ans += 1 ans += dfs(i + zero) + dfs(i + one) return ans % mod mod = 10**9 + 7 return dfs(0)
Solution
python
sympy__sympy
sympy/geometry/ellipse.py
{ "start": 1341, "end": 42460 }
class ____(GeometrySet): """An elliptical GeometryEntity. Parameters ========== center : Point, optional Default value is Point(0, 0) hradius : number or SymPy expression, optional vradius : number or SymPy expression, optional eccentricity : number or SymPy expression, optional Two of `hradius`, `vradius` and `eccentricity` must be supplied to create an Ellipse. The third is derived from the two supplied. Attributes ========== center hradius vradius area circumference eccentricity periapsis apoapsis focus_distance foci Raises ====== GeometryError When `hradius`, `vradius` and `eccentricity` are incorrectly supplied as parameters. TypeError When `center` is not a Point. See Also ======== Circle Notes ----- Constructed from a center and two radii, the first being the horizontal radius (along the x-axis) and the second being the vertical radius (along the y-axis). When symbolic value for hradius and vradius are used, any calculation that refers to the foci or the major or minor axis will assume that the ellipse has its major radius on the x-axis. If this is not true then a manual rotation is necessary. 
Examples ======== >>> from sympy import Ellipse, Point, Rational >>> e1 = Ellipse(Point(0, 0), 5, 1) >>> e1.hradius, e1.vradius (5, 1) >>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5)) >>> e2 Ellipse(Point2D(3, 1), 3, 9/5) """ def __contains__(self, o): if isinstance(o, Point): res = self.equation(x, y).subs({x: o.x, y: o.y}) return trigsimp(simplify(res)) is S.Zero elif isinstance(o, Ellipse): return self == o return False def __eq__(self, o): """Is the other GeometryEntity the same as this ellipse?""" return isinstance(o, Ellipse) and (self.center == o.center and self.hradius == o.hradius and self.vradius == o.vradius) def __hash__(self): return super().__hash__() def __new__( cls, center=None, hradius=None, vradius=None, eccentricity=None, **kwargs): hradius = sympify(hradius) vradius = sympify(vradius) if center is None: center = Point(0, 0) else: if len(center) != 2: raise ValueError('The center of "{}" must be a two dimensional point'.format(cls)) center = Point(center, dim=2) if len(list(filter(lambda x: x is not None, (hradius, vradius, eccentricity)))) != 2: raise ValueError(filldedent(''' Exactly two arguments of "hradius", "vradius", and "eccentricity" must not be None.''')) if eccentricity is not None: eccentricity = sympify(eccentricity) if eccentricity.is_negative: raise GeometryError("Eccentricity of ellipse/circle should lie between [0, 1)") elif hradius is None: hradius = vradius / sqrt(1 - eccentricity**2) elif vradius is None: vradius = hradius * sqrt(1 - eccentricity**2) if hradius == vradius: return Circle(center, hradius, **kwargs) if S.Zero in (hradius, vradius): return Segment(Point(center[0] - hradius, center[1] - vradius), Point(center[0] + hradius, center[1] + vradius)) if hradius.is_real is False or vradius.is_real is False: raise GeometryError("Invalid value encountered when computing hradius / vradius.") return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs) def _svg(self, scale_factor=1., 
fill_color="#66cc99"): """Returns SVG ellipse element for the Ellipse. Parameters ========== scale_factor : float Multiplication factor for the SVG stroke-width. Default is 1. fill_color : str, optional Hex string for fill color. Default is "#66cc99". """ c = N(self.center) h, v = N(self.hradius), N(self.vradius) return ( '<ellipse fill="{1}" stroke="#555555" ' 'stroke-width="{0}" opacity="0.6" cx="{2}" cy="{3}" rx="{4}" ry="{5}"/>' ).format(2. * scale_factor, fill_color, c.x, c.y, h, v) @property def ambient_dimension(self): return 2 @property def apoapsis(self): """The apoapsis of the ellipse. The greatest distance between the focus and the contour. Returns ======= apoapsis : number See Also ======== periapsis : Returns shortest distance between foci and contour Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.apoapsis 2*sqrt(2) + 3 """ return self.major * (1 + self.eccentricity) def arbitrary_point(self, parameter='t'): """A parameterized point on the ellipse. Parameters ========== parameter : str, optional Default value is 't'. Returns ======= arbitrary_point : Point Raises ====== ValueError When `parameter` already appears in the functions. See Also ======== sympy.geometry.point.Point Examples ======== >>> from sympy import Point, Ellipse >>> e1 = Ellipse(Point(0, 0), 3, 2) >>> e1.arbitrary_point() Point2D(3*cos(t), 2*sin(t)) """ t = _symbol(parameter, real=True) if t.name in (f.name for f in self.free_symbols): raise ValueError(filldedent('Symbol %s already appears in object ' 'and cannot be used as a parameter.' % t.name)) return Point(self.center.x + self.hradius*cos(t), self.center.y + self.vradius*sin(t)) @property def area(self): """The area of the ellipse. 
Returns ======= area : number Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.area 3*pi """ return simplify(S.Pi * self.hradius * self.vradius) @property def bounds(self): """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding rectangle for the geometric figure. """ h, v = self.hradius, self.vradius return (self.center.x - h, self.center.y - v, self.center.x + h, self.center.y + v) @property def center(self): """The center of the ellipse. Returns ======= center : number See Also ======== sympy.geometry.point.Point Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.center Point2D(0, 0) """ return self.args[0] @property def circumference(self): """The circumference of the ellipse. Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.circumference 12*elliptic_e(8/9) """ if self.eccentricity == 1: # degenerate return 4*self.major elif self.eccentricity == 0: # circle return 2*pi*self.hradius else: return 4*self.major*elliptic_e(self.eccentricity**2) @property def eccentricity(self): """The eccentricity of the ellipse. Returns ======= eccentricity : number Examples ======== >>> from sympy import Point, Ellipse, sqrt >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, sqrt(2)) >>> e1.eccentricity sqrt(7)/3 """ return self.focus_distance / self.major def encloses_point(self, p): """ Return True if p is enclosed by (is inside of) self. Notes ----- Being on the border of self is considered False. 
Parameters ========== p : Point Returns ======= encloses_point : True, False or None See Also ======== sympy.geometry.point.Point Examples ======== >>> from sympy import Ellipse, S >>> from sympy.abc import t >>> e = Ellipse((0, 0), 3, 2) >>> e.encloses_point((0, 0)) True >>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half)) False >>> e.encloses_point((4, 0)) False """ p = Point(p, dim=2) if p in self: return False if len(self.foci) == 2: # if the combined distance from the foci to p (h1 + h2) is less # than the combined distance from the foci to the minor axis # (which is the same as the major axis length) then p is inside # the ellipse h1, h2 = [f.distance(p) for f in self.foci] test = 2*self.major - (h1 + h2) else: test = self.radius - self.center.distance(p) return fuzzy_bool(test.is_positive) def equation(self, x='x', y='y', _slope=None): """ Returns the equation of an ellipse aligned with the x and y axes; when slope is given, the equation returned corresponds to an ellipse with a major axis having that slope. Parameters ========== x : str, optional Label for the x-axis. Default value is 'x'. y : str, optional Label for the y-axis. Default value is 'y'. _slope : Expr, optional The slope of the major axis. Ignored when 'None'. Returns ======= equation : SymPy expression See Also ======== arbitrary_point : Returns parameterized point on ellipse Examples ======== >>> from sympy import Point, Ellipse, pi >>> from sympy.abc import x, y >>> e1 = Ellipse(Point(1, 0), 3, 2) >>> eq1 = e1.equation(x, y); eq1 y**2/4 + (x/3 - 1/3)**2 - 1 >>> eq2 = e1.equation(x, y, _slope=1); eq2 (-x + y + 1)**2/8 + (x + y - 1)**2/18 - 1 A point on e1 satisfies eq1. 
Let's use one on the x-axis: >>> p1 = e1.center + Point(e1.major, 0) >>> assert eq1.subs(x, p1.x).subs(y, p1.y) == 0 When rotated the same as the rotated ellipse, about the center point of the ellipse, it will satisfy the rotated ellipse's equation, too: >>> r1 = p1.rotate(pi/4, e1.center) >>> assert eq2.subs(x, r1.x).subs(y, r1.y) == 0 References ========== .. [1] https://math.stackexchange.com/questions/108270/what-is-the-equation-of-an-ellipse-that-is-not-aligned-with-the-axis .. [2] https://en.wikipedia.org/wiki/Ellipse#Shifted_ellipse """ x = _symbol(x, real=True) y = _symbol(y, real=True) dx = x - self.center.x dy = y - self.center.y if _slope is not None: L = (dy - _slope*dx)**2 l = (_slope*dy + dx)**2 h = 1 + _slope**2 b = h*self.major**2 a = h*self.minor**2 return l/b + L/a - 1 else: t1 = (dx/self.hradius)**2 t2 = (dy/self.vradius)**2 return t1 + t2 - 1 def evolute(self, x='x', y='y'): """The equation of evolute of the ellipse. Parameters ========== x : str, optional Label for the x-axis. Default value is 'x'. y : str, optional Label for the y-axis. Default value is 'y'. Returns ======= equation : SymPy expression Examples ======== >>> from sympy import Point, Ellipse >>> e1 = Ellipse(Point(1, 0), 3, 2) >>> e1.evolute() 2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3) """ if len(self.args) != 3: raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.') x = _symbol(x, real=True) y = _symbol(y, real=True) t1 = (self.hradius*(x - self.center.x))**Rational(2, 3) t2 = (self.vradius*(y - self.center.y))**Rational(2, 3) return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3) @property def foci(self): """The foci of the ellipse. Notes ----- The foci can only be calculated if the major/minor axes are known. Raises ====== ValueError When the major and minor axis cannot be determined. 
See Also ======== sympy.geometry.point.Point focus_distance : Returns the distance between focus and center Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.foci (Point2D(-2*sqrt(2), 0), Point2D(2*sqrt(2), 0)) """ c = self.center hr, vr = self.hradius, self.vradius if hr == vr: return (c, c) # calculate focus distance manually, since focus_distance calls this # routine fd = sqrt(self.major**2 - self.minor**2) if hr == self.minor: # foci on the y-axis return (c + Point(0, -fd), c + Point(0, fd)) elif hr == self.major: # foci on the x-axis return (c + Point(-fd, 0), c + Point(fd, 0)) @property def focus_distance(self): """The focal distance of the ellipse. The distance between the center and one focus. Returns ======= focus_distance : number See Also ======== foci Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.focus_distance 2*sqrt(2) """ return Point.distance(self.center, self.foci[0]) @property def hradius(self): """The horizontal radius of the ellipse. Returns ======= hradius : number See Also ======== vradius, major, minor Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.hradius 3 """ return self.args[1] def intersection(self, o): """The intersection of this ellipse and another geometrical entity `o`. Parameters ========== o : GeometryEntity Returns ======= intersection : list of GeometryEntity objects Notes ----- Currently supports intersections with Point, Line, Segment, Ray, Circle and Ellipse types. 
See Also ======== sympy.geometry.entity.GeometryEntity Examples ======== >>> from sympy import Ellipse, Point, Line >>> e = Ellipse(Point(0, 0), 5, 7) >>> e.intersection(Point(0, 0)) [] >>> e.intersection(Point(5, 0)) [Point2D(5, 0)] >>> e.intersection(Line(Point(0,0), Point(0, 1))) [Point2D(0, -7), Point2D(0, 7)] >>> e.intersection(Line(Point(5,0), Point(5, 1))) [Point2D(5, 0)] >>> e.intersection(Line(Point(6,0), Point(6, 1))) [] >>> e = Ellipse(Point(-1, 0), 4, 3) >>> e.intersection(Ellipse(Point(1, 0), 4, 3)) [Point2D(0, -3*sqrt(15)/4), Point2D(0, 3*sqrt(15)/4)] >>> e.intersection(Ellipse(Point(5, 0), 4, 3)) [Point2D(2, -3*sqrt(7)/4), Point2D(2, 3*sqrt(7)/4)] >>> e.intersection(Ellipse(Point(100500, 0), 4, 3)) [] >>> e.intersection(Ellipse(Point(0, 0), 3, 4)) [Point2D(3, 0), Point2D(-363/175, -48*sqrt(111)/175), Point2D(-363/175, 48*sqrt(111)/175)] >>> e.intersection(Ellipse(Point(-1, 0), 3, 4)) [Point2D(-17/5, -12/5), Point2D(-17/5, 12/5), Point2D(7/5, -12/5), Point2D(7/5, 12/5)] """ # TODO: Replace solve with nonlinsolve, when nonlinsolve will be able to solve in real domain if isinstance(o, Point): if o in self: return [o] else: return [] elif isinstance(o, (Segment2D, Ray2D)): ellipse_equation = self.equation(x, y) result = solve([ellipse_equation, Line( o.points[0], o.points[1]).equation(x, y)], [x, y], set=True)[1] return list(ordered([Point(i) for i in result if i in o])) elif isinstance(o, Polygon): return o.intersection(self) elif isinstance(o, (Ellipse, Line2D)): if o == self: return self else: ellipse_equation = self.equation(x, y) return list(ordered([Point(i) for i in solve( [ellipse_equation, o.equation(x, y)], [x, y], set=True)[1]])) elif isinstance(o, LinearEntity3D): raise TypeError('Entity must be two dimensional, not three dimensional') else: raise TypeError('Intersection not handled for %s' % func_name(o)) def is_tangent(self, o): """Is `o` tangent to the ellipse? 
Parameters ========== o : GeometryEntity An Ellipse, LinearEntity or Polygon Raises ====== NotImplementedError When the wrong type of argument is supplied. Returns ======= is_tangent: boolean True if o is tangent to the ellipse, False otherwise. See Also ======== tangent_lines Examples ======== >>> from sympy import Point, Ellipse, Line >>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3) >>> e1 = Ellipse(p0, 3, 2) >>> l1 = Line(p1, p2) >>> e1.is_tangent(l1) True """ if isinstance(o, Point2D): return False elif isinstance(o, Ellipse): intersect = self.intersection(o) if isinstance(intersect, Ellipse): return True elif intersect: return all((self.tangent_lines(i)[0]).equals(o.tangent_lines(i)[0]) for i in intersect) else: return False elif isinstance(o, Line2D): hit = self.intersection(o) if not hit: return False if len(hit) == 1: return True # might return None if it can't decide return hit[0].equals(hit[1]) elif isinstance(o, (Segment2D, Ray2D)): intersect = self.intersection(o) if len(intersect) == 1: return o in self.tangent_lines(intersect[0])[0] else: return False elif isinstance(o, Polygon): return all(self.is_tangent(s) for s in o.sides) elif isinstance(o, (LinearEntity3D, Point3D)): raise TypeError('Entity must be two dimensional, not three dimensional') else: raise TypeError('Is_tangent not handled for %s' % func_name(o)) @property def major(self): """Longer axis of the ellipse (if it can be determined) else hradius. 
Returns ======= major : number or expression See Also ======== hradius, vradius, minor Examples ======== >>> from sympy import Point, Ellipse, Symbol >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.major 3 >>> a = Symbol('a') >>> b = Symbol('b') >>> Ellipse(p1, a, b).major a >>> Ellipse(p1, b, a).major b >>> m = Symbol('m') >>> M = m + 1 >>> Ellipse(p1, m, M).major m + 1 """ ab = self.args[1:3] if len(ab) == 1: return ab[0] a, b = ab o = b - a < 0 if o == True: return a elif o == False: return b return self.hradius @property def minor(self): """Shorter axis of the ellipse (if it can be determined) else vradius. Returns ======= minor : number or expression See Also ======== hradius, vradius, major Examples ======== >>> from sympy import Point, Ellipse, Symbol >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.minor 1 >>> a = Symbol('a') >>> b = Symbol('b') >>> Ellipse(p1, a, b).minor b >>> Ellipse(p1, b, a).minor a >>> m = Symbol('m') >>> M = m + 1 >>> Ellipse(p1, m, M).minor m """ ab = self.args[1:3] if len(ab) == 1: return ab[0] a, b = ab o = a - b < 0 if o == True: return a elif o == False: return b return self.vradius def normal_lines(self, p, prec=None): """Normal lines between `p` and the ellipse. Parameters ========== p : Point Returns ======= normal_lines : list with 1, 2 or 4 Lines Examples ======== >>> from sympy import Point, Ellipse >>> e = Ellipse((0, 0), 2, 3) >>> c = e.center >>> e.normal_lines(c + Point(1, 0)) [Line2D(Point2D(0, 0), Point2D(1, 0))] >>> e.normal_lines(c) [Line2D(Point2D(0, 0), Point2D(0, 1)), Line2D(Point2D(0, 0), Point2D(1, 0))] Off-axis points require the solution of a quartic equation. This often leads to very large expressions that may be of little practical use. 
An approximate solution of `prec` digits can be obtained by passing in the desired value: >>> e.normal_lines((3, 3), prec=2) [Line2D(Point2D(-0.81, -2.7), Point2D(0.19, -1.2)), Line2D(Point2D(1.5, -2.0), Point2D(2.5, -2.7))] Whereas the above solution has an operation count of 12, the exact solution has an operation count of 2020. """ p = Point(p, dim=2) # XXX change True to something like self.angle == 0 if the arbitrarily # rotated ellipse is introduced. # https://github.com/sympy/sympy/issues/2815) if True: rv = [] if p.x == self.center.x: rv.append(Line(self.center, slope=oo)) if p.y == self.center.y: rv.append(Line(self.center, slope=0)) if rv: # at these special orientations of p either 1 or 2 normals # exist and we are done return rv # find the 4 normal points and construct lines through them with # the corresponding slope eq = self.equation(x, y) dydx = idiff(eq, y, x) norm = -1/dydx slope = Line(p, (x, y)).slope seq = slope - norm # TODO: Replace solve with solveset, when this line is tested yis = solve(seq, y)[0] xeq = eq.subs(y, yis).as_numer_denom()[0].expand() if len(xeq.free_symbols) == 1: try: # this is so much faster, it's worth a try xsol = Poly(xeq, x).real_roots() except (DomainError, PolynomialError, NotImplementedError): # TODO: Replace solve with solveset, when these lines are tested xsol = _nsort(solve(xeq, x), separated=True)[0] points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol] else: raise NotImplementedError( 'intersections for the general ellipse are not supported') slopes = [norm.subs(zip((x, y), pt.args)) for pt in points] if prec is not None: points = [pt.n(prec) for pt in points] slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes] return [Line(pt, slope=s) for pt, s in zip(points, slopes)] @property def periapsis(self): """The periapsis of the ellipse. The shortest distance between the focus and the contour. 
Returns ======= periapsis : number See Also ======== apoapsis : Returns greatest distance between focus and contour Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.periapsis 3 - 2*sqrt(2) """ return self.major * (1 - self.eccentricity) @property def semilatus_rectum(self): """ Calculates the semi-latus rectum of the Ellipse. Semi-latus rectum is defined as one half of the chord through a focus parallel to the conic section directrix of a conic section. Returns ======= semilatus_rectum : number See Also ======== apoapsis : Returns greatest distance between focus and contour periapsis : The shortest distance between the focus and the contour Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.semilatus_rectum 1/3 References ========== .. [1] https://mathworld.wolfram.com/SemilatusRectum.html .. [2] https://en.wikipedia.org/wiki/Ellipse#Semi-latus_rectum """ return self.major * (1 - self.eccentricity ** 2) def auxiliary_circle(self): """Returns a Circle whose diameter is the major axis of the ellipse. Examples ======== >>> from sympy import Ellipse, Point, symbols >>> c = Point(1, 2) >>> Ellipse(c, 8, 7).auxiliary_circle() Circle(Point2D(1, 2), 8) >>> a, b = symbols('a b') >>> Ellipse(c, a, b).auxiliary_circle() Circle(Point2D(1, 2), Max(a, b)) """ return Circle(self.center, Max(self.hradius, self.vradius)) def director_circle(self): """ Returns a Circle consisting of all points where two perpendicular tangent lines to the ellipse cross each other. Returns ======= Circle A director circle returned as a geometric object. Examples ======== >>> from sympy import Ellipse, Point, symbols >>> c = Point(3,8) >>> Ellipse(c, 7, 9).director_circle() Circle(Point2D(3, 8), sqrt(130)) >>> a, b = symbols('a b') >>> Ellipse(c, a, b).director_circle() Circle(Point2D(3, 8), sqrt(a**2 + b**2)) References ========== .. 
[1] https://en.wikipedia.org/wiki/Director_circle """ return Circle(self.center, sqrt(self.hradius**2 + self.vradius**2)) def plot_interval(self, parameter='t'): """The plot interval for the default geometric plot of the Ellipse. Parameters ========== parameter : str, optional Default value is 't'. Returns ======= plot_interval : list [parameter, lower_bound, upper_bound] Examples ======== >>> from sympy import Point, Ellipse >>> e1 = Ellipse(Point(0, 0), 3, 2) >>> e1.plot_interval() [t, -pi, pi] """ t = _symbol(parameter, real=True) return [t, -S.Pi, S.Pi] def random_point(self, seed=None): """A random point on the ellipse. Returns ======= point : Point Examples ======== >>> from sympy import Point, Ellipse >>> e1 = Ellipse(Point(0, 0), 3, 2) >>> e1.random_point() # gives some random point Point2D(...) >>> p1 = e1.random_point(seed=0); p1.n(2) Point2D(2.1, 1.4) Notes ===== When creating a random point, one may simply replace the parameter with a random number. When doing so, however, the random number should be made a Rational or else the point may not test as being in the ellipse: >>> from sympy.abc import t >>> from sympy import Rational >>> arb = e1.arbitrary_point(t); arb Point2D(3*cos(t), 2*sin(t)) >>> arb.subs(t, .1) in e1 False >>> arb.subs(t, Rational(.1)) in e1 True >>> arb.subs(t, Rational('.1')) in e1 True See Also ======== sympy.geometry.point.Point arbitrary_point : Returns parameterized point on ellipse """ t = _symbol('t', real=True) x, y = self.arbitrary_point(t).args # get a random value in [-1, 1) corresponding to cos(t) # and confirm that it will test as being in the ellipse if seed is not None: rng = random.Random(seed) else: rng = random # simplify this now or else the Float will turn s into a Float r = Rational(rng.random()) c = 2*r - 1 s = sqrt(1 - c**2) return Point(x.subs(cos(t), c), y.subs(sin(t), s)) def reflect(self, line): """Override GeometryEntity.reflect since the radius is not a GeometryEntity. 
Examples ======== >>> from sympy import Circle, Line >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1))) Circle(Point2D(1, 0), -1) >>> from sympy import Ellipse, Line, Point >>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0))) Traceback (most recent call last): ... NotImplementedError: General Ellipse is not supported but the equation of the reflected Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 + 37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1 Notes ===== Until the general ellipse (with no axis parallel to the x-axis) is supported a NotImplemented error is raised and the equation whose zeros define the rotated ellipse is given. """ if line.slope in (0, oo): c = self.center c = c.reflect(line) return self.func(c, -self.hradius, self.vradius) else: x, y = [uniquely_named_symbol( name, (self, line), modify=lambda s: '_' + s, real=True) for name in 'xy'] expr = self.equation(x, y) p = Point(x, y).reflect(line) result = expr.subs(zip((x, y), p.args ), simultaneous=True) raise NotImplementedError(filldedent( 'General Ellipse is not supported but the equation ' 'of the reflected Ellipse is given by the zeros of: ' + "f(%s, %s) = %s" % (str(x), str(y), str(result)))) def rotate(self, angle=0, pt=None): """Rotate ``angle`` radians counterclockwise about Point ``pt``. Note: since the general ellipse is not supported, only rotations that are integer multiples of pi/2 are allowed. 
Examples ======== >>> from sympy import Ellipse, pi >>> Ellipse((1, 0), 2, 1).rotate(pi/2) Ellipse(Point2D(0, 1), 1, 2) >>> Ellipse((1, 0), 2, 1).rotate(pi) Ellipse(Point2D(-1, 0), 2, 1) """ if self.hradius == self.vradius: return self.func(self.center.rotate(angle, pt), self.hradius) if (angle/S.Pi).is_integer: return super().rotate(angle, pt) if (2*angle/S.Pi).is_integer: return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius) # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipes raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.') def scale(self, x=1, y=1, pt=None): """Override GeometryEntity.scale since it is the major and minor axes which must be scaled and they are not GeometryEntities. Examples ======== >>> from sympy import Ellipse >>> Ellipse((0, 0), 2, 1).scale(2, 4) Circle(Point2D(0, 0), 4) >>> Ellipse((0, 0), 2, 1).scale(2) Ellipse(Point2D(0, 0), 4, 1) """ c = self.center if pt: pt = Point(pt, dim=2) return self.translate(*(-pt).args).scale(x, y).translate(*pt.args) h = self.hradius v = self.vradius return self.func(c.scale(x, y), hradius=h*x, vradius=v*y) def tangent_lines(self, p): """Tangent lines between `p` and the ellipse. If `p` is on the ellipse, returns the tangent line through point `p`. Otherwise, returns the tangent line(s) from `p` to the ellipse, or None if no tangent line is possible (e.g., `p` inside ellipse). Parameters ========== p : Point Returns ======= tangent_lines : list with 1 or 2 Lines Raises ====== NotImplementedError Can only find tangent lines for a point, `p`, on the ellipse. 
See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Line Examples ======== >>> from sympy import Point, Ellipse >>> e1 = Ellipse(Point(0, 0), 3, 2) >>> e1.tangent_lines(Point(3, 0)) [Line2D(Point2D(3, 0), Point2D(3, -12))] """ p = Point(p, dim=2) if self.encloses_point(p): return [] if p in self: delta = self.center - p rise = (self.vradius**2)*delta.x run = -(self.hradius**2)*delta.y p2 = Point(simplify(p.x + run), simplify(p.y + rise)) return [Line(p, p2)] else: if len(self.foci) == 2: f1, f2 = self.foci maj = self.hradius test = (2*maj - Point.distance(f1, p) - Point.distance(f2, p)) else: test = self.radius - Point.distance(self.center, p) if test.is_number and test.is_positive: return [] # else p is outside the ellipse or we can't tell. In case of the # latter, the solutions returned will only be valid if # the point is not inside the ellipse; if it is, nan will result. eq = self.equation(x, y) dydx = idiff(eq, y, x) slope = Line(p, Point(x, y)).slope # TODO: Replace solve with solveset, when this line is tested tangent_points = solve([slope - dydx, eq], [x, y]) # handle horizontal and vertical tangent lines if len(tangent_points) == 1: if tangent_points[0][ 0] == p.x or tangent_points[0][1] == p.y: return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))] else: return [Line(p, p + Point(0, 1)), Line(p, tangent_points[0])] # others return [Line(p, tangent_points[0]), Line(p, tangent_points[1])] @property def vradius(self): """The vertical radius of the ellipse. Returns ======= vradius : number See Also ======== hradius, major, minor Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.vradius 1 """ return self.args[2] def second_moment_of_area(self, point=None): """Returns the second moment and product moment area of an ellipse. Parameters ========== point : Point, two-tuple of sympifiable objects, or None(default=None) point is the point about which second moment of area is to be found. 
If "point=None" it will be calculated about the axis passing through the centroid of the ellipse. Returns ======= I_xx, I_yy, I_xy : number or SymPy expression I_xx, I_yy are second moment of area of an ellise. I_xy is product moment of area of an ellipse. Examples ======== >>> from sympy import Point, Ellipse >>> p1 = Point(0, 0) >>> e1 = Ellipse(p1, 3, 1) >>> e1.second_moment_of_area() (3*pi/4, 27*pi/4, 0) References ========== .. [1] https://en.wikipedia.org/wiki/List_of_second_moments_of_area """ I_xx = (S.Pi*(self.hradius)*(self.vradius**3))/4 I_yy = (S.Pi*(self.hradius**3)*(self.vradius))/4 I_xy = 0 if point is None: return I_xx, I_yy, I_xy # parallel axis theorem I_xx = I_xx + self.area*((point[1] - self.center.y)**2) I_yy = I_yy + self.area*((point[0] - self.center.x)**2) I_xy = I_xy + self.area*(point[0] - self.center.x)*(point[1] - self.center.y) return I_xx, I_yy, I_xy def polar_second_moment_of_area(self): """Returns the polar second moment of area of an Ellipse It is a constituent of the second moment of area, linked through the perpendicular axis theorem. While the planar second moment of area describes an object's resistance to deflection (bending) when subjected to a force applied to a plane parallel to the central axis, the polar second moment of area describes an object's resistance to deflection when subjected to a moment applied in a plane perpendicular to the object's central axis (i.e. parallel to the cross-section) Examples ======== >>> from sympy import symbols, Circle, Ellipse >>> c = Circle((5, 5), 4) >>> c.polar_second_moment_of_area() 128*pi >>> a, b = symbols('a, b') >>> e = Ellipse((0, 0), a, b) >>> e.polar_second_moment_of_area() pi*a**3*b/4 + pi*a*b**3/4 References ========== .. 
[1] https://en.wikipedia.org/wiki/Polar_moment_of_inertia """ second_moment = self.second_moment_of_area() return second_moment[0] + second_moment[1] def section_modulus(self, point=None): """Returns a tuple with the section modulus of an ellipse Section modulus is a geometric property of an ellipse defined as the ratio of second moment of area to the distance of the extreme end of the ellipse from the centroidal axis. Parameters ========== point : Point, two-tuple of sympifyable objects, or None(default=None) point is the point at which section modulus is to be found. If "point=None" section modulus will be calculated for the point farthest from the centroidal axis of the ellipse. Returns ======= S_x, S_y: numbers or SymPy expressions S_x is the section modulus with respect to the x-axis S_y is the section modulus with respect to the y-axis A negative sign indicates that the section modulus is determined for a point below the centroidal axis. Examples ======== >>> from sympy import Symbol, Ellipse, Circle, Point2D >>> d = Symbol('d', positive=True) >>> c = Circle((0, 0), d/2) >>> c.section_modulus() (pi*d**3/32, pi*d**3/32) >>> e = Ellipse(Point2D(0, 0), 2, 4) >>> e.section_modulus() (8*pi, 4*pi) >>> e.section_modulus((2, 2)) (16*pi, 4*pi) References ========== .. [1] https://en.wikipedia.org/wiki/Section_modulus """ x_c, y_c = self.center if point is None: # taking x and y as maximum distances from centroid x_min, y_min, x_max, y_max = self.bounds y = max(y_c - y_min, y_max - y_c) x = max(x_c - x_min, x_max - x_c) else: # taking x and y as distances of the given point from the center point = Point2D(point) y = point.y - y_c x = point.x - x_c second_moment = self.second_moment_of_area() S_x = second_moment[0]/y S_y = second_moment[1]/x return S_x, S_y
Ellipse
python
great-expectations__great_expectations
great_expectations/data_context/types/resource_identifiers.py
{ "start": 1677, "end": 1915 }
class ____(Schema): name = fields.Str() # noinspection PyUnusedLocal @post_load def make_expectation_suite_identifier(self, data, **kwargs): return ExpectationSuiteIdentifier(**data)
ExpectationSuiteIdentifierSchema
python
ethereum__web3.py
web3/providers/persistent/persistent.py
{ "start": 1206, "end": 19399 }
class ____(AsyncJSONBaseProvider, ABC): logger = logging.getLogger("web3.providers.PersistentConnectionProvider") has_persistent_connection = True _send_func_cache: tuple[ int | None, Callable[..., Coroutine[Any, Any, RPCRequest]] | None ] = (None, None) _recv_func_cache: tuple[ int | None, Callable[..., Coroutine[Any, Any, RPCResponse]] | None ] = (None, None) _send_batch_func_cache: tuple[ int | None, Callable[..., Coroutine[Any, Any, list[RPCRequest]]] | None ] = (None, None) _recv_batch_func_cache: tuple[ int | None, Callable[..., Coroutine[Any, Any, list[RPCResponse]]] | None ] = (None, None) def __init__( self, request_timeout: float = DEFAULT_PERSISTENT_CONNECTION_TIMEOUT, subscription_response_queue_size: int = 500, silence_listener_task_exceptions: bool = False, max_connection_retries: int = 5, request_information_cache_size: int = 500, **kwargs: Any, ) -> None: super().__init__(**kwargs) self._request_processor = RequestProcessor( self, subscription_response_queue_size=subscription_response_queue_size, request_information_cache_size=request_information_cache_size, ) self._message_listener_task: Optional["asyncio.Task[None]"] = None self._listen_event: asyncio.Event = asyncio.Event() self._max_connection_retries = max_connection_retries self.request_timeout = request_timeout self.silence_listener_task_exceptions = silence_listener_task_exceptions # -- cached middleware request/response functions -- # async def send_func( self, async_w3: "AsyncWeb3[Any]", middleware_onion: "MiddlewareOnion" ) -> Callable[..., Coroutine[Any, Any, RPCRequest]]: """ Cache the middleware chain for `send`. 
""" middleware = middleware_onion.as_tuple_of_middleware() cache_key = hash(tuple(id(mw) for mw in middleware)) if cache_key != self._send_func_cache[0]: async def send_function(method: RPCEndpoint, params: Any) -> RPCRequest: for mw in middleware: initialized = mw(async_w3) method, params = await initialized.async_request_processor( method, params ) return await self.send_request(method, params) self._send_func_cache = (cache_key, send_function) return self._send_func_cache[1] async def recv_func( self, async_w3: "AsyncWeb3[Any]", middleware_onion: "MiddlewareOnion" ) -> Any: """ Cache and compose the middleware stack for `recv`. """ middleware = middleware_onion.as_tuple_of_middleware() cache_key = hash(tuple(id(mw) for mw in middleware)) if cache_key != self._recv_func_cache[0]: async def recv_function(rpc_request: RPCRequest) -> RPCResponse: # first, retrieve the response response = await self.recv_for_request(rpc_request) method = rpc_request["method"] for mw in reversed(middleware): initialized = mw(async_w3) response = await initialized.async_response_processor( method, response ) return response self._recv_func_cache = (cache_key, recv_function) return self._recv_func_cache[1] async def send_batch_func( self, async_w3: "AsyncWeb3[Any]", middleware_onion: "MiddlewareOnion" ) -> Callable[..., Coroutine[Any, Any, list[RPCRequest]]]: middleware = middleware_onion.as_tuple_of_middleware() cache_key = hash(tuple(id(mw) for mw in middleware)) if cache_key != self._send_batch_func_cache[0]: async def send_func( requests: list[tuple[RPCEndpoint, Any]], ) -> list[RPCRequest]: for mw in middleware: initialized = mw(async_w3) requests = [ await initialized.async_request_processor(method, params) for (method, params) in requests ] return await self.send_batch_request(requests) self._send_batch_func_cache = (cache_key, send_func) return self._send_batch_func_cache[1] async def recv_batch_func( self, async_w3: "AsyncWeb3[Any]", middleware_onion: "MiddlewareOnion" ) -> 
Callable[..., Coroutine[Any, Any, list[RPCResponse]]]: middleware = middleware_onion.as_tuple_of_middleware() cache_key = hash(tuple(id(mw) for mw in middleware)) if cache_key != self._recv_batch_func_cache[0]: async def recv_function( rpc_requests: list[RPCRequest], ) -> list[RPCResponse]: methods = [rpc_request["method"] for rpc_request in rpc_requests] responses = await self.recv_for_batch_request(rpc_requests) for mw in reversed(middleware): if not isinstance(responses, list): # RPC errors return only one response with the error object return responses initialized = mw(async_w3) responses = [ await initialized.async_response_processor(m, r) for m, r in zip(methods, responses) ] return responses self._recv_batch_func_cache = (cache_key, recv_function) return self._recv_batch_func_cache[1] # -- connection management -- # def get_endpoint_uri_or_ipc_path(self) -> str: if hasattr(self, "endpoint_uri"): return str(self.endpoint_uri) elif hasattr(self, "ipc_path"): return str(self.ipc_path) else: raise Web3AttributeError( "`PersistentConnectionProvider` must have either `endpoint_uri` or " "`ipc_path` attribute." ) async def connect(self) -> None: endpoint = self.get_endpoint_uri_or_ipc_path() _connection_attempts = 0 _backoff_rate_change = 1.75 _backoff_time = 1.75 while _connection_attempts != self._max_connection_retries: try: _connection_attempts += 1 self.logger.info("Connecting to: %s", endpoint) await self._provider_specific_connect() self._message_listener_task = asyncio.create_task( self._message_listener() ) self._message_listener_task.add_done_callback( self._message_listener_callback ) self.logger.info("Successfully connected to: %s", endpoint) break except (WebSocketException, OSError) as e: if _connection_attempts == self._max_connection_retries: raise ProviderConnectionError( f"Could not connect to: {endpoint}. " f"Retries exceeded max of {self._max_connection_retries}." ) from e self.logger.info( "Could not connect to: %s. 
Retrying in %s seconds.", endpoint, round(_backoff_time, 1), exc_info=True, ) await asyncio.sleep(_backoff_time) _backoff_time *= _backoff_rate_change async def disconnect(self) -> None: # this should remain idempotent try: if self._message_listener_task: self._message_listener_task.cancel() await self._message_listener_task except (asyncio.CancelledError, StopAsyncIteration, ConnectionClosed): pass finally: self._message_listener_task = None self.logger.info("Message listener background task successfully shut down.") await self._provider_specific_disconnect() self._request_processor.clear_caches() self.logger.info( "Successfully disconnected from: %s", self.get_endpoint_uri_or_ipc_path(), ) # -- request methods -- # @async_handle_send_caching async def send_request(self, method: RPCEndpoint, params: Any) -> RPCRequest: request_dict = self.form_request(method, params) await self.socket_send(self.encode_rpc_dict(request_dict)) return request_dict @async_handle_recv_caching async def recv_for_request(self, rpc_request: RPCRequest) -> RPCResponse: return await self._get_response_for_request_id(rpc_request["id"]) async def make_request( self, method: RPCEndpoint, params: Any, ) -> RPCResponse: rpc_request = await self.send_request(method, params) return await self.recv_for_request(rpc_request) # -- batch requests -- # async def send_batch_request( self, requests: list[tuple[RPCEndpoint, Any]] ) -> list[RPCRequest]: request_dicts = [ self.form_request(method, params) for (method, params) in requests ] request_data = self.encode_batch_request_dicts(request_dicts) await self.socket_send(request_data) return request_dicts async def recv_for_batch_request( self, _request_dicts: list[RPCRequest] ) -> list[RPCResponse]: response = cast( list[RPCResponse], await self._get_response_for_request_id(BATCH_REQUEST_ID), ) return response async def make_batch_request( self, requests: list[tuple[RPCEndpoint, Any]] ) -> list[RPCResponse]: request_dicts = await 
self.send_batch_request(requests) return await self.recv_for_batch_request(request_dicts) # -- abstract methods -- # @abstractmethod async def socket_send(self, request_data: bytes) -> None: """ Send an encoded RPC request to the provider over the persistent connection. """ raise NotImplementedError("Must be implemented by subclasses") @abstractmethod async def socket_recv(self) -> RPCResponse: """ Receive, decode, and return an RPC response from the provider over the persistent connection. """ raise NotImplementedError("Must be implemented by subclasses") # -- private methods -- # async def _provider_specific_connect(self) -> None: raise NotImplementedError("Must be implemented by subclasses") async def _provider_specific_disconnect(self) -> None: # this method should be idempotent raise NotImplementedError("Must be implemented by subclasses") async def _provider_specific_socket_reader(self) -> RPCResponse: raise NotImplementedError("Must be implemented by subclasses") def _set_signal_handlers(self) -> None: def extended_handler(sig: int, frame: Any, existing_handler: Any) -> None: loop = asyncio.get_event_loop() # invoke the existing handler, if callable if callable(existing_handler): existing_handler(sig, frame) loop.create_task(self.disconnect()) existing_sigint_handler = signal.getsignal(signal.SIGINT) existing_sigterm_handler = signal.getsignal(signal.SIGTERM) # extend the existing signal handlers to include the disconnect method signal.signal( signal.SIGINT, lambda sig, frame: extended_handler(sig, frame, existing_sigint_handler), ) signal.signal( signal.SIGTERM, lambda sig, frame: extended_handler(sig, frame, existing_sigterm_handler), ) def _message_listener_callback( self, message_listener_task: "asyncio.Task[None]" ) -> None: # Puts a `TaskNotRunning` in appropriate queues to signal the end of the # listener task to any listeners relying on the queues. message = "Message listener task has ended." 
self._request_processor._subscription_response_queue.put_nowait( TaskNotRunning(message_listener_task, message=message) ) self._request_processor._handler_subscription_queue.put_nowait( TaskNotRunning(message_listener_task, message=message) ) def _raise_stray_errors_from_cache(self) -> None: """ Check the request response cache for any errors not tied to current requests and raise them if found. """ for response in self._request_processor._request_response_cache._data.values(): if isinstance(response, dict): if "id" not in response: validate_rpc_response_and_raise_if_error( cast(RPCResponse, response), None, logger=self.logger ) else: request = self._request_processor._request_information_cache.get_cache_entry( # noqa: E501 generate_cache_key(response["id"]) ) if "error" in response and request is None: validate_rpc_response_and_raise_if_error( cast(RPCResponse, response), None, logger=self.logger ) async def _message_listener(self) -> None: self.logger.info( "%s listener background task started. Storing all messages in " "appropriate request processor queues / caches to be processed.", self.__class__.__qualname__, ) while True: # the use of sleep(0) seems to be the most efficient way to yield control # back to the event loop to share the loop with other tasks. 
await asyncio.sleep(0) try: response = await self._provider_specific_socket_reader() if isinstance(response, list): response = sort_batch_response_by_response_ids(response) subscription = ( response.get("method") == "eth_subscription" if not isinstance(response, list) else False ) await self._request_processor.cache_raw_response( response, subscription=subscription ) self._raise_stray_errors_from_cache() except PersistentConnectionClosedOK as e: self.logger.info( "Message listener background task has ended gracefully: %s", e.user_message, ) # trigger a return to end the listener task and initiate the callback fn return except Exception as e: if not self.silence_listener_task_exceptions: raise e else: self._error_log_listener_task_exception(e) def _error_log_listener_task_exception(self, e: Exception) -> None: """ When silencing listener task exceptions, this method is used to log the exception and keep the listener task alive. Override this method to fine-tune error logging behavior for the implementation class. """ self.logger.error( "Exception caught in listener, error logging and keeping " "listener background task alive.\n error=%s: %s", e.__class__.__name__, e, ) def _handle_listener_task_exceptions(self) -> None: """ Should be called every time a `PersistentConnectionProvider` is polling for messages in the main loop. If the message listener task has completed and an exception was recorded, raise the exception in the main loop. 
""" msg_listener_task = getattr(self, "_message_listener_task", None) if ( msg_listener_task and msg_listener_task.done() and msg_listener_task.exception() ): raise msg_listener_task.exception() async def _get_response_for_request_id( self, request_id: RPCId | list[RPCId], timeout: float | None = None ) -> RPCResponse: if timeout is None: timeout = self.request_timeout async def _match_response_id_to_request_id() -> RPCResponse: request_cache_key = generate_cache_key(request_id) while True: # check if an exception was recorded in the listener task and raise # it in the main loop if so self._handle_listener_task_exceptions() if request_cache_key in self._request_processor._request_response_cache: self.logger.debug( "Popping response for id %s from cache.", request_id, ) popped_response = await self._request_processor.pop_raw_response( cache_key=request_cache_key, ) return popped_response else: await asyncio.sleep(0) try: # Add the request timeout around the while loop that checks the request # cache. If the request is not in the cache within the request_timeout, # raise ``TimeExhausted``. return await asyncio.wait_for(_match_response_id_to_request_id(), timeout) except asyncio.TimeoutError: raise TimeExhausted( f"Timed out waiting for response with request id `{request_id}` after " f"{self.request_timeout} second(s). This may be due to the provider " "not returning a response with the same id that was sent in the " "request or an exception raised during the request was caught and " "allowed to continue." )
PersistentConnectionProvider
python
spack__spack
lib/spack/spack/util/package_hash.py
{ "start": 8084, "end": 14597 }
class ____(ast.NodeTransformer): """Remove multi-methods when we know statically that they won't be used. Say we have multi-methods like this:: class SomePackage: def foo(self): print("implementation 1") @when("@1.0") def foo(self): print("implementation 2") @when("@2.0") @when(sys.platform == "darwin") def foo(self): print("implementation 3") @when("@3.0") def foo(self): print("implementation 4") The multimethod that will be chosen at runtime depends on the package spec and on whether we're on the darwin platform *at build time* (the darwin condition for implementation 3 is dynamic). We know the package spec statically; we don't know statically what the runtime environment will be. We need to include things that can possibly affect package behavior in the package hash, and we want to exclude things when we know that they will not affect package behavior. If we're at version 4.0, we know that implementation 1 will win, because some @when for 2, 3, and 4 will be ``False``. We should only include implementation 1. If we're at version 1.0, we know that implementation 2 will win, because it overrides implementation 1. We should only include implementation 2. If we're at version 3.0, we know that implementation 4 will win, because it overrides implementation 1 (the default), and some @when on all others will be False. If we're at version 2.0, it's a bit more complicated. We know we can remove implementations 2 and 4, because their @when's will never be satisfied. But, the choice between implementations 1 and 3 will happen at runtime (this is a bad example because the spec itself has platform information, and we should prefer to use that, but we allow arbitrary boolean expressions in @when's, so this example suffices). For this case, we end up needing to include *both* implementation 1 and 3 in the package hash, because either could be chosen. 
""" def __init__(self, methods): self.methods = methods def resolve(self, impl_conditions): """Given list of nodes and conditions, figure out which node will be chosen.""" result = [] default = None for impl, conditions in impl_conditions: # if there's a default implementation with no conditions, remember that. if not conditions: default = impl result.append(default) continue # any known-false @when means the method won't be used if any(c is False for c in conditions): continue # anything with all known-true conditions will be picked if it's first if all(c is True for c in conditions): if result and result[0] is default: return [impl] # we know the first MM will always win # if anything dynamic comes before it we don't know if it'll win, # so just let this result get appended # anything else has to be determined dynamically, so add it to a list result.append(impl) # if nothing was picked, the last definition wins. return result def visit_FunctionDef(self, node: ast.FunctionDef) -> Optional[ast.FunctionDef]: # if the function def wasn't visited on the first traversal there is a problem assert node.name in self.methods, "Inconsistent package traversal!" # if the function is a multimethod, need to resolve it statically impl_conditions = self.methods[node.name] resolutions = self.resolve(impl_conditions) if not any(r is node for r in resolutions): # multimethod did not resolve to this function; remove it return None # if we get here, this function is a possible resolution for a multi-method. # it might be the only one, or there might be several that have to be evaluated # dynamcially. Either way, we include the function. 
# strip the when decorators (preserve the rest) node.decorator_list = [ dec for dec in node.decorator_list if not (isinstance(dec, ast.Call) and _is_when_decorator(dec)) ] return node def canonical_source( spec, filter_multimethods: bool = True, source: Optional[bytes] = None ) -> str: """Get canonical source for a spec's package.py by unparsing its AST. Arguments: filter_multimethods: By default, filter multimethods out of the AST if they are known statically to be unused. Supply False to disable. source: Optionally provide a string to read python code from. """ return unparse(package_ast(spec, filter_multimethods, source=source), py_ver_consistent=True) def package_hash(spec, source: Optional[bytes] = None) -> str: """Get a hash of a package's canonical source code. This function is used to determine whether a spec needs a rebuild when a package's source code changes. Arguments: source: Optionally provide a string to read python code from. """ source = canonical_source(spec, filter_multimethods=True, source=source) return spack.util.hash.b32_hash(source) def package_ast(spec, filter_multimethods: bool = True, source: Optional[bytes] = None) -> ast.AST: """Get the AST for the ``package.py`` file corresponding to ``spec``. Arguments: filter_multimethods: By default, filter multimethods out of the AST if they are known statically to be unused. Supply False to disable. source: Optionally provide a string to read python code from. 
""" spec = spack.spec.Spec(spec) if source is None: filename = spack.repo.PATH.filename_for_package_name(spec.name) with open(filename, "rb") as f: source = f.read() # create an AST root = ast.parse(source) # remove docstrings, comments, and directives from the package AST root = RemoveDocstrings().visit(root) root = RemoveDirectives(spec).visit(root) if filter_multimethods: # visit nodes and build up a dictionary of methods (no need to assign) tagger = TagMultiMethods(spec) tagger.visit(root) # transform AST using tagged methods root = ResolveMultiMethods(tagger.methods).visit(root) return root
ResolveMultiMethods
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 6959, "end": 7151 }
class ____(SQLRole): """A SELECT statement embedded in DML, typically INSERT from SELECT""" __slots__ = () _role_name = "SELECT statement or equivalent textual object"
DMLSelectRole
python
scipy__scipy
scipy/stats/_hypotests.py
{ "start": 16380, "end": 30025 }
class ____: def __init__(self, statistic, pvalue): self.statistic = statistic self.pvalue = pvalue def __repr__(self): return (f"{self.__class__.__name__}(statistic={self.statistic}, " f"pvalue={self.pvalue})") def _psi1_mod(x, *, xp=None): """ psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996). This implements a modified version by excluding the term V(x) / 12 (here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x) twice in _cdf_cvm. Implementation based on MAPLE code of Julian Faraway and R code of the function pCvM in the package goftest (v1.1.1), permission granted by Adrian Baddeley. Main difference in the implementation: the code here keeps adding terms of the series until the terms are small enough. """ xp = array_namespace(x) if xp is None else xp def _ed2(y): z = y**2 / 4 z_ = np.asarray(z) b = xp.asarray(kv(1/4, z_) + kv(3/4, z_)) return xp.exp(-z) * (y/2)**(3/2) * b / math.sqrt(np.pi) def _ed3(y): z = y**2 / 4 z_ = np.asarray(z) c = xp.exp(-z) / math.sqrt(np.pi) kv_terms = xp.asarray(2*kv(1/4, z_) + 3*kv(3/4, z_) - kv(5/4, z_)) return c * (y/2)**(5/2) * kv_terms def _Ak(k, x): m = 2*k + 1 sx = 2 * xp.sqrt(x) y1 = x**(3/4) y2 = x**(5/4) gamma_kp1_2 = float(gamma(k + 1 / 2)) gamma_kp3_2 = float(gamma(k + 3 / 2)) e1 = m * gamma_kp1_2 * _ed2((4 * k + 3)/sx) / (9 * y1) e2 = gamma_kp1_2 * _ed3((4 * k + 1) / sx) / (72 * y2) e3 = 2 * (m + 2) * gamma_kp3_2 * _ed3((4 * k + 5) / sx) / (12 * y2) e4 = 7 * m * gamma_kp1_2 * _ed2((4 * k + 1) / sx) / (144 * y1) e5 = 7 * m * gamma_kp1_2 * _ed2((4 * k + 5) / sx) / (144 * y1) return e1 + e2 + e3 + e4 + e5 x = xp.asarray(x) tot = xp.zeros_like(x) cond = xp.ones_like(x, dtype=xp.bool) k = 0 while xp.any(cond): gamma_kp1 = float(gamma(k + 1)) z = -_Ak(k, x[cond]) / (xp.pi * gamma_kp1) tot = xpx.at(tot)[cond].set(tot[cond] + z) # For float32 arithmetic, the tolerance may need to be adjusted or the # algorithm may prove to be unsuitable. 
cond = xpx.at(cond)[xp_copy(cond)].set(xp.abs(z) >= 1e-7) k += 1 return tot def _cdf_cvm_inf(x, *, xp=None): """ Calculate the cdf of the Cramér-von Mises statistic (infinite sample size). See equation 1.2 in Csörgő, S. and Faraway, J. (1996). Implementation based on MAPLE code of Julian Faraway and R code of the function pCvM in the package goftest (v1.1.1), permission granted by Adrian Baddeley. Main difference in the implementation: the code here keeps adding terms of the series until the terms are small enough. The function is not expected to be accurate for large values of x, say x > 4, when the cdf is very close to 1. """ xp = array_namespace(x) if xp is None else xp x = xp.asarray(x) def term(x, k): # this expression can be found in [2], second line of (1.3) u = math.exp(gammaln(k + 0.5) - gammaln(k+1)) / (xp.pi**1.5 * xp.sqrt(x)) y = 4*k + 1 q = y**2 / (16*x) b = xp.asarray(kv(0.25, np.asarray(q)), dtype=u.dtype) # not automatic? return u * math.sqrt(y) * xp.exp(-q) * b tot = xp.zeros_like(x, dtype=x.dtype) cond = xp.ones_like(x, dtype=xp.bool) k = 0 while xp.any(cond): z = term(x[cond], k) # tot[cond] = tot[cond] + z tot = xpx.at(tot)[cond].add(z) # cond[cond] = np.abs(z) >= 1e-7 cond = xpx.at(cond)[xp_copy(cond)].set(xp.abs(z) >= 1e-7) # torch needs copy k += 1 return tot def _cdf_cvm(x, n=None, *, xp=None): """ Calculate the cdf of the Cramér-von Mises statistic for a finite sample size n. If N is None, use the asymptotic cdf (n=inf). See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples, 1.2 for the asymptotic cdf. The function is not expected to be accurate for large values of x, say x > 2, when the cdf is very close to 1 and it might return values > 1 in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it is not accurate for small values of n, especially close to the bounds of the distribution's domain, [1/(12*n), n/3], where the value jumps to 0 and 1, respectively. 
These are limitations of the approximation by Csörgő and Faraway (1996) implemented in this function. """ xp = array_namespace(x) if xp is None else xp x = xp.asarray(x) if n is None: y = _cdf_cvm_inf(x, xp=xp) else: # support of the test statistic is [12/n, n/3], see 1.1 in [2] y = xp.zeros_like(x, dtype=x.dtype) sup = (1./(12*n) < x) & (x < n/3.) # note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12 # therefore, we need to add it here y = xpx.at(y)[sup].set(_cdf_cvm_inf(x[sup], xp=xp) * (1 + 1./(12*n)) + _psi1_mod(x[sup], xp=xp) / n) y = xpx.at(y)[x >= n/3].set(1.) return y[()] if y.ndim == 0 else y def _cvm_result_to_tuple(res, _): return res.statistic, res.pvalue @xp_capabilities(cpu_only=True, # needs special function `kv` skip_backends=[('dask.array', 'typical dask issues')], jax_jit=False) @_axis_nan_policy_factory(CramerVonMisesResult, n_samples=1, too_small=1, result_to_tuple=_cvm_result_to_tuple) def cramervonmises(rvs, cdf, args=(), *, axis=0): r"""Perform the one-sample Cramér-von Mises test for goodness of fit. This performs a test of the goodness of fit of a cumulative distribution function (cdf) :math:`F` compared to the empirical distribution function :math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are assumed to be independent and identically distributed ([1]_). The null hypothesis is that the :math:`X_i` have cumulative distribution :math:`F`. The test statistic :math:`T` is defined as in [1]_, where :math:`\omega^2` is the Cramér-von Mises criterion and :math:`x_i` are the observed values. .. math:: T = n\omega^2 = \frac{1}{12n} + \sum_{i=1}^n \left[ \frac{2i-1}{2n} - F(x_i) \right]^2 Parameters ---------- rvs : array_like A 1-D array of observed values of the random variables :math:`X_i`. The sample must contain at least two observations. cdf : str or callable The cumulative distribution function :math:`F` to test the observations against. If a string, it should be the name of a distribution in `scipy.stats`. 
If a callable, that callable is used to calculate the cdf: ``cdf(x, *args) -> float``. args : tuple, optional Distribution parameters. These are assumed to be known; see Notes. axis : int or tuple of ints, default: 0 If an int or tuple of ints, the axis or axes of the input along which to compute the statistic. The statistic of each axis-slice (e.g. row) of the input will appear in a corresponding element of the output. If ``None``, the input will be raveled before computing the statistic. Returns ------- res : object with attributes statistic : float Cramér-von Mises statistic :math:`T`. pvalue : float The p-value. See Also -------- kstest, cramervonmises_2samp Notes ----- .. versionadded:: 1.6.0 The p-value relies on the approximation given by equation 1.8 in [2]_. It is important to keep in mind that the p-value is only accurate if one tests a simple hypothesis, i.e. the parameters of the reference distribution are known. If the parameters are estimated from the data (composite hypothesis), the computed p-value is not reliable. References ---------- .. [1] Cramér-von Mises criterion, Wikipedia, https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion .. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic Distribution of Cramér-von Mises Statistics. Journal of the Royal Statistical Society, pp. 221-234. Examples -------- Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs`` were, in fact, drawn from the standard normal distribution. We choose a significance level of ``alpha=0.05``. 
>>> import numpy as np >>> from scipy import stats >>> rng = np.random.default_rng(165417232101553420507139617764912913465) >>> x = stats.norm.rvs(size=500, random_state=rng) >>> res = stats.cramervonmises(x, 'norm') >>> res.statistic, res.pvalue (0.1072085112565724, 0.5508482238203407) The p-value exceeds our chosen significance level, so we do not reject the null hypothesis that the observed sample is drawn from the standard normal distribution. Now suppose we wish to check whether the same samples shifted by 2.1 is consistent with being drawn from a normal distribution with a mean of 2. >>> y = x + 2.1 >>> res = stats.cramervonmises(y, 'norm', args=(2,)) >>> res.statistic, res.pvalue (0.8364446265294695, 0.00596286797008283) Here we have used the `args` keyword to specify the mean (``loc``) of the normal distribution to test the data against. This is equivalent to the following, in which we create a frozen normal distribution with mean 2.1, then pass its ``cdf`` method as an argument. >>> frozen_dist = stats.norm(loc=2) >>> res = stats.cramervonmises(y, frozen_dist.cdf) >>> res.statistic, res.pvalue (0.8364446265294695, 0.00596286797008283) In either case, we would reject the null hypothesis that the observed sample is drawn from a normal distribution with a mean of 2 (and default variance of 1) because the p-value is less than our chosen significance level. """ # `_axis_nan_policy` decorator ensures `axis=-1` xp = array_namespace(rvs) if isinstance(cdf, str) and is_numpy(xp): cdf = getattr(distributions, cdf).cdf elif isinstance(cdf, str): message = "`cdf` must be a callable if `rvs` is a non-NumPy array." 
raise ValueError(message) n = rvs.shape[-1] if n <= 1: # only needed for `test_axis_nan_policy.py`; not user-facing raise ValueError('The sample must contain at least two observations.') rvs, n = xp_promote(rvs, n, force_floating=True, xp=xp) vals = xp.sort(rvs, axis=-1) cdfvals = cdf(vals, *args) u = (2*xp.arange(1, n+1, dtype=n.dtype) - 1)/(2*n) w = 1/(12*n) + xp.sum((u - cdfvals)**2, axis=-1) # avoid small negative values that can occur due to the approximation p = xp.clip(1. - _cdf_cvm(w, n), 0., None) return CramerVonMisesResult(statistic=w, pvalue=p) def _get_wilcoxon_distr(n): """ Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum of ranks of positive differences). Returns an array with the probabilities of all the possible ranks r = 0, ..., n*(n+1)/2 """ c = np.ones(1, dtype=np.float64) for k in range(1, n + 1): prev_c = c c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.float64) m = len(prev_c) c[:m] = prev_c * 0.5 c[-m:] += prev_c * 0.5 return c def _get_wilcoxon_distr2(n): """ Distribution of probability of the Wilcoxon ranksum statistic r_plus (sum of ranks of positive differences). Returns an array with the probabilities of all the possible ranks r = 0, ..., n*(n+1)/2 This is a slower reference function References ---------- .. [1] 1. Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343. 
""" ai = np.arange(1, n+1)[:, None] t = n*(n+1)/2 q = 2*t j = np.arange(q) theta = 2*np.pi/q*j phi_sp = np.prod(np.cos(theta*ai), axis=0) phi_s = np.exp(1j*theta*t) * phi_sp p = np.real(ifft(phi_s)) res = np.zeros(int(t)+1) res[:-1:] = p[::2] res[0] /= 2 res[-1] = res[0] return res def _tau_b(A): """Calculate Kendall's tau-b and p-value from contingency table.""" # See [2] 2.2 and 4.2 # contingency table must be truly 2D if A.shape[0] == 1 or A.shape[1] == 1: return np.nan, np.nan NA = A.sum() PA = _P(A) QA = _Q(A) Sri2 = (A.sum(axis=1)**2).sum() Scj2 = (A.sum(axis=0)**2).sum() denominator = (NA**2 - Sri2)*(NA**2 - Scj2) tau = (PA-QA)/(denominator)**0.5 numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA) s02_tau_b = numerator/denominator if s02_tau_b == 0: # Avoid divide by zero return tau, 0 Z = tau/s02_tau_b**0.5 p = 2*norm.sf(abs(Z)) # 2-sided p-value return tau, p def _somers_d(A, alternative='two-sided'): """Calculate Somers' D and p-value from contingency table.""" # See [3] page 1740 # contingency table must be truly 2D if A.shape[0] <= 1 or A.shape[1] <= 1: return np.nan, np.nan NA = A.sum() NA2 = NA**2 PA = _P(A) QA = _Q(A) Sri2 = (A.sum(axis=1)**2).sum() d = (PA - QA)/(NA2 - Sri2) S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA with np.errstate(divide='ignore'): Z = (PA - QA)/(4*(S))**0.5 norm = _stats_py._SimpleNormal() p = _stats_py._get_pvalue(Z, norm, alternative, xp=np) return d, p @dataclass
CramerVonMisesResult
python
matplotlib__matplotlib
lib/matplotlib/tests/test_ticker.py
{ "start": 23736, "end": 25354 }
class ____: def test_set_params(self): """ Create symmetrical log locator with default subs =[1.0] numticks = 15, and change it to something else. See if change was successful. Should not exception. """ sym = mticker.SymmetricalLogLocator(base=10, linthresh=1) sym.set_params(subs=[2.0], numticks=8) assert sym._subs == [2.0] assert sym.numticks == 8 @pytest.mark.parametrize( 'vmin, vmax, expected', [ (0, 1, [0, 1]), (-1, 1, [-1, 0, 1]), ], ) def test_values(self, vmin, vmax, expected): # https://github.com/matplotlib/matplotlib/issues/25945 sym = mticker.SymmetricalLogLocator(base=10, linthresh=1) ticks = sym.tick_values(vmin=vmin, vmax=vmax) assert_array_equal(ticks, expected) def test_subs(self): sym = mticker.SymmetricalLogLocator(base=10, linthresh=1, subs=[2.0, 4.0]) sym.create_dummy_axis() sym.axis.set_view_interval(-10, 10) assert_array_equal(sym(), [-20, -40, -2, -4, 0, 2, 4, 20, 40]) def test_extending(self): sym = mticker.SymmetricalLogLocator(base=10, linthresh=1) sym.create_dummy_axis() sym.axis.set_view_interval(8, 9) assert (sym() == [1.0]).all() sym.axis.set_view_interval(8, 12) assert (sym() == [1.0, 10.0]).all() assert sym.view_limits(10, 10) == (1, 100) assert sym.view_limits(-10, -10) == (-100, -1) assert sym.view_limits(0, 0) == (-0.001, 0.001)
TestSymmetricalLogLocator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py
{ "start": 13465, "end": 13523 }
class ____(EventsMixin, GeoReport): pass
GeoEventsReport
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_autofilter08.py
{ "start": 315, "end": 2576 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("autofilter08.xlsx") self.set_text_file("autofilter_data.txt") def test_create_file(self): """ Test the creation of a simple XlsxWriter file with an autofilter. This test checks a normal filter + a blank filter. """ workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() # Set the autofilter. worksheet.autofilter("A1:D51") # Add filter criteria. worksheet.filter_column(0, "x == Blanks or x == North") # Open a text file with autofilter example data. textfile = open(self.txt_filename) # Read the headers from the first line of the input file. headers = textfile.readline().strip("\n").split() # Write out the headers. worksheet.write_row("A1", headers) # Start writing data after the headers. row = 1 # Read the rest of the text file and write it to the worksheet. for line in textfile: # Split the input data based on whitespace. data = line.strip("\n").split() # Convert the number data from the text file. for i, item in enumerate(data): try: data[i] = float(item) except ValueError: pass # Simulate a blank cell in the data. if row == 6: data[0] = "" # Get some of the field data. region = data[0] # Check for rows that match the filter. if region == "" or region == "North": # Row matches the filter, no further action required. pass else: # We need to hide rows that don't match the filter. worksheet.set_row(row, options={"hidden": True}) # Write out the row data. worksheet.write_row(row, 0, data) # Move on to the next worksheet row. row += 1 textfile.close() workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
google__pytype
pytype/rewrite/abstract/functions.py
{ "start": 10789, "end": 10929 }
class ____(Protocol): def get_return_value(self) -> base.BaseValue: ... _HasReturnT = TypeVar('_HasReturnT', bound=_HasReturn)
_HasReturn
python
ray-project__ray
rllib/examples/envs/classes/parametric_actions_cartpole.py
{ "start": 109, "end": 3328 }
class ____(gym.Env): """Parametric action version of CartPole. In this env there are only ever two valid actions, but we pretend there are actually up to `max_avail_actions` actions that can be taken, and the two valid actions are randomly hidden among this set. At each step, we emit a dict of: - the actual cart observation - a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail) - the list of action embeddings (w/ zeroes for invalid actions) (e.g., [[0, 0], [0, 0], [-0.2322, -0.2569], [0, 0], [0, 0], [0.7878, 1.2297]] for max_avail_actions=6) In a real environment, the actions embeddings would be larger than two units of course, and also there would be a variable number of valid actions per step instead of always [LEFT, RIGHT]. """ def __init__(self, max_avail_actions): # Use simple random 2-unit action embeddings for [LEFT, RIGHT] self.left_action_embed = np.random.randn(2) self.right_action_embed = np.random.randn(2) self.action_space = Discrete(max_avail_actions) self.wrapped = gym.make("CartPole-v1") self.observation_space = Dict( { "action_mask": Box(0, 1, shape=(max_avail_actions,), dtype=np.int8), "avail_actions": Box(-10, 10, shape=(max_avail_actions, 2)), "cart": self.wrapped.observation_space, } ) def update_avail_actions(self): self.action_assignments = np.array( [[0.0, 0.0]] * self.action_space.n, dtype=np.float32 ) self.action_mask = np.array([0.0] * self.action_space.n, dtype=np.int8) self.left_idx, self.right_idx = random.sample(range(self.action_space.n), 2) self.action_assignments[self.left_idx] = self.left_action_embed self.action_assignments[self.right_idx] = self.right_action_embed self.action_mask[self.left_idx] = 1 self.action_mask[self.right_idx] = 1 def reset(self, *, seed=None, options=None): self.update_avail_actions() obs, infos = self.wrapped.reset() return { "action_mask": self.action_mask, "avail_actions": self.action_assignments, "cart": obs, }, infos def step(self, action): if action == self.left_idx: actual_action = 0 
elif action == self.right_idx: actual_action = 1 else: raise ValueError( "Chosen action was not one of the non-zero action embeddings", action, self.action_assignments, self.action_mask, self.left_idx, self.right_idx, ) orig_obs, rew, done, truncated, info = self.wrapped.step(actual_action) self.update_avail_actions() self.action_mask = self.action_mask.astype(np.int8) obs = { "action_mask": self.action_mask, "avail_actions": self.action_assignments, "cart": orig_obs, } return obs, rew, done, truncated, info
ParametricActionsCartPole
python
PrefectHQ__prefect
src/prefect/server/schemas/actions.py
{ "start": 4479, "end": 9774 }
class ____(ActionBaseModel): """Data used by the Prefect REST API to create a deployment.""" name: str = Field( default=..., description="The name of the deployment.", examples=["my-deployment"], ) flow_id: UUID = Field( default=..., description="The ID of the flow associated with the deployment." ) paused: bool = Field( default=False, description="Whether or not the deployment is paused." ) schedules: list[DeploymentScheduleCreate] = Field( default_factory=lambda: [], description="A list of schedules for the deployment.", ) concurrency_limit: Optional[PositiveInteger] = Field( default=None, description="The deployment's concurrency limit." ) concurrency_options: Optional[schemas.core.ConcurrencyOptions] = Field( default=None, description="The deployment's concurrency options." ) global_concurrency_limit_id: Optional[UUID] = Field( default=None, description="The ID of the global concurrency limit to apply to the deployment.", ) enforce_parameter_schema: bool = Field( default=True, description=( "Whether or not the deployment should enforce the parameter schema." ), ) parameter_openapi_schema: Optional[ParameterSchema] = Field( default_factory=lambda: {"type": "object", "properties": {}}, description="The parameter schema of the flow, including defaults.", json_schema_extra={"additionalProperties": True}, ) parameters: Dict[str, Any] = Field( default_factory=dict, description="Parameters for flow runs scheduled by the deployment.", json_schema_extra={"additionalProperties": True}, ) tags: List[str] = Field( default_factory=list, description="A list of deployment tags.", examples=[["tag-1", "tag-2"]], ) labels: Union[KeyValueLabels, None] = Field( default_factory=dict, description="A dictionary of key-value labels. 
Values can be strings, numbers, or booleans.", examples=[{"key": "value1", "key2": 42}], ) pull_steps: Optional[List[dict[str, Any]]] = Field(None) work_queue_name: Optional[str] = Field(None) work_pool_name: Optional[str] = Field( default=None, description="The name of the deployment's work pool.", examples=["my-work-pool"], ) storage_document_id: Optional[UUID] = Field(None) infrastructure_document_id: Optional[UUID] = Field(None) description: Optional[str] = Field(None) path: Optional[str] = Field(None) version: Optional[str] = Field(None) entrypoint: Optional[str] = Field(None) job_variables: Dict[str, Any] = Field( default_factory=dict, description="Overrides for the flow's infrastructure configuration.", json_schema_extra={"additionalProperties": True}, ) version_info: Optional[schemas.core.VersionInfo] = Field( default=None, description="A description of this version of the deployment." ) def check_valid_configuration(self, base_job_template: dict[str, Any]) -> None: """ Check that the combination of base_job_template defaults and job_variables conforms to the specified schema. NOTE: This method does not hydrate block references in default values within the base job template to validate them. Failing to do this can cause user-facing errors. Instead of this method, use `validate_job_variables_for_deployment` function from `prefect_cloud.orion.api.validation`. 
""" # This import is here to avoid a circular import from prefect.utilities.schema_tools import validate variables_schema = deepcopy(base_job_template.get("variables")) if variables_schema is not None: validate( self.job_variables, variables_schema, raise_on_error=True, preprocess=True, ignore_required=True, ) @model_validator(mode="before") @classmethod def remove_old_fields(cls, values: dict[str, Any]) -> dict[str, Any]: return remove_old_deployment_fields(values) @model_validator(mode="before") def _validate_parameters_conform_to_schema( cls, values: dict[str, Any] ) -> dict[str, Any]: values["parameters"] = validate_parameters_conform_to_schema( values.get("parameters", {}), values ) schema = validate_parameter_openapi_schema( values.get("parameter_openapi_schema"), values ) if schema is not None: values["parameter_openapi_schema"] = schema return values @model_validator(mode="before") def _validate_concurrency_limits(cls, values: dict[str, Any]) -> dict[str, Any]: """Validate that a deployment does not have both a concurrency limit and global concurrency limit.""" if values.get("concurrency_limit") and values.get( "global_concurrency_limit_id" ): raise ValueError( "A deployment cannot have both a concurrency limit and a global concurrency limit." ) return values
DeploymentCreate
python
pytorch__pytorch
torch/profiler/profiler.py
{ "start": 2839, "end": 19934 }
class ____: """Low-level profiler wrap the autograd profile Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, ``torch.profiler.ProfilerActivity.XPU``. Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA or (when available) ProfilerActivity.XPU. record_shapes (bool): save information about operator's input shapes. profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline`` for more details). with_stack (bool): record source information (file and line number) for the ops. with_flops (bool): use formula to estimate the FLOPS of specific operators (matrix multiplication and 2D convolution). with_modules (bool): record module hierarchy (including function names) corresponding to the callstack of the op. e.g. If module A's forward call's module B's forward which contains an aten::add op, then aten::add's module hierarchy is A.B Note that this support exist, at the moment, only for TorchScript models and not eager mode models. experimental_config (_ExperimentalConfig) : A set of experimental options used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed. execution_trace_observer (ExecutionTraceObserver) : A PyTorch Execution Trace Observer object. `PyTorch Execution Traces <https://arxiv.org/pdf/2305.14516.pdf>`__ offer a graph based representation of AI/ML workloads and enable replay benchmarks, simulators, and emulators. When this argument is included the observer start() and stop() will be called for the same time window as PyTorch profiler. acc_events (bool): Enable the accumulation of FunctionEvents across multiple profiling cycles .. note:: This API is experimental and subject to change in the future. Enabling shape and stack tracing results in additional overhead. 
When record_shapes=True is specified, profiler will temporarily hold references to the tensors; that may further prevent certain optimizations that depend on the reference count and introduce extra tensor copies. """ def __init__( self, *, activities: Optional[Iterable[ProfilerActivity]] = None, record_shapes: bool = False, profile_memory: bool = False, with_stack: bool = False, with_flops: bool = False, with_modules: bool = False, experimental_config: Optional[_ExperimentalConfig] = None, execution_trace_observer: Optional[_ITraceObserver] = None, acc_events: bool = False, custom_trace_id_callback: Optional[Callable[[], str]] = None, ) -> None: self.activities = set(activities) if activities else supported_activities() self.record_shapes = record_shapes self.with_flops = with_flops self.profile_memory = profile_memory self.with_stack = with_stack self.with_modules = with_modules self.experimental_config = experimental_config self.execution_trace_observer = execution_trace_observer self.acc_events = acc_events self.custom_trace_id_callback = custom_trace_id_callback self.profiler: Optional[prof.profile] = None self.has_cudagraphs = False self.mem_tl: Optional[MemoryProfileTimeline] = None self.use_device = None if ProfilerActivity.CUDA in self.activities: # pyrefly: ignore [bad-assignment] self.use_device = "cuda" elif ProfilerActivity.XPU in self.activities: # pyrefly: ignore [bad-assignment] self.use_device = "xpu" elif ProfilerActivity.MTIA in self.activities: # pyrefly: ignore [bad-assignment] self.use_device = "mtia" elif ProfilerActivity.HPU in self.activities: # pyrefly: ignore [bad-assignment] self.use_device = "hpu" elif ProfilerActivity.PrivateUse1 in self.activities: # pyrefly: ignore [bad-assignment] self.use_device = _get_privateuse1_backend_name() # user-defined metadata to be amended to the trace self.preset_metadata: dict[str, str] = {} def start(self) -> None: self.prepare_trace() self.start_trace() def stop(self) -> None: self.stop_trace() def 
prepare_trace(self) -> None: if hasattr(torch, "_inductor"): import torch._inductor.config as inductor_config self.has_cudagraphs = inductor_config.triton.cudagraphs if (self.profiler is None) or (not self.acc_events): self.profiler = prof.profile( use_cpu=(ProfilerActivity.CPU in self.activities), use_device=self.use_device, record_shapes=self.record_shapes, with_flops=self.with_flops, profile_memory=self.profile_memory, with_stack=self.with_stack, with_modules=self.with_modules, use_kineto=True, experimental_config=self.experimental_config, acc_events=self.acc_events, custom_trace_id_callback=self.custom_trace_id_callback, ) if (self.profiler is not None) and (not self.acc_events): _warn_once( "Warning: Profiler clears events at the end of each cycle." "Only events from the current cycle will be reported." "To keep events across cycles, set acc_events=True." ) self.profiler._prepare_trace() def start_trace(self) -> None: if self.execution_trace_observer: self.execution_trace_observer.start() if self.profiler is None: raise AssertionError("Profiler must be initialized before starting trace") self.profiler._start_trace() if self.profile_memory: self.add_metadata_json("profile_memory", "1") if self.with_stack: self.add_metadata_json("with_stack", "1") if self.record_shapes: self.add_metadata_json("record_shapes", "1") if self.with_modules: self.add_metadata_json("with_modules", "1") if self.with_flops: self.add_metadata_json("with_flops", "1") if kineto_available(): dist_info = self._get_distributed_info() if dist_info: self.add_metadata_json( "distributedInfo", json.dumps(dist_info, cls=_NumpyEncoder) ) cuda_version = None if hasattr(torch, "version"): from torch.torch_version import TorchVersion cuda_version = TorchVersion(getattr(torch.version, "cuda", "0.0")) if self.has_cudagraphs and ( (cuda_version and cuda_version < "12.6") or not profiler_allow_cudagraph_cupti_lazy_reinit_cuda12() ): os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1" 
self.add_metadata_json("DISABLE_CUPTI_LAZY_REINIT", "1") # FIXME: CUDA Graph does not work well with CUPTI teardown. # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11) # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12) # Workaround: turn off CUPTI teardown when using CUDA Graphs. os.environ["TEARDOWN_CUPTI"] = "0" # Insert the preset user metadata to the trace for k, v in self.preset_metadata.items(): self.add_metadata_json(k, v) def stop_trace(self) -> None: if self.execution_trace_observer: self.execution_trace_observer.stop() if self.profiler is None: raise AssertionError("Profiler must be initialized before stopping trace") self.profiler.__exit__(None, None, None) def export_chrome_trace(self, path: str): """ Exports the collected trace in Chrome JSON format. If kineto is enabled, only last cycle in schedule is exported. """ if self.profiler is None: raise AssertionError( "Profiler must be initialized before exporting chrome trace" ) if path.endswith(".gz"): with tempfile.NamedTemporaryFile("w+b", suffix=".json") as fp: retvalue = self.profiler.export_chrome_trace(fp.name) with open(fp.name, "rb") as fin, gzip.open(path, "wb") as fout: fout.writelines(fin) return retvalue else: return self.profiler.export_chrome_trace(path) def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): """Save stack traces to a file Args: path (str): save stacks file to this location; metric (str): metric to use: "self_cpu_time_total" or "self_cuda_time_total" """ if self.profiler is None: raise AssertionError("Profiler must be initialized before exporting stacks") return self.profiler.export_stacks(path, metric) def toggle_collection_dynamic( self, enable: bool, activities: Iterable[ProfilerActivity] ) -> None: """Toggle collection of activities on/off at any point of collection. 
Currently supports toggling Torch Ops (CPU) and CUDA activity supported in Kineto Args: activities (iterable): list of activity groups to use in profiling, supported values: ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA`` Examples: .. code-block:: python with torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ] ) as p: code_to_profile_0() // turn off collection of all CUDA activity p.toggle_collection_dynamic(False, [torch.profiler.ProfilerActivity.CUDA]) code_to_profile_1() // turn on collection of all CUDA activity p.toggle_collection_dynamic(True, [torch.profiler.ProfilerActivity.CUDA]) code_to_profile_2() print(p.key_averages().table( sort_by="self_cuda_time_total", row_limit=-1)) """ if self.profiler is None: return self.profiler.toggle_collection_dynamic(enable, activities) def key_averages( self, group_by_input_shape: bool = False, group_by_stack_n: int = 0, group_by_overload_name: bool = False, ): """Averages events, grouping them by operator name and (optionally) input shapes, stack and overload name. .. note:: To use shape/stack functionality make sure to set record_shapes/with_stack when creating profiler context manager. 
""" if self.profiler is None: raise AssertionError( "Profiler must be initialized before getting key averages" ) return self.profiler.key_averages( group_by_input_shape, group_by_stack_n, group_by_overload_name ) def events(self): """ Returns the list of unaggregated profiler events, to be used in the trace callback or after the profiling is finished """ if self.profiler is None: raise AssertionError("Profiler must be initialized before accessing events") return self.profiler.function_events def add_metadata(self, key: str, value: str) -> None: """ Adds a user defined metadata with a string key and a string value into the trace file """ wrapped_value = '"' + value.replace('"', '\\"') + '"' torch.autograd._add_metadata_json(key, wrapped_value) def add_metadata_json(self, key: str, value: str) -> None: """ Adds a user defined metadata with a string key and a valid json value into the trace file """ torch.autograd._add_metadata_json(key, value) def preset_metadata_json(self, key: str, value: str) -> None: """ Preset a user defined metadata when the profiler is not started and added into the trace file later. 
Metadata is in the format of a string key and a valid json value """ self.preset_metadata[key] = value def _get_distributed_info(self): import torch.distributed as dist if not dist.is_available() or not dist.is_initialized(): return None backend = dist.get_backend() dist_info = { "backend": backend, "rank": dist.get_rank(), "world_size": dist.get_world_size(), "pg_count": dist.get_pg_count(), "pg_config": dist.distributed_c10d._get_all_pg_configs(), } if backend == "nccl": nccl_version = torch.cuda.nccl.version() # pyrefly: ignore [unsupported-operation] dist_info["nccl_version"] = ".".join(str(v) for v in nccl_version) return dist_info def _memory_profile(self) -> MemoryProfile: required = ("record_shapes", "profile_memory", "with_stack") missing = [f"{i}=True" for i in required if not getattr(self, i)] if missing: raise ValueError(f"{', '.join(missing)} required for memory profiling.") if self.profiler is None or self.profiler.kineto_results is None: raise AssertionError( "Profiler and kineto_results must be initialized for memory profiling" ) return MemoryProfile(self.profiler.kineto_results) @deprecated( "`export_memory_timeline` is deprecated and will be removed in a future version. " "Please use `torch.cuda.memory._record_memory_history` and `torch.cuda.memory._export_memory_snapshot` instead.", category=FutureWarning, ) def export_memory_timeline(self, path: str, device: Optional[str] = None) -> None: """Export memory event information from the profiler collected tree for a given device, and export a timeline plot. There are 3 exportable files using ``export_memory_timeline``, each controlled by the ``path``'s suffix. - For an HTML compatible plot, use the suffix ``.html``, and a memory timeline plot will be embedded as a PNG file in the HTML file. - For plot points consisting of ``[times, [sizes by category]]``, where ``times`` are timestamps and ``sizes`` are memory usage for each category. 
The memory timeline plot will be saved a JSON (``.json``) or gzipped JSON (``.json.gz``) depending on the suffix. - For raw memory points, use the suffix ``.raw.json.gz``. Each raw memory event will consist of ``(timestamp, action, numbytes, category)``, where ``action`` is one of ``[PREEXISTING, CREATE, INCREMENT_VERSION, DESTROY]``, and ``category`` is one of the enums from ``torch.profiler._memory_profiler.Category``. Output: Memory timeline written as gzipped JSON, JSON, or HTML. .. deprecated:: ``export_memory_timeline`` is deprecated and will be removed in a future version. Please use ``torch.cuda.memory._record_memory_history`` and ``torch.cuda.memory._export_memory_snapshot`` instead. """ # Default to device 0, if unset. Fallback on cpu. if device is None: if self.use_device and self.use_device != "cuda": device = self.use_device + ":0" else: device = "cuda:0" if torch.cuda.is_available() else "cpu" # Construct the memory timeline plot data self.mem_tl = MemoryProfileTimeline(self._memory_profile()) # Depending on the file suffix, save the data as json.gz or json. # For html, we can embed the image into an HTML file. if path.endswith(".html"): self.mem_tl.export_memory_timeline_html(path, device) elif path.endswith(".gz"): with tempfile.NamedTemporaryFile("w+t", suffix=".json") as fp: if path.endswith("raw.json.gz"): self.mem_tl.export_memory_timeline_raw(fp.name, device) else: self.mem_tl.export_memory_timeline(fp.name, device) with open(fp.name) as fin, gzip.open(path, "wt") as fout: fout.writelines(fin) else: self.mem_tl.export_memory_timeline(path, device)
_KinetoProfile
python
kamyu104__LeetCode-Solutions
Python/find-the-number-of-subarrays-where-boundary-elements-are-maximum.py
{ "start": 57, "end": 482 }
class ____(object): def numberOfSubarrays(self, nums): """ :type nums: List[int] :rtype: int """ result = 0 stk = [] for x in nums: while stk and stk[-1][0] < x: stk.pop() if not stk or stk[-1][0] != x: stk.append([x, 0]) stk[-1][1] += 1 result += stk[-1][1] return result
Solution
python
Textualize__rich
benchmarks/benchmarks.py
{ "start": 5078, "end": 5574 }
class ____: def setup(self): self.console = Console( file=StringIO(), color_system="truecolor", legacy_windows=False, width=100 ) self.color = Color.parse("#0d1da0") def time_downgrade_to_eight_bit(self): self.color.downgrade(ColorSystem.EIGHT_BIT) def time_downgrade_to_standard(self): self.color.downgrade(ColorSystem.STANDARD) def time_downgrade_to_windows(self): self.color.downgrade(ColorSystem.WINDOWS)
ColorSuite
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 7026, "end": 7514 }
class ____(BaseModel, extra="forbid"): """ Operation for performing changes of collection aliases. Alias changes are atomic, meaning that no collection modifications can happen between alias operations. """ actions: List["AliasOperations"] = Field( ..., description="Operation for performing changes of collection aliases. Alias changes are atomic, meaning that no collection modifications can happen between alias operations.", )
ChangeAliasesOperation
python
django-extensions__django-extensions
tests/test_randomchar_field.py
{ "start": 561, "end": 3623 }
class ____(TestCase): def testRandomCharField(self): m = RandomCharTestModel() m.save() assert len(m.random_char_field) == 8, m.random_char_field def testRandomCharFieldUnique(self): m = RandomCharTestModelUnique() m.save() assert len(m.random_char_field) == 8, m.random_char_field def testRandomCharFieldLowercase(self): m = RandomCharTestModelLowercase() m.save() for c in m.random_char_field: assert c.islower(), m.random_char_field def testRandomCharFieldUppercase(self): m = RandomCharTestModelUppercase() m.save() for c in m.random_char_field: assert c.isupper(), m.random_char_field def testRandomCharFieldAlpha(self): m = RandomCharTestModelAlpha() m.save() for c in m.random_char_field: assert c.isalpha(), m.random_char_field def testRandomCharFieldDigits(self): m = RandomCharTestModelDigits() m.save() for c in m.random_char_field: assert c.isdigit(), m.random_char_field def testRandomCharFieldPunctuation(self): m = RandomCharTestModelPunctuation() m.save() for c in m.random_char_field: assert c in string.punctuation, m.random_char_field def testRandomCharTestModelLowercaseAlphaDigits(self): m = RandomCharTestModelLowercaseAlphaDigits() m.save() for c in m.random_char_field: assert c.isdigit() or (c.isalpha() and c.islower()), m.random_char_field def testRandomCharTestModelUppercaseAlphaDigits(self): m = RandomCharTestModelUppercaseAlphaDigits() m.save() for c in m.random_char_field: assert c.isdigit() or (c.isalpha() and c.isupper()), m.random_char_field def testRandomCharTestModelDuplicate(self): m = RandomCharTestModelUnique() m.save() with mock.patch( "django_extensions.db.fields.RandomCharField.random_char_generator" ) as func: func.return_value = iter([m.random_char_field, "aaa"]) m = RandomCharTestModelUnique() m.save() assert m.random_char_field == "aaa" def testRandomCharTestModelAsserts(self): with mock.patch("django_extensions.db.fields.get_random_string") as mock_sample: mock_sample.return_value = "aaa" m = RandomCharTestModelUnique() m.save() m = 
RandomCharTestModelUnique() with pytest.raises(RuntimeError): m.save() def testRandomCharTestModelUniqueTogether(self): with mock.patch("django_extensions.db.fields.get_random_string") as mock_sample: mock_sample.return_value = "aaa" m = RandomCharTestModelUniqueTogether() m.common_field = "bbb" m.save() m = RandomCharTestModelUniqueTogether() m.common_field = "bbb" with pytest.raises(RuntimeError): m.save()
RandomCharFieldTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_default_format14.py
{ "start": 315, "end": 2245 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_format14.xlsx") def test_create_file(self): """Test the creation of a file with user defined default format""" workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Arial", "font_size": 18}, "default_row_height": 31, "default_column_width": 120, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column_pixels(4, 4, 96) worksheet.set_row_pixels(8, 32) # Set column to text column width less than 1 character. worksheet.set_column_pixels(6, 6, 10) workbook.close() self.assertExcelEqual() def test_create_file_with_character_units(self): """Test the creation of a file with user defined default format""" # Same as workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Arial", "font_size": 18}, "default_row_height": 31, "default_column_width": 120, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column(4, 4, 6.69) worksheet.set_row(8, 24.0) # Set column to text column width less than 1 character. worksheet.set_column(6, 6, 0.45) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 7788, "end": 8272 }
class ____(str, BaseEnum): """How object deletions in multi node environments should be resolved. Attributes: PERMANENT_DELETION: Once an object has been deleted on one node it will be deleted on all nodes in case of conflicts. NO_AUTOMATED_RESOLUTION: No deletion resolution. """ DELETE_ON_CONFLICT = "DeleteOnConflict" NO_AUTOMATED_RESOLUTION = "NoAutomatedResolution" TIME_BASED_RESOLUTION = "TimeBasedResolution"
ReplicationDeletionStrategy
python
pallets__quart
src/quart/wrappers/response.py
{ "start": 3607, "end": 5604 }
class ____(ResponseBody): """Provides an async file accessor with range setting. The :attr:`Response.response` attribute must be async-iterable and yield bytes, which this wrapper does for a file. In addition it allows a range to be set on the file, thereby supporting conditional requests. Attributes: buffer_size: Size in bytes to load per iteration. """ buffer_size = 8192 def __init__( self, file_path: str | PathLike, *, buffer_size: int | None = None ) -> None: self.file_path = file_path_to_path(file_path) self.size = self.file_path.stat().st_size self.begin = 0 self.end = self.size if buffer_size is not None: self.buffer_size = buffer_size self.file: AsyncBufferedIOBase | None = None self.file_manager: AiofilesContextManager = None async def __aenter__(self) -> FileBody: self.file_manager = async_open(self.file_path, mode="rb") self.file = await self.file_manager.__aenter__() await self.file.seek(self.begin) return self async def __aexit__( self, exc_type: type, exc_value: BaseException, tb: TracebackType ) -> None: await self.file_manager.__aexit__(exc_type, exc_value, tb) def __aiter__(self) -> FileBody: return self async def __anext__(self) -> bytes: current = await self.file.tell() if current >= self.end: raise StopAsyncIteration() read_size = min(self.buffer_size, self.end - current) chunk = await self.file.read(read_size) if chunk: return chunk else: raise StopAsyncIteration() async def make_conditional(self, begin: int, end: int | None) -> int: self.begin = begin self.end = self.size if end is None else end self.end = min(self.size, self.end) _raise_if_invalid_range(self.begin, self.end, self.size) return self.size
FileBody
python
run-llama__llama_index
llama-index-packs/llama-index-packs-diff-private-simple-dataset/llama_index/packs/diff_private_simple_dataset/base.py
{ "start": 1535, "end": 2011 }
class ____(BaseModel): instruction: str = Field(description="Instruction associated with underlying task.") text_heading: str = Field(description="Heading used for text.") label_heading: str = Field(description="Label heading used for label.") def _batch(iterable, n=1) -> Iterable[Any]: """Return iterable batches of an iterable.""" length = len(iterable) for ndx in range(0, length, n): yield iterable[ndx : min(ndx + n, length)]
PromptBundle
python
apache__airflow
providers/standard/tests/unit/standard/decorators/test_branch_external_python.py
{ "start": 1234, "end": 3919 }
class ____: # when run in "Parallel" test run environment, sometimes this test runs for a long time # because creating virtualenv and starting new Python interpreter creates a lot of IO/contention # possibilities. So we are increasing the timeout for this test to 3x of the default timeout @pytest.mark.execution_timeout(180) @pytest.mark.parametrize( ("branch_task_name", "skipped_task_name"), [("task_1", "task_2"), ("task_2", "task_1")] ) def test_branch_one(self, dag_maker, branch_task_name, skipped_task_name): @task def dummy_f(): pass @task def task_1(): pass @task def task_2(): pass if ( branch_task_name == "task_1" ): # Note we manually need to carry the literal value into the venc code :-( @task.branch_external_python(task_id="branching", python=sys.executable) def branch_operator(): return "task_1" else: @task.branch_external_python(task_id="branching", python=sys.executable) def branch_operator(): return "task_2" with dag_maker(): branchoperator = branch_operator() df = dummy_f() task_1 = task_1() task_2 = task_2() df.set_downstream(branchoperator) branchoperator.set_downstream(task_1) branchoperator.set_downstream(task_2) dr = dag_maker.create_dagrun() dag_maker.run_ti("dummy_f", dr) if AIRFLOW_V_3_0_1: with pytest.raises(DownstreamTasksSkipped) as exc_info: dag_maker.run_ti("branching", dr) assert exc_info.value.tasks == [(skipped_task_name, -1)] else: dag_maker.run_ti("branching", dr) dag_maker.run_ti("task_1", dr) dag_maker.run_ti("task_2", dr) tis = dr.get_task_instances() for ti in tis: if ti.task_id == "dummy_f": assert ti.state == State.SUCCESS if ti.task_id == "branching": assert ti.state == State.SUCCESS if ti.task_id == "task_1" and branch_task_name == "task_1": assert ti.state == State.SUCCESS elif ti.task_id == "task_1": assert ti.state == State.SKIPPED if ti.task_id == "task_2" and branch_task_name == "task_2": assert ti.state == State.SUCCESS elif ti.task_id == "task_2": assert ti.state == State.SKIPPED
TestBranchExternalPythonDecoratedOperator
python
pyca__cryptography
tests/hazmat/primitives/test_hash_vectors.py
{ "start": 4048, "end": 4409 }
class ____: test_sha3_224 = generate_hash_test( load_hash_vectors, os.path.join("hashes", "SHA3"), ["SHA3_224LongMsg.rsp", "SHA3_224ShortMsg.rsp"], hashes.SHA3_224(), ) @pytest.mark.supported( only_if=lambda backend: backend.hash_supported(hashes.SHA3_256()), skip_message="Does not support SHA3_256", )
TestSHA3224
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/pipeline.py
{ "start": 19897, "end": 40113 }
class ____(StepModifyingFiles): context: PublishConnectorContext title = "Disable progressive rollout in metadata file" async def _run(self) -> StepResult: raw_metadata = await dagger_read_file(await self.context.get_connector_dir(include=[METADATA_FILE_NAME]), METADATA_FILE_NAME) current_metadata = yaml.safe_load(raw_metadata) enable_progressive_rollout = ( current_metadata.get("data", {}).get("releases", {}).get("rolloutConfiguration", {}).get("enableProgressiveRollout", False) ) if not enable_progressive_rollout: return StepResult(step=self, status=StepStatus.SKIPPED, stdout="Progressive rollout is already disabled.") # We do an in-place replacement instead of serializing back to yaml to preserve comments and formatting. new_raw_metadata = raw_metadata.replace("enableProgressiveRollout: true", "enableProgressiveRollout: false") self.modified_directory = dagger_write_file(self.modified_directory, METADATA_FILE_NAME, new_raw_metadata) self.modified_files.append(METADATA_FILE_NAME) return StepResult( step=self, status=StepStatus.SUCCESS, stdout="Set enableProgressiveRollout to false in connector metadata.", output=self.modified_directory, ) # Helpers def create_connector_report(results: List[StepResult], context: PublishConnectorContext) -> ConnectorReport: """Generate a connector report from results and assign it to the context. Args: results (List[StepResult]): List of step results. context (PublishConnectorContext): The connector context to assign the report to. Returns: ConnectorReport: The connector report. """ report = ConnectorReport(context, results, name="PUBLISH RESULTS") context.report = report return report # Pipeline async def run_connector_publish_pipeline(context: PublishConnectorContext, semaphore: anyio.Semaphore) -> ConnectorReport: """Run a publish pipeline for a single connector. 1. Validate the metadata file. 2. Check if the connector image already exists. 3. Build the connector, with platform variants. 4. 
Push the connector to DockerHub, with platform variants. 5. Upload its spec to the spec cache bucket. 6. Upload its metadata file to the metadata service bucket. Returns: ConnectorReport: The reports holding publish results. """ assert context.rollout_mode == RolloutMode.PUBLISH, "This pipeline can only run in publish mode." metadata_upload_step = MetadataUpload( context=context, metadata_service_gcs_credentials=context.metadata_service_gcs_credentials, docker_hub_username=context.docker_hub_username, docker_hub_password=context.docker_hub_password, metadata_bucket_name=context.metadata_bucket_name, pre_release=context.pre_release, pre_release_tag=context.docker_image_tag, ) upload_spec_to_cache_step = UploadSpecToCache(context) upload_sbom_step = UploadSbom(context) async with semaphore: async with context: results = [] # Check if the connector image is already published to the registry. check_connector_image_results = await CheckConnectorImageDoesNotExist(context).run() results.append(check_connector_image_results) python_registry_steps, terminate_early = await _run_python_registry_publish_pipeline(context) results.extend(python_registry_steps) if terminate_early: return create_connector_report(results, context) # If the connector image already exists, we don't need to build it, but we still need to upload the metadata file. # We also need to upload the spec to the spec cache bucket. # For pre-releases, rebuild all the time. if check_connector_image_results.status is StepStatus.SKIPPED and not context.pre_release: context.logger.info( "The connector version is already published. Let's upload metadata.yaml and spec to GCS even if no version bump happened." 
) already_published_connector = context.dagger_client.container().from_(context.docker_image) upload_to_spec_cache_results = await upload_spec_to_cache_step.run(already_published_connector) results.append(upload_to_spec_cache_results) if upload_to_spec_cache_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) upload_sbom_results = await upload_sbom_step.run() results.append(upload_sbom_results) if upload_sbom_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) metadata_upload_results = await metadata_upload_step.run() results.append(metadata_upload_results) # Exit early if the connector image already exists if check_connector_image_results.status is not StepStatus.SUCCESS and not context.pre_release: return create_connector_report(results, context) build_connector_results = await steps.run_connector_build(context) results.append(build_connector_results) # Exit early if the connector image failed to build if build_connector_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) if context.connector.language in [ConnectorLanguage.PYTHON, ConnectorLanguage.LOW_CODE]: upload_dependencies_step = await UploadDependenciesToMetadataService(context).run(build_connector_results.output) results.append(upload_dependencies_step) built_connector_platform_variants = list(build_connector_results.output.values()) push_connector_image_results = await PushConnectorImageToRegistry(context).run(built_connector_platform_variants) results.append(push_connector_image_results) # Exit early if the connector image failed to push if push_connector_image_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) # Make sure the image published is healthy by pulling it and running SPEC on it. 
# See https://github.com/airbytehq/airbyte/issues/26085 pull_connector_image_results = await PullConnectorImageFromRegistry(context).run() results.append(pull_connector_image_results) # Exit early if the connector image failed to pull if pull_connector_image_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) upload_to_spec_cache_results = await upload_spec_to_cache_step.run(built_connector_platform_variants[0]) results.append(upload_to_spec_cache_results) if upload_to_spec_cache_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) upload_sbom_results = await upload_sbom_step.run() results.append(upload_sbom_results) if upload_sbom_results.status is not StepStatus.SUCCESS: return create_connector_report(results, context) metadata_upload_results = await metadata_upload_step.run() results.append(metadata_upload_results) connector_report = create_connector_report(results, context) return connector_report async def _run_python_registry_publish_pipeline(context: PublishConnectorContext) -> Tuple[List[StepResult], bool]: """ Run the python registry publish pipeline for a single connector. Return the results of the steps and a boolean indicating whether there was an error and the pipeline should be stopped. """ results: List[StepResult] = [] # Try to convert the context to a PythonRegistryPublishContext. If it returns None, it means we don't need to publish to a python registry. python_registry_context = await PythonRegistryPublishContext.from_publish_connector_context(context) if not python_registry_context: return results, False if not context.python_registry_token or not context.python_registry_url: # If the python registry token or url are not set, we can't publish to the python registry - stop the pipeline. 
return [ StepResult( step=PublishToPythonRegistry(python_registry_context), status=StepStatus.FAILURE, stderr="Pypi publishing is enabled, but python registry token or url are not set.", ) ], True check_python_registry_package_exists_results = await CheckPythonRegistryPackageDoesNotExist(python_registry_context).run() results.append(check_python_registry_package_exists_results) if check_python_registry_package_exists_results.status is StepStatus.SKIPPED: context.logger.info("The connector version is already published on python registry.") elif check_python_registry_package_exists_results.status is StepStatus.SUCCESS: context.logger.info("The connector version is not published on python registry. Let's build and publish it.") publish_to_python_registry_results = await PublishToPythonRegistry(python_registry_context).run() results.append(publish_to_python_registry_results) if publish_to_python_registry_results.status is StepStatus.FAILURE: return results, True elif check_python_registry_package_exists_results.status is StepStatus.FAILURE: return results, True return results, False def get_rollback_pr_creation_arguments( modified_files: Iterable[Path], context: PublishConnectorContext, step_results: Iterable[StepResult], release_candidate_version: str, ) -> Tuple[Tuple, Dict]: return ( (modified_files,), { "branch_id": f"{context.connector.technical_name}/rollback-{release_candidate_version}", "commit_message": "[auto-publish] " # << We can skip Vercel builds if this is in the commit message + "; ".join(step_result.step.title for step_result in step_results if step_result.success), "pr_title": f"🐙 {context.connector.technical_name}: Stop progressive rollout for {release_candidate_version}", "pr_body": f"The release candidate version {release_candidate_version} has been deemed unstable. This PR stops its progressive rollout. This PR will be automatically merged as part of the `auto-merge` workflow. 
This workflow runs every 2 hours.", }, ) async def run_connector_rollback_pipeline(context: PublishConnectorContext, semaphore: anyio.Semaphore) -> ConnectorReport: """Run a rollback pipeline for a single connector. 1. Disable progressive rollout in metadata file. 2. Open a PR with the updated metadata, set the auto-merge label. Returns: ConnectorReport: The reports holding promote results. """ results = [] current_version = context.connector.version all_modified_files = set() async with semaphore: async with context: assert context.rollout_mode == RolloutMode.ROLLBACK, "This pipeline can only run in rollback mode." original_connector_directory = await context.get_connector_dir() # Disable progressive rollout in metadata file reset_release_candidate = DisableProgressiveRollout(context, original_connector_directory) reset_release_candidate_results = await reset_release_candidate.run() results.append(reset_release_candidate_results) if reset_release_candidate_results.success: all_modified_files.update(await reset_release_candidate.export_modified_files(context.connector.code_directory)) if not all([result.success for result in results]): context.logger.error("The metadata update failed. 
Skipping PR creation.") connector_report = create_connector_report(results, context) return connector_report # Open PR when all previous steps are successful initial_pr_creation = CreateOrUpdatePullRequest( context, # We will merge even if the CI checks fail, due to this label: labels=[AUTO_MERGE_BYPASS_CI_CHECKS_LABEL, "rollback-rc"], # Let GitHub auto-merge this if all checks pass before the next run # of our auto-merge workflow: github_auto_merge=True, # Don't skip CI, as it prevents the PR from auto-merging naturally: skip_ci=False, ) pr_creation_args, pr_creation_kwargs = get_rollback_pr_creation_arguments(all_modified_files, context, results, current_version) initial_pr_creation_result = await initial_pr_creation.run(*pr_creation_args, **pr_creation_kwargs) results.append(initial_pr_creation_result) connector_report = create_connector_report(results, context) return connector_report def get_promotion_pr_creation_arguments( modified_files: Iterable[Path], context: PublishConnectorContext, step_results: Iterable[StepResult], release_candidate_version: str, promoted_version: str, ) -> Tuple[Tuple, Dict]: return ( (modified_files,), { "branch_id": f"{context.connector.technical_name}/{promoted_version}", "commit_message": "[auto-publish] " # << We can skip Vercel builds if this is in the commit message + "; ".join(step_result.step.title for step_result in step_results if step_result.success), "pr_title": f"🐙 {context.connector.technical_name}: release {promoted_version}", "pr_body": f"The release candidate version {release_candidate_version} has been deemed stable and is now ready to be promoted to an official release ({promoted_version}). This PR will be automatically merged as part of the `auto-merge` workflow. This workflow runs every 2 hours.", }, ) async def run_connector_promote_pipeline(context: PublishConnectorContext, semaphore: anyio.Semaphore) -> ConnectorReport: """Run a promote pipeline for a single connector. 1. 
Update connector metadata to: * Remove the RC suffix from the version. * Disable progressive rollout. 2. Open a PR with the updated metadata. 3. Add a changelog entry to the documentation. 4. Update the PR with the updated changelog, set the auto-merge label. Returns: ConnectorReport: The reports holding promote results. """ results = [] current_version = context.connector.version all_modified_files = set() async with semaphore: async with context: assert context.rollout_mode == RolloutMode.PROMOTE, "This pipeline can only run in promote mode." original_connector_directory = await context.get_connector_dir() # Remove RC suffix set_promoted_version = SetPromotedVersion(context, original_connector_directory) set_promoted_version_results = await set_promoted_version.run() results.append(set_promoted_version_results) if set_promoted_version_results.success: all_modified_files.update(await set_promoted_version.export_modified_files(context.connector.code_directory)) # Disable progressive rollout in metadata file reset_release_candidate = DisableProgressiveRollout(context, set_promoted_version_results.output) reset_release_candidate_results = await reset_release_candidate.run() results.append(reset_release_candidate_results) if reset_release_candidate_results.success: all_modified_files.update(await reset_release_candidate.export_modified_files(context.connector.code_directory)) if not all([result.success for result in results]): context.logger.error("The metadata update failed. 
Skipping PR creation.") connector_report = create_connector_report(results, context) return connector_report # Open PR when all previous steps are successful promoted_version = set_promoted_version.promoted_version initial_pr_creation = CreateOrUpdatePullRequest(context, skip_ci=False) pr_creation_args, pr_creation_kwargs = get_promotion_pr_creation_arguments( all_modified_files, context, results, current_version, promoted_version ) initial_pr_creation_result = await initial_pr_creation.run(*pr_creation_args, **pr_creation_kwargs) results.append(initial_pr_creation_result) # Update changelog and update PR if initial_pr_creation_result.success: created_pr = initial_pr_creation_result.output documentation_directory = await context.get_repo_dir( include=[str(context.connector.local_connector_documentation_directory)] ).directory(str(context.connector.local_connector_documentation_directory)) add_changelog_entry = AddChangelogEntry( context, documentation_directory, promoted_version, f"Promoting release candidate {current_version} to a main version.", created_pr.number, ) add_changelog_entry_result = await add_changelog_entry.run() results.append(add_changelog_entry_result) if add_changelog_entry_result.success: all_modified_files.update( await add_changelog_entry.export_modified_files(context.connector.local_connector_documentation_directory) ) post_changelog_pr_update = CreateOrUpdatePullRequest( context, skip_ci=False, # Don't skip CI, as it prevents the PR from auto-merging naturally. # We will merge even if the CI checks fail, due to the "bypass-ci-checks" label: labels=[AUTO_MERGE_BYPASS_CI_CHECKS_LABEL, "promoted-rc"], github_auto_merge=True, # Let GitHub auto-merge this if/when all required checks have passed. 
) pr_creation_args, pr_creation_kwargs = get_promotion_pr_creation_arguments( all_modified_files, context, results, current_version, promoted_version ) post_changelog_pr_update_result = await post_changelog_pr_update.run(*pr_creation_args, **pr_creation_kwargs) results.append(post_changelog_pr_update_result) connector_report = create_connector_report(results, context) return connector_report def reorder_contexts(contexts: List[PublishConnectorContext]) -> List[PublishConnectorContext]: """Reorder contexts so that the ones that are for strict-encrypt/secure connectors come first. The metadata upload on publish checks if the the connectors referenced in the metadata file are already published to DockerHub. Non strict-encrypt variant reference the strict-encrypt variant in their metadata file for cloud. So if we publish the non strict-encrypt variant first, the metadata upload will fail if the strict-encrypt variant is not published yet. As strict-encrypt variant are often modified in the same PR as the non strict-encrypt variant, we want to publish them first. """ def is_secure_variant(context: PublishConnectorContext) -> bool: SECURE_VARIANT_KEYS = ["secure", "strict-encrypt"] return any(key in context.connector.technical_name for key in SECURE_VARIANT_KEYS) return sorted(contexts, key=lambda context: (is_secure_variant(context), context.connector.technical_name), reverse=True)
DisableProgressiveRollout
python
numba__numba
numba/tests/npyufunc/test_dufunc.py
{ "start": 29949, "end": 34743 }
class ____(TestDUFuncMethodsBase): def _check_reduce(self, ufunc, dtype=None, initial=None): @njit def foo(a, axis, dtype, initial): return ufunc.reduce(a, axis=axis, dtype=dtype, initial=initial) inputs = [ np.arange(5), np.arange(4).reshape(2, 2), np.arange(40).reshape(5, 4, 2), ] for array in inputs: for axis in range(array.ndim): expected = foo.py_func(array, axis, dtype, initial) got = foo(array, axis, dtype, initial) self.assertPreciseEqual(expected, got) def _check_reduce_axis(self, ufunc, dtype, initial=None): @njit def foo(a, axis): return ufunc.reduce(a, axis=axis, initial=initial) def _check(*args): try: expected = foo.py_func(array, axis) except ValueError as e: self.assertEqual(e.args[0], exc_msg) with self.assertRaisesRegex(TypingError, exc_msg): got = foo(array, axis) else: got = foo(array, axis) self.assertPreciseEqual(expected, got) exc_msg = (f"reduction operation '{ufunc.__name__}' is not " "reorderable, so at most one axis may be specified") inputs = [ np.arange(40, dtype=dtype).reshape(5, 4, 2), np.arange(10, dtype=dtype), ] for array in inputs: for i in range(1, array.ndim + 1): for axis in itertools.combinations(range(array.ndim), r=i): _check(array, axis) # corner cases: Reduce over axis=() and axis=None for axis in ((), None): _check(array, axis) def test_add_reduce(self): duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) self._check_reduce(duadd) self._check_reduce_axis(duadd, dtype=np.int64) def test_mul_reduce(self): dumul = vectorize('int64(int64, int64)', identity=1)(pymult) self._check_reduce(dumul) def test_non_associative_reduce(self): dusub = vectorize('int64(int64, int64)', identity=None)(pysub) dudiv = vectorize('int64(int64, int64)', identity=None)(pydiv) self._check_reduce(dusub) self._check_reduce_axis(dusub, dtype=np.int64) self._check_reduce(dudiv) self._check_reduce_axis(dudiv, dtype=np.int64) def test_reduce_dtype(self): duadd = vectorize('float64(float64, int64)', identity=0)(pyuadd) self._check_reduce(duadd, 
dtype=np.float64) def test_min_reduce(self): dumin = vectorize('int64(int64, int64)', identity='reorderable')(pymin) self._check_reduce(dumin, initial=10) self._check_reduce_axis(dumin, dtype=np.int64) def test_add_reduce_initial(self): # Initial should be used as a start duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) self._check_reduce(duadd, dtype=np.int64, initial=100) def test_add_reduce_no_initial_or_identity(self): # don't provide an initial or identity value duadd = vectorize('int64(int64, int64)')(pyuadd) self._check_reduce(duadd, dtype=np.int64) def test_invalid_input(self): duadd = vectorize('float64(float64, int64)', identity=0)(pyuadd) @njit def foo(a): return duadd.reduce(a) exc_msg = 'The first argument "array" must be array-like' with self.assertRaisesRegex(TypingError, exc_msg): foo('a') def test_dufunc_negative_axis(self): duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) @njit def foo(a, axis): return duadd.reduce(a, axis=axis) a = np.arange(40).reshape(5, 4, 2) cases = (0, -1, (0, -1), (-1, -2), (1, -1), -3) for axis in cases: expected = duadd.reduce(a, axis) got = foo(a, axis) self.assertPreciseEqual(expected, got) def test_dufunc_invalid_axis(self): duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) @njit def foo(a, axis): return duadd.reduce(a, axis=axis) a = np.arange(40).reshape(5, 4, 2) cases = ((0, 0), (0, 1, 0), (0, -3), (-1, -1), (-1, 2)) for axis in cases: msg = "duplicate value in 'axis'" with self.assertRaisesRegex(ValueError, msg): foo(a, axis) cases = (-4, 3, (0, -4),) for axis in cases: with self.assertRaisesRegex(ValueError, "Invalid axis"): foo(a, axis)
TestDUFuncReduce
python
scipy__scipy
scipy/spatial/transform/_rotation_spline.py
{ "start": 6411, "end": 14244 }
class ____: """Interpolate rotations with continuous angular rate and acceleration. The rotation vectors between each consecutive orientation are cubic functions of time and it is guaranteed that angular rate and acceleration are continuous. Such interpolation are analogous to cubic spline interpolation. Refer to [1]_ for math and implementation details. Parameters ---------- times : array_like, shape (N,) Times of the known rotations. At least 2 times must be specified. rotations : `Rotation` instance Rotations to perform the interpolation between. Must contain N rotations. Methods ------- __call__ References ---------- .. [1] `Smooth Attitude Interpolation <https://github.com/scipy/scipy/files/2932755/attitude_interpolation.pdf>`_ Examples -------- >>> from scipy.spatial.transform import Rotation, RotationSpline >>> import numpy as np Define the sequence of times and rotations from the Euler angles: >>> times = [0, 10, 20, 40] >>> angles = [[-10, 20, 30], [0, 15, 40], [-30, 45, 30], [20, 45, 90]] >>> rotations = Rotation.from_euler('XYZ', angles, degrees=True) Create the interpolator object: >>> spline = RotationSpline(times, rotations) Interpolate the Euler angles, angular rate and acceleration: >>> angular_rate = np.rad2deg(spline(times, 1)) >>> angular_acceleration = np.rad2deg(spline(times, 2)) >>> times_plot = np.linspace(times[0], times[-1], 100) >>> angles_plot = spline(times_plot).as_euler('XYZ', degrees=True) >>> angular_rate_plot = np.rad2deg(spline(times_plot, 1)) >>> angular_acceleration_plot = np.rad2deg(spline(times_plot, 2)) On this plot you see that Euler angles are continuous and smooth: >>> import matplotlib.pyplot as plt >>> plt.plot(times_plot, angles_plot) >>> plt.plot(times, angles, 'x') >>> plt.title("Euler angles") >>> plt.show() The angular rate is also smooth: >>> plt.plot(times_plot, angular_rate_plot) >>> plt.plot(times, angular_rate, 'x') >>> plt.title("Angular rate") >>> plt.show() The angular acceleration is continuous, but not 
smooth. Also note that the angular acceleration is not a piecewise-linear function, because it is different from the second derivative of the rotation vector (which is a piecewise-linear function as in the cubic spline). >>> plt.plot(times_plot, angular_acceleration_plot) >>> plt.plot(times, angular_acceleration, 'x') >>> plt.title("Angular acceleration") >>> plt.show() """ # Parameters for the solver for angular rate. MAX_ITER = 10 TOL = 1e-9 def _solve_for_angular_rates(self, dt, angular_rates, rotvecs): angular_rate_first = angular_rates[0].copy() A = _angular_rate_to_rotvec_dot_matrix(rotvecs) A_inv = _rotvec_dot_to_angular_rate_matrix(rotvecs) M = _create_block_3_diagonal_matrix( 2 * A_inv[1:-1] / dt[1:-1, None, None], 2 * A[1:-1] / dt[1:-1, None, None], 4 * (1 / dt[:-1] + 1 / dt[1:])) b0 = 6 * (rotvecs[:-1] * dt[:-1, None] ** -2 + rotvecs[1:] * dt[1:, None] ** -2) b0[0] -= 2 / dt[0] * A_inv[0].dot(angular_rate_first) b0[-1] -= 2 / dt[-1] * A[-1].dot(angular_rates[-1]) for iteration in range(self.MAX_ITER): rotvecs_dot = _matrix_vector_product_of_stacks(A, angular_rates) delta_beta = _angular_acceleration_nonlinear_term( rotvecs[:-1], rotvecs_dot[:-1]) b = b0 - delta_beta angular_rates_new = solve_banded((5, 5), M, b.ravel()) angular_rates_new = angular_rates_new.reshape((-1, 3)) delta = np.abs(angular_rates_new - angular_rates[:-1]) angular_rates[:-1] = angular_rates_new if np.all(delta < self.TOL * (1 + np.abs(angular_rates_new))): break rotvecs_dot = _matrix_vector_product_of_stacks(A, angular_rates) angular_rates = np.vstack((angular_rate_first, angular_rates[:-1])) return angular_rates, rotvecs_dot def __init__(self, times, rotations): from scipy.interpolate import PPoly if rotations.single: raise ValueError("`rotations` must be a sequence of rotations.") if rotations.as_quat().ndim > 2: raise ValueError( "Rotations with more than 1 leading dimension are not supported." 
) if len(rotations) == 1: raise ValueError("`rotations` must contain at least 2 rotations.") times = np.asarray(times, dtype=float) if times.ndim != 1: raise ValueError("`times` must be 1-dimensional.") if len(times) != len(rotations): raise ValueError("Expected number of rotations to be equal to " "number of timestamps given, " f"got {len(rotations)} rotations " f"and {len(times)} timestamps.") dt = np.diff(times) if np.any(dt <= 0): raise ValueError("Values in `times` must be in a strictly " "increasing order.") rotvecs = (rotations[:-1].inv() * rotations[1:]).as_rotvec() angular_rates = rotvecs / dt[:, None] if len(rotations) == 2: rotvecs_dot = angular_rates else: angular_rates, rotvecs_dot = self._solve_for_angular_rates( dt, angular_rates, rotvecs) dt = dt[:, None] coeff = np.empty((4, len(times) - 1, 3)) coeff[0] = (-2 * rotvecs + dt * angular_rates + dt * rotvecs_dot) / dt ** 3 coeff[1] = (3 * rotvecs - 2 * dt * angular_rates - dt * rotvecs_dot) / dt ** 2 coeff[2] = angular_rates coeff[3] = 0 self.times = times self.rotations = rotations self.interpolator = PPoly(coeff, times) def __call__(self, times, order=0): """Compute interpolated values. Parameters ---------- times : float or array_like Times of interest. order : {0, 1, 2}, optional Order of differentiation: * 0 (default) : return Rotation * 1 : return the angular rate in rad/sec * 2 : return the angular acceleration in rad/sec/sec Returns ------- Interpolated Rotation, angular rate or acceleration. 
""" if order not in [0, 1, 2]: raise ValueError("`order` must be 0, 1 or 2.") times = np.asarray(times, dtype=float) if times.ndim > 1: raise ValueError("`times` must be at most 1-dimensional.") singe_time = times.ndim == 0 times = np.atleast_1d(times) rotvecs = self.interpolator(times) if order == 0: index = np.searchsorted(self.times, times, side='right') index -= 1 index[index < 0] = 0 n_segments = len(self.times) - 1 index[index > n_segments - 1] = n_segments - 1 result = self.rotations[index] * Rotation.from_rotvec(rotvecs) elif order == 1: rotvecs_dot = self.interpolator(times, 1) result = _compute_angular_rate(rotvecs, rotvecs_dot) elif order == 2: rotvecs_dot = self.interpolator(times, 1) rotvecs_dot_dot = self.interpolator(times, 2) result = _compute_angular_acceleration(rotvecs, rotvecs_dot, rotvecs_dot_dot) else: assert False if singe_time: result = result[0] return result
RotationSpline
python
Pylons__pyramid
tests/test_integration.py
{ "start": 566, "end": 714 }
class ____(Interface): pass @view_config(for_=INothing) @wsgiapp def wsgiapptest(environ, start_response): """ """ return '123'
INothing
python
ansible__ansible
lib/ansible/module_utils/facts/system/cmdline.py
{ "start": 854, "end": 2608 }
class ____(BaseFactCollector): name = 'cmdline' _fact_ids = set() # type: t.Set[str] def _get_proc_cmdline(self): return get_file_content('/proc/cmdline') def _parse_proc_cmdline(self, data): cmdline_dict = {} try: for piece in shlex.split(data, posix=False): item = piece.split('=', 1) if len(item) == 1: cmdline_dict[item[0]] = True else: cmdline_dict[item[0]] = item[1] except ValueError: pass return cmdline_dict def _parse_proc_cmdline_facts(self, data): cmdline_dict = {} try: for piece in shlex.split(data, posix=False): item = piece.split('=', 1) if len(item) == 1: cmdline_dict[item[0]] = True else: if item[0] in cmdline_dict: if isinstance(cmdline_dict[item[0]], list): cmdline_dict[item[0]].append(item[1]) else: new_list = [cmdline_dict[item[0]], item[1]] cmdline_dict[item[0]] = new_list else: cmdline_dict[item[0]] = item[1] except ValueError: pass return cmdline_dict def collect(self, module=None, collected_facts=None): cmdline_facts = {} data = self._get_proc_cmdline() if not data: return cmdline_facts cmdline_facts['cmdline'] = self._parse_proc_cmdline(data) cmdline_facts['proc_cmdline'] = self._parse_proc_cmdline_facts(data) return cmdline_facts
CmdLineFactCollector
python
huggingface__transformers
src/transformers/models/resnet/convert_resnet_to_pytorch.py
{ "start": 1964, "end": 6982 }
class ____: src: nn.Module dest: nn.Module verbose: int = 0 src_skip: list = field(default_factory=list) dest_skip: list = field(default_factory=list) def __call__(self, x: Tensor): """ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the hood we tracked all the operations in both modules. """ dest_traced = Tracker(self.dest)(x).parametrized src_traced = Tracker(self.src)(x).parametrized src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) if len(dest_traced) != len(src_traced): raise Exception( f"Numbers of operations are different. Source module has {len(src_traced)} operations while" f" destination module has {len(dest_traced)}." ) for dest_m, src_m in zip(dest_traced, src_traced): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f"Transferred from={src_m} to={dest_m}") def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True): print(f"Converting {name}...") with torch.no_grad(): from_model = timm.create_model(name, pretrained=True).eval() our_model = ResNetForImageClassification(config).eval() module_transfer = ModuleTransfer(src=from_model, dest=our_model) x = torch.randn((1, 3, 224, 224)) module_transfer(x) assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one." 
checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}" print(checkpoint_name) if push_to_hub: our_model.push_to_hub(repo_id=checkpoint_name) # we can use the convnext one image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k") image_processor.push_to_hub(repo_id=checkpoint_name) print(f"Pushed {checkpoint_name}") def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 expected_shape = (1, num_labels) repo_id = "huggingface/label-files" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ), } if model_name: convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub) else: for model_name, config in names_to_config.items(): convert_weight_and_push(model_name, config, save_directory, push_to_hub) return config, expected_shape if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters 
parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
ModuleTransfer
python
pytest-dev__pytest
testing/example_scripts/unittest/test_unittest_asynctest.py
{ "start": 146, "end": 470 }
class ____(asynctest.TestCase): async def tearDown(self): teardowns.append(None) async def test_error(self): await asyncio.sleep(0) self.fail("failing on purpose") async def test_ok(self): await asyncio.sleep(0) def test_teardowns(self): assert len(teardowns) == 2
Test
python
ray-project__ray
python/ray/util/spark/cluster_init.py
{ "start": 69944, "end": 76922 }
class ____: """Create a ray on spark autoscaling cluster.""" def __init__( self, head_resources: dict, worker_node_types: dict, extra_provider_config: dict, upscaling_speed: float, idle_timeout_minutes: float, ): """Create the cluster. Args: head_resources: resources of the head node, including CPU. worker_node_types: autoscaler node types config for worker nodes. """ self._head_resources = head_resources.copy() self._head_resources["NODE_ID_AS_RESOURCE"] = HEAD_NODE_ID self._config = self._generate_config( head_resources, worker_node_types, extra_provider_config, upscaling_speed, idle_timeout_minutes, ) def _generate_config( self, head_resources, worker_node_types, extra_provider_config, upscaling_speed, idle_timeout_minutes, ): base_config = yaml.safe_load( open( os.path.join( os.path.dirname(ray.__file__), "autoscaler/spark/defaults.yaml", ) ) ) custom_config = copy.deepcopy(base_config) custom_config["available_node_types"] = worker_node_types custom_config["available_node_types"]["ray.head.default"] = { "resources": head_resources, "node_config": {}, "max_workers": 0, } custom_config["max_workers"] = sum( v["max_workers"] for _, v in worker_node_types.items() ) custom_config["provider"].update(extra_provider_config) custom_config["upscaling_speed"] = upscaling_speed custom_config["idle_timeout_minutes"] = idle_timeout_minutes return custom_config def start( self, ray_head_ip, ray_head_port, ray_client_server_port, ray_temp_dir, dashboard_options, head_node_options, collect_log_to_path, ray_node_custom_env, ): """Start the cluster. After this call returns, you can connect to the cluster with ray.init("auto"). 
""" from ray.util.spark.cluster_init import ( RAY_ON_SPARK_COLLECT_LOG_TO_PATH, _append_resources_config, _convert_ray_node_options, ) if ray_temp_dir is not None: autoscale_config = os.path.join(ray_temp_dir, "autoscaling_config.json") else: autoscale_config = os.path.join( _get_default_ray_tmp_dir(), "autoscaling_config.json" ) with open(autoscale_config, "w") as f: f.write(json.dumps(self._config)) ( worker_port_range_begin, worker_port_range_end, ) = _preallocate_ray_worker_port_range() ray_head_node_cmd = [ sys.executable, "-m", "ray.util.spark.start_ray_node", "--block", "--head", f"--node-ip-address={ray_head_ip}", f"--port={ray_head_port}", f"--ray-client-server-port={ray_client_server_port}", f"--autoscaling-config={autoscale_config}", f"--min-worker-port={worker_port_range_begin}", f"--max-worker-port={worker_port_range_end - 1}", *dashboard_options, ] if ray_temp_dir is not None: ray_head_node_cmd.append(f"--temp-dir={ray_temp_dir}") if "CPU" in self._head_resources: ray_head_node_cmd.append( "--num-cpus={}".format(self._head_resources.pop("CPU")) ) if "GPU" in self._head_resources: ray_head_node_cmd.append( "--num-gpus={}".format(self._head_resources.pop("GPU")) ) if "memory" in self._head_resources: ray_head_node_cmd.append( "--memory={}".format(self._head_resources.pop("memory")) ) if "object_store_memory" in self._head_resources: ray_head_node_cmd.append( "--object-store-memory={}".format( self._head_resources.pop("object_store_memory") ) ) head_node_options = _append_resources_config( head_node_options, self._head_resources ) ray_head_node_cmd.extend(_convert_ray_node_options(head_node_options)) extra_env = { "AUTOSCALER_UPDATE_INTERVAL_S": "1", RAY_ON_SPARK_COLLECT_LOG_TO_PATH: collect_log_to_path or "", RAY_ON_SPARK_START_RAY_PARENT_PID: str(os.getpid()), **ray_node_custom_env, } self.ray_head_node_cmd = ray_head_node_cmd return _start_ray_head_node( ray_head_node_cmd, synchronous=False, extra_env=extra_env ) def 
_start_ray_head_node(ray_head_node_cmd, synchronous, extra_env): def preexec_function(): # Make `start_ray_node` script and Ray node process run # in a separate group, # otherwise Ray node will be in the same group of parent process, # if parent process is a Jupyter notebook kernel, when user # clicks interrupt cell button, SIGINT signal is sent, then Ray node will # receive SIGINT signal, and it causes Ray node process dies. # `start_ray_node` script should also run in a separate group # because on Databricks Runtime, because if Databricks notebook # is detached, if the children processes don't exit within 1s, # they will receive SIGKILL, this behavior makes start_ray_node # doesn't have enough time to complete cleanup work like removing # temp directory and collecting logs. os.setpgrp() return exec_cmd( ray_head_node_cmd, synchronous=synchronous, extra_env=extra_env, preexec_fn=preexec_function, ) _sigterm_signal_installed = False def _install_sigterm_signal(): global _sigterm_signal_installed if _sigterm_signal_installed: return try: _origin_sigterm_handler = signal.getsignal(signal.SIGTERM) def _sigterm_handler(signum, frame): try: shutdown_ray_cluster() except Exception: # swallow exception to continue executing the following code in the # handler pass signal.signal( signal.SIGTERM, _origin_sigterm_handler ) # Reset to original signal os.kill( os.getpid(), signal.SIGTERM ) # Re-raise the signal to trigger original behavior signal.signal(signal.SIGTERM, _sigterm_handler) _sigterm_signal_installed = True except Exception: _logger.warning("Install Ray-on-Spark SIGTERM handler failed.")
AutoscalingCluster
python
huggingface__transformers
examples/quantization/custom_quantization_int8_example.py
{ "start": 4137, "end": 5026 }
class ____(QuantizationConfigMixin): """ Configuration for INT8 symmetric quantization. """ def __init__(self, modules_to_not_convert: Optional[list[str]] = None, **kwargs): self.quant_method = "int8_symmetric" self.modules_to_not_convert = modules_to_not_convert def __repr__(self): config_dict = self.to_dict() return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" def to_diff_dict(self) -> dict[str, Any]: config_dict = self.to_dict() default_config_dict = Int8SymmetricConfig().to_dict() serializable_config_dict = {} for key, value in config_dict.items(): if value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict @register_quantizer("int8_symmetric")
Int8SymmetricConfig
python
openai__openai-python
src/openai/resources/beta/beta.py
{ "start": 2236, "end": 3571 }
class ____(AsyncAPIResource): @cached_property def chat(self) -> AsyncChat: return AsyncChat(self._client) @cached_property def realtime(self) -> AsyncRealtime: return AsyncRealtime(self._client) @cached_property def chatkit(self) -> AsyncChatKit: return AsyncChatKit(self._client) @cached_property def assistants(self) -> AsyncAssistants: return AsyncAssistants(self._client) @cached_property def threads(self) -> AsyncThreads: return AsyncThreads(self._client) @cached_property def with_raw_response(self) -> AsyncBetaWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers """ return AsyncBetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncBetaWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/openai/openai-python#with_streaming_response """ return AsyncBetaWithStreamingResponse(self)
AsyncBeta
python
getsentry__sentry
src/sentry/analytics/events/first_transaction_sent.py
{ "start": 79, "end": 293 }
class ____(analytics.Event): organization_id: int project_id: int platform: str | None = None default_user_id: int | None = None analytics.register(FirstTransactionSentEvent)
FirstTransactionSentEvent
python
cython__cython
Cython/Compiler/Code.py
{ "start": 50879, "end": 94850 }
class ____: # filename_table {string : int} for finding filename table indexes # filename_list [string] filenames in filename table order # input_file_contents dict contents (=list of lines) of any file that was used as input # to create this output C code. This is # used to annotate the comments. # # utility_codes set IDs of used utility code (to avoid reinsertion) # # declared_cnames {string:Entry} used in a transition phase to merge pxd-declared # constants etc. into the pyx-declared ones (i.e, # check if constants are already added). # In time, hopefully the literals etc. will be # supplied directly instead. # # const_cnames_used dict global counter for unique constant identifiers # shared_utility_functions List of parsed declaration lines of the shared utility functions # parts {string:CCodeWriter} # interned_strings # consts # interned_nums # directives set Temporary variable used to track # the current set of directives in the code generation # process. directives = {} code_layout = [ 'h_code', 'filename_table', 'utility_code_proto_before_types', 'numeric_typedefs', # Let these detailed individual parts stay!, 'complex_type_declarations', # as the proper solution is to make a full DAG... 
'type_declarations', # More coarse-grained blocks would simply hide 'utility_code_proto', # the ugliness, not fix it 'module_declarations', 'typeinfo', 'before_global_var', 'global_var', 'string_decls', 'decls', 'late_includes', 'module_state', 'module_state_contents', # can be used to inject declarations into the modulestate struct 'module_state_end', 'constant_name_defines', 'module_state_clear', 'module_state_clear_contents', 'module_state_clear_end', 'module_state_traverse', 'module_state_traverse_contents', 'module_state_traverse_end', 'module_code', # user code goes here 'module_exttypes', 'initfunc_declarations', 'init_module', 'pystring_table', 'cached_builtins', 'cached_constants', 'init_constants', 'init_codeobjects', 'init_globals', # (utility code called at init-time) 'cleanup_globals', 'cleanup_module', 'main_method', 'utility_code_pragmas', # silence some irrelevant warnings in utility code 'utility_code_def', 'utility_code_pragmas_end', # clean-up the utility_code_pragmas 'end' ] # h files can only have a much smaller list of sections h_code_layout = [ 'h_code', 'utility_code_proto_before_types', 'type_declarations', 'utility_code_proto', 'end' ] def __init__(self, writer, module_node, code_config, common_utility_include_dir=None): self.filename_table = {} self.filename_list = [] self.input_file_contents = {} self.utility_codes = set() self.declared_cnames = {} self.in_utility_code_generation = False self.code_config = code_config self.common_utility_include_dir = common_utility_include_dir self.parts = {} self.module_node = module_node # because some utility code generation needs it # (generating backwards-compatible Get/ReleaseBuffer self.const_cnames_used = {} self.string_const_index = {} self.dedup_const_index = {} self.pyunicode_ptr_const_index = {} self.codeobject_constants = [] self.num_const_index = {} self.arg_default_constants = [] self.const_array_counters = {} # counts of differently prefixed arrays of constants self.cached_cmethods = {} 
self.initialised_constants = set() self.shared_utility_functions = [] writer.set_global_state(self) self.rootwriter = writer def initialize_main_c_code(self): rootwriter = self.rootwriter for i, part in enumerate(self.code_layout): w = self.parts[part] = rootwriter.insertion_point() if i > 0: w.putln("/* #### Code section: %s ### */" % part) w = self.parts['cached_builtins'] w.start_initcfunc( "int __Pyx_InitCachedBuiltins(" f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname})") w.putln(f"CYTHON_UNUSED_VAR({Naming.modulestatevalue_cname});") w = self.parts['cached_constants'] w.start_initcfunc( "int __Pyx_InitCachedConstants(" f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname})", refnanny=True) w.putln(f"CYTHON_UNUSED_VAR({Naming.modulestatevalue_cname});") w.put_setup_refcount_context(StringEncoding.EncodedString("__Pyx_InitCachedConstants")) w = self.parts['init_globals'] w.start_initcfunc("int __Pyx_InitGlobals(void)") w = self.parts['init_constants'] w.start_initcfunc( "int __Pyx_InitConstants(" f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname})") w.putln(f"CYTHON_UNUSED_VAR({Naming.modulestatevalue_cname});") if not Options.generate_cleanup_code: del self.parts['cleanup_globals'] else: w = self.parts['cleanup_globals'] w.start_initcfunc( "void __Pyx_CleanupGlobals(" f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname})") w.putln(f"CYTHON_UNUSED_VAR({Naming.modulestatevalue_cname});") code = self.parts['utility_code_proto'] code.putln("") code.putln("/* --- Runtime support code (head) --- */") code = self.parts['utility_code_def'] if self.code_config.emit_linenums: code.write('\n#line 1 "cython_utility"\n') code.putln("") code.putln("/* --- Runtime support code --- */") def initialize_main_h_code(self): rootwriter = self.rootwriter for part in self.h_code_layout: self.parts[part] = rootwriter.insertion_point() def finalize_main_c_code(self): self.close_global_decls() # # utility_code_def # code = 
self.parts['utility_code_def'] util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c") code.put(util.format_code(util.impl)) code.putln("") # # utility code pragmas # code = self.parts['utility_code_pragmas'] util = UtilityCode.load_cached("UtilityCodePragmas", "ModuleSetupCode.c") code.putln(util.format_code(util.impl)) code.putln("") code = self.parts['utility_code_pragmas_end'] util = UtilityCode.load_cached("UtilityCodePragmasEnd", "ModuleSetupCode.c") code.putln(util.format_code(util.impl)) code.putln("") def __getitem__(self, key): return self.parts[key] # # Global constants, interned objects, etc. # def close_global_decls(self): # This is called when it is known that no more global declarations will # declared. self.generate_const_declarations() w = self.parts['cached_builtins'] w.putln("return 0;") if w.label_used(w.error_label): w.put_label(w.error_label) w.putln("return -1;") w.putln("}") w.exit_cfunc_scope() w = self.parts['cached_constants'] for const_type in ["tuple", "slice"]: if const_type in self.const_array_counters: self.immortalize_constants( w.name_in_module_state(Naming.pyrex_prefix + const_type), self.const_array_counters[const_type], w) w.put_finish_refcount_context() w.putln("return 0;") if w.label_used(w.error_label): w.put_label(w.error_label) w.put_finish_refcount_context() w.putln("return -1;") w.putln("}") w.exit_cfunc_scope() for part in ['init_globals', 'init_constants']: w = self.parts[part] w.putln("return 0;") if w.label_used(w.error_label): w.put_label(w.error_label) w.putln("return -1;") w.putln("}") w.exit_cfunc_scope() if Options.generate_cleanup_code: w = self.parts['cleanup_globals'] w.putln("}") w.exit_cfunc_scope() if Options.generate_cleanup_code: w = self.parts['cleanup_module'] w.putln("}") w.exit_cfunc_scope() def put_pyobject_decl(self, entry): self['global_var'].putln("static PyObject *%s;" % entry.cname) # constant handling at code generation time def get_cached_constants_writer(self, 
target=None): if target is not None: if target in self.initialised_constants: # Return None on second/later calls to prevent duplicate creation code. return None self.initialised_constants.add(target) return self.parts['cached_constants'] def get_int_const(self, str_value, longness=False): py_type = longness and 'long' or 'int' try: c = self.num_const_index[(str_value, py_type)] except KeyError: c = self.new_num_const(str_value, py_type) return c def get_float_const(self, str_value, value_code): try: c = self.num_const_index[(str_value, 'float')] except KeyError: c = self.new_num_const(str_value, 'float', value_code) return c def get_py_const(self, prefix, dedup_key=None): if dedup_key is not None: const = self.dedup_const_index.get(dedup_key) if const is not None: return const const = self.new_array_const_cname(prefix) if dedup_key is not None: self.dedup_const_index[dedup_key] = const return const def get_argument_default_const(self, type): cname = self.new_const_cname('') c = PyObjectConst(cname, type) self.arg_default_constants.append(c) # Argument default constants aren't currently cleaned up. 
# If that changes, it needs to account for the fact that they # aren't just Python objects return c def get_string_const(self, text, c_used=True): # return a C string constant, creating a new one if necessary if text.is_unicode: byte_string = text.utf8encode() else: byte_string = text.byteencode() try: c = self.string_const_index[byte_string] except KeyError: c = self.new_string_const(text, byte_string) if c_used: c.c_used = True return c def get_pyunicode_ptr_const(self, text): # return a Py_UNICODE[] constant, creating a new one if necessary assert text.is_unicode try: c = self.pyunicode_ptr_const_index[text] except KeyError: c = self.pyunicode_ptr_const_index[text] = self.new_const_cname() return c def get_py_string_const(self, text, identifier=None): # return a Python string constant, creating a new one if necessary c_string: StringConst = self.get_string_const(text, c_used=False) py_string = c_string.get_py_string_const(text.encoding, identifier) return py_string def get_py_codeobj_const(self, node): idx = len(self.codeobject_constants) name = f"{Naming.codeobjtab_cname}[{idx}]" self.codeobject_constants.append(node) return name def get_interned_identifier(self, text): return self.get_py_string_const(text, identifier=True) def new_string_const(self, text, byte_string): cname = self.new_string_const_cname(byte_string) c = StringConst(cname, text, byte_string) self.string_const_index[byte_string] = c return c def new_num_const(self, value, py_type, value_code=None): cname = self.new_num_const_cname(value, py_type) c = NumConst(cname, value, py_type, value_code) self.num_const_index[(value, py_type)] = c return c def new_string_const_cname(self, bytes_value): # Create a new globally-unique nice name for a C string constant. 
value = bytes_value.decode('ASCII', 'ignore') return self.new_const_cname(value=value) def unique_const_cname(self, format_str): # type: (str) -> str used = self.const_cnames_used cname = value = format_str.format(sep='', counter='') while cname in used: counter = used[value] = used[value] + 1 cname = format_str.format(sep='_', counter=counter) used[cname] = 1 return cname def new_num_const_cname(self, value, py_type): # type: (str, str) -> str if py_type == 'long': value += 'L' py_type = 'int' prefix = Naming.interned_prefixes[py_type] value = value.replace('.', '_').replace('+', '_').replace('-', 'neg_') if len(value) > 42: # update tests/run/large_integer_T5290.py in case the amount is changed cname = self.unique_const_cname( prefix + "large{counter}_" + value[:18] + "_xxx_" + value[-18:]) else: cname = "%s%s" % (prefix, value) return cname def new_const_cname(self, prefix='', value=''): value = replace_identifier('_', value)[:32].strip('_') name_suffix = self.unique_const_cname(value + "{sep}{counter}") if prefix: prefix = Naming.interned_prefixes[prefix] else: prefix = Naming.const_prefix return "%s%s" % (prefix, name_suffix) def new_array_const_cname(self, prefix: str): count = self.const_array_counters.get(prefix, 0) self.const_array_counters[prefix] = count+1 return f"{Naming.pyrex_prefix}{prefix}[{count}]" def get_cached_unbound_method(self, type_cname, method_name): key = (type_cname, method_name) try: cname = self.cached_cmethods[key] except KeyError: cname = self.cached_cmethods[key] = self.new_const_cname( 'umethod', '%s_%s' % (type_cname, method_name)) return cname def cached_unbound_method_call_code(self, modulestate_cname, obj_cname, type_cname, method_name, arg_cnames): # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ... 
utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames) self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c")) cache_cname = self.get_cached_unbound_method(type_cname, method_name) args = [obj_cname] + arg_cnames return "__Pyx_%s(&%s%s, %s)" % ( utility_code_name, modulestate_cname, cache_cname, ', '.join(args), ) def add_cached_builtin_decl(self, entry): if entry.is_builtin and entry.is_const: if self.should_declare(entry.cname, entry): self.put_pyobject_decl(entry) name = entry.name if name in renamed_py2_builtins_map: name = renamed_py2_builtins_map[name] self.put_cached_builtin_init( entry.pos, StringEncoding.EncodedString(name), entry.cname) def put_cached_builtin_init(self, pos, name, cname): w = self.parts['cached_builtins'] cname_in_modulestate = w.name_in_main_c_code_module_state( self.get_interned_identifier(name).cname) self.use_utility_code( UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c")) w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % ( cname, cname_in_modulestate, cname, w.error_goto(pos))) def generate_const_declarations(self): self.generate_cached_methods_decls() self.generate_object_constant_decls() self.generate_codeobject_constants() # generate code for string and numeric constants as late as possible # to allow new constants be to created by the earlier stages. 
# (although the constants themselves are written early) self.generate_string_constants() self.generate_num_constants() def _generate_module_array_traverse_and_clear(self, struct_attr_cname, count, may_have_refcycles=True): counter_type = 'int' if count < 2**15 else 'Py_ssize_t' visit_call = "Py_VISIT" if may_have_refcycles else "__Pyx_VISIT_CONST" writer = self.parts['module_state_traverse'] writer.putln(f"for ({counter_type} i=0; i<{count}; ++i) {{ {visit_call}(traverse_module_state->{struct_attr_cname}[i]); }}") writer = self.parts['module_state_clear'] writer.putln(f"for ({counter_type} i=0; i<{count}; ++i) {{ Py_CLEAR(clear_module_state->{struct_attr_cname}[i]); }}") def generate_object_constant_decls(self): consts = [(len(c.cname), c.cname, c) for c in self.arg_default_constants] consts.sort() for _, cname, c in consts: self.parts['module_state'].putln("%s;" % c.type.declaration_code(cname)) if not c.type.needs_refcounting: # Note that py_constants is used for all argument defaults # which aren't necessarily PyObjects, so aren't appropriate # to clear. continue self.parts['module_state_clear'].put_xdecref_clear( f"clear_module_state->{cname}", c.type, clear_before_decref=True, nanny=False, ) if c.type.is_memoryviewslice: # TODO: Implement specific to type like CodeWriter.put_xdecref_clear() cname += "->memview" self.parts['module_state_traverse'].putln( f"Py_VISIT(traverse_module_state->{cname});") for prefix, count in sorted(self.const_array_counters.items()): struct_attr_cname = f"{Naming.pyrex_prefix}{prefix}" self.parts['module_state'].putln(f"PyObject *{struct_attr_cname}[{count}];") # The constant tuples/slices that we create can never participate in reference cycles. 
self._generate_module_array_traverse_and_clear(struct_attr_cname, count, may_have_refcycles=False) cleanup_level = cleanup_level_for_type_prefix(prefix) if cleanup_level is not None and cleanup_level <= Options.generate_cleanup_code: part_writer = self.parts['cleanup_globals'] part_writer.put(f"for (size_t i=0; i<{count}; ++i) ") part_writer.putln( "{ Py_CLEAR(%s); }" % part_writer.name_in_main_c_code_module_state(f"{struct_attr_cname}[i]") ) def generate_cached_methods_decls(self): if not self.cached_cmethods: return decl = self.parts['module_state'] init = self.parts['cached_builtins'] init.putln("") init.putln("/* Cached unbound methods */") cnames = [] for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()): cnames.append(cname) method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname decl.putln('__Pyx_CachedCFunction %s;' % ( cname)) # split type reference storage as it might not be static init.putln('%s.type = (PyObject*)%s;' % ( init.name_in_main_c_code_module_state(cname), type_cname)) # method name string isn't static in limited api init.putln( f'{init.name_in_main_c_code_module_state(cname)}.method_name = ' f'&{init.name_in_main_c_code_module_state(method_name_cname)};') if Options.generate_cleanup_code: cleanup = self.parts['cleanup_globals'] for cname in cnames: cleanup.putln(f"Py_CLEAR({init.name_in_main_c_code_module_state(cname)}.method);") def generate_string_constants(self): c_consts = [] py_bytes_consts = [] py_unicode_consts = [] # Split into buckets. 
for _, _, c in sorted([(len(c.cname), c.cname, c) for c in self.string_const_index.values()]): if c.c_used: c_consts.append((len(c.cname), c.cname, c.escaped_value)) if c.py_strings: for py_string in c.py_strings.values(): text = c.text if py_string.is_unicode and not isinstance(text, str): text = StringEncoding.EncodedString(text.decode(py_string.encoding or 'UTF-8')) (py_unicode_consts if py_string.is_unicode else py_bytes_consts).append(( py_string.intern and py_string.is_unicode, py_string.cname, text, )) c_consts.sort() py_bytes_consts.sort() py_unicode_consts.sort() # Generate C string constants. decls_writer = self.parts['string_decls'] for _, cname, escaped_value in c_consts: cliteral = StringEncoding.split_string_literal(escaped_value) decls_writer.putln( f'static const char {cname}[] = "{cliteral}";', safe=True, # Braces in user strings are not for indentation. ) # Generate legacy Py_UNICODE[] constants. for c, cname in sorted(self.pyunicode_ptr_const_index.items()): utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c) if utf16_array: # Narrow and wide representations differ decls_writer.putln("#ifdef Py_UNICODE_WIDE") decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array)) if utf16_array: decls_writer.putln("#else") decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array)) decls_writer.putln("#endif") # Generate stringtab and Python string constants. py_string_count = len(py_bytes_consts) + len(py_unicode_consts) self.parts['module_state'].putln(f"PyObject *{Naming.stringtab_cname}[{py_string_count}];") self._generate_module_array_traverse_and_clear(Naming.stringtab_cname, py_string_count, may_have_refcycles=False) self.generate_pystring_constants(py_unicode_consts, py_bytes_consts) def generate_pystring_constants(self, text_strings: list, byte_strings: list): # Concatenate all strings into one byte sequence and build a length index array. 
defines = self.parts['constant_name_defines'] bytes_values = [] first_interned: cython.Py_ssize_t = -1 stringtab_pos: cython.Py_ssize_t = 0 # For (Unicode) text strings, the index stores the character lengths after UTF8 decoding. for i, (is_interned, cname, text) in enumerate(text_strings): bytes_values.append(text.encode('utf-8')) if first_interned == -1 and is_interned: first_interned = i defines.putln(f"#define {cname} {Naming.stringtab_cname}[{stringtab_pos}]") stringtab_pos += 1 stringtab_bytes_start: cython.Py_ssize_t = len(text_strings) # For bytes objects, the index stores the byte lengths, ignoring the initial Unicode string. for _, cname, text in byte_strings: bytes_values.append(text.byteencode() if text.encoding else text.utf8encode()) defines.putln(f"#define {cname} {Naming.stringtab_cname}[{stringtab_pos}]") stringtab_pos += 1 index = list(map(len, bytes_values)) concat_bytes = b''.join(bytes_values) w = self.parts['init_constants'] w.putln("{") # Start code block. # Store the index of string lengths. w.putln( "const struct { " f"const unsigned int length: {max(index).bit_length()}; " "} " f"index[] = {{{','.join(['{%d}' % length for length in index])}}};", ) # Store and decompress the string data. self.use_utility_code(UtilityCode.load_cached("DecompressString", "StringTools.c")) has_if = False for algo_number, algo_name, compress in reversed(compression_algorithms): if compress is None: continue compressed_bytes = compress(concat_bytes) if len(compressed_bytes) >= len(concat_bytes) - 10: continue if algo_name == 'zlib': # Use zlib as fallback if the selected compression module is not available. assert algo_number == 1, f"Compression algorithm no. 1 must be 'zlib' to be used as fallback." guard = "(CYTHON_COMPRESS_STRINGS) != 0" elif algo_name == 'zstd': # 'compression.zstd' was added in Python 3.14. 
guard = f"(CYTHON_COMPRESS_STRINGS) == {algo_number} && __PYX_LIMITED_VERSION_HEX >= 0x030e0000" else: guard = f"(CYTHON_COMPRESS_STRINGS) == {algo_number}" w.putln(f"#{'if' if not has_if else 'elif'} {guard} /* compression: {algo_name} ({len(compressed_bytes)} bytes) */") has_if = True escaped_bytes = StringEncoding.split_string_literal( StringEncoding.escape_byte_string(compressed_bytes)) w.putln(f'const char* const cstring = "{escaped_bytes}";', safe=True) w.putln(f'PyObject *data = __Pyx_DecompressString(cstring, {len(compressed_bytes)}, {algo_number});') w.putln(w.error_goto_if_null('data', self.module_pos)) w.putln('const char* const bytes = __Pyx_PyBytes_AsString(data);') w.putln("#if !CYTHON_ASSUME_SAFE_MACROS") w.putln(f'if (likely(bytes)); else {{ Py_DECREF(data); {w.error_goto(self.module_pos)} }}') w.putln('#endif') if has_if: w.putln(f"#else /* compression: none ({len(concat_bytes)} bytes) */") escaped_bytes = StringEncoding.split_string_literal( StringEncoding.escape_byte_string(concat_bytes)) w.putln(f'const char* const bytes = "{escaped_bytes}";', safe=True) w.putln('PyObject *data = NULL;') # Always allow xdecref below. w.putln("CYTHON_UNUSED_VAR(__Pyx_DecompressString);") if has_if: w.putln("#endif") # Populate stringtab. w.putln(f"PyObject **stringtab = {w.name_in_main_c_code_module_state(Naming.stringtab_cname)};") w.putln("Py_ssize_t pos = 0;") # Unpack Unicode strings. if stringtab_bytes_start > 0: # Note: We could decode the concatenated Unicode string in one go, but this has a drawback: # If most strings are ASCII/Latin-1 or at most BMP, then a single non-BMP string in the mix # will make all strings use 4 bytes of RAM per character during initialisation, until we finish # splitting the user substrings. In addition to using more memory, this might not even be faster # because it must copy Unicode slices between different character sizes. # We avoid this by repeatedly calling PyUnicode_DecodeUTF8() for each substring. 
w.putln(f"for ({'int' if stringtab_bytes_start < 2**15 else 'Py_ssize_t'} i = 0; i < {stringtab_bytes_start}; i++) {{") w.putln("Py_ssize_t bytes_length = index[i].length;") w.putln("PyObject *string = PyUnicode_DecodeUTF8(bytes + pos, bytes_length, NULL);") if first_interned >= 0: w.putln(f"if (likely(string) && i >= {first_interned}) PyUnicode_InternInPlace(&string);") w.putln("if (unlikely(!string)) {") w.putln("Py_XDECREF(data);") w.putln(w.error_goto(self.module_pos)) w.putln('}') w.putln("stringtab[i] = string;") w.putln("pos += bytes_length;") w.putln("}") # for() # Unpack byte strings. if stringtab_bytes_start < len(index): w.putln(f"for ({'int' if len(index) < 2**15 else 'Py_ssize_t'} i = {stringtab_bytes_start}; i < {len(index)}; i++) {{") w.putln("Py_ssize_t bytes_length = index[i].length;") w.putln("PyObject *string = PyBytes_FromStringAndSize(bytes + pos, bytes_length);") w.putln("stringtab[i] = string;") w.putln("pos += bytes_length;") w.putln("if (unlikely(!string)) {") w.putln("Py_XDECREF(data);") w.putln(w.error_goto(self.module_pos)) w.putln('}') w.putln("}") # for() w.putln("Py_XDECREF(data);") # Set up hash values. w.putln(f"for (Py_ssize_t i = 0; i < {len(index)}; i++) {{") w.putln("if (unlikely(PyObject_Hash(stringtab[i]) == -1)) {") w.putln(w.error_goto(self.module_pos)) w.putln('}') w.putln('}') # Unicode strings are not trivially immortal but require certain rules. # See https://github.com/python/cpython/blob/920de7ccdcfa7284b6d23a124771b17c66dd3e4f/Objects/unicodeobject.c#L713-L739 # But we can make bytes strings immortal. 
if stringtab_bytes_start < len(index): self.immortalize_constants(f"stringtab + {stringtab_bytes_start}", len(index) - stringtab_bytes_start, w) w.putln("}") # close block def generate_codeobject_constants(self): w = self.parts['init_codeobjects'] init_function = ( f"int __Pyx_CreateCodeObjects({Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname})" ) if not self.codeobject_constants: w.start_initcfunc(init_function) w.putln(f"CYTHON_UNUSED_VAR({Naming.modulestatevalue_cname});") w.putln("return 0;") w.exit_cfunc_scope() w.putln("}") return # Create a downsized config struct and build code objects from it. max_flags = 0x3ff # to be adapted when we start using new flags max_func_args = 1 max_kwonly_args = 1 max_posonly_args = 1 max_vars = 1 max_line = 1 for node in self.codeobject_constants: def_node = node.def_node if not def_node.is_generator_expression: max_func_args = max(max_func_args, len(def_node.args) - def_node.num_kwonly_args) max_kwonly_args = max(max_kwonly_args, def_node.num_kwonly_args) max_posonly_args = max(max_posonly_args, def_node.num_posonly_args) max_vars = max(max_vars, len(node.varnames)) max_line = max(max_line, def_node.pos[1]) w.put(textwrap.dedent(f"""\ #ifdef __cplusplus namespace {{ #endif typedef struct {{ unsigned int argcount : {max_func_args.bit_length()}; unsigned int num_posonly_args : {max_posonly_args.bit_length()}; unsigned int num_kwonly_args : {max_kwonly_args.bit_length()}; unsigned int nlocals : {max_vars.bit_length()}; unsigned int flags : {max_flags.bit_length()}; unsigned int first_line : {max_line.bit_length()}; }} __Pyx_PyCode_New_function_description; #ifdef __cplusplus }} /* anonymous namespace */ #endif """)) self.use_utility_code(UtilityCode.load_cached("NewCodeObj", "ModuleSetupCode.c")) w.start_initcfunc(init_function) w.putln("PyObject* tuple_dedup_map = PyDict_New();") w.putln("if (unlikely(!tuple_dedup_map)) return -1;") for node in self.codeobject_constants: node.generate_codeobj(w, "bad") 
w.putln("Py_DECREF(tuple_dedup_map);") w.putln("return 0;") w.putln("bad:") w.putln("Py_DECREF(tuple_dedup_map);") w.putln("return -1;") w.exit_cfunc_scope() w.putln("}") code_object_count = len(self.codeobject_constants) self.parts['module_state'].putln(f"PyObject *{Naming.codeobjtab_cname}[{code_object_count}];") # The code objects that we generate only contain plain constants and can never participate in reference cycles. self._generate_module_array_traverse_and_clear(Naming.codeobjtab_cname, code_object_count, may_have_refcycles=False) def generate_num_constants(self): consts = [(c.py_type, len(c.value.lstrip('-')), c.value.lstrip('-'), c.value, c.value_code, c) for c in self.num_const_index.values()] consts.sort() if not consts: return constant_count = len(consts) self.parts['module_state'].putln(f"PyObject *{Naming.numbertab_cname}[{constant_count}];") # Numeric constants can never participate in reference cycles. self._generate_module_array_traverse_and_clear(Naming.numbertab_cname, constant_count, may_have_refcycles=False) float_constants = [] int_constants_by_bytesize = [[]] # [[1 byte], [2 bytes], [4 bytes], [8 bytes]] large_constants = [] int_constant_count = 0 int_suffix = '' for py_type, _, _, value, value_code, c in consts: cname = c.cname if py_type == 'float': float_constants.append((cname, value_code)) else: number_value = Utils.str_to_number(value) bit_length = number_value.bit_length() if bit_length <= 63: while (bit_length + 8) // 8 > 1 << (len(int_constants_by_bytesize) - 1): int_constants_by_bytesize.append([]) # Our <= 31-bit integer values pass happily as 'int32' without further modifiers, # but MSVC misinterprets a negative '-2147483648' (== INT_MIN) and similar values as # "that's 'uint32' just with a minus sign", where '-(2147483648)' == '2147483648'. 
# See https://learn.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-2-c4146?view=msvc-170 int_suffix = 'LL'[:len(int_constants_by_bytesize) - 2] int_constant_count += 1 int_constants_by_bytesize[-1].append((cname, f"{number_value}{int_suffix}")) else: large_constants.append((cname, number_value)) w = self.parts['init_constants'] defines = self.parts['constant_name_defines'] def store_array(w, name: str, ctype: str, constants: list): c: tuple values = ','.join([c[1] for c in constants]) w.putln(f"{ctype} const {name}[] = {{{values}}};") def generate_forloop_start(w, end: cython.Py_ssize_t): counter_type = 'int' if end < 2**15 else 'Py_ssize_t' w.putln(f"for ({counter_type} i = 0; i < {end}; i++) {{") def assign_constant(w, error_pos, rhs_code: str): w.putln(f"numbertab[i] = {rhs_code};") w.putln(w.error_goto_if_null("numbertab[i]", error_pos)) def define_constants(defines, constants: list, start_offset: cython.Py_ssize_t = 0): i: cython.Py_ssize_t c: tuple numbertab_cname: str = Naming.numbertab_cname for i, c in enumerate(constants): cname: str = c[0] defines.putln(f"#define {cname} {numbertab_cname}[{start_offset + i}]") constant_offset: cython.Py_ssize_t = 0 if float_constants: w.putln("{") w.putln(f"PyObject **numbertab = {w.name_in_main_c_code_module_state(Naming.numbertab_cname)};") store_array(w, "c_constants", 'double', float_constants) define_constants(defines, float_constants, constant_offset) generate_forloop_start(w, len(float_constants)) assign_constant(w, self.module_pos, "PyFloat_FromDouble(c_constants[i])") w.putln("}") # for() w.putln("}") constant_offset += len(float_constants) if int_constant_count > 0: w.putln("{") w.putln(f"PyObject **numbertab = {w.name_in_main_c_code_module_state(Naming.numbertab_cname)} + {constant_offset};") int_types = ['', 'int8_t', 'int16_t', 'int32_t', 'int64_t'] array_access = "%s" int_constants_seen: cython.Py_ssize_t = 0 byte_size: cython.int for byte_size, constants in 
enumerate(int_constants_by_bytesize, 1): if not constants: continue array_name = f"cint_constants_{1 << (byte_size - 1)}" store_array(w, array_name, int_types[byte_size], constants) define_constants(defines, constants, constant_offset + int_constants_seen) read_item = f"{array_name}[i - {int_constants_seen}]" int_constants_seen += len(constants) array_access %= ( read_item if byte_size == len(int_constants_by_bytesize) # last is simple access else f"(i < {int_constants_seen} ? {read_item} : %s)" # otherwise, access arrays step by step ) generate_forloop_start(w, int_constant_count) capi_func = "PyLong_FromLong" if len(int_constants_by_bytesize) <= 3 else "PyLong_FromLongLong" assign_constant(w, self.module_pos, f"{capi_func}({array_access})") w.putln("}") # for() w.putln("}") constant_offset += int_constant_count if large_constants: # We store large integer constants in a single '\0'-separated C string of base32 digits. def to_base32(number): is_neg: bool = number < 0 if is_neg: number = -number digits = bytearray() while number: digit: cython.uint = number & 31 digit_char: cython.char = b'0123456789abcdefghijklmnopqrstuv'[digit] digits.append(digit_char) number >>= 5 if not digits: return b'0' if is_neg: digits.append(ord(b'-')) digits.reverse() return digits w.putln("{") w.putln(f"PyObject **numbertab = {w.name_in_main_c_code_module_state(Naming.numbertab_cname)} + {constant_offset};") c_string = b'\\000'.join([to_base32(c[1]) for c in large_constants]).decode('ascii') w.putln(f'const char* c_constant = "{StringEncoding.split_string_literal(c_string)}";') define_constants(defines, large_constants, constant_offset) generate_forloop_start(w, len(large_constants)) w.putln("char *end_pos;") assign_constant(w, self.module_pos, "PyLong_FromString(c_constant, &end_pos, 32)") w.putln("c_constant = end_pos + 1;") w.putln("}") # for() w.putln("}") self.immortalize_constants( w.name_in_main_c_code_module_state(Naming.numbertab_cname), constant_count, w) @staticmethod def 
immortalize_constants(array_cname, constant_count, writer): writer.putln("#if CYTHON_IMMORTAL_CONSTANTS") writer.putln("{") writer.putln(f"PyObject **table = {array_cname};") writer.putln(f"for (Py_ssize_t i=0; i<{constant_count}; ++i) {{") writer.putln("#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING") writer.putln("Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL);") writer.putln("#else") writer.putln("Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT);") writer.putln("#endif") writer.putln("}") # for() writer.putln("}") writer.putln("#endif") # The functions below are there in a transition phase only # and will be deprecated. They are called from Nodes.BlockNode. # The copy&paste duplication is intentional in order to be able # to see quickly how BlockNode worked, until this is replaced. def should_declare(self, cname, entry): if cname in self.declared_cnames: other = self.declared_cnames[cname] assert str(entry.type) == str(other.type) assert entry.init == other.init return False else: self.declared_cnames[cname] = entry return True # # File name state # def lookup_filename(self, source_desc): entry = source_desc.get_filenametable_entry() try: index = self.filename_table[entry] except KeyError: index = len(self.filename_list) self.filename_list.append(source_desc) self.filename_table[entry] = index return index def commented_file_contents(self, source_desc): try: return self.input_file_contents[source_desc] except KeyError: pass source_file = source_desc.get_lines(encoding='ASCII', error_handling='ignore') F = [' * ' + ( line.replace( '*/', '*[inserted by cython to avoid comment closer]/' ).replace( '/*', '/[inserted by cython to avoid comment start]*' ) if '/' in line else line) for line in source_file ] if not F: F.append('') self.input_file_contents[source_desc] = F return F # # Utility code state # def use_utility_code(self, utility_code, used_by=None): """ Adds code to the C file. 
utility_code should a) implement __eq__/__hash__ for the purpose of knowing whether the same code has already been included b) implement put_code, which takes a globalstate instance See UtilityCode. """ if utility_code and utility_code not in self.utility_codes: self.utility_codes.add(utility_code) utility_code.put_code(self, used_by=used_by) def use_entry_utility_code(self, entry): if entry is None: return if entry.utility_code: self.use_utility_code(entry.utility_code) if entry.utility_code_definition: self.use_utility_code(entry.utility_code_definition) from . import PyrexTypes for tp in PyrexTypes.get_all_subtypes(entry.type): if hasattr(tp, "entry") and tp.entry is not entry: self.use_entry_utility_code(tp.entry) def funccontext_property(func): name = func.__name__ attribute_of = operator.attrgetter(name) def get(self): return attribute_of(self.funcstate) def set(self, value): setattr(self.funcstate, name, value) return property(get, set)
GlobalState
python
pytorch__pytorch
torch/__init__.py
{ "start": 72311, "end": 72541 }
class ____(_LegacyStorage): @classproperty def dtype(self): _warn_typed_storage_removal(stacklevel=3) return self._dtype @classproperty def _dtype(self): return torch.bfloat16
BFloat16Storage
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/subprocess_env_manager.py
{ "start": 1876, "end": 1981 }
class ____(NamedTuple): cmd: EnvironmentCommand worker_id: int payload: Any
EnvironmentResponse
python
doocs__leetcode
solution/1500-1599/1583.Count Unhappy Friends/Solution.py
{ "start": 0, "end": 556 }
class ____: def unhappyFriends( self, n: int, preferences: List[List[int]], pairs: List[List[int]] ) -> int: d = [{x: j for j, x in enumerate(p)} for p in preferences] p = {} for x, y in pairs: p[x] = y p[y] = x ans = 0 for x in range(n): y = p[x] for i in range(d[x][y]): u = preferences[x][i] v = p[u] if d[u][x] < d[u][v]: ans += 1 break return ans
Solution
python
RaRe-Technologies__gensim
gensim/similarities/nmslib.py
{ "start": 3601, "end": 9603 }
class ____(): """This class allows to use `NMSLIB <https://github.com/nmslib/nmslib>`_ as indexer for `most_similar` method from :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`, :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` classes. """ def __init__(self, model, index_params=None, query_time_params=None): """ Parameters ---------- model : :class:`~gensim.models.base_any2vec.BaseWordEmbeddingsModel` Model, that will be used as source for index. index_params : dict, optional Indexing parameters passed through to NMSLIB: https://github.com/nmslib/nmslib/blob/master/manual/methods.md#graph-based-search-methods-sw-graph-and-hnsw If not specified, defaults to `{'M': 100, 'indexThreadQty': 1, 'efConstruction': 100, 'post': 0}`. query_time_params : dict, optional query_time_params for NMSLIB indexer. If not specified, defaults to `{'efSearch': 100}`. """ check_nmslib_compatibility() if index_params is None: index_params = {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100, 'post': 0} if query_time_params is None: query_time_params = {'efSearch': 100} self.index = None self.labels = None self.model = model self.index_params = index_params self.query_time_params = query_time_params # # In the main use case, the user will pass us a non-None model, and we use that model # to initialize the index and labels. In a separate (completely internal) use case, the # NsmlibIndexer.load function handles the index and label initialization separately, # so it passes us None as the model. 
# if model: if isinstance(self.model, Doc2Vec): self._build_from_doc2vec() elif isinstance(self.model, (Word2Vec, FastText)): self._build_from_word2vec() elif isinstance(self.model, (KeyedVectors,)): self._build_from_keyedvectors() else: raise ValueError("model must be a Word2Vec, Doc2Vec, FastText or KeyedVectors instance") def save(self, fname, protocol=utils.PICKLE_PROTOCOL): """Save this NmslibIndexer instance to a file. Parameters ---------- fname : str Path to the output file, will produce 2 files: `fname` - parameters and `fname`.d - :class:`~nmslib.NmslibIndex`. protocol : int, optional Protocol for pickle. Notes ----- This method saves **only** the index (**the model isn't preserved**). """ fname_dict = fname + '.d' self.index.saveIndex(fname) d = {'index_params': self.index_params, 'query_time_params': self.query_time_params, 'labels': self.labels} with open(fname_dict, 'wb') as fout: _pickle.dump(d, fout, protocol=protocol) @classmethod def load(cls, fname): """Load a NmslibIndexer instance from a file. Parameters ---------- fname : str Path previously used in `save()`. 
""" fname_dict = fname + '.d' with open(fname_dict, 'rb') as f: d = _pickle.load(f) index_params = d['index_params'] query_time_params = d['query_time_params'] nmslib_instance = cls(model=None, index_params=index_params, query_time_params=query_time_params) index = nmslib.init(method='hnsw', space='cosinesimil') index.loadIndex(fname) nmslib_instance.index = index nmslib_instance.labels = d['labels'] return nmslib_instance def _build_from_word2vec(self): """Build an NMSLIB index using word vectors from a Word2Vec model.""" self._build_from_model(self.model.wv.get_normed_vectors(), self.model.wv.index_to_key) def _build_from_doc2vec(self): """Build an NMSLIB index using document vectors from a Doc2Vec model.""" docvecs = self.model.dv labels = docvecs.index_to_key self._build_from_model(docvecs.get_normed_vectors(), labels) def _build_from_keyedvectors(self): """Build an NMSLIB index using word vectors from a KeyedVectors model.""" self._build_from_model(self.model.get_normed_vectors(), self.model.index_to_key) def _build_from_model(self, vectors, labels): index = nmslib.init(method='hnsw', space='cosinesimil') index.addDataPointBatch(vectors) index.createIndex(self.index_params, print_progress=True) nmslib.setQueryTimeParams(index, self.query_time_params) self.index = index self.labels = labels def most_similar(self, vector, num_neighbors): """Find the approximate `num_neighbors` most similar items. Parameters ---------- vector : numpy.array Vector for a word or document. num_neighbors : int How many most similar items to look for? Returns ------- list of (str, float) List of most similar items in the format `[(item, cosine_similarity), ... ]`. """ ids, distances = self.index.knnQueryBatch(vector.reshape(1, -1), k=num_neighbors)[0] # NMSLIB returns cosine distance (not similarity), which is simply `dist = 1 - cossim`. # So, convert back to similarities here. 
return [(self.labels[id_], 1.0 - distance) for id_, distance in zip(ids, distances)] def check_nmslib_compatibility(): """NMSLib is only compatible with Python <3.10 and numPy <2.0.""" if sys.version_info >= (3, 10): raise RuntimeError("nmslib requires Python < 3.10") if tuple(map(int, np.__version__.split('.')[:2])) >= (2, 0): raise RuntimeError("nmslib requires NumPy < 2.0")
NmslibIndexer
python
spulec__freezegun
freezegun/api.py
{ "start": 9731, "end": 10778 }
class ____(real_date, metaclass=FakeDateMeta): def __add__(self, other: Any) -> "FakeDate": result = real_date.__add__(self, other) if result is NotImplemented: return result return date_to_fakedate(result) def __sub__(self, other: Any) -> "FakeDate": # type: ignore result = real_date.__sub__(self, other) if result is NotImplemented: return result # type: ignore if isinstance(result, real_date): return date_to_fakedate(result) else: return result # type: ignore @classmethod def today(cls: Type["FakeDate"]) -> "FakeDate": result = cls._date_to_freeze() + cls._tz_offset() return date_to_fakedate(result) @staticmethod def _date_to_freeze() -> datetime.datetime: return get_current_time() @classmethod def _tz_offset(cls) -> datetime.timedelta: return tz_offsets[-1] FakeDate.min = date_to_fakedate(real_date.min) FakeDate.max = date_to_fakedate(real_date.max)
FakeDate
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 199281, "end": 201715 }
class ____(VegaLiteSchema): """ BoxPlotConfig schema wrapper. Parameters ---------- box : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig` extent : float, Literal['min-max'] The extent of the whiskers. Available options include: * ``"min-max"``: min and max are the lower and upper whiskers respectively. * A number representing multiple of the interquartile range. This number will be multiplied by the IQR to determine whisker boundary, which spans from the smallest data to the largest data within the range *[Q1 - k * IQR, Q3 + k * IQR]* where *Q1* and *Q3* are the first and third quartiles while *IQR* is the interquartile range (*Q3-Q1*). **Default value:** ``1.5``. median : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig` outliers : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig` rule : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig` size : float Size of the box and median tick of a box plot ticks : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig` """ _schema = {"$ref": "#/definitions/BoxPlotConfig"} def __init__( self, box: Optional[bool | SchemaBase | Map] = Undefined, extent: Optional[float | Literal["min-max"]] = Undefined, median: Optional[bool | SchemaBase | Map] = Undefined, outliers: Optional[bool | SchemaBase | Map] = Undefined, rule: Optional[bool | SchemaBase | Map] = Undefined, size: Optional[float] = Undefined, ticks: Optional[bool | SchemaBase | Map] = Undefined, **kwds, ): super().__init__( box=box, 
extent=extent, median=median, outliers=outliers, rule=rule, size=size, ticks=ticks, **kwds, )
BoxPlotConfig
python
pytorch__pytorch
torch/multiprocessing/spawn.py
{ "start": 2590, "end": 7645 }
class ____: def __init__(self, processes, error_files): self.error_files = error_files self.processes = processes self.sentinels = { process.sentinel: index for index, process in enumerate(processes) } def pids(self): return [int(process.pid) for process in self.processes] def _join_procs_with_timeout(self, timeout: float): """Attempt to join all processes with a shared timeout.""" end = time.monotonic() + timeout for process in self.processes: # pyrefly: ignore [no-matching-overload] time_to_wait = max(0, end - time.monotonic()) process.join(time_to_wait) def join( self, timeout: Optional[float] = None, grace_period: Optional[float] = None ): r"""Join one or more processes within spawn context. Attempt to join one or more processes in this spawn context. If one of them exited with a non-zero exit status, this function kills the remaining processes (optionally with a grace period) and raises an exception with the cause of the first process exiting. Returns ``True`` if all processes have been joined successfully, ``False`` if there are more processes that need to be joined. Args: timeout (float): Wait this long (in seconds) before giving up on waiting. grace_period (float): When any processes fail, wait this long (in seconds) for others to shutdown gracefully before terminating them. If they still don't exit, wait another grace period before killing them. """ # Ensure this function can be called even when we're done. if len(self.sentinels) == 0: return True # Wait for any process to fail or all of them to succeed. ready = multiprocessing.connection.wait( self.sentinels.keys(), timeout=timeout, ) error_index = None for sentinel in ready: index = self.sentinels.pop(sentinel) process = self.processes[index] process.join() if process.exitcode != 0: error_index = index break # Return if there was no error. if error_index is None: # Return whether or not all processes have been joined. return len(self.sentinels) == 0 # An error occurred. 
Clean-up all processes before returning. # First, allow a grace period for processes to shutdown themselves. if grace_period is not None: self._join_procs_with_timeout(grace_period) # Then, terminate processes that are still alive. Try SIGTERM first. for process in self.processes: if process.is_alive(): log.warning("Terminating process %s via signal SIGTERM", process.pid) process.terminate() # Try SIGKILL if the process isn't going down after another grace_period. # The reason is related to python signal handling is limited # to main thread and if that is in c/c++ land and stuck it won't # to handle it. We have seen processes getting stuck not handling # SIGTERM for the above reason. self._join_procs_with_timeout(30 if grace_period is None else grace_period) for process in self.processes: if process.is_alive(): log.warning( "Unable to shutdown process %s via SIGTERM , forcefully exiting via SIGKILL", process.pid, ) process.kill() process.join() # The file will only be created if the process crashed. failed_process = self.processes[error_index] if not os.access(self.error_files[error_index], os.R_OK): exitcode = self.processes[error_index].exitcode if exitcode < 0: try: name = signal.Signals(-exitcode).name except ValueError: name = f"<Unknown signal {-exitcode}>" raise ProcessExitedException( f"process {error_index:d} terminated with signal {name}", error_index=error_index, error_pid=failed_process.pid, exit_code=exitcode, signal_name=name, ) else: raise ProcessExitedException( f"process {error_index:d} terminated with exit code {exitcode:d}", error_index=error_index, error_pid=failed_process.pid, exit_code=exitcode, ) with open(self.error_files[error_index], "rb") as fh: original_trace = pickle.load(fh) msg = f"\n\n-- Process {error_index:d} terminated with the following error:\n" msg += original_trace raise ProcessRaisedException(msg, error_index, failed_process.pid)
ProcessContext
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 122971, "end": 123493 }
class ____(Operation): def call(self, x): return backend.numpy.isnan(x) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype="bool") @keras_export(["keras.ops.isnan", "keras.ops.numpy.isnan"]) def isnan(x): """Test element-wise for NaN and return result as a boolean tensor. Args: x: Input tensor. Returns: Output boolean tensor. """ if any_symbolic_tensors((x,)): return Isnan().symbolic_call(x) return backend.numpy.isnan(x)
Isnan
python
kamyu104__LeetCode-Solutions
Python/shortest-subarray-with-or-at-least-k-i.py
{ "start": 83, "end": 1136 }
class ____(object): def minimumSubarrayLength(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ def update(x, d, curr): for i in xrange(len(cnt)): if x < (1<<i): break if not (x&(1<<i)): continue if cnt[i] == 0: curr ^= 1<<i cnt[i] += d if cnt[i] == 0: curr ^= 1<<i return curr total = reduce(lambda x, y: x|y, nums) if total < k: return -1 cnt = [0]*total.bit_length() result = len(nums) left = curr = 0 for right in xrange(len(nums)): curr = update(nums[right], +1, curr) while left <= right and curr >= k: result = min(result, right-left+1) curr = update(nums[left], -1, curr) left += 1 return result # Time: O(n^2) # Space: O(1) # brute force
Solution
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/integrations/fivetran/customize_fivetran_translator_asset_spec.py
{ "start": 473, "end": 1181 }
class ____(DagsterFivetranTranslator): def get_asset_spec(self, props: FivetranConnectorTableProps) -> dg.AssetSpec: # We create the default asset spec using super() default_spec = super().get_asset_spec(props) # We customize the metadata and asset key prefix for all assets return default_spec.replace_attributes( key=default_spec.key.with_prefix("prefix"), ).merge_attributes(metadata={"custom": "metadata"}) fivetran_specs = load_fivetran_asset_specs( fivetran_workspace, dagster_fivetran_translator=MyCustomFivetranTranslator() ) defs = dg.Definitions(assets=fivetran_specs, resources={"fivetran": fivetran_workspace})
MyCustomFivetranTranslator
python
astropy__astropy
astropy/time/tests/test_pickle.py
{ "start": 132, "end": 1201 }
class ____: """Basic pickle test of time""" def test_pickle(self): times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"] t1 = Time(times, scale="utc") for prot in range(pickle.HIGHEST_PROTOCOL): t1d = pickle.dumps(t1, prot) t1l = pickle.loads(t1d) assert np.all(t1l == t1) t2 = Time("2012-06-30 12:00:00", scale="utc") for prot in range(pickle.HIGHEST_PROTOCOL): t2d = pickle.dumps(t2, prot) t2l = pickle.loads(t2d) assert t2l == t2 def test_cache_not_shared(self): t = Time(["2001:020", "2001:040", "2001:060", "2001:080"], out_subfmt="date") # Ensure something is in the cache. t.value assert "format" in t.cache td = pickle.dumps(t) assert "format" in t.cache tl = pickle.loads(td) assert "format" in t.cache assert "format" not in tl.cache t[0] = "1999:099" assert t.value[0] == "1999:099" assert tl.value[0] == "2001:020"
TestPickle
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chartsheet03.py
{ "start": 315, "end": 1425 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chartsheet03.xlsx") def test_create_file(self): """Test the worksheet properties of an XlsxWriter chartsheet file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chartsheet = workbook.add_chartsheet() chart = workbook.add_chart({"type": "bar"}) chart.axis_ids = [43794816, 43796352] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chartsheet.set_chart(chart) chartsheet.hide() workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
doocs__leetcode
solution/0000-0099/0077.Combinations/Solution2.py
{ "start": 0, "end": 412 }
class ____: def combine(self, n: int, k: int) -> List[List[int]]: def dfs(i: int): if len(t) == k: ans.append(t[:]) return if i > n: return for j in range(i, n + 1): t.append(j) dfs(j + 1) t.pop() ans = [] t = [] dfs(1) return ans
Solution
python
huggingface__transformers
src/transformers/generation/logits_process.py
{ "start": 50683, "end": 53556 }
class ____(LogitsProcessor): r""" N-grams are groups of "n" consecutive words, characters, or tokens taken from a sequence of text. Given the sentence: "She runs fast", the bi-grams (n=2) would be ("she", "runs") and ("runs", "fast"). In text generation, avoiding repetitions of word sequences provides a more diverse output. This [`LogitsProcessor`] enforces no repetition of n-grams by setting the scores of banned tokens to negative infinity which eliminates those tokens from consideration when further processing the scores. Note that, for decoder-only models like most LLMs, the prompt is also considered to obtain the n-grams. [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). <Tip> Use n-gram penalties with care. For instance, penalizing 2-grams (bigrams) in an article about the city of New York might lead to undesirable outcomes where the city's name appears only once in the entire text. [Reference](https://huggingface.co/blog/how-to-generate) </Tip> Args: ngram_size (`int`): All ngrams of size `ngram_size` can only occur once. Examples: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") >>> inputs = tokenizer(["Today I"], return_tensors="pt") >>> output = model.generate(**inputs) >>> print(tokenizer.decode(output[0], skip_special_tokens=True)) Today I'm not sure if I'm going to be able to do it. >>> # Now let's add ngram size using `no_repeat_ngram_size`. This stops the repetitions ("I'm") in the output. 
>>> output = model.generate(**inputs, no_repeat_ngram_size=2) >>> print(tokenizer.decode(output[0], skip_special_tokens=True)) Today I'm not sure if I can get a better understanding of the nature of this issue ``` """ def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: num_batch_hypotheses = scores.shape[0] cur_len = input_ids.shape[-1] scores_processed = scores.clone() banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len) for i, banned_tokens in enumerate(banned_batch_tokens): scores_processed[i, banned_tokens] = -float("inf") return scores_processed
NoRepeatNGramLogitsProcessor
python
getsentry__sentry
tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py
{ "start": 19960, "end": 20508 }
class ____(TestTicketingIssueAlertHandlerBase): def setUp(self) -> None: super().setUp() self.handler = GithubIssueAlertHandler() def test_build_rule_action_blob(self) -> None: for expected in GITHUB_ACTION_DATA_BLOBS: if expected["id"] == ACTION_FIELD_MAPPINGS[Action.Type.GITHUB]["id"]: self._test_build_rule_action_blob(expected, Action.Type.GITHUB) else: self._test_build_rule_action_blob(expected, Action.Type.GITHUB_ENTERPRISE)
TestGithubIssueAlertHandler
python
huggingface__transformers
src/transformers/models/swin2sr/modeling_swin2sr.py
{ "start": 9095, "end": 16110 }
class ____(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.pretrained_window_size = pretrained_window_size self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) # mlp to generate continuous relative position bias self.continuous_position_bias_mlp = nn.Sequential( nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) ) # get relative_coords_table relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float() relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float() relative_coords_table = ( torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij")) .permute(1, 2, 0) .contiguous() .unsqueeze(0) ) # [1, 2*window_height - 1, 2*window_width - 1, 2] if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1 relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1 elif window_size > 1: relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1 relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1 relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = ( torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8) ) # set to same dtype as mlp weight relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype) self.register_buffer("relative_coords_table", 
relative_coords_table, persistent=False) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index, persistent=False) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # cosine attention attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize( key_layer, dim=-1 ).transpose(-2, -1) logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp() attention_scores = attention_scores * logit_scale relative_position_bias_table = 
self.continuous_position_bias_mlp(self.relative_coords_table).view( -1, self.num_attention_heads ) # [window_height*window_width,window_height*window_width,num_attention_heads] relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) # [num_attention_heads,window_height*window_width,window_height*window_width] relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in Swin2SRModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->Swin2SR
Swin2SRSelfAttention
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_title03.py
{ "start": 315, "end": 1330 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_title03.xlsx") def test_create_file(self): """Test the creation of an XlsxWriter file with default title.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [93211648, 87847680] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.set_title({"name": "Title!", "name_font": {"bold": 0, "baseline": -1}}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/strategies/_internal/strategies.py
{ "start": 21949, "end": 31737 }
class ____(SearchStrategy[Ex]): """A strategy which samples from a set of elements. This is essentially equivalent to using a OneOfStrategy over Just strategies but may be more efficient and convenient. """ _MAX_FILTER_CALLS: ClassVar[int] = 10_000 def __init__( self, elements: Sequence[Ex], *, force_repr: str | None = None, force_repr_braces: tuple[str, str] | None = None, transformations: tuple[ tuple[Literal["filter", "map"], Callable[[Ex], Any]], ..., ] = (), ): super().__init__() self.elements = cu.check_sample(elements, "sampled_from") assert self.elements self.force_repr = force_repr self.force_repr_braces = force_repr_braces self._transformations = transformations self._cached_repr: str | None = None def map(self, pack: Callable[[Ex], T]) -> SearchStrategy[T]: s = type(self)( self.elements, force_repr=self.force_repr, force_repr_braces=self.force_repr_braces, transformations=(*self._transformations, ("map", pack)), ) # guaranteed by the ("map", pack) transformation return cast(SearchStrategy[T], s) def filter(self, condition: Callable[[Ex], Any]) -> SearchStrategy[Ex]: return type(self)( self.elements, force_repr=self.force_repr, force_repr_braces=self.force_repr_braces, transformations=(*self._transformations, ("filter", condition)), ) def __repr__(self): if self._cached_repr is None: rep = get_pretty_function_description elements_s = ( ", ".join(rep(v) for v in self.elements[:512]) + ", ..." 
if len(self.elements) > 512 else ", ".join(rep(v) for v in self.elements) ) braces = self.force_repr_braces or ("(", ")") instance_s = ( self.force_repr or f"sampled_from({braces[0]}{elements_s}{braces[1]})" ) transforms_s = "".join( f".{name}({get_pretty_function_description(f)})" for name, f in self._transformations ) repr_s = instance_s + transforms_s self._cached_repr = repr_s return self._cached_repr def calc_label(self) -> int: # strategy.label is effectively an under-approximation of structural # equality (i.e., some strategies may have the same label when they are not # structurally identical). More importantly for calculating the # SampledFromStrategy label, we might have hash(s1) != hash(s2) even # when s1 and s2 are structurally identical. For instance: # # s1 = st.sampled_from([st.none()]) # s2 = st.sampled_from([st.none()]) # assert hash(s1) != hash(s2) # # (see also test cases in test_labels.py). # # We therefore use the labels of any component strategies when calculating # our label, and only use the hash if it is not a strategy. # # That's the ideal, anyway. In reality the logic is more complicated than # necessary in order to be efficient in the presence of (very) large sequences: # * add an unabashed special case for range, to avoid iteration over an # enormous range when we know it is entirely integers. # * if there is at least one strategy in self.elements, use strategy label, # and the element hash otherwise. # * if there are no strategies in self.elements, take the hash of the # entire sequence. This prevents worst-case performance of hashing each # element when a hash of the entire sequence would have sufficed. # # The worst case performance of this scheme is # itertools.chain(range(2**100), [st.none()]), where it degrades to # hashing every int in the range. 
(elements_is_hashable, hash_value) = _is_hashable(self.elements) if isinstance(self.elements, range) or ( elements_is_hashable and not any(isinstance(e, SearchStrategy) for e in self.elements) ): return combine_labels( self.class_label, calc_label_from_name(str(hash_value)) ) labels = [self.class_label] for element in self.elements: if not is_hashable(element): continue labels.append( element.label if isinstance(element, SearchStrategy) else calc_label_from_hash(element) ) return combine_labels(*labels) def calc_has_reusable_values(self, recur: RecurT) -> bool: # Because our custom .map/.filter implementations skip the normal # wrapper strategies (which would automatically return False for us), # we need to manually return False here if any transformations have # been applied. return not self._transformations def calc_is_cacheable(self, recur: RecurT) -> bool: return is_hashable(self.elements) def _transform( self, # https://github.com/python/mypy/issues/7049, we're not writing `element` # anywhere in the class so this is still type-safe. mypy is being more # conservative than necessary element: Ex, # type: ignore ) -> Ex | UniqueIdentifier: # Used in UniqueSampledListStrategy for name, f in self._transformations: if name == "map": result = f(element) if build_context := _current_build_context.value: build_context.record_call(result, f, args=[element], kwargs={}) element = result else: assert name == "filter" if not f(element): return filter_not_satisfied return element def do_draw(self, data: ConjectureData) -> Ex: result = self.do_filtered_draw(data) if isinstance(result, SearchStrategy) and all( isinstance(x, SearchStrategy) for x in self.elements ): data._sampled_from_all_strategies_elements_message = ( "sampled_from was given a collection of strategies: " "{!r}. 
Was one_of intended?", self.elements, ) if result is filter_not_satisfied: data.mark_invalid(f"Aborted test because unable to satisfy {self!r}") assert not isinstance(result, UniqueIdentifier) return result def get_element(self, i: int) -> Ex | UniqueIdentifier: return self._transform(self.elements[i]) def do_filtered_draw(self, data: ConjectureData) -> Ex | UniqueIdentifier: # Set of indices that have been tried so far, so that we never test # the same element twice during a draw. known_bad_indices: set[int] = set() # Start with ordinary rejection sampling. It's fast if it works, and # if it doesn't work then it was only a small amount of overhead. for _ in range(3): i = data.draw_integer(0, len(self.elements) - 1) if i not in known_bad_indices: element = self.get_element(i) if element is not filter_not_satisfied: return element if not known_bad_indices: data.events[f"Retried draw from {self!r} to satisfy filter"] = "" known_bad_indices.add(i) # If we've tried all the possible elements, give up now. max_good_indices = len(self.elements) - len(known_bad_indices) if not max_good_indices: return filter_not_satisfied # Impose an arbitrary cutoff to prevent us from wasting too much time # on very large element lists. max_good_indices = min(max_good_indices, self._MAX_FILTER_CALLS - 3) # Before building the list of allowed indices, speculatively choose # one of them. We don't yet know how many allowed indices there will be, # so this choice might be out-of-bounds, but that's OK. speculative_index = data.draw_integer(0, max_good_indices - 1) # Calculate the indices of allowed values, so that we can choose one # of them at random. But if we encounter the speculatively-chosen one, # just use that and return immediately. 
Note that we also track the # allowed elements, in case of .map(some_stateful_function) allowed: list[tuple[int, Ex]] = [] for i in range(min(len(self.elements), self._MAX_FILTER_CALLS - 3)): if i not in known_bad_indices: element = self.get_element(i) if element is not filter_not_satisfied: assert not isinstance(element, UniqueIdentifier) allowed.append((i, element)) if len(allowed) > speculative_index: # Early-exit case: We reached the speculative index, so # we just return the corresponding element. data.draw_integer(0, len(self.elements) - 1, forced=i) return element # The speculative index didn't work out, but at this point we've built # and can choose from the complete list of allowed indices and elements. if allowed: i, element = data.choice(allowed) data.draw_integer(0, len(self.elements) - 1, forced=i) return element # If there are no allowed indices, the filter couldn't be satisfied. return filter_not_satisfied
SampledFromStrategy
python
Lightning-AI__lightning
tests/tests_pytorch/models/test_hparams.py
{ "start": 2805, "end": 9512 }
class ____(BoringDataModule): """Tests that a model can take an object.""" @decorate @decorate def __init__(self, hparams, *my_args, **my_kwargs): super().__init__() self.save_hyperparameters(hparams) # ------------------------- # STANDARD TESTS # ------------------------- def _run_standard_hparams_test( tmp_path, model, cls, datamodule=None, try_overwrite=False, weights_only: Optional[bool] = None ): """Tests for the existence of an arg 'test_arg=14'.""" obj = datamodule if issubclass(cls, LightningDataModule) else model hparam_type = type(obj.hparams) # test proper property assignments assert obj.hparams.test_arg == 14 # verify we can train trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, overfit_batches=2) trainer.fit(model, datamodule=datamodule if issubclass(cls, LightningDataModule) else None) # make sure the raw checkpoint saved the properties raw_checkpoint_path = _raw_checkpoint_path(trainer) raw_checkpoint = torch.load(raw_checkpoint_path, weights_only=weights_only) assert cls.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint assert raw_checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY]["test_arg"] == 14 # verify that model loads correctly obj2 = cls.load_from_checkpoint(raw_checkpoint_path, weights_only=weights_only) assert obj2.hparams.test_arg == 14 assert isinstance(obj2.hparams, hparam_type) if try_overwrite: # verify that we can overwrite the property obj3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78, weights_only=weights_only) assert obj3.hparams.test_arg == 78 return raw_checkpoint_path @pytest.mark.parametrize( "cls", [SaveHparamsModel, SaveHparamsDecoratedModel, SaveHparamsDataModule, SaveHparamsDecoratedDataModule] ) def test_namespace_hparams(tmp_path, cls): hparams = Namespace(test_arg=14) if issubclass(cls, LightningDataModule): model = BoringModel() datamodule = cls(hparams=hparams) else: model = cls(hparams=hparams) datamodule = None # run standard test suite _run_standard_hparams_test(tmp_path, model, cls, 
datamodule=datamodule) @pytest.mark.parametrize( "cls", [SaveHparamsModel, SaveHparamsDecoratedModel, SaveHparamsDataModule, SaveHparamsDecoratedDataModule] ) def test_dict_hparams(tmp_path, cls): hparams = {"test_arg": 14} if issubclass(cls, LightningDataModule): model = BoringModel() datamodule = cls(hparams=hparams) else: model = cls(hparams=hparams) datamodule = None # run standard test suite _run_standard_hparams_test(tmp_path, model, cls, datamodule=datamodule) @RunIf(omegaconf=True) @pytest.mark.parametrize( "cls", [SaveHparamsModel, SaveHparamsDecoratedModel, SaveHparamsDataModule, SaveHparamsDecoratedDataModule] ) def test_omega_conf_hparams(tmp_path, cls): conf = OmegaConf.create({"test_arg": 14, "mylist": [15.4, {"a": 1, "b": 2}]}) if issubclass(cls, LightningDataModule): model = BoringModel() obj = datamodule = cls(hparams=conf) else: obj = model = cls(hparams=conf) datamodule = None assert isinstance(obj.hparams, Container) # run standard test suite # weights_only=False as omegaconf.DictConfig is not an allowed global by default raw_checkpoint_path = _run_standard_hparams_test(tmp_path, model, cls, datamodule=datamodule, weights_only=False) obj2 = cls.load_from_checkpoint(raw_checkpoint_path, weights_only=False) assert isinstance(obj2.hparams, Container) # config specific tests assert obj2.hparams.test_arg == 14 assert obj2.hparams.mylist[0] == 15.4 def test_explicit_args_hparams(tmp_path): """Tests that a model can take implicit args and assign.""" # define model class LocalModel(BoringModel): def __init__(self, test_arg, test_arg2): super().__init__() self.save_hyperparameters("test_arg", "test_arg2") model = LocalModel(test_arg=14, test_arg2=90) # run standard test suite raw_checkpoint_path = _run_standard_hparams_test(tmp_path, model, LocalModel) model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120) # config specific tests assert model.hparams.test_arg2 == 120 def test_implicit_args_hparams(tmp_path): """Tests that a model can 
take regular args and assign.""" # define model class LocalModel(BoringModel): def __init__(self, test_arg, test_arg2): super().__init__() self.save_hyperparameters() model = LocalModel(test_arg=14, test_arg2=90) # run standard test suite raw_checkpoint_path = _run_standard_hparams_test(tmp_path, model, LocalModel) model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120) # config specific tests assert model.hparams.test_arg2 == 120 def test_explicit_missing_args_hparams(tmp_path): """Tests that a model can take regular args and assign.""" # define model class LocalModel(BoringModel): def __init__(self, test_arg, test_arg2): super().__init__() self.save_hyperparameters("test_arg") model = LocalModel(test_arg=14, test_arg2=90) # test proper property assignments assert model.hparams.test_arg == 14 # verify we can train trainer = Trainer(default_root_dir=tmp_path, max_epochs=2, overfit_batches=0.5) trainer.fit(model) # make sure the raw checkpoint saved the properties raw_checkpoint_path = _raw_checkpoint_path(trainer) raw_checkpoint = torch.load(raw_checkpoint_path, weights_only=True) assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["test_arg"] == 14 # verify that model loads correctly model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123) assert model.hparams.test_arg == 14 assert "test_arg2" not in model.hparams # test_arg2 is not registered in class init assert raw_checkpoint_path # ------------------------- # SPECIFIC TESTS # ------------------------- def test_class_nesting(): class MyModule(LightningModule): def forward(self): ... # make sure PL modules are always nn.Module a = MyModule() assert isinstance(a, torch.nn.Module) def test_outside(): a = MyModule() _ = a.hparams class A: def test(self): a = MyModule() _ = a.hparams def test2(self): test_outside() test_outside() A().test2() A().test()
SaveHparamsDecoratedDataModule
python
eventlet__eventlet
tests/websocket_test.py
{ "start": 757, "end": 20374 }
class ____(tests.wsgi_test._TestBase): TEST_TIMEOUT = 5 def set_site(self): self.site = wsapp def test_incorrect_headers(self): http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo") response = http.getresponse() assert response.status == 400 def test_incomplete_headers_75(self): headers = dict(kv.split(': ') for kv in [ "Upgrade: WebSocket", # NOTE: intentionally no connection header "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') def test_incomplete_headers_76(self): # First test: Missing Connection: headers = dict(kv.split(': ') for kv in [ "Upgrade: WebSocket", # NOTE: intentionally no connection header "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') # Now, miss off key2 headers = dict(kv.split(': ') for kv in [ "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", # NOTE: Intentionally no Key2 header ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') def test_correct_upgrade_request_75(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % 
self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') result = sock.recv(1024) # The server responds the correct Websocket handshake self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 Web Socket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'WebSocket-Origin: http://%s:%s' % self.server_addr, 'WebSocket-Location: ws://%s:%s/echo\r\n\r\n' % self.server_addr, ]).encode()) def test_correct_upgrade_request_76(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') result = sock.recv(1024) # The server responds the correct Websocket handshake self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ws://%s:%s/echo\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, ]).encode()) def test_query_string(self): # verify that the query string comes out the other side unscathed connect = [ "GET /echo?query_string HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') result = sock.recv(1024) self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 
'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ' 'ws://%s:%s/echo?query_string\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, ]).encode()) def test_empty_query_string(self): # verify that a single trailing ? doesn't get nuked connect = [ "GET /echo? HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') result = sock.recv(1024) self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ws://%s:%s/echo?\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, ]).encode()) def test_sending_messages_to_websocket_75(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) self.assertEqual(result, b'\x00hello\xff') sock.sendall(b'\x00start') eventlet.sleep(0.001) sock.sendall(b' end\xff') result = sock.recv(1024) self.assertEqual(result, b'\x00start end\xff') sock.shutdown(socket.SHUT_RDWR) sock.close() eventlet.sleep(0.01) def test_sending_messages_to_websocket_76(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock 
= eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) self.assertEqual(result, b'\x00hello\xff') sock.sendall(b'\x00start') eventlet.sleep(0.001) sock.sendall(b' end\xff') result = sock.recv(1024) self.assertEqual(result, b'\x00start end\xff') sock.shutdown(socket.SHUT_RDWR) sock.close() eventlet.sleep(0.01) def test_getting_messages_from_websocket_75(self): connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') msgs = [result.strip(b'\x00\xff')] cnt = 10 while cnt: msgs.append(sock.recv(20).strip(b'\x00\xff')) cnt -= 1 # Last item in msgs is an empty string self.assertEqual(msgs[:-1], [('msg %d' % i).encode() for i in range(10)]) def test_getting_messages_from_websocket_76(self): connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') msgs = [result[16:].strip(b'\x00\xff')] cnt = 10 while cnt: msgs.append(sock.recv(20).strip(b'\x00\xff')) cnt -= 1 # Last item in msgs is an empty string self.assertEqual(msgs[:-1], [('msg %d' % i).encode() for i in range(10)]) def test_breaking_the_connection_75(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: 
error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() assert not error_detected[0] def test_breaking_the_connection_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() assert not error_detected[0] def test_client_closing_connection_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock 
= eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) # get the headers sock.sendall(b'\xff\x00') # "Close the connection" packet. done_with_request.wait() assert not error_detected[0] def test_client_invalid_packet_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) # get the headers sock.sendall(b'\xef\x00') # Weird packet. done_with_request.wait() assert error_detected[0] def test_server_closing_connect_76(self): connect = [ "GET / HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') # The remote server should have immediately closed the connection. 
self.assertEqual(result[16:], b'\xff\x00') def test_app_socket_errors_75(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /error HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) done_with_request.wait() assert error_detected[0] def test_app_socket_errors_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /error HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) done_with_request.wait() assert error_detected[0] def test_close_idle(self): pool = eventlet.GreenPool() # use log=stderr when test runner can capture it self.spawn_server(custom_pool=pool, log=sys.stdout) connect = ( 'GET /echo HTTP/1.1', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Host: %s:%s' % self.server_addr, 'Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5', 'Sec-WebSocket-Key2: 12998 5 Y3 1 .P00', ) sock = eventlet.connect(self.server_addr) 
sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) sock.sendall(b'\x00hello\xff') result = sock.recv(1024) assert result, b'\x00hello\xff' self.killer.kill(KeyboardInterrupt) with eventlet.Timeout(1): pool.waitall() def test_wrapped_wsgi(self): site = self.site def wrapper(environ, start_response): yield from site(environ, start_response) self.site = wrapper self.spawn_server() connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: {}:{}".format(*self.server_addr), "Origin: http://{}:{}".format(*self.server_addr), "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall("\r\n".join(connect).encode() + b"\r\n\r\n") resp = sock.recv(1024) headers, result = resp.split(b"\r\n\r\n") msgs = [result.strip(b"\x00\xff")] msgs.extend(sock.recv(20).strip(b"\x00\xff") for _ in range(10)) expect = ["msg {}".format(i).encode() for i in range(10)] + [b""] assert msgs == expect # In case of server error, server will write HTTP 500 response to the socket msg = sock.recv(20) assert not msg sock.close() eventlet.sleep(0.01)
TestWebSocket
python
geekcomputers__Python
inheritance_YahV1729.py
{ "start": 221, "end": 525 }
class ____(object): # Constructor def __init__(self, name): self.name = name # To get name def getName(self): return self.name # To check if this person is employee def isEmployee(self): return False # Inherited or Sub class (Note Person in bracket)
Person
python
walkccc__LeetCode
solutions/29. Divide Two Integers/29.py
{ "start": 0, "end": 458 }
class ____: def divide(self, dividend: int, divisor: int) -> int: # -2^{31} / -1 = 2^31 will overflow, so return 2^31 - 1. if dividend == -2**31 and divisor == -1: return 2**31 - 1 sign = -1 if (dividend > 0) ^ (divisor > 0) else 1 ans = 0 dvd = abs(dividend) dvs = abs(divisor) while dvd >= dvs: k = 1 while k * 2 * dvs <= dvd: k <<= 1 dvd -= k * dvs ans += k return sign * ans
Solution
python
joke2k__faker
tests/mymodule/en_US/__init__.py
{ "start": 43, "end": 113 }
class ____(BaseProvider): def foo(self): return "bar"
Provider
python
django__django
tests/expressions/models.py
{ "start": 741, "end": 1295 }
class ____(models.Model): name = models.CharField(max_length=100) num_employees = models.PositiveIntegerField() num_chairs = models.PositiveIntegerField() ceo = models.ForeignKey( Employee, models.CASCADE, related_name="company_ceo_set", ) point_of_contact = models.ForeignKey( Employee, models.SET_NULL, related_name="company_point_of_contact_set", null=True, ) based_in_eu = models.BooleanField(default=False) def __str__(self): return self.name
Company
python
pytorch__pytorch
benchmarks/gpt_fast/mixtral_moe_quantize.py
{ "start": 2490, "end": 4446 }
class ____: def __init__(self, mod): self.mod = mod @torch.no_grad() def create_quantized_state_dict(self): cur_state_dict = self.mod.state_dict() for fqn, mod in self.mod.named_modules(): if isinstance(mod, torch.nn.Linear) and not fqn.endswith(".gate"): int8_weight, scales, _ = dynamically_quantize_per_channel( mod.weight.float(), -128, 127, torch.int8 ) cur_state_dict[f"{fqn}.weight"] = int8_weight cur_state_dict[f"{fqn}.scales"] = scales.to(mod.weight.dtype) elif isinstance(mod, ConditionalFeedForward): for weight_idx in range(3): weight_name = f"w{weight_idx + 1}" scales_name = f"scales{weight_idx + 1}" weight = getattr(mod, weight_name) num_experts, intermediate_size, dim = weight.shape bit8_weight_list = [] scales_list = [] for expert_idx in range(num_experts): bit8_weight, scales, _ = dynamically_quantize_per_channel( weight[expert_idx].float(), -128, 127, torch.int8 ) bit8_weight_list.append( bit8_weight.reshape(1, intermediate_size, dim) ) scales_list.append(scales.reshape(1, intermediate_size)) cur_state_dict[f"{fqn}.{weight_name}"] = torch.cat( bit8_weight_list, dim=0 ) cur_state_dict[f"{fqn}.{scales_name}"] = torch.cat( scales_list, dim=0 ) return cur_state_dict def convert_for_runtime(self): replace_linear_weight_only_int8_per_channel(self.mod) return self.mod
WeightOnlyInt8QuantHandler
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 124317, "end": 125020 }
class ____(str, Enum): """ Methods for transferring a shard from one node to another. - `stream_records` - Stream all shard records in batches until the whole shard is transferred. - `snapshot` - Snapshot the shard, transfer and restore it on the receiver. - `wal_delta` - Attempt to transfer shard difference by WAL delta. - `resharding_stream_records` - Shard transfer for resharding: stream all records in batches until all points are transferred. """ def __str__(self) -> str: return str(self.value) STREAM_RECORDS = "stream_records" SNAPSHOT = "snapshot" WAL_DELTA = "wal_delta" RESHARDING_STREAM_RECORDS = "resharding_stream_records"
ShardTransferMethod
python
numba__numba
numba/tests/test_sets.py
{ "start": 18248, "end": 20450 }
class ____(BaseTest): """ Test unboxing of Python sets into native Numba sets. """ @contextlib.contextmanager def assert_type_error(self, msg): with self.assertRaises(TypeError) as raises: yield if msg is not None: self.assertRegex(str(raises.exception), msg) def check_unary(self, pyfunc): cfunc = jit(nopython=True)(pyfunc) def check(arg): expected = pyfunc(arg) got = cfunc(arg) self.assertPreciseEqual(got, expected) return check def test_numbers(self): check = self.check_unary(unbox_usecase) check(set([1, 2])) check(set([1j, 2.5j])) # Check allocation and sizing check(set(range(100))) def test_tuples(self): check = self.check_unary(unbox_usecase2) check(set([(1, 2), (3, 4)])) check(set([(1, 2j), (3, 4j)])) def test_set_inside_tuple(self): check = self.check_unary(unbox_usecase3) check((1, set([2, 3, 4]))) def test_set_of_tuples_inside_tuple(self): check = self.check_unary(unbox_usecase4) check((1, set([(2,), (3,)]))) def test_errors(self): # Error checking should ensure the set is homogeneous msg = "can't unbox heterogeneous set" pyfunc = noop cfunc = jit(nopython=True)(pyfunc) val = set([1, 2.5]) with self.assert_type_error(msg): cfunc(val) # The set hasn't been changed (bogus reflecting) self.assertEqual(val, set([1, 2.5])) with self.assert_type_error(msg): cfunc(set([1, 2j])) # Same when the set is nested in a tuple or namedtuple with self.assert_type_error(msg): cfunc((1, set([1, 2j]))) with self.assert_type_error(msg): cfunc(Point(1, set([1, 2j]))) # Tuples of different size. # Note the check is really on the tuple side. lst = set([(1,), (2, 3)]) # Depending on which tuple is examined first, we could get # a IndexError or a ValueError. with self.assertRaises((IndexError, ValueError)) as raises: cfunc(lst)
TestUnboxing
python
vyperlang__vyper
vyper/ast/nodes.py
{ "start": 38156, "end": 38483 }
class ____(ExprNode): __slots__ = ("value",) def validate(self): if not isinstance(self.value, Call): raise StructureException( "`staticcall` must be followed by a function call", self.value, hint="did you forget parentheses?", )
StaticCall
python
pypa__setuptools
setuptools/command/editable_wheel.py
{ "start": 30377, "end": 31465 }
class ____(namespaces.Installer): def __init__(self, distribution, installation_dir, editable_name, src_root) -> None: self.distribution = distribution self.src_root = src_root self.installation_dir = installation_dir self.editable_name = editable_name self.outputs: list[str] = [] self.dry_run = False def _get_nspkg_file(self): """Installation target.""" return os.path.join(self.installation_dir, self.editable_name + self.nspkg_ext) def _get_root(self): """Where the modules/packages should be loaded from.""" return repr(str(self.src_root)) _FINDER_TEMPLATE = """\ from __future__ import annotations import sys from importlib.machinery import ModuleSpec, PathFinder from importlib.machinery import all_suffixes as module_suffixes from importlib.util import spec_from_file_location from itertools import chain from pathlib import Path MAPPING: dict[str, str] = {mapping!r} NAMESPACES: dict[str, list[str]] = {namespaces!r} PATH_PLACEHOLDER = {name!r} + ".__path_hook__"
_NamespaceInstaller
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_set.py
{ "start": 50508, "end": 50756 }
class ____(_TestSubsets, __TestCase): left = set() right = set([1, 2]) name = "one empty, one non-empty" cases = "!=", "<", "<=" #------------------------------------------------------------------------------
TestSubsetEmptyNonEmpty
python
google__jax
jax/experimental/sparse/transform.py
{ "start": 6134, "end": 8829 }
class ____(NamedTuple): shape: tuple[int, ...] data_ref: int | None indices_ref: int | None = None indptr_ref: int | None = None indices_sorted: bool | None = False unique_indices: bool | None = False @property def ndim(self): return len(self.shape) def is_sparse(self): return self.indices_ref is not None def is_dense(self): return self.indices_ref is None def is_bcoo(self): return self.is_sparse() and self.indptr_ref is None def is_bcsr(self): return self.is_sparse() and self.indptr_ref is not None _is_sparse_obj = lambda arg: isinstance(arg, (BCOO, BCSR)) _is_spvalue = lambda arg: isinstance(arg, SparsifyValue) def arrays_to_spvalues( spenv: SparsifyEnv, args: Any ) -> Any: """Convert a pytree of (sparse) arrays to an equivalent pytree of spvalues.""" def array_to_spvalue(arg): if isinstance(arg, BCOO): return spenv.sparse(arg.shape, arg.data, arg.indices, indices_sorted=arg.indices_sorted, unique_indices=arg.unique_indices) elif isinstance(arg, BCSR): return spenv.sparse(arg.shape, arg.data, arg.indices, arg.indptr, indices_sorted=arg.indices_sorted, unique_indices=arg.unique_indices) else: return spenv.dense(arg) return tree_map(array_to_spvalue, args, is_leaf=_is_sparse_obj) def spvalues_to_arrays( spenv: SparsifyEnv, spvalues: Any, ) -> Any: """Convert a pytree of spvalues to an equivalent pytree of (sparse) arrays.""" def spvalue_to_array(spvalue): if spvalue.is_bcoo(): return BCOO((spenv.data(spvalue), spenv.indices(spvalue)), shape=spvalue.shape, indices_sorted=spvalue.indices_sorted, unique_indices=spvalue.unique_indices) elif spvalue.is_bcsr(): return BCSR((spenv.data(spvalue), spenv.indices(spvalue), spenv.indptr(spvalue)), shape=spvalue.shape, indices_sorted=spvalue.indices_sorted, unique_indices=spvalue.unique_indices) else: return spenv.data(spvalue) return tree_map(spvalue_to_array, spvalues, is_leaf=_is_spvalue) def spvalues_to_avals( spenv: SparsifyEnv, spvalues: Any, ) -> Any: """Convert a pytree of spvalues to an equivalent pytree of abstract 
values.""" def spvalue_to_aval(spvalue): data = spenv.data(spvalue) return core.ShapedArray(spvalue.shape, data.dtype, data.aval.weak_type) return tree_map(spvalue_to_aval, spvalues, is_leaf=_is_spvalue) # ------------------------------------------------------------------------------ # Implementation of sparsify() using tracers.
SparsifyValue
python
ray-project__ray
python/ray/data/_internal/actor_autoscaler/default_actor_autoscaler.py
{ "start": 651, "end": 9448 }
class ____(ActorAutoscaler): def __init__( self, topology: "Topology", resource_manager: "ResourceManager", *, config: AutoscalingConfig, ): super().__init__(topology, resource_manager) self._actor_pool_scaling_up_threshold = ( config.actor_pool_util_upscaling_threshold ) self._actor_pool_scaling_down_threshold = ( config.actor_pool_util_downscaling_threshold ) self._actor_pool_max_upscaling_delta = config.actor_pool_max_upscaling_delta self._validate_autoscaling_config() def try_trigger_scaling(self): for op, state in self._topology.items(): actor_pools = op.get_autoscaling_actor_pools() for actor_pool in actor_pools: # Trigger auto-scaling actor_pool.scale( self._derive_target_scaling_config(actor_pool, op, state) ) def _derive_target_scaling_config( self, actor_pool: "AutoscalingActorPool", op: "PhysicalOperator", op_state: "OpState", ) -> ActorPoolScalingRequest: # If all inputs have been consumed, short-circuit if op.completed() or ( op._inputs_complete and op_state.total_enqueued_input_blocks() == 0 ): return ActorPoolScalingRequest.downscale( delta=-1, force=True, reason="consumed all inputs" ) if actor_pool.current_size() < actor_pool.min_size(): # Scale up, if the actor pool is below min size. return ActorPoolScalingRequest.upscale( delta=actor_pool.min_size() - actor_pool.current_size(), reason="pool below min size", ) elif actor_pool.current_size() > actor_pool.max_size(): # Do not scale up, if the actor pool is already at max size. return ActorPoolScalingRequest.downscale( # NOTE: For scale down delta has to be negative delta=-(actor_pool.current_size() - actor_pool.max_size()), reason="pool exceeding max size", ) # Determine whether to scale up based on the actor pool utilization. 
util = actor_pool.get_pool_util() if util >= self._actor_pool_scaling_up_threshold: # Do not scale up if either # - Previous scale up has not finished yet # - Actor Pool is at max size already # - Op is throttled (ie exceeding allocated resource quota) # - Actor Pool has sufficient amount of slots available to handle # pending tasks if actor_pool.num_pending_actors() > 0: return ActorPoolScalingRequest.no_op(reason="pending actors") elif actor_pool.current_size() >= actor_pool.max_size(): return ActorPoolScalingRequest.no_op(reason="reached max size") if not op_state._scheduling_status.under_resource_limits: return ActorPoolScalingRequest.no_op( reason="operator exceeding resource quota" ) budget = self._resource_manager.get_budget(op) max_scale_up = _get_max_scale_up(actor_pool, budget) if max_scale_up == 0: return ActorPoolScalingRequest.no_op(reason="exceeded resource limits") # Calculate desired delta based on utilization plan_delta = math.ceil( actor_pool.current_size() * (util / self._actor_pool_scaling_up_threshold - 1) ) upscale_capacities = self._get_upscale_capacities(actor_pool, max_scale_up) delta = min( plan_delta, *upscale_capacities, ) delta = max(1, delta) # At least scale up by 1 return ActorPoolScalingRequest.upscale( delta=delta, reason=( f"utilization of {util} >= " f"{self._actor_pool_scaling_up_threshold}" ), ) elif util <= self._actor_pool_scaling_down_threshold: if actor_pool.current_size() <= actor_pool.min_size(): return ActorPoolScalingRequest.no_op(reason="reached min size") return ActorPoolScalingRequest.downscale( delta=-1, reason=( f"utilization of {util} <= " f"{self._actor_pool_scaling_down_threshold}" ), ) else: return ActorPoolScalingRequest.no_op( reason=( f"utilization of {util} w/in limits " f"[{self._actor_pool_scaling_down_threshold}, " f"{self._actor_pool_scaling_up_threshold}]" ) ) def _validate_autoscaling_config(self): # Validate that max upscaling delta is positive to prevent override by safeguard if 
self._actor_pool_max_upscaling_delta <= 0: raise ValueError( f"actor_pool_max_upscaling_delta must be positive, " f"got {self._actor_pool_max_upscaling_delta}" ) # Validate that upscaling threshold is positive to prevent division by zero # and incorrect scaling calculations if self._actor_pool_scaling_up_threshold <= 0: raise ValueError( f"actor_pool_util_upscaling_threshold must be positive, " f"got {self._actor_pool_scaling_up_threshold}" ) for op, state in self._topology.items(): for actor_pool in op.get_autoscaling_actor_pools(): self._validate_actor_pool_autoscaling_config(actor_pool, op) def _get_upscale_capacities( self, actor_pool: "AutoscalingActorPool", max_scale_up: Optional[int], ): limits = [] if max_scale_up is not None: limits.append(max_scale_up) limits.append(self._actor_pool_max_upscaling_delta) limits.append(actor_pool.max_size() - actor_pool.current_size()) return limits def _validate_actor_pool_autoscaling_config( self, actor_pool: "AutoscalingActorPool", op: "PhysicalOperator", ) -> None: """Validate autoscaling configuration. Args: actor_pool: Actor pool to validate configuration thereof. op: ``PhysicalOperator`` using target actor pool. 
""" max_tasks_in_flight_per_actor = actor_pool.max_tasks_in_flight_per_actor() max_concurrency = actor_pool.max_actor_concurrency() if ( max_tasks_in_flight_per_actor / max_concurrency < self._actor_pool_scaling_up_threshold ): logger.warning( f"{WARN_PREFIX} Actor Pool configuration of the {op} will not allow it to scale up: " f"configured utilization threshold ({self._actor_pool_scaling_up_threshold * 100}%) " f"couldn't be reached with configured max_concurrency={max_concurrency} " f"and max_tasks_in_flight_per_actor={max_tasks_in_flight_per_actor} " f"(max utilization will be max_tasks_in_flight_per_actor / max_concurrency = {(max_tasks_in_flight_per_actor / max_concurrency) * 100:g}%)" ) def _get_max_scale_up( actor_pool: AutoscalingActorPool, budget: Optional[ExecutionResources], ) -> Optional[int]: """Get the maximum number of actors that can be scaled up. Args: actor_pool: The actor pool to scale up. budget: The budget to scale up. Returns: The maximum number of actors that can be scaled up, or `None` if you can scale up infinitely. """ if budget is None: return None assert budget.cpu >= 0 and budget.gpu >= 0 num_cpus_per_actor = actor_pool.per_actor_resource_usage().cpu num_gpus_per_actor = actor_pool.per_actor_resource_usage().gpu assert num_cpus_per_actor >= 0 and num_gpus_per_actor >= 0 max_cpu_scale_up: float = float("inf") if num_cpus_per_actor > 0 and not math.isinf(budget.cpu): max_cpu_scale_up = budget.cpu // num_cpus_per_actor max_gpu_scale_up: float = float("inf") if num_gpus_per_actor > 0 and not math.isinf(budget.gpu): max_gpu_scale_up = budget.gpu // num_gpus_per_actor max_scale_up = min(max_cpu_scale_up, max_gpu_scale_up) if math.isinf(max_scale_up): return None else: assert not math.isnan(max_scale_up), ( budget, num_cpus_per_actor, num_gpus_per_actor, ) return int(max_scale_up)
DefaultActorAutoscaler
python
pikepdf__pikepdf
tests/test_object.py
{ "start": 10316, "end": 14058 }
class ____: def test_contains(self): d = Dictionary({'/Monty': 'Python', '/Flying': 'Circus'}) assert Name.Flying in d assert Name('/Monty') in d assert Name.Brian not in d def test_none(self): d = pikepdf.Dictionary({'/One': 1, '/Two': 2}) with pytest.raises(ValueError): d['/Two'] = None def test_init(self): d1 = pikepdf.Dictionary({'/Animal': 'Dog'}) d2 = pikepdf.Dictionary(Animal='Dog') assert d1 == d2 def test_kwargs(self): d = pikepdf.Dictionary(A='a', B='b', C='c') assert '/B' in d assert 'B' in dir(d) def test_iter(self): d = pikepdf.Dictionary(A='a') for k in d: assert k == '/A' assert d[k] == 'a' def test_items(self): d = pikepdf.Dictionary(A='a') for _k in d.items(): pass def test_str(self): d = pikepdf.Dictionary(ABCD='abcd') assert 'ABCD' in str(d) def test_attr(self): d = pikepdf.Dictionary(A='a') with pytest.raises(AttributeError): d.invalidname # pylint: disable=pointless-statement def test_get(self): d = pikepdf.Dictionary(A='a') assert d.get(Name.A) == 'a' assert d.get(Name.Resources, 42) == 42 def test_bad_name_init(self): with pytest.raises(KeyError, match=r"must begin with '/'"): pikepdf.Dictionary({'/Slash': 'dot', 'unslash': 'error'}) with pytest.raises(KeyError, match=r"must begin with '/'"): pikepdf.Dictionary({'/': 'slash'}) def test_bad_name_set(self): d = pikepdf.Dictionary() d['/Slash'] = 'dot' with pytest.raises(KeyError, match=r"must begin with '/'"): d['unslash'] = 'error' with pytest.raises(KeyError, match=r"may not be '/'"): d['/'] = 'error' def test_del_missing_key(self): d = pikepdf.Dictionary(A='a') with pytest.raises(KeyError): del d.B def test_int_access(self): d = pikepdf.Dictionary() with pytest.raises(TypeError, match="not an array"): d[0] = 3 def test_wrong_contains_type(self): d = pikepdf.Dictionary() with pytest.raises(TypeError, match="can only contain Names"): assert pikepdf.Array([3]) not in d def test_dict_bad_params(self): with pytest.raises(ValueError): Dictionary({'/Foo': 1}, Bar=2) def test_dict_of_dict(self): d = 
Dictionary(One=1, Two=2) d2 = Dictionary(d) assert d == d2 assert d is not d2 def test_dict_bool(self): assert bool(pikepdf.Dictionary()) is False assert bool(pikepdf.Dictionary(One=1)) is True def test_not_convertible(): class PurePythonObj: def __repr__(self): return 'PurePythonObj()' c = PurePythonObj() with pytest.raises(RuntimeError): encode(c) with pytest.raises(RuntimeError): pikepdf.Array([1, 2, c]) d = pikepdf.Dictionary() with pytest.raises(RuntimeError): d.SomeKey = c assert d != c def test_json(): d = Dictionary( { '/Boolean': True, '/Integer': 42, '/Real': Decimal('42.42'), '/String': String('hi'), '/Array': Array([1, 2, 3.14]), '/Dictionary': Dictionary({'/Color': 'Red'}), } ) json_bytes = d.to_json(False) as_dict = json.loads(json_bytes) assert as_dict == { "/Array": [1, 2, 3.14], "/Boolean": True, "/Dictionary": {"/Color": "u:Red"}, "/Integer": 42, "/Real": 42.42, "/String": "u:hi", }
TestDictionary
python
ray-project__ray
python/ray/autoscaler/v2/schema.py
{ "start": 5601, "end": 6960 }
class ____: # Healthy nodes information (non-idle) active_nodes: List[NodeInfo] = field(default_factory=list) # Idle node information idle_nodes: List[NodeInfo] = field(default_factory=list) # Pending launches. pending_launches: List[LaunchRequest] = field(default_factory=list) # Failed launches. failed_launches: List[LaunchRequest] = field(default_factory=list) # Pending nodes. pending_nodes: List[NodeInfo] = field(default_factory=list) # Failures failed_nodes: List[NodeInfo] = field(default_factory=list) # Resource usage summary for entire cluster. cluster_resource_usage: List[ResourceUsage] = field(default_factory=list) # Demand summary. resource_demands: ResourceDemandSummary = field( default_factory=ResourceDemandSummary ) # Query metics stats: Stats = field(default_factory=Stats) def total_resources(self) -> Dict[str, float]: return {r.resource_name: r.total for r in self.cluster_resource_usage} def available_resources(self) -> Dict[str, float]: return {r.resource_name: r.total - r.used for r in self.cluster_resource_usage} # TODO(rickyx): we don't show infeasible requests as of now. # (They will just be pending forever as part of the demands) # We should show them properly in the future. @dataclass
ClusterStatus
python
python-pillow__Pillow
src/PIL/ExifTags.py
{ "start": 9305, "end": 9461 }
class ____(IntEnum): Exif = 0x8769 GPSInfo = 0x8825 MakerNote = 0x927C Makernote = 0x927C # Deprecated Interop = 0xA005 IFD1 = -1
IFD
python
ray-project__ray
python/ray/_common/tests/test_signature.py
{ "start": 14128, "end": 16658 }
class ____: """Integration tests for signature utilities working together.""" def test_complete_workflow(self): """Test complete workflow from function to flatten/recover.""" def test_func(x: int, y: str = "default", z: Optional[Any] = None): return f"{x}_{y}_{z}" # Extract signature params = extract_signature(test_func) assert len(params) == 3 # Validate arguments args = (42, "hello") kwargs = {"z": [1, 2, 3]} validate_args(params, args, kwargs) # Flatten arguments flattened = flatten_args(params, args, kwargs) expected = [DUMMY_TYPE, 42, DUMMY_TYPE, "hello", "z", [1, 2, 3]] assert flattened == expected # Recover arguments recovered_args, recovered_kwargs = recover_args(flattened) assert recovered_args == list(args) assert recovered_kwargs == kwargs def test_method_workflow_with_ignore_first(self): """Test complete workflow for class methods with ignore_first=True.""" class TestClass: def test_method(self, a: int, b: str = "test"): return f"{a}_{b}" # Extract signature ignoring 'self' params = extract_signature(TestClass.test_method, ignore_first=True) assert len(params) == 2 assert params[0].name == "a" assert params[1].name == "b" # Validate and flatten args = (100,) kwargs = {"b": "custom"} validate_args(params, args, kwargs) flattened = flatten_args(params, args, kwargs) # Recover and verify recovered_args, recovered_kwargs = recover_args(flattened) assert recovered_args == list(args) assert recovered_kwargs == kwargs def test_varargs_kwargs_workflow(self): """Test workflow with functions that have *args and **kwargs.""" def test_func(a, b=10, *args, **kwargs): return a + b + sum(args) + sum(kwargs.values()) params = extract_signature(test_func) # Test with extra positional and keyword arguments args = (1, 2, 3, 4, 5) kwargs = {"extra1": 10, "extra2": 20} validate_args(params, args, kwargs) flattened = flatten_args(params, args, kwargs) recovered_args, recovered_kwargs = recover_args(flattened) assert recovered_args == list(args) assert recovered_kwargs == 
kwargs if __name__ == "__main__": sys.exit(pytest.main(["-v", "-s", __file__]))
TestIntegration
python
huggingface__transformers
src/transformers/models/sew_d/modeling_sew_d.py
{ "start": 17616, "end": 18363 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) self.dropout = StableDropout(config.pooler_dropout) self.config = config def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. context_token = hidden_states[:, 0] context_token = self.dropout(context_token) pooled_output = self.dense(context_token) pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) return pooled_output @property def output_dim(self): return self.config.hidden_size
ContextPooler
python
walkccc__LeetCode
solutions/174. Dungeon Game/174.py
{ "start": 0, "end": 345 }
class ____: def calculateMinimumHP(self, dungeon: list[list[int]]) -> int: m = len(dungeon) n = len(dungeon[0]) dp = [math.inf] * (n + 1) dp[n - 1] = 1 for i in reversed(range(m)): for j in reversed(range(n)): dp[j] = min(dp[j], dp[j + 1]) - dungeon[i][j] dp[j] = max(dp[j], 1) return dp[0]
Solution
python
tiangolo__fastapi
scripts/docs.py
{ "start": 1276, "end": 17690 }
class ____(HTMLParser): """Extract visible text from a string with HTML tags.""" def __init__(self): super().__init__() self.text_parts = [] def handle_data(self, data): self.text_parts.append(data) def extract_visible_text(self, html: str) -> str: self.reset() self.text_parts = [] self.feed(html) return "".join(self.text_parts).strip() def slugify(text: str) -> str: return py_slugify( text, replacements=[ ("`", ""), # `dict`s -> dicts ("'s", "s"), # it's -> its ("'t", "t"), # don't -> dont ("**", ""), # **FastAPI**s -> FastAPIs ], ) def get_en_config() -> Dict[str, Any]: return mkdocs.utils.yaml_load(en_config_path.read_text(encoding="utf-8")) def get_lang_paths() -> List[Path]: return sorted(docs_path.iterdir()) def lang_callback(lang: Optional[str]) -> Union[str, None]: if lang is None: return None lang = lang.lower() return lang def complete_existing_lang(incomplete: str): lang_path: Path for lang_path in get_lang_paths(): if lang_path.is_dir() and lang_path.name.startswith(incomplete): yield lang_path.name @app.callback() def callback() -> None: # For MacOS with Cairo os.environ["DYLD_FALLBACK_LIBRARY_PATH"] = "/opt/homebrew/lib" @app.command() def new_lang(lang: str = typer.Argument(..., callback=lang_callback)): """ Generate a new docs translation directory for the language LANG. 
""" new_path: Path = Path("docs") / lang if new_path.exists(): typer.echo(f"The language was already created: {lang}") raise typer.Abort() new_path.mkdir() new_config_path: Path = Path(new_path) / mkdocs_name new_config_path.write_text("INHERIT: ../en/mkdocs.yml\n", encoding="utf-8") new_config_docs_path: Path = new_path / "docs" new_config_docs_path.mkdir() en_index_path: Path = en_docs_path / "docs" / "index.md" new_index_path: Path = new_config_docs_path / "index.md" en_index_content = en_index_path.read_text(encoding="utf-8") new_index_content = f"{missing_translation_snippet}\n\n{en_index_content}" new_index_path.write_text(new_index_content, encoding="utf-8") typer.secho(f"Successfully initialized: {new_path}", color=typer.colors.GREEN) update_languages() @app.command() def build_lang( lang: str = typer.Argument( ..., callback=lang_callback, autocompletion=complete_existing_lang ), ) -> None: """ Build the docs for a language. """ lang_path: Path = Path("docs") / lang if not lang_path.is_dir(): typer.echo(f"The language translation doesn't seem to exist yet: {lang}") raise typer.Abort() typer.echo(f"Building docs for: {lang}") build_site_dist_path = build_site_path / lang if lang == "en": dist_path = site_path # Don't remove en dist_path as it might already contain other languages. # When running build_all(), that function already removes site_path. # All this is only relevant locally, on GitHub Actions all this is done through # artifacts and multiple workflows, so it doesn't matter if directories are # removed or not. 
else: dist_path = site_path / lang shutil.rmtree(dist_path, ignore_errors=True) current_dir = os.getcwd() os.chdir(lang_path) shutil.rmtree(build_site_dist_path, ignore_errors=True) subprocess.run(["mkdocs", "build", "--site-dir", build_site_dist_path], check=True) shutil.copytree(build_site_dist_path, dist_path, dirs_exist_ok=True) os.chdir(current_dir) typer.secho(f"Successfully built docs for: {lang}", color=typer.colors.GREEN) index_sponsors_template = """ ### Keystone Sponsor {% for sponsor in sponsors.keystone -%} <a href="{{ sponsor.url }}" target="_blank" title="{{ sponsor.title }}"><img src="{{ sponsor.img }}"></a> {% endfor %} ### Gold and Silver Sponsors {% for sponsor in sponsors.gold -%} <a href="{{ sponsor.url }}" target="_blank" title="{{ sponsor.title }}"><img src="{{ sponsor.img }}"></a> {% endfor -%} {%- for sponsor in sponsors.silver -%} <a href="{{ sponsor.url }}" target="_blank" title="{{ sponsor.title }}"><img src="{{ sponsor.img }}"></a> {% endfor %} """ def remove_header_permalinks(content: str): lines: list[str] = [] for line in content.split("\n"): match = header_with_permalink_pattern.match(line) if match: hashes, title, *_ = match.groups() line = f"{hashes} {title}" lines.append(line) return "\n".join(lines) def generate_readme_content() -> str: en_index = en_docs_path / "docs" / "index.md" content = en_index.read_text("utf-8") content = remove_header_permalinks(content) # remove permalinks from headers match_pre = re.search(r"</style>\n\n", content) match_start = re.search(r"<!-- sponsors -->", content) match_end = re.search(r"<!-- /sponsors -->", content) sponsors_data_path = en_docs_path / "data" / "sponsors.yml" sponsors = mkdocs.utils.yaml_load(sponsors_data_path.read_text(encoding="utf-8")) if not (match_start and match_end): raise RuntimeError("Couldn't auto-generate sponsors section") if not match_pre: raise RuntimeError("Couldn't find pre section (<style>) in index.md") frontmatter_end = match_pre.end() pre_end = 
match_start.end() post_start = match_end.start() template = Template(index_sponsors_template) message = template.render(sponsors=sponsors) pre_content = content[frontmatter_end:pre_end] post_content = content[post_start:] new_content = pre_content + message + post_content # Remove content between <!-- only-mkdocs --> and <!-- /only-mkdocs --> new_content = re.sub( r"<!-- only-mkdocs -->.*?<!-- /only-mkdocs -->", "", new_content, flags=re.DOTALL, ) return new_content @app.command() def generate_readme() -> None: """ Generate README.md content from main index.md """ typer.echo("Generating README") readme_path = Path("README.md") new_content = generate_readme_content() readme_path.write_text(new_content, encoding="utf-8") @app.command() def verify_readme() -> None: """ Verify README.md content from main index.md """ typer.echo("Verifying README") readme_path = Path("README.md") generated_content = generate_readme_content() readme_content = readme_path.read_text("utf-8") if generated_content != readme_content: typer.secho( "README.md outdated from the latest index.md", color=typer.colors.RED ) raise typer.Abort() typer.echo("Valid README ✅") @app.command() def build_all() -> None: """ Build mkdocs site for en, and then build each language inside, end result is located at directory ./site/ with each language inside. """ update_languages() shutil.rmtree(site_path, ignore_errors=True) langs = [lang.name for lang in get_lang_paths() if lang.is_dir()] cpu_count = os.cpu_count() or 1 process_pool_size = cpu_count * 4 typer.echo(f"Using process pool size: {process_pool_size}") with Pool(process_pool_size) as p: p.map(build_lang, langs) @app.command() def update_languages() -> None: """ Update the mkdocs.yml file Languages section including all the available languages. """ update_config() @app.command() def serve() -> None: """ A quick server to preview a built site with translations. For development, prefer the command live (or just mkdocs serve). 
This is here only to preview a site with translations already built. Make sure you run the build-all command first. """ typer.echo("Warning: this is a very simple server.") typer.echo("For development, use the command live instead.") typer.echo("This is here only to preview a site with translations already built.") typer.echo("Make sure you run the build-all command first.") os.chdir("site") server_address = ("", 8008) server = HTTPServer(server_address, SimpleHTTPRequestHandler) typer.echo("Serving at: http://127.0.0.1:8008") server.serve_forever() @app.command() def live( lang: str = typer.Argument( None, callback=lang_callback, autocompletion=complete_existing_lang ), dirty: bool = False, ) -> None: """ Serve with livereload a docs site for a specific language. This only shows the actual translated files, not the placeholders created with build-all. Takes an optional LANG argument with the name of the language to serve, by default en. """ # Enable line numbers during local development to make it easier to highlight if lang is None: lang = "en" lang_path: Path = docs_path / lang # Enable line numbers during local development to make it easier to highlight args = ["mkdocs", "serve", "--dev-addr", "127.0.0.1:8008"] if dirty: args.append("--dirty") subprocess.run( args, env={**os.environ, "LINENUMS": "true"}, cwd=lang_path, check=True ) def get_updated_config_content() -> Dict[str, Any]: config = get_en_config() languages = [{"en": "/"}] new_alternate: List[Dict[str, str]] = [] # Language names sourced from https://quickref.me/iso-639-1 # Contributors may wish to update or change these, e.g. to fix capitalization. 
language_names_path = Path(__file__).parent / "../docs/language_names.yml" local_language_names: Dict[str, str] = mkdocs.utils.yaml_load( language_names_path.read_text(encoding="utf-8") ) for lang_path in get_lang_paths(): if lang_path.name in {"en", "em"} or not lang_path.is_dir(): continue code = lang_path.name languages.append({code: f"/{code}/"}) for lang_dict in languages: code = list(lang_dict.keys())[0] url = lang_dict[code] if code not in local_language_names: print( f"Missing language name for: {code}, " "update it in docs/language_names.yml" ) raise typer.Abort() use_name = f"{code} - {local_language_names[code]}" new_alternate.append({"link": url, "name": use_name}) new_alternate.append({"link": "/em/", "name": "😉"}) config["extra"]["alternate"] = new_alternate return config def update_config() -> None: config = get_updated_config_content() en_config_path.write_text( yaml.dump(config, sort_keys=False, width=200, allow_unicode=True), encoding="utf-8", ) @app.command() def verify_config() -> None: """ Verify main mkdocs.yml content to make sure it uses the latest language names. """ typer.echo("Verifying mkdocs.yml") config = get_en_config() updated_config = get_updated_config_content() if config != updated_config: typer.secho( "docs/en/mkdocs.yml outdated from docs/language_names.yml, " "update language_names.yml and run " "python ./scripts/docs.py update-languages", color=typer.colors.RED, ) raise typer.Abort() typer.echo("Valid mkdocs.yml ✅") @app.command() def verify_non_translated() -> None: """ Verify there are no files in the non translatable pages. 
""" print("Verifying non translated pages") lang_paths = get_lang_paths() error_paths = [] for lang in lang_paths: if lang.name == "en": continue for non_translatable in non_translated_sections: non_translatable_path = lang / "docs" / non_translatable if non_translatable_path.exists(): error_paths.append(non_translatable_path) if error_paths: print("Non-translated pages found, remove them:") for error_path in error_paths: print(error_path) raise typer.Abort() print("No non-translated pages found ✅") @app.command() def verify_docs(): verify_readme() verify_config() verify_non_translated() @app.command() def langs_json(): langs = [] for lang_path in get_lang_paths(): if lang_path.is_dir(): langs.append(lang_path.name) print(json.dumps(langs)) @app.command() def generate_docs_src_versions_for_file(file_path: Path) -> None: target_versions = ["py39", "py310"] base_content = file_path.read_text(encoding="utf-8") previous_content = {base_content} for target_version in target_versions: version_result = subprocess.run( [ find_ruff_bin(), "check", "--target-version", target_version, "--fix", "--unsafe-fixes", "-", ], input=base_content.encode("utf-8"), capture_output=True, ) content_target = version_result.stdout.decode("utf-8") format_result = subprocess.run( [find_ruff_bin(), "format", "-"], input=content_target.encode("utf-8"), capture_output=True, ) content_format = format_result.stdout.decode("utf-8") if content_format in previous_content: continue previous_content.add(content_format) version_file = file_path.with_name( file_path.name.replace(".py", f"_{target_version}.py") ) logging.info(f"Writing to {version_file}") version_file.write_text(content_format, encoding="utf-8") @app.command() def add_permalinks_page(path: Path, update_existing: bool = False): """ Add or update header permalinks in specific page of En docs. 
""" if not path.is_relative_to(en_docs_path / "docs"): raise RuntimeError(f"Path must be inside {en_docs_path}") rel_path = path.relative_to(en_docs_path / "docs") # Skip excluded sections if str(rel_path).startswith(non_translated_sections): return visible_text_extractor = VisibleTextExtractor() updated_lines = [] in_code_block3 = False in_code_block4 = False permalinks = set() with path.open("r", encoding="utf-8") as f: lines = f.readlines() for line in lines: # Handle codeblocks start and end if not (in_code_block3 or in_code_block4): if code_block4_pattern.match(line): in_code_block4 = True elif code_block3_pattern.match(line): in_code_block3 = True else: if in_code_block4 and code_block4_pattern.match(line): in_code_block4 = False elif in_code_block3 and code_block3_pattern.match(line): in_code_block3 = False # Process Headers only outside codeblocks if not (in_code_block3 or in_code_block4): match = header_pattern.match(line) if match: hashes, title, _permalink = match.groups() if (not _permalink) or update_existing: slug = slugify(visible_text_extractor.extract_visible_text(title)) if slug in permalinks: # If the slug is already used, append a number to make it unique count = 1 original_slug = slug while slug in permalinks: slug = f"{original_slug}_{count}" count += 1 permalinks.add(slug) line = f"{hashes} {title} {{ #{slug} }}\n" updated_lines.append(line) with path.open("w", encoding="utf-8") as f: f.writelines(updated_lines) @app.command() def add_permalinks_pages(pages: List[Path], update_existing: bool = False) -> None: """ Add or update header permalinks in specific pages of En docs. """ for md_file in pages: add_permalinks_page(md_file, update_existing=update_existing) @app.command() def add_permalinks(update_existing: bool = False) -> None: """ Add or update header permalinks in all pages of En docs. """ for md_file in en_docs_path.rglob("*.md"): add_permalinks_page(md_file, update_existing=update_existing) if __name__ == "__main__": app()
VisibleTextExtractor
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/hooks/neptune.py
{ "start": 894, "end": 4704 }
class ____(AwsBaseHook): """ Interact with Amazon Neptune. Additional arguments (such as ``aws_conn_id``) may be specified and are passed down to the underlying AwsBaseHook. .. seealso:: - :class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook` """ AVAILABLE_STATES = ["available"] STOPPED_STATES = ["stopped"] ERROR_STATES = [ "cloning-failed", "inaccessible-encryption-credentials", "inaccessible-encryption-credentials-recoverable", "migration-failed", ] def __init__(self, *args, **kwargs): kwargs["client_type"] = "neptune" super().__init__(*args, **kwargs) def wait_for_cluster_availability(self, cluster_id: str, delay: int = 30, max_attempts: int = 60) -> str: """ Wait for Neptune cluster to start. :param cluster_id: The ID of the cluster to wait for. :param delay: Time in seconds to delay between polls. :param max_attempts: Maximum number of attempts to poll for completion. :return: The status of the cluster. """ self.get_waiter("cluster_available").wait( DBClusterIdentifier=cluster_id, WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) status = self.get_cluster_status(cluster_id) self.log.info("Finished waiting for cluster %s. Status is now %s", cluster_id, status) return status def wait_for_cluster_stopped(self, cluster_id: str, delay: int = 30, max_attempts: int = 60) -> str: """ Wait for Neptune cluster to stop. :param cluster_id: The ID of the cluster to wait for. :param delay: Time in seconds to delay between polls. :param max_attempts: Maximum number of attempts to poll for completion. :return: The status of the cluster. """ self.get_waiter("cluster_stopped").wait( DBClusterIdentifier=cluster_id, WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) status = self.get_cluster_status(cluster_id) self.log.info("Finished waiting for cluster %s. Status is now %s", cluster_id, status) return status def get_cluster_status(self, cluster_id: str) -> str: """ Get the status of a Neptune cluster. 
:param cluster_id: The ID of the cluster to get the status of. :return: The status of the cluster. """ return self.conn.describe_db_clusters(DBClusterIdentifier=cluster_id)["DBClusters"][0]["Status"] def get_db_instance_status(self, instance_id: str) -> str: """ Get the status of a Neptune instance. :param instance_id: The ID of the instance to get the status of. :return: The status of the instance. """ return self.conn.describe_db_instances(DBInstanceIdentifier=instance_id)["DBInstances"][0][ "DBInstanceStatus" ] def wait_for_cluster_instance_availability( self, cluster_id: str, delay: int = 30, max_attempts: int = 60 ) -> None: """ Wait for Neptune instances in a cluster to be available. :param cluster_id: The cluster ID of the instances to wait for. :param delay: Time in seconds to delay between polls. :param max_attempts: Maximum number of attempts to poll for completion. :return: The status of the instances. """ filters = [{"Name": "db-cluster-id", "Values": [cluster_id]}] self.log.info("Waiting for instances in cluster %s.", cluster_id) self.get_waiter("db_instance_available").wait( Filters=filters, WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts} ) self.log.info("Finished waiting for instances in cluster %s.", cluster_id)
NeptuneHook
python
dagster-io__dagster
python_modules/libraries/dagster-gcp/dagster_gcp/pipes/message_readers.py
{ "start": 988, "end": 2388 }
class ____(PipesChunkedLogReader): def __init__( self, *, bucket: str, key: str, client: Optional[GCSClient] = None, interval: float = 10, target_stream: Optional[IO[str]] = None, # TODO: maybe move this parameter to a different scope decode_fn: Optional[Callable[[bytes], str]] = None, debug_info: Optional[str] = None, ): self.bucket = bucket self.key = key self.client: GCSClient = client or GCSClient() self.decode_fn = decode_fn or default_log_decode_fn self.log_position = 0 super().__init__( interval=interval, target_stream=target_stream or sys.stdout, debug_info=debug_info ) @property def name(self) -> str: return f"PipesGCSLogReader(gs://{os.path.join(self.bucket, self.key)})" def target_is_readable(self, params: PipesParams) -> bool: return _can_read_from_gcs( client=self.client, bucket=self.bucket, key=self.key, ) def download_log_chunk(self, params: PipesParams) -> Optional[str]: text = self.decode_fn( self.client.get_bucket(self.bucket).blob(self.key).download_as_bytes() ) current_position = self.log_position self.log_position += len(text) return text[current_position:]
PipesGCSLogReader
python
viewflow__viewflow
tests/json/test_json__basics.py
{ "start": 3131, "end": 3234 }
class ____(forms.ModelForm): class Meta: model = Client exclude = ["data"]
ClientForm
python
huggingface__transformers
src/transformers/models/pop2piano/modeling_pop2piano.py
{ "start": 43094, "end": 58257 }
class ____(Pop2PianoPreTrainedModel, GenerationMixin): _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", "decoder.embed_tokens.weight": "shared.weight", } def __init__(self, config: Pop2PianoConfig): super().__init__(config) self.config = config self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) self.mel_conditioner = Pop2PianoConcatEmbeddingToMel(config) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False self.encoder = Pop2PianoStack(encoder_config) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.num_layers = config.num_decoder_layers self.decoder = Pop2PianoStack(decoder_config) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_mel_conditioner_outputs( self, input_features: torch.FloatTensor, composer: str, generation_config: GenerationConfig, attention_mask: Optional[torch.FloatTensor] = None, ): """ This method is used to concatenate mel conditioner tokens at the front of the input_features in order to control the type of MIDI token generated by the model. Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): input features extracted from the feature extractor. composer (`str`): composer token which determines the type of MIDI tokens to be generated. generation_config (`~generation.GenerationConfig`): The generation is used to get the composer-feature_token pair. attention_mask (``, *optional*): For batched generation `input_features` are padded to have the same shape across all examples. 
`attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. """ composer_to_feature_token = generation_config.composer_to_feature_token if composer not in composer_to_feature_token: raise ValueError( f"Please choose a composer from {list(composer_to_feature_token.keys())}. Composer received - {composer}" ) composer_value = composer_to_feature_token[composer] composer_value = torch.tensor(composer_value, device=self.device) composer_value = composer_value.repeat(input_features.shape[0]) embedding_offset = min(composer_to_feature_token.values()) input_features = self.mel_conditioner( feature=input_features, index_value=composer_value, embedding_offset=embedding_offset, ) if attention_mask is not None: input_features[~attention_mask[:, 0].bool()] = 0.0 # since self.mel_conditioner adds a new array at the front of inputs_embeds we need to do the same for attention_mask to keep the shapes same attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1) return input_features, attention_mask return input_features, None @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, input_features: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" input_ids 
(`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Pop2Piano is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [Pop2Piano Training](./Pop2Piano#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pop2Piano uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None and input_features is not None: raise ValueError("Both `inputs_embeds` and `input_features` received! 
Please provide only one of them") elif input_features is not None and inputs_embeds is None: inputs_embeds = input_features # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, 
decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_features, attention_mask=None, composer="composer1", generation_config=None, **kwargs, ): """ Generates token ids for midi outputs. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): This is the featurized version of audio generated by `Pop2PianoFeatureExtractor`. attention_mask: For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. composer (`str`, *optional*, defaults to `"composer1"`): This value is passed to `Pop2PianoConcatEmbeddingToMel` to generate different embeddings for each `"composer"`. Please make sure that the composer value is present in `composer_to_feature_token` in `generation_config`. For an example please see https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json . generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. 
`**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. kwargs: Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. Since Pop2Piano is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config.update(**kwargs) # check for composer_to_feature_token if not hasattr(generation_config, "composer_to_feature_token"): raise ValueError( "`composer_to_feature_token` was not found! Please refer to " "https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json" "and parse a dict like that." ) if len(generation_config.composer_to_feature_token) != self.config.composer_vocab_size: raise ValueError( "config.composer_vocab_size must be same as the number of keys in " f"generation_config.composer_to_feature_token! " f"Found {self.config.composer_vocab_size} vs {len(generation_config.composer_to_feature_token)}." 
) # to control the variation of generated MIDI tokens we concatenate mel-conditioner tokens(which depends on composer_token) # at the front of input_features. input_features, attention_mask = self.get_mel_conditioner_outputs( input_features=input_features, attention_mask=attention_mask, composer=composer, generation_config=generation_config, ) return super().generate( inputs=None, inputs_embeds=input_features, attention_mask=attention_mask, generation_config=generation_config, **kwargs, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) __all__ = ["Pop2PianoForConditionalGeneration", "Pop2PianoPreTrainedModel"]
Pop2PianoForConditionalGeneration
python
ApeWorX__ape
src/ape/managers/accounts.py
{ "start": 1018, "end": 7304 }
class ____(list, ManagerAccessMixin): __test__ = False _impersonated_accounts: dict[AddressType, ImpersonatedAccount] = {} _accounts_by_index: dict[int, AccountAPI] = {} @log_instead_of_fail(default="<TestAccountManager>") def __repr__(self) -> str: return f"<apetest-wallet {self.hd_path}>" @cached_property def containers(self) -> dict[str, TestAccountContainerAPI]: account_types = filter( lambda t: issubclass(t[1][1], TestAccountAPI), self.plugin_manager.account_types ) return { plugin_name: container_type(name=plugin_name, account_type=account_type) for plugin_name, (container_type, account_type) in account_types } @property def mnemonic(self) -> str: """ The seed phrase for generated test accounts. """ return self.config_manager.get_config("test").mnemonic @mnemonic.setter def mnemonic(self, value: str): """ The seed phrase for generated test accounts. **WARNING**: Changing the test-mnemonic mid-session re-starts the provider (if connected to one). """ self.config_manager.test.mnemonic = value self.containers["test"].mnemonic = value if provider := self.network_manager.active_provider: provider.update_settings({"mnemonic": value}) self._accounts_by_index = {} @property def number_of_accounts(self) -> int: """ The number of test accounts to generate and fund by default. """ return self.config_manager.test.number_of_accounts @property def hd_path(self) -> str: """ The HD path used for generating the test accounts. 
""" return self.config_manager.get_config("test").hd_path @property def accounts(self) -> Iterator[AccountAPI]: for container in self.containers.values(): yield from container.accounts def aliases(self) -> Iterator[str]: for account in self.accounts: if account.alias: yield account.alias def __len__(self) -> int: return sum(len(c) for c in self.containers.values()) def __iter__(self) -> Iterator[AccountAPI]: yield from self.accounts @singledispatchmethod def __getitem__(self, account_id): raise NotImplementedError(f"Cannot use {type(account_id)} as account ID.") @__getitem__.register def __getitem_int(self, account_id: int): if account_id < 0: account_id = len(self) + account_id if account_id in self._accounts_by_index: return self._accounts_by_index[account_id] account = self.containers["test"].get_test_account(account_id) self._accounts_by_index[account_id] = account return account @__getitem__.register def __getitem_slice(self, account_id: slice): start_idx = account_id.start or 0 if start_idx < 0: start_idx += len(self) stop_idx = account_id.stop or len(self) if stop_idx < 0: stop_idx += len(self) step_size = account_id.step or 1 return [self[i] for i in range(start_idx, stop_idx, step_size)] @__getitem__.register def __getitem_str(self, account_str: str): message_fmt = "No account with {} '{}'." 
try: account_id = self.conversion_manager.convert(account_str, AddressType) except ConversionError as err: message = message_fmt.format("ID", account_str) raise KeyError(message) from err for account in self.accounts: if account.address == account_id: return account try: return self.impersonate_account(account_id) except AccountsError as err: err_message = message_fmt.format("address", account_id) raise KeyError(f"{str(err).rstrip('.')}:\n{err_message}") from err def __contains__(self, address: AddressType) -> bool: # type: ignore return any(address in container for container in self.containers.values()) def impersonate_account(self, address: AddressType) -> ImpersonatedAccount: """ Impersonate an account for testing purposes. Args: address (AddressType): The address to impersonate. """ try: result = self.provider.unlock_account(address) except NotImplementedError as err: raise AccountsError( f"Provider '{self.provider.name}' does not support impersonating accounts." ) from err if result: if address in self._impersonated_accounts: return self._impersonated_accounts[address] account = ImpersonatedAccount(raw_address=address) self._impersonated_accounts[address] = account return account raise AccountsError(f"Unable to unlocked account '{address}'.") def stop_impersonating(self, address: AddressType): """ End the impersonating of an account, if it is being impersonated. Args: address (AddressType): The address to stop impersonating. 
""" if address in self._impersonated_accounts: del self._impersonated_accounts[address] try: self.provider.relock_account(address) except NotImplementedError: pass def generate_test_account(self, container_name: str = "test") -> TestAccountAPI: return self.containers[container_name].generate_account() def use_sender(self, account_id: Union[TestAccountAPI, AddressType, int]) -> "ContextManager": account = account_id if isinstance(account_id, TestAccountAPI) else self[account_id] return _use_sender(account) def init_test_account( self, index: int, address: AddressType, private_key: str ) -> "TestAccountAPI": container = self.containers["test"] return container.init_test_account( # type: ignore[attr-defined] index, address, private_key ) def reset(self): self._accounts_by_index = {} for container in self.containers.values(): container.reset()
TestAccountManager
python
pytorch__pytorch
.ci/lumen_cli/cli/lib/common/cli_helper.py
{ "start": 393, "end": 620 }
class ____(ABC): def __init__(self, args: Any) -> None: self.args = args @abstractmethod def run(self) -> None: """runs main logics, required""" # Pretty help: keep newlines + show defaults
BaseRunner
python
pypa__pip
tests/conftest.py
{ "start": 25071, "end": 33096 }
class ____: """Mock package structure used to generate a PyPI repository. FakePackage name and version should correspond to sdists (.tar.gz files) in our test data.""" name: str version: str filename: str metadata: MetadataKind # This will override any dependencies specified in the actual dist's METADATA. requires_dist: tuple[str, ...] = () # This will override the Name specified in the actual dist's METADATA. metadata_name: str | None = None def metadata_filename(self) -> str: """This is specified by PEP 658.""" return f"{self.filename}.metadata" def generate_additional_tag(self) -> str: """This gets injected into the <a> tag in the generated PyPI index page for this package.""" if self.metadata == MetadataKind.No: return "" if self.metadata in [MetadataKind.Unhashed, MetadataKind.NoFile]: return 'data-dist-info-metadata="true"' if self.metadata == MetadataKind.WrongHash: return 'data-dist-info-metadata="sha256=WRONG-HASH"' assert self.metadata == MetadataKind.Sha256 checksum = sha256(self.generate_metadata()).hexdigest() return f'data-dist-info-metadata="sha256={checksum}"' def requires_str(self) -> str: if not self.requires_dist: return "" joined = " and ".join(self.requires_dist) return f"Requires-Dist: {joined}" def generate_metadata(self) -> bytes: """This is written to `self.metadata_filename()` and will override the actual dist's METADATA, unless `self.metadata == MetadataKind.NoFile`.""" return dedent( f"""\ Metadata-Version: 2.1 Name: {self.metadata_name or self.name} Version: {self.version} {self.requires_str()} """ ).encode("utf-8") @pytest.fixture(scope="session") def fake_packages() -> dict[str, list[FakePackage]]: """The package database we generate for testing PEP 658 support.""" return { "simple": [ FakePackage("simple", "1.0", "simple-1.0.tar.gz", MetadataKind.Sha256), FakePackage("simple", "2.0", "simple-2.0.tar.gz", MetadataKind.No), # This will raise a hashing error. 
FakePackage("simple", "3.0", "simple-3.0.tar.gz", MetadataKind.WrongHash), ], "simple2": [ # Override the dependencies here in order to force pip to download # simple-1.0.tar.gz as well. FakePackage( "simple2", "1.0", "simple2-1.0.tar.gz", MetadataKind.Unhashed, ("simple==1.0",), ), # This will raise an error when pip attempts to fetch the metadata file. FakePackage("simple2", "2.0", "simple2-2.0.tar.gz", MetadataKind.NoFile), # This has a METADATA file with a mismatched name. FakePackage( "simple2", "3.0", "simple2-3.0.tar.gz", MetadataKind.Sha256, metadata_name="not-simple2", ), ], "colander": [ # Ensure we can read the dependencies from a metadata file within a wheel # *without* PEP 658 metadata. FakePackage( "colander", "0.9.9", "colander-0.9.9-py2.py3-none-any.whl", MetadataKind.No, ), ], "compilewheel": [ # Ensure we can override the dependencies of a wheel file by injecting PEP # 658 metadata. FakePackage( "compilewheel", "1.0", "compilewheel-1.0-py2.py3-none-any.whl", MetadataKind.Unhashed, ("simple==1.0",), ), ], "has-script": [ # Ensure we check PEP 658 metadata hashing errors for wheel files. FakePackage( "has-script", "1.0", "has.script-1.0-py2.py3-none-any.whl", MetadataKind.WrongHash, ), ], "translationstring": [ FakePackage( "translationstring", "1.1", "translationstring-1.1.tar.gz", MetadataKind.No, ), ], "priority": [ # Ensure we check for a missing metadata file for wheels. FakePackage( "priority", "1.0", "priority-1.0-py2.py3-none-any.whl", MetadataKind.NoFile, ), ], "requires-simple-extra": [ # Metadata name is not canonicalized. 
FakePackage( "requires-simple-extra", "0.1", "requires_simple_extra-0.1-py2.py3-none-any.whl", MetadataKind.Sha256, metadata_name="Requires_Simple.Extra", ), ], } @pytest.fixture(scope="session") def html_index_for_packages( shared_data: TestData, fake_packages: dict[str, list[FakePackage]], tmpdir_factory: pytest.TempPathFactory, ) -> Path: """Generate a PyPI HTML package index within a local directory pointing to synthetic test data.""" html_dir = tmpdir_factory.mktemp("fake_index_html_content") # (1) Generate the content for a PyPI index.html. pkg_links = "\n".join( f' <a href="{pkg}/index.html">{pkg}</a>' for pkg in fake_packages.keys() ) # Output won't be nicely indented because dedent() acts after f-string # arg insertion. index_html = dedent( f"""\ <!DOCTYPE html> <html> <head> <meta name="pypi:repository-version" content="1.0"> <title>Simple index</title> </head> <body> {pkg_links} </body> </html>""" ) # (2) Generate the index.html in a new subdirectory of the temp directory. (html_dir / "index.html").write_text(index_html) # (3) Generate subdirectories for individual packages, each with their own # index.html. for pkg, links in fake_packages.items(): pkg_subdir = html_dir / pkg pkg_subdir.mkdir() download_links: list[str] = [] for package_link in links: # (3.1) Generate the <a> tag which pip can crawl pointing to this # specific package version. download_links.append( f' <a href="{package_link.filename}" {package_link.generate_additional_tag()}>{package_link.filename}</a><br/>' # noqa: E501 ) # (3.2) Copy over the corresponding file in `shared_data.packages`. shutil.copy( shared_data.packages / package_link.filename, pkg_subdir / package_link.filename, ) # (3.3) Write a metadata file, if applicable. 
if package_link.metadata != MetadataKind.NoFile: with open(pkg_subdir / package_link.metadata_filename(), "wb") as f: f.write(package_link.generate_metadata()) # (3.4) After collating all the download links and copying over the files, # write an index.html with the generated download links for each # copied file for this specific package name. download_links_str = "\n".join(download_links) pkg_index_content = dedent( f"""\ <!DOCTYPE html> <html> <head> <meta name="pypi:repository-version" content="1.0"> <title>Links for {pkg}</title> </head> <body> <h1>Links for {pkg}</h1> {download_links_str} </body> </html>""" ) with open(pkg_subdir / "index.html", "w") as f: f.write(pkg_index_content) return html_dir
FakePackage
python
doocs__leetcode
solution/3600-3699/3652.Best Time to Buy and Sell Stock using Strategy/Solution.py
{ "start": 0, "end": 450 }
class ____: def maxProfit(self, prices: List[int], strategy: List[int], k: int) -> int: n = len(prices) s = [0] * (n + 1) t = [0] * (n + 1) for i, (a, b) in enumerate(zip(prices, strategy), 1): s[i] = s[i - 1] + a * b t[i] = t[i - 1] + a ans = s[n] for i in range(k, n + 1): ans = max(ans, s[n] - (s[i] - s[i - k]) + t[i] - t[i - k // 2]) return ans
Solution
python
pytorch__pytorch
torch/ao/nn/quantized/modules/functional_modules.py
{ "start": 2819, "end": 4551 }
class ____(torch.nn.Module): r"""module to replace FloatFunctional module before FX graph mode quantization, since activation_post_process will be inserted in top level module directly Valid operation names: - add - cat - mul - add_relu - add_scalar - mul_scalar """ def forward(self, x): raise RuntimeError( "FloatFunctional is not intended to use the " + "'forward'. Please use the underlying operation" ) r"""Operation equivalent to ``torch.add(Tensor, Tensor)``""" def add(self, x: Tensor, y: Tensor) -> Tensor: r = torch.add(x, y) return r r"""Operation equivalent to ``torch.add(Tensor, float)``""" def add_scalar(self, x: Tensor, y: float) -> Tensor: r = torch.add(x, y) return r r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``""" def mul(self, x: Tensor, y: Tensor) -> Tensor: r = torch.mul(x, y) return r r"""Operation equivalent to ``torch.mul(Tensor, float)``""" def mul_scalar(self, x: Tensor, y: float) -> Tensor: r = torch.mul(x, y) return r r"""Operation equivalent to ``torch.cat``""" def cat(self, x: list[Tensor], dim: int = 0) -> Tensor: r = torch.cat(x, dim=dim) return r r"""Operation equivalent to ``relu(torch.add(x,y))``""" def add_relu(self, x: Tensor, y: Tensor) -> Tensor: r = torch.add(x, y) r = torch.nn.functional.relu(r) return r r"""Operation equivalent to ``torch.matmul(Tensor, Tensor)``""" def matmul(self, x: Tensor, y: Tensor) -> Tensor: r = torch.matmul(x, y) return r
FXFloatFunctional
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDict18.py
{ "start": 1858, "end": 1907 }
class ____(TypedDict, Generic[_T1]): x: _T1
TD9
python
pandas-dev__pandas
pandas/tests/scalar/timestamp/methods/test_normalize.py
{ "start": 114, "end": 1009 }
class ____: @pytest.mark.parametrize("arg", ["2013-11-30", "2013-11-30 12:00:00"]) def test_normalize(self, tz_naive_fixture, arg, unit): tz = tz_naive_fixture ts = Timestamp(arg, tz=tz).as_unit(unit) result = ts.normalize() expected = Timestamp("2013-11-30", tz=tz) assert result == expected assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value def test_normalize_pre_epoch_dates(self): # GH: 36294 result = Timestamp("1969-01-01 09:00:00").normalize() expected = Timestamp("1969-01-01 00:00:00") assert result == expected def test_normalize_overflow_raises(self): # GH#60583 ts = Timestamp.min msg = "Cannot normalize Timestamp without integer overflow" with pytest.raises(ValueError, match=msg): ts.normalize()
TestTimestampNormalize
python
django__django
tests/model_formsets/models.py
{ "start": 3837, "end": 3963 }
class ____(models.Model): name = models.CharField(max_length=25) def __str__(self): return self.name
Repository
python
tensorflow__tensorflow
tensorflow/lite/python/lite.py
{ "start": 81225, "end": 92335 }
class ____(TFLiteFrozenGraphConverterV2): """Converts a TensorFlow model into TensorFlow Lite model. Attributes: optimizations: Experimental flag, subject to change. Set of optimizations to apply. e.g {tf.lite.Optimize.DEFAULT}. (default None, must be None or a set of values of type `tf.lite.Optimize`) representative_dataset: A generator function used for integer quantization where each generated sample has the same order, type and shape as the inputs to the model. Usually, this is a small subset of a few hundred samples randomly chosen, in no particular order, from the training or evaluation dataset. This is an optional attribute, but required for full integer quantization, i.e, if `tf.int8` is the only supported type in `target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`. (default None) target_spec: Experimental flag, subject to change. Specifications of the target device, including supported ops set, supported types and a set of user's defined TensorFlow operators required in the TensorFlow Lite runtime. Refer to `tf.lite.TargetSpec`. inference_input_type: Data type of the input layer. Note that integer types (tf.int8 and tf.uint8) are currently only supported for post-training integer quantization and quantization-aware training. (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) inference_output_type: Data type of the output layer. Note that integer types (tf.int8 and tf.uint8) are currently only supported for post-training integer quantization and quantization-aware training. (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) allow_custom_ops: Boolean indicating whether to allow custom operations. When False, any unknown operation is an error. When True, custom ops are created for any op that is unknown. The developer needs to provide these to the TensorFlow Lite runtime with a custom resolver. (default False) exclude_conversion_metadata: Whether not to embed the conversion metadata into the converted model. 
(default False) experimental_new_converter: Experimental flag, subject to change. Enables MLIR-based conversion. (default True) experimental_new_quantizer: Experimental flag, subject to change. Enables MLIR-based quantization conversion instead of Flatbuffer-based conversion. (default True) experimental_enable_resource_variables: Experimental flag, subject to change. Enables [resource variables](https://tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables) to be converted by this converter. This is only allowed if the from_saved_model interface is used. (default True) serialize_debug_metadata: Enables serializing debug metadata into the TFLite model. (default False) Example usage: ```python # Converting a SavedModel to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) tflite_model = converter.convert() # Converting a tf.Keras model to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_keras_model(model) tflite_model = converter.convert() # Converting ConcreteFunctions to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_concrete_functions([func], model) tflite_model = converter.convert() # Converting a Jax model to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.experimental_from_jax( [func], [[ ('input1', input1), ('input2', input2)]]) tflite_model = converter.convert() ``` """ # fmt: skip # pylint: disable=useless-super-delegation def __init__(self, funcs, trackable_obj=None): """Constructor for TFLiteConverter. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. trackable_obj: tf.AutoTrackable object associated with `funcs`. A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. `from_saved_model`). 
""" super(TFLiteConverterV2, self).__init__(funcs, trackable_obj) @classmethod def from_concrete_functions(cls, funcs, trackable_obj=None): """Creates a TFLiteConverter object from ConcreteFunctions. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. Currently converter can only convert a single ConcreteFunction. Converting multiple functions is under development. trackable_obj: An `AutoTrackable` object (typically `tf.module`) associated with `funcs`. A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. Returns: TFLiteConverter object. Raises: Invalid input type. """ # pylint: disable=protected-access TFLiteConverterBase._set_original_model_type( conversion_metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS ) # pylint: enable=protected-access if trackable_obj is None: logging.warning( "Please consider providing the trackable_obj argument in the " "from_concrete_functions. Providing without the trackable_obj " "argument is deprecated and it will use the deprecated conversion " "path." ) for func in funcs: if not isinstance(func, _function.ConcreteFunction): message = "This function takes in a list of ConcreteFunction." if isinstance(func, _def_function.Function): message += ( " To get the ConcreteFunction from a Function," " call get_concrete_function." ) raise ValueError(message) return cls(funcs, trackable_obj) @classmethod def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None): """Creates a TFLiteConverter object from a SavedModel directory. Args: saved_model_dir: SavedModel directory to convert. signature_keys: List of keys identifying SignatureDef containing inputs and outputs. Elements should not be duplicated. By default the `signatures` attribute of the MetaGraphdef is used. (default saved_model.signatures) tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. 
All tags in the tag set must be present. (default {tf.saved_model.SERVING} or {'serve'}) Returns: TFLiteConverter object. Raises: Invalid signature keys. """ # pylint: disable=protected-access TFLiteConverterBase._set_original_model_type( conversion_metadata_fb.ModelType.TF_SAVED_MODEL ) # pylint: enable=protected-access # When run without eager enabled, this will return the legacy # TFLiteConverter. if not context.executing_eagerly(): signature_key = None if signature_keys: if len(signature_keys) != 1: raise ValueError("Only support a single signature key.") else: signature_key = signature_keys[0] logging.warning( "Invoking the TF1 implementation of TFLiteConverter " "because eager is disabled. Consider enabling eager." ) return TFLiteConverter.from_saved_model( saved_model_dir, signature_key=signature_key, tag_set=tags ) if tags is None: tags = set([_tag_constants.SERVING]) with context.eager_mode(): saved_model = _load(saved_model_dir, tags) if not signature_keys: signature_keys = list(saved_model.signatures.keys()) if not signature_keys: raise ValueError("Only support at least one signature key.") # Distinguishes SavedModel artifacts created by `model.export` # from SavedModel created by `model.save`/`tf.saved_model.save`. if ( len(signature_keys) > 1 and hasattr(saved_model, "serve") # `model.export` default endpoint and not hasattr(saved_model, "_default_save_signature") # `_default_save_signature` does not exist for `model.export` artifacts. ): # Default `serve` endpoint for `model.export` should be copied # to `serving_default` to prevent issues in TF Lite serving. saved_model.serving_default = saved_model.serve delattr(saved_model, "serve") signature_keys = ["serving_default"] funcs = [] for key in signature_keys: if key not in saved_model.signatures: raise ValueError( "Invalid signature key '{}' found. 
Valid keys are '{}'.".format( key, ",".join(saved_model.signatures) ) ) funcs.append(saved_model.signatures[key]) saved_model_converter = TFLiteSavedModelConverterV2( saved_model_dir, tags, signature_keys ) if saved_model_converter.saved_model_dir: return saved_model_converter return cls(funcs, saved_model) @classmethod def from_keras_model(cls, model): """Creates a TFLiteConverter object from a Keras model. Args: model: tf.Keras.Model Returns: TFLiteConverter object. """ # pylint: disable=protected-access TFLiteConverterBase._set_original_model_type( conversion_metadata_fb.ModelType.KERAS_MODEL ) # pylint: enable=protected-access return TFLiteKerasModelConverterV2(model) @classmethod @_deprecation.deprecated( None, "Use `jax2tf.convert` and (`lite.TFLiteConverter.from_saved_model`" " or `lite.TFLiteConverter.from_concrete_functions`) instead.", ) def experimental_from_jax(cls, serving_funcs, inputs): # Experimental API, subject to changes. # TODO(b/197690428): Currently only supports single function. """Creates a TFLiteConverter object from a Jax model with its inputs. Args: serving_funcs: An array of Jax functions with all the weights applied already. inputs: An array of Jax input placeholders tuples list, e.g., jnp.zeros(INPUT_SHAPE). Each tuple list should correspond with the serving function. Returns: TFLiteConverter object. """ # pylint: disable=protected-access TFLiteConverterBase._set_original_model_type( conversion_metadata_fb.ModelType.JAX ) # pylint: enable=protected-access return TFLiteJaxConverterV2(serving_funcs, inputs) # pylint: disable=useless-super-delegation def convert(self): """Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters. """ return super(TFLiteConverterV2, self).convert()
TFLiteConverterV2
python
getsentry__sentry
src/sentry/search/events/builder/profile_functions.py
{ "start": 3004, "end": 3435 }
class ____(ProfileFunctionsQueryBuilderMixin, BaseQueryBuilder): function_alias_prefix = "sentry_" config_class = ProfileFunctionsDatasetConfig def process_results(self, results: Any) -> EventsResponse: processed: EventsResponse = super().process_results(results) for row in processed["data"]: self.process_profiling_function_columns(row) return processed
ProfileFunctionsQueryBuilder
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/property_with_parameters.py
{ "start": 1063, "end": 1214 }
class ____: @cached_property def cached_prop(self, value): # [property-with-parameters] ... import abc import enum import types
Cached
python
numba__numba
numba/core/types/scalars.py
{ "start": 4084, "end": 4989 }
class ____(Type): """ Common base class for np.datetime64 and np.timedelta64. """ def __init__(self, unit, *args, **kws): name = '%s[%s]' % (self.type_name, unit) self.unit = unit self.unit_code = npdatetime_helpers.DATETIME_UNITS[self.unit] super(_NPDatetimeBase, self).__init__(name, *args, **kws) def __lt__(self, other): if self.__class__ is not other.__class__: return NotImplemented # A coarser-grained unit is "smaller", i.e. less precise values # can be represented (but the magnitude of representable values is # also greater...). return self.unit_code < other.unit_code def cast_python_value(self, value): cls = getattr(np, self.type_name) if self.unit: return cls(value, self.unit) else: return cls(value) @total_ordering
_NPDatetimeBase
python
getsentry__sentry
src/sentry/integrations/github_enterprise/integration.py
{ "start": 14401, "end": 20295 }
class ____(GitHubIntegrationProvider): key = IntegrationProviderSlug.GITHUB_ENTERPRISE.value name = "GitHub Enterprise" metadata = metadata integration_cls = GitHubEnterpriseIntegration features = frozenset( [ IntegrationFeatures.COMMITS, IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.STACKTRACE_LINK, IntegrationFeatures.CODEOWNERS, ] ) def _make_identity_pipeline_view(self) -> PipelineView[IntegrationPipeline]: """ Make the nested identity provider view. It is important that this view is not constructed until we reach this step and the ``oauth_config_information`` is available in the pipeline state. This method should be late bound into the pipeline vies. """ oauth_information = self.pipeline.fetch_state("oauth_config_information") if oauth_information is None: raise AssertionError("pipeline called out of order") identity_pipeline_config = dict( oauth_scopes=(), redirect_url=absolute_uri("/extensions/github-enterprise/setup/"), **oauth_information, ) return NestedPipelineView( bind_key="identity", provider_key=IntegrationProviderSlug.GITHUB_ENTERPRISE.value, pipeline_cls=IdentityPipeline, config=identity_pipeline_config, ) def get_pipeline_views( self, ) -> Sequence[ PipelineView[IntegrationPipeline] | Callable[[], PipelineView[IntegrationPipeline]] ]: return ( InstallationConfigView(), GitHubEnterpriseInstallationRedirect(), # The identity provider pipeline should be constructed at execution # time, this allows for the oauth configuration parameters to be made # available from the installation config view. 
lambda: self._make_identity_pipeline_view(), ) def post_install( self, integration: Integration, organization: RpcOrganization, *, extra: dict[str, Any], ) -> None: pass def _get_ghe_installation_info(self, installation_data, access_token, installation_id): headers = { # TODO(jess): remove this whenever it's out of preview "Accept": "application/vnd.github.machine-man-preview+json", } headers.update( jwt.authorization_header( get_jwt( github_id=installation_data["id"], github_private_key=installation_data["private_key"], ) ) ) with http.build_session() as session: resp = session.get( f"https://{installation_data['url']}/api/v3/app/installations/{installation_id}", headers=headers, verify=installation_data["verify_ssl"], ) resp.raise_for_status() installation_resp = resp.json() resp = session.get( f"https://{installation_data['url']}/api/v3/user/installations", headers={ "Accept": "application/vnd.github.machine-man-preview+json", "Authorization": f"token {access_token}", }, verify=installation_data["verify_ssl"], ) resp.raise_for_status() user_installations_resp = resp.json() # verify that user actually has access to the installation for installation in user_installations_resp["installations"]: if installation["id"] == installation_resp["id"]: return installation_resp return None def build_integration(self, state: Mapping[str, Any]) -> IntegrationData: identity = state["identity"]["data"] installation_data = state["installation_data"] user = get_user_info(installation_data["url"], identity["access_token"]) installation = self._get_ghe_installation_info( installation_data, identity["access_token"], state["installation_id"] ) domain = urlparse(installation["account"]["html_url"]).netloc return { "name": installation["account"]["login"], # installation id is not enough to be unique for self-hosted GH "external_id": "{}:{}".format(domain, installation["id"]), # GitHub identity is associated directly to the application, *not* # to the installation itself. 
# app id is not enough to be unique for self-hosted GH "idp_external_id": "{}:{}".format(domain, installation["app_id"]), "metadata": { # The access token will be populated upon API usage "access_token": None, "expires_at": None, "icon": installation["account"]["avatar_url"], "domain_name": installation["account"]["html_url"].replace("https://", ""), "account_type": installation["account"]["type"], "installation_id": installation["id"], "installation": installation_data, }, "user_identity": { "type": IntegrationProviderSlug.GITHUB_ENTERPRISE.value, "external_id": user["id"], "scopes": [], # GitHub apps do not have user scopes "data": {"access_token": identity["access_token"]}, }, "idp_config": state["oauth_config_information"], } def setup(self): from sentry.plugins.base import bindings bindings.add( "integration-repository.provider", GitHubEnterpriseRepositoryProvider, id="integrations:github_enterprise", )
GitHubEnterpriseIntegrationProvider
python
openai__openai-python
src/openai/resources/beta/beta.py
{ "start": 5147, "end": 5725 }
class ____: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta @cached_property def chatkit(self) -> AsyncChatKitWithStreamingResponse: return AsyncChatKitWithStreamingResponse(self._beta.chatkit) @cached_property def assistants(self) -> AsyncAssistantsWithStreamingResponse: return AsyncAssistantsWithStreamingResponse(self._beta.assistants) @cached_property def threads(self) -> AsyncThreadsWithStreamingResponse: return AsyncThreadsWithStreamingResponse(self._beta.threads)
AsyncBetaWithStreamingResponse
python
pennersr__django-allauth
allauth/headless/account/views.py
{ "start": 8579, "end": 9195 }
class ____(APIView): handle_json_input = False def post(self, request, *args, **kwargs): process = None stage = LoginStageController.enter(request, PhoneVerificationStage.key) if stage: process = flows.phone_verification.PhoneVerificationStageProcess.resume( stage ) if not process or not process.can_resend: return ConflictResponse(request) try: process.resend() except RateLimited: return RateLimitResponse(request) return APIResponse(request)
ResendPhoneVerificationCodeView
python
astropy__astropy
astropy/samp/hub.py
{ "start": 54486, "end": 56022 }
class ____: """ A base class to make writing Web Profile GUI consent dialogs easier. The concrete class must: 1) Poll ``handle_queue`` periodically, using the timer services of the GUI's event loop. This function will call ``self.show_dialog`` when a request requires authorization. ``self.show_dialog`` will be given the arguments: - ``samp_name``: The name of the application making the request. - ``details``: A dictionary of details about the client making the request. - ``client``: A hostname, port pair containing the client address. - ``origin``: A string containing the origin of the request. 2) Call ``consent`` or ``reject`` based on the user's response to the dialog. """ def handle_queue(self): try: request = self.queue_request.get_nowait() except queue.Empty: # queue is set but empty pass except AttributeError: # queue has not been set yet pass else: if isinstance(request[0], str): # To support the old protocol version samp_name = request[0] else: samp_name = request[0]["samp.name"] self.show_dialog(samp_name, request[0], request[1], request[2]) def consent(self): self.queue_result.put(True) def reject(self): self.queue_result.put(False)
WebProfileDialog
python
Lightning-AI__lightning
tests/tests_pytorch/models/test_hparams.py
{ "start": 11474, "end": 17760 }
class ____(CustomBoringModel, metaclass=_MetaType): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.save_hyperparameters() if _OMEGACONF_AVAILABLE: class DictConfSubClassBoringModel(SubClassBoringModel): def __init__(self, *args, dict_conf=OmegaConf.create({"my_param": "something"}), **kwargs): super().__init__(*args, **kwargs) self.save_hyperparameters() else: class DictConfSubClassBoringModel: ... @pytest.mark.parametrize( "cls", [ CustomBoringModel, SubClassBoringModel, NonSavingSubClassBoringModel, SubSubClassBoringModel, AggSubClassBoringModel, UnconventionalArgsBoringModel, pytest.param(DictConfSubClassBoringModel, marks=RunIf(omegaconf=True)), BoringModelWithMixin, BoringModelWithMixinAndInit, MetaTypeBoringModel, ], ) def test_collect_init_arguments(tmp_path, cls: BoringModel): """Test that the model automatically saves the arguments passed into the constructor.""" extra_args = {} weights_only = True if cls is AggSubClassBoringModel: extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss()) weights_only = False elif cls is DictConfSubClassBoringModel: extra_args.update(dict_conf=OmegaConf.create({"my_param": "anything"})) weights_only = False model = cls(**extra_args) assert model.hparams.batch_size == 64 model = cls(batch_size=179, **extra_args) assert model.hparams.batch_size == 179 if isinstance(model, (SubClassBoringModel, NonSavingSubClassBoringModel, MixinForBoringModel)): assert model.hparams.subclass_arg == 1200 if isinstance(model, AggSubClassBoringModel): assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss) # verify that the checkpoint saved the correct values trainer = Trainer(default_root_dir=tmp_path, max_epochs=2, overfit_batches=0.5) trainer.fit(model) raw_checkpoint_path = _raw_checkpoint_path(trainer) raw_checkpoint = torch.load(raw_checkpoint_path, weights_only=weights_only) assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint assert 
raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["batch_size"] == 179 # verify that model loads correctly model = cls.load_from_checkpoint(raw_checkpoint_path, weights_only=weights_only) assert model.hparams.batch_size == 179 if isinstance(model, AggSubClassBoringModel): assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss) if isinstance(model, DictConfSubClassBoringModel): assert isinstance(model.hparams.dict_conf, Container) assert model.hparams.dict_conf["my_param"] == "anything" # verify that we can overwrite whatever we want model = cls.load_from_checkpoint(raw_checkpoint_path, batch_size=99, weights_only=weights_only) assert model.hparams.batch_size == 99 def _raw_checkpoint_path(trainer) -> str: raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath) raw_checkpoint_paths = [x for x in raw_checkpoint_paths if ".ckpt" in x] assert raw_checkpoint_paths raw_checkpoint_path = raw_checkpoint_paths[0] return os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path) def test_collect_init_arguments_in_other_methods(): class _ABCModelCreator: def init(self, model, **kwargs) -> LightningModule: self.model = model return self.model class ConcreteModelCreator(_ABCModelCreator): def init(self, model=None, **kwargs) -> LightningModule: return super().init(model=model or CustomBoringModel(**kwargs)) model_creator = ConcreteModelCreator() model = model_creator.init(batch_size=123) assert model.hparams.batch_size == 123 @pytest.mark.parametrize("base_class", [HyperparametersMixin, LightningModule, LightningDataModule]) def test_save_hyperparameters_under_composition(base_class): """Test that in a composition where the parent is not a Lightning-like module, the parent's arguments don't get collected.""" class ChildInComposition(base_class): def __init__(self, same_arg): super().__init__() self.save_hyperparameters() class NotPLSubclass: # intentionally not subclassing LightningModule/LightningDataModule def __init__(self, 
same_arg="parent_default", other_arg="other"): self.child = ChildInComposition(same_arg="cocofruit") parent = NotPLSubclass() assert parent.child.hparams == {"same_arg": "cocofruit"} @pytest.mark.parametrize("base_class", [HyperparametersMixin, LightningModule, LightningDataModule]) def test_save_hyperparameters_ignore(base_class): """Test if `save_hyperparameter` applies the ignore list correctly during initialization.""" class PLSubclass(base_class): def __init__(self, learning_rate=1e-3, optimizer="adam"): super().__init__() self.save_hyperparameters(ignore=["learning_rate"]) pl_instance = PLSubclass(learning_rate=0.01, optimizer="sgd") assert pl_instance.hparams == {"optimizer": "sgd"} @pytest.mark.parametrize("base_class", [HyperparametersMixin, LightningModule, LightningDataModule]) def test_save_hyperparameters_ignore_under_composition(base_class): """Test that in a composed system, hyperparameter saving skips ignored fields from nested modules.""" class ChildModule(base_class): def __init__(self, dropout, activation, init_method): super().__init__() self.save_hyperparameters(ignore=["dropout", "activation"]) class ParentModule(base_class): def __init__(self, batch_size, optimizer): super().__init__() self.child = ChildModule(dropout=0.1, activation="relu", init_method="xavier") class PipelineWrapper: # not a Lightning subclass on purpose def __init__(self, run_id="abc123", seed=42): self.parent_module = ParentModule(batch_size=64, optimizer="adam") pipeline = PipelineWrapper() assert pipeline.parent_module.child.hparams == {"init_method": "xavier", "batch_size": 64, "optimizer": "adam"}
MetaTypeBoringModel
python
PrefectHQ__prefect
src/integrations/prefect-azure/tests/conftest.py
{ "start": 485, "end": 753 }
class ____: def __init__(self, items): self.items = items async def __aiter__(self): for item in self.items: yield item mock_container = { "prefect.txt": b"prefect_works", "folder/prefect.txt": b"prefect_works", }
AsyncIter
python
airbytehq__airbyte
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/stream_processor.py
{ "start": 2055, "end": 73366 }
class ____(object): """ Takes as input an Airbyte Stream as described in the (configured) Airbyte Catalog's Json Schema. Associated input raw data is expected to be stored in a staging area table. This processor generates SQL models to transform such a stream into a final table in the destination schema. This is done by generating a DBT pipeline of transformations (multiple SQL models queries) that may be materialized in the intermediate schema "raw_schema" (changing the dbt_project.yml settings). The final output data should be written in "schema". The pipeline includes transformations such as: - Parsing a JSON blob column and extracting each field property in its own SQL column - Casting each SQL column to the proper JSON data type - Generating an artificial (primary key) ID column based on the hashing of the row If any nested columns are discovered in the stream, a JSON blob SQL column is created in the top level parent stream and a new StreamProcessor instance will be spawned for each children substreams. These Sub-Stream Processors are then able to generate models to parse and extract recursively from its parent StreamProcessor model into separate SQL tables the content of that JSON blob SQL column. 
""" def __init__( self, stream_name: str, destination_type: DestinationType, raw_schema: str, default_schema: str, schema: str, source_sync_mode: SyncMode, destination_sync_mode: DestinationSyncMode, cursor_field: List[str], primary_key: List[List[str]], json_column_name: str, properties: Dict, tables_registry: TableNameRegistry, from_table: Union[str, dbt_macro.Macro], ): """ See StreamProcessor.create() """ self.stream_name: str = stream_name self.destination_type: DestinationType = destination_type self.raw_schema: str = raw_schema self.schema: str = schema self.source_sync_mode: SyncMode = source_sync_mode self.destination_sync_mode: DestinationSyncMode = destination_sync_mode self.cursor_field: List[str] = cursor_field self.primary_key: List[List[str]] = primary_key self.json_column_name: str = json_column_name self.properties: Dict = properties self.tables_registry: TableNameRegistry = tables_registry self.from_table: Union[str, dbt_macro.Macro] = from_table self.name_transformer: DestinationNameTransformer = DestinationNameTransformer(destination_type) self.json_path: List[str] = [stream_name] self.final_table_name: str = "" self.sql_outputs: Dict[str, str] = {} self.parent: Optional["StreamProcessor"] = None self.is_nested_array: bool = False self.default_schema: str = default_schema self.airbyte_ab_id = "_airbyte_ab_id" self.airbyte_emitted_at = "_airbyte_emitted_at" self.airbyte_normalized_at = "_airbyte_normalized_at" self.airbyte_unique_key = "_airbyte_unique_key" self.models_to_source: Dict[str, str] = {} @staticmethod def create_from_parent( parent, child_name: str, json_column_name: str, properties: Dict, is_nested_array: bool, from_table: str ) -> "StreamProcessor": """ @param parent is the Stream Processor that originally created this instance to handle a nested column from that parent table. 
@param json_column_name is the name of the column in the parent data table containing the json column to transform @param properties is the json schema description of this nested stream @param is_nested_array is a boolean flag specifying if the child is a nested array that needs to be extracted @param tables_registry is the global context recording all tables created so far @param from_table is the parent table to extract the nested stream from The child stream processor will create a separate table to contain the unnested data. """ if parent.destination_sync_mode.value == DestinationSyncMode.append_dedup.value: # nested streams can't be deduped like their parents (as they may not share the same cursor/primary keys) parent_sync_mode = DestinationSyncMode.append else: parent_sync_mode = parent.destination_sync_mode result = StreamProcessor.create( stream_name=child_name, destination_type=parent.destination_type, raw_schema=parent.raw_schema, default_schema=parent.default_schema, schema=parent.schema, source_sync_mode=parent.source_sync_mode, destination_sync_mode=parent_sync_mode, cursor_field=[], primary_key=[], json_column_name=json_column_name, properties=properties, tables_registry=parent.tables_registry, from_table=from_table, ) result.parent = parent result.is_nested_array = is_nested_array result.json_path = parent.json_path + [child_name] return result @staticmethod def create( stream_name: str, destination_type: DestinationType, raw_schema: str, default_schema: str, schema: str, source_sync_mode: SyncMode, destination_sync_mode: DestinationSyncMode, cursor_field: List[str], primary_key: List[List[str]], json_column_name: str, properties: Dict, tables_registry: TableNameRegistry, from_table: Union[str, dbt_macro.Macro], ) -> "StreamProcessor": """ @param stream_name of the stream being processed @param destination_type is the destination type of warehouse @param raw_schema is the name of the staging intermediate schema where to create internal tables/views 
@param schema is the name of the schema where to store the final tables where to store the transformed data @param source_sync_mode is describing how source are producing data @param destination_sync_mode is describing how destination should handle the new data batch @param cursor_field is the field to use to determine order of records @param primary_key is a list of fields to use as a (composite) primary key @param json_column_name is the name of the column in the raw data table containing the json column to transform @param properties is the json schema description of this stream @param tables_registry is the global context recording all tables created so far @param from_table is the table this stream is being extracted from originally """ return StreamProcessor( stream_name, destination_type, raw_schema, default_schema, schema, source_sync_mode, destination_sync_mode, cursor_field, primary_key, json_column_name, properties, tables_registry, from_table, ) def collect_table_names(self): column_names = self.extract_column_names() self.tables_registry.register_table(self.get_schema(True), self.get_schema(False), self.stream_name, self.json_path) for child in self.find_children_streams(self.from_table, column_names): child.collect_table_names() def get_stream_source(self): if not self.parent: return self.from_table.source_name + "." + self.from_table.table_name cur = self.parent while cur.parent: cur = cur.parent return cur.from_table.source_name + "." + cur.from_table.table_name def process(self) -> List["StreamProcessor"]: """ See description of StreamProcessor class. 
@return List of StreamProcessor to handle recursively nested columns from this stream """ # Check properties if not self.properties: print(f" Ignoring stream '{self.stream_name}' from {self.current_json_path()} because properties list is empty") return [] column_names = self.extract_column_names() column_count = len(column_names) if column_count == 0: print(f" Ignoring stream '{self.stream_name}' from {self.current_json_path()} because no columns were identified") return [] from_table = str(self.from_table) # Transformation Pipeline for this stream from_table = self.add_to_outputs( self.generate_json_parsing_model(from_table, column_names), self.get_model_materialization_mode(is_intermediate=True), is_intermediate=True, suffix="ab1", ) from_table = self.add_to_outputs( self.generate_column_typing_model(from_table, column_names), self.get_model_materialization_mode(is_intermediate=True, column_count=column_count), is_intermediate=True, suffix="ab2", ) if self.destination_sync_mode != DestinationSyncMode.append_dedup: from_table = self.add_to_outputs( self.generate_id_hashing_model(from_table, column_names), self.get_model_materialization_mode(is_intermediate=True, column_count=column_count), is_intermediate=True, suffix="ab3", ) from_table = self.add_to_outputs( self.generate_final_model(from_table, column_names), self.get_model_materialization_mode(is_intermediate=False, column_count=column_count), is_intermediate=False, ) else: if self.is_incremental_mode(self.destination_sync_mode): # Force different materialization here because incremental scd models rely on star* macros that requires it if self.destination_type.value == DestinationType.POSTGRES.value: # because of https://github.com/dbt-labs/docs.getdbt.com/issues/335, we avoid VIEW for postgres forced_materialization_type = TableMaterializationType.INCREMENTAL else: forced_materialization_type = TableMaterializationType.VIEW else: forced_materialization_type = TableMaterializationType.CTE from_table = 
self.add_to_outputs( self.generate_id_hashing_model(from_table, column_names), forced_materialization_type, is_intermediate=True, suffix="stg", ) from_table = self.add_to_outputs( self.generate_scd_type_2_model(from_table, column_names), self.get_model_materialization_mode(is_intermediate=False, column_count=column_count), is_intermediate=False, suffix="scd", subdir="scd", unique_key=self.name_transformer.normalize_column_name(f"{self.airbyte_unique_key}_scd"), partition_by=PartitionScheme.ACTIVE_ROW, ) where_clause = f"\nand {self.name_transformer.normalize_column_name('_airbyte_active_row')} = 1" # from_table should not use the de-duplicated final table or tables downstream (nested streams) will miss non active rows self.add_to_outputs( self.generate_final_model(from_table, column_names, unique_key=self.get_unique_key()) + where_clause, self.get_model_materialization_mode(is_intermediate=False, column_count=column_count), is_intermediate=False, unique_key=self.get_unique_key(), partition_by=PartitionScheme.UNIQUE_KEY, ) return self.find_children_streams(from_table, column_names) def extract_column_names(self) -> Dict[str, Tuple[str, str]]: """ Generate a mapping of JSON properties to normalized SQL Column names, handling collisions and avoid duplicate names The mapped value to a field property is a tuple where: - the first value is the normalized "raw" column name - the second value is the normalized quoted column name to be used in jinja context """ fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in 
this stream
                # Probe suffixed names field_1 .. field_999 until an unused normalized
                # name is found (lookup is done case-insensitively via the transformer).
                for i in range(1, 1000):
                    field_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=False)
                    field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)
                    jinja_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=True)
                    if field_name_lookup not in field_names:
                        break
            field_names.add(field_name_lookup)
            result[field] = (field_name, jinja_name)
        return result

    def find_children_streams(self, from_table: str, column_names: Dict[str, Tuple[str, str]]) -> List["StreamProcessor"]:
        """
        For each complex type property, generate a new child StreamProcessor that produces separate child pipelines.
        The current stream/table is used as the parent from which to extract data from.
        """
        properties = self.properties
        children: List[StreamProcessor] = []
        for field in properties.keys():
            children_properties = None
            is_nested_array = False
            json_column_name = ""
            if is_airbyte_column(field):
                # internal airbyte metadata columns never spawn child streams
                pass
            elif is_combining_node(properties[field]):
                # TODO: merge properties of all combinations
                pass
            elif "type" not in properties[field] or is_object(properties[field]["type"]):
                # properties without 'type' field are treated like properties with 'type' = 'object'
                children_properties = find_properties_object([], field, properties[field])
                is_nested_array = False
                json_column_name = column_names[field][1]
            elif is_array(properties[field]["type"]) and "items" in properties[field]:
                # nested arrays are unnested first; the child reads from the unnested value
                quoted_field = column_names[field][1]
                children_properties = find_properties_object([], field, properties[field]["items"])
                is_nested_array = True
                json_column_name = f"unnested_column_value({quoted_field})"
            if children_properties:
                for child_key in children_properties:
                    stream_processor = StreamProcessor.create_from_parent(
                        parent=self,
                        child_name=field,
                        json_column_name=json_column_name,
                        properties=children_properties[child_key],
                        is_nested_array=is_nested_array,
                        from_table=from_table,
                    )
                    children.append(stream_processor)
        return children

    def 
generate_json_parsing_model(self, from_table: str, column_names: Dict[str, Tuple[str, str]]) -> Any: if self.destination_type == DestinationType.ORACLE: table_alias = "" else: table_alias = "as table_alias" template = Template( """ -- SQL model to parse JSON blob stored in a single column and extract into separated field columns as described by the JSON Schema -- depends_on: {{ from_table }} {{ unnesting_before_query }} select {%- if parent_hash_id %} {{ parent_hash_id }}, {%- endif %} {%- for field in fields %} {{ field }}, {%- endfor %} {{ col_ab_id }}, {{ col_emitted_at }}, {{ '{{ current_timestamp() }}' }} as {{ col_normalized_at }} from {{ from_table }} {{ table_alias }} {{ sql_table_comment }} {{ unnesting_from }} where 1 = 1 {{ unnesting_where }} """ ) sql = template.render( col_ab_id=self.get_ab_id(), col_emitted_at=self.get_emitted_at(), col_normalized_at=self.get_normalized_at(), table_alias=table_alias, unnesting_before_query=self.unnesting_before_query(from_table), parent_hash_id=self.parent_hash_id(), fields=self.extract_json_columns(column_names), from_table=jinja_call(from_table), unnesting_from=self.unnesting_from(), unnesting_where=self.unnesting_where(), sql_table_comment=self.sql_table_comment(), ) return sql def get_ab_id(self, in_jinja: bool = False): # this is also tied to dbt-project-template/macros/should_full_refresh.sql # as it is needed by the macro should_full_refresh return self.name_transformer.normalize_column_name(self.airbyte_ab_id, in_jinja, False) def get_emitted_at(self, in_jinja: bool = False): return self.name_transformer.normalize_column_name(self.airbyte_emitted_at, in_jinja, False) def get_normalized_at(self, in_jinja: bool = False): return self.name_transformer.normalize_column_name(self.airbyte_normalized_at, in_jinja, False) def get_unique_key(self, in_jinja: bool = False): return self.name_transformer.normalize_column_name(self.airbyte_unique_key, in_jinja, False) def extract_json_columns(self, column_names: Dict[str, 
Tuple[str, str]]) -> List[str]: return [ self.extract_json_column(field, self.json_column_name, self.properties[field], column_names[field][0], "table_alias") for field in column_names ] @staticmethod def extract_json_column(property_name: str, json_column_name: str, definition: Dict, column_name: str, table_alias: str) -> str: json_path = [property_name] # In some cases, some destination aren't able to parse the JSON blob using the original property name # we make their life easier by using a pre-populated and sanitized column name instead... normalized_json_path = [transform_json_naming(property_name)] table_alias = f"{table_alias}" if "unnested_column_value" in json_column_name: table_alias = "" json_extract = jinja_call(f"json_extract('{table_alias}', {json_column_name}, {json_path})") if "type" in definition: if is_array(definition["type"]): json_extract = jinja_call(f"json_extract_array({json_column_name}, {json_path}, {normalized_json_path})") if is_simple_property(definition.get("items", {"type": "object"})): json_extract = jinja_call(f"json_extract_string_array({json_column_name}, {json_path}, {normalized_json_path})") elif is_object(definition["type"]): json_extract = jinja_call(f"json_extract('{table_alias}', {json_column_name}, {json_path}, {normalized_json_path})") elif is_simple_property(definition): json_extract = jinja_call(f"json_extract_scalar({json_column_name}, {json_path}, {normalized_json_path})") return f"{json_extract} as {column_name}" def generate_column_typing_model(self, from_table: str, column_names: Dict[str, Tuple[str, str]]) -> Any: template = Template( """ -- SQL model to cast each column to its adequate SQL type converted from the JSON schema type -- depends_on: {{ from_table }} select {%- if parent_hash_id %} {{ parent_hash_id }}, {%- endif %} {%- for field in fields %} {{ field }}, {%- endfor %} {{ col_ab_id }}, {{ col_emitted_at }}, {{ '{{ current_timestamp() }}' }} as {{ col_normalized_at }} from {{ from_table }} {{ 
sql_table_comment }} where 1 = 1 """ ) sql = template.render( col_ab_id=self.get_ab_id(), col_emitted_at=self.get_emitted_at(), col_normalized_at=self.get_normalized_at(), parent_hash_id=self.parent_hash_id(), fields=self.cast_property_types(column_names), from_table=jinja_call(from_table), sql_table_comment=self.sql_table_comment(), ) return sql def cast_property_types(self, column_names: Dict[str, Tuple[str, str]]) -> List[str]: return [self.cast_property_type(field, column_names[field][0], column_names[field][1]) for field in column_names] def cast_property_type(self, property_name: str, column_name: str, jinja_column: str) -> Any: # noqa: C901 definition = self.properties[property_name] if "type" not in definition: print(f"WARN: Unknown type for column {property_name} at {self.current_json_path()}") return column_name elif is_array(definition["type"]): return column_name elif is_object(definition["type"]): sql_type = jinja_call("type_json()") # Treat simple types from narrower to wider scope type: boolean < integer < number < string elif is_boolean(definition["type"], definition): cast_operation = jinja_call(f"cast_to_boolean({jinja_column})") return f"{cast_operation} as {column_name}" elif is_big_integer(definition): sql_type = jinja_call("type_very_large_integer()") elif is_long(definition["type"], definition): sql_type = jinja_call("dbt_utils.type_bigint()") elif is_number(definition["type"]): sql_type = jinja_call("dbt_utils.type_float()") elif is_datetime(definition): if self.destination_type == DestinationType.SNOWFLAKE: # snowflake uses case when statement to parse timestamp field # in this case [cast] operator is not needed as data already converted to timestamp type if is_datetime_without_timezone(definition): return self.generate_snowflake_timestamp_statement(column_name) return self.generate_snowflake_timestamp_tz_statement(column_name) if self.destination_type == DestinationType.MYSQL and is_datetime_without_timezone(definition): # MySQL does not 
support [cast] and [nullif] functions together return self.generate_mysql_datetime_format_statement(column_name) replace_operation = jinja_call(f"empty_string_to_null({jinja_column})") if self.destination_type.value == DestinationType.MSSQL.value: # in case of datetime, we don't need to use [cast] function, use try_parse instead. if is_datetime_with_timezone(definition): sql_type = jinja_call("type_timestamp_with_timezone()") else: sql_type = jinja_call("type_timestamp_without_timezone()") return f"try_parse({replace_operation} as {sql_type}) as {column_name}" if self.destination_type == DestinationType.CLICKHOUSE: return f"parseDateTime64BestEffortOrNull(trim(BOTH '\"' from {replace_operation})) as {column_name}" # in all other cases if is_datetime_without_timezone(definition): sql_type = jinja_call("type_timestamp_without_timezone()") else: sql_type = jinja_call("type_timestamp_with_timezone()") return f"cast({replace_operation} as {sql_type}) as {column_name}" elif is_date(definition): if ( self.destination_type.value == DestinationType.MYSQL.value or self.destination_type.value == DestinationType.TIDB.value or self.destination_type.value == DestinationType.DUCKDB.value ): # MySQL does not support [cast] and [nullif] functions together return self.generate_mysql_date_format_statement(column_name) replace_operation = jinja_call(f"empty_string_to_null({jinja_column})") if self.destination_type.value == DestinationType.MSSQL.value: # in case of date, we don't need to use [cast] function, use try_parse instead. 
sql_type = jinja_call("type_date()") return f"try_parse({replace_operation} as {sql_type}) as {column_name}" if self.destination_type == DestinationType.CLICKHOUSE: return f"toDate(parseDateTimeBestEffortOrNull(trim(BOTH '\"' from {replace_operation}))) as {column_name}" # in all other cases sql_type = jinja_call("type_date()") return f"cast({replace_operation} as {sql_type}) as {column_name}" elif is_time(definition): if is_time_with_timezone(definition): sql_type = jinja_call("type_time_with_timezone()") else: sql_type = jinja_call("type_time_without_timezone()") if self.destination_type == DestinationType.CLICKHOUSE: trimmed_column_name = f"trim(BOTH '\"' from {column_name})" sql_type = f"'{sql_type}'" return f"nullif(accurateCastOrNull({trimmed_column_name}, {sql_type}), 'null') as {column_name}" if ( self.destination_type == DestinationType.MYSQL or self.destination_type == DestinationType.TIDB or self.destination_type == DestinationType.DUCKDB ): return f'nullif(cast({column_name} as {sql_type}), "") as {column_name}' replace_operation = jinja_call(f"empty_string_to_null({jinja_column})") return f"cast({replace_operation} as {sql_type}) as {column_name}" elif is_string(definition["type"]): sql_type = jinja_call("dbt_utils.type_string()") if self.destination_type == DestinationType.CLICKHOUSE: trimmed_column_name = f"trim(BOTH '\"' from {column_name})" sql_type = f"'{sql_type}'" return f"nullif(accurateCastOrNull({trimmed_column_name}, {sql_type}), 'null') as {column_name}" elif self.destination_type == DestinationType.MYSQL: # Cast to `text` datatype. 
See https://github.com/airbytehq/airbyte/issues/7994 sql_type = f"{sql_type}(1024)" else: print(f"WARN: Unknown type {definition['type']} for column {property_name} at {self.current_json_path()}") return column_name if self.destination_type == DestinationType.CLICKHOUSE: return f"accurateCastOrNull({column_name}, '{sql_type}') as {column_name}" else: return f"cast({column_name} as {sql_type}) as {column_name}" @staticmethod def generate_mysql_date_format_statement(column_name: str) -> Any: template = Template( """ case when {{column_name}} = '' then NULL else cast({{column_name}} as date) end as {{column_name}} """ ) return template.render(column_name=column_name) @staticmethod def generate_mysql_datetime_format_statement(column_name: str) -> Any: regexp = r"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*" template = Template( """ case when {{column_name}} regexp '{{regexp}}' THEN STR_TO_DATE(SUBSTR({{column_name}}, 1, 19), '%Y-%m-%dT%H:%i:%S') else cast(if({{column_name}} = '', NULL, {{column_name}}) as datetime) end as {{column_name}} """ ) return template.render(column_name=column_name, regexp=regexp) @staticmethod def generate_snowflake_timestamp_tz_statement(column_name: str) -> Any: """ Generates snowflake DB specific timestamp case when statement """ formats = [ {"regex": r"\\d{4}-\\d{2}-\\d{2}T(\\d{2}:){2}\\d{2}(\\+|-)\\d{4}", "format": "YYYY-MM-DDTHH24:MI:SSTZHTZM"}, {"regex": r"\\d{4}-\\d{2}-\\d{2}T(\\d{2}:){2}\\d{2}(\\+|-)\\d{2}", "format": "YYYY-MM-DDTHH24:MI:SSTZH"}, { "regex": r"\\d{4}-\\d{2}-\\d{2}T(\\d{2}:){2}\\d{2}\\.\\d{1,7}(\\+|-)\\d{4}", "format": "YYYY-MM-DDTHH24:MI:SS.FFTZHTZM", }, {"regex": r"\\d{4}-\\d{2}-\\d{2}T(\\d{2}:){2}\\d{2}\\.\\d{1,7}(\\+|-)\\d{2}", "format": "YYYY-MM-DDTHH24:MI:SS.FFTZH"}, ] template = Template( """ case {% for format_item in formats %} when {{column_name}} regexp '{{format_item['regex']}}' then to_timestamp_tz({{column_name}}, '{{format_item['format']}}') {% endfor %} when {{column_name}} = '' then NULL else 
to_timestamp_tz({{column_name}}) end as {{column_name}} """ ) return template.render(formats=formats, column_name=column_name) @staticmethod def generate_snowflake_timestamp_statement(column_name: str) -> Any: """ Generates snowflake DB specific timestamp case when statement """ formats = [ {"regex": r"\\d{4}-\\d{2}-\\d{2}T(\\d{2}:){2}\\d{2}", "format": "YYYY-MM-DDTHH24:MI:SS"}, {"regex": r"\\d{4}-\\d{2}-\\d{2}T(\\d{2}:){2}\\d{2}\\.\\d{1,7}", "format": "YYYY-MM-DDTHH24:MI:SS.FF"}, ] template = Template( """ case {% for format_item in formats %} when {{column_name}} regexp '{{format_item['regex']}}' then to_timestamp({{column_name}}, '{{format_item['format']}}') {% endfor %} when {{column_name}} = '' then NULL else to_timestamp({{column_name}}) end as {{column_name}} """ ) return template.render(formats=formats, column_name=column_name) def generate_id_hashing_model(self, from_table: str, column_names: Dict[str, Tuple[str, str]]) -> Any: template = Template( """ -- SQL model to build a hash column based on the values of this record -- depends_on: {{ from_table }} select {{ '{{' }} dbt_utils.surrogate_key([ {%- if parent_hash_id %} {{ parent_hash_id }}, {%- endif %} {%- for field in fields %} {{ field }}, {%- endfor %} ]) {{ '}}' }} as {{ hash_id }}, tmp.* from {{ from_table }} tmp {{ sql_table_comment }} where 1 = 1 """ ) sql = template.render( parent_hash_id=self.parent_hash_id(in_jinja=True), fields=self.safe_cast_to_strings(column_names), hash_id=self.hash_id(), from_table=jinja_call(from_table), sql_table_comment=self.sql_table_comment(), ) return sql def safe_cast_to_strings(self, column_names: Dict[str, Tuple[str, str]]) -> List[str]: return [ StreamProcessor.safe_cast_to_string(self.properties[field], column_names[field][1], self.destination_type) for field in column_names ] @staticmethod def safe_cast_to_string(definition: Dict, column_name: str, destination_type: DestinationType) -> str: """ Note that the result from this static method should always be used 
within a jinja context (for example, from jinja macro surrogate_key call) The jinja_remove function is necessary because of Oracle database, some columns are created with {{ quote('column_name') }} and reused the same fields for this operation. Because the quote is injected inside a jinja macro we need to remove the curly brackets. """ if "type" not in definition: col = column_name elif is_boolean(definition["type"], definition): col = f"boolean_to_string({column_name})" elif is_array(definition["type"]): col = f"array_to_string({column_name})" elif is_object(definition["type"]): col = f"object_to_string({column_name})" else: col = column_name if destination_type == DestinationType.ORACLE: quote_in_parenthesis = re.compile(r"quote\((.*)\)") return remove_jinja(col) if quote_in_parenthesis.findall(col) else col return col def generate_scd_type_2_model(self, from_table: str, column_names: Dict[str, Tuple[str, str]]) -> Any: """ This model pulls data from the ID-hashing model and appends it to a log of record updates. When inserting an update to a record, it also checks whether that record had a previously-existing row in the SCD model; if it does, then that previous row's end_at column is set to the new update's start_at. See the docs for more details: https://docs.airbyte.com/understanding-airbyte/basic-normalization#normalization-metadata-columns """ cursor_field = self.get_cursor_field(column_names) order_null = f"is null asc,\n {cursor_field} desc" if self.destination_type.value == DestinationType.ORACLE.value: order_null = "desc nulls last" if self.destination_type.value == DestinationType.MSSQL.value: # SQL Server treats NULL values as the lowest values, thus NULLs come last when desc. 
order_null = "desc" lag_begin = "lag" lag_end = "" input_data_table = "input_data" if self.destination_type == DestinationType.CLICKHOUSE: # ClickHouse doesn't support lag() yet, this is a workaround solution # Ref: https://clickhouse.com/docs/en/sql-reference/window-functions/ lag_begin = "anyOrNull" lag_end = " ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING" input_data_table = "input_data_with_active_row_num" enable_left_join_null = "" cast_begin = "cast(" cast_as = " as " cast_end = ")" if self.destination_type == DestinationType.CLICKHOUSE: enable_left_join_null = "--" cast_begin = "accurateCastOrNull(" cast_as = ", '" cast_end = "')" # TODO move all cdc columns out of scd models cdc_active_row_pattern = "" cdc_updated_order_pattern = "" cdc_cols = "" quoted_cdc_cols = "" if "_ab_cdc_deleted_at" in column_names.keys(): col_cdc_deleted_at = self.name_transformer.normalize_column_name("_ab_cdc_deleted_at") col_cdc_updated_at = self.name_transformer.normalize_column_name("_ab_cdc_updated_at") quoted_col_cdc_deleted_at = self.name_transformer.normalize_column_name("_ab_cdc_deleted_at", in_jinja=True) quoted_col_cdc_updated_at = self.name_transformer.normalize_column_name("_ab_cdc_updated_at", in_jinja=True) cdc_active_row_pattern = f" and {col_cdc_deleted_at} is null" cdc_updated_order_pattern = f"\n {col_cdc_updated_at} desc," cdc_cols = ( f", {cast_begin}{col_cdc_deleted_at}{cast_as}" + "{{ dbt_utils.type_string() }}" + f"{cast_end}" + f", {cast_begin}{col_cdc_updated_at}{cast_as}" + "{{ dbt_utils.type_string() }}" + f"{cast_end}" ) quoted_cdc_cols = f", {quoted_col_cdc_deleted_at}, {quoted_col_cdc_updated_at}" if "_ab_cdc_log_pos" in column_names.keys(): col_cdc_log_pos = self.name_transformer.normalize_column_name("_ab_cdc_log_pos") quoted_col_cdc_log_pos = self.name_transformer.normalize_column_name("_ab_cdc_log_pos", in_jinja=True) cdc_updated_order_pattern += f"\n {col_cdc_log_pos} desc," cdc_cols += "".join([", ", cast_begin, col_cdc_log_pos, cast_as, "{{ 
dbt_utils.type_string() }}", cast_end]) quoted_cdc_cols += f", {quoted_col_cdc_log_pos}" if "_ab_cdc_lsn" in column_names.keys(): col_cdc_lsn = self.name_transformer.normalize_column_name("_ab_cdc_lsn") quoted_col_cdc_lsn = self.name_transformer.normalize_column_name("_ab_cdc_lsn", in_jinja=True) cdc_updated_order_pattern += f"\n {col_cdc_lsn} desc," cdc_cols += "".join([", ", cast_begin, col_cdc_lsn, cast_as, "{{ dbt_utils.type_string() }}", cast_end]) quoted_cdc_cols += f", {quoted_col_cdc_lsn}" if ( self.destination_type == DestinationType.BIGQUERY and self.get_cursor_field_property_name(column_names) != self.airbyte_emitted_at and is_number(self.properties[self.get_cursor_field_property_name(column_names)]["type"]) ): # partition by float columns is not allowed in BigQuery, cast it to string airbyte_start_at_string = ( cast_begin + self.name_transformer.normalize_column_name("_airbyte_start_at") + cast_as + "{{ dbt_utils.type_string() }}" + cast_end ) else: airbyte_start_at_string = self.name_transformer.normalize_column_name("_airbyte_start_at") jinja_variables = { "active_row": self.name_transformer.normalize_column_name("_airbyte_active_row"), "airbyte_end_at": self.name_transformer.normalize_column_name("_airbyte_end_at"), "airbyte_row_num": self.name_transformer.normalize_column_name("_airbyte_row_num"), "airbyte_start_at": self.name_transformer.normalize_column_name("_airbyte_start_at"), "airbyte_start_at_string": airbyte_start_at_string, "airbyte_unique_key_scd": self.name_transformer.normalize_column_name(f"{self.airbyte_unique_key}_scd"), "cdc_active_row": cdc_active_row_pattern, "cdc_cols": cdc_cols, "cdc_updated_at_order": cdc_updated_order_pattern, "col_ab_id": self.get_ab_id(), "col_emitted_at": self.get_emitted_at(), "col_normalized_at": self.get_normalized_at(), "cursor_field": cursor_field, "enable_left_join_null": enable_left_join_null, "fields": self.list_fields(column_names), "from_table": from_table, "hash_id": self.hash_id(), 
"incremental_clause": self.get_incremental_clause("this"), "input_data_table": input_data_table, "lag_begin": lag_begin, "lag_end": lag_end, "order_null": order_null, "parent_hash_id": self.parent_hash_id(), "primary_key_partition": self.get_primary_key_partition(column_names), "primary_keys": self.list_primary_keys(column_names), "quoted_airbyte_row_num": self.name_transformer.normalize_column_name("_airbyte_row_num", in_jinja=True), "quoted_airbyte_start_at": self.name_transformer.normalize_column_name("_airbyte_start_at", in_jinja=True), "quoted_cdc_cols": quoted_cdc_cols, "quoted_col_emitted_at": self.get_emitted_at(in_jinja=True), "quoted_unique_key": self.get_unique_key(in_jinja=True), "sql_table_comment": self.sql_table_comment(include_from_table=True), "unique_key": self.get_unique_key(), } if self.destination_type == DestinationType.CLICKHOUSE: clickhouse_active_row_sql = Template( """ input_data_with_active_row_num as ( select *, row_number() over ( partition by {{ primary_key_partition | join(", ") }} order by {{ cursor_field }} {{ order_null }},{{ cdc_updated_at_order }} {{ col_emitted_at }} desc ) as _airbyte_active_row_num from input_data ),""" ).render(jinja_variables) jinja_variables["clickhouse_active_row_sql"] = clickhouse_active_row_sql scd_columns_sql = Template( """ case when _airbyte_active_row_num = 1{{ cdc_active_row }} then 1 else 0 end as {{ active_row }}, {{ lag_begin }}({{ cursor_field }}) over ( partition by {{ primary_key_partition | join(", ") }} order by {{ cursor_field }} {{ order_null }},{{ cdc_updated_at_order }} {{ col_emitted_at }} desc {{ lag_end }}) as {{ airbyte_end_at }}""" ).render(jinja_variables) jinja_variables["scd_columns_sql"] = scd_columns_sql else: scd_columns_sql = Template( """ lag({{ cursor_field }}) over ( partition by {{ primary_key_partition | join(", ") }} order by {{ cursor_field }} {{ order_null }},{{ cdc_updated_at_order }} {{ col_emitted_at }} desc ) as {{ airbyte_end_at }}, case when row_number() over ( 
partition by {{ primary_key_partition | join(", ") }} order by {{ cursor_field }} {{ order_null }},{{ cdc_updated_at_order }} {{ col_emitted_at }} desc ) = 1{{ cdc_active_row }} then 1 else 0 end as {{ active_row }}""" ).render(jinja_variables) jinja_variables["scd_columns_sql"] = scd_columns_sql sql = Template( """ -- depends_on: {{ from_table }} with {{ '{% if is_incremental() %}' }} new_data as ( -- retrieve incremental "new" data select * from {{'{{'}} {{ from_table }} {{'}}'}} {{ sql_table_comment }} where 1 = 1 {{ incremental_clause }} ), new_data_ids as ( -- build a subset of {{ unique_key }} from rows that are new select distinct {{ '{{' }} dbt_utils.surrogate_key([ {%- for primary_key in primary_keys %} {{ primary_key }}, {%- endfor %} ]) {{ '}}' }} as {{ unique_key }} from new_data ), empty_new_data as ( -- build an empty table to only keep the table's column types select * from new_data where 1 = 0 ), previous_active_scd_data as ( -- retrieve "incomplete old" data that needs to be updated with an end date because of new changes select {{ '{{' }} star_intersect({{ from_table }}, this, from_alias='inc_data', intersect_alias='this_data') {{ '}}' }} from {{ '{{ this }}' }} as this_data -- make a join with new_data using primary key to filter active data that need to be updated only join new_data_ids on this_data.{{ unique_key }} = new_data_ids.{{ unique_key }} -- force left join to NULL values (we just need to transfer column types only for the star_intersect macro on schema changes) {{ enable_left_join_null }}left join empty_new_data as inc_data on this_data.{{ col_ab_id }} = inc_data.{{ col_ab_id }} where {{ active_row }} = 1 ), input_data as ( select {{ '{{' }} dbt_utils.star({{ from_table }}) {{ '}}' }} from new_data union all select {{ '{{' }} dbt_utils.star({{ from_table }}) {{ '}}' }} from previous_active_scd_data ), {{ '{% else %}' }} input_data as ( select * from {{'{{'}} {{ from_table }} {{'}}'}} {{ sql_table_comment }} ), {{ '{% endif %}' }} {{ 
clickhouse_active_row_sql }} scd_data as ( -- SQL model to build a Type 2 Slowly Changing Dimension (SCD) table for each record identified by their primary key select {%- if parent_hash_id %} {{ parent_hash_id }}, {%- endif %} {{ '{{' }} dbt_utils.surrogate_key([ {%- for primary_key in primary_keys %} {{ primary_key }}, {%- endfor %} ]) {{ '}}' }} as {{ unique_key }}, {%- for field in fields %} {{ field }}, {%- endfor %} {{ cursor_field }} as {{ airbyte_start_at }}, {{ scd_columns_sql }}, {{ col_ab_id }}, {{ col_emitted_at }}, {{ hash_id }} from {{ input_data_table }} ), dedup_data as ( select -- we need to ensure de-duplicated rows for merge/update queries -- additionally, we generate a unique key for the scd table row_number() over ( partition by {{ unique_key }}, {{ airbyte_start_at_string }}, {{ col_emitted_at }}{{ cdc_cols }} order by {{ active_row }} desc, {{ col_ab_id }} ) as {{ airbyte_row_num }}, {{ '{{' }} dbt_utils.surrogate_key([ {{ quoted_unique_key }}, {{ quoted_airbyte_start_at }}, {{ quoted_col_emitted_at }}{{ quoted_cdc_cols }} ]) {{ '}}' }} as {{ airbyte_unique_key_scd }}, scd_data.* from scd_data ) select {%- if parent_hash_id %} {{ parent_hash_id }}, {%- endif %} {{ unique_key }}, {{ airbyte_unique_key_scd }}, {%- for field in fields %} {{ field }}, {%- endfor %} {{ airbyte_start_at }}, {{ airbyte_end_at }}, {{ active_row }}, {{ col_ab_id }}, {{ col_emitted_at }}, {{ '{{ current_timestamp() }}' }} as {{ col_normalized_at }}, {{ hash_id }} from dedup_data where {{ airbyte_row_num }} = 1 """ ).render(jinja_variables) return sql def get_cursor_field_property_name(self, column_names: Dict[str, Tuple[str, str]]) -> str: if not self.cursor_field: if "_ab_cdc_updated_at" in column_names.keys(): return "_ab_cdc_updated_at" elif "_ab_cdc_log_pos" in column_names.keys(): return "_ab_cdc_log_pos" elif "_ab_cdc_lsn" in column_names.keys(): return "_ab_cdc_lsn" else: return self.airbyte_emitted_at elif len(self.cursor_field) == 1: return self.cursor_field[0] 
        else:
            raise ValueError(f"Unsupported nested cursor field {'.'.join(self.cursor_field)} for stream {self.stream_name}")

    def get_cursor_field(self, column_names: Dict[str, Tuple[str, str]], in_jinja: bool = False) -> str:
        # Resolve the configured cursor to a normalized SQL column name. When no cursor
        # was configured, fall back to the CDC / emitted_at column chosen by
        # get_cursor_field_property_name. Only single-element (non-nested) cursor
        # paths are supported.
        if not self.cursor_field:
            cursor = self.name_transformer.normalize_column_name(self.get_cursor_field_property_name(column_names), in_jinja)
        elif len(self.cursor_field) == 1:
            if not is_airbyte_column(self.cursor_field[0]):
                cursor = column_names[self.cursor_field[0]][0]
            else:
                # using an airbyte generated column
                cursor = self.cursor_field[0]
        else:
            raise ValueError(f"Unsupported nested cursor field {'.'.join(self.cursor_field)} for stream {self.stream_name}")
        return cursor

    def list_primary_keys(self, column_names: Dict[str, Tuple[str, str]]) -> List[str]:
        # Return the quoted (jinja) column names of the primary key; nested key paths
        # are not supported and raise.
        primary_keys = []
        for key_path in self.primary_key:
            if len(key_path) == 1:
                primary_keys.append(column_names[key_path[0]][1])
            else:
                raise ValueError(f"Unsupported nested path {'.'.join(key_path)} for stream {self.stream_name}")
        return primary_keys

    def get_primary_key_partition(self, column_names: Dict[str, Tuple[str, str]]) -> List[str]:
        # SQL expressions (one per key path) used to partition rows in the SCD model;
        # a stream in dedup mode without a primary key is a configuration error.
        if self.primary_key and len(self.primary_key) > 0:
            return [self.get_primary_key_from_path(column_names, path) for path in self.primary_key]
        else:
            raise ValueError(f"No primary key specified for stream {self.stream_name}")

    def get_primary_key_from_path(self, column_names: Dict[str, Tuple[str, str]], path: List[str]) -> str:
        if path and len(path) == 1:
            field = path[0]
            if not is_airbyte_column(field):
                if "type" in self.properties[field]:
                    property_type = self.properties[field]["type"]
                else:
                    # properties without a 'type' entry are treated as objects
                    property_type = "object"
                if is_number(property_type) or is_object(property_type):
                    # some destinations don't handle float columns (or complex types) as primary keys, turn them to string
                    return f"cast({column_names[field][0]} as {jinja_call('dbt_utils.type_string()')})"
                else:
                    return column_names[field][0]
            else:
                # using an airbyte generated column
                return f"cast({field} as 
{jinja_call('dbt_utils.type_string()')})" else: if path: raise ValueError(f"Unsupported nested path {'.'.join(path)} for stream {self.stream_name}") else: raise ValueError(f"No path specified for stream {self.stream_name}") def generate_final_model(self, from_table: str, column_names: Dict[str, Tuple[str, str]], unique_key: str = "") -> Any: """ This is the table that the user actually wants. In addition to the columns that the source outputs, it has some additional metadata columns; see the basic normalization docs for an explanation: https://docs.airbyte.com/understanding-airbyte/basic-normalization#normalization-metadata-columns """ template = Template( """ -- Final base SQL model -- depends_on: {{ from_table }} select {%- if parent_hash_id %} {{ parent_hash_id }}, {%- endif %} {%- if unique_key %} {{ unique_key }}, {%- endif %} {%- for field in fields %} {{ field }}, {%- endfor %} {{ col_ab_id }}, {{ col_emitted_at }}, {{ '{{ current_timestamp() }}' }} as {{ col_normalized_at }}, {{ hash_id }} from {{ from_table }} {{ sql_table_comment }} where 1 = 1 """ ) sql = template.render( col_ab_id=self.get_ab_id(), col_emitted_at=self.get_emitted_at(), col_normalized_at=self.get_normalized_at(), parent_hash_id=self.parent_hash_id(), fields=self.list_fields(column_names), hash_id=self.hash_id(), from_table=jinja_call(from_table), sql_table_comment=self.sql_table_comment(include_from_table=True), unique_key=unique_key, ) return sql @staticmethod def is_incremental_mode(destination_sync_mode: DestinationSyncMode) -> bool: return destination_sync_mode.value in [DestinationSyncMode.append.value, DestinationSyncMode.append_dedup.value] def add_incremental_clause(self, sql_query: str) -> Any: template = Template( """ {{ sql_query }} {{ incremental_clause }} """ ) sql = template.render(sql_query=sql_query, incremental_clause=self.get_incremental_clause("this")) return sql def get_incremental_clause(self, tablename: str) -> Any: return 
self.get_incremental_clause_for_column(tablename, self.get_emitted_at(in_jinja=True)) def get_incremental_clause_for_column(self, tablename: str, column: str) -> Any: return "{{ incremental_clause(" + column + ", " + tablename + ") }}" @staticmethod def list_fields(column_names: Dict[str, Tuple[str, str]]) -> List[str]: return [column_names[field][0] for field in column_names] def add_to_outputs( self, sql: str, materialization_mode: TableMaterializationType, is_intermediate: bool = True, suffix: str = "", unique_key: str = "", subdir: str = "", partition_by: PartitionScheme = PartitionScheme.DEFAULT, ) -> str: # Explicit function so that we can have type hints to satisfy the linter def wrap_in_quotes(s: str) -> str: return '"' + s + '"' schema = self.get_schema(is_intermediate) # MySQL table names need to be manually truncated, because it does not do it automatically truncate_name = ( self.destination_type == DestinationType.MYSQL or self.destination_type == DestinationType.TIDB or self.destination_type == DestinationType.DUCKDB ) table_name = self.tables_registry.get_table_name(schema, self.json_path, self.stream_name, suffix, truncate_name) file_name = self.tables_registry.get_file_name(schema, self.json_path, self.stream_name, suffix, truncate_name) file = f"{file_name}.sql" output = os.path.join(materialization_mode.value, subdir, self.schema, file) config = self.get_model_partition_config(partition_by, unique_key) if file_name != table_name: # The alias() macro configs a model's final table name. 
config["alias"] = f'"{table_name}"' if self.destination_type == DestinationType.ORACLE: # oracle does not allow changing schemas config["schema"] = f'"{self.default_schema}"' else: config["schema"] = f'"{schema}"' if self.is_incremental_mode(self.destination_sync_mode): stg_schema = self.get_schema(True) stg_table = self.tables_registry.get_file_name(schema, self.json_path, self.stream_name, "stg", truncate_name) if self.name_transformer.needs_quotes(stg_table): stg_table = jinja_call(self.name_transformer.apply_quote(stg_table)) if suffix == "scd": hooks = [] final_table_name = self.tables_registry.get_file_name(schema, self.json_path, self.stream_name, "", truncate_name) active_row_column_name = self.name_transformer.normalize_column_name("_airbyte_active_row") clickhouse_nullable_join_setting = "" if self.destination_type == DestinationType.CLICKHOUSE: # Clickhouse has special delete syntax delete_statement = "alter table {{ final_table_relation }} delete" unique_key_reference = self.get_unique_key(in_jinja=False) noop_delete_statement = "alter table {{ this }} delete where 1=0" # Without this, our LEFT JOIN would return empty string for non-matching rows, so our COUNT would include those rows. # We want to exclude them (this is the default behavior in other DBs) so we have to set join_use_nulls=1 clickhouse_nullable_join_setting = "SETTINGS join_use_nulls=1" elif self.destination_type == DestinationType.BIGQUERY: # Bigquery doesn't like the "delete from project.schema.table where project.schema.table.column in" syntax; # it requires "delete from project.schema.table table_alias where table_alias.column in" delete_statement = "delete from {{ final_table_relation }} final_table" unique_key_reference = "final_table." + self.get_unique_key(in_jinja=False) noop_delete_statement = "delete from {{ this }} where 1=0" else: delete_statement = "delete from {{ final_table_relation }}" unique_key_reference = "{{ final_table_relation }}." 
+ self.get_unique_key(in_jinja=False) noop_delete_statement = "delete from {{ this }} where 1=0" deletion_hook = Template( """ {{ '{%' }} set final_table_relation = adapter.get_relation( database=this.database, schema=this.schema, identifier='{{ final_table_name }}' ) {{ '%}' }} {{ '{#' }} If the final table doesn't exist, then obviously we can't delete anything from it. Also, after a reset, the final table is created without the _airbyte_unique_key column (this column is created during the first sync) So skip this deletion if the column doesn't exist. (in this case, the table is guaranteed to be empty anyway) {{ '#}' }} {{ '{%' }} if final_table_relation is not none and {{ quoted_unique_key }} in adapter.get_columns_in_relation(final_table_relation)|map(attribute='name') {{ '%}' }} -- Delete records which are no longer active: -- This query is equivalent, but the left join version is more performant: -- delete from final_table where unique_key in ( -- select unique_key from scd_table where 1 = 1 <incremental_clause(normalized_at, final_table)> -- ) and unique_key not in ( -- select unique_key from scd_table where active_row = 1 <incremental_clause(normalized_at, final_table)> -- ) -- We're incremental against normalized_at rather than emitted_at because we need to fetch the SCD -- entries that were _updated_ recently. This is because a deleted record will have an SCD record -- which was emitted a long time ago, but recently re-normalized to have active_row = 0. 
{{ delete_statement }} where {{ unique_key_reference }} in ( select recent_records.unique_key from ( select distinct {{ unique_key }} as unique_key from {{ '{{ this }}' }} where 1=1 {{ normalized_at_incremental_clause }} ) recent_records left join ( select {{ unique_key }} as unique_key, count({{ unique_key }}) as active_count from {{ '{{ this }}' }} where {{ active_row_column_name }} = 1 {{ normalized_at_incremental_clause }} group by {{ unique_key }} ) active_counts on recent_records.unique_key = active_counts.unique_key where active_count is null or active_count = 0 ) {{ '{% else %}' }} -- We have to have a non-empty query, so just do a noop delete {{ noop_delete_statement }} {{ '{% endif %}' }} """ ).render( delete_statement=delete_statement, noop_delete_statement=noop_delete_statement, final_table_name=final_table_name, unique_key=self.get_unique_key(in_jinja=False), quoted_unique_key=self.get_unique_key(in_jinja=True), active_row_column_name=active_row_column_name, normalized_at_incremental_clause=self.get_incremental_clause_for_column( "{} + '.' 
+ {}".format( self.name_transformer.apply_quote("this.schema", literal=False), self.name_transformer.apply_quote(final_table_name), ), self.get_normalized_at(in_jinja=True), ), unique_key_reference=unique_key_reference, clickhouse_nullable_join_setting=clickhouse_nullable_join_setting, ) hooks.append(deletion_hook) if self.destination_type.value == DestinationType.POSTGRES.value: # Keep only rows with the max emitted_at to keep incremental behavior hooks.append( f"delete from {stg_schema}.{stg_table} where {self.airbyte_emitted_at} != (select max({self.airbyte_emitted_at}) from {stg_schema}.{stg_table})", ) else: hooks.append(f"drop view {stg_schema}.{stg_table}") config["post_hook"] = "[" + ",".join(map(wrap_in_quotes, hooks)) + "]" else: # incremental is handled in the SCD SQL already sql = self.add_incremental_clause(sql) elif self.destination_sync_mode == DestinationSyncMode.overwrite: if suffix == "" and not is_intermediate: # drop SCD table after creating the destination table scd_table_name = self.tables_registry.get_table_name(schema, self.json_path, self.stream_name, "scd", truncate_name) print(f" Adding drop table hook for {scd_table_name} to {file_name}") hooks = [ Template( """ {{ '{%' }} set scd_table_relation = adapter.get_relation( database=this.database, schema=this.schema, identifier='{{ scd_table_name }}' ) {{ '%}' }} {{ '{%' }} if scd_table_relation is not none {{ '%}' }} {{ '{%' }} do adapter.drop_relation(scd_table_relation) {{ '%}' }} {{ '{% endif %}' }} """ ).render(scd_table_name=scd_table_name) ] config["post_hook"] = "[" + ",".join(map(wrap_in_quotes, hooks)) + "]" template = Template( """ {{ '{{' }} config( {%- for key in config %} {{ key }} = {{ config[key] }}, {%- endfor %} tags = [ {{ tags }} ] ) {{ '}}' }} {{ sql }} """ ) self.sql_outputs[output] = template.render(config=config, sql=sql, tags=self.get_model_tags(is_intermediate)) json_path = self.current_json_path() print(f" Generating {output} from {json_path}") 
self.models_to_source[file_name] = self.get_stream_source() return str(dbt_macro.Ref(file_name)) def get_model_materialization_mode(self, is_intermediate: bool, column_count: int = 0) -> TableMaterializationType: if is_intermediate: if column_count <= MAXIMUM_COLUMNS_TO_USE_EPHEMERAL: return TableMaterializationType.CTE else: # dbt throws "maximum recursion depth exceeded" exception at runtime # if ephemeral is used with large number of columns, use views instead return TableMaterializationType.VIEW else: if self.is_incremental_mode(self.destination_sync_mode): return TableMaterializationType.INCREMENTAL else: return TableMaterializationType.TABLE def get_model_partition_config(self, partition_by: PartitionScheme, unique_key: str) -> Dict: """ Defines partition, clustering and unique key parameters for each destination. The goal of these are to make read more performant. In general, we need to do lookups on the last emitted_at column to know if a record is freshly produced and need to be incrementally processed or not. But in certain models, such as SCD tables for example, we also need to retrieve older data to update their type 2 SCD end_dates, thus a different partitioning scheme is used to optimize that use case. 
""" config = {} if self.destination_type == DestinationType.BIGQUERY: # see https://docs.getdbt.com/reference/resource-configs/bigquery-configs if partition_by == PartitionScheme.UNIQUE_KEY: config["cluster_by"] = f'["{self.airbyte_unique_key}","{self.airbyte_emitted_at}"]' elif partition_by == PartitionScheme.ACTIVE_ROW: config["cluster_by"] = f'["{self.airbyte_unique_key}_scd","{self.airbyte_emitted_at}"]' else: config["cluster_by"] = f'"{self.airbyte_emitted_at}"' if partition_by == PartitionScheme.ACTIVE_ROW: config["partition_by"] = ( '{"field": "_airbyte_active_row", "data_type": "int64", ' '"range": {"start": 0, "end": 1, "interval": 1}}' ) elif partition_by == PartitionScheme.NOTHING: pass else: config["partition_by"] = '{"field": "' + self.airbyte_emitted_at + '", "data_type": "timestamp", "granularity": "day"}' elif self.destination_type == DestinationType.POSTGRES: # see https://docs.getdbt.com/reference/resource-configs/postgres-configs if partition_by == PartitionScheme.ACTIVE_ROW: config["indexes"] = ( "[{'columns':['_airbyte_active_row','" + self.airbyte_unique_key + "_scd','" + self.airbyte_emitted_at + "'],'type': 'btree'}]" ) elif partition_by == PartitionScheme.UNIQUE_KEY: config["indexes"] = "[{'columns':['" + self.airbyte_unique_key + "'],'unique':True}]" else: config["indexes"] = "[{'columns':['" + self.airbyte_emitted_at + "'],'type':'btree'}]" elif self.destination_type == DestinationType.REDSHIFT: # see https://docs.getdbt.com/reference/resource-configs/redshift-configs if partition_by == PartitionScheme.ACTIVE_ROW: config["sort"] = f'["_airbyte_active_row", "{self.airbyte_unique_key}_scd", "{self.airbyte_emitted_at}"]' elif partition_by == PartitionScheme.UNIQUE_KEY: config["sort"] = f'["{self.airbyte_unique_key}", "{self.airbyte_emitted_at}"]' elif partition_by == PartitionScheme.NOTHING: pass else: config["sort"] = f'"{self.airbyte_emitted_at}"' elif self.destination_type == DestinationType.SNOWFLAKE: # see 
https://docs.getdbt.com/reference/resource-configs/snowflake-configs if partition_by == PartitionScheme.ACTIVE_ROW: config[ "cluster_by" ] = f'["_AIRBYTE_ACTIVE_ROW", "{self.airbyte_unique_key.upper()}_SCD", "{self.airbyte_emitted_at.upper()}"]' elif partition_by == PartitionScheme.UNIQUE_KEY: config["cluster_by"] = f'["{self.airbyte_unique_key.upper()}", "{self.airbyte_emitted_at.upper()}"]' elif partition_by == PartitionScheme.NOTHING: pass else: config["cluster_by"] = f'["{self.airbyte_emitted_at.upper()}"]' if unique_key: config["unique_key"] = f'"{unique_key}"' elif not self.parent: # in nested arrays, each element is sharing the same _airbyte_ab_id, so it's not unique config["unique_key"] = self.get_ab_id(in_jinja=True) return config def get_model_tags(self, is_intermediate: bool) -> str: tags = "" if self.parent: tags += "nested" else: tags += "top-level" if is_intermediate: tags += "-intermediate" return f'"{tags}"' def get_schema(self, is_intermediate: bool) -> str: if is_intermediate: return self.raw_schema else: return self.schema def current_json_path(self) -> str: return "/".join(self.json_path) def normalized_stream_name(self) -> str: """ This is the normalized name of this stream to be used as a table (different as referring it as a column). Note that it might not be the actual table name in case of collisions with other streams (see actual_table_name)... 
""" return self.name_transformer.normalize_table_name(self.stream_name) def sql_table_comment(self, include_from_table: bool = False) -> str: result = f"-- {self.normalized_stream_name()}" if len(self.json_path) > 1: result += f" at {self.current_json_path()}" if include_from_table: from_table = jinja_call(self.from_table) result += f" from {from_table}" return result def hash_id(self, in_jinja: bool = False) -> str: hash_id_col = f"_airbyte_{self.normalized_stream_name()}_hashid" if self.parent: if self.normalized_stream_name().lower() == self.parent.stream_name.lower(): level = len(self.json_path) hash_id_col = f"_airbyte_{self.normalized_stream_name()}_{level}_hashid" return self.name_transformer.normalize_column_name(hash_id_col, in_jinja) # Nested Streams def parent_hash_id(self, in_jinja: bool = False) -> str: if self.parent: return self.parent.hash_id(in_jinja) return "" def unnesting_before_query(self, from_table: str) -> str: if self.parent and self.is_nested_array: parent_stream_name = f"'{self.parent.normalized_stream_name()}'" quoted_field = self.name_transformer.normalize_column_name(self.stream_name, in_jinja=True) return jinja_call(f"unnest_cte({from_table}, {parent_stream_name}, {quoted_field})") return "" def unnesting_from(self) -> str: if self.parent: if self.is_nested_array: parent_stream_name = f"'{self.parent.normalized_stream_name()}'" quoted_field = self.name_transformer.normalize_column_name(self.stream_name, in_jinja=True) return jinja_call(f"cross_join_unnest({parent_stream_name}, {quoted_field})") return "" def unnesting_where(self) -> str: if self.parent: column_name = self.name_transformer.normalize_column_name(self.stream_name) return f"and {column_name} is not null" return "" # Static Functions def find_properties_object(path: List[str], field: str, properties) -> Dict[str, Dict]: """ This function is trying to look for a nested "properties" node under the current JSON node to identify all nested objects. 
@param path JSON path traversed so far to arrive to this node @param field is the current field being considered in the Json Tree @param properties is the child tree of properties of the current field being searched """ result = {} current_path = path + [field] current = "_".join(current_path) if isinstance(properties, str) or isinstance(properties, int): return {} else: if "items" in properties: return find_properties_object(path, field, properties["items"]) elif "properties" in properties: # we found a properties object return {current: properties["properties"]} elif "type" in properties and is_simple_property(properties): # we found a basic type return {current: {}} elif isinstance(properties, dict): for key in properties.keys(): child = find_properties_object(path=current_path, field=key, properties=properties[key]) if child: result.update(child) elif isinstance(properties, list): for item in properties: child = find_properties_object(path=current_path, field=field, properties=item) if child: result.update(child) return result
StreamProcessor
python
huggingface__transformers
tests/utils/test_image_processing_utils.py
{ "start": 2871, "end": 8714 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub(tmp_repo.repo_id, token=self._token) new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_fast(self): with TemporaryHubRepo(token=self._token) as tmp_repo: image_processor = ViTImageProcessorFast.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub(tmp_repo.repo_id, token=self._token) new_image_processor = ViTImageProcessorFast.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_via_save_pretrained_fast(self): with TemporaryHubRepo(token=self._token) as tmp_repo: image_processor = ViTImageProcessorFast.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_image_processor = ViTImageProcessorFast.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, 
getattr(new_image_processor, k)) def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub(tmp_repo.repo_id, token=self._token) new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_in_organization_fast(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: image_processor = ViTImageProcessorFast.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub(tmp_repo.repo_id, token=self._token) new_image_processor = ViTImageProcessorFast.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_in_organization_via_save_pretrained(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_image_processor = ViTImageProcessor.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_in_organization_via_save_pretrained_fast(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: image_processor = ViTImageProcessorFast.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_image_processor = 
ViTImageProcessorFast.from_pretrained(tmp_repo.repo_id) for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_dynamic_image_processor(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomImageProcessor.register_for_auto_class() image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub(tmp_repo.repo_id, token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, ) new_image_processor = AutoImageProcessor.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
ImageProcessorPushToHubTester